content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
\name{find_equal_samples}
\alias{find_equal_samples}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Find equal samples
}
\description{
Finds samples that have the same peak values - x and y (equal data frames)
}
\usage{
find_equal_samples(sample.list)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{sample.list}{
list of data frames with the samples' peaks.
}
}
\value{
Returns a data frame with two columns indicating which pairs of samples are equal.
}
\examples{
## Example of finding equal samples
data(propolisSampleList)
equal.samples = find_equal_samples(propolisSampleList)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ equal }
\keyword{ sample }% __ONLY ONE__ keyword per line
| /man/find_equal_samples.Rd | no_license | Neal050617/specmine | R | false | false | 827 | rd | \name{find_equal_samples}
\alias{find_equal_samples}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Find equal samples
}
\description{
Finds samples that have the same peak values - x and y (equal data frames)
}
\usage{
find_equal_samples(sample.list)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{sample.list}{
list of data frames with the samples' peaks.
}
}
\value{
Returns a data frame with two columns indicating which pairs of samples are equal.
}
\examples{
## Example of finding equal samples
data(propolisSampleList)
equal.samples = find_equal_samples(propolisSampleList)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ equal }
\keyword{ sample }% __ONLY ONE__ keyword per line
|
# Random forest classification demo on R's built-in iris data set.
# Split the data into training and test sets and inspect their structure.
set.seed(100)  # seed must be set BEFORE sample() for a reproducible split
ind <- sample(2, nrow(iris), replace = TRUE, prob = c(0.7, 0.3))
train <- iris[ind == 1, ]
test <- iris[ind == 2, ]
str(train)
str(test)

# Tune the `mtry` parameter (number of variables tried at each split).
# Default mtry is sqrt(p) for classification models, p/3 for regression;
# the minimum mean error here is at mtry = 2, the classification default.
library(randomForest)
n <- length(names(train))
set.seed(100)
for (i in seq_len(n - 1)) {
  mtry_fit <- randomForest(Species ~ ., data = train, mtry = i)
  err <- mean(mtry_fit$err.rate)
  print(err)
}

# Choose `ntree` (number of trees, default 500): the OOB error curve
# flattens out around 600 trees, so ntree = 600 is used for the final model.
set.seed(100)
ntree_fit <- randomForest(Species ~ ., data = train, mtry = 2, ntree = 1000)
plot(ntree_fit)

# Fit and print the final model.
set.seed(100)
rf <- randomForest(Species ~ ., data = train, mtry = 2, ntree = 600,
                   importance = TRUE)
rf

# Variable importance.
importance <- importance(x = rf)
importance
set.seed(100)
varImpPlot(rf)

# Validate on the training data. predict.randomForest takes `newdata`;
# the original `data = train` was silently swallowed by `...`, so OOB
# predictions were returned instead of fitted ones.
pred1 <- predict(rf, newdata = train)
freq1 <- table(pred1, train$Species)
# Overall accuracy: trace of the confusion matrix over its total.
sum(diag(freq1)) / sum(freq1)
# margin() is computed against the responses the forest was fit to; the
# original passed test$Species, whose length does not match rf$votes.
plot(margin(rf, train$Species))
| /upload/R/18-yanjun Zhang-randomForest.R | no_license | YuminTHU/training | R | false | false | 1,303 | r | #将数据集分为训练集和测试集,并查看数据集基本属性。数据为R自带IRIS数据
# Split the data into training and test sets (R's built-in iris data).
# NOTE(review): set.seed(100) is called AFTER sample(), so this split is
# not reproducible across sessions — confirm whether that is intended.
ind<-sample(2,nrow(iris),replace=TRUE,prob=c(0.7,0.3))
set.seed(100)
train<-iris[ind==1,]
test<-iris[ind==2,]
str(train)
str(test)
# Tune the randomForest `mtry` value (variables tried per split); the
# minimum-error value here is 2, which is also the usual default.
# Default mtry: sqrt(p) for classification models, p/3 for regression.
library(randomForest)
n <- length(names(train))
set.seed(100)
for (i in 1:(n-1)){
mtry_fit <- randomForest(Species~.,data=train,mtry=i)
err <- mean(mtry_fit$err.rate)
print(err)
}
# Choose `ntree` (number of trees, default 500): the error stabilises
# around 600 trees, so ntree = 600 is used for the final model.
set.seed(100)
ntree_fit <- randomForest(Species~.,data=train,mtry=2,ntree=1000)
plot(ntree_fit)
# Fit and print the final model.
set.seed(100)
rf <- randomForest(Species~.,data=train,mtry=2,ntree=600,importance=TRUE)
rf
# Variable importance.
importance <-importance(x=rf)
importance
set.seed(100)
varImpPlot(rf)
# Validate and predict.
# NOTE(review): predict.randomForest takes `newdata`, not `data`; as
# written the argument is ignored and OOB predictions are returned.
pred1 <-predict(rf,data=train)
freq1 <- table(pred1,train$Species)
# Accuracy: trace of the confusion matrix over its total.
sum(diag(freq1))/sum(freq1)
# NOTE(review): margin() works on the training responses (rf$votes);
# test$Species has a different length — confirm train$Species was meant.
plot(margin(rf,test$Species))
|
# Script for creating the plots of chapter 6
# Author: Philip Schulz

# Binary entropy H(p) = -p*log2(p) - (1-p)*log2(1-p), using the limit
# convention 0*log2(0) = 0 so the endpoints p = 0 and p = 1 give H = 0
# instead of NaN (the original expression produced NaN plus warnings there).
binary_entropy <- function(p) {
  term <- function(q) ifelse(q > 0, -log2(q) * q, 0)
  term(p) + term(1 - p)
}

x <- seq(0, 1, 0.001)
entropy <- binary_entropy(x)
# Write the curve to an 8x8-inch, 300-dpi PNG.
png("binaryEntropy.png", width = 8, height = 8, units = "in", res = 300)
plot(x, entropy, type = "l", xlab = expression(Theta), ylab = "H(X)")
dev.off()
| /chapter6/makePlots.R | no_license | KiaraGrouwstra/LectureNotes | R | false | false | 266 | r | # Script for creating the plots of chapter 6
# Author: Philip Schulz
# Plot the binary entropy H(X) of a Bernoulli(x) variable over x in [0, 1].
x = seq(0,1,0.001)
# NOTE(review): at x = 0 and x = 1 this evaluates 0*log2(0) = NaN (with
# warnings); plot() silently drops those two endpoint points.
entropy = -log2(x)*x-log2(1-x)*(1-x)
# Write the curve to an 8x8-inch, 300-dpi PNG.
png("binaryEntropy.png", width=8, height=8, units="in", res=300)
plot(x,entropy,type="l", xlab=expression(Theta), ylab = "H(X)")
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generics.R, R/msgfPar-getters.R
\docType{methods}
\name{matches}
\alias{matches}
\alias{matches<-}
\alias{matches,msgfPar-method}
\alias{matches<-,msgfPar,numeric-method}
\alias{matches<-,msgfPar,msgfParMatches-method}
\title{Get and set the number of matches in msgfPar objects}
\usage{
matches(object)
matches(object) <- value
\S4method{matches}{msgfPar}(object)
\S4method{matches}{msgfPar,numeric}(object) <- value
\S4method{matches}{msgfPar,msgfParMatches}(object) <- value
}
\arguments{
\item{object}{An msgfPar object}
\item{value}{Either an integer or an msgfParMatches object}
}
\value{
In case of the getter an integer
}
\description{
These functions allow you to retrieve and set the number of matches per
spectrum returned by MS-GF+
}
\section{Methods (by class)}{
\itemize{
\item \code{msgfPar}: Get the number of matches reported per spectrum
\item \code{object = msgfPar,value = numeric}: Set the number of matches reported per spectrum using an
integer
\item \code{object = msgfPar,value = msgfParMatches}: Set the number of matches reported per spectrum using an
msgfParMatches object
}}
\examples{
parameters <- msgfPar(system.file(package='MSGFplus', 'extdata', 'milk-proteins.fasta'))
matches(parameters) <- 5
matches(parameters)
}
\seealso{
Other msgfPar-getter_setter: \code{\link{chargeRange}},
\code{\link{db}}, \code{\link{enzyme}},
\code{\link{fragmentation}}, \code{\link{instrument}},
\code{\link{isotopeError}}, \code{\link{lengthRange}},
\code{\link{mods}}, \code{\link{ntt}},
\code{\link{protocol}}, \code{\link{tda}},
\code{\link{tolerance}}
}
| /man/matches.Rd | no_license | ManuelPerisDiaz/MSGFplus | R | false | true | 1,674 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generics.R, R/msgfPar-getters.R
\docType{methods}
\name{matches}
\alias{matches}
\alias{matches<-}
\alias{matches,msgfPar-method}
\alias{matches<-,msgfPar,numeric-method}
\alias{matches<-,msgfPar,msgfParMatches-method}
\title{Get and set the number of matches in msgfPar objects}
\usage{
matches(object)
matches(object) <- value
\S4method{matches}{msgfPar}(object)
\S4method{matches}{msgfPar,numeric}(object) <- value
\S4method{matches}{msgfPar,msgfParMatches}(object) <- value
}
\arguments{
\item{object}{An msgfPar object}
\item{value}{Either an integer or an msgfParMatches object}
}
\value{
In case of the getter an integer
}
\description{
These functions allow you to retrieve and set the number of matches per
spectrum returned by MS-GF+
}
\section{Methods (by class)}{
\itemize{
\item \code{msgfPar}: Get the number of matches reported per spectrum
\item \code{object = msgfPar,value = numeric}: Set the number of matches reported per spectrum using an
integer
\item \code{object = msgfPar,value = msgfParMatches}: Set the number of matches reported per spectrum using an
msgfParMatches object
}}
\examples{
parameters <- msgfPar(system.file(package='MSGFplus', 'extdata', 'milk-proteins.fasta'))
matches(parameters) <- 5
matches(parameters)
}
\seealso{
Other msgfPar-getter_setter: \code{\link{chargeRange}},
\code{\link{db}}, \code{\link{enzyme}},
\code{\link{fragmentation}}, \code{\link{instrument}},
\code{\link{isotopeError}}, \code{\link{lengthRange}},
\code{\link{mods}}, \code{\link{ntt}},
\code{\link{protocol}}, \code{\link{tda}},
\code{\link{tolerance}}
}
|
# Classify overall market direction from rolling drawdown.
#
# Computes a 5%-trimmed mean close per date across all symbols, compares it
# to its trailing 90-day high, and labels each date Bull / Pullback /
# Correction / Bear from the drawdown off that high.
#
# Args:
#   Combined_Results: data frame with at least `Date` and `Close` columns
#     (multiple rows per Date are averaged with a 5% trimmed mean).
#   Plot: logical; if TRUE, print a 6-month status plot (default TRUE).
#
# Returns: data frame with one row per date: Close, Indicator (running
#   90-day high), Delta (drawdown), Market_Status factor, Market_Delta_50
#   (50-day mean drawdown) and Days (consecutive days in current status).
#
# NOTE(review): relies on dplyr/zoo/TTR/lubridate/ggplot2 being attached by
# the caller; none are loaded here — confirm against the project setup.
Market_Direction <- function(Combined_Results, Plot = TRUE) {
  ## Market status thresholds on the drawdown from the rolling 90-day high:
  ## <= -20% Bear, <= -10% Correction, < -5% Pullback, otherwise Bull.
  Market_DF <- Combined_Results %>%
    group_by(Date) %>%
    summarise(Close = mean(Close, trim = 0.05, na.rm = TRUE)) %>%
    na.locf() %>%
    ungroup() %>%
    mutate(Indicator = runMax(Close, 90),
           Delta = (Close - Indicator) / Indicator) %>%
    na.omit() %>%
    mutate(Market_Status = factor(
      case_when(
        Delta <= -0.20 ~ "Bear",
        Delta <= -0.10 ~ "Correction",
        Delta < -0.05 ~ "Pullback",
        Delta >= -0.05 ~ "Bull"
      ),
      levels = c("Bull", "Pullback", "Correction", "Bear"))) %>%
    mutate(Market_Delta_50 = rollapply(Delta,
                                       width = 50,
                                       FUN = mean,
                                       na.rm = TRUE,
                                       fill = NA,
                                       align = "right")) %>%
    # Consecutive days spent in the current (uninterrupted) status.
    mutate(Days = sequence(rle(as.numeric(Market_Status))$lengths))

  ## Data for the 6-month status plot, with a 50-day SMA overlay.
  Plot_Date <- max(Market_DF$Date) %m-% months(6)
  MIND_DF <- Market_DF %>%
    mutate(SMA50 = rollapply(Close,
                             width = 50,
                             FUN = mean,
                             na.rm = TRUE,
                             align = "right",
                             fill = NA)) %>%
    filter(Date >= Plot_Date) %>%
    arrange(Date)

  ## Most recent status row (drives the plot subtitle).
  Current_Status <- Market_DF %>%
    na.omit() %>%
    arrange(desc(Date)) %>%
    head(1)

  ## Optional plot of the market-direction designation.
  if (Plot) {
    p1 <- ggplot(MIND_DF, aes(x = Date, y = Close)) +
      geom_point(aes(color = Market_Status)) +
      geom_line(aes(y = SMA50), size = 1.5, linetype = 2) +
      scale_x_date(breaks = scales::pretty_breaks(9)) +
      labs(x = "Date",
           y = "Close",
           title = "Market Status of Past 6 Months",
           subtitle = paste0("Current status = ",
                             Current_Status$Market_Status,
                             " :: Date = ", Current_Status$Date,
                             " :: Status for Past ", Current_Status$Days, " Days",
                             "\n50 Day Slope = ",
                             scales::percent(
                               (MIND_DF$SMA50[nrow(MIND_DF)] - MIND_DF$SMA50[nrow(MIND_DF) - 1]) /
                                 MIND_DF$SMA50[nrow(MIND_DF) - 1])),
           color = "Market Status")
    print(p1)
  }
  return(Market_DF)
} | /Codes/Functions/Market_Direction.R | no_license | jfontestad/Stock-Strategy-Exploration | R | false | false | 2,602 | r | Market_Direction = function(Combined_Results,Plot = T){
## Defining Market Status Based on Rolling Quarterly Performance
Market_DF = Combined_Results %>%
group_by(Date) %>%
summarise(Close = mean(Close,trim = 0.05,na.rm = T)) %>%
na.locf() %>%
ungroup() %>%
mutate(Indicator = runMax(Close,90),
Delta = (Close -Indicator)/Indicator) %>%
na.omit() %>%
mutate(Market_Status = factor(
case_when(
Delta <= -0.20 ~ "Bear",
Delta <= -0.10 ~ "Correction",
Delta < -0.05 ~ "Pullback",
Delta >= -0.05 ~ "Bull"
),
levels = c("Bull",
"Pullback",
"Correction",
"Bear"))) %>%
mutate(Market_Delta_50 = rollapply(Delta,
width = 50,
FUN = mean,
na.rm = T,
fill = NA,
align = "right")) %>%
mutate(Days = sequence(rle(as.numeric(Market_Status))$lengths))
## Creating Market Type Data
Plot_Date = max(Market_DF$Date) %m-% months(6)
MIND_DF = Market_DF %>%
mutate(SMA50 = rollapply(Close,
width = 50,
FUN = mean,
na.rm = T,
align = "right",
fill = NA)) %>%
filter(Date >= Plot_Date) %>%
arrange(Date)
## Pulling Current Status Information
Current_Status = Market_DF %>%
na.omit() %>%
arrange(desc(Date)) %>%
head(1)
## Plot Examining Market Direction Designation
if(Plot){
p1 = ggplot(MIND_DF,aes(x = Date,y = Close)) +
geom_point(aes(color = Market_Status)) +
geom_line(aes(y = SMA50),size = 1.5,linetype = 2) +
scale_x_date(breaks = scales::pretty_breaks(9)) +
labs(x = "Date",
y = "Close",
title = "Market Status of Past 6 Months",
subtitle = paste0("Current status = ",
Current_Status$Market_Status,
" :: Date = ",Current_Status$Date,
" :: Status for Past ",Current_Status$Days," Days",
"\n50 Day Slope = ",
scales::percent(
(MIND_DF$SMA50[nrow(MIND_DF)] - MIND_DF$SMA50[nrow(MIND_DF)-1])/
MIND_DF$SMA50[nrow(MIND_DF)-1])),
color = "Market Status")
print(p1)
}
return(Market_DF)
} |
#' Updates a deep neural network's parameters using stochastic gradient descent
#' method and batch normalization
#'
#' This function finetunes a DArch network using the SGD approach.
#'
#' @param darch a darch instance
#' @param trainData training input
#' @param targetData training target
#' @param learn_rate_weight learning rate for the weight matrices
#' @param learn_rate_bias learning rate for the biases
#' @param learn_rate_gamma learning rate for the gammas
#' @param errorFunc the error function to minimize during training
#' @param with_BN logical value, TRUE to train the neural net with batch
#'   normalization
#'
#' @importFrom darch getLayer getDropoutMask getMomentum
#'
#' @return a darch instance with parameters updated with stochastic gradient
#'   descent
#'
finetune_SGD_bn <- function(darch,
                            trainData,
                            targetData,
                            learn_rate_weight = exp(-10),
                            learn_rate_bias = exp(-10),
                            learn_rate_gamma = exp(-10),
                            errorFunc = meanSquareErr,
                            with_BN = TRUE) {
  # Backpropagate once to obtain the per-layer gradients for the weights,
  # the biases (beta) and the batch-norm scales (gamma).
  ret <- backpropagate_delta_bn(darch, trainData, targetData, errorFunc, with_BN)
  delta_weight <- ret[[1]]
  delta_beta <- ret[[2]]
  delta_gamma <- ret[[3]]

  learnRateBiases <- learn_rate_bias
  learnRateWeights <- learn_rate_weight
  learnRateGamma <- learn_rate_gamma
  numLayers <- length(delta_weight)

  # Walk the layers from output to input and apply the SGD updates.
  for (i in numLayers:1) {
    weights <- getLayer(darch, i)[[1]]
    # The last row of the layer's weight matrix stores the biases.
    biases <- weights[nrow(weights), , drop = FALSE]
    weights <- weights[1:(nrow(weights) - 1), , drop = FALSE]
    gamma <- getLayer(darch, i)[[4]]
    weightsChange_prev <- getLayer(darch, i)[[3]]

    # Weight update: gradient step plus momentum; the dropout mask is
    # applied to the momentum term so dropped units carry no momentum.
    weightsInc <- (learnRateWeights * delta_weight[[i]])
    weightsChange <- weightsInc + (getMomentum(darch) *
                                     weightsChange_prev * getDropoutMask(darch, i - 1))
    weights <- weights - weightsChange

    # Beta (bias) update.
    biasesInc <- learnRateBiases * delta_beta[[i]][1, ]
    biases <- biases - biasesInc

    # Gamma (batch-norm scale) update.
    gammaInc <- learnRateGamma * delta_gamma[[i]][1, ]
    gamma <- gamma - gammaInc

    # Write the updated parameters back into the darch layer; the updated
    # weight increment is stored for the next momentum step.
    darch@layers[[i]][[1]] <- rbind(weights, biases)
    darch@layers[[i]][[3]] <- weightsInc
    darch@layers[[i]][[4]] <- gamma
  }
  return(darch)
}
| /R/finetune_SGD.R | no_license | garymihalik/deeplearning | R | false | false | 2,547 | r | #' Updates a deep neural network's parameters using stochastic gradient descent
#' method and batch normalization
#'
#' This function finetunes a DArch network using SGD approach
#'
#' @param darch a darch instance
#' @param trainData training input
#' @param targetData training target
#' @param learn_rate_weight learning rate for the weight matrices
#' @param learn_rate_bias learning rate for the biases
#' @param learn_rate_gamma learning rate for the gammas
#' @param errorFunc the error function to minimize during training
#' @param with_BN logical value, T to train the neural net with batch normalization
#'
#' @importFrom darch getLayer getDropoutMask getMomentum
#'
#' @return a darch instance with parameters updated with stochastic gradient descent
#'
finetune_SGD_bn <- function(darch,
                            trainData,
                            targetData,
                            learn_rate_weight = exp(-10),
                            learn_rate_bias = exp(-10),
                            learn_rate_gamma = exp(-10),
                            errorFunc = meanSquareErr,
                            with_BN = T) {
  # stats <- getStats(darch)
  # Backpropagate once: gradients for weights, beta (biases), gamma.
  ret <- backpropagate_delta_bn(darch, trainData, targetData, errorFunc, with_BN)
  delta_weight <- ret[[1]]
  delta_beta <- ret[[2]]
  delta_gamma <- ret[[3]]
  learnRateBiases <- learn_rate_bias
  learnRateWeights <- learn_rate_weight
  learnRateGamma <- learn_rate_gamma
  numLayers <- length(delta_weight)
  # Walk layers from output to input, applying SGD updates per layer.
  for(i in numLayers:1) {
    weights <- getLayer(darch, i)[[1]]
    # Last row of the weight matrix stores the biases.
    biases <- weights[nrow(weights),,drop=F]
    weights <- weights[1:(nrow(weights)-1),,drop=F]
    gamma <- getLayer(darch, i)[[4]]
    weightsChange_prev <- getLayer(darch, i)[[3]]
    # Calculate the change in weights
    # apply dropout mask to momentum
    weightsInc <- (learnRateWeights * delta_weight[[i]])
    weightsChange <- weightsInc + (getMomentum(darch) *
                                     weightsChange_prev * getDropoutMask(darch, i-1)
    )
    weights <- weights - weightsChange
    # Calculate the change in beta (biases)
    biasesInc <- learnRateBiases * delta_beta[[i]][1,]
    biases <- biases - biasesInc
    # Calculate the change in gamma
    gammaInc <- learnRateGamma * delta_gamma[[i]][1,]
    gamma <- gamma - gammaInc
    # Write updated parameters back; weightsInc is kept for momentum.
    darch@layers[[i]][[1]] <- rbind(weights,biases)
    darch@layers[[i]][[3]] <- weightsInc
    darch@layers[[i]][[4]] <- gamma
  }
  # setStats(darch) <- stats
  return(darch)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/summary_pokemon_data_partial.R
\name{summary_pokemon_data_partial}
\alias{summary_pokemon_data_partial}
\title{Show summary statistics of pokemons}
\usage{
summary_pokemon_data_partial(para = "Weight", summary = TRUE)
}
\arguments{
\item{para}{specific characteristic you want to check about pokemon data, like "weight", "height", "HP"..., set default to "Weight"}
\item{summary}{TRUE if you want to see the summary statistics, FALSE if you do not; defaults to TRUE}
}
\value{
A dataset containing summary statistics of the pokemons chosen by the user
}
\description{
Show summary statistics of pokemons
}
| /man/summary_pokemon_data_partial.Rd | no_license | sunqihui1221/QihuiSunFinal | R | false | true | 701 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/summary_pokemon_data_partial.R
\name{summary_pokemon_data_partial}
\alias{summary_pokemon_data_partial}
\title{Show summary statistics of pokemons}
\usage{
summary_pokemon_data_partial(para = "Weight", summary = TRUE)
}
\arguments{
\item{para}{specific characteristic you want to check about pokemon data, like "weight", "height", "HP"..., set default to "Weight"}
\item{summary}{TURE if you want to see the summary statistics, FALSE if you do not want to see, set default to TRUE}
}
\value{
A dataset containing summary statistics of the pokemons chosen by the user
}
\description{
Show summary statistics of pokemons
}
|
# ALL-B12_deviationlistPGM.R
# Author: kaoru torii     (created 2016/06/15)
# Author: mamiko yonejima (created 2017/10/19)
#########################

# Extract columns 1, 2 and 9 (registration number, sheet name, creation
# date) from a flowsheet data frame.
Dxt <- function(flowsheet){
  flowsheet[, c(1, 2, 9)]
}

## Config #####
prtpath <- "//aronas/Datacenter/Trials/JPLSG/22_ALL-B12/04.03.02 定期モニタリングレポート/第16回/R/cleaning"
kDownLoadDate <- "_201201_1106"  # download-date suffix of the flowsheet files
kDev <- "ALL-B12_deviations_201201_1154.csv"
###############

# Read every raw CSV; each file is assigned to a data frame named after
# the trailing token of its file name (e.g. "flowsheet1", "risk2", ...).
raw_files <- list.files(paste0(prtpath, "./rawdata"))
file.name <- sub(paste0(kDownLoadDate, ".*"), "", raw_files)
df.name <- sub(".*_", "", file.name)
setwd(paste0(prtpath, "./rawdata"))
for (i in seq_along(raw_files)) {
  assign(df.name[i], read.csv(raw_files[i], as.is = TRUE, na.strings = c("")))
}
# setwd(paste0(prtpath, "./dev/rawdata"))
deviations0 <- read.csv(kDev, as.is = TRUE, na.strings = c(""))

# Read the input sheet-name mapping.
sheet_name <- read.csv("../input/sheet_name.csv")

# Extract the required columns from each sheet.
# assign()/get() replace the original eval(parse(text = ...)) calls.
for (i in c(1, 3:43)) {
  assign(paste0("dxt_flowsheet", i), Dxt(get(paste0("flowsheet", i))))
}
for (i in 1:3) {
  assign(paste0("dxt_risk", i), Dxt(get(paste0("risk", i))))
}
dxt_initial <- Dxt(initial)

# Merge the Japanese sheet names onto the deviation list.
deviations <- merge(deviations0, sheet_name, by = "シート名", all.x = TRUE)

# Stack all sheet extracts (vertical bind) to build the creation-date
# lookup table that will later be merged onto the deviation list.
mat_list <- c(list(dxt_initial),
              lapply(1:3, function(i) get(paste0("dxt_risk", i))),
              lapply(c(1, 3:43), function(i) get(paste0("dxt_flowsheet", i))))
matSum <- do.call(rbind, mat_list)
matSum$key <- paste0(matSum$症例登録番号, matSum$シート名英数字別名)
matSum <- matSum[, c(2, 4)]

# Drop rows flagged as grade-4/5 deviations (rows where character 9 of
# the 入力値.表示データ. field is "-" are removed).
dxt_deviations <- deviations[substring(deviations$入力値.表示データ., 9, 9) != "-", ]
# Remove IA day-1 dosing-date rows.
dxt_deviations <- dxt_deviations[dxt_deviations$フィールドラベル != "day1投与日(治療開始日)", ]
# Remove consolidation-therapy day-1 dosing-date rows.
dxt_deviations <- dxt_deviations[dxt_deviations$フィールドラベル != "day1投与日", ]
# Keep "last dose of this course" rows only for the early intensification
# (IB / IB+L / IB+VL) flowsheets; drop them everywhere else.
dxt_deviations_0 <- subset(dxt_deviations, dxt_deviations$フィールドラベル != "本コース試験治療薬剤最終投与日")
dxt_deviations_1 <- dxt_deviations[dxt_deviations$フィールドラベル == "本コース試験治療薬剤最終投与日", ]
dxt_deviations_2 <- dxt_deviations_1[dxt_deviations_1$シート名 == "フローシート:早期強化療法(IB)" | dxt_deviations_1$シート名 == "フローシート:早期強化療法(IB+L)" | dxt_deviations_1$シート名 == "フローシート:早期強化療法(IB+VL)", ]
dxt_deviations <- rbind(dxt_deviations_0, dxt_deviations_2)
# Remove follow-up (final outcome confirmation date) rows.
dxt_deviations <- dxt_deviations[dxt_deviations$フィールドラベル != "最終転帰確認日", ]
colnames(dxt_deviations)[2] <- "症例登録番号"
dxt_deviations$key <- paste0(dxt_deviations$症例登録番号, dxt_deviations$sheet.name)
# Extract the site name (text before the first "-").
dxt_deviations$施設名 <- sub("-.*", "", dxt_deviations$施設名科名)
# Keep only the required columns.
dxt_deviations <- dxt_deviations[, c("症例登録番号", "施設名", "シート名", "フィールドラベル", "入力値.表示データ.", "key")]

# Merge the risk sheets; keep registration number plus the provisional
# and final risk columns (columns 65 and 111 of the merged sheet).
risk <- merge(risk1, risk2, by = "症例登録番号", all = TRUE)
dxt_risk <- risk[, c(1, 65, 111)]
# Required columns from the discontinuation report.
dxt_cancel <- cancel[, c("症例登録番号", "中止時期.コース名.", "治療終了.中止.理由", "中止時期.day.week.", "中止時期.日数.週数.")]

# Merge everything together.
m_risk_dev <- merge(dxt_risk, dxt_deviations, by = "症例登録番号", all.y = TRUE)
m_risk_dev_cancel <- merge(m_risk_dev, dxt_cancel, by = "症例登録番号", all.x = TRUE)
result <- merge(matSum, m_risk_dev_cancel, by = "key", all.y = TRUE)
result <- result[, -1]
# Sorting (disabled):
# result<- result[order(result$順序付きフローシート順序),]

# Write out the CSV (blank out NAs first); file.path replaces the
# original eval(parse(text = ...)) path construction.
result[is.na(result)] <- ""
write.csv(result, file.path(prtpath, "output/deviation/deviations.csv"), row.names = FALSE)
| /programs/ALL-B12_deviationlist.R | no_license | nnh/ALL-B12 | R | false | false | 4,446 | r | # ALL-B12_deviationlistPGM.R
# Author: kaoru torii
# Created: 2016/06/15
# Author: mamiko yonejima
# Created: 2017/10/19
#########################
# Extract columns 1, 2 and 9 (registration number, sheet name, creation
# date) from a flowsheet data frame.
Dxt <- function(flowsheet){
  flowsheet[, c(1, 2, 9)]
}
## Config #####
prtpath <- "//aronas/Datacenter/Trials/JPLSG/22_ALL-B12/04.03.02 定期モニタリングレポート/第16回/R/cleaning"
kDownLoadDate <- "_201201_1106" # download-date suffix of the flowsheet files
kDev <- "ALL-B12_deviations_201201_1154.csv"
###############
# Read csv: each file is assigned to a data frame named after the
# trailing token of its file name (e.g. "flowsheet1", "risk2", ...).
list <- list.files(paste0(prtpath, "./rawdata"))
file.name <- sub(paste0(kDownLoadDate,".*"), "", list)
df.name <- sub(".*_", "", file.name)
setwd(paste0(prtpath, "./rawdata"))
for (i in 1:length(list)) {
  assign(df.name[i], read.csv(list[i], as.is=T, na.strings = c("")))
}
# setwd(paste0(prtpath, "./dev/rawdata"))
deviations0 <- read.csv(kDev, as.is=T, na.strings = c(""))
# Read the input sheet-name mapping.
sheet_name <- read.csv("../input/sheet_name.csv")
# Extract the required columns from each sheet.
for(i in c(1, 3:43)){
  eval(parse(text = paste0("dxt_flowsheet", i, "<- Dxt(flowsheet", i, ")")))
}
for(i in 1:3){
  eval(parse(text = paste0("dxt_risk", i, "<- Dxt(risk", i, ")")))
}
dxt_initial <- Dxt(initial)
# Merge the Japanese sheet names onto the deviation list.
deviations <- merge(deviations0, sheet_name, by = "シート名", all.x = T )
# Stack all sheet extracts (vertical bind) to build the creation-date
# lookup table that is later merged onto the deviation list.
matSum <- dxt_initial
for(i in 1:3){
  matSum <- rbind(matSum, eval(parse(text = paste0("dxt_risk", i))))
}
for(i in c(1, 3:43)){
  matSum <- rbind(matSum, eval(parse(text = paste0("dxt_flowsheet", i))))
}
matSum$key <- paste0(matSum$症例登録番号, matSum$シート名英数字別名)
matSum <- matSum[, c(2, 4)]
# Drop rows flagged as grade-4/5 deviations (rows where character 9 of
# the 入力値.表示データ. field is "-" are removed).
dxt_deviations <- deviations[substring(deviations$入力値.表示データ., 9, 9) != "-", ]
# Remove IA day-1 dosing-date rows.
dxt_deviations <- dxt_deviations[dxt_deviations$フィールドラベル != "day1投与日(治療開始日)",]
# Remove consolidation-therapy day-1 dosing-date rows.
dxt_deviations <- dxt_deviations[dxt_deviations$フィールドラベル != "day1投与日",]
# Keep "last dose of this course" rows only for the IB / IB+L / IB+VL
# early-intensification flowsheets; drop them everywhere else.
dxt_deviations_0 <- subset(dxt_deviations, dxt_deviations$フィールドラベル != "本コース試験治療薬剤最終投与日" )
dxt_deviations_1 <- dxt_deviations[dxt_deviations$フィールドラベル == "本コース試験治療薬剤最終投与日" ,]
dxt_deviations_2 <- dxt_deviations_1[dxt_deviations_1$シート名 == "フローシート:早期強化療法(IB)" | dxt_deviations_1$シート名 == "フローシート:早期強化療法(IB+L)" | dxt_deviations_1$シート名 == "フローシート:早期強化療法(IB+VL)", ]
dxt_deviations <- rbind(dxt_deviations_0, dxt_deviations_2)
# Remove follow-up (final outcome confirmation date) rows.
dxt_deviations <- dxt_deviations[dxt_deviations$フィールドラベル != "最終転帰確認日", ]
colnames(dxt_deviations)[2] <- "症例登録番号"
dxt_deviations$key <- paste0(dxt_deviations$症例登録番号, dxt_deviations$sheet.name)
# Extract the site name (text before the first "-").
dxt_deviations$施設名<- sub("-.*","",dxt_deviations$施設名科名)
# Keep only the required columns.
dxt_deviations <- dxt_deviations[,c("症例登録番号", "施設名","シート名", "フィールドラベル", "入力値.表示データ.", "key")]
# Merge the risk sheets.
risk <- merge(risk1, risk2, by = "症例登録番号", all = T)
# Keep registration number plus the provisional and final risk columns.
dxt_risk <- risk[, c(1, 65, 111)]
# Required columns from the discontinuation report.
dxt_cancel <- cancel[,c("症例登録番号","中止時期.コース名.","治療終了.中止.理由","中止時期.day.week.","中止時期.日数.週数.")]
# Merge everything together.
m_risk_dev <- merge(dxt_risk, dxt_deviations, by = "症例登録番号", all.y = T)
m_risk_dev_cancel <- merge(m_risk_dev, dxt_cancel, by = "症例登録番号", all.x = T)
result <- merge(matSum, m_risk_dev_cancel, by = "key", all.y = T)
result <- result[, -1]
# Sorting (disabled):
# result<- result[order(result$順序付きフローシート順序),]
# Write out the CSV (blank out NAs first).
result[is.na(result)] <- ""
write.csv(result, eval(parse(text = paste0("'", prtpath, "/output/deviation/deviations.csv'"))), row.names=FALSE)
|
## Here is my R file to be put into the repository | /TylersFile.R | no_license | wesenu/tutorial_git | R | false | false | 50 | r | ## Here is my R file to be put into the repository |
js_protocol <- jsonlite::read_json("./tools/js_protocol.json")
browser_protocol <- jsonlite::read_json("./tools/browser_protocol.json")
types <- c(string = "A character string. ",
boolean = "A logical. ",
integer = "An integer. ",
array = "A list of ",
number = "A numeric. ")
is_param_optional <- function(parameter) {
isTRUE(parameter$optional)
}
is_cmd_deprecated <- function(command) {
isTRUE(command$deprecated)
}
sanitize_help <- function(text) {
text <- gsub("[0..100]", "`[0..100]`", text, fixed = TRUE)
text <- gsub("[0..1]", "`[0..1]`", text, fixed = TRUE)
gsub("\\n", "\n#' ", text)
}
# Build command -----------------------------------------------------------
build_command_signature <- function(command) {
par_names <- c("promise", purrr::map_chr(command$parameters, "name"))
optionals <- c(FALSE, purrr::map_lgl(command$parameters, is_param_optional))
paste0("function(",
paste(paste0(par_names,
ifelse(optionals, " = NULL", "")
), collapse = ", "),
", awaitResult = TRUE)")
}
build_command_parameter_help <- function(parameter) {
declaration <- paste0(
"#' @param ", parameter$name, " ",
if (isTRUE(parameter$deprecated)) "Deprecated. ",
if (isTRUE(parameter$experimental)) "Experimental. ",
if (isTRUE(parameter$optional)) "Optional. ",
types[parameter$type],
if (!is.null(parameter$items)) paste0(parameter$items, ". "),
if (!is.null(parameter[["$ref"]])) paste0("A ", parameter[["$ref"]], ". ")
)
details <- paste(
parameter$description,
if (!is.null(parameter$enum))
paste0("Accepted values: ", paste(parameter$enum, collapse = ", "), ".")
)
text <- paste0(declaration, if (length(details) > 0) "\n", details)
sanitize_help(text)
}
build_command_help <- function(domain_name, command) {
title <- paste0("#' Send the command ", paste(domain_name, command$name, sep = "."), "\n#' ")
description <- paste0("#' ", command$description)
description <- paste0(sanitize_help(description), "\n#' ")
params <- c("#' @param promise An asynchronous result.",
purrr::map_chr(command$parameters, build_command_parameter_help),
"#' @param awaitResult Await for the command result?"
)
return_field <- paste0(
"#' ",
"\n#' @return An async value of class `promise`.",
"\n#' The value and the completion of the promise differ according to the value of `awaitResult`.",
"\n#' Its value is a named list of two elements: `ws` (the websocket connexion) and `result`.",
"\n#' When `awaitResult` is `TRUE`, the promise is fulfilled once the result of the command is received. In this case,",
if (length(command$returns) == 0) "\n#' `result` is a void named list."
else sprintf("\n#' `result` is a named list of length %i.", length(command$returns)),
"\n#' When `awaitResult` is `FALSE`, the promise is fulfilled once the command is sent:",
"\n#' `result` is equal to the previous result (`promise$result`).",
"\n#' In both cases, you can chain this promise with another command or event listener."
)
paste0(c(title, description, params, return_field, "#' @export"), collapse = "\n")
}
generate_command <- function(command, domain_name = NULL) {
r2help <- build_command_help(domain_name, command)
body <- paste0(paste(domain_name, command$name, sep = "."), " <- ", build_command_signature(command), " {\n",
sprintf(" method <- '%s.%s'\n", domain_name, command$name),
" args <- utils::head(rlang::fn_fmls_names(), -1)\n",
" args <- args[!sapply(mget(args), is.null)]\n",
" params <- mget(args)\n",
" params <- if (length(params) > 1) params[2:length(params)] else NULL\n",
" send(promise, method, params, awaitResult)\n",
"}\n")
paste(r2help, body, sep = "\n")
}
generate_commands_source_code <- function(domain) {
deprecated <- purrr::map_lgl(domain$commands, is_cmd_deprecated)
commands <- domain$commands[!deprecated]
file_content <- paste0(c(
"# DO NOT EDIT BY HAND\n#' @include send.R\nNULL",
purrr::map_chr(commands, generate_command, domain_name = domain$domain)
), collapse = "\n\n")
cat(file_content, file = paste0("R/commands_", domain$domain, ".R"))
}
purrr::walk(js_protocol$domains, generate_commands_source_code)
purrr::walk(browser_protocol$domains, generate_commands_source_code)
# Build event listener ----------------------------------------------------
build_event_parameter_help <- function(parameter) {
declaration <- paste0(
"#' @param ", parameter$name, " ",
if (isTRUE(parameter$deprecated)) "Deprecated. ",
if (isTRUE(parameter$experimental)) "Experimental. ",
types[parameter$type],
if (!is.null(parameter$items)) paste0(parameter$items, ". "),
if (!is.null(parameter[["$ref"]])) paste0("A ", parameter[["$ref"]], ". ")
)
details <- paste(
parameter$description,
paste0("Accepted values: ", paste(c(paste0("`~ .res$", parameter$name, "` (to refer to the previous result)"), parameter$enum), collapse = ", "), ".")
)
text <- paste0(declaration, if (length(details) > 0) "\n", details)
sanitize_help(text)
}
build_event_help <- function(domain_name, event) {
title <- paste0("#' Await the event ", paste(domain_name, event$name, sep = "."), " or create a callback", "\n#' ")
description <- paste0("#' ", event$description)
description <- paste0(sanitize_help(description), "\n#' ")
params <- c("#' @param promise An asynchronous result object.",
purrr::map_chr(event$parameters, build_event_parameter_help),
"#' @param .callback A callback function taking one argument. The object passed to",
"#' this function is the message received from Chrome: this is a named list",
paste0("#' with an element `method` (that is equal to `\"", event$name, "\"`)"),
"#' and an element `params` which is a named list.",
if (is.null(event$parameters)) "#' For this event, `params` is void."
else c(
"#' The `params` list is composed of",
paste0("#' the following element(s): ",
paste0("`", purrr::map_chr(event$parameters, "name"), "`",
ifelse(purrr::map_lgl(event$parameters, is_param_optional), " (optional) ", ""),
collapse = ", "
),
"."
)
)
)
return_field <- paste0(
"#' ",
"\n#' @return An async value of class `promise`.",
"\n#' The value and the completion of the promise differ according to the use of a callback function.",
"\n#' When `.callback` is `NULL`, the promise is fulfilled when the event is received.",
"\n#' Its value is a named list of two elements: `ws` (the websocket connexion) and `result`.",
"\n#' `result` is a named list: its elements are the parameters sended by Chrome. ",
"\n#' You can chain this promise with another command or event listener.",
"\n#' When `.callback` is not `NULL`, the promise is fulfilled as soon as the callback is created; the value",
"\n#' is a function without any argument that can be called to cancel the callback. When you use the",
"\n#' `.callback` argument, you cannot send the result to any other command or event listener."
)
paste0(c(title, "#' **Event description**: ", description, params, return_field, "#' @export"), collapse = "\n")
}
build_event_signature <- function(event) {
par_names <- purrr::map_chr(event$parameters, "name")
paste0("function(promise, ", if (length(par_names) > 0) paste0(paste(paste0(par_names, " = NULL"), collapse = ", "), ", "), ".callback = NULL)")
}
# Render the complete source for one event-listener wrapper: a roxygen help
# block followed by the function definition itself.
# `event` is one element of a domain's `events` list from the protocol JSON;
# `domain_name` prefixes the generated function name (e.g. "Page.loadEventFired").
generate_event <- function(event, domain_name = NULL) {
  r2help <- build_event_help(domain_name, event)
  # The generated body introspects its own formals (all but the trailing
  # `.callback`), keeps only those bound to non-NULL values, drops the
  # leading `promise`, and forwards the remainder as `params` to listen().
  body <- paste0(paste(domain_name, event$name, sep = "."), " <- ", build_event_signature(event), " {\n",
                 sprintf("  method <- '%s.%s'\n", domain_name, event$name),
                 "  args <- utils::head(rlang::fn_fmls_names(), -1)\n",
                 "  args <- args[!sapply(mget(args), is.null)]\n",
                 "  params <- mget(args)\n",
                 "  params <- if (length(params) > 1) params[2:length(params)] else NULL\n",
                 "  listen(promise, method, params, .callback)\n",
                 "}\n")
  paste(r2help, body, sep = "\n")
}
# Write R/events_<Domain>.R with one generated listener per event of `domain`.
# Domains that declare no events are skipped. Called for its side effect
# (writing the file); its return value is not used.
generate_events_source_code <- function(domain) {
  events <- domain$events
  if (is.null(events)) return()
  # Each generated file opens with a do-not-edit banner and a roxygen
  # @include directive so send.R is collated before the generated code.
  file_content <- paste0(c(
    "# DO NOT EDIT BY HAND\n#' @include send.R\nNULL",
    purrr::map_chr(events, generate_event, domain_name = domain$domain)
  ), collapse = "\n\n")
  cat(file_content, file = paste0("R/events_", domain$domain, ".R"))
}
# Side effects only: write R/events_<Domain>.R for every domain of both
# protocol definitions.
purrr::walk(js_protocol$domains, generate_events_source_code)
purrr::walk(browser_protocol$domains, generate_events_source_code)
# TODO detail the return object resulting of a command
# TODO check the remote protocol (in send)
| /tools/generator.R | permissive | RLesur/crrri | R | false | false | 9,306 | r | js_protocol <- jsonlite::read_json("./tools/js_protocol.json")
# Chrome DevTools Protocol definition for the browser target (the matching
# js_protocol read appears on the preceding line of this file).
browser_protocol <- jsonlite::read_json("./tools/browser_protocol.json")
# Lookup table: protocol primitive type name -> English phrase used in the
# generated roxygen help. Trailing spaces are deliberate: phrases are
# concatenated with the following text.
# NOTE(review): the protocol also uses an "object" type that is absent here;
# `types["object"]` would be NA — confirm whether object-typed parameters
# occur in these protocol files.
types <- c(string = "A character string. ",
           boolean = "A logical. ",
           integer = "An integer. ",
           array = "A list of ",
           number = "A numeric. ")
# TRUE when a protocol parameter carries a scalar `optional = TRUE` flag;
# FALSE otherwise (including when the field is absent, NULL, or non-logical).
is_param_optional <- function(parameter) {
  opt_flag <- parameter$optional
  isTRUE(opt_flag)
}
# TRUE when a protocol command carries a scalar `deprecated = TRUE` flag;
# FALSE otherwise (including when the field is absent, NULL, or non-logical).
is_cmd_deprecated <- function(command) {
  depr_flag <- command$deprecated
  isTRUE(depr_flag)
}
# Prepare raw protocol description text for inclusion in roxygen comments:
# wrap the literal ranges "[0..100]" and "[0..1]" in backticks (otherwise
# roxygen would parse them as Rd link markup) and re-prefix every embedded
# newline with "#' " so multi-line descriptions stay valid roxygen.
sanitize_help <- function(text) {
  literal_reps <- c("[0..100]" = "`[0..100]`",
                    "[0..1]"   = "`[0..1]`")
  for (needle in names(literal_reps)) {
    text <- gsub(needle, literal_reps[[needle]], text, fixed = TRUE)
  }
  gsub("\\n", "\n#' ", text)
}
# Build command -----------------------------------------------------------
# Render the formal argument list of a generated command wrapper as a string,
# e.g. "function(promise, frameId, url = NULL, awaitResult = TRUE)".
# Optional protocol parameters default to NULL; `promise` is always first and
# `awaitResult = TRUE` is always the trailing formal.
build_command_signature <- function(command) {
  arg_names <- c("promise", purrr::map_chr(command$parameters, "name"))
  arg_is_optional <- c(FALSE, purrr::map_lgl(command$parameters, is_param_optional))
  arg_decls <- paste0(arg_names, ifelse(arg_is_optional, " = NULL", ""))
  paste0("function(", paste(arg_decls, collapse = ", "), ", awaitResult = TRUE)")
}
# Build the roxygen `@param` entry for one command parameter.
#
# `parameter` is one element of a command's `parameters` list from the
# protocol JSON (a named list with fields such as `name`, `type`, `optional`,
# `deprecated`, `experimental`, `items`, `$ref`, `enum`, `description`).
# Returns a single string of "#'"-prefixed roxygen lines.
build_command_parameter_help <- function(parameter) {
  # English phrase for the parameter's primitive type. Fall back to the raw
  # protocol type name instead of injecting the literal "NA" into the help
  # when the type is not covered by the `types` lookup (e.g. "object").
  type_phrase <- types[parameter$type]
  if (length(type_phrase) == 1 && is.na(type_phrase)) {
    type_phrase <- paste0("A ", parameter$type, ". ")
  }
  declaration <- paste0(
    "#' @param ", parameter$name, " ",
    if (isTRUE(parameter$deprecated)) "Deprecated. ",
    if (isTRUE(parameter$experimental)) "Experimental. ",
    if (isTRUE(parameter$optional)) "Optional. ",
    type_phrase,
    # NOTE(review): `items` is parsed from JSON as a list; paste0() flattens
    # its elements — confirm this renders as intended for multi-field items.
    if (!is.null(parameter$items)) paste0(parameter$items, ". "),
    if (!is.null(parameter[["$ref"]])) paste0("A ", parameter[["$ref"]], ". ")
  )
  # paste() drops NULL arguments, so `details` is character(0) only when both
  # the description and the enum are absent.
  details <- paste(
    parameter$description,
    if (!is.null(parameter$enum))
      paste0("Accepted values: ", paste(parameter$enum, collapse = ", "), ".")
  )
  # When details exist, put them on a fresh line under the declaration.
  text <- paste0(declaration, if (length(details) > 0) "\n", details)
  sanitize_help(text)
}
# Assemble the full roxygen help block for one protocol command wrapper.
# Returns a single string of "#'" lines: title, sanitized description,
# @param entries (the `promise` input, one per protocol parameter, and
# `awaitResult`), the @return text, and @export.
build_command_help <- function(domain_name, command) {
  title <- paste0("#' Send the command ", paste(domain_name, command$name, sep = "."), "\n#' ")
  description <- paste0("#' ", command$description)
  # sanitize_help() backticks bracketed ranges and re-prefixes embedded
  # newlines with "#' " so multi-line descriptions stay valid roxygen.
  description <- paste0(sanitize_help(description), "\n#' ")
  params <- c("#' @param promise An asynchronous result.",
              purrr::map_chr(command$parameters, build_command_parameter_help),
              "#' @param awaitResult Await for the command result?"
  )
  # The @return text varies with whether the command declares return values.
  return_field <- paste0(
    "#' ",
    "\n#' @return An async value of class `promise`.",
    "\n#' The value and the completion of the promise differ according to the value of `awaitResult`.",
    "\n#' Its value is a named list of two elements: `ws` (the websocket connexion) and `result`.",
    "\n#' When `awaitResult` is `TRUE`, the promise is fulfilled once the result of the command is received. In this case,",
    if (length(command$returns) == 0) "\n#' `result` is a void named list."
    else sprintf("\n#' `result` is a named list of length %i.", length(command$returns)),
    "\n#' When `awaitResult` is `FALSE`, the promise is fulfilled once the command is sent:",
    "\n#' `result` is equal to the previous result (`promise$result`).",
    "\n#' In both cases, you can chain this promise with another command or event listener."
  )
  paste0(c(title, description, params, return_field, "#' @export"), collapse = "\n")
}
# Render the complete source for one command wrapper: a roxygen help block
# followed by the function definition itself.
# `command` is one element of a domain's `commands` list from the protocol
# JSON; `domain_name` prefixes the generated function name (e.g. "Page.navigate").
generate_command <- function(command, domain_name = NULL) {
  r2help <- build_command_help(domain_name, command)
  # The generated body introspects its own formals (all but the trailing
  # `awaitResult`), keeps only those bound to non-NULL values, drops the
  # leading `promise`, and forwards the remainder as `params` to send().
  body <- paste0(paste(domain_name, command$name, sep = "."), " <- ", build_command_signature(command), " {\n",
                 sprintf("  method <- '%s.%s'\n", domain_name, command$name),
                 "  args <- utils::head(rlang::fn_fmls_names(), -1)\n",
                 "  args <- args[!sapply(mget(args), is.null)]\n",
                 "  params <- mget(args)\n",
                 "  params <- if (length(params) > 1) params[2:length(params)] else NULL\n",
                 "  send(promise, method, params, awaitResult)\n",
                 "}\n")
  paste(r2help, body, sep = "\n")
}
# Write R/commands_<Domain>.R containing one generated wrapper per
# non-deprecated command of `domain`. Called for its side effect (writing
# the file); its return value is not used.
generate_commands_source_code <- function(domain) {
  keep <- !purrr::map_lgl(domain$commands, is_cmd_deprecated)
  active_commands <- domain$commands[keep]
  # Do-not-edit banner plus a roxygen @include so send.R is collated first.
  header <- "# DO NOT EDIT BY HAND\n#' @include send.R\nNULL"
  sources <- purrr::map_chr(active_commands, generate_command, domain_name = domain$domain)
  file_content <- paste(c(header, sources), collapse = "\n\n")
  cat(file_content, file = paste0("R/commands_", domain$domain, ".R"))
}
# Side effects only: write R/commands_<Domain>.R for every domain of both
# protocol definitions.
purrr::walk(js_protocol$domains, generate_commands_source_code)
purrr::walk(browser_protocol$domains, generate_commands_source_code)
# Build event listener ----------------------------------------------------
# Build the roxygen `@param` entry for one event parameter.
#
# `parameter` is one element of an event's `parameters` list from the
# protocol JSON. Unlike the command variant, the generated help always
# documents the "`~ .res$<name>`" shorthand referring to the previous result.
# Returns a single string of "#'"-prefixed roxygen lines.
build_event_parameter_help <- function(parameter) {
  # English phrase for the parameter's primitive type. Fall back to the raw
  # protocol type name instead of injecting the literal "NA" into the help
  # when the type is not covered by the `types` lookup (e.g. "object").
  type_phrase <- types[parameter$type]
  if (length(type_phrase) == 1 && is.na(type_phrase)) {
    type_phrase <- paste0("A ", parameter$type, ". ")
  }
  declaration <- paste0(
    "#' @param ", parameter$name, " ",
    if (isTRUE(parameter$deprecated)) "Deprecated. ",
    if (isTRUE(parameter$experimental)) "Experimental. ",
    type_phrase,
    # NOTE(review): `items` is parsed from JSON as a list; paste0() flattens
    # its elements — confirm this renders as intended for multi-field items.
    if (!is.null(parameter$items)) paste0(parameter$items, ". "),
    if (!is.null(parameter[["$ref"]])) paste0("A ", parameter[["$ref"]], ". ")
  )
  details <- paste(
    parameter$description,
    paste0("Accepted values: ", paste(c(paste0("`~ .res$", parameter$name, "` (to refer to the previous result)"), parameter$enum), collapse = ", "), ".")
  )
  # `details` is never empty here (the accepted-values text is unconditional),
  # so the declaration is always followed by a newline.
  text <- paste0(declaration, if (length(details) > 0) "\n", details)
  sanitize_help(text)
}
# Assemble the full roxygen help block for one protocol event listener.
# Returns a single string of "#'" lines: title, event description, @param
# entries (the `promise` input, one per protocol parameter, and `.callback`),
# the @return text, and @export.
build_event_help <- function(domain_name, event) {
  title <- paste0("#' Await the event ", paste(domain_name, event$name, sep = "."), " or create a callback", "\n#' ")
  description <- paste0("#' ", event$description)
  description <- paste0(sanitize_help(description), "\n#' ")
  params <- c("#' @param promise An asynchronous result object.",
              purrr::map_chr(event$parameters, build_event_parameter_help),
              "#' @param .callback A callback function taking one argument. The object passed to",
              "#' this function is the message received from Chrome: this is a named list",
              paste0("#' with an element `method` (that is equal to `\"", event$name, "\"`)"),
              "#' and an element `params` which is a named list.",
              if (is.null(event$parameters)) "#' For this event, `params` is void."
              else c(
                "#' The `params` list is composed of",
                paste0("#' the following element(s): ",
                       paste0("`", purrr::map_chr(event$parameters, "name"), "`",
                              ifelse(purrr::map_lgl(event$parameters, is_param_optional), " (optional) ", ""),
                              collapse = ", "
                       ),
                       "."
                )
              )
  )
  return_field <- paste0(
    "#' ",
    "\n#' @return An async value of class `promise`.",
    "\n#' The value and the completion of the promise differ according to the use of a callback function.",
    "\n#' When `.callback` is `NULL`, the promise is fulfilled when the event is received.",
    "\n#' Its value is a named list of two elements: `ws` (the websocket connexion) and `result`.",
    # fix: generated documentation previously read "sended by Chrome"
    "\n#' `result` is a named list: its elements are the parameters sent by Chrome. ",
    "\n#' You can chain this promise with another command or event listener.",
    "\n#' When `.callback` is not `NULL`, the promise is fulfilled as soon as the callback is created; the value",
    "\n#' is a function without any argument that can be called to cancel the callback. When you use the",
    "\n#' `.callback` argument, you cannot send the result to any other command or event listener."
  )
  paste0(c(title, "#' **Event description**: ", description, params, return_field, "#' @export"), collapse = "\n")
}
# Render the formal argument list of a generated event listener as a string,
# e.g. "function(promise, frameId = NULL, .callback = NULL)". Every protocol
# parameter defaults to NULL; `.callback` is always the trailing formal.
build_event_signature <- function(event) {
  arg_names <- purrr::map_chr(event$parameters, "name")
  # NULL when the event has no parameters; paste0() then drops the segment.
  middle <- if (length(arg_names) > 0) paste0(paste(paste0(arg_names, " = NULL"), collapse = ", "), ", ")
  paste0("function(promise, ", middle, ".callback = NULL)")
}
# Render the complete source for one event-listener wrapper: a roxygen help
# block followed by the function definition itself.
# `event` is one element of a domain's `events` list from the protocol JSON;
# `domain_name` prefixes the generated function name (e.g. "Page.loadEventFired").
generate_event <- function(event, domain_name = NULL) {
  r2help <- build_event_help(domain_name, event)
  # The generated body introspects its own formals (all but the trailing
  # `.callback`), keeps only those bound to non-NULL values, drops the
  # leading `promise`, and forwards the remainder as `params` to listen().
  body <- paste0(paste(domain_name, event$name, sep = "."), " <- ", build_event_signature(event), " {\n",
                 sprintf("  method <- '%s.%s'\n", domain_name, event$name),
                 "  args <- utils::head(rlang::fn_fmls_names(), -1)\n",
                 "  args <- args[!sapply(mget(args), is.null)]\n",
                 "  params <- mget(args)\n",
                 "  params <- if (length(params) > 1) params[2:length(params)] else NULL\n",
                 "  listen(promise, method, params, .callback)\n",
                 "}\n")
  paste(r2help, body, sep = "\n")
}
# Write R/events_<Domain>.R with one generated listener per event of
# `domain`. Domains that declare no events are skipped. Called for its side
# effect (writing the file); its return value is not used.
generate_events_source_code <- function(domain) {
  if (is.null(domain$events)) return()
  # Do-not-edit banner plus a roxygen @include so send.R is collated first.
  header <- "# DO NOT EDIT BY HAND\n#' @include send.R\nNULL"
  sources <- purrr::map_chr(domain$events, generate_event, domain_name = domain$domain)
  cat(paste(c(header, sources), collapse = "\n\n"),
      file = paste0("R/events_", domain$domain, ".R"))
}
# Side effects only: write R/events_<Domain>.R for every domain of both
# protocol definitions.
purrr::walk(js_protocol$domains, generate_events_source_code)
purrr::walk(browser_protocol$domains, generate_events_source_code)
# TODO detail the return object resulting of a command
# TODO check the remote protocol (in send)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/redo_repetitions_referral_matrix.R
\name{redo_repetitions_referral_matrix}
\alias{redo_repetitions_referral_matrix}
\alias{redo_repetitions_referral_matrix.eventlog}
\alias{redo_repetitions_referral_matrix.activitylog}
\title{Referral matrix repetitions}
\usage{
redo_repetitions_referral_matrix(log, eventlog = deprecated())
\method{redo_repetitions_referral_matrix}{eventlog}(log, eventlog = deprecated())
\method{redo_repetitions_referral_matrix}{activitylog}(log, eventlog = deprecated())
}
\arguments{
\item{log}{\code{\link[bupaR]{log}}: Object of class \code{\link[bupaR]{log}} or derivatives (\code{\link[bupaR]{grouped_log}}, \code{\link[bupaR]{eventlog}}, \code{\link[bupaR]{activitylog}}, etc.).}
\item{eventlog}{\ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#deprecated}{\figure{lifecycle-deprecated.svg}{options: alt='[Deprecated]'}}}{\strong{[Deprecated]}}; please use \code{log} instead.}
}
\description{
Provides a list of initiators and completers of redo repetitions
}
\section{Methods (by class)}{
\itemize{
\item \code{redo_repetitions_referral_matrix(eventlog)}: Compute matrix for eventlog
\item \code{redo_repetitions_referral_matrix(activitylog)}: Compute matrix for activitylog
}}
\references{
Swennen, M. (2018). Using Event Log Knowledge to Support Operational Excellence Techniques (Doctoral dissertation). Hasselt University.
}
\seealso{
\code{\link{number_of_repetitions}}
}
\concept{metrics_repetition}
| /man/redo_repetitions_referral_matrix.Rd | no_license | cran/edeaR | R | false | true | 1,573 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/redo_repetitions_referral_matrix.R
\name{redo_repetitions_referral_matrix}
\alias{redo_repetitions_referral_matrix}
\alias{redo_repetitions_referral_matrix.eventlog}
\alias{redo_repetitions_referral_matrix.activitylog}
\title{Referral matrix repetitions}
\usage{
redo_repetitions_referral_matrix(log, eventlog = deprecated())
\method{redo_repetitions_referral_matrix}{eventlog}(log, eventlog = deprecated())
\method{redo_repetitions_referral_matrix}{activitylog}(log, eventlog = deprecated())
}
\arguments{
\item{log}{\code{\link[bupaR]{log}}: Object of class \code{\link[bupaR]{log}} or derivatives (\code{\link[bupaR]{grouped_log}}, \code{\link[bupaR]{eventlog}}, \code{\link[bupaR]{activitylog}}, etc.).}
\item{eventlog}{\ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#deprecated}{\figure{lifecycle-deprecated.svg}{options: alt='[Deprecated]'}}}{\strong{[Deprecated]}}; please use \code{log} instead.}
}
\description{
Provides a list of initiators and completers of redo repetitions
}
\section{Methods (by class)}{
\itemize{
\item \code{redo_repetitions_referral_matrix(eventlog)}: Compute matrix for eventlog
\item \code{redo_repetitions_referral_matrix(activitylog)}: Compute matrix for activitylog
}}
\references{
Swennen, M. (2018). Using Event Log Knowledge to Support Operational Excellence Techniques (Doctoral dissertation). Hasselt University.
}
\seealso{
\code{\link{number_of_repetitions}}
}
\concept{metrics_repetition}
|
#' Get episodes of regime transformation (ERT)
#'
#' Helps to identify episodes of democratization (liberalization, democratic deepening) and autocratization (democratic regression, autocratic regression) in the most recent vdem data set.
#'
#' \emph{Democratization} is an umbrella term for any movement towards democracy - be it in autocracies or democracies.
#' \emph{liberalization} is defined as a subtype of democratization and specifically focuses on any movement towards democracy
#' which starts in autocracies. \emph{Democratic deepening} is also a subtype of democratization and
#' concerns all those which are already democratic and further improve their democratic traits (cf. Wilson et al., 2020).
#'
#' \emph{Autocratization} is defined as any movement towards autocracy which starts within democracies or autocracies (cf. Lührmann and Lindberg, Democratization, 2019).
#' \emph{Democratic regression} is defined as a subtype of autocratization and specifically focuses on any movement towards autocracy
#' which starts in democracies. \emph{Autocratic regression} is also a subtype of autocratization and
#' concerns all those which are already autocratic and further decline (cf. Boese et al., forthcoming in Democratization, 2020).
#'
#' @param data The data based on which the episodes are identified.
#' By default the most recent vdem data set.
#'
#' @param start_incl A threshold for detecting the onset of "potential" episodes.
#' By default a change in the EDI (Vdem's Electoral Democracy Index) of at least +/-0.01 from year(t-1) to year(t).
#'
#' @param cum_incl A threshold to identify a "manifest" episodes as a cumulative change of the EDI (Vdem's Electoral Democracy Index)
#' between the start and end of a sequence. By default a cumulative change of +/-0.1 on the EDI.
#'
#' @param year_turn A threshold to identify a sudden "turn" during a year of an ongoing episode (=failed democratization/autocratization).
#' By default a yearly change of +/-0.03 on the EDI (Vdem's Electoral Democracy Index). Note: Advanced users who wish to remove this criteria altogether
#' should set the value of year turn equal to cum turn. Setting this to zero would allow for an episode to terminate when any year of no change is encountered.
#'
#' @param cum_turn A threshold to identify a gradual "turn" during an ongoing episode (=failed democratization/autocratization).
#' By default a cumulative change of -0.1 on the EDI (Vdem's Electoral Democracy Index) between the start and end of a sequence.
#'
#' @param tolerance A threshold to specify the number of "stasis" observations (\emph{i.e.}, observations neither increasing
#' or decreasing significantly) permitted before stopping a sequence. By default 5 years.
#'
#' @return A data frame specifying episodes of regime transformation in the most recent Vdem data set.
#'
#' Democratization episodes: democratic deepening for those episodes starting in democracy ("dem_ep_dem") and
#' liberalization for those episodes starting in autocracy ("dem_ep_aut"), further distinguishing successful episodes of democratic transitions ("success"), and three types of failure,
#' (1) preempted ("fail_preem"), (2) reverted ("fail_rev"), and (3) stabilized autocracy ("fail_stab").
#'
#' Autocratization episodes: democratic regression for those episodes starting in democracy ("aut_ep_dem") and
#' autocratic regression for those episodes starting in autocracy ("aut_ep_aut"), further distinguishing subtypes of democratic regression into (1) breakdown ("breakdown"), and (2) averted democratic regression ("averted").
#'
#'
#' @import dplyr
#' @import Rcpp
#' @importFrom hablar s
#' @import tidyr
#' @importFrom plm make.pconsecutive
#' @export
#'
#' @examples
#' #Don't run
#' #Get the episodes with standard parameters:
#' #episodes <- get_eps()
#'
### set the parameters ###
get_eps <- function(data = vdemdata::vdem,
start_incl = 0.01,
cum_incl = 0.1,
year_turn = 0.03, # NOTE: year_turn is implemented in the c++ script but still needs to be setted here, otherwise it cannot be changed by user of package´
cum_turn = 0.1,
tolerance = 5)
{
if(year_turn == 0)
print("You set year_turn = 0. Did you mean to do this? Doing so means an episode ends when it experiences a year of no annual change on the EDI. Perhaps, instead, you meant to set its value equal to cum_turn. See p.3 of the ERT codebook.")
### DATA CLEANING AND PREP ###
# selecting the variables we need to construct the episodes dataframe #
full.df <- data %>%
dplyr::select(country_name, country_id, country_text_id, year,
v2x_polyarchy, codingstart, codingend, matches("v2x_polyarchy", ignore.case = FALSE),
gapstart1, gapstart2, gapstart3, gapend1, gapend2, gapend3,
v2x_regime, matches("v2eltype", ignore.case = FALSE), v2elasmoff_ord) %>%
dplyr::filter(year >= 1900) %>%
dplyr::arrange(country_text_id, year) %>%
dplyr::group_by(country_id) %>%
# make codingstart 1900 or first year thereafter
dplyr::mutate(codingstart2 = min(hablar::s(ifelse(!is.na(v2x_regime), year, NA))),
# tag original sample for later use
origsample = 1) %>%
# we need to balance the dataset to deal with gaps in coding
# this balances the dataset
plm::make.pconsecutive(balanced = TRUE, index = c("country_id", "year")) %>%
dplyr::group_by(country_id) %>%
# this fills missing variables we need that are constant within countries
tidyr::fill(c(country_text_id, country_name, codingend, gapstart1, gapend1, gapstart2, gapend2,
gapstart3, gapend3)) %>%
tidyr::fill(c(country_text_id, country_name,codingend, gapstart1, gapend1, gapstart2, gapend2,
gapstart3, gapend3), .direction = "up") %>%
# here we need to recode the gaps as only during the period prior to and during the gap (for our censoring variables)
dplyr::mutate(gapstart = ifelse(year <= gapend1, gapstart1, NA),
gapend = ifelse(year <= gapend1, gapend1, NA),
gapstart = ifelse(!is.na(gapend2) & year > gapend1 & year <= gapend2, gapstart2, gapstart),
gapend = ifelse(!is.na(gapend2) & year > gapend1 & year <= gapend2, gapend2, gapend),
gapstart = ifelse(!is.na(gapend3) & year > gapend2 & year <= gapend3, gapstart3, gapstart),
gapend = ifelse(!is.na(gapend3) & year > gapend2 & year <= gapend3, gapend3, gapend)) %>%
#### CODING THE REGIME TYPE VARIABLES ###
dplyr::arrange(country_id, year) %>%
# here we code whether a regime change event on RoW occurred in the given country year, 1 = to democracy, -1 = to autocracy
dplyr::mutate(row_regch_event = ifelse(v2x_regime > 1 & dplyr::lag(v2x_regime < 2, n = 1), 1, 0),
row_regch_event = ifelse(v2x_regime < 2 & dplyr::lag(v2x_regime > 1, n = 1), -1, row_regch_event),
# here we code the year of the most recent RoW regime change event
row_regch_year = ifelse(row_regch_event == -1 | row_regch_event == 1, year, NA),
# here we code the filled regime change variable, telling us what was the type of the most recent RoW regime change
row_regch_filled = ifelse(!is.na(row_regch_year), row_regch_event, NA)) %>%
# intially we fill everything
tidyr::fill(c(row_regch_filled, row_regch_year)) %>%
# here we replace with NA for gaps
dplyr::mutate(row_regch_filled = ifelse(!is.na(row_regch_year) & ((!is.na(gapend1) & row_regch_year<gapstart1 & year>=gapstart1) |
(!is.na(gapend2) & row_regch_year<gapstart2 & year>=gapstart2) |
(!is.na(gapend3) & row_regch_year<gapstart3 & year>=gapstart3)),
NA, row_regch_filled),
row_regch_year = ifelse(is.na(row_regch_filled), NA, row_regch_year)) %>%
ungroup() %>%
group_by(country_id, row_regch_year) %>%
# here we check whether the RoW regime change is censored
# censored near end of coding
dplyr::mutate(row_regch_censored = ifelse(codingend - row_regch_year < tolerance, 1, 0),
# censored near gap
row_regch_censored = ifelse(!is.na(gapstart) & gapstart - row_regch_year < tolerance, 1, row_regch_censored),
# here we check to see if a regime change to democracy produced a founding election
dem_founding_elec = min(hablar::s(ifelse(v2x_regime > 1 & year >= row_regch_year & v2elasmoff_ord > 1 &
# must hold leg, exec, or CA election
(v2eltype_0 == 1 | v2eltype_4 == 1 | v2eltype_6 == 1),
year, NA))),
row_demtrans_dum = ifelse(row_regch_event == 1 & !is.na(dem_founding_elec), 1, NA),
row_demtrans_dum = ifelse(row_regch_event == 1 & is.na(dem_founding_elec), 0, row_demtrans_dum),
row_regch_censored = ifelse(row_demtrans_dum == 1, 0, row_regch_censored),
row_demtrans_dum = ifelse(row_regch_censored == 1 & row_demtrans_dum == 0, NA, row_demtrans_dum),
# here we check to see if a regime change to autocracy produced a democratic breakdown
# we start by looking for autocratic founding elections
aut_founding_elec = min(hablar::s(ifelse(v2x_regime==1 & year>=row_regch_year &
# must hold leg, exec, or CA election
(v2eltype_0 == 1 | v2eltype_4 ==1 | v2eltype_6 ==1),
year, NA))),
# we also check if it remained autocratic for the tolerance period
aut_stabilized = min(hablar::s(ifelse(v2x_regime==1 & year==row_regch_year &
dplyr::lead(v2x_regime==1, n=tolerance), 1, NA))),
# finally if it became closed
aut_closed = ifelse(row_regch_event==-1,1-min(hablar::s(v2x_regime)),NA),
# check to see if any of the above conditons hold
row_breakdown_dum = ifelse(row_regch_event==-1 & (!is.na(aut_founding_elec) |
(!is.na(aut_stabilized) & aut_stabilized==1) |
(!is.na(aut_closed) & aut_closed==1)), 1, NA),
row_breakdown_dum = ifelse(row_regch_event == -1 & is.na(row_breakdown_dum), 0, row_breakdown_dum),
row_regch_censored = ifelse(!is.na(row_breakdown_dum) & row_breakdown_dum==1, 0, row_regch_censored),
row_breakdown_dum = ifelse(!is.na(row_regch_censored) & row_regch_censored==1, NA, row_breakdown_dum)) %>%
# here we code the regimes based on our criteria for democracy and autocracy
ungroup() %>%
group_by(country_id) %>%
arrange(country_id, year) %>%
# year the country transitioned to democracy on RoW provided it held a founding election
dplyr::mutate(reg_start_year=ifelse(!is.na(dem_founding_elec) & row_regch_event==1, year, NA),
# year the country transitioned to autocracy on RoW provided closed, or electoral autocracy persisted or held election
reg_start_year=ifelse(!is.na(row_breakdown_dum) & row_breakdown_dum==1, year, reg_start_year),
# here we coding founding as first year observed
reg_start_year = ifelse(year==codingstart2, year, reg_start_year),
# here we code founding as first year observed after a gap
reg_start_year = ifelse(!is.na(gapend1) & year==gapend1+1, year, reg_start_year),
reg_start_year = ifelse(!is.na(gapend2) & year==gapend2+1, year, reg_start_year),
reg_start_year = ifelse(!is.na(gapend3) & year==gapend3+1, year, reg_start_year)) %>%
tidyr::fill(reg_start_year) %>%
dplyr::mutate(reg_start_year = ifelse(!is.na(reg_start_year) & ((!is.na(gapend1) & reg_start_year<gapstart1 & year>=gapstart1) | # here we replace with NA for gaps
(!is.na(gapend2) & reg_start_year<gapstart2 & year>=gapstart2) |
(!is.na(gapend3) & reg_start_year<gapstart3 & year>=gapstart3)),
NA, reg_start_year)) %>%
ungroup() %>%
group_by(country_id, reg_start_year) %>%
# regime type is democracy (1) if v2x_regime is democratic in starting year
dplyr::mutate(reg_type = ifelse(year == reg_start_year & v2x_regime > 1, 1, NA),
# regime type is autocratic (0) if v2x_regime is autocratic in starting year
reg_type = ifelse(year == reg_start_year & v2x_regime < 2, 0, reg_type),
# fill for entire regime period
reg_type = min(hablar::s(reg_type))) %>%
ungroup() %>%
group_by(country_id) %>%
arrange(country_id, year) %>%
# here we look for years where democratic becomes autocratic or vice versa
dplyr::mutate(reg_trans = ifelse(!is.na(reg_type), reg_type - dplyr::lag(reg_type, n=1), NA),
# then we need to recode the starting years based on actual regime changes
reg_start_year = ifelse(!is.na(reg_trans) & reg_trans!=0, year, NA),
# here we coding founding as first year observed
reg_start_year = ifelse(year==codingstart2, year, reg_start_year),
# here we code founding as first year observed after a gap
reg_start_year = ifelse(!is.na(gapend1) & year==gapend1+1, year, reg_start_year),
reg_start_year = ifelse(!is.na(gapend2) & year==gapend2+1, year, reg_start_year),
reg_start_year = ifelse(!is.na(gapend3) & year==gapend3+1, year, reg_start_year)) %>%
tidyr::fill(reg_start_year) %>%
# here we replace with NA for gaps
dplyr::mutate(reg_start_year = ifelse(!is.na(reg_start_year) & ((!is.na(gapend1) & reg_start_year<gapstart1 & year>=gapstart1) |
(!is.na(gapend2) & reg_start_year<gapstart2 & year>=gapstart2) |
(!is.na(gapend3) & reg_start_year<gapstart3 & year>=gapstart3)),
NA, reg_start_year)) %>%
ungroup() %>%
group_by(country_id, reg_start_year) %>%
# here we code the end of the regime
dplyr::mutate(reg_end_year = dplyr::last(year),
# here we code the id for the regime
reg_id = ifelse(!is.na(reg_start_year), paste(country_text_id, reg_start_year, reg_end_year, sep = "_"), NA),
# here we recode the demtrans and breakdown dummies based on actual regime changes
row_demtrans_dum = ifelse(reg_trans==0 | is.na(reg_trans), 0, row_demtrans_dum),
row_breakdown_dum = ifelse(reg_trans==0 | is.na(reg_trans), 0, row_breakdown_dum),
# here we create a founding election variable for democratic regimes
founding_elec = min(hablar::s(dem_founding_elec))) %>%
ungroup() %>%
# make sure the data are sorted and grouped properly before sending to C++!!!!
arrange(country_text_id, year) %>%
group_by(country_text_id) %>%
#### CODING THE DEMOCRATIZATION EPISODES ####
### detect and save potential episodes with the help of the c++ function find_seqs
dplyr::mutate(episode_id = find_seqs_dem(v2x_polyarchy, v2x_regime, reg_trans,
start_incl, year_turn = year_turn * -1, cum_turn = cum_turn * -1,
tolerance),
# set a temporary id for these potential episodes and group accordinly
character_id = ifelse(!is.na(episode_id), paste(country_text_id, episode_id, sep = "_"), NA)) %>%
dplyr::ungroup() %>%
dplyr::group_by(character_id) %>%
# general check: is there a potential democratization episode?
dplyr::mutate(dem_ep = ifelse(!is.na(episode_id), 1, 0),
# we check whether the cumulated change in each potential episode was substantial (> cum_inc), i.e. the episode is manifest
dem_ep = ifelse(dem_ep==1 & max(v2x_polyarchy, na.rm = T) - min(v2x_polyarchy, na.rm = T) >= cum_incl, 1, 0)) %>%
dplyr::ungroup() %>%
# then we clean out variables for non-manifest episodes
dplyr::mutate(episode_id = ifelse(dem_ep!=1, NA, episode_id),
character_id = ifelse(dem_ep!=1, NA, character_id)) %>%
dplyr::group_by(character_id) %>%
# generate the initial end year for the episode (note: we have to filter out the stasis years that C++ gives us, but we will do this later):
dplyr::mutate(dem_ep_end_year = ifelse(dem_ep==1, last(year), NA),
# find potentially censored episodes (note: we might change this later depending on termination)
dem_ep_censored = ifelse(dem_ep==1 & codingend-dem_ep_end_year<tolerance, 1, 0),
dem_ep_censored = ifelse(dem_ep==1 & !is.na(gapstart) & (gapstart-1)-dem_ep_end_year<tolerance, 1, dem_ep_censored),
# generate the start year for the potential episode as the first year after the pre-episode year
dem_ep_start_year = ifelse(dem_ep==1,first(year)+1, NA),
# here we code a dummy for the pre-episode year
dem_pre_ep_year = ifelse(dem_ep==1, ifelse(year == dplyr::first(year), 1, 0), 0),
# we create a unique identifier for episodes using the country_text_id, start, and end years
dem_ep_id = ifelse(dem_ep==1, paste(country_text_id, dem_ep_start_year, dem_ep_end_year, sep = "_"), NA)) %>%
dplyr::ungroup() %>%
# remove the old identifiers we no longer need
dplyr::select(-character_id, -episode_id) %>%
# make sure the data is sorted properly
dplyr::arrange(country_name, year) %>%
# just to make sure we have a dataframe
as.data.frame %>%
####### code termination type of democratization episode
# democratization episodes end when one of five things happens:
# 0. the case is censored
# 1. stasis: the case experiences no annual increase = start_incl for the tolerance period (or more)
# 2. year drop: the case experiences an annual drop <= year_turn
# 3. cumulative dro: the case experiences a gradual drop <= cum_turn over the tolerance period (or less)
# 4. breakdown: the case experienced a democratic breakdown (only for subtype 1: democratic deepening) or
# reverted to closed authoritarianism (only for subtype 2: liberalizing autocracy)
# first find the last positive change on EDI equal to the start_incl parameter
# this will become the new end of episodes at some point, once we clean things up
dplyr::group_by(dem_ep_id) %>%
dplyr::mutate(last_ch_year = max(hablar::s(ifelse(v2x_polyarchy-dplyr::lag(v2x_polyarchy, n=1)>=start_incl, year, NA))),
# here we just replace with NA non-episode years
last_ch_year = ifelse(dem_ep==0, NA, last_ch_year)) %>%
# here we check to see if the country reverted to a closed autocracy within the episode period (termination type #4)
# first lets make sure to group by the country (not the episode!) and arrange by country-year
dplyr::group_by(country_id) %>%
dplyr::arrange(country_id, year) %>%
# then we find years where a country moved from higher values on RoW to closed (0)
dplyr::mutate(back_closed = ifelse(dplyr::lead(v2x_regime, n=1) == 0 & v2x_regime > 0, year, NA)) %>%
# now we need to group by episode to fill the values within episodes
dplyr::ungroup() %>%
dplyr::group_by(dem_ep_id) %>%
# here we then find the first time in the episode that a change from another regime to closed autocracy occurs
# limits back_closed to episode years, making sure to exclude pre-episode year
dplyr::mutate(back_closed = ifelse(dem_ep==1 & year>=dem_ep_start_year, back_closed,NA),
# finds the first year within the episode where back_closed happens
back_closed = min(hablar::s(back_closed)),
# we recode the potential end date for the episode as the year before becoming closed
dem_ep_end_year = ifelse(!is.na(back_closed) & dem_ep_end_year>back_closed, back_closed, dem_ep_end_year)) %>%
# then we need to update our dem_ep_id with the new end date (we can clean up the other variables later)
dplyr::ungroup() %>%
dplyr::mutate(dem_ep_start_year = ifelse(dem_ep==1 & year>dem_ep_end_year, NA, dem_ep_start_year),
dem_ep_end_year = ifelse(dem_ep==1 & year>dem_ep_end_year, NA, dem_ep_end_year),
dem_ep = ifelse(dem_ep==1 & year>dem_ep_end_year, 0, dem_ep),
dem_ep_id = ifelse(dem_ep==1, paste(country_text_id, dem_ep_start_year, dem_ep_end_year, sep = "_"), NA)) %>%
# then we can update our last_ch_year variable to reflect the new range of years for the episodes that terminated due to back_closed
dplyr::group_by(dem_ep_id) %>%
dplyr::mutate(last_ch_year = max(hablar::s(ifelse(v2x_polyarchy-dplyr::lag(v2x_polyarchy, n=1)>=start_incl, year, NA))),
# here we just replace with NA non-episode years
last_ch_year = ifelse(dem_ep==0, NA, last_ch_year)) %>%
# now lets make sure to group by the country (not the episode!) and arrange by country-year
dplyr::group_by(country_id) %>%
dplyr::arrange(country_id, year)
# then check to see what happened after the episode had its last substantive change equal to start_incl
# we start with the yearly drop, aka year_turn
year_drop <- list()
# here we loop over the number of years (n) equal to the tolerance period after the last_change_year
for (i in 1:tolerance) {
# we calculate the first difference in the EDI for each of these yearly changes within the tolernce
year_drop[[i]] <- ifelse(full.df$year == full.df$last_ch_year & dplyr::lead(full.df$country_id, n=i)==full.df$country_id, dplyr::lead(full.df$v2x_polyarchy, n=i)-dplyr::lead(full.df$v2x_polyarchy, n=i-1), NA)
}
# then we generate a dataframe from these calculations
df1 <- do.call(cbind, lapply(year_drop, data.frame, stringsAsFactors=FALSE))
# this just renames the columns to match the years ahead we are looking
names <- paste0('year', seq(1:tolerance))
colnames(df1) <- names
# Safe minimum: min() over an all-NA vector returns Inf with a warning, so
# return NA in that case instead. A plain if/else is used because the test
# is scalar (ifelse() is for vectors), and TRUE is spelled out rather than
# the reassignable shorthand T.
my.min <- function(x) {
  if (all(is.na(x))) NA else min(x, na.rm = TRUE)
}
# Flag rows whose sharpest annual change over the tolerance window falls
# below -year_turn, i.e. a sudden drop that terminates the episode.
year_drop <- df1 %>%
  dplyr::transmute(year_drop = ifelse(apply(df1, 1, my.min) < -year_turn, 1, NA))
# Stasis check on the same first differences: over the tolerance window every
# annual change stays below start_incl (no further substantial advance) while
# never dropping past -year_turn (no sudden reversal). Rows flagged here may
# still be overridden later to account for the cum_turn criterion.
stasis <- df1 %>%
  dplyr::transmute(
    stasis = ifelse(
      apply(df1, 1, max) < start_incl & apply(df1, 1, min) >= -year_turn,
      1, NA
    )
  )
# Gradual-decline check: for each offset i within the tolerance window,
# compute the cumulative EDI change from the last_ch_year to year + i
# (restricted to rows still within the same country); all other rows are NA.
cum_drop <- lapply(seq_len(tolerance), function(i) {
  at_last_change <- full.df$year == full.df$last_ch_year &
    dplyr::lead(full.df$country_id, n = i) == full.df$country_id
  ifelse(at_last_change,
         dplyr::lead(full.df$v2x_polyarchy, n = i) - full.df$v2x_polyarchy,
         NA)
})
# Assemble the cumulative changes into a data frame (cum1 ... cumN).
df <- do.call(cbind, lapply(cum_drop, data.frame, stringsAsFactors = FALSE))
# seq_len() replaces the redundant seq(1:tolerance), and we no longer create
# a variable called `names`, which shadowed base::names().
colnames(df) <- paste0("cum", seq_len(tolerance))
# Flag rows whose largest cumulative decline reaches -cum_turn, i.e. a
# gradual reversal that terminates the episode.
cum_drop <- df %>%
  dplyr::mutate(cum_drop = ifelse(apply(df, 1, FUN = my.min) <= cum_turn * -1, 1, NA)) %>%
  dplyr::select(cum_drop)
# Merge the three termination-signal columns (year_drop, cum_drop, stasis)
# onto full.df via a temporary row id, then code how each democratization
# episode terminated.
# Termination codes: 1 = stasis (no further substantial change),
# 2 = sudden annual drop (year_turn), 3 = gradual drop (cum_turn),
# 4 = reversion flagged by back_closed (a column coded earlier in this
# function, outside this section -- presumably reversion to closed
# autocracy; confirm against the earlier code), 0 = censored.
full.df <- full.df %>%
  tibble::rownames_to_column('newid') %>%
  left_join(tibble::rownames_to_column(year_drop, 'newid'), by = 'newid') %>%
  left_join(tibble::rownames_to_column(cum_drop, 'newid'), by = 'newid') %>%
  left_join(tibble::rownames_to_column(stasis, 'newid'), by = 'newid') %>%
  dplyr::select(-newid) %>%
  # now we can finally code our termination variable;
  # first we group by episode and sort within it
  dplyr::group_by(dem_ep_id) %>%
  dplyr::arrange(dem_ep_id, year) %>%
  # spread each per-row signal across the whole episode; hablar::s() lets
  # max() return NA quietly when the whole group is NA (instead of -Inf
  # plus a warning)
  dplyr::mutate(stasis = ifelse(dem_ep==1, max(hablar::s(stasis)), NA),
                year_drop = ifelse(dem_ep==1, max(hablar::s(year_drop)), NA),
                cum_drop = ifelse(dem_ep==1, max(hablar::s(cum_drop)), NA),
                # then we code the termination variable; note these
                # assignments are sequential, so order matters
                dem_ep_termination = ifelse(dem_ep==1 & !is.na(stasis) & is.na(year_drop) & is.na(cum_drop)
                                            & is.na(back_closed), 1, NA),
                dem_ep_termination = ifelse(dem_ep==1 & !is.na(year_drop) & is.na(back_closed), 2, dem_ep_termination),
                dem_ep_termination = ifelse(dem_ep==1 & !is.na(cum_drop) & is.na(year_drop) & is.na(back_closed), 3, dem_ep_termination),
                dem_ep_termination = ifelse(dem_ep==1 & !is.na(back_closed), 4, dem_ep_termination),
                dem_ep_termination = ifelse(dem_ep==1 & dem_ep_censored==1 & is.na(dem_ep_termination), 0, dem_ep_termination),
                # clean up the other variables to reflect the true end of the
                # episodes that are not censored; first, fix the censored flag.
                # NOTE(review): when dem_ep_termination is NA the condition is
                # NA and ifelse() propagates NA into dem_ep_censored instead of
                # keeping its previous value -- confirm this is intended
                dem_ep_censored = ifelse(dem_ep_termination !=0 & dem_ep==1, 0, dem_ep_censored),
                # recode the end year as the final positive change if not censored
                dem_ep_end_year = ifelse(dem_ep_censored==0 & dem_ep==1, last_ch_year, dem_ep_end_year),
                # then blank the episode variables for years past the new end
                # (dem_ep_end_year is NA'd last so the two lines above still
                # see the pre-NA value)
                dem_ep_termination = ifelse(dem_ep==1 & year>dem_ep_end_year, NA, dem_ep_termination),
                dem_ep_start_year = ifelse(dem_ep==1 & year>dem_ep_end_year, NA, dem_ep_start_year),
                dem_ep_end_year = ifelse(dem_ep==1 & year>dem_ep_end_year, NA, dem_ep_end_year),
                dem_ep = ifelse(is.na(dem_ep_end_year), 0, dem_ep)) %>%
  dplyr::ungroup() %>%
  # rebuild the episode id from the (possibly shortened) start/end years
  dplyr::mutate(dem_ep_id = ifelse(dem_ep==1, paste(country_text_id, dem_ep_start_year, dem_ep_end_year, sep = "_"), NA)) %>%
  dplyr::group_by(country_id) %>%
  dplyr::arrange(country_id, year) %>%
  ##### code the subtype and outcome of episode
  # subtype of democratization: 1 = "democratic deepening",
  # 2 = "autocratic liberalization"
  # note: the year of the democratic transition is included in the
  # autocratic liberalization phase
  dplyr::mutate(sub_dem_ep = ifelse(dem_ep==1 & reg_type==1 & reg_trans!=1, 1, 0),
                sub_dem_ep = ifelse(dem_ep==1 & (reg_type==0 | (reg_type==1 & reg_trans==1)),
                                    2, sub_dem_ep),
                # the pre-episode year inherits the subtype of the first episode year
                sub_dem_ep = ifelse(dem_pre_ep_year==1, dplyr::lead(sub_dem_ep, n=1), sub_dem_ep),
                # start year of the subtype phase: episode start year
                sub_dem_ep_start_year = ifelse(dem_ep==1 & (year==dem_ep_start_year |
                                                              # or the year the subtype changed
                                                              (year>dem_ep_start_year & sub_dem_ep != dplyr::lag(sub_dem_ep, n=1))), year, NA),
                # end year of the subtype phase: episode end year
                sub_dem_ep_end_year = ifelse(dem_ep==1 & (year==dem_ep_end_year |
                                                            # or the year prior to a change in subtype
                                                            (year<dem_ep_end_year & sub_dem_ep != dplyr::lead(sub_dem_ep, n=1))), year, NA)) %>%
  dplyr::ungroup() %>%
  dplyr::group_by(dem_ep_id) %>%
  dplyr::arrange(dem_ep_id, year) %>%
  # propagate phase start/end years across the phase (down, then up)
  tidyr::fill(sub_dem_ep_start_year) %>%
  tidyr::fill(sub_dem_ep_end_year, sub_dem_ep_start_year, .direction="up") %>%
  dplyr::mutate(sub_dem_ep_id = ifelse(dem_ep==1, paste(country_text_id, sub_dem_ep_start_year, sub_dem_ep_end_year, sep = "_"), NA)) %>%
  # Outcome codes (assigned sequentially; earlier codes take precedence):
  # 1 = RoW regime change during the episode produced a democratic transition
  dplyr::mutate(dem_ep_outcome = ifelse(sub_dem_ep==2 & reg_trans==1 & dem_pre_ep_year==0, 1, NA),
                # 2 = a RoW regime change during the episode failed to produce one
                dem_ep_outcome = ifelse(sub_dem_ep==2 & any(row_regch_event==1 & dem_pre_ep_year==0) &
                                          year==dem_ep_end_year & dem_ep_censored==0 &
                                          is.na(dem_ep_outcome), 2, dem_ep_outcome),
                # 3 = autocratic liberalization ending in a stabilized electoral autocracy
                dem_ep_outcome = ifelse(sub_dem_ep==2 & year==dem_ep_end_year & dem_ep_termination==1
                                        & is.na(dem_ep_outcome), 3, dem_ep_outcome),
                # 4 = autocratic liberalization that failed (sudden/gradual drop or back_closed)
                dem_ep_outcome = ifelse(sub_dem_ep==2 & year==dem_ep_end_year &
                                          (dem_ep_termination==2 | dem_ep_termination==3 | dem_ep_termination==4) &
                                          is.na(dem_ep_outcome), 4, dem_ep_outcome),
                # 5 = completed democratic deepening
                dem_ep_outcome = ifelse(sub_dem_ep==1 & year==dem_ep_end_year &
                                          dem_ep_censored==0 & is.na(dem_ep_outcome), 5, dem_ep_outcome),
                # 6 = censored episode
                dem_ep_outcome = ifelse(dem_ep==1 & dem_ep_censored==1 & is.na(dem_ep_outcome) & year==dem_ep_end_year, 6, dem_ep_outcome),
                # 0 = no episode
                dem_ep_outcome = ifelse(dem_ep==0, 0, dem_ep_outcome)) %>%
  dplyr::ungroup() %>%
  dplyr::group_by(sub_dem_ep_id) %>%
  dplyr::arrange(country_id, year) %>%
  # fill the outcome for the entire subtype phase (min of the non-NA codes)
  dplyr::mutate(dem_ep_outcome = min(hablar::s(dem_ep_outcome))) %>%
  dplyr::ungroup() %>%
  dplyr::group_by(dem_ep_id) %>%
  # an episode with any non-censored outcome is not censored
  dplyr::mutate(dem_ep_censored = ifelse(dem_ep==1 & max(dem_ep_outcome)!=6, 0, dem_ep_censored)) %>%
  dplyr::ungroup() %>%
  dplyr::group_by(country_text_id) %>%
  dplyr::arrange(country_id, year) %>%
  # stasis is a scratch column; drop it (year_drop/cum_drop are dropped later)
  dplyr::select(-stasis)
#### CODING THE AUTOCRATIZATION EPISODES ####
### detect and save potential episodes with the help of the C++ function find_seqs_aut
# note: start_incl is negated because autocratization is an EDI decline;
# year_turn and cum_turn are passed unmodified here (presumably
# find_seqs_aut applies the sign convention internally -- TODO confirm
# against the C++ source, since the democratization call negated them)
full.df <- full.df %>% dplyr::mutate(episode_id = find_seqs_aut(v2x_polyarchy, v2x_regime, reg_trans,
                                                                start_incl = start_incl * -1, year_turn, cum_turn, tolerance),
                                     # set a temporary id for these potential episodes and group accordingly
                                     character_id = ifelse(!is.na(episode_id), paste(country_text_id, episode_id, sep = "_"), NA)) %>%
  dplyr::ungroup() %>%
  dplyr::group_by(character_id) %>%
  # general check: is there a potential autocratization episode?
  dplyr::mutate(aut_ep = ifelse(!is.na(episode_id), 1, 0),
                # keep only "manifest" episodes whose cumulative decline reaches
                # cum_incl (min - max <= -cum_incl within the potential episode)
                aut_ep = ifelse(aut_ep == 1 & min(hablar::s(v2x_polyarchy)) - max(hablar::s(v2x_polyarchy)) <= cum_incl*-1, 1, 0)) %>%
  ungroup() %>%
  # then we clean out identifiers for non-manifest episodes
  dplyr::mutate(episode_id = ifelse(aut_ep != 1, NA, episode_id),
                character_id = ifelse(aut_ep != 1, NA, character_id)) %>%
  group_by(character_id) %>%
  # initial end year for the episode (note: the stasis years that the C++
  # routine includes are filtered out later, via last_ch_year):
  dplyr::mutate(aut_ep_end_year = ifelse(aut_ep == 1, last(year), NA),
                # potentially censored episodes: too close to the end of coding
                # or to a coding gap (may be revised later depending on termination)
                aut_ep_censored = ifelse(aut_ep == 1 & codingend - aut_ep_end_year<tolerance, 1, 0),
                aut_ep_censored = ifelse(aut_ep == 1 & !is.na(gapstart) & (gapstart-1)-aut_ep_end_year<tolerance, 1, aut_ep_censored),
                # start year: first year after the pre-episode year
                aut_ep_start_year = ifelse(aut_ep == 1, first(year) + 1, NA),
                # dummy for the pre-episode year (the first year of the sequence)
                aut_pre_ep_year = ifelse(aut_ep == 1, ifelse(year == dplyr::first(year), 1, 0), 0),
                # unique identifier: country_text_id + start + end years
                aut_ep_id = ifelse(aut_ep == 1, paste(country_text_id, aut_ep_start_year, aut_ep_end_year, sep = "_"), NA)) %>%
  ungroup() %>%
  # remove the old identifiers we no longer need
  dplyr::select(-character_id, -episode_id) %>%
  # make sure the data is sorted properly
  dplyr::arrange(country_name, year) %>%
  # just to make sure we have a dataframe
  as.data.frame %>%
  ####### code termination type of autocratization episode
  # autocratization episodes end when one of three things happens:
  # 1. the case experiences an annual increase >= year_turn
  # 2. the case experiences a gradual increase >= cum_turn over the tolerance period (or less)
  # 3. the case experiences no annual decrease = start_incl for the tolerance period (or more)
  # first find the last negative change on the EDI equal to the start_incl parameter
  group_by(aut_ep_id) %>%
  dplyr::mutate(last_ch_year = max(hablar::s(ifelse(v2x_polyarchy-dplyr::lag(v2x_polyarchy, n=1)<=start_incl*-1, year, NA))),
                # non-episode years are set to NA
                last_ch_year = ifelse(aut_ep==0, NA, last_ch_year)) %>%
  # regroup by the country (not the episode!) and arrange by country-year
  group_by(country_id) %>%
  arrange(country_id, year)
#### Check what happened after the episode's last substantive change
#### (<= -start_incl), starting with the sudden yearly increase (year_turn).
# For each offset i within the tolerance window, take the first difference
# of the EDI at year + i, restricted to rows that mark a last_ch_year whose
# i-th lead is still the same country; all other rows are NA.
year_incr <- lapply(seq_len(tolerance), function(i) {
  at_last_change <- full.df$year == full.df$last_ch_year &
    dplyr::lead(full.df$country_id, n = i) == full.df$country_id
  ifelse(at_last_change,
         dplyr::lead(full.df$v2x_polyarchy, n = i) - dplyr::lead(full.df$v2x_polyarchy, n = i - 1),
         NA)
})
# Bind the per-offset first differences into one data frame (one column per
# lead year); wrapping each element in data.frame() keeps column types intact.
df1 <- do.call(cbind, lapply(year_incr, data.frame, stringsAsFactors = FALSE))
# Label the columns by how many years ahead they look (year1 ... yearN).
# seq_len() replaces the redundant seq(1:tolerance), and we no longer create
# a variable called `names`, which shadowed base::names().
colnames(df1) <- paste0("year", seq_len(tolerance))
# Safe maximum: max() over an all-NA vector returns -Inf with a warning, so
# return NA in that case instead. A plain if/else is used because the test
# is scalar (ifelse() is for vectors), and TRUE is spelled out rather than
# the reassignable shorthand T.
my.max <- function(x) {
  if (all(is.na(x))) NA else max(x, na.rm = TRUE)
}
# Flag rows whose sharpest annual change over the tolerance window exceeds
# year_turn, i.e. a sudden upturn that terminates the episode.
year_incr <- df1 %>%
  dplyr::transmute(year_incr = ifelse(apply(df1, 1, my.max) > year_turn, 1, NA))
# Stasis check on the same first differences: over the tolerance window every
# annual change stays above -start_incl (no further substantial decline) while
# never rising past year_turn (no sudden upturn). Rows flagged here may still
# be overridden later to account for the cum_turn criterion.
stasis <- df1 %>%
  dplyr::transmute(
    stasis = ifelse(
      apply(df1, 1, min) > -start_incl & apply(df1, 1, max) <= year_turn,
      1, NA
    )
  )
# Gradual-increase check: for each offset i within the tolerance window,
# compute the cumulative EDI change from the last_ch_year to year + i
# (restricted to rows still within the same country); all other rows are NA.
cum_incr <- lapply(seq_len(tolerance), function(i) {
  at_last_change <- full.df$year == full.df$last_ch_year &
    dplyr::lead(full.df$country_id, n = i) == full.df$country_id
  ifelse(at_last_change,
         dplyr::lead(full.df$v2x_polyarchy, n = i) - full.df$v2x_polyarchy,
         NA)
})
# Assemble the cumulative changes into a data frame (cum1 ... cumN).
df <- do.call(cbind, lapply(cum_incr, data.frame, stringsAsFactors = FALSE))
# seq_len() replaces the redundant seq(1:tolerance), and we no longer create
# a variable called `names`, which shadowed base::names().
colnames(df) <- paste0("cum", seq_len(tolerance))
# Flag rows whose largest cumulative increase reaches cum_turn, i.e. a
# gradual reversal that terminates the episode.
cum_incr <- df %>%
  dplyr::mutate(cum_incr = ifelse(apply(df, 1, FUN = my.max) >= cum_turn, 1, NA)) %>%
  dplyr::select(cum_incr)
# Merge the three termination-signal columns (year_incr, cum_incr, stasis)
# onto full.df via a temporary row id, then code how each autocratization
# episode terminated.
# Termination codes: 1 = stasis (no further substantial change),
# 2 = sudden annual increase (year_turn), 3 = gradual increase (cum_turn),
# 0 = censored.
# NOTE(review): unlike the democratization section, `newid` is never
# explicitly dropped here; it only disappears because the final
# dplyr::select() below does not include it -- confirm that is intended.
full.df <- full.df %>%
  tibble::rownames_to_column('newid') %>%
  left_join(tibble::rownames_to_column(year_incr, 'newid'), by = 'newid') %>%
  left_join(tibble::rownames_to_column(cum_incr, 'newid'), by = 'newid') %>%
  left_join(tibble::rownames_to_column(stasis, 'newid'), by = 'newid') %>%
  # group by the autocratization episode
  ungroup() %>%
  group_by(aut_ep_id) %>%
  # spread each per-row signal across the whole episode; hablar::s() lets
  # max() return NA quietly when the whole group is NA
  dplyr::mutate(stasis = ifelse(aut_ep==1, max(hablar::s(stasis)), NA),
                year_incr = ifelse(aut_ep==1, max(hablar::s(year_incr)), NA),
                cum_incr = ifelse(aut_ep==1, max(hablar::s(cum_incr)), NA),
                # then we code the termination variable; note these
                # assignments are sequential, so order matters
                aut_ep_termination = ifelse(aut_ep==1 & !is.na(stasis) & is.na(year_incr) & is.na(cum_incr),
                                            1, NA),
                aut_ep_termination = ifelse(aut_ep==1 & !is.na(year_incr), 2, aut_ep_termination),
                aut_ep_termination = ifelse(aut_ep==1 & !is.na(cum_incr) & is.na(year_incr), 3, aut_ep_termination),
                aut_ep_termination = ifelse(aut_ep==1 & aut_ep_censored==1 & is.na(aut_ep_termination), 0, aut_ep_termination),
                # clean up the other variables to reflect the true end of the
                # episodes that are not censored; first, fix the censored flag.
                # NOTE(review): when aut_ep_termination is NA the condition is
                # NA and ifelse() propagates NA into aut_ep_censored instead of
                # keeping its previous value -- confirm this is intended
                aut_ep_censored = ifelse(aut_ep_termination !=0 & aut_ep==1, 0, aut_ep_censored),
                # recode the end year as the final substantive change if not censored
                aut_ep_end_year = ifelse(aut_ep_censored==0 & aut_ep==1, last_ch_year, aut_ep_end_year),
                # then blank the episode variables for years past the new end
                # (aut_ep_end_year is NA'd last so the two lines above still
                # see the pre-NA value)
                aut_ep_termination = ifelse(aut_ep==1 & year>aut_ep_end_year, NA, aut_ep_termination),
                aut_ep_start_year = ifelse(aut_ep==1 & year>aut_ep_end_year, NA, aut_ep_start_year),
                aut_ep_end_year = ifelse(aut_ep==1 & year>aut_ep_end_year, NA, aut_ep_end_year),
                aut_ep = ifelse(is.na(aut_ep_end_year), 0, aut_ep)) %>%
  ungroup() %>%
  # rebuild the episode id from the (possibly shortened) start/end years
  dplyr::mutate(aut_ep_id = ifelse(aut_ep==1, paste(country_text_id, aut_ep_start_year, aut_ep_end_year, sep = "_"), NA)) %>%
  group_by(aut_ep_id) %>%
  arrange(country_id, year) %>%
  ##### code the phase and outcome type of episode
  # phase of autocratization: 1 = "democratic regression",
  # 2 = "autocratic regression"
  # note: the year of the democratic breakdown is included in the
  # democratic regression phase
  dplyr::mutate(sub_aut_ep = ifelse(aut_ep==1 & (reg_type==1 | (reg_type==0 & reg_trans==-1)), 1, 0),
                sub_aut_ep = ifelse(aut_ep==1 & reg_type==0 & reg_trans!=-1, 2, sub_aut_ep),
                # the pre-episode year inherits the phase of the first episode year
                sub_aut_ep = ifelse(aut_pre_ep_year==1, dplyr::lead(sub_aut_ep, n=1), sub_aut_ep),
                # start year of the phase: episode start year
                sub_aut_ep_start_year = ifelse(aut_ep==1 & (year==aut_ep_start_year |
                                                              # or the year the phase changed
                                                              (year>aut_ep_start_year & sub_aut_ep != dplyr::lag(sub_aut_ep, n=1))), year, NA),
                # end year of the phase: episode end year
                sub_aut_ep_end_year = ifelse(aut_ep==1 & (year==aut_ep_end_year |
                                                            # or the year prior to a change in phase
                                                            (year<aut_ep_end_year & sub_aut_ep != dplyr::lead(sub_aut_ep, n=1))), year, NA)) %>%
  # propagate phase start/end years across the phase (down, then up)
  tidyr::fill(sub_aut_ep_start_year) %>%
  tidyr::fill(sub_aut_ep_end_year, sub_aut_ep_start_year, .direction="up") %>%
  dplyr::mutate(sub_aut_ep_id = ifelse(aut_ep==1, paste(country_text_id, sub_aut_ep_start_year, sub_aut_ep_end_year, sep = "_"), NA)) %>%
  ungroup() %>%
  group_by(aut_ep_id) %>%
  # Outcome codes (assigned sequentially; earlier codes take precedence):
  # 1 = RoW regime change during democratic regression produced a breakdown
  dplyr::mutate(aut_ep_outcome = ifelse(sub_aut_ep==1 & reg_trans==-1 & aut_pre_ep_year==0, 1, NA),
                # 2 = democratic regression that did not produce a breakdown
                aut_ep_outcome = ifelse(sub_aut_ep==1 & aut_ep_censored==0 & aut_pre_ep_year==0 &
                                          is.na(aut_ep_outcome), 2, aut_ep_outcome),
                # 3 = completed autocratic regression
                aut_ep_outcome = ifelse(sub_aut_ep==2 & year==aut_ep_end_year &
                                          aut_ep_censored==0 & is.na(aut_ep_outcome), 3, aut_ep_outcome),
                # 4 = censored episode
                aut_ep_outcome = ifelse(aut_ep==1 & aut_ep_censored==1 & is.na(aut_ep_outcome) & year==aut_ep_end_year, 4, aut_ep_outcome),
                # 0 = no episode
                aut_ep_outcome = ifelse(aut_ep==0, 0, aut_ep_outcome)) %>%
  dplyr::ungroup() %>%
  dplyr::group_by(sub_aut_ep_id) %>%
  dplyr::arrange(country_id, year) %>%
  # fill the outcome for the entire phase of the episode
  dplyr::mutate(aut_ep_outcome = min(hablar::s(aut_ep_outcome))) %>%
  dplyr::ungroup() %>%
  dplyr::group_by(aut_ep_id) %>%
  # an episode with any non-censored outcome is not censored
  dplyr::mutate(aut_ep_censored = ifelse(aut_ep==1 & max(aut_ep_outcome)!=4, 0, aut_ep_censored)) %>%
  dplyr::ungroup() %>%
  dplyr::group_by(country_text_id) %>%
  dplyr::arrange(country_id, year) %>%
  # clean out values from the pre-episode year (keeping *_outcome_all copies
  # that retain the outcome on the pre-episode year as well)
  dplyr::mutate(dem_ep = ifelse(dem_pre_ep_year==1, 0, dem_ep),
                dem_ep_termination = ifelse(dem_pre_ep_year==1, NA, dem_ep_termination),
                sub_dem_ep = ifelse(dem_pre_ep_year==1, 0, sub_dem_ep),
                dem_ep_outcome_all = dem_ep_outcome,
                dem_ep_outcome = ifelse(dem_pre_ep_year==1, 0, dem_ep_outcome),
                dem_ep_censored = ifelse(dem_pre_ep_year==1, 0, dem_ep_censored),
                aut_ep = ifelse(aut_pre_ep_year==1, 0, aut_ep),
                aut_ep_termination = ifelse(aut_pre_ep_year==1, NA, aut_ep_termination),
                sub_aut_ep = ifelse(aut_pre_ep_year==1, 0, sub_aut_ep),
                aut_ep_outcome_all = aut_ep_outcome,
                aut_ep_outcome = ifelse(aut_pre_ep_year==1, 0, aut_ep_outcome),
                aut_ep_censored = ifelse(aut_pre_ep_year==1, 0, aut_ep_censored)) %>%
  # keep only rows from the original (unbalanced) sample and the final columns
  dplyr::filter(!is.na(origsample)) %>%
  dplyr::select(country_id, country_text_id, country_name, year, v2x_regime, v2x_polyarchy, v2x_polyarchy_codelow, v2x_polyarchy_codehigh,
                reg_start_year, reg_end_year, reg_id, reg_type, reg_trans, founding_elec, row_regch_event, row_regch_censored,
                dem_ep, dem_ep_id, dem_ep_start_year, dem_ep_end_year, dem_pre_ep_year, dem_ep_termination,
                sub_dem_ep, sub_dem_ep_id, sub_dem_ep_start_year, sub_dem_ep_end_year, dem_ep_outcome, dem_ep_censored,
                aut_ep, aut_ep_id, aut_ep_start_year, aut_ep_end_year, aut_pre_ep_year, aut_ep_termination,
                sub_aut_ep, sub_aut_ep_id, sub_aut_ep_start_year, sub_aut_ep_end_year, aut_ep_outcome, aut_ep_censored)
# Final value of get_eps(): the assembled episodes data frame. (The
# anonymous braced block that previously wrapped an explicit return()
# here was redundant; the last expression of the function body is its
# return value.)
full.df
}
### done ;-) ###
| /R/get_eps.R | no_license | abedgell/vdemdata | R | false | false | 45,809 | r | #' Get episodes of regime transformation (ERT)
#'
#' Helps to identify episodes of democratization (liberalization, democratic deepening) and autocratization (demcratic regression, autocratic regression) in the most recent vdem data set.
#'
#' \emph{Democratization} is an umbrella term for any movement towards demcracy - be it in autocracies or democracies.
#' \emph{liberalization} is defined as a subtype of democratiztion and specifically focuses on any movement towards democracy
#' which starts in autocracies. \emph{Democratic deepening} is also a subtype of democratization and
#' concerns all those which are already democratic and further improve their democratic traits (cf. Wilson et al., 2020).
#'
#' \emph{Autocratization} is defined as any movement towards autocracy which starts within democracies or autocracies (cf. Lührmann and Lindberg, Democratization, 2019).
#' \emph{Democratic regression} is defined as a subtype of autocratization and specifically focuses on any movement towards autocracy
#' which starts in democracies. \emph{Autocratic regression} is also a subtype of autocratization and
#' concerns all those which are already autocratic and further decline (cf. Boese et al., forthcoming in Democratization, 2020).
#'
#' @param data The data based on which the episodes are identified.
#' By default the most recent vdem data set.
#'
#' @param start_incl A threshold for detecting the onset of "potential" episodes.
#' By default a change in the EDI (Vdem's Electoral Democracy Index) of at least +/-0.01 from year(t-1) to year(t).
#'
#' @param cum_incl A threshold to identify a "manifest" episodes as a cumulative change of the EDI (Vdem's Electoral Democracy Index)
#' between the start and end of a sequence. By default a cumulative change of +/-0.1 on the EDI.
#'
#' @param year_turn A threshold to identify a sudden "turn" during a year of an ongoing episode (=failed democratization/autocratization).
#' By default a yearly change of +/-0.03 on the EDI (Vdem's Electoral Democracy Index). Note: Advanced users who wish to remove this criterion altogether
#' should set the value of year_turn equal to cum_turn. Setting this to zero would allow for an episode to terminate when any year of no change is encountered.
#'
#' @param cum_turn A threshold to identify a gradual "turn" during an ongoing episode (=failed democratization/autocratization).
#' By default a cumulative change of -0.1 on the EDI (Vdem's Electoral Democracy Index) between the start and end of a sequence.
#'
#' @param tolerance A threshold to specify the number of "stasis" observations (\emph{i.e.}, observations neither increasing
#' nor decreasing significantly) permitted before stopping a sequence. By default 5 years.
#'
#' @return A data frame specifying episodes of regime transformation in the most recent Vdem data set.
#'
#' Democratization episodes: democratic deepening for those episodes starting in democracy ("dem_ep_dem") and
#' liberalization for those episodes starting in autocracy ("dem_ep_aut"), further distinguishing successful episodes of democratic transitions ("success"), and three types of failure,
#' (1) preempted ("fail_preem"), (2) reverted ("fail_rev"), and (3) stabilized autocracy ("fail_stab").
#'
#' Autocratization episodes: democratic regression for those episodes starting in democracy ("aut_ep_dem") and
#' autocratic regression for those episodes starting in autocracy ("aut_ep_aut"), further distinguishing subtypes of democratic regression into (1) breakdown ("breakdown"), and (2) averted democratic regression ("averted").
#'
#'
#' @import dplyr
#' @import Rcpp
#' @importFrom hablar s
#' @import tidyr
#' @importFrom plm make.pconsecutive
#' @export
#'
#' @examples
#' #Don't run
#' #Get the episodes with standard parameters:
#' #episodes <- get_eps()
#'
### set the parameters ###
get_eps <- function(data = vdemdata::vdem,
start_incl = 0.01,
cum_incl = 0.1,
year_turn = 0.03, # NOTE: year_turn is implemented in the c++ script but still needs to be setted here, otherwise it cannot be changed by user of package´
cum_turn = 0.1,
tolerance = 5)
{
# A year_turn of 0 makes an episode end at the first year of no annual EDI
# change, which is rarely what the user wants; advise them. message() is
# used instead of print() so the advisory goes to stderr and can be
# suppressed, as recommended for status/diagnostic output.
if (year_turn == 0) {
  message("You set year_turn = 0. Did you mean to do this? Doing so means an episode ends when it experiences a year of no annual change on the EDI. Perhaps, instead, you meant to set its value equal to cum_turn. See p.3 of the ERT codebook.")
}
### DATA CLEANING AND PREP ###
# selecting the variables we need to construct the episodes dataframe #
full.df <- data %>%
dplyr::select(country_name, country_id, country_text_id, year,
v2x_polyarchy, codingstart, codingend, matches("v2x_polyarchy", ignore.case = FALSE),
gapstart1, gapstart2, gapstart3, gapend1, gapend2, gapend3,
v2x_regime, matches("v2eltype", ignore.case = FALSE), v2elasmoff_ord) %>%
dplyr::filter(year >= 1900) %>%
dplyr::arrange(country_text_id, year) %>%
dplyr::group_by(country_id) %>%
# make codingstart 1900 or first year thereafter
dplyr::mutate(codingstart2 = min(hablar::s(ifelse(!is.na(v2x_regime), year, NA))),
# tag original sample for later use
origsample = 1) %>%
# we need to balance the dataset to deal with gaps in coding
# this balances the dataset
plm::make.pconsecutive(balanced = TRUE, index = c("country_id", "year")) %>%
dplyr::group_by(country_id) %>%
# this fills missing variables we need that are constant within countries
tidyr::fill(c(country_text_id, country_name, codingend, gapstart1, gapend1, gapstart2, gapend2,
gapstart3, gapend3)) %>%
tidyr::fill(c(country_text_id, country_name,codingend, gapstart1, gapend1, gapstart2, gapend2,
gapstart3, gapend3), .direction = "up") %>%
# here we need to recode the gaps as only during the period prior to and during the gap (for our censoring variables)
dplyr::mutate(gapstart = ifelse(year <= gapend1, gapstart1, NA),
gapend = ifelse(year <= gapend1, gapend1, NA),
gapstart = ifelse(!is.na(gapend2) & year > gapend1 & year <= gapend2, gapstart2, gapstart),
gapend = ifelse(!is.na(gapend2) & year > gapend1 & year <= gapend2, gapend2, gapend),
gapstart = ifelse(!is.na(gapend3) & year > gapend2 & year <= gapend3, gapstart3, gapstart),
gapend = ifelse(!is.na(gapend3) & year > gapend2 & year <= gapend3, gapend3, gapend)) %>%
#### CODING THE REGIME TYPE VARIABLES ###
dplyr::arrange(country_id, year) %>%
# here we code whether a regime change event on RoW occurred in the given country year, 1 = to democracy, -1 = to autocracy
dplyr::mutate(row_regch_event = ifelse(v2x_regime > 1 & dplyr::lag(v2x_regime < 2, n = 1), 1, 0),
row_regch_event = ifelse(v2x_regime < 2 & dplyr::lag(v2x_regime > 1, n = 1), -1, row_regch_event),
# here we code the year of the most recent RoW regime change event
row_regch_year = ifelse(row_regch_event == -1 | row_regch_event == 1, year, NA),
# here we code the filled regime change variable, telling us what was the type of the most recent RoW regime change
row_regch_filled = ifelse(!is.na(row_regch_year), row_regch_event, NA)) %>%
# intially we fill everything
tidyr::fill(c(row_regch_filled, row_regch_year)) %>%
# here we replace with NA for gaps
dplyr::mutate(row_regch_filled = ifelse(!is.na(row_regch_year) & ((!is.na(gapend1) & row_regch_year<gapstart1 & year>=gapstart1) |
(!is.na(gapend2) & row_regch_year<gapstart2 & year>=gapstart2) |
(!is.na(gapend3) & row_regch_year<gapstart3 & year>=gapstart3)),
NA, row_regch_filled),
row_regch_year = ifelse(is.na(row_regch_filled), NA, row_regch_year)) %>%
ungroup() %>%
group_by(country_id, row_regch_year) %>%
# here we check whether the RoW regime change is censored
# censored near end of coding
dplyr::mutate(row_regch_censored = ifelse(codingend - row_regch_year < tolerance, 1, 0),
# censored near gap
row_regch_censored = ifelse(!is.na(gapstart) & gapstart - row_regch_year < tolerance, 1, row_regch_censored),
# here we check to see if a regime change to democracy produced a founding election
dem_founding_elec = min(hablar::s(ifelse(v2x_regime > 1 & year >= row_regch_year & v2elasmoff_ord > 1 &
# must hold leg, exec, or CA election
(v2eltype_0 == 1 | v2eltype_4 == 1 | v2eltype_6 == 1),
year, NA))),
row_demtrans_dum = ifelse(row_regch_event == 1 & !is.na(dem_founding_elec), 1, NA),
row_demtrans_dum = ifelse(row_regch_event == 1 & is.na(dem_founding_elec), 0, row_demtrans_dum),
row_regch_censored = ifelse(row_demtrans_dum == 1, 0, row_regch_censored),
row_demtrans_dum = ifelse(row_regch_censored == 1 & row_demtrans_dum == 0, NA, row_demtrans_dum),
# here we check to see if a regime change to autocracy produced a democratic breakdown
# we start by looking for autocratic founding elections
aut_founding_elec = min(hablar::s(ifelse(v2x_regime==1 & year>=row_regch_year &
# must hold leg, exec, or CA election
(v2eltype_0 == 1 | v2eltype_4 ==1 | v2eltype_6 ==1),
year, NA))),
# we also check if it remained autocratic for the tolerance period
aut_stabilized = min(hablar::s(ifelse(v2x_regime==1 & year==row_regch_year &
dplyr::lead(v2x_regime==1, n=tolerance), 1, NA))),
# finally if it became closed
aut_closed = ifelse(row_regch_event==-1,1-min(hablar::s(v2x_regime)),NA),
# check to see if any of the above conditons hold
row_breakdown_dum = ifelse(row_regch_event==-1 & (!is.na(aut_founding_elec) |
(!is.na(aut_stabilized) & aut_stabilized==1) |
(!is.na(aut_closed) & aut_closed==1)), 1, NA),
row_breakdown_dum = ifelse(row_regch_event == -1 & is.na(row_breakdown_dum), 0, row_breakdown_dum),
row_regch_censored = ifelse(!is.na(row_breakdown_dum) & row_breakdown_dum==1, 0, row_regch_censored),
row_breakdown_dum = ifelse(!is.na(row_regch_censored) & row_regch_censored==1, NA, row_breakdown_dum)) %>%
# here we code the regimes based on our criteria for democracy and autocracy
ungroup() %>%
group_by(country_id) %>%
arrange(country_id, year) %>%
# year the country transitioned to democracy on RoW provided it held a founding election
dplyr::mutate(reg_start_year=ifelse(!is.na(dem_founding_elec) & row_regch_event==1, year, NA),
# year the country transitioned to autocracy on RoW provided closed, or electoral autocracy persisted or held election
reg_start_year=ifelse(!is.na(row_breakdown_dum) & row_breakdown_dum==1, year, reg_start_year),
# here we coding founding as first year observed
reg_start_year = ifelse(year==codingstart2, year, reg_start_year),
# here we code founding as first year observed after a gap
reg_start_year = ifelse(!is.na(gapend1) & year==gapend1+1, year, reg_start_year),
reg_start_year = ifelse(!is.na(gapend2) & year==gapend2+1, year, reg_start_year),
reg_start_year = ifelse(!is.na(gapend3) & year==gapend3+1, year, reg_start_year)) %>%
tidyr::fill(reg_start_year) %>%
dplyr::mutate(reg_start_year = ifelse(!is.na(reg_start_year) & ((!is.na(gapend1) & reg_start_year<gapstart1 & year>=gapstart1) | # here we replace with NA for gaps
(!is.na(gapend2) & reg_start_year<gapstart2 & year>=gapstart2) |
(!is.na(gapend3) & reg_start_year<gapstart3 & year>=gapstart3)),
NA, reg_start_year)) %>%
ungroup() %>%
group_by(country_id, reg_start_year) %>%
# regime type is democracy (1) if v2x_regime is democratic in starting year
dplyr::mutate(reg_type = ifelse(year == reg_start_year & v2x_regime > 1, 1, NA),
# regime type is autocratic (0) if v2x_regime is autocratic in starting year
reg_type = ifelse(year == reg_start_year & v2x_regime < 2, 0, reg_type),
# fill for entire regime period
reg_type = min(hablar::s(reg_type))) %>%
ungroup() %>%
group_by(country_id) %>%
arrange(country_id, year) %>%
# here we look for years where democratic becomes autocratic or vice versa
dplyr::mutate(reg_trans = ifelse(!is.na(reg_type), reg_type - dplyr::lag(reg_type, n=1), NA),
# then we need to recode the starting years based on actual regime changes
reg_start_year = ifelse(!is.na(reg_trans) & reg_trans!=0, year, NA),
# here we coding founding as first year observed
reg_start_year = ifelse(year==codingstart2, year, reg_start_year),
# here we code founding as first year observed after a gap
reg_start_year = ifelse(!is.na(gapend1) & year==gapend1+1, year, reg_start_year),
reg_start_year = ifelse(!is.na(gapend2) & year==gapend2+1, year, reg_start_year),
reg_start_year = ifelse(!is.na(gapend3) & year==gapend3+1, year, reg_start_year)) %>%
tidyr::fill(reg_start_year) %>%
# here we replace with NA for gaps
dplyr::mutate(reg_start_year = ifelse(!is.na(reg_start_year) & ((!is.na(gapend1) & reg_start_year<gapstart1 & year>=gapstart1) |
(!is.na(gapend2) & reg_start_year<gapstart2 & year>=gapstart2) |
(!is.na(gapend3) & reg_start_year<gapstart3 & year>=gapstart3)),
NA, reg_start_year)) %>%
ungroup() %>%
group_by(country_id, reg_start_year) %>%
# here we code the end of the regime
dplyr::mutate(reg_end_year = dplyr::last(year),
# here we code the id for the regime
reg_id = ifelse(!is.na(reg_start_year), paste(country_text_id, reg_start_year, reg_end_year, sep = "_"), NA),
# here we recode the demtrans and breakdown dummies based on actual regime changes
row_demtrans_dum = ifelse(reg_trans==0 | is.na(reg_trans), 0, row_demtrans_dum),
row_breakdown_dum = ifelse(reg_trans==0 | is.na(reg_trans), 0, row_breakdown_dum),
# here we create a founding election variable for democratic regimes
founding_elec = min(hablar::s(dem_founding_elec))) %>%
ungroup() %>%
# make sure the data are sorted and grouped properly before sending to C++!!!!
arrange(country_text_id, year) %>%
group_by(country_text_id) %>%
#### CODING THE DEMOCRATIZATION EPISODES ####
### detect and save potential episodes with the help of the c++ function find_seqs
dplyr::mutate(episode_id = find_seqs_dem(v2x_polyarchy, v2x_regime, reg_trans,
start_incl, year_turn = year_turn * -1, cum_turn = cum_turn * -1,
tolerance),
# set a temporary id for these potential episodes and group accordinly
character_id = ifelse(!is.na(episode_id), paste(country_text_id, episode_id, sep = "_"), NA)) %>%
dplyr::ungroup() %>%
dplyr::group_by(character_id) %>%
# general check: is there a potential democratization episode?
dplyr::mutate(dem_ep = ifelse(!is.na(episode_id), 1, 0),
# we check whether the cumulated change in each potential episode was substantial (> cum_inc), i.e. the episode is manifest
dem_ep = ifelse(dem_ep==1 & max(v2x_polyarchy, na.rm = T) - min(v2x_polyarchy, na.rm = T) >= cum_incl, 1, 0)) %>%
dplyr::ungroup() %>%
# then we clean out variables for non-manifest episodes
dplyr::mutate(episode_id = ifelse(dem_ep!=1, NA, episode_id),
character_id = ifelse(dem_ep!=1, NA, character_id)) %>%
dplyr::group_by(character_id) %>%
# generate the initial end year for the episode (note: we have to filter out the stasis years that C++ gives us, but we will do this later):
dplyr::mutate(dem_ep_end_year = ifelse(dem_ep==1, last(year), NA),
# find potentially censored episodes (note: we might change this later depending on termination)
dem_ep_censored = ifelse(dem_ep==1 & codingend-dem_ep_end_year<tolerance, 1, 0),
dem_ep_censored = ifelse(dem_ep==1 & !is.na(gapstart) & (gapstart-1)-dem_ep_end_year<tolerance, 1, dem_ep_censored),
# generate the start year for the potential episode as the first year after the pre-episode year
dem_ep_start_year = ifelse(dem_ep==1,first(year)+1, NA),
# here we code a dummy for the pre-episode year
dem_pre_ep_year = ifelse(dem_ep==1, ifelse(year == dplyr::first(year), 1, 0), 0),
# we create a unique identifier for episodes using the country_text_id, start, and end years
dem_ep_id = ifelse(dem_ep==1, paste(country_text_id, dem_ep_start_year, dem_ep_end_year, sep = "_"), NA)) %>%
dplyr::ungroup() %>%
# remove the old identifiers we no longer need
dplyr::select(-character_id, -episode_id) %>%
# make sure the data is sorted properly
dplyr::arrange(country_name, year) %>%
# just to make sure we have a dataframe
as.data.frame %>%
####### code termination type of democratization episode
# democratization episodes end when one of five things happens:
# 0. the case is censored
# 1. stasis: the case experiences no annual increase = start_incl for the tolerance period (or more)
# 2. year drop: the case experiences an annual drop <= year_turn
# 3. cumulative dro: the case experiences a gradual drop <= cum_turn over the tolerance period (or less)
# 4. breakdown: the case experienced a democratic breakdown (only for subtype 1: democratic deepening) or
# reverted to closed authoritarianism (only for subtype 2: liberalizing autocracy)
# first find the last positive change on EDI equal to the start_incl parameter
# this will become the new end of episodes at some point, once we clean things up
dplyr::group_by(dem_ep_id) %>%
dplyr::mutate(last_ch_year = max(hablar::s(ifelse(v2x_polyarchy-dplyr::lag(v2x_polyarchy, n=1)>=start_incl, year, NA))),
# here we just replace with NA non-episode years
last_ch_year = ifelse(dem_ep==0, NA, last_ch_year)) %>%
# here we check to see if the country reverted to a closed autocracy within the episode period (termination type #4)
# first lets make sure to group by the country (not the episode!) and arrange by country-year
dplyr::group_by(country_id) %>%
dplyr::arrange(country_id, year) %>%
# then we find years where a country moved from higher values on RoW to closed (0)
dplyr::mutate(back_closed = ifelse(dplyr::lead(v2x_regime, n=1) == 0 & v2x_regime > 0, year, NA)) %>%
# now we need to group by episode to fill the values within episodes
dplyr::ungroup() %>%
dplyr::group_by(dem_ep_id) %>%
# here we then find the first time in the episode that a change from another regime to closed autocracy occurs
# limits back_closed to episode years, making sure to exclude pre-episode year
dplyr::mutate(back_closed = ifelse(dem_ep==1 & year>=dem_ep_start_year, back_closed,NA),
# finds the first year within the episode where back_closed happens
back_closed = min(hablar::s(back_closed)),
# we recode the potential end date for the episode as the year before becoming closed
dem_ep_end_year = ifelse(!is.na(back_closed) & dem_ep_end_year>back_closed, back_closed, dem_ep_end_year)) %>%
# then we need to update our dem_ep_id with the new end date (we can clean up the other variables later)
dplyr::ungroup() %>%
dplyr::mutate(dem_ep_start_year = ifelse(dem_ep==1 & year>dem_ep_end_year, NA, dem_ep_start_year),
dem_ep_end_year = ifelse(dem_ep==1 & year>dem_ep_end_year, NA, dem_ep_end_year),
dem_ep = ifelse(dem_ep==1 & year>dem_ep_end_year, 0, dem_ep),
dem_ep_id = ifelse(dem_ep==1, paste(country_text_id, dem_ep_start_year, dem_ep_end_year, sep = "_"), NA)) %>%
# then we can update our last_ch_year variable to reflect the new range of years for the episodes that terminated due to back_closed
dplyr::group_by(dem_ep_id) %>%
dplyr::mutate(last_ch_year = max(hablar::s(ifelse(v2x_polyarchy-dplyr::lag(v2x_polyarchy, n=1)>=start_incl, year, NA))),
# here we just replace with NA non-episode years
last_ch_year = ifelse(dem_ep==0, NA, last_ch_year)) %>%
# now lets make sure to group by the country (not the episode!) and arrange by country-year
dplyr::group_by(country_id) %>%
dplyr::arrange(country_id, year)
# then check to see what happened after the episode had its last substantive change equal to start_incl
# we start with the yearly drop, aka year_turn
year_drop <- list()
# here we loop over the number of years (n) equal to the tolerance period after the last_change_year
for (i in 1:tolerance) {
# we calculate the first difference in the EDI for each of these yearly changes within the tolernce
year_drop[[i]] <- ifelse(full.df$year == full.df$last_ch_year & dplyr::lead(full.df$country_id, n=i)==full.df$country_id, dplyr::lead(full.df$v2x_polyarchy, n=i)-dplyr::lead(full.df$v2x_polyarchy, n=i-1), NA)
}
# then we generate a dataframe from these calculations
df1 <- do.call(cbind, lapply(year_drop, data.frame, stringsAsFactors=FALSE))
# this just renames the columns to match the years ahead we are looking
names <- paste0('year', seq(1:tolerance))
colnames(df1) <- names
# this transforms the result into a dataframe that we can use as a column in our existing dataframe
# first write a small function to deal with Inf
my.min <- function(x) ifelse(!all(is.na(x)), min(x, na.rm=T), NA)
year_drop <- df1 %>%
dplyr::mutate(year_drop = ifelse(apply(df1, 1, FUN = my.min) < year_turn*-1, 1,NA)) %>%
dplyr::select(year_drop)
# now we can also use the first-differences we calculated above to look for stasis as well
# note - we will have to clean this up later to account for cum_turn as well
stasis <- df1 %>%
# this checks whether the maximum annual change is less than start_incl over the tolerance period &
# that it is also greater than the year_turn parameter, i.e. stasis
dplyr::mutate(stasis = ifelse(apply(df1, 1, FUN = max) < start_incl & apply(df1, 1, FUN = min) >= year_turn*-1, 1,NA)) %>%
dplyr::select(stasis)
# now we look for a gradual drop equal to cum_drop over the tolerance period
cum_drop <- list()
# here we loop over time equal to the tolerance, looking for the difference between the last_change_year and that year on the EDI
for (i in 1:tolerance) {
cum_drop[[i]] <- ifelse(full.df$year == full.df$last_ch_year & dplyr::lead(full.df$country_id, n=i)==full.df$country_id, dplyr::lead(full.df$v2x_polyarchy, n=i)-full.df$v2x_polyarchy, NA)
}
# then we rename the columns and generate a dataframe we can use for our existing data
df <- do.call(cbind, lapply(cum_drop, data.frame, stringsAsFactors=FALSE))
names <- paste0('cum', seq(1:tolerance))
colnames(df) <- names
cum_drop <- df %>%
dplyr::mutate(cum_drop = ifelse(apply(df, 1, FUN = my.min) <= cum_turn*-1, 1,NA)) %>%
dplyr::select(cum_drop)
# merge these new columns to our full.df
full.df <- full.df %>%
tibble::rownames_to_column('newid') %>%
left_join(tibble::rownames_to_column(year_drop, 'newid'), by = 'newid') %>%
left_join(tibble::rownames_to_column(cum_drop, 'newid'), by = 'newid') %>%
left_join(tibble::rownames_to_column(stasis, 'newid'), by = 'newid') %>%
dplyr::select(-newid) %>%
# now we can finally code our termination variable
# first we group by episode
dplyr::group_by(dem_ep_id) %>%
dplyr::arrange(dem_ep_id, year) %>%
# first, lets fill everything in for the episode
dplyr::mutate(stasis = ifelse(dem_ep==1, max(hablar::s(stasis)), NA),
year_drop = ifelse(dem_ep==1, max(hablar::s(year_drop)), NA),
cum_drop = ifelse(dem_ep==1, max(hablar::s(cum_drop)), NA),
# then we can code the termination variable
dem_ep_termination = ifelse(dem_ep==1 & !is.na(stasis) & is.na(year_drop) & is.na(cum_drop)
& is.na(back_closed), 1, NA),
dem_ep_termination = ifelse(dem_ep==1 & !is.na(year_drop) & is.na(back_closed), 2, dem_ep_termination),
dem_ep_termination = ifelse(dem_ep==1 & !is.na(cum_drop) & is.na(year_drop) & is.na(back_closed), 3, dem_ep_termination),
dem_ep_termination = ifelse(dem_ep==1 & !is.na(back_closed), 4, dem_ep_termination),
dem_ep_termination = ifelse(dem_ep==1 & dem_ep_censored==1 & is.na(dem_ep_termination), 0, dem_ep_termination),
# now we can clean up the other variables to reflect the true end of the episodes that are not censored
# first, let's fix the censored variable
dem_ep_censored = ifelse(dem_ep_termination !=0 & dem_ep==1, 0, dem_ep_censored),
# then we recode the end year as the final positive change if not censored
dem_ep_end_year = ifelse(dem_ep_censored==0 & dem_ep==1, last_ch_year, dem_ep_end_year),
# then we clean up the other variables for non-episode years
dem_ep_termination = ifelse(dem_ep==1 & year>dem_ep_end_year, NA, dem_ep_termination),
dem_ep_start_year = ifelse(dem_ep==1 & year>dem_ep_end_year, NA, dem_ep_start_year),
dem_ep_end_year = ifelse(dem_ep==1 & year>dem_ep_end_year, NA, dem_ep_end_year),
dem_ep = ifelse(is.na(dem_ep_end_year), 0, dem_ep)) %>%
dplyr::ungroup() %>%
dplyr::mutate(dem_ep_id = ifelse(dem_ep==1, paste(country_text_id, dem_ep_start_year, dem_ep_end_year, sep = "_"), NA)) %>%
dplyr::group_by(country_id) %>%
dplyr::arrange(country_id, year) %>%
##### code the subtype and outcome of episode
# we code a variable that captures the subtype of democratization, were 1 = "democratic deepening" and 2 = "autocratic liberalization"
# note: the year of the democratic transition is included in the autocratic liberalization phase
dplyr::mutate(sub_dem_ep = ifelse(dem_ep==1 & reg_type==1 & reg_trans!=1, 1, 0),
sub_dem_ep = ifelse(dem_ep==1 & (reg_type==0 | (reg_type==1 & reg_trans==1)),
2, sub_dem_ep),
sub_dem_ep = ifelse(dem_pre_ep_year==1, dplyr::lead(sub_dem_ep, n=1), sub_dem_ep),
# we code the start and end dates for each subtype
# start year of episode
sub_dem_ep_start_year = ifelse(dem_ep==1 & (year==dem_ep_start_year |
# or year the subtype changed
(year>dem_ep_start_year & sub_dem_ep != dplyr::lag(sub_dem_ep, n=1))), year, NA),
# end year of episode
sub_dem_ep_end_year = ifelse(dem_ep==1 & (year==dem_ep_end_year |
# or year prior to change in subtype
(year<dem_ep_end_year & sub_dem_ep != dplyr::lead(sub_dem_ep, n=1))), year, NA)) %>%
dplyr::ungroup() %>%
dplyr::group_by(dem_ep_id) %>%
dplyr::arrange(dem_ep_id, year) %>%
tidyr::fill(sub_dem_ep_start_year) %>%
tidyr::fill(sub_dem_ep_end_year, sub_dem_ep_start_year, .direction="up") %>%
dplyr::mutate(sub_dem_ep_id = ifelse(dem_ep==1, paste(country_text_id, sub_dem_ep_start_year, sub_dem_ep_end_year, sep = "_"), NA)) %>%
# did a regime change on RoW during the episode produce a genuine democratic transition?
dplyr::mutate(dem_ep_outcome = ifelse(sub_dem_ep==2 & reg_trans==1 & dem_pre_ep_year==0, 1, NA),
# did a regime change on RoW during the episode fail to produce a democratic transition?
dem_ep_outcome = ifelse(sub_dem_ep==2 & any(row_regch_event==1 & dem_pre_ep_year==0) &
year==dem_ep_end_year & dem_ep_censored==0 &
is.na(dem_ep_outcome), 2, dem_ep_outcome),
# did the autocratic liberalization phase result in a stabilized electoral autocracy?
dem_ep_outcome = ifelse(sub_dem_ep==2 & year==dem_ep_end_year & dem_ep_termination==1
& is.na(dem_ep_outcome), 3, dem_ep_outcome),
# did the autocratic liberalization phase result in a failed liberalization?
dem_ep_outcome = ifelse(sub_dem_ep==2 & year==dem_ep_end_year &
(dem_ep_termination==2 | dem_ep_termination==3 | dem_ep_termination==4) &
is.na(dem_ep_outcome), 4, dem_ep_outcome),
# code the outcome for completed democratic deepening
dem_ep_outcome = ifelse(sub_dem_ep==1 & year==dem_ep_end_year &
dem_ep_censored==0 & is.na(dem_ep_outcome), 5, dem_ep_outcome),
# code censored episodes
dem_ep_outcome = ifelse(dem_ep==1 & dem_ep_censored==1 & is.na(dem_ep_outcome) & year==dem_ep_end_year, 6, dem_ep_outcome),
dem_ep_outcome = ifelse(dem_ep==0, 0, dem_ep_outcome)) %>%
dplyr::ungroup() %>%
dplyr::group_by(sub_dem_ep_id) %>%
dplyr::arrange(country_id, year) %>%
# fill for the entire subtype period
dplyr::mutate(dem_ep_outcome = min(hablar::s(dem_ep_outcome))) %>%
dplyr::ungroup() %>%
dplyr::group_by(dem_ep_id) %>%
dplyr::mutate(dem_ep_censored = ifelse(dem_ep==1 & max(dem_ep_outcome)!=6, 0, dem_ep_censored)) %>%
dplyr::ungroup() %>%
dplyr::group_by(country_text_id) %>%
dplyr::arrange(country_id, year) %>%
dplyr::select(-stasis)
#### CODING THE AUTOCRATIZATION EPISODES ####
### detect and save potential episodes with the help of the c++ function find_seqs
full.df <- full.df %>% dplyr::mutate(episode_id = find_seqs_aut(v2x_polyarchy, v2x_regime, reg_trans,
start_incl = start_incl * -1, year_turn, cum_turn, tolerance),
# set a temporary id for these potential episodes and group accordinly
character_id = ifelse(!is.na(episode_id), paste(country_text_id, episode_id, sep = "_"), NA)) %>%
dplyr::ungroup() %>%
dplyr::group_by(character_id) %>%
# general check: is there a potential autocratization episode?
dplyr::mutate(aut_ep = ifelse(!is.na(episode_id), 1, 0),
# we check whether the cumulated change in each potential episode was substantial (> cum_inc), i.e. the episode is manifest
aut_ep = ifelse(aut_ep == 1 & min(hablar::s(v2x_polyarchy)) - max(hablar::s(v2x_polyarchy)) <= cum_incl*-1, 1, 0)) %>%
ungroup() %>%
# then we clean out variables for non-manifest episodes
dplyr::mutate(episode_id = ifelse(aut_ep != 1, NA, episode_id),
character_id = ifelse(aut_ep != 1, NA, character_id)) %>%
group_by(character_id) %>%
# generate the initial end year for the episode (note: we have to filter out the stasis years that C++ gives us, but we will do this later):
dplyr::mutate(aut_ep_end_year = ifelse(aut_ep == 1, last(year), NA),
# find potentially censored episodes (note: we might change this later depending on termination)
aut_ep_censored = ifelse(aut_ep == 1 & codingend - aut_ep_end_year<tolerance, 1, 0),
aut_ep_censored = ifelse(aut_ep == 1 & !is.na(gapstart) & (gapstart-1)-aut_ep_end_year<tolerance, 1, aut_ep_censored),
# generate the start year for the potential episode as the first year after the pre-episode year
aut_ep_start_year = ifelse(aut_ep == 1, first(year) + 1, NA),
# here we code a dummy for the pre-episode year
aut_pre_ep_year = ifelse(aut_ep == 1, ifelse(year == dplyr::first(year), 1, 0), 0),
# we create a unique identifier for episodes and phases using the country_text_id, start, and end years
aut_ep_id = ifelse(aut_ep == 1, paste(country_text_id, aut_ep_start_year, aut_ep_end_year, sep = "_"), NA)) %>%
ungroup() %>%
# remove the old identifiers we no longer need
dplyr::select(-character_id, -episode_id) %>%
# make sure the data is sorted properly
dplyr::arrange(country_name, year) %>%
# just to make sure we have a dataframe
as.data.frame %>%
####### code termination type of autocratization episode
# autocrtization episodes end when one of three things happens:
# 1. the case experiences an annual increase >= year_turn
# 2. the case experiences a gradual increase >= cum_turn over the tolerance period (or less)
# 3. the case experiences no annual decrease = start_incl for the tolerance period (or more)
# first find the last negative change on EDI equal to the start_incl parameter
group_by(aut_ep_id) %>%
dplyr::mutate(last_ch_year = max(hablar::s(ifelse(v2x_polyarchy-dplyr::lag(v2x_polyarchy, n=1)<=start_incl*-1, year, NA))),
# here we just replace with NA non-episode years
last_ch_year = ifelse(aut_ep==0, NA, last_ch_year)) %>%
# now lets make sure to group by the country (not the episode!) and arrange by country-year
group_by(country_id) %>%
arrange(country_id, year)
#### then check to see what happened the after the episode had its last substantive change equal to start_incl
# we start with the yearly increase, aka year_turn
year_incr <- list()
# here we loop over the number of years (n) equal to the tolerance period after the last_change_year
for (i in 1:tolerance) {
# we calculate the first difference in the EDI for each of these yearly changes within the tolernce
year_incr[[i]] <- ifelse(full.df$year == full.df$last_ch_year & dplyr::lead(full.df$country_id, n=i)==full.df$country_id, dplyr::lead(full.df$v2x_polyarchy, n=i)-dplyr::lead(full.df$v2x_polyarchy, n=i-1), NA)
}
# then we generate a dataframe from these calculations
df1 <- do.call(cbind, lapply(year_incr, data.frame, stringsAsFactors=FALSE))
# this just renames the columns to match the years ahead we are looking
names <- paste0('year', seq(1:tolerance))
colnames(df1) <- names
# this transforms the result into a dataframe that we can use as a column in our existing dataframe
# first write a function to deal with INF warnings
my.max <- function(x) ifelse(!all(is.na(x)), max(x, na.rm=T), NA)
year_incr <- df1 %>%
dplyr::mutate(year_incr = ifelse(apply(df1, 1, FUN = my.max) > year_turn, 1,NA)) %>%
dplyr::select(year_incr)
# now we can also use the first-differences we calculated above to look for stasis as well
# this transforms the result into a dataframe that we can use as a column in our existing dataframe
# note - we will have to clean this up later to account for cum_turn as well
stasis <- df1 %>%
# this checks whether the maximum annual change is less than start_incl over the tolerance period &
# that it is also greater than the year_turn parameter, i.e. stasis
dplyr::mutate(stasis = ifelse(apply(df1, 1, FUN = min) > start_incl*-1 & apply(df1, 1, FUN = max) <= year_turn, 1,NA)) %>%
dplyr::select(stasis)
# now we look for a gradual drop equal to cum_drop over the tolerance period
cum_incr <- list()
# here we loop over time equal to the tolerance, looking for the difference between the last_change_year and that year on the EDI
for (i in 1:tolerance) {
cum_incr[[i]] <- ifelse(full.df$year == full.df$last_ch_year & dplyr::lead(full.df$country_id, n=i)==full.df$country_id, dplyr::lead(full.df$v2x_polyarchy, n=i)-full.df$v2x_polyarchy, NA)
}
# then we rename the columns and generate a dataframe we can use for our existing data
df <- do.call(cbind, lapply(cum_incr, data.frame, stringsAsFactors=FALSE))
names <- paste0('cum', seq(1:tolerance))
colnames(df) <- names
cum_incr <- df %>%
dplyr::mutate(cum_incr = ifelse(apply(df, 1, FUN = my.max) >= cum_turn, 1,NA)) %>%
dplyr::select(cum_incr)
# merge these new columns to our full.df
full.df <- full.df %>%
tibble::rownames_to_column('newid') %>%
left_join(tibble::rownames_to_column(year_incr, 'newid'), by = 'newid') %>%
left_join(tibble::rownames_to_column(cum_incr, 'newid'), by = 'newid') %>%
left_join(tibble::rownames_to_column(stasis, 'newid'), by = 'newid') %>%
# now lets make sure to group by the autocratization episode and arrange by country-year
ungroup() %>%
group_by(aut_ep_id) %>%
# now we can finally code our termination variable
# first, lets fill everything in for the episode
dplyr::mutate(stasis = ifelse(aut_ep==1, max(hablar::s(stasis)), NA),
year_incr = ifelse(aut_ep==1, max(hablar::s(year_incr)), NA),
cum_incr = ifelse(aut_ep==1, max(hablar::s(cum_incr)), NA),
# then we can code the termination variable
aut_ep_termination = ifelse(aut_ep==1 & !is.na(stasis) & is.na(year_incr) & is.na(cum_incr),
1, NA),
aut_ep_termination = ifelse(aut_ep==1 & !is.na(year_incr), 2, aut_ep_termination),
aut_ep_termination = ifelse(aut_ep==1 & !is.na(cum_incr) & is.na(year_incr), 3, aut_ep_termination),
aut_ep_termination = ifelse(aut_ep==1 & aut_ep_censored==1 & is.na(aut_ep_termination), 0, aut_ep_termination),
# now we can clean up the other variables to reflect the true end of the episodes that are not censored
# first, let's fix the censored variable
aut_ep_censored = ifelse(aut_ep_termination !=0 & aut_ep==1, 0, aut_ep_censored),
# then we recode the end year as the final positive change if not censored
aut_ep_end_year = ifelse(aut_ep_censored==0 & aut_ep==1, last_ch_year, aut_ep_end_year),
# then we clean up the other variables for non-episode years
aut_ep_termination = ifelse(aut_ep==1 & year>aut_ep_end_year, NA, aut_ep_termination),
aut_ep_start_year = ifelse(aut_ep==1 & year>aut_ep_end_year, NA, aut_ep_start_year),
aut_ep_end_year = ifelse(aut_ep==1 & year>aut_ep_end_year, NA, aut_ep_end_year),
aut_ep = ifelse(is.na(aut_ep_end_year), 0, aut_ep)) %>%
ungroup() %>%
dplyr::mutate(aut_ep_id = ifelse(aut_ep==1, paste(country_text_id, aut_ep_start_year, aut_ep_end_year, sep = "_"), NA)) %>%
group_by(aut_ep_id) %>%
arrange(country_id, year) %>%
##### code the phase and outcome type of episode
# we code a variable that captures the type or "phase" of autocratization, were 1 = "democratic regression" and 2 = "autocratic regression"
# note: the year of the democratic breakdown is included in the democratic regression phase
dplyr::mutate(sub_aut_ep = ifelse(aut_ep==1 & (reg_type==1 | (reg_type==0 & reg_trans==-1)), 1, 0),
sub_aut_ep = ifelse(aut_ep==1 & reg_type==0 & reg_trans!=-1, 2, sub_aut_ep),
sub_aut_ep = ifelse(aut_pre_ep_year==1, dplyr::lead(sub_aut_ep, n=1), sub_aut_ep),
# we code the start and end dates for these phases
sub_aut_ep_start_year = ifelse(aut_ep==1 & (year==aut_ep_start_year |
# or year the subtype changed
(year>aut_ep_start_year & sub_aut_ep != dplyr::lag(sub_aut_ep, n=1))), year, NA),
# end year of episode
sub_aut_ep_end_year = ifelse(aut_ep==1 & (year==aut_ep_end_year |
# or year prior to change in subtype
(year<aut_ep_end_year & sub_aut_ep != dplyr::lead(sub_aut_ep, n=1))), year, NA)) %>%
tidyr::fill(sub_aut_ep_start_year) %>%
tidyr::fill(sub_aut_ep_end_year, sub_aut_ep_start_year, .direction="up") %>%
dplyr::mutate(sub_aut_ep_id = ifelse(aut_ep==1, paste(country_text_id, sub_aut_ep_start_year, sub_aut_ep_end_year, sep = "_"), NA)) %>%
ungroup() %>%
group_by(aut_ep_id) %>%
# did a regime change on RoW during the episode of democratic regression produce a genuine democratic breakdown?
dplyr::mutate(aut_ep_outcome = ifelse(sub_aut_ep==1 & reg_trans==-1 & aut_pre_ep_year==0, 1, NA),
# did the episode of democratic regression fail to produce a democratic breakdown?
aut_ep_outcome = ifelse(sub_aut_ep==1 & aut_ep_censored==0 & aut_pre_ep_year==0 &
is.na(aut_ep_outcome), 2, aut_ep_outcome),
# code the outcome for completed autocratic regression
aut_ep_outcome = ifelse(sub_aut_ep==2 & year==aut_ep_end_year &
aut_ep_censored==0 & is.na(aut_ep_outcome), 3, aut_ep_outcome),
# code censored episodes
aut_ep_outcome = ifelse(aut_ep==1 & aut_ep_censored==1 & is.na(aut_ep_outcome) & year==aut_ep_end_year, 4, aut_ep_outcome),
aut_ep_outcome = ifelse(aut_ep==0, 0, aut_ep_outcome)) %>%
dplyr::ungroup() %>%
dplyr::group_by(sub_aut_ep_id) %>%
dplyr::arrange(country_id, year) %>%
# fill for the entire phase of episode
dplyr::mutate(aut_ep_outcome = min(hablar::s(aut_ep_outcome))) %>%
dplyr::ungroup() %>%
dplyr::group_by(aut_ep_id) %>%
dplyr::mutate(aut_ep_censored = ifelse(aut_ep==1 & max(aut_ep_outcome)!=4, 0, aut_ep_censored)) %>%
dplyr::ungroup() %>%
dplyr::group_by(country_text_id) %>%
dplyr::arrange(country_id, year) %>%
# clean out values from pre-episode year
dplyr::mutate(dem_ep = ifelse(dem_pre_ep_year==1, 0, dem_ep),
dem_ep_termination = ifelse(dem_pre_ep_year==1, NA, dem_ep_termination),
sub_dem_ep = ifelse(dem_pre_ep_year==1, 0, sub_dem_ep),
dem_ep_outcome_all = dem_ep_outcome,
dem_ep_outcome = ifelse(dem_pre_ep_year==1, 0, dem_ep_outcome),
dem_ep_censored = ifelse(dem_pre_ep_year==1, 0, dem_ep_censored),
aut_ep = ifelse(aut_pre_ep_year==1, 0, aut_ep),
aut_ep_termination = ifelse(aut_pre_ep_year==1, NA, aut_ep_termination),
sub_aut_ep = ifelse(aut_pre_ep_year==1, 0, sub_aut_ep),
aut_ep_outcome_all = aut_ep_outcome,
aut_ep_outcome = ifelse(aut_pre_ep_year==1, 0, aut_ep_outcome),
aut_ep_censored = ifelse(aut_pre_ep_year==1, 0, aut_ep_censored)) %>%
# select the variables we need to keep
dplyr::filter(!is.na(origsample)) %>%
dplyr::select(country_id, country_text_id, country_name, year, v2x_regime, v2x_polyarchy, v2x_polyarchy_codelow, v2x_polyarchy_codehigh,
reg_start_year, reg_end_year, reg_id, reg_type, reg_trans, founding_elec, row_regch_event, row_regch_censored,
dem_ep, dem_ep_id, dem_ep_start_year, dem_ep_end_year, dem_pre_ep_year, dem_ep_termination,
sub_dem_ep, sub_dem_ep_id, sub_dem_ep_start_year, sub_dem_ep_end_year, dem_ep_outcome, dem_ep_censored,
aut_ep, aut_ep_id, aut_ep_start_year, aut_ep_end_year, aut_pre_ep_year, aut_ep_termination,
sub_aut_ep, sub_aut_ep_id, sub_aut_ep_start_year, sub_aut_ep_end_year, aut_ep_outcome, aut_ep_censored)
{
return(full.df)
}
}
### done ;-) ###
|
test_GraphMFPT <- function() {
    # Unit tests for GraphMFPT, which returns a matrix of mean first-passage
    # times between vertices. Checks:
    # 1) a single-vertex network yields a 1x1 matrix
    # 2) vertex names (attribute 'name') of v become row names when available
    # 3) vertex names (attribute 'name') of g become column names when available
    # 4) only the rows corresponding to v are returned, in the order v was input
    # 5) edge weights are correctly incorporated
    # 6) no negative distances are returned
    # 7) correct results are produced on a small test graph

    # -- setup --
    w.attr <- "test.weights"
    g.single <- graph.empty(1, directed=FALSE)
    edges <- c(1,4, 1,8, 1,9, 1,10, 1,11, 2,5, 2,6, 2,9, 3,7, 3,10, 3,12, 4,2, 4,8, 4,11, 5,3, 5,6, 5,9, 6,9, 7,10, 7,11, 7,12, 8,11, 9,10, 10,12)
    weights <- rep(1, length(edges) / 2)
    weights[c(2, 7, 13, 16, 18, 22)] <- 0.1 # down-weight these edges to move vertices 8 and 6 away
    g <- add.edges(graph.empty(max(edges), directed=FALSE), edges)
    g <- set.edge.attribute(g, w.attr, value=weights)
    g.unnamed <- g
    g.named <- set.vertex.attribute(g, "name", value=paste0("gene", seq_len(vcount(g))))
    g.unconnected <- graph.empty(10, directed=FALSE)
    v <- c(4, 2, 12)

    # -- run function under each scenario --
    res <- list()
    res[[1]] <- GraphMFPT(g.single)                          # 1 vertex
    res[[2]] <- GraphMFPT(g.unnamed)                         # >1 vertex, v = V(g), no vertex names
    res[[3]] <- GraphMFPT(g.named)                           # >1 vertex, v = V(g), vertex names
    res[[4]] <- GraphMFPT(g.named, v=v)                      # >1 vertex, v = subset, vertex names
    res[[5]] <- GraphMFPT(g.named, edge.attr.weight=w.attr)  # edge weights applied
    res[[6]] <- GraphMFPT(g.unconnected)                     # unconnected network

    # -- conduct tests --
    checkTrue(all(dim(res[[1]]) - c(1, 1) < 10e-10))                   # 1
    checkTrue(is.null(rownames(res[[2]])))                             # 2
    checkIdentical(rownames(res[[3]]), V(g.named)$name)                # 2
    checkIdentical(rownames(res[[4]]), V(g.named)$name[v])             # 2
    checkTrue(is.null(colnames(res[[2]])))                             # 3
    checkIdentical(colnames(res[[3]]), V(g.named)$name)                # 3
    checkIdentical(colnames(res[[4]]), V(g.named)$name)                # 3
    checkIdentical(res[[4]], res[[3]][v, ])                            # 4
    checkTrue(all(c(8, 6) %in% tail(order(res[[5]][1, ]), 2)))         # 5
    for (m in res) checkEquals(sum(m < 0), 0)                          # 6
    for (m in res[c(2, 3)]) {
        checkTrue(all(c(1, 4, 8, 11) %in% head(order(m[8, ]), 4)))     # 7
        checkTrue(all(c(2, 5, 6, 9) %in% head(order(m[6, ]), 4)))      # 7
        checkTrue(all(c(3, 7, 10, 12) %in% head(order(m[12, ]), 4)))   # 7
    }
}
| /inst/unitTests/test_GraphMFPT.R | no_license | alexjcornish/SANTA | R | false | false | 2,742 | r | test_GraphMFPT <- function() {
# Unit tests for GraphMFPT (mean first-passage time distance matrix).
# TESTS
# 1) returns matrix of dim 1,1 if networks with 1 vertex input
# 2) uses v vertex names (under the attribute name 'name') as row names if available
# 3) uses g vertex names (under the attribute name 'name') as column names if available
# 4) returns only the rows corresponding to v, in the order v was input
# 5) correctly incorporates edge weights
# 6) doesn't return negative distances
# 7) returns the correct results on a small test graph
# setup: build a 12-vertex test graph, with and without vertex names,
# plus a single-vertex graph and a fully unconnected graph
g.single <- graph.empty(1, directed=FALSE)
edge.attr.weight <- "test.weights"
edges <- c(1,4, 1,8, 1,9, 1,10, 1,11, 2,5, 2,6, 2,9, 3,7, 3,10, 3,12, 4,2, 4,8, 4,11, 5,3, 5,6, 5,9, 6,9, 7,10, 7,11, 7,12, 8,11, 9,10, 10,12)
weights <- rep(1, length(edges) / 2)
weights[c(2, 7, 13, 16, 18, 22)] <- 0.1 # down-weight these edges to move vertices 8 and 6 away
g <- graph.empty(max(edges), directed=FALSE)
g <- add.edges(g, edges)
g <- set.edge.attribute(g, edge.attr.weight, value=weights)
g.unnamed <- g
g.named <- set.vertex.attribute(g, "name", value=paste("gene", 1:vcount(g), sep=""))
n.vertex.unconnected <- 10
g.unconnected <- graph.empty(n.vertex.unconnected, directed=FALSE)
v <- c(4,2,12)
# run function under each scenario and collect the result matrices
results <- list()
results[[1]] <- GraphMFPT(g.single) # 1 vertex
results[[2]] <- GraphMFPT(g.unnamed) # >1 vertex, v = V(g), no vertex names
results[[3]] <- GraphMFPT(g.named) # >1 vertex, v = V(g), vertex names
results[[4]] <- GraphMFPT(g.named, v=v) # >1 vertex, v = subset, vertex names
results[[5]] <- GraphMFPT(g.named, edge.attr.weight=edge.attr.weight) # >1 vertex, v = V(g), no vertex name, edge weights
results[[6]] <- GraphMFPT(g.unconnected) # unconnected network
# conduct tests (numbers in trailing comments refer to the TESTS list above)
checkTrue(all(dim(results[[1]]) - c(1, 1) < 10e-10)) # 1
checkTrue(is.null(rownames(results[[2]]))) # 2
checkIdentical(rownames(results[[3]]), V(g.named)$name) # 2
checkIdentical(rownames(results[[4]]), V(g.named)$name[v]) # 2
checkTrue(is.null(colnames(results[[2]]))) # 3
checkIdentical(colnames(results[[3]]), V(g.named)$name) # 3
checkIdentical(colnames(results[[4]]), V(g.named)$name) # 3
checkIdentical(results[[4]], results[[3]][v, ]) # 4
checkTrue(all(c(8, 6) %in% tail(order(results[[5]][1, ]), 2))) # 5
for (result in results) checkEquals(sum(result < 0), 0) # 6
for (result in results[c(2, 3)]) {
checkTrue(all(c(1, 4, 8, 11) %in% head(order(result[8, ]), 4))) # 7
checkTrue(all(c(2, 5, 6, 9) %in% head(order(result[6, ]), 4))) # 7
checkTrue(all(c(3, 7, 10, 12) %in% head(order(result[12, ]), 4))) # 7
}
}
\name{cparprobit}
\alias{cparprobit}
\title{
Conditionally Parametric probit for two choices
}
\description{
Estimates a probit model with two choices by maximizing a locally weighted likelihood function -- the probit equivalent of cparlwr
}
\usage{
cparprobit(form,nonpar,window=.25,bandwidth=0,kern="tcub",
distance="Mahal",target=NULL,data=NULL,minp=NULL)
}
\arguments{
\item{form }{Model formula}
\item{nonpar }{List of either one or two variables for \emph{z}.
Formats: \emph{cparprobit(y~xlist, nonpar=~z1, ...)} or \emph{cparprobit(y~xlist, nonpar=~z1+z2, ...)}.
Important: note the "~" before the first \emph{z} variable.
}
\item{window }{Window size. Default: 0.25. }
\item{bandwidth }{Bandwidth. Default: not used.}
\item{kern }{Kernel weighting functions. Default is the tri-cube. Options include "rect", "tria", "epan", "bisq", "tcub", "trwt", and "gauss".}
\item{distance }{Options: "Euclid", "Mahal", or "Latlong" for Euclidean, Mahalanobis, or "great-circle" geographic distance.
May be abbreviated to the first letter but must be capitalized.
Note: \emph{cparprobit} looks for the first two letters to determine which variable is latitude and which is longitude,
so the data set must be attached first or specified using the data option; options like data$latitude will not work. Default: Mahal. }
\item{target}{If \emph{target = NULL}, uses the \emph{maketarget} command to form targets using the values specified for \emph{window},
\emph{bandwidth}, and \emph{kern}. If \emph{target="alldata"}, each observation is used as a target value for \emph{x}.
A set of target values can be supplied directly.}
\item{data }{A data frame containing the data. Default: use data in the current working directory}
\item{minp}{Specifies a limit for the estimated probability. Any estimated probability lower than \emph{minp} will be set to \emph{minp} and
any probability higher than 1-\emph{minp} will be set to 1-\emph{minp}. By default, the estimated probabilities are bounded by 0 and 1.}
}
\value{
\item{target}{The target points for the original estimation of the function.}
\item{xcoef.target}{Estimated coefficients, \emph{B(z)}, at the target values of \emph{z}.}
\item{xcoef.target.se}{Standard errors for \emph{B(z)} at the target values of \emph{z}.}
\item{xcoef}{Estimated coefficients, \emph{B(z)}, at the original data points.}
\item{xcoef.se}{Standard errors for \emph{B(z)} with \emph{z} evaluated at all points in the data set.}
\item{p}{The estimated probabilities.}
\item{lnl}{The log-likelihood value.}
}
\details{
The list of explanatory variables is specified in the base model formula while \emph{Z} is specified using \emph{nonpar}.
\emph{X} can include any number of explanatory variables, but \emph{Z} must have at most two.
The model is estimated by maximizing the following weighted log-likelihood function at each target point:
\deqn{ \sum_{i=1}^n w_i \{ y_i log(\Phi (X_i \beta)) + (1-y_i) log(1-\Phi (X_i \beta) ) \} }{\sum w_i { y_i log(\Phi (X_i \beta)) + (1-y_i) log(1-\Phi (X_i \beta) ) } }
where y is the discrete dependent variable and X is the set of explanatory variables.
When \emph{Z} includes a single variable, \eqn{w_i} is a simple kernel weighting function: \eqn{ w_i = K((z_i - z_0 )/(sd(z)*h)) }.
When \emph{Z} includes two variables (e.g., nonpar=~z1+z2), the method for specifying \emph{w} depends on the \emph{distance} option.
Under either option, the \emph{i}th row of the matrix \emph{Z} = (z1, z2) is transformed such
that \eqn{z_i = \sqrt{z_i * V * t(z_i)}.}{z_i = sqrt(z_i * V * t(z_i)).} Under the "Mahal" option, \emph{V} is the inverse of cov(\emph{Z}).
Under the \emph{"Euclid"} option, \emph{V} is the inverse of diag(cov(\emph{Z})).
After this transformation, the weights again reduce to the simple kernel weighting function \eqn{K((z_i - z_0 )/(sd(z)*h))}.
\emph{h} is specified by the \emph{bandwidth} or \emph{window} option.
The great circle formula is used to construct the distances used to form the weights when \emph{distance = "Latlong"};
in this case, the variable list for \emph{nonpar} must be listed as
\emph{nonpar = ~latitude+longitude} (or \emph{~lo+la} or \emph{~lat+long}, etc), with the longitude and latitude variables expressed in degrees
(e.g., -87.627800 and 41.881998 for one observation of longitude and latitude, respectively).
The order in which latitude and longitude are listed does not matter and the function only looks for the
first two letters to determine which variable is latitude and which is longitude.
It is important to note that the great circle distance measure is left in miles rather than being standardized.
Thus, the window option should be specified when \emph{distance = "Latlong"} or the bandwidth should be adjusted to account for the scale.
The kernel weighting function becomes \emph{K(distance/h)} under the \emph{"Latlong"} option.
Following White (1982), the covariance matrix for a quasi-maximum likelihood model is \eqn{A^{-1}BA^{-1} }, where
\deqn{A = \sum_{i=1}^n w_i \frac{\partial^2 LnL_i}{\partial \beta \partial \beta ^\prime} }{A = \sum w_i d^2LnL_i/d\beta d\beta' }
\deqn{B = \sum_{i=1}^n w_i^2 \frac{\partial LnL_i}{\partial \beta}\frac{\partial LnL_i}{\partial \beta ^\prime} }{B = \sum w_i^2 (dLnL_i/d\beta)(dLnL_i/d\beta') }
For the probit model,
\deqn{ A = \sum_{i=1}^n w_i P_i(1 - P_i) X_i X_i ^\prime }{ A = \sum w_i P_i(1 - P_i) X_i X_i' }
\deqn{ B = \sum_{i=1}^n w_i^2 (y_i - P_i)^2 X_i X_i ^\prime }{ B = \sum w_i^2 (y_i - P_i)^2 X_i X_i' }
The covariance matrix is calculated at all target points and the implied standard errors are then interpolated to each data point.
Available kernel weighting functions include the following:
\tabular{lll}{
Kernel \tab Call abbreviation \tab Kernel function K(z) \cr
Rectangular \tab ``rect'' \tab \eqn{\frac{1}{2} I(|z| <1)}{1/2 * I(|z|<1)} \cr
Triangular \tab ``tria'' \tab \eqn{(1-|z|)I(|z|<1)}{(1-|z|) * I(|z|<1)}\cr
Epanechnikov \tab ``epan'' \tab \eqn{\frac{3}{4} (1-z^2) * I(|z| <1)}{3/4 * (1-z^2)*I(|z| < 1)} \cr
Bi-Square \tab ``bisq'' \tab \eqn{\frac{15}{16} (1-z^2)^2 * I(|z| <1)}{15/16 * (1-z^2)^2 * I(|z| < 1)} \cr
Tri-Cube \tab ``tcub'' \tab \eqn{\frac{70}{81} (1-|z|^3)^3 * I(|z| <1)}{70/81 * (1-|z|^3)^3 * I(|z| < 1)} \cr
Tri-Weight \tab ``trwt'' \tab \eqn{\frac{35}{32} (1-z^2)^3 * I(|z| <1)}{35/32 * (1-z^2)^3 * I(|z| < 1)} \cr
Gaussian \tab ``gauss'' \tab \eqn{(2\pi)^{-.5} e^{-z^2/2}}{(2pi)^{-.5} exp(-z^2/2)} \cr }
}
\references{
Fan, Jianqing, Nancy E. Heckman, and M.P. Wand, "Local Polynomial Kernel Regression for Generalized Linear Models and Quasi-Likelihood Functions,"
\emph{Journal of the American Statistical Association} 90 (1995), 141-150.
Loader, Clive. \emph{Local Regression and Likelihood.} New York: Springer, 1999.
McMillen, Daniel P. and John F. McDonald, "Locally Weighted Maximum Likelihood Estimation: Monte Carlo Evidence and an Application,"
in Luc Anselin, Raymond J.G.M. Florax, and Sergio J. Rey, eds., \emph{Advances in Spatial Econometrics}, Springer-Verlag, New York (2004), 225-239.
Tibshirani, Robert and Trevor Hastie, "Local Likelihood Estimation," \emph{Journal of the American Statistical Association} 82 (1987), 559-568.
}
\seealso{
\link{cparlogit}
\link{cparmlogit}
\link{gmmlogit}
\link{gmmprobit}
\link{splogit}
\link{spprobit}
\link{spprobitml}
}
\examples{
set.seed(5647)
data(cookdata)
cookdata <- cookdata[!is.na(cookdata$AGE),]
n = nrow(cookdata)
cookdata$ystar <- cookdata$DCBD - .5*cookdata$AGE
cookdata$y <- cookdata$ystar - mean(cookdata$ystar) + rnorm(n,sd=4) > 0
tvect <- maketarget(~LONGITUDE+LATITUDE,window=.5,data=cookdata)$target
fit <- cparprobit(y~DCBD+AGE,~LONGITUDE+LATITUDE,window=.5,
target=tvect,distance="Latlong",data=cookdata,minp=0.001)
}
\keyword{Discrete Choice Models}
\keyword{Probit}
\keyword{Conditionally Parametric}
\keyword{Nonparametric}
| /McSpatial/man/cparprobit.Rd | no_license | albrizre/spatstat.revdep | R | false | false | 7,977 | rd | \name{cparprobit}
\alias{cparprobit}
\title{
Conditionally Parametric probit for two choices
}
\description{
Estimates a probit model with two choices by maximizing a locally weighted likelihood function -- the probit equivalent of cparlwr
}
\usage{
cparprobit(form,nonpar,window=.25,bandwidth=0,kern="tcub",
distance="Mahal",target=NULL,data=NULL,minp=NULL)
}
\arguments{
\item{form }{Model formula}
\item{nonpar }{List of either one or two variables for \emph{z}.
Formats: \emph{cparprobit(y~xlist, nonpar=~z1, ...)} or \emph{cparprobit(y~xlist, nonpar=~z1+z2, ...)}.
Important: note the "~" before the first \emph{z} variable.
}
\item{window }{Window size. Default: 0.25. }
\item{bandwidth }{Bandwidth. Default: not used.}
\item{kern }{Kernel weighting functions. Default is the tri-cube. Options include "rect", "tria", "epan", "bisq", "tcub", "trwt", and "gauss".}
\item{distance }{Options: "Euclid", "Mahal", or "Latlong" for Euclidean, Mahalanobis, or "great-circle" geographic distance.
May be abbreviated to the first letter but must be capitalized.
Note: \emph{cparprobit} looks for the first two letters to determine which variable is latitude and which is longitude,
so the data set must be attached first or specified using the data option; options like data$latitude will not work. Default: Mahal. }
\item{target}{If \emph{target = NULL}, uses the \emph{maketarget} command to form targets using the values specified for \emph{window},
\emph{bandwidth}, and \emph{kern}. If \emph{target="alldata"}, each observation is used as a target value for \emph{x}.
A set of target values can be supplied directly.}
\item{data }{A data frame containing the data. Default: use data in the current working directory}
\item{minp}{Specifies a limit for the estimated probability. Any estimated probability lower than \emph{minp} will be set to \emph{minp} and
any probability higher than 1-\emph{minp} will be set to 1-\emph{minp}. By default, the estimated probabilities are bounded by 0 and 1.}
}
\value{
\item{target}{The target points for the original estimation of the function.}
\item{xcoef.target}{Estimated coefficients, \emph{B(z)}, at the target values of \emph{z}.}
\item{xcoef.target.se}{Standard errors for \emph{B(z)} at the target values of \emph{z}.}
\item{xcoef}{Estimated coefficients, \emph{B(z)}, at the original data points.}
\item{xcoef.se}{Standard errors for \emph{B(z)} with \emph{z} evaluated at all points in the data set.}
\item{p}{The estimated probabilities.}
\item{lnl}{The log-likelihood value.}
}
\details{
The list of explanatory variables is specified in the base model formula while \emph{Z} is specified using \emph{nonpar}.
\emph{X} can include any number of explanatory variables, but \emph{Z} must have at most two.
The model is estimated by maximizing the following weighted log-likelihood function at each target point:
\deqn{ \sum_{i=1}^n w_i \{ y_i log(\Phi (X_i \beta)) + (1-y_i) log(1-\Phi (X_i \beta) ) \} }{\sum w_i { y_i log(\Phi (X_i \beta)) + (1-y_i) log(1-\Phi (X_i \beta) ) } }
where y is the discrete dependent variable and X is the set of explanatory variables.
When \emph{Z} includes a single variable, \eqn{w_i} is a simple kernel weighting function: \eqn{ w_i = K((z_i - z_0 )/(sd(z)*h)) }.
When \emph{Z} includes two variables (e.g., nonpar=~z1+z2), the method for specifying \emph{w} depends on the \emph{distance} option.
Under either option, the \emph{i}th row of the matrix \emph{Z} = (z1, z2) is transformed such
that \eqn{z_i = \sqrt{z_i * V * t(z_i)}.}{z_i = sqrt(z_i * V * t(z_i)).} Under the "Mahal" option, \emph{V} is the inverse of cov(\emph{Z}).
Under the \emph{"Euclid"} option, \emph{V} is the inverse of diag(cov(\emph{Z})).
After this transformation, the weights again reduce to the simple kernel weighting function \eqn{K((z_i - z_0 )/(sd(z)*h))}.
\emph{h} is specified by the \emph{bandwidth} or \emph{window} option.
The great circle formula is used to construct the distances used to form the weights when \emph{distance = "Latlong"};
in this case, the variable list for \emph{nonpar} must be listed as
\emph{nonpar = ~latitude+longitude} (or \emph{~lo+la} or \emph{~lat+long}, etc), with the longitude and latitude variables expressed in degrees
(e.g., -87.627800 and 41.881998 for one observation of longitude and latitude, respectively).
The order in which latitude and longitude are listed does not matter and the function only looks for the
first two letters to determine which variable is latitude and which is longitude.
It is important to note that the great circle distance measure is left in miles rather than being standardized.
Thus, the window option should be specified when \emph{distance = "Latlong"} or the bandwidth should be adjusted to account for the scale.
The kernel weighting function becomes \emph{K(distance/h)} under the \emph{"Latlong"} option.
Following White (1982), the covariance matrix for a quasi-maximum likelihood model is \eqn{A^{-1}BA^{-1} }, where
\deqn{A = \sum_{i=1}^n w_i \frac{\partial^2 LnL_i}{\partial \beta \partial \beta ^\prime} }{A = \sum w_i d^2LnL_i/d\beta d\beta' }
\deqn{B = \sum_{i=1}^n w_i^2 \frac{\partial LnL_i}{\partial \beta}\frac{\partial LnL_i}{\partial \beta ^\prime} }{B = \sum w_i^2 (dLnL_i/d\beta)(dLnL_i/d\beta') }
For the probit model,
\deqn{ A = \sum_{i=1}^n w_i P_i(1 - P_i) X_i X_i ^\prime }{ A = \sum w_i P_i(1 - P_i) X_i X_i' }
\deqn{ B = \sum_{i=1}^n w_i^2 (y_i - P_i)^2 X_i X_i ^\prime }{ B = \sum w_i^2 (y_i - P_i)^2 X_i X_i' }
The covariance matrix is calculated at all target points and the implied standard errors are then interpolated to each data point.
Available kernel weighting functions include the following:
\tabular{lll}{
Kernel \tab Call abbreviation \tab Kernel function K(z) \cr
Rectangular \tab ``rect'' \tab \eqn{\frac{1}{2} I(|z| <1)}{1/2 * I(|z|<1)} \cr
Triangular \tab ``tria'' \tab \eqn{(1-|z|)I(|z|<1)}{(1-|z|) * I(|z|<1)}\cr
Epanechnikov \tab ``epan'' \tab \eqn{\frac{3}{4} (1-z^2) * I(|z| <1)}{3/4 * (1-z^2)*I(|z| < 1)} \cr
Bi-Square \tab ``bisq'' \tab \eqn{\frac{15}{16} (1-z^2)^2 * I(|z| <1)}{15/16 * (1-z^2)^2 * I(|z| < 1)} \cr
Tri-Cube \tab ``tcub'' \tab \eqn{\frac{70}{81} (1-|z|^3)^3 * I(|z| <1)}{70/81 * (1-|z|^3)^3 * I(|z| < 1)} \cr
Tri-Weight \tab ``trwt'' \tab \eqn{\frac{35}{32} (1-z^2)^3 * I(|z| <1)}{35/32 * (1-z^2)^3 * I(|z| < 1)} \cr
Gaussian \tab ``gauss'' \tab \eqn{(2\pi)^{-.5} e^{-z^2/2}}{(2pi)^{-.5} exp(-z^2/2)} \cr }
}
\references{
Fan, Jianqing, Nancy E. Heckman, and M.P. Wand, "Local Polynomial Kernel Regression for Generalized Linear Models and Quasi-Likelihood Functions,"
\emph{Journal of the American Statistical Association} 90 (1995), 141-150.
Loader, Clive. \emph{Local Regression and Likelihood.} New York: Springer, 1999.
McMillen, Daniel P. and John F. McDonald, "Locally Weighted Maximum Likelihood Estimation: Monte Carlo Evidence and an Application,"
in Luc Anselin, Raymond J.G.M. Florax, and Sergio J. Rey, eds., \emph{Advances in Spatial Econometrics}, Springer-Verlag, New York (2004), 225-239.
Tibshirani, Robert and Trevor Hastie, "Local Likelihood Estimation," \emph{Journal of the American Statistical Association} 82 (1987), 559-568.
}
\seealso{
\link{cparlogit}
\link{cparmlogit}
\link{gmmlogit}
\link{gmmprobit}
\link{splogit}
\link{spprobit}
\link{spprobitml}
}
\examples{
set.seed(5647)
data(cookdata)
cookdata <- cookdata[!is.na(cookdata$AGE),]
n = nrow(cookdata)
cookdata$ystar <- cookdata$DCBD - .5*cookdata$AGE
cookdata$y <- cookdata$ystar - mean(cookdata$ystar) + rnorm(n,sd=4) > 0
tvect <- maketarget(~LONGITUDE+LATITUDE,window=.5,data=cookdata)$target
fit <- cparprobit(y~DCBD+AGE,~LONGITUDE+LATITUDE,window=.5,
target=tvect,distance="Latlong",data=cookdata,minp=0.001)
}
\keyword{Discrete Choice Models}
\keyword{Probit}
\keyword{Conditionally Parametric}
\keyword{Nonparametric}
|
# Working with strings and dates.
name <- "Denis"
nchar(name)    # character count of the value stored in `name` -> 5
nchar("name")  # character count of the literal string "name" -> 4

# as.Date() parses ISO-8601 text into a Date object; plain text stays character.
a <- as.Date("2014-06-28")
b <- "2014-06-28"
class(a)       # "Date"
class(b) | /06 - Working with Dates.R | no_license | DenisOliveira1/course_r_programming_tutorial | R | false | false | 102 | r | name = "Denis"
nchar(name)    # number of characters in the value of `name` (set above)
nchar("name")  # number of characters in the literal string "name"
# Parse an ISO-8601 date string into a Date object; `b` remains character.
a = as.Date("2014-06-28")
b = "2014-06-28"
class(a)  # "Date"
class(b) |
# Auto-generated fuzz/regression input for meteor:::ET0_Makkink (Makkink
# reference evapotranspiration).  Rs and atmp are zero-length vectors and
# relh/temp carry extreme doubles to probe numeric edge-case handling.
testlist <- list(Rs = numeric(0), atmp = numeric(0), relh = c(-1.72131968218895e+83, -7.88781071482505e+93, 1.0823131123826e-105, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.61605817623304e+76, -1.18078903777423e-90, 1.86807199752012e+112, -5.58551357556946e+160, 2.00994342527714e-162, 1.81541609400943e-79, 7.89363005545926e+139, 2.3317908961407e-93, 1.12289452133682e-309))
# Invoke the (unexported) function with the list elements as arguments.
result <- do.call(meteor:::ET0_Makkink,testlist)
str(result) | /meteor/inst/testfiles/ET0_Makkink/AFL_ET0_Makkink/ET0_Makkink_valgrind_files/1615847791-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 724 | r | testlist <- list(Rs = numeric(0), atmp = numeric(0), relh = c(-1.72131968218895e+83, -7.88781071482505e+93, 1.0823131123826e-105, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.61605817623304e+76, -1.18078903777423e-90, 1.86807199752012e+112, -5.58551357556946e+160, 2.00994342527714e-162, 1.81541609400943e-79, 7.89363005545926e+139, 2.3317908961407e-93, 1.12289452133682e-309))
result <- do.call(meteor:::ET0_Makkink,testlist)
str(result) |
# Exercise 1: plot 3.

# Restore the pre-processed household power consumption data (object `hpc`).
load(file = 'data/hpc.data')

# Render directly to a PNG device.
png(filename = 'plots/plot3.png')

# One line per sub-meter; the colour vector is shared with the legend so the
# two cannot drift apart.
sub_cols <- c('black', 'red', 'blue')
with(hpc, {
  plot(datetime, Sub_metering_1,
       type = 'l',
       col  = sub_cols[1],
       xlab = '',
       ylab = 'Energy sub metering')
  lines(datetime, Sub_metering_2, col = sub_cols[2])
  lines(datetime, Sub_metering_3, col = sub_cols[3])
})
legend('topright',
       legend = c('Sub_metering_1', 'Sub_metering_2', 'Sub_metering_3'),
       col = sub_cols,
       lty = 'solid')

dev.off()
| /plot3.R | no_license | yamonk/ExData_Plotting1 | R | false | false | 542 | r | # Exercise 1: plot 3.
# Load data in hpc from file.
load(file='data/hpc.data')
# Open png.
png(filename='plots/plot3.png')
# Build plot: sub-meter 1 as the base line, with axis label on y only.
plot(hpc$datetime,
     hpc$Sub_metering_1,
     type = 'l',
     col = 'black',
     xlab = '',
     ylab = 'Energy sub metering')
# Overlay sub-meters 2 and 3 on the same axes.
lines(hpc$datetime, hpc$Sub_metering_2, col = 'red')
lines(hpc$datetime, hpc$Sub_metering_3, col = 'blue')
# Legend colours must stay in sync with the three series above.
legend('topright',
       legend=c('Sub_metering_1', 'Sub_metering_2', 'Sub_metering_3'),
       col=c('black', 'red', 'blue'),
       lty='solid')
# Close the PNG device so the file is written to disk.
dev.off()
|
## Last updated August 28, 2013
##With help from Matt Settles / Matt Pennell; Version of NEWfunction.R
##Modified May15,2013 by HEM to correct NCBI names, and add "_"
# Will have to install this package
#source("http://bioconductor.org/biocLite.R")
#biocLite("Biostrings")
library(Biostrings) # load package
#setwd("~/Dropbox/Hannah-Dave/SanJuans/HannahFINAL/2_SpeciesList/") # Navigate to the directory with PHLAWD output to be parsed
# This function will take the full alignment from the PHLAWD output and remove the NCBI ID,
# and keep only the longest unique sequences if there are multiple hits for a single species
# Parse a PHLAWD *.aln.full alignment: strip NCBI identifiers down to plain
# "Genus_species" names and, when a species has several sequences, keep only
# the longest one.  Writes two FASTA files next to the input:
#   <base>.unique.fasta    - longest unique sequences, named "Genus_species"
#   <base>.unique.GB.fasta - same sequences, keeping the full NCBI header
parsePHLAWD <- function(fasta.file){
  GBseqs <- readDNAStringSet(fasta.file)     # read the full alignment
  namesGB <- names(GBseqs)                   # full NCBI headers, "...|Genus_species_var|..."
  print(length(namesGB))                     # number of input sequences
  split <- strsplit(namesGB, split="|", fixed=TRUE)
  species.name <- sapply(split, "[", 2L)     # "Genus_species_var..."
  genus.name <- sapply(strsplit(species.name, split="_"), "[", 1L)
  species.name2 <- sapply(strsplit(species.name, split="_"), "[", 2L)
  combinedname <- paste(genus.name, species.name2, sep="_")  # plain "Genus_species"
  # ungapped length of each sequence (counts A/C/T/G only; gaps and Ns ignored)
  sizes <- rowSums(alphabetFrequency(GBseqs)[,c("A","C","T","G")])
  ord <- order(combinedname, -sizes)         # within each species, longest first
  seqs <- GBseqs[ord]
  namesGBord <- names(seqs)                  # full NCBI headers in the same order
  combinedname <- combinedname[ord]
  ID <- duplicated(combinedname)             # TRUE for every sequence after the first (longest)
  uniques <- seqs[!ID]
  uniquesnames <- combinedname[!ID]
  print(length(uniques))                     # number of unique species kept
  file.name <- strsplit(fasta.file, split=".", fixed=TRUE)[[1]][[1]]  # basename up to first "."
  species_uniques <- uniques
  names(species_uniques) <- uniquesnames
  # BUG FIX: the original passed format="fasta" inside paste(), which only
  # produced the ".fasta" suffix by accident (paste() swallows unmatched named
  # arguments into "...").  Build the suffix explicitly and hand format= to
  # writeXStringSet(), where it belongs.  Output filenames are unchanged.
  writeXStringSet(species_uniques,
                  file = paste(file.name, "unique", "fasta", sep="."),
                  format = "fasta")
  names(uniques) <- namesGBord[!ID]          # restore full NCBI headers
  writeXStringSet(uniques,
                  file = paste(file.name, "unique.GB", "fasta", sep="."),
                  format = "fasta")
}
## To execute, run the above funtion, then call the file that you would like to parse. See the example atpB.FINAL.aln.full below:
#parsePHLAWD("atpB.FINAL.aln.full")
## Output: *unique.fasta == the alignment trimed to just the longest sequences, i.e. the unique seqs
## *unique.GB.fasta == same as the above, but with the ncbi info. and the species names
######## Fix names of files that were removed
#setwd("~/Documents/Idaho/Tank/Projects/SanJuans/FINAL/2b_Remove/")
# Re-name sequences that were removed from an alignment (a *.fasta.rem file):
# replace the full NCBI headers with plain "Genus_species" names and write the
# result to <base>.unique.rem.name.fasta.
parseREMOVED <- function(fasta.file){
  rem <- readDNAStringSet(fasta.file)
  namesRem <- names(rem)                     # full NCBI headers
  print(length(namesRem))                    # number of removed sequences
  split <- strsplit(namesRem, split="|", fixed=TRUE)
  species.name <- sapply(split, "[", 2L)     # "Genus_species_var..."
  genus.name <- sapply(strsplit(species.name, split="_"), "[", 1L)
  species.name2 <- sapply(strsplit(species.name, split="_"), "[", 2L)
  combinedname <- paste(genus.name, species.name2, sep="_")  # plain "Genus_species"
  names(rem) <- combinedname
  file.name <- strsplit(fasta.file, split=".", fixed=TRUE)[[1]][[1]]  # basename up to first "."
  # BUG FIX: as in parsePHLAWD(), format= belongs to writeXStringSet(), not
  # paste(); the output filename is unchanged.
  writeXStringSet(rem,
                  file = paste(file.name, "unique.rem.name", "fasta", sep="."),
                  format = "fasta")
}
#parseREMOVED("atpB.unique.GB.fasta.rem") #62
# Extract the taxon names from a FASTA alignment, write them to
# <base>.species.csv, and return them.
parseALIGNMENT <- function(fasta.file){
  GBseqs <- readDNAStringSet(fasta.file)
  namesGB <- names(GBseqs)                   # alignment names are already "Genus_species"
  print(length(namesGB))                     # number of sequences
  combinedname <- namesGB
  file.name <- strsplit(fasta.file, split=".", fixed=TRUE)[[1]][[1]]  # basename up to first "."
  # BUG FIX: the original pasted ".csv" with sep=".", yielding the double-dot
  # filename "<base>.species..csv"; write "<base>.species.csv" instead.
  write.csv(combinedname, file=paste(file.name, "species", "csv", sep="."))
  return(combinedname)
}
#fasta.file <- "~/Dropbox/Work/FranceLab/FranceProjects/IslandComparaive/WeigletWG/8Archepleagos/231014/4_Concatenate/align.concat.8arch.241014.fst"
#extractID <- read.csv("~/Dropbox/Work/FranceLab/FranceProjects/Islandcomparaive/WeigletWG/8Archepleagos/ExtractID/output241014.txt")
# Map the "Genus_species" sequence names of a concatenated alignment back to
# the accepted names / NCBI ids of a PHLAWD include file.
#
# fasta.file: path to the alignment (names formatted "Genus_species_...").
# extractID:  data frame with columns input_name, accepted_name, ncbi_id.
# file.name:  stem for the three output alignments:
#   <file.name>.acceptedID.fst - names "accepted_name|ncbi_id"
#   <file.name>.accepted.fst   - names "accepted_name"
#   <file.name>.input.fst      - names "input_name"
# Sequences with no match in the include file are reported and dropped.
parseALIGNMENT.Input.to.acceptedName <- function(fasta.file, extractID, file.name){
  # --- normalise the include-file input names to "Genus species" ---
  split <- strsplit(as.character(extractID$input_name), split=" ", fixed=TRUE)
  species.name <- sapply(split, "[", 2L)
  # keep only the epithet part before any "-" (e.g. "glaucum-x" -> "glaucum")
  species.name3 <- sapply(strsplit(species.name, split="-", fixed=TRUE), "[", 1L)
  genus.name <- sapply(split, "[", 1L)
  extractID$input_name <- paste(genus.name, species.name3, sep=" ")
  # one row per input name; the first occurrence wins, as before
  extractID.uniques <- subset(extractID, !duplicated(extractID$input_name))
  # --- read the alignment and build matching "Genus species" keys ---
  GBseqs <- readDNAStringSet(fasta.file)
  print(length(GBseqs))                      # number of sequences in the alignment
  splitGB <- strsplit(names(GBseqs), split="_", fixed=TRUE)
  align.key <- paste(sapply(splitGB, "[", 1L), sapply(splitGB, "[", 2L), sep=" ")
  # PERF FIX: the original grew three DNAStringSets with c() inside a
  # per-sequence loop (accidental O(n^2)) and subset the data frame once per
  # sequence; a single match() pass produces the identical output files.
  idx <- match(align.key, extractID.uniques$input_name)
  for (nm in names(GBseqs)[is.na(idx)]) {
    print(paste(nm, "does not match PHLAWD includefile"))
  }
  keep <- !is.na(idx)
  matched.seqs <- GBseqs[keep]
  matched.rows <- extractID.uniques[idx[keep], ]
  matched.acceptedID.align <- matched.seqs
  names(matched.acceptedID.align) <- paste(as.character(matched.rows$accepted_name),
                                           matched.rows$ncbi_id, sep="|")
  matched.accepted.align <- matched.seqs
  names(matched.accepted.align) <- as.character(matched.rows$accepted_name)
  matched.input.align <- matched.seqs
  names(matched.input.align) <- as.character(matched.rows$input_name)
  writeXStringSet(matched.acceptedID.align, file=paste(file.name, "acceptedID", "fst", sep="."))
  writeXStringSet(matched.accepted.align, file=paste(file.name, "accepted", "fst", sep="."))
  writeXStringSet(matched.input.align, file=paste(file.name, "input", "fst", sep="."))
}
#parseALIGNMENT.Input.to.acceptedName(fasta.file, extractID, file.name="align.concat.8arch.241014")
#[1] "Picris_sp does not match PHLAWD includefile"
#[1] "Chenopodium_glaucum does not match PHLAWD includefile"
#[1] "Polypodium_polypodioides does not match PHLAWD includefile"
#[1] "Hedyotis_corymbosa does not match PHLAWD includefile"
#[1] "Polygonum_chinense does not match PHLAWD includefile"
#[1] "Phelipanche_purpurea does not match PHLAWD includefile"
| /R/functions/ParsePHLAWD.R | no_license | hmarx/Alpine-Sky-Islands | R | false | false | 8,224 | r | ## Last updated August 28, 2013
##With help from Matt Settles / Matt Pennell; Version of NEWfunction.R
##Modified May15,2013 by HEM to correct NCBI names, and add "_"
# Will have to install this package
#source("http://bioconductor.org/biocLite.R")
#biocLite("Biostrings")
library(Biostrings) # load package
#setwd("~/Dropbox/Hannah-Dave/SanJuans/HannahFINAL/2_SpeciesList/") # Navigate to the directory with PHLAWD output to be parsed
# This function will take the full alignment from the PHLAWD output and remove the NCBI ID,
# and keep only the longest unique sequences if there are multiple hits for a single species
# Parse a PHLAWD *.aln.full alignment: strip NCBI identifiers down to plain
# "Genus_species" names and, when a species has several sequences, keep only
# the longest one.  Writes two FASTA files next to the input:
#   <base>.unique.fasta    - longest unique sequences, named "Genus_species"
#   <base>.unique.GB.fasta - same sequences, keeping the full NCBI header
parsePHLAWD <- function(fasta.file){
  GBseqs <- readDNAStringSet(fasta.file)     # read the full alignment
  namesGB <- names(GBseqs)                   # full NCBI headers, "...|Genus_species_var|..."
  print(length(namesGB))                     # number of input sequences
  split <- strsplit(namesGB, split="|", fixed=TRUE)
  species.name <- sapply(split, "[", 2L)     # "Genus_species_var..."
  genus.name <- sapply(strsplit(species.name, split="_"), "[", 1L)
  species.name2 <- sapply(strsplit(species.name, split="_"), "[", 2L)
  combinedname <- paste(genus.name, species.name2, sep="_")  # plain "Genus_species"
  # ungapped length of each sequence (counts A/C/T/G only; gaps and Ns ignored)
  sizes <- rowSums(alphabetFrequency(GBseqs)[,c("A","C","T","G")])
  ord <- order(combinedname, -sizes)         # within each species, longest first
  seqs <- GBseqs[ord]
  namesGBord <- names(seqs)                  # full NCBI headers in the same order
  combinedname <- combinedname[ord]
  ID <- duplicated(combinedname)             # TRUE for every sequence after the first (longest)
  uniques <- seqs[!ID]
  uniquesnames <- combinedname[!ID]
  print(length(uniques))                     # number of unique species kept
  file.name <- strsplit(fasta.file, split=".", fixed=TRUE)[[1]][[1]]  # basename up to first "."
  species_uniques <- uniques
  names(species_uniques) <- uniquesnames
  # BUG FIX: the original passed format="fasta" inside paste(), which only
  # produced the ".fasta" suffix by accident (paste() swallows unmatched named
  # arguments into "...").  Build the suffix explicitly and hand format= to
  # writeXStringSet(), where it belongs.  Output filenames are unchanged.
  writeXStringSet(species_uniques,
                  file = paste(file.name, "unique", "fasta", sep="."),
                  format = "fasta")
  names(uniques) <- namesGBord[!ID]          # restore full NCBI headers
  writeXStringSet(uniques,
                  file = paste(file.name, "unique.GB", "fasta", sep="."),
                  format = "fasta")
}
## To execute, run the above funtion, then call the file that you would like to parse. See the example atpB.FINAL.aln.full below:
#parsePHLAWD("atpB.FINAL.aln.full")
## Output: *unique.fasta == the alignment trimed to just the longest sequences, i.e. the unique seqs
## *unique.GB.fasta == same as the above, but with the ncbi info. and the species names
######## Fix names of files that were removed
#setwd("~/Documents/Idaho/Tank/Projects/SanJuans/FINAL/2b_Remove/")
# Re-name sequences that were removed from an alignment (a *.fasta.rem file):
# replace the full NCBI headers with plain "Genus_species" names and write the
# result to <base>.unique.rem.name.fasta.
parseREMOVED <- function(fasta.file){
  rem <- readDNAStringSet(fasta.file)
  namesRem <- names(rem)                     # full NCBI headers
  print(length(namesRem))                    # number of removed sequences
  split <- strsplit(namesRem, split="|", fixed=TRUE)
  species.name <- sapply(split, "[", 2L)     # "Genus_species_var..."
  genus.name <- sapply(strsplit(species.name, split="_"), "[", 1L)
  species.name2 <- sapply(strsplit(species.name, split="_"), "[", 2L)
  combinedname <- paste(genus.name, species.name2, sep="_")  # plain "Genus_species"
  names(rem) <- combinedname
  file.name <- strsplit(fasta.file, split=".", fixed=TRUE)[[1]][[1]]  # basename up to first "."
  # BUG FIX: as in parsePHLAWD(), format= belongs to writeXStringSet(), not
  # paste(); the output filename is unchanged.
  writeXStringSet(rem,
                  file = paste(file.name, "unique.rem.name", "fasta", sep="."),
                  format = "fasta")
}
#parseREMOVED("atpB.unique.GB.fasta.rem") #62
# Extract the taxon names from a FASTA alignment, write them to
# <base>.species.csv, and return them.
parseALIGNMENT <- function(fasta.file){
  GBseqs <- readDNAStringSet(fasta.file)
  namesGB <- names(GBseqs)                   # alignment names are already "Genus_species"
  print(length(namesGB))                     # number of sequences
  combinedname <- namesGB
  file.name <- strsplit(fasta.file, split=".", fixed=TRUE)[[1]][[1]]  # basename up to first "."
  # BUG FIX: the original pasted ".csv" with sep=".", yielding the double-dot
  # filename "<base>.species..csv"; write "<base>.species.csv" instead.
  write.csv(combinedname, file=paste(file.name, "species", "csv", sep="."))
  return(combinedname)
}
#fasta.file <- "~/Dropbox/Work/FranceLab/FranceProjects/IslandComparaive/WeigletWG/8Archepleagos/231014/4_Concatenate/align.concat.8arch.241014.fst"
#extractID <- read.csv("~/Dropbox/Work/FranceLab/FranceProjects/Islandcomparaive/WeigletWG/8Archepleagos/ExtractID/output241014.txt")
# Map the "Genus_species" sequence names of a concatenated alignment back to
# the accepted names / NCBI ids of a PHLAWD include file.
#
# fasta.file: path to the alignment (names formatted "Genus_species_...").
# extractID:  data frame with columns input_name, accepted_name, ncbi_id.
# file.name:  stem for the three output alignments:
#   <file.name>.acceptedID.fst - names "accepted_name|ncbi_id"
#   <file.name>.accepted.fst   - names "accepted_name"
#   <file.name>.input.fst      - names "input_name"
# Sequences with no match in the include file are reported and dropped.
parseALIGNMENT.Input.to.acceptedName <- function(fasta.file, extractID, file.name){
  # --- normalise the include-file input names to "Genus species" ---
  split <- strsplit(as.character(extractID$input_name), split=" ", fixed=TRUE)
  species.name <- sapply(split, "[", 2L)
  # keep only the epithet part before any "-" (e.g. "glaucum-x" -> "glaucum")
  species.name3 <- sapply(strsplit(species.name, split="-", fixed=TRUE), "[", 1L)
  genus.name <- sapply(split, "[", 1L)
  extractID$input_name <- paste(genus.name, species.name3, sep=" ")
  # one row per input name; the first occurrence wins, as before
  extractID.uniques <- subset(extractID, !duplicated(extractID$input_name))
  # --- read the alignment and build matching "Genus species" keys ---
  GBseqs <- readDNAStringSet(fasta.file)
  print(length(GBseqs))                      # number of sequences in the alignment
  splitGB <- strsplit(names(GBseqs), split="_", fixed=TRUE)
  align.key <- paste(sapply(splitGB, "[", 1L), sapply(splitGB, "[", 2L), sep=" ")
  # PERF FIX: the original grew three DNAStringSets with c() inside a
  # per-sequence loop (accidental O(n^2)) and subset the data frame once per
  # sequence; a single match() pass produces the identical output files.
  idx <- match(align.key, extractID.uniques$input_name)
  for (nm in names(GBseqs)[is.na(idx)]) {
    print(paste(nm, "does not match PHLAWD includefile"))
  }
  keep <- !is.na(idx)
  matched.seqs <- GBseqs[keep]
  matched.rows <- extractID.uniques[idx[keep], ]
  matched.acceptedID.align <- matched.seqs
  names(matched.acceptedID.align) <- paste(as.character(matched.rows$accepted_name),
                                           matched.rows$ncbi_id, sep="|")
  matched.accepted.align <- matched.seqs
  names(matched.accepted.align) <- as.character(matched.rows$accepted_name)
  matched.input.align <- matched.seqs
  names(matched.input.align) <- as.character(matched.rows$input_name)
  writeXStringSet(matched.acceptedID.align, file=paste(file.name, "acceptedID", "fst", sep="."))
  writeXStringSet(matched.accepted.align, file=paste(file.name, "accepted", "fst", sep="."))
  writeXStringSet(matched.input.align, file=paste(file.name, "input", "fst", sep="."))
}
#parseALIGNMENT.Input.to.acceptedName(fasta.file, extractID, file.name="align.concat.8arch.241014")
#[1] "Picris_sp does not match PHLAWD includefile"
#[1] "Chenopodium_glaucum does not match PHLAWD includefile"
#[1] "Polypodium_polypodioides does not match PHLAWD includefile"
#[1] "Hedyotis_corymbosa does not match PHLAWD includefile"
#[1] "Polygonum_chinense does not match PHLAWD includefile"
#[1] "Phelipanche_purpurea does not match PHLAWD includefile"
|
#' Read Olympus Vanta, Panalytical XRF files
#'
#' The standard Olympus Vanta file presents all elemental concentrations in ppm, and
#' all errors as 1 standard deviation. The default Panalytical output format
#' specifies the unit for each measurement, and does not consider error. These
#' functions do their best to keep all available information in the output,
#' standardizing the columns xrf_info, date_time, and sample_id. Concentration
#' columns end in `conc`, standard deviation columns end in `sd`, and count
#' columns end in `Iraw` or `Inet`.
#'
#' @param path The location of the file
#' @param sample_id_col The column containing the sample identifier
#' @param tz Timezone of specified times
#'
#' @return A data.frame
#' @export
#'
#' @examples
#' read_olympus_vanta(system.file("xrf_files/olympus_vanta_test.csv", package = "paleoxrf"))
#' read_panalytical_txt(system.file("xrf_files/panalytical_test.txt", package = "paleoxrf"))
#'
read_olympus_vanta <- function(path, sample_id_col = "info", tz = "UTC") {
  # capture the column selection so callers can pass a bare name or a string
  sample_id_col <- enquo(sample_id_col)
  # read the second line of the file as column names
  . <- NULL; rm(.) # silence the R CMD check NOTE about the magrittr placeholder
  oly_colnames <- readr::read_csv(
    path,
    skip = 1, n_max = 1, col_names = FALSE,
    col_types = readr::cols(.default = readr::col_character())
  ) %>%
    t() %>%
    .[, 1, drop = TRUE] %>%
    unname()
  # the trailing separator in the header yields an NA name; give it a placeholder
  # so it can be skipped explicitly below
  oly_colnames[is.na(oly_colnames)] <- "no_col_name"
  # read the data, skipping both header lines
  oly <- readr::read_csv(
    path,
    col_names = oly_colnames,
    skip = 2,
    col_types = readr::cols(
      .default = readr::col_character(),
      Date = readr::col_date(),
      Time = readr::col_time(),
      no_col_name = readr::col_skip()
    )
  )
  oly$xrf_info <- "Olympus Vanta"
  # Date + Time are combined in UTC, then re-stamped in the requested timezone
  oly$date_time <- lubridate::force_tz(lubridate::as_datetime(oly$Date, tz = "UTC") + oly$Time, tz)
  oly$sample_id <- dplyr::pull(oly, !!sample_id_col)
  oly <- oly %>%
    # namespace the tidyselect helpers for consistency with the rest of the file
    dplyr::mutate_at(dplyr::vars(dplyr::ends_with("Concentration"), dplyr::ends_with("Error 1s")), as.numeric) %>%
    dplyr::select("xrf_info", "date_time", "sample_id", dplyr::everything())
  # standardize suffixes: "... Concentration" -> "..._conc", "... Error 1s" -> "..._sd"
  # (anchor both patterns at end-of-name; the "Error 1s" pattern was unanchored)
  colnames(oly) <- colnames(oly) %>%
    stringr::str_replace("\\s*Concentration$", "_conc") %>%
    stringr::str_replace("\\s*Error 1s$", "_sd")
  # return the standardized data frame
  oly
}
#' @rdname read_olympus_vanta
#' @export
read_panalytical_txt <- function(path, sample_id_col = "Ident", tz = "UTC") {
  # capture the column selection so callers can pass a bare name or a string
  sample_id_col <- enquo(sample_id_col)
  # the header is spread over the first two lines; carry the first-row labels
  # down over blanks and paste the two rows into single "Label_Type" names
  col_names <- readr::read_tsv(
    path,
    col_types = readr::cols(.default = readr::col_character()),
    col_names = FALSE,
    skip = 0,
    n_max = 2
  ) %>%
    t() %>%
    as.data.frame(stringsAsFactors = FALSE) %>%
    tidyr::fill("V1", .direction = "down") %>%
    purrr::transpose() %>%
    purrr::map_chr(function(x) paste(stats::na.omit(unlist(x)), collapse = "_"))
  # the last column is a blank one, not whatever the last element was
  col_names[length(col_names)] <- "blank_column"
  # this uses the column names we just generated to read the file
  xrf_raw <- readr::read_tsv(
    path,
    col_names = col_names,
    skip = 2,
    col_types = readr::cols(
      .default = readr::col_character(),
      blank_column = readr::col_skip()
    )
  )
  xrf_raw$xrf_info <- "Panalytical Epsilon 1"
  xrf_raw$sample_id <- dplyr::pull(xrf_raw, !!sample_id_col)
  # times are recorded day-month-year hour:minute:second; parse as UTC then
  # re-stamp in the requested timezone
  xrf_raw$date_time <- lubridate::force_tz(lubridate::dmy_hms(xrf_raw$Time, tz = "UTC"), tz)
  # tidy columns: drop the instrument's "Ave"/"SDev" summary rows and coerce
  # measurement columns to numeric
  xrf_raw <- xrf_raw %>%
    dplyr::filter(!stringr::str_detect(.data$Seq, "Ave|SDev")) %>%
    # namespace the tidyselect helpers for consistency with the rest of the file
    dplyr::mutate_at(dplyr::vars(dplyr::ends_with("_C"), dplyr::ends_with("_Iraw"), dplyr::ends_with("_Inet")), as.numeric) %>%
    dplyr::select("xrf_info", "date_time", "sample_id", dplyr::everything())
  # standardize suffixes on column names
  colnames(xrf_raw) <- colnames(xrf_raw) %>%
    stringr::str_replace("_Unit$", "_unit") %>%
    stringr::str_replace("_C$", "_conc")
  xrf_raw
}
| /R/read_xrf.R | no_license | paleolimbot/paleoxrf | R | false | false | 3,916 | r |
#' Read Olympus Vanta, Panalytical XRF files
#'
#' The standard Olympus Vanta file presents all elemental concentrations in ppm, and
#' all errors as 1 standard deviation. The default Panalytical output format
#' specifies the unit for each measurement, and does not consider error. These
#' functions do their best to keep all available information in the output,
#' standardizing the columns xrf_info, date_time, and sample_id. Concentration
#' columns end in `conc`, standard deviation columns end in `sd`, and count
#' columns end in `Iraw` or `Inet`.
#'
#' @param path The location of the file
#' @param sample_id_col The column containing the sample identifier
#' @param tz Timezone of specified times
#'
#' @return A data.frame
#' @export
#'
#' @examples
#' read_olympus_vanta(system.file("xrf_files/olympus_vanta_test.csv", package = "paleoxrf"))
#' read_panalytical_txt(system.file("xrf_files/panalytical_test.txt", package = "paleoxrf"))
#'
read_olympus_vanta <- function(path, sample_id_col = "info", tz = "UTC") {
  # capture the column selection so callers can pass a bare name or a string
  sample_id_col <- enquo(sample_id_col)
  # read the second line of the file as column names
  . <- NULL; rm(.) # silence the R CMD check NOTE about the magrittr placeholder
  oly_colnames <- readr::read_csv(
    path,
    skip = 1, n_max = 1, col_names = FALSE,
    col_types = readr::cols(.default = readr::col_character())
  ) %>%
    t() %>%
    .[, 1, drop = TRUE] %>%
    unname()
  # the trailing separator in the header yields an NA name; give it a placeholder
  # so it can be skipped explicitly below
  oly_colnames[is.na(oly_colnames)] <- "no_col_name"
  # read the data, skipping both header lines
  oly <- readr::read_csv(
    path,
    col_names = oly_colnames,
    skip = 2,
    col_types = readr::cols(
      .default = readr::col_character(),
      Date = readr::col_date(),
      Time = readr::col_time(),
      no_col_name = readr::col_skip()
    )
  )
  oly$xrf_info <- "Olympus Vanta"
  # Date + Time are combined in UTC, then re-stamped in the requested timezone
  oly$date_time <- lubridate::force_tz(lubridate::as_datetime(oly$Date, tz = "UTC") + oly$Time, tz)
  oly$sample_id <- dplyr::pull(oly, !!sample_id_col)
  oly <- oly %>%
    # namespace the tidyselect helpers for consistency with the rest of the file
    dplyr::mutate_at(dplyr::vars(dplyr::ends_with("Concentration"), dplyr::ends_with("Error 1s")), as.numeric) %>%
    dplyr::select("xrf_info", "date_time", "sample_id", dplyr::everything())
  # standardize suffixes: "... Concentration" -> "..._conc", "... Error 1s" -> "..._sd"
  # (anchor both patterns at end-of-name; the "Error 1s" pattern was unanchored)
  colnames(oly) <- colnames(oly) %>%
    stringr::str_replace("\\s*Concentration$", "_conc") %>%
    stringr::str_replace("\\s*Error 1s$", "_sd")
  # return the standardized data frame
  oly
}
#' @rdname read_olympus_vanta
#' @export
read_panalytical_txt <- function(path, sample_id_col = "Ident", tz = "UTC") {
  # capture the column selection so callers can pass a bare name or a string
  sample_id_col <- enquo(sample_id_col)
  # the header is spread over the first two lines; carry the first-row labels
  # down over blanks and paste the two rows into single "Label_Type" names
  col_names <- readr::read_tsv(
    path,
    col_types = readr::cols(.default = readr::col_character()),
    col_names = FALSE,
    skip = 0,
    n_max = 2
  ) %>%
    t() %>%
    as.data.frame(stringsAsFactors = FALSE) %>%
    tidyr::fill("V1", .direction = "down") %>%
    purrr::transpose() %>%
    purrr::map_chr(function(x) paste(stats::na.omit(unlist(x)), collapse = "_"))
  # the last column is a blank one, not whatever the last element was
  col_names[length(col_names)] <- "blank_column"
  # this uses the column names we just generated to read the file
  xrf_raw <- readr::read_tsv(
    path,
    col_names = col_names,
    skip = 2,
    col_types = readr::cols(
      .default = readr::col_character(),
      blank_column = readr::col_skip()
    )
  )
  xrf_raw$xrf_info <- "Panalytical Epsilon 1"
  xrf_raw$sample_id <- dplyr::pull(xrf_raw, !!sample_id_col)
  # times are recorded day-month-year hour:minute:second; parse as UTC then
  # re-stamp in the requested timezone
  xrf_raw$date_time <- lubridate::force_tz(lubridate::dmy_hms(xrf_raw$Time, tz = "UTC"), tz)
  # tidy columns: drop the instrument's "Ave"/"SDev" summary rows and coerce
  # measurement columns to numeric
  xrf_raw <- xrf_raw %>%
    dplyr::filter(!stringr::str_detect(.data$Seq, "Ave|SDev")) %>%
    # namespace the tidyselect helpers for consistency with the rest of the file
    dplyr::mutate_at(dplyr::vars(dplyr::ends_with("_C"), dplyr::ends_with("_Iraw"), dplyr::ends_with("_Inet")), as.numeric) %>%
    dplyr::select("xrf_info", "date_time", "sample_id", dplyr::everything())
  # standardize suffixes on column names
  colnames(xrf_raw) <- colnames(xrf_raw) %>%
    stringr::str_replace("_Unit$", "_unit") %>%
    stringr::str_replace("_C$", "_conc")
  xrf_raw
}
|
#' Basic virtual reference class
#'
#' Virtual ("template") Reference Class for all RCs
#'
#' This reference class contains fields (aka "attributes")
#' and methods (aka "procedures") for all basic RCs.
#'
#' @field package.name character. Name of package
#' @field object.name character. Name of object
#' @field verbose logical. Are methods verbose when called?
#'
#' #@import ncdf4
#' @importFrom methods new
#' @exportClass rcvirtual.basic
#'
setRefClass(
  Class = "rcvirtual.basic",
  contains = c("VIRTUAL"),
  fields = list(
    package.name = "character",  # name of the package that owns this object
    object.name = "character",   # name of this object instance
    timestamp = "POSIXct",       # creation time, set by initialize()
    verbose = "logical"          # should methods print progress messages?
  ),
  methods = list(
    # ------------------------------------------------------
    # Initializer methods ----------------------------------
    # ------------------------------------------------------
    initialize = function(package.name = "anonymous",
                          object.name = "anonymous",
                          verbose = TRUE,
                          autoconstruct = FALSE) {
      "Default method to initialize basic objects"
      .self$package.name <- package.name
      .self$object.name <- object.name
      .self$verbose <- verbose
      .self$timestamp <- Sys.time()
      # optionally run construct() right away so callers can build in one step
      if (autoconstruct) .self$construct()
    },
    construct = function() {
      "Construct basic objects"
      if (.self$verbose) {
        # fields are visible directly inside RC methods, hence bare object.name
        cat("Constructing object", object.name,
            "for package", .self$package.name, "\n")
      }
    },
    # ------------------------------------------------------
    # Set methods ------------------------------------------
    # ------------------------------------------------------
    set.verbose = function(verbose) {
      "Changes verbosity level"
      .self$verbose <- verbose
    },
    set.name = function(new.name){
      "Sets the name of a random variable/vector."
      q <- paste0("Changed the name of ",
                  class(.self),
                  ", from '", .self$object.name,
                  "' to '", new.name,"'")
      print(q, quote = FALSE)
      .self$object.name <- new.name
    },
    # ------------------------------------------------------
    # Get methods ------------------------------------------
    # ------------------------------------------------------
    get.name = function(){
      "Provides the object's name."
      q <- paste0("Name of ", class(.self), ": '",
                  .self$object.name, "'")
      print(q, quote = FALSE)
    },
    get.rdata = function(fullpath) {
      "Reads an .RData or .rda file and passes
      its contents as a list"
      # Load into a private environment. This avoids clobbering locals and
      # fixes the previous behaviour, which loaded into the method frame and
      # silently dropped any stored object that happened to be named
      # 'fullpath'.
      tmp.env <- new.env(parent = emptyenv())
      nm <- load(fullpath, envir = tmp.env)
      out <- mget(nm, envir = tmp.env)  # named list of all stored objects
      if (length(nm) == 1) {
        # single stored object: return it directly, not wrapped in a list
        out <- out[[1]]
      }
      return(out)
    },
    get.txt = function(fullpath) {
      "Reads a .txt file and passes
      its contents as a dataframe"
      out <- read.table(fullpath, header = TRUE)
      return(out)
    },
    get.csv = function(fullpath, unlist = FALSE){
      "General method to import comma separated value files"
      mydt <- read.csv(
        fullpath, header = TRUE, stringsAsFactors = FALSE
      )
      # Bug fix: the previous `mydt <- if (is.list(mydt)) unlist(mydt)`
      # replaced mydt with NULL whenever it was not a list.
      if (unlist && is.list(mydt)) {
        mydt <- unlist(mydt)
      }
      return(mydt)
    },
    # get.netcdf = function(fullpath, var.name = NULL){
    #   "Retrieves data from any netcdf file on disk and
    #   returns a list"
    #
    #   a <- ncdf4::nc_open(fullpath)
    #   if (is.null(var.name)) {
    #     vname <- a$var.names[1]
    #   } else {
    #     vname <- var.name
    #   }
    #   filedata <- vector("list", length = a$ndims + a$nvars)
    #   for (i in 1:a$ndims) {
    #     filedata[[i]] <- a$dim[[i]]$vals
    #   }
    #   for (j in 1:a$nvars) {
    #     i <- a$ndims + j
    #     myid <- names(a$var)[j]
    #     filedata[[i]] <- ncdf4::ncvar_get(a, varid = myid)
    #   }
    #   names(filedata) <- c(names(a$dim), names(a$var))
    #   return(filedata)
    # },
    get.args = function(function.name.pattern) {
      'Lists the arguments in all the exclusive functions
      that match the pattern provided'
      em <- .self$methods()
      fnames <- em$exclusive[grepl(function.name.pattern, em$exclusive)]
      fargs <- lapply(fnames, FUN = function(fn) {
        # RC methods are looked up via `$`; the parse/eval builds that access
        # dynamically from the method's name
        formalArgs(eval(parse(text = paste0('.self$', fn))))
      })
      names(fargs) <- fnames
      return(fargs)
    },
    # ------------------------------------------------------
    # Is methods -------------------------------------------
    # ------------------------------------------------------
    # ------------------------------------------------------
    # User methods -----------------------------------------
    # ------------------------------------------------------
    fields = function() {
      "Lists the fields available in this object"
      get(class(.self)[1])$fields()
    },
    methods = function() {
      "Lists the methods available in this object"
      # methods provided by the reference-class machinery itself
      r5methods <- c("callSuper", "copy",
                     "export", "field", "getClass",
                     "getRefClass", "import", "initFields",
                     "show", "trace", "untrace",
                     "usingMethods", ".objectPackage",
                     ".objectParent")
      all.methods <- get(class(.self)[1])$methods()
      sub.crit <- mapply(all.methods, FUN = function(x){
        all(x != r5methods)
      })
      sub.methods <- all.methods[sub.crit]
      # entries containing "#" are how $methods() reports inherited versions
      # (e.g. "initialize#envRefClass")
      up.crit <- mapply(sub.methods, FUN = function(x){
        grepl("#", x)
      })
      up.methods <- sub.methods[up.crit]
      my.methods <- sub.methods[!up.crit]
      out <- list(exclusive = my.methods,
                  inherited = up.methods,
                  general = r5methods)
      return(out)
    },
    help = function(method = .self$methods()) {
      "Prints the description under a specific method"
      get(class(.self)[1])$help(as.character(method))
    },
    validate = function() {
      "Validate basic objects"
      if (.self$verbose) {
        cat("Validating object", .self$object.name, "\n")
      }
    }
  )
)
| /R/v-basic.R | permissive | rtlemos/rcvirtual | R | false | false | 6,206 | r | #' Basic virtual reference class
#'
#' Virtual ("template") Reference Class for all RCs
#'
#' This reference class contains fields (aka "attributes")
#' and methods (aka "procedures") for all basic RCs.
#'
#' @field package.name character. Name of package
#' @field object.name character. Name of object
#' @field verbose logical. Are methods verbose when called?
#'
#' #@import ncdf4
#' @importFrom methods new
#' @exportClass rcvirtual.basic
#'
setRefClass(
  Class = "rcvirtual.basic",
  contains = c("VIRTUAL"),
  fields = list(
    package.name = "character",  # name of the package that owns this object
    object.name = "character",   # name of this object instance
    timestamp = "POSIXct",       # creation time, set by initialize()
    verbose = "logical"          # should methods print progress messages?
  ),
  methods = list(
    # ------------------------------------------------------
    # Initializer methods ----------------------------------
    # ------------------------------------------------------
    initialize = function(package.name = "anonymous",
                          object.name = "anonymous",
                          verbose = TRUE,
                          autoconstruct = FALSE) {
      "Default method to initialize basic objects"
      .self$package.name <- package.name
      .self$object.name <- object.name
      .self$verbose <- verbose
      .self$timestamp <- Sys.time()
      # optionally run construct() right away so callers can build in one step
      if (autoconstruct) .self$construct()
    },
    construct = function() {
      "Construct basic objects"
      if (.self$verbose) {
        # fields are visible directly inside RC methods, hence bare object.name
        cat("Constructing object", object.name,
            "for package", .self$package.name, "\n")
      }
    },
    # ------------------------------------------------------
    # Set methods ------------------------------------------
    # ------------------------------------------------------
    set.verbose = function(verbose) {
      "Changes verbosity level"
      .self$verbose <- verbose
    },
    set.name = function(new.name){
      "Sets the name of a random variable/vector."
      q <- paste0("Changed the name of ",
                  class(.self),
                  ", from '", .self$object.name,
                  "' to '", new.name,"'")
      print(q, quote = FALSE)
      .self$object.name <- new.name
    },
    # ------------------------------------------------------
    # Get methods ------------------------------------------
    # ------------------------------------------------------
    get.name = function(){
      "Provides the object's name."
      q <- paste0("Name of ", class(.self), ": '",
                  .self$object.name, "'")
      print(q, quote = FALSE)
    },
    get.rdata = function(fullpath) {
      "Reads an .RData or .rda file and passes
      its contents as a list"
      # Load into a private environment. This avoids clobbering locals and
      # fixes the previous behaviour, which loaded into the method frame and
      # silently dropped any stored object that happened to be named
      # 'fullpath'.
      tmp.env <- new.env(parent = emptyenv())
      nm <- load(fullpath, envir = tmp.env)
      out <- mget(nm, envir = tmp.env)  # named list of all stored objects
      if (length(nm) == 1) {
        # single stored object: return it directly, not wrapped in a list
        out <- out[[1]]
      }
      return(out)
    },
    get.txt = function(fullpath) {
      "Reads a .txt file and passes
      its contents as a dataframe"
      out <- read.table(fullpath, header = TRUE)
      return(out)
    },
    get.csv = function(fullpath, unlist = FALSE){
      "General method to import comma separated value files"
      mydt <- read.csv(
        fullpath, header = TRUE, stringsAsFactors = FALSE
      )
      # Bug fix: the previous `mydt <- if (is.list(mydt)) unlist(mydt)`
      # replaced mydt with NULL whenever it was not a list.
      if (unlist && is.list(mydt)) {
        mydt <- unlist(mydt)
      }
      return(mydt)
    },
    # get.netcdf = function(fullpath, var.name = NULL){
    #   "Retrieves data from any netcdf file on disk and
    #   returns a list"
    #
    #   a <- ncdf4::nc_open(fullpath)
    #   if (is.null(var.name)) {
    #     vname <- a$var.names[1]
    #   } else {
    #     vname <- var.name
    #   }
    #   filedata <- vector("list", length = a$ndims + a$nvars)
    #   for (i in 1:a$ndims) {
    #     filedata[[i]] <- a$dim[[i]]$vals
    #   }
    #   for (j in 1:a$nvars) {
    #     i <- a$ndims + j
    #     myid <- names(a$var)[j]
    #     filedata[[i]] <- ncdf4::ncvar_get(a, varid = myid)
    #   }
    #   names(filedata) <- c(names(a$dim), names(a$var))
    #   return(filedata)
    # },
    get.args = function(function.name.pattern) {
      'Lists the arguments in all the exclusive functions
      that match the pattern provided'
      em <- .self$methods()
      fnames <- em$exclusive[grepl(function.name.pattern, em$exclusive)]
      fargs <- lapply(fnames, FUN = function(fn) {
        # RC methods are looked up via `$`; the parse/eval builds that access
        # dynamically from the method's name
        formalArgs(eval(parse(text = paste0('.self$', fn))))
      })
      names(fargs) <- fnames
      return(fargs)
    },
    # ------------------------------------------------------
    # Is methods -------------------------------------------
    # ------------------------------------------------------
    # ------------------------------------------------------
    # User methods -----------------------------------------
    # ------------------------------------------------------
    fields = function() {
      "Lists the fields available in this object"
      get(class(.self)[1])$fields()
    },
    methods = function() {
      "Lists the methods available in this object"
      # methods provided by the reference-class machinery itself
      r5methods <- c("callSuper", "copy",
                     "export", "field", "getClass",
                     "getRefClass", "import", "initFields",
                     "show", "trace", "untrace",
                     "usingMethods", ".objectPackage",
                     ".objectParent")
      all.methods <- get(class(.self)[1])$methods()
      sub.crit <- mapply(all.methods, FUN = function(x){
        all(x != r5methods)
      })
      sub.methods <- all.methods[sub.crit]
      # entries containing "#" are how $methods() reports inherited versions
      # (e.g. "initialize#envRefClass")
      up.crit <- mapply(sub.methods, FUN = function(x){
        grepl("#", x)
      })
      up.methods <- sub.methods[up.crit]
      my.methods <- sub.methods[!up.crit]
      out <- list(exclusive = my.methods,
                  inherited = up.methods,
                  general = r5methods)
      return(out)
    },
    help = function(method = .self$methods()) {
      "Prints the description under a specific method"
      get(class(.self)[1])$help(as.character(method))
    },
    validate = function() {
      "Validate basic objects"
      if (.self$verbose) {
        cat("Validating object", .self$object.name, "\n")
      }
    }
  )
)
|
# Auto-extracted example script for gRapfa::apfa2NS ("APFA to node symbol array").
library(gRapfa)
### Name: apfa2NS
### Title: APFA to node symbol array
### Aliases: apfa2NS
### ** Examples
library(gRapfa)                 # duplicate attach from the extraction; harmless
data(Wheeze)                    # example data set shipped with gRapfa
G <- st(Wheeze)                 # fit a model from the data -- presumably a tree/APFA; see ?gRapfa::st
G.c <- contract.last.level(G)   # contract the last level of G (per the function's name)
ns.array <- apfa2NS(G.c)        # convert the APFA into a node symbol array (per title above)
| /data/genthat_extracted_code/gRapfa/examples/apfa2NS.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 215 | r | library(gRapfa)
# Auto-extracted example script for gRapfa::apfa2NS ("APFA to node symbol array").
### Name: apfa2NS
### Title: APFA to node symbol array
### Aliases: apfa2NS
### ** Examples
library(gRapfa)
data(Wheeze)                    # example data set shipped with gRapfa
G <- st(Wheeze)                 # fit a model from the data -- presumably a tree/APFA; see ?gRapfa::st
G.c <- contract.last.level(G)   # contract the last level of G (per the function's name)
ns.array <- apfa2NS(G.c)        # convert the APFA into a node symbol array (per title above)
|
# Read WhatsApp Export ----------------------------------------------------
# Builds a tidy message table with columns:
# Contact, DateTime, MessageType, Message, MessageLength
# pacman::p_install_gh("JBGruber/rwhatsapp")
pacman::p_load(tidyverse, rwhatsapp, lubridate)
# Import ------------------------------------------------------------------
d_raw <- rwa_read("C:/Users/kputs/Downloads/WhatsApp Chat with Emily Kay Piellusch.txt")
# Clean-up: drop system rows (no author), tag each message Sent/Received,
# blank out media placeholders, and measure message length.
# NOTE(review): Contact = setdiff(unique(author), "Kevin") assumes the chat
# has exactly one participant besides "Kevin" -- confirm for group chats.
d_prep <-
  d_raw %>%
  filter(!is.na(author)) %>%
  mutate(Contact = author %>% unique() %>% setdiff("Kevin")) %>%
  mutate(MessageType = ifelse(author == "Kevin", "Sent", "Received")) %>%
  mutate(Message = str_replace(text, "<Media omitted>", ".")) %>%
  mutate(MessageLength = str_length(Message)) %>%
  select(Contact, DateTime = time, MessageType, Message, MessageLength, emoji_name, author) %>%
  print()
# Spot-check a single day against the packaged import function below.
d_prep %>% filter(date(DateTime) == "2019-8-09")
# NOTE(review): fx_sms_import() is not defined in this file -- presumably
# provided elsewhere in the project; confirm before running standalone.
my_fx <- fx_sms_import("C:/Users/kputs/Downloads/WhatsApp Chat with Emily Kay Piellusch.txt")
my_fx %>% filter(date(DateTime) == "2019-8-09") %>% view()
# Export ------------------------------------------------------------------
# Label the export file with the date of the most recent message.
export_label <-
  d_prep %>%
  summarise(max_date = DateTime %>% max() %>% date()) %>%
  pull()
d_export <-
  d_prep %>%
  select(-emoji_name, -author)
d_export %>% write_rds(str_glue("data/new/wa_{export_label}.rds"))
# Visuals -----------------------------------------------------------------
# Message density over time, sent vs received.
d_prep %>%
  ggplot(aes(x = DateTime, fill = MessageType)) +
  geom_density(alpha = 0.50)
# Total characters written per day, sent vs received.
d_prep %>%
  group_by(day = date(DateTime), MessageType) %>%
  summarise(length_sum = sum(MessageLength)) %>%
  ggplot(aes(x = day, y = length_sum, fill = MessageType)) +
  geom_col(position = "dodge")
# Emoji usage counts per author.
d_prep %>%
  select(author, emoji_name) %>%
  unnest(emoji_name) %>%
  count(author, emoji_name, sort = TRUE)
# Testing! ----------------------------------------------------------------
# Testing Text Import -----------------------------------------------------
# fruits <- "apples and oranges and pears and bananas\npineapples and mangos and guavas"
#
# fruits %>%
#   str_split("\n") %>%
#   map_df(enframe, name = "id", value = "message")
#
# spotify_text <- "Here’s a song for you… Rebel Rebel by Seu Jorge https://open.spotify.com/track/5mZYRyOPWVlTtPGWHJCbAL?si=vbIw1Ty1SBqrH-nu3hTmVA\nnewline"
# spotify_text <- "https://open.spotify.com/episode/6Lt33QIVpvBk9fpHKrRJ91?s\nalala"
# spotify_regex <- "https://open.spotify.*\n"
#
# str_view(spotify_text, spotify_regex)
# str_replace(spotify_text, spotify_regex, "")
# Test JBGruber's Package -------------------------------------------------
# test_rwa <- rwa_read("C:/Users/kputs/Downloads/WhatsApp Chat with Emily Kay Piellusch.txt")
# test_rwa %>% view()
# Read Text File ----------------------------------------------------------
# raw <- read_file("C:/Users/kputs/Downloads/WhatsApp Chat with Emily Kay Piellusch.txt")
#
# table <-
#   raw %>%
#   str_replace_all("https://open.spotify.com*\n", "<spotify>\n") %>%
#   str_replace_all("https://www.reddit.com.*\n", "<reddit>\n") %>%
#   str_replace_all("\n\n", "<newline>") %>%
#   str_split("\n") %>%
#   map_df(enframe, name = "id", value = "text")
#
# table %>% View()
#
# tidy <-
#   table %>%
#   # slice(1:10) %>%
#   slice(710:715) %>%
#   separate(text, c("dt", "message"), sep = " - ", extra = "merge") %>% print()
#   mutate(seconds = str_pad(id, pad = "0", width = 2, side = "left")) %>%
#   mutate(dts = str_c(dt, seconds, sep = ":")) %>%
#   mutate(DateTime = parse_datetime(dts, format = "%D, %T")) %>%
#   select(DateTime, message) %>%
#   filter(!str_detect(message, "Messages to this chat and calls")) %>%
#   separate(message, c("raw_contact", "Message"), sep = ":", extra = "merge") %>%
#   mutate(Message = str_trim(Message, side = "both")) %>%
#   mutate(Contact = raw_contact %>% unique() %>% setdiff("Kevin")) %>%
#   mutate(MessageType = ifelse(raw_contact == "Kevin", "Sent", "Received")) %>%
#   mutate(Message = str_replace(Message, "<Media omitted>", ".")) %>%
#   mutate(MessageLength = str_length(Message)) %>%
#   select(Contact, DateTime, MessageType, Message, MessageLength) %>%
#   print()
#
# view(tidy, name = "tidy")
| /r/test - import_whatsapp.R | permissive | kputschko/kp_messages | R | false | false | 4,162 | r |
# Read WhatsApp Export ----------------------------------------------------
# Builds a tidy message table with columns:
# Contact, DateTime, MessageType, Message, MessageLength
# pacman::p_install_gh("JBGruber/rwhatsapp")
pacman::p_load(tidyverse, rwhatsapp, lubridate)
# Import ------------------------------------------------------------------
d_raw <- rwa_read("C:/Users/kputs/Downloads/WhatsApp Chat with Emily Kay Piellusch.txt")
# Clean-up: drop system rows (no author), tag each message Sent/Received,
# blank out media placeholders, and measure message length.
# NOTE(review): Contact = setdiff(unique(author), "Kevin") assumes the chat
# has exactly one participant besides "Kevin" -- confirm for group chats.
d_prep <-
  d_raw %>%
  filter(!is.na(author)) %>%
  mutate(Contact = author %>% unique() %>% setdiff("Kevin")) %>%
  mutate(MessageType = ifelse(author == "Kevin", "Sent", "Received")) %>%
  mutate(Message = str_replace(text, "<Media omitted>", ".")) %>%
  mutate(MessageLength = str_length(Message)) %>%
  select(Contact, DateTime = time, MessageType, Message, MessageLength, emoji_name, author) %>%
  print()
# Spot-check a single day against the packaged import function below.
d_prep %>% filter(date(DateTime) == "2019-8-09")
# NOTE(review): fx_sms_import() is not defined in this file -- presumably
# provided elsewhere in the project; confirm before running standalone.
my_fx <- fx_sms_import("C:/Users/kputs/Downloads/WhatsApp Chat with Emily Kay Piellusch.txt")
my_fx %>% filter(date(DateTime) == "2019-8-09") %>% view()
# Export ------------------------------------------------------------------
# Label the export file with the date of the most recent message.
export_label <-
  d_prep %>%
  summarise(max_date = DateTime %>% max() %>% date()) %>%
  pull()
d_export <-
  d_prep %>%
  select(-emoji_name, -author)
d_export %>% write_rds(str_glue("data/new/wa_{export_label}.rds"))
# Visuals -----------------------------------------------------------------
# Message density over time, sent vs received.
d_prep %>%
  ggplot(aes(x = DateTime, fill = MessageType)) +
  geom_density(alpha = 0.50)
# Total characters written per day, sent vs received.
d_prep %>%
  group_by(day = date(DateTime), MessageType) %>%
  summarise(length_sum = sum(MessageLength)) %>%
  ggplot(aes(x = day, y = length_sum, fill = MessageType)) +
  geom_col(position = "dodge")
# Emoji usage counts per author.
d_prep %>%
  select(author, emoji_name) %>%
  unnest(emoji_name) %>%
  count(author, emoji_name, sort = TRUE)
# Testing! ----------------------------------------------------------------
# Testing Text Import -----------------------------------------------------
# fruits <- "apples and oranges and pears and bananas\npineapples and mangos and guavas"
#
# fruits %>%
#   str_split("\n") %>%
#   map_df(enframe, name = "id", value = "message")
#
# spotify_text <- "Here’s a song for you… Rebel Rebel by Seu Jorge https://open.spotify.com/track/5mZYRyOPWVlTtPGWHJCbAL?si=vbIw1Ty1SBqrH-nu3hTmVA\nnewline"
# spotify_text <- "https://open.spotify.com/episode/6Lt33QIVpvBk9fpHKrRJ91?s\nalala"
# spotify_regex <- "https://open.spotify.*\n"
#
# str_view(spotify_text, spotify_regex)
# str_replace(spotify_text, spotify_regex, "")
# Test JBGruber's Package -------------------------------------------------
# test_rwa <- rwa_read("C:/Users/kputs/Downloads/WhatsApp Chat with Emily Kay Piellusch.txt")
# test_rwa %>% view()
# Read Text File ----------------------------------------------------------
# raw <- read_file("C:/Users/kputs/Downloads/WhatsApp Chat with Emily Kay Piellusch.txt")
#
# table <-
#   raw %>%
#   str_replace_all("https://open.spotify.com*\n", "<spotify>\n") %>%
#   str_replace_all("https://www.reddit.com.*\n", "<reddit>\n") %>%
#   str_replace_all("\n\n", "<newline>") %>%
#   str_split("\n") %>%
#   map_df(enframe, name = "id", value = "text")
#
# table %>% View()
#
# tidy <-
#   table %>%
#   # slice(1:10) %>%
#   slice(710:715) %>%
#   separate(text, c("dt", "message"), sep = " - ", extra = "merge") %>% print()
#   mutate(seconds = str_pad(id, pad = "0", width = 2, side = "left")) %>%
#   mutate(dts = str_c(dt, seconds, sep = ":")) %>%
#   mutate(DateTime = parse_datetime(dts, format = "%D, %T")) %>%
#   select(DateTime, message) %>%
#   filter(!str_detect(message, "Messages to this chat and calls")) %>%
#   separate(message, c("raw_contact", "Message"), sep = ":", extra = "merge") %>%
#   mutate(Message = str_trim(Message, side = "both")) %>%
#   mutate(Contact = raw_contact %>% unique() %>% setdiff("Kevin")) %>%
#   mutate(MessageType = ifelse(raw_contact == "Kevin", "Sent", "Received")) %>%
#   mutate(Message = str_replace(Message, "<Media omitted>", ".")) %>%
#   mutate(MessageLength = str_length(Message)) %>%
#   select(Contact, DateTime, MessageType, Message, MessageLength) %>%
#   print()
#
# view(tidy, name = "tidy")
|
# plot4.R: draw a 2x2 panel of time-series plots from a fixed slice of the
# household power consumption data set, saved to plot4.png.
# skip=66637 / nrows=2880 jump straight to a pre-located window of rows
# (2880 rows -- presumably two days of one-minute readings; confirm offsets
# if the source file changes).
data <- read.table("household_power_consumption.txt",
             sep=";", skip=66637, nrows=2880, na.strings="?")
# column names come from the file's first line, which the skip above bypassed
names(data) <- read.table("household_power_consumption.txt",
             sep=";", nrows=1, as.is= TRUE)
# combine the separate Date and Time columns into one timestamp
data$DateTime = strptime(paste(data$Date,data$Time), format= "%d/%m/%Y %H:%M:%S")
png(file = "plot4.png")
par(mfrow = c(2, 2))  # 2x2 grid of panels, filled row-wise
plot(data$DateTime, data$Global_active_power, type="l",
     ylab="Global Active Power", xlab="")
plot(data$DateTime, data$Voltage, type="l",
     ylab="Voltage", xlab="datetime")
# draw an empty frame first (type="n") so all three sub-meter lines share axes
plot(data$DateTime, data$Sub_metering_1, type="n",
     ylab="Energy sub metering", xlab="")
lines(data$DateTime, data$Sub_metering_1, col="black")
lines(data$DateTime, data$Sub_metering_2, col="red")
lines(data$DateTime, data$Sub_metering_3, col="blue")
legend("topright", lty = 1, col = c("black", "red", "blue"),
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
plot(data$DateTime, data$Global_reactive_power, type="l",
     ylab="Global_reactive_Power", xlab="datetime")
dev.off() | /plot4.R | no_license | Alex-Mishin/ExData_Plotting1 | R | false | false | 1,062 | r | data <- read.table("household_power_consumption.txt",
sep=";", skip=66637, nrows=2880, na.strings="?")
names(data) <- read.table("household_power_consumption.txt",
sep=";", nrows=1, as.is= TRUE)
data$DateTime = strptime(paste(data$Date,data$Time), format= "%d/%m/%Y %H:%M:%S")
png(file = "plot4.png")
par(mfrow = c(2, 2))
plot(data$DateTime, data$Global_active_power, type="l",
ylab="Global Active Power", xlab="")
plot(data$DateTime, data$Voltage, type="l",
ylab="Voltage", xlab="datetime")
plot(data$DateTime, data$Sub_metering_1, type="n",
ylab="Energy sub metering", xlab="")
lines(data$DateTime, data$Sub_metering_1, col="black")
lines(data$DateTime, data$Sub_metering_2, col="red")
lines(data$DateTime, data$Sub_metering_3, col="blue")
legend("topright", lty = 1, col = c("black", "red", "blue"),
legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
plot(data$DateTime, data$Global_reactive_power, type="l",
ylab="Global_reactive_Power", xlab="datetime")
dev.off() |
##################################################################
## Step 4: Unbias the lengths ##
##################################################################
# Script preamble: load helpers, declare the strategy, open the on-disk
# data frame, and summarise sentence lengths before resampling.
source("R/common.R")
# Caller
# NOTE(review): banner(), boxup(), info(), logger, and last_version() are not
# defined in this file -- presumably they come from R/common.R; confirm.
step <- "Step 4: Unbias the lengths"
# Purpose
explanations <- "Models reproduce data biases.
We don't want our model to only learn to make long sentences.
Short sentences should also be corrected.
- From the long sentences, we generate sentences of all sizes
- Whether to tokenize or replace the sentences is a parameter
# that means that a sentence can either serve to generate sentences of various sizes
# (used multiple times, but different length)
# or be used only one time for an specific size
# (used one time)
- Save to disk
"
print(banner(step))
print(boxup(explanations, centre = F))
# LIBRARIES ------
suppressMessages(library(furrr))
suppressMessages(library(purrr))
suppressMessages(library(future))
suppressMessages(library(dplyr))
suppressMessages(library(tokenizers))
suppressMessages(library(disk.frame))
suppressMessages(library(progress))
#setup_disk.frame(workers = 2)
# HYPER ------
hyper1 <- '
# This script tries to reduce the bias towards large sentences.
# if keep_size = "constant": 1 subset of each sentence will be used as the Truth sentence
# its size will vary between one word and all the words (sampled)
# if keep_size= "multiply": a sentence with n words will have n subsets (one with one word,
# another with 2 words, ..., until the maximum number of words in the dataset)'
#* Define ---
keep_size <- "multiply" # constant or multiply or no_touched
#* Print to console ---
print(boxup(hyper1, centre = F))
print(sprintf("Keep_size set to: %s", keep_size))
# FILES ------
info(logger, "Loading data...")
# last_file<-last_version("data_working/", pattern = ".feather",overall_pattern = "ML_formatted")
# # READ FEATHER ------
# df<- feather::read_feather(last_file)
# # TO RM
# df<- df[1:2000000,]
df.frame <- disk.frame("data_working/3.5_ML_formatted")
print(sprintf("Number of chunks: %s", nchunks(df.frame)))
## ---------------------------------------------------------------
## Sample ngrams of diff size from sentences -
## ---------------------------------------------------------------
info(logger, "WORD COUNT PER SENTENCE")
# WORD COUNT PER SENTENCE ------
print("# sentence per word count")
# word count of every "Truth" sentence, across all chunks (eager, not lazy)
freq <- cmap(df.frame, ~ count_words(.x[, "Truth"]$Truth), lazy = FALSE) %>% unlist()
table(freq) %>% print()
n_sen <- nrow(df.frame)
print(sprintf("Number of sentences: %s", n_sen))
# NOTE(review): max_word is not referenced again in the visible code
# (unbias_by_chunk computes its own per-sentence max) -- confirm it is unused.
max_word <- max(freq)
rm(freq)
# FN
# unbias_by_chunk: counteract the corpus' bias toward long sentences.
# For each (sentence, id) pair it enumerates every contiguous n-gram of the
# sentence (n = 1 .. number of words), groups those n-grams by word count,
# and samples ONE n-gram per word count. The result has one row per
# (id, length) with columns `id` and `Truth`, so every sentence contributes
# a sub-sentence of every possible size (the "multiply" strategy).
#
# Args:
#   sentences: character vector of full sentences (a chunk of the Truth column)
#   ids:       parallel vector of sentence identifiers (recycled onto all rows
#              generated from that sentence)
#   ...:       unused (accepted and ignored)
# Returns: a data frame with columns `id` and `Truth`.
# Requires dplyr and purrr to be attached (map2_dfr, mutate, group_by, select).
unbias_by_chunk<- function(sentences, ids,...) {
  #on.exit({rm(list = ls(1),pos=1);rm(list=ls());unlink(paste0(normalizePath(tempdir()), "/", dir(tempdir())), recursive = TRUE);gc()})
  #sentences <- chunk[, "Truth"]$Truth # chunk or batch
  #ids <- chunk[, "id"]$id # chunk or batch
  # iterate over chunk
  new_sentences_df <- map2_dfr(sentences, ids, function(x, y) {
    # all n-grams of x, from single words up to the whole sentence
    max_word <- tokenizers::count_words(x)
    df <- data.frame("ngram" = tokenizers::tokenize_ngrams(x, n_min = 1, n = max_word, simplify = TRUE))
    df <- df %>% mutate(count = tokenizers::count_words(ngram))
    # one nested row per distinct n-gram length
    df <- df %>%
      group_by(count) %>%
      tidyr::nest(data = c(ngram))
    # pick one n-gram at random for each length
    # (sample() on a length-1 *character* vector is safe; the 1:n surprise
    # only applies to numeric scalars)
    df <- df %>%
      mutate("one" = map_chr(data, ~ sample(.[[1]], 1))) %>%
      ungroup()
    df$id <- y
    df <- df %>% select(id, "Truth" = one)
    return(df)
  })
  return(new_sentences_df)
}
# SAMPLE DIFFERENT SIZES ------
info(logger, "Sampling sentences...")
if (keep_size == "constant") {
# KEEP SIZE == "Constant" ------
# One random n-gram (of random length) replaces each Truth sentence; the
# output dataset keeps exactly one row per input sentence.
info(logger, "Constant strategy selected")
df.frame_caller <- cmap(
df.frame,
function(chunk) {
sentences <- chunk[, "Truth"]$Truth # chunk or batch
# iterate over chunk
new_sentences <- map_chr(sentences, function(x) {
# NOTE(review): n = 16 is hard-coded here, although max_word was computed
# from the data above -- confirm this cap is intentional.
x <- tokenizers::tokenize_ngrams(x, n_min = 1, n = 16, simplify = T)
return(sample(x, 1))
})
new_sentences <- as_tibble(list(
"id" = chunk[, "id"]$id,
"Truth" = new_sentences
))
return(new_sentences)
}
)
df.frame_caller %>% compute(outdir = "data_working/4_unbias_constant", overwrite = TRUE)
state <- 1 # No problem
} else if (keep_size == "multiply") {
# KEEP SIZE == "Multiply" ------
# Each sentence is expanded into one row per ngram length (see
# unbias_by_chunk above); results are written chunk by chunk as fst files.
info(logger, "Multiply strategy selected")
outdir <- "data_working/4_unbias_multiply/"
overwrite_check(outdir,overwrite = TRUE)
files <- list.files("data_working/3.5_ML_formatted/", full.names = TRUE)
files_shortname <- list.files("data_working/3.5_ML_formatted/", full.names = FALSE)
cid = get_chunk_ids(df.frame, full.names = TRUE)
pb<-progress::progress_bar$new(total = length(cid), force = T)
for(ii in seq_along(cid)){
# A fresh 11-worker PSOCK cluster per chunk (auto-stopped when garbage
# collected) keeps worker memory from accumulating across chunks.
cl<-parallelly::makeClusterPSOCK(workers = 11)
cl<- parallelly::autoStopCluster(cl)
future::plan(future::cluster, workers = cl)
ds = disk.frame::get_chunk(df.frame, cid[ii], full.names = TRUE)
# NOTE(review): ..1/..2 rely on the chunk's column order being (id, Truth)
# -- confirm against the step-3.5 output schema.
res<-furrr::future_pmap_dfr(ds,~unbias_by_chunk(sentences = ..2, ids = ..1)) # careful
fst::write_fst(res, file.path(outdir, files_shortname[ii]))
pb$tick()
rm(list = "cl")
gc()
}
state <- 1 # No problem
} else if (keep_size == "no_touched") {
# KEEP SIZE == "no_touched" ------
info(logger, "'Leave as is' strategy selected")
state <- 1 # No problem
} else {
# wrong KEEP SIZE ------
message("keep_size method not defined")
state <- 0 # problem
}
## ---------------------------------------------------------------
## Write to disk -
## ---------------------------------------------------------------
# if (state > 0) {
# info(logger, "Writing to disk")
# # Write feather ---
# #* feather
# filename <- get_versioned_file_name("data_working/", paste("4_Unbiased", keep_size, sep = "_"), file_suffix = ".feather")
# write_feather(df, path = filename)
# }
| /R/4_unbias_length.R | no_license | camilodlt/ML_Gutenberg | R | false | false | 6,084 | r | ##################################################################
## Step 4: Unbias the lengths ##
##################################################################
source("R/common.R")
# Caller
step <- "Step 4: Unbias the lengths"
# Purpose
explanations <- "Models reproduce data biases.
We don't want our model to only learn to make long sentences.
Short sentences should also be corrected.
- From the long sentences, we generate sentences of all sizes
- Whether to tokenize or replace the sentences is a parameter
# that means that a sentence can either serve to generate sentences of various sizes
# (used multiple times, but different length)
# or be used only one time for an specific size
# (used one time)
- Save to disk
"
print(banner(step))
print(boxup(explanations, centre = F))
# LIBRARIES ------
suppressMessages(library(furrr))
suppressMessages(library(purrr))
suppressMessages(library(future))
suppressMessages(library(dplyr))
suppressMessages(library(tokenizers))
suppressMessages(library(disk.frame))
suppressMessages(library(progress))
#setup_disk.frame(workers = 2)
# HYPER ------
hyper1 <- '
# This script tries to reduce the bias towards large sentences.
# if keep_size = "constant": 1 subset of each sentence will be used as the Truth sentence
# its size will vary between one word and all the words (sampled)
# if keep_size= "multiply": a sentence with n words will have n subsets (one with one word,
# another with 2 words, ..., until the maximum number of words in the dataset)'
#* Define ---
keep_size <- "multiply" # constant or multiply or no_touched
#* Print to console ---
print(boxup(hyper1, centre = F))
print(sprintf("Keep_size set to: %s", keep_size))
# FILES ------
info(logger, "Loading data...")
# last_file<-last_version("data_working/", pattern = ".feather",overall_pattern = "ML_formatted")
# # READ FEATHER ------
# df<- feather::read_feather(last_file)
# # TO RM
# df<- df[1:2000000,]
df.frame <- disk.frame("data_working/3.5_ML_formatted")
print(sprintf("Number of chunks: %s", nchunks(df.frame)))
## ---------------------------------------------------------------
## Sample ngrams of diff size from sentences -
## ---------------------------------------------------------------
info(logger, "WORD COUNT PER SENTENCE")
# WORD COUNT PER SENTENCE ------
print("# sentence per word count")
freq <- cmap(df.frame, ~ count_words(.x[, "Truth"]$Truth), lazy = FALSE) %>% unlist()
table(freq) %>% print()
n_sen <- nrow(df.frame)
print(sprintf("Number of sentences: %s", n_sen))
max_word <- max(freq)
rm(freq)
# FN
unbias_by_chunk<- function(sentences, ids,...) {
#on.exit({rm(list = ls(1),pos=1);rm(list=ls());unlink(paste0(normalizePath(tempdir()), "/", dir(tempdir())), recursive = TRUE);gc()})
#sentences <- chunk[, "Truth"]$Truth # chunk or batch
#ids <- chunk[, "id"]$id # chunk or batch
# iterate over chunk
new_sentences_df <- map2_dfr(sentences, ids, function(x, y) {
max_word <- tokenizers::count_words(x)
df <- data.frame("ngram" = tokenizers::tokenize_ngrams(x, n_min = 1, n = max_word, simplify = TRUE))
df <- df %>% mutate(count = tokenizers::count_words(ngram))
df <- df %>%
group_by(count) %>%
tidyr::nest(data = c(ngram))
df <- df %>%
mutate("one" = map_chr(data, ~ sample(.[[1]], 1))) %>%
ungroup()
df$id <- y
df <- df %>% select(id, "Truth" = one)
return(df)
})
return(new_sentences_df)
}
# SAMPLE DIFFERENT SIZES ------
info(logger, "Sampling sentences...")
if (keep_size == "constant") {
# KEEP SIZE == "Constant" ------
info(logger, "Constant strategy selected")
df.frame_caller <- cmap(
df.frame,
function(chunk) {
sentences <- chunk[, "Truth"]$Truth # chunk or batch
# iterate over chunk
new_sentences <- map_chr(sentences, function(x) {
x <- tokenizers::tokenize_ngrams(x, n_min = 1, n = 16, simplify = T)
return(sample(x, 1))
})
new_sentences <- as_tibble(list(
"id" = chunk[, "id"]$id,
"Truth" = new_sentences
))
return(new_sentences)
}
)
df.frame_caller %>% compute(outdir = "data_working/4_unbias_constant", overwrite = TRUE)
state <- 1 # No problem
} else if (keep_size == "multiply") {
# KEEP SIZE == "Multiply" ------
info(logger, "Multiply strategy selected")
outdir <- "data_working/4_unbias_multiply/"
overwrite_check(outdir,overwrite = TRUE)
files <- list.files("data_working/3.5_ML_formatted/", full.names = TRUE)
files_shortname <- list.files("data_working/3.5_ML_formatted/", full.names = FALSE)
cid = get_chunk_ids(df.frame, full.names = TRUE)
pb<-progress::progress_bar$new(total = length(cid), force = T)
for(ii in seq_along(cid)){
cl<-parallelly::makeClusterPSOCK(workers = 11)
cl<- parallelly::autoStopCluster(cl)
future::plan(future::cluster, workers = cl)
ds = disk.frame::get_chunk(df.frame, cid[ii], full.names = TRUE)
res<-furrr::future_pmap_dfr(ds,~unbias_by_chunk(sentences = ..2, ids = ..1)) # careful
fst::write_fst(res, file.path(outdir, files_shortname[ii]))
pb$tick()
rm(list = "cl")
gc()
}
state <- 1 # No problem
} else if (keep_size == "no_touched") {
# KEEP SIZE == "no_touched" ------
info(logger, "'Leave as is' strategy selected")
state <- 1 # No problem
} else {
# wrong KEEP SIZE ------
message("keep_size method not defined")
state <- 0 # problem
}
## ---------------------------------------------------------------
## Write to disk -
## ---------------------------------------------------------------
# if (state > 0) {
# info(logger, "Writing to disk")
# # Write feather ---
# #* feather
# filename <- get_versioned_file_name("data_working/", paste("4_Unbiased", keep_size, sep = "_"), file_suffix = ".feather")
# write_feather(df, path = filename)
# }
|
###
### Before this script run utils.R
### All the RData containing the datasets can be built with data_preproc.R script
###
library(spBayes)
library(MBA)
library(fields)
library(geoR)
library(sp)
library(maptools)
library(rgdal)
library(MASS)
library(RColorBrewer)
library(gstat)
library(sf)
## Load training set ##
# Make sure that the current working directory is correct
# Change TRUE / FALSE to run both / one training set
train_both = TRUE
load("Train_Sud_145stations.RData")
if(train_both) {
tmp = Station.data
load("Train_Nord_106stations.RData")
Station.data = rbind(tmp, Station.data)
}
## NOTE ON TUNING PARAMETERS
# for smaller dataset (train_both = FALSE) use 3 for phi, 0.15 for sigma.sq, 0.2 for tau.sq
# for large dataset (train_both = TRUE) use 5 for phi, 0.05 for sigma.sq, 0.05 for tau.sq
# NOTE(review): when train_both is FALSE, `tmp` is never created, so this
# rm() emits a warning (harmless) -- confirm that is acceptable.
rm(train_both, tmp)
rm(lat_max, lat_min, long_max, long_min) # for the moment I remove them (what if we use both?)
# Response and covariates, extracted as plain vectors/matrices for spLM.
coords = as.matrix(Station.data[,c("Longitude","Latitude")])
DEMAND = Station.data[,c("N.Trips")]
beta.POPULATION = Station.data[,c("Block.population")]
beta.LANES = Station.data[,c("Lane.count")]
beta.SUBWAY = Station.data[,c("Dist.metro")]
beta.PROXIMITY = Station.data[,c("Proximity.score")]
beta.LANDMARKS = Station.data[,c("Landmarks")]
##### spLM SETUP #####
n.samples = 20000 #try also 5000,10000,20000
## Priors specification
# Pairwise distances between stations; units/class attributes are stripped
# so W behaves like a plain numeric matrix.
W = st_distance(st_as_sf(Station.data, coords = c("Longitude", "Latitude"), crs = 4326))
attr(W,"units") = NULL
attr(W,"class") = NULL
# Replace the zero diagonal with the smallest non-zero distance, so that
# min(W) below is strictly positive (keeps phi.prior.b finite).
diag(W) = min(W[W!=0])
# the closer is the minimum distance in the dataset, the lower the acceptance rate
min_dist = min(W)
max_dist = max(W)
### Priors for phi ###
# Uniform support for the decay parameter phi from the "effective range"
# rule: correlation drops to 0.05 at distance d when phi = -log(0.05)/d.
phi.prior.a = -log(0.05)/max_dist
phi.prior.b = -log(0.05)/min_dist
# Linear model frequentist fit
freq_model = lm(DEMAND~beta.POPULATION+beta.LANES+beta.SUBWAY)
summary(freq_model)
### Priors for beta,sigma2,tau2 ###
beta.ini <- as.numeric(freq_model$coeff)
(summary(freq_model)$sigma)^2 # estimated variance of residuals
# Rate : sets the proportion of spatial and random process in the priors for sigma2 and tau2
rate = 0.8
phi.ini = 0.035 # arbitrary value between prior.a and prior.b
# Starting values for the variance components, drawn from inverse-gamma
# priors that split the OLS residual variance between the spatial component
# (rate) and the nugget (1 - rate).
sigma2.ini = 1/rgamma(1,2,(summary(freq_model)$sigma)^2*rate) # beta = rate% of res std error
tau2.ini = 1/rgamma(1,2,(summary(freq_model)$sigma)^2*(1-rate)) # beta = (1-rate)% of res std error
rm(W, min_dist, max_dist)
###############################
###### 1. STANDARD MODEL ######
###############################
# Bayesian spatial regression (spBayes::spLM) with exponential covariance:
# DEMAND ~ POPULATION + LANES + SUBWAY.
sp.model.v1 <- spLM(DEMAND~beta.POPULATION+beta.LANES+beta.SUBWAY,
data=Station.data, coords=coords,
starting=list("phi"=phi.ini,"sigma.sq"=sigma2.ini,
"tau.sq"=tau2.ini),
tuning=list("phi"=5, "sigma.sq"=0.05, #try small tuning params
"tau.sq"=0.05),
priors=list("beta.flat", "phi.Unif"=c(phi.prior.a, phi.prior.b),
"sigma.sq.IG"=c(2, (summary(freq_model)$sigma)^2*rate),
"tau.sq.IG"=c(2, (summary(freq_model)$sigma)^2*(1-rate))),
cov.model="exponential", # EXPONENTIAL COVARIANCE function
n.samples=n.samples)
# Using smaller tuning parameters the acceptance rate can easily increase
# Letting the minimum distance in the phi prior tend to 0 decreases the
# acceptance rate less than increasing the tuning parameters does.
#summary(mcmc(sp.model$p.theta.samples))
# Posterior samples of beta coefficients and spatial effects (w)
burn.in = floor(0.5*n.samples) # try 0.25,0.5,0.75
sp.model.v1 = spRecover(sp.model.v1, start=burn.in)
beta.samples = sp.model.v1$p.beta.recover.samples
w.samples = sp.model.v1$p.w.recover.samples
#summary(beta.samples)
#summary(w.samples)
# Traceplots and posterior marginal distribution of beta parameters
x11()
par(mai=rep(0.4,4))
plot(beta.samples[,1:4])
# Traceplots and posterior marginal distribution of covariance parameters
x11()
par(mai=rep(0.4,4))
plot(sp.model.v1$p.theta.samples[,1:3])
sp.model.v1.mc = mcmc(sp.model.v1$p.theta.samples)
# Acceptance rate
1-rejectionRate(sp.model.v1.mc)
# Autocorrelation plot
x11()
acfplot(sp.model.v1.mc, lag.max=100)
# Cumulative mean plot
x11()
cumuplot(sp.model.v1.mc)
# Effective sample size:
effectiveSize(sp.model.v1.mc)
# Goodness of fit
# LPML_fun and WAIC are not defined in this file; presumably they come from
# utils.R (see the header note) -- confirm.
lpml.v1 = LPML_fun(sp.model.v1)
waic.v1 = WAIC(sp.model.v1)
################################
####### 2.with PROXIMITY #######
################################
# Model 2 adds the proximity-score covariate; variance priors are re-derived
# from this model's OLS residual variance.
n.samples = 20000 #try also 5000,10000,20000
freq_model = lm(DEMAND~beta.POPULATION+beta.LANES+beta.SUBWAY+beta.PROXIMITY)
(summary(freq_model)$sigma)^2 # estimated variance of residuals
sigma2.ini = 1/rgamma(1,2,(summary(freq_model)$sigma)^2*rate) # beta = rate% of res std error
tau2.ini = 1/rgamma(1,2,(summary(freq_model)$sigma)^2*(1-rate)) # beta = (1-rate)% of res std error
sp.model.v2 <- spLM(DEMAND~beta.POPULATION+beta.LANES+beta.SUBWAY+beta.PROXIMITY,
data=Station.data, coords=coords,
starting=list("phi"=phi.ini,"sigma.sq"=sigma2.ini,
"tau.sq"=tau2.ini),
tuning=list("phi"=5, "sigma.sq"=0.05,
"tau.sq"=0.05),
priors=list("beta.flat", "phi.Unif"=c(phi.prior.a, phi.prior.b),
"sigma.sq.IG"=c(2, (summary(freq_model)$sigma)^2*rate),
"tau.sq.IG"=c(2, (summary(freq_model)$sigma)^2*(1-rate))),
cov.model="exponential", # EXPONENTIAL COVARIANCE function
n.samples=n.samples)
#summary(mcmc(sp.model.v2$p.theta.samples))
# Posterior samples of beta coefficients and spatial effects (w)
burn.in = floor(0.5*n.samples) #0.25,0.5,0.75
sp.model.v2 = spRecover(sp.model.v2, start=burn.in)
beta.samples.v2 = sp.model.v2$p.beta.recover.samples
w.samples.v2 = sp.model.v2$p.w.recover.samples
# Traceplots and posterior marginal distribution of beta parameters
x11()
par(mai=rep(0.4,4))
plot(beta.samples.v2[,1:5])
# Traceplots and posterior marginal distribution of covariance parameters
x11()
par(mai=rep(0.4,4))
plot(sp.model.v2$p.theta.samples[,1:3])
sp.model.v2.mc = mcmc(sp.model.v2$p.theta.samples)
# Acceptance rate
1-rejectionRate(sp.model.v2.mc)
# Autocorrelation plot
x11()
acfplot(sp.model.v2.mc,lag.max = 100)
# Cumulative mean plot
x11()
cumuplot(sp.model.v2.mc)
# Effective sample size:
effectiveSize(sp.model.v2.mc)
# Goodness of fit
lpml.v2 = LPML_fun(sp.model.v2)
waic.v2 = WAIC(sp.model.v2)
##############################################
####### 3.with PROXIMITY and LANDMARKS #######
##############################################
# Model 3 adds both the proximity score and the landmark count; variance
# priors are again re-derived from this model's OLS residual variance.
n.samples = 20000 #try also 5000,10000,20000
freq_model = lm(DEMAND~beta.POPULATION+beta.LANES+beta.SUBWAY+beta.PROXIMITY+beta.LANDMARKS)
(summary(freq_model)$sigma)^2 # estimated variance of residuals
sigma2.ini = 1/rgamma(1,2,(summary(freq_model)$sigma)^2*rate) # beta = rate% of res std error
tau2.ini = 1/rgamma(1,2,(summary(freq_model)$sigma)^2*(1-rate)) # beta = (1-rate)% of res std error
sp.model.v3 <- spLM(DEMAND~beta.POPULATION+beta.LANES+beta.SUBWAY+beta.PROXIMITY+beta.LANDMARKS,
data=Station.data, coords=coords,
starting=list("phi"=phi.ini,"sigma.sq"=sigma2.ini,
"tau.sq"=tau2.ini),
tuning=list("phi"=5, "sigma.sq"=0.05,
"tau.sq"=0.05),
priors=list("beta.flat", "phi.Unif"=c(phi.prior.a, phi.prior.b),
"sigma.sq.IG"=c(2, (summary(freq_model)$sigma)^2*rate),
"tau.sq.IG"=c(2, (summary(freq_model)$sigma)^2*(1-rate))),
cov.model="exponential", # EXPONENTIAL COVARIANCE function
n.samples=n.samples)
#summary(mcmc(sp.model.v3$p.theta.samples))
# Posterior samples of beta coefficients and spatial effects (w)
burn.in = floor(0.5*n.samples) #0.25,0.5,0.75
sp.model.v3 = spRecover(sp.model.v3, start=burn.in)
beta.samples.v3 = sp.model.v3$p.beta.recover.samples
w.samples.v3 = sp.model.v3$p.w.recover.samples
#summary(beta.samples.v3)
#summary(w.samples.v3)
# Traceplots and posterior marginal distribution of beta parameters
x11()
par(mai=rep(0.4,4))
plot(beta.samples.v3[,1:6])
# Traceplots and posterior marginal distribution of covariance parameters
x11()
par(mai=rep(0.4,4))
plot(sp.model.v3$p.theta.samples[,1:3])
sp.model.v3.mc = mcmc(sp.model.v3$p.theta.samples)
# Acceptance rate
1-rejectionRate(sp.model.v3.mc)
# Autocorrelation plot
x11()
acfplot(sp.model.v3.mc, lag.max = 100)
# Cumulative mean plot
x11()
cumuplot(sp.model.v3.mc)
# Effective sample size:
effectiveSize(sp.model.v3.mc)
# Goodness of fit
lpml.v3 = LPML_fun(sp.model.v3)
waic.v3 = WAIC(sp.model.v3)
#################################
####### 4. with LANDMARKS #######
#################################
# Model 4 adds the landmark count (but not the proximity score); priors are
# re-derived from this model's OLS residual variance.
n.samples = 20000 #try also 5000,10000,20000
freq_model = lm(DEMAND~beta.POPULATION+beta.LANES+beta.SUBWAY+beta.LANDMARKS)
(summary(freq_model)$sigma)^2 # estimated variance of residuals
sigma2.ini = 1/rgamma(1,2,(summary(freq_model)$sigma)^2*rate) # beta = rate% of res std error
tau2.ini = 1/rgamma(1,2,(summary(freq_model)$sigma)^2*(1-rate)) # beta = (1-rate)% of res std error
sp.model.v4 <- spLM(DEMAND~beta.POPULATION+beta.LANES+beta.SUBWAY+beta.LANDMARKS,
data=Station.data, coords=coords,
starting=list("phi"=phi.ini,"sigma.sq"=sigma2.ini,
"tau.sq"=tau2.ini),
tuning=list("phi"=5, "sigma.sq"=0.05, #try small tuning params
"tau.sq"=0.05),
priors=list("beta.flat", "phi.Unif"=c(phi.prior.a, phi.prior.b),
"sigma.sq.IG"=c(2, (summary(freq_model)$sigma)^2*rate),
"tau.sq.IG"=c(2, (summary(freq_model)$sigma)^2*(1-rate))),
cov.model="exponential", # EXPONENTIAL COVARIANCE function
n.samples=n.samples)
#summary(mcmc(sp.model.v4$p.theta.samples))
# Posterior samples of beta coefficients and spatial effects (w)
burn.in = floor(0.5*n.samples) #0.25,0.5,0.75
sp.model.v4 = spRecover(sp.model.v4, start=burn.in)
beta.samples.v4 = sp.model.v4$p.beta.recover.samples
w.samples.v4 = sp.model.v4$p.w.recover.samples
#summary(beta.samples.v4)
#summary(w.samples.v4)
# Traceplots and posterior marginal distribution of beta parameters
x11()
par(mai=rep(0.4,4))
plot(beta.samples.v4[,1:5])
# Traceplots and posterior marginal distribution of covariance parameters
x11()
par(mai=rep(0.4,4))
plot(sp.model.v4$p.theta.samples[,1:3])
sp.model.v4.mc = mcmc(sp.model.v4$p.theta.samples)
# Acceptance rate
1-rejectionRate(sp.model.v4.mc)
# Autocorrelation plot
x11()
acfplot(sp.model.v4.mc, lag.max = 100)
# Cumulative mean plot
#x11()
#cumuplot(sp.model.v4.mc)
# Effective sample size:
effectiveSize(sp.model.v4.mc)
# Goodness of fit
lpml.v4 = LPML_fun(sp.model.v4)
waic.v4 = WAIC(sp.model.v4)
##### SUMMARIZE GOODNESS OF FIT CRITERIA #####
# Collect LPML and WAIC for the four fitted models in a single matrix;
# the MSE row is filled later, in the prediction section.
gof <- matrix(nrow = 3, ncol = 4,
              dimnames = list(c("LPML", "WAIC", "MSE"),
                              c("model1", "model2", "model3", "model4")))
gof[1, ] <- c(lpml.v1, lpml.v2, lpml.v3, lpml.v4)
gof[2, ] <- c(waic.v1, waic.v2, waic.v3, waic.v4)
gof
# Drop intermediate objects that are no longer needed, to free memory.
rm(lpml.v1, lpml.v2, lpml.v3, lpml.v4, waic.v1, waic.v2, waic.v3, waic.v4)
rm(LPML_fun, WAIC)
rm(n.samples, phi.ini, phi.prior.a, phi.prior.b, rate, sigma2.ini, tau2.ini, beta.ini)
rm(sp.model.v1.mc, sp.model.v2.mc, sp.model.v3.mc, sp.model.v4.mc)
rm(beta.LANDMARKS, beta.LANES, beta.POPULATION, beta.PROXIMITY, beta.SUBWAY)
rm(beta.samples, beta.samples.v2, beta.samples.v3, beta.samples.v4)
rm(burn.in, freq_model, w.samples, w.samples.v2, w.samples.v3, w.samples.v4)
######################
##### PREDICTION #####
######################
# Plot the prediction surface
# sp.model: model to use for prediction
# coords: coords of the prediction points
# covars: design matrix (i.e. covariates) of prediction points
# n.model: number of the model (used only for plot labelling)
# Returns the full spPredict object (posterior predictive samples).
# NOTE(review): this definition masks the stats::predict S3 generic for the
# rest of the script -- every later predict(...) call resolves here.
# Consider renaming (e.g. predict_surface) if the generic is ever needed.
predict <- function(sp.model, coords, covars, n.model) {
pred = spPredict(sp.model, pred.coords = coords, pred.covars = covars)
# Point prediction = posterior predictive mean at each location.
y.hat <- rowMeans(pred$p.y.predictive.samples)
x11()
# Interpolate the predicted means onto a 100x100 surface for plotting.
y.pred.surf <- mba.surf(cbind(coords, y.hat), no.X=100, no.Y=100, extend=TRUE)$xyz.est
image(y.pred.surf, xaxs = "r", yaxs = "r", main=paste("Predicted response Model", n.model))
points(coords, pch=1, cex=1)
contour(y.pred.surf, add=T)
# NOTE(review): the legend position (1.5, 2.5) is hard-coded in data
# coordinates -- confirm it falls inside the plotting region.
legend(1.5,2.5, legend=c("Obs.", "Pred."), pch=c(1,19),
cex=c(1,1), bg="white")
return(pred)
}
# Mean squared error between observed and predicted values.
#
# Arguments:
#   true: numeric vector of observed values.
#   pred: numeric vector of predictions, same length as `true`.
# Returns a single numeric value, mean((true - pred)^2).
# Stops with an error when the lengths differ, since R's silent recycling
# would otherwise produce a misleading result.
mse <- function(true, pred) {
  if (length(true) != length(pred)) {
    stop("Lengths don't match")
  }
  # mean() replaces the hand-rolled sum(...) / length(...) of the original;
  # the result is identical (including NA propagation).
  mean((true - pred)^2)
}
####################################
###### PREDICTION (on a grid) ######
####################################
# Predict demand over a regular grid of candidate locations (models 1 and 4
# only). predict() below is the custom plotting function defined above,
# NOT stats::predict.
load("Prediction_Grid.RData")
##### Model 1 Grid Prediction : DEMAND ~ POPULATION + LANES + SUBWAY #####
covars = cbind(rep(1.0, length(Grid.data[,1])), as.matrix(Grid.data[,c("Block.population", "Lane.count", "Dist.metro")])) # Add the intercept
pred.v1 = predict(sp.model.v1, Grid.data[,c("Longitude", "Latitude")], covars, n.model=1)
##### Model 4 Grid Prediction : DEMAND ~ POPULATION + LANES + SUBWAY + LANDMARKS #####
covars = cbind(rep(1.0, length(Grid.data[,1])), as.matrix(Grid.data[,c("Block.population", "Lane.count", "Dist.metro", "Landmarks")])) # Add the intercept
pred.v4 = predict(sp.model.v4, Grid.data[,c("Longitude", "Latitude")], covars, n.model=4)
############################################
###### PREDICTION (at station points) ######
############################################
# Out-of-sample evaluation at the held-out test stations; MSE per model is
# stored in the third row of gof. NOTE(review): pred.v1/pred.v4 from the
# grid step above are overwritten here.
load("Test_centre.RData")
coords = as.matrix(Test_centre[,c("Longitude","Latitude")])
DEMAND = Test_centre$N.Trips
# Plot of the real observed demand
x11()
obs.surf <-
mba.surf(cbind(coords, DEMAND), no.X=100, no.Y=100, extend=TRUE)$xyz.est
image(obs.surf, xaxs = "r", yaxs = "r", main="Observed response")
points(coords)
contour(obs.surf, add=T)
##### Model 1 Stations Prediction : DEMAND ~ POPULATION + LANES + SUBWAY #####
covars = cbind(rep(1.0, length(Test_centre[,1])), as.matrix(Test_centre[,c("Block.population", "Lane.count", "Dist.metro")]))
pred.v1 = predict(sp.model.v1, Test_centre[,c("Longitude", "Latitude")], covars, n.model=1)
mse1 = mse(Test_centre$N.Trips, rowMeans(pred.v1$p.y.predictive.samples))
##### Model 2 Stations Prediction : DEMAND ~ POPULATION + LANES + SUBWAY + PROXIMITY #####
covars = cbind(rep(1.0, length(Test_centre[,1])), as.matrix(Test_centre[,c("Block.population", "Lane.count", "Dist.metro", "Proximity.score")]))
pred.v2 = predict(sp.model.v2, Test_centre[,c("Longitude", "Latitude")], covars, n.model=2)
mse2 = mse(Test_centre$N.Trips, rowMeans(pred.v2$p.y.predictive.samples))
##### Model 3 Stations Prediction : DEMAND ~ POPULATION + LANES + SUBWAY + PROXIMITY + LANDMARKS #####
covars = cbind(rep(1.0, length(Test_centre[,1])), as.matrix(Test_centre[,c("Block.population", "Lane.count", "Dist.metro", "Proximity.score", "Landmarks")]))
pred.v3 = predict(sp.model.v3, Test_centre[,c("Longitude", "Latitude")], covars, n.model=3)
mse3 = mse(Test_centre$N.Trips, rowMeans(pred.v3$p.y.predictive.samples))
##### Model 4 Stations Prediction : DEMAND ~ POPULATION + LANES + SUBWAY + LANDMARKS #####
covars = cbind(rep(1.0, length(Test_centre[,1])), as.matrix(Test_centre[,c("Block.population", "Lane.count", "Dist.metro", "Landmarks")]))
pred.v4 = predict(sp.model.v4, Test_centre[,c("Longitude", "Latitude")], covars, n.model=4)
mse4 = mse(Test_centre$N.Trips, rowMeans(pred.v4$p.y.predictive.samples))
# Fill the MSE row of the goodness-of-fit summary matrix and display it.
gof[3,]=c(mse1,mse2,mse3,mse4)
gof
| /script/models.R | no_license | edpere/NYC-bikes-bayesian | R | false | false | 15,852 | r | ###
### Before this script run utils.R
### All the RData containg the datasets can be build with data_preproc.R script
###
library(spBayes)
library(MBA)
library(fields)
library(geoR)
library(sp)
library(maptools)
library(rgdal)
library(MASS)
library(RColorBrewer)
library(gstat)
library(sf)
## Load training set ##
# Make sure that the current working director is correct
# Change TRUE / FALSE to run both / one training set
train_both = TRUE
load("Train_Sud_145stations.RData")
if(train_both) {
tmp = Station.data
load("Train_Nord_106stations.RData")
Station.data = rbind(tmp, Station.data)
}
## NOTE ON TUNING PARAMETERS
# for smaller dataset (train_both = FALSE) use 3 for phi, 0.15 for sigma.sq, 0.2 for tau.sq
# for large dataset (train_both = TRUE) use 5 for phi, 0.05 for sigma.sq, 0.05 for tau.sq
rm(train_both, tmp)
rm(lat_max, lat_min, long_max, long_min) # for the moment I remove them (what if we use both?)
coords = as.matrix(Station.data[,c("Longitude","Latitude")])
DEMAND = Station.data[,c("N.Trips")]
beta.POPULATION = Station.data[,c("Block.population")]
beta.LANES = Station.data[,c("Lane.count")]
beta.SUBWAY = Station.data[,c("Dist.metro")]
beta.PROXIMITY = Station.data[,c("Proximity.score")]
beta.LANDMARKS = Station.data[,c("Landmarks")]
##### spLM SETUP #####
n.samples = 20000 #try also 5000,10000,20000
## Priors specification
W = st_distance(st_as_sf(Station.data, coords = c("Longitude", "Latitude"), crs = 4326))
attr(W,"units") = NULL
attr(W,"class") = NULL
diag(W) = min(W[W!=0])
# the closer is the minimum distance in the dataset, the lower the acceptance rate
min_dist = min(W)
max_dist = max(W)
### Priors for phi ###
phi.prior.a = -log(0.05)/max_dist
phi.prior.b = -log(0.05)/min_dist
# Linear model frequentist fit
freq_model = lm(DEMAND~beta.POPULATION+beta.LANES+beta.SUBWAY)
summary(freq_model)
### Priors for beta,sigma2,tau2 ###
beta.ini <- as.numeric(freq_model$coeff)
(summary(freq_model)$sigma)^2 # estimated variance of residuals
# Rate : sets the proportion of spatial and random process in the priors for sigma2 and tau2
rate = 0.8
phi.ini = 0.035 # arbitrary value between prior.a and prior.b
sigma2.ini = 1/rgamma(1,2,(summary(freq_model)$sigma)^2*rate) # beta = rate% of res std error
tau2.ini = 1/rgamma(1,2,(summary(freq_model)$sigma)^2*(1-rate)) # beta = (1-rate)% of res std error
rm(W, min_dist, max_dist)
###############################
###### 1. STANDARD MODEL ######
###############################
sp.model.v1 <- spLM(DEMAND~beta.POPULATION+beta.LANES+beta.SUBWAY,
data=Station.data, coords=coords,
starting=list("phi"=phi.ini,"sigma.sq"=sigma2.ini,
"tau.sq"=tau2.ini),
tuning=list("phi"=5, "sigma.sq"=0.05, #try small tuning params
"tau.sq"=0.05),
priors=list("beta.flat", "phi.Unif"=c(phi.prior.a, phi.prior.b),
"sigma.sq.IG"=c(2, (summary(freq_model)$sigma)^2*rate),
"tau.sq.IG"=c(2, (summary(freq_model)$sigma)^2*(1-rate))),
cov.model="exponential", # EXPONENTIAL COVARIANCE function
n.samples=n.samples)
# Using smaller tuning parameters the acceptance rate can easily increase
# Facendo tendere la distanza minima per phi prior a 0, l'acceptance rate diminuisce
# meno di quanto non faccia aumentando i tuning parameters
#summary(mcmc(sp.model$p.theta.samples))
# Posterior samples of beta coefficients and spatial effects (w)
burn.in = floor(0.5*n.samples) # try 0.25,0.5,0.75
sp.model.v1 = spRecover(sp.model.v1, start=burn.in)
beta.samples = sp.model.v1$p.beta.recover.samples
w.samples = sp.model.v1$p.w.recover.samples
#summary(beta.samples)
#summary(w.samples)
# Traceplots and posterior marginal distribution of beta parameters
x11()
par(mai=rep(0.4,4))
plot(beta.samples[,1:4])
# Traceplots and posterior marginal distribution of covariance parameters
x11()
par(mai=rep(0.4,4))
plot(sp.model.v1$p.theta.samples[,1:3])
sp.model.v1.mc = mcmc(sp.model.v1$p.theta.samples)
# Acceptance rate
1-rejectionRate(sp.model.v1.mc)
# Autocorrelation plot
x11()
acfplot(sp.model.v1.mc, lag.max=100)
# Cumulative mean plot
x11()
cumuplot(sp.model.v1.mc)
# Effective sample size:
effectiveSize(sp.model.v1.mc)
# Goodness of fit
lpml.v1 = LPML_fun(sp.model.v1)
waic.v1 = WAIC(sp.model.v1)
################################
####### 2.with PROXIMITY #######
################################
n.samples = 20000 #try also 5000,10000,20000
freq_model = lm(DEMAND~beta.POPULATION+beta.LANES+beta.SUBWAY+beta.PROXIMITY)
(summary(freq_model)$sigma)^2 # estimated variance of residuals
sigma2.ini = 1/rgamma(1,2,(summary(freq_model)$sigma)^2*rate) # beta = rate% of res std error
tau2.ini = 1/rgamma(1,2,(summary(freq_model)$sigma)^2*(1-rate)) # beta = (1-rate)% of res std error
sp.model.v2 <- spLM(DEMAND~beta.POPULATION+beta.LANES+beta.SUBWAY+beta.PROXIMITY,
data=Station.data, coords=coords,
starting=list("phi"=phi.ini,"sigma.sq"=sigma2.ini,
"tau.sq"=tau2.ini),
tuning=list("phi"=5, "sigma.sq"=0.05,
"tau.sq"=0.05),
priors=list("beta.flat", "phi.Unif"=c(phi.prior.a, phi.prior.b),
"sigma.sq.IG"=c(2, (summary(freq_model)$sigma)^2*rate),
"tau.sq.IG"=c(2, (summary(freq_model)$sigma)^2*(1-rate))),
cov.model="exponential", # EXPONENTIAL COVARIANCE function
n.samples=n.samples)
#summary(mcmc(sp.model.v2$p.theta.samples))
# Posterior samples of beta coefficients and spatial effects (w)
burn.in = floor(0.5*n.samples) #0.25,0.5,0.75
sp.model.v2 = spRecover(sp.model.v2, start=burn.in)
beta.samples.v2 = sp.model.v2$p.beta.recover.samples
w.samples.v2 = sp.model.v2$p.w.recover.samples
# Traceplots and posterior marginal distribution of beta parameters
x11()
par(mai=rep(0.4,4))
plot(beta.samples.v2[,1:5])
# Traceplots and posterior marginal distribution of covariance parameters
x11()
par(mai=rep(0.4,4))
plot(sp.model.v2$p.theta.samples[,1:3])
sp.model.v2.mc = mcmc(sp.model.v2$p.theta.samples)
# Acceptance rate
1-rejectionRate(sp.model.v2.mc)
# Autocorrelation plot
x11()
acfplot(sp.model.v2.mc,lag.max = 100)
# Cumulative mean plot
x11()
cumuplot(sp.model.v2.mc)
# Effective sample size:
effectiveSize(sp.model.v2.mc)
# Goodness of fit
lpml.v2 = LPML_fun(sp.model.v2)
waic.v2 = WAIC(sp.model.v2)
##############################################
####### 3.with PROXIMITY and LANDMARKS #######
##############################################
n.samples = 20000 #try also 5000,10000,20000
freq_model = lm(DEMAND~beta.POPULATION+beta.LANES+beta.SUBWAY+beta.PROXIMITY+beta.LANDMARKS)
(summary(freq_model)$sigma)^2 # estimated variance of residuals
sigma2.ini = 1/rgamma(1,2,(summary(freq_model)$sigma)^2*rate) # beta = rate% of res std error
tau2.ini = 1/rgamma(1,2,(summary(freq_model)$sigma)^2*(1-rate)) # beta = (1-rate)% of res std error
sp.model.v3 <- spLM(DEMAND~beta.POPULATION+beta.LANES+beta.SUBWAY+beta.PROXIMITY+beta.LANDMARKS,
data=Station.data, coords=coords,
starting=list("phi"=phi.ini,"sigma.sq"=sigma2.ini,
"tau.sq"=tau2.ini),
tuning=list("phi"=5, "sigma.sq"=0.05,
"tau.sq"=0.05),
priors=list("beta.flat", "phi.Unif"=c(phi.prior.a, phi.prior.b),
"sigma.sq.IG"=c(2, (summary(freq_model)$sigma)^2*rate),
"tau.sq.IG"=c(2, (summary(freq_model)$sigma)^2*(1-rate))),
cov.model="exponential", # EXPONENTIAL COVARIANCE function
n.samples=n.samples)
#summary(mcmc(sp.model.v3$p.theta.samples))
# Posterior samples of beta coefficients and spatial effects (w)
burn.in = floor(0.5*n.samples) #0.25,0.5,0.75
sp.model.v3 = spRecover(sp.model.v3, start=burn.in)
beta.samples.v3 = sp.model.v3$p.beta.recover.samples
w.samples.v3 = sp.model.v3$p.w.recover.samples
#summary(beta.samples.v3)
#summary(w.samples.v3)
# Traceplots and posterior marginal distribution of beta parameters
x11()
par(mai=rep(0.4,4))
plot(beta.samples.v3[,1:6])
# Traceplots and posterior marginal distribution of covariance parameters
x11()
par(mai=rep(0.4,4))
plot(sp.model.v3$p.theta.samples[,1:3])
sp.model.v3.mc = mcmc(sp.model.v3$p.theta.samples)
# Acceptance rate
1-rejectionRate(sp.model.v3.mc)
# Autocorrelation plot
x11()
acfplot(sp.model.v3.mc, lag.max = 100)
# Cumulative mean plot
x11()
cumuplot(sp.model.v3.mc)
# Effective sample size:
effectiveSize(sp.model.v3.mc)
# Goodness of fit
lpml.v3 = LPML_fun(sp.model.v3)
waic.v3 = WAIC(sp.model.v3)
#################################
####### 4. with LANDMARKS #######
#################################
# Model v4: DEMAND ~ POPULATION + LANES + SUBWAY + LANDMARKS.
# A frequentist lm() fit seeds the variance starting values: the residual
# variance is split between sigma^2 (spatial) and tau^2 (nugget) via `rate`
# (a global set earlier in the file - not visible here).
n.samples = 20000 #try also 5000,10000,20000
freq_model = lm(DEMAND~beta.POPULATION+beta.LANES+beta.SUBWAY+beta.LANDMARKS)
(summary(freq_model)$sigma)^2 # estimated variance of residuals
sigma2.ini = 1/rgamma(1,2,(summary(freq_model)$sigma)^2*rate) # beta = rate% of res std error
tau2.ini = 1/rgamma(1,2,(summary(freq_model)$sigma)^2*(1-rate)) # beta = (1-rate)% of res std error
# Bayesian spatial linear model (spBayes::spLM) with exponential covariance.
sp.model.v4 <- spLM(DEMAND~beta.POPULATION+beta.LANES+beta.SUBWAY+beta.LANDMARKS,
                    data=Station.data, coords=coords,
                    starting=list("phi"=phi.ini,"sigma.sq"=sigma2.ini,
                                  "tau.sq"=tau2.ini),
                    tuning=list("phi"=5, "sigma.sq"=0.05, #try small tuning params
                                "tau.sq"=0.05),
                    priors=list("beta.flat", "phi.Unif"=c(phi.prior.a, phi.prior.b),
                                "sigma.sq.IG"=c(2, (summary(freq_model)$sigma)^2*rate),
                                "tau.sq.IG"=c(2, (summary(freq_model)$sigma)^2*(1-rate))),
                    cov.model="exponential", # EXPONENTIAL COVARIANCE function
                    n.samples=n.samples)
#summary(mcmc(sp.model.v4$p.theta.samples))
# Posterior samples of beta coefficients and spatial effects (w)
burn.in = floor(0.5*n.samples) #0.25,0.5,0.75
sp.model.v4 = spRecover(sp.model.v4, start=burn.in)
beta.samples.v4 = sp.model.v4$p.beta.recover.samples
w.samples.v4 = sp.model.v4$p.w.recover.samples
#summary(beta.samples.v4)
#summary(w.samples.v4)
# Traceplots and posterior marginal distribution of beta parameters
# (5 columns: intercept + 4 covariates for model v4)
x11()
par(mai=rep(0.4,4))
plot(beta.samples.v4[,1:5])
# Traceplots and posterior marginal distribution of covariance parameters
x11()
par(mai=rep(0.4,4))
plot(sp.model.v4$p.theta.samples[,1:3])
sp.model.v4.mc = mcmc(sp.model.v4$p.theta.samples)
# Acceptance rate (printed, not stored)
1-rejectionRate(sp.model.v4.mc)
# Autocorrelation plot
x11()
acfplot(sp.model.v4.mc, lag.max = 100)
# Cumulative mean plot
#x11()
#cumuplot(sp.model.v4.mc)
# Effective sample size:
effectiveSize(sp.model.v4.mc)
# Goodness of fit
lpml.v4 = LPML_fun(sp.model.v4)
waic.v4 = WAIC(sp.model.v4)
##### SUMMARIZE GOODNESS OF FIT CRITERIA #####
# Rows: LPML, WAIC, MSE (row 3 is filled later by the station-prediction
# section); columns: the four candidate models.
gof = matrix(nrow=3, ncol=4)
rownames(gof) <- c("LPML", "WAIC", "MSE")
colnames(gof) <- c("model1", "model2", "model3", "model4")
gof[1,1] = lpml.v1
gof[1,2] = lpml.v2
gof[1,3] = lpml.v3
gof[1,4] = lpml.v4
gof[2,1] = waic.v1
gof[2,2] = waic.v2
gof[2,3] = waic.v3
gof[2,4] = waic.v4
gof
# Free intermediate objects; the fitted sp.model.v* objects and `gof`
# are kept for the prediction section below.
rm(lpml.v1, lpml.v2, lpml.v3, lpml.v4, waic.v1, waic.v2, waic.v3, waic.v4)
rm(LPML_fun, WAIC)
rm(n.samples, phi.ini, phi.prior.a, phi.prior.b, rate, sigma2.ini, tau2.ini, beta.ini)
rm(sp.model.v1.mc, sp.model.v2.mc, sp.model.v3.mc, sp.model.v4.mc)
rm(beta.LANDMARKS, beta.LANES, beta.POPULATION, beta.PROXIMITY, beta.SUBWAY)
rm(beta.samples, beta.samples.v2, beta.samples.v3, beta.samples.v4)
rm(burn.in, freq_model, w.samples, w.samples.v2, w.samples.v3, w.samples.v4)
######################
##### PREDICTION #####
######################
# Plot the prediction surface and return the spPredict result.
# NOTE(review): this function masks the S3 generic stats::predict for the rest
# of the session; callers below rely on this name, so it is kept, but a rename
# (e.g. predict_surface) would be safer.
# sp.model: model to use for prediction
# coords: coords of the prediction points
# covars: design matrix (i.e. covariates) of prediction points
# n.model: number of the model (used only for plot labelling)
predict <- function(sp.model, coords, covars, n.model) {
  pred = spPredict(sp.model, pred.coords = coords, pred.covars = covars)
  # Posterior predictive mean at each prediction location.
  y.hat <- rowMeans(pred$p.y.predictive.samples)
  x11()
  # Interpolated surface of the predicted response (MBA::mba.surf).
  y.pred.surf <- mba.surf(cbind(coords, y.hat), no.X=100, no.Y=100, extend=TRUE)$xyz.est
  image(y.pred.surf, xaxs = "r", yaxs = "r", main=paste("Predicted response Model", n.model))
  points(coords, pch=1, cex=1)
  contour(y.pred.surf, add=T)
  # NOTE(review): legend position (1.5, 2.5) is in data coordinates - assumes
  # the coordinate range covers that point; confirm for other study areas.
  legend(1.5,2.5, legend=c("Obs.", "Pred."), pch=c(1,19),
         cex=c(1,1), bg="white")
  return(pred)
}
# Compute MSE
# Mean squared error between observed and predicted values.
# true: numeric vector of observations; pred: predictions of the same length.
# Stops with an error if the two vectors differ in length.
mse <- function(true, pred) {
  if (length(true) != length(pred)) {
    stop("Lengths don't match")
  }
  sq.err <- (true - pred)^2
  mean(sq.err)
}
####################################
###### PREDICTION (on a grid) ######
####################################
# Loads a regular grid of candidate locations (provides `Grid.data`) and plots
# the predictive surface for models 1 and 4 using the predict() helper above.
load("Prediction_Grid.RData")
##### Model 1 Grid Prediction : DEMAND ~ POPULATION + LANES + SUBWAY #####
covars = cbind(rep(1.0, length(Grid.data[,1])), as.matrix(Grid.data[,c("Block.population", "Lane.count", "Dist.metro")])) # Add the intercept
pred.v1 = predict(sp.model.v1, Grid.data[,c("Longitude", "Latitude")], covars, n.model=1)
##### Model 4 Grid Prediction : DEMAND ~ POPULATION + LANES + SUBWAY + LANDMARKS #####
covars = cbind(rep(1.0, length(Grid.data[,1])), as.matrix(Grid.data[,c("Block.population", "Lane.count", "Dist.metro", "Landmarks")])) # Add the intercept
pred.v4 = predict(sp.model.v4, Grid.data[,c("Longitude", "Latitude")], covars, n.model=4)
############################################
###### PREDICTION (at station points) ######
############################################
# Held-out station data (provides `Test_centre`): predict demand at each test
# station with every model and record out-of-sample MSE in row 3 of `gof`.
load("Test_centre.RData")
coords = as.matrix(Test_centre[,c("Longitude","Latitude")])
DEMAND = Test_centre$N.Trips
# Plot of the real observed demand
x11()
obs.surf <-
  mba.surf(cbind(coords, DEMAND), no.X=100, no.Y=100, extend=TRUE)$xyz.est
image(obs.surf, xaxs = "r", yaxs = "r", main="Observed response")
points(coords)
contour(obs.surf, add=T)
##### Model 1 Stations Prediction : DEMAND ~ POPULATION + LANES + SUBWAY #####
covars = cbind(rep(1.0, length(Test_centre[,1])), as.matrix(Test_centre[,c("Block.population", "Lane.count", "Dist.metro")]))
pred.v1 = predict(sp.model.v1, Test_centre[,c("Longitude", "Latitude")], covars, n.model=1)
mse1 = mse(Test_centre$N.Trips, rowMeans(pred.v1$p.y.predictive.samples))
##### Model 2 Stations Prediction : DEMAND ~ POPULATION + LANES + SUBWAY + PROXIMITY #####
covars = cbind(rep(1.0, length(Test_centre[,1])), as.matrix(Test_centre[,c("Block.population", "Lane.count", "Dist.metro", "Proximity.score")]))
pred.v2 = predict(sp.model.v2, Test_centre[,c("Longitude", "Latitude")], covars, n.model=2)
mse2 = mse(Test_centre$N.Trips, rowMeans(pred.v2$p.y.predictive.samples))
##### Model 3 Stations Prediction : DEMAND ~ POPULATION + LANES + SUBWAY + PROXIMITY + LANDMARKS #####
covars = cbind(rep(1.0, length(Test_centre[,1])), as.matrix(Test_centre[,c("Block.population", "Lane.count", "Dist.metro", "Proximity.score", "Landmarks")]))
pred.v3 = predict(sp.model.v3, Test_centre[,c("Longitude", "Latitude")], covars, n.model=3)
mse3 = mse(Test_centre$N.Trips, rowMeans(pred.v3$p.y.predictive.samples))
##### Model 4 Stations Prediction : DEMAND ~ POPULATION + LANES + SUBWAY + LANDMARKS #####
covars = cbind(rep(1.0, length(Test_centre[,1])), as.matrix(Test_centre[,c("Block.population", "Lane.count", "Dist.metro", "Landmarks")]))
pred.v4 = predict(sp.model.v4, Test_centre[,c("Longitude", "Latitude")], covars, n.model=4)
mse4 = mse(Test_centre$N.Trips, rowMeans(pred.v4$p.y.predictive.samples))
# Fill the MSE row of the goodness-of-fit table created earlier.
gof[3,]=c(mse1,mse2,mse3,mse4)
gof
|
#' Andrews Experimental Forest vertebrates in Mack Creek
#'
#' Cutthroat trout and salamander sizes
#'
#' @source {Gregory, S.V. and I. Arismendi. 2020. Aquatic Vertebrate Population Study in Mack Creek, Andrews Experimental Forest, 1987 to present ver 14. Environmental Data Initiative. https://doi.org/10.6073/pasta/7c78d662e847cdbe33584add8f809165 (Accessed 2021-02-20).}
#' \url{https://portal.edirepository.org/nis/mapbrowse?packageid=knb-lter-and.4027.14}
"and_vertebrates"
| /R/and_vertebrates_doc.R | permissive | sophiasternberg/ssandv | R | false | false | 483 | r |
#' Andrews Experimental Forest vertebrates in Mack Creek
#'
#' Cutthroat trout and salamander sizes
#'
#' @source {Gregory, S.V. and I. Arismendi. 2020. Aquatic Vertebrate Population Study in Mack Creek, Andrews Experimental Forest, 1987 to present ver 14. Environmental Data Initiative. https://doi.org/10.6073/pasta/7c78d662e847cdbe33584add8f809165 (Accessed 2021-02-20).}
#' \url{https://portal.edirepository.org/nis/mapbrowse?packageid=knb-lter-and.4027.14}
"and_vertebrates"
|
rm(list = ls())
library(Daniel)
library(dplyr)
library(nnet)
# 95% Wald confidence intervals for the exposure coefficient (column 2 of the
# coefficient matrix) in each of the two outcome equations of a fitted
# multinomial model (nnet::multinom).
# fit: a fitted model whose summary() exposes $coefficients and
#      $standard.errors matrices (rows = outcome equations).
# Returns a 2 x 2 matrix; rows "ci.1"/"ci.2" hold (lower, upper) bounds.
CalcCImultinom <- function(fit)
{
  fit.sum <- summary(fit)
  half.width <- c(-1, 1) * 1.96
  rbind(ci.1 = fit.sum$coefficients[1, 2] + half.width * fit.sum$standard.errors[1, 2],
        ci.2 = fit.sum$coefficients[2, 2] + half.width * fit.sum$standard.errors[2, 2])
}
# Scenario key (encoded in `patt`, used in the output file name):
# A, B,C,D,E,F - betaE[2] = 1.25, 1.5, 1.75, 2, 2.25, 2.5
# A,B,C, D, E,F - betaU = 2,3,4,5,6,7
patt <- "FB"
# True multinomial logit parameters: intercepts, exposure effects, and
# confounder (U) effects for the two disease subtypes.
beta0 <- c(-6, -5)
betaE <- c(log(2.5), log(2.5))
betaU <- c(log(1.5), log(3))
sigmaU <- 1
n.sample <- 50000
n.sim <- 1000
# Per-iteration outcome-count table (3 categories: 0/1/2).
# NOTE(review): `nr`/`nc` rely on partial argument matching of nrow/ncol.
AllY <- matrix(nr = n.sim, nc = 3)
# Preallocate one slot per simulation for every estimand.
sace.diff1 <- sace.diff2 <- ace.diff1 <- ace.diff2 <-
  sace.or1 <- sace.or2 <- ace.or1 <- ace.or2 <-
  or.approx1 <- or.approx2 <- or.approx.true1 <- or.approx.true2 <-
  pop.never.s1 <- pop.never.s2 <- vector(length = n.sim)
ci1 <- ci2 <- matrix(nr = n.sim, nc = 2)
# Main Monte-Carlo loop: in each replicate, simulate potential outcomes under
# control (Yctrl) and treatment (Ytrt) for two competing disease subtypes
# (coded 1 and 2; 0 = disease-free), then compute causal contrasts.
# CatIndex and CalcOR are project helpers defined elsewhere.
for (j in 1:n.sim)
{
  CatIndex(j)
  # Simulate genetic score
  U <- rnorm(n.sample, 0, sd = sigmaU)
  #### Calculate probabilities for each subtype with and without the exposure ####
  e1E0 <- exp(beta0[1] + betaU[1]*U)
  e1E1 <- exp(beta0[1] + betaE[1] + betaU[1]*U)
  e2E0 <- exp(beta0[2] + betaU[2]*U)
  e2E1 <- exp(beta0[2] + betaE[2] + betaU[2]*U)
  prE0Y1 <- e1E0/(1 + e1E0 + e2E0)
  prE0Y2 <- e2E0/(1 + e1E0 + e2E0)
  prE1Y1 <- e1E1/(1 + e1E1 + e2E1)
  prE1Y2 <- e2E1/(1 + e1E1 + e2E1)
  probsE0 <- cbind(prE0Y1, prE0Y2, 1 - prE0Y1 - prE0Y2)
  probsE1 <- cbind(prE1Y1, prE1Y2, 1 - prE1Y1 - prE1Y2)
  # Simulate subtypes # (per-subject draw of both potential outcomes)
  Yctrl <- Ytrt <- vector(length = n.sample)
  X <- rbinom(n = n.sample, 1, 0.5)
  for (i in 1:n.sample)
  {
    Yctrl[i] <- sample(c(1,2,0), 1, replace = T, prob = probsE0[i, ])
    Ytrt[i] <- sample(c(1,2,0), 1, replace = T, prob = probsE1[i, ])
  }
  # Observed outcome = potential outcome under the randomized exposure X.
  Y <- (1-X)*Yctrl + X*Ytrt
  # NOTE(review): assumes all three categories appear in every replicate;
  # table(Y) would misalign if a category had zero count.
  AllY[j, ] <- table(Y)
  Y1ctrl <- Yctrl==1
  Y1trt <- Ytrt==1
  Y2ctrl <- Yctrl==2
  Y2trt <- Ytrt==2
  # Proportion never developing each subtype under either exposure level.
  pop.never.s1[j] <- mean(Y1ctrl==0 & Y1trt==0)
  pop.never.s2[j] <- mean(Y2ctrl==0 & Y2trt==0)
  # estimate causal parameters (SACE restricted to the "never other subtype"
  # principal stratum; ACE conditions on the observed arm)
  sace.diff1[j] <- mean((Y1trt - Y1ctrl)[Y2ctrl==0 & Y2trt==0])
  sace.diff2[j]<- mean((Y2trt - Y2ctrl)[Y1ctrl==0 & Y1trt==0])
  ace.diff1[j] <- mean((Y1trt[Y2trt==0 & X==1]) - mean(Y1ctrl[Y2ctrl==0 & X==0]))
  ace.diff2[j] <- mean((Y2trt[Y1trt==0 & X==1]) - mean(Y2ctrl[Y1ctrl==0 & X==0]))
  # Ypo <- c(Yctrl, Ytrt)
  # Upo <- rep(U,2)
  # Xpo <- rep(x = c(0,1), each = n.sample)
  # fit.full.po <- multinom(Ypo~ Xpo + Upo)
  # fit.po <- multinom(Ypo~ Xpo)
  # Multinomial fit on observed data; store 95% CIs for both exposure effects.
  fit <- multinom(Y~ X)
  cis <- CalcCImultinom(fit)
  ci1[j, ] <- cis[1, ]
  ci2[j, ] <- cis[2, ]
  # Subtype-specific case/control subsets (drop the competing subtype).
  Y1only <- Y[Y<2]
  X1only <- X[Y<2]
  U1only <-U[Y<2]
  Y2only <- Y[Y!=1]
  X2only <- X[Y!=1]
  U2only <-U[Y!=1]
  Y2only[Y2only>0] <- 1
  # 2x2 cell counts in the order expected by CalcOR.
  vec.for.or.1only <- c(sum((1 - Y1only) * (1 - X1only)) , sum(Y1only * (1 - X1only)),
                        sum((1 - Y1only) * X1only), sum(Y1only*X1only))
  vec.for.or.2only <- c(sum((1 - Y2only) * (1 - X2only)) , sum(Y2only * (1 - X2only)),
                        sum((1 - Y2only) * X2only), sum(Y2only*X2only))
  ace.or1[j] <- CalcOR(vec.for.or.1only)
  ace.or2[j] <- CalcOR(vec.for.or.2only)
  # SACE odds ratios: restrict to the principal stratum for the other subtype.
  # NOTE(review): `Y1ctrl!=1` below is numerically !Y1ctrl (i.e. Yctrl!=1),
  # matching the `Ytrt!=1` condition, but the mixed logical/numeric style is
  # inconsistent with the `Yctrl < 2` form used two lines above.
  Y1only.sace <- Y[Ytrt <2 & Yctrl < 2]
  X1only.sace <- X[Ytrt <2 & Yctrl < 2]
  U1only.sace <-U[Ytrt <2 & Yctrl < 2]
  Y2only.sace <- Y[Ytrt!=1 & Y1ctrl!=1]
  X2only.sace <- X[Ytrt!=1 & Y1ctrl!=1]
  U2only.sace <-U[Ytrt!=1 & Y1ctrl!=1]
  Y2only.sace[Y2only.sace>0] <- 1
  vec.for.or.sace1 <- c(sum((1 - Y1only.sace) * (1 - X1only.sace)) , sum(Y1only.sace * (1 - X1only.sace)),
                        sum((1 - Y1only.sace) * X1only.sace), sum(Y1only.sace*X1only.sace))
  vec.for.or.sace2 <- c(sum((1 - Y2only.sace) * (1 - X2only.sace)) , sum(Y2only.sace * (1 - X2only.sace)),
                        sum((1 - Y2only.sace) * X2only.sace), sum(Y2only.sace*X2only.sace))
  sace.or1[j] <- CalcOR(vec.for.or.sace1)
  sace.or2[j] <- CalcOR(vec.for.or.sace2)
  # Logistic approximations of the subtype-specific ORs, with and without
  # adjusting for the true confounder U.
  Y1 <- Y==1
  Y2 <- Y==2
  fit.logistic.Y1 <- glm(Y1 ~ X, family = "binomial")
  fit.logistic.true.Y1 <- glm(Y1 ~ X + U, family = "binomial")
  fit.logistic.Y2 <- glm(Y2 ~ X, family = "binomial")
  fit.logistic.true.Y2 <- glm(Y2 ~ X + U, family = "binomial")
  or.approx1[j] <- exp(coef(fit.logistic.Y1)[2])
  or.approx.true1[j] <- exp(coef(fit.logistic.true.Y1)[2])
  or.approx2[j] <- exp(coef(fit.logistic.Y2)[2])
  or.approx.true2[j] <- exp(coef(fit.logistic.true.Y2)[2])
}
# Persist the whole workspace for this scenario.
save.image(paste0("CMPEn50krareScen18",patt,".RData"))
| /Simulations/Scripts/R/Rare/Scenario 18/CMPEn50KrareScen18FB.R | no_license | yadevi/CausalMPE | R | false | false | 4,220 | r | rm(list = ls())
library(Daniel)
library(dplyr)
library(nnet)
CalcCImultinom <- function(fit)
{
s <- summary(fit)
coef <- s$coefficients
ses <- s$standard.errors
ci.1 <- coef[1,2] + c(-1, 1)*1.96*ses[1, 2]
ci.2 <- coef[2,2] + c(-1, 1)*1.96*ses[2, 2]
return(rbind(ci.1,ci.2))
}
#key
# A, B,C,D,E,F - betaE[2] = 1.25, 1.5, 1.75, 2, 2.25, 2.5
# A,B,C, D, E,F - betaU = 2,3,4,5,6,7
patt <- "FB"
beta0 <- c(-6, -5)
betaE <- c(log(2.5), log(2.5))
betaU <- c(log(1.5), log(3))
sigmaU <- 1
n.sample <- 50000
n.sim <- 1000
AllY <- matrix(nr = n.sim, nc = 3)
sace.diff1 <- sace.diff2 <- ace.diff1 <- ace.diff2 <-
sace.or1 <- sace.or2 <- ace.or1 <- ace.or2 <-
or.approx1 <- or.approx2 <- or.approx.true1 <- or.approx.true2 <-
pop.never.s1 <- pop.never.s2 <- vector(length = n.sim)
ci1 <- ci2 <- matrix(nr = n.sim, nc = 2)
for (j in 1:n.sim)
{
CatIndex(j)
# Simulate genetic score
U <- rnorm(n.sample, 0, sd = sigmaU)
#### Calcualte probabilites for each subtype with and without the exposure ####
e1E0 <- exp(beta0[1] + betaU[1]*U)
e1E1 <- exp(beta0[1] + betaE[1] + betaU[1]*U)
e2E0 <- exp(beta0[2] + betaU[2]*U)
e2E1 <- exp(beta0[2] + betaE[2] + betaU[2]*U)
prE0Y1 <- e1E0/(1 + e1E0 + e2E0)
prE0Y2 <- e2E0/(1 + e1E0 + e2E0)
prE1Y1 <- e1E1/(1 + e1E1 + e2E1)
prE1Y2 <- e2E1/(1 + e1E1 + e2E1)
probsE0 <- cbind(prE0Y1, prE0Y2, 1 - prE0Y1 - prE0Y2)
probsE1 <- cbind(prE1Y1, prE1Y2, 1 - prE1Y1 - prE1Y2)
# Simulate subtypes #
Yctrl <- Ytrt <- vector(length = n.sample)
X <- rbinom(n = n.sample, 1, 0.5)
for (i in 1:n.sample)
{
Yctrl[i] <- sample(c(1,2,0), 1, replace = T, prob = probsE0[i, ])
Ytrt[i] <- sample(c(1,2,0), 1, replace = T, prob = probsE1[i, ])
}
Y <- (1-X)*Yctrl + X*Ytrt
AllY[j, ] <- table(Y)
Y1ctrl <- Yctrl==1
Y1trt <- Ytrt==1
Y2ctrl <- Yctrl==2
Y2trt <- Ytrt==2
pop.never.s1[j] <- mean(Y1ctrl==0 & Y1trt==0)
pop.never.s2[j] <- mean(Y2ctrl==0 & Y2trt==0)
# estimate causal parameters
sace.diff1[j] <- mean((Y1trt - Y1ctrl)[Y2ctrl==0 & Y2trt==0])
sace.diff2[j]<- mean((Y2trt - Y2ctrl)[Y1ctrl==0 & Y1trt==0])
ace.diff1[j] <- mean((Y1trt[Y2trt==0 & X==1]) - mean(Y1ctrl[Y2ctrl==0 & X==0]))
ace.diff2[j] <- mean((Y2trt[Y1trt==0 & X==1]) - mean(Y2ctrl[Y1ctrl==0 & X==0]))
# Ypo <- c(Yctrl, Ytrt)
# Upo <- rep(U,2)
# Xpo <- rep(x = c(0,1), each = n.sample)
# fit.full.po <- multinom(Ypo~ Xpo + Upo)
# fit.po <- multinom(Ypo~ Xpo)
fit <- multinom(Y~ X)
cis <- CalcCImultinom(fit)
ci1[j, ] <- cis[1, ]
ci2[j, ] <- cis[2, ]
Y1only <- Y[Y<2]
X1only <- X[Y<2]
U1only <-U[Y<2]
Y2only <- Y[Y!=1]
X2only <- X[Y!=1]
U2only <-U[Y!=1]
Y2only[Y2only>0] <- 1
vec.for.or.1only <- c(sum((1 - Y1only) * (1 - X1only)) , sum(Y1only * (1 - X1only)),
sum((1 - Y1only) * X1only), sum(Y1only*X1only))
vec.for.or.2only <- c(sum((1 - Y2only) * (1 - X2only)) , sum(Y2only * (1 - X2only)),
sum((1 - Y2only) * X2only), sum(Y2only*X2only))
ace.or1[j] <- CalcOR(vec.for.or.1only)
ace.or2[j] <- CalcOR(vec.for.or.2only)
Y1only.sace <- Y[Ytrt <2 & Yctrl < 2]
X1only.sace <- X[Ytrt <2 & Yctrl < 2]
U1only.sace <-U[Ytrt <2 & Yctrl < 2]
Y2only.sace <- Y[Ytrt!=1 & Y1ctrl!=1]
X2only.sace <- X[Ytrt!=1 & Y1ctrl!=1]
U2only.sace <-U[Ytrt!=1 & Y1ctrl!=1]
Y2only.sace[Y2only.sace>0] <- 1
vec.for.or.sace1 <- c(sum((1 - Y1only.sace) * (1 - X1only.sace)) , sum(Y1only.sace * (1 - X1only.sace)),
sum((1 - Y1only.sace) * X1only.sace), sum(Y1only.sace*X1only.sace))
vec.for.or.sace2 <- c(sum((1 - Y2only.sace) * (1 - X2only.sace)) , sum(Y2only.sace * (1 - X2only.sace)),
sum((1 - Y2only.sace) * X2only.sace), sum(Y2only.sace*X2only.sace))
sace.or1[j] <- CalcOR(vec.for.or.sace1)
sace.or2[j] <- CalcOR(vec.for.or.sace2)
Y1 <- Y==1
Y2 <- Y==2
fit.logistic.Y1 <- glm(Y1 ~ X, family = "binomial")
fit.logistic.true.Y1 <- glm(Y1 ~ X + U, family = "binomial")
fit.logistic.Y2 <- glm(Y2 ~ X, family = "binomial")
fit.logistic.true.Y2 <- glm(Y2 ~ X + U, family = "binomial")
or.approx1[j] <- exp(coef(fit.logistic.Y1)[2])
or.approx.true1[j] <- exp(coef(fit.logistic.true.Y1)[2])
or.approx2[j] <- exp(coef(fit.logistic.Y2)[2])
or.approx.true2[j] <- exp(coef(fit.logistic.true.Y2)[2])
}
save.image(paste0("CMPEn50krareScen18",patt,".RData"))
|
# plot3: total PM2.5 emissions per source type (1999-2008) for Baltimore City
# (fips == "24510"), written to plot3.png.
### Download file if does not exist ###
# NOTE(review): method="curl" requires curl on the PATH - not portable to all
# platforms; the default method would usually suffice.
filename <- "exdata%2Fdata%2FNEI_data.zip"
if (!file.exists(filename)){
  fileURL <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"
  download.file(fileURL, filename, method="curl")
}
### Unzip files if they do not exist ###
if (!file.exists("summarySCC_PM25.rds")) {
  unzip(filename)
}
### Read files ###
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
### Sum up emissions by year ###
baltimore <- NEI[which(NEI$fips=="24510"),]
totalyearlyemissions <- aggregate(Emissions~year+type, data=baltimore, FUN="sum")
library(ggplot2)
### Plot graph ###
png("plot3.png")
options("scipen" = 20)
# NOTE(review): relies on top-level auto-printing of the ggplot object; when
# run via source() with default settings nothing is drawn - wrapping in
# print(...) would be more robust.
ggplot(data=totalyearlyemissions, aes(x=year,y=Emissions,color=type))+geom_line()+ggtitle("Total Emissions for each Source Type from 1999 to 2008, Baltimore")
dev.off()
| /RScripts/ExData_Plotting2/plot3.R | no_license | lchen-24/scripts | R | false | false | 868 | r | ### Download file if does not exist ###
filename <- "exdata%2Fdata%2FNEI_data.zip"
if (!file.exists(filename)){
fileURL <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"
download.file(fileURL, filename, method="curl")
}
### Unzip files if they do not exist ###
if (!file.exists("summarySCC_PM25.rds")) {
unzip(filename)
}
### Read files ###
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
### Sum up emissions by year ###
baltimore <- NEI[which(NEI$fips=="24510"),]
totalyearlyemissions <- aggregate(Emissions~year+type, data=baltimore, FUN="sum")
library(ggplot2)
### Plot graph ###
png("plot3.png")
options("scipen" = 20)
ggplot(data=totalyearlyemissions, aes(x=year,y=Emissions,color=type))+geom_line()+ggtitle("Total Emissions for each Source Type from 1999 to 2008, Baltimore")
dev.off()
|
# Scrumwise activity scoring: for each member with >= 7 days of board history,
# score activity from the variety of day-to-day changes in the in-progress /
# to-test / done columns, append the score to sw_Weight.csv and a message to
# notification_table.csv.
# NOTE(review): absolute Windows paths make this machine-specific; `delta` in
# df2$in_pogress is a (pre-existing) upstream column-name spelling; passing
# col.names to write.csv triggers a warning (write.csv ignores it).
m_table=read.csv("c:/Users/Suhas Xavier/Desktop/sw_membership_table.csv")
sw_table=read.csv("c:/Users/Suhas Xavier/Desktop/sw_data.csv")
usernames=m_table$email
#Calculate individual weight
tmpdf=data.frame(Date=as.character(),email=as.numeric(), Score=as.numeric())
if(!file.exists("C:/Users/Suhas Xavier/Desktop/sw_weight.csv"))
{
  write.csv(tmpdf,file="C:/Users/Suhas Xavier/Desktop/sw_weight.csv",row.names = F,col.names = F)
}
for(i in 1:length(unique(usernames)))
{
  tname=as.character(m_table[m_table$email==usernames[i],"project_name"])
  exp=3
  this_user_data=sw_table[sw_table$email==as.character(usernames[i]),]
  dates=as.character(tail(this_user_data$date,1))
  # Require at least a week of history before scoring.
  if(nrow(this_user_data)>=7)
  {
    #diff gives difference of all values, the unique values indicate the actual changes, sum them up and average over 3
    df2=tail(this_user_data,7)
    inp1=length(unique(diff(df2$in_pogress)))
    tot1=length(unique(diff(df2$to_test)))
    don1=length(unique(diff(df2$done)))
    tot_len=(sum(inp1,tot1,don1))/3
    msg=""
    fin_score=0
    # Map average change-variety to a score of 1 / 3 / 5 plus a message.
    # NOTE(review): "Too amny" typo lives in a runtime string; fixing it would
    # change program output, so it is only flagged here.
    if(tot_len<=1)
    {
      fin_score=1
      msg=paste(dates,"NO Scrumwise Activity!!",sep=" ")
    }
    else if(tot_len>1 & tot_len<=2)
    {
      fin_score=3
      msg=paste(dates,"Low Scrumwise Activity!!",sep=" ")
    }
    else if(tot_len>2 & tot_len<=3)
    {
      fin_score=5
      msg=paste(dates,"Good Scrumwise Activity!!",sep=" ")
    }
    else if(tot_len>3) {
      fin_score=3
      msg=paste(dates,"Too amny tasks assigned to you!",sep=" ")
    }
    df_temp=data.frame(usernames[i],msg)
    df_holder=data.frame(dates,usernames[i],fin_score,tname,exp)
    print(df_holder)
    write.table(df_holder,file="C:/Users/Suhas Xavier/Desktop/sw_Weight.csv",row.names = F,col.names = F,sep=",",append = T,na="0")
    write.table(df_temp,file="C:/Users/Suhas Xavier/Desktop/notification_table.csv",row.names = F,col.names = F,sep=",",append = T)
    # print(df_holder)
    print(df_temp)
  }
}
closeAllConnections() | /SWWeight.R | no_license | suhasxavier/CADashboard_R | R | false | false | 2,045 | r | m_table=read.csv("c:/Users/Suhas Xavier/Desktop/sw_membership_table.csv")
sw_table=read.csv("c:/Users/Suhas Xavier/Desktop/sw_data.csv")
usernames=m_table$email
#Calculate individual weight
tmpdf=data.frame(Date=as.character(),email=as.numeric(), Score=as.numeric())
if(!file.exists("C:/Users/Suhas Xavier/Desktop/sw_weight.csv"))
{
write.csv(tmpdf,file="C:/Users/Suhas Xavier/Desktop/sw_weight.csv",row.names = F,col.names = F)
}
for(i in 1:length(unique(usernames)))
{
tname=as.character(m_table[m_table$email==usernames[i],"project_name"])
exp=3
this_user_data=sw_table[sw_table$email==as.character(usernames[i]),]
dates=as.character(tail(this_user_data$date,1))
if(nrow(this_user_data)>=7)
{
#diff gives difference of all values, the unique values indicate the actual changes, sum them up and average over 3
df2=tail(this_user_data,7)
inp1=length(unique(diff(df2$in_pogress)))
tot1=length(unique(diff(df2$to_test)))
don1=length(unique(diff(df2$done)))
tot_len=(sum(inp1,tot1,don1))/3
msg=""
fin_score=0
if(tot_len<=1)
{
fin_score=1
msg=paste(dates,"NO Scrumwise Activity!!",sep=" ")
}
else if(tot_len>1 & tot_len<=2)
{
fin_score=3
msg=paste(dates,"Low Scrumwise Activity!!",sep=" ")
}
else if(tot_len>2 & tot_len<=3)
{
fin_score=5
msg=paste(dates,"Good Scrumwise Activity!!",sep=" ")
}
else if(tot_len>3) {
fin_score=3
msg=paste(dates,"Too amny tasks assigned to you!",sep=" ")
}
df_temp=data.frame(usernames[i],msg)
df_holder=data.frame(dates,usernames[i],fin_score,tname,exp)
print(df_holder)
write.table(df_holder,file="C:/Users/Suhas Xavier/Desktop/sw_Weight.csv",row.names = F,col.names = F,sep=",",append = T,na="0")
write.table(df_temp,file="C:/Users/Suhas Xavier/Desktop/notification_table.csv",row.names = F,col.names = F,sep=",",append = T)
# print(df_holder)
print(df_temp)
}
}
closeAllConnections() |
#### BTLm ####
### Bradley-Terry-Luce ability estimation from pairwise comparison data ###
### Wrapper: builds per-representation observed scores, then iterates the
### Newton-style update implemented in BTLm.est below.
# Data: data frame of comparisons with columns Repr1, Repr2 and Score
#       (Score is from Repr1's perspective; 1 - Score credits Repr2).
# epsilonCorrect: shrinkage applied to observed scores so no representation
#       has an extreme 0 / perfect score (keeps estimates finite).
# est.iters: number of update iterations before the final SE-computing pass.
# Returns a data frame with columns Repr, Ability and se.
BTLm <- function( Data, epsilonCorrect = .003, est.iters = 4 )
{
  ### Preparations ###
  repr <- unique( c( Data$Repr1, Data$Repr2 ) )
  Abil <- data.frame( Repr = repr, Ability = 0, se = 0 )
  rm( repr )
  ### Observed Score ###
  ## total score each representation obtained across all its comparisons
  Obs1 <- aggregate( Data$Score, by = list( Repr = Data$Repr1 ), FUN = "sum" )
  Obs2 <- aggregate( 1 - Data$Score, by = list( Repr = Data$Repr2 ), FUN = "sum" )
  Obs <- rbind( Obs1, Obs2)
  Obs <- aggregate( Obs$x, by = list( Repr = Obs$Repr ), FUN = "sum")
  Abil <- merge( Abil, Obs, by = "Repr" )
  names( Abil )[4] <- c( "Observed" )
  rm( Obs1, Obs2, Obs )
  ## number of comparisons each representation took part in
  Comp1 <- aggregate( Data$Score, by = list( Repr = Data$Repr1 ), FUN = "length" )
  Comp2 <- aggregate( Data$Score, by = list( Repr = Data$Repr2 ), FUN = "length" )
  Comp <- rbind( Comp1, Comp2)
  Comp <- aggregate( Comp$x, by = list( Repr = Comp$Repr ), FUN = "sum")
  Abil <- merge( Abil, Comp, by = "Repr" )
  names( Abil )[5] <- c( "totalComp" )
  rm( Comp1, Comp2, Comp )
  ## Correct Abil$Observed: pull scores epsilonCorrect away from 0 / totalComp
  interm <- Abil$totalComp - 2 * epsilonCorrect
  interm <- interm * Abil$Observed / Abil$totalComp
  Abil$Observed <- epsilonCorrect + interm
  rm( interm )
  # clean up (drop the totalComp column)
  Abil <- Abil[ , -5 ]
  ### Estimate Abilities ###
  # counter i == 0 is the final pass where BTLm.est returns standard errors.
  for( i in est.iters:0 )
  {
    ## find the corresponding ability values for each representation in pair
    Repr1ID <- match( Data$Repr1, table = Abil$Repr )
    Repr2ID <- match( Data$Repr2, table = Abil$Repr )
    Data$AbilR1 <- Abil$Ability[ Repr1ID ]
    Data$AbilR2 <- Abil$Ability[ Repr2ID ]
    rm( Repr1ID, Repr2ID )
    Abil <- BTLm.est( Data = Data, Abil = Abil, counter = i )
  }
  rm(i)
  ### Output ###
  return( Abil )
}
#### BTLm.est ####
### The actual BTL optimization step ###
# One Newton-style update of the ability estimates: expected score and Fisher
# information per representation are accumulated from the Rasch win
# probabilities, then Ability += (Observed - Expected) / Info.
# RaschProb and FisherInfo are package helpers defined elsewhere.
# When counter == 0 (final pass) standard errors 1/sqrt(Info) are returned
# instead of updated abilities.
# NOTE(review): uses `F` for FALSE and position-based renaming
# (names(Abil)[5]/[6]) - fragile if the column layout of Abil changes.
BTLm.est <- function(Data, Abil, counter)
{
  ## calculate expected score
  Data$raschp <- RaschProb( Data$AbilR1, Data$AbilR2 )
  raschp1 <- aggregate( Data$raschp, by = list( Repr = Data$Repr1 ), FUN = "sum" )
  raschp2 <- aggregate( ( 1 - Data$raschp ), by = list( Repr = Data$Repr2 ), FUN = "sum" )
  raschp <- rbind( raschp1, raschp2 )
  raschp <- aggregate( raschp$x, by = list( Repr = raschp$Repr ), FUN = "sum" )
  # merge with Abil (becomes column 5)
  Abil <- merge( Abil, raschp, by = "Repr", all.y = F )
  names( Abil )[5] <- "Expected"
  rm( raschp1, raschp2, raschp )
  ## calculate expected info (becomes column 6)
  Data$finfo <- FisherInfo( p = Data$raschp )
  finfo1 <- aggregate( Data$finfo, by = list( Repr = Data$Repr1 ), FUN = "sum" )
  finfo2 <- aggregate( Data$finfo, by = list( Repr = Data$Repr2 ), FUN = "sum" )
  finfo <- rbind( finfo1, finfo2 )
  finfo <- aggregate( finfo$x, by = list( Repr = finfo$Repr), FUN = "sum" )
  Abil <- merge( Abil, finfo, by = "Repr", all.y = F )
  names( Abil )[6] <- "ExpectedInfo"
  rm( finfo1, finfo2, finfo )
  if( counter != 0 )
  {
    ## estimate new ability (Newton step)
    Abil$AbilityN <- Abil$Ability + ( Abil$Observed - Abil$Expected ) / Abil$ExpectedInfo
  } else
  {
    ## calculate se on the final pass and return without Observed
    Abil$seN <- 1 / sqrt( Abil$ExpectedInfo )
    return( data.frame( Repr = Abil$Repr, Ability = Abil$Ability, se = Abil$seN ) )
  }
  return( data.frame( Repr = Abil$Repr, Ability = Abil$AbilityN, se = Abil$se,
                      Observed = Abil$Observed ) )
} | /dist/DPACanalyses/R/iterativeML.R | no_license | SanVerhavert/DPACanalyses | R | false | false | 3,405 | r | #### BTLm ####
### Wrapper function for estimateAbility ###
### Restructures data and executes estimateAbility ###
BTLm <- function( Data, epsilonCorrect = .003, est.iters = 4 )
{
### Preparations ###
repr <- unique( c( Data$Repr1, Data$Repr2 ) )
Abil <- data.frame( Repr = repr, Ability = 0, se = 0 )
rm( repr )
### Observed Score ###
## in Data
Obs1 <- aggregate( Data$Score, by = list( Repr = Data$Repr1 ), FUN = "sum" )
Obs2 <- aggregate( 1 - Data$Score, by = list( Repr = Data$Repr2 ), FUN = "sum" )
Obs <- rbind( Obs1, Obs2)
Obs <- aggregate( Obs$x, by = list( Repr = Obs$Repr ), FUN = "sum")
Abil <- merge( Abil, Obs, by = "Repr" )
names( Abil )[4] <- c( "Observed" )
rm( Obs1, Obs2, Obs )
Comp1 <- aggregate( Data$Score, by = list( Repr = Data$Repr1 ), FUN = "length" )
Comp2 <- aggregate( Data$Score, by = list( Repr = Data$Repr2 ), FUN = "length" )
Comp <- rbind( Comp1, Comp2)
Comp <- aggregate( Comp$x, by = list( Repr = Comp$Repr ), FUN = "sum")
Abil <- merge( Abil, Comp, by = "Repr" )
names( Abil )[5] <- c( "totalComp" )
rm( Comp1, Comp2, Comp )
## Correct Abil$Observed
interm <- Abil$totalComp - 2 * epsilonCorrect
interm <- interm * Abil$Observed / Abil$totalComp
Abil$Observed <- epsilonCorrect + interm
rm( interm )
# clean up
Abil <- Abil[ , -5 ]
### Estimate Abilities ###
for( i in est.iters:0 )
{
## find the corresponding ability values for each representation in pair
Repr1ID <- match( Data$Repr1, table = Abil$Repr )
Repr2ID <- match( Data$Repr2, table = Abil$Repr )
Data$AbilR1 <- Abil$Ability[ Repr1ID ]
Data$AbilR2 <- Abil$Ability[ Repr2ID ]
rm( Repr1ID, Repr2ID )
Abil <- BTLm.est( Data = Data, Abil = Abil, counter = i )
}
rm(i)
### Output ###
return( Abil )
}
#### BTLm.est ####
### The actual BTL optimization function ###
BTLm.est <- function(Data, Abil, counter)
{
## calculate expected score
Data$raschp <- RaschProb( Data$AbilR1, Data$AbilR2 )
raschp1 <- aggregate( Data$raschp, by = list( Repr = Data$Repr1 ), FUN = "sum" )
raschp2 <- aggregate( ( 1 - Data$raschp ), by = list( Repr = Data$Repr2 ), FUN = "sum" )
raschp <- rbind( raschp1, raschp2 )
raschp <- aggregate( raschp$x, by = list( Repr = raschp$Repr ), FUN = "sum" )
# merge with Abil
Abil <- merge( Abil, raschp, by = "Repr", all.y = F )
names( Abil )[5] <- "Expected"
rm( raschp1, raschp2, raschp )
## calculate expected info
Data$finfo <- FisherInfo( p = Data$raschp )
finfo1 <- aggregate( Data$finfo, by = list( Repr = Data$Repr1 ), FUN = "sum" )
finfo2 <- aggregate( Data$finfo, by = list( Repr = Data$Repr2 ), FUN = "sum" )
finfo <- rbind( finfo1, finfo2 )
finfo <- aggregate( finfo$x, by = list( Repr = finfo$Repr), FUN = "sum" )
Abil <- merge( Abil, finfo, by = "Repr", all.y = F )
names( Abil )[6] <- "ExpectedInfo"
rm( finfo1, finfo2, finfo )
if( counter != 0 )
{
## estimate new ability
Abil$AbilityN <- Abil$Ability + ( Abil$Observed - Abil$Expected ) / Abil$ExpectedInfo
} else
{
## calculate se
Abil$seN <- 1 / sqrt( Abil$ExpectedInfo )
return( data.frame( Repr = Abil$Repr, Ability = Abil$Ability, se = Abil$seN ) )
}
return( data.frame( Repr = Abil$Repr, Ability = Abil$AbilityN, se = Abil$se,
Observed = Abil$Observed ) )
} |
library(FAwR)
### Name: SSallometric
### Title: Self-starting version of the allometric function y = a x^b.
### Aliases: SSallometric
### Keywords: models
### ** Examples
SSallometric(10, 2, 3)
data(sweetgum)
nls(vol.m3 ~ SSallometric(dbh.cm, alpha, beta), data = sweetgum)
| /data/genthat_extracted_code/FAwR/examples/SSallometric.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 283 | r | library(FAwR)
### Name: SSallometric
### Title: Self-starting version of the allometric function y = a x^b.
### Aliases: SSallometric
### Keywords: models
### ** Examples
SSallometric(10, 2, 3)
data(sweetgum)
nls(vol.m3 ~ SSallometric(dbh.cm, alpha, beta), data = sweetgum)
|
#' @title Remap simulated probabilities onto logTPM values under a fitted exponential
#' @description Treats each observation's probability as the true population
#'   parameter, shifts it up by \code{delta.max / 2} and down by \code{delta.max}
#'   (any entry that would leave [0, 1] keeps its original probability), and maps
#'   both shifted probability vectors back to logTPM values through the quantile
#'   function of the fitted exponential distribution.
#' @param dum the simulated data frame (must contain \code{DC0h_mm1} and \code{true.prob})
#' @param delta.max the integer to adjust the probability values by
#' @param lamb lambda rate
#' @export
#' @return a data frame of logTPM values
simCorrect <- function(dum = NULL, delta.max = 0.05, lamb = fit.exp$estimate["rate"]) {
  stopifnot(is.null(dum) == FALSE)
  base.tpm <- dum$DC0h_mm1
  truth <- dum$true.prob
  # Upward shift by half of delta.max; revert entries pushed above 1.
  upper.prob <- truth + delta.max / 2
  if (any(upper.prob > 1)) {
    over <- which(upper.prob > 1)
    upper.prob[over] <- truth[over]
  }
  # Downward shift by the full delta.max; revert entries pushed below 0.
  lower.prob <- truth - delta.max
  if (any(lower.prob < 0)) {
    under <- which(lower.prob < 0)
    lower.prob[under] <- truth[under]
  }
  # Map the adjusted probabilities back to the logTPM scale via qexp.
  data.frame(DC0h_mm1 = base.tpm,
             DC0h_mm05 = qexp(upper.prob, rate = lamb),
             DC0h_mmN05 = qexp(lower.prob, rate = lamb),
             true.prob = truth,
             DC0h_mm05.prob = upper.prob,
             DC0h_mmN05.prob = lower.prob,
             delta = upper.prob - truth,
             delta2 = lower.prob - truth)
}#main
| /inst/extdata/junk/simCorrect.R | no_license | arcolombo/rToolKit | R | false | false | 1,494 | r | #' @title use the probability of a fit distribution in simulated data for exp dist
#' @description use the probability of a single observation as the true population parameter and simulates logTPM values based on a fitted exp distribution as the population dist. takes the observation probabilty and adjusts by an epsilon to then remap onto a tpm value.
#' @param dum the simulated data frame
#' @param delta.max the integer to adjust the probability values by
#' @param lamb lambda rate
#' @export
#' @return a data frame of logTPM values
simCorrect<-function(dum=NULL,delta.max=0.05,lamb=fit.exp$estimate["rate"]){
stopifnot(is.null(dum)==FALSE)
DC0h_mm1<-dum$DC0h_mm1
DC0h_mm05.prob<-dum$true.prob+delta.max/2
if(any(DC0h_mm05.prob>1)){
DC0h_mm05.prob[which(DC0h_mm05.prob>1)]<-dum$true.prob[which(DC0h_mm05.prob>1)]
}
DC0h_mm05.logTpm<-qexp(DC0h_mm05.prob,rate=lamb)
DC0h_mmN05.prob<-dum$true.prob-delta.max
if(any(DC0h_mmN05.prob<0)){
DC0h_mmN05.prob[which(DC0h_mmN05.prob<0)]<-dum$true.prob[which(DC0h_mmN05.prob<0)]
}
DC0h_mmN05.logTpm<-qexp(DC0h_mmN05.prob,rate=lamb)
dum<-data.frame(DC0h_mm1=DC0h_mm1,
DC0h_mm05=DC0h_mm05.logTpm,
DC0h_mmN05=DC0h_mmN05.logTpm,
true.prob=dum$true.prob,
DC0h_mm05.prob=DC0h_mm05.prob,
DC0h_mmN05.prob=DC0h_mmN05.prob,
delta=DC0h_mm05.prob-dum$true.prob,
delta2=DC0h_mmN05.prob-dum$true.prob)
####
return(dum)
}#main
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/headers.r
\name{headers}
\alias{headers}
\title{Extract the headers from a response}
\usage{
headers(x)
}
\arguments{
\item{x}{A request object}
}
\description{
Extract the headers from a response
}
\examples{
\dontrun{
r <- GET("http://httpbin.org/get")
headers(r)
}
}
\seealso{
\code{\link[=add_headers]{add_headers()}} to send additional headers in a
request
}
| /man/headers.Rd | permissive | r-lib/httr | R | false | true | 442 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/headers.r
\name{headers}
\alias{headers}
\title{Extract the headers from a response}
\usage{
headers(x)
}
\arguments{
\item{x}{A request object}
}
\description{
Extract the headers from a response
}
\examples{
\dontrun{
r <- GET("http://httpbin.org/get")
headers(r)
}
}
\seealso{
\code{\link[=add_headers]{add_headers()}} to send additional headers in a
request
}
|
##########################################################################################
# Generate plots
#
# - data set: BCR-XL-sim
# - plot type: performance metrics
# - method: diffcyt methods
#
# - main results
#
# Lukas Weber, May 2018
##########################################################################################
library(iCOBRA)
library(ggplot2)
library(cowplot) # note: cowplot masks 'ggsave' from ggplot2
# load saved results
DIR_RDATA <- "../../../../RData/BCR_XL_sim/main"
load(file.path(DIR_RDATA, "outputs_BCR_XL_sim_diffcyt_DS_limma_main.RData"))
load(file.path(DIR_RDATA, "outputs_BCR_XL_sim_diffcyt_DS_LMM_main.RData"))
# path to save plots
DIR_PLOTS <- "../../../../plots/BCR_XL_sim/main_performance"
################
# Generate plots
################
# -------------------------------------
# Pre-processing steps for iCOBRA plots
# -------------------------------------
# create 'COBRAData' object
data <- list(diffcyt_DS_limma = out_diffcyt_DS_limma_main,
diffcyt_DS_LMM = out_diffcyt_DS_LMM_main)
# check
stopifnot(all(sapply(data, function(d) all(d$B_cell == data[[1]]$B_cell))))
# note: provide all available values
# 'padj' is required for threshold points on TPR-FDR curves
# depending on availability, plotting functions use 'score', then 'pval', then 'padj'
cobradata <- COBRAData(pval = data.frame(diffcyt_DS_limma = data[["diffcyt_DS_limma"]][, "p_val"],
diffcyt_DS_LMM = data[["diffcyt_DS_LMM"]][, "p_val"]),
padj = data.frame(diffcyt_DS_limma = data[["diffcyt_DS_limma"]][, "p_adj"],
diffcyt_DS_LMM = data[["diffcyt_DS_LMM"]][, "p_adj"]),
truth = data.frame(B_cell = data[["diffcyt_DS_limma"]][, "B_cell"]))
# calculate performance scores
# (note: can ignore warning messages when 'padj' not available)
cobraperf <- calculate_performance(cobradata,
binary_truth = "B_cell",
aspects = c("roc", "fdrtpr", "fdrtprcurve", "tpr", "fpr"))
# color scheme
colors <- c("firebrick1", "darkviolet")
colors <- colors[1:length(data)]
names(colors) <- names(data)
# prepare plotting object
cobraplot <- prepare_data_for_plot(cobraperf,
colorscheme = colors,
conditionalfill = FALSE)
# re-order legend
cobraplot <- reorder_levels(cobraplot, levels = names(data))
# ----------
# ROC curves
# ----------
# create plot
p_ROC <- 
  plot_roc(cobraplot, linewidth = 0.75) + 
  coord_fixed() + 
  xlab("False positive rate") + 
  ylab("True positive rate") + 
  ggtitle("BCR-XL-sim: main results", subtitle = "ROC curve") + 
  theme_bw() + 
  theme(strip.text.x = element_blank()) + 
  guides(color = guide_legend("method"))
# save plot
# pass the plot explicitly: relying on the last_plot() default is fragile,
# especially since cowplot masks ggplot2::ggsave (see library() note above)
fn <- file.path(DIR_PLOTS, "panels", "results_BCR_XL_sim_diffcyt_main_ROC.pdf")
ggsave(fn, plot = p_ROC, width = 4.75, height = 3.5)
# --------------
# TPR-FDR curves
# --------------
# create plot
p_TPRFDR <- 
  plot_fdrtprcurve(cobraplot, linewidth = 0.75, pointsize = 4) + 
  scale_shape_manual(values = c(15, 19, 17), labels = c(0.01, 0.05, 0.1)) + 
  coord_fixed() + 
  xlab("False discovery rate") + 
  ylab("True positive rate") + 
  scale_x_continuous(breaks = seq(0, 1, by = 0.2)) + 
  ggtitle("BCR-XL-sim: main results", subtitle = "TPR vs. FDR") + 
  theme_bw() + 
  theme(strip.text.x = element_blank()) + 
  guides(shape = guide_legend("FDR threshold", override.aes = list(size = 4), order = 1), 
         color = guide_legend("method", order = 2))
# save plot
# pass the plot explicitly instead of relying on last_plot()
fn <- file.path(DIR_PLOTS, "panels", "results_BCR_XL_sim_diffcyt_main_TPRFDR.pdf")
ggsave(fn, plot = p_TPRFDR, width = 4.75, height = 3.5)
# ---------
# TPR plots
# ---------
# create plot
p_TPR <- 
  plot_tpr(cobraplot, pointsize = 4) + 
  scale_shape_manual(values = c(15, 19, 17), labels = c(0.01, 0.05, 0.1)) + 
  #coord_fixed() + 
  xlab("True positive rate") + 
  ggtitle("BCR-XL-sim: main results", subtitle = "TPR") + 
  theme_bw() + 
  theme(strip.text.x = element_blank(), 
        axis.text.y = element_blank()) + 
  guides(shape = guide_legend("FDR threshold", override.aes = list(size = 4), order = 1), 
         color = guide_legend("method", override.aes = list(shape = 19, size = 4), order = 2))
# save plot
# pass the plot explicitly instead of relying on last_plot()
fn <- file.path(DIR_PLOTS, "panels", "results_BCR_XL_sim_diffcyt_main_TPR.pdf")
ggsave(fn, plot = p_TPR, width = 4.5, height = 3.5)
# ---------
# FPR plots
# ---------
# create plot
p_FPR <- 
  plot_fpr(cobraplot, pointsize = 4) + 
  scale_shape_manual(values = c(15, 19, 17), labels = c(0.01, 0.05, 0.1)) + 
  #coord_fixed() + 
  xlab("False positive rate") + 
  ggtitle("BCR-XL-sim: main results", subtitle = "FPR") + 
  theme_bw() + 
  theme(strip.text.x = element_blank(), 
        axis.text.y = element_blank()) + 
  guides(shape = guide_legend("FDR threshold", override.aes = list(size = 4), order = 1), 
         color = guide_legend("method", override.aes = list(shape = 19, size = 4), order = 2))
# save plot
# pass the plot explicitly instead of relying on last_plot()
fn <- file.path(DIR_PLOTS, "panels", "results_BCR_XL_sim_diffcyt_main_FPR.pdf")
ggsave(fn, plot = p_FPR, width = 4.5, height = 3.5)
##################
# Multi-panel plot
##################
plots_list <- list(p_ROC, p_TPRFDR, p_TPR, p_FPR)
# modify plot elements: promote subtitle to title, drop individual legends
plots_list <- lapply(plots_list, function(p) {
  p + 
    labs(title = p$labels$subtitle, subtitle = element_blank()) + 
    theme(legend.position = "none")
})
plots_multi <- plot_grid(plotlist = plots_list, 
                         nrow = 1, ncol = 4, align = "hv", axis = "bl")
# add combined title
title_single <- p_ROC$labels$title
plots_title <- ggdraw() + draw_label(title_single)
plots_multi <- plot_grid(plots_title, plots_multi, ncol = 1, rel_heights = c(1, 7))
# add combined legend (taken from the TPR-FDR panel)
legend_single <- get_legend(plots_list[[2]] + theme(legend.position = "right"))
plots_multi <- plot_grid(plots_multi, legend_single, nrow = 1, rel_widths = c(6, 1))
# save multi-panel plot
# pass the plot explicitly instead of relying on last_plot()
fn <- file.path(DIR_PLOTS, "results_BCR_XL_sim_diffcyt_main_performance.pdf")
ggsave(fn, plot = plots_multi, width = 10, height = 2.625)
#################################
# Multi-panel plot: 2 panels only
#################################
plots_list <- list(p_ROC, p_TPRFDR)
# modify plot elements: promote subtitle to title, drop individual legends
plots_list <- lapply(plots_list, function(p) {
  p + 
    labs(title = p$labels$subtitle, subtitle = element_blank()) + 
    theme(legend.position = "none")
})
plots_multi <- plot_grid(plotlist = plots_list, 
                         nrow = 1, ncol = 2, align = "hv", axis = "bl")
# add combined title
title_single <- p_ROC$labels$title
plots_title <- ggdraw() + draw_label(title_single)
plots_multi <- plot_grid(plots_title, plots_multi, ncol = 1, rel_heights = c(1, 7))
# add combined legend (taken from the TPR-FDR panel)
legend_single <- get_legend(plots_list[[2]] + theme(legend.position = "right"))
plots_multi <- plot_grid(plots_multi, legend_single, nrow = 1, rel_widths = c(3.2, 1))
# save multi-panel plot
# pass the plot explicitly instead of relying on last_plot()
fn <- file.path(DIR_PLOTS, "results_BCR_XL_sim_diffcyt_main_performance_2_panels.pdf")
ggsave(fn, plot = plots_multi, width = 6, height = 2.625)
###################################
# Save timestamp file for Makefiles
###################################
# only the file's modification time is consumed by the Makefiles, so the
# exact content is unimportant; writeLines() avoids sink(), which would
# leave console output redirected if an error occurred before the
# matching sink() call
file_timestamp <- file.path(DIR_PLOTS, "timestamp.txt")
writeLines(format(Sys.time()), con = file_timestamp)
| /BCR_XL_sim/3_generate_plots/main_performance/plots_BCR_XL_sim_diffcyt_main_performance.R | permissive | lmweber/diffcyt-evaluations | R | false | false | 7,316 | r | ##########################################################################################
# Generate plots
#
# - data set: BCR-XL-sim
# - plot type: performance metrics
# - method: diffcyt methods
#
# - main results
#
# Lukas Weber, May 2018
##########################################################################################
library(iCOBRA)
library(ggplot2)
library(cowplot) # note: cowplot masks 'ggsave' from ggplot2
# load saved results
DIR_RDATA <- "../../../../RData/BCR_XL_sim/main"
load(file.path(DIR_RDATA, "outputs_BCR_XL_sim_diffcyt_DS_limma_main.RData"))
load(file.path(DIR_RDATA, "outputs_BCR_XL_sim_diffcyt_DS_LMM_main.RData"))
# path to save plots
DIR_PLOTS <- "../../../../plots/BCR_XL_sim/main_performance"
################
# Generate plots
################
# -------------------------------------
# Pre-processing steps for iCOBRA plots
# -------------------------------------
# create 'COBRAData' object
# (one list element of results per method)
data <- list(diffcyt_DS_limma = out_diffcyt_DS_limma_main, 
             diffcyt_DS_LMM = out_diffcyt_DS_LMM_main)
# check: all methods must share an identical truth column ('B_cell')
# vapply() instead of sapply(): return type is guaranteed to be logical
stopifnot(all(vapply(data, function(d) all(d$B_cell == data[[1]]$B_cell), logical(1))))
# note: provide all available values
# 'padj' is required for threshold points on TPR-FDR curves
# depending on availability, plotting functions use 'score', then 'pval', then 'padj'
cobradata <- COBRAData(pval = data.frame(diffcyt_DS_limma = data[["diffcyt_DS_limma"]][, "p_val"], 
                                         diffcyt_DS_LMM = data[["diffcyt_DS_LMM"]][, "p_val"]), 
                       padj = data.frame(diffcyt_DS_limma = data[["diffcyt_DS_limma"]][, "p_adj"], 
                                         diffcyt_DS_LMM = data[["diffcyt_DS_LMM"]][, "p_adj"]), 
                       truth = data.frame(B_cell = data[["diffcyt_DS_limma"]][, "B_cell"]))
# calculate performance scores
# (note: can ignore warning messages when 'padj' not available)
cobraperf <- calculate_performance(cobradata, 
                                   binary_truth = "B_cell", 
                                   aspects = c("roc", "fdrtpr", "fdrtprcurve", "tpr", "fpr"))
# color scheme: one color per method
colors <- c("firebrick1", "darkviolet")
# seq_along() is safe even for empty input (1:length() would yield c(1, 0))
colors <- colors[seq_along(data)]
names(colors) <- names(data)
# prepare plotting object
cobraplot <- prepare_data_for_plot(cobraperf, 
                                   colorscheme = colors, 
                                   conditionalfill = FALSE)
# re-order legend
cobraplot <- reorder_levels(cobraplot, levels = names(data))
# ----------
# ROC curves
# ----------
# create plot
p_ROC <- 
  plot_roc(cobraplot, linewidth = 0.75) + 
  coord_fixed() + 
  xlab("False positive rate") + 
  ylab("True positive rate") + 
  ggtitle("BCR-XL-sim: main results", subtitle = "ROC curve") + 
  theme_bw() + 
  theme(strip.text.x = element_blank()) + 
  guides(color = guide_legend("method"))
# save plot
# pass the plot explicitly: relying on the last_plot() default is fragile,
# especially since cowplot masks ggplot2::ggsave (see library() note above)
fn <- file.path(DIR_PLOTS, "panels", "results_BCR_XL_sim_diffcyt_main_ROC.pdf")
ggsave(fn, plot = p_ROC, width = 4.75, height = 3.5)
# --------------
# TPR-FDR curves
# --------------
# create plot
p_TPRFDR <- 
  plot_fdrtprcurve(cobraplot, linewidth = 0.75, pointsize = 4) + 
  scale_shape_manual(values = c(15, 19, 17), labels = c(0.01, 0.05, 0.1)) + 
  coord_fixed() + 
  xlab("False discovery rate") + 
  ylab("True positive rate") + 
  scale_x_continuous(breaks = seq(0, 1, by = 0.2)) + 
  ggtitle("BCR-XL-sim: main results", subtitle = "TPR vs. FDR") + 
  theme_bw() + 
  theme(strip.text.x = element_blank()) + 
  guides(shape = guide_legend("FDR threshold", override.aes = list(size = 4), order = 1), 
         color = guide_legend("method", order = 2))
# save plot
# pass the plot explicitly instead of relying on last_plot()
fn <- file.path(DIR_PLOTS, "panels", "results_BCR_XL_sim_diffcyt_main_TPRFDR.pdf")
ggsave(fn, plot = p_TPRFDR, width = 4.75, height = 3.5)
# ---------
# TPR plots
# ---------
# create plot
p_TPR <- 
  plot_tpr(cobraplot, pointsize = 4) + 
  scale_shape_manual(values = c(15, 19, 17), labels = c(0.01, 0.05, 0.1)) + 
  #coord_fixed() + 
  xlab("True positive rate") + 
  ggtitle("BCR-XL-sim: main results", subtitle = "TPR") + 
  theme_bw() + 
  theme(strip.text.x = element_blank(), 
        axis.text.y = element_blank()) + 
  guides(shape = guide_legend("FDR threshold", override.aes = list(size = 4), order = 1), 
         color = guide_legend("method", override.aes = list(shape = 19, size = 4), order = 2))
# save plot
# pass the plot explicitly instead of relying on last_plot()
fn <- file.path(DIR_PLOTS, "panels", "results_BCR_XL_sim_diffcyt_main_TPR.pdf")
ggsave(fn, plot = p_TPR, width = 4.5, height = 3.5)
# ---------
# FPR plots
# ---------
# create plot
p_FPR <- 
  plot_fpr(cobraplot, pointsize = 4) + 
  scale_shape_manual(values = c(15, 19, 17), labels = c(0.01, 0.05, 0.1)) + 
  #coord_fixed() + 
  xlab("False positive rate") + 
  ggtitle("BCR-XL-sim: main results", subtitle = "FPR") + 
  theme_bw() + 
  theme(strip.text.x = element_blank(), 
        axis.text.y = element_blank()) + 
  guides(shape = guide_legend("FDR threshold", override.aes = list(size = 4), order = 1), 
         color = guide_legend("method", override.aes = list(shape = 19, size = 4), order = 2))
# save plot
# pass the plot explicitly instead of relying on last_plot()
fn <- file.path(DIR_PLOTS, "panels", "results_BCR_XL_sim_diffcyt_main_FPR.pdf")
ggsave(fn, plot = p_FPR, width = 4.5, height = 3.5)
##################
# Multi-panel plot
##################
plots_list <- list(p_ROC, p_TPRFDR, p_TPR, p_FPR)
# modify plot elements: promote subtitle to title, drop individual legends
plots_list <- lapply(plots_list, function(p) {
  p + 
    labs(title = p$labels$subtitle, subtitle = element_blank()) + 
    theme(legend.position = "none")
})
plots_multi <- plot_grid(plotlist = plots_list, 
                         nrow = 1, ncol = 4, align = "hv", axis = "bl")
# add combined title
title_single <- p_ROC$labels$title
plots_title <- ggdraw() + draw_label(title_single)
plots_multi <- plot_grid(plots_title, plots_multi, ncol = 1, rel_heights = c(1, 7))
# add combined legend (taken from the TPR-FDR panel)
legend_single <- get_legend(plots_list[[2]] + theme(legend.position = "right"))
plots_multi <- plot_grid(plots_multi, legend_single, nrow = 1, rel_widths = c(6, 1))
# save multi-panel plot
# pass the plot explicitly instead of relying on last_plot()
fn <- file.path(DIR_PLOTS, "results_BCR_XL_sim_diffcyt_main_performance.pdf")
ggsave(fn, plot = plots_multi, width = 10, height = 2.625)
#################################
# Multi-panel plot: 2 panels only
#################################
plots_list <- list(p_ROC, p_TPRFDR)
# modify plot elements: promote subtitle to title, drop individual legends
plots_list <- lapply(plots_list, function(p) {
  p + 
    labs(title = p$labels$subtitle, subtitle = element_blank()) + 
    theme(legend.position = "none")
})
plots_multi <- plot_grid(plotlist = plots_list, 
                         nrow = 1, ncol = 2, align = "hv", axis = "bl")
# add combined title
title_single <- p_ROC$labels$title
plots_title <- ggdraw() + draw_label(title_single)
plots_multi <- plot_grid(plots_title, plots_multi, ncol = 1, rel_heights = c(1, 7))
# add combined legend (taken from the TPR-FDR panel)
legend_single <- get_legend(plots_list[[2]] + theme(legend.position = "right"))
plots_multi <- plot_grid(plots_multi, legend_single, nrow = 1, rel_widths = c(3.2, 1))
# save multi-panel plot
# pass the plot explicitly instead of relying on last_plot()
fn <- file.path(DIR_PLOTS, "results_BCR_XL_sim_diffcyt_main_performance_2_panels.pdf")
ggsave(fn, plot = plots_multi, width = 6, height = 2.625)
###################################
# Save timestamp file for Makefiles
###################################
# only the file's modification time is consumed by the Makefiles, so the
# exact content is unimportant; writeLines() avoids sink(), which would
# leave console output redirected if an error occurred before the
# matching sink() call
file_timestamp <- file.path(DIR_PLOTS, "timestamp.txt")
writeLines(format(Sys.time()), con = file_timestamp)
|
library(xts)
### Name: coredata.xts
### Title: Extract/Replace Core Data of an xts Object
### Aliases: coredata.xts xcoredata xcoredata<-
### Keywords: utilities
### ** Examples
# load the example matrix shipped with xts
data(sample_matrix)
# build an xts object; 'myattr' is carried along as an extra attribute
obj <- as.xts(sample_matrix, myattr = 100)
# core data, stripped of xts-specific attributes
coredata(obj)
# the non-core attributes of the xts object (presumably includes 'myattr' -- verify)
xcoredata(obj)
| /data/genthat_extracted_code/xts/examples/coredata.xts.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 269 | r | library(xts)
### Name: coredata.xts
### Title: Extract/Replace Core Data of an xts Object
### Aliases: coredata.xts xcoredata xcoredata<-
### Keywords: utilities
### ** Examples
# load the example matrix shipped with xts
data(sample_matrix)
# build an xts object; 'myattr' is carried along as an extra attribute
obj <- as.xts(sample_matrix, myattr = 100)
# core data, stripped of xts-specific attributes
coredata(obj)
# the non-core attributes of the xts object (presumably includes 'myattr' -- verify)
xcoredata(obj)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/avg_meth.R
\name{gpatterns.global_meth_trend}
\alias{gpatterns.global_meth_trend}
\title{Plot global methylation stratified on other tracks}
\usage{
gpatterns.global_meth_trend(
tracks,
strat_track = .gpatterns.cg_cont_500_track,
strat_breaks = seq(0, 0.08, by = 0.002),
intervals = .gpatterns.genome_cpgs_intervals,
iterator = .gpatterns.genome_cpgs_intervals,
min_cov = NULL,
min_cgs = NULL,
names = NULL,
groups = NULL,
group_name = NULL,
include.lowest = TRUE,
ncol = 2,
nrow = 2,
width = 600,
height = 560,
fig_fn = NULL,
xlab = strat_track,
ylim = c(0, 1),
title = "",
legend = TRUE,
colors = NULL,
parallel = getOption("gpatterns.parallel")
)
}
\arguments{
\item{tracks}{tracks to plot}
\item{strat_track}{track to stratify average methylation by. default is CG content}
\item{strat_breaks}{breaks to determine the bins of strat_track}
\item{intervals}{genomic scope for which the function is applied}
\item{iterator}{track expression iterator (of both tracks and strat_track)}
\item{min_cov}{minimal coverage of each track}
\item{min_cgs}{minimal number of CpGs per bin}
\item{names}{alternative names for the track}
\item{groups}{a vector of the same length as \code{tracks} with a group for each track. Each group will be shown on a different facet.}
\item{group_name}{name of the grouping variable (e.g. tumor, sample, patient, experiment)}
\item{include.lowest}{if 'TRUE', the lowest value of the range determined by breaks is included}
\item{ncol}{number of columns}
\item{nrow}{number of rows}
\item{width}{plot width (if fig_fn is not NULL)}
\item{height}{plot height (if fig_fn is not NULL)}
\item{fig_fn}{output filename for the figure (if NULL, figure would be returned)}
\item{xlab}{label for the x axis}
\item{ylim}{ylim of the plot}
\item{title}{title for the plot}
\item{legend}{add legend}
\item{colors}{custom colors}
\item{parallel}{compute trends in parallel}
}
\value{
list with trend data frame (under 'trend') and the plot (under 'p')
}
\description{
calculates the average methylation \code{(m / m + um)} in each
bin of \code{strat_track} and plots it. By default, plots the average methylation
in different bins of CpG content. This can be used as a sanity check for methylation
data - in general, methylation is high for regions with low CpG density,
and low for CpG dense regions (e.g. CpG islands).
}
\examples{
}
| /man/gpatterns.global_meth_trend.Rd | no_license | tanaylab/gpatterns | R | false | true | 2,472 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/avg_meth.R
\name{gpatterns.global_meth_trend}
\alias{gpatterns.global_meth_trend}
\title{Plot global methylation stratified on other tracks}
\usage{
gpatterns.global_meth_trend(
tracks,
strat_track = .gpatterns.cg_cont_500_track,
strat_breaks = seq(0, 0.08, by = 0.002),
intervals = .gpatterns.genome_cpgs_intervals,
iterator = .gpatterns.genome_cpgs_intervals,
min_cov = NULL,
min_cgs = NULL,
names = NULL,
groups = NULL,
group_name = NULL,
include.lowest = TRUE,
ncol = 2,
nrow = 2,
width = 600,
height = 560,
fig_fn = NULL,
xlab = strat_track,
ylim = c(0, 1),
title = "",
legend = TRUE,
colors = NULL,
parallel = getOption("gpatterns.parallel")
)
}
\arguments{
\item{tracks}{tracks to plot}
\item{strat_track}{track to stratify average methylation by. default is CG content}
\item{strat_breaks}{breaks to determine the bins of strat_track}
\item{intervals}{genomic scope for which the function is applied}
\item{iterator}{track expression iterator (of both tracks and strat_track)}
\item{min_cov}{minimal coverage of each track}
\item{min_cgs}{minimal number of CpGs per bin}
\item{names}{alternative names for the track}
\item{groups}{a vector of the same length as \code{tracks} with a group for each track. Each group will be shown on a different facet.}
\item{group_name}{name of the grouping variable (e.g. tumor, sample, patient, experiment)}
\item{include.lowest}{if 'TRUE', the lowest value of the range determined by breaks is included}
\item{ncol}{number of columns}
\item{nrow}{number of rows}
\item{width}{plot width (if fig_fn is not NULL)}
\item{height}{plot height (if fig_fn is not NULL)}
\item{fig_fn}{output filename for the figure (if NULL, figure would be returned)}
\item{xlab}{label for the x axis}
\item{ylim}{ylim of the plot}
\item{title}{title for the plot}
\item{legend}{add legend}
\item{colors}{custom colors}
\item{parallel}{compute trends in parallel}
}
\value{
list with trend data frame (under 'trend') and the plot (under 'p')
}
\description{
calculates the average methylation \code{(m / m + um)} in each
bin of \code{strat_track} and plots it. By default, plots the average methylation
in different bins of CpG content. This can be used as a sanity check for methylation
data - in general, methylation is high for regions with low CpG density,
and low for CpG dense regions (e.g. CpG islands).
}
\examples{
}
|
## this will replace soilDB::estimateColorMixture() as an alternative / fallback
## method for mixMunsell() when reference spectra are missing
.estimateColorMixture <- function(chips, w) {
  # fallback mixture estimate: weighted mean in CIELAB space, snapped to the
  # closest Munsell chip
  # chips: vector of colors in Munsell notation
  # w: vector of mixing weights, one per chip
  
  # CIELAB coordinates for each chip
  lab <- parseMunsell(chips, returnLAB = TRUE)
  
  # weighted mean within each CIELAB dimension
  mu <- vapply(c('L', 'A', 'B'), function(k) {
    weighted.mean(lab[[k]], w = w, na.rm = TRUE)
  }, numeric(1))
  
  # CIELAB -> sRGB, D65 reference white on both sides
  mixed.color <- data.frame(convertColor(matrix(mu, nrow = 1), from = 'Lab', to = 'sRGB', from.ref.white = 'D65', to.ref.white = 'D65'))
  names(mixed.color) <- c('r', 'g', 'b')
  
  # sRGB -> closest Munsell chip
  m <- rgb2munsell(mixed.color[, c('r', 'g', 'b')])
  
  # pack into the structure shared with the spectral methods;
  # scaledDistance only applies to spectral distances evaluated against the
  # entire reference library, so it is NA here
  data.frame(
    munsell = sprintf('%s %s/%s', m$hue, m$value, m$chroma),
    distance = m$sigma,
    scaledDistance = NA,
    distanceMetric = 'dE00',
    mixingMethod = 'estimate',
    stringsAsFactors = FALSE
  )
}
# helper function for printing out value / chroma ranges by hue
.summarizeMunsellSpectraRanges <- function() {
  # print-friendly summary of value / chroma ranges available per hue
  # in the packaged Munsell spectral library
  
  # make R CMD CHECK happy
  munsell.spectra <- NULL
  
  # note: this is incompatible with LazyData: true
  # load look-up table bundled with this package
  load(system.file("data/munsell.spectra.rda", package="aqp")[1])
  
  # order hues by standard hue position
  munsell.spectra$hue <- factor(munsell.spectra$hue, levels = huePosition(returnHues = TRUE))
  
  # drop non-standard hues (NA after the factor conversion above)
  munsell.spectra <- na.omit(munsell.spectra)
  
  # one summary row per hue: value and chroma ranges as "min-max" strings
  pieces <- lapply(split(munsell.spectra, munsell.spectra$hue), function(d) {
    data.frame(
      hue = d$hue[1],
      value = sprintf("%s-%s", min(d$value), max(d$value)),
      chroma = sprintf("%s-%s", min(d$chroma), max(d$chroma)),
      stringsAsFactors = FALSE
    )
  })
  
  do.call('rbind', pieces)
}
## TODO: is this generic enough to use elsewhere?
# weighted geometric mean
# https://en.wikipedia.org/wiki/Weighted_geometric_mean
# note: function will fail if any(v) == 0
.wgm <- function(v, w) {
  # weighted geometric mean: exp of the weighted arithmetic mean of log(v)
  # note: any zero in v gives log(0) = -Inf, so this only makes sense for v > 0
  exp(weighted.mean(log(v), w))
}
# another possible approach using only sRGB coordinates
# http://scottburns.us/wp-content/uploads/2015/04/ILSS.txt
# related ticket
# https://github.com/ncss-tech/aqp/issues/101
# in response to the commentary here:
# https://soiltxnmyforum.cals.vt.edu/forum/read.php?3,1984,1987#msg-1987
# inspiration / calculations based on:
# https://arxiv.org/ftp/arxiv/papers/1710/1710.06364.pdf
# related discussion here:
# https://stackoverflow.com/questions/10254022/implementing-kubelka-munk-like-krita-to-mix-colours-color-like-paint/29967630#29967630
# base spectral library:
# http://www.munsellcolourscienceforpainters.com/MunsellResources/SpectralReflectancesOf2007MunsellBookOfColorGlossy.txt
# see /misc/util/Munsell for:
# * spectral library prep
# * interpolation of odd chroma
# * reshaping for rapid look-up
#'
#' @title Mix Munsell Colors via Spectral Library
#'
#' @description Simulate mixing of colors in Munsell notation, similar to the way in which mixtures of pigments operate.
#'
#' @param x vector of colors in Munsell notation
#'
#' @param w vector of proportions, can sum to any number
#'
#' @param mixingMethod approach used to simulate a mixture:
#' * `reference` : simulate a subtractive mixture of pigments, selecting `n` closest reference spectra from [`munsell.spectra.wide`]
#'
#' * `exact`: simulate a subtractive mixture of pigments, color conversion via CIE1931 color-matching functions (see details)
#'
#' * `estimate` : closest Munsell chip to a weighted mean of CIELAB coordinates
#'
#' * `adaptive` : use reference spectra when possible, falling-back to weighted mean of CIELAB coordinates
#'
#' @param n number of closest matching color chips (`mixingMethod = spectra` only)
#'
#' @param keepMixedSpec keep weighted geometric mean spectra, final result is a `list` (`mixingMethod = spectra` only)
#'
#' @param distThreshold spectral distance used to compute `scaledDistance`, default value is based on an analysis of spectral distances associated with adjacent Munsell color chips. This argument is only used with `mixingMethod = 'reference'`.
#'
#' @param ... additional arguments to [`spec2Munsell`]
#'
#' @author D.E. Beaudette
#'
#' @references
#'
#' Marcus, R.T. (1998). The Measurement of Color. In K. Nassau (Ed.), Color for Science, Art, and Technology (pp. 32-96). North-Holland.
#'
#'
#'
#' \itemize{
#' \item{inspiration / calculations based on the work of Scott Burns: }{\url{https://arxiv.org/ftp/arxiv/papers/1710/1710.06364.pdf}}
#'
#' \item{related discussion on Stack Overflow: }{\url{https://stackoverflow.com/questions/10254022/implementing-kubelka-munk-like-krita-to-mix-colours-color-like-paint/29967630#29967630}}
#'
#' \item{spectral library source: }{\url{https://www.munsellcolourscienceforpainters.com/MunsellResources/SpectralReflectancesOf2007MunsellBookOfColorGlossy.txt}}
#'
#' }
#'
#'
#' @details
#' An accurate simulation of pigment mixtures ("subtractive" color mixtures) is incredibly complex due to factors that aren't easily measured or controlled: pigment solubility, pigment particle size distribution, water content, substrate composition, and physical obstruction to name a few. That said, it is possible to simulate reasonable, subtractive color mixtures given a reference spectra library (350-800nm) and some assumptions about pigment qualities and lighting. For the purposes of estimating a mixture of soil colors (these are pigments after all) we can relax these assumptions and assume a standard light source. The only missing piece is the spectral library for all Munsell chips in our color books.
#'
#' Thankfully, [Scott Burns has outlined the entire process](https://arxiv.org/ftp/arxiv/papers/1710/1710.06364.pdf), and Paul Centore has provided a Munsell color chip [reflectance spectra library](https://www.munsellcolourscienceforpainters.com). The estimation of a subtractive mixture of soil colors can proceed as follows:
#'
#' 1. look up the associated spectra for each color in `x`
#' 2. compute the weighted (`w` argument) geometric mean of the spectra
#' 3. convert the spectral mixture to the closest Munsell color via:
#' * search for the closest `n` matching spectra in the reference library (`mixingMethod = 'reference'`)
#' * direct conversion of spectra to closest Munsell color via [`spec2Munsell`] (`mixingMethod = 'exact'`)
#' 4. suggest resulting Munsell chip(s) as the best candidate for a simulated mixture
#'
#' Key assumptions include:
#'
#' * similar particle size distribution
#' * similar mineralogy (i.e. pigmentation qualities)
#' * similar water content.
#'
#' For the purposes of estimating (for example) a "mixed soil color within the top 18cm of soil" these assumptions are usually valid. Again, these are estimates that are ultimately "snapped" to the nearest chip and not do not need to approach the accuracy of paint-matching systems.
#'
#' A message is printed when `scaledDistance` is larger than 1.
#'
#' @return A `data.frame` with the closest matching Munsell color(s):
#'
#' * `munsell`: Munsell notation of the n-closest spectra
#' * `distance`: spectral (Gower) distance to the n-closest spectra
#' * `scaledDistance`: spectral distance scaled by `distThreshold`
#' * `mixingMethod`: method used for each mixture
#'
#' When `keepMixedSpec = TRUE` then a `list`:
#'
#' * `mixed`: a `data.frame` containing the same elements as above
#' * `spec`: spectra for the 1st closest match
#'
#'
#'
#' @seealso \code{\link{munsell.spectra}}
#'
#' @examples
#'
#' # try a couple different methods
#' cols <- c('10YR 6/2', '5YR 5/6', '10B 4/4')
#' mixMunsell(cols, mixingMethod = 'reference')
#' mixMunsell(cols, mixingMethod = 'exact')
#' mixMunsell(cols, mixingMethod = 'estimate')
#'
#'
mixMunsell <- function(x, w = rep(1, times = length(x)) / length(x), mixingMethod = c('reference', 'exact', 'estimate', 'adaptive', 'spectra'), n = 1, keepMixedSpec = FALSE, distThreshold = 0.025, ...) {
  
  # satisfy R CMD check
  munsell.spectra.wide <- NULL
  
  # enforce mixing method
  mixingMethod <- match.arg(mixingMethod)
  
  # backwards compatibility: "spectra" will be deprecated in the future
  if(mixingMethod == 'spectra') {
    message('please use `mixingMethod = "reference"`')
    mixingMethod <- 'reference'
  }
  
  # multiple matches only possible when using mixingMethod == 'reference'
  # (scalar condition: use short-circuit &&)
  if((n > 1) && mixingMethod != 'reference') {
    stop('`n` is only valid for `mixingMethod = "reference"`', call. = FALSE)
  }
  
  # mixed spectra only available from the spectral methods
  if(keepMixedSpec && ! mixingMethod %in% c('reference', 'exact')) {
    stop('`keepMixedSpec` is only valid for mixingMethod = "reference" or "exact"', call. = FALSE)
  }
  
  # can't mix a single color, just give it back at 0 distance
  if (length(unique(x)) == 1) {
    res <- data.frame(
      munsell = x[1],
      distance = 0,
      scaledDistance = NA,
      distanceMetric = NA,
      mixingMethod = NA,
      stringsAsFactors = FALSE
    )
    return(res)
  }
  
  # must have as many weights as length of x
  if (length(x) != length(w) && length(w) != 1) {
    stop('w should have same length as x or length one', call. = FALSE)
  } else if (length(w) == 1) {
    # cannot mix with zero weights
    stopifnot(w > 0)
    # a recycled weight is same as function default
    w <- rep(1, times = length(x)) / length(x)
  }
  
  ## TODO: move 0-weight / NA handling up in the logic
  # more informative error for colors missing
  if (any(w[is.na(x)] > 0)) {
    stop('cannot mix missing (NA) colors with weight greater than zero', call. = FALSE)
  }
  # more informative error for weights missing
  if (any(is.na(w))) {
    stop('cannot mix colors with missing (NA) weight', call. = FALSE)
  }
  
  # remove 0-weighted colors
  x <- x[w > 0]
  w <- w[w > 0]
  
  # x with weights > 0 must contain valid Munsell
  if (any(is.na(parseMunsell(x)))) {
    stop('input must be valid Munsell notation, neutral hues and missing not supported', call. = FALSE)
  }
  
  ## main branch: mixing method
  
  # estimate via wtd. mean CIELAB
  if(mixingMethod == 'estimate') {
    
    # simple estimation by weighted mean CIELAB
    res <- .estimateColorMixture(chips = x, w = w)
    
    # stop here
    return(res)
    
  } else {
    # spectral mixing if possible
    
    # wide version for fast searches
    load(system.file("data/munsell.spectra.wide.rda", package="aqp")[1])
    
    # subset reference spectra for colors
    # note that search results are not in the same order as x
    # result are columns of spectra
    munsell.names <- names(munsell.spectra.wide)
    idx <- which(munsell.names %in% x)
    s <- munsell.spectra.wide[, idx, drop = FALSE]
    
    # sanity check: if there aren't sufficient reference spectra then return NA
    # must be at least the same number of spectra (columns) as unique colors specified
    if(ncol(s) < length(unique(x))){
      
      # helpful message
      missing.chips <- setdiff(x, munsell.names)
      msg <- sprintf(
        'reference spectra not available: %s',
        paste(missing.chips, collapse = ', ')
      )
      message(msg)
      
      # fall-back to wt. mean LAB
      if(mixingMethod == 'adaptive') {
        # assumes cleaned data
        res <- .estimateColorMixture(chips = x, w = w)
      } else {
        # otherwise return an empty result
        res <- data.frame(
          munsell = NA,
          distance = NA,
          scaledDistance = NA,
          distanceMetric = NA,
          mixingMethod = NA,
          stringsAsFactors = FALSE
        )
      }
      
      # done
      return(res)
    }
    
    ## proceeding to simulate spectral mixture
    
    # aggregate weights by "chip" -- in case length(x) != length(unique(x))
    # hoisted out of the wavelength loop below: weights do not vary by wavelength
    wagg <- aggregate(w, by = list(chip = x), FUN = sum)
    # align aggregated weights with the order of the spectra columns
    w.chips <- wagg$x[match(names(s), wagg$chip)]
    
    # empty vector for mixture
    mixed <- vector(mode = 'numeric', length = nrow(s))
    
    # iterate over wavelength (rows of the spectra subset)
    for(i in seq_along(mixed)) {
      # select the i-th wavelength (row) and down-grade to a vector
      vals <- unlist(s[i, ])
      # mix via weighted geometric mean
      mixed[i] <- .wgm(v = vals, w = w.chips)
    }
    
    ## "exact" matching
    if(mixingMethod %in% c('exact', 'adaptive')) {
      
      ##
      # S = R * illuminant (D65 is the default)
      # XYZ = AT %*% standard observer (CIE1964 is the default)
      # XYZ -> sRGB -> Munsell
      mx <- spec2Munsell(mixed, ...)
      
      # NOTE: ... are passed to rgb2munsell()
      # convert = TRUE: mx is a data.frame
      # convert = FALSE: mx is a matrix
      if(inherits(mx, 'matrix')) {
        # mx is a matrix
        dimnames(mx)[[2]] <- c('r', 'g', 'b')
        # include sRGB coordinates, this is different than what is typically returned by this function
        res <- data.frame(
          mx,
          munsell = NA,
          distance = NA,
          scaledDistance = NA,
          distanceMetric = NA,
          mixingMethod = 'exact',
          stringsAsFactors = FALSE
        )
      } else {
        # mx is a data.frame
        res <- data.frame(
          munsell = sprintf("%s %s/%s", mx$hue, mx$value, mx$chroma),
          distance = mx$sigma,
          scaledDistance = NA,
          distanceMetric = 'dE00',
          mixingMethod = 'exact',
          stringsAsFactors = FALSE
        )
      }
      # final-cleanup and return is performed outside of if/else
      
    } else {
      
      ## reference "spectra" method
      
      # gower::gower_topn() is only used in this branch, so the dependency is
      # only enforced here (the other methods work without `gower` installed)
      if(!requireNamespace('gower', quietly = TRUE))
        stop('package `gower` is required', call. = FALSE)
      
      ## operations on data.table likely faster
      # Gower distance: looks good, ~5x faster due to compiled code
      # https://cran.r-project.org/web/packages/gower/vignettes/intro.pdf
      
      ## TODO: time wasted here
      # reshape reference spectra: wavelength to columns
      z <- t(munsell.spectra.wide[, -1])
      
      # top n matches
      d <- gower::gower_topn(
        data.frame(rbind(mixed)),
        data.frame(z),
        n = n
      )
      
      res <- data.frame(
        munsell = dimnames(z)[[1]][d$index],
        distance = d$distance[, 1],
        scaledDistance = d$distance[, 1] / distThreshold,
        distanceMetric = 'Gower',
        mixingMethod = 'reference',
        stringsAsFactors = FALSE
      )
      
      # report possibly problematic mixtures
      if(any(res$scaledDistance > 1)) {
        message('closest match has a spectral distance that is large, results may be unreliable')
      }
    }
    
    ## all done with "exact", "reference", and "adaptive"
    
    # clean-up row names
    row.names(res) <- NULL
    
    # optionally return weighted geometric mean (mixed) spectra
    if(keepMixedSpec) {
      return(
        list(
          mixed = res,
          spec = mixed
        )
      )
    } else {
      # not returning the mixed spectra
      return(res)
    }
    
  }
  
}
| /R/mixMunsell.R | no_license | Memo1986/aqp | R | false | false | 15,664 | r |
## this will replace soilDB::estimateColorMixture() as an alternative / fallback
## method for mixMunsell() when reference spectra are missing
# estimate a color mixture as the weighted mean of CIELAB coordinates,
# snapped to the closest Munsell chip; used as a fallback when reference
# spectra are not available for one or more chips
#   chips: vector of colors in Munsell notation
#   w: vector of mixing weights, one per chip
# returns: single-row data.frame with columns munsell, distance (dE00 sigma),
#   scaledDistance (NA here), distanceMetric, mixingMethod
.estimateColorMixture <- function(chips, w) {
  # convert to CIELAB
  .lab <- parseMunsell(chips, returnLAB = TRUE)
  # weighted mean, computed independently within each CIELAB dimension
  .L <- weighted.mean(.lab[['L']], w = w, na.rm = TRUE)
  .A <- weighted.mean(.lab[['A']], w = w, na.rm = TRUE)
  .B <- weighted.mean(.lab[['B']], w = w, na.rm = TRUE)
  # LAB -> sRGB, D65 reference white on both sides
  mixed.color <- data.frame(convertColor(cbind(.L, .A, .B), from='Lab', to='sRGB', from.ref.white='D65', to.ref.white = 'D65'))
  names(mixed.color) <- c('r', 'g', 'b')
  # back to Munsell (closest chip)
  m <- rgb2munsell(mixed.color[, c('r', 'g', 'b')])
  # pack into expected structure
  # scaled distance is only for spectral distance evaluated against the entire library
  res <- data.frame(
    munsell = sprintf('%s %s/%s', m$hue, m$value, m$chroma),
    distance = m$sigma,
    scaledDistance = NA,
    distanceMetric = 'dE00',
    mixingMethod = 'estimate',
    stringsAsFactors = FALSE
  )
  return(res)
}
# helper function for printing out value / chroma ranges by hue
# summarize the value / chroma ranges available per hue in the packaged
# Munsell spectral library; returns a data.frame with one row per hue and
# "min-max" range strings for value and chroma
.summarizeMunsellSpectraRanges <- function() {
  # make R CMD CHECK happy
  munsell.spectra <- NULL
  # note: this is incompatible with LazyData: true
  # load look-up table from our package
  load(system.file("data/munsell.spectra.rda", package="aqp")[1])
  # set hue position (orders hues by standard hue position)
  munsell.spectra$hue <- factor(munsell.spectra$hue, levels = huePosition(returnHues = TRUE))
  # remove non-standard hues (NA after the factor conversion above)
  munsell.spectra <- na.omit(munsell.spectra)
  # one summary row per hue
  x <- split(munsell.spectra, munsell.spectra$hue)
  x <- lapply(x, function(i) {
    data.frame(
      hue = i$hue[1],
      value = sprintf("%s-%s", min(i$value), max(i$value)),
      chroma = sprintf("%s-%s", min(i$chroma), max(i$chroma)),
      stringsAsFactors = FALSE
    )
  })
  # flatten the per-hue list into a single data.frame
  x <- do.call('rbind', x)
  return(x)
}
## TODO: is this generic enough to use elsewhere?
# weighted geometric mean
# https://en.wikipedia.org/wiki/Weighted_geometric_mean
# note: function will fail if any(v) == 0
.wgm <- function(v, w) {
  # weighted arithmetic mean on the log scale, back-transformed:
  # exp( sum(w * log(v)) / sum(w) )
  exp(sum(w * log(v)) / sum(w))
}
# another possible approach using only sRGB coordinates
# http://scottburns.us/wp-content/uploads/2015/04/ILSS.txt
# related ticket
# https://github.com/ncss-tech/aqp/issues/101
# in response to the commentary here:
# https://soiltxnmyforum.cals.vt.edu/forum/read.php?3,1984,1987#msg-1987
# inspiration / calculations based on:
# https://arxiv.org/ftp/arxiv/papers/1710/1710.06364.pdf
# related discussion here:
# https://stackoverflow.com/questions/10254022/implementing-kubelka-munk-like-krita-to-mix-colours-color-like-paint/29967630#29967630
# base spectral library:
# http://www.munsellcolourscienceforpainters.com/MunsellResources/SpectralReflectancesOf2007MunsellBookOfColorGlossy.txt
# see /misc/util/Munsell for:
# * spectral library prep
# * interpolation of odd chroma
# * reshaping for rapid look-up
#'
#' @title Mix Munsell Colors via Spectral Library
#'
#' @description Simulate mixing of colors in Munsell notation, similar to the way in which mixtures of pigments operate.
#'
#' @param x vector of colors in Munsell notation
#'
#' @param w vector of proportions, can sum to any number
#'
#' @param mixingMethod approach used to simulate a mixture:
#' * `reference` : simulate a subtractive mixture of pigments, selecting `n` closest reference spectra from [`munsell.spectra.wide`]
#'
#' * `exact`: simulate a subtractive mixture of pigments, color conversion via CIE1931 color-matching functions (see details)
#'
#' * `estimate` : closest Munsell chip to a weighted mean of CIELAB coordinates
#'
#' * `adaptive` : use reference spectra when possible, falling-back to weighted mean of CIELAB coordinates
#'
#' @param n number of closest matching color chips (`mixingMethod = 'reference'` only)
#'
#' @param keepMixedSpec keep weighted geometric mean spectra, final result is a `list` (`mixingMethod = 'reference'` or `'exact'` only)
#'
#' @param distThreshold spectral distance used to compute `scaledDistance`, default value is based on an analysis of spectral distances associated with adjacent Munsell color chips. This argument is only used with `mixingMethod = 'reference'`.
#'
#' @param ... additional arguments to [`spec2Munsell`]
#'
#' @author D.E. Beaudette
#'
#' @references
#'
#' Marcus, R.T. (1998). The Measurement of Color. In K. Nassau (Ed.), Color for Science, Art, and Technology (pp. 32-96). North-Holland.
#'
#'
#'
#' \itemize{
#' \item{inspiration / calculations based on the work of Scott Burns: }{\url{https://arxiv.org/ftp/arxiv/papers/1710/1710.06364.pdf}}
#'
#' \item{related discussion on Stack Overflow: }{\url{https://stackoverflow.com/questions/10254022/implementing-kubelka-munk-like-krita-to-mix-colours-color-like-paint/29967630#29967630}}
#'
#' \item{spectral library source: }{\url{https://www.munsellcolourscienceforpainters.com/MunsellResources/SpectralReflectancesOf2007MunsellBookOfColorGlossy.txt}}
#'
#' }
#'
#'
#' @details
#' An accurate simulation of pigment mixtures ("subtractive" color mixtures) is incredibly complex due to factors that aren't easily measured or controlled: pigment solubility, pigment particle size distribution, water content, substrate composition, and physical obstruction to name a few. That said, it is possible to simulate reasonable, subtractive color mixtures given a reference spectra library (350-800nm) and some assumptions about pigment qualities and lighting. For the purposes of estimating a mixture of soil colors (these are pigments after all) we can relax these assumptions and assume a standard light source. The only missing piece is the spectral library for all Munsell chips in our color books.
#'
#' Thankfully, [Scott Burns has outlined the entire process](https://arxiv.org/ftp/arxiv/papers/1710/1710.06364.pdf), and Paul Centore has provided a Munsell color chip [reflectance spectra library](https://www.munsellcolourscienceforpainters.com). The estimation of a subtractive mixture of soil colors can proceed as follows:
#'
#' 1. look up the associated spectra for each color in `x`
#' 2. compute the weighted (`w` argument) geometric mean of the spectra
#' 3. convert the spectral mixture to the closest Munsell color via:
#' * search for the closest `n` matching spectra in the reference library (`mixtureMethod = 'reference'`)
#' * direct conversion of spectra to closest Munsell color via [`spec2Munsell`] ( (`mixtureMethod = 'exact'`))
#' 4. suggest resulting Munsell chip(s) as the best candidate for a simulated mixture
#'
#' Key assumptions include:
#'
#' * similar particle size distribution
#' * similar mineralogy (i.e. pigmentation qualities)
#' * similar water content.
#'
#' For the purposes of estimating (for example) a "mixed soil color within the top 18cm of soil" these assumptions are usually valid. Again, these are estimates that are ultimately "snapped" to the nearest chip and not do not need to approach the accuracy of paint-matching systems.
#'
#' A message is printed when `scaledDistance` is larger than 1.
#'
#' @return A `data.frame` with the closest matching Munsell color(s):
#'
#' * `munsell`: Munsell notation of the n-closest spectra
#' * `distance`: spectral (Gower) distance to the n-closest spectra
#' * `scaledDistance`: spectral distance scaled by `distThreshold`
#' * `mixingMethod`: method used for each mixture
#'
#' When `keepMixedSpec = TRUE` then a `list`:
#'
#' * `mixed`: a `data.frame` containing the same elements as above
#' * `spec`: spectra for the 1st closest match
#'
#'
#'
#' @seealso \code{\link{munsell.spectra}}
#'
#' @examples
#'
#' # try a couple different methods
#' cols <- c('10YR 6/2', '5YR 5/6', '10B 4/4')
#' mixMunsell(cols, mixingMethod = 'reference')
#' mixMunsell(cols, mixingMethod = 'exact')
#' mixMunsell(cols, mixingMethod = 'estimate')
#'
#'
mixMunsell <- function(x, w = rep(1, times = length(x)) / length(x), mixingMethod = c('reference', 'exact', 'estimate', 'adaptive', 'spectra'), n = 1, keepMixedSpec = FALSE, distThreshold = 0.025, ...) {
  # satisfy R CMD check
  munsell.spectra.wide <- NULL
  # enforce mixing method
  mixingMethod <- match.arg(mixingMethod)
  # backwards compatibility: "spectra" will be deprecated in the future
  if (mixingMethod == 'spectra') {
    message('please use `mixingMethod = "reference"`')
    mixingMethod <- 'reference'
  }
  # multiple matches are only possible when using mixingMethod == 'reference'
  # scalar condition: use short-circuiting && (was elementwise &)
  if ((n > 1) && mixingMethod != 'reference') {
    stop('`n` is only valid for `mixingMethod = "reference"`', call. = FALSE)
  }
  # the mixed spectra can only be returned by the reference / exact methods
  if (keepMixedSpec && !mixingMethod %in% c('reference', 'exact')) {
    stop('`keepMixedSpec` is only valid for mixingMethod = "reference" or "exact"', call. = FALSE)
  }
  # sanity check, need this for gower::gower_topn()
  if (!requireNamespace('gower', quietly = TRUE))
    stop('package `gower` is required', call. = FALSE)
  # can't mix a single color, just give it back at 0 distance
  if (length(unique(x)) == 1) {
    res <- data.frame(
      munsell = x[1],
      distance = 0,
      scaledDistance = NA,
      distanceMetric = NA,
      mixingMethod = NA,
      stringsAsFactors = FALSE
    )
    return(res)
  }
  # must have as many weights as length of x, or a single weight
  if (length(x) != length(w) && length(w) != 1) {
    stop('w should have same length as x or length one')
  } else if (length(w) == 1) {
    # cannot mix with zero weights
    stopifnot(w > 0)
    # a recycled weight is the same as the function default: equal weights
    w <- rep(1, times = length(x)) / length(x)
  }
  ## TODO: move 0-weight / NA handling up in the logic
  # more informative error for colors missing
  if (any(w[is.na(x)] > 0)) {
    stop('cannot mix missing (NA) colors with weight greater than zero')
  }
  # more informative error for weights missing
  if (any(is.na(w))) {
    stop('cannot mix colors with missing (NA) weight')
  }
  # remove 0-weighted colors
  x <- x[w > 0]
  w <- w[w > 0]
  # x with weights > 0 must contain valid Munsell
  if (any(is.na(parseMunsell(x)))) {
    stop('input must be valid Munsell notation, neutral hues and missing not supported')
  }
  ## main branch: mixing method
  if (mixingMethod == 'estimate') {
    # simple estimation by weighted mean CIELAB, no spectra required
    res <- .estimateColorMixture(chips = x, w = w)
    return(res)
  } else {
    # spectral mixing if possible
    # wide version for fast searches
    load(system.file("data/munsell.spectra.wide.rda", package = "aqp")[1])
    # subset reference spectra for colors
    # note that search results are not in the same order as x
    # results are columns of spectra
    munsell.names <- names(munsell.spectra.wide)
    idx <- which(munsell.names %in% x)
    s <- munsell.spectra.wide[, idx, drop = FALSE]
    # sanity check: there must be at least as many reference spectra (columns)
    # as unique colors specified, otherwise fall back / bail out
    if (ncol(s) < length(unique(x))) {
      # helpful message
      missing.chips <- setdiff(x, munsell.names)
      msg <- sprintf(
        'reference spectra not available: %s',
        paste(missing.chips, collapse = ', ')
      )
      message(msg)
      if (mixingMethod == 'adaptive') {
        # fall-back to weighted mean CIELAB (assumes cleaned data)
        res <- .estimateColorMixture(chips = x, w = w)
      } else {
        # otherwise return an empty result
        res <- data.frame(
          munsell = NA,
          distance = NA,
          scaledDistance = NA,
          distanceMetric = NA,
          mixingMethod = NA,
          stringsAsFactors = FALSE
        )
      }
      return(res)
    }
    ## proceeding to simulate the spectral mixture
    # aggregate weights by "chip" -- in case length(x) != length(unique(x))
    # loop-invariant: computed once here (previously re-computed at every wavelength)
    wagg <- aggregate(w, by = list(chip = x), FUN = sum)
    # align aggregated weights with the order of the spectra columns
    w.chip <- wagg$x[match(names(s), wagg$chip)]
    # empty vector for the mixture spectrum
    mixed <- vector(mode = 'numeric', length = nrow(s))
    # iterate over wavelengths (rows of s)
    for (i in seq_along(mixed)) {
      # i-th wavelength: one reflectance value per selected chip, as a vector
      vals <- unlist(s[i, ])
      # mix via weighted geometric mean
      mixed[i] <- .wgm(v = vals, w = w.chip)
    }
    ## "exact" matching
    if (mixingMethod %in% c('exact', 'adaptive')) {
      ##
      # S = R * illuminant (D65 is the default)
      # XYZ = AT %*% standard observer (CIE1964 is the default)
      # XYZ -> sRGB -> Munsell
      mx <- spec2Munsell(mixed, ...)
      # NOTE: ... are passed to rgb2munsell()
      # convert = TRUE: mx is a data.frame
      # convert = FALSE: mx is a matrix
      if (inherits(mx, 'matrix')) {
        # mx is a matrix
        dimnames(mx)[[2]] <- c('r', 'g', 'b')
        # include sRGB coordinates, this is different than what is typically returned by this function
        res <- data.frame(
          mx,
          munsell = NA,
          distance = NA,
          scaledDistance = NA,
          distanceMetric = NA,
          mixingMethod = 'exact',
          stringsAsFactors = FALSE
        )
      } else {
        # mx is a data.frame
        res <- data.frame(
          munsell = sprintf("%s %s/%s", mx$hue, mx$value, mx$chroma),
          distance = mx$sigma,
          scaledDistance = NA,
          distanceMetric = 'dE00',
          mixingMethod = 'exact',
          stringsAsFactors = FALSE
        )
      }
      # final clean-up and return are performed outside of if / else
    } else {
      ## reference "spectra" method
      # Gower distance via compiled code in the gower package (~5x faster)
      # https://cran.r-project.org/web/packages/gower/vignettes/intro.pdf
      # NOTE: arguments to rgb2munsell() are silently ignored
      ## TODO: time wasted here
      # reshape reference spectra: wavelength to columns
      z <- t(munsell.spectra.wide[, -1])
      # top n matches
      d <- gower::gower_topn(
        data.frame(rbind(mixed)),
        data.frame(z),
        n = n
      )
      res <- data.frame(
        munsell = dimnames(z)[[1]][d$index],
        distance = d$distance[, 1],
        scaledDistance = d$distance[, 1] / distThreshold,
        distanceMetric = 'Gower',
        mixingMethod = 'reference',
        stringsAsFactors = FALSE
      )
      # report possibly problematic mixtures
      if (any(res$scaledDistance > 1)) {
        message('closest match has a spectral distance that is large, results may be unreliable')
      }
    }
    ## all done with "exact", "spectra", and "adaptive"
    # clean-up row names
    row.names(res) <- NULL
    # optionally return the weighted geometric mean (mixed) spectra as well
    if (keepMixedSpec) {
      return(
        list(
          mixed = res,
          spec = mixed
        )
      )
    } else {
      # not returning the mixed spectra
      return(res)
    }
  }
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calc_fisherk.R
\name{calc_fisherk}
\alias{calc_fisherk}
\title{Find Standardized Cumulants of Data based on Fisher's k-statistics}
\usage{
calc_fisherk(x)
}
\arguments{
\item{x}{a vector of data}
}
\value{
A vector of the mean, standard deviation, skewness, standardized kurtosis, and standardized fifth and sixth cumulants
}
\description{
This function uses Fisher's k-statistics to calculate the mean, standard deviation, skewness,
standardized kurtosis, and standardized fifth and sixth cumulants given a vector of data. The result can be used
as input to \code{\link[SimMultiCorrData]{find_constants}} or for data simulation.
}
\examples{
x <- rgamma(n = 10000, 10, 10)
calc_fisherk(x)
}
\references{
Fisher RA (1928). Moments and Product Moments of Sampling Distributions. Proc. London Math. Soc. 30, 199-238. \doi{10.1112/plms/s2-30.1.199}.
Headrick TC, Sheng Y, & Hodis FA (2007). Numerical Computing and Graphics for the Power Method Transformation Using
Mathematica. Journal of Statistical Software, 19(3), 1 - 17. \doi{10.18637/jss.v019.i03}
}
\seealso{
\code{\link[SimMultiCorrData]{calc_theory}}, \code{\link[SimMultiCorrData]{calc_moments}},
\code{\link[SimMultiCorrData]{find_constants}}
}
\keyword{Fisher}
\keyword{cumulants}
| /man/calc_fisherk.Rd | no_license | shaoyoucheng/SimMultiCorrData | R | false | true | 1,343 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calc_fisherk.R
\name{calc_fisherk}
\alias{calc_fisherk}
\title{Find Standardized Cumulants of Data based on Fisher's k-statistics}
\usage{
calc_fisherk(x)
}
\arguments{
\item{x}{a vector of data}
}
\value{
A vector of the mean, standard deviation, skewness, standardized kurtosis, and standardized fifth and sixth cumulants
}
\description{
This function uses Fisher's k-statistics to calculate the mean, standard deviation, skewness,
standardized kurtosis, and standardized fifth and sixth cumulants given a vector of data. The result can be used
as input to \code{\link[SimMultiCorrData]{find_constants}} or for data simulation.
}
\examples{
x <- rgamma(n = 10000, 10, 10)
calc_fisherk(x)
}
\references{
Fisher RA (1928). Moments and Product Moments of Sampling Distributions. Proc. London Math. Soc. 30, 199-238. \doi{10.1112/plms/s2-30.1.199}.
Headrick TC, Sheng Y, & Hodis FA (2007). Numerical Computing and Graphics for the Power Method Transformation Using
Mathematica. Journal of Statistical Software, 19(3), 1 - 17. \doi{10.18637/jss.v019.i03}
}
\seealso{
\code{\link[SimMultiCorrData]{calc_theory}}, \code{\link[SimMultiCorrData]{calc_moments}},
\code{\link[SimMultiCorrData]{find_constants}}
}
\keyword{Fisher}
\keyword{cumulants}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/waf_operations.R
\name{waf_delete_sql_injection_match_set}
\alias{waf_delete_sql_injection_match_set}
\title{Permanently deletes a SqlInjectionMatchSet}
\usage{
waf_delete_sql_injection_match_set(SqlInjectionMatchSetId, ChangeToken)
}
\arguments{
\item{SqlInjectionMatchSetId}{[required] The \code{SqlInjectionMatchSetId} of the SqlInjectionMatchSet that you want
to delete. \code{SqlInjectionMatchSetId} is returned by
CreateSqlInjectionMatchSet and by ListSqlInjectionMatchSets.}
\item{ChangeToken}{[required] The value returned by the most recent call to GetChangeToken.}
}
\description{
Permanently deletes a SqlInjectionMatchSet. You can\'t delete a
\code{SqlInjectionMatchSet} if it\'s still used in any \code{Rules} or if it still
contains any SqlInjectionMatchTuple objects.
}
\details{
If you just want to remove a \code{SqlInjectionMatchSet} from a \code{Rule}, use
UpdateRule.
To permanently delete a \code{SqlInjectionMatchSet} from AWS WAF, perform the
following steps:
\enumerate{
\item Update the \code{SqlInjectionMatchSet} to remove filters, if any. For
more information, see UpdateSqlInjectionMatchSet.
\item Use GetChangeToken to get the change token that you provide in the
\code{ChangeToken} parameter of a \code{DeleteSqlInjectionMatchSet} request.
\item Submit a \code{DeleteSqlInjectionMatchSet} request.
}
}
\section{Request syntax}{
\preformatted{svc$delete_sql_injection_match_set(
SqlInjectionMatchSetId = "string",
ChangeToken = "string"
)
}
}
\examples{
# The following example deletes a SQL injection match set with the ID
# example1ds3t-46da-4fdb-b8d5-abc321j569j5.
\donttest{svc$delete_sql_injection_match_set(
ChangeToken = "abcd12f2-46da-4fdb-b8d5-fbd4c466928f",
SqlInjectionMatchSetId = "example1ds3t-46da-4fdb-b8d5-abc321j569j5"
)}
}
\keyword{internal}
| /cran/paws.security.identity/man/waf_delete_sql_injection_match_set.Rd | permissive | ryanb8/paws | R | false | true | 1,881 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/waf_operations.R
\name{waf_delete_sql_injection_match_set}
\alias{waf_delete_sql_injection_match_set}
\title{Permanently deletes a SqlInjectionMatchSet}
\usage{
waf_delete_sql_injection_match_set(SqlInjectionMatchSetId, ChangeToken)
}
\arguments{
\item{SqlInjectionMatchSetId}{[required] The \code{SqlInjectionMatchSetId} of the SqlInjectionMatchSet that you want
to delete. \code{SqlInjectionMatchSetId} is returned by
CreateSqlInjectionMatchSet and by ListSqlInjectionMatchSets.}
\item{ChangeToken}{[required] The value returned by the most recent call to GetChangeToken.}
}
\description{
Permanently deletes a SqlInjectionMatchSet. You can\'t delete a
\code{SqlInjectionMatchSet} if it\'s still used in any \code{Rules} or if it still
contains any SqlInjectionMatchTuple objects.
}
\details{
If you just want to remove a \code{SqlInjectionMatchSet} from a \code{Rule}, use
UpdateRule.
To permanently delete a \code{SqlInjectionMatchSet} from AWS WAF, perform the
following steps:
\enumerate{
\item Update the \code{SqlInjectionMatchSet} to remove filters, if any. For
more information, see UpdateSqlInjectionMatchSet.
\item Use GetChangeToken to get the change token that you provide in the
\code{ChangeToken} parameter of a \code{DeleteSqlInjectionMatchSet} request.
\item Submit a \code{DeleteSqlInjectionMatchSet} request.
}
}
\section{Request syntax}{
\preformatted{svc$delete_sql_injection_match_set(
SqlInjectionMatchSetId = "string",
ChangeToken = "string"
)
}
}
\examples{
# The following example deletes a SQL injection match set with the ID
# example1ds3t-46da-4fdb-b8d5-abc321j569j5.
\donttest{svc$delete_sql_injection_match_set(
ChangeToken = "abcd12f2-46da-4fdb-b8d5-fbd4c466928f",
SqlInjectionMatchSetId = "example1ds3t-46da-4fdb-b8d5-abc321j569j5"
)}
}
\keyword{internal}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Obregon-TitoAJ_2015.R
\name{Obregon-TitoAJ_2015}
\alias{Obregon-TitoAJ_2015}
\alias{Obregon-TitoAJ_2015.genefamilies_relab.stool}
\alias{Obregon-TitoAJ_2015.marker_abundance.stool}
\alias{Obregon-TitoAJ_2015.marker_presence.stool}
\alias{Obregon-TitoAJ_2015.metaphlan_bugs_list.stool}
\alias{Obregon-TitoAJ_2015.pathabundance_relab.stool}
\alias{Obregon-TitoAJ_2015.pathcoverage.stool}
\title{Data from the Obregon-TitoAJ_2015 study}
\description{
Data from the Obregon-TitoAJ_2015 study
}
\details{
Note that Obregon_TitoAJ_2015 is deprecated, use Obregon-TitoAJ_2015 instead.
}
\section{Datasets}{
\subsection{Obregon-TitoAJ_2015.genefamilies_relab.stool}{
An ExpressionSet with 58 samples and 1,185,621 features specific to the stool body site
}
\subsection{Obregon-TitoAJ_2015.marker_abundance.stool}{
An ExpressionSet with 58 samples and 96,336 features specific to the stool body site
}
\subsection{Obregon-TitoAJ_2015.marker_presence.stool}{
An ExpressionSet with 58 samples and 86,352 features specific to the stool body site
}
\subsection{Obregon-TitoAJ_2015.metaphlan_bugs_list.stool}{
An ExpressionSet with 58 samples and 1,094 features specific to the stool body site
}
\subsection{Obregon-TitoAJ_2015.pathabundance_relab.stool}{
An ExpressionSet with 58 samples and 9,801 features specific to the stool body site
}
\subsection{Obregon-TitoAJ_2015.pathcoverage.stool}{
An ExpressionSet with 58 samples and 9,801 features specific to the stool body site
}
}
\section{Source}{
\subsection{Title}{
Subsistence strategies in traditional societies distinguish gut microbiomes.
}
\subsection{Author}{
Obregon-Tito AJ, Tito RY, Metcalf J, Sankaranarayanan K, Clemente JC, Ursell LK, Zech Xu Z, Van Treuren W, Knight R, Gaffney PM, Spicer P, Lawson P, Marin-Reyes L, Trujillo-Villarroel O, Foster M, Guija-Poma E, Troncoso-Corzo L, Warinner C, Ozga AT, Lewis CM
}
\subsection{Lab}{
[1] 1] Department of Anthropology, University of Oklahoma, Dale Hall Tower, 521 Norman, Oklahoma 73019, USA [2] Universidad Cientifica del Sur, Lima 18, Peru [3] City of Hope, NCI-designated Comprehensive Cancer Center, Duarte, California 91010, USA., [2] 1] Department of Anthropology, University of Oklahoma, Dale Hall Tower, 521 Norman, Oklahoma 73019, USA [2] Universidad Cientifica del Sur, Lima 18, Peru, [3] City of Hope, NCI-designated Comprehensive Cancer Center, Duarte, California 91010, USA., [4] Department of Anthropology, University of Oklahoma, Dale Hall Tower, 521 Norman, Oklahoma 73019, USA., [5] Department of Chemistry and Biochemistry, University of Colorado, Boulder, Colorado 80309, USA., [6] Departments of Pediatrics and Computer Science &Engineering University of California San Diego, La Jolla, CA 92093, USA., [7] Oklahoma Medical Research Foundation, Oklahoma City, Oklahoma 73104, USA., [8] Instituto Nacional de Salud, Lima 11, Peru, [9] Old Dominion University, Norfolk, Virginia 23529, USA., [10] Universidad Cientifica del Sur, Lima 18, Peru
}
\subsection{PMID}{
25807110
}
}
\examples{
`Obregon-TitoAJ_2015.metaphlan_bugs_list.stool`()
}
| /man/Obregon-TitoAJ_2015.Rd | permissive | pythseq/curatedMetagenomicData | R | false | true | 3,187 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Obregon-TitoAJ_2015.R
\name{Obregon-TitoAJ_2015}
\alias{Obregon-TitoAJ_2015}
\alias{Obregon-TitoAJ_2015.genefamilies_relab.stool}
\alias{Obregon-TitoAJ_2015.marker_abundance.stool}
\alias{Obregon-TitoAJ_2015.marker_presence.stool}
\alias{Obregon-TitoAJ_2015.metaphlan_bugs_list.stool}
\alias{Obregon-TitoAJ_2015.pathabundance_relab.stool}
\alias{Obregon-TitoAJ_2015.pathcoverage.stool}
\title{Data from the Obregon-TitoAJ_2015 study}
\description{
Data from the Obregon-TitoAJ_2015 study
}
\details{
Note that Obregon_TitoAJ_2015 is deprecated, use Obregon-TitoAJ_2015 instead.
}
\section{Datasets}{
\subsection{Obregon-TitoAJ_2015.genefamilies_relab.stool}{
An ExpressionSet with 58 samples and 1,185,621 features specific to the stool body site
}
\subsection{Obregon-TitoAJ_2015.marker_abundance.stool}{
An ExpressionSet with 58 samples and 96,336 features specific to the stool body site
}
\subsection{Obregon-TitoAJ_2015.marker_presence.stool}{
An ExpressionSet with 58 samples and 86,352 features specific to the stool body site
}
\subsection{Obregon-TitoAJ_2015.metaphlan_bugs_list.stool}{
An ExpressionSet with 58 samples and 1,094 features specific to the stool body site
}
\subsection{Obregon-TitoAJ_2015.pathabundance_relab.stool}{
An ExpressionSet with 58 samples and 9,801 features specific to the stool body site
}
\subsection{Obregon-TitoAJ_2015.pathcoverage.stool}{
An ExpressionSet with 58 samples and 9,801 features specific to the stool body site
}
}
\section{Source}{
\subsection{Title}{
Subsistence strategies in traditional societies distinguish gut microbiomes.
}
\subsection{Author}{
Obregon-Tito AJ, Tito RY, Metcalf J, Sankaranarayanan K, Clemente JC, Ursell LK, Zech Xu Z, Van Treuren W, Knight R, Gaffney PM, Spicer P, Lawson P, Marin-Reyes L, Trujillo-Villarroel O, Foster M, Guija-Poma E, Troncoso-Corzo L, Warinner C, Ozga AT, Lewis CM
}
\subsection{Lab}{
[1] 1] Department of Anthropology, University of Oklahoma, Dale Hall Tower, 521 Norman, Oklahoma 73019, USA [2] Universidad Cientifica del Sur, Lima 18, Peru [3] City of Hope, NCI-designated Comprehensive Cancer Center, Duarte, California 91010, USA., [2] 1] Department of Anthropology, University of Oklahoma, Dale Hall Tower, 521 Norman, Oklahoma 73019, USA [2] Universidad Cientifica del Sur, Lima 18, Peru, [3] City of Hope, NCI-designated Comprehensive Cancer Center, Duarte, California 91010, USA., [4] Department of Anthropology, University of Oklahoma, Dale Hall Tower, 521 Norman, Oklahoma 73019, USA., [5] Department of Chemistry and Biochemistry, University of Colorado, Boulder, Colorado 80309, USA., [6] Departments of Pediatrics and Computer Science &Engineering University of California San Diego, La Jolla, CA 92093, USA., [7] Oklahoma Medical Research Foundation, Oklahoma City, Oklahoma 73104, USA., [8] Instituto Nacional de Salud, Lima 11, Peru, [9] Old Dominion University, Norfolk, Virginia 23529, USA., [10] Universidad Cientifica del Sur, Lima 18, Peru
}
\subsection{PMID}{
25807110
}
}
\examples{
`Obregon-TitoAJ_2015.metaphlan_bugs_list.stool`()
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/simple_functions (conflicted copy 2021-04-14
% 155304).R, R/simple_functions.R
\name{create_group_membership}
\alias{create_group_membership}
\title{create_group_membership}
\usage{
create_group_membership(group_id, user_id)
}
\arguments{
\item{group_id}{the group id (integer)}
\item{user_id}{the canvas id of the user (string). Alternatively, the sis_id can
be used by setting "sis_login_id:" in front of the sis_id.}
}
\value{
server response. Either 200 status code if everything went correctly or a specific http status warning.
}
\description{
This function is used to create a group membership for a specific user in
a specific course.
}
\examples{
create_group_membership(group_id = 4095, user_id = "sis_login_id: pbosman")
}
| /man/create_group_membership.Rd | no_license | ICTO-FMG/uvacanvas | R | false | true | 1,140 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/simple_functions (conflicted copy 2021-04-14
% 155304).R, R/simple_functions.R
\name{create_group_membership}
\alias{create_group_membership}
\title{create_group_membership}
\usage{
create_group_membership(group_id, user_id)
create_group_membership(group_id, user_id)
}
\arguments{
\item{group_id}{the group id (integer)}
\item{user_id}{the canvas id of the user (string). Alternatively, the sis_id can
be used by setting "sis_login_id:" in front of the sis_id.}
}
\value{
server response. Either 200 status code if everything went correctly or a specific http status warning.
server response. Either 200 status code if everything went correctly or a specific http status warning.
}
\description{
This function is used to create a group membership for a specific user in
a specific course.
This function is used to create a group membership for a specific user in
a specific course.
}
\examples{
create_group_membership(group_id = 4095, user_id = "sis_login_id: pbosman")
create_group_membership(group_id = 4095, user_id = "sis_login_id: pbosman")
}
|
# Integration tests for cdec_stations() -- these call the live CDEC web
# service, so network access is required (NOTE(review): may fail offline;
# consider skip_on_cran()/skip_if_offline -- confirm project policy).
context("Station Search")
library(CDECRetrieve)
# an unknown station id should surface the service's "no data" error message
test_that("cdec_stations returns an error when you enter an invalid station_id", {
  expect_error(cdec_stations("XXX"), "request returned no data, please check input values")
})
# an unknown city should also error (exact message not pinned here)
test_that("cdec station errors when nearby_city invalid", {
  expect_error(cdec_stations(nearby_city = "XXXX"))
})
# valid queries return a tibble (tbl_df inherits from data.frame)
test_that("cdec stations returns a dataframe when you enter a valid station id", {
  expect_is(cdec_stations(station_id = "EMM"), c("tbl_df", "tbl", "data.frame"))
})
# a city search should yield at least one station
test_that("cdec stations returns a dataframe with at least one row", {
  expect_gt(nrow(cdec_stations(nearby_city = "Sacramento")), 0)
})
# the result schema (column names and order) is part of the public contract
test_that("the colnames are correct", {
  d <- cdec_stations(station_id = "EMM")
  expect_equal(colnames(d), c("station_id", "name", "river_basin", "county", "longitude",
   "latitude", "elevation", "operator", "state"))
})
| /tests/testthat/test-stations-search.R | no_license | cran/CDECRetrieve | R | false | false | 928 | r | context("Station Search")
# Integration tests for cdec_stations() -- these call the live CDEC web
# service, so network access is required (NOTE(review): may fail offline).
library(CDECRetrieve)
# an unknown station id should surface the service's "no data" error message
test_that("cdec_stations returns an error when you enter an invalid station_id", {
  expect_error(cdec_stations("XXX"), "request returned no data, please check input values")
})
# an unknown city should also error (exact message not pinned here)
test_that("cdec station errors when nearby_city invalid", {
  expect_error(cdec_stations(nearby_city = "XXXX"))
})
# valid queries return a tibble (tbl_df inherits from data.frame)
test_that("cdec stations returns a dataframe when you enter a valid station id", {
  expect_is(cdec_stations(station_id = "EMM"), c("tbl_df", "tbl", "data.frame"))
})
# a city search should yield at least one station
test_that("cdec stations returns a dataframe with at least one row", {
  expect_gt(nrow(cdec_stations(nearby_city = "Sacramento")), 0)
})
# the result schema (column names and order) is part of the public contract
test_that("the colnames are correct", {
  d <- cdec_stations(station_id = "EMM")
  expect_equal(colnames(d), c("station_id", "name", "river_basin", "county", "longitude",
   "latitude", "elevation", "operator", "state"))
})
|
################################################################################
# functions to set initial values and take information from r_state
# when available
#
# Note: putting functions in R/radiant.R produces
# Error in eval(expr, envir, enclos) : object 'r_state' not found
# because exported functions cannot access variables in the environment
# created by shinyServer
################################################################################
observe({
  # forget the saved UI state when the active dataset changes, unless the
  # user is on the Data > Manage tab (where switching datasets is expected)
  if (is.null(r_state$dataset) || is.null(input$dataset)) return()
  on_manage_tab <- input$nav_radiant == "Data" && input$datatabs == "Manage"
  if (!on_manage_tab && r_state$dataset != input$dataset) {
    r_state <<- list()
  }
})
## Can't export the state_... function through R/radiant.R
## Error in checkboxGroupInput("help_data", NULL, help_data, selected = state_init_list("help_data", :
## could not find function "state_init"
# Set initial value for shiny input (e.g., radio button or checkbox)
# Initial value for a shiny input (e.g., radio button or checkbox):
# a value remembered in r_state wins over the supplied default.
state_init <- function(inputvar, init = "") {
  stored <- r_state[[inputvar]]
  if (is.null(stored)) init else stored
}
# library(dplyr)
# r_state <- list()
# state_init("test")
# state_init("test",0)
# r_state$test <- c("a","b")
# state_init("test",0)
# Set initial value for shiny input from a list of values
# Initial value for a shiny input chosen from a list of values:
# when r_state remembers a value, return the matching element(s) of vals,
# otherwise return the supplied default.
state_single <- function(inputvar, vals, init = character(0)) {
  remembered <- r_state[[inputvar]]
  if (is.null(remembered)) {
    init
  } else {
    vals[vals == remembered]
  }
}
# library(dplyr)
# r_state <- list()
# state_single("test",1,1:10)
# r_state$test <- 8
# state_single("test",1,1:10)
# state_single("test",1,1:5)
# Set initial values for variable selection (e.g., selection used in another analysis)
# Initial values for a multiple-selection input (e.g., variable selection):
# keep the elements of vals present in the remembered r_state selection,
# or in the supplied default when nothing was remembered.
state_multiple <- function(inputvar, vals, init = character(0)) {
  # "a" %in% character(0) --> FALSE, so an empty selection yields character(0)
  keep <- if (is.null(r_state[[inputvar]])) init else r_state[[inputvar]]
  vals[vals %in% keep]
}
################################################################################
# function to save app state on refresh or crash
################################################################################
# Save the app state (shiny inputs + r_data) into the global environment when
# the session ends (refresh or crash), unless the user explicitly reset
# state, quit the app, or is uploading a previously saved state.
# ip_inputs / ip_data / ip_dump / running_local / r_env are globals defined
# elsewhere in the app.
# NOTE(review): the default `session = session` is self-referential and only
# works because the caller always supplies the argument -- confirm.
saveStateOnRefresh <- function(session = session) {
  session$onSessionEnded(function() {
    isolate({
      if(not_pressed(input$resetState) &&
         not_pressed(input$quitApp) &&
         is.null(input$uploadState)) {
        assign(ip_inputs, reactiveValuesToList(input), envir = .GlobalEnv)
        assign(ip_data, reactiveValuesToList(r_data), envir = .GlobalEnv)
        assign(ip_dump, now(), envir = .GlobalEnv)
        if(running_local) rm(r_env, envir = .GlobalEnv)
      }
    })
  })
}
################################################################
# functions used across tools in radiant
################################################################
# Replace or add a column in dataset `dataset` inside r_data, but only when
# the new column has the same number of rows as the data and a name is given.
# NOTE: %>% binds tighter than ==, so the condition reads as
# nrow(r_data[[dataset]]) == nrow(new_col)
.changedata <- function(new_col, new_col_name = "", dataset = input$dataset) {
  if(nrow(r_data[[dataset]]) == new_col %>% nrow &&
     new_col_name[1] != "")
    r_data[[dataset]][,new_col_name] <- new_col
}
# .changedata <- changedata
# .getdata <- getdata
# changedata_names <- function(oldnames, newnames)
# r_data[[input$dataset]] %<>% rename_(.dots = setNames(oldnames, newnames))
# reactive: the active dataset, filtered by input$data_filter when the
# filter UI is shown and a filter is set; falls back to the unfiltered
# data when the filter is empty or fails to parse.
.getdata <- reactive({
  # FIX: use scalar || (not vectorized |) and isTRUE() so the condition is
  # always length-1; the original `input$show_filter == FALSE` yields
  # logical(0) when show_filter is NULL early in a session, making if() error
  if(is_empty(input$data_filter) || !isTRUE(input$show_filter))
    return(r_data[[input$dataset]])
  # NOTE(review): this strips ALL whitespace from the filter expression,
  # including inside quoted strings -- confirm that is acceptable
  selcom <- gsub("\\s","", input$data_filter)
  if(selcom != "") {
    seldat <- try(filter_(r_data[[input$dataset]], selcom), silent = TRUE)
    if(is(seldat, 'try-error')) {
      # store the dplyr error message so the UI can display it
      isolate(r_data$filter_error <- attr(seldat,"condition")$message)
    } else {
      isolate(r_data$filter_error <- "")
      return(seldat)
    }
  } else {
    isolate(r_data$filter_error <- "")
  }
  # fall through: filter failed or was blank after whitespace removal
  r_data[[input$dataset]]
})
# reactive: named vector of simplified column classes for the active dataset
getdata_class <- reactive({
  # r_data[[input$dataset]][1,,drop = FALSE] %>% getdata_class_fun
  r_data[[input$dataset]] %>% getdata_class_fun
})
# Map a data.frame to a named character vector of simplified column classes:
# ordered factors report as "factor"; Date and POSIXct columns as "date".
# Uses vapply (type-stable) instead of sapply; gsub preserves the names
# attribute, so the result stays named by column.
getdata_class_fun <- function(dat) {
  cls <- vapply(dat, function(x) class(x)[1], character(1))
  cls <- gsub("ordered", "factor", cls)
  # the original applied the POSIXct substitution twice; once suffices
  cls <- gsub("POSIXct", "date", cls)
  gsub("Date", "date", cls)
}
# reactive: variables with fewer than 10 distinct values
# (candidates for grouping)
groupable_vars <- reactive({
  .getdata() %>%
    summarise_each(funs(n_distinct)) %>%
    { . < 10 } %>%
    which(.) %>%
    varnames()[.]
})
# reactive: variables with exactly two distinct values
two_level_vars <- reactive({
  .getdata() %>%
    summarise_each(funs(n_distinct)) %>%
    { . == 2 } %>%
    which(.) %>%
    varnames()[.]
})
# reactive: column names, labeled with their simplified class for display,
# e.g. "price {numeric}"
varnames <- reactive({
  getdata_class() %>% names %>%
    set_names(., paste0(., " {", getdata_class(), "}"))
})
# cleaning up the arguments for data_filter and defaults passed to report
# rep_args: named list of call arguments; rep_default: named list of their
# defaults. data_filter is dropped when empty, otherwise stripped of
# newlines with double quotes converted to single quotes; any argument
# (still) equal to its default is removed before sending to the report.
clean_args <- function(rep_args, rep_default = list()) {
  if (!is.null(rep_args$data_filter)) {
    if (rep_args$data_filter == "") {
      rep_args$data_filter <- NULL
    } else {
      # single-quote so the filter survives embedding in generated code
      rep_args$data_filter <- gsub("\"", "'", gsub("\n", "", rep_args$data_filter))
    }
  }
  # no defaults supplied: treat every argument's default as ""
  if (length(rep_default) == 0) rep_default[names(rep_args)] <- ""
  # FIX: arguments without a matching default are kept; the original
  # evaluated `x == NULL` -> logical(0) and errored inside if() whenever
  # rep_default was non-empty but missing a name
  for (i in names(rep_args)) {
    default <- rep_default[[i]]
    if (!is.null(default) && rep_args[[i]][1] == default) rep_args[[i]] <- NULL
  }
  rep_args
}
# check if a variable is null or not in the selected data.frame:
# TRUE when x is NULL or any element of x is not a column name reported by
# the varnames() reactive; FALSE otherwise.
# NOTE: any(is.null(x)) is equivalent to is.null(x) (is.null is scalar)
not_available <- function(x)
  if(any(is.null(x)) || (sum(x %in% varnames()) < length(x))) TRUE else FALSE
# TRUE when an actionButton has not been pressed yet: its input value is
# either NULL (not rendered) or still 0 (never clicked).
not_pressed <- function(x) {
  if (is.null(x) || x == 0) {
    TRUE
  } else {
    FALSE
  }
}
# TRUE when a (string) input is undefined: NULL or equal to the value
# considered "empty" (the empty string by default).
is_empty <- function(x, empty = "") {
  if (is.null(x) || x == empty) {
    TRUE
  } else {
    FALSE
  }
}
# check for duplicate entries: TRUE when x contains any repeated value.
# Uses anyDuplicated(), which short-circuits on the first duplicate instead
# of materialising unique(x) as the original did.
has_duplicates <- function(x)
  anyDuplicated(x) > 0
# is x some type of date variable (Date, POSIXct or POSIXlt)?
# FIX: the original relied on lubridate's is.Date/is.POSIXct/is.POSIXt and
# combined scalar results with vectorized |; base inherits() is equivalent
# (POSIXct and POSIXlt both inherit from POSIXt) and drops the dependency.
is_date <- function(x)
  inherits(x, "Date") || inherits(x, "POSIXt")
# convert a date variable to character for printing; other types pass
# through unchanged
d2c <- function(x) {
  if (!is_date(x)) {
    return(x)
  }
  as.character(x)
}
# truncate character fields to 10 characters for show_data_snippet;
# non-character input passes through unchanged
trunc_char <- function(x) {
  if (!is.character(x)) {
    return(x)
  }
  strtrim(x, 10)
}
# show a few rows of a dataframe as a compact HTML table snippet.
# `dat` may be a dataset name (looked up in r_data) or a data.frame itself.
show_data_snippet <- function(dat = input$dataset, nshow = 5, title = "") {
  { if(is.character(dat) && length(dat) == 1) r_data[[dat]] else dat } %>%
    slice(1:min(nshow,nrow(.))) %>%
    mutate_each(funs(d2c)) %>%        # dates -> character so they print as-is
    mutate_each(funs(trunc_char)) %>% # long strings truncated to 10 chars
    xtable::xtable(.) %>%
    print(type='html', print.results = FALSE, include.rownames = FALSE) %>%
    paste0(title, .) %>%
    # swap xtable's default table tag for the app's bootstrap styling
    sub("<table border=1>","<table class='table table-condensed table-hover'>", .) %>%
    paste0(.,'<label>',nshow,' (max) rows shown. See View-tab for details.</label>') %>%
    enc2utf8
}
# Prepend `text` to a standard hint telling the user how to load the
# example datasets and select `dat`.
suggest_data <- function(text = "", dat = "diamonds") {
  hint <- paste0(
    "For an example dataset go to Data > Manage, select the 'examples' radio button,\n",
    "and press the 'Load examples' button. Then select the '", dat, "' dataset"
  )
  paste0(text, hint)
}
################################################################
# functions used to create Shiny in and outputs
################################################################
# A labeled 2-row text area input.
# NOTE(review): the "returnTextArea" class is presumably bound by custom
# JavaScript elsewhere in the app (value reported on blur/return) -- confirm.
returnTextAreaInput <- function(inputId, label = NULL, value = "") {
  tagList(
    tags$label(label, `for` = inputId),br(),
    tags$textarea(id=inputId, type = "text", rows="2",
                  class="returnTextArea form-control", value)
  )
}
# Plot width/height in pixels: use the Visualize-tab inputs when they exist,
# otherwise the app-wide defaults stored in r_data.
plot_width <- function()
  if(is.null(input$viz_plot_width)) r_data$plot_width else input$viz_plot_width
plot_height <- function()
  if(is.null(input$viz_plot_height)) r_data$plot_height else input$viz_plot_height
# fun_name is a string of the main function name
# rfun_name is a string of the reactive wrapper that calls the main function
# out_name is the name of the output, set to fun_name by default
register_print_output <- function(fun_name, rfun_name,
                                  out_name = fun_name) {
  # Generate output for the summary tab
  output[[out_name]] <- renderPrint({
    # when no analysis was conducted (e.g., no variables selected) the
    # reactive returns a message string, which is cat()-ed instead
    # NOTE(review): the trailing `%>% rm` pipes the result into rm(), which
    # removes `.` from the pipe environment and returns NULL -- apparently
    # to suppress auto-printing of the result; confirm this is intentional
    get(rfun_name)() %>%
      { if(is.character(.)) cat(.,"\n") else . } %>% rm
  })
}
# fun_name is a string of the main function name
# rfun_name is a string of the reactive wrapper that calls the main function
# out_name is the name of the output, set to fun_name by default
# width_fun / height_fun name the functions used for the plot dimensions
register_plot_output <- function(fun_name, rfun_name,
                                 out_name = fun_name,
                                 width_fun = "plot_width",
                                 height_fun = "plot_height") {
  # Generate output for the plots tab
  output[[out_name]] <- renderPlot({
    # when no analysis was conducted (e.g., no variables selected) the
    # reactive returns a message string, shown on an otherwise empty plot
    get(rfun_name)() %>%
      { if(is.character(.)) {
          plot(x = 1, type = 'n', main= . , axes = FALSE, xlab = "", ylab = "")
        } else {
          withProgress(message = 'Making plot', value = 0, { . })
        }
      }
  }, width=get(width_fun), height=get(height_fun))
}
# Standard two-column layout for an analysis tab: a sidebar showing the
# current menu/tool (and data, when given) above the tool's own UI, and a
# main panel holding the output tabs.
stat_tab_panel <- function(menu, tool, tool_ui, output_panels,
                           data = input$dataset) {
  sidebarLayout(
    sidebarPanel(
      wellPanel(
        HTML(paste("<label><strong>Menu:",menu,"</strong></label><br>")),
        HTML(paste("<label><strong>Tool:",tool,"</strong></label><br>")),
        if(!is.null(data))
          HTML(paste("<label><strong>Data:",data,"</strong></label>"))
      ),
      uiOutput(tool_ui)
    ),
    mainPanel(
      output_panels
    )
  )
}
################################################################
# functions used for app help
################################################################
# Build the HTML for a Bootstrap help modal (title + rendered help file)
# plus the question-mark icon that opens it. `link` doubles as the modal's
# DOM id, so it must be unique per page.
help_modal <- function(modal_title, link, help_file) {
  sprintf("<div class='modal fade' id='%s' tabindex='-1' role='dialog' aria-labelledby='%s_label' aria-hidden='true'>
<div class='modal-dialog'>
<div class='modal-content'>
<div class='modal-header'>
<button type='button' class='close' data-dismiss='modal' aria-label='Close'><span aria-hidden='true'>×</span></button>
<h4 class='modal-title' id='%s_label'>%s</h4>
</div>
<div class='modal-body'>%s<br>
© International Potato Center (2015) <a rel='license' href='http://creativecommons.org/licenses/by-nc-sa/4.0/' target='_blank'><img alt='Creative Commons License' style='border-width:0' src ='imgs/80x15.png' /></a>
</div>
</div>
</div>
</div>
<i title='Help' class='glyphicon glyphicon-question-sign' data-toggle='modal' data-target='#%s'></i>",
          link, link, link, modal_title, help_file, link) %>%
    enc2utf8 %>% HTML
}
# Like help_modal, but paired with a "Report results" book icon wired up as
# a shiny action button (id "<fun_name>_report"). `fun_name` is used both
# for the modal's DOM id and the report button id.
help_and_report <- function(modal_title, fun_name, help_file) {
  sprintf("<div class='modal fade' id='%s_help' tabindex='-1' role='dialog' aria-labelledby='%s_help_label' aria-hidden='true'>
<div class='modal-dialog'>
<div class='modal-content'>
<div class='modal-header'>
<button type='button' class='close' data-dismiss='modal' aria-label='Close'><span aria-hidden='true'>×</span></button>
<h4 class='modal-title' id='%s_help_label'>%s</h4>
</div>
<div class='modal-body'>%s<br>
© International Potato Center (2015) <a rel='license' href='http://creativecommons.org/licenses/by-nc-sa/4.0/' target='_blank'><img alt='Creative Commons License' style='border-width:0' src ='imgs/80x15.png' /></a>
</div>
</div>
</div>
</div>
<i title='Help' class='glyphicon glyphicon-question-sign alignleft' data-toggle='modal' data-target='#%s_help'></i>
<i title='Report results' class='glyphicon glyphicon-book action-button shiny-bound-input alignright' href='#%s_report' id='%s_report'></i>
<div style='clear: both;'></div>",
          fun_name, fun_name, fun_name, modal_title, help_file, fun_name, fun_name, fun_name) %>%
    enc2utf8 %>% HTML %>% withMathJax()
}
# function to render .md files to an html fragment
# (app_dir is a global set elsewhere in the app)
inclMD <- function(path) {
  markdown::markdownToHTML(path, fragment.only = TRUE, options = c(""),
                           stylesheet=file.path("..",app_dir,"www/empty.css"))
}
# function to render .Rmd files to html - does not embed image or add css
# NOTE(review): this uses the global `base_dir` where inclMD uses `app_dir`
# -- confirm both globals exist and point to the intended locations
inclRmd <- function(path) {
  paste(readLines(path, warn = FALSE), collapse = '\n') %>%
    knitr::knit2html(text = ., fragment.only = TRUE, options = "",
                     stylesheet = file.path("..",base_dir,"www/empty.css"))
}
| /inst/hidap/radiant.R | no_license | lukawanjohi/hidap | R | false | false | 12,452 | r | ################################################################################
# functions to set initial values and take information from r_state
# when available
#
# Note: putting functions in R/radiant.R produces
# Error in eval(expr, envir, enclos) : object 'r_state' not found
# because exported functions cannot access variables in the environment
# created by shinyServer
################################################################################
observe({
# reset r_state on dataset change ... when you are not on the
# Manage > Data tab
if(is.null(r_state$dataset) || is.null(input$dataset)) return()
if(input$datatabs != "Manage" || input$nav_radiant != "Data")
if(r_state$dataset != input$dataset) r_state <<- list()
})
## Can't export the state_... function through R/radiant.R
## Error in checkboxGroupInput("help_data", NULL, help_data, selected = state_init_list("help_data", :
## could not find function "state_init"
# Set initial value for shiny input (e.g., radio button or checkbox)
state_init <- function(inputvar, init = "")
if(is.null(r_state[[inputvar]])) init else r_state[[inputvar]]
# library(dplyr)
# r_state <- list()
# state_init("test")
# state_init("test",0)
# r_state$test <- c("a","b")
# state_init("test",0)
# Set initial value for shiny input from a list of values
state_single <- function(inputvar, vals, init = character(0))
if(is.null(r_state[[inputvar]])) init else vals[vals == r_state[[inputvar]]]
# library(dplyr)
# r_state <- list()
# state_single("test",1,1:10)
# r_state$test <- 8
# state_single("test",1,1:10)
# state_single("test",1,1:5)
# Set initial values for variable selection (e.g., selection used in another analysis)
state_multiple <- function(inputvar, vals, init = character(0)) {
if(is.null(r_state[[inputvar]]))
# "a" %in% character(0) --> FALSE, letters[FALSE] --> character(0)
vals[vals %in% init]
else
vals[vals %in% r_state[[inputvar]]]
}
################################################################################
# function to save app state on refresh or crash
################################################################################
saveStateOnRefresh <- function(session = session) {
session$onSessionEnded(function() {
isolate({
if(not_pressed(input$resetState) &&
not_pressed(input$quitApp) &&
is.null(input$uploadState)) {
assign(ip_inputs, reactiveValuesToList(input), envir = .GlobalEnv)
assign(ip_data, reactiveValuesToList(r_data), envir = .GlobalEnv)
assign(ip_dump, now(), envir = .GlobalEnv)
if(running_local) rm(r_env, envir = .GlobalEnv)
}
})
})
}
################################################################
# functions used across tools in radiant
################################################################
.changedata <- function(new_col, new_col_name = "", dataset = input$dataset) {
if(nrow(r_data[[dataset]]) == new_col %>% nrow &&
new_col_name[1] != "")
r_data[[dataset]][,new_col_name] <- new_col
}
# .changedata <- changedata
# .getdata <- getdata
# changedata_names <- function(oldnames, newnames)
# r_data[[input$dataset]] %<>% rename_(.dots = setNames(oldnames, newnames))
# reactive: the active dataset, filtered by input$data_filter when the
# filter UI is shown and a filter is set; falls back to the unfiltered
# data when the filter is empty or fails to parse.
.getdata <- reactive({
  # FIX: use scalar || (not vectorized |) and isTRUE() so the condition is
  # always length-1; the original `input$show_filter == FALSE` yields
  # logical(0) when show_filter is NULL early in a session, making if() error
  if(is_empty(input$data_filter) || !isTRUE(input$show_filter))
    return(r_data[[input$dataset]])
  # NOTE(review): this strips ALL whitespace from the filter expression,
  # including inside quoted strings -- confirm that is acceptable
  selcom <- gsub("\\s","", input$data_filter)
  if(selcom != "") {
    seldat <- try(filter_(r_data[[input$dataset]], selcom), silent = TRUE)
    if(is(seldat, 'try-error')) {
      # store the dplyr error message so the UI can display it
      isolate(r_data$filter_error <- attr(seldat,"condition")$message)
    } else {
      isolate(r_data$filter_error <- "")
      return(seldat)
    }
  } else {
    isolate(r_data$filter_error <- "")
  }
  # fall through: filter failed or was blank after whitespace removal
  r_data[[input$dataset]]
})
getdata_class <- reactive({
# r_data[[input$dataset]][1,,drop = FALSE] %>% getdata_class_fun
r_data[[input$dataset]] %>% getdata_class_fun
})
# Map a data.frame to a named character vector of simplified column classes:
# ordered factors report as "factor"; Date and POSIXct columns as "date".
# Uses vapply (type-stable) instead of sapply; gsub preserves the names
# attribute, so the result stays named by column.
getdata_class_fun <- function(dat) {
  cls <- vapply(dat, function(x) class(x)[1], character(1))
  cls <- gsub("ordered", "factor", cls)
  # the original applied the POSIXct substitution twice; once suffices
  cls <- gsub("POSIXct", "date", cls)
  gsub("Date", "date", cls)
}
groupable_vars <- reactive({
.getdata() %>%
summarise_each(funs(n_distinct)) %>%
{ . < 10 } %>%
which(.) %>%
varnames()[.]
})
two_level_vars <- reactive({
.getdata() %>%
summarise_each(funs(n_distinct)) %>%
{ . == 2 } %>%
which(.) %>%
varnames()[.]
})
varnames <- reactive({
getdata_class() %>% names %>%
set_names(., paste0(., " {", getdata_class(), "}"))
})
# cleaning up the arguments for data_filter and defaults passed to report
# rep_args: named list of call arguments; rep_default: named list of their
# defaults. data_filter is dropped when empty, otherwise stripped of
# newlines with double quotes converted to single quotes; any argument
# (still) equal to its default is removed before sending to the report.
clean_args <- function(rep_args, rep_default = list()) {
  if (!is.null(rep_args$data_filter)) {
    if (rep_args$data_filter == "") {
      rep_args$data_filter <- NULL
    } else {
      # single-quote so the filter survives embedding in generated code
      rep_args$data_filter <- gsub("\"", "'", gsub("\n", "", rep_args$data_filter))
    }
  }
  # no defaults supplied: treat every argument's default as ""
  if (length(rep_default) == 0) rep_default[names(rep_args)] <- ""
  # FIX: arguments without a matching default are kept; the original
  # evaluated `x == NULL` -> logical(0) and errored inside if() whenever
  # rep_default was non-empty but missing a name
  for (i in names(rep_args)) {
    default <- rep_default[[i]]
    if (!is.null(default) && rep_args[[i]][1] == default) rep_args[[i]] <- NULL
  }
  rep_args
}
# check if a variable is null or not in the selected data.frame
not_available <- function(x)
if(any(is.null(x)) || (sum(x %in% varnames()) < length(x))) TRUE else FALSE
# check if a button was NOT pressed
not_pressed <- function(x) if(is.null(x) || x == 0) TRUE else FALSE
# check if string variable is defined
is_empty <- function(x, empty = "") if(is.null(x) || x == empty) TRUE else FALSE
# check for duplicate entries: TRUE when x contains any repeated value.
# Uses anyDuplicated(), which short-circuits on the first duplicate instead
# of materialising unique(x) as the original did.
has_duplicates <- function(x)
  anyDuplicated(x) > 0
# is x some type of date variable (Date, POSIXct or POSIXlt)?
# FIX: the original relied on lubridate's is.Date/is.POSIXct/is.POSIXt and
# combined scalar results with vectorized |; base inherits() is equivalent
# (POSIXct and POSIXlt both inherit from POSIXt) and drops the dependency.
is_date <- function(x)
  inherits(x, "Date") || inherits(x, "POSIXt")
# convert a date variable to character for printing
d2c <- function(x) if(is_date(x)) as.character(x) else x
# truncate character fields for show_data_snippet
trunc_char <- function(x) if(is.character(x)) strtrim(x,10) else x
# show a few rows of a dataframe
show_data_snippet <- function(dat = input$dataset, nshow = 5, title = "") {
{ if(is.character(dat) && length(dat) == 1) r_data[[dat]] else dat } %>%
slice(1:min(nshow,nrow(.))) %>%
mutate_each(funs(d2c)) %>%
mutate_each(funs(trunc_char)) %>%
xtable::xtable(.) %>%
print(type='html', print.results = FALSE, include.rownames = FALSE) %>%
paste0(title, .) %>%
sub("<table border=1>","<table class='table table-condensed table-hover'>", .) %>%
paste0(.,'<label>',nshow,' (max) rows shown. See View-tab for details.</label>') %>%
enc2utf8
}
suggest_data <- function(text = "", dat = "diamonds")
paste0(text, "For an example dataset go to Data > Manage, select the 'examples' radio button,\nand press the 'Load examples' button. Then select the \'", dat, "\' dataset")
################################################################
# functions used to create Shiny in and outputs
################################################################
returnTextAreaInput <- function(inputId, label = NULL, value = "") {
tagList(
tags$label(label, `for` = inputId),br(),
tags$textarea(id=inputId, type = "text", rows="2",
class="returnTextArea form-control", value)
)
}
plot_width <- function()
if(is.null(input$viz_plot_width)) r_data$plot_width else input$viz_plot_width
plot_height <- function()
if(is.null(input$viz_plot_height)) r_data$plot_height else input$viz_plot_height
# fun_name is a string of the main function name
# rfun_name is a string of the reactive wrapper that calls the main function
# out_name is the name of the output, set to fun_name by default
register_print_output <- function(fun_name, rfun_name,
out_name = fun_name) {
# Generate output for the summary tab
output[[out_name]] <- renderPrint({
# when no analysis was conducted (e.g., no variables selected)
get(rfun_name)() %>%
{ if(is.character(.)) cat(.,"\n") else . } %>% rm
})
}
# fun_name is a string of the main function name
# rfun_name is a string of the reactive wrapper that calls the main function
# out_name is the name of the output, set to fun_name by default
register_plot_output <- function(fun_name, rfun_name,
out_name = fun_name,
width_fun = "plot_width",
height_fun = "plot_height") {
# Generate output for the plots tab
output[[out_name]] <- renderPlot({
# when no analysis was conducted (e.g., no variables selected)
get(rfun_name)() %>%
{ if(is.character(.)) {
plot(x = 1, type = 'n', main= . , axes = FALSE, xlab = "", ylab = "")
} else {
withProgress(message = 'Making plot', value = 0, { . })
}
}
}, width=get(width_fun), height=get(height_fun))
}
stat_tab_panel <- function(menu, tool, tool_ui, output_panels,
data = input$dataset) {
sidebarLayout(
sidebarPanel(
wellPanel(
HTML(paste("<label><strong>Menu:",menu,"</strong></label><br>")),
HTML(paste("<label><strong>Tool:",tool,"</strong></label><br>")),
if(!is.null(data))
HTML(paste("<label><strong>Data:",data,"</strong></label>"))
),
uiOutput(tool_ui)
),
mainPanel(
output_panels
)
)
}
################################################################
# functions used for app help
################################################################
help_modal <- function(modal_title, link, help_file) {
sprintf("<div class='modal fade' id='%s' tabindex='-1' role='dialog' aria-labelledby='%s_label' aria-hidden='true'>
<div class='modal-dialog'>
<div class='modal-content'>
<div class='modal-header'>
<button type='button' class='close' data-dismiss='modal' aria-label='Close'><span aria-hidden='true'>×</span></button>
<h4 class='modal-title' id='%s_label'>%s</h4>
</div>
<div class='modal-body'>%s<br>
© International Potato Center (2015) <a rel='license' href='http://creativecommons.org/licenses/by-nc-sa/4.0/' target='_blank'><img alt='Creative Commons License' style='border-width:0' src ='imgs/80x15.png' /></a>
</div>
</div>
</div>
</div>
<i title='Help' class='glyphicon glyphicon-question-sign' data-toggle='modal' data-target='#%s'></i>",
link, link, link, modal_title, help_file, link) %>%
enc2utf8 %>% HTML
}
help_and_report <- function(modal_title, fun_name, help_file) {
sprintf("<div class='modal fade' id='%s_help' tabindex='-1' role='dialog' aria-labelledby='%s_help_label' aria-hidden='true'>
<div class='modal-dialog'>
<div class='modal-content'>
<div class='modal-header'>
<button type='button' class='close' data-dismiss='modal' aria-label='Close'><span aria-hidden='true'>×</span></button>
<h4 class='modal-title' id='%s_help_label'>%s</h4>
</div>
<div class='modal-body'>%s<br>
© International Potato Center (2015) <a rel='license' href='http://creativecommons.org/licenses/by-nc-sa/4.0/' target='_blank'><img alt='Creative Commons License' style='border-width:0' src ='imgs/80x15.png' /></a>
</div>
</div>
</div>
</div>
<i title='Help' class='glyphicon glyphicon-question-sign alignleft' data-toggle='modal' data-target='#%s_help'></i>
<i title='Report results' class='glyphicon glyphicon-book action-button shiny-bound-input alignright' href='#%s_report' id='%s_report'></i>
<div style='clear: both;'></div>",
fun_name, fun_name, fun_name, modal_title, help_file, fun_name, fun_name, fun_name) %>%
enc2utf8 %>% HTML %>% withMathJax()
}
# function to render .md files to html
inclMD <- function(path) {
markdown::markdownToHTML(path, fragment.only = TRUE, options = c(""),
stylesheet=file.path("..",app_dir,"www/empty.css"))
}
# function to render .Rmd files to html - does not embed image or add css
inclRmd <- function(path) {
paste(readLines(path, warn = FALSE), collapse = '\n') %>%
knitr::knit2html(text = ., fragment.only = TRUE, options = "",
stylesheet = file.path("..",base_dir,"www/empty.css"))
}
|
library (rPorta)
library(gtools)
extremal <- function(o, pnames = letters[1:length(o)])
# Matrix whose rows are the extremal (vertex) points in [0,1]^n of the
# strict order `o` (given as a set of numbers with no equalities) on a
# vector of parameters named `pnames`.
# e.g., o = 1:3 encodes a <= b <= c; o = 3:1 encodes a >= b >= c.
{
  o <- rank(o)
  if (length(o) != length(unique(o)))
    stop("No equalites allowed in the order specificaiton")
  n <- length(o)
  # row r has a 1 in every position k >= r: the n + 1 step-function vertices
  pts <- outer(seq_len(n + 1), seq_len(n), function(r, k) as.numeric(k >= r))
  colnames(pts) <- pnames[o]
  # return columns in the caller's parameter order
  pts[, pnames]
}
extremal.and <- function(o1,o2=NULL,
                         pnames1=letters[1:length(o1)],pnames2=NULL)
# "And" (cross product) of the extremal points of two orders: every
# extremal point of o1 paired column-wise with every extremal point of o2.
# By default o2 = o1, giving the joint (monotonic) extremals used by
# make.hull.
{
  if (is.null(o2)) o2 <- o1
  if (is.null(pnames2)) pnames2 <- LETTERS[1:length(o2)]
  e1 <- extremal(o1,pnames1)
  e2 <- extremal(o2,pnames2)
  n1 <- nrow(e1)
  n2 <- nrow(e2)
  # FIX: the original built BOTH halves of the cross product from o2's
  # extremals, so the extremals computed from o1 were never used; the
  # default o2 = o1 masked the bug. Repeat each row of e1 n2 times and
  # tile e2 n1 times so every pair appears exactly once.
  o <- cbind(e1[rep(seq_len(n1), each = n2), , drop = FALSE],
             e2[rep(seq_len(n2), times = n1), , drop = FALSE])
  colnames(o) <- c(pnames1,pnames2)
  o
}
make.hull <- function(omat,pnames=NULL,mono=FALSE)
# Makes a poi object (rPorta): the unique vertices of the convex hull of
# the orders specified as rows of omat. With mono=TRUE each order is
# crossed with itself via extremal.and (joint/monotonic extremals).
{
  if ( is.null(dim(omat)) )
    stop("Orders must be specified as rows of a matrix")
  if ( is.null(pnames) ) {
    if ( dim(omat)[2]>26 )
      stop("Automatic variable naming only works up to 26 variables")
    pnames <- letters[1:dim(omat)[2]]
  }
  # vertices of the first order, then append those of the remaining rows
  if (!mono) out <- extremal(omat[1,],pnames) else
    out <- extremal.and(omat[1,],omat[1,],pnames)
  nrow <- nrow(omat)
  if (nrow>1) for (i in 2:nrow)
    if (!mono) out <- rbind(out,extremal(omat[i,],pnames)) else
      out <- rbind(out,extremal.and(omat[i,],omat[i,],pnames))
  # de-duplicate vertices via "x1,x2,..." row labels, sort the labels for a
  # reproducible vertex order, and wrap as an rPorta point object
  rownames(out) <- apply(out,1,paste,collapse=",")
  out <- out[!duplicated(rownames(out)),]
  as.poi(out[sort(rownames(out)),])
}
is.hull.vector <- function(ieqFileObject,test)
# Tests if the point given in vector `test` satisfies every inequality of
# an rPorta ieq file object (i.e., lies in the convex hull it describes).
{
  signs <- as.matrix(ieqFileObject@inequalities@sign)
  # rational coefficients: numerators / denominators; last column = rhs
  coef <- as.matrix(ieqFileObject@inequalities@num)/
    as.matrix(ieqFileObject@inequalities@den)
  lhs <- coef[,-(length(test)+1)] %*% test
  rhs <- coef[,length(test)+1]
  ok <- logical(length(lhs))
  # FIX: subset lhs and rhs consistently by sign; the original compared the
  # full-length lhs against a sign-subset of rhs, silently recycling (and
  # producing wrong answers) whenever the signs were mixed
  ok[signs==-1] <- lhs[signs==-1] <= rhs[signs==-1]
  ok[signs==0] <- lhs[signs==0] == rhs[signs==0]
  ok[signs==1] <- lhs[signs==1] >= rhs[signs==1]
  all(ok)
}
is.hull <- function(ieqFileObject,test)
# Tests if multiple points specified in matrix test
# (one point per column) are in a convex hull.
# Requires every inequality to have sign -1 (i.e., lhs <= rhs form).
{
  signs <- as.matrix(ieqFileObject@inequalities@sign)
  if (!all(signs==-1))
    stop("A sign is not -1")
  # rational coefficients: numerators / denominators; last column = rhs
  coef <- as.matrix(ieqFileObject@inequalities@num)/
    as.matrix(ieqFileObject@inequalities@den)
  mult <- coef[,-(dim(test)[1]+1)]  # inequality coefficients
  rhs <- coef[,dim(test)[1]+1]      # right-hand sides
  nz <- mult !=0                    # skip zero coefficients for speed
  ok <- !logical(dim(test)[2])      # start with every point marked "inside"
  for (i in 1:nrow(coef)) {
    # only re-test points still inside after the previous inequalities
    ok[ok] <- mult[i,nz[i,],drop=FALSE]%*%test[nz[i,],ok,drop=FALSE] <= rhs[i]
  }
  ok
}
make.trace <- function(ntrace,ndim,trace.increasing=TRUE)
# Create trace model orders given ntrace levels and ndim dimensions:
# enumerate all permutations of the ntrace*ndim points
# (gtools::permutations) and keep those where, within each dimension, the
# points appear in strictly increasing (d = 1) or decreasing (d = -1)
# position order.
{
  npoint <- ntrace*ndim
  trace <- permutations(npoint,npoint)  # all orderings; grows as npoint!
  nvec <- dim(trace)[1]
  tracei <- matrix(1:npoint,ncol=ndim)  # column i = point ids of dimension i
  ok <- !logical(nvec)                  # all TRUE to start
  if (trace.increasing) d <- 1 else d <- -1
  for (i in 1:ndim) {
    ok[ok] <- apply(trace[ok,],1,function(x){
      all(diff(x[x%in%tracei[,i]])==d) })
  }
  # NOTE(review): trace[ok,] has no drop = FALSE, so it collapses to a
  # vector if exactly one row survives -- confirm callers never hit that
  trace <- trace[ok,]
  attr(trace,"ndim") = ndim
  trace
}
get.lap <- function(trace)
# Filter output of make.trace to remove non-overlapping orders: drops rows
# in which EVERY dimension occupies a consecutive block of positions
# (i.e., the dimensions do not interleave at all).
{
  ndim <- attr(trace,"ndim")
  tracei <- matrix(1:dim(trace)[2],ncol=ndim)
  # laps[r, i]: TRUE when dimension i's points sit in adjacent positions
  laps <- matrix(!logical(dim(trace)[1]*ndim),ncol=ndim)
  for (i in 1:ndim) {
    laps[,i] <- apply(trace,1,function(x){
      all(abs(diff(c(1:length(x))[x%in%tracei[,i]]))==1) })
  }
  trace <- trace[!apply(laps,1,all),,drop=FALSE]
  attr(trace,"ndim") = ndim
  trace
}
get.dim <- function(trace,dim.ord=1:attr(trace,"dim"))
# Filter output of make.trace, keeping rows where every point of dimension
# i-1 precedes every point of dimension i (dimensions in ascending order).
# NOTE(review): the dim.ord argument is never used in the body -- the
# filter always enforces 1 < 2 < ... < ndim. Its default also reads
# attr(trace,"dim") (the matrix dimensions) instead of the "ndim"
# attribute set by make.trace; harmless while unused, but worth fixing if
# dim.ord is ever implemented.
{
  ndim <- attr(trace,"ndim")
  tracei <- matrix(1:dim(trace)[2],ncol=ndim)
  ok <- !logical(dim(trace)[1])
  for (i in 2:ndim) {
    ok[ok] <- apply(trace[ok,],1,function(x){
      all(c(1:length(x))[x%in%tracei[,i-1]] < c(1:length(x))[x%in%tracei[,i]])
    })
  }
  trace <- trace[ok,,drop=FALSE]
  attr(trace,"ndim") = ndim
  trace
}
ci.p <- function(p, S, percent = 95)
# `percent` credible interval for a proportion p estimated from S samples,
# using the Beta(p*S + 1, S - p*S + 1) posterior (uniform prior).
{
  shape1 <- p * S + 1
  shape2 <- S - p * S + 1
  tail <- percent / 200  # half of the excluded mass in each tail
  c(qbeta(tail, shape1, shape2), qbeta(1 - tail, shape1, shape2))
}
get.BF <- function(p,n,ieq,stopp=.1,ntest=1e6,maxrep=100,minrep=2,verbose=FALSE)
# Monte-Carlo Bayes factor: the proportion of binomial(n, p) sample
# proportions falling inside hull `ieq`, divided by the same proportion
# under the null p = 0.5. Draws ntest samples per iteration for up to
# maxrep iterations; if !is.na(stopp), returns early once the absolute %
# change in BF is < stopp for two iterations in a row.
# NOTE(review): the minrep argument is never referenced in the body.
{
  BF <-
    mean(is.hull(ieq,matrix(rbinom(ntest*length(p),n,p),ncol=ntest)/as.vector(n)))/
    mean(is.hull(ieq,matrix(rbinom(ntest*length(p),n,0.5),ncol=ntest)/as.vector(n)))
  if (verbose) print(paste(ntest,":",BF))
  dBFr <- Inf
  for (i in 2:maxrep)
  {
    BFi <-
      mean(is.hull(ieq,matrix(rbinom(ntest*length(p),n,p),ncol=ntest)/as.vector(n)))/
      mean(is.hull(ieq,matrix(rbinom(ntest*length(p),n,0.5),ncol=ntest)/as.vector(n)))
    oldBF <- BF
    olddBFr <- dBFr
    # running mean of the per-iteration BF estimates
    BF <- (i-1)*BF/i + BFi/i
    dBFr <- abs((BF-oldBF)/oldBF)
    if (!is.na(stopp) && ((dBFr < stopp/100) & (olddBFr < stopp/100)) )
      return(BF)
    if (verbose) print(paste(ntest*i,":",BF))
  }
  BF
}
| /data-raw/Supplementary/ABF.R | no_license | psadil/staHB | R | false | false | 5,583 | r | library (rPorta)
library(gtools)
extremal <- function(o,pnames=letters[1:length(o)])
# generates a matrix whose rows are the extremal points for an order (o,
# specified as as a set of numbers with no equalites allowed) on a vector
# of parameters with names pnames.
# e.g., o=1:3, pnames=letters(1:length(o)), a <= b <= c
# o=3:1, a >= b >= c etc.)
{
o <- rank(o)
if (length(o)!=length(unique(o)))
stop("No equalites allowed in the order specificaiton")
n <- length(o)
out <- matrix(as.numeric(upper.tri(matrix(nrow=n+1,ncol=n+1))),nrow=n+1)[,-1]
colnames(out) <- pnames[o]
out[,pnames]
}
extremal.and <-function(o1,o2=NULL,
pnames1=letters[1:length(o1)],pnames2=NULL)
# And between two sets of orders (by default the same twice=monotonic)
{
if (is.null(o2)) o2 <- o1
if (is.null(pnames2)) pnames2 <- LETTERS[1:length(o2)]
o1 <- extremal(o1,pnames1)
o2 <- extremal(o2,pnames2)
o <-cbind(matrix(rep(o2,each=dim(o2)[1]),ncol=dim(o2)[2]),
do.call("rbind", replicate(dim(o2)[1], o2, simplify=F))
)
colnames(o) <- c(pnames1,pnames2)
o
}
make.hull <- function(omat,pnames=NULL,mono=FALSE)
# Makes poi object, unique vertices of convex hull on orders specified in
# the rows of omat. If makes the joint order extremals
{
if ( is.null(dim(omat)) )
stop("Orders must be specified as rows of a matrix")
if ( is.null(pnames) ) {
if ( dim(omat)[2]>26 )
stop("Automatic variable naming only works up to 26 variables")
pnames <- letters[1:dim(omat)[2]]
}
if (!mono) out <- extremal(omat[1,],pnames) else
out <- extremal.and(omat[1,],omat[1,],pnames)
nrow <- nrow(omat)
if (nrow>1) for (i in 2:nrow)
if (!mono) out <- rbind(out,extremal(omat[i,],pnames)) else
out <- rbind(out,extremal.and(omat[i,],omat[i,],pnames))
rownames(out) <- apply(out,1,paste,collapse=",")
out <- out[!duplicated(rownames(out)),]
as.poi(out[sort(rownames(out)),])
}
is.hull.vector <- function(ieqFileObject,test)
# Tests if a point specified in vector test is in a convex hull
{
signs <- as.matrix(ieqFileObject@inequalities@sign)
coef <- as.matrix(ieqFileObject@inequalities@num)/
as.matrix(ieqFileObject@inequalities@den)
lhs <- coef[,-(length(test)+1)] %*% test
ok <- logical(length(lhs))
ok[signs==-1] <- lhs <= coef[signs==-1,length(test)+1]
ok[signs==0] <- lhs == coef[signs==0,length(test)+1]
ok[signs==1] <- lhs >= coef[signs==1,length(test)+1]
all(ok)
}
is.hull <- function(ieqFileObject,test)
# Tests if multiple points specified in matrix test
# (one point per column) are in a convex hull
{
signs <- as.matrix(ieqFileObject@inequalities@sign)
if (!all(signs==-1))
stop("A sign is not -1")
coef <- as.matrix(ieqFileObject@inequalities@num)/
as.matrix(ieqFileObject@inequalities@den)
mult <- coef[,-(dim(test)[1]+1)]
rhs <- coef[,dim(test)[1]+1]
nz <- mult !=0
ok <- !logical(dim(test)[2])
for (i in 1:nrow(coef)) {
ok[ok] <- mult[i,nz[i,],drop=FALSE]%*%test[nz[i,],ok,drop=FALSE] <= rhs[i]
}
ok
}
make.trace <- function(ntrace,ndim,trace.increasing=TRUE)
# Create trace model orders given ntrace levels and ndim levels
{
npoint <- ntrace*ndim
trace <- permutations(npoint,npoint)
nvec <- dim(trace)[1]
tracei <- matrix(1:npoint,ncol=ndim)
ok <- !logical(nvec)
if (trace.increasing) d <- 1 else d <- -1
for (i in 1:ndim) {
ok[ok] <- apply(trace[ok,],1,function(x){
all(diff(x[x%in%tracei[,i]])==d) })
}
trace <- trace[ok,]
attr(trace,"ndim") = ndim
trace
}
get.lap <- function(trace)
# Filter output of make.trace to remove non-overlapping orders
{
ndim <- attr(trace,"ndim")
tracei <- matrix(1:dim(trace)[2],ncol=ndim)
laps <- matrix(!logical(dim(trace)[1]*ndim),ncol=ndim)
for (i in 1:ndim) {
laps[,i] <- apply(trace,1,function(x){
all(abs(diff(c(1:length(x))[x%in%tracei[,i]]))==1) })
}
trace <- trace[!apply(laps,1,all),,drop=FALSE]
attr(trace,"ndim") = ndim
trace
}
get.dim <- function(trace,dim.ord=1:attr(trace,"dim"))
# Filter output of make.trace to remove orders not respecting dim.ord
{
ndim <- attr(trace,"ndim")
tracei <- matrix(1:dim(trace)[2],ncol=ndim)
ok <- !logical(dim(trace)[1])
for (i in 2:ndim) {
ok[ok] <- apply(trace[ok,],1,function(x){
all(c(1:length(x))[x%in%tracei[,i-1]] < c(1:length(x))[x%in%tracei[,i]])
})
}
trace <- trace[ok,,drop=FALSE]
attr(trace,"ndim") = ndim
trace
}
ci.p <- function(p,S,percent=95)
# percent credible intrval for p obtained from S samples
{
c(qbeta(percent/200,p*S+1,S-p*S+1),
qbeta(1-percent/200,p*S+1,S-p*S+1))
}
get.BF <- function(p,n,ieq,stopp=.1,ntest=1e6,maxrep=100,minrep=2,verbose=FALSE)
# Sample nrep*ntest sequentially to get BF if is.na(stopp) otherwise can pull
# out early when absolute % change on an interation is < stopp for two in a row
{
BF <-
mean(is.hull(ieq,matrix(rbinom(ntest*length(p),n,p),ncol=ntest)/as.vector(n)))/
mean(is.hull(ieq,matrix(rbinom(ntest*length(p),n,0.5),ncol=ntest)/as.vector(n)))
if (verbose) print(paste(ntest,":",BF))
dBFr <- Inf
for (i in 2:maxrep)
{
BFi <-
mean(is.hull(ieq,matrix(rbinom(ntest*length(p),n,p),ncol=ntest)/as.vector(n)))/
mean(is.hull(ieq,matrix(rbinom(ntest*length(p),n,0.5),ncol=ntest)/as.vector(n)))
oldBF <- BF
olddBFr <- dBFr
BF <- (i-1)*BF/i + BFi/i
dBFr <- abs((BF-oldBF)/oldBF)
if (!is.na(stopp) && ((dBFr < stopp/100) & (olddBFr < stopp/100)) )
return(BF)
if (verbose) print(paste(ntest*i,":",BF))
}
BF
}
|
#AYESHA HARGEY
#3650393
#16th April 2019
#Basic Statistics - Day 3
#Linear regression
# Load libraries
library(tidyverse)
library(ggplot2)
# Linear model: eruption duration as a function of waiting time for the
# built-in Old Faithful geyser data ('faithful')
eruption.lm <- lm(eruptions ~ waiting, data = faithful) # fit the model
summary(eruption.lm) # coefficients, R^2 and p-values
str(eruption.lm)     # internal structure of the lm object
# Conclusion from the summary: the null hypothesis is rejected --
# there is a relationship between waiting time and eruption time
# Next: create a graph of the data (x-axis: waiting, y-axis: eruptions)
# with proper labels
faithful <- faithful # copy the built-in dataset into the workspace
eruption_plot <- ggplot(faithful, aes(x = waiting, y = eruptions)) +
geom_point() +
geom_smooth(method = "lm", aes(colour = "Salmon")) +
labs(x = "Waiting Time (minutes)", y = "Eruptions (minutes)") +
ggtitle("The Relationship between Eruption Time and Waiting Time of Old Faithful") +
theme_bw () +
theme(legend.position = "none") +
theme(axis.text.x = element_text(angle = 40, hjust = 1, colour = "black", size=12),
axis.text.y = element_text(hjust = 1, colour = "black", size=12),
plot.background = element_rect(fill = "#f0eae8"),
plot.title = element_text(size=16, face="bold", hjust=0.5)) +
geom_label(aes(x = 40, y = 4.5), hjust = 0, #adding the box with all the information
label = paste("Adj R2 = ",signif(summary(eruption.lm)$adj.r.squared, 5),
"\nIntercept =",signif(eruption.lm$coef[[1]],5 ),
" \nSlope =",signif(eruption.lm$coef[[2]], 5),
" \nP =",signif(summary(eruption.lm)$coef[2,4], 5)))
eruption_plot
| /Basic_Stats/Day_3_Biostats.R | no_license | ahargey/Intro_R_UWC | R | false | false | 1,638 | r | #AYESHA HARGEY
#3650393
#16th April 2019
#Basic Statistics - Day 3
#Linear regression
#Load libraries
library(tidyverse)
library(ggplot2)
#Linear Model
#eruptions as a function of time // of the dataset 'faithful'
eruption.lm <- lm(eruptions ~ waiting, data = faithful) #naming the linear model
summary(eruption.lm) #summary of linear model
str(eruption.lm)
#null hypothesis is rejected
#there is a relationship between waiting time and eruption time
#create a graph for the data faithful
#x-axis: waiting
#y-axis: eruption
#properly labelled
faithful <- faithful
eruption_plot <- ggplot(faithful, aes(x = waiting, y = eruptions)) +
geom_point() +
geom_smooth(method = "lm", aes(colour = "Salmon")) +
labs(x = "Waiting Time (minutes)", y = "Eruptions (minutes)") +
ggtitle("The Relationship between Eruption Time and Waiting Time of Old Faithful") +
theme_bw () +
theme(legend.position = "none") +
theme(axis.text.x = element_text(angle = 40, hjust = 1, colour = "black", size=12),
axis.text.y = element_text(hjust = 1, colour = "black", size=12),
plot.background = element_rect(fill = "#f0eae8"),
plot.title = element_text(size=16, face="bold", hjust=0.5)) +
geom_label(aes(x = 40, y = 4.5), hjust = 0, #adding the box with all the information
label = paste("Adj R2 = ",signif(summary(eruption.lm)$adj.r.squared, 5),
"\nIntercept =",signif(eruption.lm$coef[[1]],5 ),
" \nSlope =",signif(eruption.lm$coef[[2]], 5),
" \nP =",signif(summary(eruption.lm)$coef[2,4], 5)))
eruption_plot
|
library(shiny)
library(shinydashboard)
library(shinyjs)
# UI definition: a dashboard page whose body switches between two panels
# driven by the server-side flag `output.setupComplete` -- the phrase-input
# controls once setup is done, and a "loading" box while it is not.
shinyUI(fluidPage(
  dashboardHeader(),
  dashboardSidebar(),
  dashboardBody(
    # Shown once the server reports that setup has completed.
    conditionalPanel(condition = "output.setupComplete",
                     textInput("phrase", "Enter Phrase", ""),
                     actionButton("submit", "Enter"),
                     textOutput("nextWord")
    ),
    # Placeholder displayed while setup is still in progress.
    conditionalPanel(condition = "!output.setupComplete",
                     box( title = "Loading, Please Wait.", background="olive"))
  )))
))) | /ui.R | no_license | pnicewicz421/nlp | R | false | false | 539 | r | library(shiny)
library(shinydashboard)
library(shinyjs)
shinyUI(fluidPage(
dashboardHeader(),
dashboardSidebar(),
dashboardBody(
conditionalPanel(condition = "output.setupComplete",
textInput("phrase", "Enter Phrase", ""),
actionButton("submit", "Enter"),
textOutput("nextWord")
),
conditionalPanel(condition = "!output.setupComplete",
box( title = "Loading, Please Wait.", background="olive"))
))) |
#' Check Cached Data
#'
#' Checks that the cached data is available: verifies the cache file exists,
#' opens a read-only test connection, reports how many tables are cached,
#' and closes the test connection before returning.
#' @return invisible(NULL)
#' @export
#' @examples
#'\dontrun{
#' check_mdl_cache()
#' }
check_mdl_cache <- function(){
  my_filename <- file.path(mdl_get_cache_dir(), mdl_get_cache_filename())
  if(!file.exists(my_filename)){
    cli::cli_alert_danger("Cached data not found or not readable: {my_filename}")
    return(invisible(NULL))
  }
  suppressMessages(
    test_conn <- tryCatch(
      mdl_get_cache_connection(access="RO"),
      error = function(e){
        e
      })
  )
  if(inherits(test_conn, "error")){
    # Connection failed: report the (flattened) error message and bail out.
    cli::cli_alert_danger(stringr::str_replace(test_conn$message,"\n"," "))
    return(invisible(NULL))
  }
  # Fix: the original leaked the test connection; close it on exit.
  on.exit(try(DBI::dbDisconnect(test_conn), silent = TRUE), add = TRUE)
  if(inherits(test_conn, "SQLiteConnection")){
    cli::cli_alert_success("Cache database accessible and readable.")
  }
  ## Check available tables
  n_tables <- length(DBI::dbListTables(test_conn))
  cli::cli_alert_info("{n_tables} tables cached.")
  invisible(NULL)
}
| /R/check_mdl_cache.R | permissive | NAlcan/moodleR | R | false | false | 950 | r | #' Check Cached Data
#'
#' Checks that the cached data is available.
#' @return invisible(NULL)
#' @export
#' @examples
#'\dontrun{
#' check_mdl_cache()
#' }
check_mdl_cache <- function(){
my_filename <- file.path(mdl_get_cache_dir(), mdl_get_cache_filename())
if(!file.exists(my_filename)){
cli::cli_alert_danger("Cached data not found or not readable: {my_filename}")
return(invisible(NULL))
}
suppressMessages(
test_conn <- tryCatch(
mdl_get_cache_connection(access="RO"),
error = function(e){
e
})
)
if("error" %in% class(test_conn)){
cli::cli_alert_danger(stringr::str_replace(test_conn$message,"\n"," "))
return(invisible())
}
if("SQLiteConnection" %in% class(test_conn)){
cli::cli_alert_success("Cache database accessible and readable.")
}
## Check available tables
n_tables <- length(DBI::dbListTables(test_conn) )
cli::cli_alert_info("{n_tables} tables cached.")
}
|
#' Source a Script with MLflow Params
#'
#' This function should not be used interactively. It is designed to be called via `Rscript` from
#' the terminal or through the MLflow CLI.
#'
#' @param uri Path to an R script, can be a quoted or unquoted string.
#' @return `invisible(NULL)`.
#' @keywords internal
#' @export
mlflow_source <- function(uri) {
  if (interactive()) stop(
    "`mlflow_source()` cannot be used interactively; use `mlflow_run()` instead.",
    call. = FALSE
  )
  # Accept an unquoted path: substitute() captures the unevaluated argument.
  uri <- as.character(substitute(uri))
  # Stash --key value pairs from the command line where the sourced script
  # can look them up via the package-global environment.
  .globals$run_params <- list()
  command_args <- parse_command_line(commandArgs(trailingOnly = TRUE))
  if (!is.null(command_args)) {
    purrr::iwalk(command_args, function(value, key) {
      .globals$run_params[[key]] <- value
    })
  }
  tryCatch(
    suppressPackageStartupMessages(source(uri, local = parent.frame())),
    error = function(cnd) {
      message(cnd, "\n")
      mlflow_end_run(status = "FAILED")
    },
    interrupt = function(cnd) mlflow_end_run(status = "KILLED"),
    finally = {
      # If the script exited without closing its active run, mark it FAILED;
      # always clear the stashed parameters afterwards.
      if (!is.null(mlflow_get_active_run_id())) mlflow_end_run(status = "FAILED")
      clear_run_params()
    }
  )
  invisible(NULL)
}
# Remove the `run_params` binding from the package-global environment so a
# subsequent mlflow_source() call starts with a clean parameter set.
clear_run_params <- function() {
  rlang::env_unbind(.globals, "run_params")
}
| /mlflow/R/mlflow/R/project-source.R | permissive | mlflow/mlflow | R | false | false | 1,225 | r | #' Source a Script with MLflow Params
#'
#' This function should not be used interactively. It is designed to be called via `Rscript` from
#' the terminal or through the MLflow CLI.
#'
#' @param uri Path to an R script, can be a quoted or unquoted string.
#' @keywords internal
#' @export
mlflow_source <- function(uri) {
if (interactive()) stop(
"`mlflow_source()` cannot be used interactively; use `mlflow_run()` instead.",
call. = FALSE
)
uri <- as.character(substitute(uri))
.globals$run_params <- list()
command_args <- parse_command_line(commandArgs(trailingOnly = TRUE))
if (!is.null(command_args)) {
purrr::iwalk(command_args, function(value, key) {
.globals$run_params[[key]] <- value
})
}
tryCatch(
suppressPackageStartupMessages(source(uri, local = parent.frame())),
error = function(cnd) {
message(cnd, "\n")
mlflow_end_run(status = "FAILED")
},
interrupt = function(cnd) mlflow_end_run(status = "KILLED"),
finally = {
if (!is.null(mlflow_get_active_run_id())) mlflow_end_run(status = "FAILED")
clear_run_params()
}
)
invisible(NULL)
}
clear_run_params <- function() {
rlang::env_unbind(.globals, "run_params")
}
|
#' Random DNA sequence
#'
#' Creates a n long sequence of random nucleotides.
#'
#' @param n integer
#'
#' @return dna string of n-length
#' @export
#'
#' @examples
#' random_dna(10)
random_dna <- function(n){
nucleotides <- sample(c("A", "T", "G", "C"), size = n, replace = TRUE)
dna = paste0(nucleotides, collapse = "")
return(dna)
} | /R/random_dna.R | permissive | rforbiodatascience21/2021_group_09_rpackage | R | false | false | 342 | r | #' Random DNA sequence
#'
#' Generates a random DNA string of length \code{n}, drawing each base
#' uniformly (with replacement) from A, T, G and C.
#'
#' @param n integer, the number of nucleotides to draw.
#'
#' @return A single character string of \code{n} nucleotides.
#' @export
#'
#' @examples
#' random_dna(10)
random_dna <- function(n){
  bases <- sample(c("A", "T", "G", "C"), size = n, replace = TRUE)
  paste0(bases, collapse = "")
}
### Calculate ES alpha_G and beta_G by iteratively perturbing parameters ###
### and attempting re-invasion ###
BHS <- function(n,m0,m1,T3=0.6794521,tau_s=100){
  # Beverton-Holt-type seed survival over interval T3: density-independent
  # survival exp(-m0*T3), discounted by a competition term that grows with
  # density n per (tau_s/10) area units.
  di.surv <- exp(-m0 * T3)
  dd.term <- (m1 / m0) * (1 - exp(-m0 * T3)) * n / (tau_s / 10)
  di.surv / (1 + dd.term)
}
RICKERS <- function(n,m0,m1,T3=0.6794521,tau_s=100) {
  # Ricker-type survival over interval T3: mortality rate rises linearly
  # with density n per (tau_s/10) area units.
  rate <- m0 + m1 * n / (tau_s / 10)
  exp(-rate * T3)
}
# original model in m^2
# DD functions use density in 0.01 m^2 = 10 x 10 cm plots
# But we want to use 0.1m^2 plots (to match scale of quadrats)
# Therefore, tau_s set to 10 instead of 100
# In infinite-area models, all that matters for ES G is that same areas
# are used for plant and seed densities (scaling irrelevant - just alters
# densities, not ES G)
logitnorm <- function(x,mu,sigma){
  # Integrand for the mean of a logit-normal distribution: inverse-logit of
  # x weighted by the N(mu, sigma) density at x.
  dnorm(x, mean = mu, sd = sigma) * plogis(x)
}
# code borrowed from logitnorm package
logitmean <- function(mu,sigma){
  # Mean of a logit-normal distribution: integrate the weighted
  # inverse-logit over the whole real line.
  res <- integrate(logitnorm, lower = -Inf, upper = Inf, mu = mu, sigma = sigma)
  res$value
}
logitnormint <- Vectorize(function(mu,sigma,intsd=10,...){
  # Same logit-normal mean as logitmean(), but integrated over a finite
  # window of +/- intsd standard deviations around mu for numerical
  # stability; extra arguments are forwarded to integrate(). Vectorize()
  # lets mu/sigma be vectors.
  lo <- mu - intsd * sigma
  hi <- mu + intsd * sigma
  integrate(logitnorm, lower = lo, upper = hi, mu = mu, sigma = sigma, ...)$value
})
nbtmean <- function(mu,phi){
  # Mean of a zero-truncated negative binomial with mean mu and size phi
  # (the hurdle-model mean): mu divided by Pr(Y > 0).
  # ifelse() keeps the computation vectorised and returns 0 instead of
  # failing when expected reproduction is zero.
  pr.pos <- 1 - (phi / (mu + phi))^phi
  ifelse(pr.pos == 0 | mu == 0, 0, mu / pr.pos)
}
nbtnorm <- function(x,eta,sigma,phi){
  # Integrand: truncated-negbin mean at log-scale value x, weighted by the
  # N(eta, sigma) density of x.
  dnorm(x, mean = eta, sd = sigma) * nbtmean(mu = exp(x), phi = phi)
}
nbtlnmean <- function(eta,sigma,phi,intsd=10){
  # Mean of the truncated-negbin means over a lognormal distribution of mu:
  # each lognormal draw exp(x) yields a truncated-negbin mean, and we
  # integrate those means against the normal density of x. Finite limits
  # (+/- intsd sds around eta) keep integrate() from crashing; outside this
  # range the integrand is negligible.
  lims <- eta + c(-1, 1) * intsd * sigma
  integrate(nbtnorm,
            eta = eta, sigma = sigma, phi = phi,
            lower = lims[1],
            upper = lims[2])$value
}
fnn <- function(g,lgmu,x_z_t,eps_y_p_t,eps_y_r_t,pli,tau_d=100){
# g = log(N[g]) for a given plot
# Integrand over log germinant density g: expected density of new seeds from
# plots at density exp(g), weighted by the lognormal probability of that
# density (log-mean lgmu, log-sd sig_s_g). x_z_t is the year's climate
# covariate row; eps_y_p_t / eps_y_r_t are the year random effects for
# reproduction probability and per-capita reproduction.
# Parameters beta_p, beta_r, sig_s_g, sig_s_p, sig_o_p, sig_s_r and phi_r
# are pulled from the list `pli` via with().
with(pli, {
  dg <- dnorm(g,mean=lgmu,sd=sig_s_g)
  nl <- length(g)
  # Design matrix: intercept/climate columns plus log plot density.
  x_t <- matrix(nr=nl,nc=4)
  x_t[,1:3] <- rep(x_z_t,each=nl)
  x_t[,4] <- g - log(tau_d/10)
  # tau_d/10 density adjustment explained above
  pi_bar_t <- beta_p %*% t(x_t) + eps_y_p_t
  eta_bar_t <- beta_r %*% t(x_t) + eps_y_r_t
  # each density (lng) has own associated world of sites
  # but spatial aspects of pr(Y>0) and pr(Y|Y>0) considered independent,
  # so can be simply added together
  # can't average across years in this way because non-independent
  pr_t <- rs_t <- rep(NA,nl)
  for(l in 1:nl){
    # Marginal reproduction probability over spatial + observation noise.
    pr_t[l] <- logitmean(
      mu = pi_bar_t[l],
      sigma = sqrt(sig_s_p^2 + sig_o_p^2)
      )
    eta_t <- eta_bar_t[l]
    # Marginal per-capita seed production over spatial noise.
    rs_t[l] <- nbtlnmean(
      eta = eta_t,
      sigma = sig_s_r,
      phi = phi_r
      )
    } # close l loop
  lnY_t <- g + log(pr_t) + log(rs_t)
  # expected log density of new seeds for each possible germinant density
  # log-transforming to try and improve numerical stability
  return(exp(log(dg) + lnY_t))
  # expected overall mean density of seeds
  })
} # close g function
fixG <- function(w,a,b){
  # Germination fraction as an inverse-logit function of the climate cue w,
  # with intercept a and slope b.
  lin.pred <- a + b * w
  plogis(lin.pred)
}
pradj <- function(pr,mu,phi){
  # Adjust an overall reproduction probability `pr` for zero inflation:
  # divide by the probability that NB(mu, phi) is strictly positive.
  p.zero <- dnbinom(0, mu = mu, size = phi)  # Pr(Y = 0)
  pr / (1 - p.zero)                          # zero-inflated adjustment
}
sprinkle <- function(x,kseq,probs){
  # Scatter x individuals across plots via a multinomial draw with plot
  # weights `probs` (rmultinom normalises the weights to sum to 1).
  # NOTE(review): `kseq` (1:nk) is passed as rmultinom's `n` (number of
  # draws); the caller assigns the result into an nk x 2 matrix, which
  # relies on a single nk-length draw per call -- confirm the intended
  # number of multinomial samples here.
  rmultinom(n=kseq, size=x, prob=probs)
}
ressim <- function(amr,bmr,pli,nc=5,intsd=10,tau_d=100){
  # nc = n consecutive t that ns must be < nsmin
  # Deterministic resident dynamics for germination strategy (amr, bmr):
  # iterates seed density ns over nt years, computing germinant density ng,
  # new-seed density nn, and per-germinant yield Ye each year. The spatial
  # treatment depends on pli$nk: nk == 0 is a single average site; nk == Inf
  # integrates over a lognormal distribution of plot densities via fnn().
  # Returns a data frame with columns Gres (yearly germination fraction)
  # and Ye (yearly per-germinant seed yield).
  with(pli, {
    ns <- ng <- nn <- Ye <- rep(NA,nt)
    Gres <- fixG(w,amr,bmr)
    ns[1] <- n0
    t <- 1
    # Stop at nt, or once ns has been below nsmin for nc consecutive years.
    while(t <= nt
          & ifelse(t < nc, TRUE, FALSE %in% (ns[(t-(nc-1)):t] < nsmin))
    ){
      ng[t] <- Sg * Gres[t] * ns[t]
      if(ng[t] >= ngmin){
        if(nk==0){
          # essentially one site with eps_s = 0
          x_t <- c(x_z[t,],log(ng[t])-log(tau_d/10))
          pi_bar_t <- sum(beta_p * x_t) + eps_y_p[t]
          eta_bar_t <- sum(beta_r * x_t) + eps_y_r[t]
          pr_t <- logitnormint(mu=pi_bar_t,sigma=sig_o_p)
          rs_t <- nbtmean(exp(eta_bar_t),phi_r)
          nn[t] <- ng[t] * pr_t * rs_t
        } # nk==0
        if(nk==Inf){
          lgmu <- log(ng[t]) - (sig_s_g^2 / 2)
          # arithmetic mean = ng[i,t,]
          # logarithmic sd = sig_s_g[i,,j]
          # mean of lognormal distribution = log(am) - sig^2 / 2
          intlo <- lgmu - intsd * sig_s_g
          inthi <- lgmu + intsd * sig_s_g
          # setting range to 10 sds to improve convergence
          # (outside this range, ng=0 -> nn=0)
          nn[t] <- integrate(fnn,
                             lower=intlo, upper=inthi,
                             lgmu=lgmu,
                             x_z_t=x_z[t,],
                             eps_y_p_t=eps_y_p[t],
                             eps_y_r_t=eps_y_r[t],
                             pli=pli
          )$value
        } # nk==Inf
      } # close if function
      if(ng[t] < ngmin){
        nn[t] <- 0
      }
      # Per-germinant yield after density-dependent new-seed survival.
      Ye[t] <- ifelse(nn[t]==0, 0, nn[t] * DDFUN(nn[t]*(1-theta_g),m0,m1) / ng[t])
      # Seed-bank update: dormant survival plus germinated-and-reproduced.
      if(t<nt) ns[t+1] <- ns[t] * ( (1-Gres[t])*So + Gres[t]*Ye[t] )
      t <- t + 1
    } # close t loop
    return(data.frame(Gres=Gres,Ye=Ye))
  }) # close with function
}
invade_infinite <- function(ami,bmi,Gres,Ye,pli){
  # Infinite-population invasion test: given the resident's germination
  # fractions Gres and yearly yields Ye (from ressim()), an invader with
  # strategy (ami, bmi) invades when its mean log growth rate exceeds the
  # resident's over years nb+1..nt. Returns list(invaded = TRUE/FALSE).
  with(pli, {
    if(NA %in% Ye){
      invaded <- TRUE
      # if resident goes extinct, invader establishes immediately
    }
    if(!NA %in% Ye){
      # t = final value at which loop stopped
      Ginv <- fixG(w,ami,bmi)
      # Difference in log annual growth rates, invader minus resident.
      delta_r <- log((1-Ginv)*So + Ginv*Ye) - log((1-Gres)*So + Gres*Ye)
      invaded <- mean(delta_r[(nb+1):nt]) > 0
    }
    list(invaded=invaded)
  })
}
invade_finite <- function(amr,bmr,ami,bmi,pli,nc=5,tau_d=100,mumax=10^6){
  # nc = n consecutive t that ns must be < nsmin
  # Stochastic finite-area invasion test: simulates resident (amr, bmr) and
  # invader (ami, bmi) seed counts jointly across nk plots, with binomial
  # germination/dormant survival, plot-level multinomial dispersal, hurdle
  # negative-binomial reproduction, and density-dependent new-seed survival.
  # The invader (1 seed) enters at t = nb + 1; the loop runs until either
  # lineage is extinct or t reaches nt. Returns list(invaded, tex) where
  # tex is the time index at which the simulation stopped.
  require(countreg)
  require(MASS)
  with(pli, {
    ns <- array(dim=c(nt,2)) # 2 = res and inv
    Gres <- fixG(w,amr,bmr)
    Ginv <- fixG(w,ami,bmi)
    ns[1,] <- c(round(n0*nk/10,0),0)
    ns[1,][ns[1,]==0] <- 1
    # if starting density is 0, set to 1 instead
    t <- 1
    kseq <- 1:nk
    ng_k <- matrix(nr=nk,nc=2)
    while(t < nt
          & ns[t,1] > 0
          & (t <= (nb+1) | ns[t,2] > 0)
    ){
      if(t==(nb+1)){
        ns[t,2] <- 1
        # 1 invader introduced at t = nb + 1
      }
      # Binomial germination; non-germinants survive dormancy with prob So.
      ng <- rbinom(2,prob=c(Gres[t],Ginv[t]),size=ns[t,])
      no <- rbinom(2,prob=So,size=ns[t,]-ng)
      if(sum(ng)==0){
        nnb <- rep(0,2)
      }
      if(sum(ng)>0){
        ng_k[] <- sapply(ng,sprinkle,kseq,probs=eps_s_g)
        # using spatial terms as weights
        # (normalised within function to sum to 1)
        ngt_k <- rowSums(ng_k) # total germinants (residents and invaders)
        isg_k <- ngt_k>0 # binary: are there any germinants in plot?
        nkg <- sum(isg_k) # total number of *plots* with germinants
        x_k <- array(dim=c(nkg,4))
        x_k[,1:3] <- rep(x_z[t,],each=nkg)
        x_k[,4] <- log(ngt_k[isg_k]) - log(tau_d/10)
        eps_o_p_k <- rnorm(nkg,0,sig_o_p)
        # Plot-level reproduction probability and expected fecundity.
        pr_k <- plogis(beta_p %*% t(x_k) + eps_y_p[t] + eps_s_p[isg_k] + eps_o_p_k)
        mu_k <- exp(beta_r %*% t(x_k) + eps_y_r[t] + eps_s_r[isg_k])
        mu_k[mu_k>mumax] <- mumax
        # maximum expected per-capita reproduction
        # prevents negative binomial distribution with p=0
        pradj_k <- pradj(pr_k,mu_k,phi_r)
        qu <- pradj_k <= 1 # quick plots
        nqu <- sum(qu)
        nsl <- nkg-nqu
        nr_kq <- array(dim=c(nqu,2))
        nr_ks <- array(dim=c(nkg-nqu,2))
        # Number of reproducers: adjusted prob where valid ("quick" path),
        # raw prob otherwise ("slow" path, seeds drawn one plant at a time).
        nr_kq[] <- rbinom(nqu*2,prob=rep(pradj_k[qu],2),size=ng_k[isg_k][qu])
        nr_ks[] <- rbinom(nsl*2,prob=rep(pr_k[!qu],2),size=ng_k[isg_k][!qu])
        nr_all <- sum(nr_kq) + sum(nr_ks)
        if(nr_all==0){
          nnb <- c(0,0)
        }
        if(nr_all>0){
          qp <- nr_kq > 0
          nn1m <- matrix(0,nr=nrow(nr_kq),nc=ncol(nr_kq))
          # Quick path: sum of nr NB(mu, phi) draws is NB with size nr*phi.
          nn1m[qp] <- rnbinom(sum(qp),
                              prob=phi_r/(phi_r+rep(mu_k[qu],2)[qp]),
                              size=phi_r * nr_kq[qp]
          )
          nn1 <- colSums(nn1m)
          whichpos <- which(nr_ks > 0)
          isinv_k <- factor(whichpos > nsl,levels=c(FALSE,TRUE))
          # if TRUE, then in 2nd column
          isinv_r <- rep(isinv_k,nr_ks[whichpos])
          mus <- rep(rep(mu_k[!qu],2)[whichpos],nr_ks[whichpos])
          # Slow path: zero-truncated negbin seed count per reproducer.
          y_all <- rztnbinom(n=sum(nr_ks), mu=mus, size=phi_r)
          nn2 <- tapply(y_all,isinv_r,sum)
          nn2[is.na(nn2)] <- 0 # required when no reproducers in resident / invader
          nnt <- nn1 + nn2
          Sn <- ifelse(nnt==0,0,DDFUN(nnt/nk,m0,m1))
          # ifelse needed because doing separately for res and inv
          # division by 10 (i.e. scaling up to m^2) occurs within DDFUN
          nnb <- rbinom(2,prob=Sn,size=nnt)
        }
      }
      ns[t+1,] <- nnb + no
      t <- t + 1
    } # close t loop
    # simulation stops when either resident or invader has gone extinct
    # (or when maximum time limit has been reached -> coalition)
    if(ns[t,2] > 0){
      invaded <- TRUE
    }
    if(ns[t,2] == 0){
      invaded <- FALSE
    }
    list(invaded=invaded,tex=t)
  }) # close with function
}
evolve <- function(pdi,tau_p=100){
with(pdi,{
require(MASS)
finite <- nk>0 & nk<Inf
pli <- as.list(pdi)
if(ddfun=="BHS") pli$DDFUN <- BHS
if(ddfun=="RICKERS") pli$DDFUN <- RICKERS
pli$beta_p <- cbind(beta_p1,beta_p2,beta_p3,beta_p4)
pli$beta_r <- cbind(beta_r1,beta_r2,beta_r3,beta_r4)
zw_mu <- c(z=zm,w=wm) - log(tau_p)
zw_sig <- matrix(c(zs^2,rep(rho*zs*ws,2),ws^2),nr=2,nc=2)
zw <- mvrnorm(n=nt, mu=zw_mu, Sigma=zw_sig)
pli$eps_y_p <- rnorm(nt,0,1) * sig_y_p
pli$eps_y_r <- rnorm(nt,0,1) * sig_y_r
pli$x_z <- matrix(nr=nt,nc=3)
pli$x_z[,1] <- 1 # intercept
pli$x_z[,2] <- zw[,"z"]
pli$x_z[,3] <- zw[,"z"]^2
pli$w <- zw[,"w"]
if(finite==FALSE){
es <- data.frame(amr=rep(NA,times=nr),
bmr=rep(NA,times=nr)
)
es[1,] <- c(am0,bm0)
}
if(finite==TRUE){
es <- data.frame(amr=rep(NA,times=nr),
bmr=rep(NA,times=nr),
tex=rep(NA,times=nr)
)
es[1,] <- c(am0,bm0,NA)
}
for(r in 1:(nr-1)){
ami <- es$amr[r] + rnorm(1,0,smut_a)
bmi <- es$bmr[r] + rnorm(1,0,smut_b)
if(finite==FALSE){
rd <- with(es[r,], ressim(amr,bmr,pli))
ess <- invade_infinite(ami,bmi,rd$Gres,rd$Ye,pli)
}
if(finite==TRUE){
pli$eps_s_p <- rnorm(nk,0,sig_s_p)
pli$eps_s_r <- rnorm(nk,0,sig_s_r)
# done before g because want to match set.seed
pli$eps_s_g <- exp(rnorm(nk,0,sig_s_g))
zsites <- rbinom(nk,size=1,prob=theta_g)
while(sum(zsites)==nk){
zsites <- rbinom(nk,size=1,prob=theta_g)
}
# theta = prob of zero
# redraw until have at least one non-zero site
pli$eps_s_g[zsites==1] <- 0
ess <- with(es[r,],
invade_finite(amr,bmr,ami,bmi,pli)
)
}
if(ess$invaded==TRUE){
es[r+1,1:2] <- c(ami,bmi)
}
if(ess$invaded==FALSE){
es[r+1,1:2] <- es[r,1:2]
}
if(finite==TRUE){
es[r,3] <- ess$tex
}
} # close r loop
list(zw=zw,es=es) # eps_y_p=eps_y_p,eps_y_r=eps_y_r
}) # close with function
} | /Source/ESS_functions.R | no_license | callum-lawson/Annuals | R | false | false | 12,102 | r | ### Calculate ES alpha_G and beta_G by iteratively perturbing parameters ###
### and attempting re-invasion ###
BHS <- function(n,m0,m1,T3=0.6794521,tau_s=100){
exp(-m0*T3) / ( 1 + (m1/m0)*(1-exp(-m0*T3))*n/(tau_s/10) )
}
RICKERS <- function(n,m0,m1,T3=0.6794521,tau_s=100) {
exp(-(m0+m1*n/(tau_s/10))*T3)
}
# original model in m^2
# DD functions use density in 0.01 m^2 = 10 x 10 cm plots
# But we want to use 0.1m^2 plots (to match scale of quadrats)
# Therefore, tau_s set to 10 instead of 100
# In infinite-area models, all that matters for ES G is that same areas
# are used for plant and seed densities (scaling irrelevant - just alters
# densities, not ES G)
logitnorm <- function(x,mu,sigma){
plogis(x) * dnorm(x, mean=mu, sd=sigma)
}
# code borrowed from logitnorm package
logitmean <- function(mu,sigma){
integrate(logitnorm, mu=mu, sigma=sigma,
lower=-Inf,
upper=Inf
)$value
}
logitnormint <- Vectorize(function(mu,sigma,intsd=10,...){
integrate(logitnorm,
mu=mu,sigma=sigma,
lower=mu-intsd*sigma,
upper=mu+intsd*sigma,
...)$value
})
nbtmean <- function(mu,phi){
denom <- 1 - (phi/(mu+phi))^phi
ifelse(denom==0 | mu==0, 0, mu/denom)
}
# mean for hurdle model
# ifelse prevents calculation failing when expected reproduction = 0
nbtnorm <- function(x,eta,sigma,phi){
nbtmean(mu=exp(x),phi=phi) * dnorm(x, mean=eta, sd=sigma)
}
nbtlnmean <- function(eta,sigma,phi,intsd=10){
integrate(nbtnorm,
eta=eta,sigma=sigma,phi=phi,
lower=eta-intsd*sigma,
upper=eta+intsd*sigma
)$value
}
# finite limits required to stop integration from crashing
# - calculate probability of each value from lognormal distribution
# - each of these values produces a mean from a trunc negbin distribution
# - then integrate to calculate the mean of these means
# - can be done because just calculating mean of means for each plot type
fnn <- function(g,lgmu,x_z_t,eps_y_p_t,eps_y_r_t,pli,tau_d=100){
# g = log(N[g]) for a given plot
with(pli, {
dg <- dnorm(g,mean=lgmu,sd=sig_s_g)
nl <- length(g)
x_t <- matrix(nr=nl,nc=4)
x_t[,1:3] <- rep(x_z_t,each=nl)
x_t[,4] <- g - log(tau_d/10)
# tau_d/10 density adjustment explained above
pi_bar_t <- beta_p %*% t(x_t) + eps_y_p_t
eta_bar_t <- beta_r %*% t(x_t) + eps_y_r_t
# each density (lng) has own associated world of sites
# but spatial aspects of pr(Y>0) and pr(Y|Y>0) considered independent,
# so can be simply added together
# can't average across years in this way because non-independent
pr_t <- rs_t <- rep(NA,nl)
for(l in 1:nl){
pr_t[l] <- logitmean(
mu = pi_bar_t[l],
sigma = sqrt(sig_s_p^2 + sig_o_p^2)
)
eta_t <- eta_bar_t[l]
rs_t[l] <- nbtlnmean(
eta = eta_t,
sigma = sig_s_r,
phi = phi_r
)
} # close l loop
lnY_t <- g + log(pr_t) + log(rs_t)
# expected log density of new seeds for each possible germinant density
# log-transforming to try and improve numerical stability
return(exp(log(dg) + lnY_t))
# expected overall mean density of seeds
})
} # close g function
fixG <- function(w,a,b){
plogis(a+b*w)
}
pradj <- function(pr,mu,phi){
q <- dnbinom(0, mu=mu, size=phi) # Pr(Y>0)
return(pr / (1-q)) # zero-inflated
}
sprinkle <- function(x,kseq,probs){
rmultinom(n=kseq, size=x, prob=probs)
}
ressim <- function(amr,bmr,pli,nc=5,intsd=10,tau_d=100){
# nc = n consecutive t that ns must be < nsmin
with(pli, {
ns <- ng <- nn <- Ye <- rep(NA,nt)
Gres <- fixG(w,amr,bmr)
ns[1] <- n0
t <- 1
while(t <= nt
& ifelse(t < nc, TRUE, FALSE %in% (ns[(t-(nc-1)):t] < nsmin))
){
ng[t] <- Sg * Gres[t] * ns[t]
if(ng[t] >= ngmin){
if(nk==0){
# essentially one site with eps_s = 0
x_t <- c(x_z[t,],log(ng[t])-log(tau_d/10))
pi_bar_t <- sum(beta_p * x_t) + eps_y_p[t]
eta_bar_t <- sum(beta_r * x_t) + eps_y_r[t]
pr_t <- logitnormint(mu=pi_bar_t,sigma=sig_o_p)
rs_t <- nbtmean(exp(eta_bar_t),phi_r)
nn[t] <- ng[t] * pr_t * rs_t
} # nk==0
if(nk==Inf){
lgmu <- log(ng[t]) - (sig_s_g^2 / 2)
# arithmetic mean = ng[i,t,]
# logarithmic sd = sig_s_g[i,,j]
# mean of lognormal distribution = log(am) - sig^2 / 2
intlo <- lgmu - intsd * sig_s_g
inthi <- lgmu + intsd * sig_s_g
# setting range to 10 sds to improve convergence
# (outside this range, ng=0 -> nn=0)
nn[t] <- integrate(fnn,
lower=intlo, upper=inthi,
lgmu=lgmu,
x_z_t=x_z[t,],
eps_y_p_t=eps_y_p[t],
eps_y_r_t=eps_y_r[t],
pli=pli
)$value
} # nk==Inf
} # close if function
if(ng[t] < ngmin){
nn[t] <- 0
}
Ye[t] <- ifelse(nn[t]==0, 0, nn[t] * DDFUN(nn[t]*(1-theta_g),m0,m1) / ng[t])
if(t<nt) ns[t+1] <- ns[t] * ( (1-Gres[t])*So + Gres[t]*Ye[t] )
t <- t + 1
} # close t loop
return(data.frame(Gres=Gres,Ye=Ye))
}) # close with function
}
invade_infinite <- function(ami,bmi,Gres,Ye,pli){
with(pli, {
if(NA %in% Ye){
invaded <- TRUE
# if resident goes extinct, invader establishes immediately
}
if(!NA %in% Ye){
# t = final value at which loop stopped
Ginv <- fixG(w,ami,bmi)
delta_r <- log((1-Ginv)*So + Ginv*Ye) - log((1-Gres)*So + Gres*Ye)
invaded <- mean(delta_r[(nb+1):nt]) > 0
}
list(invaded=invaded)
})
}
invade_finite <- function(amr,bmr,ami,bmi,pli,nc=5,tau_d=100,mumax=10^6){
# nc = n consecutive t that ns must be < nsmin
require(countreg)
require(MASS)
with(pli, {
ns <- array(dim=c(nt,2)) # 2 = res and inv
Gres <- fixG(w,amr,bmr)
Ginv <- fixG(w,ami,bmi)
ns[1,] <- c(round(n0*nk/10,0),0)
ns[1,][ns[1,]==0] <- 1
# if starting density is 0, set to 1 instead
t <- 1
kseq <- 1:nk
ng_k <- matrix(nr=nk,nc=2)
while(t < nt
& ns[t,1] > 0
& (t <= (nb+1) | ns[t,2] > 0)
){
if(t==(nb+1)){
ns[t,2] <- 1
# 1 invader introduced at t = nb + 1
}
ng <- rbinom(2,prob=c(Gres[t],Ginv[t]),size=ns[t,])
no <- rbinom(2,prob=So,size=ns[t,]-ng)
if(sum(ng)==0){
nnb <- rep(0,2)
}
if(sum(ng)>0){
ng_k[] <- sapply(ng,sprinkle,kseq,probs=eps_s_g)
# using spatial terms as weights
# (normalised within function to sum to 1)
ngt_k <- rowSums(ng_k) # total germinants (residents and invaders)
isg_k <- ngt_k>0 # binary: are there any germinants in plot?
nkg <- sum(isg_k) # total number of *plots* with germinants
x_k <- array(dim=c(nkg,4))
x_k[,1:3] <- rep(x_z[t,],each=nkg)
x_k[,4] <- log(ngt_k[isg_k]) - log(tau_d/10)
eps_o_p_k <- rnorm(nkg,0,sig_o_p)
pr_k <- plogis(beta_p %*% t(x_k) + eps_y_p[t] + eps_s_p[isg_k] + eps_o_p_k)
mu_k <- exp(beta_r %*% t(x_k) + eps_y_r[t] + eps_s_r[isg_k])
mu_k[mu_k>mumax] <- mumax
# maximum expected per-capita reproduction
# prevents negative binomial distribution with p=0
pradj_k <- pradj(pr_k,mu_k,phi_r)
qu <- pradj_k <= 1 # quick plots
nqu <- sum(qu)
nsl <- nkg-nqu
nr_kq <- array(dim=c(nqu,2))
nr_ks <- array(dim=c(nkg-nqu,2))
nr_kq[] <- rbinom(nqu*2,prob=rep(pradj_k[qu],2),size=ng_k[isg_k][qu])
nr_ks[] <- rbinom(nsl*2,prob=rep(pr_k[!qu],2),size=ng_k[isg_k][!qu])
nr_all <- sum(nr_kq) + sum(nr_ks)
if(nr_all==0){
nnb <- c(0,0)
}
if(nr_all>0){
qp <- nr_kq > 0
nn1m <- matrix(0,nr=nrow(nr_kq),nc=ncol(nr_kq))
nn1m[qp] <- rnbinom(sum(qp),
prob=phi_r/(phi_r+rep(mu_k[qu],2)[qp]),
size=phi_r * nr_kq[qp]
)
nn1 <- colSums(nn1m)
whichpos <- which(nr_ks > 0)
isinv_k <- factor(whichpos > nsl,levels=c(FALSE,TRUE))
# if TRUE, then in 2nd column
isinv_r <- rep(isinv_k,nr_ks[whichpos])
mus <- rep(rep(mu_k[!qu],2)[whichpos],nr_ks[whichpos])
y_all <- rztnbinom(n=sum(nr_ks), mu=mus, size=phi_r)
nn2 <- tapply(y_all,isinv_r,sum)
nn2[is.na(nn2)] <- 0 # required when no reproducers in resident / invader
nnt <- nn1 + nn2
Sn <- ifelse(nnt==0,0,DDFUN(nnt/nk,m0,m1))
# ifelse needed because doing separately for res and inv
# division by 10 (i.e. scaling up to m^2) occurs within DDFUN
nnb <- rbinom(2,prob=Sn,size=nnt)
}
}
ns[t+1,] <- nnb + no
t <- t + 1
} # close t loop
# simulation stops when either resident or invader has gone extinct
# (or when maximum time limit has been reached -> coalition)
if(ns[t,2] > 0){
invaded <- TRUE
}
if(ns[t,2] == 0){
invaded <- FALSE
}
list(invaded=invaded,tex=t)
}) # close with function
}
evolve <- function(pdi,tau_p=100){
  # Adaptive-dynamics loop: starting from germination strategy (am0, bm0),
  # run nr-1 rounds in which a mutant strategy (small Gaussian perturbations
  # smut_a, smut_b) attempts to invade the current resident; on success the
  # mutant becomes the new resident. Uses the infinite-population test
  # (ressim + invade_infinite) when nk is 0 or Inf, and the stochastic
  # finite-area test (invade_finite) otherwise. Climate series (z, w) is
  # drawn once from a bivariate normal. Returns list(zw = climate draws,
  # es = per-round resident strategies, plus stopping time tex when finite).
  with(pdi,{
    require(MASS)
    finite <- nk>0 & nk<Inf
    pli <- as.list(pdi)
    # Attach the chosen density-dependence function and coefficient vectors.
    if(ddfun=="BHS") pli$DDFUN <- BHS
    if(ddfun=="RICKERS") pli$DDFUN <- RICKERS
    pli$beta_p <- cbind(beta_p1,beta_p2,beta_p3,beta_p4)
    pli$beta_r <- cbind(beta_r1,beta_r2,beta_r3,beta_r4)
    # Correlated climate series for all nt years (log tau_p rescaling).
    zw_mu <- c(z=zm,w=wm) - log(tau_p)
    zw_sig <- matrix(c(zs^2,rep(rho*zs*ws,2),ws^2),nr=2,nc=2)
    zw <- mvrnorm(n=nt, mu=zw_mu, Sigma=zw_sig)
    pli$eps_y_p <- rnorm(nt,0,1) * sig_y_p
    pli$eps_y_r <- rnorm(nt,0,1) * sig_y_r
    # Year-level design matrix: intercept, z, z^2.
    pli$x_z <- matrix(nr=nt,nc=3)
    pli$x_z[,1] <- 1 # intercept
    pli$x_z[,2] <- zw[,"z"]
    pli$x_z[,3] <- zw[,"z"]^2
    pli$w <- zw[,"w"]
    if(finite==FALSE){
      es <- data.frame(amr=rep(NA,times=nr),
                       bmr=rep(NA,times=nr)
      )
      es[1,] <- c(am0,bm0)
    }
    if(finite==TRUE){
      es <- data.frame(amr=rep(NA,times=nr),
                       bmr=rep(NA,times=nr),
                       tex=rep(NA,times=nr)
      )
      es[1,] <- c(am0,bm0,NA)
    }
    for(r in 1:(nr-1)){
      # Mutant strategy: perturb the current resident.
      ami <- es$amr[r] + rnorm(1,0,smut_a)
      bmi <- es$bmr[r] + rnorm(1,0,smut_b)
      if(finite==FALSE){
        rd <- with(es[r,], ressim(amr,bmr,pli))
        ess <- invade_infinite(ami,bmi,rd$Gres,rd$Ye,pli)
      }
      if(finite==TRUE){
        # Fresh plot-level random effects for each invasion attempt.
        pli$eps_s_p <- rnorm(nk,0,sig_s_p)
        pli$eps_s_r <- rnorm(nk,0,sig_s_r)
        # done before g because want to match set.seed
        pli$eps_s_g <- exp(rnorm(nk,0,sig_s_g))
        zsites <- rbinom(nk,size=1,prob=theta_g)
        while(sum(zsites)==nk){
          zsites <- rbinom(nk,size=1,prob=theta_g)
        }
        # theta = prob of zero
        # redraw until have at least one non-zero site
        pli$eps_s_g[zsites==1] <- 0
        ess <- with(es[r,],
                    invade_finite(amr,bmr,ami,bmi,pli)
        )
      }
      # Successful invader replaces the resident; otherwise carry forward.
      if(ess$invaded==TRUE){
        es[r+1,1:2] <- c(ami,bmi)
      }
      if(ess$invaded==FALSE){
        es[r+1,1:2] <- es[r,1:2]
      }
      if(finite==TRUE){
        es[r,3] <- ess$tex
      }
    } # close r loop
    list(zw=zw,es=es) # eps_y_p=eps_y_p,eps_y_r=eps_y_r
  }) # close with function
}
## Create a cached matrix object for an invertible matrix
makeCacheMatrix <- function(x = matrix()) {
cachedInv <- NULL
set <- function(y) {
x <<- y
cachedInv <<- NULL
}
get <- function() x
setInv <- function(inv) cachedInv <<- inv
getInv <- function() cachedInv
list(set = set, get = get,
setInv = setInv,
getInv = getInv)
}
## computes the inverse of the cached matrix object
cacheSolve <- function(x, ...) {
inv <- x$getInv()
if(!is.null(inv)) {
message("getting cached data")
return(inv)
}
cached <- x$get()
inv <- solve(cached, ...)
x$setInv(inv)
inv
} | /cachematrix.R | no_license | songsey/rprog-assgn2 | R | false | false | 709 | r | ## Create a cached matrix object for an invertible matrix
makeCacheMatrix <- function(x = matrix()) {
  # Wrap an invertible matrix together with a cache slot for its inverse.
  # Returns a list of accessors; set() replaces the matrix and invalidates
  # the cached inverse.
  cachedInv <- NULL
  set <- function(y) {
    x <<- y
    cachedInv <<- NULL  # new matrix => stale inverse
  }
  get <- function() {
    x
  }
  setInv <- function(inv) {
    cachedInv <<- inv
  }
  getInv <- function() {
    cachedInv
  }
  list(set = set,
       get = get,
       setInv = setInv,
       getInv = getInv)
}
## Compute (or retrieve) the inverse of a cached matrix object created by
## makeCacheMatrix(): return the cached inverse when present, otherwise
## solve(), store, and return it. Extra arguments are passed to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getInv()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  result <- solve(x$get(), ...)
  x$setInv(result)
  result
}
## Vignette code for the nproc package (extracted by knitr; the "## ----"
## lines are the original chunk separators). Demonstrates Neyman-Pearson
## classification (npc) and NP-ROC curves (nproc) on simulated 2-D data.
## ---- eval=FALSE---------------------------------------------------------
#  install.packages("nproc", repos = "http://cran.us.r-project.org")
## ------------------------------------------------------------------------
library(nproc)
## ------------------------------------------------------------------------
# Simulate n observations with two features; class probability depends on x1.
n = 1000
set.seed(0)
x = matrix(rnorm(n*2),n,2)
c = 1+3*x[,1]
y = rbinom(n,1,1/(1+exp(-c)))
## ------------------------------------------------------------------------
plot(x[y==1,],col=1,xlim=c(-4,4),xlab='x1',ylab='x2')
points(x[y==0,],col=2,pch=2)
legend("topright",legend=c('Class 1','Class 0'),col=1:2,pch=c(1,2))
## ------------------------------------------------------------------------
# NP classifier with LDA base method and type-I error bound alpha = 0.05.
fit = npc(x, y, method = "lda", alpha = 0.05)
## ------------------------------------------------------------------------
# Independent test set from the same generating process.
xtest = matrix(rnorm(n*2),n,2)
ctest = 1+3*xtest[,1]
ytest = rbinom(n,1,1/(1+exp(-ctest)))
## ------------------------------------------------------------------------
pred = predict(fit,xtest)
fit.score = predict(fit,x)
accuracy = mean(pred$pred.label==ytest)
cat("Overall Accuracy: ", accuracy,'\n')
ind0 = which(ytest==0)
typeI = mean(pred$pred.label[ind0]!=ytest[ind0]) #type I error on test set
cat('Type I error: ', typeI, '\n')
## ------------------------------------------------------------------------
fit = npc(x, y, method = "logistic", alpha = 0.1)
pred = predict(fit,xtest)
accuracy = mean(pred$pred.label==ytest)
cat("Overall Accuracy: ", accuracy,'\n')
ind0 = which(ytest==0)
typeI = mean(pred$pred.label[ind0]!=ytest[ind0]) #type I error on test set
cat('Type I error: ', typeI, '\n')
## ------------------------------------------------------------------------
fit = npc(x, y, method = "logistic", alpha = 0.1, split = 11)
pred = predict(fit,xtest)
accuracy = mean(pred$pred.label==ytest)
cat("Overall Accuracy: ", accuracy,'\n')
ind0 = which(ytest==0)
typeI = mean(pred$pred.label[ind0]!=ytest[ind0]) #type I error on test set
cat('Type I error: ', typeI, '\n')
## ------------------------------------------------------------------------
# Compare several base classification methods under the same alpha.
methodlist = c("logistic", "penlog", "svm", "randomforest",
               "lda", "nb", "ada")
loc.prob = NULL
for(method in methodlist){
  fit = npc(x, y, method = method, alpha = 0.05, loc.prob = loc.prob)
  loc.prob = fit$loc.prob #Recycle the loc.prob from the fit to same time for the next fit
  pred = predict(fit,xtest)
  accuracy = mean(pred$pred.label==ytest)
  cat(method, ': Overall Accuracy: ', accuracy,'\n')
  ind0 = which(ytest==0)
  typeI = mean(pred$pred.label[ind0]!=ytest[ind0]) #type I error on test set
  cat(method, ': Type I error: ', typeI, '\n')
}
## ------------------------------------------------------------------------
# "custom" method: supply pre-computed classification scores directly.
fit2 = npc(y = y, score = fit.score$pred.score,
           pred.score = pred$pred.score, loc.prob = loc.prob, method = 'custom')
## ------------------------------------------------------------------------
fit = nproc(x, y, method = "svm", loc.prob.lo = loc.prob)
plot(fit)
## ------------------------------------------------------------------------
fit = nproc(x, y, method = "lda", loc.prob.lo = loc.prob)
plot(fit)
## ------------------------------------------------------------------------
fit = nproc(x, y, method = "logistic", conf = TRUE)
plot(fit)
| /nproc/inst/doc/nproc-demo.R | no_license | ingted/R-Examples | R | false | false | 3,455 | r | ## ---- eval=FALSE---------------------------------------------------------
# install.packages("nproc", repos = "http://cran.us.r-project.org")
## ------------------------------------------------------------------------
library(nproc)
## ------------------------------------------------------------------------
# Simulate a two-feature logistic model: P(class 1) increases with x1.
n <- 1000
set.seed(0)
x <- matrix(rnorm(n * 2), n, 2)
eta <- 1 + 3 * x[, 1] # linear predictor (renamed from 'c' to avoid masking base::c)
y <- rbinom(n, 1, 1 / (1 + exp(-eta)))
## ------------------------------------------------------------------------
plot(x[y == 1, ], col = 1, xlim = c(-4, 4), xlab = 'x1', ylab = 'x2')
points(x[y == 0, ], col = 2, pch = 2)
legend("topright", legend = c('Class 1', 'Class 0'), col = 1:2, pch = c(1, 2))
## ------------------------------------------------------------------------
# Neyman-Pearson classifier: control the type I error at alpha = 0.05.
fit <- npc(x, y, method = "lda", alpha = 0.05)
## ------------------------------------------------------------------------
# Independent test set from the same generative model.
xtest <- matrix(rnorm(n * 2), n, 2)
ctest <- 1 + 3 * xtest[, 1]
ytest <- rbinom(n, 1, 1 / (1 + exp(-ctest)))
## ------------------------------------------------------------------------
pred <- predict(fit, xtest)
fit.score <- predict(fit, x)
accuracy <- mean(pred$pred.label == ytest)
cat("Overall Accuracy: ", accuracy, '\n')
ind0 <- which(ytest == 0)
typeI <- mean(pred$pred.label[ind0] != ytest[ind0]) # type I error on test set
cat('Type I error: ', typeI, '\n')
## ------------------------------------------------------------------------
fit <- npc(x, y, method = "logistic", alpha = 0.1)
pred <- predict(fit, xtest)
accuracy <- mean(pred$pred.label == ytest)
cat("Overall Accuracy: ", accuracy, '\n')
ind0 <- which(ytest == 0)
typeI <- mean(pred$pred.label[ind0] != ytest[ind0]) # type I error on test set
cat('Type I error: ', typeI, '\n')
## ------------------------------------------------------------------------
fit <- npc(x, y, method = "logistic", alpha = 0.1, split = 11)
pred <- predict(fit, xtest)
accuracy <- mean(pred$pred.label == ytest)
cat("Overall Accuracy: ", accuracy, '\n')
ind0 <- which(ytest == 0)
typeI <- mean(pred$pred.label[ind0] != ytest[ind0]) # type I error on test set
cat('Type I error: ', typeI, '\n')
## ------------------------------------------------------------------------
# Compare all supported base classifiers under the same alpha level.
methodlist <- c("logistic", "penlog", "svm", "randomforest",
                "lda", "nb", "ada")
loc.prob <- NULL
for (method in methodlist) {
  fit <- npc(x, y, method = method, alpha = 0.05, loc.prob = loc.prob)
  loc.prob <- fit$loc.prob # recycle loc.prob from this fit to save time on the next fit
  pred <- predict(fit, xtest)
  accuracy <- mean(pred$pred.label == ytest)
  cat(method, ': Overall Accuracy: ', accuracy, '\n')
  ind0 <- which(ytest == 0)
  typeI <- mean(pred$pred.label[ind0] != ytest[ind0]) # type I error on test set
  cat(method, ': Type I error: ', typeI, '\n')
}
## ------------------------------------------------------------------------
fit2 <- npc(y = y, score = fit.score$pred.score,
            pred.score = pred$pred.score, loc.prob = loc.prob, method = 'custom')
## ------------------------------------------------------------------------
fit <- nproc(x, y, method = "svm", loc.prob.lo = loc.prob)
plot(fit)
## ------------------------------------------------------------------------
fit <- nproc(x, y, method = "lda", loc.prob.lo = loc.prob)
plot(fit)
## ------------------------------------------------------------------------
fit <- nproc(x, y, method = "logistic", conf = TRUE)
plot(fit)
## ------------------------------------------------------------------------
fit <- nproc(x, y, method = c('svm', 'logistic', 'lda'), conf = TRUE) # TRUE, not T
plot(fit)
|
#' Create a multiselect input control
#'
#' @description A user-friendly replacement for select boxes with the multiple attribute
#'
#' @param inputId The \code{input} slot that will be used to access the value.
#' @param label Display label for the control, or \code{NULL} for no label.
#' @param choices List of values to select from.
#' @param selected The initially selected value.
#' @param width The width of the input, e.g. \code{400px}, or \code{100\%}.
#' @param choiceNames List of names to display to the user.
#' @param choiceValues List of values corresponding to \code{choiceNames}.
#' @param options List of options passed to multi (\code{enable_search = FALSE} for disabling the search bar for example).
#'
#' @return A multiselect control
#'
#' @importFrom jsonlite toJSON
#' @importFrom htmltools validateCssUnit tags
#'
#' @export
#'
#' @seealso \link{updateMultiInput} to update value server-side.
#'
#' @examples
#' \dontrun{
#' ## Only run examples in interactive R sessions
#' if (interactive()) {
#'
#' library("shiny")
#' library("shinyWidgets")
#'
#'
#' # simple use
#'
#' ui <- fluidPage(
#' multiInput(
#' inputId = "id", label = "Fruits :",
#' choices = c("Banana", "Blueberry", "Cherry",
#' "Coconut", "Grapefruit", "Kiwi",
#' "Lemon", "Lime", "Mango", "Orange",
#' "Papaya"),
#' selected = "Banana", width = "350px"
#' ),
#' verbatimTextOutput(outputId = "res")
#' )
#'
#' server <- function(input, output, session) {
#' output$res <- renderPrint({
#' input$id
#' })
#' }
#'
#' shinyApp(ui = ui, server = server)
#'
#'
#' # with options
#'
#' ui <- fluidPage(
#' multiInput(
#' inputId = "id", label = "Fruits :",
#' choices = c("Banana", "Blueberry", "Cherry",
#' "Coconut", "Grapefruit", "Kiwi",
#' "Lemon", "Lime", "Mango", "Orange",
#' "Papaya"),
#' selected = "Banana", width = "400px",
#' options = list(
#' enable_search = FALSE,
#' non_selected_header = "Choose between:",
#' selected_header = "You have selected:"
#' )
#' ),
#' verbatimTextOutput(outputId = "res")
#' )
#'
#' server <- function(input, output, session) {
#' output$res <- renderPrint({
#' input$id
#' })
#' }
#'
#' shinyApp(ui = ui, server = server)
#'
#' }
#' }
multiInput <- function(inputId, label, choices = NULL, selected = NULL,
                       options = NULL, width = NULL,
                       choiceNames = NULL, choiceValues = NULL) {
  # Restore any bookmarked value for this input before building the tag.
  selected <- shiny::restoreInput(id = inputId, default = selected)
  # The underlying <select multiple> element that multi.js enhances.
  selectTag <- htmltools::tags$select(
    id = inputId, multiple = "multiple", class = "form-control multijs",
    makeChoices(choices = choices, choiceNames = choiceNames,
                choiceValues = choiceValues, selected = selected)
  )
  multiTag <- htmltools::tags$div(
    class = "form-group shiny-input-container",
    style = if (!is.null(width)) paste("width:", htmltools::validateCssUnit(width)),
    htmltools::tags$label(class = "control-label", `for` = inputId, label),
    selectTag,
    # multi.js options serialized as JSON; picked up client-side through the
    # data-for attribute (namespaced for consistency with the other tag calls).
    htmltools::tags$script(
      type = "application/json", `data-for` = inputId,
      jsonlite::toJSON(options, auto_unbox = TRUE, json_verbatim = TRUE)
    )
  )
  # Attach the multi.js HTML dependency so its assets ship with the tag.
  attachShinyWidgetsDep(multiTag, "multi")
}
# Internal helper: build the <option> tags for multiInput()'s <select>.
# Supply either `choices` (display names derived via choicesWithNames) or the
# pair `choiceNames`/`choiceValues`; values found in `selected` get the
# selected attribute. Not exported.
makeChoices <- function(choices = NULL, choiceNames = NULL, choiceValues = NULL, selected = NULL) {
  if (is.null(choices)) {
    if (is.null(choiceValues)) {
      stop("If choices = NULL, choiceValues must not be NULL")
    }
    if (length(choiceNames) != length(choiceValues)) {
      stop("`choiceNames` and `choiceValues` must have the same length.")
    }
    choiceValues <- as.list(choiceValues)
    choiceNames <- as.list(choiceNames)
    # Qualified htmltools::tagList: the file only imports validateCssUnit and
    # tags from htmltools, so a bare tagList is not guaranteed to resolve.
    htmltools::tagList(
      lapply(
        X = seq_along(choiceNames),
        FUN = function(i) {
          htmltools::tags$option(
            value = choiceValues[[i]], as.character(choiceNames[[i]]),
            selected = if (choiceValues[[i]] %in% selected) "selected"
          )
        }
      )
    )
  } else {
    # Normalise to a named vector so values and display names line up.
    choices <- choicesWithNames(choices)
    htmltools::tagList(
      lapply(
        X = seq_along(choices),
        FUN = function(i) {
          htmltools::tags$option(
            value = choices[[i]], names(choices)[i],
            selected = if (choices[[i]] %in% selected) "selected"
          )
        }
      )
    )
  }
}
#' @title Change the value of a multi input on the client
#'
#' @description Change the value of a multi input on the client
#'
#' @param session The session object passed to function given to shinyServer.
#' @param inputId The id of the input object.
#' @param label The label to set.
#' @param selected The values selected. To select none, use \code{character(0)}.
#' @param choices The new choices for the input.
#'
#' @seealso \code{\link{multiInput}}
#'
#' @note Thanks to \href{https://github.com/ifellows}{Ian Fellows} for this one !
#'
#' @export
#'
#' @importFrom utils capture.output
#'
#' @examples
#' \dontrun{
#'
#' if (interactive()) {
#'
#' library(shiny)
#' library(shinyWidgets)
#'
#' fruits <- c("Banana", "Blueberry", "Cherry",
#' "Coconut", "Grapefruit", "Kiwi",
#' "Lemon", "Lime", "Mango", "Orange",
#' "Papaya")
#'
#' ui <- fluidPage(
#' tags$h2("Multi update"),
#' multiInput(
#' inputId = "my_multi",
#' label = "Fruits :",
#' choices = fruits,
#' selected = "Banana",
#' width = "350px"
#' ),
#' verbatimTextOutput(outputId = "res"),
#' selectInput(
#' inputId = "selected",
#' label = "Update selected:",
#' choices = fruits,
#' multiple = TRUE
#' ),
#' textInput(inputId = "label", label = "Update label:")
#' )
#'
#' server <- function(input, output, session) {
#'
#' output$res <- renderPrint(input$my_multi)
#'
#' observeEvent(input$selected, {
#' updateMultiInput(
#' session = session,
#' inputId = "my_multi",
#' selected = input$selected
#' )
#' })
#'
#' observeEvent(input$label, {
#' updateMultiInput(
#' session = session,
#' inputId = "my_multi",
#' label = input$label
#' )
#' }, ignoreInit = TRUE)
#' }
#'
#' shinyApp(ui, server)
#'
#' }
#'
#' }
updateMultiInput <- function(session, inputId, label = NULL, selected = NULL, choices = NULL) {
  # Normalise any new choices to a named vector, mirroring multiInput().
  if (!is.null(choices)) {
    choices <- choicesWithNames(choices)
  }
  # Validate the requested selection against the (possibly new) choices.
  if (!is.null(selected)) {
    selected <- validateSelected(selected, choices, inputId)
  }
  # Render replacement <option> tags to a single HTML string when needed.
  options <- NULL
  if (!is.null(choices)) {
    options <- paste(capture.output(makeChoices(choices, selected = selected)),
                     collapse = "\n")
  }
  # NULL entries are dropped so only the supplied fields get updated client-side.
  msg <- dropNulls(list(label = label, options = options, value = selected))
  session$sendInputMessage(inputId, msg)
}
| /R/input-multi.R | permissive | hrngultekin/shinyWidgets | R | false | false | 6,800 | r | #' Create a multiselect input control
#'
#' @description A user-friendly replacement for select boxes with the multiple attribute
#'
#' @param inputId The \code{input} slot that will be used to access the value.
#' @param label Display label for the control, or \code{NULL} for no label.
#' @param choices List of values to select from.
#' @param selected The initially selected value.
#' @param width The width of the input, e.g. \code{400px}, or \code{100\%}.
#' @param choiceNames List of names to display to the user.
#' @param choiceValues List of values corresponding to \code{choiceNames}.
#' @param options List of options passed to multi (\code{enable_search = FALSE} for disabling the search bar for example).
#'
#' @return A multiselect control
#'
#' @importFrom jsonlite toJSON
#' @importFrom htmltools validateCssUnit tags
#'
#' @export
#'
#' @seealso \link{updateMultiInput} to update value server-side.
#'
#' @examples
#' \dontrun{
#' ## Only run examples in interactive R sessions
#' if (interactive()) {
#'
#' library("shiny")
#' library("shinyWidgets")
#'
#'
#' # simple use
#'
#' ui <- fluidPage(
#' multiInput(
#' inputId = "id", label = "Fruits :",
#' choices = c("Banana", "Blueberry", "Cherry",
#' "Coconut", "Grapefruit", "Kiwi",
#' "Lemon", "Lime", "Mango", "Orange",
#' "Papaya"),
#' selected = "Banana", width = "350px"
#' ),
#' verbatimTextOutput(outputId = "res")
#' )
#'
#' server <- function(input, output, session) {
#' output$res <- renderPrint({
#' input$id
#' })
#' }
#'
#' shinyApp(ui = ui, server = server)
#'
#'
#' # with options
#'
#' ui <- fluidPage(
#' multiInput(
#' inputId = "id", label = "Fruits :",
#' choices = c("Banana", "Blueberry", "Cherry",
#' "Coconut", "Grapefruit", "Kiwi",
#' "Lemon", "Lime", "Mango", "Orange",
#' "Papaya"),
#' selected = "Banana", width = "400px",
#' options = list(
#' enable_search = FALSE,
#' non_selected_header = "Choose between:",
#' selected_header = "You have selected:"
#' )
#' ),
#' verbatimTextOutput(outputId = "res")
#' )
#'
#' server <- function(input, output, session) {
#' output$res <- renderPrint({
#' input$id
#' })
#' }
#'
#' shinyApp(ui = ui, server = server)
#'
#' }
#' }
multiInput <- function(inputId, label, choices = NULL, selected = NULL,
                       options = NULL, width = NULL,
                       choiceNames = NULL, choiceValues = NULL) {
  # Restore any bookmarked value for this input before building the tag.
  selected <- shiny::restoreInput(id = inputId, default = selected)
  # The underlying <select multiple> element that multi.js enhances.
  selectTag <- htmltools::tags$select(
    id = inputId, multiple = "multiple", class = "form-control multijs",
    makeChoices(choices = choices, choiceNames = choiceNames,
                choiceValues = choiceValues, selected = selected)
  )
  multiTag <- htmltools::tags$div(
    class = "form-group shiny-input-container",
    style = if (!is.null(width)) paste("width:", htmltools::validateCssUnit(width)),
    htmltools::tags$label(class = "control-label", `for` = inputId, label),
    selectTag,
    # multi.js options serialized as JSON; picked up client-side through the
    # data-for attribute (namespaced for consistency with the other tag calls).
    htmltools::tags$script(
      type = "application/json", `data-for` = inputId,
      jsonlite::toJSON(options, auto_unbox = TRUE, json_verbatim = TRUE)
    )
  )
  # Attach the multi.js HTML dependency so its assets ship with the tag.
  attachShinyWidgetsDep(multiTag, "multi")
}
# Internal helper: build the <option> tags for multiInput()'s <select>.
# Supply either `choices` (display names derived via choicesWithNames) or the
# pair `choiceNames`/`choiceValues`; values found in `selected` get the
# selected attribute. Not exported.
makeChoices <- function(choices = NULL, choiceNames = NULL, choiceValues = NULL, selected = NULL) {
  if (is.null(choices)) {
    if (is.null(choiceValues)) {
      stop("If choices = NULL, choiceValues must not be NULL")
    }
    if (length(choiceNames) != length(choiceValues)) {
      stop("`choiceNames` and `choiceValues` must have the same length.")
    }
    choiceValues <- as.list(choiceValues)
    choiceNames <- as.list(choiceNames)
    # Qualified htmltools::tagList: the file only imports validateCssUnit and
    # tags from htmltools, so a bare tagList is not guaranteed to resolve.
    htmltools::tagList(
      lapply(
        X = seq_along(choiceNames),
        FUN = function(i) {
          htmltools::tags$option(
            value = choiceValues[[i]], as.character(choiceNames[[i]]),
            selected = if (choiceValues[[i]] %in% selected) "selected"
          )
        }
      )
    )
  } else {
    # Normalise to a named vector so values and display names line up.
    choices <- choicesWithNames(choices)
    htmltools::tagList(
      lapply(
        X = seq_along(choices),
        FUN = function(i) {
          htmltools::tags$option(
            value = choices[[i]], names(choices)[i],
            selected = if (choices[[i]] %in% selected) "selected"
          )
        }
      )
    )
  }
}
#' @title Change the value of a multi input on the client
#'
#' @description Change the value of a multi input on the client
#'
#' @param session The session object passed to function given to shinyServer.
#' @param inputId The id of the input object.
#' @param label The label to set.
#' @param selected The values selected. To select none, use \code{character(0)}.
#' @param choices The new choices for the input.
#'
#' @seealso \code{\link{multiInput}}
#'
#' @note Thanks to \href{https://github.com/ifellows}{Ian Fellows} for this one !
#'
#' @export
#'
#' @importFrom utils capture.output
#'
#' @examples
#' \dontrun{
#'
#' if (interactive()) {
#'
#' library(shiny)
#' library(shinyWidgets)
#'
#' fruits <- c("Banana", "Blueberry", "Cherry",
#' "Coconut", "Grapefruit", "Kiwi",
#' "Lemon", "Lime", "Mango", "Orange",
#' "Papaya")
#'
#' ui <- fluidPage(
#' tags$h2("Multi update"),
#' multiInput(
#' inputId = "my_multi",
#' label = "Fruits :",
#' choices = fruits,
#' selected = "Banana",
#' width = "350px"
#' ),
#' verbatimTextOutput(outputId = "res"),
#' selectInput(
#' inputId = "selected",
#' label = "Update selected:",
#' choices = fruits,
#' multiple = TRUE
#' ),
#' textInput(inputId = "label", label = "Update label:")
#' )
#'
#' server <- function(input, output, session) {
#'
#' output$res <- renderPrint(input$my_multi)
#'
#' observeEvent(input$selected, {
#' updateMultiInput(
#' session = session,
#' inputId = "my_multi",
#' selected = input$selected
#' )
#' })
#'
#' observeEvent(input$label, {
#' updateMultiInput(
#' session = session,
#' inputId = "my_multi",
#' label = input$label
#' )
#' }, ignoreInit = TRUE)
#' }
#'
#' shinyApp(ui, server)
#'
#' }
#'
#' }
updateMultiInput <- function(session, inputId, label = NULL, selected = NULL, choices = NULL) {
  # Normalise any new choices to a named vector, mirroring multiInput().
  if (!is.null(choices)) {
    choices <- choicesWithNames(choices)
  }
  # Validate the requested selection against the (possibly new) choices.
  if (!is.null(selected)) {
    selected <- validateSelected(selected, choices, inputId)
  }
  # Render replacement <option> tags to a single HTML string when needed.
  options <- NULL
  if (!is.null(choices)) {
    options <- paste(capture.output(makeChoices(choices, selected = selected)),
                     collapse = "\n")
  }
  # NULL entries are dropped so only the supplied fields get updated client-side.
  msg <- dropNulls(list(label = label, options = options, value = selected))
  session$sendInputMessage(inputId, msg)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/9-deprecated.R
\name{permuteTheta_false}
\alias{permuteTheta_false}
\title{Permute Theta}
\usage{
permuteTheta_false(counts, group, p = 64)
}
\arguments{
\item{counts}{A data.frame or matrix. A "count matrix" with
subjects as rows and features as columns. Note that this matrix
does not necessarily have to contain counts.}
\item{group}{A character vector. Group or sub-group memberships,
ordered according to the row names in \code{counts}.}
\item{p}{An integer. The number of permutation cycles.}
}
\description{
Permute differential proportionality measure, theta.
}
\details{
For back-end use only.
}
| /man/permuteTheta_false.Rd | no_license | tpq/propr | R | false | true | 685 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/9-deprecated.R
\name{permuteTheta_false}
\alias{permuteTheta_false}
\title{Permute Theta}
\usage{
permuteTheta_false(counts, group, p = 64)
}
\arguments{
\item{counts}{A data.frame or matrix. A "count matrix" with
subjects as rows and features as columns. Note that this matrix
does not necessarily have to contain counts.}
\item{group}{A character vector. Group or sub-group memberships,
ordered according to the row names in \code{counts}.}
\item{p}{An integer. The number of permutation cycles.}
}
\description{
Permute differential proportionality measure, theta.
}
\details{
For back-end use only.
}
|
### Library of Initial Dosing Algorithms
### This is a function which takes the following arguments to run:
#-"avatars": A data.frame including the avatar study population,
#-"dosing_algorithm": A desired initial dosing algorithm,
#-"units": "English" or "Non_English" as described below.
### Concerning the argument "units", please note:
### 1- If the avatars' characteristics are based on English units of measurements (e.g., pound, inch), then the arguments should be set to "English". The initial dosing function takes care of the conversion of the characteristics' values to the appropriate ones that each initial dosing algorithms accepts.
### 2- If the avatars' characteristics are NOT based on English units of measurements (e.g., kg, meter), then the arguments should be set to "Non_English".
###################################################################################################
####################################** NAMING CONVENTIONS **#######################################
### The data frame must include the following column names, with the corresponding values #########
### VARIABLE: "COLUMN NAME" AND CORRESPONDING VALUE ######
### Race: "RACE" takes a value of "Unknown","Asian","White", or "Black or African American"####
### Age: "AGE" takes an integer value representing the number of years lived by the patient###
### Height: "HEIGHT" takes a numeric value representing the height of patient. see units note ###
### Weight: "WEIGHT" takes a numeric value representing the weight of patient. see units note ###
### CYP2C9: "CYP2C9" takes a value of *1/*1, *1/*2, *1/*3, *2/*2, *2/*3, *3/*3, or "Unknown" ###
### VKORC1.1639: "VKORC1G" takes a value of "A/A", "A/G", "G/G", or "Unknown" ###
### VKORC1.1173: "VKORC1T" takes a value of C/C, C/T, T/T, or "Unknown" ###
### Enzyme inducer status: "ENZ" takes a value of Y or N ###
### Amiodarone status: "AMI" takes a value of Y or N ###
###   Gender:          "GENDER" takes a value of M or F                               ###
### Smoker: "SMOKER" takes a value of Y or N ###
### Deep vein thrombosis: "DVT" takes a value of Y or N ###
### Target INR: "TINR" takes a numeric value, usually 2.5 or 3 ###
###################################################################################################
###################################################################################################
### Calculate a warfarin initial (starting) dose for each avatar.
###
### Arguments:
###   avatars          - data.frame of avatar characteristics; see the column
###                      naming conventions documented at the top of this file.
###   dosing_algorithm - name of the initial dosing algorithm to apply, e.g.
###                      "pginitial_COAG", "clinitial_IWPC", "STD_EU_PACT".
###   units            - "English" if HEIGHT/WEIGHT are inches/pounds; any
###                      other value means cm/kg (no conversion applied).
###
### Returns: `avatars` with two columns appended, "InitialDose" and "BSA".
### An unrecognised algorithm name triggers a warning and returns `avatars`
### unchanged (no InitialDose column).
initial_dose <- function(avatars, dosing_algorithm, units = "English") {
  ##### Units conversion: pounds -> kg and inches -> cm when needed.
  if (units == "English") {
    unitw <- .454
    unith <- 2.54
  } else {
    unitw <- 1
    unith <- 1
  }
  ##### Matrix holding the calculated initial dose and body surface area.
  InitialDose <- array(0, dim = c(nrow(avatars), 2))
  colnames(InitialDose) <- c("InitialDose", "BSA")
  ##### BSA via the DuBois method.
  ### Citation: DuBois D, DuBois EF. A formula to estimate the approximate
  ### surface area if height and weight be known. Arch Int Med 1916;17:863-71.
  BSA <- ((avatars$WEIGHT * unitw)^.425 * (avatars$HEIGHT * unith)^.725 * .007184)
  InitialDose[, 2] <- BSA
  ############ Dosing Algorithms Start Here #############
  if (dosing_algorithm == "pginitial_COAG") {
    ### PGx algorithm from the COAG trial; daily dose for days 1, 2 and 3.
    ### Citation: Kimmel SE et al. N Engl J Med 2013;369(24):2283-93.
    # Coerce to factor so the levels() remapping below also works when the
    # genotype columns arrive as plain character vectors (the default since
    # R 4.0 dropped stringsAsFactors = TRUE).
    CYPdummy3 <- factor(avatars$CYP2C9)
    CYPdummy2 <- factor(avatars$CYP2C9)
    VKORdummy <- factor(avatars$VKORC1G)
    # Recode genotypes so as.integer() - 1 yields 0/1/2 variant-allele counts.
    levels(CYPdummy3) <- list(absent = c("*1/*1", "*1/*2", "*2/*2"),
                              hetero = c("*1/*3", "*2/*3"),
                              homo   = c("*3/*3"))
    levels(CYPdummy2) <- list(absent = c("*1/*1", "*1/*3", "*3/*3"),
                              hetero = c("*1/*2", "*2/*3"),
                              homo   = c("*2/*2"))
    levels(VKORdummy) <- list(GG = "G/G", AG = "A/G", AA = "A/A")
    InitialDose[, 1] <- round(exp(0.9751
                                  - 0.2066 * (as.integer(CYPdummy2) - 1)
                                  - 0.4008 * (as.integer(CYPdummy3) - 1)
                                  - 0.3238 * (as.integer(VKORdummy) - 1)
                                  - 0.00745 * avatars$AGE
                                  - 0.0901 * (avatars$RACE == "Black or African American")
                                  + 0.0922 * (avatars$SMOKER == "Y")
                                  + 0.4317 * BSA
                                  - 0.2538 * (avatars$AMI == "Y")
                                  + 0.2029 * avatars$TINR
                                  + 0.0664 * (avatars$DVT == "Y") # DVT/PE as indication
    ), 2)
    avatars <- cbind(avatars, InitialDose)
  } else if (dosing_algorithm == "clinitial_COAG") {
    ### Clinical algorithm from the COAG trial; daily dose for days 1, 2 and 3.
    InitialDose[, 1] <- round(exp(0.613
                                  - 0.0075 * avatars$AGE
                                  + 0.156 * (avatars$RACE == "Black or African American")
                                  + 0.108 * (avatars$SMOKER == "Y")
                                  + 0.425 * BSA
                                  - 0.257 * (avatars$AMI == "Y")
                                  + 0.216 * avatars$TINR
                                  + 0.0784 * (avatars$DVT == "Y")), 2) # DVT/PE as indication
    avatars <- cbind(avatars, InitialDose)
  } else if (dosing_algorithm == "pginitial_couma1") {
    ### PGx algorithm from CoumaGen-I; predicted weekly dose * 2/7 gives the
    ### days 1-2 daily dose; 25% dose reduction on amiodarone. Vectorised
    ### replacement for the original per-row loop (identical arithmetic).
    ### Citation: Anderson JL et al. Circulation 2007;116(22):2563-70.
    weekly <- 1.64 + exp(3.984
                         - 0.197 * (avatars$CYP2C9 == "*1/*2")
                         - 0.360 * (avatars$CYP2C9 == "*1/*3")
                         - 0.947 * (avatars$CYP2C9 == "*2/*3")
                         - 0.265 * (avatars$CYP2C9 == "*2/*2")
                         - 1.892 * (avatars$CYP2C9 == "*3/*3")
                         - 0.304 * (avatars$VKORC1T == "C/T")
                         - 0.569 * (avatars$VKORC1T == "T/T")
                         - 0.009 * avatars$AGE
                         + 0.094 * (avatars$GENDER == "M")
                         + 0.003 * avatars$WEIGHT * unitw)
    amio <- ifelse(avatars$AMI == "Y", .75, 1) # 25% reduction on amiodarone
    InitialDose[, 1] <- round(amio * weekly * 2 / 7, 2)
    avatars <- cbind(avatars, InitialDose)
  } else if (dosing_algorithm == "pg1initial_couma2") {
    ### PGx algorithm, CoumaGen-II arm PG-1; weekly dose / 7; twice this
    ### daily dose is used for days 1-2.
    ### Citation: Anderson JL et al. Circulation 2012;125(16):1997-2005.
    InitialDose[, 1] <- round(((5.5922
                                - 0.2523 * (avatars$AGE %/% 10) # age in decades
                                + 0.0089 * avatars$HEIGHT * unith
                                + 0.0124 * avatars$WEIGHT * unitw
                                - 0.8410 * (avatars$VKORC1G == "A/G") # VKORC1 rs9923231
                                - 1.6901 * (avatars$VKORC1G == "A/A")
                                - 0.4199 * (avatars$VKORC1G == "Unknown")
                                - 0.5202 * (avatars$CYP2C9 == "*1/*2")
                                - 0.9356 * (avatars$CYP2C9 == "*1/*3")
                                - 0.9789 * (avatars$CYP2C9 == "*2/*2")
                                - 0.8313 * (avatars$CYP2C9 == "*2/*3")
                                - 2.1565 * (avatars$CYP2C9 == "*3/*3")
                                - 0.1486 * (avatars$CYP2C9 == "Unknown")
                                + 0.0821 * (avatars$RACE == "Asian")
                                - 0.2953 * (avatars$RACE == "Black or African American")
                                - 0.1661 * (avatars$RACE == "Unknown")
                                + 1.1889 * (avatars$ENZ == "Y")
                                - 0.6427 * (avatars$AMI == "Y")
                                - 0.3468 * (avatars$AMI == "Unknown"))^2) / 7, 2)
    avatars <- cbind(avatars, InitialDose)
  } else if (dosing_algorithm == "pg2initial_couma2") {
    ### PGx algorithm, CoumaGen-II arm PG-2: as PG-1 but without CYP2C9 terms.
    InitialDose[, 1] <- round(((5.5922
                                - 0.2523 * (avatars$AGE %/% 10) # age in decades
                                + 0.0089 * avatars$HEIGHT * unith
                                + 0.0124 * avatars$WEIGHT * unitw
                                - 0.8410 * (avatars$VKORC1G == "A/G") # VKORC1 rs9923231
                                - 1.6901 * (avatars$VKORC1G == "A/A")
                                - 0.4199 * (avatars$VKORC1G == "Unknown")
                                + 0.0821 * (avatars$RACE == "Asian")
                                - 0.2953 * (avatars$RACE == "Black or African American")
                                - 0.1661 * (avatars$RACE == "Unknown")
                                + 1.1889 * (avatars$ENZ == "Y")
                                - 0.6427 * (avatars$AMI == "Y")
                                - 0.3468 * (avatars$AMI == "Unknown"))^2) / 7, 2)
    avatars <- cbind(avatars, InitialDose)
  } else if (dosing_algorithm == "pginitial_GAGE") {
    ### PGx maintenance-dose algorithm from Gage et al.
    ### Citation: Gage BF et al. Clin Pharmacol Ther 2008;84(3):326-31.
    # NOTE(review): several terms disagree with the pginitial_COAG branch
    # above, which implements the same published model: (1) the African
    # American coefficient is +0.0901 here but -0.0901 there; (2) homozygous
    # VKORC1 A/A receives no term here while COAG counts two variant alleles;
    # (3) BSA uses 0.423 here vs 0.4317 there. Left unchanged pending
    # confirmation against the source publication.
    # NOTE(review): the block comment says twice the maintenance dose is used
    # for days 1-2, but unlike clinical_GAGE no doubling is applied here.
    InitialDose[, 1] <- round(
      exp(0.9751 + 0.423 * BSA
          - 0.00745 * avatars$AGE
          - 0.3238 * (avatars$VKORC1G == "A/G") # VKOR3673G
          - 0.4008 * (avatars$CYP2C9 == "*1/*3")
          - 0.4008 * (avatars$CYP2C9 == "*2/*3")
          - 0.4008 * 2 * (avatars$CYP2C9 == "*3/*3")
          - 0.2066 * (avatars$CYP2C9 == "*1/*2")
          - 0.2066 * (avatars$CYP2C9 == "*2/*3")
          - 0.2066 * 2 * (avatars$CYP2C9 == "*2/*2")
          + 0.2029 * avatars$TINR
          - 0.2538 * (avatars$AMI == "Y")
          + 0.0922 * (avatars$SMOKER == "Y")
          + 0.0901 * (avatars$RACE == "Black or African American")
          + 0.0664 * (avatars$DVT == "Y")) # DVT/PE as indication
      , 2)
    avatars <- cbind(avatars, InitialDose)
  } else if (dosing_algorithm == "clinical_GAGE") {
    ### Clinical maintenance-dose algorithm from Gage et al.; doubled for the
    ### days 1-2 starting dose.
    InitialDose[, 1] <- round(
      exp(0.613 + .425 * BSA
          - 0.0075 * avatars$AGE
          + 0.1560 * (avatars$RACE == "Black or African American")
          + 0.2160 * avatars$TINR
          - 0.2570 * (avatars$AMI == "Y")
          + 0.1080 * (avatars$SMOKER == "Y")
          + 0.0784 * (avatars$DVT == "Y")) * 2
      , 2)
    # BUG FIX: this branch previously never bound InitialDose onto avatars,
    # so clinical_GAGE returned a data frame with no InitialDose column.
    avatars <- cbind(avatars, InitialDose)
  } else if (dosing_algorithm == "pginitial_IWPC") {
    ### PGx algorithm from the IWPC; squared weekly model / 7 for days 1-2.
    ### Citation: IWPC. N Engl J Med 2009;360(8):753-64.
    InitialDose[, 1] <- round(((5.6044
                                - 0.2614 * (avatars$AGE %/% 10) # age in decades
                                + 0.0087 * unith * avatars$HEIGHT
                                + 0.0128 * unitw * avatars$WEIGHT
                                - 0.8677 * (avatars$VKORC1G == "A/G") # rs9923231
                                - 1.6974 * (avatars$VKORC1G == "A/A")
                                - 0.4854 * (avatars$VKORC1G == "Unknown")
                                - 0.5211 * (avatars$CYP2C9 == "*1/*2")
                                - 0.9357 * (avatars$CYP2C9 == "*1/*3")
                                - 1.0616 * (avatars$CYP2C9 == "*2/*2")
                                - 1.9206 * (avatars$CYP2C9 == "*2/*3")
                                - 2.3312 * (avatars$CYP2C9 == "*3/*3")
                                - 0.2188 * (avatars$CYP2C9 == "Unknown")
                                - 0.1092 * (avatars$RACE == "Asian")
                                - 0.2760 * (avatars$RACE == "Black or African American")
                                - 1.032 * (avatars$RACE == "Unknown") # missing or mixed race
                                + 1.1816 * (avatars$ENZ == "Y")
                                - 0.5503 * (avatars$AMI == "Y"))^2) / 7, 2)
    avatars <- cbind(avatars, InitialDose)
  } else if (dosing_algorithm == "clinitial_IWPC") {
    ### Clinical algorithm from the IWPC; daily dose for days 1-2.
    # NOTE(review): unlike every other branch this result is not rounded;
    # kept as-is to preserve existing behaviour.
    InitialDose[, 1] <-
      ((4.0376
        - 0.2546 * (avatars$AGE %/% 10) # age in decades
        + 0.0118 * avatars$HEIGHT * unith
        + 0.0134 * avatars$WEIGHT * unitw
        - 0.6752 * (avatars$RACE == "Asian")
        + 0.4060 * (avatars$RACE == "Black or African American")
        + 0.0443 * (avatars$RACE == "Unknown") # missing or mixed race
        + 1.2799 * (avatars$ENZ == "Y")
        - 0.5695 * (avatars$AMI == "Y"))^2) / 7
    avatars <- cbind(avatars, InitialDose)
  } else if (dosing_algorithm == "STD_couma1" || dosing_algorithm == "STD_couma2") {
    ### Standard-of-care arms: fixed 10 mg (2 x 5 mg) starting dose.
    InitialDose[, 1] <- 2 * 5
    avatars <- cbind(avatars, InitialDose)
  } else if (dosing_algorithm == "STD_EU_PACT") {
    ### EU-PACT loading-dose regimen: predicted maintenance dose D plus a
    ### CYP2C9-dependent elimination constant k give the day-3 loading dose
    ### LD3; the returned dose adds 1.5x the (LD3 - D) difference to D.
    D <- round(((5.6044
                 - 0.02614 * (avatars$AGE)
                 + 0.0087 * unith * avatars$HEIGHT
                 + 0.0128 * unitw * avatars$WEIGHT
                 - 0.8677 * (avatars$VKORC1G == "A/G") # rs9923231
                 - 1.6974 * (avatars$VKORC1G == "A/A")
                 - 0.5211 * (avatars$CYP2C9 == "*1/*2")
                 - 0.9357 * (avatars$CYP2C9 == "*1/*3")
                 - 1.0616 * (avatars$CYP2C9 == "*2/*2")
                 - 1.9206 * (avatars$CYP2C9 == "*2/*3")
                 - 2.3312 * (avatars$CYP2C9 == "*3/*3")
                 - 0.5503 * (avatars$AMI == "Y"))^2) / 7, 2)
    # Per-genotype elimination rate constants (replaces the per-row if/else
    # chain with a vectorised lookup).
    kmap <- c("*1/*1" = 0.0189, "*1/*2" = 0.0158, "*1/*3" = 0.0132,
              "*2/*2" = 0.0130, "*2/*3" = 0.009,  "*3/*3" = 0.0075)
    k <- unname(kmap[as.character(avatars$CYP2C9)])
    k[is.na(k)] <- 0 # unmatched genotypes previously fell through as 0
    LD3 <- D / ((1 - exp(k * -24)) * (1 + exp(k * -24) + exp(-2 * k * 24))) # 24 = hours/day
    InitialDose[, 1] <- round((LD3 - D) * (1.5) + D, 2)
    avatars <- cbind(avatars, InitialDose)
  } else {
    # Unrecognised algorithm name: warn (previously print("wacka wacka")) and
    # return the avatars unchanged, i.e. without an InitialDose column.
    warning("initial_dose: unknown dosing_algorithm '", dosing_algorithm, "'")
  }
  return(avatars)
}
| /archive/1.0/initial_dosing.R | no_license | MarcusWalz/RogueClinicalAvatars | R | false | false | 19,117 | r | ### Library of Initial Dosing Algorithms
### This is a function which takes the following arguments to run:
#-"avatars": A data.frame including the avatar study population,
#-"dosing_algorithm": A desired initial dosing algorithm,
#-"units": "English" or "Non_English" as described below.
### Concerning the argument "units", please note:
### 1- If the avatars' characteristics are based on English units of measurements (e.g., pound, inch), then the arguments should be set to "English". The initial dosing function takes care of the conversion of the characteristics' values to the appropriate ones that each initial dosing algorithms accepts.
### 2- If the avatars' characteristics are NOT based on English units of measurements (e.g., kg, meter), then the arguments should be set to "Non_English".
###################################################################################################
####################################** NAMING CONVENTIONS **#######################################
### The data frame must include the following column names, with the corresponding values #########
### VARIABLE: "COLUMN NAME" AND CORRESPONDING VALUE ######
### Race: "RACE" takes a value of "Unknown","Asian","White", or "Black or African American"####
### Age: "AGE" takes an integer value representing the number of years lived by the patient###
### Height: "HEIGHT" takes a numeric value representing the height of patient. see units note ###
### Weight: "WEIGHT" takes a numeric value representing the weight of patient. see units note ###
### CYP2C9: "CYP2C9" takes a value of *1/*1, *1/*2, *1/*3, *2/*2, *2/*3, *3/*3, or "Unknown" ###
### VKORC1.1639: "VKORC1G" takes a value of "A/A", "A/G", "G/G", or "Unknown" ###
### VKORC1.1173: "VKORC1T" takes a value of C/C, C/T, T/T, or "Unknown" ###
### Enzyme inducer status: "ENZ" takes a value of Y or N ###
### Amiodarone status: "AMI" takes a value of Y or N ###
### Gender: "GENDER" takes a value of Y or N ###
### Smoker: "SMOKER" takes a value of Y or N ###
### Deep vein thrombosis: "DVT" takes a value of Y or N ###
### Target INR: "TINR" takes a numeric value, usually 2.5 or 3 ###
###################################################################################################
###################################################################################################
## Compute a starting warfarin dose for each avatar (simulated patient) using one of
## several published pharmacogenetic (PGx) or clinical initial-dosing algorithms.
##
## avatars:          data frame of patients; see the NAMING CONVENTIONS header above
##                   for the required columns and their allowed values.
## dosing_algorithm: character flag selecting the algorithm; one of
##                   "pginitial_COAG", "clinitial_COAG", "pginitial_couma1",
##                   "pg1initial_couma2", "pg2initial_couma2", "pginitial_GAGE",
##                   "clinical_GAGE", "pginitial_IWPC", "clinitial_IWPC",
##                   "STD_couma1", "STD_couma2", "STD_EU_PACT".
## units:            "English" if HEIGHT is in inches and WEIGHT in pounds (converted
##                   internally to cm and kg); any other value assumes metric input.
##
## Returns the avatars data frame with two columns appended:
##   "InitialDose" (mg/day) and "BSA" (body surface area, m^2).
initial_dose <- function(avatars, dosing_algorithm, units="English"){
  ##### Units conversion: pounds -> kg and inches -> cm when inputs are English units
  if(units=="English"){
    unitw=.454
    unith=2.54
  }else{
    unitw=1
    unith=1
  }
  ##### Create an array to hold calculated initial doses (col 1) and BSA (col 2)
  InitialDose=array(0,dim=c(nrow(avatars),2))
  colnames(InitialDose)<-c("InitialDose","BSA")
  ##### BSA Calculation
  BSA=((avatars$WEIGHT*unitw)^.425*(avatars$HEIGHT*unith)^.725*.007184)#BSA is calculated based
  #on the DuBois' method
  #Citation: DuBois D, Du"Bois DF. A formula to estimate the approximate surface area if
  #height and weight be known. Arch Int Med 1916;17:863-71.
  InitialDose[,2]<-BSA
  ############ Dosing Algorithms Start Here #############
  ### PGx initial dosing algorithm derived from COAG paper (PG-COAG). According to the paper
  ### this is used to calculate daily dosage for days 1, 2, and 3.
  ### Citation: Kimmel, Stephen E., Benjamin French, Scott E. Kasner, Julie A. Johnson, Jeffrey L. Anderson, Brian F. Gage, Yves D. Rosenberg et al."A pharmacogenetic versus a clinical algorithm for warfarin dosing." New England Journal of Medicine 369, no. 24 (2013): 2283-2293.
  if(dosing_algorithm=="pginitial_COAG"){
    # Recode genotypes as ordinal dummies counting relevant variant alleles.
    # NOTE(review): levels<- assumes these columns are factors; with
    # stringsAsFactors = FALSE input they would be character -- confirm upstream.
    CYPdummy3<-avatars$CYP2C9
    CYPdummy2<-avatars$CYP2C9
    VKORdummy<-avatars$VKORC1G
    levels(CYPdummy3)<-list(absent=c("*1/*1","*1/*2","*2/*2"),#takes integer value of 1
                            hetero=c("*1/*3","*2/*3"), #takes integer value of 2
                            homo=c("*3/*3")) #takes integer value of 3
    levels(CYPdummy2)<-list(absent=c("*1/*1","*1/*3","*3/*3"),#takes integer value of 1
                            hetero=c("*1/*2","*2/*3"), #takes integer value of 2
                            homo=c("*2/*2")) #takes integer value of 3
    levels(VKORdummy)<-list(GG="G/G",AG="A/G",AA="A/A") #takes integer value of 1,2, and 3 respectively
    InitialDose[,1]=round(exp(0.9751
                              -0.2066*(as.integer(CYPdummy2)-1)
                              -0.4008*(as.integer(CYPdummy3)-1)
                              -0.3238*(as.integer(VKORdummy)-1)
                              -0.00745*avatars$AGE
                              -0.0901*(avatars$RACE=="Black or African American")
                              +0.0922*(avatars$SMOKER=="Y")
                              +0.4317*BSA
                              -0.2538*(avatars$AMI=="Y")
                              +0.2029*avatars$TINR
                              +0.0664*(avatars$DVT=="Y")#DVT/PE as indication for Warfarin
    ),2)
    avatars<-cbind(avatars,InitialDose)
    avatars
  }
  ### Clinical initial dosing algorithm derived from COAG paper (Clinical-COAG). According to
  ### the paper this is used to calculate daily dosage for days 1, 2, and 3.
  ### Citation: Kimmel, Stephen E., Benjamin French, Scott E. Kasner, Julie A. Johnson, Jeffrey L. Anderson, Brian F. Gage, Yves D. Rosenberg et al. "A pharmacogenetic versus a clinical algorithm for warfarin dosing." New England Journal of Medicine 369, no. 24 (2013): 2283-2293.
  else if(dosing_algorithm=="clinitial_COAG"){
    InitialDose[,1]=round(exp(0.613
                              -0.0075*avatars$AGE
                              +0.156*(avatars$RACE=="Black or African American")
                              +0.108*(avatars$SMOKER=="Y")
                              +0.425*BSA
                              -0.257*(avatars$AMI=="Y")
                              +0.216*avatars$TINR
                              +0.0784*(avatars$DVT=="Y")),2)#DVT/PE as indication for Warfarin
    avatars<-cbind(avatars,InitialDose)
  }
  ### PGx initial dosing algorithm derived from CoumaGen-I paper. According to the paper
  ### this is used to calculate dosage for days 1 and 2. Twice the calculated dose is used for
  ### days 1 and 2.
  ### Citation: Anderson, J. L., Horne, B. D., Stevens, S. M., Grove, A. S., Barton, S., Nicholas, Z. P., ... & Carlquist, J. F. (2007). Randomized trial of genotype-guided versus standard warfarin dosing in patients initiating oral anticoagulation. Circulation, 116(22), 2563-2570.
  ### Here, we use VKORC1T to indicate the genotype VKORC1
  else if(dosing_algorithm=="pginitial_couma1"){
    # Same formula in both branches; patients on Amiodarone get a 25% dose reduction.
    for(i in 1:nrow(avatars)){
      InitialDose[i,1]=if(avatars$AMI[i]=="Y"){#25% dose reduction for patients taking Amiodarone
        round(.75*(1.64 +exp(3.984
                             + 0     *(avatars$CYP2C9[i]=="*1/*1")
                             - 0.197 *(avatars$CYP2C9[i]=="*1/*2")
                             - 0.360 *(avatars$CYP2C9[i]=="*1/*3")
                             - 0.947 *(avatars$CYP2C9[i]=="*2/*3")
                             - 0.265 *(avatars$CYP2C9[i]=="*2/*2")
                             - 1.892 *(avatars$CYP2C9[i]=="*3/*3")
                             - 0.304 *(avatars$VKORC1T[i]=="C/T")
                             - 0.569 *(avatars$VKORC1T[i]=="T/T")
                             + 0     *(avatars$VKORC1T[i]=="C/C")
                             - 0.009 *avatars$AGE[i]
                             + 0.094 *(avatars$GENDER[i]=="M")
                             + 0     *(avatars$GENDER[i]=="F")
                             + 0.003 * avatars$WEIGHT[i]*unitw))*2/7,2)
      } else{
        round((1.64 +exp(3.984
                         + 0     *(avatars$CYP2C9[i]=="*1/*1")
                         - 0.197 *(avatars$CYP2C9[i]=="*1/*2")
                         - 0.360 *(avatars$CYP2C9[i]=="*1/*3")
                         - 0.947 *(avatars$CYP2C9[i]=="*2/*3")
                         - 0.265 *(avatars$CYP2C9[i]=="*2/*2")
                         - 1.892 *(avatars$CYP2C9[i]=="*3/*3")
                         - 0.304 *(avatars$VKORC1T[i]=="C/T")
                         - 0.569 *(avatars$VKORC1T[i]=="T/T")
                         + 0     *(avatars$VKORC1T[i]=="C/C")
                         - 0.009 *avatars$AGE[i]
                         + 0.094 *(avatars$GENDER[i]=="M")
                         + 0     *(avatars$GENDER[i]=="F")
                         + 0.003 * avatars$WEIGHT[i]*unitw))*2/7,2)
      }
    }
    avatars=cbind(avatars,InitialDose)
    return(avatars)
  }
  ### PGx initial dosing algorithm derived from CoumaGen-II paper for arm PG-1 (PG-Couma2).
  ###According to the paper this is used to calculate daily dosage for days 1 and 2.
  ###Twice the calculated dose is used for days 1 and 2.
  ### Citation: Anderson, Jeffrey L., Benjamin D. Horne, Scott M. Stevens, Scott C. Woller, Kent M. Samuelson, Justin W. Mansfield, Michelle Robinson et al. "A randomized and clinical effectiveness trial comparing two pharmacogenetic algorithms and standard care for individualizing warfarin dosing (CoumaGen-II)."Circulation 125, no. 16 (2012): 1997-2005.
  else if(dosing_algorithm=="pg1initial_couma2"){
    InitialDose[,1]<-round(((5.5922
                             -0.2523*(avatars$AGE%/%10)#converting years to decades
                             +0.0089*avatars$HEIGHT*unith
                             +0.0124*avatars$WEIGHT*unitw
                             -0.8410*(avatars$VKORC1G=="A/G")#VKORC1- rs9923231
                             -1.6901*(avatars$VKORC1G=="A/A")
                             -0.4199*(avatars$VKORC1G=="Unknown")
                             -0.5202*(avatars$CYP2C9=="*1/*2")
                             -0.9356*(avatars$CYP2C9=="*1/*3")
                             -0.9789*(avatars$CYP2C9=="*2/*2")
                             -0.8313*(avatars$CYP2C9=="*2/*3")
                             -2.1565*(avatars$CYP2C9=="*3/*3")
                             -0.1486*(avatars$CYP2C9=="Unknown")
                             +0.0821*(avatars$RACE=="Asian")
                             -0.2953*(avatars$RACE=="Black or African American")
                             -0.1661*(avatars$RACE=="Unknown")
                             +1.1889*(avatars$ENZ=="Y")
                             -0.6427*(avatars$AMI=="Y")
                             -0.3468*(avatars$AMI=="Unknown"))^2)/7,2)
    avatars<-cbind(avatars,InitialDose)
  }
  ### PGx initial dosing algorithm derived from CoumaGen-II paper for arm PG-2 (PG-Couma2).
  ###According to the paper this is used to calculate daily dosage for days 1 and 2.
  ###Twice the calculated dose is used for days 1 and 2.
  ### Citation: Anderson, Jeffrey L., Benjamin D. Horne, Scott M. Stevens, Scott C. Woller, Kent M. Samuelson, Justin W. Mansfield, Michelle Robinson et al. "A randomized and clinical effectiveness trial comparing two pharmacogenetic algorithms and standard care for individualizing warfarin dosing (CoumaGen-II)."Circulation 125, no. 16 (2012): 1997-2005.
  else if(dosing_algorithm=="pg2initial_couma2"){
    # Same as PG-1 but without the CYP2C9 terms.
    InitialDose[,1]<-round(((5.5922
                             -0.2523*(avatars$AGE%/%10)#converting years to decades
                             +0.0089*avatars$HEIGHT*unith
                             +0.0124*avatars$WEIGHT*unitw
                             -0.8410*(avatars$VKORC1G=="A/G")#VKORC1- rs9923231
                             -1.6901*(avatars$VKORC1G=="A/A")
                             -0.4199*(avatars$VKORC1G=="Unknown")
                             +0.0821*(avatars$RACE=="Asian")
                             -0.2953*(avatars$RACE=="Black or African American")
                             -0.1661*(avatars$RACE=="Unknown")
                             +1.1889*(avatars$ENZ=="Y")
                             -0.6427*(avatars$AMI=="Y")
                             -0.3468*(avatars$AMI=="Unknown"))^2)/7,2)
    avatars<-cbind(avatars,InitialDose)
  }
  ### PGx initial dosing algorithm derived from Gage paper (PG-Gage). According to the paper
  ### this is used to calculate maintenance dose.
  ### We assume twice the calculated maintenance dose is used for days 1 and 2.
  ### Citation: Gage, B. F., C. Eby, J. A. Johnson, E. Deych, M. J. Rieder, P. M. Ridker, P. E. Milligan et al. "Use of pharmacogenetic and clinical factors to predict the therapeutic dose of warfarin." Clinical Pharmacology & Therapeutics 84, no. 3 (2008): 326-331.
  else if(dosing_algorithm=="pginitial_GAGE"){
    # NOTE(review): unlike pginitial_COAG this branch has no VKORC1G=="A/A" term and
    # the RACE coefficient sign (+0.0901) is opposite to COAG's (-0.0901); also no *2
    # doubling despite the "twice the maintenance dose" comment above -- verify against
    # the Gage et al. paper before relying on this branch.
    InitialDose[,1]<-round(
      exp(0.9751+0.423*BSA
          -0.00745*avatars$AGE
          -0.3238*(avatars$VKORC1G=="A/G")#VKOR3673G
          -0.4008*(avatars$CYP2C9=="*1/*3")
          -0.4008*(avatars$CYP2C9=="*2/*3")
          -0.4008*2*(avatars$CYP2C9=="*3/*3")
          -0.2066*(avatars$CYP2C9=="*1/*2")
          -0.2066*(avatars$CYP2C9=="*2/*3")
          -0.2066*2*(avatars$CYP2C9=="*2/*2")
          +0.2029*avatars$TINR
          -0.2538*(avatars$AMI=="Y")
          +0.0922*(avatars$SMOKER=="Y")
          +0.0901*(avatars$RACE=="Black or African American")
          +0.0664*(avatars$DVT=="Y"))#DVT/PE as indication for Warfarin
      ,2)
    avatars<-cbind(avatars,InitialDose)
  }
  ### Clinical initial dosing algorithm derived from Gage paper (Clinical-Gage). According to
  ### the paper this is used to calculate maintenance dose.
  ### We assume twice the calculated maintenance dose is used for days 1 and 2.
  ### Citation: Gage, B. F., C. Eby, J. A. Johnson, E. Deych, M. J. Rieder, P. M. Ridker, P. E. Milligan et al. "Use of pharmacogenetic and clinical factors to predict the therapeutic dose of warfarin." Clinical Pharmacology & Therapeutics 84, no. 3 (2008): 326-331
  else if(dosing_algorithm=="clinical_GAGE"){
    InitialDose[,1]<-round(
      exp(0.613+.425*BSA
          -0.0075*avatars$AGE
          +0.1560*(avatars$RACE=="Black or African American")
          +0.2160*avatars$TINR
          -0.2570*(avatars$AMI=="Y")
          +0.1080*(avatars$SMOKER=="Y")
          +0.0784*(avatars$DVT=="Y"))*2
      ,2)
    # Bug fix: this branch previously never appended InitialDose to avatars, so the
    # computed doses were silently discarded. Append it like every other branch.
    avatars<-cbind(avatars,InitialDose)
  }
  ### PGx initial dosing algorithm derived from IWPC paper (PG-IWPC). According to the paper
  ### this is used to calculate daily dosage for days 1 and 2.
  ### Citation: International Warfarin Pharmacogenetics Consortium. "Estimation of the warfarin dose with clinical and pharmacogenetic data."The New England journal of medicine 360, no. 8 (2009): 753-64.
  else if(dosing_algorithm=="pginitial_IWPC"){
    InitialDose[,1]<-round((( 5.6044
                              -0.2614*(avatars$AGE%/%10)#converting years to decades
                              +0.0087*unith*avatars$HEIGHT
                              +0.0128*unitw*avatars$WEIGHT
                              -0.8677*(avatars$VKORC1G=="A/G")#rs9923231
                              -1.6974*(avatars$VKORC1G=="A/A")
                              -0.4854*(avatars$VKORC1G=="Unknown")
                              -0.5211*(avatars$CYP2C9=="*1/*2")
                              -0.9357*(avatars$CYP2C9=="*1/*3")
                              -1.0616*(avatars$CYP2C9=="*2/*2")
                              -1.9206*(avatars$CYP2C9=="*2/*3")
                              -2.3312*(avatars$CYP2C9=="*3/*3")
                              -0.2188*(avatars$CYP2C9=="Unknown")
                              -0.1092*(avatars$RACE=="Asian")
                              -0.2760*(avatars$RACE=="Black or African American")
                              -1.032*(avatars$RACE=="Unknown")#Unknown: Missing or Mixed race
                              +1.1816*(avatars$ENZ=="Y")
                              -0.5503*(avatars$AMI=="Y"))^2)/7,2)
    avatars<-cbind(avatars,InitialDose)
    avatars
  }
  ### Clinical initial dosing algorithm derived from IWPC paper (Clinical-IWPC). According to the paper
  ### this is used to calculate daily dosage for days 1 and 2.
  ### Citation: International Warfarin Pharmacogenetics Consortium. "Estimation of the warfarin dose with clinical and pharmacogenetic data." The New England journal of medicine 360, no. 8 (2009): 753.
  else if(dosing_algorithm=="clinitial_IWPC"){
    InitialDose[,1]<-
      ((4.0376
        -0.2546*(avatars$AGE%/%10)#converting years to decades
        +0.0118*avatars$HEIGHT*unith
        +0.0134*avatars$WEIGHT*unitw
        -0.6752*(avatars$RACE=="Asian")
        +0.4060*(avatars$RACE=="Black or African American")
        +0.0443*(avatars$RACE=="Unknown")#Unknown: Missing or Mixed race
        +1.2799*(avatars$ENZ=="Y")
        -0.5695*(avatars$AMI=="Y"))^2)/7
    avatars=cbind(avatars,InitialDose)
    return(avatars)
  }
  ### Fixed standard-of-care starting doses (5 mg/day, doubled for the loading days).
  else if(dosing_algorithm=="STD_couma1"){
    InitialDose[,1]<- 2 * 5
    avatars<-cbind(avatars,InitialDose)
  } else if (dosing_algorithm=="STD_couma2"){
    InitialDose[,1]<- 2 * 5
    avatars<-cbind(avatars,InitialDose)
  } else if (dosing_algorithm=="STD_EU_PACT"){
    # Predicted maintenance dose D (IWPC-style), then a kinetics-based loading dose.
    D<-round((( 5.6044
                -0.02614*(avatars$AGE)
                +0.0087*unith*avatars$HEIGHT
                +0.0128*unitw*avatars$WEIGHT
                -0.8677*(avatars$VKORC1G=="A/G")#rs9923231
                -1.6974*(avatars$VKORC1G=="A/A")
                -0.5211*(avatars$CYP2C9=="*1/*2")
                -0.9357*(avatars$CYP2C9=="*1/*3")
                -1.0616*(avatars$CYP2C9=="*2/*2")
                -1.9206*(avatars$CYP2C9=="*2/*3")
                -2.3312*(avatars$CYP2C9=="*3/*3")
                -0.5503*(avatars$AMI=="Y"))^2)/7,2)
    # Genotype-specific elimination rate constant k (per hour).
    # NOTE(review): k stays 0 when CYP2C9 is "Unknown", which makes the LD3
    # denominator below zero (dose becomes Inf) -- confirm how unknown genotypes
    # should be handled for this arm.
    k<-vector("numeric",nrow(avatars))
    for(i in 1:nrow(avatars)){
      if((avatars$CYP2C9[i]=="*1/*1")){
        k[i]=0.0189}
      else if((avatars$CYP2C9[i]=="*1/*2")){
        k[i]=0.0158
      }
      else if((avatars$CYP2C9[i]=="*1/*3")){
        k[i]=0.0132
      }
      else if((avatars$CYP2C9[i]=="*2/*2")){
        k[i]=0.0130
      }
      else if((avatars$CYP2C9[i]=="*2/*3")){
        k[i]=0.009
      }
      else if((avatars$CYP2C9[i]=="*3/*3")){
        k[i]=0.0075
      }
    }
    LD3<-D/((1-exp(k*-24))*(1+exp(k*-24)+exp(-2*k*24)))#where 24 is the number of hours
    x<-round((LD3-D)*(1.5)+D,2)
    InitialDose[,1]<-x
    avatars<-cbind(avatars,InitialDose)
    avatars
  }
  else{
    # Fail loudly on an unrecognized algorithm name instead of silently returning
    # avatars without an InitialDose column (previously printed "wacka wacka").
    stop("initial_dose: unknown dosing_algorithm '", dosing_algorithm, "'")
  }
  return(avatars)
}
|
## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
## This function creates the object which has field called inverse, for storting matrix inverse.
## This function does the creation and initialization of the makeCacheMatrix object.
## The inverse is reset to NULL each time the initialization happens.
## Create a "cache matrix": a closure-based object wrapping a matrix `x` together
## with a cached copy of its inverse.
##
## The returned list exposes four functions:
##   set(y)        - replace the stored matrix and invalidate the cached inverse
##   get()         - return the stored matrix
##   setinverse(i) - store a computed inverse in the cache
##   getinverse()  - return the cached inverse, or NULL if not yet computed
makeCacheMatrix <- function(x = matrix()) {
  # Cached inverse; NULL means "not computed yet".
  inverse <- NULL
  set <- function(y) {
    x <<- y
    # A new matrix invalidates any previously cached inverse.
    inverse <<- NULL
  }
  get <- function() x
  # Fix: the cache-setter argument was previously named `solve`, shadowing
  # base::solve(); renamed to `inv` (internal name only, callers pass positionally).
  setinverse <- function(inv) inverse <<- inv
  getinverse <- function() inverse
  list(set = set, get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## Write a short comment describing this function
## This function returns the inverse of the matrix from the cache , if already computed.
## Else it computes the inverse and returns the inverse.
## Return the inverse of the matrix wrapped by a makeCacheMatrix object `x`.
## On the first call the inverse is computed with solve() and cached inside `x`;
## subsequent calls return the cached copy (and print a message saying so).
##
## x:   a list produced by makeCacheMatrix()
## ...: further arguments forwarded to solve()
cacheSolve <- function(x, ...) {
  inverse <- x$getinverse()
  if (!is.null(inverse)) {
    message("getting cached data")
    return(inverse)
  }
  data <- x$get()
  inverse <- solve(data, ...)
  x$setinverse(inverse)
  # Bug fix: return the inverse explicitly and visibly; previously the function's
  # value was whatever x$setinverse() happened to return, and only invisibly.
  inverse
}
| /cachematrix.R | no_license | sreenivasaupadhyaya/ProgrammingAssignment2 | R | false | false | 1,608 | r | ## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
## This function creates the object which has field called inverse, for storting matrix inverse.
## This function does the creation and initialization of the makeCacheMatrix object.
## The inverse is reset to NULL each time the initialization happens.
makeCacheMatrix <- function(x = matrix()) {
#'inverse' is the variable used to store the inverse of the matrix object.
#'initialise the "inverse" to NULL.
inverse <- NULL
#set function initializes the matrix with the specified values.
set <- function(y) {
x <<- y
inverse <<- NULL
}
#get function returns the value.
get <- function() x
#setinverse function calculates the inverse.
setinverse <- function(solve) inverse <<- solve
#returns the inverse.
getinverse <- function() inverse
#create the list with all the parameters.
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## Write a short comment describing this function
## This function returns the inverse of the matrix from the cache , if already computed.
## Else it computes the inverse and returns the inverse.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
inverse <- x$getinverse()
if(!is.null(inverse)) {
message("getting cached data")
return(inverse)
}
data <- x$get()
inverse <- solve(data, ...)
x$setinverse(inverse)
}
|
##
## DEPENDENCIES
##
#
# > flightBySunset.csv
#
##
## Load Libraries
library(tidyverse)
library(motus)
library(stats)
library(lmtest)
library(survival)
# Set viewport to 2 x 2 grid
par(mfrow=c(2,2))
# Read in flights dataframe
flightBySunset <- read_csv("flightBySunset.csv")
# Get some stats on the number of flights:
# per-bird flight counts, then mean/sd of those counts summarised by age class.
flightStats <- flightBySunset %>%
group_by(markerNumber) %>%
summarise(age = age[1], nflights = n()) %>%
group_by(age) %>%
summarise(a = mean(nflights), sd = sd(nflights), nflights = sum(nflights), n = n())
flightStats
# Same per-bird summary, but grouped by banding site instead of age.
flightBySunset %>%
group_by(markerNumber) %>%
summarise(bandsite = bandsite[1], nflights = n()) %>%
group_by(bandsite) %>%
summarise(a = mean(nflights), sd = sd(nflights), nflights = sum(nflights), n = n())
####
# MODELS
####
######
### NUMBER OF FLIGHT
######
# nFlights: one row per bird with its age, banding site, and number of flights.
# n() - 1 excludes one flight per bird (presumably the departure flight -- confirm).
nFlights <- flightBySunset %>%
# filter(!isDep) %>%
group_by(markerNumber) %>%
summarise(age = age[1], bandsite = bandsite[1], n = n()-1)
# Exploratory plots: flight counts by site/age.
nFlights %>%
ggplot(aes(bandsite, n))+
geom_boxplot()+
facet_grid(age~.)
nFlights %>%
ggplot(aes(n)) +
geom_histogram()+
facet_grid(age~bandsite)
nFlights %>%
ggplot(aes(n)) +
geom_histogram()
sum(nFlights$n)
# Count summaries by age and by banding site.
nFlights %>%
group_by(age) %>%
summarise(a = mean(n), sd = sd(n), nInd = n(), n = sum(n))
nFlights %>%
group_by(bandsite) %>%
summarise(a = mean(n), sd = sd(n), nInd = n(), n = sum(n))
# Number of tagged birds per age class.
flightBySunset %>%
group_by(markerNumber) %>%
summarise(age = age[1], bandsite = bandsite[1]) %>%
group_by(age) %>%
tally()
# Proportion of birds per site with at least one non-departure flight.
# NOTE(review): uses the abbreviation F rather than FALSE (reassignable in R).
flightBySunset %>%
group_by(markerNumber) %>%
summarise(age = age[1], bandsite = bandsite[1], isDep = length(which(isDep==F))) %>%
group_by(bandsite) %>%
summarise(p = length(which(isDep>0))/n())
nFlights %>%
group_by(bandsite) %>%
summarise(n = n())
preDep.flightBySunset <- flightBySunset %>% filter(!isDep)
# Test: Is there a difference in the likelihood of individuals making flights among ages and bandsites?
# glm1 (with interaction) vs glm2 (additive) and single-predictor reductions.
glm1 <- glm(data = nFlights, formula = (n > 0) ~ age * bandsite, family = binomial)
glm2 <- glm(data = nFlights, formula = (n > 0) ~ age + bandsite, family = binomial)
anova(glm1, glm2, test="Chi")
glm1.1 <- nFlights %>% glm(formula = (n > 0) ~ bandsite, family = binomial)
glm1.2 <- nFlights %>% glm(formula = (n > 0) ~ age, family = binomial)
plot(glm1)
summary(glm1)
anova(glm1, test="Chi")
anova(glm1, update(glm1,.~ -age:bandsite), test = 'Chi')
# NOTE(review): the next line is duplicated verbatim -- likely an accidental repeat.
anova(glm1.2, glm1, test = 'LRT')
anova(glm1.2, glm1, test = 'LRT')
lrtest(glm1.1, glm1)
anova(glm1.1, glm1, test = 'LRT')
# Test: Do individuals at remote sites make more nocturnal flights and
# do adults make fewer flights than hatch-years and show no difference between sites?
# NOTE(review): this reassigns glm2 (previously the additive binomial model above)
# to a Poisson count model, and lrtest(glm2, glm1) then compares models of
# different families -- confirm this comparison is intended.
glm2 <- nFlights %>% glm(formula = n ~ age * bandsite, family = poisson)
plot(glm2)
summary(glm2)
anova(glm2, test = 'F')
lrtest(glm2, glm1)
######
### PRE-DEPARTURE FLIGHT TIMES
######
# Timing of non-departure flights relative to sunset, by age and site.
flightBySunset %>%
filter(!isDep) %>%
ggplot(aes(bandsite, (bySunset)))+
geom_boxplot()+
facet_grid(age~.)+
ylab('Minutes since sunset')+
xlab('Banding site')
flightBySunset %>%
filter(!isDep) %>%
ggplot(aes(log(bySunset))) +
geom_histogram()+
facet_grid(age~bandsite)
# Gaussian GLM on log flight time (minutes after sunset).
glm3 <- flightBySunset %>% filter(!isDep) %>% glm(formula = log(bySunset) ~ age * bandsite, family = gaussian)
plot(glm3)
summary(glm3)
anova(glm3, test = 'F')
### DEPARTURE TIMES (LOG)
######
dFlights <- flightBySunset %>% filter(isDep) %>% select(markerNumber, bySunset, age, bandsite, ts)
dFlights %>%
ggplot(aes(bandsite, (bySunset)))+
geom_boxplot()+
facet_grid(age~.)+
ylab('Minutes since sunset')+
xlab('Banding site')
dFlights %>%
ggplot(aes(log(bySunset))) +
geom_histogram()+
facet_grid(age~bandsite)
dFlights %>%
group_by(age) %>%
summarise(a = mean(bySunset), sd = sd(bySunset), n = n())
dFlights %>%
group_by(bandsite) %>%
summarise(a = mean(bySunset), sd = sd(bySunset), n = n())
# Test: Do individuals at remote sites depart earlier in the evening and
# do adults depart later than hatch-years and show no difference between sites?
glm6 <- dFlights %>% glm(formula = log(bySunset) ~ age * bandsite, family = gaussian)
plot(glm6)
summary(glm6)
anova(glm6, test = "F")
######
### NUMBER FLIGHT PRIOR TO DEPARTURE
######
dDep <- flightBySunset %>%
rowwise() %>%
# mutate(depDate = dFlights[dFlights$markerNumber == markerNumber,]$ts)
mutate(daysToDep = difftime(dFlights[dFlights$markerNumber == markerNumber,]$ts, ts, units = 'days'))
dDep %>%
filter(daysToDep > 0) %>%
ggplot(aes(as.integer(daysToDep), group = age, color= age)) +
geom_density() +
facet_grid(.~bandsite)
######
### DEPARTURE DATE
######
flightBySunset %>%
filter(isDep) %>%
ggplot(aes(jdate, group = age, color = age)) +
geom_density() +
facet_grid(.~bandsite)
glm7 <- flightBySunset %>% filter(isDep) %>% glm(formula = (jdate) ~ age * bandsite, family = gaussian)
plot(glm7)
summary(glm7)
anova(glm7, test = "F")
######
### PROBABILITY OF DEPARTURE
######
# Tag deployment metadata: one row per bird with the deployment start timestamp
# and its julian day.
tagMeta2 <- read_rds('tagMeta.rds') %>%
  group_by(markerNumber) %>%
  summarise(tagDeployStart = tagDeployStart[1]) %>%
  mutate(tagDeployStart = as.POSIXct(tagDeployStart, origin = '1970-01-01'),
         tagDeploy.jdate = as.integer(format(tagDeployStart, format = '%j')))
# Empirical cumulative departure curve over the observed date range.
jdate.range <- range(flightBySunset$jdate)
depRange <- seq(jdate.range[1]-1, jdate.range[2])
numDep <- sapply(depRange, function(x){length(which(x>=flightBySunset$jdate))})
probDep <- tibble(jdate = depRange,
                  numDep = numDep,
                  probDep = numDep/max(numDep))
probDep %>%
  ggplot(aes(jdate, probDep))+
  geom_line()
# Departure records joined to deployment dates; idleTime = days between tagging
# and departure. NOTE(review): flightByWeather is not created in this script --
# presumably loaded elsewhere; confirm before running.
coxph.flightByWeather <- flightByWeather %>%
  filter(isDep) %>%
  mutate(year = as.factor(year(ts))) %>%
  left_join(tagMeta2, by = 'markerNumber') %>%
  filter(year(ts) == year(tagDeployStart)) %>%
  mutate(idleTime = jdate - tagDeploy.jdate,
         departure.jdate = jdate)
# Expand to one row per bird-day at risk (counting-process start/stop format);
# event = 1 on the day the bird departs.
test <- coxph.flightByWeather[rep(row.names(coxph.flightByWeather), coxph.flightByWeather$idleTime),]
test$start <- test$tagDeploy.jdate + (sequence(coxph.flightByWeather$idleTime)-1)
test$end <- test$tagDeploy.jdate + (sequence(coxph.flightByWeather$idleTime))
test <- test %>% mutate(event = ifelse(end == jdate, 1, 0))
# Visual check of deployment dates by age/site and year.
coxph.flightByWeather %>%
  mutate(tagDeployStart = format(tagDeployStart, '%m-%d')) %>%
  ggplot(aes(as.Date(tagDeployStart, format = '%m-%d'), paste(age, bandsite)))+
  geom_point()+
  # geom_histogram()+
  scale_x_date()+
  facet_grid(.~year)
with(test, Surv(start, end, event))
# Cox proportional-hazards models: simple right-censored form, then the
# start/stop counting-process form (coxph1 is intentionally overwritten).
coxph1 <- coxph(Surv(jdate) ~ age + bandsite + year + cc + wind.dir + wind.abs, data = coxph.flightByWeather)
survfit1 <- survfit(coxph1)
coxph1 <- coxph(Surv(start, end, event) ~ age + bandsite + year + cc + wind.dir + wind.abs, data = test)
summary(coxph1)
# Bug fix: par(mfrow(c(1,1))) called mfrow as a function, which errors --
# mfrow is a graphics parameter and must be set by name.
par(mfrow = c(1, 1))
plot(survfit1)
plot(as.integer(coxph1$y)[1:1193], coxph1$residuals)  # NOTE(review): hard-coded 1193 -- confirm against nrow(test)
length(as.integer(coxph1$residuals)[1192:1200])
# Exploratory scratch left over from development; Surv(type = 'left') errors
# because no event times are supplied, so these lines are disabled.
# with(lung, Surv(time))
# heart
# Surv(type = 'left')
| /chapter2_models.R | no_license | leberrigan/flights | R | false | false | 7,014 | r | ##
## DEPENDENCIES
##
#
# > flightsBySunset.csv
#
##
## Load Libraries
library(tidyverse)
library(motus)
library(stats)
library(lmtest)
library(survival)
# Set viewport to 2 x 2 grid
par(mfrow=c(2,2))
# Read in flights dataframe
flightBySunset <- read_csv("flightBySunset.csv")
# Get some stats on the number of flights
flightStats <- flightBySunset %>%
group_by(markerNumber) %>%
summarise(age = age[1], nflights = n()) %>%
group_by(age) %>%
summarise(a = mean(nflights), sd = sd(nflights), nflights = sum(nflights), n = n())
flightStats
flightBySunset %>%
group_by(markerNumber) %>%
summarise(bandsite = bandsite[1], nflights = n()) %>%
group_by(bandsite) %>%
summarise(a = mean(nflights), sd = sd(nflights), nflights = sum(nflights), n = n())
####
# MODELS
####
######
### NUMBER OF FLIGHT
######
nFlights <- flightBySunset %>%
# filter(!isDep) %>%
group_by(markerNumber) %>%
summarise(age = age[1], bandsite = bandsite[1], n = n()-1)
nFlights %>%
ggplot(aes(bandsite, n))+
geom_boxplot()+
facet_grid(age~.)
nFlights %>%
ggplot(aes(n)) +
geom_histogram()+
facet_grid(age~bandsite)
nFlights %>%
ggplot(aes(n)) +
geom_histogram()
sum(nFlights$n)
nFlights %>%
group_by(age) %>%
summarise(a = mean(n), sd = sd(n), nInd = n(), n = sum(n))
nFlights %>%
group_by(bandsite) %>%
summarise(a = mean(n), sd = sd(n), nInd = n(), n = sum(n))
flightBySunset %>%
group_by(markerNumber) %>%
summarise(age = age[1], bandsite = bandsite[1]) %>%
group_by(age) %>%
tally()
flightBySunset %>%
group_by(markerNumber) %>%
summarise(age = age[1], bandsite = bandsite[1], isDep = length(which(isDep==F))) %>%
group_by(bandsite) %>%
summarise(p = length(which(isDep>0))/n())
nFlights %>%
group_by(bandsite) %>%
summarise(n = n())
preDep.flightBySunset <- flightBySunset %>% filter(!isDep)
# Test: Is there a difference in the likelihood of individuals making flights among ages and bandsites?
glm1 <- glm(data = nFlights, formula = (n > 0) ~ age * bandsite, family = binomial)
glm2 <- glm(data = nFlights, formula = (n > 0) ~ age + bandsite, family = binomial)
anova(glm1, glm2, test="Chi")
glm1.1 <- nFlights %>% glm(formula = (n > 0) ~ bandsite, family = binomial)
glm1.2 <- nFlights %>% glm(formula = (n > 0) ~ age, family = binomial)
plot(glm1)
summary(glm1)
anova(glm1, test="Chi")
anova(glm1, update(glm1,.~ -age:bandsite), test = 'Chi')
anova(glm1.2, glm1, test = 'LRT')
anova(glm1.2, glm1, test = 'LRT')
lrtest(glm1.1, glm1)
anova(glm1.1, glm1, test = 'LRT')
# Test: Do individuals at remote sites make more nocturnal flights and
# do adults make fewer flights than hatch-years and show no difference between sites?
glm2 <- nFlights %>% glm(formula = n ~ age * bandsite, family = poisson)
plot(glm2)
summary(glm2)
anova(glm2, test = 'F')
lrtest(glm2, glm1)
######
### PRE-DEPARTURE FLIGHT TIMES
######
flightBySunset %>%
filter(!isDep) %>%
ggplot(aes(bandsite, (bySunset)))+
geom_boxplot()+
facet_grid(age~.)+
ylab('Minutes since sunset')+
xlab('Banding site')
flightBySunset %>%
filter(!isDep) %>%
ggplot(aes(log(bySunset))) +
geom_histogram()+
facet_grid(age~bandsite)
glm3 <- flightBySunset %>% filter(!isDep) %>% glm(formula = log(bySunset) ~ age * bandsite, family = gaussian)
plot(glm3)
summary(glm3)
anova(glm3, test = 'F')
######
### DEPARTURE TIMES (LOG)
######
dFlights <- flightBySunset %>% filter(isDep) %>% select(markerNumber, bySunset, age, bandsite, ts)
dFlights %>%
ggplot(aes(bandsite, (bySunset)))+
geom_boxplot()+
facet_grid(age~.)+
ylab('Minutes since sunset')+
xlab('Banding site')
dFlights %>%
ggplot(aes(log(bySunset))) +
geom_histogram()+
facet_grid(age~bandsite)
dFlights %>%
group_by(age) %>%
summarise(a = mean(bySunset), sd = sd(bySunset), n = n())
dFlights %>%
group_by(bandsite) %>%
summarise(a = mean(bySunset), sd = sd(bySunset), n = n())
# Test: Do individuals at remote sites depart earlier in the evening and
# do adults depart later than hatch-years and show no difference between sites?
glm6 <- dFlights %>% glm(formula = log(bySunset) ~ age * bandsite, family = gaussian)
plot(glm6)
summary(glm6)
anova(glm6, test = "F")
######
### NUMBER FLIGHT PRIOR TO DEPARTURE
######
dDep <- flightBySunset %>%
rowwise() %>%
# mutate(depDate = dFlights[dFlights$markerNumber == markerNumber,]$ts)
mutate(daysToDep = difftime(dFlights[dFlights$markerNumber == markerNumber,]$ts, ts, units = 'days'))
dDep %>%
filter(daysToDep > 0) %>%
ggplot(aes(as.integer(daysToDep), group = age, color= age)) +
geom_density() +
facet_grid(.~bandsite)
######
### DEPARTURE DATE
######
flightBySunset %>%
filter(isDep) %>%
ggplot(aes(jdate, group = age, color = age)) +
geom_density() +
facet_grid(.~bandsite)
glm7 <- flightBySunset %>% filter(isDep) %>% glm(formula = (jdate) ~ age * bandsite, family = gaussian)
plot(glm7)
summary(glm7)
anova(glm7, test = "F")
######
### PROBABILITY OF DEPARTURE
######
tagMeta2 <- read_rds('tagMeta.rds') %>%
group_by(markerNumber) %>%
summarise(tagDeployStart = tagDeployStart[1]) %>%
mutate(tagDeployStart = as.POSIXct(tagDeployStart, origin = '1970-01-01'),
tagDeploy.jdate = as.integer(format(tagDeployStart, format = '%j')))
jdate.range <- range(flightBySunset$jdate)
depRange <- seq(jdate.range[1]-1, jdate.range[2])
numDep <- sapply(depRange, function(x){length(which(x>=flightBySunset$jdate))})
probDep <- tibble(jdate = depRange,
numDep = numDep,
probDep = numDep/max(numDep))
probDep %>%
ggplot(aes(jdate, probDep))+
geom_line()
coxph.flightByWeather <- flightByWeather %>%
filter(isDep) %>%
mutate(year = as.factor(year(ts))) %>%
left_join(tagMeta2, by = 'markerNumber') %>%
filter(year(ts) == year(tagDeployStart)) %>%
mutate(idleTime = jdate - tagDeploy.jdate,
departure.jdate = jdate)
test <- coxph.flightByWeather[rep(row.names(coxph.flightByWeather), coxph.flightByWeather$idleTime),]
test$start <- test$tagDeploy.jdate + (sequence(coxph.flightByWeather$idleTime)-1)
test$end <- test$tagDeploy.jdate + (sequence(coxph.flightByWeather$idleTime))
test <- test %>% mutate(event = ifelse(end == jdate, 1, 0))
coxph.flightByWeather %>%
mutate(tagDeployStart = format(tagDeployStart, '%m-%d')) %>%
ggplot(aes(as.Date(tagDeployStart, format = '%m-%d'), paste(age, bandsite)))+
geom_point()+
# geom_histogram()+
scale_x_date()+
facet_grid(.~year)
with(test, Surv(start, end, event))
coxph1 <- coxph(Surv(jdate) ~ age + bandsite + year + cc + wind.dir + wind.abs, data = coxph.flightByWeather)
survfit1 <- survfit(coxph1)
coxph1 <- coxph(Surv(start, end, event) ~ age + bandsite + year + cc + wind.dir + wind.abs, data = test)
summary(coxph1)
par(mfrow(c(1,1)))
plot(survfit1)
plot(as.integer(coxph1$y)[1:1193], coxph1$residuals)
length(as.integer(coxph1$residuals)[1192:1200])
with(lung, Surv(time))
heart
Surv(type = 'left')
|
library(optparse)

# Command-line flags accepted by this report-compilation script.
# (A "-m/--idp_meta_file" option for annotating IDPs existed previously but is
# currently disabled.)
cli_options <- list(
  make_option(c("-i", "--input_prefix"), type = "character", default = NULL,
              help = "The prefix of all these BrainXcan analysis results",
              metavar = "character"),
  make_option(c("-c", "--color_code_yaml"), type = "character", default = NULL,
              help = "Color coding",
              metavar = "character"),
  make_option(c("-r", "--rlib"), type = "character", default = NULL,
              help = "The path to report helper functions",
              metavar = "character"),
  make_option(c("-n", "--ntop"), type = "numeric", default = NULL,
              help = "Number of top IDP associations to show",
              metavar = "character"),
  make_option(c("-p", "--phenotype_name"), type = "character", default = NULL,
              help = "Phenotype name to show in the report",
              metavar = "character"),
  make_option(c("-t", "--rmd_template"), type = "character", default = NULL,
              help = "R Markdown template",
              metavar = "character"),
  make_option(c("-o", "--output_html"), type = "character", default = NULL,
              help = "Output HTML filename",
              metavar = "character")
)

# Parse the actual command line.
cli <- parse_args(OptionParser(option_list = cli_options))

# Parameters handed to the R Markdown template.
report_params <- list(
  color_code_yaml = cli$color_code_yaml,
  input_prefix = cli$input_prefix,
  rlib = cli$rlib,
  phenotype_name = cli$phenotype_name,
  ntop = cli$ntop
)

# Render the report next to the requested output file, knitting from that directory.
rmarkdown::render(
  cli$rmd_template,
  params = report_params,
  envir = new.env(),
  output_dir = dirname(cli$output_html),
  output_file = basename(cli$output_html),
  knit_root_dir = dirname(cli$output_html)
)
| /brainxcan/vis/report_compilation.R | permissive | CalvinLeeUBIC/brainxcan | R | false | false | 1,844 | r | library(optparse)
option_list <- list(
make_option(c("-i", "--input_prefix"), type="character", default=NULL,
help="The prefix of all these BrainXcan analysis results",
metavar="character"),
# make_option(c("-m", "--idp_meta_file"), type="character", default=NULL,
# help="A meta file for annotating IDPs",
# metavar="character"),
make_option(c("-c", "--color_code_yaml"), type="character", default=NULL,
help="Color coding",
metavar="character"),
make_option(c("-r", "--rlib"), type="character", default=NULL,
help="The path to report helper functions",
metavar="character"),
make_option(c("-n", "--ntop"), type="numeric", default=NULL,
help="Number of top IDP associations to show",
metavar="character"),
make_option(c("-p", "--phenotype_name"), type="character", default=NULL,
help="Phenotype name to show in the report",
metavar="character"),
make_option(c("-t", "--rmd_template"), type="character", default=NULL,
help="R Markdown template",
metavar="character"),
make_option(c("-o", "--output_html"), type="character", default=NULL,
help="Output HTML filename",
metavar="character")
)
opt_parser <- OptionParser(option_list=option_list)
opt <- parse_args(opt_parser)
params = list(
color_code_yaml = opt$color_code_yaml,
input_prefix = opt$input_prefix,
rlib = opt$rlib,
phenotype_name = opt$phenotype_name,
ntop = opt$ntop
)
rmarkdown::render(
opt$rmd_template,
params = params,
envir = new.env(),
output_dir = dirname(opt$output_html),
output_file = basename(opt$output_html),
knit_root_dir = dirname(opt$output_html)
)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_SBS96_signature.R
\name{plot_SBS96_signature}
\alias{plot_SBS96_signature}
\title{Plot an SBS96 signature or series of signatures}
\usage{
plot_SBS96_signature(
x,
label = "Signature",
title = NULL,
xlabel = "Base Context",
ylabel = "Count",
ylimits = NULL,
usePercent = FALSE,
countsAsProportions = FALSE,
facetCondition = NULL
)
}
\arguments{
\item{x}{A TidySig dataframe/tibble}
\item{label}{The right-side (i.e., facet) label.
Usually "Signature" or "Sample" or a sample ID.}
\item{title}{A title for the plot}
\item{xlabel}{An x-axis label}
\item{ylabel}{A y-axis label}
\item{ylimits}{Use custom ylimits (useful for normalizing the views of multiple signatures)}
\item{usePercent}{Use percent scales (rather than counts)}
\item{countsAsProportions}{Convert the input data (in counts) to per-signature proportions}
\item{facetCondition}{A condition used to generate facet columns.}
}
\value{
a ggplot2 object
}
\description{
Plot an SBS96 signature or series of signatures
}
| /man/plot_SBS96_signature.Rd | permissive | wooyaalee/tidysig | R | false | true | 1,084 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_SBS96_signature.R
\name{plot_SBS96_signature}
\alias{plot_SBS96_signature}
\title{Plot an SBS96 signature or series of signatures}
\usage{
plot_SBS96_signature(
x,
label = "Signature",
title = NULL,
xlabel = "Base Context",
ylabel = "Count",
ylimits = NULL,
usePercent = FALSE,
countsAsProportions = FALSE,
facetCondition = NULL
)
}
\arguments{
\item{x}{A TidySig dataframe/tibble}
\item{label}{The right-side (i.e., facet) label.
Usually "Signature" or "Sample" or a sample ID.}
\item{title}{A title for the plot}
\item{xlabel}{An x-axis label}
\item{ylabel}{A y-axis label}
\item{ylimits}{Use custom ylimits (useful for normalizing the views of multiple signatures)}
\item{usePercent}{Use percent scales (rather than counts)}
\item{countsAsProportions}{Convert the input data (in counts) to per-signature proportions}
\item{facetCondition}{A condition used to generate facet columns.}
}
\value{
a ggplot2 object
}
\description{
Plot an SBS96 signature or series of signatures
}
|
#
# This is the user interface for the week4dp application. The application supports spot
# checking of various machine learning algorithms against binary classification data by
# providing box plots of Accuracy and Kappa measures, data summaries and confusion matrices.
#
# The interface contains 3 main sections:
# - title and introductory text
# - input panel with a list of checkboxes against machine learning methods
# - tab panel with tabs for
# o help
# o boxplots
# o accuracy and kappa summary data
# o confusion matrix displaying values
# o confusion matrix displaying sensitivity and specificity
# o confusion matrix displaying positive and negative predictive values
#
#
library(shiny)
library(shinyjs)
shinyUI(fluidPage(
    # shinyjs is enabled so the server can swap the two divs below
    # ("loading_page" -> "main_content") once model building finishes;
    # the actual show/hide calls presumably live in server.R -- confirm there.
    useShinyjs(),
    # Splash placeholder displayed until the server reveals "main_content".
    div(
        id = "loading_page",
        h1("Loading...")
    ),
    div(
        id = "main_content",
        # Application title
        titlePanel("Binary Classifier - Spot Check"),
        #
        # The introduction
        #
        fluidRow(
        p("Selecting an appropriate machine learning algorithm to best predict binary classes from data is not a deterministic process. Quickly and roughly testing the data or a subset of the data with a range of different machine learning methods may give an indication of which algorithms are worth pursuing in greater detail. This application provides summary plots and data to support decision making and is based on an article by Machine Learning Mastery called ", a(href="http://machinelearningmastery.com/evaluate-machine-learning-algorithms-with-r/", "How to Evaluate Machine Learning Algorithms with R.", target="_blank"))
        ),
        #
        fluidRow(
        column(3,
               #
               # The input checkboxes
               #
               inputPanel(
                   # NOTE(review): each choice maps its displayed label to a
                   # method value; "C5.0" maps to "c50", which the server
                   # presumably normalises to caret's "C5.0" method name --
                   # confirm against server.R.
                   checkboxGroupInput("method", "Show ML Method:",
                                      c("lda" = "lda", "glm" = "glm",
                                        "glmnet" = "glmnet",
                                        "svmRadial" = "svmRadial", "knn" = "knn",
                                        "nb" = "nb", "rpart" = "rpart",
                                        "C5.0" = "c50", "treebag" = "treebag",
                                        "rf" = "rf"),
                                      selected=c( "nb",
                                                  "rpart"))
               )
        ),
        column(9,
               #
               # The tabs
               #
               tabsetPanel(
                   tabPanel("Help",
                            tags$h4("Initialising the App"),
                            p("All of the machine learning models are built up front and can take a few minutes to complete. While this is happening the word 'Loading...' appears in the top left hand corner of the window. Please be patient during this computation."),
                            tags$h4("Using the App"),
                            p("Select and deselect the machine learning methods using the checkboxes on the left. Select the required tab above to view the Boxplot, Confusion Matrix, Sensitivity and Specificity or Predictive Values for the selected Machine Learning methods."),
                            tags$h4("Description"),
                            p("In this application the data that is used has been downloaded from the UCI Machine Learning Repository and is called the Vertebral Column Data Set. This biomedical data set was built by Dr. Henrique da Mota and consists of classifying patients as belonging to one out of two categories: Normal (100 patients) or Abnormal (210 patients). More information can be found ", a(href="https://archive.ics.uci.edu/ml/datasets/Vertebral+Column", "here.", target="_blank")),
                            p("Although the Vertebral Column Data Set is used for this demonstration, the application will work with any binary classification data set which can be loaded into a dataframe where the outcome field is labelled 'result'."),
                            p("The caret package in R is used to build the models according to the 10 methods listed against the checkboxes to the left. Resampling is used to establish the in-sample Accuracy and Kappa data for each of the models while the confusion matrices, sensitivity/specificity and predictive values are calculated from a single in-sample prediction of outcomes.")
                   ),
                   #
                   # The data and plot tabs
                   #
                   tabPanel("Boxplot", plotOutput("bwplot")),
                   tabPanel("Summary", verbatimTextOutput("dataSummary")),
                   tabPanel("Confusion Matrices", uiOutput("cmvals.ui")),
                   tabPanel("Sensitivity/Specificity", uiOutput("cmsenspec.ui")),
                   tabPanel("Predictive Vals", uiOutput("cmpospred.ui"))
               )
        )
    )
    )
)
)
| /week4dp/ui.R | no_license | dysartcoal/CourseraDataProducts | R | false | false | 5,362 | r | #
# This is the user interface for the week4dp application. The application supports spot
# checking of various machine learning algorithms against binary classification data by
# providing box plots of Accuracy and Kappa measures, data summaries and confusion matrices.
#
# The interface contains 3 main sections:
# - title and introductory text
# - input panel with a list of checkboxes against machine learning methods
# - tab panel with tabs for
# o help
# o boxplots
# o accuracy and kappa summary data
# o confusion matrix displaying values
# o confusion matrix displaying sensitivity and specificity
# o confusion matrix displaying positive and negative predictive values
#
#
library(shiny)
library(shinyjs)
shinyUI(fluidPage(
useShinyjs(),
div(
id = "loading_page",
h1("Loading...")
),
div(
id = "main_content",
# Application title
titlePanel("Binary Classifier - Spot Check"),
#
# The introduction
#
fluidRow(
p("Selecting an appropriate machine learning algorithm to best predict binary classes from data is not a deterministic process. Quickly and roughly testing the data or a subset of the data with a range of different machine learning methods may give an indication of which algorithms are worth pursuing in greater detail. This application provides summary plots and data to support decision making and is based on an article by Machine Learning Mastery called ", a(href="http://machinelearningmastery.com/evaluate-machine-learning-algorithms-with-r/", "How to Evaluate Machine Learning Algorithms with R.", target="_blank"))
),
#
fluidRow(
column(3,
#
# The input checkboxes
#
inputPanel(
checkboxGroupInput("method", "Show ML Method:",
c("lda" = "lda", "glm" = "glm",
"glmnet" = "glmnet",
"svmRadial" = "svmRadial", "knn" = "knn",
"nb" = "nb", "rpart" = "rpart",
"C5.0" = "c50", "treebag" = "treebag",
"rf" = "rf"),
selected=c( "nb",
"rpart"))
)
),
column(9,
#
# The tabs
#
tabsetPanel(
tabPanel("Help",
tags$h4("Initialising the App"),
p("All of the machine learning models are built up front and can take a few minutes to complete. While this is happening the word 'Loading...' appears in the top left hand corner of the window. Please be patient during this computation."),
tags$h4("Using the App"),
p("Select and deselect the machine learning methods using the checkboxes on the left. Select the required tab above to view the Boxplot, Confusion Matrix, Sensitivity and Specificity or Predictive Values for the selected Machine Learning methods."),
tags$h4("Description"),
p("In this application the data that is used has been downloaded from the UCI Machine Learning Repository and is called the Vertebral Column Data Set. This biomedical data set was built by Dr. Henrique da Mota and consists of classifying patients as belonging to one out of two categories: Normal (100 patients) or Abnormal (210 patients). More information can be found ", a(href="https://archive.ics.uci.edu/ml/datasets/Vertebral+Column", "here.", target="_blank")),
p("Although the Vertebral Column Data Set is used for this demonstration, the application will work with any binary classification data set which can be loaded into a dataframe where the outcome field is labelled 'result'."),
p("The caret package in R is used to build the models according to the 10 methods listed against the checkboxes to the left. Resampling is used to establish the in-sample Accuracy and Kappa data for each of the models while the confusion matrices, sensitivity/specificity and predictive values are calculated from a single in-sample prediction of outcomes.")
),
#
# The data and plot tabs
#
tabPanel("Boxplot", plotOutput("bwplot")),
tabPanel("Summary", verbatimTextOutput("dataSummary")),
tabPanel("Confusion Matrices", uiOutput("cmvals.ui")),
tabPanel("Sensitivity/Specificity", uiOutput("cmsenspec.ui")),
tabPanel("Predictive Vals", uiOutput("cmpospred.ui"))
)
)
)
)
)
)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/zgcamusa_L103.water_mapping.R
\name{module_gcamusa_L103.water_mapping}
\alias{module_gcamusa_L103.water_mapping}
\title{module_gcamusa_L103.water_mapping}
\usage{
module_gcamusa_L103.water_mapping(command, ...)
}
\arguments{
\item{command}{API command to execute}
\item{...}{other optional parameters, depending on command}
}
\value{
Depends on \code{command}: either a vector of required inputs,
a vector of output names, or (if \code{command} is "MAKE") all
the generated outputs: \code{L103.water_mapping_R_GLU_B_W_Ws_share}, \code{L103.water_mapping_R_LS_W_Ws_share},
\code{L103.water_mapping_R_B_W_Ws_share}, \code{L103.water_mapping_R_PRI_W_Ws_share}
There was no corresponding file in the original data system.
}
\description{
Calculate percentage shares to map water demands from USA regional level to state and basin.
}
\details{
Water demands by USA region / sector to basin and state.
}
\author{
NTG Oct 2019
}
| /input/gcamdata/man/module_gcamusa_L103.water_mapping.Rd | permissive | JGCRI/gcam-core | R | false | true | 1,002 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/zgcamusa_L103.water_mapping.R
\name{module_gcamusa_L103.water_mapping}
\alias{module_gcamusa_L103.water_mapping}
\title{module_gcamusa_L103.water_mapping}
\usage{
module_gcamusa_L103.water_mapping(command, ...)
}
\arguments{
\item{command}{API command to execute}
\item{...}{other optional parameters, depending on command}
}
\value{
Depends on \code{command}: either a vector of required inputs,
a vector of output names, or (if \code{command} is "MAKE") all
the generated outputs: \code{L103.water_mapping_R_GLU_B_W_Ws_share}, \code{L103.water_mapping_R_LS_W_Ws_share},
\code{L103.water_mapping_R_B_W_Ws_share}, \code{L103.water_mapping_R_PRI_W_Ws_share}
There was no corresponding file in the original data system.
}
\description{
Calculate percentage shares to map water demands from USA regional level to state and basin.
}
\details{
Water demands by USA region / sector to basin and state.
}
\author{
NTG Oct 2019
}
|
#!/usr/bin/r -t
#
# Copyright (C) 2010 - 2014 Dirk Eddelbuettel, Romain Francois and Kevin Ushey
#
# This file is part of Rcpp.
#
# Rcpp is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# Rcpp is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Rcpp. If not, see <http://www.gnu.org/licenses/>.
.runThisTest <- Sys.getenv("RunAllRcppTests") == "yes"
if (.runThisTest) {
.setUp <- Rcpp:::unitTestSetup("Matrix.cpp")
test.List.column <- function(){
x <- matrix( 1:16+.5, nc = 4 )
res <- runit_Row_Column_sugar( x )
target <- list(
x[1,],
x[,1],
x[2,],
x[,2],
x[2,] + x[,2]
)
checkEquals( res, target, msg = "column and row as sugar" )
}
test.NumericMatrix <- function(){
x <- matrix( 1:16 + .5, ncol = 4 )
checkEquals( matrix_numeric(x), sum(diag(x)), msg = "matrix indexing" )
y <- as.vector( x )
checkException( matrix_numeric(y) , msg = "not a matrix" )
}
test.CharacterMatrix <- function(){
x <- matrix( letters[1:16], ncol = 4 )
checkEquals( matrix_character(x), paste( diag(x), collapse = "" ) )
}
test.GenericMatrix <- function( ){
g <- function(y){
sapply( y, function(x) seq(from=x, to = 16) )
}
x <- matrix( g(1:16), ncol = 4 )
checkEquals( matrix_generic(x), g(diag(matrix(1:16,ncol=4))), msg = "GenericMatrix" )
}
test.IntegerMatrix.diag <- function(){
expected <- matrix( 0L, nrow = 5, ncol = 5 )
diag( expected ) <- 1L
checkEquals( matrix_integer_diag(), expected, msg = "IntegerMatrix::diag" )
}
test.CharacterMatrix.diag <- function(){
expected <- matrix( "", nrow = 5, ncol = 5 )
diag( expected ) <- "foo"
checkEquals( matrix_character_diag(), expected, msg = "CharacterMatrix::diag" )
}
test.NumericMatrix.Ctors <- function(){
x <- matrix(0, 3, 3)
checkEquals( matrix_numeric_ctor1(), x, msg = "matrix from single int" )
x <- matrix(0, 3, 3)
checkEquals( matrix_numeric_ctor2(), x, msg = "matrix from two int" )
}
test.IntegerVector.matrix.indexing <- function(){
x <- matrix( 1:16, ncol = 4 )
checkEquals( integer_matrix_indexing(x), sum(diag(x)), msg = "matrix indexing" )
checkEquals( diag(integer_matrix_indexing_lhs(x)), 2*0:3, msg = "matrix indexing lhs" )
y <- as.vector( x )
checkException( integer_matrix_indexing_lhs(y) , msg = "not a matrix" )
}
test.NumericMatrix.row <- function(){
x <- matrix( 1:16 + .5, ncol = 4 )
checkEquals( runit_NumericMatrix_row( x ), sum( x[1,] ), msg = "iterating over a row" )
}
test.CharacterMatrix.row <- function(){
m <- matrix( letters, ncol = 2 )
checkEquals( runit_CharacterMatrix_row(m), paste( m[1,], collapse = "" ), msg = "CharacterVector::Row" )
}
test.List.row <- function(){
m <- lapply( 1:16, function(i) seq(from=1, to = i ) )
dim( m ) <- c( 4, 4 )
checkEquals( runit_GenericMatrix_row( m ), 1 + 0:3*4, msg = "List::Row" )
}
test.NumericMatrix.column <- function(){
x <- matrix( 1:16 + .5, ncol = 4 )
checkEquals( runit_NumericMatrix_column( x ), sum( x[,1] ) , msg = "iterating over a column" )
}
test.NumericMatrix.cumsum <- function(){
x <- matrix( 1:8 + .5, ncol = 2 )
checkEquals( runit_NumericMatrix_cumsum( x ), t(apply(x, 1, cumsum)) , msg = "cumsum" )
}
test.CharacterMatrix.column <- function(){
m <- matrix( letters, ncol = 2 )
checkEquals( runit_CharacterMatrix_column(m), paste( m[,1], collapse = "" ), msg = "CharacterVector::Column" )
}
test.List.column <- function(){
m <- lapply( 1:16, function(i) seq(from=1, to = i ) )
dim( m ) <- c( 4, 4 )
checkEquals( runit_GenericMatrix_column( m ), 1:4, msg = "List::Column" )
}
test.NumericMatrix.colsum <- function( ){
probs <- matrix(1:12,nrow=3)
checkEquals( runit_NumericMatrix_colsum( probs ), t(apply(probs,1,cumsum)) )
}
test.NumericMatrix.rowsum <- function( ){
probs <- matrix(1:12,nrow=3)
checkEquals( runit_NumericMatrix_rowsum( probs ), apply(probs,2,cumsum) )
}
test.NumericMatrix.SubMatrix <- function( ){
target <- rbind( c(3,4,5,5), c(3,4,5,5), 0 )
checkEquals( runit_SubMatrix(), target, msg = "SubMatrix" )
}
test.NumericMatrix.opequals <- function() {
m <- matrix(1:4, nrow=2)
checkEquals(m, matrix_opequals(m))
}
test.NumericMatrix.rownames.colnames.proxy <- function() {
m <- matrix(as.numeric(1:4), nrow = 2)
runit_rownames_colnames_proxy(m, letters[1:2], LETTERS[1:2])
checkEquals(rownames(m), letters[1:2])
checkEquals(colnames(m), LETTERS[1:2])
checkException(runit_rownames_colnames_proxy(m, letters[1:3], letters[1:3]))
checkException(runit_rownames_colnames_proxy(m, letters[1:2], NULL))
m <- matrix(as.numeric(1:9), nrow = 3)
runit_rownames_proxy(m)
checkEquals(rownames(m), c("A", "B", "C"))
checkEquals(colnames(m), NULL)
}
test.NumericMatrix.no.init <- function() {
m <- runit_no_init_matrix()
checkEquals(m, matrix(c(0, 1, 2, 3), nrow = 2))
}
test.NumericMatrix.const.Column <- function(){
m <- matrix(as.numeric(1:9), nrow = 3)
res <- runit_const_Matrix_column(m)
checkEquals( m[,1], m[,2] )
}
test.IntegerMatrix.accessor.with.bounds.checking <- function() {
m <- matrix(seq(1L, 12, by=1L), nrow=4L, ncol=3L)
checkEquals(mat_access_with_bounds_checking(m, 0, 0), 1)
checkEquals(mat_access_with_bounds_checking(m, 1, 2), 10)
checkEquals(mat_access_with_bounds_checking(m, 3, 2), 12)
checkException(mat_access_with_bounds_checking(m, 4, 2) , msg = "index out of bounds not detected" )
checkException(mat_access_with_bounds_checking(m, 3, 3) , msg = "index out of bounds not detected" )
checkException(mat_access_with_bounds_checking(m, 3, -1) , msg = "index out of bounds not detected" )
checkException(mat_access_with_bounds_checking(m, -1, 2) , msg = "index out of bounds not detected" )
checkException(mat_access_with_bounds_checking(m, -1, -1) , msg = "index out of bounds not detected" )
}
test.IntegerMatrix.transpose <- function() {
M <- matrix(1:12, 3, 4)
checkEquals(transposeInteger(M), t(M), msg="integer transpose")
rownames(M) <- letters[1:nrow(M)]
checkEquals(transposeInteger(M), t(M), msg="integer transpose with rownames")
colnames(M) <- LETTERS[1:ncol(M)]
checkEquals(transposeInteger(M), t(M), msg="integer transpose with row and colnames")
}
test.NumericMatrix.transpose <- function() {
M <- matrix(1.0 * (1:12), 3, 4)
checkEquals(transposeNumeric(M), t(M), msg="numeric transpose")
rownames(M) <- letters[1:nrow(M)]
checkEquals(transposeNumeric(M), t(M), msg="numeric transpose with rownames")
colnames(M) <- LETTERS[1:ncol(M)]
checkEquals(transposeNumeric(M), t(M), msg="numeric transpose with row and colnames")
}
test.CharacterMatrix.transpose <- function() {
M <- matrix(as.character(1:12), 3, 4)
checkEquals(transposeCharacter(M), t(M), msg="character transpose")
rownames(M) <- letters[1:nrow(M)]
checkEquals(transposeCharacter(M), t(M), msg="character transpose with rownames")
colnames(M) <- LETTERS[1:ncol(M)]
checkEquals(transposeCharacter(M), t(M), msg="character transpose with row and colnames")
}
}
| /packrat/lib/x86_64-apple-darwin13.4.0/3.2.3/Rcpp/unitTests/runit.Matrix.R | permissive | LanceAtKS/LC | R | false | false | 7,907 | r | #!/usr/bin/r -t
#
# Copyright (C) 2010 - 2014 Dirk Eddelbuettel, Romain Francois and Kevin Ushey
#
# This file is part of Rcpp.
#
# Rcpp is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# Rcpp is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Rcpp. If not, see <http://www.gnu.org/licenses/>.
.runThisTest <- Sys.getenv("RunAllRcppTests") == "yes"
if (.runThisTest) {
.setUp <- Rcpp:::unitTestSetup("Matrix.cpp")
test.List.column <- function(){
x <- matrix( 1:16+.5, nc = 4 )
res <- runit_Row_Column_sugar( x )
target <- list(
x[1,],
x[,1],
x[2,],
x[,2],
x[2,] + x[,2]
)
checkEquals( res, target, msg = "column and row as sugar" )
}
test.NumericMatrix <- function(){
x <- matrix( 1:16 + .5, ncol = 4 )
checkEquals( matrix_numeric(x), sum(diag(x)), msg = "matrix indexing" )
y <- as.vector( x )
checkException( matrix_numeric(y) , msg = "not a matrix" )
}
test.CharacterMatrix <- function(){
x <- matrix( letters[1:16], ncol = 4 )
checkEquals( matrix_character(x), paste( diag(x), collapse = "" ) )
}
test.GenericMatrix <- function( ){
g <- function(y){
sapply( y, function(x) seq(from=x, to = 16) )
}
x <- matrix( g(1:16), ncol = 4 )
checkEquals( matrix_generic(x), g(diag(matrix(1:16,ncol=4))), msg = "GenericMatrix" )
}
test.IntegerMatrix.diag <- function(){
expected <- matrix( 0L, nrow = 5, ncol = 5 )
diag( expected ) <- 1L
checkEquals( matrix_integer_diag(), expected, msg = "IntegerMatrix::diag" )
}
test.CharacterMatrix.diag <- function(){
expected <- matrix( "", nrow = 5, ncol = 5 )
diag( expected ) <- "foo"
checkEquals( matrix_character_diag(), expected, msg = "CharacterMatrix::diag" )
}
test.NumericMatrix.Ctors <- function(){
x <- matrix(0, 3, 3)
checkEquals( matrix_numeric_ctor1(), x, msg = "matrix from single int" )
x <- matrix(0, 3, 3)
checkEquals( matrix_numeric_ctor2(), x, msg = "matrix from two int" )
}
test.IntegerVector.matrix.indexing <- function(){
x <- matrix( 1:16, ncol = 4 )
checkEquals( integer_matrix_indexing(x), sum(diag(x)), msg = "matrix indexing" )
checkEquals( diag(integer_matrix_indexing_lhs(x)), 2*0:3, msg = "matrix indexing lhs" )
y <- as.vector( x )
checkException( integer_matrix_indexing_lhs(y) , msg = "not a matrix" )
}
test.NumericMatrix.row <- function(){
x <- matrix( 1:16 + .5, ncol = 4 )
checkEquals( runit_NumericMatrix_row( x ), sum( x[1,] ), msg = "iterating over a row" )
}
test.CharacterMatrix.row <- function(){
m <- matrix( letters, ncol = 2 )
checkEquals( runit_CharacterMatrix_row(m), paste( m[1,], collapse = "" ), msg = "CharacterVector::Row" )
}
test.List.row <- function(){
m <- lapply( 1:16, function(i) seq(from=1, to = i ) )
dim( m ) <- c( 4, 4 )
checkEquals( runit_GenericMatrix_row( m ), 1 + 0:3*4, msg = "List::Row" )
}
test.NumericMatrix.column <- function(){
x <- matrix( 1:16 + .5, ncol = 4 )
checkEquals( runit_NumericMatrix_column( x ), sum( x[,1] ) , msg = "iterating over a column" )
}
test.NumericMatrix.cumsum <- function(){
x <- matrix( 1:8 + .5, ncol = 2 )
checkEquals( runit_NumericMatrix_cumsum( x ), t(apply(x, 1, cumsum)) , msg = "cumsum" )
}
test.CharacterMatrix.column <- function(){
m <- matrix( letters, ncol = 2 )
checkEquals( runit_CharacterMatrix_column(m), paste( m[,1], collapse = "" ), msg = "CharacterVector::Column" )
}
test.List.column <- function(){
m <- lapply( 1:16, function(i) seq(from=1, to = i ) )
dim( m ) <- c( 4, 4 )
checkEquals( runit_GenericMatrix_column( m ), 1:4, msg = "List::Column" )
}
test.NumericMatrix.colsum <- function( ){
probs <- matrix(1:12,nrow=3)
checkEquals( runit_NumericMatrix_colsum( probs ), t(apply(probs,1,cumsum)) )
}
test.NumericMatrix.rowsum <- function( ){
probs <- matrix(1:12,nrow=3)
checkEquals( runit_NumericMatrix_rowsum( probs ), apply(probs,2,cumsum) )
}
test.NumericMatrix.SubMatrix <- function( ){
target <- rbind( c(3,4,5,5), c(3,4,5,5), 0 )
checkEquals( runit_SubMatrix(), target, msg = "SubMatrix" )
}
test.NumericMatrix.opequals <- function() {
m <- matrix(1:4, nrow=2)
checkEquals(m, matrix_opequals(m))
}
test.NumericMatrix.rownames.colnames.proxy <- function() {
m <- matrix(as.numeric(1:4), nrow = 2)
runit_rownames_colnames_proxy(m, letters[1:2], LETTERS[1:2])
checkEquals(rownames(m), letters[1:2])
checkEquals(colnames(m), LETTERS[1:2])
checkException(runit_rownames_colnames_proxy(m, letters[1:3], letters[1:3]))
checkException(runit_rownames_colnames_proxy(m, letters[1:2], NULL))
m <- matrix(as.numeric(1:9), nrow = 3)
runit_rownames_proxy(m)
checkEquals(rownames(m), c("A", "B", "C"))
checkEquals(colnames(m), NULL)
}
test.NumericMatrix.no.init <- function() {
m <- runit_no_init_matrix()
checkEquals(m, matrix(c(0, 1, 2, 3), nrow = 2))
}
test.NumericMatrix.const.Column <- function(){
m <- matrix(as.numeric(1:9), nrow = 3)
res <- runit_const_Matrix_column(m)
checkEquals( m[,1], m[,2] )
}
test.IntegerMatrix.accessor.with.bounds.checking <- function() {
m <- matrix(seq(1L, 12, by=1L), nrow=4L, ncol=3L)
checkEquals(mat_access_with_bounds_checking(m, 0, 0), 1)
checkEquals(mat_access_with_bounds_checking(m, 1, 2), 10)
checkEquals(mat_access_with_bounds_checking(m, 3, 2), 12)
checkException(mat_access_with_bounds_checking(m, 4, 2) , msg = "index out of bounds not detected" )
checkException(mat_access_with_bounds_checking(m, 3, 3) , msg = "index out of bounds not detected" )
checkException(mat_access_with_bounds_checking(m, 3, -1) , msg = "index out of bounds not detected" )
checkException(mat_access_with_bounds_checking(m, -1, 2) , msg = "index out of bounds not detected" )
checkException(mat_access_with_bounds_checking(m, -1, -1) , msg = "index out of bounds not detected" )
}
test.IntegerMatrix.transpose <- function() {
M <- matrix(1:12, 3, 4)
checkEquals(transposeInteger(M), t(M), msg="integer transpose")
rownames(M) <- letters[1:nrow(M)]
checkEquals(transposeInteger(M), t(M), msg="integer transpose with rownames")
colnames(M) <- LETTERS[1:ncol(M)]
checkEquals(transposeInteger(M), t(M), msg="integer transpose with row and colnames")
}
test.NumericMatrix.transpose <- function() {
M <- matrix(1.0 * (1:12), 3, 4)
checkEquals(transposeNumeric(M), t(M), msg="numeric transpose")
rownames(M) <- letters[1:nrow(M)]
checkEquals(transposeNumeric(M), t(M), msg="numeric transpose with rownames")
colnames(M) <- LETTERS[1:ncol(M)]
checkEquals(transposeNumeric(M), t(M), msg="numeric transpose with row and colnames")
}
test.CharacterMatrix.transpose <- function() {
M <- matrix(as.character(1:12), 3, 4)
checkEquals(transposeCharacter(M), t(M), msg="character transpose")
rownames(M) <- letters[1:nrow(M)]
checkEquals(transposeCharacter(M), t(M), msg="character transpose with rownames")
colnames(M) <- LETTERS[1:ncol(M)]
checkEquals(transposeCharacter(M), t(M), msg="character transpose with row and colnames")
}
}
|
# HW1 problem 8: multiple linear regression of `resp` on z_1 and z_2,
# confidence regions, prediction intervals, and influence diagnostics.
library(ggplot2)
library(olsrr)
library(car)
library(ellipse)

data <- read.table("HW1-Prob.csv", header = TRUE, sep = ',')

# Scatter plot of the response against z_2 with a least-squares smooth.
ggplot(data, aes(x=z_2, y=resp)) +
  geom_point(color='#2980B9', size = 4) +
  geom_smooth(method=lm, color='#2C3E50')+
  theme_light()

fit <- lm(resp ~ z_1 + z_2, data=data)
summary(fit) # show results

# Recompute the coefficient estimates by hand: beta_hat = (Z'Z)^{-1} Z'Y.
y <- data[1]
z_1 <- data[2]
z_2 <- data[3]
Y <- as.matrix(y)
Z <- as.matrix(cbind(1, z_1, z_2)) # design matrix (intercept column first)
beta_hat <- solve(t(Z) %*% Z) %*% (t(Z) %*% Y)

# 8c: marginal 95% confidence intervals and the joint confidence ellipse
# for the two slope coefficients.
confint(fit, level = 0.95)
plot(ellipse(fit, which = c('z_1', 'z_2'), level = 0.95), type = 'l')
points(fit$coefficients['z_1'], fit$coefficients['z_2'])

# 8d: likelihood-ratio test comparing the full model against the model
# without z_2.
full.mod <- lm(formula = resp ~ z_1 + z_2, data = data)
reduced.mod <- lm(formula = resp ~ z_1, data = data)
anova(reduced.mod, full.mod, test = "LRT")

# 8e: 95% confidence interval for the mean response at z_0 = (1, 7, 8).
est_resid_var <- (summary(fit)$sigma)^2
# this matrix computation also yields the same result
# (t(Y - Z %*% beta_hat) %*% (Y - Z %*% beta_hat)) / 12
z_0 <- matrix(c(1, 7, 8))
z0prime <- t(z_0)
y_0_hat <- z0prime %*% beta_hat
# Named `t_crit` (was `t`) so the base transpose function t() is not masked.
t_crit <- qt(1 - 0.025, 12) # 95% CI with df = n - r - 1 = 15 - 2 - 1 = 12
ZprimeZ_inv <- solve(t(Z) %*% Z)
sqrt_component <- sqrt(est_resid_var * z0prime %*% ZprimeZ_inv %*% z_0)
right_CI <- y_0_hat + (t_crit * sqrt_component)
left_CI <- y_0_hat - (t_crit * sqrt_component)

# 8f: 95% prediction interval for a new (unobserved) response at z_0.
sqrt_component_unobs <- sqrt(est_resid_var * (1 + z0prime %*% ZprimeZ_inv %*% z_0))
right_CI_unobs <- y_0_hat + (t_crit * sqrt_component_unobs)
left_CI_unobs <- y_0_hat - (t_crit * sqrt_component_unobs)

# 8g: influence diagnostics.
# Outliers (QQ plot plus Bonferroni outlier test); refit without them.
H <- round(Z %*% solve(t(Z) %*% Z) %*% t(Z), 2) # hat matrix
qqPlot(fit)
outlierTest(fit)
dat_no_outlier <- data[-c(3, 6), ]
no_outlier <- lm(resp ~ z_1 + z_2, data = dat_no_outlier)
summary(no_outlier)

# Leverage: flag hat values above 2x or 3x the mean hat value.
hatvalues(fit)
hv <- as.data.frame(hatvalues(fit))
mn <- mean(hatvalues(fit))
# BUG FIX: the 2x-mean branch previously also printed 'x3'; 'x2' matches the
# threshold it tests.
hv$warn <- ifelse(hv[, 'hatvalues(fit)'] > 3 * mn, 'x3',
                  ifelse(hv[, 'hatvalues(fit)'] > 2 * mn, 'x2', '-'))
hv
plot(hatvalues(fit), type = "h")
dat_no_lev <- data[-c(1, 2, 3, 4, 6, 14, 15), ]
no_lev <- lm(resp ~ z_1 + z_2, data = dat_no_lev)
summary(no_lev)
leveragePlots(fit)

# Influential observations via Cook's distance; refit without them.
ols_plot_cooksd_chart(fit)
dat_no_inf <- data[-c(3, 6, 14), ]
no_inf <- lm(resp ~ z_1 + z_2, data = dat_no_inf)
summary(no_inf)
| /HW1/Answers/8.R | no_license | hanambrose/Linear-and-Multilinear-Models | R | false | false | 2,141 | r | library(ggplot2)
library(olsrr)
library(car)
library(ellipse)
# Multiple linear regression homework (problem 8): fit resp ~ z_1 + z_2,
# rebuild the OLS estimates/intervals by hand from the design matrix, then
# run outlier / leverage / influence diagnostics.
data <- read.table("HW1-Prob.csv", header = TRUE, sep = ',')
ggplot(data, aes(x=z_2, y=resp)) +
  geom_point(color='#2980B9', size = 4) +
  geom_smooth(method=lm, color='#2C3E50')+
  theme_light()
fit <- lm(resp ~ z_1 + z_2, data=data)
summary(fit) # show results
# Hand-rolled OLS: beta_hat = (Z'Z)^-1 Z'Y with an explicit design matrix.
y = data[1]
z_1 = data[2]
z_2 = data[3]
Y = as.matrix(y)
Z = as.matrix(cbind(1,z_1, z_2)) #design matrix
beta_hat <- solve(t(Z)%*%Z)%*%(t(Z)%*%Y)
#8c: marginal 95% CIs and the joint confidence ellipse for (z_1, z_2)
confint(fit, level=0.95)
plot(ellipse(fit, which = c('z_1', 'z_2'), level = 0.95), type = 'l')
points(fit$coefficients['z_1'], fit$coefficients['z_2'])
#8d: likelihood-ratio test of the reduced model (drop z_2) vs the full model
full.mod <- lm(formula = resp ~ z_1 + z_2, data = data)
reduced.mod <- lm(formula = resp ~ z_1, data = data)
anova(reduced.mod, full.mod, test = "LRT")
#8e: CI for the mean response at z_0 = (1, 7, 8)
est_resid_var = (summary(fit)$sigma)**2
#this matrix computation also yields the same result
#(t(Y-Z%*%beta_hat)%*%(Y-Z%*%beta_hat))/12
z_0 = matrix(c(1,7,8))
z0prime = t(z_0)
y_0_hat = z0prime%*%beta_hat
t = qt(1-0.025,12) # 95% CI with df = n-r-1 =15-2-1= 12
ZprimeZ_inv = solve(t(Z)%*%Z)
sqrt_component = sqrt(est_resid_var*z0prime%*%ZprimeZ_inv%*%z_0)
right_CI = y_0_hat + (t* sqrt_component)
left_CI = y_0_hat - (t* sqrt_component)
#8f: prediction interval for a new (unobserved) response at z_0
sqrt_component_unobs = sqrt(est_resid_var*(1+z0prime%*%ZprimeZ_inv%*%z_0))
right_CI_unobs = y_0_hat + (t* sqrt_component_unobs)
left_CI_unobs = y_0_hat - (t* sqrt_component_unobs)
#8g
#outliers
H = round (Z%*%solve(t(Z)%*%Z)%*%t(Z),2)  # hat matrix
qqPlot(fit)
outlierTest(fit)
dat_no_outlier <- data[-c(3,6), ]
no_outlier <- lm(resp ~ z_1 + z_2, data=dat_no_outlier)
summary(no_outlier)
#Leverage
hatvalues(fit)
hv <- as.data.frame(hatvalues(fit))
mn <-mean(hatvalues(fit))
# BUG FIX: the second tier previously labelled values > 2*mean as 'x3' as
# well; label them 'x2' so the two warning levels are distinguishable.
hv$warn <- ifelse(hv[, 'hatvalues(fit)']>3*mn, 'x3',
                  ifelse(hv[, 'hatvalues(fit)']>2*mn, 'x2', '-' ))
hv
plot(hatvalues(fit), type = "h")
dat_no_lev<- data[-c(1,2,3,4,6,14,15), ]
no_lev <- lm(resp ~ z_1 + z_2, data=dat_no_lev)
summary(no_lev)
leveragePlots(fit)
#Influential with cooks D
ols_plot_cooksd_chart(fit)
dat_no_inf<- data[-c(3,6,14), ]
no_inf <- lm(resp ~ z_1 + z_2, data=dat_no_inf)
summary(no_inf)
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/functions_targetHub.R
\name{as.list.targetHub}
\alias{as.list.targetHub}
\title{Function that creates a list from a targetHub object.}
\usage{
\method{as.list}{targetHub}(x, ...)
}
\arguments{
\item{x}{targetHub object}
\item{...}{other arguments}
}
\value{
list
}
\description{
Function that creates a list from a targetHub object.
}
| /miRNAtargetpackage/man/as.list.targetHub.Rd | no_license | camgu844/miRNA | R | false | false | 423 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/functions_targetHub.R
\name{as.list.targetHub}
\alias{as.list.targetHub}
\title{Function that creates a list from a targetHub object.}
\usage{
\method{as.list}{targetHub}(x, ...)
}
\arguments{
\item{x}{targetHub object}
\item{...}{other arguments}
}
\value{
list
}
\description{
Function that creates a list from a targetHub object.
}
|
testlist <- list(cost = structure(c(5.93969937143721e+180, 4.80544713909544e-268, 5.27956925781844e-134, 4.33437141566104e-293, 5.44822701017748e+306, 3.79280313177243e+61, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(6L, 3L)), flow = structure(c(3.80768289350145e+125, 8.58414828913381e+155, 3.37787969964034e+43, 2.83184518248624e-19, 7.49487861616974e+223, 8.52929459241516e+86, 2.45741775099414e-215, 6.59159492364721e+70, 2.33952815237705e+77, 3.1674929214459e+282, 1.0709591854537e+63, 7.43876613929257e+191, 8.31920980250172e+78, 1.26747339146319e+161, 5.68076251052666e-141, 9.98610641272026e+182, 232665383858.491, 3.75587249552337e-34, 8.67688084914444e+71, 2.85936996201565e+135, 5.49642980516022e+268, 854537881567133, 1.33507119962914e+95, 2.76994725819545e+63, 4.08029273738449e+275, 4.93486427894025e+289, 1.24604061502336e+294, 3.2125809174767e-185, 9.58716852715016e+39, 6.94657888227078e+275, 3.46330348083089e+199, 3.28318446108869e-286, 6.12239214969922e-296, 8.47565288269902e+60, 6.92144078002958e-125 ), .Dim = c(5L, 7L)))
result <- do.call(epiphy:::costTotCPP,testlist)
str(result) | /epiphy/inst/testfiles/costTotCPP/AFL_costTotCPP/costTotCPP_valgrind_files/1615925964-test.R | no_license | akhikolla/updatedatatype-list2 | R | false | false | 1,120 | r | testlist <- list(cost = structure(c(5.93969937143721e+180, 4.80544713909544e-268, 5.27956925781844e-134, 4.33437141566104e-293, 5.44822701017748e+306, 3.79280313177243e+61, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(6L, 3L)), flow = structure(c(3.80768289350145e+125, 8.58414828913381e+155, 3.37787969964034e+43, 2.83184518248624e-19, 7.49487861616974e+223, 8.52929459241516e+86, 2.45741775099414e-215, 6.59159492364721e+70, 2.33952815237705e+77, 3.1674929214459e+282, 1.0709591854537e+63, 7.43876613929257e+191, 8.31920980250172e+78, 1.26747339146319e+161, 5.68076251052666e-141, 9.98610641272026e+182, 232665383858.491, 3.75587249552337e-34, 8.67688084914444e+71, 2.85936996201565e+135, 5.49642980516022e+268, 854537881567133, 1.33507119962914e+95, 2.76994725819545e+63, 4.08029273738449e+275, 4.93486427894025e+289, 1.24604061502336e+294, 3.2125809174767e-185, 9.58716852715016e+39, 6.94657888227078e+275, 3.46330348083089e+199, 3.28318446108869e-286, 6.12239214969922e-296, 8.47565288269902e+60, 6.92144078002958e-125 ), .Dim = c(5L, 7L)))
result <- do.call(epiphy:::costTotCPP,testlist)
str(result) |
options("RzmqJobQueue.logfile" = "/tmp/client.log")
options("RzmqJobQueue.level" = log4r:::DEBUG)
library(RzmqJobQueue)
# `argv` may be injected by the test harness before sourcing this script;
# the default Inf means "run forever".
if (!exists("argv")) argv <- Inf
# Finite argv[1]: process exactly that many jobs from the queue endpoint.
# Otherwise: keep pulling jobs indefinitely.
if (argv[1] != Inf) {
for(i in 1:argv[1]) {
do_job("tcp://localhost:12345")
}
} else {
while(TRUE) {
do_job("tcp://localhost:12345")
}
}
| /tests/client.single.tests.R | no_license | wush978/RzmqJobQueue | R | false | false | 306 | r | options("RzmqJobQueue.logfile" = "/tmp/client.log")
options("RzmqJobQueue.level" = log4r:::DEBUG)
library(RzmqJobQueue)
if (!exists("argv")) argv <- Inf
if (argv[1] != Inf) {
for(i in 1:argv[1]) {
do_job("tcp://localhost:12345")
}
} else {
while(TRUE) {
do_job("tcp://localhost:12345")
}
}
|
# Project name : SPOJ: OMWG - One more weird game
# Author : Wojciech Raszka
# Date created : 2019-02-17
# Description :
# Status : Accepted (23250636)
# Comment : You can see on the grid as a grid graph where squares are vertices and neighboring squeres are edges. As it known the nxm grid graph has 2nm - n - m edges.
f <- file('stdin', open='r')
# Number of test cases. NOTE(review): `T` shadows the built-in TRUE alias;
# harmless here, but worth renaming if the script grows.
T = as.integer(readLines(f, n=1))
for (t in 1:T){
# Each case is one line "n m"; the n x m grid graph has
# n*(m-1) + m*(n-1) = 2nm - n - m edges (see header comment).
nm = unlist(strsplit(readLines(f, n=1), " "))
n = as.integer(nm[1])
m = as.integer(nm[2])
write(2*n*m - n - m, stdout())
}
| /SPOJ/OMWG_One more weird game/One more weird game.R | no_license | GitPistachio/Competitive-programming | R | false | false | 553 | r | # Project name : SPOJ: OMWG - One more weird game
# Author : Wojciech Raszka
# Date created : 2019-02-17
# Description :
# Status : Accepted (23250636)
# Comment : You can see on the grid as a grid graph where squares are vertices and neighboring squeres are edges. As it known the nxm grid graph has 2nm - n - m edges.
f <- file('stdin', open='r')
T = as.integer(readLines(f, n=1))
for (t in 1:T){
nm = unlist(strsplit(readLines(f, n=1), " "))
n = as.integer(nm[1])
m = as.integer(nm[2])
write(2*n*m - n - m, stdout())
}
|
# Apply the renaming rules from ./renaming.txt (tab-separated, header row,
# columns `type`, `source`, `target`) to each element of x.
#   type == "grepl": if `source` matches anywhere in the value, replace the
#                    whole value by `target`.
#   type == "gsub":  substitute `source` by `target` within the value.
# Unless textbreak is TRUE, all hyphens are stripped afterwards (hyphens
# serve as manual text-break markers in the rule targets).
# Returns an unnamed character vector the same length as x.
rename.fkt <- function(x, textbreak = FALSE) {
  renaming <- read.table('renaming.txt', sep = '\t', header = TRUE)
  as.character(sapply(x, function(x) {
    # seq_len() (not 1:nrow) so an empty rule file applies no rules
    # instead of indexing row 0 and erroring.
    for (i in seq_len(nrow(renaming))) {
      if (renaming$type[i] == "grepl") {
        if (grepl(renaming$source[i], x)[1] == TRUE) x = renaming$target[i]
      }
      if (renaming$type[i] == "gsub") {
        x = gsub(renaming$source[i], renaming$target[i], x, fixed = FALSE)
      }
    }
    if (textbreak == FALSE) {
      x = gsub('[-]', '', x)
    }
    return(as.character(x))
  }))
}
# Variant that keeps hyphens (manual line breaks) intact.
rename.fkt.break = function(x) rename.fkt(x, textbreak = TRUE)
| /tools/proc_rename.R | no_license | hannesdatta/brand-equity-journal-of-marketing | R | false | false | 542 | r |
rename.fkt <- function(x, textbreak=F) {
renaming <- read.table('renaming.txt', sep='\t',header=T)
as.character(sapply(x, function(x) {
x=x
for (i in 1:nrow(renaming)) {
if (renaming$type[i]=="grepl") {
if (grepl(renaming$source[i], x)[1]==T) x = renaming$target[i]
}
if (renaming$type[i]=="gsub") {
x=gsub(renaming$source[i], renaming$target[i], x,fixed=F)
}
}
if (textbreak==F) {
x=gsub('[-]', '',x)
}
return(as.character(x))
}))
}
rename.fkt.break = function(x) rename.fkt(x, textbreak=T)
|
print.anchors.chopit <- function(x,...) {
# S3 print method for "anchors.chopit" fits: delegate to summary() so that
# auto-printing a fit shows its summary.
# NOTE(review): returns the summary result visibly rather than the
# conventional invisible(x); confirm no caller relies on the return value
# before changing this.
summary(x)
}
| /R/print.anchors.chopit.R | no_license | cran/anchors | R | false | false | 57 | r | print.anchors.chopit <- function(x,...) {
summary(x)
}
|
\name{plotIllnessDeathModel}
\alias{plotIllnessDeathModel}
\title{Plotting an illness-death-model.}
\usage{
plotIllnessDeathModel(stateLabels, style = 1, recovery = FALSE, ...)
}
\arguments{
\item{stateLabels}{Labels for the three boxes.}
\item{style}{Either \code{1} or anything else, switches
the orientation of the graph. Hard to explain in words,
see examples.}
\item{recovery}{Logical. If \code{TRUE} there will be an
arrow from the illness state to the initial state.}
\item{\dots}{Arguments passed to plot.Hist.}
}
\description{
Plotting an illness-death-model using \code{plot.Hist}.
}
\examples{
plotIllnessDeathModel()
plotIllnessDeathModel(style=2)
plotIllnessDeathModel(style=2,
stateLabels=c("a","b\\nc","d"),
box1.col="yellow",
box2.col="green",
box3.col="red")
}
\author{
Thomas Alexander Gerds <tag@biostat.ku.dk>
}
\seealso{
\code{\link{plotCompetingRiskModel}},
\code{\link{plot.Hist}}
}
\keyword{survival}
| /man/plotIllnessDeathModel.Rd | no_license | ElianoMarques/prodlim | R | false | false | 1,031 | rd | \name{plotIllnessDeathModel}
\alias{plotIllnessDeathModel}
\title{Plotting an illness-death-model.}
\usage{
plotIllnessDeathModel(stateLabels, style = 1, recovery = FALSE, ...)
}
\arguments{
\item{stateLabels}{Labels for the three boxes.}
\item{style}{Either \code{1} or anything else, switches
the orientation of the graph. Hard to explain in words,
see examples.}
\item{recovery}{Logical. If \code{TRUE} there will be an
arrow from the illness state to the initial state.}
\item{\dots}{Arguments passed to plot.Hist.}
}
\description{
Plotting an illness-death-model using \code{plot.Hist}.
}
\examples{
plotIllnessDeathModel()
plotIllnessDeathModel(style=2)
plotIllnessDeathModel(style=2,
stateLabels=c("a","b\\nc","d"),
box1.col="yellow",
box2.col="green",
box3.col="red")
}
\author{
Thomas Alexander Gerds <tag@biostat.ku.dk>
}
\seealso{
\code{\link{plotCompetingRiskModel}},
\code{\link{plot.Hist}}
}
\keyword{survival}
|
# Fit the reduced Bichrom (wood/herb) chromosome-evolution model for
# eudicots: maximise the log-likelihood with Nelder-Mead starting from a
# sampled parameter vector, then write estimates + objective to CSV.
source( "masternegloglikereduced1.R" )
source("eudicottree.R" )
library( "expm" )
source( "Qmatrixwoodherb3.R" )
source("Pruning2.R")
# Chromosome-count data per taxon (comma separated, no header).
bichrom.dataset<-read.table( "eudicotvals.txt",header=FALSE,sep=",",stringsAsFactors=FALSE)
last.state=50  # largest chromosome number modelled
# Optimiser starting values, drawn from a pre-generated uniform sample.
uniform.samples<-read.csv("sample221.csv",header=FALSE)
a<- as.numeric(t(uniform.samples))
# Flat root prior over the 2*(last.state+1) states (wood/herb x count).
p.0<-rep(1,2*(last.state+1))/(2*(last.state+1))
results<-rep(0,10)
mle<-try(optim(par=a,fn=negloglikelihood.wh, method= "Nelder-Mead", bichrom.phy=angiosperm.tree, bichrom.data=bichrom.dataset,max.chromosome=last.state,pi.0=p.0),silent=TRUE)
print(mle)
# BUG FIX: use inherits() rather than class(mle) == "try-error"; class()
# can return a length > 1 vector, which errors inside if() on R >= 4.2.
if(inherits(mle, "try-error")){results<-rep(NA,10)}else{
results[1:9]<-exp(mle$par)
results[10]<-mle$value}
# Columns 1-9: back-transformed (exp) parameter estimates; column 10: the
# final negative log-likelihood value.
write.table(results,file="results221.csv",sep=",")
| /Reduced model optimizations/explorelikereduced221.R | no_license | roszenil/Bichromdryad | R | false | false | 750 | r | source( "masternegloglikereduced1.R" )
source("eudicottree.R" )
library( "expm" )
source( "Qmatrixwoodherb3.R" )
source("Pruning2.R")
bichrom.dataset<-read.table( "eudicotvals.txt",header=FALSE,sep=",",stringsAsFactors=FALSE)
last.state=50
uniform.samples<-read.csv("sample221.csv",header=FALSE)
a<- as.numeric(t(uniform.samples))
p.0<-rep(1,2*(last.state+1))/(2*(last.state+1))
results<-rep(0,10)
mle<-try(optim(par=a,fn=negloglikelihood.wh, method= "Nelder-Mead", bichrom.phy=angiosperm.tree, bichrom.data=bichrom.dataset,max.chromosome=last.state,pi.0=p.0),silent=TRUE)
print(mle)
if(class(mle)=="try-error"){results<-rep(NA,10)}else{
results[1:9]<-exp(mle$par)
results[10]<-mle$value}
write.table(results,file="results221.csv",sep=",")
|
# Yige Wu @WashU May 2020
## plot cell type on integration UMAP
## NOTE(review): despite the header above, this script draws a stacked
## barplot of per-sample cell fractions within each tumor-cell meta-cluster.

# set up libraries and output directory -----------------------------------
## set working directory
# dir_base = "~/Box/Ding_Lab/Projects_Current/RCC/ccRCC_snRNA/"
dir_base = "~/Library/CloudStorage/Box-Box/Ding_Lab/Projects_Current/RCC/ccRCC_snRNA"
setwd(dir_base)
packages = c(
  "rstudioapi",
  "plyr",
  "dplyr",
  "stringr",
  "reshape2",
  "data.table",
  "ggplot2"
)
for (pkg_name_tmp in packages) {
  library(package = pkg_name_tmp, character.only = T)
}
source("./ccRCC_snRNA_analysis/functions.R")
## set run id
version_tmp <- 1
run_id <- paste0(format(Sys.Date(), "%Y%m%d") , ".v", version_tmp)
## set output directory
dir_out <- paste0(makeOutDir(), run_id, "/")
dir.create(dir_out)
# input dependencies ------------------------------------------------------
## input UMAP info per barcode
barcode2cluster_df <- fread(input = "./Resources/Analysis_Results/integration/seuratintegrate_34_ccRCC_samples/FindClusters_30_ccRCC_tumorcells_changeresolutions/20220405.v1/ccRCC.34Sample.Tumorcells.Integrated.ReciprocalPCA.Metadata.ByResolution.20220405.v1.tsv", data.table = F)
## input meta data
metadata_df <- fread(data.table = F, input = "./Resources/Analysis_Results/sample_info/make_meta_data/20210809.v1/meta_data.20210809.v1.tsv")
# make plot data----------------------------------------------------------
## map snRNA aliquot IDs to human-readable sample IDs
barcode2cluster_df$sample <- mapvalues(x = barcode2cluster_df$orig.ident, from = metadata_df$Aliquot.snRNA, to = as.vector(metadata_df$Aliquot.snRNA.WU))
## meta-cluster label: 0-based Seurat cluster id (res = 1) -> "MC1".."MC18"
barcode2cluster_df$clusterid_plot <- paste0("MC",(barcode2cluster_df$integrated_snn_res.1 + 1))
plot_data_df <- barcode2cluster_df %>%
  group_by(sample, clusterid_plot) %>%
  summarise(number_cells_bycluster_bysample = n())
plot_data_df2 <- barcode2cluster_df %>%
  group_by(sample) %>%
  summarise(number_cells_bysample = n())
plot_data_df3 <- barcode2cluster_df %>%
  group_by(clusterid_plot) %>%
  summarise(number_cells_bycluster = n())
## mapvalues() returns character, so the attached counts are converted back
## to numeric immediately after each join.
plot_data_df$number_cells_bysample <- mapvalues(x = plot_data_df$sample, from = plot_data_df2$sample, to = as.vector(plot_data_df2$number_cells_bysample))
plot_data_df$number_cells_bysample <- as.numeric(plot_data_df$number_cells_bysample)
plot_data_df$number_cells_bycluster <- mapvalues(x = plot_data_df$clusterid_plot, from = plot_data_df3$clusterid_plot, to = as.vector(plot_data_df3$number_cells_bycluster))
plot_data_df$number_cells_bycluster <- as.numeric(plot_data_df$number_cells_bycluster)
plot_data_df <- plot_data_df %>%
  mutate(perc_cells_bysample_eachcluster = (number_cells_bycluster_bysample/number_cells_bycluster)*100)
plot_data_df$clusterid_plot <- factor(x = plot_data_df$clusterid_plot, levels = paste0("MC", 1:18))
# make colors -------------------------------------------------------------
sampleids_ordered <- sort(unique(plot_data_df$sample))
## NOTE(review): Polychrome is used via :: but is not listed in `packages`
## above -- confirm it is installed in the analysis environment.
colors_cellgroup <- Polychrome::palette36.colors(n = length(sampleids_ordered))
names(colors_cellgroup) <- sampleids_ordered
# make plots --------------------------------------------------------------
p <- ggplot()
p <- p + geom_bar(data = plot_data_df,
                  mapping = aes(x = clusterid_plot, y = perc_cells_bysample_eachcluster, fill = sample), stat = "identity")
p <- p + scale_fill_manual(values = colors_cellgroup)
p <- p + guides(fill = guide_legend(override.aes = list(size=4), title = NULL))
p <- p + ylab("% cells by sample")
p <- p + theme_classic()
p <- p + theme(#axis.ticks.x=element_blank(), axis.line = element_blank(),
  axis.text.x = element_text(angle = 90, hjust = 1, vjust = 0.5, size = 15, color = "black"),
  axis.text.y = element_text(size = 15, color = "black"), axis.title.x = element_blank(), axis.title.y = element_text(size = 15))
p <- p + theme(legend.position="right", aspect.ratio=1, legend.text = element_text(size = 14))
## save as pdf
file2write <- paste0(dir_out, "perc_cells_bysample_eachcluster", ".pdf")
pdf(file = file2write, width = 8, height = 5, useDingbats = F)
print(p)
dev.off()
## save as png
file2write <- paste0(dir_out, "perc_cells_bysample_eachcluster", ".png")
png(filename = file2write, width = 1200, height = 800, res = 150)
print(p)
dev.off()
# | /integration/seuratintegrate_34_ccRCC_samples/plotting/barplot_30ccRCC_tumorcellreclustered_count_res1_bycluster_bysample.R | no_license | ding-lab/ccRCC_snRNA_analysis | R | false | false | 4,202 | r | # Yige Wu @WashU May 2020
## plot cell type on integration UMAP
# set up libraries and output directory -----------------------------------
## set working directory
# dir_base = "~/Box/Ding_Lab/Projects_Current/RCC/ccRCC_snRNA/"
dir_base = "~/Library/CloudStorage/Box-Box/Ding_Lab/Projects_Current/RCC/ccRCC_snRNA"
setwd(dir_base)
packages = c(
"rstudioapi",
"plyr",
"dplyr",
"stringr",
"reshape2",
"data.table",
"ggplot2"
)
for (pkg_name_tmp in packages) {
library(package = pkg_name_tmp, character.only = T)
}
source("./ccRCC_snRNA_analysis/functions.R")
## set run id
version_tmp <- 1
run_id <- paste0(format(Sys.Date(), "%Y%m%d") , ".v", version_tmp)
## set output directory
dir_out <- paste0(makeOutDir(), run_id, "/")
dir.create(dir_out)
# input dependencies ------------------------------------------------------
## input UMAP info per barcode
barcode2cluster_df <- fread(input = "./Resources/Analysis_Results/integration/seuratintegrate_34_ccRCC_samples/FindClusters_30_ccRCC_tumorcells_changeresolutions/20220405.v1/ccRCC.34Sample.Tumorcells.Integrated.ReciprocalPCA.Metadata.ByResolution.20220405.v1.tsv", data.table = F)
## input meta data
metadata_df <- fread(data.table = F, input = "./Resources/Analysis_Results/sample_info/make_meta_data/20210809.v1/meta_data.20210809.v1.tsv")
# make plot data----------------------------------------------------------
barcode2cluster_df$sample <- mapvalues(x = barcode2cluster_df$orig.ident, from = metadata_df$Aliquot.snRNA, to = as.vector(metadata_df$Aliquot.snRNA.WU))
barcode2cluster_df$clusterid_plot <- paste0("MC",(barcode2cluster_df$integrated_snn_res.1 + 1))
plot_data_df <- barcode2cluster_df %>%
group_by(sample, clusterid_plot) %>%
summarise(number_cells_bycluster_bysample = n())
plot_data_df2 <- barcode2cluster_df %>%
group_by(sample) %>%
summarise(number_cells_bysample = n())
plot_data_df3 <- barcode2cluster_df %>%
group_by(clusterid_plot) %>%
summarise(number_cells_bycluster = n())
plot_data_df$number_cells_bysample <- mapvalues(x = plot_data_df$sample, from = plot_data_df2$sample, to = as.vector(plot_data_df2$number_cells_bysample))
plot_data_df$number_cells_bysample <- as.numeric(plot_data_df$number_cells_bysample)
plot_data_df$number_cells_bycluster <- mapvalues(x = plot_data_df$clusterid_plot, from = plot_data_df3$clusterid_plot, to = as.vector(plot_data_df3$number_cells_bycluster))
plot_data_df$number_cells_bycluster <- as.numeric(plot_data_df$number_cells_bycluster)
plot_data_df <- plot_data_df %>%
mutate(perc_cells_bysample_eachcluster = (number_cells_bycluster_bysample/number_cells_bycluster)*100)
plot_data_df$clusterid_plot <- factor(x = plot_data_df$clusterid_plot, levels = paste0("MC", 1:18))
# make colors -------------------------------------------------------------
sampleids_ordered <- sort(unique(plot_data_df$sample))
colors_cellgroup <- Polychrome::palette36.colors(n = length(sampleids_ordered))
names(colors_cellgroup) <- sampleids_ordered
# make plots --------------------------------------------------------------
p <- ggplot()
p <- p + geom_bar(data = plot_data_df,
mapping = aes(x = clusterid_plot, y = perc_cells_bysample_eachcluster, fill = sample), stat = "identity")
p <- p + scale_fill_manual(values = colors_cellgroup)
p <- p + guides(fill = guide_legend(override.aes = list(size=4), title = NULL))
p <- p + ylab("% cells by sample")
p <- p + theme_classic()
p <- p + theme(#axis.ticks.x=element_blank(), axis.line = element_blank(),
axis.text.x = element_text(angle = 90, hjust = 1, vjust = 0.5, size = 15, color = "black"),
axis.text.y = element_text(size = 15, color = "black"), axis.title.x = element_blank(), axis.title.y = element_text(size = 15))
p <- p + theme(legend.position="right", aspect.ratio=1, legend.text = element_text(size = 14))
## save as pdf
file2write <- paste0(dir_out, "perc_cells_bysample_eachcluster", ".pdf")
pdf(file = file2write, width = 8, height = 5, useDingbats = F)
print(p)
dev.off()
## save as png
file2write <- paste0(dir_out, "perc_cells_bysample_eachcluster", ".png")
png(filename = file2write, width = 1200, height = 800, res = 150)
print(p)
dev.off()
# |
#' set multiple echarts layout
#'
#' Use the same layout organization as the original grDevices layout function.
#'
#'
#' @param multiEcharts A multiple echarts object to set the layout.
#' @export
echartsLayout <- function(multiEcharts){
# Placeholder: layout composition (grDevices::layout-style arrangement) is
# not implemented yet; currently just echoes the object's class for
# debugging.
print(class(multiEcharts))
}
#' Reports whether x is an option object
#' @param x An object to test
#' @export
is.option <- function(x) {
  # TRUE when x carries the S3 class "option" (possibly among others).
  inherits(x, "option")
}
#' Set recharts option
#'
#' @export
#'
option <- function(...) {
  # Collect the supplied fields into a list tagged with S3 class "option".
  structure(list(...), class = "option")
}
#' Set recharts title option
#'
#' @export
#'
eTitle <- function(...) {
  # Bundle title settings into an "option" container for `+.echarts`.
  structure(list(...), class = "option")
}
#' Set recharts general option
#'
#' @param ... Named option fields to collect.
#' @return A list carrying S3 class \code{"option"}.
#' @export
eOption <- function(...) structure(list(...), class = "option")

#' Set recharts toolbox option
#'
#' @param ... Named toolbox settings.
#' @return A list carrying S3 class \code{"option"}.
#' @export
eToolbox <- function(...) structure(list(...), class = "option")

#' Set recharts drag-recalculation option
#'
#' @param ... Named calculable settings.
#' @return A list carrying S3 class \code{"option"}.
#' @export
eCalculable <- function(...) structure(list(...), class = "option")

#' Set recharts legend option
#'
#' @param ... Named legend settings.
#' @return A list carrying S3 class \code{"option"}.
#' @export
eLegend <- function(...) structure(list(...), class = "option")

#' Set recharts tooltip option
#'
#' @param ... Named tooltip settings.
#' @return A list carrying S3 class \code{"option"}.
#' @export
eTooltip <- function(...) structure(list(...), class = "option")

#' Set recharts dataRange option
#'
#' @param ... Named dataRange settings.
#' @return A list carrying S3 class \code{"option"}.
#' @export
eDataRange <- function(...) structure(list(...), class = "option")

#' Set recharts x axis option
#'
#' @param ... Named x-axis settings.
#' @return A list carrying S3 class \code{"option"}.
#' @export
eAxis.X <- function(...) structure(list(...), class = "option")

#' Set recharts y axis option
#'
#' @param ... Named y-axis settings.
#' @return A list carrying S3 class \code{"option"}.
#' @export
eAxis.Y <- function(...) structure(list(...), class = "option")

#' Set recharts polar option
#'
#' @param ... Named polar settings.
#' @return A list carrying S3 class \code{"option"}.
#' @export
ePolar <- function(...) structure(list(...), class = "option")

#' Set recharts dataZoom option
#'
#' @param ... Named dataZoom settings.
#' @return A list carrying S3 class \code{"option"}.
#' @export
eDataZoom <- function(...) structure(list(...), class = "option")

#' Set recharts theme option
#'
#' @param ... Named theme settings.
#' @return A list carrying S3 class \code{"option"}.
#' @export
eTheme <- function(...) structure(list(...), class = "option")

#' Set recharts grid option
#'
#' @param ... Named grid settings.
#' @return A list carrying S3 class \code{"option"}.
#' @export
eGrid <- function(...) structure(list(...), class = "option")
# Extract and validate the setter-constructor name from a deparsed call
# expression such as "eTitle(text = 'x')" -> "eTitle". Stops with an error
# for unknown names; returns the bare name otherwise.
"setFunctionName" <- function(e2name){
  e2name <- strstrip(e2name)  # strstrip(): package-internal whitespace trim
  functionName <- gsub("\\(.*", "", e2name)  # drop the "(...)" argument text
  #print(functionName)
  setFuncList <- c("eOption", "eTitle", "eToolbox", "eCalculable", "eLegend",
                   "eTooltip", "eDataRange", "eAxis.X", "eAxis.Y", "ePolar",
                   "eDataZoom", "eTheme", "option", "eGrid")
  if (!functionName %in% setFuncList) {
    # Fixed the "unspported" typo; also dropped the unreachable
    # return(FALSE) that followed stop() in the original.
    stop(paste("unsupported eCharts setting function inputs", functionName))
  }
  functionName
}
#' Modify a recharts by adding on new components.
#'
#' @param e1 An object of class \code{recharts}
#' @param e2 A component to add to \code{e1}
#'
#' @export
#'
#' @seealso \code{\link{set}}
#' @method + echarts
"+.echarts" <- function(e1, e2){
  # Add a component to an echarts object: `chart + eTitle(text = "...")`.
  # The setter name is recovered from the deparsed right-hand side and
  # validated by setFunctionName(), which stops on unknown names.
  e2name <- deparse(substitute(e2))
  optType <- setFunctionName(e2name)
  # Map each user-facing constructor to its internal setter helper.
  # Note the one irregular pair: eOption -> optionSet.
  setterMap <- c(
    eTitle = "eTitleSet", eToolbox = "eToolboxSet",
    eCalculable = "eCalculableSet", eTheme = "eThemeSet",
    eTooltip = "eTooltipSet", eLegend = "eLegendSet",
    eDataRange = "eDataRangeSet", eAxis.X = "eAxis.XSet",
    eAxis.Y = "eAxis.YSet", ePolar = "ePolarSet",
    eDataZoom = "eDataZoomSet", eOption = "optionSet",
    eGrid = "eGridSet"
  )
  # "option" passes validation but had no switch() branch in the original
  # implementation; mirror that by returning NULL for unmapped types.
  if (!optType %in% names(setterMap)) {
    return(invisible(NULL))
  }
  if ("echarts" %in% class(e1) && is.option(e2)) {
    # Strip the "option" class so the setter receives a plain list.
    class(e2) <- "list"
    return(do.call(setterMap[[optType]], list(e1, optionList = e2)))
  }
}
#' @export
"%+%" <- `+.echarts`
#' Merge the two ECharts into one output .
#'
#' @param e1 An object of class \code{recharts}
#' @param e2 An object of class \code{recharts}
#'
#' @export
#'
#' @seealso \code{\link{set}}
#' @method & echarts
"&.echarts" <- function(e1, e2){
  # Merge two rendered echarts widgets into one output; both operands must
  # be echarts htmlwidgets.
  if(!(inherits(e1, "echarts") & inherits(e2, "echarts")))
    stop("only echarts object can be merged into one widgets...")
  # htmlwidgets::appendContent stacks the second chart after the first.
  chart = htmlwidgets::appendContent(e1, e2)
  # NOTE(review): "multi-ecahrts" looks misspelled ("multi-echarts"?);
  # confirm no downstream code matches this exact class string before
  # fixing it.
  class(chart)[3] = "multi-ecahrts"
  return(chart)
}
#' @export
"%&%" <- `&.echarts`
| /R/plot.recharts.R | permissive | takewiki/recharts3 | R | false | false | 5,815 | r |
#' set multiple echarts layout
#'
#' Use the same layout orgnization as original grDevice layout function.
#'
#'
#' @param multiEcharts A multiple echarts object to set the layout.
#' @export
echartsLayout <- function(multiEcharts){
print(class(multiEcharts))
}
#' Reports whether x is a option object
#' @param x An object to test
#' @export
is.option <- function(x) inherits(x, "option")
#' Set recharts option
#'
#' @export
#'
option <- function(...) {
elements <- list(...)
structure(elements, class ="option")
}
#' Set recharts title option
#'
#' @export
#'
eTitle = function(...){
elements <- list(...)
structure(elements, class ="option")
}
#' Set recharts general option
#'
#' @export
#'
eOption = function(...){
elements <- list(...)
structure(elements, class ="option")
}
#' Set recharts toolbox option
#'
#' @export
#'
eToolbox = function(...){
elements <- list(...)
structure(elements, class ="option")
}
#' Set recharts drag-recaluculation option
#'
#' @export
#'
eCalculable = function(...){
elements <- list(...)
structure(elements, class ="option")
}
#' Set recharts legend option
#'
#' @export
#'
eLegend = function(...){
elements <- list(...)
structure(elements, class ="option")
}
#' Set recharts tooltip option
#'
#' @export
#'
eTooltip = function(...){
elements <- list(...)
structure(elements, class ="option")
}
#' Set recharts dataRange option
#'
#' @export
#'
eDataRange = function(...){
elements <- list(...)
structure(elements, class ="option")
}
#' Set recharts x Axis option
#'
#' @export
#'
eAxis.X = function(...){
elements <- list(...)
structure(elements, class ="option")
}
#' Set recharts y Axis option
#'
#' @export
#'
eAxis.Y = function(...){
elements <- list(...)
structure(elements, class ="option")
}
#' Set recharts polar option
#'
#' @export
#'
ePolar = function(...){
elements <- list(...)
structure(elements, class ="option")
}
#' Set recharts x dataZoom option
#'
#' @export
#'
eDataZoom = function(...){
elements <- list(...)
structure(elements, class ="option")
}
#' Set recharts theme option
#'
#' @export
#'
eTheme = function(...){
elements <- list(...)
structure(elements, class ="option")
}
#' Set recharts grid option
#' @export
#'
eGrid = function(...){
elements <- list(...)
structure(elements, class ="option")
}
"setFunctionName" <- function(e2name){
e2name <- strstrip(e2name)
functionName = gsub("\\(.*", "", e2name)
#print(functionName)
setFuncList <- c("eOption", "eTitle", "eToolbox", "eCalculable", "eLegend", "eTooltip", "eDataRange",
"eAxis.X", "eAxis.Y", "ePolar", "eDataZoom", "eTheme", "option", 'eGrid')
if (!functionName %in% setFuncList){
stop(paste("unspported eCharts setting function inputs", functionName))
return(FALSE)
}else{
return(functionName)
}
}
#' Modify a recharts by adding on new components.
#'
#' @param e1 An object of class \code{recharts}
#' @param e2 A component to add to \code{e1}
#'
#' @export
#'
#' @seealso \code{\link{set}}
#' @method + echarts
"+.echarts" <- function(e1, e2){
e2name <- deparse(substitute(e2))
optType <- setFunctionName(e2name)
switch(optType,
eTitle = {
if ("echarts" %in% class(e1) & is.option(e2)){
class(e2) <- "list"
return(eTitleSet(e1, optionList=e2))
}
},
eToolbox = {
if ("echarts" %in% class(e1) & is.option(e2)){
class(e2) <- "list"
return(eToolboxSet(e1, optionList=e2))
}
},
eCalculable = {
if ("echarts" %in% class(e1) & is.option(e2)){
class(e2) <- "list"
return(eCalculableSet(e1, optionList=e2))
}
},
eTheme = {
if ("echarts" %in% class(e1) & is.option(e2)){
class(e2) <- "list"
return(eThemeSet(e1, optionList=e2))
}
},
eTooltip = {
if ("echarts" %in% class(e1) & is.option(e2)){
class(e2) <- "list"
return(eTooltipSet(e1, optionList=e2))
}
},
eLegend = {
if ("echarts" %in% class(e1) & is.option(e2)){
class(e2) <- "list"
return(eLegendSet(e1, optionList=e2))
}
},
eDataRange = {
if ("echarts" %in% class(e1) & is.option(e2)){
class(e2) <- "list"
return(eDataRangeSet(e1, optionList=e2))
}
},
eAxis.X = {
if ("echarts" %in% class(e1) & is.option(e2)){
class(e2) <- "list"
return(eAxis.XSet(e1, optionList=e2))
}
},
eAxis.Y = {
if ("echarts" %in% class(e1) & is.option(e2)){
class(e2) <- "list"
return(eAxis.YSet(e1, optionList=e2))
}
},
ePolar = {
if ("echarts" %in% class(e1) & is.option(e2)){
class(e2) <- "list"
return(ePolarSet(e1, optionList=e2))
}
},
eDataZoom = {
if ("echarts" %in% class(e1) & is.option(e2)){
class(e2) <- "list"
return(eDataZoomSet(e1, optionList=e2))
}
},
eOption = {
if ("echarts" %in% class(e1) & is.option(e2)){
class(e2) <- "list"
return(optionSet(e1, optionList=e2))
}
},
eGrid = {
if ("echarts" %in% class(e1) & is.option(e2)){
class(e2) <- "list"
return(eGridSet(e1, optionList=e2))
}
}
)
}
#' @export
"%+%" <- `+.echarts`
#' Merge the two ECharts into one output .
#'
#' @param e1 An object of class \code{recharts}
#' @param e2 An object of class \code{recharts}
#'
#' @export
#'
#' @seealso \code{\link{set}}
#' @method & echarts
"&.echarts" <- function(e1, e2){
if(!(inherits(e1, "echarts") & inherits(e2, "echarts")))
stop("only echarts object can be merged into one widgets...")
chart = htmlwidgets::appendContent(e1, e2)
class(chart)[3] = "multi-ecahrts"
return(chart)
}
#' @export
"%&%" <- `&.echarts`
|
# Factor-analyse the numeric columns of m (promax rotation, n factors) and
# return either the loadings, per-observation factor scores, or scores
# aggregated by the single grouping factor.
#
# m       data.frame with exactly one factor column (the grouping variable);
#         all other columns must be numeric.
# n       number of factors to extract.
# cor_min unused; retained for backward compatibility of the signature.
# get     one of "loadings", "scores", "aov_scores".
# thresh  variables whose maximum absolute correlation with any other
#         variable is <= thresh are dropped before the factor analysis.
get_loadings <- function(m, n, cor_min=.20, get=c("loadings", "scores", "aov_scores"), thresh=.2) {
  # is.data.frame() is robust to multi-class objects (e.g. tibbles), unlike
  # class(m) != "data.frame" which errors inside if() on R >= 4.2.
  if (!is.data.frame(m)) stop ("your data must be formatted as a data.frame")
  if (sum(sapply(m, is.factor)) != 1) stop ("your data.frame must contain a single factor or grouping variable")
  if (ncol(m) - sum(sapply(m, is.numeric)) != 1) stop ("all columns except one must be numeric")
  # match.arg() resolves the default length-3 vector to its first element
  # and rejects invalid values early (the original silently returned NULL).
  get <- match.arg(get)
  d <- m[, sapply(m, is.numeric)]
  cat_id <- m[, sapply(m, is.factor)]
  m_cor <- cor(d, method = "pearson")
  diag(m_cor) <- 0  # ignore self-correlation when thresholding
  threshold <- apply(m_cor, 1, function(x) max(abs(x), na.rm = TRUE) > thresh)
  m_trim <- d[, threshold]
  m_z <- data.frame(scale(m_trim, center = TRUE, scale = TRUE))
  fa1 <- factanal(m_trim, factors = n, rotation = "promax")
  f_loadings <- as.data.frame(unclass(fa1$loadings))
  if (get == "loadings") return(f_loadings)
  idx <- seq_len(ncol(f_loadings))  # seq_len is safe if ncol is 0
  g_scores <- lapply(idx, function(i) {
    # Variables loading strongly positive/negative on factor i
    pos <- row.names(f_loadings)[which(f_loadings[, i] > 0.35, arr.ind = TRUE)]
    neg <- row.names(f_loadings)[which(f_loadings[, i] < -0.35, arr.ind = TRUE)]
    # Dimension score = sum of positive-loading z-scores minus the
    # sum of negative-loading z-scores, per observation.
    pos_sums <- rowSums(m_z[pos])
    neg_sums <- rowSums(m_z[neg])
    dim_score <- mapply(function(x, y) x - y, pos_sums, neg_sums)
    dim_score <- data.frame(cbind(dim_score, as.character(cat_id)), stringsAsFactors = FALSE)
    colnames(dim_score) <- c("score", "group")
    dim_score$score <- as.numeric(dim_score$score)
    if (get == "aov_scores") return(dim_score)
    group_score <- aggregate(score ~ group, dim_score, mean)
    return(group_score)
  })
  if (get == "aov_scores") a_scores <- lapply(idx, function(i) data.table::setnames(g_scores[[i]], c(colnames(f_loadings[i]), paste0("group", i))))
  if (get == "scores") g_scores <- lapply(idx, function(i) data.table::setnames(g_scores[[i]], c("group", colnames(f_loadings[i]))))
  if (get == "aov_scores") a_scores <- do.call("cbind", a_scores)
  if (get == "scores") g_scores <- suppressWarnings(Reduce(function(...) merge(..., by = "group", all = TRUE), g_scores))
  if (get == "aov_scores") return(a_scores)
  if (get == "scores") return(g_scores)
}
# Scree plot (with parallel analysis) for the numeric columns of m,
# after dropping weakly correlated variables (max |r| <= 0.2).
# cor_min and get are unused; retained for signature compatibility.
plot_scree <- function(m, cor_min=.20, get=c("loadings", "scores")) {
  # BUG FIX: the original filtered columns with sapply(ds_norm, is.numeric),
  # referencing a global object left over from development; use the
  # function argument m instead.
  d <- m[, sapply(m, is.numeric)]
  m_cor <- cor(d, method = "pearson")
  diag(m_cor) <- 0  # ignore self-correlation when thresholding
  threshold <- apply(m_cor, 1, function(x) max(abs(x), na.rm = TRUE) > .2)
  m_trim <- d[, threshold]
  ev <- eigen(cor(m_trim))
  # parallel()/nScree()/plotnScree() come from the nFactors package.
  ap <- parallel(subject = nrow(m_trim), var = ncol(m_trim), rep = 100, cent = .05)
  nS <- nScree(x = ev$values, aparallel = ap$eigen$qevpea)
  plotnScree(nS, legend = FALSE)
}
plot_scores <- function(loadings, scores, f) {
  # Plot the group scores for factor f as a vertical line stack (vegan),
  # titling the plot with the variables loading > 0.35 on that factor and
  # subtitling it with those loading < -0.35.
  x <- loadings[order(loadings[,f], decreasing = T),]
  pos <- row.names(x)[which(x[,f] > 0.35,arr.ind=T)]
  neg <- row.names(x)[which(x[,f] < -0.35,arr.ind=T)]
  # scores column f+1 holds factor f's scores (column 1 is "group"),
  # matching the layout produced by get_loadings(get = "scores").
  vegan::linestack(scores[,f+1], scores$group, axis=T,
                   air=1.3, hoff=6, at=-1, font=2)
  title(main = paste(pos, collapse='\n'), sub = paste(neg, collapse='\n'),
        cex.main = 1, font.main=2, cex.sub = 1, font.sub= 2)
}
| /resources/functions/mda_functions.R | no_license | follperson/YoutubeDocuscope | R | false | false | 2,986 | r |
get_loadings <- function(m, n, cor_min=.20, get=c("loadings", "scores", "aov_scores"),thresh=.2) {
if (class(m) != "data.frame") stop ("your data must be formatted as a data.frame")
if (sum(sapply(m, is.factor) == T) != 1) stop ("your data.frame must contain a single factor or grouping variable")
if (ncol(m) - sum(sapply(m, is.numeric) == T) != 1) stop ("all columns except one must be numeric")
d <- m[, sapply(m, is.numeric)]
cat_id <- m[, sapply(m, is.factor)]
m_cor <- cor(d, method = "pearson")
diag(m_cor) <- 0
threshold <- apply(m_cor, 1, function(x) max(abs(x), na.rm = T) > thresh)
m_trim <- d[, threshold]
m_z <- data.frame(scale(m_trim, center = TRUE, scale = TRUE))
fa1 <- factanal(m_trim, factors = n, rotation="promax")
f_loadings <- as.data.frame(unclass(fa1$loadings))
if(get=="loadings") return(f_loadings)
idx <- seq(1:ncol(f_loadings))
g_scores <- lapply(idx, function(i){
pos <- row.names(f_loadings)[which(f_loadings[,i] > 0.35,arr.ind=T)]
neg <- row.names(f_loadings)[which(f_loadings[,i] < -0.35,arr.ind=T)]
pos_sums <- rowSums(m_z[pos])
neg_sums <- rowSums(m_z[neg])
dim_score <- mapply(function (x,y) x-y, pos_sums, neg_sums)
dim_score <- data.frame(cbind(dim_score, as.character(cat_id)), stringsAsFactors = F)
colnames(dim_score) <- c("score", "group")
dim_score$score <- as.numeric(dim_score$score)
if(get=="aov_scores") return(dim_score)
group_score <- aggregate(score~group, dim_score, mean)
return(group_score)
})
if(get=="aov_scores") a_scores <- lapply(idx, function(i) data.table::setnames(g_scores[[i]], c(colnames(f_loadings[i]), paste0("group", i))))
if(get=="scores") g_scores <- lapply(idx, function(i) data.table::setnames(g_scores[[i]], c("group", colnames(f_loadings[i]))))
if(get=="aov_scores") a_scores <- do.call("cbind", a_scores)
if(get=="scores") g_scores <- suppressWarnings(Reduce(function(...) merge(..., by = "group", all=T), g_scores))
if(get=="aov_scores") return(a_scores)
if(get=="scores") return(g_scores)
}
plot_scree <- function(m, cor_min=.20, get=c("loadings", "scores")) {
d <- m[, sapply(ds_norm, is.numeric)]
m_cor <- cor(d, method = "pearson")
diag(m_cor) <- 0
threshold <- apply(m_cor, 1, function(x) max(abs(x), na.rm = T) > .2)
m_trim <- d[, threshold]
ev <- eigen(cor(m_trim))
ap <- parallel(subject=nrow(m_trim), var=ncol(m_trim), rep=100, cent=.05)
nS <- nScree(x=ev$values, aparallel=ap$eigen$qevpea)
plotnScree(nS, legend = F)
}
plot_scores <- function(loadings, scores, f) {
x <- loadings[order(loadings[,f], decreasing = T),]
pos <- row.names(x)[which(x[,f] > 0.35,arr.ind=T)]
neg <- row.names(x)[which(x[,f] < -0.35,arr.ind=T)]
vegan::linestack(scores[,f+1], scores$group, axis=T,
air=1.3, hoff=6, at=-1, font=2)
title(main = paste(pos, collapse='\n'), sub = paste(neg, collapse='\n'),
cex.main = 1, font.main=2, cex.sub = 1, font.sub= 2)
}
|
Telecoms-tweets-database/Database initial setup.R
#Setup of sqlite database to store tweets
library(dplyr)
telecoms_db = src_sqlite("Telecoms tweets database",create = T)
copy_to(telecoms_db,final_file,temporary = F) | /Twitter Crawler/scripts/Database Initial Setup.R | no_license | acmy1/TheArtandScienceofData | R | false | false | 216 | r | Telecoms-tweets-database/Database initial setup.R
#Setup of sqlite database to store tweets
library(dplyr)
telecoms_db = src_sqlite("Telecoms tweets database",create = T)
copy_to(telecoms_db,final_file,temporary = F) |
# ZHENG XIN, r0766879, KU LEUVEN
# R version: R version 3.5.0 (2018-04-23) -- "Joy in Playing"
# Regression analysis of invertebrate community stability (SWI) on the
# training half of 'invertebrate.txt'.
# R packages and Data preparation ====
library(lmtest)
library(MASS)
library(gvlma)
library(rstatix)
library(psych)
library(DescTools)
library(performance)
library(car)
library(robustbase)
library(caret)
library(TeachingDemos)
library(segmented)
library(nortest)
# Data preparation
# NOTE(review): rm(list = ls()) wipes the entire workspace. Acceptable for a
# standalone script, but never source() this file from an active session.
rm(list = ls())
data.full <- read.table('invertebrate.txt', header = T)
# Seed fixed to the student number so the train/test split is reproducible.
set.seed(0766879)
d.test <- sample(1:dim(data.full)[1], 200 )
data.test <- data.full[d.test, ]
data.training <- data.full[-d.test, ]
# Q1====
# Perform an exploratory analysis of the variables
# (compute descriptive statistics and make histograms, boxplots, scatter plots, . . . )
# NOTE(review): attach() puts the training columns on the search path; all
# bare column names (SWI, SWF, ...) below refer to data.training until the
# detach() near the end of the script.
attach(data.training)
# Descriptive statistics
str(data.training)
summary(data.training)
# Correlation Matrix with P-values (rstatix)
cor_mat(data.training)
cor_pmat(data.training)
# NOTE(review): this binds the name `cor` to a matrix. Function calls cor(...)
# still resolve to stats::cor, but `cor` used as a value now means this matrix
# (it is reused as `corx` in the multicollinearity section, and at that point
# still includes `duration`).
cor <- cor(data.training[, !names(data.training) == 'SWI']) # correlation between predictor variables
cor # High correlation between duration and temperature (Question 6)
dim(data.training)
# Exploratory analysis
histNorm <- function(x, densCol = "darkblue", xlab = ''){
  # Draw a density-scaled histogram of x with a fitted normal curve on top.
  #
  # x:       numeric vector to plot.
  # densCol: colour of the overlaid normal density curve.
  # xlab:    x-axis label.
  mu <- mean(x)
  sigma <- sqrt(var(x))
  # The y-axis must accommodate both the tallest histogram bar and the
  # peak of the fitted normal density.
  bar_top <- max(hist(x, plot=FALSE)$density)
  dens_vals <- dnorm(x, mean=mu, sd=sigma)
  y_top <- max(bar_top, dens_vals)
  hist(x, prob=TRUE,
       xlab = xlab, ylab="Frequency", ylim=c(0, y_top),
       main="Histogram")
  curve(dnorm(x, mean=mu, sd=sigma),
        col=densCol, lwd=2, add=TRUE)
}
# Histograms with fitted normal densities, one panel per variable (3x2 grid).
par(mfrow = c(3,2))
histNorm(data.training$SWI, xlab = "SWI")
histNorm(data.training$SWF, xlab = "SWF")
histNorm(data.training$temperature, xlab = "temperature")
histNorm(data.training$size, xlab = "size")
histNorm(data.training$management, xlab = "management")
# management as a Categorical predictor, not normally distributed
histNorm(data.training$duration, xlab = "duration")
# boxplots
par(mfrow = c(3,2))
boxplot(SWI, main = "Boxplot of SWI") # two outliers, both smaller than 4.5.
boxplot(SWF, main = "Boxplot of SWF") # three outliers
boxplot(temperature, main = "Boxplot of temperature") # three outliers
boxplot(size, main = "Boxplot of size")
boxplot(management, main = "Boxplot of management")
boxplot(duration, main = "Boxplot of duration") # one outlier
lab_y <- seq(1,200)
# NOTE(review): sourcing code straight from a remote URL is a
# reproducibility/security risk; consider vendoring the helper locally.
source("https://raw.githubusercontent.com/talgalili/R-code-snippets/master/boxplot.with.outlier.label.r")
# Load the function to label all the outliers in a boxplot
par(mfrow = c(2,3))
# OBS 51, 286
boxplot.with.outlier.label(SWI, row.names(data.training), main = "Boxplot of SWI")
# OBS 51, 139, 351
boxplot.with.outlier.label(SWF, row.names(data.training), main = "Boxplot of SWF")
# OBS 1, 3, 397
boxplot.with.outlier.label(temperature, row.names(data.training), main = "Boxplot of temperature")
boxplot.with.outlier.label(size, row.names(data.training), main = "Boxplot of size")
boxplot.with.outlier.label(management, row.names(data.training), main = "Boxplot of management")
# OBS 7
boxplot.with.outlier.label(duration, row.names(data.training), main = "Boxplot of duration")
# Candidate outliers flagged so far: OBS 1, 3, 7, 51, 139, 286, 351, 397
# Scatter plot
par(mfrow = c(1,1))
pairs(data.training)
pairs(data.training, panel = function(x,y) {points(x,y); lines(lowess(x,y), col = "red")})
pairs.panels(data.training) # Robust fitting is done using lowess regression.
pairs.panels(data.training, lm=TRUE) # lm=TRUE, linear regression fits are shown for both y by x and x by y.
# Q2====
# fit a linear first-order regression model with SWI as outcome
# and SWF, temperature, size and management (not duration!) as predictors.
data.training <- data.training[, !names(data.training) == 'duration']
# NOTE(review): despite the name, n_test is the number of TRAINING rows and
# p the number of columns of the training frame; both are reused for the
# leverage/DFFITS cutoffs below.
n_test <- dim(data.training)[1]
n_test
p <- dim(data.training)[2]
p
# linear first-order regression model
fit <- lm(SWI ~ SWF+temperature+size+management, data = data.training)
summary(fit) # test whether a particular regression coefficient is significantly different from zero.
# ANOVA, test whether the regression model as a whole is performing significantly better than a null model
anova(fit)
# SWF, temperature, management are significant, size non-significant
# Individual confidence intervals
alpha <- 0.05
confint(fit, level = 1 - alpha)
# Simultaneous confidence intervals with Bonferroni correction
# NOTE(review): 1 - alpha/2 corresponds to a Bonferroni correction for TWO
# simultaneous intervals; with 4 predictors plus intercept, 1 - alpha/5
# would be the usual choice -- TODO confirm the intended family size.
alpha <- 0.05
confint(fit, level = 1 - alpha / 2)
# (a) Check whether a first-order model adequately captures the variability in the outcome====
# Multiple R-squared: 0.5805, Adjusted R-squared: 0.5719
# R^2
summary(fit)
summary(fit)$r.squared
# 58% of the total variance in the outcome is explained by the first-order model
# (b) Check the Gauss-Markov conditions====
# Check model assumptions
fit.res <- residuals(fit)
fit.stdres <- stdres(fit)
fit.fittedvalues <- fitted.values(fit)
# Four standard diagnostic panels: QQ-plot, residuals vs index,
# residuals vs fitted, standardized residuals vs index.
par(mfrow = c(2,2))
qqnorm(fit.stdres, main="")
qqline(fit.stdres)
plot(fit.res, xlab = "Index", ylab = "Residual")
plot(fit.fittedvalues, fit.res, xlab = "Fitted value", ylab = "Residual")
lines(lowess(fit.res ~ fit.fittedvalues), col = "red")
plot(fit.stdres, xlab = "Index", ylab = "Standardized residual", ylim = c(-3,3))
abline(h = -2.5, lty = 2)
abline(h = 2.5, lty = 2)
# UL: small deviations from normal distributed residuals
# UR: pattern indicates Homoscedasticity (no heteroscedastic errors)
# BL: curved band suggests linearity assumption is not satisfied
# BR: outliers
# Residuals against each individual predictor, lowess smooth overlaid.
par(mfrow = c(2,2))
plot(SWF, fit$residuals, ylab = "Residual")
lines(lowess(fit$residuals ~ SWF), col = "red")
# plot indicates the linear model is defective (add quadratic terms)
plot(temperature, fit$residuals, ylab = "Residual")
lines(lowess(fit$residuals ~ temperature), col = "red")
# plot indicates the errors are heteroscedastic
plot(size, fit$residuals, ylab = "Residual")
lines(lowess(fit$residuals ~ size), col = "red")
plot(management, fit$residuals, ylab = "Residual")
lines(lowess(fit$residuals ~ management), col = "red")
par(mfrow = c(1,1))
# Gauss-Markov conditions tests (gvlma global validation)
summary(gvlma.lm(fit))
# Checking the normality of the residuals
plot(fit, which = 2)
# Shapiro-Wilk test and Kolmogorov-Smirnov test Testing Normality
shapiro.test(residuals(fit))
LillieTest(residuals(fit))
check_normality(fit) # OK: Residuals appear as normally distributed
# Checking the linearity of the relationship
plot(fit, which = 1)
# plot the relationship between the fitted values and the observed values for the outcome variable
# A straight line suggests that there's nothing grossly wrong
plot(fit.fittedvalues, SWI, xlab = "Fitted Values", ylab = "Observed Values")
lines(lowess(SWI ~ fit.fittedvalues), col = 'red')
# for each individual predictor
par(mfrow = c(2,2))
# partial-residual plots, cannot contain interactions
termplot(fit, partial.resid = TRUE)
crPlots(fit)
ceresPlots(fit) # less prone to leakage of nonlinearity among the predictors.
residualPlots(model = fit) # Adding SWF^2
# this function also reports the results of a bunch of curvature tests.
# For a predictor variable X, this test is equivalent to adding a new predictor
# to the model corresponding to X^2. If it comes up significant, it implies that
# there is some nonlinear relationship between the variable and the residuals.
par(mfrow = c(1,1))
# Checking the homogeneity of variance
plot(fit, which = 3)
ncvTest(fit)
bptest(fit, ~ SWF + temperature + size + management) # there's no violation of heteroskedasticity
coeftest(fit, vcov= hccm)
# if homogeneity of variance is violated, sandwich estimators is applied.
# Because the homogeneity of variance assumption wasn't violated,
# these t tests are pretty much identical to the former ones in the summary(fit)
# Checking independence, which we assume to be met
DurbinWatsonTest(fit, alternative="two.sided", data=data.training)
durbinWatsonTest(fit, alternative="two.sided", data=data.training)
# (c) Check whether there is (severe) multicollinearity====
# Correlation
# NOTE(review): `cor` was computed back in Q1, BEFORE `duration` was dropped,
# so corx (and the VIF/eigenvalue diagnostics below) still cover 5 predictors
# while `fit` uses only 4 -- verify this is intended.
corx <- cor
# small correlations between variables
# VIF rule of thumb: suspicion if the largest VIF exceeds 10, or
# if the mean of the VIF values is considerably larger than 1.
VIF <- diag(solve(corx))
max(VIF)
mean(VIF)
# Eigenvalues: A condition number nj > 30 is an indication for multicollinearity.
corx.eig <- eigen(corx)$values
corx.eig
sqrt(max(corx.eig)/corx.eig)
# indicating no multicollinearity
# (d) Check whether there are influential outliers====
plot(fit, which = 4)
plot(fit, which = 5)
plot(fit, which = 6)
# This function creates a "bubble" plot of Studentized residuals versus hat values,
# with the areas of the circles representing the observations proportional to Cook's distance.
# Vertical reference lines are drawn at twice and three times the average hat value,
# horizontal reference lines at -2, 0, and 2 on the Studentized-residual scale.
influencePlot(fit, main="influence Plot", sub="cook's distance")
# added-variable partial-regression plots
# identify data points with high leverage and
# influential data points that might not have high leverage.
par(mfrow = c(2,2))
avPlot(fit, variable = 'SWF')
avPlot(fit, variable = 'management')
avPlot(fit, variable = 'temperature')
avPlot(fit, variable = 'size')
par(mfrow = c(1,1))
# Classical approaches to find the vertical outliers and the leverage points====
# Standardized residuals
par(mfrow = c(1,1))
fit.stdres <- stdres(fit)
plot(fit.stdres, ylim = c(-4,4), ylab = "Standardized residuals")
abline(h = c(-2.5,2.5), col = "red")
# NOTE(review): label_x assumes exactly 200 training rows -- TODO confirm
# that data.full has 400 rows so the training half is also 200.
label_x <- seq(1,200)
text(subset(fit.stdres,fit.stdres >2.5), labels=row.names(subset(data.training, fit.stdres > 2.5)),
     x = as.character(label_x[fit.stdres >2.5]), cex = 0.7, pos = 1)
text(subset(fit.stdres,fit.stdres < -2.5), labels=row.names(subset(data.training, fit.stdres < -2.5)),
     x = as.character(label_x[fit.stdres < -2.5]), cex = 0.7, pos = 1)
which(fit.stdres > 2.5 | fit.stdres < -2.5)
# Studentized residuals
fit.studres <- studres(fit)
plot(fit.studres, ylim = c(-4,4), ylab = "Studentized residuals")
abline(h = c(-2.5,2.5), col = "red")
text(subset(fit.studres,fit.studres >2.5), labels=row.names(subset(data.training, fit.studres > 2.5)),
     x = as.character(label_x[fit.studres >2.5]), cex = 0.7, pos = 1)
text(subset(fit.studres,fit.studres < -2.5), labels=row.names(subset(data.training, fit.studres < -2.5)),
     x = as.character(label_x[fit.studres < -2.5]), cex = 0.7, pos = 1)
which(fit.studres > 2.5 | fit.studres < -2.5)
# Classical approaches to find the leverage points
# Diagonal elements of hat matrix; cutoff 2p/n.
fit.influence <- influence(fit)
plot(fit.influence$hat, ylab = "Diagonal elements of hat matrix")
h = 2*p/n_test
abline(h = h, col = "red")
text(subset(fit.influence$hat,fit.influence$hat > h),
     labels=row.names(subset(data.training, fit.influence$hat > h)),
     x = as.character(label_x[fit.influence$hat > h]), cex = 0.7, pos = 1)
which(fit.influence$hat > h)
# measures of influence
# DFFITS; cutoff 2*sqrt(p/n).
fit.dffits <- dffits(fit)
plot(fit.dffits, ylab = "DFFITS")
h = 2*sqrt(p/n_test)
abline(h = h, col = "red")
text(subset(fit.dffits,fit.dffits > h), labels=row.names(subset(data.training, fit.dffits > h)),
     x = as.character(label_x[fit.dffits > h]), cex = 0.7, pos = 1)
which(fit.dffits > h)
# Cook's distance; cutoff 1.
fit.Cd <- cooks.distance(fit)
plot(fit.Cd, ylab = "Cook's distance")
abline(h = 1, col = "red")
which(fit.Cd > 1)
# DFBETAS; cutoff 2/sqrt(n). Only column 2 (the SWF coefficient) is labelled.
fit.dfbetas <- dfbetas(fit)
plot(fit.dfbetas, ylab = "DFBETAS")
h = 2/sqrt(n_test)
abline(h = h, col = "red")
x = fit.dfbetas[,2] > h
text(subset(fit.dfbetas[,2], x), labels=row.names(subset(data.training, x)),
     x = data.frame(fit.dfbetas)[,1][x], cex = 0.7, pos = 4)
which(fit.dfbetas[,2] > h)
# Outliers are not noticed by Cook's distance, but DFFITS and DFBETAS are more powerful.
# Bonferroni Outlier Test
outlierTest(fit) # No outliers with Bonferroni p < 0.05
# robust diagnostic plot====
# Reweighted LTS (maximal (50%) breakdown value)
par(mfrow = c(1,1))
RLTS <- ltsReg(SWI ~ SWF+temperature+size+management, data = data.training)
summary(RLTS)
summary(RLTS)$r.squared # 63%
# Note: It is strongly recommend using lmrob() instead of ltsReg!
lmrob <- lmrob(SWI ~ SWF+temperature+size+management, data = data.training)
summary(lmrob)
summary(lmrob)$r.squared # 60%
# rqq: Normal Q-Q plot of the standardized residuals;
# rindex: plot of the standardized residuals versus their index;
# rfit: plot of the standardized residuals versus fitted values;
# rdiag: regression diagnostic plot.
plot(RLTS, which = 'rqq')
plot(RLTS, which = 'rindex')
plot(RLTS, which = 'rfit') # No. 113, 151, 190, 198 --> OBS 218, 286, 371, 395
plot(RLTS, which = 'rdiag')
# Diagnostic plot: robust distances vs standardized LTS residuals.
# Points right of the chi-square cutoff are leverage points; points outside
# +/-2.5 vertically are (vertical) outliers.
RLTS.stdres <- RLTS$residuals/RLTS$scale
plot(RLTS$RD, RLTS.stdres, ylim = c(-5,5),
     xlab = "Robust distance", ylab = "Standardized 50% LTS residuals",
     main = 'Regression Diagnostic Plot')
v = sqrt(qchisq(0.975, p - 1))
abline(v = sqrt(qchisq(0.975, p - 1)), col = "red")
abline(h = c(-2.5,2.5), col = "red")
text(subset(RLTS.stdres,RLTS.stdres >2.5), labels=row.names(subset(data.training, RLTS.stdres > 2.5)),
     x = as.character(RLTS$RD[RLTS.stdres >2.5]), cex = 0.7, pos = 2)
text(subset(RLTS.stdres,RLTS.stdres < -2.5), labels=row.names(subset(data.training, RLTS.stdres < -2.5)),
     x = as.character(RLTS$RD[RLTS.stdres < -2.5]), cex = 0.7, pos = 2)
which(RLTS.stdres > 2.5 | RLTS.stdres < -2.5) # vertical outliers: OBS 218 286 371 395
text(subset(RLTS.stdres, RLTS$RD > v), labels=row.names(subset(data.training, RLTS$RD > v)),
     x = as.character(RLTS$RD[RLTS$RD > v]), cex = 0.7, pos = 1)
which(RLTS$RD > v) # good leverage points: OBS 1, 3, 27
# RLTS (30% breakdown value); note this refit uses the Q3 model formula
# (quadratic SWF, no size), not the first-order Q2 formula.
RLTS2 <- ltsReg(SWI ~ I(SWF^2)+temperature+management, data = data.training, alpha = 0.7)
summary(RLTS2)
# Detection of outliers
plot(RLTS2, which = 'rqq')
plot(RLTS2, which = 'rindex')
plot(RLTS2, which = 'rfit')
plot(RLTS2, which = 'rdiag')
RLTS2.stdres <- RLTS2$residuals/RLTS2$scale
plot(RLTS2$RD, RLTS2.stdres, ylim = c(-5,5),
     xlab = "Robust distance", ylab = "Standardized 30% LTS residuals",
     main = 'Regression Diagnostic Plot')
v = sqrt(qchisq(0.975, p - 1))
abline(v = sqrt(qchisq(0.975, p - 1)), col = "red")
abline(h = c(-2.5,2.5), col = "red")
text(subset(RLTS2.stdres,RLTS2.stdres >2.5),
     labels=row.names(subset(data.training, RLTS2.stdres > 2.5)),
     x = as.character(RLTS2$RD[RLTS2.stdres >2.5]), cex = 0.7, pos = 2)
text(subset(RLTS2.stdres,RLTS2.stdres < -2.5),
     labels=row.names(subset(data.training, RLTS2.stdres < -2.5)),
     x = as.character(RLTS2$RD[RLTS2.stdres < -2.5]), cex = 0.7, pos = 2)
which(RLTS2.stdres > 2.5 | RLTS2.stdres < -2.5)
text(subset(RLTS2.stdres, RLTS2$RD > v), labels=row.names(subset(data.training, RLTS2$RD > v)),
     x = as.character(RLTS2$RD[RLTS2$RD > v]), cex = 0.7, pos = 1)
which(RLTS2$RD > v)
# Q3====
# Build a good linear regression model, possibly containing higher-order terms,
# interactions, transformed variables and/or other methods to improve the model assumptions.
# Model 1: Variable selection with Interaction terms====
# First look at the interaction terms. Generally the third and higher order interactions
# are weak and hard to interpret, so look at the main effects and second order interactions.
# The R formula syntax uses ^2 to mean "all two-way interactions of the variables".
fit_with <- lm(SWI ~ (SWF + temperature + management + size)^2, data = data.training)
summary(fit_with) # temperature:management interaction term is significant at the 5% level
# Backward elimination based on AIC
fit.full <- lm(SWI ~ (SWF + temperature + management + size)^2, data = data.training)
fit.full
stepAIC(fit.full, scope = list(upper = ~ (SWF + temperature + management + size)^2, lower = ~ 1),
        direction = "backward")
# AIC=-339.91 to AIC=-348.44
# SWI ~ SWF + temperature + management + temperature:management
# Forward selection based on AIC
fit.null <- lm(SWI ~ 1, data = data.training)
fit.null
stepAIC(fit.null, scope = list(upper = ~ (SWF + temperature + management + size)^2, lower = ~ 1),
        direction = "forward")
# AIC=-179.47 to AIC=-348.44
# SWI ~ SWF + temperature + management + temperature:management
# Stepwise selection based on AIC (started at full model)
stepAIC(fit.full, scope=list(upper = ~ (SWF + temperature + management + size)^2, lower = ~ 1),
        direction = "both")
# AIC=-339.91 to AIC=-348.44
# SWI ~ SWF + temperature + management + temperature:management
# Stepwise selection based on AIC (started at null model)
stepAIC(fit.null, scope=list(upper = ~ (SWF + temperature + management + size)^2, lower = ~ 1),
        direction = "both")
# AIC=-179.47 to AIC=-348.44
# SWI ~ SWF + temperature + management + temperature:management
# All four search strategies agree on the same model; refit it explicitly.
fit_with <- lm(formula = SWI ~ SWF + temperature * management, data = data.training)
summary(fit_with) # temperature:management interaction term is only significant at the 10% level
# Reason 1: Many statisticians use a much larger significance level for the AB interaction F test than
# what they use for the main effects. The reason is to get a higher chance to detect existing interactions.
summary(fit_with)$r.squared # 0.5872287, 59% of the total variance in the outcome is explained
# Reason 2: stepAIC is equivalent to applying a hypothesis test with the significance level 0.157.
# According to the stepwise selection procedure, the model with the interaction term has smaller AIC
# as well as larger goodness-of-fit (R^2)
# Reason 3: As for interpretation, the longer being subject to nature management, the higher stability of
# nature area, the less influence of the temperature change.
relweights <- function(fit, ...) {
  # Johnson's relative weights: apportion a model's R^2 among correlated
  # predictors by passing through an orthogonal transformation of the
  # predictor space. Draws a barplot of the percentages and returns them.
  #
  # fit: a fitted lm object (outcome must be the first column of fit$model).
  # ...: further arguments forwarded to barplot().
  # Returns a one-column matrix "Weights" with the percentage of R^2
  # attributable to each predictor.
  cor_all <- cor(fit$model)
  k <- ncol(cor_all)
  cor_xx <- cor_all[2:k, 2:k]   # predictor-predictor correlations
  cor_xy <- cor_all[2:k, 1]     # predictor-outcome correlations
  eig <- eigen(cor_xx)
  # lambda relates the original predictors to their orthogonal counterparts.
  lambda <- eig$vectors %*% diag(sqrt(eig$values)) %*% t(eig$vectors)
  lambdasq <- lambda^2
  # Regression coefficients of the outcome on the orthogonal variables.
  beta <- solve(lambda) %*% cor_xy
  rsquare <- colSums(beta^2)
  rawwgt <- lambdasq %*% beta^2
  # Express each raw weight as a percentage of the total R^2.
  import <- (rawwgt/rsquare) * 100
  lbls <- names(fit$model[2:k])
  rownames(import) <- lbls
  colnames(import) <- "Weights"
  # Visualize the result.
  barplot(t(import), names.arg = lbls, ylab = "% of R-Square",
          xlab = "Predictor Variables", main = "Relative Importance of Predictor Variables",
          sub = paste("R-Square = ", round(rsquare, digits = 3)),
          ...)
  return(import)
}
# Relative importance of predictors in the first-order vs interaction model.
relweights(fit, col = "lightgrey")
relweights(fit_with, col = "blue")
# size is dropped
# Model 2: Variable selection without Interaction terms====
# Backward elimination based on F-statistic/t-statistic
# NOTE(review): each update() call re-lists the already-removed terms; this is
# redundant but harmless, since removing an absent term is a no-op.
dropterm(fit.full, test = "F")
fit_drop <- update(fit.full, ~ . - temperature:size)
dropterm(fit_drop, test = "F")
fit_drop <- update(fit_drop, ~ . - temperature:size - SWF:management)
dropterm(fit_drop, test = "F")
fit_drop <- update(fit_drop, ~ . - temperature:size - SWF:management - SWF:size)
dropterm(fit_drop, test = "F")
fit_drop <- update(fit_drop, ~ . - temperature:size - SWF:management - SWF:size - management:size)
dropterm(fit_drop, test = "F")
fit_drop <- update(fit_drop, ~ . - temperature:size - SWF:management - SWF:size - management:size - size)
dropterm(fit_drop, test = "F")
fit_drop <- update(fit_drop, ~ . - temperature:size - SWF:management - SWF:size - management:size - size - SWF:temperature)
dropterm(fit_drop, test = "F")
fit_drop <- update(fit_drop, ~ . - temperature:size - SWF:management - SWF:size - management:size - size - SWF:temperature - temperature:management)
dropterm(fit_drop, test = "F")
# SWI ~ SWF + temperature + management
# Forward selection based on F-statistic/t-statistic
addterm(fit.null, ~ . + SWF + temperature + size + management + SWF:temperature + SWF:management + SWF:size +
          temperature:management + temperature:size + management:size, test = "F")
fit_add <- update(fit.null, ~ . + SWF)
addterm(fit_add, ~ . + temperature + size + management + SWF:temperature + SWF:management + SWF:size +
          temperature:management + temperature:size + management:size, test = "F")
fit_add <- update(fit_add, ~ . + temperature)
addterm(fit_add, ~. + size + management + SWF:temperature + SWF:management + SWF:size +
          temperature:management + temperature:size + management:size, test = "F")
fit_add <- update(fit_add, ~ . + management)
addterm(fit_add, ~. + size + SWF:temperature + SWF:management + SWF:size +
          temperature:management + temperature:size + management:size, test = "F")
# SWI ~ SWF + temperature + management
fit_without <- lm(formula = SWI ~ SWF + temperature + management, data = data.training)
summary(fit_without)
summary(fit_without)$r.squared # 0.5804086, 58% of the total variance in the outcome is explained
anova(fit_with, fit_without) # Reason 1: P = 0.07421, not significantly different between the two models
# -2log-likelihood+kn, where n represents the number of parameters in the fitted model, and k=2 for the usual AIC, or
# k=log(N) (N being the number of observations) for the so-called BIC or SBC (Schwarz's Bayesian criterion)
stepAIC(fit_without, scope = list(upper = ~ SWF + temperature * management, lower = ~ 1), direction = "both")
AIC(fit_with, fit_without)
# Reason 2: AIC=-347.16 to AIC=-348.44, little increase in AIC value
# Reason 3: When an interaction isn't significant, drop it if you are just checking for the presence of an interaction
# to make sure you are specifying the model correctly. The interaction uses up df,
# changes the meaning of the lower order coefficients and complicates the model.
# But if you actually hypothesized an interaction that wasn't significant, leave it in the model.
# The insignificant interaction means something in this case - it helps you evaluate your hypothesis.
# Taking it out can do more damage in specification error than in will in the loss of df.
# Reason 4: Overall, no improvement on the assumptions of the model 1, which has a few outliers
# Model 3: Adding the Quadratic term of SWF====
fit3_with <- lm(SWI ~ SWF + I(SWF^2) + temperature * management, data = data.training)
summary(fit3_with) # SWF is non-significant, temperature:management is only significant at the 10% level
summary(fit3_with)$r.squared # 61% of the total variance in the outcome is explained
# Stepwise selection
fit.full <- lm(SWI ~ SWF + I(SWF^2) + temperature * management,
               data = data.training)
fit.full
fit.null <- lm(SWI ~ 1, data = data.training)
fit.null
# Stepwise selection based on AIC (started at full model)
stepAIC(fit.full, scope=list(upper = ~ SWF + I(SWF^2) + temperature * management, lower = ~ 1), direction = "both")
# AIC=-357.84 to AIC=-359.58
# SWI ~ I(SWF^2) + temperature + management + temperature:management
# Stepwise selection based on AIC (started at null model)
stepAIC(fit.null, scope=list(upper = ~ SWF + I(SWF^2) + temperature * management, lower = ~ 1), direction = "both")
# AIC=-179.47 to AIC=-359.58
# SWI ~ I(SWF^2) + temperature + management + temperature:management
fit3_with <- lm(SWI ~ I(SWF^2) + temperature * management, data = data.training)
summary(fit3_with)
summary(fit3_with)$r.squared # 61% of the total variance in the outcome is explained
fit3_without <- lm(SWI ~ SWF + I(SWF^2) + temperature + management, data = data.training)
summary(fit3_without) # SWF is non-significant
summary(fit3_without)$r.squared # 60% of the total variance in the outcome is explained
# Stepwise selection
fit.full <- lm(SWI ~ SWF + I(SWF^2) + temperature + management,
               data = data.training)
fit.full
fit.null <- lm(SWI ~ 1, data = data.training)
fit.null
# Stepwise selection based on AIC (started at full model)
stepAIC(fit.full, scope=list(upper = ~ SWF + I(SWF^2) + temperature + management, lower = ~ 1), direction = "both")
# AIC=-357.03 to AIC=-358.68
# SWI ~ I(SWF^2) + temperature + management
# Stepwise selection based on AIC (started at null model)
stepAIC(fit.null, scope=list(upper = ~ SWF + I(SWF^2) + temperature + management, lower = ~ 1), direction = "both")
# AIC=-179.47 to AIC=-358.68
# SWI ~ I(SWF^2) + temperature + management
fit3_without <- lm(SWI ~ I(SWF^2) + temperature + management, data = data.training)
summary(fit3_without)
summary(fit3_without)$r.squared # 60% of the total variance in the outcome is explained
# Model 4: Transformations====
# Box-Cox transformation on Y, one of the solutions to the problem of linearality ====
sum(data.training$SWI <= 0) # response should be strictly positive
par(mfrow = c(1,1))
out_without <- boxcox(SWI ~ I(SWF^2)+temperature+management, plotit = TRUE, data = data.training)
lambda_without <- out_without$x[which(out_without$y == max(out_without$y))]
lambda_without # lambda = 0.7878788
out_with <- boxcox(SWI ~ I(SWF^2)+temperature*management, plotit = TRUE, data = data.training)
lambda_with <- out_with$x[which(out_with$y == max(out_with$y))]
lambda_with # lambda = 0.8282828
# powerTransform uses the maximum likelihood-like approach of Box and Cox (1964) to select a transformatiion
# of a univariate or multivariate response for normality, linearity and/or constant variance.
powerTransform(fit3_with, family="bcnPower")
powerTransform(fit3_without, family="bcnPower")
# lambda is approximately equal to 1, no Box-cox transformation
# X Variable transformation of temperature====
# try segmented linear regression/Piecewise linear regression====
# Starting break-point guesses at temperature = 12 and 27.
fit_without_segmented <- segmented.lm(fit3_without, seg.Z = ~temperature, psi = c(12, 27), control=seg.control(display=FALSE))
summary(fit_without_segmented)
# Estimated Break-Point(s): 12.748, 27.200
fit_without_segmented.res <- residuals(fit_without_segmented)
fit_without_segmented.stdres <- stdres(fit_without_segmented)
fit_without_segmented.fittedvalues <- fitted.values(fit_without_segmented)
par(mfrow = c(2,2))
qqnorm(fit_without_segmented.stdres, main="")
qqline(fit_without_segmented.stdres)
plot(fit_without_segmented.res, xlab = "Index", ylab = "Residual")
plot(fit_without_segmented.fittedvalues, fit_without_segmented.res, xlab = "Fitted value", ylab = "Residual")
lines(lowess(fit_without_segmented.res ~ fit_without_segmented.fittedvalues), col = "red")
plot(fit_without_segmented.stdres, xlab = "Index", ylab = "Standardized residual", ylim = c(-3,3))
abline(h = -2.5, lty = 2)
abline(h = 2.5, lty = 2)
par(mfrow = c(1,3))
plot(I(SWF^2), fit_without_segmented$residuals, ylab = "Residual")
lines(lowess(fit_without_segmented$residuals ~ I(SWF^2)), col = "red")
# plot indicates the errors are heteroscedastic
plot(temperature, fit_without_segmented$residuals, ylab = "Residual", main = 'Piecewise')
lines(lowess(fit_without_segmented$residuals ~ temperature), col = "red")
# plot indicates the linear model is defective (curve segmentation > 20) and the errors are heteroscedastic
plot(management, fit_without_segmented$residuals, ylab = "Residual")
lines(lowess(fit_without_segmented$residuals ~ management), col = "red")
par(mfrow = c(1,1))
fit_with_segmented <- segmented.lm(fit3_with, seg.Z = ~temperature, psi = c(12, 27), control=seg.control(display=FALSE))
# Estimated Break-Point(s): 12.621, 27.200
summary(fit_with_segmented)
fit_with_segmented.res <- residuals(fit_with_segmented)
fit_with_segmented.stdres <- stdres(fit_with_segmented)
fit_with_segmented.fittedvalues <- fitted.values(fit_with_segmented)
par(mfrow = c(2,2))
qqnorm(fit_with_segmented.stdres, main="")
qqline(fit_with_segmented.stdres)
plot(fit_with_segmented.res, xlab = "Index", ylab = "Residual")
plot(fit_with_segmented.fittedvalues, fit_with_segmented.res, xlab = "Fitted value", ylab = "Residual")
lines(lowess(fit_with_segmented.res ~ fit_with_segmented.fittedvalues), col = "red")
plot(fit_with_segmented.stdres, xlab = "Index", ylab = "Standardized residual", ylim = c(-3,3))
abline(h = -2.5, lty = 2)
abline(h = 2.5, lty = 2)
par(mfrow = c(2,2))
plot(I(SWF^2), fit_with_segmented$residuals, ylab = "Residual")
lines(lowess(fit_with_segmented$residuals ~ I(SWF^2)), col = "red")
# plot indicates the errors are heteroscedastic
plot(temperature, fit_with_segmented$residuals, ylab = "Residual")
lines(lowess(fit_with_segmented$residuals ~ temperature), col = "red")
# plot indicates the linear model is defective (curve segmentation > 20) and the errors are heteroscedastic
plot(management, fit_with_segmented$residuals, ylab = "Residual")
lines(lowess(fit_with_segmented$residuals ~ management), col = "red")
plot(temperature*management, fit_with_segmented$residuals, ylab = "Residual")
lines(lowess(fit_with_segmented$residuals ~ temperature*management), col = "red")
par(mfrow = c(1,1))
# link function====
# NOTE(review): abs(management + 1e-8) is a workaround because boxTidwell
# requires strictly positive predictors; management evidently contains zeros.
boxTidwell(SWI ~ I(SWF^2) + temperature + abs(management+0.00000001),
           other.x = ~ size, data = data.training)
# lambda = 0.5, but not significant with P = 0.2012
# Compare candidate transformations of temperature visually.
plot(SWI ~ temperature, data = data.training)
lines(lowess(SWI ~ temperature))
plot(SWI ~ sqrt(temperature), data = data.training)
lines(lowess(SWI ~ sqrt(temperature)))
plot(SWI ~ logit(temperature), data = data.training)
lines(lowess(SWI ~ logit(temperature)))
plot(SWI ~ I(temperature^2), data = data.training)
lines(lowess(SWI ~ I(temperature^2)))
plot(SWI ~ log(temperature), data = data.training)
lines(lowess(SWI ~ log(temperature)))
# try the logarithms transformation, logit transformation, square-root transformation
# and even the quadratic term, in order to spread out the tails of the distribution.
f1 <- lm(SWI ~ I(SWF^2)+I(temperature^2)+management, data = data.training)
f11 <- lm(SWI ~ I(SWF^2)+I(temperature^2)*management, data = data.training)
f2<- lm(SWI ~ I(SWF^2)+logit(temperature)+management, data = data.training)
f22<- lm(SWI ~ I(SWF^2)+logit(temperature)*management, data = data.training)
f3 <- lm(SWI ~ I(SWF^2)+log(temperature)+management, data = data.training)
f33 <- lm(SWI ~ I(SWF^2)+log(temperature)*management, data = data.training)
fit4_without <- lm(SWI ~ I(SWF^2)+sqrt(temperature)+management, data = data.training)
fit4_with <- lm(SWI ~ I(SWF^2)+sqrt(temperature)*management, data = data.training)
# Model 5: Weighted least squares model=====
# For each candidate formula: fit OLS, model |standardized residuals| on the
# same predictors, and use 1/fitted^2 as WLS weights (two-step FWLS).
fit4_without <- lm(formula = SWI ~ I(SWF^2) + temperature + management, data = data.training)
w_without <- 1/lm(abs(stdres(fit4_without)) ~ I(SWF^2)+ temperature +management, data = data.training)$fitted.values^2
fit5_nontrafo <- lm(SWI ~ I(SWF^2)+ temperature +management, weight = w_without, data = data.training)
fit4_without_trafo <- lm(formula = SWI ~ I(SWF^2) + sqrt(temperature) + management, data = data.training)
w_trafo <- 1/lm(abs(stdres(fit4_without_trafo)) ~ I(SWF^2)+ sqrt(temperature) +management, data = data.training)$fitted.values^2
fit5_trafo <- lm(SWI ~ I(SWF^2)+ sqrt(temperature) +management, weight = w_trafo, data = data.training)
fit4_with <- lm(formula = SWI ~ I(SWF^2) + temperature * management, data = data.training)
w_with <- 1/lm(abs(stdres(fit4_with)) ~ I(SWF^2)+temperature*management, data = data.training)$fitted.values^2
fit5_with_nontrafo <- lm(SWI ~ I(SWF^2)+temperature*management, weight = w_with, data = data.training)
fit4_with_trafo <- lm(formula = SWI ~ I(SWF^2) + sqrt(temperature) * management, data = data.training)
w_with_trafo <- 1/lm(abs(stdres(fit4_with_trafo)) ~ I(SWF^2)+sqrt(temperature) *management, data = data.training)$fitted.values^2
fit5_with_trafo <- lm(SWI ~ I(SWF^2)+sqrt(temperature) *management, weight = w_with_trafo, data = data.training)
# Model 6: Boxcox transformation of Model 5====
# NOTE(review): out_without/lambda_without are overwritten by the second
# boxcox call; only the printed values preserve the first result.
out_without <- boxcox(fit5_nontrafo, plotit = TRUE)
lambda_without <- out_without$x[which(out_without$y == max(out_without$y))]
lambda_without # sqrt(y)
out_without <- boxcox(fit5_trafo, plotit = TRUE)
lambda_without <- out_without$x[which(out_without$y == max(out_without$y))]
lambda_without # sqrt(y)
# Refit with the square-root-transformed response suggested by Box-Cox.
fit6_nontrafo <- lm(SWI^0.5 ~ I(SWF^2)+temperature+management,
                    weight = w_without, data = data.training)
fit6_trafo <- lm(SWI^0.5 ~ I(SWF^2)+sqrt(temperature)+management,
                 weight = w_trafo, data = data.training)
# Check model assumptions
# Leave-one-out methods: PRESS
# Models with small PRESSp values (or PRESSp/n) are considered good candidate models
PRESS1 <- sum((residuals(fit5_nontrafo) / (1 - lm.influence(fit5_nontrafo)$hat))^2)
PRESS2 <- sum((residuals(fit5_trafo) / (1 - lm.influence(fit5_trafo)$hat))^2)
PRESS3 <- sum((residuals(fit6_nontrafo) / (1 - lm.influence(fit6_nontrafo)$hat))^2)
PRESS4 <- sum((residuals(fit6_trafo) / (1 - lm.influence(fit6_trafo)$hat))^2)
PRESS5 <- sum((residuals(fit5_with_nontrafo) / (1 - lm.influence(fit5_with_nontrafo)$hat))^2)
PRESS6 <- sum((residuals(fit5_with_trafo) / (1 - lm.influence(fit5_with_trafo)$hat))^2)
PRESS <- c(PRESS1, PRESS2,PRESS3, PRESS4,PRESS5, PRESS6)
names(PRESS) <- c("fit5_nontrafo", 'fit5_trafo','fit6_nontrafo',"fit6_trafo",'fit5_with_nontrafo','fit5_with_trafo')
sort(PRESS)
# MSE
# NOTE(review): PRESS/MSE for models 6 (transformed response) are on the
# sqrt(SWI) scale and are not directly comparable to models 5.
MSE1 <- summary(fit5_nontrafo)$sigma^2
MSE2 <- summary(fit5_trafo)$sigma^2
MSE3 <- summary(fit6_nontrafo)$sigma^2
MSE4 <- summary(fit6_trafo)$sigma^2
MSE5 <- summary(fit5_with_nontrafo)$sigma^2
MSE6 <- summary(fit5_with_trafo)$sigma^2
MSE <- c(MSE1, MSE2, MSE3,MSE4, MSE5, MSE6)
names(MSE) <- c("fit5_nontrafo", 'fit5_trafo','fit6_nontrafo',"fit6_trafo",'fit5_with_nontrafo','fit5_with_trafo')
sort(MSE)
detach(data.training)
# Q4====
# Fit both models to the validation data. Investigate and compare their performance.
# Models under comparison: fit6_nontrafo and fit6_trafo.
# BUG FIX: data.test was attached twice in a row (with only one detach later),
# masking the training data twice and leaving the search path unbalanced;
# a single attach suffices.
attach(data.test)
# Model fit6_nontrafo: SWI^0.5 ~ I(SWF^2) + temperature+management, weights = w_without
# Re-estimate the variance weights on the validation data, then refit by WLS.
model5_OLS <- lm(SWI ~ I(SWF^2) + temperature + management, data = data.test)
w5_without <- 1/lm(abs(stdres(model5_OLS)) ~ I(SWF^2)+ temperature +management, data = data.test)$fitted.values^2
model6_nontrafo.val <- lm(SWI^0.5 ~ I(SWF^2) + temperature + management, weights = w5_without, data = data.test)
summary(model6_nontrafo.val); summary(fit6_nontrafo)
summary(model6_nontrafo.val)$r.squared
# Model fit6_trafo: SWI^0.5 ~ I(SWF^2) + sqrt(temperature)+management, weights = w5_trafo
model5_trafo <- lm(SWI ~ I(SWF^2) + sqrt(temperature) + management, data = data.test)
w5_trafo <- 1/lm(abs(stdres(model5_trafo)) ~ I(SWF^2)+ sqrt(temperature) +management, data = data.test)$fitted.values^2
model6_trafo.val <- lm(SWI^0.5 ~ I(SWF^2) + sqrt(temperature) + management, weights = w5_trafo, data = data.test)
summary(model6_trafo.val); summary(fit6_trafo)
summary(model6_trafo.val)$r.squared
# Compare estimated coefficients and standard errors
# Individual confidence intervals (95%) for training- and validation-set fits.
alpha <- 0.05
confint(fit6_trafo, level = 1 - alpha)
confint(model6_trafo.val, level = 1 - alpha)
confint(model6_nontrafo.val, level = 1 - alpha)
confint(fit6_nontrafo, level = 1 - alpha)
# Simultaneous confidence intervals with Bonferroni correction
# NOTE(review): alpha/2 corresponds to g = 2 simultaneous statements; each
# model has 4 coefficients, so a full Bonferroni correction would use
# alpha/4 -- confirm the intended number of comparisons.
alpha <- 0.05
confint(fit6_trafo, level = 1 - alpha/2)
confint(model6_trafo.val, level = 1 - alpha/2)
confint(model6_nontrafo.val, level = 1 - alpha/2)
confint(fit6_nontrafo, level = 1 - alpha/2)
# Prediction
# A prediction interval reflects the uncertainty of a single value,
# while a confidence interval reflects the uncertainty of the predicted mean.
# Note: the models were fit on SWI^0.5, so these intervals are on sqrt scale.
pred_trafo <- predict(fit6_trafo, newdata = data.test, interval = "prediction")
pred_nontrafo <- predict(fit6_nontrafo, newdata = data.test, interval = "prediction")
predict(fit6_trafo, newdata = data.test, interval = "confidence")
predict(fit6_nontrafo, newdata = data.test, interval = "confidence")
# MSEP: mean squared error of prediction on the test set.
# NOTE(review): model6_*'s response is SWI^0.5, so predict() returns values on
# the sqrt scale while SWI (from the attached data.test) is on the raw scale;
# the subtraction mixes scales -- confirm whether back-transformation
# (predict(...)^2) or sqrt(SWI) was intended.
MSEP1 <- mean((predict(model6_trafo.val, newdata = data.test) - SWI)^2)
MSEP2 <- mean((predict(model6_nontrafo.val, newdata = data.test) - SWI)^2)
MSEP <- c(MSEP1, MSEP2)
names(MSEP) <- c("model6_trafo.val", "model6_nontrafo.val")
sort(MSEP)
# Leave-one-out methods: PRESS
# Models with small PRESSp values (or PRESSp/n) are considered good candidate models
PRESS1 <- sum((residuals(model6_trafo.val) / (1 - lm.influence(model6_trafo.val)$hat))^2)
PRESS2 <- sum((residuals(model6_nontrafo.val) / (1 - lm.influence(model6_nontrafo.val)$hat))^2)
PRESS <- c(PRESS1, PRESS2)
names(PRESS) <- c('model6_trafo.val',"model6_nontrafo.val")
sort(PRESS)
# MSE: estimated residual variance of each validation-set fit.
MSE1 <- summary(model6_trafo.val)$sigma^2
MSE2 <- summary(model6_nontrafo.val)$sigma^2
MSE <- c(MSE1, MSE2)
names(MSE) <- c("model6_trafo.val", 'model6_nontrafo.val')
sort(MSE)
detach(data.test)
# Q5====
# fit your ultimate model (fit6_nontrafo) of preference to the full dataset.
attach(data.full)
# Model fit6_nontrafo: SWI^0.5 ~ I(SWF^2) + temperature+management, weights = w_without
# Same two-stage WLS recipe as before, now on all of data.full:
# OLS fit -> variance weights from |standardized residuals| -> WLS refit on SWI^0.5.
fit_full <- lm(SWI ~ I(SWF^2) + temperature + management, data = data.full)
w_full <- 1/lm(abs(stdres(fit_full)) ~ I(SWF^2)+ temperature +management, data = data.full)$fitted.values^2
model6.full <- lm(SWI^0.5 ~ I(SWF^2) + temperature + management, weights = w_full, data = data.full)
summary(model6.full)
summary(model6.full)$r.squared
# Individual confidence intervals: compare training, validation, and full-data fits.
alpha <- 0.05
confint(fit6_nontrafo, level = 1 - alpha)
confint(model6_nontrafo.val, level = 1 - alpha)
confint(model6.full, level = 1 - alpha)
# Simultaneous confidence intervals with Bonferroni correction
# NOTE(review): alpha/2 implies g = 2 statements; the model has 4 coefficients.
alpha <- 0.05
confint(fit6_nontrafo, level = 1 - alpha/2)
confint(model6_nontrafo.val, level = 1 - alpha/2)
confint(model6.full, level = 1 - alpha/2)
# Check model assumptions
# Cache residuals / standardized residuals / fitted values for the plots below.
model6.full.res <- residuals(model6.full)
model6.full.stdres <- stdres(model6.full)
model6.full.fittedvalues <- fitted.values(model6.full)
# Four-panel overview: Q-Q plot, residual index plot,
# residuals vs fitted (with lowess), standardized residuals with +-2.5 bands.
par(mfrow = c(2,2))
qqnorm(model6.full.stdres, main="")
qqline(model6.full.stdres)
plot(model6.full.res, xlab = "Index", ylab = "Residual")
plot(model6.full.fittedvalues, model6.full.res, xlab = "Fitted value", ylab = "Residual")
lines(lowess(model6.full.res ~ model6.full.fittedvalues), col = "red")
plot(model6.full.stdres, xlab = "Index", ylab = "Standardized residual", ylim = c(-3,3))
abline(h = -2.5, lty = 2)
abline(h = 2.5, lty = 2)
# UL: small deviations from normal distributed residuals
# UR: pattern indicates no heteroscedastic errors
# BL: linearity assumption is satisfied
# BR: outliers
# Residuals against each predictor (lowess overlay) to spot remaining structure.
par(mfrow = c(1,3))
plot(I(SWF^2), model6.full$residuals, ylab = "Residual")
lines(lowess(model6.full$residuals ~ I(SWF^2)), col = "red")
plot(temperature, model6.full$residuals, ylab = "Residual")
lines(lowess(model6.full$residuals ~ temperature), col = "red")
plot(management, model6.full$residuals, ylab = "Residual")
lines(lowess(model6.full$residuals ~ management), col = "red")
par(mfrow = c(1,1))
# Checking the normality of the residuals
plot(model6.full, which = 2)
# Shapiro-Wilk test and Kolmogorov-Smirnov test Testing Normality
# (LillieTest from DescTools, sf.test from nortest, check_normality from performance)
shapiro.test(residuals(model6.full))
LillieTest(residuals(model6.full))
sf.test(residuals(model6.full))
check_normality(model6.full)# OK: Residuals appear as normally distributed
# Checking the linearity of the relationship
plot(model6.full, which = 1)
# plot the relationship between the fitted values and the observed values for the outcome variable
# NOTE(review): fitted values are on the sqrt(SWI) scale while SWI is raw,
# so this plot compares different scales -- confirm intended.
plot(model6.full.fittedvalues, SWI, xlab = "Fitted Values", ylab = "Observed Values")
lines(lowess(SWI ~ model6.full.fittedvalues), col = 'red')
# for each individual predictor
par(mfrow = c(1,3))
# partial-residual plots, cannot contain interactions
termplot(model6.full, partial.resid = TRUE)
crPlots(model6.full)
par(mfrow = c(1,1))
# Checking the homogeneity of variance
# https://stats.stackexchange.com/questions/193061/what-is-the-difference-between-these-two-breusch-pagan-tests
# In short, the studentized BP test is more robust, usually go with bptest,
# with studentize = TRUE (default) and varformula = ~ fitted.values(my.lm) as options,
# for an initial approach for homoskedasticity.
plot(model6.full, which = 3)
ncvTest(model6.full)
# NOTE(review): the variance formula uses SWF rather than I(SWF^2) as in the
# model -- confirm whether the untransformed predictor is intended here.
bptest(model6.full, ~ SWF + temperature + management)
bptest(model6.full, ~ fitted.values(model6.full)) # accepted
# Heteroscedasticity-consistent (sandwich) standard errors for comparison.
coeftest(model6.full, vcov= hccm)
summary(model6.full)
# if homogeneity of variance is violated, sandwich estimators is applied.
# Because the homogeneity of variance assumption wasn’t violated,
# these t tests are pretty much identical to the former ones in the summary(model6.full)
# outliers
# Cook's distance and leverage/influence diagnostics.
plot(model6.full, which = 4)
plot(model6.full, which = 5)
influencePlot(model6.full, main="influence Plot", sub="cook's distance")
# Added-variable (partial-regression) plots per predictor.
par(mfrow = c(1,3))
avPlot(model6.full, variable = 'I(SWF^2)')
avPlot(model6.full, variable = 'management')
avPlot(model6.full, variable = 'temperature')
par(mfrow = c(1,1))
# Standardized residuals: flag observations outside +-2.5 and label them with
# their data.full row names.
model6.full.stdres <- stdres(model6.full)
plot(model6.full.stdres, ylim = c(-4,4), ylab = "Standardized residuals")
abline(h = c(-2.5,2.5), col = "red")
# Index positions (data.full has the full sample) used as x coordinates for labels.
# NOTE(review): the x coordinates are passed as character strings via
# as.character(); verify they are coerced to numeric as intended by text().
label_x <- seq(1,400)
text(subset(model6.full.stdres,model6.full.stdres >2.5), labels=row.names(subset(data.full, model6.full.stdres > 2.5)),
     x = as.character(label_x[model6.full.stdres >2.5]), cex = 0.7, pos = 1)
text(subset(model6.full.stdres,model6.full.stdres < -2.5), labels=row.names(subset(data.full, model6.full.stdres < -2.5)),
     x = as.character(label_x[model6.full.stdres < -2.5]), cex = 0.7, pos = 1)
which(model6.full.stdres > 2.5 | model6.full.stdres < -2.5)
# Studentized residuals: same flagging with externally studentized residuals.
model6.full.studres <- studres(model6.full)
plot(model6.full.studres, ylim = c(-4,4), ylab = "Studentized residuals")
abline(h = c(-2.5,2.5), col = "red")
text(subset(model6.full.studres,model6.full.studres >2.5), labels=row.names(subset(data.full, model6.full.studres > 2.5)),
     x = as.character(label_x[model6.full.studres >2.5]), cex = 0.7, pos = 1)
text(subset(model6.full.studres,model6.full.studres < -2.5), labels=row.names(subset(data.full, model6.full.studres < -2.5)),
     x = as.character(label_x[model6.full.studres < -2.5]), cex = 0.7, pos = 1)
which(model6.full.studres > 2.5 | model6.full.studres < -2.5)
# Bonferroni Outlier Test
outlierTest(model6.full) # OBS 1 as a outlier with Bonferroni p < 0.05
detach(data.full)
# Q6====
# investigating possible association between duration (outcome) and temperature (predictor).
# Re-create the training split including the duration column (it was dropped
# from data.training in Q2).
data.training <- data.full[-d.test, ]
attach(data.training)
# (a) Fit non-parametric models with k=1 and k=2, ====
# for spans 0.25, 0.5, and 0.75 and choose the best-fitting model
# Local linear regression
# NOTE(review): the `data =` argument is ignored by predict.loess (it takes
# `newdata`), so these are fitted values at the training points; also,
# temperature is not sorted, so lines() may connect points out of order --
# verify the plots render as intended.
plot(temperature, duration, main = "Local linear regression")
s <- c(0.25, 0.5, 0.75)
colors <- c("red", "green", "blue")
for (i in 1:length(s)) lines(temperature, predict(loess(duration ~ temperature, span = s[i], degree = 1),
                             data = data.training), col = colors[i])
legend(5, 40, c("span = 0.25", "span = 0.5", "span = 0.75"), lty = 1, col = colors)
# Local quadratic regression
plot(temperature, duration, main = "Local quadratic regression")
for (i in 1:length(s)) lines(temperature, predict(loess(duration ~ temperature, span = s[i], degree = 2),
                             data = data.training), col = colors[i])
legend(5, 40, c("span = 0.25", "span = 0.5", "span = 0.75"), lty = 1, col = colors)
# Check model assumptions
# The six candidate loess fits (degree 1/2 crossed with spans 0.25/0.5/0.75)
# all get the same four diagnostic panels, so the six copy-pasted sections are
# factored into one helper.
# loess.diagnostics: fit duration ~ temperature by local regression with the
#   given degree and span (using the attached training data) and draw
#   (1) data + smooth, (2) residuals vs temperature, (3) scale-location
#   (sqrt|residual| vs fitted), (4) normal Q-Q of the residuals.
#   Prints the fit (mirroring the original script's auto-printing) and
#   returns it invisibly.
loess.diagnostics <- function(deg, sp) {
  fit <- loess(duration ~ temperature, degree = deg, span = sp)
  print(fit)
  par(mfrow = c(2,2))
  plot(duration ~ temperature)
  lines(loess.smooth(temperature, duration, span = sp, degree = deg), col = 'red')
  plot(residuals(fit) ~ temperature)
  lines(loess.smooth(temperature, residuals(fit), span = sp, degree = deg), col = 'red')
  abline(h = 0, lty = 2)
  plot(fitted(fit), sqrt(abs(residuals(fit))))
  lines(loess.smooth(fitted(fit), sqrt(abs(residuals(fit))), span = sp, degree = deg), col = 'red')
  qqnorm(residuals(fit))
  qqline(residuals(fit), col = 'red')
  invisible(fit)
}
# ========== k=1, spans 0.25 / 0.5 / 0.75
fit.loess1 <- loess.diagnostics(1, 0.25)
fit.loess2 <- loess.diagnostics(1, 0.5)
fit.loess3 <- loess.diagnostics(1, 0.75)
# ========== k=2, spans 0.25 / 0.5 / 0.75
fit.loess4 <- loess.diagnostics(2, 0.25)
fit.loess5 <- loess.diagnostics(2, 0.5)
fit.loess6 <- loess.diagnostics(2, 0.75)
# k =2, span = 0.75 is the best-fitting model
# (b) Fit a quadratic linear model====
# Overlay linear, quadratic, and cubic polynomial fits on the scatter plot.
par(mfrow = c(1,1))
plot(temperature, duration, main = "Polynomial regression")
# Linear model
fit1 <- lm(duration ~ temperature, data = data.training)
abline(fit1, col = "red")
# Quadratic model: drawn from its estimated coefficients over [0, 60].
fit2 <- lm(duration ~ temperature + I(temperature^2), data = data.training)
fit2.coef <- fit2$coefficients
curve(fit2.coef[1] + fit2.coef[2]*x + fit2.coef[3]*x^2, 0, 60, add = TRUE, col = "green")
# Cubic model
fit3 <- lm(duration ~ temperature + I(temperature^2) + I(temperature^3), data = data.training)
fit3.coef <- fit3$coefficients
curve(fit3.coef[1] + fit3.coef[2]*x + fit3.coef[3]*x^2 + fit3.coef[4]*x^3, 0, 60, add = TRUE, col = "blue")
# Add legend
legend(5, 40, c("linear", "quadratic", "cubic"), lty = 1, col = c("red", "green", "blue"))
# (c) a plot of the data, the best nonparametric fit, and the linear fit====
par(mfrow = c(1,1))
plot(temperature, duration, main = "Quadratic vs non-parametric regression")
# Local quadratic regression: k = 2, span = 0.75 (the chosen non-parametric fit;
# this fit.loess object is reused for the F-test and predictions below).
fit.loess <- loess(duration ~ temperature, span = 0.75, degree = 2)
# NOTE(review): `data =` is ignored by predict.loess and temperature is
# unsorted, so lines() may connect points out of order -- verify the rendering.
lines(temperature, predict(fit.loess, data = data.training), col = 'red')
# Linear model
fit1 <- lm(duration ~ temperature, data = data.training)
abline(fit1, col = "black")
# Quadratic model (reused below for the model-comparison F-test)
fit2 <- lm(duration ~ temperature + I(temperature^2), data = data.training)
fit2.coef <- fit2$coefficients
curve(fit2.coef[1] + fit2.coef[2]*x + fit2.coef[3]*x^2, 0, 60, add = TRUE, col = "green")
# Cubic model
fit3 <- lm(duration ~ temperature + I(temperature^2) + I(temperature^3), data = data.training)
fit3.coef <- fit3$coefficients
curve(fit3.coef[1] + fit3.coef[2]*x + fit3.coef[3]*x^2 + fit3.coef[4]*x^3, 0, 60, add = TRUE, col = "blue")
legend(4, 43, c("Non-parametric fit", "linear", "quadratic", "cubic"), lty = 1, col = c("red", "black", "green", "blue"))
# According to the visual interpretation, Non-parametric model fits the data best.
# (d) Test whether the non-parametric model of your choice====
# fits the data better than the quadratic model
summary(fit.loess)
summary(fit2) # 69%
# Compare quadratic linear model with non-parametric model
# Approximate F-test: SSE0 from the (restricted) quadratic model with 3
# parameters, SSE1 from the loess fit whose effective number of parameters is
# the trace of its smoother matrix (trace.hat).
traceS <- fit.loess$trace.hat
SSE0 <- sum(residuals(fit2)^2)
SSE1 <- sum(residuals(fit.loess)^2)
n <- dim(data.training)[1]
Fvalue <- ((SSE0 - SSE1) / (traceS - 3)) / (SSE1 / (n - traceS))
Fvalue
Fcrit <- qf(0.95, traceS - 3, n - traceS)
Fcrit
# P-value of the approximate F-test.
1 - pf(Fvalue, traceS - 3, n - traceS)
# the difference between the non-parametric model and the quadratic model is
# significant since the P-value is (numerically) zero
# Prediction: approximate 95% pointwise intervals on the test set for both
# the loess fit and the quadratic fit, using normal quantiles on the
# standard errors returned by predict(..., se = TRUE).
attach(data.test)
t.pred <- predict(fit.loess, data.test, se = TRUE)
t.upper <- t.pred$fit + qnorm(0.975) * t.pred$se.fit
t.lower <- t.pred$fit - qnorm(0.975) * t.pred$se.fit
# NOTE(review): this data frame is named `loess`, masking stats::loess() for
# the rest of the script -- consider renaming (e.g. loess.pred).
loess <- data.frame("pred" = t.pred$fit, "lower" = t.lower, "upper" = t.upper)
plot(data.test$temperature, data.test$duration)
lines(lowess(data.test$temperature,t.pred$fit))
lines(lowess(data.test$temperature,t.upper))
lines(lowess(data.test$temperature,t.lower))
# Same intervals for the quadratic model (t.pred/t.upper/t.lower are reused).
t.pred <- predict(fit2, data.test, se = TRUE)
t.upper <- t.pred$fit + qnorm(0.975) * t.pred$se.fit
t.lower <- t.pred$fit - qnorm(0.975) * t.pred$se.fit
quadratic <- data.frame("pred" = t.pred$fit, "lower" = t.lower, "upper" = t.upper)
plot(data.test$temperature, data.test$duration)
lines(lowess(data.test$temperature,t.pred$fit))
lines(lowess(data.test$temperature,t.upper))
lines(lowess(data.test$temperature,t.lower))
detach(data.test)
# Assessing goodness of fit
# R-squared
# rsq: coefficient of determination, computed as the squared Pearson
# correlation between x and y.
rsq <- function(x, y) {
  r <- cor(x, y)
  r^2
}
# R-squared between the test-set predictions and the test-set outcome.
# BUG FIX: after detach(data.test), the bare name `duration` resolved to the
# still-attached data.training, so predictions on data.test were correlated
# with the TRAINING outcomes (both have 200 rows, so no error was raised).
# Reference data.test$duration explicitly.
rsq1 <- rsq(loess[,1], data.test$duration)
rsq2 <- rsq(quadratic[,1], data.test$duration)
RSQ <- c(rsq1, rsq2)
names(RSQ) <- c("Non-parametric", "Linear quadratic")
sort(RSQ)
# Residual sum of squares (computed from the TRAINING residuals of each fit,
# unlike RSQ/MSEP which use test-set predictions).
RSS1 <- sum(residuals(fit.loess)^2)
RSS2 <- sum(residuals(fit2)^2)
RSS <- c(RSS1, RSS2)
names(RSS) <- c("Non-parametric", "Linear quadratic")
sort(RSS)
# Pearson estimated residual variance
# (loess uses n - trace of the smoother matrix as residual df).
sigma.squared1 <- RSS1 / (n - traceS)
sigma.squared2 <- RSS2 / fit2$df.residual
sigma.squared <- c(sigma.squared1, sigma.squared2)
names(sigma.squared) <- c("Non-parametric", "Linear quadratic")
sort(sigma.squared)
# Mean squared error (same quantities as sigma.squared above).
MSE1 <- sum(residuals(fit.loess)^2) / (n - traceS)
MSE2 <- sum(residuals(fit2)^2) / (fit2$df.residual)
MSE <- c(MSE1, MSE2)
names(MSE) <- c("Non-parametric", "Linear quadratic")
sort(MSE)
# Root mean squared error
RMSE1 <- sqrt(MSE1)
RMSE2 <- sqrt(MSE2)
RMSE <- c(RMSE1, RMSE2)
names(RMSE) <- c("Non-parametric", "Linear quadratic")
sort(RMSE)
# MSEP: mean squared error of prediction on the test set.
# BUG FIX: data.test was detached earlier, so the bare name `duration`
# resolved to the still-attached data.training; prediction errors must be
# taken against the TEST-set outcome, referenced explicitly.
MSEP1 <- mean((loess[,1] - data.test$duration)^2)
MSEP2 <- mean((quadratic[,1] - data.test$duration)^2)
MSEP <- c(MSEP1, MSEP2)
names(MSEP) <- c("Non-parametric", "Linear quadratic")
sort(MSEP)
# Collect all criteria into a single comparison table (rows = criteria,
# columns = models).
compare.results <- data.frame(rbind(RSQ,RSS,sigma.squared, MSE, RMSE, MSEP),
                              row.names = c('RSQ','RSS','sigma.squared', 'MSE', 'RMSE', 'MSEP'))
names(compare.results) <- c("Non-parametric", "Linear quadratic")
compare.results
# Non-parametric model fits the data better than the quadratic model
# caret::postResample reports RMSE / R-squared / MAE against the test outcome.
caret::postResample(loess[,1], data.test$duration)
caret::postResample(quadratic[,1], data.test$duration)
detach(data.training)
# R version: R version 3.5.0 (2018-04-23) -- "Joy in Playing"
# R packages and Data preparation ====
library(lmtest)
library(MASS)
library(gvlma)
library(rstatix)
library(psych)
library(DescTools)
library(performance)
library(car)
library(robustbase)
library(caret)
library(TeachingDemos)
library(segmented)
library(nortest)
# Data preparation
rm(list = ls())
data.full <- read.table('invertebrate.txt', header = T)
set.seed(0766879)
d.test <- sample(1:dim(data.full)[1], 200 )
data.test <- data.full[d.test, ]
data.training <- data.full[-d.test, ]
# Q1====
# Perform an exploratory analysis of the variables
# (compute descriptive statistics and make histograms, boxplots, scatter plots, . . . )
attach(data.training)
# Descriptive statistics
str(data.training)
summary(data.training)
# Correlation Matrix with P-values
cor_mat(data.training)
cor_pmat(data.training)
cor <- cor(data.training[, !names(data.training) == 'SWI']) # correlation between predictor variables
cor # High correlation bwtween duration and temperature (Question 6)
dim(data.training)
# Exploratory analysis
histNorm <- function(x, densCol = "darkblue", xlab = ''){
m <- mean(x)
std <- sqrt(var(x))
h <- max(hist(x,plot=FALSE)$density)
d <- dnorm(x, mean=m, sd=std)
maxY <- max(h,d)
hist(x, prob=TRUE,
xlab = xlab, ylab="Frequency", ylim=c(0, maxY),
main="Histogram")
curve(dnorm(x, mean=m, sd=std),
col=densCol, lwd=2, add=TRUE)
}
par(mfrow = c(3,2))
histNorm(data.training$SWI, xlab = "SWI")
histNorm(data.training$SWF, xlab = "SWF")
histNorm(data.training$temperature, xlab = "temperature")
histNorm(data.training$size, xlab = "size")
histNorm(data.training$management, xlab = "management")
# management as a Categorical predictor, not normally distributed
histNorm(data.training$duration, xlab = "duration")
# boxplots
par(mfrow = c(3,2))
boxplot(SWI, main = "Boxplot of SWI") # two outliers, both smaller than 4.5.
boxplot(SWF, main = "Boxplot of SWF") # three outliers
boxplot(temperature, main = "Boxplot of temperature") # three outliers
boxplot(size, main = "Boxplot of size")
boxplot(management, main = "Boxplot of management")
boxplot(duration, main = "Boxplot of duration") # one outlier
lab_y <- seq(1,200)
source("https://raw.githubusercontent.com/talgalili/R-code-snippets/master/boxplot.with.outlier.label.r")
# Load the function to label all the outliers in a boxplot
par(mfrow = c(2,3))
# OBS 51, 286
boxplot.with.outlier.label(SWI, row.names(data.training), main = "Boxplot of SWI")
# OBS 51, 139, 351
boxplot.with.outlier.label(SWF, row.names(data.training), main = "Boxplot of SWF")
# OBS 1, 3, 397
boxplot.with.outlier.label(temperature, row.names(data.training), main = "Boxplot of temperature")
boxplot.with.outlier.label(size, row.names(data.training), main = "Boxplot of size")
boxplot.with.outlier.label(management, row.names(data.training), main = "Boxplot of management")
# OBS 7
boxplot.with.outlier.label(duration, row.names(data.training), main = "Boxplot of duration")
# ~ 1, 3, 7, 51, 139, 286, 351, 397
# Scatter plot
par(mfrow = c(1,1))
pairs(data.training)
pairs(data.training, panel = function(x,y) {points(x,y); lines(lowess(x,y), col = "red")})
pairs.panels(data.training) # Robust fitting is done using lowess regression.
pairs.panels(data.training, lm=TRUE) # lm=TRUE, linear regression fits are shown for both y by x and x by y.
# Q2====
# fit a linear first-order regression model with SWI as outcome
# and SWF, temperature, size and management (not duration!) as predictors.
data.training <- data.training[, !names(data.training) == 'duration']
n_test <- dim(data.training)[1]
n_test
p <- dim(data.training)[2]
p
# linear first-order regression model
fit <- lm(SWI ~ SWF+temperature+size+management, data = data.training)
summary(fit) # test whether a particular regression coefficient is significantly different from zero.
# ANOVA, test whether the regression model as a whole is performing significantly better than a null model
anova(fit)
# SWF, temperature, management are significant, size non-significant
# Individual confidence intervals
alpha <- 0.05
confint(fit, level = 1 - alpha)
# Simultaneous confidence intervals with Bonferroni correction
alpha <- 0.05
confint(fit, level = 1 - alpha / 2)
# (a) Check whether a first-order model adequately captures the variability in the outcome====
# Multiple R-squared: 0.5805, Adjusted R-squared: 0.5719
# R^2
summary(fit)
summary(fit)$r.squared
# 58% of the total variance in the outcome is explained by the first-order model
# (b) Check the Gauss-Markov conditions====
# Check model assumptions
fit.res <- residuals(fit)
fit.stdres <- stdres(fit)
fit.fittedvalues <- fitted.values(fit)
par(mfrow = c(2,2))
qqnorm(fit.stdres, main="")
qqline(fit.stdres)
plot(fit.res, xlab = "Index", ylab = "Residual")
plot(fit.fittedvalues, fit.res, xlab = "Fitted value", ylab = "Residual")
lines(lowess(fit.res ~ fit.fittedvalues), col = "red")
plot(fit.stdres, xlab = "Index", ylab = "Standardized residual", ylim = c(-3,3))
abline(h = -2.5, lty = 2)
abline(h = 2.5, lty = 2)
# UL: small deviations from normal distributed residuals
# UR: pattern indicates Homoscedasticity (no heteroscedastic errors)
# BL: curved band suggests linearity assumption is not satisfied
# BR: outliers
par(mfrow = c(2,2))
plot(SWF, fit$residuals, ylab = "Residual")
lines(lowess(fit$residuals ~ SWF), col = "red")
# plot indicates the linear model is defective (add quadratic terms)
plot(temperature, fit$residuals, ylab = "Residual")
lines(lowess(fit$residuals ~ temperature), col = "red")
# plot indicates the errors are heteroscedastic
plot(size, fit$residuals, ylab = "Residual")
lines(lowess(fit$residuals ~ size), col = "red")
plot(management, fit$residuals, ylab = "Residual")
lines(lowess(fit$residuals ~ management), col = "red")
par(mfrow = c(1,1))
# Gauss-Markov conditions tests
summary(gvlma.lm(fit))
# Checking the normality of the residuals
plot(fit, which = 2)
# Shapiro-Wilk test and Kolmogorov-Smirnov test Testing Normality
shapiro.test(residuals(fit))
LillieTest(residuals(fit))
check_normality(fit) # OK: Residuals appear as normally distributed
# Checking the linearity of the relationship
plot(fit, which = 1)
# plot the relationship between the fitted values and the observed values for the outcome variable
# A straight line suggests that there’s nothing grossly wrong
plot(fit.fittedvalues, SWI, xlab = "Fitted Values", ylab = "Observed Values")
lines(lowess(SWI ~ fit.fittedvalues), col = 'red')
# for each individual predictor
par(mfrow = c(2,2))
# partial-residual plots, cannot contain interactions
termplot(fit, partial.resid = TRUE)
crPlots(fit)
ceresPlots(fit) # less prone to leakage of nonlinearity among the predictors.
residualPlots(model = fit) # Adding SWF^2
# this function also reports the results of a bunch of curvature tests.
# For a predictor variable X, this test is equivalent to adding a new predictor
# to the model corresponding to X^2. If it comes up significant, it implies that
# there is some nonlinear relationship between the variable and the residuals.
par(mfrow = c(1,1))
# Checking the homogeneity of variance
plot(fit, which = 3)
ncvTest(fit)
bptest(fit, ~ SWF + temperature + size + management) # there’s no violation of heteroskedasticity
coeftest(fit, vcov= hccm)
# if homogeneity of variance is violated, sandwich estimators is applied.
# Because the homogeneity of variance assumption wasn’t violated,
# these t tests are pretty much identical to the former ones in the summary(fit)
# Checking independence, which we assume to be met
DurbinWatsonTest(fit, alternative="two.sided", data=data.training)
durbinWatsonTest(fit, alternative="two.sided", data=data.training)
# (c) Check whether there is (severe) multicollinearity====
# Correlation
corx <- cor
# small correlations between variables
# VIF: the largest VIF is larger than 10, or
# if the mean of the VIF values is considerably larger than 1.
VIF <- diag(solve(corx))
max(VIF)
mean(VIF)
# Eigenvalues: A condition number nj > 30 is an indication for multicollinearity.
corx.eig <- eigen(corx)$values
corx.eig
sqrt(max(corx.eig)/corx.eig)
# indicating no multicollinearity
# (d) Check whether there are influential outliers====
plot(fit, which = 4)
plot(fit, which = 5)
plot(fit, which = 6)
# This function creates a “bubble” plot of Studentized residuals versus hat values,
# with the areas of the circles representing the observations proportional to Cook's distance.
# Vertical reference lines are drawn at twice and three times the average hat value,
# horizontal reference lines at -2, 0, and 2 on the Studentized-residual scale.
influencePlot(fit, main="influence Plot", sub="cook's distance")
# added-variable partial-regression plots
# identify data points with high leverage and
# influential data points that might not have high leverage.
par(mfrow = c(2,2))
avPlot(fit, variable = 'SWF')
avPlot(fit, variable = 'management')
avPlot(fit, variable = 'temperature')
avPlot(fit, variable = 'size')
par(mfrow = c(1,1))
# Classical approaches to find the vertical outliers and the leverage points====
# Standardized residuals
par(mfrow = c(1,1))
fit.stdres <- stdres(fit)
plot(fit.stdres, ylim = c(-4,4), ylab = "Standardized residuals")
abline(h = c(-2.5,2.5), col = "red")
label_x <- seq(1,200)
text(subset(fit.stdres,fit.stdres >2.5), labels=row.names(subset(data.training, fit.stdres > 2.5)),
x = as.character(label_x[fit.stdres >2.5]), cex = 0.7, pos = 1)
text(subset(fit.stdres,fit.stdres < -2.5), labels=row.names(subset(data.training, fit.stdres < -2.5)),
x = as.character(label_x[fit.stdres < -2.5]), cex = 0.7, pos = 1)
which(fit.stdres > 2.5 | fit.stdres < -2.5)
# Studentized residuals
fit.studres <- studres(fit)
plot(fit.studres, ylim = c(-4,4), ylab = "Studentized residuals")
abline(h = c(-2.5,2.5), col = "red")
text(subset(fit.studres,fit.studres >2.5), labels=row.names(subset(data.training, fit.studres > 2.5)),
x = as.character(label_x[fit.studres >2.5]), cex = 0.7, pos = 1)
text(subset(fit.studres,fit.studres < -2.5), labels=row.names(subset(data.training, fit.studres < -2.5)),
x = as.character(label_x[fit.studres < -2.5]), cex = 0.7, pos = 1)
which(fit.studres > 2.5 | fit.studres < -2.5)
# Classical approaches to find the leverage points
# Diagonal elements of hat matrix
fit.influence <- influence(fit)
plot(fit.influence$hat, ylab = "Diagonal elements of hat matrix")
h = 2*p/n_test
abline(h = h, col = "red")
text(subset(fit.influence$hat,fit.influence$hat > h),
labels=row.names(subset(data.training, fit.influence$hat > h)),
x = as.character(label_x[fit.influence$hat > h]), cex = 0.7, pos = 1)
which(fit.influence$hat > h)
# measures of influence
# DFFITS
fit.dffits <- dffits(fit)
plot(fit.dffits, ylab = "DFFITS")
h = 2*sqrt(p/n_test)
abline(h = h, col = "red")
text(subset(fit.dffits,fit.dffits > h), labels=row.names(subset(data.training, fit.dffits > h)),
x = as.character(label_x[fit.dffits > h]), cex = 0.7, pos = 1)
which(fit.dffits > h)
# Cook's distance
fit.Cd <- cooks.distance(fit)
plot(fit.Cd, ylab = "Cook's distance")
abline(h = 1, col = "red")
which(fit.Cd > 1)
# DFBETAS
fit.dfbetas <- dfbetas(fit)
plot(fit.dfbetas, ylab = "DFBETAS")
h = 2/sqrt(n_test)
abline(h = h, col = "red")
x = fit.dfbetas[,2] > h
text(subset(fit.dfbetas[,2], x), labels=row.names(subset(data.training, x)),
x = data.frame(fit.dfbetas)[,1][x], cex = 0.7, pos = 4)
which(fit.dfbetas[,2] > h)
# Outliers are not noticed by Cook's distance, but DFFITS and DFBETAS are more powerful.
# Bonferroni Outlier Test
outlierTest(fit) # No outliers with Bonferroni p < 0.05
# robust diagnostic plot====
# Robust regression diagnostics: fit reweighted least trimmed squares (LTS)
# models and separate vertical outliers from (good/bad) leverage points using
# robust distances.
# Reweighted LTS (maximal (50%) breakdown value)
par(mfrow = c(1,1))
RLTS <- ltsReg(SWI ~ SWF+temperature+size+management, data = data.training)
summary(RLTS)
summary(RLTS)$r.squared # 63%
# Note: It is strongly recommend using lmrob() instead of ltsReg!
# NOTE(review): the result below is assigned to an object named 'lmrob', which
# masks the robustbase::lmrob() function for the rest of the session --
# consider renaming the object.
lmrob <- lmrob(SWI ~ SWF+temperature+size+management, data = data.training)
summary(lmrob)
summary(lmrob)$r.squared # 60%
# rqq: Normal Q-Q plot of the standardized residuals;
# rindex: plot of the standardized residuals versus their index;
# rfit: plot of the standardized residuals versus fitted values;
# rdiag: regression diagnostic plot.
plot(RLTS, which = 'rqq')
plot(RLTS, which = 'rindex')
plot(RLTS, which = 'rfit') # No. 113, 151, 190, 198 --> OBS 218, 286, 371, 395
plot(RLTS, which = 'rdiag')
# Diagnostic plot
RLTS.stdres <- RLTS$residuals/RLTS$scale
plot(RLTS$RD, RLTS.stdres, ylim = c(-5,5),
xlab = "Robust distance", ylab = "Standardized 50% LTS residuals",
main = 'Regression Diagnostic Plot')
# Cut-off for the robust distances: sqrt of the chi-square 97.5% quantile with
# (p - 1) degrees of freedom (p is set earlier in the script).
v = sqrt(qchisq(0.975, p - 1))
abline(v = sqrt(qchisq(0.975, p - 1)), col = "red")
abline(h = c(-2.5,2.5), col = "red")
text(subset(RLTS.stdres,RLTS.stdres >2.5), labels=row.names(subset(data.training, RLTS.stdres > 2.5)),
x = as.character(RLTS$RD[RLTS.stdres >2.5]), cex = 0.7, pos = 2)
text(subset(RLTS.stdres,RLTS.stdres < -2.5), labels=row.names(subset(data.training, RLTS.stdres < -2.5)),
x = as.character(RLTS$RD[RLTS.stdres < -2.5]), cex = 0.7, pos = 2)
which(RLTS.stdres > 2.5 | RLTS.stdres < -2.5) # vertical outliers: OBS 218 286 371 395
text(subset(RLTS.stdres, RLTS$RD > v), labels=row.names(subset(data.training, RLTS$RD > v)),
x = as.character(RLTS$RD[RLTS$RD > v]), cex = 0.7, pos = 1)
which(RLTS$RD > v) # good leverage points: OBS 1, 3, 27
# RLTS (30% breakdown value)
# alpha = 0.7 retains 70% of the observations, i.e. a 30% breakdown value.
RLTS2 <- ltsReg(SWI ~ I(SWF^2)+temperature+management, data = data.training, alpha = 0.7)
summary(RLTS2)
# Detection of outliers
plot(RLTS2, which = 'rqq')
plot(RLTS2, which = 'rindex')
plot(RLTS2, which = 'rfit')
plot(RLTS2, which = 'rdiag')
RLTS2.stdres <- RLTS2$residuals/RLTS2$scale
plot(RLTS2$RD, RLTS2.stdres, ylim = c(-5,5),
xlab = "Robust distance", ylab = "Standardized 30% LTS residuals",
main = 'Regression Diagnostic Plot')
v = sqrt(qchisq(0.975, p - 1))
abline(v = sqrt(qchisq(0.975, p - 1)), col = "red")
abline(h = c(-2.5,2.5), col = "red")
text(subset(RLTS2.stdres,RLTS2.stdres >2.5),
labels=row.names(subset(data.training, RLTS2.stdres > 2.5)),
x = as.character(RLTS2$RD[RLTS2.stdres >2.5]), cex = 0.7, pos = 2)
text(subset(RLTS2.stdres,RLTS2.stdres < -2.5),
labels=row.names(subset(data.training, RLTS2.stdres < -2.5)),
x = as.character(RLTS2$RD[RLTS2.stdres < -2.5]), cex = 0.7, pos = 2)
which(RLTS2.stdres > 2.5 | RLTS2.stdres < -2.5)
text(subset(RLTS2.stdres, RLTS2$RD > v), labels=row.names(subset(data.training, RLTS2$RD > v)),
x = as.character(RLTS2$RD[RLTS2$RD > v]), cex = 0.7, pos = 1)
which(RLTS2$RD > v)
# Q3====
# Build a good linear regression model may containing higher-order terms, interactions,
# transformed variables and/or other methods to improve the model assumptions.
# Model 1: Variable selection with Interaction terms====
# First look at the interaction terms. Generally the third and higher order interactions
# are weak and hard to interpret, so look at the main effects and second order interactions.
# The R formula syntax using ^2 to mean "all two-way interactions of the variables".
fit_with <- lm(SWI ~ (SWF + temperature + management + size)^2, data = data.training)
summary(fit_with) # temperature:management interaction term is significant at the 5% level
# Backward elimination based on AIC
fit.full <- lm(SWI ~ (SWF + temperature + management + size)^2, data = data.training)
fit.full
stepAIC(fit.full, scope = list(upper = ~ (SWF + temperature + management + size)^2, lower = ~ 1),
direction = "backward")
# AIC=-339.91 to AIC=-348.44
# SWI ~ SWF + temperature + management + temperature:management
# Forward selection based on AIC
fit.null <- lm(SWI ~ 1, data = data.training)
fit.null
stepAIC(fit.null, scope = list(upper = ~ (SWF + temperature + management + size)^2, lower = ~ 1),
direction = "forward")
# AIC=-179.47 to AIC=-348.44
# SWI ~ SWF + temperature + management + temperature:management
# Stepwise selection based on AIC (started at full model)
stepAIC(fit.full, scope=list(upper = ~ (SWF + temperature + management + size)^2, lower = ~ 1),
direction = "both")
# AIC=-339.91 to AIC=-348.44
# SWI ~ SWF + temperature + management + temperature:management
# Stepwise selection based on AIC (started at null model)
stepAIC(fit.null, scope=list(upper = ~ (SWF + temperature + management + size)^2, lower = ~ 1),
direction = "both")
# AIC=-179.47 to AIC=-348.44
# SWI ~ SWF + temperature + management + temperature:management
# Refit the model all four selection runs agreed on.
fit_with <- lm(formula = SWI ~ SWF + temperature * management, data = data.training)
summary(fit_with) # temperature:management interaction term is only significant at the 10% level
# Reason 1: Many statisticians use a much larger significance level for the AB interaction F test than
# what they use for the main effects. The reason is to get a higher chance to detect existing interactions.
summary(fit_with)$r.squared # 0.5872287, 59% of the total variance in the outcome is explained
# Reason 2: stepAIC is equivalent to applying a hypothesis test with the significance level 0.157.
# According to the stepwise selection procedure, the model with the interaction term has smaller AIC
# as well as larger goodness-of-fit (R^2)
# Reason 3: As for interpretation, the longer being subject to nature management, the higher stability of
# nature area, the less infulence of the temperature change.
relweights <- function(fit, ...) {
  # Johnson's relative weights: decompose a fitted lm's R-square into the
  # percentage contribution of each predictor, working on the correlation
  # matrix of the model frame.  Draws a barplot and returns the weights.
  #
  # fit: a fitted lm object (main effects; the model frame's first column must
  #      be the response).
  # ...: further arguments passed on to barplot() (e.g. col).
  # Returns: a one-column matrix "Weights" with one row per predictor,
  #          summing to 100 (% of R-square).
  corr_mat <- cor(fit$model)
  k <- ncol(corr_mat)
  pred_corr <- corr_mat[2:k, 2:k]   # predictor intercorrelations
  resp_corr <- corr_mat[2:k, 1]     # predictor-response correlations
  eig <- eigen(pred_corr)
  # correlations between original predictors and new orthogonal variables
  lambda <- eig$vectors %*% diag(sqrt(eig$values)) %*% t(eig$vectors)
  # regression coefficients of Y on the orthogonal variables
  ortho_beta <- solve(lambda) %*% resp_corr
  r2 <- colSums(ortho_beta^2)
  raw_weights <- lambda^2 %*% ortho_beta^2
  import <- 100 * (raw_weights / r2)
  dimnames(import) <- list(names(fit$model[2:k]), "Weights")
  # plot results
  barplot(t(import), names.arg = rownames(import), ylab = "% of R-Square",
          xlab = "Predictor Variables", main = "Relative Importance of Predictor Variables",
          sub = paste("R-Square = ", round(r2, digits = 3)),
          ...)
  return(import)
}
# Compare the relative importance of the predictors in the initial fit and in
# the selected model.
# NOTE(review): relweights() works on the model frame's main-effect columns;
# fit_with contains a temperature:management interaction, which the
# decomposition ignores -- interpret with care.
relweights(fit, col = "lightgrey")
relweights(fit_with, col = "blue")
# size is dropped
# Model 2: Variable selection without Interaction terms====
# Backward elimination based on F-statistic/t-statistic
# NOTE(review): each update() call below re-lists the terms removed in earlier
# steps; this is redundant (they are already absent from fit_drop) but
# harmless -- removing an absent term is a no-op.
dropterm(fit.full, test = "F")
fit_drop <- update(fit.full, ~ . - temperature:size)
dropterm(fit_drop, test = "F")
fit_drop <- update(fit_drop, ~ . - temperature:size - SWF:management)
dropterm(fit_drop, test = "F")
fit_drop <- update(fit_drop, ~ . - temperature:size - SWF:management - SWF:size)
dropterm(fit_drop, test = "F")
fit_drop <- update(fit_drop, ~ . - temperature:size - SWF:management - SWF:size - management:size)
dropterm(fit_drop, test = "F")
fit_drop <- update(fit_drop, ~ . - temperature:size - SWF:management - SWF:size - management:size - size)
dropterm(fit_drop, test = "F")
fit_drop <- update(fit_drop, ~ . - temperature:size - SWF:management - SWF:size - management:size - size - SWF:temperature)
dropterm(fit_drop, test = "F")
fit_drop <- update(fit_drop, ~ . - temperature:size - SWF:management - SWF:size - management:size - size - SWF:temperature - temperature:management)
dropterm(fit_drop, test = "F")
# SWI ~ SWF + temperature + management
# Forward selection based on F-statistic/t-statistic
addterm(fit.null, ~ . + SWF + temperature + size + management + SWF:temperature + SWF:management + SWF:size +
temperature:management + temperature:size + management:size, test = "F")
fit_add <- update(fit.null, ~ . + SWF)
addterm(fit_add, ~ . + temperature + size + management + SWF:temperature + SWF:management + SWF:size +
temperature:management + temperature:size + management:size, test = "F")
fit_add <- update(fit_add, ~ . + temperature)
addterm(fit_add, ~. + size + management + SWF:temperature + SWF:management + SWF:size +
temperature:management + temperature:size + management:size, test = "F")
fit_add <- update(fit_add, ~ . + management)
addterm(fit_add, ~. + size + SWF:temperature + SWF:management + SWF:size +
temperature:management + temperature:size + management:size, test = "F")
# SWI ~ SWF + temperature + management
# Both directions agree on the main-effects model; refit it explicitly.
fit_without <- lm(formula = SWI ~ SWF + temperature + management, data = data.training)
summary(fit_without)
summary(fit_without)$r.squared # 0.5804086, 58% of the total variance in the outcome is explained
anova(fit_with, fit_without) # Reason 1: P = 0.07421, not significantly different between the two models
# −2log-likelihood+kn, where n represents the number of parameters in the fitted model, and k=2 for the usual AIC, or
# k=log(N) (N being the number of observations) for the so-called BIC or SBC (Schwarz's Bayesian criterion)
stepAIC(fit_without, scope = list(upper = ~ SWF + temperature * management, lower = ~ 1), direction = "both")
AIC(fit_with, fit_without)
# Reason 2: AIC=-347.16 to AIC=-348.44, little increase in AIC value
# Reason 3: When an interaction isn’t significant, drop it if you are just checking for the presence of an interaction
# to make sure you are specifying the model correctly. The interaction uses up df,
# changes the meaning of the lower order coefficients and complicates the model.
# But if you actually hypothesized an interaction that wasn’t significant, leave it in the model.
# The insignificant interaction means something in this case – it helps you evaluate your hypothesis.
# Taking it out can do more damage in specification error than in will in the loss of df.
# Reason 4: Overall, no improvement on the assumptions of the model 1, which has a few outliers
# Model 3: Adding the Quadratic term of SWF====
# Try a curvature term for SWF, with and without the temperature:management
# interaction, and re-run stepwise selection in each case.
fit3_with <- lm(SWI ~ SWF + I(SWF^2) + temperature * management, data = data.training)
summary(fit3_with) # SWF is non-significant, temperature:management is only significant at the 10% level
summary(fit3_with)$r.squared # 61% of the total variance in the outcome is explained
# Stepwise selection
fit.full <- lm(SWI ~ SWF + I(SWF^2) + temperature * management,
data = data.training)
fit.full
fit.null <- lm(SWI ~ 1, data = data.training)
fit.null
# Stepwise selection based on AIC (started at full model)
stepAIC(fit.full, scope=list(upper = ~ SWF + I(SWF^2) + temperature * management, lower = ~ 1), direction = "both")
# AIC=-357.84 to AIC=-359.58
# SWI ~ I(SWF^2) + temperature + management + temperature:management
# Stepwise selection based on AIC (started at null model)
stepAIC(fit.null, scope=list(upper = ~ SWF + I(SWF^2) + temperature * management, lower = ~ 1), direction = "both")
# AIC=-179.47 to AIC=-359.58
# SWI ~ I(SWF^2) + temperature + management + temperature:management
# Refit the selected model (linear SWF term dropped).
fit3_with <- lm(SWI ~ I(SWF^2) + temperature * management, data = data.training)
summary(fit3_with)
summary(fit3_with)$r.squared # 61% of the total variance in the outcome is explained
fit3_without <- lm(SWI ~ SWF + I(SWF^2) + temperature + management, data = data.training)
summary(fit3_without) # SWF is non-significant
summary(fit3_without)$r.squared # 60% of the total variance in the outcome is explained
# Stepwise selection
fit.full <- lm(SWI ~ SWF + I(SWF^2) + temperature + management,
data = data.training)
fit.full
fit.null <- lm(SWI ~ 1, data = data.training)
fit.null
# Stepwise selection based on AIC (started at full model)
stepAIC(fit.full, scope=list(upper = ~ SWF + I(SWF^2) + temperature + management, lower = ~ 1), direction = "both")
# AIC=-357.03 to AIC=-358.68
# SWI ~ I(SWF^2) + temperature + management
# Stepwise selection based on AIC (started at null model)
stepAIC(fit.null, scope=list(upper = ~ SWF + I(SWF^2) + temperature + management, lower = ~ 1), direction = "both")
# AIC=-179.47 to AIC=-358.68
# SWI ~ I(SWF^2) + temperature + management
fit3_without <- lm(SWI ~ I(SWF^2) + temperature + management, data = data.training)
summary(fit3_without)
summary(fit3_without)$r.squared # 60% of the total variance in the outcome is explained
# Model 4: Transformations====
# Box-Cox transformation on Y, one of the solutions to the problem of linearality ====
sum(data.training$SWI <= 0) # response should be strictly positive for Box-Cox
par(mfrow = c(1,1))
out_without <- boxcox(SWI ~ I(SWF^2)+temperature+management, plotit = TRUE, data = data.training)
# Pick the lambda maximizing the profile log-likelihood.  which.max() replaces
# the original `which(y == max(y))`: it avoids an exact floating-point
# equality comparison and always returns a single index.
lambda_without <- out_without$x[which.max(out_without$y)]
lambda_without # lambda = 0.7878788
out_with <- boxcox(SWI ~ I(SWF^2)+temperature*management, plotit = TRUE, data = data.training)
lambda_with <- out_with$x[which.max(out_with$y)]
lambda_with # lambda = 0.8282828
# powerTransform uses the maximum likelihood-like approach of Box and Cox (1964) to select a transformation
# of a univariate or multivariate response for normality, linearity and/or constant variance.
powerTransform(fit3_with, family="bcnPower")
powerTransform(fit3_without, family="bcnPower")
# lambda is approximately equal to 1, no Box-cox transformation
# X Variable transformation of temperature====
# try segmented linear regression/Piecewise linear regression====
# psi = c(12, 27) supplies the starting guesses for the two break-points.
fit_without_segmented <- segmented.lm(fit3_without, seg.Z = ~temperature, psi = c(12, 27), control=seg.control(display=FALSE))
summary(fit_without_segmented)
# Estimated Break-Point(s): 12.748, 27.200
fit_without_segmented.res <- residuals(fit_without_segmented)
fit_without_segmented.stdres <- stdres(fit_without_segmented)
fit_without_segmented.fittedvalues <- fitted.values(fit_without_segmented)
# Standard 2x2 residual diagnostics: Q-Q, residuals vs index, residuals vs
# fitted, standardized residuals with +-2.5 bands.
par(mfrow = c(2,2))
qqnorm(fit_without_segmented.stdres, main="")
qqline(fit_without_segmented.stdres)
plot(fit_without_segmented.res, xlab = "Index", ylab = "Residual")
plot(fit_without_segmented.fittedvalues, fit_without_segmented.res, xlab = "Fitted value", ylab = "Residual")
lines(lowess(fit_without_segmented.res ~ fit_without_segmented.fittedvalues), col = "red")
plot(fit_without_segmented.stdres, xlab = "Index", ylab = "Standardized residual", ylim = c(-3,3))
abline(h = -2.5, lty = 2)
abline(h = 2.5, lty = 2)
# Residuals against each predictor (attached from data.training).
par(mfrow = c(1,3))
plot(I(SWF^2), fit_without_segmented$residuals, ylab = "Residual")
lines(lowess(fit_without_segmented$residuals ~ I(SWF^2)), col = "red")
# plot indicates the errors are heteroscedastic
plot(temperature, fit_without_segmented$residuals, ylab = "Residual", main = 'Piecewise')
lines(lowess(fit_without_segmented$residuals ~ temperature), col = "red")
# plot indicates the linear model is defective (curve segmentation > 20) and the errors are heteroscedastic
plot(management, fit_without_segmented$residuals, ylab = "Residual")
lines(lowess(fit_without_segmented$residuals ~ management), col = "red")
par(mfrow = c(1,1))
fit_with_segmented <- segmented.lm(fit3_with, seg.Z = ~temperature, psi = c(12, 27), control=seg.control(display=FALSE))
# Estimated Break-Point(s): 12.621, 27.200
summary(fit_with_segmented)
fit_with_segmented.res <- residuals(fit_with_segmented)
fit_with_segmented.stdres <- stdres(fit_with_segmented)
fit_with_segmented.fittedvalues <- fitted.values(fit_with_segmented)
par(mfrow = c(2,2))
qqnorm(fit_with_segmented.stdres, main="")
qqline(fit_with_segmented.stdres)
plot(fit_with_segmented.res, xlab = "Index", ylab = "Residual")
plot(fit_with_segmented.fittedvalues, fit_with_segmented.res, xlab = "Fitted value", ylab = "Residual")
lines(lowess(fit_with_segmented.res ~ fit_with_segmented.fittedvalues), col = "red")
plot(fit_with_segmented.stdres, xlab = "Index", ylab = "Standardized residual", ylim = c(-3,3))
abline(h = -2.5, lty = 2)
abline(h = 2.5, lty = 2)
par(mfrow = c(2,2))
plot(I(SWF^2), fit_with_segmented$residuals, ylab = "Residual")
lines(lowess(fit_with_segmented$residuals ~ I(SWF^2)), col = "red")
# plot indicates the errors are heteroscedastic
plot(temperature, fit_with_segmented$residuals, ylab = "Residual")
lines(lowess(fit_with_segmented$residuals ~ temperature), col = "red")
# plot indicates the linear model is defective (curve segmentation > 20) and the errors are heteroscedastic
plot(management, fit_with_segmented$residuals, ylab = "Residual")
lines(lowess(fit_with_segmented$residuals ~ management), col = "red")
plot(temperature*management, fit_with_segmented$residuals, ylab = "Residual")
lines(lowess(fit_with_segmented$residuals ~ temperature*management), col = "red")
par(mfrow = c(1,1))
# link function====
# The abs(management + 1e-8) term keeps the predictor strictly positive for
# boxTidwell -- presumably management contains zeros; verify against the data.
boxTidwell(SWI ~ I(SWF^2) + temperature + abs(management+0.00000001),
other.x = ~ size, data = data.training)
# lambda = 0.5, but not significant with P = 0.2012
# Visual comparison of candidate transformations of temperature.
# NOTE(review): logit() here is presumably car::logit, which auto-rescales
# inputs outside [0, 1] with a warning -- confirm this is intended for a
# temperature variable.
plot(SWI ~ temperature, data = data.training)
lines(lowess(SWI ~ temperature))
plot(SWI ~ sqrt(temperature), data = data.training)
lines(lowess(SWI ~ sqrt(temperature)))
plot(SWI ~ logit(temperature), data = data.training)
lines(lowess(SWI ~ logit(temperature)))
plot(SWI ~ I(temperature^2), data = data.training)
lines(lowess(SWI ~ I(temperature^2)))
plot(SWI ~ log(temperature), data = data.training)
lines(lowess(SWI ~ log(temperature)))
# try the logarithms transformation, logit transformation, square-root transformation
# and even the quadratic term, in order to spread out the tails of the distribution.
f1 <- lm(SWI ~ I(SWF^2)+I(temperature^2)+management, data = data.training)
f11 <- lm(SWI ~ I(SWF^2)+I(temperature^2)*management, data = data.training)
f2<- lm(SWI ~ I(SWF^2)+logit(temperature)+management, data = data.training)
f22<- lm(SWI ~ I(SWF^2)+logit(temperature)*management, data = data.training)
f3 <- lm(SWI ~ I(SWF^2)+log(temperature)+management, data = data.training)
f33 <- lm(SWI ~ I(SWF^2)+log(temperature)*management, data = data.training)
fit4_without <- lm(SWI ~ I(SWF^2)+sqrt(temperature)+management, data = data.training)
fit4_with <- lm(SWI ~ I(SWF^2)+sqrt(temperature)*management, data = data.training)
# Model 5: Weighted least squares model=====
# Two-step feasible WLS: (1) fit OLS, (2) regress |standardized residuals| on
# the predictors, (3) use 1 / fitted^2 as weights -- observations with larger
# predicted residual spread get smaller weight.
fit4_without <- lm(formula = SWI ~ I(SWF^2) + temperature + management, data = data.training)
w_without <- 1/lm(abs(stdres(fit4_without)) ~ I(SWF^2)+ temperature +management, data = data.training)$fitted.values^2
fit5_nontrafo <- lm(SWI ~ I(SWF^2)+ temperature +management, weight = w_without, data = data.training)
fit4_without_trafo <- lm(formula = SWI ~ I(SWF^2) + sqrt(temperature) + management, data = data.training)
w_trafo <- 1/lm(abs(stdres(fit4_without_trafo)) ~ I(SWF^2)+ sqrt(temperature) +management, data = data.training)$fitted.values^2
fit5_trafo <- lm(SWI ~ I(SWF^2)+ sqrt(temperature) +management, weight = w_trafo, data = data.training)
fit4_with <- lm(formula = SWI ~ I(SWF^2) + temperature * management, data = data.training)
w_with <- 1/lm(abs(stdres(fit4_with)) ~ I(SWF^2)+temperature*management, data = data.training)$fitted.values^2
fit5_with_nontrafo <- lm(SWI ~ I(SWF^2)+temperature*management, weight = w_with, data = data.training)
fit4_with_trafo <- lm(formula = SWI ~ I(SWF^2) + sqrt(temperature) * management, data = data.training)
w_with_trafo <- 1/lm(abs(stdres(fit4_with_trafo)) ~ I(SWF^2)+sqrt(temperature) *management, data = data.training)$fitted.values^2
fit5_with_trafo <- lm(SWI ~ I(SWF^2)+sqrt(temperature) *management, weight = w_with_trafo, data = data.training)
# Model 6: Boxcox transformation of Model 5====
out_without <- boxcox(fit5_nontrafo, plotit = TRUE)
# which.max() replaces the floating-point equality test `y == max(y)` and
# guarantees a single lambda index.
lambda_without <- out_without$x[which.max(out_without$y)]
lambda_without # close to 0.5 -> sqrt(y)
out_without <- boxcox(fit5_trafo, plotit = TRUE)
lambda_without <- out_without$x[which.max(out_without$y)]
lambda_without # sqrt(y)
# Refit the WLS models with the square-root-transformed response
# (weights spelled out as `weights =` instead of relying on partial matching).
fit6_nontrafo <- lm(SWI^0.5 ~ I(SWF^2) + temperature + management,
                    weights = w_without, data = data.training)
fit6_trafo <- lm(SWI^0.5 ~ I(SWF^2) + sqrt(temperature) + management,
                 weights = w_trafo, data = data.training)
# Check model assumptions
# Leave-one-out methods: PRESS
# Models with small PRESSp values (or PRESSp/n) are considered good candidate models
# PRESS is computed from the leave-one-out identity e_i / (1 - h_ii), so no
# refitting is needed.  NOTE(review): fit6_* use SWI^0.5 as response, so their
# PRESS/MSE values are on the sqrt scale and not directly comparable to the
# fit5_* values on the raw scale.
PRESS1 <- sum((residuals(fit5_nontrafo) / (1 - lm.influence(fit5_nontrafo)$hat))^2)
PRESS2 <- sum((residuals(fit5_trafo) / (1 - lm.influence(fit5_trafo)$hat))^2)
PRESS3 <- sum((residuals(fit6_nontrafo) / (1 - lm.influence(fit6_nontrafo)$hat))^2)
PRESS4 <- sum((residuals(fit6_trafo) / (1 - lm.influence(fit6_trafo)$hat))^2)
PRESS5 <- sum((residuals(fit5_with_nontrafo) / (1 - lm.influence(fit5_with_nontrafo)$hat))^2)
PRESS6 <- sum((residuals(fit5_with_trafo) / (1 - lm.influence(fit5_with_trafo)$hat))^2)
PRESS <- c(PRESS1, PRESS2,PRESS3, PRESS4,PRESS5, PRESS6)
names(PRESS) <- c("fit5_nontrafo", 'fit5_trafo','fit6_nontrafo',"fit6_trafo",'fit5_with_nontrafo','fit5_with_trafo')
sort(PRESS)
# MSE
MSE1 <- summary(fit5_nontrafo)$sigma^2
MSE2 <- summary(fit5_trafo)$sigma^2
MSE3 <- summary(fit6_nontrafo)$sigma^2
MSE4 <- summary(fit6_trafo)$sigma^2
MSE5 <- summary(fit5_with_nontrafo)$sigma^2
MSE6 <- summary(fit5_with_trafo)$sigma^2
MSE <- c(MSE1, MSE2, MSE3,MSE4, MSE5, MSE6)
names(MSE) <- c("fit5_nontrafo", 'fit5_trafo','fit6_nontrafo',"fit6_trafo",'fit5_with_nontrafo','fit5_with_trafo')
sort(MSE)
detach(data.training)
# Q4====
# Fit both models to the validation data and compare their performance:
# model fit6_trafo (sqrt(temperature)) vs. model fit6_nontrafo (raw temperature).
# NOTE: the original script called attach(data.test) twice in a row (a
# copy-paste duplicate); the single detach(data.test) at the end of Q4 would
# then have left one stale copy on the search path.  Attach exactly once.
attach(data.test)
# Model fit6_nontrafo: SWI^0.5 ~ I(SWF^2) + temperature + management, weights = w_without
# Re-estimate the WLS weights on the validation data from an initial OLS fit.
model5_OLS <- lm(SWI ~ I(SWF^2) + temperature + management, data = data.test)
w5_without <- 1/lm(abs(stdres(model5_OLS)) ~ I(SWF^2)+ temperature +management, data = data.test)$fitted.values^2
model6_nontrafo.val <- lm(SWI^0.5 ~ I(SWF^2) + temperature + management, weights = w5_without, data = data.test)
summary(model6_nontrafo.val); summary(fit6_nontrafo)
summary(model6_nontrafo.val)$r.squared
# Model fit6_trafo: SWI^0.5 ~ I(SWF^2) + sqrt(temperature) + management, weights = w5_trafo
model5_trafo <- lm(SWI ~ I(SWF^2) + sqrt(temperature) + management, data = data.test)
w5_trafo <- 1/lm(abs(stdres(model5_trafo)) ~ I(SWF^2)+ sqrt(temperature) +management, data = data.test)$fitted.values^2
model6_trafo.val <- lm(SWI^0.5 ~ I(SWF^2) + sqrt(temperature) + management, weights = w5_trafo, data = data.test)
summary(model6_trafo.val); summary(fit6_trafo)
summary(model6_trafo.val)$r.squared
# Compare estimated coefficients and standard errors
# Individual confidence intervals
alpha <- 0.05
confint(fit6_trafo, level = 1 - alpha)
confint(model6_trafo.val, level = 1 - alpha)
confint(model6_nontrafo.val, level = 1 - alpha)
confint(fit6_nontrafo, level = 1 - alpha)
# Simultaneous confidence intervals with Bonferroni correction
# NOTE(review): level = 1 - alpha/2 corrects for only g = 2 simultaneous
# intervals; each model has 4 coefficients, so 1 - alpha/4 would be the usual
# Bonferroni choice -- confirm the intended number of comparisons.
alpha <- 0.05
confint(fit6_trafo, level = 1 - alpha/2)
confint(model6_trafo.val, level = 1 - alpha/2)
confint(model6_nontrafo.val, level = 1 - alpha/2)
confint(fit6_nontrafo, level = 1 - alpha/2)
# Prediction
# A prediction interval reflects the uncertainty of a single value,
# while a confidence interval reflects the uncertainty of the predicted mean.
# NOTE: fit6_* predict on the SWI^0.5 scale.
pred_trafo <- predict(fit6_trafo, newdata = data.test, interval = "prediction")
pred_nontrafo <- predict(fit6_nontrafo, newdata = data.test, interval = "prediction")
predict(fit6_trafo, newdata = data.test, interval = "confidence")
predict(fit6_nontrafo, newdata = data.test, interval = "confidence")
# MSEP (mean squared error of prediction)
# Both validation models were fitted with SWI^0.5 as the response, so
# predict() returns values on the square-root scale.  Compare them against
# sqrt(SWI) on that same scale -- the original code compared the sqrt-scale
# predictions to raw SWI, mixing scales and inflating the MSEP.
MSEP1 <- mean((predict(model6_trafo.val, newdata = data.test) - sqrt(SWI))^2)
MSEP2 <- mean((predict(model6_nontrafo.val, newdata = data.test) - sqrt(SWI))^2)
MSEP <- c(MSEP1, MSEP2)
names(MSEP) <- c("model6_trafo.val", "model6_nontrafo.val")
sort(MSEP)
# Leave-one-out methods: PRESS
# Models with small PRESSp values (or PRESSp/n) are considered good candidate models
# Both quantities below are on the sqrt(SWI) scale, matching the models' response.
PRESS1 <- sum((residuals(model6_trafo.val) / (1 - lm.influence(model6_trafo.val)$hat))^2)
PRESS2 <- sum((residuals(model6_nontrafo.val) / (1 - lm.influence(model6_nontrafo.val)$hat))^2)
PRESS <- c(PRESS1, PRESS2)
names(PRESS) <- c('model6_trafo.val',"model6_nontrafo.val")
sort(PRESS)
# MSE
MSE1 <- summary(model6_trafo.val)$sigma^2
MSE2 <- summary(model6_nontrafo.val)$sigma^2
MSE <- c(MSE1, MSE2)
names(MSE) <- c("model6_trafo.val", 'model6_nontrafo.val')
sort(MSE)
detach(data.test)
# Q5====
# fit your ultimate model (fit6_nontrafo) of preference to the full dataset.
attach(data.full)
# Model fit6_nontrafo: SWI^0.5 ~ I(SWF^2) + temperature+management, weights = w_without
# Re-estimate the WLS weights on the full data from an initial OLS fit,
# mirroring the two-step procedure used on the training data.
fit_full <- lm(SWI ~ I(SWF^2) + temperature + management, data = data.full)
w_full <- 1/lm(abs(stdres(fit_full)) ~ I(SWF^2)+ temperature +management, data = data.full)$fitted.values^2
model6.full <- lm(SWI^0.5 ~ I(SWF^2) + temperature + management, weights = w_full, data = data.full)
summary(model6.full)
summary(model6.full)$r.squared
# Individual confidence intervals
alpha <- 0.05
confint(fit6_nontrafo, level = 1 - alpha)
confint(model6_nontrafo.val, level = 1 - alpha)
confint(model6.full, level = 1 - alpha)
# Simultaneous confidence intervals with Bonferroni correction
# NOTE(review): 1 - alpha/2 corrects for 2 simultaneous intervals; the model
# has 4 coefficients -- confirm the intended number of comparisons.
alpha <- 0.05
confint(fit6_nontrafo, level = 1 - alpha/2)
confint(model6_nontrafo.val, level = 1 - alpha/2)
confint(model6.full, level = 1 - alpha/2)
# Check model assumptions
model6.full.res <- residuals(model6.full)
model6.full.stdres <- stdres(model6.full)
model6.full.fittedvalues <- fitted.values(model6.full)
# Standard 2x2 residual diagnostics.
par(mfrow = c(2,2))
qqnorm(model6.full.stdres, main="")
qqline(model6.full.stdres)
plot(model6.full.res, xlab = "Index", ylab = "Residual")
plot(model6.full.fittedvalues, model6.full.res, xlab = "Fitted value", ylab = "Residual")
lines(lowess(model6.full.res ~ model6.full.fittedvalues), col = "red")
plot(model6.full.stdres, xlab = "Index", ylab = "Standardized residual", ylim = c(-3,3))
abline(h = -2.5, lty = 2)
abline(h = 2.5, lty = 2)
# UL: small deviations from normal distributed residuals
# UR: pattern indicates no heteroscedastic errors
# BL: linearity assumption is satisfied
# BR: outliers
par(mfrow = c(1,3))
plot(I(SWF^2), model6.full$residuals, ylab = "Residual")
lines(lowess(model6.full$residuals ~ I(SWF^2)), col = "red")
plot(temperature, model6.full$residuals, ylab = "Residual")
lines(lowess(model6.full$residuals ~ temperature), col = "red")
plot(management, model6.full$residuals, ylab = "Residual")
lines(lowess(model6.full$residuals ~ management), col = "red")
par(mfrow = c(1,1))
# Checking the normality of the residuals
plot(model6.full, which = 2)
# Shapiro-Wilk test and Kolmogorov-Smirnov test Testing Normality
# NOTE(review): LillieTest (DescTools), sf.test (nortest) and check_normality
# (performance) come from add-on packages loaded earlier in the script.
shapiro.test(residuals(model6.full))
LillieTest(residuals(model6.full))
sf.test(residuals(model6.full))
check_normality(model6.full)# OK: Residuals appear as normally distributed
# Checking the linearity of the relationship
plot(model6.full, which = 1)
# plot the relationship between the fitted values and the observed values for the outcome variable
plot(model6.full.fittedvalues, SWI, xlab = "Fitted Values", ylab = "Observed Values")
lines(lowess(SWI ~ model6.full.fittedvalues), col = 'red')
# for each individual predictor
par(mfrow = c(1,3))
# partial-residual plots, cannot contain interactions
termplot(model6.full, partial.resid = TRUE)
crPlots(model6.full)
par(mfrow = c(1,1))
# Checking the homogeneity of variance
# https://stats.stackexchange.com/questions/193061/what-is-the-difference-between-these-two-breusch-pagan-tests
# In short, the studentized BP test is more robust, usually go with bptest,
# with studentize = TRUE (default) and varformula = ~ fitted.values(my.lm) as options,
# for an initial approach for homoskedasticity.
plot(model6.full, which = 3)
ncvTest(model6.full)
bptest(model6.full, ~ SWF + temperature + management)
bptest(model6.full, ~ fitted.values(model6.full)) # accepted
# Heteroscedasticity-consistent (sandwich) standard errors as a cross-check.
coeftest(model6.full, vcov= hccm)
summary(model6.full)
# if homogeneity of variance is violated, sandwich estimators is applied.
# Because the homogeneity of variance assumption wasn’t violated,
# these t tests are pretty much identical to the former ones in the summary(model6.full)
# outliers
plot(model6.full, which = 4)
plot(model6.full, which = 5)
influencePlot(model6.full, main="influence Plot", sub="cook's distance")
par(mfrow = c(1,3))
avPlot(model6.full, variable = 'I(SWF^2)')
avPlot(model6.full, variable = 'management')
avPlot(model6.full, variable = 'temperature')
par(mfrow = c(1,1))
# Standardized residuals
model6.full.stdres <- stdres(model6.full)
plot(model6.full.stdres, ylim = c(-4,4), ylab = "Standardized residuals")
abline(h = c(-2.5,2.5), col = "red")
# x-positions for the labels; assumes data.full has 400 rows -- TODO confirm
# against nrow(data.full).
label_x <- seq(1,400)
text(subset(model6.full.stdres,model6.full.stdres >2.5), labels=row.names(subset(data.full, model6.full.stdres > 2.5)),
x = as.character(label_x[model6.full.stdres >2.5]), cex = 0.7, pos = 1)
text(subset(model6.full.stdres,model6.full.stdres < -2.5), labels=row.names(subset(data.full, model6.full.stdres < -2.5)),
x = as.character(label_x[model6.full.stdres < -2.5]), cex = 0.7, pos = 1)
which(model6.full.stdres > 2.5 | model6.full.stdres < -2.5)
# Studentized residuals
model6.full.studres <- studres(model6.full)
plot(model6.full.studres, ylim = c(-4,4), ylab = "Studentized residuals")
abline(h = c(-2.5,2.5), col = "red")
text(subset(model6.full.studres,model6.full.studres >2.5), labels=row.names(subset(data.full, model6.full.studres > 2.5)),
x = as.character(label_x[model6.full.studres >2.5]), cex = 0.7, pos = 1)
text(subset(model6.full.studres,model6.full.studres < -2.5), labels=row.names(subset(data.full, model6.full.studres < -2.5)),
x = as.character(label_x[model6.full.studres < -2.5]), cex = 0.7, pos = 1)
which(model6.full.studres > 2.5 | model6.full.studres < -2.5)
# Bonferroni Outlier Test
outlierTest(model6.full) # OBS 1 as a outlier with Bonferroni p < 0.05
detach(data.full)
# Q6====
# investigating possible association between duration (outcome) and temperature (predictor).
data.training <- data.full[-d.test, ]
attach(data.training)
# (a) Fit non-parametric models with k=1 and k=2, ====
# for spans 0.25, 0.5, and 0.75 and choose the best-fitting model
# lines() connects points in the order supplied; sort by temperature first so
# each fitted curve is drawn left-to-right instead of as a zig-zag (the
# original code drew over the unsorted x values).  Note also that
# predict.loess has no `data` argument -- the original `data =` was silently
# ignored and predictions at the training points are simply the fitted values.
ord <- order(temperature)
s <- c(0.25, 0.5, 0.75)
colors <- c("red", "green", "blue")
# Local linear regression (k = 1)
plot(temperature, duration, main = "Local linear regression")
for (i in seq_along(s)) {
  lo_fit <- loess(duration ~ temperature, span = s[i], degree = 1)
  lines(temperature[ord], fitted(lo_fit)[ord], col = colors[i])
}
legend(5, 40, c("span = 0.25", "span = 0.5", "span = 0.75"), lty = 1, col = colors)
# Local quadratic regression (k = 2)
plot(temperature, duration, main = "Local quadratic regression")
for (i in seq_along(s)) {
  lo_fit <- loess(duration ~ temperature, span = s[i], degree = 2)
  lines(temperature[ord], fitted(lo_fit)[ord], col = colors[i])
}
legend(5, 40, c("span = 0.25", "span = 0.5", "span = 0.75"), lty = 1, col = colors)
# Check model assumptions
# ========== k=1, span=0.25
fit.loess1<-loess(duration~temperature,degree = 1,span=0.25)
fit.loess1
par(mfrow=c(2,2))
plot(duration~temperature)
lines(loess.smooth(temperature,duration,span=0.25,degree=1),col='red')
plot(residuals(fit.loess1)~temperature)
lines(loess.smooth(temperature,residuals(fit.loess1),span=0.25,degree=1),col='red')
abline(h=0,lty=2)
plot(fitted(fit.loess1),sqrt(abs(residuals(fit.loess1))))
lines(loess.smooth(fitted(fit.loess1),sqrt(abs(residuals(fit.loess1))),span=0.25,degree=1),col='red')
qqnorm(residuals(fit.loess1))
qqline(residuals(fit.loess1),col='red')
# ========== k=1, span=0.5
fit.loess2<-loess(duration~temperature,degree = 1,span=0.5)
fit.loess2
par(mfrow=c(2,2))
plot(duration~temperature)
lines(loess.smooth(temperature,duration,span=0.5,degree=1),col='red')
plot(residuals(fit.loess2)~temperature)
lines(loess.smooth(temperature,residuals(fit.loess2),span=0.5,degree=1),col='red')
abline(h=0,lty=2)
plot(fitted(fit.loess2),sqrt(abs(residuals(fit.loess2))))
lines(loess.smooth(fitted(fit.loess2),sqrt(abs(residuals(fit.loess2))),span=0.5,degree=1),col='red')
qqnorm(residuals(fit.loess2))
qqline(residuals(fit.loess2),col='red')
# ========== k=1, span=0.75
fit.loess3<-loess(duration~temperature,degree = 1,span=0.75)
fit.loess3
par(mfrow=c(2,2))
plot(duration~temperature)
lines(loess.smooth(temperature,duration,span=0.75,degree=1),col='red')
plot(residuals(fit.loess3)~temperature)
lines(loess.smooth(temperature,residuals(fit.loess3),span=0.75,degree=1),col='red')
abline(h=0,lty=2)
plot(fitted(fit.loess3),sqrt(abs(residuals(fit.loess3))))
lines(loess.smooth(fitted(fit.loess3),sqrt(abs(residuals(fit.loess3))),span=0.75,degree=1),col='red')
qqnorm(residuals(fit.loess3))
qqline(residuals(fit.loess3),col='red')
# ========== k=2, span=0.25
# Local quadratic loess (degree 2) with a narrow smoothing window, plus
# the standard 2x2 diagnostic panel.
fit.loess4<-loess(duration~temperature,degree = 2,span=0.25)
fit.loess4
par(mfrow=c(2,2))
# Panel 1: data with the smoothed trend overlaid.
plot(duration~temperature)
lines(loess.smooth(temperature,duration,span=0.25,degree=2),col='red')
# Panel 2: residuals vs temperature with a dashed zero reference line.
plot(residuals(fit.loess4)~temperature)
lines(loess.smooth(temperature,residuals(fit.loess4),span=0.25,degree=2),col='red')
abline(h=0,lty=2)
# Panel 3: scale-location (sqrt of absolute residuals vs fitted values).
plot(fitted(fit.loess4),sqrt(abs(residuals(fit.loess4))))
lines(loess.smooth(fitted(fit.loess4),sqrt(abs(residuals(fit.loess4))),span=0.25,degree=2),col='red')
# Panel 4: normal Q-Q of the residuals.
qqnorm(residuals(fit.loess4))
qqline(residuals(fit.loess4),col='red')
# ========== k=2, span=0.5
# Local quadratic loess (degree 2) with a moderate smoothing window, plus
# the standard 2x2 diagnostic panel.
fit.loess5<-loess(duration~temperature,degree = 2,span=0.5)
fit.loess5
par(mfrow=c(2,2))
# Panel 1: data with the smoothed trend overlaid.
plot(duration~temperature)
lines(loess.smooth(temperature,duration,span=0.5,degree=2),col='red')
# Panel 2: residuals vs temperature with a dashed zero reference line.
plot(residuals(fit.loess5)~temperature)
lines(loess.smooth(temperature,residuals(fit.loess5),span=0.5,degree=2),col='red')
abline(h=0,lty=2)
# Panel 3: scale-location (sqrt of absolute residuals vs fitted values).
plot(fitted(fit.loess5),sqrt(abs(residuals(fit.loess5))))
lines(loess.smooth(fitted(fit.loess5),sqrt(abs(residuals(fit.loess5))),span=0.5,degree=2),col='red')
# Panel 4: normal Q-Q of the residuals.
qqnorm(residuals(fit.loess5))
qqline(residuals(fit.loess5),col='red')
# ========== k=2, span=0.75
# Local quadratic loess (degree 2) with a wide smoothing window, followed
# by the standard 2x2 diagnostic panel: data + trend, residuals vs
# predictor, scale-location, and normal Q-Q.
fit.loess6 <- loess(duration ~ temperature, degree = 2, span = 0.75)
print(fit.loess6)
res6 <- residuals(fit.loess6)
fitted6 <- fitted(fit.loess6)
par(mfrow = c(2, 2))
# Panel 1: raw data with the smoothed trend overlaid.
plot(duration ~ temperature)
lines(loess.smooth(temperature, duration, span = 0.75, degree = 2), col = 'red')
# Panel 2: residuals vs temperature with a dashed zero reference line.
plot(res6 ~ temperature)
lines(loess.smooth(temperature, res6, span = 0.75, degree = 2), col = 'red')
abline(h = 0, lty = 2)
# Panel 3: scale-location (sqrt of absolute residuals vs fitted values).
plot(fitted6, sqrt(abs(res6)))
lines(loess.smooth(fitted6, sqrt(abs(res6)), span = 0.75, degree = 2), col = 'red')
# Panel 4: normal Q-Q of the residuals.
qqnorm(res6)
qqline(res6, col = 'red')
# k =2, span = 0.75 is the best-fitting model
# (b) Fit a quadratic linear model====
# Overlay linear, quadratic, and cubic least-squares fits on the scatter plot.
par(mfrow = c(1, 1))
plot(temperature, duration, main = "Polynomial regression")
# Evaluate each polynomial on the same 101-point grid over [0, 60] that
# curve() would use by default.
xs <- seq(0, 60, length.out = 101)
# Linear model
fit1 <- lm(duration ~ temperature, data = data.training)
abline(fit1, col = "red")
# Quadratic model
fit2 <- lm(duration ~ temperature + I(temperature^2), data = data.training)
fit2.coef <- coef(fit2)
lines(xs, fit2.coef[1] + fit2.coef[2] * xs + fit2.coef[3] * xs^2, col = "green")
# Cubic model
fit3 <- lm(duration ~ temperature + I(temperature^2) + I(temperature^3), data = data.training)
fit3.coef <- coef(fit3)
lines(xs, fit3.coef[1] + fit3.coef[2] * xs + fit3.coef[3] * xs^2 + fit3.coef[4] * xs^3, col = "blue")
# Add legend
legend(5, 40, c("linear", "quadratic", "cubic"), lty = 1, col = c("red", "green", "blue"))
# (c) a plot of the data, the best nonparametric fit, and the linear fit====
par(mfrow = c(1, 1))
plot(temperature, duration, main = "Quadratic vs non-parametric regression")
# Local quadratic regression: k = 2, span = 0.75
fit.loess <- loess(duration ~ temperature, span = 0.75, degree = 2)
# BUG FIX: predict.loess() takes `newdata`, not `data`, so the original
# `predict(fit.loess, data = data.training)` silently ignored the argument
# and returned the fitted values anyway. Use fitted() explicitly, and sort
# by temperature so lines() draws one smooth curve instead of a zig-zag
# through unsorted points.
ord <- order(temperature)
lines(temperature[ord], fitted(fit.loess)[ord], col = 'red')
# Linear model
fit1 <- lm(duration ~ temperature, data = data.training)
abline(fit1, col = "black")
# Quadratic model
fit2 <- lm(duration ~ temperature + I(temperature^2), data = data.training)
fit2.coef <- fit2$coefficients
curve(fit2.coef[1] + fit2.coef[2]*x + fit2.coef[3]*x^2, 0, 60, add = TRUE, col = "green")
# Cubic model
fit3 <- lm(duration ~ temperature + I(temperature^2) + I(temperature^3), data = data.training)
fit3.coef <- fit3$coefficients
curve(fit3.coef[1] + fit3.coef[2]*x + fit3.coef[3]*x^2 + fit3.coef[4]*x^3, 0, 60, add = TRUE, col = "blue")
legend(4, 43, c("Non-parametric fit", "linear", "quadratic", "cubic"), lty = 1, col = c("red", "black", "green", "blue"))
# According to the visual interpretation, Non-parametric model fits the data best.
# (d) Test whether the non-parametric model of your choice====
# fits the data better than the quadratic model
summary(fit.loess)
summary(fit2) # 69%
# Compare quadratic linear model with non-parametric model
# Approximate F-test: SSE0 from the parametric (quadratic, 3-coefficient)
# model, SSE1 from the loess fit, using the trace of the loess smoother
# matrix as the effective number of parameters of the non-parametric fit.
traceS <- fit.loess$trace.hat
SSE0 <- sum(residuals(fit2)^2)
SSE1 <- sum(residuals(fit.loess)^2)
n <- dim(data.training)[1]
# Numerator df = traceS - 3 (extra effective parameters of the loess fit
# over the 3 quadratic coefficients); denominator df = n - traceS.
Fvalue <- ((SSE0 - SSE1) / (traceS - 3)) / (SSE1 / (n - traceS))
Fvalue
Fcrit <- qf(0.95, traceS - 3, n - traceS)
Fcrit
1 - pf(Fvalue, traceS - 3, n - traceS)
# The difference between the non-parametric model and the quadratic model
# is significant, since the p-value is (numerically) zero.
# Prediction
# Pointwise approximate 95% normal confidence bands for predictions on the
# held-out test set, for both the loess fit and the quadratic fit.
# NOTE(review): attach()/detach() is fragile; explicit data.test$... access
# would be safer, but the code is kept as-is here.
attach(data.test)
t.pred <- predict(fit.loess, data.test, se = TRUE)
t.upper <- t.pred$fit + qnorm(0.975) * t.pred$se.fit
t.lower <- t.pred$fit - qnorm(0.975) * t.pred$se.fit
# NOTE(review): this data frame is named `loess`, which masks stats::loess()
# from this point on; the name is referenced later in the script, so it is
# kept, but a rename (e.g. loess.pred) would be safer.
loess <- data.frame("pred" = t.pred$fit, "lower" = t.lower, "upper" = t.upper)
plot(data.test$temperature, data.test$duration)
# lowess() is used only to draw smooth band curves through the (unsorted)
# test points.
lines(lowess(data.test$temperature,t.pred$fit))
lines(lowess(data.test$temperature,t.upper))
lines(lowess(data.test$temperature,t.lower))
# Same construction for the quadratic model's test-set predictions.
t.pred <- predict(fit2, data.test, se = TRUE)
t.upper <- t.pred$fit + qnorm(0.975) * t.pred$se.fit
t.lower <- t.pred$fit - qnorm(0.975) * t.pred$se.fit
quadratic <- data.frame("pred" = t.pred$fit, "lower" = t.lower, "upper" = t.upper)
plot(data.test$temperature, data.test$duration)
lines(lowess(data.test$temperature,t.pred$fit))
lines(lowess(data.test$temperature,t.upper))
lines(lowess(data.test$temperature,t.lower))
detach(data.test)
# Assessing goodness of fit
# R-squared
# Squared Pearson correlation between two numeric vectors of equal length;
# used below as a predictive R-squared between model predictions and
# observed values. Returns NA if either vector contains NA (cor() default).
rsq <- function(x, y) {
  cor(x, y)^2
}
# Predictive R-squared of each model's test-set predictions.
# NOTE(review): after detach(data.test) above, the bare `duration` here
# resolves to the previously attached training data, whose length may
# differ from the test-set predictions in loess[,1]/quadratic[,1] —
# verify this is the intended comparison.
rsq1 <- rsq(loess[,1], duration) # r.squared = 0.8168894
rsq2 <- rsq(quadratic[,1], duration) # r.squared = 0.6893309
RSQ <- c(rsq1, rsq2)
names(RSQ) <- c("Non-parametric", "Linear quadratic")
sort(RSQ)
# Residual sum of squares
# In-sample residual sums of squares for the loess and quadratic fits.
RSS1 <- sum(residuals(fit.loess)^2)
RSS2 <- sum(residuals(fit2)^2)
RSS <- c(RSS1, RSS2)
names(RSS) <- c("Non-parametric", "Linear quadratic")
sort(RSS)
# Pearson estimated residual variance
# The loess fit uses its effective degrees of freedom (traceS); the linear
# model uses its ordinary residual df.
sigma.squared1 <- RSS1 / (n - traceS)
sigma.squared2 <- RSS2 / fit2$df.residual
sigma.squared <- c(sigma.squared1, sigma.squared2)
names(sigma.squared) <- c("Non-parametric", "Linear quadratic")
sort(sigma.squared)
# Mean squared error
# NOTE(review): MSE1/MSE2 use exactly the same formula as sigma.squared1/2
# above; the duplication is kept so both rows appear in the comparison
# table below.
MSE1 <- sum(residuals(fit.loess)^2) / (n - traceS)
MSE2 <- sum(residuals(fit2)^2) / (fit2$df.residual)
MSE <- c(MSE1, MSE2)
names(MSE) <- c("Non-parametric", "Linear quadratic")
sort(MSE)
# Root mean squared error
RMSE1 <- sqrt(MSE1)
RMSE2 <- sqrt(MSE2)
RMSE <- c(RMSE1, RMSE2)
names(RMSE) <- c("Non-parametric", "Linear quadratic")
sort(RMSE)
# MSEP
# Mean squared error of prediction computed on the test-set predictions.
# NOTE(review): as with RSQ above, the bare `duration` at this point
# resolves to the attached training data — confirm lengths/alignment with
# the test-set prediction vectors.
MSEP1 <- mean((loess[,1] - duration)^2)
MSEP2 <- mean((quadratic[,1] - duration)^2)
MSEP <- c(MSEP1, MSEP2)
names(MSEP) <- c("Non-parametric", "Linear quadratic")
sort(MSEP)
# Collect all goodness-of-fit measures into a single comparison table.
compare.results <- data.frame(rbind(RSQ,RSS,sigma.squared, MSE, RMSE, MSEP),
row.names = c('RSQ','RSS','sigma.squared', 'MSE', 'RMSE', 'MSEP'))
names(compare.results) <- c("Non-parametric", "Linear quadratic")
compare.results
# Non-parametric model fits the data better than the quadratic model
# caret::postResample reports RMSE / Rsquared / MAE for each prediction set.
caret::postResample(loess[,1], duration)
caret::postResample(quadratic[,1], duration)
detach(data.training) |
# Driver script: run the behavioural time-segmentation Gibbs sampler on
# simulated data and inspect the estimated breakpoints.
# NOTE(review): rm(list=ls()) and setwd() inside a script are fragile (they
# wipe the caller's workspace and assume a fixed directory layout); kept
# as-is for behavioural compatibility.
rm(list=ls(all=TRUE))
set.seed(1)
setwd('U:\\GIT_models\\git_segmentation_behavior')
# Load the sampler helper functions and the main sampler entry point.
source('gibbs functions.R')
source('gibbs sampler main function.R')
# Simulated input: one column per observed data type.
dat=read.csv('fake data.csv',as.is=T)
ndata.types=ncol(dat)
#priors
# Dirichlet concentration parameter. NOTE(review): alpha and ndata.types
# are not referenced again in this script — presumably read inside the
# sourced sampler code; verify.
alpha=0.01
ngibbs=5000
#run gibbs sampler
breakpt=time.segm.behavior(dat=dat,ngibbs=ngibbs)
#compare estimated breakpoints to true break points
length(breakpt)
# NOTE(review): abline() requires an open base plot — presumably created
# inside time.segm.behavior(); confirm a plot exists at this point.
abline(v=breakpt,lty=3,col='green')
| /run gibbs.R | no_license | drvalle1/git_segmentation_behavior | R | false | false | 423 | r | rm(list=ls(all=TRUE))
set.seed(1)
setwd('U:\\GIT_models\\git_segmentation_behavior')
source('gibbs functions.R')
source('gibbs sampler main function.R')
dat=read.csv('fake data.csv',as.is=T)
ndata.types=ncol(dat)
#priors
alpha=0.01
ngibbs=5000
#run gibbs sampler
breakpt=time.segm.behavior(dat=dat,ngibbs=ngibbs)
#compare estimated breakpoints to true break points
length(breakpt)
abline(v=breakpt,lty=3,col='green')
|
# This file is generated by make.paws. Please do not edit here.
#' @importFrom paws.common new_handlers new_service set_config
NULL
#' Amazon QuickSight
#'
#' @description
#' Amazon QuickSight API Reference
#'
#' Amazon QuickSight is a fully managed, serverless business intelligence
#' service for the AWS Cloud that makes it easy to extend data and insights
#' to every user in your organization. This API reference contains
#' documentation for a programming interface that you can use to manage
#' Amazon QuickSight.
#'
#' @param
#' config
#' Optional configuration of credentials, endpoint, and/or region.
#'
#' @section Service syntax:
#' ```
#' svc <- quicksight(
#' config = list(
#' credentials = list(
#' creds = list(
#' access_key_id = "string",
#' secret_access_key = "string",
#' session_token = "string"
#' ),
#' profile = "string"
#' ),
#' endpoint = "string",
#' region = "string"
#' )
#' )
#' ```
#'
#' @examples
#' \dontrun{
#' svc <- quicksight()
#' svc$cancel_ingestion(
#' Foo = 123
#' )
#' }
#'
#' @section Operations:
#' \tabular{ll}{
#' \link[=quicksight_cancel_ingestion]{cancel_ingestion} \tab Cancels an ongoing ingestion of data into SPICE \cr
#' \link[=quicksight_create_dashboard]{create_dashboard} \tab Creates a dashboard from a template \cr
#' \link[=quicksight_create_data_set]{create_data_set} \tab Creates a dataset \cr
#' \link[=quicksight_create_data_source]{create_data_source} \tab Creates a data source \cr
#' \link[=quicksight_create_group]{create_group} \tab Creates an Amazon QuickSight group \cr
#' \link[=quicksight_create_group_membership]{create_group_membership} \tab Adds an Amazon QuickSight user to an Amazon QuickSight group \cr
#' \link[=quicksight_create_iam_policy_assignment]{create_iam_policy_assignment} \tab Creates an assignment with one specified IAM policy, identified by its Amazon Resource Name (ARN) \cr
#' \link[=quicksight_create_ingestion]{create_ingestion} \tab Creates and starts a new SPICE ingestion on a dataset Any ingestions operating on tagged datasets inherit the same tags automatically for use in access control \cr
#' \link[=quicksight_create_template]{create_template} \tab Creates a template from an existing QuickSight analysis or template \cr
#' \link[=quicksight_create_template_alias]{create_template_alias} \tab Creates a template alias for a template \cr
#' \link[=quicksight_create_theme]{create_theme} \tab Creates a theme \cr
#' \link[=quicksight_create_theme_alias]{create_theme_alias} \tab Creates a theme alias for a theme \cr
#' \link[=quicksight_delete_dashboard]{delete_dashboard} \tab Deletes a dashboard \cr
#' \link[=quicksight_delete_data_set]{delete_data_set} \tab Deletes a dataset \cr
#' \link[=quicksight_delete_data_source]{delete_data_source} \tab Deletes the data source permanently \cr
#' \link[=quicksight_delete_group]{delete_group} \tab Removes a user group from Amazon QuickSight \cr
#' \link[=quicksight_delete_group_membership]{delete_group_membership} \tab Removes a user from a group so that the user is no longer a member of the group \cr
#' \link[=quicksight_delete_iam_policy_assignment]{delete_iam_policy_assignment} \tab Deletes an existing IAM policy assignment \cr
#' \link[=quicksight_delete_template]{delete_template} \tab Deletes a template \cr
#' \link[=quicksight_delete_template_alias]{delete_template_alias} \tab Deletes the item that the specified template alias points to \cr
#' \link[=quicksight_delete_theme]{delete_theme} \tab Deletes a theme \cr
#' \link[=quicksight_delete_theme_alias]{delete_theme_alias} \tab Deletes the version of the theme that the specified theme alias points to \cr
#' \link[=quicksight_delete_user]{delete_user} \tab Deletes the Amazon QuickSight user that is associated with the identity of the AWS Identity and Access Management (IAM) user or role that's making the call \cr
#' \link[=quicksight_delete_user_by_principal_id]{delete_user_by_principal_id} \tab Deletes a user identified by its principal ID \cr
#' \link[=quicksight_describe_dashboard]{describe_dashboard} \tab Provides a summary for a dashboard \cr
#' \link[=quicksight_describe_dashboard_permissions]{describe_dashboard_permissions} \tab Describes read and write permissions for a dashboard \cr
#' \link[=quicksight_describe_data_set]{describe_data_set} \tab Describes a dataset \cr
#' \link[=quicksight_describe_data_set_permissions]{describe_data_set_permissions} \tab Describes the permissions on a dataset \cr
#' \link[=quicksight_describe_data_source]{describe_data_source} \tab Describes a data source \cr
#' \link[=quicksight_describe_data_source_permissions]{describe_data_source_permissions} \tab Describes the resource permissions for a data source \cr
#' \link[=quicksight_describe_group]{describe_group} \tab Returns an Amazon QuickSight group's description and Amazon Resource Name (ARN) \cr
#' \link[=quicksight_describe_iam_policy_assignment]{describe_iam_policy_assignment} \tab Describes an existing IAM policy assignment, as specified by the assignment name \cr
#' \link[=quicksight_describe_ingestion]{describe_ingestion} \tab Describes a SPICE ingestion \cr
#' \link[=quicksight_describe_template]{describe_template} \tab Describes a template's metadata \cr
#' \link[=quicksight_describe_template_alias]{describe_template_alias} \tab Describes the template alias for a template \cr
#' \link[=quicksight_describe_template_permissions]{describe_template_permissions} \tab Describes read and write permissions on a template \cr
#' \link[=quicksight_describe_theme]{describe_theme} \tab Describes a theme \cr
#' \link[=quicksight_describe_theme_alias]{describe_theme_alias} \tab Describes the alias for a theme \cr
#' \link[=quicksight_describe_theme_permissions]{describe_theme_permissions} \tab Describes the read and write permissions for a theme \cr
#' \link[=quicksight_describe_user]{describe_user} \tab Returns information about a user, given the user name \cr
#' \link[=quicksight_get_dashboard_embed_url]{get_dashboard_embed_url} \tab Generates a URL and authorization code that you can embed in your web server code \cr
#' \link[=quicksight_list_dashboards]{list_dashboards} \tab Lists dashboards in an AWS account \cr
#' \link[=quicksight_list_dashboard_versions]{list_dashboard_versions} \tab Lists all the versions of the dashboards in the QuickSight subscription \cr
#' \link[=quicksight_list_data_sets]{list_data_sets} \tab Lists all of the datasets belonging to the current AWS account in an AWS Region \cr
#' \link[=quicksight_list_data_sources]{list_data_sources} \tab Lists data sources in current AWS Region that belong to this AWS account \cr
#' \link[=quicksight_list_group_memberships]{list_group_memberships} \tab Lists member users in a group \cr
#' \link[=quicksight_list_groups]{list_groups} \tab Lists all user groups in Amazon QuickSight \cr
#' \link[=quicksight_list_iam_policy_assignments]{list_iam_policy_assignments} \tab Lists IAM policy assignments in the current Amazon QuickSight account \cr
#' \link[=quicksight_list_iam_policy_assignments_for_user]{list_iam_policy_assignments_for_user} \tab Lists all the IAM policy assignments, including the Amazon Resource Names (ARNs) for the IAM policies assigned to the specified user and group or groups that the user belongs to\cr
#' \link[=quicksight_list_ingestions]{list_ingestions} \tab Lists the history of SPICE ingestions for a dataset \cr
#' \link[=quicksight_list_tags_for_resource]{list_tags_for_resource} \tab Lists the tags assigned to a resource \cr
#' \link[=quicksight_list_template_aliases]{list_template_aliases} \tab Lists all the aliases of a template \cr
#' \link[=quicksight_list_templates]{list_templates} \tab Lists all the templates in the current Amazon QuickSight account \cr
#' \link[=quicksight_list_template_versions]{list_template_versions} \tab Lists all the versions of the templates in the current Amazon QuickSight account \cr
#' \link[=quicksight_list_theme_aliases]{list_theme_aliases} \tab Lists all the aliases of a theme \cr
#' \link[=quicksight_list_themes]{list_themes} \tab Lists all the themes in the current AWS account \cr
#' \link[=quicksight_list_theme_versions]{list_theme_versions} \tab Lists all the versions of the themes in the current AWS account \cr
#' \link[=quicksight_list_user_groups]{list_user_groups} \tab Lists the Amazon QuickSight groups that an Amazon QuickSight user is a member of \cr
#' \link[=quicksight_list_users]{list_users} \tab Returns a list of all of the Amazon QuickSight users belonging to this account \cr
#' \link[=quicksight_register_user]{register_user} \tab Creates an Amazon QuickSight user, whose identity is associated with the AWS Identity and Access Management (IAM) identity or role specified in the request \cr
#' \link[=quicksight_search_dashboards]{search_dashboards} \tab Searches for dashboards that belong to a user \cr
#' \link[=quicksight_tag_resource]{tag_resource} \tab Assigns one or more tags (key-value pairs) to the specified QuickSight resource \cr
#' \link[=quicksight_untag_resource]{untag_resource} \tab Removes a tag or tags from a resource \cr
#' \link[=quicksight_update_dashboard]{update_dashboard} \tab Updates a dashboard in an AWS account \cr
#' \link[=quicksight_update_dashboard_permissions]{update_dashboard_permissions} \tab Updates read and write permissions on a dashboard \cr
#' \link[=quicksight_update_dashboard_published_version]{update_dashboard_published_version} \tab Updates the published version of a dashboard \cr
#' \link[=quicksight_update_data_set]{update_data_set} \tab Updates a dataset \cr
#' \link[=quicksight_update_data_set_permissions]{update_data_set_permissions} \tab Updates the permissions on a dataset \cr
#' \link[=quicksight_update_data_source]{update_data_source} \tab Updates a data source \cr
#' \link[=quicksight_update_data_source_permissions]{update_data_source_permissions} \tab Updates the permissions to a data source \cr
#' \link[=quicksight_update_group]{update_group} \tab Changes a group description \cr
#' \link[=quicksight_update_iam_policy_assignment]{update_iam_policy_assignment} \tab Updates an existing IAM policy assignment \cr
#' \link[=quicksight_update_template]{update_template} \tab Updates a template from an existing Amazon QuickSight analysis or another template \cr
#' \link[=quicksight_update_template_alias]{update_template_alias} \tab Updates the template alias of a template \cr
#' \link[=quicksight_update_template_permissions]{update_template_permissions} \tab Updates the resource permissions for a template \cr
#' \link[=quicksight_update_theme]{update_theme} \tab Updates a theme \cr
#' \link[=quicksight_update_theme_alias]{update_theme_alias} \tab Updates an alias of a theme \cr
#' \link[=quicksight_update_theme_permissions]{update_theme_permissions} \tab Updates the resource permissions for a theme \cr
#' \link[=quicksight_update_user]{update_user} \tab Updates an Amazon QuickSight user
#' }
#'
#' @rdname quicksight
#' @export
quicksight <- function(config = list()) {
  # Start from the generated table of operation functions for this service.
  # (File header: generated by make.paws — comments only, do not edit logic.)
  svc <- .quicksight$operations
  # Apply the caller-supplied credentials/endpoint/region configuration.
  svc <- set_config(svc, config)
  return(svc)
}
# Private API objects: metadata, handlers, interfaces, etc.
# (Generated by make.paws — comments only; do not hand-edit the values.)
.quicksight <- list()
# Operation functions are registered onto this list elsewhere in the
# generated package code.
.quicksight$operations <- list()
# Service metadata: endpoint templates per region partition plus signing
# and protocol details consumed by the request handlers.
.quicksight$metadata <- list(
  service_name = "quicksight",
  endpoints = list("*" = list(endpoint = "quicksight.{region}.amazonaws.com", global = FALSE), "cn-*" = list(endpoint = "quicksight.{region}.amazonaws.com.cn", global = FALSE), "us-iso-*" = list(endpoint = "quicksight.{region}.c2s.ic.gov", global = FALSE), "us-isob-*" = list(endpoint = "quicksight.{region}.sc2s.sgov.gov", global = FALSE)),
  service_id = "QuickSight",
  api_version = "2018-04-01",
  signing_name = NULL,
  json_version = "1.0",
  target_prefix = ""
)
# Builds the low-level service object: REST-JSON protocol handlers with
# SigV4 request signing.
.quicksight$service <- function(config = list()) {
  handlers <- new_handlers("restjson", "v4")
  new_service(.quicksight$metadata, handlers, config)
}
| /paws/R/quicksight_service.R | permissive | jcheng5/paws | R | false | false | 12,021 | r | # This file is generated by make.paws. Please do not edit here.
#' @importFrom paws.common new_handlers new_service set_config
NULL
#' Amazon QuickSight
#'
#' @description
#' Amazon QuickSight API Reference
#'
#' Amazon QuickSight is a fully managed, serverless business intelligence
#' service for the AWS Cloud that makes it easy to extend data and insights
#' to every user in your organization. This API reference contains
#' documentation for a programming interface that you can use to manage
#' Amazon QuickSight.
#'
#' @param
#' config
#' Optional configuration of credentials, endpoint, and/or region.
#'
#' @section Service syntax:
#' ```
#' svc <- quicksight(
#' config = list(
#' credentials = list(
#' creds = list(
#' access_key_id = "string",
#' secret_access_key = "string",
#' session_token = "string"
#' ),
#' profile = "string"
#' ),
#' endpoint = "string",
#' region = "string"
#' )
#' )
#' ```
#'
#' @examples
#' \dontrun{
#' svc <- quicksight()
#' svc$cancel_ingestion(
#' Foo = 123
#' )
#' }
#'
#' @section Operations:
#' \tabular{ll}{
#' \link[=quicksight_cancel_ingestion]{cancel_ingestion} \tab Cancels an ongoing ingestion of data into SPICE \cr
#' \link[=quicksight_create_dashboard]{create_dashboard} \tab Creates a dashboard from a template \cr
#' \link[=quicksight_create_data_set]{create_data_set} \tab Creates a dataset \cr
#' \link[=quicksight_create_data_source]{create_data_source} \tab Creates a data source \cr
#' \link[=quicksight_create_group]{create_group} \tab Creates an Amazon QuickSight group \cr
#' \link[=quicksight_create_group_membership]{create_group_membership} \tab Adds an Amazon QuickSight user to an Amazon QuickSight group \cr
#' \link[=quicksight_create_iam_policy_assignment]{create_iam_policy_assignment} \tab Creates an assignment with one specified IAM policy, identified by its Amazon Resource Name (ARN) \cr
#' \link[=quicksight_create_ingestion]{create_ingestion} \tab Creates and starts a new SPICE ingestion on a dataset Any ingestions operating on tagged datasets inherit the same tags automatically for use in access control \cr
#' \link[=quicksight_create_template]{create_template} \tab Creates a template from an existing QuickSight analysis or template \cr
#' \link[=quicksight_create_template_alias]{create_template_alias} \tab Creates a template alias for a template \cr
#' \link[=quicksight_create_theme]{create_theme} \tab Creates a theme \cr
#' \link[=quicksight_create_theme_alias]{create_theme_alias} \tab Creates a theme alias for a theme \cr
#' \link[=quicksight_delete_dashboard]{delete_dashboard} \tab Deletes a dashboard \cr
#' \link[=quicksight_delete_data_set]{delete_data_set} \tab Deletes a dataset \cr
#' \link[=quicksight_delete_data_source]{delete_data_source} \tab Deletes the data source permanently \cr
#' \link[=quicksight_delete_group]{delete_group} \tab Removes a user group from Amazon QuickSight \cr
#' \link[=quicksight_delete_group_membership]{delete_group_membership} \tab Removes a user from a group so that the user is no longer a member of the group \cr
#' \link[=quicksight_delete_iam_policy_assignment]{delete_iam_policy_assignment} \tab Deletes an existing IAM policy assignment \cr
#' \link[=quicksight_delete_template]{delete_template} \tab Deletes a template \cr
#' \link[=quicksight_delete_template_alias]{delete_template_alias} \tab Deletes the item that the specified template alias points to \cr
#' \link[=quicksight_delete_theme]{delete_theme} \tab Deletes a theme \cr
#' \link[=quicksight_delete_theme_alias]{delete_theme_alias} \tab Deletes the version of the theme that the specified theme alias points to \cr
#' \link[=quicksight_delete_user]{delete_user} \tab Deletes the Amazon QuickSight user that is associated with the identity of the AWS Identity and Access Management (IAM) user or role that's making the call \cr
#' \link[=quicksight_delete_user_by_principal_id]{delete_user_by_principal_id} \tab Deletes a user identified by its principal ID \cr
#' \link[=quicksight_describe_dashboard]{describe_dashboard} \tab Provides a summary for a dashboard \cr
#' \link[=quicksight_describe_dashboard_permissions]{describe_dashboard_permissions} \tab Describes read and write permissions for a dashboard \cr
#' \link[=quicksight_describe_data_set]{describe_data_set} \tab Describes a dataset \cr
#' \link[=quicksight_describe_data_set_permissions]{describe_data_set_permissions} \tab Describes the permissions on a dataset \cr
#' \link[=quicksight_describe_data_source]{describe_data_source} \tab Describes a data source \cr
#' \link[=quicksight_describe_data_source_permissions]{describe_data_source_permissions} \tab Describes the resource permissions for a data source \cr
#' \link[=quicksight_describe_group]{describe_group} \tab Returns an Amazon QuickSight group's description and Amazon Resource Name (ARN) \cr
#' \link[=quicksight_describe_iam_policy_assignment]{describe_iam_policy_assignment} \tab Describes an existing IAM policy assignment, as specified by the assignment name \cr
#' \link[=quicksight_describe_ingestion]{describe_ingestion} \tab Describes a SPICE ingestion \cr
#' \link[=quicksight_describe_template]{describe_template} \tab Describes a template's metadata \cr
#' \link[=quicksight_describe_template_alias]{describe_template_alias} \tab Describes the template alias for a template \cr
#' \link[=quicksight_describe_template_permissions]{describe_template_permissions} \tab Describes read and write permissions on a template \cr
#' \link[=quicksight_describe_theme]{describe_theme} \tab Describes a theme \cr
#' \link[=quicksight_describe_theme_alias]{describe_theme_alias} \tab Describes the alias for a theme \cr
#' \link[=quicksight_describe_theme_permissions]{describe_theme_permissions} \tab Describes the read and write permissions for a theme \cr
#' \link[=quicksight_describe_user]{describe_user} \tab Returns information about a user, given the user name \cr
#' \link[=quicksight_get_dashboard_embed_url]{get_dashboard_embed_url} \tab Generates a URL and authorization code that you can embed in your web server code \cr
#' \link[=quicksight_list_dashboards]{list_dashboards} \tab Lists dashboards in an AWS account \cr
#' \link[=quicksight_list_dashboard_versions]{list_dashboard_versions} \tab Lists all the versions of the dashboards in the QuickSight subscription \cr
#' \link[=quicksight_list_data_sets]{list_data_sets} \tab Lists all of the datasets belonging to the current AWS account in an AWS Region \cr
#' \link[=quicksight_list_data_sources]{list_data_sources} \tab Lists data sources in current AWS Region that belong to this AWS account \cr
#' \link[=quicksight_list_group_memberships]{list_group_memberships} \tab Lists member users in a group \cr
#' \link[=quicksight_list_groups]{list_groups} \tab Lists all user groups in Amazon QuickSight \cr
#' \link[=quicksight_list_iam_policy_assignments]{list_iam_policy_assignments} \tab Lists IAM policy assignments in the current Amazon QuickSight account \cr
#' \link[=quicksight_list_iam_policy_assignments_for_user]{list_iam_policy_assignments_for_user} \tab Lists all the IAM policy assignments, including the Amazon Resource Names (ARNs) for the IAM policies assigned to the specified user and group or groups that the user belongs to\cr
#' \link[=quicksight_list_ingestions]{list_ingestions} \tab Lists the history of SPICE ingestions for a dataset \cr
#' \link[=quicksight_list_tags_for_resource]{list_tags_for_resource} \tab Lists the tags assigned to a resource \cr
#' \link[=quicksight_list_template_aliases]{list_template_aliases} \tab Lists all the aliases of a template \cr
#' \link[=quicksight_list_templates]{list_templates} \tab Lists all the templates in the current Amazon QuickSight account \cr
#' \link[=quicksight_list_template_versions]{list_template_versions} \tab Lists all the versions of the templates in the current Amazon QuickSight account \cr
#' \link[=quicksight_list_theme_aliases]{list_theme_aliases} \tab Lists all the aliases of a theme \cr
#' \link[=quicksight_list_themes]{list_themes} \tab Lists all the themes in the current AWS account \cr
#' \link[=quicksight_list_theme_versions]{list_theme_versions} \tab Lists all the versions of the themes in the current AWS account \cr
#' \link[=quicksight_list_user_groups]{list_user_groups} \tab Lists the Amazon QuickSight groups that an Amazon QuickSight user is a member of \cr
#' \link[=quicksight_list_users]{list_users} \tab Returns a list of all of the Amazon QuickSight users belonging to this account \cr
#' \link[=quicksight_register_user]{register_user} \tab Creates an Amazon QuickSight user, whose identity is associated with the AWS Identity and Access Management (IAM) identity or role specified in the request \cr
#' \link[=quicksight_search_dashboards]{search_dashboards} \tab Searches for dashboards that belong to a user \cr
#' \link[=quicksight_tag_resource]{tag_resource} \tab Assigns one or more tags (key-value pairs) to the specified QuickSight resource \cr
#' \link[=quicksight_untag_resource]{untag_resource} \tab Removes a tag or tags from a resource \cr
#' \link[=quicksight_update_dashboard]{update_dashboard} \tab Updates a dashboard in an AWS account \cr
#' \link[=quicksight_update_dashboard_permissions]{update_dashboard_permissions} \tab Updates read and write permissions on a dashboard \cr
#' \link[=quicksight_update_dashboard_published_version]{update_dashboard_published_version} \tab Updates the published version of a dashboard \cr
#' \link[=quicksight_update_data_set]{update_data_set} \tab Updates a dataset \cr
#' \link[=quicksight_update_data_set_permissions]{update_data_set_permissions} \tab Updates the permissions on a dataset \cr
#' \link[=quicksight_update_data_source]{update_data_source} \tab Updates a data source \cr
#' \link[=quicksight_update_data_source_permissions]{update_data_source_permissions} \tab Updates the permissions to a data source \cr
#' \link[=quicksight_update_group]{update_group} \tab Changes a group description \cr
#' \link[=quicksight_update_iam_policy_assignment]{update_iam_policy_assignment} \tab Updates an existing IAM policy assignment \cr
#' \link[=quicksight_update_template]{update_template} \tab Updates a template from an existing Amazon QuickSight analysis or another template \cr
#' \link[=quicksight_update_template_alias]{update_template_alias} \tab Updates the template alias of a template \cr
#' \link[=quicksight_update_template_permissions]{update_template_permissions} \tab Updates the resource permissions for a template \cr
#' \link[=quicksight_update_theme]{update_theme} \tab Updates a theme \cr
#' \link[=quicksight_update_theme_alias]{update_theme_alias} \tab Updates an alias of a theme \cr
#' \link[=quicksight_update_theme_permissions]{update_theme_permissions} \tab Updates the resource permissions for a theme \cr
#' \link[=quicksight_update_user]{update_user} \tab Updates an Amazon QuickSight user
#' }
#'
#' @rdname quicksight
#' @export
quicksight <- function(config = list()) {
  # Start from the generated table of operation functions for this service.
  # (File header: generated by make.paws — comments only, do not edit logic.)
  svc <- .quicksight$operations
  # Apply the caller-supplied credentials/endpoint/region configuration.
  svc <- set_config(svc, config)
  return(svc)
}
# Private API objects: metadata, handlers, interfaces, etc.
# (Generated by make.paws — comments only; do not hand-edit the values.)
.quicksight <- list()
# Operation functions are registered onto this list elsewhere in the
# generated package code.
.quicksight$operations <- list()
# Service metadata: endpoint templates per region partition plus signing
# and protocol details consumed by the request handlers.
.quicksight$metadata <- list(
  service_name = "quicksight",
  endpoints = list("*" = list(endpoint = "quicksight.{region}.amazonaws.com", global = FALSE), "cn-*" = list(endpoint = "quicksight.{region}.amazonaws.com.cn", global = FALSE), "us-iso-*" = list(endpoint = "quicksight.{region}.c2s.ic.gov", global = FALSE), "us-isob-*" = list(endpoint = "quicksight.{region}.sc2s.sgov.gov", global = FALSE)),
  service_id = "QuickSight",
  api_version = "2018-04-01",
  signing_name = NULL,
  json_version = "1.0",
  target_prefix = ""
)
# Builds the low-level service object: REST-JSON protocol handlers with
# SigV4 request signing.
.quicksight$service <- function(config = list()) {
  handlers <- new_handlers("restjson", "v4")
  new_service(.quicksight$metadata, handlers, config)
}
|
# Introduction ----
# author: Jiaxian Shen (jiaxianshen2022@u.northwestern.edu)
# date:
# purpose:
# no preprocessing
# Libraries ----
library(easypackages) # to load multiple packages at once
library(methods) # quest does not load this automatically
library(dplyr) # For data manipulation
library(ggplot2) # For data visualisation
library(openxlsx) # handle xlsx files
library(caret) # machine learning
library(doParallel)
library(MLmetrics) # required for random forest
library(randomForest) # rf
library(e1071)
libraries("glmnet", "Matrix") # glmnet
library(RRF) # RRFglobal
library(gbm) # gbm
library(C50) # C5.0Tree
library(pls) # pls
library(kernlab) # svmLinear & svmRadial
library(kknn) # kknn
# Set the working directory
# NOTE(review): setwd() in a script is fragile; running from the project
# root (or using a path helper) would be safer. Kept for compatibility.
setwd("/projects/p30892/cdc/nonpareil/ml")
# Import data
# check the data type of columns of interest by str()
# Read the parameter sheet and keep only the columns of interest
# (positions 7:12 and 14:16 of the raw sheet).
# NOTE(review): the name `file` shadows base::file(); kept because the rest
# of the script refers to it, but a descriptive name would be safer.
file <- read.xlsx("out_parameter_all.xlsx",
sheet = 1, startRow = 1, colNames = TRUE,
rowNames = FALSE, detectDates = FALSE, skipEmptyRows = TRUE,
skipEmptyCols = TRUE, rows = NULL, cols = NULL,
check.names = FALSE, namedRegion = NULL, na.strings = "NA", fillMergedCells = FALSE)[,c(7:12,14:16)]
## add country column: collapse the listed US cities/regions into one "US" level
file$country <- file$geography
file$country <- ifelse(file$country %in% c("Chicago", "Pittsburgh", "w_coast", "s_w_w_coast", "w", "s_e","e_coast"), "US", file$country )
# change character to factor
# Columns 2:10 are the categorical predictors (column 1 is the numeric
# response `diversity`). lapply over the data-frame columns replaces the
# original index loop — same result, idiomatic R.
file[2:10] <- lapply(file[2:10], as.factor)
## check data
anyNA(file) # no missing value
# test ----
## categorize diversity (1 increment)
# Bin the numeric diversity index into 5 ordered classes with right-closed
# 1-unit intervals: (-Inf,16], (16,17], (17,18], (18,19], (19,Inf).
# This replaces the original index-assignment loop with cut(), the
# idiomatic (and less error-prone) way to discretise a numeric vector.
# Labels "X1".."X5" reproduce the original make.names() relabelling, and
# droplevels() matches the original as.factor() behaviour of keeping only
# the levels that are actually observed in the data.
# Assumes `diversity` has no NA (checked by anyNA() above).
file$div_c4 <- droplevels(cut(file$diversity,
                              breaks = c(-Inf, 16, 17, 18, 19, Inf),
                              labels = paste0("X", 1:5),
                              right = TRUE))
# caret parallel
# Spin up a 6-worker PSOCK cluster so caret::train() can parallelise
# resampling (trainControl(allowParallel = TRUE) below).
# NOTE(review): no matching stopCluster() is visible in this chunk —
# confirm the cluster is shut down later in the script.
cl <- makePSOCKcluster(6)
registerDoParallel(cl)
# make 80/20 training/testing split ----
# Stratified 80/20 split on the binned response.
set.seed(87)
train.index <- createDataPartition(file$div_c4, p=0.80, list=FALSE)
# Drop the raw response (`diversity`) and the fine-grained `geography`
# column from the model data.
# ROBUSTNESS: the original used grep("diversity", ...) / grep("geography", ...),
# which does partial pattern matching and would silently drop any future
# column whose name merely contains those strings. Exact name matching is
# equivalent for the current columns and safer.
keep.cols <- !(names(file) %in% c("diversity", "geography"))
train.data <- file[train.index, keep.cols]
test.data <- file[-train.index, keep.cols]
### ml algorithms ----
## Run algorithms using repeated 5-fold cross validation (5 times)
tr_ctrl <- trainControl(method="repeatedcv", number=5, repeats=5,
                        classProbs=TRUE,
                        summaryFunction=multiClassSummary,
                        #sampling='down',
                        allowParallel=TRUE)
# Predictor matrix and class labels for caret's non-formula interface.
X <- train.data[, !(names(train.data) %in% "div_c4")]
Y <- train.data$div_c4
## case 1: no study ----
train_1 <- file[train.index, -c(grep("diversity",colnames(file)), grep("geography",colnames(file)), grep("study",colnames(file)))]
test_1 <- file[-train.index, -c(grep("diversity",colnames(file)), grep("geography",colnames(file)), grep("study",colnames(file)))]
X1 <- train_1[, !(names(train_1) %in% "div_c4")]
Y1 <- train_1$div_c4
## random forest: rf: nno study
set.seed(87)
mod.rf.1 <- train(x = X1, y = Y1,
method="rf",
tuneLength = 5,
trControl=tr_ctrl)
cat("=============================\n")
cat("case 1: random forest (rf) + no [study]\n")
print(mod.rf.1)
rf.1.Imp <- varImp(mod.rf.1) # only non-fomula format works for generating variable importance
rf.1.Imp$importance
# plot
pdf("fig/rfIMP_div_c4_no_study.pdf", width = 6, height = 4)
plot(rf.1.Imp)
dev.off()
## case 6: 2 predictors: location, building ----
train_6 <- file[train.index, c(grep("location",colnames(file)), grep("building",colnames(file)), grep("div_c4",colnames(file)))]
test_6 <- file[-train.index, c(grep("location",colnames(file)), grep("building",colnames(file)), grep("div_c4",colnames(file)))]
X6 <- train_6[, !(names(train_6) %in% "div_c4")]
Y6 <- train_6$div_c4
## random forest:
set.seed(87)
mod.rf.6 <- train(x = X6, y = Y6,
method="rf",
tuneLength = 5,
trControl=tr_ctrl)
cat("=============================\n")
cat("case 6: mod.rf.6\n")
print(mod.rf.6)
rf.6.Imp <- varImp(mod.rf.6) # only non-fomula format works for generating variable importance
rf.6.Imp$importance
# plot
pdf("fig/div_c4_rf.6.Imp.pdf", width = 6, height = 4)
plot(rf.6.Imp)
dev.off()
## case 7: 1 predictor: location ----
train_7 <- file[train.index, c(grep("location",colnames(file)), grep("div_c4",colnames(file)))]
test_7 <- file[-train.index, c(grep("location",colnames(file)), grep("div_c4",colnames(file)))]
X7 <- train_7[, !(names(train_7) %in% "div_c4"), drop=FALSE] # to keep X as a dataframe, otherwise bugging in train()
Y7 <- train_7$div_c4
## random forest:
set.seed(87)
mod.rf.7 <- train(x = X7, y = Y7,
method="rf",
tuneLength = 5,
trControl=tr_ctrl)
cat("=============================\n")
cat("case 7: mod.rf.7\n")
print(mod.rf.7)
rf.7.Imp <- varImp(mod.rf.7) # only non-fomula format works for generating variable importance
rf.7.Imp$importance
# plot
pdf("fig/div_c4_rf.7.Imp.pdf", width = 6, height = 4)
plot(rf.7.Imp)
dev.off()
stopCluster(cl) # stop parellel
# assess model performance on training/validation set CV
models <- list(mod.rf.1 = mod.rf.1,
mod.rf.6 = mod.rf.6,
mod.rf.7 = mod.rf.7)
resample_models <- resamples(models)
cat("=============================\n")
cat("summary of models:\n")
summary(resample_models, metric=c("AUC","Kappa", "Mean_Balanced_Accuracy"))
# plot all models
pdf('fig/ml_div_c4_elimvar.pdf',height=6,width=13)
bwplot(resample_models,metric=c("AUC","Kappa", "Mean_Balanced_Accuracy"))
dev.off()
### estimate skill of model on the validation dataset ----
cm_list <- list()
test_mean_balanced_accuracy <- numeric(3)
case <- c(1, 6, 7)
# Evaluate each model on its matching hold-out set. `case` maps the model
# index to the test_<case> object name; seq_along() avoids hard-coding the
# length, and the test set is looked up once per iteration instead of twice.
for (kk in seq_along(case)){
  test_set <- get(paste("test", case[kk], sep = "_"))
  cm_list[[kk]] <- confusionMatrix(predict(models[[kk]], test_set), test_set$div_c4)
  # Mean balanced accuracy across the diversity categories.
  test_mean_balanced_accuracy[kk] <- mean(cm_list[[kk]]$byClass[, "Balanced Accuracy"])
}
cat("=============================\n")
cat("test_mean_balanced_accuracy:\n")
test_mean_balanced_accuracy
### save all variables ----
save.image(file = "rdata/cdc_nonpareil_ml_quest_c4_elimvar.RData")
| /nonpareil/machine_learning/cdc_nonpareil_ml_quest_c4_elimvar.R | permissive | hartmann-lab/workflow_metagenomic_environmental_surveillance | R | false | false | 6,558 | r | # Introduction ----
# author: Jiaxian Shen (jiaxianshen2022@u.northwestern.edu)
# date:
# purpose:
# no preprocessing
# Libraries ----
library(easypackages) # to load multiple packages at once
library(methods) # quest does not load this automatically
library(dplyr) # For data manipulation
library(ggplot2) # For data visualisation
library(openxlsx) # handle xlsx files
library(caret) # machine learning
library(doParallel)
library(MLmetrics) # required for random forest
library(randomForest) # rf
library(e1071)
libraries("glmnet", "Matrix") # glmnet
library(RRF) # RRFglobal
library(gbm) # gbm
library(C50) # C5.0Tree
library(pls) # pls
library(kernlab) # svmLinear & svmRadial
library(kknn) # kknn
# Set the working directory
setwd("/projects/p30892/cdc/nonpareil/ml")
# Import data
# check the data type of columns of interest by str()
file <- read.xlsx("out_parameter_all.xlsx",
sheet = 1, startRow = 1, colNames = TRUE,
rowNames = FALSE, detectDates = FALSE, skipEmptyRows = TRUE,
skipEmptyCols = TRUE, rows = NULL, cols = NULL,
check.names = FALSE, namedRegion = NULL, na.strings = "NA", fillMergedCells = FALSE)[,c(7:12,14:16)]
## add country column
file$country <- file$geography
file$country <- ifelse(file$country %in% c("Chicago", "Pittsburgh", "w_coast", "s_w_w_coast", "w", "s_e","e_coast"), "US", file$country )
# change character to factor
# Columns 2-10 hold categorical metadata read in as character; convert them
# to factors in one vectorized step (equivalent to the former column loop).
file[2:10] <- lapply(file[2:10], as.factor)
## check data
anyNA(file) # no missing value
# test ----
## categorize diversity (1 increment)
# Bin Nonpareil diversity into 5 ordered categories with unit-width,
# right-closed bins:
#   (-Inf,16] -> 1, (16,17] -> 2, (17,18] -> 3, (18,19] -> 4, (19,Inf) -> 5
# cut() replaces the former threshold loop and is exactly equivalent because
# cut()'s default intervals are right-closed, matching the original
# "> lower & <= upper" conditions.
file$div_c4 <- as.integer(cut(file$diversity,
                              breaks = c(-Inf, 16, 17, 18, 19, Inf)))
file$div_c4 <- as.factor(file$div_c4) # change to factor
file <- file %>%
mutate(div_c4 = factor(div_c4,
labels = make.names(levels(div_c4))))
# caret parallel
cl <- makePSOCKcluster(6)
registerDoParallel(cl)
# make 80/20 training/testing split ----
set.seed(87)
train.index <- createDataPartition(file$div_c4, p=0.80, list=FALSE)
train.data <- file[train.index, -c(grep("diversity",colnames(file)), grep("geography",colnames(file)))]
test.data <- file[-train.index, -c(grep("diversity",colnames(file)), grep("geography",colnames(file)))]
### ml algorithms ----
## Run algorithms using repeated 5-fold cross validation (5 times)
tr_ctrl <- trainControl(method="repeatedcv", number=5, repeats=5,
classProbs=TRUE,
summaryFunction=multiClassSummary,
#sampling='down',
allowParallel=TRUE)
X <- train.data[, !(names(train.data) %in% "div_c4")]
Y <- train.data$div_c4
## case 1: no study ----
train_1 <- file[train.index, -c(grep("diversity",colnames(file)), grep("geography",colnames(file)), grep("study",colnames(file)))]
test_1 <- file[-train.index, -c(grep("diversity",colnames(file)), grep("geography",colnames(file)), grep("study",colnames(file)))]
X1 <- train_1[, !(names(train_1) %in% "div_c4")]
Y1 <- train_1$div_c4
## random forest: rf: nno study
set.seed(87)
mod.rf.1 <- train(x = X1, y = Y1,
method="rf",
tuneLength = 5,
trControl=tr_ctrl)
cat("=============================\n")
cat("case 1: random forest (rf) + no [study]\n")
print(mod.rf.1)
rf.1.Imp <- varImp(mod.rf.1) # only non-fomula format works for generating variable importance
rf.1.Imp$importance
# plot
pdf("fig/rfIMP_div_c4_no_study.pdf", width = 6, height = 4)
plot(rf.1.Imp)
dev.off()
## case 6: 2 predictors: location, building ----
train_6 <- file[train.index, c(grep("location",colnames(file)), grep("building",colnames(file)), grep("div_c4",colnames(file)))]
test_6 <- file[-train.index, c(grep("location",colnames(file)), grep("building",colnames(file)), grep("div_c4",colnames(file)))]
X6 <- train_6[, !(names(train_6) %in% "div_c4")]
Y6 <- train_6$div_c4
## random forest:
set.seed(87)
mod.rf.6 <- train(x = X6, y = Y6,
method="rf",
tuneLength = 5,
trControl=tr_ctrl)
cat("=============================\n")
cat("case 6: mod.rf.6\n")
print(mod.rf.6)
rf.6.Imp <- varImp(mod.rf.6) # only non-fomula format works for generating variable importance
rf.6.Imp$importance
# plot
pdf("fig/div_c4_rf.6.Imp.pdf", width = 6, height = 4)
plot(rf.6.Imp)
dev.off()
## case 7: 1 predictor: location ----
train_7 <- file[train.index, c(grep("location",colnames(file)), grep("div_c4",colnames(file)))]
test_7 <- file[-train.index, c(grep("location",colnames(file)), grep("div_c4",colnames(file)))]
X7 <- train_7[, !(names(train_7) %in% "div_c4"), drop=FALSE] # to keep X as a dataframe, otherwise bugging in train()
Y7 <- train_7$div_c4
## random forest:
set.seed(87)
mod.rf.7 <- train(x = X7, y = Y7,
method="rf",
tuneLength = 5,
trControl=tr_ctrl)
cat("=============================\n")
cat("case 7: mod.rf.7\n")
print(mod.rf.7)
rf.7.Imp <- varImp(mod.rf.7) # only non-fomula format works for generating variable importance
rf.7.Imp$importance
# plot
pdf("fig/div_c4_rf.7.Imp.pdf", width = 6, height = 4)
plot(rf.7.Imp)
dev.off()
stopCluster(cl) # stop parellel
# assess model performance on training/validation set CV
models <- list(mod.rf.1 = mod.rf.1,
mod.rf.6 = mod.rf.6,
mod.rf.7 = mod.rf.7)
resample_models <- resamples(models)
cat("=============================\n")
cat("summary of models:\n")
summary(resample_models, metric=c("AUC","Kappa", "Mean_Balanced_Accuracy"))
# plot all models
pdf('fig/ml_div_c4_elimvar.pdf',height=6,width=13)
bwplot(resample_models,metric=c("AUC","Kappa", "Mean_Balanced_Accuracy"))
dev.off()
### estimate skill of model on the validation dataset ----
cm_list <- list()
test_mean_balanced_accuracy <- numeric(3)
case <- c(1, 6, 7)
# Evaluate each model on its matching hold-out set. `case` maps the model
# index to the test_<case> object name; seq_along() avoids hard-coding the
# length, and the test set is looked up once per iteration instead of twice.
for (kk in seq_along(case)){
  test_set <- get(paste("test", case[kk], sep = "_"))
  cm_list[[kk]] <- confusionMatrix(predict(models[[kk]], test_set), test_set$div_c4)
  # Mean balanced accuracy across the diversity categories.
  test_mean_balanced_accuracy[kk] <- mean(cm_list[[kk]]$byClass[, "Balanced Accuracy"])
}
cat("=============================\n")
cat("test_mean_balanced_accuracy:\n")
test_mean_balanced_accuracy
### save all variables ----
save.image(file = "rdata/cdc_nonpareil_ml_quest_c4_elimvar.RData")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllGenerics.R, R/setMethods.R
\docType{methods}
\name{Expectations}
\alias{Expectations}
\alias{Expectations}
\alias{Expectations<-}
\alias{Expectations,MOFAmodel-method}
\title{Expectations: set and retrieve expectations}
\usage{
Expectations(object)
.Expectations(object) <- value
\S4method{Expectations}{MOFAmodel}(object)
}
\arguments{
\item{object}{a \code{\link{MOFAmodel}} object.}
}
\value{
list of expectations
}
| /MOFAtools/man/Expectations.Rd | no_license | vd4mmind/MOFA | R | false | true | 502 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllGenerics.R, R/setMethods.R
\docType{methods}
\name{Expectations}
\alias{Expectations}
\alias{Expectations}
\alias{Expectations<-}
\alias{Expectations,MOFAmodel-method}
\title{Expectations: set and retrieve expectations}
\usage{
Expectations(object)
.Expectations(object) <- value
\S4method{Expectations}{MOFAmodel}(object)
}
\arguments{
\item{object}{a \code{\link{MOFAmodel}} object.}
}
\value{
list of expectations
}
|
library(dplyr)
library(readr)
# BTS lookup table of unique carrier codes.
carriers_url <- "http://www.transtats.bts.gov/Download_Lookup.asp?Lookup=L_UNIQUE_CARRIERS"
# NOTE(review): the original guarded this with url.exists(), which lives in
# RCurl -- a package this script never loads -- so the guard itself errored.
# Let read_csv() attempt the download and convert any failure into the same
# diagnostic message.
raw <- tryCatch(
  read_csv(carriers_url),
  error = function(e) {
    stop("Can't access `airlines` link in 'data-raw/airlines.R': ",
         conditionMessage(e), call. = FALSE)
  }
)
load("data/flights.rda")  # provides `flights`
# Keep only the carriers that actually appear in `flights`; `carrier` is the
# sole shared column, so spelling out `by` preserves the old default join.
airlines <- raw %>%
  select(carrier = Code, name = Description) %>%
  semi_join(flights, by = "carrier") %>%
  arrange(carrier)
write_csv(airlines, "data-raw/airlines.csv")
save(airlines, file = "data/airlines.rda", compress = "bzip2")
| /data-raw/airlines.R | no_license | LinhHPham/nycflights | R | false | false | 540 | r | library(dplyr)
library(readr)
# BTS lookup table of unique carrier codes.
carriers_url <- "http://www.transtats.bts.gov/Download_Lookup.asp?Lookup=L_UNIQUE_CARRIERS"
# NOTE(review): the original guarded this with url.exists(), which lives in
# RCurl -- a package this script never loads -- so the guard itself errored.
# Let read_csv() attempt the download and convert any failure into the same
# diagnostic message.
raw <- tryCatch(
  read_csv(carriers_url),
  error = function(e) {
    stop("Can't access `airlines` link in 'data-raw/airlines.R': ",
         conditionMessage(e), call. = FALSE)
  }
)
load("data/flights.rda")  # provides `flights`
# Keep only the carriers that actually appear in `flights`; `carrier` is the
# sole shared column, so spelling out `by` preserves the old default join.
airlines <- raw %>%
  select(carrier = Code, name = Description) %>%
  semi_join(flights, by = "carrier") %>%
  arrange(carrier)
write_csv(airlines, "data-raw/airlines.csv")
save(airlines, file = "data/airlines.rda", compress = "bzip2")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/matrix_utils.R
\name{read_matrix}
\alias{read_matrix}
\title{Read matrix from file}
\usage{
read_matrix(ncol)
}
\arguments{
\item{ncol}{integer, expected number of columns, passed to \code{\link[base:matrix]{base::matrix()}}.}
}
\value{
function with 1 argument: \code{file}
}
\description{
A function factory to read matrix with given number of columns from a file.
The matrix is read row-wise (with \code{byrow = TRUE}).
}
\examples{
m <- matrix(1:6, ncol = 3)
tf <- tempfile()
write(t(m), file = tf, ncolumns = 3)
rm3 <- read_matrix(ncol = 3)
rm3(tf)
}
| /man/read_matrix.Rd | permissive | maciejsmolka/solvergater | R | false | true | 634 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/matrix_utils.R
\name{read_matrix}
\alias{read_matrix}
\title{Read matrix from file}
\usage{
read_matrix(ncol)
}
\arguments{
\item{ncol}{integer, expected number of columns, passed to \code{\link[base:matrix]{base::matrix()}}.}
}
\value{
function with 1 argument: \code{file}
}
\description{
A function factory to read matrix with given number of columns from a file.
The matrix is read row-wise (with \code{byrow = TRUE}).
}
\examples{
m <- matrix(1:6, ncol = 3)
tf <- tempfile()
write(t(m), file = tf, ncolumns = 3)
rm3 <- read_matrix(ncol = 3)
rm3(tf)
}
|
#!/usr/bin/env Rscript
# Permute somatic mutation positions within callable regions, producing a
# list of `nperms` randomized GRanges for downstream enrichment testing.
args <- commandArgs(trailingOnly = TRUE)
if (length(args) != 4)
    stop("usage: permute.r n muts.rda callable.bed output.rda")
nperms <- as.integer(args[1])
mutrda <- args[2]
callable.bed <- args[3]
outrda <- args[4]
# Refuse to clobber an existing output file.
if (file.exists(outrda))
    stop(sprintf("output file %s already exists, please delete it first", outrda))
library(annotatr)
library(regioneR)
load(mutrda)  # loads "somatic"
# Keep only mutations that passed filtering (NA pass flags are dropped too).
somatic <- somatic[!is.na(somatic$pass) & somatic$pass, ]
cat("making mut granges\n")
str(somatic)
if (nrow(somatic) > 0) {
    # 1-bp ranges at each mutation position (0-based start, 1-based end).
    muts <- GRanges(
        seqnames = paste0("chr", somatic$chr),
        ranges = IRanges(start = somatic$pos - 1, end = somatic$pos))
} else {
    muts <- GRanges()
}
cat("reading callable regions\n")
cf <- read.table(callable.bed, header = FALSE, stringsAsFactors = FALSE)
callable <- GRanges(
    seqnames = paste0('chr', cf[, 1]),
    ranges = IRanges(start = cf[, 2], cf[, 3]))
# seq_len() is safe when nperms == 0 (1:nperms would yield c(1, 0));
# TRUE/FALSE spelled out instead of the reassignable T/F shorthands.
perms <- lapply(seq_len(nperms), function(i) {
    cat('.')
    # Mask = everything outside the callable regions, so permuted positions
    # land only where variant calling was possible.
    randomizeRegions(muts, mask = gaps(callable), allow.overlaps = TRUE,
                     per.chromosome = TRUE)
})
cat('\n')
save(perms, callable, file = outrda)
| /scan2-0.9/scripts/permute.r | no_license | parklab/SCAN2_PTA_paper_2022 | R | false | false | 1,087 | r | #!/usr/bin/env Rscript
# Permute somatic mutation positions within callable regions, producing a
# list of `nperms` randomized GRanges for downstream enrichment testing.
args <- commandArgs(trailingOnly = TRUE)
if (length(args) != 4)
    stop("usage: permute.r n muts.rda callable.bed output.rda")
nperms <- as.integer(args[1])
mutrda <- args[2]
callable.bed <- args[3]
outrda <- args[4]
# Refuse to clobber an existing output file.
if (file.exists(outrda))
    stop(sprintf("output file %s already exists, please delete it first", outrda))
library(annotatr)
library(regioneR)
load(mutrda)  # loads "somatic"
# Keep only mutations that passed filtering (NA pass flags are dropped too).
somatic <- somatic[!is.na(somatic$pass) & somatic$pass, ]
cat("making mut granges\n")
str(somatic)
if (nrow(somatic) > 0) {
    # 1-bp ranges at each mutation position (0-based start, 1-based end).
    muts <- GRanges(
        seqnames = paste0("chr", somatic$chr),
        ranges = IRanges(start = somatic$pos - 1, end = somatic$pos))
} else {
    muts <- GRanges()
}
cat("reading callable regions\n")
cf <- read.table(callable.bed, header = FALSE, stringsAsFactors = FALSE)
callable <- GRanges(
    seqnames = paste0('chr', cf[, 1]),
    ranges = IRanges(start = cf[, 2], cf[, 3]))
# seq_len() is safe when nperms == 0 (1:nperms would yield c(1, 0));
# TRUE/FALSE spelled out instead of the reassignable T/F shorthands.
perms <- lapply(seq_len(nperms), function(i) {
    cat('.')
    # Mask = everything outside the callable regions, so permuted positions
    # land only where variant calling was possible.
    randomizeRegions(muts, mask = gaps(callable), allow.overlaps = TRUE,
                     per.chromosome = TRUE)
})
cat('\n')
save(perms, callable, file = outrda)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bhl_pagesearch.R
\name{bhl_pagesearch}
\alias{bhl_pagesearch}
\title{Search an item for pages containing the specified text}
\usage{
bhl_pagesearch(id, text, as = "table", key = NULL, ...)
}
\arguments{
\item{id}{(integer) BHL identifier of the item to be searched}
\item{text}{(character) the text for which to search}
\item{as}{(character) Return a list ("list"), json ("json"), xml ("xml"),
or parsed table ("table", default). Note that \code{as="table"} can give
different data format back depending on the function - for example,
sometimes a data.frame and sometimes a character vector.}
\item{key}{Your BHL API key, either enter, or loads from your \code{.Renviron}
as \code{BHL_KEY}
or from \code{.Rprofile} as \code{bhl_key}.}
\item{...}{Curl options passed on to \code{\link[crul:HttpClient]{crul::HttpClient()}}}
}
\description{
Search an item for pages containing the specified text
}
\examples{
\dontrun{
bhl_pagesearch(22004, "dog")
bhl_pagesearch(22004, "dog", as = "json")
}
}
| /man/bhl_pagesearch.Rd | permissive | cran/rbhl | R | false | true | 1,074 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bhl_pagesearch.R
\name{bhl_pagesearch}
\alias{bhl_pagesearch}
\title{Search an item for pages containing the specified text}
\usage{
bhl_pagesearch(id, text, as = "table", key = NULL, ...)
}
\arguments{
\item{id}{(integer) BHL identifier of the item to be searched}
\item{text}{(character) the text for which to search}
\item{as}{(character) Return a list ("list"), json ("json"), xml ("xml"),
or parsed table ("table", default). Note that \code{as="table"} can give
different data format back depending on the function - for example,
sometimes a data.frame and sometimes a character vector.}
\item{key}{Your BHL API key, either enter, or loads from your \code{.Renviron}
as \code{BHL_KEY}
or from \code{.Rprofile} as \code{bhl_key}.}
\item{...}{Curl options passed on to \code{\link[crul:HttpClient]{crul::HttpClient()}}}
}
\description{
Search an item for pages containing the specified text
}
\examples{
\dontrun{
bhl_pagesearch(22004, "dog")
bhl_pagesearch(22004, "dog", as = "json")
}
}
|
rm(list = ls())
load("cox_dat.Rdata")
str(dat)
library(survival)
library(survminer)
library(forestplot)
library(stringr)
# Cox regression: fit the proportional-hazards model on all covariates
model <- coxph(Surv(time, event) ~., data = dat )
ggforest(model)
# summary of the fit (coefficients, hazard ratios, confidence intervals)
m = summary(model)
colnames(m$coefficients)
#[1] "coef" "exp(coef)" "se(coef)" "z" "Pr(>|z|)"
colnames(m$conf.int)
#[1] "exp(coef)" "exp(-coef)" "lower .95" "upper .95"
# Reformat the p-values and attach significance marks
p = ifelse(
m$coefficients[, 5] < 0.001,
"<0.001 ***",
ifelse(
m$coefficients[, 5] < 0.01,
"<0.01 **",
ifelse(
m$coefficients[, 5] < 0.05,
paste(round(m$coefficients[, 5], 3), " *"),
round(m$coefficients[, 5], 3)
)
)
)
p
# Hazard ratio (HR) and its 95% confidence interval
dat2 = as.data.frame(round(m$conf.int[, c(1, 3, 4)], 2))
dat2 = tibble::rownames_to_column(dat2, var = "Trait")
colnames(dat2)[2:4] = c("HR", "lower", "upper")
# HR text and p-values to display on the plot, e.g. "1.23(0.98-1.55)"
dat2$HR2 = paste0(dat2[, 2], "(", dat2[, 3], "-", dat2[, 4], ")")
dat2$p = p
str(dat2)
# Basic forest plot (columns: Trait, HR text, p; point = HR, whiskers = CI)
forestplot(
dat2[, c(1, 4, 6)],
mean = dat2[, 2],
lower = dat2[, 3],
upper = dat2[, 4],
zero = 1,
boxsize = 0.4,
col = fpColors(box = '#1075BB', lines = 'black', zero = 'grey'),
lty.ci = "solid",
graph.pos = 2
)
# Cosmetic adjustments ----------------------------------------------------
# Strip the variable-name prefix ("gender"/"stage") so only the level shows.
dat2$Trait <- str_remove(dat2$Trait, "gender|stage")
# Helper: turn a single label into a full display row (label + NA padding).
ins <- function(x) {
  c(x, rep(NA, ncol(dat2) - 1))
}
# Key step: assemble the display matrix -- a header row, then each group
# header ("gender", "stage") followed by its reference level and the fitted
# levels taken from the Cox summary rows.
dat2 <- rbind(
  c("Trait", NA, NA, NA, "HR", "p"),
  ins("gender"),
  ins("female"),
  dat2[1, ],
  ins("stage"),
  ins("i"),
  dat2[2:nrow(dat2), ]
)
# rbind() coerced the numeric columns; restore HR/lower/upper as numeric.
for (i in 2:4) {
  dat2[, i] <- as.numeric(dat2[, i])
}
str(dat2)
forestplot(
  dat2[, c(1, 5, 6)],
  mean = dat2[, 2],
  lower = dat2[, 3],
  upper = dat2[, 4],
  zero = 1,
  boxsize = 0.4,
  col = fpColors(box = '#1075BB', lines = 'black', zero = 'grey'),
  lty.ci = "solid",
  graph.pos = 2,
  #xticks = FALSE,
  # TRUE/FALSE spelled out (T/F are reassignable shorthands).
  is.summary = c(TRUE, TRUE, FALSE, FALSE, TRUE, rep(FALSE, 10)),
  align = "l",
  hrzl_lines = list(
    "1" = gpar(lty = 1),
    "2" = gpar(lty = 1),
    "16" = gpar(lty = 1)),
  colgap = unit(5, 'mm')
)
| /forestplot.R | no_license | ylchenchen/forestplot | R | false | false | 2,218 | r | rm(list = ls())
load("cox_dat.Rdata")
str(dat)
library(survival)
library(survminer)
library(forestplot)
library(stringr)
#cox回归,建立模型
model <- coxph(Surv(time, event) ~., data = dat )
ggforest(model)
#summary
m = summary(model)
colnames(m$coefficients)
#[1] "coef" "exp(coef)" "se(coef)" "z" "Pr(>|z|)"
colnames(m$conf.int)
#[1] "exp(coef)" "exp(-coef)" "lower .95" "upper .95"
#p值改一下格式,加上显著性
p = ifelse(
m$coefficients[, 5] < 0.001,
"<0.001 ***",
ifelse(
m$coefficients[, 5] < 0.01,
"<0.01 **",
ifelse(
m$coefficients[, 5] < 0.05,
paste(round(m$coefficients[, 5], 3), " *"),
round(m$coefficients[, 5], 3)
)
)
)
p
#HR和它的置信区间
dat2 = as.data.frame(round(m$conf.int[, c(1, 3, 4)], 2))
dat2 = tibble::rownames_to_column(dat2, var = "Trait")
colnames(dat2)[2:4] = c("HR", "lower", "upper")
#需要在图上显示的HR文字和p值
dat2$HR2 = paste0(dat2[, 2], "(", dat2[, 3], "-", dat2[, 4], ")")
dat2$p = p
str(dat2)
#基础画图
forestplot(
dat2[, c(1, 4, 6)],
mean = dat2[, 2],
lower = dat2[, 3],
upper = dat2[, 4],
zero = 1,
boxsize = 0.4,
col = fpColors(box = '#1075BB', lines = 'black', zero = 'grey'),
lty.ci = "solid",
graph.pos = 2
)
# Cosmetic adjustments ----------------------------------------------------
# Strip the variable-name prefix ("gender"/"stage") so only the level shows.
dat2$Trait <- str_remove(dat2$Trait, "gender|stage")
# Helper: turn a single label into a full display row (label + NA padding).
ins <- function(x) {
  c(x, rep(NA, ncol(dat2) - 1))
}
# Key step: assemble the display matrix -- a header row, then each group
# header ("gender", "stage") followed by its reference level and the fitted
# levels taken from the Cox summary rows.
dat2 <- rbind(
  c("Trait", NA, NA, NA, "HR", "p"),
  ins("gender"),
  ins("female"),
  dat2[1, ],
  ins("stage"),
  ins("i"),
  dat2[2:nrow(dat2), ]
)
# rbind() coerced the numeric columns; restore HR/lower/upper as numeric.
for (i in 2:4) {
  dat2[, i] <- as.numeric(dat2[, i])
}
str(dat2)
forestplot(
  dat2[, c(1, 5, 6)],
  mean = dat2[, 2],
  lower = dat2[, 3],
  upper = dat2[, 4],
  zero = 1,
  boxsize = 0.4,
  col = fpColors(box = '#1075BB', lines = 'black', zero = 'grey'),
  lty.ci = "solid",
  graph.pos = 2,
  #xticks = FALSE,
  # TRUE/FALSE spelled out (T/F are reassignable shorthands).
  is.summary = c(TRUE, TRUE, FALSE, FALSE, TRUE, rep(FALSE, 10)),
  align = "l",
  hrzl_lines = list(
    "1" = gpar(lty = 1),
    "2" = gpar(lty = 1),
    "16" = gpar(lty = 1)),
  colgap = unit(5, 'mm')
)
|
##################################################################################################
# Supporting code for Evolution and lineage dynamics of a transmissible cancer in Tasmanian devils
# Author: Kevin Gori
# Date: May 2020
#
# WHAT THIS FILE DOES:
# Step 05 of the pipeline for analysing recurrent alleles.
# Adds phylogenetic information - requires manual inspection of the tree.
logger <- getLogger("PIPELINE_05")
table_path <- file.path(PIPELINE.DIR, "intermediate_data", "tree_informative_losses.xlsx")
logger$warn(paste("Ensure manual information in", table_path, "is correct"))
logger$info("Reading precomputed table of informative mip+cnv_id combinations for loss cnvs into 'informative'")
# "final" sheet holds the manually curated annotations for loss CNVs.
manual_table <- as.data.table(read.xlsx(table_path, "final"))
# The manual table only covers informative loss CNVs, so tag every row
# accordingly before using these columns as join keys below.
manual_table[, State := "loss"]
manual_table[, informative := TRUE]
logger$info("Adding 'tree_informative' column based on manual annotation")
# data.table update-join: default everything to FALSE, then copy the manual
# tree_informative flag onto matching (cnv_id, mip_name, State, informative)
# rows. Rows absent from the manual table keep FALSE.
mips_data[, tree_informative := FALSE]
mips_data[manual_table, tree_informative := i.tree_informative, on = .(cnv_id, mip_name, State, informative)]
# Final pass/fail flag: gains rely on the automatic 'informative' call,
# losses additionally require the manual tree-informative confirmation.
mips_data[, INFO.PASS := FALSE]
mips_data[State == "gain", INFO.PASS := informative]
mips_data[State == "loss", INFO.PASS := tree_informative]
# Drop the intermediate flags now that INFO.PASS has been derived.
mips_data[, informative := NULL]
mips_data[, tree_informative := NULL]
| /scripts/recurrent_alleles/05_add_manual_annotation_from_tree.R | no_license | TransmissibleCancerGroup/TCG_2020_devil_paper | R | false | false | 1,285 | r | ##################################################################################################
# Supporting code for Evolution and lineage dynamics of a transmissible cancer in Tasmanian devils
# Author: Kevin Gori
# Date: May 2020
#
# WHAT THIS FILE DOES:
# Step 05 of the pipeline for analysing recurrent alleles.
# Adds phylogenetic information - requires manual inspection of the tree.
logger <- getLogger("PIPELINE_05")
table_path <- file.path(PIPELINE.DIR, "intermediate_data", "tree_informative_losses.xlsx")
logger$warn(paste("Ensure manual information in", table_path, "is correct"))
logger$info("Reading precomputed table of informative mip+cnv_id combinations for loss cnvs into 'informative'")
manual_table <- as.data.table(read.xlsx(table_path, "final"))
manual_table[, State := "loss"]
manual_table[, informative := TRUE]
logger$info("Adding 'tree_informative' column based on manual annotation")
mips_data[, tree_informative := FALSE]
mips_data[manual_table, tree_informative := i.tree_informative, on = .(cnv_id, mip_name, State, informative)]
mips_data[, INFO.PASS := FALSE]
mips_data[State == "gain", INFO.PASS := informative]
mips_data[State == "loss", INFO.PASS := tree_informative]
mips_data[, informative := NULL]
mips_data[, tree_informative := NULL]
|
# Shiny UI box: Expected Runtime (ERT) comparison aggregated across all
# functions on a single dimension.
# `width`, `collapsible` and `collapsed` are forwarded to the dashboard box;
# defaults and flags now use TRUE/FALSE instead of the reassignable T/F.
ERT_comparison_box <- function(width = 12, collapsible = TRUE, collapsed = TRUE) {
  box(title = HTML('<p style="font-size:120%;">Expected Runtime Comparisons (across functions on one dimension)</p>'),
      width = width, collapsible = collapsible, solidHeader = TRUE,
      status = "primary", collapsed = collapsed,
      sidebarLayout(
        sidebarPanel(
          width = 2,
          # Choices are populated server-side, hence NULL here.
          selectInput('ERTPlot.Aggr.Algs', label = 'Select which IDs to include:',
                      multiple = TRUE, selected = NULL, choices = NULL) %>% shinyInput_label_embed(
            custom_icon() %>%
              bs_embed_popover(
                title = "ID selection", content = alg_select_info,
                placement = "auto"
              )
          ),
          selectInput('ERTPlot.Aggr.Funcs', label = 'Select which functions to aggregate over:',
                      multiple = TRUE, selected = NULL, choices = NULL),
          selectInput('ERTPlot.Aggr.Mode', label = 'Select the plotting mode',
                      choices = c('radar', 'line'), selected = 'radar'),
          checkboxInput('ERTPlot.Aggr.Ranking',
                        label = 'Use ranking instead of ERT-values',
                        value = TRUE),
          checkboxInput('ERTPlot.Aggr.Logy',
                        label = 'Scale y axis \\(\\log_{10}\\)',
                        value = FALSE),
          actionButton("ERTPlot.Aggr.Refresh", "Refresh the figure and table"),
          hr(),
          selectInput('ERTPlot.Aggr.Format', label = 'Select the figure format',
                      choices = supported_fig_format, selected = supported_fig_format[[1]]),
          downloadButton('ERTPlot.Aggr.Download', label = 'Download the figure'),
          hr(),
          selectInput('ERTPlot.Aggr.TableFormat', label = 'Select the table format',
                      choices = supported_table_format, selected = supported_table_format[[1]]),
          downloadButton('ERTPlot.Aggr.DownloadTable', label = 'Download the table')
        ),
        mainPanel(
          width = 10,
          column(
            width = 12, align = "center",
            HTML_P('The <b><i>ERT</i></b> of the runtime samples across all functions.
                    ERT is decided based on the target values in the table below,
                    with the default being the <b>best reached f(x) by any of the
                    selected algorithms</b>. When using a lineplot, <i>Infinite ERTS</i> are shown as
                    non-connected dots on the graph.'),
            plotlyOutput.IOHanalyzer('ERTPlot.Aggr.Plot'),
            hr(),
            HTML_P("The chosen <b>target values</b> per function are as follows
                    (double click an entry to edit it):"),
            DT::dataTableOutput("ERTPlot.Aggr.Targets"),
            hr(),
            HTML_P("The raw <b>ERT</b>-values are:"),
            DT::dataTableOutput("ERTPlot.Aggr.ERTTable")
          )
        )
      )
  )
}
#TODO: combine with other function using proper namespacing and modularity
# Shiny UI box: Expected Runtime (ERT) comparison aggregated across
# dimensions; mirrors ERT_comparison_box but with dimension-keyed inputs.
# Defaults and flags now use TRUE/FALSE instead of the reassignable T/F.
ERT_comparison_box_dim <- function(width = 12, collapsible = TRUE, collapsed = TRUE) {
  box(title = HTML('<p style="font-size:120%;">Expected Runtime Comparisons (across dimensions)</p>'),
      width = width, collapsible = collapsible, solidHeader = TRUE,
      status = "primary", collapsed = collapsed,
      sidebarLayout(
        sidebarPanel(
          width = 2,
          # Choices are populated server-side, hence NULL here.
          selectInput('ERTPlot.Aggr_Dim.Algs', label = 'Select which IDs to include:',
                      multiple = TRUE, selected = NULL, choices = NULL) %>% shinyInput_label_embed(
            custom_icon() %>%
              bs_embed_popover(
                title = "ID selection", content = alg_select_info,
                placement = "auto"
              )
          ),
          selectInput('ERTPlot.Aggr_Dim.Mode', label = 'Select the plotting mode',
                      choices = c('radar', 'line'), selected = 'line'),
          checkboxInput('ERTPlot.Aggr_Dim.Ranking',
                        label = 'Use ranking instead of ERT-values',
                        value = FALSE),
          checkboxInput('ERTPlot.Aggr_Dim.Logy',
                        label = 'Scale y axis \\(\\log_{10}\\)',
                        value = TRUE),
          actionButton("ERTPlot.Aggr_Dim.Refresh", "Refresh the figure and table"),
          hr(),
          selectInput('ERTPlot.Aggr_Dim.Format', label = 'Select the figure format',
                      choices = supported_fig_format, selected = supported_fig_format[[1]]),
          downloadButton('ERTPlot.Aggr_Dim.Download', label = 'Download the figure'),
          hr(),
          selectInput('ERTPlot.Aggr_Dim.TableFormat', label = 'Select the table format',
                      choices = supported_table_format, selected = supported_table_format[[1]]),
          downloadButton('ERTPlot.Aggr_Dim.DownloadTable', label = 'Download the table')
        ),
        mainPanel(
          width = 10,
          column(
            width = 12, align = "center",
            HTML_P('The <b><i>ERT</i></b> of the runtime samples across all functions.
                    ERT is decided based on the target values in the table below,
                    with the default being the <b>best reached f(x) by any of the
                    selected algorithms</b>. <i>Infinite ERTS</i> are shown as
                    seperate dots on the graph.'),
            plotlyOutput.IOHanalyzer('ERTPlot.Aggr_Dim.Plot'),
            hr(),
            HTML_P("The chosen <b>target values</b> per dimension are as follows
                    (double click an entry to edit it):"),
            DT::dataTableOutput("ERTPlot.Aggr_Dim.Targets"),
            hr(),
            HTML_P("The raw <b>ERT</b>-values are:"),
            DT::dataTableOutput("ERTPlot.Aggr_Dim.ERTTable")
          )
        )
      )
  )
} | /inst/shiny-server/ui/ERT_comparison_box.R | permissive | IOHprofiler/IOHanalyzer | R | false | false | 6,297 | r | ERT_comparison_box <- function(width = 12, collapsible = T, collapsed = T) {
box(title = HTML('<p style="font-size:120%;">Expected Runtime Comparisons (across functions on one dimension)</p>'),
width = width, collapsible = collapsible, solidHeader = TRUE,
status = "primary", collapsed = collapsed,
sidebarLayout(
sidebarPanel(
width = 2,
selectInput('ERTPlot.Aggr.Algs', label = 'Select which IDs to include:',
multiple = T, selected = NULL, choices = NULL) %>% shinyInput_label_embed(
custom_icon() %>%
bs_embed_popover(
title = "ID selection", content = alg_select_info,
placement = "auto"
)
),
selectInput('ERTPlot.Aggr.Funcs', label = 'Select which functions to aggregate over:',
multiple = T, selected = NULL, choices = NULL),
selectInput('ERTPlot.Aggr.Mode', label = 'Select the plotting mode',
choices = c('radar', 'line'), selected = 'radar'),
checkboxInput('ERTPlot.Aggr.Ranking',
label = 'Use ranking instead of ERT-values',
value = T),
checkboxInput('ERTPlot.Aggr.Logy',
label = 'Scale y axis \\(\\log_{10}\\)',
value = F),
actionButton("ERTPlot.Aggr.Refresh", "Refresh the figure and table"),
hr(),
selectInput('ERTPlot.Aggr.Format', label = 'Select the figure format',
choices = supported_fig_format, selected = supported_fig_format[[1]]),
downloadButton('ERTPlot.Aggr.Download', label = 'Download the figure'),
hr(),
selectInput('ERTPlot.Aggr.TableFormat', label = 'Select the table format',
choices = supported_table_format, selected = supported_table_format[[1]]),
downloadButton('ERTPlot.Aggr.DownloadTable', label = 'Download the table')
),
mainPanel(
width = 10,
column(
width = 12, align = "center",
HTML_P('The <b><i>ERT</i></b> of the runtime samples across all functions.
ERT is decided based on the target values in the table below,
with the default being the <b>best reached f(x) by any of the
selected algorithms</b>. When using a lineplot, <i>Infinite ERTS</i> are shown as
non-connected dots on the graph.'),
plotlyOutput.IOHanalyzer('ERTPlot.Aggr.Plot'),
hr(),
HTML_P("The chosen <b>target values</b> per function are as follows
(double click an entry to edit it):"),
DT::dataTableOutput("ERTPlot.Aggr.Targets"),
hr(),
HTML_P("The raw <b>ERT</b>-values are:"),
DT::dataTableOutput("ERTPlot.Aggr.ERTTable")
)
)
)
)
}
#TODO: combine with other function using proper namespacing and modularity
# Builds the collapsible dashboard box for the 'Expected Runtime Comparisons
# (across dimensions)' view: a sidebar with ID selection, plotting mode and
# scaling options plus figure/table download controls, and a main panel with
# the aggregated ERT plot and the editable target / raw ERT tables.
#
# width:       bootstrap column width of the box (12 = full row).
# collapsible: whether the user can collapse the box.
# collapsed:   whether the box starts out collapsed.
#
# Returns the box tag object to be placed in the dashboard UI.
ERT_comparison_box_dim <- function(width = 12, collapsible = TRUE, collapsed = TRUE) {
  box(
    title = HTML('<p style="font-size:120%;">Expected Runtime Comparisons (across dimensions)</p>'),
    width = width, collapsible = collapsible, solidHeader = TRUE,
    status = "primary", collapsed = collapsed,
    sidebarLayout(
      sidebarPanel(
        width = 2,
        # Which algorithm IDs to include; choices are filled in server-side.
        selectInput('ERTPlot.Aggr_Dim.Algs', label = 'Select which IDs to include:',
                    multiple = TRUE, selected = NULL, choices = NULL) %>% shinyInput_label_embed(
          custom_icon() %>%
            bs_embed_popover(
              title = "ID selection", content = alg_select_info,
              placement = "auto"
            )
        ),
        selectInput('ERTPlot.Aggr_Dim.Mode', label = 'Select the plotting mode',
                    choices = c('radar', 'line'), selected = 'line'),
        checkboxInput('ERTPlot.Aggr_Dim.Ranking',
                      label = 'Use ranking instead of ERT-values',
                      value = FALSE),
        checkboxInput('ERTPlot.Aggr_Dim.Logy',
                      label = 'Scale y axis \\(\\log_{10}\\)',
                      value = TRUE),
        actionButton("ERTPlot.Aggr_Dim.Refresh", "Refresh the figure and table"),
        hr(),
        # Figure and table export controls.
        selectInput('ERTPlot.Aggr_Dim.Format', label = 'Select the figure format',
                    choices = supported_fig_format, selected = supported_fig_format[[1]]),
        downloadButton('ERTPlot.Aggr_Dim.Download', label = 'Download the figure'),
        hr(),
        selectInput('ERTPlot.Aggr_Dim.TableFormat', label = 'Select the table format',
                    choices = supported_table_format, selected = supported_table_format[[1]]),
        downloadButton('ERTPlot.Aggr_Dim.DownloadTable', label = 'Download the table')
      ),
      mainPanel(
        width = 10,
        column(
          width = 12, align = "center",
          HTML_P('The <b><i>ERT</i></b> of the runtime samples across all functions.
                 ERT is decided based on the target values in the table below,
                 with the default being the <b>best reached f(x) by any of the
                 selected algorithms</b>. <i>Infinite ERTS</i> are shown as
                 separate dots on the graph.'),
          plotlyOutput.IOHanalyzer('ERTPlot.Aggr_Dim.Plot'),
          hr(),
          HTML_P("The chosen <b>target values</b> per dimension are as follows
                 (double click an entry to edit it):"),
          DT::dataTableOutput("ERTPlot.Aggr_Dim.Targets"),
          hr(),
          HTML_P("The raw <b>ERT</b>-values are:"),
          DT::dataTableOutput("ERTPlot.Aggr_Dim.ERTTable")
        )
      )
    )
  )
}
/newpathrule.R | no_license | hying99/teachers | R | false | false | 10,546 | r | ||
\name{mu.vd1.4p}
\alias{mu.vd1.4p}
\title{Intrinsic mortality rate for the 2-process 4-parameter vitality model}
\usage{
mu.vd1.4p(x, r, s)
}
\arguments{
\item{x}{age}
\item{r}{r value}
\item{s}{s value}
}
\value{
Intrinsic age-specific mortality rates
}
\description{
Gives the intrinsic age-specific mortality rates for a given set of \code{r} and \code{s}, the intrinsic parameters.
}
\seealso{\code{\link{mu.vd.4p}}, \code{\link{mu.vd2.4p}}}
| /man/mu.vd1.4p.Rd | no_license | cran/vitality | R | false | false | 461 | rd | \name{mu.vd1.4p}
\alias{mu.vd1.4p}
\title{Intrinsic mortality rate for the 2-process 4-parameter vitality model}
\usage{
mu.vd1.4p(x, r, s)
}
\arguments{
\item{x}{age}
\item{r}{r value}
\item{s}{s value}
}
\value{
Intrinsic age-specific mortality rates
}
\description{
Gives the intrinsic age-specific mortality rates for a given set of \code{r} and \code{s}, the intrinsic parameters.
}
\seealso{\code{\link{mu.vd.4p}}, \code{\link{mu.vd2.4p}}}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/model_parameters.R
\name{model_param_list_create}
\alias{model_param_list_create}
\title{Model Parameter List Creation}
\usage{
model_param_list_create(eta = 1/(21 * 365), rho = 0.85, a0 = 2920,
sigma2 = 1.67, max_age = 100 * 365, rA = 1/195, rT = 0.2,
rD = 0.2, rU = 1/110.299, rP = 1/15, dE = 12, delayGam = 12.5,
cD = 0.0676909, cT = 0.322 * cD, cU = 0.006203, gamma1 = 1.82425,
d1 = 0.160527, dID = 3650, ID0 = 1.577533, kD = 0.476614,
uD = 9.44512, aD = 8001.99, fD0 = 0.007055, gammaD = 4.8183,
alphaA = 0.75735, alphaU = 0.185624, b0 = 0.590076, b1 = 0.5,
dB = 3650, IB0 = 43.8787, kB = 2.15506, uB = 7.19919,
phi0 = 0.791666, phi1 = 0.000737, dCA = 10950, IC0 = 18.02366,
kC = 2.36949, uCA = 6.06349, PM = 0.774368, dCM = 67.6952,
delayMos = 10, tau1 = 0.69, tau2 = 2.31, mu0 = 0.132,
Q0 = 0.92, chi = 0.86, bites_Bed = 0.89, bites_Indoors = 0.97,
muEL = 0.0338, muLL = 0.0348, muPL = 0.249, dEL = 6.64,
dLL = 3.72, dPL = 0.643, gammaL = 13.25, km = 11, cm = 0.05,
betaL = 21.2, num_int = 1, itn_cov = 0, irs_cov = 0,
ITN_IRS_on = -1, DY = 365, d_ITN0 = 0.41, r_ITN0 = 0.56,
r_ITN1 = 0.24, r_IRS0 = 0.6, d_IRS0 = 1, irs_half_life = 0.5 *
DY, itn_half_life = 2.64 * DY, IRS_interval = 1 * DY,
ITN_interval = 3 * DY, ...)
}
\arguments{
\item{eta}{Death rate for exponential population distribution, i.e. 1/Mean Population Age. Default = 0.0001305}
\item{rho}{Age-dependent biting parameter. Default = 0.85}
\item{a0}{Age-dependent biting parameter. Default = 2920}
\item{sigma2}{Variance of the log heterogeneity in biting rates. Default = 1.67}
\item{max_age}{Maximum age in days. Default = 100*365}
\item{rA}{Rate of leaving asymptomatic infection. Default = 0.00512821}
\item{rT}{Rate of leaving treatment. Default = 0.2}
\item{rD}{Rate of leaving clinical disease. Default = 0.2}
\item{rU}{Rate of recovering from subpatent infection. Default = 0.00906627}
\item{rP}{Rate of leaving prophylaxis. Default = 0.06666667}
\item{dE}{Latent period of human infection. Default = 12}
\item{delayGam}{Lag from parasites to infectious gametocytes. Default = 12.5}
\item{cD}{Untreated disease contribution to infectiousness. Default = 0.0676909}
\item{cT}{Treated disease contribution to infectiousness. Default = 0.322 * cD}
\item{cU}{Subpatent disease contribution to infectiousness. Default = 0.006203}
\item{gamma1}{Parameter for infectiousness of state A. Default = 1.82425}
\item{d1}{Minimum probability due to maximum immunity. Default = 0.160527}
\item{dID}{Inverse of decay rate. Default = 3650}
\item{ID0}{Scale parameter. Default = 1.577533}
\item{kD}{Shape parameter. Default = 0.476614}
\item{uD}{Duration in which immunity is not boosted. Default = 9.44512}
\item{aD}{Scale parameter relating age to immunity. Default = 8001.99}
\item{fD0}{Time-scale at which immunity changes with age. Default = 0.007055}
\item{gammaD}{Shape parameter relating age to immunity. Default = 4.8183}
\item{alphaA}{PCR detection probability parameters state A. Default = 0.757}
\item{alphaU}{PCR detection probability parameters state U. Default = 0.186}
\item{b0}{Maximum probability due to no immunity. Default = 0.590076}
\item{b1}{Maximum relative reduction due to immunity. Default = 0.5}
\item{dB}{Inverse of decay rate. Default = 3650}
\item{IB0}{Scale parameter. Default = 43.8787}
\item{kB}{Shape parameter. Default = 2.15506}
\item{uB}{Duration in which immunity is not boosted. Default = 7.19919}
\item{phi0}{Maximum probability due to no immunity. Default = 0.791666}
\item{phi1}{Maximum relative reduction due to immunity. Default = 0.000737}
\item{dCA}{Inverse of decay rate. Default = 10950}
\item{IC0}{Scale parameter. Default = 18.02366}
\item{kC}{Shape parameter. Default = 2.36949}
\item{uCA}{Duration in which immunity is not boosted. Default = 6.06349}
\item{PM}{New-born immunity relative to mother’s. Default = 0.774368}
\item{dCM}{Inverse of decay rate of maternal immunity. Default = 67.6952}
\item{delayMos}{Extrinsic incubation period. Default = 10}
\item{tau1}{Duration of host seeking, assumed to be constant between species. Default = 0.69}
\item{tau2}{Duration of mosquito resting after feed. Default = 2.31}
\item{mu0}{Daily mortality of adult mosquitos. Default = 0.132}
\item{Q0}{Anthropophagy probability. Default = 0.92}
\item{chi}{Endophily probability. Default = 0.86}
\item{bites_Bed}{Percentage of bites indoors and in bed. Default = 0.89}
\item{bites_Indoors}{Percentage of bites indoors . Default = 0.97}
\item{muEL}{Per capita daily mortality rate of early stage larvae (low density). Default = 0.0338}
\item{muLL}{Per capita daily mortality rate of late stage larvae (low density). Default = 0.0348}
\item{muPL}{Per capita daily mortality rate of pupae. Default = 0.249}
\item{dEL}{Development time of early stage larvae. Default = 6.64}
\item{dLL}{Development time of late stage larvae. Default = 3.72}
\item{dPL}{Development time of pupae. Default = 0.643}
\item{gammaL}{Relative effect of density dependence on late instars relative to early instars. Default = 13.25}
\item{km}{Seasonal carrying capacity. Default = 11}
\item{cm}{Seasonal birth rate. Default = 0.05}
\item{betaL}{Number of eggs laid per day per mosquito. Default = 21.2}
\item{num_int}{Number of intervention parameters. Default = 4}
\item{itn_cov}{The proportion of people that use an ITN. Default = 0}
\item{irs_cov}{The proportion of people living in houses that have been sprayed. Default = 0}
\item{ITN_IRS_on}{Time of ITN and IRS to be activated. Default = -1, i.e. never.}
\item{DY}{Duration of year (days). Default = 365}
\item{d_ITN0}{Probability of dying with an encounter with ITN (max). Default = 0.41}
\item{r_ITN0}{Probability of repeating behaviour with ITN (max). Default = 0.56}
\item{r_ITN1}{Probability of repeating behaviour with ITN (min). Default = 0.24}
\item{r_IRS0}{Probability of repeating behaviour with IRS (min). Default = 0.6}
\item{d_IRS0}{Probability of dying with an encounter with IRS (max). Default = 1}
\item{irs_half_life}{IRS half life. Default = 0.5 * DY}
\item{itn_half_life}{ITN half life. Default = 2.64 * DY}
\item{IRS_interval}{How long before IRS is repeated, i.e. when IRS decay = 1. Default = 1 * DY}
\item{ITN_interval}{How long before ITN is repeated, i.e. when IRS decay = 1. Default = 3 * DY}
\item{...}{Any other parameters needed for non-standard model. If they share the same name
as any of the defined parameters \code{model_param_list_create} will stop. You can either write
any extra parameters you like individually, e.g. model_param_list_create(extra1 = 1, extra2 = 2)
and these parameters will appear appended to the returned list, or you can pass explicitly
the ellipsis argument as a list created before, e.g. model_param_list_create(...=list(extra1 = 1, extra2 = 2))}
}
\description{
\code{model_param_list_create} creates list of model parameters to be used
within \code{equilibrium_init_create}
}
| /man/model_param_list_create.Rd | no_license | jhellewell14/ICDMM | R | false | true | 7,067 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/model_parameters.R
\name{model_param_list_create}
\alias{model_param_list_create}
\title{Model Parameter List Creation}
\usage{
model_param_list_create(eta = 1/(21 * 365), rho = 0.85, a0 = 2920,
sigma2 = 1.67, max_age = 100 * 365, rA = 1/195, rT = 0.2,
rD = 0.2, rU = 1/110.299, rP = 1/15, dE = 12, delayGam = 12.5,
cD = 0.0676909, cT = 0.322 * cD, cU = 0.006203, gamma1 = 1.82425,
d1 = 0.160527, dID = 3650, ID0 = 1.577533, kD = 0.476614,
uD = 9.44512, aD = 8001.99, fD0 = 0.007055, gammaD = 4.8183,
alphaA = 0.75735, alphaU = 0.185624, b0 = 0.590076, b1 = 0.5,
dB = 3650, IB0 = 43.8787, kB = 2.15506, uB = 7.19919,
phi0 = 0.791666, phi1 = 0.000737, dCA = 10950, IC0 = 18.02366,
kC = 2.36949, uCA = 6.06349, PM = 0.774368, dCM = 67.6952,
delayMos = 10, tau1 = 0.69, tau2 = 2.31, mu0 = 0.132,
Q0 = 0.92, chi = 0.86, bites_Bed = 0.89, bites_Indoors = 0.97,
muEL = 0.0338, muLL = 0.0348, muPL = 0.249, dEL = 6.64,
dLL = 3.72, dPL = 0.643, gammaL = 13.25, km = 11, cm = 0.05,
betaL = 21.2, num_int = 1, itn_cov = 0, irs_cov = 0,
ITN_IRS_on = -1, DY = 365, d_ITN0 = 0.41, r_ITN0 = 0.56,
r_ITN1 = 0.24, r_IRS0 = 0.6, d_IRS0 = 1, irs_half_life = 0.5 *
DY, itn_half_life = 2.64 * DY, IRS_interval = 1 * DY,
ITN_interval = 3 * DY, ...)
}
\arguments{
\item{eta}{Death rate for exponential population distribution, i.e. 1/Mean Population Age. Default = 0.0001305}
\item{rho}{Age-dependent biting parameter. Default = 0.85}
\item{a0}{Age-dependent biting parameter. Default = 2920}
\item{sigma2}{Variance of the log heterogeneity in biting rates. Default = 1.67}
\item{max_age}{Maximum age in days. Default = 100*365}
\item{rA}{Rate of leaving asymptomatic infection. Default = 0.00512821}
\item{rT}{Rate of leaving treatment. Default = 0.2}
\item{rD}{Rate of leaving clinical disease. Default = 0.2}
\item{rU}{Rate of recovering from subpatent infection. Default = 0.00906627}
\item{rP}{Rate of leaving prophylaxis. Default = 0.06666667}
\item{dE}{Latent period of human infection. Default = 12}
\item{delayGam}{Lag from parasites to infectious gametocytes. Default = 12.5}
\item{cD}{Untreated disease contribution to infectiousness. Default = 0.0676909}
\item{cT}{Treated disease contribution to infectiousness. Default = 0.322 * cD}
\item{cU}{Subpatent disease contribution to infectiousness. Default = 0.006203}
\item{gamma1}{Parameter for infectiousness of state A. Default = 1.82425}
\item{d1}{Minimum probability due to maximum immunity. Default = 0.160527}
\item{dID}{Inverse of decay rate. Default = 3650}
\item{ID0}{Scale parameter. Default = 1.577533}
\item{kD}{Shape parameter. Default = 0.476614}
\item{uD}{Duration in which immunity is not boosted. Default = 9.44512}
\item{aD}{Scale parameter relating age to immunity. Default = 8001.99}
\item{fD0}{Time-scale at which immunity changes with age. Default = 0.007055}
\item{gammaD}{Shape parameter relating age to immunity. Default = 4.8183}
\item{alphaA}{PCR detection probability parameters state A. Default = 0.757}
\item{alphaU}{PCR detection probability parameters state U. Default = 0.186}
\item{b0}{Maximum probability due to no immunity. Default = 0.590076}
\item{b1}{Maximum relative reduction due to immunity. Default = 0.5}
\item{dB}{Inverse of decay rate. Default = 3650}
\item{IB0}{Scale parameter. Default = 43.8787}
\item{kB}{Shape parameter. Default = 2.15506}
\item{uB}{Duration in which immunity is not boosted. Default = 7.19919}
\item{phi0}{Maximum probability due to no immunity. Default = 0.791666}
\item{phi1}{Maximum relative reduction due to immunity. Default = 0.000737}
\item{dCA}{Inverse of decay rate. Default = 10950}
\item{IC0}{Scale parameter. Default = 18.02366}
\item{kC}{Shape parameter. Default = 2.36949}
\item{uCA}{Duration in which immunity is not boosted. Default = 6.06349}
\item{PM}{New-born immunity relative to mother’s. Default = 0.774368}
\item{dCM}{Inverse of decay rate of maternal immunity. Default = 67.6952}
\item{delayMos}{Extrinsic incubation period. Default = 10}
\item{tau1}{Duration of host seeking, assumed to be constant between species. Default = 0.69}
\item{tau2}{Duration of mosquito resting after feed. Default = 2.31}
\item{mu0}{Daily mortality of adult mosquitos. Default = 0.132}
\item{Q0}{Anthropophagy probability. Default = 0.92}
\item{chi}{Endophily probability. Default = 0.86}
\item{bites_Bed}{Percentage of bites indoors and in bed. Default = 0.89}
\item{bites_Indoors}{Percentage of bites indoors . Default = 0.97}
\item{muEL}{Per capita daily mortality rate of early stage larvae (low density). Default = 0.0338}
\item{muLL}{Per capita daily mortality rate of late stage larvae (low density). Default = 0.0348}
\item{muPL}{Per capita daily mortality rate of pupae. Default = 0.249}
\item{dEL}{Development time of early stage larvae. Default = 6.64}
\item{dLL}{Development time of late stage larvae. Default = 3.72}
\item{dPL}{Development time of pupae. Default = 0.643}
\item{gammaL}{Relative effect of density dependence on late instars relative to early instars. Default = 13.25}
\item{km}{Seasonal carrying capacity. Default = 11}
\item{cm}{Seasonal birth rate. Default = 0.05}
\item{betaL}{Number of eggs laid per day per mosquito. Default = 21.2}
\item{num_int}{Number of intervention parameters. Default = 4}
\item{itn_cov}{The proportion of people that use an ITN. Default = 0}
\item{irs_cov}{The proportion of people living in houses that have been sprayed. Default = 0}
\item{ITN_IRS_on}{Time of ITN and IRS to be activated. Default = -1, i.e. never.}
\item{DY}{Duration of year (days). Default = 365}
\item{d_ITN0}{Probability of dying with an encounter with ITN (max). Default = 0.41}
\item{r_ITN0}{Probability of repeating behaviour with ITN (max). Default = 0.56}
\item{r_ITN1}{Probability of repeating behaviour with ITN (min). Default = 0.24}
\item{r_IRS0}{Probability of repeating behaviour with IRS (min). Default = 0.6}
\item{d_IRS0}{Probability of dying with an encounter with IRS (max). Default = 1}
\item{irs_half_life}{IRS half life. Default = 0.5 * DY}
\item{itn_half_life}{ITN half life. Default = 2.64 * DY}
\item{IRS_interval}{How long before IRS is repeated, i.e. when IRS decay = 1. Default = 1 * DY}
\item{ITN_interval}{How long before ITN is repeated, i.e. when IRS decay = 1. Default = 3 * DY}
\item{...}{Any other parameters needed for non-standard model. If they share the same name
as any of the defined parameters \code{model_param_list_create} will stop. You can either write
any extra parameters you like individually, e.g. model_param_list_create(extra1 = 1, extra2 = 2)
and these parameters will appear appended to the returned list, or you can pass explicitly
the ellipsis argument as a list created before, e.g. model_param_list_create(...=list(extra1 = 1, extra2 = 2))}
}
\description{
\code{model_param_list_create} creates list of model parameters to be used
within \code{equilibrium_init_create}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/S3_definitions.R
\name{plot.rate}
\alias{plot.rate}
\title{plot method for rate object}
\usage{
\method{plot}{rate}(x, conf.int = TRUE, eps = 0.2, left.margin, xlim, ...)
}
\arguments{
\item{x}{a rate object (see \code{\link{rate}})}
\item{conf.int}{logical; default TRUE draws the confidence intervals}
\item{eps}{is the height of the ending of the error bars}
\item{left.margin}{set a custom left margin for long variable names. Function
tries to do it by default.}
\item{xlim}{change the x-axis location}
\item{...}{arguments passed on to graphical functions points and segment
(e.g. \code{col}, \code{lwd}, \code{pch} and \code{cex})}
}
\value{
Always returns `NULL` invisibly.
This function is called for its side effects.
}
\description{
Plot rate estimates with confidence intervals lines using R base graphics
}
\details{
This is a limited explanatory tool, but most graphical
parameters are user adjustable.
}
\author{
Matti Rantanen
}
| /man/plot.rate.Rd | no_license | cran/popEpi | R | false | true | 1,064 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/S3_definitions.R
\name{plot.rate}
\alias{plot.rate}
\title{plot method for rate object}
\usage{
\method{plot}{rate}(x, conf.int = TRUE, eps = 0.2, left.margin, xlim, ...)
}
\arguments{
\item{x}{a rate object (see \code{\link{rate}})}
\item{conf.int}{logical; default TRUE draws the confidence intervals}
\item{eps}{is the height of the ending of the error bars}
\item{left.margin}{set a custom left margin for long variable names. Function
tries to do it by default.}
\item{xlim}{change the x-axis location}
\item{...}{arguments passed on to graphical functions points and segment
(e.g. \code{col}, \code{lwd}, \code{pch} and \code{cex})}
}
\value{
Always returns `NULL` invisibly.
This function is called for its side effects.
}
\description{
Plot rate estimates with confidence intervals lines using R base graphics
}
\details{
This is limited explanatory tool but most graphical
parameters are user adjustable.
}
\author{
Matti Rantanen
}
|
# Fit a cross-validated elastic-net model (alpha = 0.3) on the endometrium
# ReliefF feature set and append the fitted model path to a results file.
library(glmnet)

# Column 1 is the response; columns 4..ncol are the predictor features.
# NOTE: original code used the partially-matched argument 'head=T';
# spelled out as 'header = TRUE' to avoid partial argument matching.
mydata <- read.table("../../../../TrainingSet/FullSet/ReliefF/endometrium.csv",
                     header = TRUE, sep = ",")
x <- as.matrix(mydata[, 4:ncol(mydata)])
y <- as.matrix(mydata[, 1])

# Fixed seed so the 10-fold CV split is reproducible.
set.seed(123)
glm <- cv.glmnet(x, y, nfolds = 10, type.measure = "mse", alpha = 0.3,
                 family = "gaussian", standardize = FALSE)

# Append the fitted glmnet path summary to the output file.
sink('./endometrium_042.txt', append = TRUE)
print(glm$glmnet.fit)
sink()
| /Model/EN/ReliefF/endometrium/endometrium_042.R | no_license | esbgkannan/QSMART | R | false | false | 356 | r | library(glmnet)
mydata = read.table("../../../../TrainingSet/FullSet/ReliefF/endometrium.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mse",alpha=0.3,family="gaussian",standardize=FALSE)
sink('./endometrium_042.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
|
################################################################################
# CHANGE LOG (last 20 changes)
# 03.01.2019: Elaborated description for parameter "what".
# 28.06.2016: Added support for 'Quality Sensor'.
# 02.12.2016: Possible to return multiple kits by specifying a vector.
# 29.08.2015: Added importFrom.
# 28.06.2015: Changed parameter names to format: lower.case
# 14.12.2014: what='Gender' changed to 'Sex.Marker' now return vector.
# 26.09.2014: Fixed error if kit=NULL and what!=NA.
# 26.08.2014: what=Offset/Repeat, now returns identical data frames.
# 03.08.2014: Added option to return kit index.
# 02.03.2014: Removed factor levels from 'Marker' before returning 'OFFSET'/'REPEAT'.
# 09.12.2013: Removed factor levels from 'Marker' before returning 'VIRTUAL'.
# 20.11.2013: Change parameter name 'kitNameOrIndex' to 'kit'.
# 10.11.2013: 'Marker' returns vector instead of factor.
# 24.10.2013: Fixed error when no matching kit and 'what'!=NA, return NA.
# 04.10.2013: Removed factor levels from 'Marker' before returning 'COLOR'.
# 17.09.2013: Added new parameter 'what' to specify return values.
# 16.09.2013: Changed to support new kits file structure.
# 05.06.2013: Added 'gender.marker'
# 19.05.2013: Re-written for reading data from text file.
#' @title Get Kit
#'
#' @description
#' Provides information about STR kits.
#'
#' @details
#' The function returns the following information for a kit specified in kits.txt:
#' Panel name, short kit name (unique, user defined), full kit name (user defined),
#' marker names, allele names, allele sizes (bp),
#' minimum allele size, maximum allele size (bp), flag for virtual alleles,
#' marker color, marker repeat unit size (bp), minimum marker size,
#' maximum marker, marker offset (bp), flag for sex markers (TRUE/FALSE).
#'
#' If no matching kit or kit index is found NA is returned.
#' If kit='NULL' or '0' a vector of available kits is printed and NA returned.
#'
#' @param kit string or integer to specify the kit.
#' @param what string to specify which information to return. Default is 'NA' which return all info.
#' Not case sensitive. Possible values: "Index", "Panel", "Short.Name", "Full.Name",
#' "Marker, "Allele", "Size", "Virtual", "Color", "Repeat", "Range", "Offset", "Sex.Marker",
#' "Quality.Sensor". An unsupported value returns NA and a warning.
#' @param show.messages logical, default TRUE for printing messages to the R prompt.
#' @param .kit.info data frame, run function on a data frame instead of the kits.txt file.
#' @param debug logical indicating printing debug information.
#'
#' @return data.frame with kit information.
#'
#' @export
#'
#' @importFrom utils read.delim
#'
#' @examples
#' # Show all information stored for kit with short name 'ESX17'.
#' getKit("ESX17")
getKit <- function(kit = NULL, what = NA, show.messages = FALSE, .kit.info = NULL, debug = FALSE) {
  if (debug) {
    print(paste("IN:", match.call()[[1]]))
  }

  .separator <- .Platform$file.sep # Platform dependent path separator.

  # LOAD KIT INFO ############################################################

  if (is.null(.kit.info)) {
    # Read the kit definition file bundled with the package.
    packagePath <- path.package("strvalidator", quiet = FALSE)
    subFolder <- "extdata"
    fileName <- "kit.txt"
    filePath <- paste(packagePath, subFolder, fileName, sep = .separator)

    .kit.info <- read.delim(
      file = filePath, header = TRUE, sep = "\t", quote = "\"",
      dec = ".", fill = TRUE, stringsAsFactors = FALSE
    )
  }

  # Available kits. Must match else if construct.
  kits <- unique(.kit.info$Short.Name)

  # Check if NULL
  if (is.null(kit)) {
    # Print available kits
    if (show.messages) {
      message("Available kits:")
    }
    res <- kits

    # String provided.
  } else {
    # Check if number or string.
    if (is.numeric(kit)) {
      # Set index to number.
      index <- kit
    } else {
      # Find matching kit index (case insensitive)
      index <- match(toupper(kit), toupper(kits))
    }

    # No matching kit.
    if (any(is.na(index))) {
      # Print available kits
      if (show.messages) {
        message(paste(
          "No matching kit! \nAvailable kits:",
          paste(kits, collapse = ", ")
        ))
      }
      return(NA)

      # Assign matching kit information.
    } else {
      currentKit <- .kit.info[.kit.info$Short.Name %in% kits[index], ]

      res <- data.frame(
        Panel = currentKit$Panel,
        Short.Name = currentKit$Short.Name,
        Full.Name = currentKit$Full.Name,
        Marker = currentKit$Marker,
        Allele = currentKit$Allele,
        Size = currentKit$Size,
        Size.Min = currentKit$Size.Min,
        Size.Max = currentKit$Size.Max,
        Virtual = currentKit$Virtual,
        Color = currentKit$Color,
        Repeat = currentKit$Repeat,
        Marker.Min = currentKit$Marker.Min,
        Marker.Max = currentKit$Marker.Max,
        Offset = currentKit$Offset,
        Sex.Marker = currentKit$Sex.Marker,
        Quality.Sensor = currentKit$Quality.Sensor,
        stringsAsFactors = FALSE
      )

      # Create useful factors (preserve file order of markers).
      res$Marker <- factor(res$Marker, levels = unique(res$Marker))
    }
  }

  # Used in error message in 'else'.
  options <- paste("Index",
    "Panel",
    "Short.Name",
    "Full.Name",
    "Marker",
    "Allele",
    "Size",
    "Virtual",
    "Color",
    "Repeat",
    "Range",
    "Offset",
    "Sex.Marker",
    "Quality.Sensor",
    sep = ", "
  )

  # WHAT ----------------------------------------------------------------------
  # Kit is required.
  if (!is.null(kit)) {
    if (is.na(what)) {
      # Return all kit information.
      return(res)
    } else if (toupper(what) == "INDEX") {
      # Return kit index.
      return(index)
    } else if (toupper(what) == "PANEL") {
      # Return panel name.
      return(unique(res$Panel))
    } else if (toupper(what) == "SHORT.NAME") {
      # Return short name.
      return(unique(res$Short.Name))
    } else if (toupper(what) == "FULL.NAME") {
      # Return full name.
      return(unique(res$Full.Name))
    } else if (toupper(what) == "MARKER") {
      # Return all markers as a character vector.
      return(as.vector(unique(res$Marker)))
    } else if (toupper(what) == "ALLELE") {
      # Return all alleles and markers.
      res <- data.frame(Marker = res$Marker, Allele = res$Allele)
      return(res)
    } else if (toupper(what) == "SIZE") {
      # Returns all alleles and their indicated normal size in base pair.
      # Their normal size range is indicated in min and max columns.
      # Grouped by marker.
      res <- data.frame(
        Marker = res$Marker,
        Allele = res$Allele,
        Size = res$Size,
        Size.Min = res$Size.Min,
        Size.Max = res$Size.Max,
        stringsAsFactors = FALSE
      )
      return(res)
    } else if (toupper(what) == "VIRTUAL") {
      # Returns all alleles (bins) with a flag if it is virtual
      # 1 for virtual or 0 it it is a physical ladder fragment.
      # Grouped per marker.
      res <- data.frame(
        Marker = as.character(res$Marker),
        Allele = res$Allele,
        Virtual = res$Virtual,
        stringsAsFactors = FALSE
      )
      return(res)
    } else if (toupper(what) == "COLOR") {
      # Return markers and their color as strings.
      # NB: forward .kit.info so user-supplied kit tables are honoured
      # (and the kit file is not re-read) in the recursive call.
      marker <- getKit(kit, what = "Marker", .kit.info = .kit.info)
      color <- NA
      for (m in seq_along(marker)) {
        color[m] <- unique(res$Color[res$Marker == marker[m]])
      }
      res <- data.frame(
        Marker = marker,
        Color = color,
        stringsAsFactors = FALSE
      )
      return(res)
    } else if (toupper(what) %in% c("REPEAT", "OFFSET")) {
      # 'Repeat' and 'Offset' intentionally return identical data frames:
      # markers with their estimated offset and repeat unit length (bp).
      marker <- getKit(kit, what = "Marker", .kit.info = .kit.info)
      offset <- NA
      repeatUnit <- NA
      for (m in seq_along(marker)) {
        offset[m] <- unique(res$Offset[res$Marker == marker[m]])
        repeatUnit[m] <- unique(res$Repeat[res$Marker == marker[m]])
      }
      res <- data.frame(
        Marker = marker, Offset = offset, Repeat = repeatUnit,
        stringsAsFactors = FALSE
      )
      return(res)
    } else if (toupper(what) == "RANGE") {
      # Return markers and their range (min and max) in base pair.
      marker <- getKit(kit, what = "Marker", .kit.info = .kit.info)
      markerMin <- NA
      markerMax <- NA
      color <- NA
      for (m in seq_along(marker)) {
        markerMin[m] <- unique(res$Marker.Min[res$Marker == marker[m]])
        markerMax[m] <- unique(res$Marker.Max[res$Marker == marker[m]])
        color[m] <- unique(res$Color[res$Marker == marker[m]])
      }
      res <- data.frame(
        Marker = marker,
        Color = color,
        Marker.Min = markerMin,
        Marker.Max = markerMax,
        stringsAsFactors = FALSE
      )
      # Create useful factors.
      res$Color <- factor(res$Color, levels = unique(res$Color))
      return(res)
    } else if (toupper(what) == "QUALITY.SENSOR") {
      # Return quality sensors as vector.
      qsMarkers <- as.character(unique(res$Marker[res$Quality.Sensor == TRUE]))
      return(qsMarkers)
    } else if (toupper(what) == "SEX.MARKER") {
      # Return sex markers as vector.
      sexMarkers <- as.character(unique(res$Marker[res$Sex.Marker == TRUE]))
      return(sexMarkers)
    } else {
      warning(paste(what, "not supported! \nwhat = {", options, "}"))
      return(NA)
    }
  } else {
    # If kit is NULL return available kits.
    return(res)
  }
}
| /R/getKit.r | no_license | OskarHansson/strvalidator | R | false | false | 10,016 | r | ################################################################################
# CHANGE LOG (last 20 changes)
# 03.01.2019: Elaborated description for parameter "what".
# 28.06.2016: Added support for 'Quality Sensor'.
# 02.12.2016: Possible to return multiple kits by specifying a vector.
# 29.08.2015: Added importFrom.
# 28.06.2015: Changed parameter names to format: lower.case
# 14.12.2014: what='Gender' changed to 'Sex.Marker' now return vector.
# 26.09.2014: Fixed error if kit=NULL and what!=NA.
# 26.08.2014: what=Offset/Repeat, now returns identical data frames.
# 03.08.2014: Added option to return kit index.
# 02.03.2014: Removed factor levels from 'Marker' before returning 'OFFSET'/'REPEAT'.
# 09.12.2013: Removed factor levels from 'Marker' before returning 'VIRTUAL'.
# 20.11.2013: Change parameter name 'kitNameOrIndex' to 'kit'.
# 10.11.2013: 'Marker' returns vector instead of factor.
# 24.10.2013: Fixed error when no matching kit and 'what'!=NA, return NA.
# 04.10.2013: Removed factor levels from 'Marker' before returning 'COLOR'.
# 17.09.2013: Added new parameter 'what' to specify return values.
# 16.09.2013: Changed to support new kits file structure.
# 05.06.2013: Added 'gender.marker'
# 19.05.2013: Re-written for reading data from text file.
#' @title Get Kit
#'
#' @description
#' Provides information about STR kits.
#'
#' @details
#' The function returns the following information for a kit specified in kits.txt:
#' Panel name, short kit name (unique, user defined), full kit name (user defined),
#' marker names, allele names, allele sizes (bp),
#' minimum allele size, maximum allele size (bp), flag for virtual alleles,
#' marker color, marker repeat unit size (bp), minimum marker size,
#' maximum marker, marker offset (bp), flag for sex markers (TRUE/FALSE).
#'
#' If no matching kit or kit index is found NA is returned.
#' If kit='NULL' or '0' a vector of available kits is printed and NA returned.
#'
#' @param kit string or integer to specify the kit.
#' @param what string to specify which information to return. Default is 'NA' which return all info.
#' Not case sensitive. Possible values: "Index", "Panel", "Short.Name", "Full.Name",
#' "Marker, "Allele", "Size", "Virtual", "Color", "Repeat", "Range", "Offset", "Sex.Marker",
#' "Quality.Sensor". An unsupported value returns NA and a warning.
#' @param show.messages logical, default TRUE for printing messages to the R prompt.
#' @param .kit.info data frame, run function on a data frame instead of the kits.txt file.
#' @param debug logical indicating printing debug information.
#'
#' @return data.frame with kit information.
#'
#' @export
#'
#' @importFrom utils read.delim
#'
#' @examples
#' # Show all information stored for kit with short name 'ESX17'.
#' getKit("ESX17")
getKit <- function(kit = NULL, what = NA, show.messages = FALSE, .kit.info = NULL, debug = FALSE) {
if (debug) {
print(paste("IN:", match.call()[[1]]))
}
.separator <- .Platform$file.sep # Platform dependent path separator.
# LOAD KIT INFO ############################################################
if (is.null(.kit.info)) {
# Get package path.
packagePath <- path.package("strvalidator", quiet = FALSE)
subFolder <- "extdata"
fileName <- "kit.txt"
filePath <- paste(packagePath, subFolder, fileName, sep = .separator)
.kit.info <- read.delim(
file = filePath, header = TRUE, sep = "\t", quote = "\"",
dec = ".", fill = TRUE, stringsAsFactors = FALSE
)
}
# Available kits. Must match else if construct.
kits <- unique(.kit.info$Short.Name)
# Check if NULL
if (is.null(kit)) {
# Print available kits
if (show.messages) {
message("Available kits:")
}
res <- kits
# String provided.
} else {
# Check if number or string.
if (is.numeric(kit)) {
# Set index to number.
index <- kit
} else {
# Find matching kit index (case insensitive)
index <- match(toupper(kit), toupper(kits))
}
# No matching kit.
if (any(is.na(index))) {
# Print available kits
if (show.messages) {
message(paste(
"No matching kit! \nAvailable kits:",
paste(kits, collapse = ", ")
))
}
return(NA)
# Assign matching kit information.
} else {
currentKit <- .kit.info[.kit.info$Short.Name %in% kits[index], ]
res <- data.frame(
Panel = currentKit$Panel,
Short.Name = currentKit$Short.Name,
Full.Name = currentKit$Full.Name,
Marker = currentKit$Marker,
Allele = currentKit$Allele,
Size = currentKit$Size,
Size.Min = currentKit$Size.Min,
Size.Max = currentKit$Size.Max,
Virtual = currentKit$Virtual,
Color = currentKit$Color,
Repeat = currentKit$Repeat,
Marker.Min = currentKit$Marker.Min,
Marker.Max = currentKit$Marker.Max,
Offset = currentKit$Offset,
Sex.Marker = currentKit$Sex.Marker,
Quality.Sensor = currentKit$Quality.Sensor,
stringsAsFactors = FALSE
)
# Create useful factors.
res$Marker <- factor(res$Marker, levels = unique(res$Marker))
}
}
# Used in error message in 'else'.
options <- paste("Index",
"Panel",
"Short.Name",
"Full.Name",
"Marker",
"Allele",
"Size",
"Virtual",
"Color",
"Repeat",
"Range",
"Offset",
"Sex.Marker",
"Quality.Sensor",
sep = ", "
)
# WHAT ----------------------------------------------------------------------
# Kit is required.
if (!is.null(kit)) {
if (is.na(what)) {
# Return all kit information.
return(res)
} else if (toupper(what) == "INDEX") {
# Return kit index.
return(index)
} else if (toupper(what) == "PANEL") {
# Return panel name.
return(unique(res$Panel))
} else if (toupper(what) == "SHORT.NAME") {
# Return short name.
return(unique(res$Short.Name))
} else if (toupper(what) == "FULL.NAME") {
# Return full name.
return(unique(res$Full.Name))
} else if (toupper(what) == "MARKER") {
# Return all markers.
return(as.vector(unique(res$Marker)))
} else if (toupper(what) == "ALLELE") {
# Return all alleles and markers.
res <- data.frame(Marker = res$Marker, Allele = res$Allele)
return(res)
} else if (toupper(what) == "SIZE") {
# Returns all alleles and their indicated normal size in base pair.
# Their normal size range is idicated in min and max columns.
# Grouped by marker.
res <- data.frame(
Marker = res$Marker,
Allele = res$Allele,
Size = res$Size,
Size.Min = res$Size.Min,
Size.Max = res$Size.Max,
stringsAsFactors = FALSE
)
return(res)
} else if (toupper(what) == "VIRTUAL") {
# Returns all alleles (bins) with a flag if it is virtual
# 1 for virtual or 0 it it is a physical ladder fragment.
# Grouped per marker.
res <- data.frame(
Marker = as.character(res$Marker),
Allele = res$Allele,
Virtual = res$Virtual,
stringsAsFactors = FALSE
)
return(res)
} else if (toupper(what) == "COLOR") {
# Return markers and their color as strings.
marker <- getKit(kit, what = "Marker")
color <- NA
for (m in seq(along = marker)) {
color[m] <- unique(res$Color[res$Marker == marker[m]])
}
res <- data.frame(
Marker = marker,
Color = color,
stringsAsFactors = FALSE
)
return(res)
} else if (toupper(what) == "REPEAT") {
# Return markers and their repeat unit length in base pair.
marker <- getKit(kit, what = "Marker")
offset <- NA
repeatUnit <- NA
for (m in seq(along = marker)) {
offset[m] <- unique(res$Offset[res$Marker == marker[m]])
repeatUnit[m] <- unique(res$Repeat[res$Marker == marker[m]])
}
res <- data.frame(
Marker = marker, Offset = offset, Repeat = repeatUnit,
stringsAsFactors = FALSE
)
return(res)
} else if (toupper(what) == "RANGE") {
# Return markers and their range (min and max) in base pair.
marker <- getKit(kit, what = "Marker")
markerMin <- NA
markerMax <- NA
color <- NA
for (m in seq(along = marker)) {
markerMin[m] <- unique(res$Marker.Min[res$Marker == marker[m]])
markerMax[m] <- unique(res$Marker.Max[res$Marker == marker[m]])
color[m] <- unique(res$Color[res$Marker == marker[m]])
}
res <- data.frame(
Marker = marker,
Color = color,
Marker.Min = markerMin,
Marker.Max = markerMax,
stringsAsFactors = FALSE
)
# Create useful factors.
res$Color <- factor(res$Color, levels = unique(res$Color))
return(res)
} else if (toupper(what) == "OFFSET") {
# Return markers and their estimated offset in base pair.
marker <- getKit(kit, what = "Marker")
offset <- NA
repeatUnit <- NA
for (m in seq(along = marker)) {
offset[m] <- unique(res$Offset[res$Marker == marker[m]])
repeatUnit[m] <- unique(res$Repeat[res$Marker == marker[m]])
}
res <- data.frame(
Marker = marker, Offset = offset, Repeat = repeatUnit,
stringsAsFactors = FALSE
)
return(res)
} else if (toupper(what) == "QUALITY.SENSOR") {
# Return quality sensors as vector.
qsMarkers <- as.character(unique(res$Marker[res$Quality.Sensor == TRUE]))
return(qsMarkers)
} else if (toupper(what) == "SEX.MARKER") {
# Return sex markers as vector.
sexMarkers <- as.character(unique(res$Marker[res$Sex.Marker == TRUE]))
return(sexMarkers)
} else {
warning(paste(what, "not supported! \nwhat = {", options, "}"))
return(NA)
}
} else {
# If kit is NULL return available kits.
return(res)
}
}
|
context("Functional end-to-end tests")
tmp_a <- tempfile()
tmp_b <- tempfile()
setup({
# Try to initialize sparkr. Requires the env var SPARK_HOME pointing to a local spark installation
if (!is.na(Sys.getenv("SPARK_HOME", unset = NA))){
library(SparkR, lib.loc=normalizePath(file.path(Sys.getenv("SPARK_HOME"), "R", "lib")))
} else {
stop("please set SPARK_HOME env var")
}
SparkR::sparkR.session()
})
test_that("Test comparison",{
library(sparkdataframecompare)
df_a <- data.frame(
col_1 = c(2, 3, 4, 5, 6, 7, 8, 9, 10, 11),
col_2 = as.Date(c("2019-01-01", "2018-12-31", "2018-11-30", "2018-10-31", "2018-09-30", "2018-07-31", "2018-06-30", "2018-01-01", "2017-12-31", "2016-03-31")),
col_3 = c("v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10"),
col_4 = c(1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9, 10.0),
col_5 = c(10L, 20L, 30L, 40L, 50L, 60L, 70L, 80L, 90L, 100L),
col_extra_a = c("a", "b", "c", "d", "e", "f", "g", "h", "i", "l")
)
df_b <- data.frame(
col_1 = c(2, 3, 4, 5, 6, 7, 12, 9, 10, 11),
col_2 = as.Date(c("2019-01-01", "2018-12-31", "2018-11-30", "2018-10-31", "2018-08-30", "2018-07-31", "2018-06-30", "2018-01-01", "2017-12-31", "2016-03-31")),
col_3 = c("v1", "v2", "vv", "v4", "v5", "v6", "v7", "v8", "v9", "v10"),
col_4 = c(1.1, 2.2, 3.3, 4.4, 5.5, 0.0, 7.7, 8.8, 9.9, 10.0),
col_5 = c(10L, 40L, 30L, 40L, 50L, 60L, 70L, 80L, 90L, 100L),
col_extra_b = c("a", "b", "c", "d", "e", "f", "g", "h", "i", "l")
)
SparkR::createOrReplaceTempView(SparkR::createDataFrame(df_a), "df_a")
SparkR::createOrReplaceTempView(SparkR::createDataFrame(df_b), "df_b")
res <- compare("df_a", "df_b", c("col_1"), report_spec(".", "null"), logger=NULL, progress_fun=NULL)
expect_equal(res$table_1, "df_a")
expect_equal(res$table_2, "df_b")
expect_equal(res$join_cols, c("col_1"))
expect_equal(res$rows$missing_in_1$count, 1)
expect_equal(res$rows$missing_in_2$count, 1)
expect_equal(res$rows$common$count, 9)
expect_equal(res$columns$missing_in_1, list(count=1, names=c("col_extra_b")))
expect_equal(res$columns$missing_in_2, list(count=1, names=c("col_extra_a")))
expect_equal(res$columns$common, list(count=5, names=c("col_1", "col_2", "col_3", "col_4", "col_5")))
expect_equal(res$columns$compare_columns, c("col_2", "col_3", "col_4", "col_5"))
col_diff <- res$columns$diff
# col_2
df <- col_diff[col_diff$name=="col_2",]
expect_equal(df$type_1, df$type_2)
expect_equal(df$type_1, "date")
expect_equal(df$differences, 1)
expect_equal(df$differences_pct, 1/9*100)
expect_equal(df$rel_diff_mean, 1)
# col_3
df <- col_diff[col_diff$name=="col_3",]
expect_equal(df$type_1, df$type_2)
expect_equal(df$type_1, "string")
expect_equal(df$differences, 1)
expect_equal(df$differences_pct, 1/9*100)
expect_equal(df$rel_diff_mean, 1)
# col_4
df <- col_diff[col_diff$name=="col_4",]
expect_equal(df$type_1, df$type_2)
expect_equal(df$type_1, "double")
expect_equal(df$differences, 1)
expect_equal(df$differences_pct, 1/9*100)
expect_equal(df$abs_diff_mean, 6.6)
# col_5
df <- col_diff[col_diff$name=="col_5",]
expect_equal(df$type_1, df$type_2)
expect_equal(df$type_1, "int")
expect_equal(df$differences, 1)
expect_equal(df$differences_pct, 1/9*100)
expect_equal(df$abs_diff_mean, -20)
expect_equal(df$rel_diff_mean, -1)
})
test_that("Test NA count",{
library(sparkdataframecompare)
df_a <- data.frame(
col_1 = c(1, 2, 3, 4, 5),
col_2 = as.Date(c("2019-01-01", "2018-12-31", NA, "2018-10-31", "2018-09-30")),
col_3 = c("v1", "v2", "v6", "v4", NA),
col_4 = c(NaN, 2.2, 3.3, 4.4, 5.5),
col_5 = c(NA, 20L, NaN, 40L, 50L)
)
df_b <- data.frame(
col_1 = c(1, 2, 3, 4, 5),
col_2 = as.Date(c("2019-01-01", NA, "2018-12-31", "2018-10-31", "2018-09-30")),
col_3 = c("v1", "v2", "v3", "v4", NA),
col_4 = c(2.2, NaN, 3.3, 4.4, 5.4),
col_5 = c(NA, NA, NA, NA, 50L)
)
# unfortunately as of spark 2.3.2 serialization of NA doesn't work properly, so we need
# to serialize to CSV and read from CSV in spark
write.csv(df_a, tmp_a, row.names = FALSE)
write.csv(df_b, tmp_b, row.names = FALSE)
schema <- SparkR::structType("col_1 int, col_2 date, col_3 string, col_4 double, col_5 int")
SparkR::createOrReplaceTempView(SparkR::read.df(tmp_a, source="csv", sep=",", header=TRUE, schema=schema), "df_a")
SparkR::createOrReplaceTempView(SparkR::read.df(tmp_b, source="csv", sep=",", header=TRUE, schema=schema), "df_b")
res <- compare("df_a", "df_b", c("col_1"), report_spec(".", "null"), logger=NULL, progress_fun=NULL)
col_diff <- res$columns$diff
# col_2
df <- col_diff[col_diff$name=="col_2",]
expect_equal(df$type_1, df$type_2)
expect_equal(df$type_1, "date")
expect_equal(df$NA_1, 1)
expect_equal(df$NA_2, 1)
expect_equal(df$NA_both, 0)
expect_equal(df$differences, 2)
# col_3
df <- col_diff[col_diff$name=="col_3",]
expect_equal(df$type_1, df$type_2)
expect_equal(df$type_1, "string")
expect_equal(df$NA_1, 1)
expect_equal(df$NA_2, 1)
expect_equal(df$NA_both, 1)
expect_equal(df$differences, 1)
# col_4
df <- col_diff[col_diff$name=="col_4",]
expect_equal(df$type_1, df$type_2)
expect_equal(df$type_1, "double")
expect_equal(df$NA_1, 1)
expect_equal(df$NA_2, 1)
expect_equal(df$NA_both, 0)
expect_equal(df$differences, 3)
# col_5
df <- col_diff[col_diff$name=="col_5",]
expect_equal(df$type_1, df$type_2)
expect_equal(df$type_1, "int")
expect_equal(df$NA_1, 2)
expect_equal(df$NA_2, 4)
expect_equal(df$NA_both, 2)
expect_equal(df$differences, 2)
})
teardown({
SparkR::sparkR.stop()
unlink(tmp_a)
unlink(tmp_b)
})
| /tests/testthat/test_functional.R | no_license | avalente/sparkdataframecompare | R | false | false | 6,022 | r | context("Functional end-to-end tests")
tmp_a <- tempfile()
tmp_b <- tempfile()
setup({
# Try to initialize sparkr. Requires the env var SPARK_HOME pointing to a local spark installation
if (!is.na(Sys.getenv("SPARK_HOME", unset = NA))){
library(SparkR, lib.loc=normalizePath(file.path(Sys.getenv("SPARK_HOME"), "R", "lib")))
} else {
stop("please set SPARK_HOME env var")
}
SparkR::sparkR.session()
})
test_that("Test comparison",{
library(sparkdataframecompare)
df_a <- data.frame(
col_1 = c(2, 3, 4, 5, 6, 7, 8, 9, 10, 11),
col_2 = as.Date(c("2019-01-01", "2018-12-31", "2018-11-30", "2018-10-31", "2018-09-30", "2018-07-31", "2018-06-30", "2018-01-01", "2017-12-31", "2016-03-31")),
col_3 = c("v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10"),
col_4 = c(1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9, 10.0),
col_5 = c(10L, 20L, 30L, 40L, 50L, 60L, 70L, 80L, 90L, 100L),
col_extra_a = c("a", "b", "c", "d", "e", "f", "g", "h", "i", "l")
)
df_b <- data.frame(
col_1 = c(2, 3, 4, 5, 6, 7, 12, 9, 10, 11),
col_2 = as.Date(c("2019-01-01", "2018-12-31", "2018-11-30", "2018-10-31", "2018-08-30", "2018-07-31", "2018-06-30", "2018-01-01", "2017-12-31", "2016-03-31")),
col_3 = c("v1", "v2", "vv", "v4", "v5", "v6", "v7", "v8", "v9", "v10"),
col_4 = c(1.1, 2.2, 3.3, 4.4, 5.5, 0.0, 7.7, 8.8, 9.9, 10.0),
col_5 = c(10L, 40L, 30L, 40L, 50L, 60L, 70L, 80L, 90L, 100L),
col_extra_b = c("a", "b", "c", "d", "e", "f", "g", "h", "i", "l")
)
SparkR::createOrReplaceTempView(SparkR::createDataFrame(df_a), "df_a")
SparkR::createOrReplaceTempView(SparkR::createDataFrame(df_b), "df_b")
res <- compare("df_a", "df_b", c("col_1"), report_spec(".", "null"), logger=NULL, progress_fun=NULL)
expect_equal(res$table_1, "df_a")
expect_equal(res$table_2, "df_b")
expect_equal(res$join_cols, c("col_1"))
expect_equal(res$rows$missing_in_1$count, 1)
expect_equal(res$rows$missing_in_2$count, 1)
expect_equal(res$rows$common$count, 9)
expect_equal(res$columns$missing_in_1, list(count=1, names=c("col_extra_b")))
expect_equal(res$columns$missing_in_2, list(count=1, names=c("col_extra_a")))
expect_equal(res$columns$common, list(count=5, names=c("col_1", "col_2", "col_3", "col_4", "col_5")))
expect_equal(res$columns$compare_columns, c("col_2", "col_3", "col_4", "col_5"))
col_diff <- res$columns$diff
# col_2
df <- col_diff[col_diff$name=="col_2",]
expect_equal(df$type_1, df$type_2)
expect_equal(df$type_1, "date")
expect_equal(df$differences, 1)
expect_equal(df$differences_pct, 1/9*100)
expect_equal(df$rel_diff_mean, 1)
# col_3
df <- col_diff[col_diff$name=="col_3",]
expect_equal(df$type_1, df$type_2)
expect_equal(df$type_1, "string")
expect_equal(df$differences, 1)
expect_equal(df$differences_pct, 1/9*100)
expect_equal(df$rel_diff_mean, 1)
# col_4
df <- col_diff[col_diff$name=="col_4",]
expect_equal(df$type_1, df$type_2)
expect_equal(df$type_1, "double")
expect_equal(df$differences, 1)
expect_equal(df$differences_pct, 1/9*100)
expect_equal(df$abs_diff_mean, 6.6)
# col_5
df <- col_diff[col_diff$name=="col_5",]
expect_equal(df$type_1, df$type_2)
expect_equal(df$type_1, "int")
expect_equal(df$differences, 1)
expect_equal(df$differences_pct, 1/9*100)
expect_equal(df$abs_diff_mean, -20)
expect_equal(df$rel_diff_mean, -1)
})
test_that("Test NA count",{
library(sparkdataframecompare)
df_a <- data.frame(
col_1 = c(1, 2, 3, 4, 5),
col_2 = as.Date(c("2019-01-01", "2018-12-31", NA, "2018-10-31", "2018-09-30")),
col_3 = c("v1", "v2", "v6", "v4", NA),
col_4 = c(NaN, 2.2, 3.3, 4.4, 5.5),
col_5 = c(NA, 20L, NaN, 40L, 50L)
)
df_b <- data.frame(
col_1 = c(1, 2, 3, 4, 5),
col_2 = as.Date(c("2019-01-01", NA, "2018-12-31", "2018-10-31", "2018-09-30")),
col_3 = c("v1", "v2", "v3", "v4", NA),
col_4 = c(2.2, NaN, 3.3, 4.4, 5.4),
col_5 = c(NA, NA, NA, NA, 50L)
)
# unfortunately as of spark 2.3.2 serialization of NA doesn't work properly, so we need
# to serialize to CSV and read from CSV in spark
write.csv(df_a, tmp_a, row.names = FALSE)
write.csv(df_b, tmp_b, row.names = FALSE)
schema <- SparkR::structType("col_1 int, col_2 date, col_3 string, col_4 double, col_5 int")
SparkR::createOrReplaceTempView(SparkR::read.df(tmp_a, source="csv", sep=",", header=TRUE, schema=schema), "df_a")
SparkR::createOrReplaceTempView(SparkR::read.df(tmp_b, source="csv", sep=",", header=TRUE, schema=schema), "df_b")
res <- compare("df_a", "df_b", c("col_1"), report_spec(".", "null"), logger=NULL, progress_fun=NULL)
col_diff <- res$columns$diff
# col_2
df <- col_diff[col_diff$name=="col_2",]
expect_equal(df$type_1, df$type_2)
expect_equal(df$type_1, "date")
expect_equal(df$NA_1, 1)
expect_equal(df$NA_2, 1)
expect_equal(df$NA_both, 0)
expect_equal(df$differences, 2)
# col_3
df <- col_diff[col_diff$name=="col_3",]
expect_equal(df$type_1, df$type_2)
expect_equal(df$type_1, "string")
expect_equal(df$NA_1, 1)
expect_equal(df$NA_2, 1)
expect_equal(df$NA_both, 1)
expect_equal(df$differences, 1)
# col_4
df <- col_diff[col_diff$name=="col_4",]
expect_equal(df$type_1, df$type_2)
expect_equal(df$type_1, "double")
expect_equal(df$NA_1, 1)
expect_equal(df$NA_2, 1)
expect_equal(df$NA_both, 0)
expect_equal(df$differences, 3)
# col_5
df <- col_diff[col_diff$name=="col_5",]
expect_equal(df$type_1, df$type_2)
expect_equal(df$type_1, "int")
expect_equal(df$NA_1, 2)
expect_equal(df$NA_2, 4)
expect_equal(df$NA_both, 2)
expect_equal(df$differences, 2)
})
teardown({
SparkR::sparkR.stop()
unlink(tmp_a)
unlink(tmp_b)
})
|
\name{position_stack}
\alias{position_stack}
\alias{PositionStack}
\title{position\_stack}
\description{Stack overlapping objects on top of one another}
\details{
This page describes position\_stack, see \code{\link{layer}} and \code{\link{qplot}} for how to create a complete plot from individual components.
}
\usage{position_stack(width=NULL, height=NULL, ...)}
\arguments{
\item{width}{NULL}
\item{height}{NULL}
\item{...}{ignored }
}
\seealso{\itemize{
\item \url{http://had.co.nz/ggplot2/position_stack.html}
}}
\value{A \code{\link{layer}}}
\examples{\dontrun{
# See ?geom_bar and ?geom_area for more examples
ggplot(mtcars, aes(x=factor(cyl), fill=factor(vs))) + geom_bar()
ggplot(diamonds, aes(x=price)) + geom_histogram(binwidth=500)
ggplot(diamonds, aes(x=price, fill=cut)) + geom_histogram(binwidth=500)
}}
\author{Hadley Wickham, \url{http://had.co.nz/}}
\keyword{hplot}
| /man/position_stack.rd | no_license | strongh/ggplot2 | R | false | false | 892 | rd | \name{position_stack}
\alias{position_stack}
\alias{PositionStack}
\title{position\_stack}
\description{Stack overlapping objects on top of one another}
\details{
This page describes position\_stack, see \code{\link{layer}} and \code{\link{qplot}} for how to create a complete plot from individual components.
}
\usage{position_stack(width=NULL, height=NULL, ...)}
\arguments{
\item{width}{NULL}
\item{height}{NULL}
\item{...}{ignored }
}
\seealso{\itemize{
\item \url{http://had.co.nz/ggplot2/position_stack.html}
}}
\value{A \code{\link{layer}}}
\examples{\dontrun{
# See ?geom_bar and ?geom_area for more examples
ggplot(mtcars, aes(x=factor(cyl), fill=factor(vs))) + geom_bar()
ggplot(diamonds, aes(x=price)) + geom_histogram(binwidth=500)
ggplot(diamonds, aes(x=price, fill=cut)) + geom_histogram(binwidth=500)
}}
\author{Hadley Wickham, \url{http://had.co.nz/}}
\keyword{hplot}
|
setwd("/Users/Aparajit/Desktop/Kaggle/")
#used to set working directory
train <- read.csv("train.csv",header = TRUE,stringsAsFactors= TRUE)
testData <- read.csv("test.csv",header = TRUE,stringsAsFactors= TRUE)
# read.csv used to read CSV files into R studio
head(train)
plot(density(train$Age,na.rm = TRUE))
# Most people between 20 to 40 years of age
plot(density(train$Pclass,na.rm = TRUE))
# 3rd class >1st class > 2nd class
## Survival rate by sex
plot(density(train$Fare,na.rm=TRUE))
counts <- table(train$Survived,train$Sex)
head(counts)
barplot(counts,xlab = "Sex",ylab = "No of Ppl",main = "Survival Rate by Sex")
counts[2] / (counts[1] + counts[2])
counts[3]/(counts[3]+counts[4])
109/(468+109)
##Survival rate by passenger class
PcClassSurvival <- table(train$Survived,train$Pclass)
head(PcClassSurvival)
barplot(PcClassSurvival,xlab = "Class of Cabin",ylab = "No of people",main = "Survival by Class")
PcClassSurvival[2]/(PcClassSurvival[2]+PcClassSurvival[1])
PcClassSurvival[4]/(PcClassSurvival[4]+PcClassSurvival[3])
PcClassSurvival[6]/(PcClassSurvival[6]+PcClassSurvival[5])
## Survival rate by age
SurviavlByAge <- table(train$Survived,train$Age)
head(SurviavlByAge)
#
### Part one ends before this comment
### Part two starts below
#removing non relevamt variables
head(train)
traindata <- train[-c(1,9:12)]
head(traindata)
#
#Replacing Gender variable (Male/Female) with a Dummy Variable (0/1)
traindata$Sex=gsub("female",1,traindata$Sex,ignore.case = TRUE)
traindata$Sex=gsub("^male",0,traindata$Sex,ignore.case = TRUE)
#
head(traindata)
#trying to find percentage of missing values by attribute
colMeans(is.na(traindata))
#Age has 19 % missing values
library("mice")
md.pattern(traindata)
test <- traindata[-c(3)]
#Using mice package we know that 714 records have zero missing values
# 177 records have age missing
methods(mice)
tempData <- mice(test,m=5,maxit=50,meth='pmm',seed=500)
summary(tempData)
tempData$imp$Age
imputeddata <- complete(tempData,1)
traindata <- cbind(imputeddata,traindata$Name)
colnames(traindata)[7] <- "Name"
md.pattern(traindata)
## train data has no more missing values
### Creating new variables child , family and mother
#CHILD
traindata["Child"]
for (i in 1:nrow(traindata)) {
if (traindata$Age[i] <= 18) {
traindata$Child[i] = 1
} else {
traindata$Child[i] = 2
}
}
#FAMILY
traindata["Family"] = NA
for(i in 1:nrow(traindata)) {
x = traindata$SibSp[i]
y = traindata$Parch[i]
traindata$Family[i] = x + y + 1
}
#MOTHER
traindata["Mother"]
for(i in 1:nrow(traindata)) {
if(traindata$Name[i] == "Mrs" & traindata$Parch[i] > 0) {
traindata$Mother[i] = 1
} else {
traindata$Mother[i] = 2
}
}
### TRAINING DATASET COMPLETE
### TEST Data processing begins below
head(testData)
plot(density(testData$Age,na.rm = TRUE))
PassengerId = testData[1]
testData <- testData[-c(1,8:11)]
head(testData)
testData$Sex=gsub("female",1,testData$Sex,ignore.case = TRUE)
testData$Sex=gsub("^male",0,testData$Sex,ignore.case = TRUE)
head(testData)
colMeans(is.na(testData))
#Age variable has about 20 % Missing values
md.pattern(testData)
# 86 Records have missing values
X <- testData[-c(2)]
head(X)
X_temp <- mice(X,m=5,maxit=50,meth='pmm',seed=500)
summary(X_temp)
X_temp$imp$Age
imputeddata_test <- complete(X_temp,1)
testData <- cbind(imputeddata_test,testData$Name)
head(testData)
colnames(testData)[6] <- "Name"
md.pattern(testData)
### Creating new variables child , family and mother for test data
#CHILD
testData["Child"]
for (i in 1:nrow(testData)) {
if (testData$Age[i] <= 18) {
testData$Child[i] = 1
} else {
testData$Child[i] = 2
}
}
#FAMILY
testData["Family"] = NA
for(i in 1:nrow(testData)) {
x = testData$SibSp[i]
y = testData$Parch[i]
testData$Family[i] = x + y + 1
}
#MOTHER
testData["Mother"]
for(i in 1:nrow(testData)) {
if(testData$Name[i] == "Mrs" & testData$Parch[i] > 0) {
testData$Mother[i] = 1
} else {
testData$Mother[i] = 2
}
}
####
head(testData)
####TEST DATA PREPARATION COMPLETE
train.glm <- glm(Survived~Pclass+Sex+Age+Child+Szs+Family+Mother,family=binomial,data = traindata)
summary(train.glm)
#family is the link funtion . dafault here is gaussian and data output ith binomial will be for logit regression
p.hats <- predict.glm(train.glm, newdata = testData, type = "response")
survival <- vector()
for(i in 1:length(p.hats)) {
if(p.hats[i] > .5) {
survival[i] <- 1
} else {
survival[i] <- 0
}
}
kaggle.sub <- cbind(PassengerId,survival)
colnames(kaggle.sub) <- c("PassengerId", "Survived")
write.csv(kaggle.sub, file = "kaggle.csv", row.names = FALSE)
| /Titanic.R | no_license | aparajit10/Titanic- | R | false | false | 4,632 | r | setwd("/Users/Aparajit/Desktop/Kaggle/")
#used to set working directory
train <- read.csv("train.csv",header = TRUE,stringsAsFactors= TRUE)
testData <- read.csv("test.csv",header = TRUE,stringsAsFactors= TRUE)
# read.csv used to read CSV files into R studio
head(train)
plot(density(train$Age,na.rm = TRUE))
# Most people between 20 to 40 years of age
plot(density(train$Pclass,na.rm = TRUE))
# 3rd class >1st class > 2nd class
## Survival rate by sex
plot(density(train$Fare,na.rm=TRUE))
counts <- table(train$Survived,train$Sex)
head(counts)
barplot(counts,xlab = "Sex",ylab = "No of Ppl",main = "Survival Rate by Sex")
counts[2] / (counts[1] + counts[2])
counts[3]/(counts[3]+counts[4])
109/(468+109)
##Survival rate by passenger class
PcClassSurvival <- table(train$Survived,train$Pclass)
head(PcClassSurvival)
barplot(PcClassSurvival,xlab = "Class of Cabin",ylab = "No of people",main = "Survival by Class")
PcClassSurvival[2]/(PcClassSurvival[2]+PcClassSurvival[1])
PcClassSurvival[4]/(PcClassSurvival[4]+PcClassSurvival[3])
PcClassSurvival[6]/(PcClassSurvival[6]+PcClassSurvival[5])
## Survival rate by age
SurviavlByAge <- table(train$Survived,train$Age)
head(SurviavlByAge)
#
### Part one ends before this comment
### Part two starts below
#removing non relevamt variables
head(train)
traindata <- train[-c(1,9:12)]
head(traindata)
#
#Replacing Gender variable (Male/Female) with a Dummy Variable (0/1)
traindata$Sex=gsub("female",1,traindata$Sex,ignore.case = TRUE)
traindata$Sex=gsub("^male",0,traindata$Sex,ignore.case = TRUE)
#
head(traindata)
#trying to find percentage of missing values by attribute
colMeans(is.na(traindata))
#Age has 19 % missing values
library("mice")
md.pattern(traindata)
test <- traindata[-c(3)]
#Using mice package we know that 714 records have zero missing values
# 177 records have age missing
methods(mice)
tempData <- mice(test,m=5,maxit=50,meth='pmm',seed=500)
summary(tempData)
tempData$imp$Age
imputeddata <- complete(tempData,1)
traindata <- cbind(imputeddata,traindata$Name)
colnames(traindata)[7] <- "Name"
md.pattern(traindata)
## train data has no more missing values
### Creating new variables child , family and mother
#CHILD
traindata["Child"]
for (i in 1:nrow(traindata)) {
if (traindata$Age[i] <= 18) {
traindata$Child[i] = 1
} else {
traindata$Child[i] = 2
}
}
#FAMILY
traindata["Family"] = NA
for(i in 1:nrow(traindata)) {
x = traindata$SibSp[i]
y = traindata$Parch[i]
traindata$Family[i] = x + y + 1
}
#MOTHER
traindata["Mother"]
for(i in 1:nrow(traindata)) {
if(traindata$Name[i] == "Mrs" & traindata$Parch[i] > 0) {
traindata$Mother[i] = 1
} else {
traindata$Mother[i] = 2
}
}
### TRAINING DATASET COMPLETE
### TEST Data processing begins below
head(testData)
plot(density(testData$Age,na.rm = TRUE))
PassengerId = testData[1]
testData <- testData[-c(1,8:11)]
head(testData)
testData$Sex=gsub("female",1,testData$Sex,ignore.case = TRUE)
testData$Sex=gsub("^male",0,testData$Sex,ignore.case = TRUE)
head(testData)
colMeans(is.na(testData))
#Age variable has about 20 % Missing values
md.pattern(testData)
# 86 Records have missing values
X <- testData[-c(2)]
head(X)
X_temp <- mice(X,m=5,maxit=50,meth='pmm',seed=500)
summary(X_temp)
X_temp$imp$Age
imputeddata_test <- complete(X_temp,1)
testData <- cbind(imputeddata_test,testData$Name)
head(testData)
colnames(testData)[6] <- "Name"
md.pattern(testData)
### Creating new variables child , family and mother for test data
#CHILD
testData["Child"]
for (i in 1:nrow(testData)) {
if (testData$Age[i] <= 18) {
testData$Child[i] = 1
} else {
testData$Child[i] = 2
}
}
#FAMILY
testData["Family"] = NA
for(i in 1:nrow(testData)) {
x = testData$SibSp[i]
y = testData$Parch[i]
testData$Family[i] = x + y + 1
}
#MOTHER
testData["Mother"]
for(i in 1:nrow(testData)) {
if(testData$Name[i] == "Mrs" & testData$Parch[i] > 0) {
testData$Mother[i] = 1
} else {
testData$Mother[i] = 2
}
}
####
head(testData)
####TEST DATA PREPARATION COMPLETE
# Logistic regression (binomial family -> logit link) of survival on the
# engineered predictors.
# NOTE(review): "Szs" is never created anywhere in this script -- confirm
# it is an existing column of traindata or fix the formula (possible typo).
train.glm <- glm(Survived~Pclass+Sex+Age+Child+Szs+Family+Mother,family=binomial,data = traindata)
summary(train.glm)
# "family" is the link/error family; the default is gaussian, and
# family = binomial gives logistic (logit) regression.
p.hats <- predict.glm(train.glm, newdata = testData, type = "response")
# Threshold the predicted probabilities at 0.5 to get a 0/1 survival call.
survival <- vector()
for(i in 1:length(p.hats)) {
if(p.hats[i] > .5) {
survival[i] <- 1
} else {
survival[i] <- 0
}
}
# Assemble and write the Kaggle submission file (PassengerId, Survived).
kaggle.sub <- cbind(PassengerId,survival)
colnames(kaggle.sub) <- c("PassengerId", "Survived")
write.csv(kaggle.sub, file = "kaggle.csv", row.names = FALSE)
|
\name{cyclicb}
\alias{cyclicb.data}
\alias{cyclicb.qtl}
\alias{cyclicb}
\title{Cyclic graph (b) example}
\description{We use a Gibbs sampling scheme to generate a data set with
200 individuals (according to cyclic graph (b)). Each phenotype is
affected by 3 QTLs. We fixed the regression coefficients at 0.5,
error variances at 0.025 and the QTL effects at 0.2, 0.3
and 0.4 for the three F2 genotypes. We used
a burn-in of 2000 for the Gibbs sampler.}
\details{For cyclic graphs, the output of the qdgAlgo function
computes the log-likelihood up to the normalization constant
(un-normalized log-likelihood). We can use the un-normalized
log-likelihood to compare cyclic graphs with reversed directions
(since they have the same normalization constant). However we cannot
compare cyclic and acyclic graphs.}
\references{Chaibub Neto et al. (2008) Inferring causal phenotype networks from
segregating populations. Genetics 179: 1089-1100.}
\usage{data(cyclicb)}
\examples{
\dontrun{
bp <- matrix(0, 6, 6)
bp[2,1] <- bp[1,5] <- bp[3,1] <- bp[4,2] <- bp[5,4] <- bp[5,6] <- bp[6,3] <- 0.5
stdev <- rep(0.025, 6)
## Use R/qtl routines to simulate.
set.seed(3456789)
mymap <- sim.map(len = rep(100,20), n.mar = 10, eq.spacing = FALSE,
include.x = FALSE)
mycross <- sim.cross(map = mymap, n.ind = 200, type = "f2")
mycross <- sim.geno(mycross, n.draws = 1)
cyclicb.qtl <- produce.qtl.sample(cross = mycross, n.phe = 6)
mygeno <- pull.geno(mycross)[, unlist(cyclicb.qtl$markers)]
cyclicb.data <- generate.data.graph.b(cross = mycross, burnin = 2000,
bq = c(0.2,0.3,0.4), bp = bp, stdev = stdev, geno = mygeno)
save(cyclicb.qtl, cyclicb.data, file = "cyclicb.RData", compress = TRUE)
}
data(cyclicb)
out <- qdgAlgo(cross=cyclicb.data,
phenotype.names=paste("y",1:6,sep=""),
marker.names=cyclicb.qtl$markers,
QTL=cyclicb.qtl$allqtl,
alpha=0.005,
n.qdg.random.starts=10,
skel.method="pcskel")
out2 <- qdgSEM(out, cross=cyclicb.data)
out2
plot(out2)
}
\keyword{datagen}
| /man/cyclicb.Rd | no_license | byandell/qdg | R | false | false | 2,013 | rd | \name{cyclicb}
\alias{cyclicb.data}
\alias{cyclicb.qtl}
\alias{cyclicb}
\title{Cyclic graph (b) example}
\description{We use a Gibbs sampling scheme to generate a data set with
200 individuals (according to cyclic graph (b)). Each phenotype is
affected by 3 QTLs. We fixed the regression coefficients at 0.5,
error variances at 0.025 and the QTL effects at 0.2, 0.3
and 0.4 for the three F2 genotypes. We used
a burn-in of 2000 for the Gibbs sampler.}
\details{For cyclic graphs, the output of the qdgAlgo function
computes the log-likelihood up to the normalization constant
(un-normalized log-likelihood). We can use the un-normalized
log-likelihood to compare cyclic graphs with reversed directions
(since they have the same normalization constant). However we cannot
compare cyclic and acyclic graphs.}
\references{Chaibub Neto et al. (2008) Inferring causal phenotype networks from
segregating populations. Genetics 179: 1089-1100.}
\usage{data(cyclicb)}
\examples{
\dontrun{
bp <- matrix(0, 6, 6)
bp[2,1] <- bp[1,5] <- bp[3,1] <- bp[4,2] <- bp[5,4] <- bp[5,6] <- bp[6,3] <- 0.5
stdev <- rep(0.025, 6)
## Use R/qtl routines to simulate.
set.seed(3456789)
mymap <- sim.map(len = rep(100,20), n.mar = 10, eq.spacing = FALSE,
include.x = FALSE)
mycross <- sim.cross(map = mymap, n.ind = 200, type = "f2")
mycross <- sim.geno(mycross, n.draws = 1)
cyclicb.qtl <- produce.qtl.sample(cross = mycross, n.phe = 6)
mygeno <- pull.geno(mycross)[, unlist(cyclicb.qtl$markers)]
cyclicb.data <- generate.data.graph.b(cross = mycross, burnin = 2000,
bq = c(0.2,0.3,0.4), bp = bp, stdev = stdev, geno = mygeno)
save(cyclicb.qtl, cyclicb.data, file = "cyclicb.RData", compress = TRUE)
}
data(cyclicb)
out <- qdgAlgo(cross=cyclicb.data,
phenotype.names=paste("y",1:6,sep=""),
marker.names=cyclicb.qtl$markers,
QTL=cyclicb.qtl$allqtl,
alpha=0.005,
n.qdg.random.starts=10,
skel.method="pcskel")
out2 <- qdgSEM(out, cross=cyclicb.data)
out2
plot(out2)
}
\keyword{datagen}
|
#' @author Alfonso Jiménez-Vílchez
#' @title Jd evaluation measure
#' @description Generates an evaluation function that applies the discriminant function designed by Narendra and Fukunaga \insertCite{Narendra1977}{FSinR} to generate an evaluation measure for a set of features (set measure). This function is called internally within the \code{\link{filterEvaluator}} function.
#'
#' @return Returns a function that is used to generate an evaluation set measure using the Jd.
#' @references
#' \insertAllCited{}
#' @importFrom Rdpack reprompt
#' @import dplyr
#' @importFrom stats cov
#' @import rlang
#' @importFrom rlang UQ
#' @export
#'
#' @examples
#'\dontrun{
#'
#' ## The direct application of this function is an advanced use that consists of using this
#' # function directly to evaluate a set of features
#' ## Classification problem
#'
#' # Generate the evaluation function with JD
#' Jd_evaluator <- Jd()
#' # Evaluate the features (parameters: dataset, target variable and features)
#' Jd_evaluator(ToothGrowth,'supp',c('len','dose'))
#' }
Jd <- function() {
  # Set-measure evaluator: Narendra & Fukunaga discriminant J_d for a
  # two-class problem.
  #
  # data:     data frame holding the features and the class column
  # class:    name (string) of the class column; must have exactly 2 levels
  # features: character vector of feature column names to evaluate
  JdEvaluator <- function(data, class, features) {
    # An empty feature set carries no discriminative information.
    if (!length(features)) {
      return(0)
    }
    feature.classes <- unique(as.data.frame(data[, class, drop = FALSE]))
    # The measure is only defined for two-class problems.
    if (nrow(feature.classes) != 2) {
      stop('Data set is required to have only 2 classes')
    }
    # Per-class mean vector of the selected features. all_of() makes the
    # external character vectors unambiguous to tidyselect (a bare
    # `select(features, class)` is deprecated and would silently pick a
    # data column literally named "features"/"class" if one existed).
    vectors <- data %>%
      select(all_of(features), all_of(class)) %>%
      group_by_at(class) %>%
      summarise_at(features, list(mean)) %>%
      select(all_of(features))
    # Difference between the two class mean vectors.
    vector <- unlist(vectors[1, ] - vectors[2, ])
    # Within-class covariance of the features for each class; .data[[class]]
    # is the modern rlang equivalent of UQ(as.name(class)).
    matrixA <- data %>%
      filter(.data[[class]] == feature.classes[1, 1]) %>%
      select(all_of(features)) %>%
      as.matrix() %>%
      cov()
    matrixB <- data %>%
      filter(.data[[class]] == feature.classes[2, 1]) %>%
      select(all_of(features)) %>%
      as.matrix() %>%
      cov()
    # J_d = (m1 - m2)' [ (S1 + S2)/2 ]^-1 (m1 - m2): a Mahalanobis-type
    # distance using the averaged within-class covariance.
    as.numeric(t(vector) %*% solve((matrixA + matrixB) / 2) %*% vector)
  }
  # Metadata consumed by FSinR's evaluator framework (see sibling measures).
  attr(JdEvaluator, 'shortName') <- "Jd"
  attr(JdEvaluator, 'name') <- "Jd"
  attr(JdEvaluator, 'target') <- "maximize"
  attr(JdEvaluator, 'kind') <- "Set measure"
  attr(JdEvaluator, 'needsDataToBeDiscrete') <- FALSE
  attr(JdEvaluator, 'needsDataToBeContinuous') <- FALSE
  JdEvaluator
}
| /R/Jd.R | no_license | cran/FSinR | R | false | false | 2,262 | r | #' @author Alfonso Jiménez-Vílchez
#' @title Jd evaluation measure
#' @description Generates an evaluation function that applies the discriminant function designed by Narendra and Fukunaga \insertCite{Narendra1977}{FSinR} to generate an evaluation measure for a set of features (set measure). This function is called internally within the \code{\link{filterEvaluator}} function.
#'
#' @return Returns a function that is used to generate an evaluation set measure using the Jd.
#' @references
#' \insertAllCited{}
#' @importFrom Rdpack reprompt
#' @import dplyr
#' @importFrom stats cov
#' @import rlang
#' @importFrom rlang UQ
#' @export
#'
#' @examples
#'\dontrun{
#'
#' ## The direct application of this function is an advanced use that consists of using this
#' # function directly to evaluate a set of features
#' ## Classification problem
#'
#' # Generate the evaluation function with JD
#' Jd_evaluator <- Jd()
#' # Evaluate the features (parameters: dataset, target variable and features)
#' Jd_evaluator(ToothGrowth,'supp',c('len','dose'))
#' }
Jd <- function() {
JdEvaluator <- function(data, class, features) {
if (!length(features)) {
return(0);
}
feature.classes <- unique(as.data.frame(data[,class,drop = FALSE]))
if (nrow(feature.classes) != 2) {
stop('Data set is required to have only 2 classes');
}
vectors <- data %>%
select(features, class) %>%
group_by_at(class) %>%
summarise_at(features,list(mean)) %>%
select(features)
vector <- unlist(vectors[1,] - vectors[2,])
matrixA <- data %>%
filter(UQ(as.name(class)) == feature.classes[1,1]) %>%
select(features) %>%
as.matrix() %>%
cov()
matrixB <- data %>%
filter(UQ(as.name(class)) == feature.classes[2,1]) %>%
select(features) %>%
as.matrix() %>%
cov()
return (as.numeric(t(vector) %*% solve((matrixA + matrixB)/2) %*% vector))
}
attr(JdEvaluator,'shortName') <- "Jd"
attr(JdEvaluator,'name') <- "Jd"
attr(JdEvaluator,'target') <- "maximize"
attr(JdEvaluator,'kind') <- "Set measure"
attr(JdEvaluator,'needsDataToBeDiscrete') <- FALSE
attr(JdEvaluator,'needsDataToBeContinuous') <- FALSE
return(JdEvaluator)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/datasets.R
\docType{data}
\name{countryExData}
\alias{countryExData}
\title{Example dataset for country level data (2008 Environmental Performance
Index)}
\format{A data frame with 149 observations on the following 80 variables.
\describe{ \item{ISO3V10}{a character vector}
\item{Country}{a character vector} \item{EPI_regions}{a
character vector} \item{GEO_subregion}{a character vector}
\item{Population2005}{a numeric vector}
\item{GDP_capita.MRYA}{a numeric vector} \item{landlock}{a
numeric vector} \item{landarea}{a numeric vector}
\item{density}{a numeric vector} \item{EPI}{a numeric
vector} \item{ENVHEALTH}{a numeric vector}
\item{ECOSYSTEM}{a numeric vector} \item{ENVHEALTH.1}{a
numeric vector} \item{AIR_E}{a numeric vector}
\item{WATER_E}{a numeric vector} \item{BIODIVERSITY}{a
numeric vector} \item{PRODUCTIVE_NATURAL_RESOURCES}{a numeric
vector} \item{CLIMATE}{a numeric vector} \item{DALY_SC}{a
numeric vector} \item{WATER_H}{a numeric vector}
\item{AIR_H}{a numeric vector} \item{AIR_E.1}{a numeric
vector} \item{WATER_E.1}{a numeric vector}
\item{BIODIVERSITY.1}{a numeric vector} \item{FOREST}{a
numeric vector} \item{FISH}{a numeric vector}
\item{AGRICULTURE}{a numeric vector} \item{CLIMATE.1}{a
numeric vector} \item{ACSAT_pt}{a numeric vector}
\item{WATSUP_pt}{a numeric vector} \item{DALY_pt}{a numeric
vector} \item{INDOOR_pt}{a numeric vector} \item{PM10_pt}{a
numeric vector} \item{OZONE_H_pt}{a numeric vector}
\item{SO2_pt}{a numeric vector} \item{OZONE_E_pt}{a numeric
vector} \item{WATQI_pt}{a numeric vector} \item{WATSTR_pt}{a
numeric vector} \item{WATQI_GEMS.station.data}{a numeric vector}
\item{FORGRO_pt}{a numeric vector} \item{CRI_pt}{a numeric
vector} \item{EFFCON_pt}{a numeric vector} \item{AZE_pt}{a
numeric vector} \item{MPAEEZ_pt}{a numeric vector}
\item{EEZTD_pt}{a numeric vector} \item{MTI_pt}{a numeric
vector} \item{IRRSTR_pt}{a numeric vector} \item{AGINT_pt}{a
numeric vector} \item{AGSUB_pt}{a numeric vector}
\item{BURNED_pt}{a numeric vector} \item{PEST_pt}{a numeric
vector} \item{GHGCAP_pt}{a numeric vector}
\item{CO2IND_pt}{a numeric vector} \item{CO2KWH_pt}{a
numeric vector} \item{ACSAT}{a numeric vector}
\item{WATSUP}{a numeric vector} \item{DALY}{a numeric
vector} \item{INDOOR}{a numeric vector} \item{PM10}{a
numeric vector} \item{OZONE_H}{a numeric vector}
\item{SO2}{a numeric vector} \item{OZONE_E}{a numeric
vector} \item{WATQI}{a numeric vector}
\item{WATQI_GEMS.station.data.1}{a numeric vector}
\item{WATSTR}{a numeric vector} \item{FORGRO}{a numeric
vector} \item{CRI}{a numeric vector} \item{EFFCON}{a numeric
vector} \item{AZE}{a numeric vector} \item{MPAEEZ}{a numeric
vector} \item{EEZTD}{a numeric vector} \item{MTI}{a numeric
vector} \item{IRRSTR}{a numeric vector} \item{AGINT}{a
numeric vector} \item{AGSUB}{a numeric vector}
\item{BURNED}{a numeric vector} \item{PEST}{a numeric
vector} \item{GHGCAP}{a numeric vector} \item{CO2IND}{a
numeric vector} \item{CO2KWH}{a numeric vector} }}
\source{
http://epi.yale.edu/Downloads
}
\description{
A dataframe containing example country level data for 149 countries. This
is the 2008 Environmental Performance Index (EPI) downloaded from
http://epi.yale.edu/. Used here with permission, further details on the
data can be found there. The data are referenced by ISO 3 letter country
codes and country names.
}
\details{
2008 Environmental Performance Index (EPI) data downloaded from :
http://epi.yale.edu/Downloads
Disclaimers This 2008 Environmental Performance Index (EPI) tracks national
environmental results on a quantitative basis, measuring proximity to an
established set of policy targets using the best data available. Data
constraints and limitations in methodology make this a work in progress.
Further refinements will be undertaken over the next few years. Comments,
suggestions, feedback, and referrals to better data sources are welcome at:
http://epi.yale.edu or epi@yale.edu.
}
\examples{
data(countryExData,envir=environment(),package="rworldmap")
str(countryExData)
}
\references{
Esty, Daniel C., M.A. Levy, C.H. Kim, A. de Sherbinin, T.
Srebotnjak, and V. Mara. 2008. 2008 Environmental Performance Index. New
Haven: Yale Center for Environmental Law and Policy.
}
\keyword{datasets}
| /man/countryExData.Rd | no_license | xhesc/rworldmap | R | false | true | 4,417 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/datasets.R
\docType{data}
\name{countryExData}
\alias{countryExData}
\title{Example dataset for country level data (2008 Environmental Performance
Index)}
\format{A data frame with 149 observations on the following 80 variables.
\describe{ \item{ISO3V10}{a character vector}
\item{Country}{a character vector} \item{EPI_regions}{a
character vector} \item{GEO_subregion}{a character vector}
\item{Population2005}{a numeric vector}
\item{GDP_capita.MRYA}{a numeric vector} \item{landlock}{a
numeric vector} \item{landarea}{a numeric vector}
\item{density}{a numeric vector} \item{EPI}{a numeric
vector} \item{ENVHEALTH}{a numeric vector}
\item{ECOSYSTEM}{a numeric vector} \item{ENVHEALTH.1}{a
numeric vector} \item{AIR_E}{a numeric vector}
\item{WATER_E}{a numeric vector} \item{BIODIVERSITY}{a
numeric vector} \item{PRODUCTIVE_NATURAL_RESOURCES}{a numeric
vector} \item{CLIMATE}{a numeric vector} \item{DALY_SC}{a
numeric vector} \item{WATER_H}{a numeric vector}
\item{AIR_H}{a numeric vector} \item{AIR_E.1}{a numeric
vector} \item{WATER_E.1}{a numeric vector}
\item{BIODIVERSITY.1}{a numeric vector} \item{FOREST}{a
numeric vector} \item{FISH}{a numeric vector}
\item{AGRICULTURE}{a numeric vector} \item{CLIMATE.1}{a
numeric vector} \item{ACSAT_pt}{a numeric vector}
\item{WATSUP_pt}{a numeric vector} \item{DALY_pt}{a numeric
vector} \item{INDOOR_pt}{a numeric vector} \item{PM10_pt}{a
numeric vector} \item{OZONE_H_pt}{a numeric vector}
\item{SO2_pt}{a numeric vector} \item{OZONE_E_pt}{a numeric
vector} \item{WATQI_pt}{a numeric vector} \item{WATSTR_pt}{a
numeric vector} \item{WATQI_GEMS.station.data}{a numeric vector}
\item{FORGRO_pt}{a numeric vector} \item{CRI_pt}{a numeric
vector} \item{EFFCON_pt}{a numeric vector} \item{AZE_pt}{a
numeric vector} \item{MPAEEZ_pt}{a numeric vector}
\item{EEZTD_pt}{a numeric vector} \item{MTI_pt}{a numeric
vector} \item{IRRSTR_pt}{a numeric vector} \item{AGINT_pt}{a
numeric vector} \item{AGSUB_pt}{a numeric vector}
\item{BURNED_pt}{a numeric vector} \item{PEST_pt}{a numeric
vector} \item{GHGCAP_pt}{a numeric vector}
\item{CO2IND_pt}{a numeric vector} \item{CO2KWH_pt}{a
numeric vector} \item{ACSAT}{a numeric vector}
\item{WATSUP}{a numeric vector} \item{DALY}{a numeric
vector} \item{INDOOR}{a numeric vector} \item{PM10}{a
numeric vector} \item{OZONE_H}{a numeric vector}
\item{SO2}{a numeric vector} \item{OZONE_E}{a numeric
vector} \item{WATQI}{a numeric vector}
\item{WATQI_GEMS.station.data.1}{a numeric vector}
\item{WATSTR}{a numeric vector} \item{FORGRO}{a numeric
vector} \item{CRI}{a numeric vector} \item{EFFCON}{a numeric
vector} \item{AZE}{a numeric vector} \item{MPAEEZ}{a numeric
vector} \item{EEZTD}{a numeric vector} \item{MTI}{a numeric
vector} \item{IRRSTR}{a numeric vector} \item{AGINT}{a
numeric vector} \item{AGSUB}{a numeric vector}
\item{BURNED}{a numeric vector} \item{PEST}{a numeric
vector} \item{GHGCAP}{a numeric vector} \item{CO2IND}{a
numeric vector} \item{CO2KWH}{a numeric vector} }}
\source{
http://epi.yale.edu/Downloads
}
\description{
A dataframe containing example country level data for 149 countries. This
is the 2008 Environmental Performance Index (EPI) downloaded from
http://epi.yale.edu/. Used here with permission, further details on the
data can be found there. The data are referenced by ISO 3 letter country
codes and country names.
}
\details{
2008 Environmental Performance Index (EPI) data downloaded from :
http://epi.yale.edu/Downloads
Disclaimers This 2008 Environmental Performance Index (EPI) tracks national
environmental results on a quantitative basis, measuring proximity to an
established set of policy targets using the best data available. Data
constraints and limitations in methodology make this a work in progress.
Further refinements will be undertaken over the next few years. Comments,
suggestions, feedback, and referrals to better data sources are welcome at:
http://epi.yale.edu or epi@yale.edu.
}
\examples{
data(countryExData,envir=environment(),package="rworldmap")
str(countryExData)
}
\references{
Esty, Daniel C., M.A. Levy, C.H. Kim, A. de Sherbinin, T.
Srebotnjak, and V. Mara. 2008. 2008 Environmental Performance Index. New
Haven: Yale Center for Environmental Law and Policy.
}
\keyword{datasets}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/likebut.R
\name{estimates.numeric}
\alias{estimates.numeric}
\title{Get Estimates for Numeric}
\usage{
\method{estimates}{numeric}(x, ...)
}
\arguments{
\item{x}{object}
\item{...}{passed arguments}
}
\description{
Gets estimates for numeric by coercing to character.
}
\seealso{
Other estimates: \code{\link{estimates.character}},
\code{\link{estimates}}
}
\keyword{internal}
| /man/estimates.numeric.Rd | no_license | romainfrancois/nonmemica | R | false | true | 458 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/likebut.R
\name{estimates.numeric}
\alias{estimates.numeric}
\title{Get Estimates for Numeric}
\usage{
\method{estimates}{numeric}(x, ...)
}
\arguments{
\item{x}{object}
\item{...}{passed arguments}
}
\description{
Gets estimates for numeric by coercing to character.
}
\seealso{
Other estimates: \code{\link{estimates.character}},
\code{\link{estimates}}
}
\keyword{internal}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.