| content | path | license_type | repo_name | language | is_vendor | is_generated | length_bytes | extension |
|---|---|---|---|---|---|---|---|---|
source("secrets.R")
if (1) {
# connect to DB
library(RMySQL)
con <- dbConnect(MySQL(), user="cwebb", password=password,
dbname="restor", host="mysql.phylodiversity.net")
# dbListTables(con)
# dbListFields(con, "event")
# get meas data
meas <- dbGetQuery(con, "SELECT plot.plotCode, sdl.tag, sdl.id AS sdlID,
sdl.plantYear,
meas.height, meas.width, meas.dead, event.months
FROM `meas` , plot, sdl, event
WHERE meas.sdlID = sdl.id
AND sdl.plotID = plot.id
AND sdl.plantYear < 2011
AND meas.eventID = event.id")
# head(meas)
# get plot and sdl data
plot <- dbGetQuery(con, "SELECT * FROM plot WHERE id < 423")
sdltax <- dbGetQuery(con, "SELECT sdl.id, taxon.code as species, taxon.local
FROM sdl, taxon WHERE sdl.taxonID = taxon.id")
tax <- dbGetQuery(con, "SELECT code, local as localname, gen, sp from taxon
ORDER BY code")
dbDisconnect(con)
# Add treatment codes, e.g...:
plot$treatPre[plot$roundup == 0 & plot$cardboard == 0] <- "rc"
plot$treatPre[plot$roundup == 1 & plot$cardboard == 0] <- "Rc"
plot$treatPre[plot$roundup == 1 & plot$cardboard == 1] <- "RC"
plot$treatPre[plot$roundup == 0 & plot$cardboard == 1] <- "rC"
plot$treatPre <- as.factor(plot$treatPre)
for (i in c("pressing", "roundup", "weeding", "cardboard")) {
plot[plot[,i] == 1,i] <- "yes"
plot[plot[,i] == 0,i] <- "no"
}
plot$any.fertilizer[plot$fertilizer =="ORG" | plot$fertilizer =="NPK"] <- "yes"
plot$any.fertilizer[plot$fertilizer == "KON" | plot$fertilizer == "NONE"] <- "no"
plot$any.fertilizer <- as.factor(plot$any.fertilizer)
plot[plot[,"fertilizer"] == "KON","fertilizer"] <- "None"
plot[plot[,"fertilizer"] == "ORG","fertilizer"] <- "Compost"
}
# ------------ function, ordered box plot
ordBP <- function(df, myfactor, title) {
bp <- boxplot(df$htgr ~ df[,myfactor], plot=F)
bpord <- order(bp$stats[3,])
bp$stats <- bp$stats[,bpord]
bp$names <- bp$names[bpord]
bp$conf <- bp$conf[,bpord]
bp$n <- bp$n[bpord]
bxp(bp, outline=F, varwidth=T, notch=T, main=title,
ylab="Relative growth rate", las=2)
}
#------------------------------------------
# -------------- function, ordered spine plot
ordSP <- function(df, myfactor, title) {
z <- table(as.factor(df[,myfactor]), as.factor(df$died))
z <- z[order(z[,2]/z[,1]),]
z <- z[,c(2,1)]
par(las=2)
spineplot(z, main=title, ylab="Survival")
}
# Begin output (note - must have echo=F if called from source)
sink(file="anal.md", append=F)
cat("% ASRI restoration\n")
### cat("css: anal.css\n\n")
cat("# Analysis of seedling survival and growth in ASRI restoration site\n\n")
cat("----\n\n")
sink()
# for each year:
for (year in 2009:2010) {
#year <- 2009
if (year == 2009) { mon <- c(0,18) }
if (year == 2010) { mon <- c(1,14) }
sink(file="anal.md", append=T)
cat("## Seedlings planted in", year, ", from month", mon[1],
"to month", mon[2], "\n\n")
sink()
### ** beware! This is a multi GB memory hog! The `drop=T` is not
### working now and so there are NAs in the sdlID code which cause
### the merge to match millions of NA to millions of NAs. Now using
### subset
### this will only match sdls with records in both years (all=F)
# a <- merge(all.x = T,
# meas[ meas$plantYear == year & meas$months == mon[1], , drop=T],
# meas[ meas$plantYear == year & meas$months == mon[2], , drop=T],
# by = "sdlID")[ , c("sdlID", "plotCode.x",
# "height.x", "width.x", "dead.x",
# "height.y", "width.y", "dead.y")]
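### A tiny illustration of the NA-key blowup described above (hypothetical
### toy data, not from this dataset). merge() matches NA keys to NA keys by
### default, so n NA ids on each side multiply into n*n output rows:
###   merge(data.frame(sdlID=c(NA,NA), a=1:2),
###         data.frame(sdlID=c(NA,NA), b=1:2), by="sdlID")  # 4 rows, not 2
### Passing incomparables=NA (or dropping NA ids first) avoids the blowup.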
a <- merge(all.x = T,
subset(meas, plantYear == year & months == mon[1]),
subset(meas, plantYear == year & months == mon[2]),
by = "sdlID")[ , c("sdlID", "plotCode.x",
"height.x", "width.x", "dead.x",
"height.y", "width.y", "dead.y")]
# prepare
a$died[a$dead.x == "yes"] <- "died"
a$died[a$dead.x == "no" & a$dead.y == "yes"] <- "died"
a$died[a$dead.x == "no" & is.na(a$dead.y)] <- "died" # not found
a$died[a$dead.x == "no" & a$dead.y == "no"] <- "lived"
a$died <- as.factor(a$died)
a$htgr <- (a$height.y - a$height.x) / a$height.x
a$htgr[is.nan(a$htgr)] <- NA
a$htgr[is.infinite(a$htgr)] <- NA
a <- merge(a, plot, by.x = "plotCode.x", by.y = "plotCode")
a <- merge(a, sdltax, by.x = "sdlID", by.y = "id", all.x=T)
# Lived and died:
sink(file="anal.md", append=T)
cat("### Overall seedling survival\n\n")
#print(table(a$died))
cat("Survival: **", (length(a$died[a$died=="lived"]) / length(a$died)) * 100,
"%** out of **", length(a$died) , "** seedlings\n\n", sep="")
sink()
# BY FACTOR
for (myfactor in
c("species", "pressing", "roundup", "weeding", "cardboard", "fertilizer", "any.fertilizer","nsdl")) {
sink(file="anal.md", append=T)
cat("### Survival and growth vs.", myfactor, "\n\n")
# can't use levels, as these seem to be inherited through the merge
if (length(table(a[, myfactor])[table(a[, myfactor])!=0]) == 1) {
cat("**Only one level of ", myfactor, "**\n\n", sep="")
sink()
next
}
cat("#### Survival\n\n")
cat(", ")\n\n", sep="")
cat("<pre>\n")
print(chisq.test(as.factor(a$died), as.factor(a[,myfactor])))
cat("</pre>\n\n")
cat("#### Growth\n\n")
cat(", ")\n\n", sep="")
cat("<pre>\n");
print(anova(lm(a$htgr ~ as.factor(a[,myfactor]))))
cat("</pre>\n\n")
sink()
png(paste(year,"_surv_", myfactor, ".png", sep=""),
height = 400, width = (400 + length(levels(as.factor(a[,myfactor])))*20))
ordSP(a, myfactor, paste("Effect of", myfactor, "on seedling survival,", year,
"plantings"))
dev.off()
png(paste(year,"_RGR_", myfactor, ".png", sep=""),
height = 400, width = (400 + length(levels(as.factor(a[,myfactor])))*20))
ordBP(a, myfactor, paste("Effect of", myfactor, "on seedling growth,", year,
"plantings"))
dev.off()
}
sink(file="anal.md", append=T)
cat("----\n\n")
sink()
}
sink(file="anal.md", append=T)
cat("## Species list\n\n<pre>\n")
print(tax)
cat("</pre>\n\n")
sink()
# because multimarkdown has a bug when there are lots of tags: strip the
# <pre>/</pre> tags and indent their contents so they render as code blocks
system("gawk '{if ($0 ~ /<\\/pre>/) v = 0; else if(v) print \" \" $0; else if ($0 ~ /<pre>/) v=1; else print $0}' anal.md > anal.tmp; mv -f anal.tmp anal.md")
# stop()
# ################### NOTES
# just the common ones:
# common <- data.frame(table(a$code))[order(data.frame(table(a$code))[,2],
# decreasing =T),]
# common <- as.vector(common[1:10,1])
# b <- a
# b <- b[!is.na(match(b$code, common)), ,drop=T]
# png(paste(year, "_RGR_spp_common.png", sep=""), height=500, width=700)
# ordBP(b, paste("Relative Growth Rate of seedlings planted in", year, ", between months", mon[1], "and",mon[2]))
# dev.off()
# subsetting:
# remember drop=T, e.g.
# a <- meas[ meas$plantYear == 2010, , drop=T]
# check for factor removal: levels(a$plotCode)
| /data+analysis/2012_script/analysis.R | no_license | aammd/asri-restor-phase1 | R | false | false | 7,324 | r |
source("secrets.R")
if (1) {
# connect to DB
library(RMySQL)
con <- dbConnect(MySQL(), user="cwebb", password=password,
dbname="restor", host="mysql.phylodiversity.net")
# dbListTables(con)
# dbListFields(con, "event")
# get meas data
meas <- dbGetQuery(con, "SELECT plot.plotCode, sdl.tag, sdl.id AS sdlID,
sdl.plantYear,
meas.height, meas.width, meas.dead, event.months
FROM `meas` , plot, sdl, event
WHERE meas.sdlID = sdl.id
AND sdl.plotID = plot.id
AND sdl.plantYear < 2011
AND meas.eventID = event.id")
# head(meas)
# get plot and sdl data
plot <- dbGetQuery(con, "SELECT * FROM plot WHERE id < 423")
sdltax <- dbGetQuery(con, "SELECT sdl.id, taxon.code as species, taxon.local
FROM sdl, taxon WHERE sdl.taxonID = taxon.id")
tax <- dbGetQuery(con, "SELECT code, local as localname, gen, sp from taxon
ORDER BY code")
dbDisconnect(con)
# Add treatment codes, e.g...:
plot$treatPre[plot$roundup == 0 & plot$cardboard == 0] <- "rc"
plot$treatPre[plot$roundup == 1 & plot$cardboard == 0] <- "Rc"
plot$treatPre[plot$roundup == 1 & plot$cardboard == 1] <- "RC"
plot$treatPre[plot$roundup == 0 & plot$cardboard == 1] <- "rC"
plot$treatPre <- as.factor(plot$treatPre)
for (i in c("pressing", "roundup", "weeding", "cardboard")) {
plot[plot[,i] == 1,i] <- "yes"
plot[plot[,i] == 0,i] <- "no"
}
plot$any.fertilizer[plot$fertilizer =="ORG" | plot$fertilizer =="NPK"] <- "yes"
plot$any.fertilizer[plot$fertilizer == "KON" | plot$fertilizer == "NONE"] <- "no"
plot$any.fertilizer <- as.factor(plot$any.fertilizer)
plot[plot[,"fertilizer"] == "KON","fertilizer"] <- "None"
plot[plot[,"fertilizer"] == "ORG","fertilizer"] <- "Compost"
}
# ------------ function, ordered box plot
ordBP <- function(df, myfactor, title) {
bp <- boxplot(df$htgr ~ df[,myfactor], plot=F)
bpord <- order(bp$stats[3,])
bp$stats <- bp$stats[,bpord]
bp$names <- bp$name[bpord]
bp$conf <- bp$conf[,bpord]
bp$n <- bp$n[bpord]
bxp(bp, outline=F, varwidth=T, notch=T, main=title,
ylab="Relative growth rate", las=2)
}
#------------------------------------------
# -------------- function, ordered spine plot
ordSP <- function(df, myfactor, title) {
z <- table(as.factor(df[,myfactor]), as.factor(df$died))
z <- z[order(z[,2]/z[,1]),]
z <- z[,c(2,1)]
par(las=2)
spineplot(z, main=title, ylab="Survival")
}
# Begin output (note - must have echo=F if called from source)
sink(file="anal.md", append=F)
cat("% ASRI restoration\n")
### cat("css: anal.css\n\n")
cat("# Analysis of seedling survival and growth in ASRI restoration site\n\n")
cat("----\n\n")
sink()
# for each year:
for (year in 2009:2010) {
#year <- 2009
if (year == 2009) { mon <- c(0,18) }
if (year == 2010) { mon <- c(1,14) }
sink(file="anal.md", append=T)
cat("## Seedlings planted in", year, ", from month", mon[1],
"to month", mon[2], "\n\n")
sink()
### ** beware! This is a multi GB memory hog! The `drop=T` is not
### working now and so there are NAs in the sdlID code which cause
### the merge to match millions of NA to millions of NAs. Now using
### subset
### this will only match sdls with records in both years (all=F)
# a <- merge(all.x = T,
# meas[ meas$plantYear == year & meas$months == mon[1], , drop=T],
# meas[ meas$plantYear == year & meas$months == mon[2], , drop=T],
# by = "sdlID")[ , c("sdlID", "plotCode.x",
# "height.x", "width.x", "dead.x",
# "height.y", "width.y", "dead.y")]
a <- merge(all.x = T,
subset(meas, plantYear == year & months == mon[1]),
subset(meas, plantYear == year & months == mon[2]),
by = "sdlID")[ , c("sdlID", "plotCode.x",
"height.x", "width.x", "dead.x",
"height.y", "width.y", "dead.y")]
# prepare
a$died[a$dead.x == "yes"] <- "died"
a$died[a$dead.x == "no" & a$dead.y == "yes"] <- "died"
a$died[a$dead.x == "no" & is.na(a$dead.y)] <- "died" # not found
a$died[a$dead.x == "no" & a$dead.y == "no"] <- "lived"
a$died <- as.factor(a$died)
a$htgr <- (a$height.y - a$height.x) / a$height.x
a$htgr[is.nan(a$htgr)] <- NA
a$htgr[is.infinite(a$htgr)] <- NA
a <- merge(a, plot, by.x = "plotCode.x", by.y = "plotCode")
a <- merge(a, sdltax, by.x = "sdlID", by.y = "id", all.x=T)
# Lived and died:
sink(file="anal.md", append=T)
cat("### Overall seedling survival\n\n")
#print(table(a$died))
cat("Survival: **", (length(a$died[a$died=="lived"]) / length(a$died)) * 100,
"%** out of **", length(a$died) , "** seedlings\n\n", sep="")
sink()
# BY FACTOR
for (myfactor in
c("species", "pressing", "roundup", "weeding", "cardboard", "fertilizer", "any.fertilizer","nsdl")) {
sink(file="anal.md", append=T)
cat("### Survival and growth vs.", myfactor, "\n\n")
# can't use levels, as these seem to be inheritted through the merge
if (length(table(a[, myfactor])[table(a[, myfactor])!=0]) == 1) {
cat("**Only one level of ", myfactor, "**\n\n", sep="")
sink()
next
}
cat("#### Survival\n\n")
cat(", ")\n\n", sep="")
cat("<pre>\n")
print(chisq.test(as.factor(a$died), as.factor(a[,myfactor])))
cat("</pre>\n\n")
cat("#### Growth\n\n")
cat(", ")\n\n", sep="")
cat("<pre>\n");
print(anova(lm(a$htgr ~ as.factor(a[,myfactor]))))
cat("</pre>\n\n")
sink()
png(paste(year,"_surv_", myfactor, ".png", sep=""),
height = 400, width = (400 + length(levels(as.factor(a[,myfactor])))*20))
ordSP(a, myfactor, paste("Effect of", myfactor, "on seedling survival,", year,
"plantings"))
dev.off()
png(paste(year,"_RGR_", myfactor, ".png", sep=""),
height = 400, width = (400 + length(levels(as.factor(a[,myfactor])))*20))
ordBP(a, myfactor, paste("Effect of", myfactor, "on seedling growth,", year,
"plantings"))
dev.off()
}
sink(file="anal.md", append=T)
cat("----\n\n")
sink()
}
sink(file="anal.md", append=T)
cat("## Species list\n\n<pre>\n")
print(tax)
cat("</pre>\n\n")
sink()
# because multimarkdown has a bug when there are lots of tags:
system("gawk '{if ($0 ~ /<\\/pre>/) v = 0; else if(v) print \" \" $0; else if ($0 ~ /<pre>/) v=1; else print $0}' anal.md > anal.tmp; mv -f anal.tmp anal.md")
# stop()
# ################### NOTES
# just the common ones:
# common <- data.frame(table(a$code))[order(data.frame(table(a$code))[,2],
# decreasing =T),]
# common <- as.vector(common[1:10,1])
# b <- a
# b <- b[!is.na(match(b$code, common)), ,drop=T]
# png(paste(year, "_RGR_spp_common.png", sep=""), height=500, width=700)
# ordBP(b, paste("Relative Growth Rate of seedlings planted in", year, ", between months", mon[1], "and",mon[2]))
# dev.off()
# subsetting:
# remember drop=T, e.g.
# a <- meas[ meas$plantYear == 2010, , drop=T]
# check for factor removal: levels(a$plotCode)
|
library(shiny)
library(shinydashboard)
library(ggthemes)
library(shinythemes)
library(sf)
library(tidyverse)
library(viridis)
load("covid_state_data.rda")
load("mapdaten.rda")
ui<-fluidPage(titlePanel(
h1("Corona Dashboard", align = "center")), theme = shinytheme("cerulean"),
tags$footer("von Philipp Conrad", align = "center"),
fluidRow(
column(4, box(width =12,plotOutput("distPlot"),title ="Die Bundesländer im Vergleich",solidHeader = T)),
column(4, box(width =12,plotOutput("one"),title = "Entwicklung im ausgewählten Bundesland")),
column(4, box(width =12,plotOutput("two"), title = "Aktuelle Deutschlandkarte"))),
hr(),
fluidRow(
column(4, selectInput("x", "X-Axis", choices = names(covid_state_data),selected = "date"),
selectInput("y","Y-Axis", choices = names(covid_state_data), selected = "cases_vs_population")),
column(4, selectInput("z",label = "Bundesland", choices = unique(covid_state_data$state), selected = "Bayern")
, selectInput("bot","Y-Axis", choices = names(covid_state_data), selected = "totalcases")),
column(4,selectInput("s",label="Variabel", choices=names(covid_state_data), selected ="totalcases"),
selectizeInput("t", label = "Datum", choices = unique(covid_state_data$date), selected = "2020-10-24", multiple = FALSE,
options = NULL))
),
hr(),
fluidRow(
DT::dataTableOutput("mytable")
),
tags$footer("von Philipp Conrad", align = "right")
)
server <- function(input, output) {
output$distPlot <- renderPlot({
ggplot(covid_state_data,aes_string(x = input$x,y=input$y))+
geom_point(aes(color =state))+
labs(color="")+
theme_minimal()
})
output$one <- renderPlot({
covid_state_data %>%
filter(state == input$z) %>%
ggplot(mapping = aes(x = date))+
geom_point(aes_string(y=input$bot))+
labs(color="")+
theme_minimal()+
theme(legend.position = "none")
})
output$two <- renderPlot({
mapdaten %>%
filter(date == input$t) %>%
ggplot(aes_string(fill = input$s)) +
geom_sf()+
scale_fill_viridis()+
theme_map()+
theme(legend.background = element_blank(), legend.position = "right")
})
output$mytable = DT::renderDataTable({
covid_state_data})
}
# Run the application
shinyApp(ui = ui, server = server)
| /app.R | no_license | Baumol/Shiny-Covid-Dashboard | R | false | false | 2,668 | r |
#' K nearest neighbors algorithm
#'
#' Computes k nearest neighbors predictions for a single test observation.
#' This function calls the C interface in the package to calculate, for each
#' k from 1 to max.neighbors, the k nearest neighbors prediction for the
#' given test vector.
#'
#' @param x.mat [n x p] matrix of features
#' @param y.vec label column vector [n]
#' @param testx.vec numeric feature vector for test [p]
#' @param max.neighbors max number of neighbors
#'
#' @return numeric vector of size max.neighbors with predictions for one to max.neighbors neighbors
#' @export
#'
#' @examples
#' data(zip.train, package="ElemStatLearn")
#' i01 <- which(zip.train[,1] %in% c(0,1))
#' train.i <- i01[1:5]
#' test.i <- i01[6]
#' x <- zip.train[train.i, -1]
#' y <- zip.train[train.i, 1]
#' testx <- zip.train[test.i, -1]
#' knn(x, y, testx, 3)
#' zip.train[test.i, 1]
knn <- function(x.mat, y.vec, testx.vec, max.neighbors){
  # the C code fills the predictions vector with one prediction per
  # number of neighbors (k = 1..max.neighbors)
  result.list <- .C("knn_interface", as.double(x.mat), as.double(y.vec), as.double(testx.vec),
                    as.integer(nrow(x.mat)), as.integer(ncol(x.mat)), as.integer(max.neighbors),
                    predictions=double(max.neighbors), PACKAGE="nearestneighbors")
  result.list$predictions
}
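# For reference, the .C() call above implies a C entry point with a prototype
# like the following (inferred from the call's argument order and types; a
# sketch, not copied from the package's C source):
#   void knn_interface(double *x_mat, double *y_vec, double *testx_vec,
#                      int *n_observations, int *n_features,
#                      int *max_neighbors, double *predictions);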
#' Cross Validation Function
#'
#' Estimates train and validation loss of k nearest neighbors prediction,
#' for each k from 1 to max.neighbors, using K-fold cross validation.
#'
#' @param x.mat [n x p] matrix of features
#' @param y.vec label column vector [n]
#' @param max.neighbors max number of neighbors
#' @param fold.vec vector of fold IDs, one per observation; if NULL a random
#'   assignment into n.folds folds is generated
#' @param n.folds number of folds
#'
#' @return list with train.loss.mat and validation.loss.mat
#'   ([max.neighbors x n.folds] matrices of mean loss per fold) and
#'   selected.neighbors, the k minimizing mean validation loss
#' @export
#'
#' @examples
#' library(nearestneighbors)
#' data(ozone, package="ElemStatLearn")
#' head(ozone)
#' x.mat <- as.matrix(ozone[,-1])
#' y.vec <- ozone[,1]
#' max.neighbors <- 4
#' n.folds <- 20
#' fold.vec <- sample(1:n.folds, length(y.vec), replace=TRUE)
#' nearestneighbors::NNLearnCV(x.mat=x.mat, y.vec=y.vec, max.neighbors=max.neighbors, fold.vec=fold.vec, n.folds=n.folds)
NNLearnCV <- function(x.mat, y.vec, max.neighbors=30, fold.vec=NULL, n.folds=5){
  if(is.null(fold.vec)){
    fold.vec <- sample(rep(1:n.folds, l=nrow(x.mat)))
  }
  labels.all.01 <- all(y.vec %in% c(0, 1))
  unique.folds <- unique(fold.vec)
  train.loss.mat <- validation.loss.mat <- matrix(NA, max.neighbors, length(unique.folds))
  for(fold.i in seq_along(unique.folds)){
    is.validation <- fold.vec == unique.folds[[fold.i]]
    for(prediction.set.name in c("train", "validation")){
      is.set <- if(prediction.set.name == "train") !is.validation else is.validation
      pred.mat <- t(apply(x.mat[is.set, , drop=FALSE], 1, function(testx.vec){
        knn(x.mat[!is.validation, , drop=FALSE], y.vec[!is.validation],
            testx.vec, max.neighbors)
      }))
      loss.mat <- if(labels.all.01){
        ifelse(pred.mat > 0.5, 1, 0) != y.vec[is.set] #zero-one loss for binary classification.
      }else{
        (pred.mat - y.vec[is.set])^2 #square loss for regression.
      }
      mean.loss.vec <- colMeans(loss.mat)
      if(prediction.set.name == "train"){
        train.loss.mat[, fold.i] <- mean.loss.vec
      }else{
        validation.loss.mat[, fold.i] <- mean.loss.vec
      }
    }
  }
  list(train.loss.mat = train.loss.mat,
       validation.loss.mat = validation.loss.mat,
       selected.neighbors = which.min(rowMeans(validation.loss.mat)))
}
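# A hypothetical usage sketch (illustrative, not from the original source):
# inspect the validation-loss curve and pick the best k, assuming NNLearnCV
# returns the list described above.
# cv <- NNLearnCV(x.mat, y.vec, max.neighbors = 20, n.folds = 5)
# plot(rowMeans(cv$validation.loss.mat), type = "l",
#      xlab = "neighbors (k)", ylab = "mean validation loss")
# cv$selected.neighbors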
| /Project1/R/Predict1toMaxNeighbors.R | no_license | Jeeppinger/MachineLearningProjects | R | false | false | 2,692 | r |
library(vegan)
library(tidyr)
library(dplyr)
library(ggplot2)
library(colorblindr)
library(tidyverse)
library(tikzDevice)
library(xtable)
library(reshape2)
library(scales)
library(dataMaid)
library(nortest)
# Set working directory to source file location
if(Sys.getenv("RSTUDIO") == "1"){
setwd(dirname(rstudioapi::getActiveDocumentContext()$path))
}else{
setwd(utils::getSrcDirectory()[1])
}
# Load data
load("Tables.RData")
##### Statistical analyses using all.pipelines.detailed #####
### ALPHA ###
# Remove columns Reservoir, Season and Pipeline
statistic = all.pipelines.detailed[,! colnames(all.pipelines.detailed) %in% c("Reservoir", "Season", "Pipeline")]
# Alpha diversity: shannon index
all.pipelines.detailed$Shannon = diversity(statistic)
# Create column for Reservoir, Season and Pipeline together
all.pipelines.detailed = unite(all.pipelines.detailed, Reservoir_Season_Pipeline, c(Reservoir, Season, Pipeline), remove=F, sep=" ")
# Create column for Reservoir and Season together
all.pipelines.detailed = unite(all.pipelines.detailed, Reservoir_Season, c(Reservoir, Season), remove=F, sep=" ")
# Create column for Reservoir and Pipeline together
all.pipelines.detailed = unite(all.pipelines.detailed, Reservoir_Pipeline, c(Reservoir, Pipeline), remove=F, sep=" ")
# Create column for Season and Pipeline together
all.pipelines.detailed = unite(all.pipelines.detailed, Season_Pipeline, c(Season, Pipeline), remove=F, sep=" ")
# Create column with unique name
all.pipelines.detailed$All = "All"
### Shannon index ###
# Export ggplot to Latex
tikz(file = "Alpha_diversity_shannon_PRS.tex", width = 6, height = 3)
# Plot Reservoir_Season_Pipeline
ggplot(all.pipelines.detailed, aes(x = Pipeline, y = Shannon)) +
geom_point(aes(fill = Pipeline), shape = 21, size = 3) +
xlab(NULL) +
ylab("Shannon index") +
theme(axis.text.x = element_blank(),
axis.ticks.x = element_blank(),
axis.text.y = element_text(size = 6, color = "black"),
axis.title = element_text(size = 10, face = "plain"),
legend.text = element_text(size = 6, color = "black"),
legend.title = element_text(size = 8, face = "plain"),
legend.margin = margin(t = 0, unit='cm'),
legend.key = element_rect(fill = NA, color = NA),
strip.text.x = element_text(size = 6, face = "plain", color = "black"),
strip.background = element_blank(),
#plot.margin = margin(10, 10, 10, 50),
plot.title = element_text(hjust = 0.5, size = 20, face = "bold",
margin = margin(10, 0, 10, 0)),
panel.background = element_rect(fill = 'white'),
panel.grid.major = element_line(colour = "lightgray", size = 0.3),
panel.grid.minor = element_line(colour = "lightgray", size = 0.1),
panel.border = element_rect(colour = "black", fill = NA, size = 0.5)
) + facet_grid(~Reservoir_Season, switch = "x") +
scale_fill_discrete(name = "Pipelines")
dev.off()
# Plot All
p = ggplot(all.pipelines.detailed, aes(y = Shannon)) +
stat_boxplot(geom = "errorbar", lwd = 1, position = "dodge", show.legend = F, aes(color = All)) +
geom_boxplot(coef = 1.5, show.legend = F, alpha = 0.5, outlier.alpha = 1, varwidth = T, lwd = 1, aes(color = All, fill = All)) +
xlab(NULL) +
ylab("Shannon index") +
theme(axis.text.x = element_blank(),
axis.text.y = element_text(size = 6, color = "black"),
axis.title = element_text(size = 10, face = "plain"),
axis.ticks.x = element_blank(),
legend.text = element_text(size = 6, color = "black"),
legend.title = element_text(size = 8, face = "plain"),
legend.margin = margin(t = 0, unit='cm'),
legend.key = element_rect(fill = NA, color = NA),
strip.text.x = element_text(size = 8, face = "plain", color = "black"),
strip.background = element_blank(),
#plot.margin = margin(10, 10, 10, 50),
plot.title = element_text(hjust = 0.5, size = 20, face = "bold",
margin = margin(10, 0, 10, 0)),
panel.background = element_rect(fill = 'lightgray'),
panel.grid.major = element_line(colour = "white", size = 0.3),
panel.grid.minor = element_line(colour = "white", size = 0.1),
panel.border = element_rect(colour = "black", fill = NA, size = 0.5)
) + facet_grid(~All, switch="x")
# Calculate median and quartiles
ggplot_build(p)$data
min(all.pipelines.detailed$Shannon)
max(all.pipelines.detailed$Shannon)
median(all.pipelines.detailed$Shannon)
quartiles(all.pipelines.detailed$Shannon)
all.pipelines.detailed[all.pipelines.detailed$Shannon == min(all.pipelines.detailed$Shannon),]["Shannon"]
all.pipelines.detailed[all.pipelines.detailed$Shannon == max(all.pipelines.detailed$Shannon),]["Shannon"]
## Pipelines ##
# Plot Pipeline
p = ggplot(all.pipelines.detailed, aes(y = Shannon)) +
stat_boxplot(geom = "errorbar", lwd = 1, position = "dodge", show.legend = F, aes(color = Pipeline)) +
geom_boxplot(coef = 1.5, show.legend = F, alpha = 0.5, outlier.alpha = 1, varwidth = T, lwd = 1, aes(color = Pipeline, fill = Pipeline)) +
xlab(NULL) +
ylab("Shannon index") +
theme(axis.text.x = element_blank(),
axis.text.y = element_text(size = 6, color = "black"),
axis.title = element_text(size = 10, face = "plain"),
axis.ticks.x = element_blank(),
legend.text = element_text(size = 6, color = "black"),
legend.title = element_text(size = 8, face = "plain"),
legend.margin = margin(t = 0, unit='cm'),
legend.key = element_rect(fill = NA, color = NA),
strip.text.x = element_text(size = 8, face = "plain", color = "black"),
strip.background = element_blank(),
#plot.margin = margin(10, 10, 10, 50),
plot.title = element_text(hjust = 0.5, size = 20, face = "bold",
margin = margin(10, 0, 10, 0)),
panel.background = element_rect(fill = 'white'),
panel.grid.major = element_line(colour = "lightgray", size = 0.3),
panel.grid.minor = element_line(colour = "lightgray", size = 0.1),
panel.border = element_rect(colour = "black", fill = NA, size = 0.5)
) + facet_grid(~Pipeline, switch="x")
# Export ggplot to Latex
tikz(file = "Alpha_diversity_shannon_pipelines.tex", width = 6, height = 3)
p
dev.off()
# Get min, max, median, and quartiles for each pipeline
ggplot_build(p)$data
# Get pipelines
pipelines = unique(all.pipelines.detailed$Pipeline)
for (pipeline in pipelines) {
print(pipeline)
# Extract pipeline
pipeline = all.pipelines.detailed[all.pipelines.detailed$Pipeline == pipeline,]
# Get min, max, median, and quartiles for each pipeline
print(round(min(pipeline$Shannon), digits = 3))
print(round(max(pipeline$Shannon), digits = 3))
print(median(pipeline$Shannon))
print(quartiles(pipeline$Shannon))
print(pipeline[pipeline$Shannon == min(pipeline$Shannon),]["Shannon"])
print(pipeline[pipeline$Shannon == max(pipeline$Shannon),]["Shannon"])
print("--------------------------------------------------------------")
print("")
}
## Statistical analyses ##
# fit linear models
mod.Shannon = aov(Shannon~Pipeline, data=all.pipelines.detailed)
# ANOVA
anova.test = anova(mod.Shannon)
anova.test
# Tukey
tukey.test = TukeyHSD(mod.Shannon)
tukey.test
# Check if p-value < 0.05
tukey.test = as.data.frame(tukey.test[["Pipeline"]])
tukey.test[tukey.test$`p adj` < 0.05,]
# Export as latex tables
print(xtable(anova.test, digits = c(0, 0, 4, 4, 4, 4)), booktabs=TRUE, file = "Alpha_Diversity_Shannon_ANOVA_Pipelines.tex")
print(xtable(tukey.test$Pipeline, digits = c(0, 4, 4, 4, 4)), booktabs=TRUE, file = "Alpha_Diversity_Shannon_Tukey_Pipelines.tex")
## Reservoirs ##
# Plot Reservoir
p = ggplot(all.pipelines.detailed, aes(y = Shannon)) +
stat_boxplot(geom = "errorbar", lwd = 1, position = "dodge", show.legend = F, aes(color = Reservoir)) +
geom_boxplot(coef = 1.5, show.legend = F, alpha = 0.5, outlier.alpha = 1, varwidth = T, lwd = 1, aes(color = Reservoir, fill = Reservoir)) +
xlab(NULL) +
ylab("Shannon index") +
theme(axis.text.x = element_blank(),
axis.text.y = element_text(size = 6, color = "black"),
axis.title = element_text(size = 10, face = "plain"),
axis.ticks.x = element_blank(),
legend.text = element_text(size = 6, color = "black"),
legend.title = element_text(size = 8, face = "plain"),
legend.margin = margin(t = 0, unit='cm'),
legend.key = element_rect(fill = NA, color = NA),
strip.text.x = element_text(size = 8, face = "plain", color = "black"),
strip.background = element_blank(),
#plot.margin = margin(10, 10, 10, 50),
plot.title = element_text(hjust = 0.5, size = 20, face = "bold",
margin = margin(10, 0, 10, 0)),
panel.background = element_rect(fill = 'white'),
panel.grid.major = element_line(colour = "lightgray", size = 0.3),
panel.grid.minor = element_line(colour = "lightgray", size = 0.1),
panel.border = element_rect(colour = "black", fill = NA, size = 0.5)
) + facet_grid(~Reservoir, switch="x")
# Export ggplot to Latex
tikz(file = "Alpha_diversity_shannon_reservoirs.tex", width = 6, height = 3)
p
dev.off()
# Get min, max, median, and quartiles for each reservoir
ggplot_build(p)$data
# Get reservoirs
reservoirs = unique(all.pipelines.detailed$Reservoir)
for (reservoir in reservoirs) {
print(reservoir)
# Extract reservoir
reservoir = all.pipelines.detailed[all.pipelines.detailed$Reservoir == reservoir,]
# Get min, max, median, and quartiles for each reservoir
print(round(min(reservoir$Shannon), digits = 3))
print(round(max(reservoir$Shannon), digits = 3))
print(median(reservoir$Shannon))
print(quartiles(reservoir$Shannon))
print(reservoir[reservoir$Shannon == min(reservoir$Shannon),]["Shannon"])
print(reservoir[reservoir$Shannon == max(reservoir$Shannon),]["Shannon"])
print("--------------------------------------------------------------")
print("")
}
## Statistical analyses ##
# fit linear models
mod.Shannon = aov(Shannon~Reservoir, data=all.pipelines.detailed)
# ANOVA
anova.test = anova(mod.Shannon)
anova.test
# Tukey
tukey.test = TukeyHSD(mod.Shannon)
tukey.test
# Check if p-value < 0.05
tukey.test = as.data.frame(tukey.test[["Reservoir"]])
tukey.test[tukey.test$`p adj` < 0.05,]
# Export as latex tables
print(xtable(anova.test, digits = c(0, 0, 4, 4, 4, 4)), booktabs=TRUE, file = "Alpha_Diversity_Shannon_ANOVA_Reservoirs.tex")
## Seasons ##
# Plot Season
p = ggplot(all.pipelines.detailed, aes(y = Shannon)) +
stat_boxplot(geom = "errorbar", lwd = 1, position = "dodge", show.legend = F, aes(color = Season)) +
geom_boxplot(coef = 1.5, show.legend = F, alpha = 0.5, outlier.alpha = 1, varwidth = T, lwd = 1, aes(color = Season, fill = Season)) +
xlab(NULL) +
ylab("Shannon index") +
theme(axis.text.x = element_blank(),
axis.text.y = element_text(size = 6, color = "black"),
axis.title = element_text(size = 10, face = "plain"),
axis.ticks.x = element_blank(),
legend.text = element_text(size = 6, color = "black"),
legend.title = element_text(size = 8, face = "plain"),
legend.margin = margin(t = 0, unit='cm'),
legend.key = element_rect(fill = NA, color = NA),
strip.text.x = element_text(size = 8, face = "plain", color = "black"),
strip.background = element_blank(),
#plot.margin = margin(10, 10, 10, 50),
plot.title = element_text(hjust = 0.5, size = 20, face = "bold",
margin = margin(10, 0, 10, 0)),
panel.background = element_rect(fill = 'white'),
panel.grid.major = element_line(colour = "lightgray", size = 0.3),
panel.grid.minor = element_line(colour = "lightgray", size = 0.1),
panel.border = element_rect(colour = "black", fill = NA, size = 0.5)
) + facet_grid(~Season, switch="x")
# Export ggplot to Latex
tikz(file = "Alpha_diversity_shannon_seasons.tex", width = 6, height = 3)
p
dev.off()
# Get min, max, median, and quartiles for each season
ggplot_build(p)$data
# Get seasons
seasons = unique(all.pipelines.detailed$Season)
for (season in seasons) {
print(season)
# Extract season
season = all.pipelines.detailed[all.pipelines.detailed$Season == season,]
# Get min, max, median, and quartiles for each season
print(round(min(season$Shannon), digits = 3))
print(round(max(season$Shannon), digits = 3))
print(median(season$Shannon))
print(quartiles(season$Shannon))
print(season[season$Shannon == min(season$Shannon),]["Shannon"])
print(season[season$Shannon == max(season$Shannon),]["Shannon"])
print("--------------------------------------------------------------")
print("")
}
# Fences
quartiles = quartiles(all.pipelines.detailed[all.pipelines.detailed$Season == "Summer",]$Shannon)
upperq = round(quartiles$value[["75%"]])
lowerq = round(quartiles$value[["25%"]])
iqr = upperq - lowerq
upper.fence = upperq + (1.5 * iqr)
lower.fence = lowerq - (1.5 * iqr)
## Statistical analyses ##
# fit linear models
mod.Shannon = aov(Shannon~Season, data=all.pipelines.detailed)
# ANOVA
anova.test = anova(mod.Shannon)
anova.test
# T-test
t.test(Shannon~Season, data=all.pipelines.detailed, var.equal = TRUE)
# Tukey
tukey.test = TukeyHSD(mod.Shannon)
tukey.test
# Check if p-value < 0.05
tukey.test = as.data.frame(tukey.test[["Season"]])
tukey.test[tukey.test$`p adj` < 0.05,]
# Export as latex tables
print(xtable(anova.test, digits = c(0, 0, 4, 4, 4, 4)), booktabs=TRUE, file = "Alpha_Diversity_Shannon_ANOVA_Seasons.tex")
| /Codes/Alpha_Diversity_Shanonn.R | no_license | RomuloAS/eDNA_metabarcoding_pipelines_comparison | R | false | false | 13,695 | r |
#' \pkg{gmailr} makes gmail access easy.
#'
#' \code{gmailr} provides an interface to the gmail api \url{https://developers.google.com/gmail/api/}
#' @docType package
#' @name gmailr
#' @import httr
#' @import base64enc
NULL
#' Pipe statements
#'
#' Like dplyr and ggvis, gmailr also uses the pipe function, \code{\%>\%}, to
#' turn function composition into a series of imperative statements.
#'
#' @importFrom magrittr %>%
#' @name %>%
#' @rdname pipe
#' @export
#' @param lhs,rhs A mime object and a function to apply to it
#' @examples
#' # Instead of
#' to(mime(), 'someone@@somewhere.com')
#' # you can write
#' mime() %>% to('someone@@somewhere.com')
NULL
the <- new.env(parent = emptyenv())
the$id <- "955034766742-huv7d1b1euegvk5vfmfq7v83u4rpdqb0.apps.googleusercontent.com"
the$secret <- "rpJPeEMnDOh7qNAVjUh_aKlO"
get_token <- function() {
if(!exists("token", the)){
gmail_auth()
}
the$token
}
#' Clear the current oauth token
#' @export
clear_token <- function() {
unlink(".httr-oauth")
the$token <- NULL
}
#' Setup oauth authentication for your gmail
#'
#' @param scope the authentication scope to use
#' @param id the client_id to use for authentication
#' @param secret the client secret to use for authentication
#' @param secret_file the secret json file downloaded from \url{https://console.cloud.google.com}
#' @seealso use_secret_file to set the default id and secret to a different
#' value than the default.
#' @export
#' @examples
#' \dontrun{
#' gmail_auth("compose")
#' }
gmail_auth <- function(scope=c("read_only", "modify", "compose", "full"),
id = the$id,
secret = the$secret,
secret_file = NULL) {
if(!is.null(secret_file)){
if (!(missing(id) && missing(secret))) {
stop("You should set either ", sQuote("secret_file"), " or ",
sQuote("id"), " and ", sQuote("secret"), ", not both",
call. = FALSE)
}
use_secret_file(secret_file)
# Use new ID and secret
id <- the$id
secret <- the$secret
}
myapp <- oauth_app("google", id, secret)
scope_urls <- c(read_only = "https://www.googleapis.com/auth/gmail.readonly",
modify = "https://www.googleapis.com/auth/gmail.modify",
compose = "https://www.googleapis.com/auth/gmail.compose",
full = "https://mail.google.com/")
scope <- scope_urls[match.arg(scope, several.ok=TRUE)]
the$token <- oauth2.0_token(oauth_endpoints("google"), myapp, scope = scope)
}
#' Use information from a secret file
#'
#' This function sets the default secret and client_id to those in the secret
#' file
#' @param filename the filename of the file
#' @export
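#' @examples
#' \dontrun{
#' # A hypothetical example; the filename below is illustrative, not shipped
#' # with the package. Point it at the client-secret JSON downloaded from the
#' # Google developer console.
#' use_secret_file("client_secret.json")
#' }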
use_secret_file <- function(filename) {
info <- jsonlite::fromJSON(readChar(filename, nchars=1e5))
the$secret <- info$installed$client_secret
the$id <- info$installed$client_id
}
#' Get the body text of a message or draft
#' @param x the object from which to retrieve the body
#' @param ... other parameters passed to methods
#' @export
#' @examples
#' \dontrun{
#' body(my_message)
#' body(my_draft)
#' }
body <- function(x, ...) UseMethod("body")
#' @export
body.gmail_message <- function(x, type="text/plain", collapse = FALSE, ...){
is_multipart <- !is.null(x$payload$parts)
if (is_multipart) {
if (is.null(type)){
good_parts <- TRUE
} else {
good_parts <- vapply(x$payload$parts, FUN.VALUE = logical(1),
function(part) {
any(
vapply(part$headers, FUN.VALUE = logical(1),
function(header) {
tolower(header$name) %==% "content-type" &&
grepl(type, header$value, ignore.case = TRUE)
})
)
})
}
res <-
lapply(x$payload$parts[good_parts],
function(x){
base64url_decode_to_char(x$body$data)
})
} else { # non_multipart
res <- base64url_decode_to_char(x$payload$body$data)
}
if (collapse){
res <- paste0(collapse = "\n", res)
}
res
}
#' @export
body.gmail_draft <- function(x, ...){ body.gmail_message(x$message, ...) }
#' Get the id of a gmailr object
#' @param x the object from which to retrieve the id
#' @param ... other parameters passed to methods
#' @export
#' @examples
#' \dontrun{
#' id(my_message)
#' id(my_draft)
#' }
id <- function(x, ...) UseMethod("id")
#' @export
id.gmail_message <- function(x, ...) { x$id }
#' @export
id.gmail_thread <- id.gmail_message
#' @export
id.gmail_draft <- id.gmail_message
#' @rdname id
#' @export
#' @inheritParams id
#' @param what the type of id to return
id.gmail_messages <- function(x, what=c("message_id", "thread_id"), ...){
what <- switch(match.arg(what),
message_id = "id",
thread_id = "threadId"
)
unlist(lapply(x, function(page) { vapply(page$messages, "[[", character(1), what) }))
}
#' @export
id.gmail_drafts <- function(x, what=c("draft_id", "message_id", "thread_id"), ...){
what <- switch(match.arg(what),
draft_id = return(
unlist(lapply(x, function(page) { vapply(page$drafts, "[[", character(1), "id")}))
),
message_id = "id",
thread_id = "threadId"
)
unlist(lapply(x, function(page) { vapply(page$drafts, function(x){ x$message[[what]] }, character(1)) }))
}
#' @export
id.gmail_threads <- function(x, ...){
unlist(lapply(x, function(page) { vapply(page$threads, "[[", character(1), "id") }))
}
#' Methods to get values from message or drafts
#' @param x the object from which to get or set the field
#' @param ... other parameters passed to methods
#' @rdname accessors
#' @export
to <- function(x, ...) UseMethod("to")
#' @export
to.gmail_message <- function(x, ...){ header_value(x, "To") }
#' @export
to.gmail_draft <- function(x, ...){ to.gmail_message(x$message, ...) }
#' @rdname accessors
#' @export
from <- function(x, ...) UseMethod("from")
#' @export
from.gmail_message <- function(x, ...){ header_value(x, "From") }
#' @export
from.gmail_draft <- from.gmail_message
#' @export
from <- function(x, ...) UseMethod("from")
#' @rdname accessors
#' @export
cc <- function(x, ...) UseMethod("cc")
#' @export
cc.gmail_message <- function(x, ...){ header_value(x, "Cc") }
#' @export
cc.gmail_draft <- function(x, ...){ from.gmail_message(x$message, ...) }
#' @rdname accessors
#' @export
bcc <- function(x, ...) UseMethod("bcc")
#' @export
bcc.gmail_message <- function(x, ...){ header_value(x, "Bcc") }
#' @export
bcc.gmail_draft <- function(x, ...){ from.gmail_message(x$message, ...) }
#' @rdname accessors
#' @export
date <- function(x, ...) UseMethod("date")
#' @export
date.default <- function(x, ...) { base::date() }
#' @export
date.gmail_message <- function(x, ...){ header_value(x, "Date") }
#' @export
date.gmail_draft <- function(x, ...){ date.gmail_message(x$message, ...) }
#' @rdname accessors
#' @export
subject <- function(x, ...) UseMethod("subject")
#' @export
subject.gmail_message <- function(x, ...) { header_value(x, "Subject") }
#' @export
subject.gmail_draft <- function(x, ...){ subject.gmail_message(x$message, ...) }
header_value <- function(x, name){
Find(function(header) identical(header$name, name), x$payload$headers)$value
}
#' @export
print.gmail_message <- function(x, ...){
to <- to(x)
from <- from(x)
date <- date(x)
subject <- subject(x)
id <- id(x)
cat(p(
crayon::bold("Id: "), id, "\n",
crayon::bold("To: "), to, "\n",
crayon::bold("From: "), from, "\n",
crayon::bold("Date: "), date, "\n",
crayon::bold("Subject: "), subject, "\n",
body(x, collapse = TRUE)), "\n")
}
#' @export
print.gmail_thread <- function(x, ...){
id <- id(x)
cat(strwrap(p(crayon::bold("Thread Id: "), id, "\n")), "\n")
}
#' @export
print.gmail_draft <- function(x, ...){
id <- id(x)
cat(strwrap(p(crayon::bold("Draft Id: "), id, "\n")), "\n")
print(x$message, ...)
}
#' @export
print.gmail_messages <- function(x, ...){
message_ids <- id(x, "message_id")
thread_ids <- id(x, "thread_id")
print(format(data.frame(message_id=message_ids, thread_id=thread_ids)), ...)
}
#' @export
print.gmail_threads <- function(x, ...){
thread_ids <- id(x)
snippets <- unlist(lapply(x, function(page) { vapply(page$threads, "[[", character(1), "snippet") }))
print(format(data.frame(thread_id=thread_ids, snippet=snippets)), ...)
}
#' @export
print.gmail_drafts <- function(x, ...){
draft_ids <- id(x, "draft_id")
message_ids <- id(x, "message_id")
thread_ids <- id(x, "thread_id")
print(format(data.frame(draft_ids, message_id=message_ids, thread_id=thread_ids)), ...)
}
the$last_response <- list()
gmailr_query <- function(fun, location, user_id, class = NULL, upload = FALSE,
...) {
path_fun <- if (upload) gmail_upload_path else gmail_path
response <- fun(path_fun(user_id, location),
config(token = get_token()),
...)
result <- content(response, "parsed")
the$last_response <- response
if (status_code(response) >= 300) {
cond <- structure(list(
call = sys.call(-1),
content = result,
response = response,
message = paste0("Gmail API error: ", status_code(response), "\n ", result$error$message, "\n")),
class = c("condition", "error", "gmailr_error"))
stop(cond)
}
if (!is.null(class) && !is.null(result)) {
class(result) <- class
}
result
}
#' Response from the last query
#'
#' @export
last_response <- function() {
the$last_response
}
gmailr_POST <- function(location, user_id, class = NULL, ...) {
gmailr_query(POST, location, user_id, class, ...)
}
gmailr_GET <- function(location, user_id, class = NULL, ...) {
gmailr_query(GET, location, user_id, class, ...)
}
gmailr_DELETE <- function(location, user_id, class = NULL, ...) {
gmailr_query(DELETE, location, user_id, class, ...)
}
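# Usage sketch for the wrappers above (illustration only: a valid OAuth token
# and network access are required, and the exact `location` string here is an
# assumption, not taken from the package docs). Because gmailr_query() signals
# a classed condition, callers can trap API failures selectively while other
# errors propagate unchanged:
# tryCatch(
#   gmailr_GET("messages/does-not-exist", "me"),
#   gmailr_error = function(e) message("Gmail API said: ", conditionMessage(e))
# )
# last_response()  # raw httr response from the most recent call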
| /R/gmailr.R | no_license | josibake/gmailr | R | false | false | 9,904 | r |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/learners.R
\name{list_learners}
\alias{list_learners}
\title{List available learners for a given task}
\usage{
list_learners(task)
}
\arguments{
\item{task}{[Task] input task}
}
\value{
[character(n)] vector of learner.ids
}
\description{
List available learners for a given task
}
| /man/list_learners.Rd | no_license | pfistfl/OMLRandomBotv2 | R | false | true | 360 | rd |
#
# SPECpower.R, 12 Aug 16
# Data from:
# http://www.spec.org
# power_ssj2008-results-20160613-125328.csv
#
# Example from:
# Empirical Software Engineering using R
# Derek M. Jones
source("ESEUR_config.r")
library("car")
jpow=read.csv(paste0(ESEUR_dir, "hardware/ssj2008-results-20160613.csv.xz"), as.is=TRUE)
jpow$ssj_ops=jpow$ssj_ops...100..of.target.load
jpow$avg.watts=jpow$Average.watts...100..of.target.load
pal_col=rainbow_hcl(3)
jpow$ops_watt=jpow$ssj_ops/jpow$avg.watts
pairs( ~ ops_watt+
Nodes+
# JVM.Vendor+
Processor.MHz+
Chips+
Cores+
# Threads.Per.Core+
Memory.GB
# +ssj_ops
, col=point_col,
data=jpow)
j_av_watts=subset(jpow, !is.na(avg.watts))
pow_mod=glm(avg.watts ~
Nodes+
# JVM.Vendor+
Processor.MHz+
Chips+
Cores+
# Threads.Per.Core+
Memory.GB
# +ssj_ops
, data=j_av_watts)
summary(pow_mod)
vif(pow_mod)
pow_mod=glm(avg.watts ~
Nodes+
# JVM.Vendor+
Processor.MHz+
# Chips has the highest VIF
# Chips+
Cores+
# Threads.Per.Core+
Memory.GB
# +ssj_ops
, data=j_av_watts)
summary(pow_mod)
vif(pow_mod)
library("penalized")
pow_mod=glm(avg.watts ~
Nodes+
# JVM.Vendor+
Processor.MHz+
Chips+
Cores+
# Threads.Per.Core+
Memory.GB
# +ssj_ops
, data=j_av_watts)
summary(pow_mod)
vif(pow_mod)
pen_mod=penalized(avg.watts ~
Nodes+
# JVM.Vendor+
Processor.MHz+
Chips+
Cores+
# Threads.Per.Core+
Memory.GB
# +ssj_ops
, data=j_av_watts,
# Give maximum opportunity to change the parameters
lambda1=1.0,
lambda2=1.0)
coefficients(pen_mod)
| /hardware/SPECpower.R | no_license | alanponce/ESEUR-code-data | R | false | false | 1,962 | r |
library(dplyr)
library(knitr)
library(ggplot2)
graphs <- read.csv('data/graphs.csv')
population <- read.csv('data/population.csv')
sentences <- read.csv('data/sentences.csv')
#optional to include
kable(population, col.names = c("Total", "", "10,000"))
#used as sentences
signedup <- graphs%>% filter(Question == "SignedUp") %>% select(Response, Percent)
willing <- graphs%>%filter(Question == "Willing") %>% select(Response, Percent)
tableSignedUp <- kable(signedup)
tableWilling <- kable(willing)
#bar
waitlistDie <- graphs%>% filter(Question == "WaitlistDie") %>% select(Response,Percent)
ylabel <- c("Strongly Agree", "Somewhat Agree", "Somewhat Disagree", "Strongly Disagree")
waitlist<- ggplot(waitlistDie) + geom_col(mapping = aes(x = reorder(ylabel, -waitlistDie$Percent) , y = waitlistDie$Percent )) + ggtitle("Percent of People who Agree that People on the Waitlist Die") +
xlab("Response") + ylab("Percent")
transplantSupport <- graphs %>% filter(Question == "TransplantSupport") %>% select(Response,Percent)
tslabel <- c("Strongly Support","Support","Oppose","Strongly Oppose")
transSupport <- ggplot(transplantSupport) + geom_col(mapping = aes(x = reorder(tslabel, -transplantSupport$Percent) , y =transplantSupport$Percent )) + ggtitle("Percent of People who Support Transplants") +
xlab("Response") + ylab("Percent")
deathDonate <- graphs%>% filter(Question == "DeathDonate") %>% select(Response,Percent)
ddlabel <- c("Likely Yes","Likely No","Strong No","Strong Yes")
donateDeath <- ggplot(deathDonate) + geom_col(mapping = aes(x = reorder(ddlabel, -deathDonate$Percent) , y = deathDonate$Percent )) + ggtitle("Percent of People who are Willing to Donate Their Organs After Death") +
xlab("Response") + ylab("Percent")
# stacked bar
kidney <- graphs%>% filter(Question == "Kidney") %>% select(Response,Percent)
kidneys <- ggplot(kidney) + geom_col(mapping = aes(x = reorder(kidney$Response, -kidney$Percent) , y = kidney$Percent )) + ggtitle("Percent of People who believe that kidney") +
xlab("Response") + ylab("Percent")
liver <- graphs%>% filter(Question == "Liver") %>% select(Response,Percent)
livers <- ggplot(liver) + geom_col(mapping = aes(x = reorder(liver$Response, -liver$Percent) , y = liver$Percent )) + ggtitle("Percent of People who believe that liver") +
xlab("Response") + ylab("Percent")
lung <- graphs%>% filter(Question == "Lung") %>% select(Response,Percent)
lungs <- ggplot(lung) + geom_col(mapping = aes(x = reorder(lung$Response, -lung$Percent) , y = lung$Percent )) + ggtitle("Percent of People who believe that lung") +
xlab("Response") + ylab("Percent")
| /Final Deliverable/survey.R | no_license | sarahpeng/Group-7 | R | false | false | 2,617 | r |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/io_and_formats.R
\name{balance_fragment_clusters}
\alias{balance_fragment_clusters}
\title{Downsample fragments within cluster sets to match the sample with the lowest number of fragments}
\usage{
balance_fragment_clusters(fragment_list, clusters)
}
\arguments{
\item{fragment_list}{The list object containing GenomicRanges objects.}
\item{clusters}{a vector with cluster assignments for each item in fragment_list}
}
\value{
a list of GenomicRanges objects with all members of each cluster downsampled to the
minimum number of reads of all cluster members.
}
\description{
Downsample fragments within cluster sets to match the sample with the lowest number of fragments
}
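% Usage sketch added for illustration (object names are assumed, not taken
% from the package's own documentation):
\examples{
\dontrun{
# fragment_list: list of GenomicRanges objects; clusters: one label each
balanced <- balance_fragment_clusters(fragment_list,
                                      clusters = c(1, 1, 2, 2))
}
}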
| /man/balance_fragment_clusters.Rd | permissive | adrisede/lowcat | R | false | true | 751 | rd |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/wafregional_operations.R
\name{wafregional_create_web_acl_migration_stack}
\alias{wafregional_create_web_acl_migration_stack}
\title{Creates an AWS CloudFormation WAFV2 template for the specified web ACL
in the specified Amazon S3 bucket}
\usage{
wafregional_create_web_acl_migration_stack(WebACLId, S3BucketName,
IgnoreUnsupportedType)
}
\arguments{
\item{WebACLId}{[required] The UUID of the WAF Classic web ACL that you want to migrate to WAF v2.}
\item{S3BucketName}{[required] The name of the Amazon S3 bucket to store the CloudFormation template
in. The S3 bucket must be configured as follows for the migration:
\itemize{
\item The bucket name must start with \verb{aws-waf-migration-}. For example,
\code{aws-waf-migration-my-web-acl}.
\item The bucket must be in the Region where you are deploying the
template. For example, for a web ACL in us-west-2, you must use an
Amazon S3 bucket in us-west-2 and you must deploy the template stack
to us-west-2.
\item The bucket policies must permit the migration process to write data.
For listings of the bucket policies, see the Examples section.
}}
\item{IgnoreUnsupportedType}{[required] Indicates whether to exclude entities that can't be migrated or to stop
the migration. Set this to true to ignore unsupported entities in the
web ACL during the migration. Otherwise, if AWS WAF encounters
unsupported entities, it stops the process and throws an exception.}
}
\description{
Creates an AWS CloudFormation WAFV2 template for the specified web ACL
in the specified Amazon S3 bucket. Then, in CloudFormation, you create a
stack from the template, to create the web ACL and its resources in AWS
WAFV2. Use this to migrate your AWS WAF Classic web ACL to the latest
version of AWS WAF.
This is part of a larger migration procedure for web ACLs from AWS WAF
Classic to the latest version of AWS WAF. For the full procedure,
including caveats and manual steps to complete the migration and switch
over to the new web ACL, see \href{https://docs.aws.amazon.com/waf/latest/developerguide/waf-migrating-from-classic.html}{Migrating your AWS WAF Classic resources to AWS WAF}
in the \href{https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html}{AWS WAF Developer Guide}.
}
\section{Request syntax}{
\preformatted{svc$create_web_acl_migration_stack(
WebACLId = "string",
S3BucketName = "string",
IgnoreUnsupportedType = TRUE|FALSE
)
}
}
\keyword{internal}
| /cran/paws.security.identity/man/wafregional_create_web_acl_migration_stack.Rd | permissive | sanchezvivi/paws | R | false | true | 2,507 | rd |
library(shiny)
library(shinythemes)
library(ggplot2)
library(leaflet)
library(htmltools)
library(DT)
library(plotly)
library(rsconnect)
setwd('../')
source("Scripts/load_data.R")
# /////////////////////////////////////////////////////////////////////////////////////////////////////////////
# ////////////////////////////////////////// /////////////////////////////////////////
# ///////////////////////////////////////// UI Object //////////////////////////////////////////
# //////////////////////////////////////// ///////////////////////////////////////////
# /////////////////////////////////////////////////////////////////////////////////////////////////////////////
# Define UI for application that draws a histogram
ui <- fluidPage(
theme = shinytheme('simplex'),
navbarPage(
title = div( tags$img(src = "logo2.png",height="15%", width="15%"), style = "text-align:center;") ,
tabPanel(div("HOME", style = "font-size: 15px;"),
tags$img(src='background2.jpg', style = 'width: 100% ; height: 100%')
),
tabPanel(div("1. Compare multiple cities", style = "font-size: 15px"),
br(),
sidebarLayout(
sidebarPanel(
selectInput("cities1",
h4("Filter by city"),
multiple = TRUE,
choices = cities
),
dateRangeInput("dates1",
h4("Date range"),
format = "yyyy-mm-dd",
start = "2020-01-01",
end = "2020-12-12"),
radioButtons("feature1",
h4("Select the feature"),
choices = list("Availability over 30 days" = 'availability_30',
"Average price over 30 days" = 'price_30',
"Expected revenue over 30 days" = 'revenue_30' )),
selectInput("dimension1",
h4("Add new dimension"),
choices = c("neighbourhood_cleansed",
"room_type",
"accommodates",
"bedrooms",
"beds"),
selected = "beds"
),
h4("Select the aggregation type"),
checkboxInput("average1", "Average", value = FALSE),
checkboxInput("median1", "Median", value = FALSE),
radioButtons("choicePlot1",
h4("Select the plot type"),
choices = list("histogram" = "histogram",
"density" = "density",
"boxplot" = "boxplot",
"proportion" = "proportion"),
selected = "histogram"
),
submitButton("Submit"),
width = 3
),
mainPanel(
plotOutput("plot1"),br(),br(),
dataTableOutput('table1'),
width = 9
)
)
),
tabPanel(div("2. Deep dive into a city", style = "font-size: 15px"),
br(),
sidebarLayout(
sidebarPanel(
selectInput("cities2",
h4("Filter by city"),
multiple = FALSE,
choices = cities
),
dateRangeInput("dates2",
h4("Date range"),
format = "yyyy-mm-dd",
start = "2020-01-01",
end = "2020-12-12"),
radioButtons("feature2",
h4("Select the feature"),
choices = list("Availability over 30 days" = 'availability_30',
"Average price over 30 days" = 'price_30',
"Expected revenue over 30 days" = 'revenue_30' )),
selectInput("dimension2",
h4("Add new dimension"),
choices = c("neighbourhood_cleansed",
"room_type",
"accommodates",
"bedrooms",
"beds"),
selected = "beds"
),
h4("Select the aggregation type"),
checkboxInput("average2", "Average", value = FALSE),
checkboxInput("median2", "Median", value = FALSE),
radioButtons("choicePlot2",
h4("Select the plot type"),
choices = list("histogram" = "histogram",
"density" = "density",
"boxplot" = "boxplot",
"proportion" = "proportion"),
selected = "histogram"
),
submitButton("Submit"),
width = 3
),
mainPanel(
plotlyOutput("plot2"),br(),br(),
dataTableOutput('table2'),
width = 9
)
),
leafletOutput("mymap", height = 750)
),
tabPanel(div("Documentation", style = "font-size: 15px"),
br(),
mainPanel(
tags$style(type="text/css",
".shiny-output-error { visibility: hidden; }",
".shiny-output-error:before { visibility: hidden; }"
),
h1("Airbnb Analytics App"),
br(),
p("Welcome to our Airbnb analytics App that allows you to get some interesting insights on Airbnb statistics."),
br(),
br(),
p("Our app allows you to perform two different types of analysis: "),
tags$ol(
tags$li("Perform comparison between cities"),
tags$li("Deep dive into a selected city")
),br(),
h3("1) Compare multiple cities"),
p("The first type of analysis allows you to display insightful graphics (Density, Boxplot and Histogram)
concerning the cities of your choice.",br(),br(),
"The main goal of this section is to compare statistics between cities, therefore it is granted to you
to select multiple cities. ",br(),br(),
"Our graphs will automatically adapt themselves with respect to your filters
such as date, feature and dimension.",br(),br(),
"Finally, you can also display a table that stores the average and median of the selected feature.
All these filters are placed beautifully in a sidebar on the left of the panel."),
br(),
h3("2) Deep dive into a specific city"),
p("The second type of analysis allows you to display more information for a specific city.", br(),
" As in the first section, the filters are the same but you can select only one city.", br(),br(),
"This finer grained analysis gives you some information such as average and median of
the selected feature according to a dimension you specify. ", br(),br(),
"Eventually, we provide you a map, so that you may picture yourself easily and interactivelly
the different accommodations geographically")
)
)
)
)
# /////////////////////////////////////////////////////////////////////////////////////////////////////////////
# ////////////////////////////////////////// /////////////////////////////////////////
# ///////////////////////////////////////// Server Object //////////////////////////////////////////
# //////////////////////////////////////// ///////////////////////////////////////////
# /////////////////////////////////////////////////////////////////////////////////////////////////////////////
# Define server logic required to draw a histogram
server <- function(input, output) {
# ******************************************************************************************
# **************************************** TAB 1 *****************************************
# ******************************************************************************************
df_filtered1 <- reactive({
req(input$cities1)
df <- load_data(input$cities1, input$dates1[1], input$dates1[2])
return(df)
})
output$plot1 <- renderPlot({
df_plot <- df_filtered1()
df_plot <- df_plot %>% filter( !is.na(df_plot[input$feature1]) & (!is.na(df_plot[input$dimension1]) ) )
if (input$dimension1 == 'neighbourhood_cleansed') {
top_10_neighborhood <- df_plot %>%
group_by(city) %>%
count(neighbourhood_cleansed) %>%
arrange(city, desc(n)) %>%
top_n(10, n)
df_plot <- df_plot %>%
filter(neighbourhood_cleansed %in% top_10_neighborhood$neighbourhood_cleansed )
}
coeff <- 1
if(input$feature1 == 'availability_30') coeff <- 0.04
else if(input$feature1 == 'price_30') coeff <- 0.5
else if(input$feature1 == 'revenue_30') coeff <- 1
if (input$choicePlot1 == 'boxplot') {
ggplot(df_plot, aes_string(input$dimension1, input$feature1, fill='city')) +
geom_boxplot( outlier.shape = NA) +
scale_y_continuous(limits = quantile(df_plot[input$feature1], c(0.1, 0.9), na.rm = T)) +
theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=1))
}
else if (input$choicePlot1 == 'histogram') {
ggplot(df_plot, aes_string(x = input$dimension1, y = input$feature1, fill='city')) +
geom_bar(stat='identity') + ylab(input$feature1) + coord_flip()
}
else if (input$choicePlot1 == 'density') {
ggplot(df_plot[!is.na(df_plot[input$feature1]),], aes_string(input$feature1, fill = 'city')) +
geom_density(alpha = 0.4) + ylab("density") + xlim(0,1000*coeff)
}
else if (input$choicePlot1 == 'proportion'){
ggplot(df_plot, aes_string(x = input$dimension1, fill= 'city')) +
geom_bar(aes_string(x = input$dimension1), stat="count", lwd = 0.8, position = position_dodge(0.9), na.rm = T) +
coord_flip() + xlab(input$dimension1) + ylab("Proportions")
}
})
output$table1 <- renderDataTable({
opt = list(pageLength = 5, searching = F)
df_table <- df_filtered1()
df_table <- df_table %>% filter( !is.na(df_table[input$feature1]) & (!is.na(df_table[input$dimension1]) ) )
if (input$average1 & !input$median1) {
df_table %>%
group_by(city) %>%
summarise(average = mean(!!rlang::sym(input$feature1))) %>%
DT::datatable(options = opt)
}
else if (!input$average1 & input$median1) {
df_table %>%
group_by(city) %>%
summarise(median = median(!!rlang::sym(input$feature1)))%>%
DT::datatable(options = opt)
}
else if (input$average1 & input$median1) {
df_table %>%
group_by(city) %>%
summarise(average = mean(!!rlang::sym(input$feature1)),
median = median(!!rlang::sym(input$feature1)))%>%
DT::datatable(options = opt)
}
})
# ******************************************************************************************
# **************************************** TAB 2 *****************************************
# ******************************************************************************************
df_filtered2 <- reactive({
df <- load_data(input$cities2, input$dates2[1], input$dates2[2] )
return(df)
})
output$plot2 <- renderPlotly({
df_plot <- df_filtered2()
df_plot <- df_plot %>% filter( !is.na(df_plot[input$feature2]) & (!is.na(df_plot[input$dimension2]) ) )
if (input$dimension2 == 'neighbourhood_cleansed') {
top_10_neighborhood <- df_plot %>%
group_by(city) %>%
count(neighbourhood_cleansed) %>%
arrange(city, desc(n)) %>%
top_n(10, n)
df_plot <- df_plot %>%
filter(neighbourhood_cleansed %in% top_10_neighborhood$neighbourhood_cleansed )
}
coeff <- 1
if(input$feature2 == 'availability_30') coeff <- 0.04
else if(input$feature2 == 'price_30') coeff <- 0.5
else if(input$feature2 == 'revenue_30') coeff <- 1
if (input$choicePlot2 == 'boxplot') {
ggplot(df_plot, aes_string(input$dimension2, input$feature2, fill='city')) +
geom_boxplot( outlier.shape = NA) +
scale_y_continuous(limits = quantile(df_plot[input$feature2], c(0.1, 0.9), na.rm = T)) +
theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=1))
}
else if (input$choicePlot2 == 'histogram') {
ggplot(df_plot, aes_string(x = input$dimension2, y = input$feature2, fill='city')) +
geom_bar(stat='identity') + ylab(input$feature2) + coord_flip()
}
else if (input$choicePlot2 == 'density') {
ggplot(df_plot[!is.na(df_plot[input$feature2]),], aes_string(input$feature2, fill = 'city')) +
geom_density(alpha = 0.4) + ylab("density") + xlim(0,1000*coeff)
}
else if (input$choicePlot2 == 'proportion'){
ggplot(df_plot, aes_string(x = input$dimension2, fill= 'city')) +
geom_bar(aes_string(x = input$dimension2), stat="count", lwd = 0.8, position = position_dodge(0.9), na.rm = T) +
coord_flip() + xlab(input$dimension2) + ylab("Proportions")
}
})
output$table2 <- renderDataTable({
opt = list(pageLength = 5, searching = F)
df_table <- df_filtered2()
df_table <- df_table %>% filter( !is.na(df_table[input$feature2]) & (!is.na(df_table[input$dimension2]) ) )
if (input$average2 & !input$median2) {
df_table %>%
group_by(city, !!rlang::sym(input$dimension2)) %>%
summarise(average = mean(!!rlang::sym(input$feature2)))%>%
DT::datatable(options = opt)
}
else if (!input$average2 & input$median2) {
df_table %>%
group_by(city, !!rlang::sym(input$dimension2)) %>%
summarise(median = median(!!rlang::sym(input$feature2)))%>%
DT::datatable(options = opt)
}
else if (input$average2 & input$median2) {
df_table %>%
group_by(city, !!rlang::sym(input$dimension2)) %>%
summarise(average = mean(!!rlang::sym(input$feature2)),
median = median(!!rlang::sym(input$feature2)))%>%
DT::datatable(options = opt)
}
})
output$mymap <- renderLeaflet({
df_filtered2() %>%
leaflet() %>%
addTiles() %>%
addMarkers(clusterOptions = markerClusterOptions(),
popup = ~ paste0( tags$b("City: "), city , "</br>",
tags$b("Neighbourhood: "), neighbourhood_cleansed, "</br>",
tags$b("Price: "), price, " $", "</br>",
tags$a(href = listing_url[1], "Link")
),
label = ~htmlEscape(neighbourhood_cleansed)
)
})
}
# Run the application
shinyApp(ui = ui, server = server)
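# Deployment sketch (hypothetical app name): rsconnect is attached above but
# never used in this script; publishing would typically look like this.
# rsconnect::deployApp(appDir = ".", appName = "airbnb-analytics")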
| /App/app.R | no_license | HenintsoaRaza/Data-Analytics-Project | R | false | false | 17,995 | r |
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/SimMVN.R
\name{SimMVN}
\alias{SimMVN}
\title{Simulate multivariate normal Mixture}
\usage{
SimMVN(Means, Covs, N, P)
}
\arguments{
\item{Means, Covs, N, P}{component means, covariances, sample size, and mixture weights}
}
\description{
...
}
\examples{
#not run
}
\keyword{Wishart}
| /man/SimMVN.Rd | no_license | zoevanhavre/Zmix_devVersion2 | R | false | false | 328 | rd |
install.packages("party")
library(party)
# First, clear all previous stuff out of the workspace...
rm(list = ls());
mushdata <- read.csv("mushrooms.csv", header = TRUE)
summary(mushdata)
#analyze from
#Vision(cap / gill/ ring/ veil)
#Smell(odor)
#Habitat(habitat)
#Vision-cap
png(file = "dtree cap_shape.png")
output.tree_vision_cap_shape <- ctree(class~ cap.shape, data = mushdata)
plot(output.tree_vision_cap_shape)
dev.off()
output.tree_tactile_cap <- ctree(class~cap.surface , data = mushdata)
plot(output.tree_tactile_cap)
#insignificant
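# Side note (illustrative, assumes the ctree fit above): party models
# support predict(), so a split's usefulness can be checked directly.
# preds <- predict(output.tree_tactile_cap, newdata = mushdata)
# mean(preds == mushdata$class)  # in-sample accuracy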
png(file = "dtree cap_color.png")
output.tree_vision_cap <- ctree(class~cap.color, data = mushdata)
plot(output.tree_vision_cap)
dev.off()
#Vision-gill
png(file = "dtree gill.png")
output.tree_vision_gill <- ctree(class~gill.spacing + gill.attachment + gill.size , data = mushdata)
plot(output.tree_vision_gill)
dev.off()
#too many attributes in gill.color
#Ring
png(file = "dtree ring.png")
output.tree_vision_ring <- ctree(class~ring.type + ring.number, data = mushdata)
plot(output.tree_vision_ring)
dev.off()
#Veil
png(file = "dtree veil.png")
output.tree_vision_veil <- ctree(class~veil.color, data = mushdata)
plot(output.tree_vision_veil)
dev.off()
#Smell
png(file = "dtree odor.png")
output.tree <- ctree(class~ odor, data = mushdata)
plot(output.tree)
dev.off()
#Habitat
png(file = "dtree habitat.png")
output.tree_habitat <- ctree(class~ habitat, data = mushdata)
plot(output.tree_habitat)
dev.off()
| /mushroom_decision_tree.R | no_license | mendel462/Mushroom-Analysis | R | false | false | 1,535 | r |
createXES<- function(file,
traces,
events,
classifiers = NULL,
logattributes = NULL,
caseid_field = NULL,
case_classifier){
# File: The location of the output file
# Traces: a dataframe where each row represents a trace and each column represents
# a trace attribute.
# Events: a dataframe where each row represents an event and each column represents
# an event attribute. (This dataframe also has a column which refers to the
# traceid)
# Classifiers: A list of classifiers. The key represents the name of the classifier
# and the value contains a string vector of the respective event attributes
# Logatrributes: A list of log atributes. The key represents the attribute name and the
# value represents the attribute value. The attribute type is derived from
# the attribute value
# Caseid_field: The columnname which acts as traceid in the events dataframe.
# DEFAULT: The first column of the events dataframe is used as traceID.
##################HELPER FUNCTIONS############################
add <- function(text){
xml_i <<- xml_i + 1
xml[xml_i] <<- text
#xml
#cat(text, file, append=TRUE)
}
addAttribute <- function(datatype, key, value){
if(is.null(value)){
return()
}
if(datatype == "date"){
value = strftime(value,format="%Y-%m-%dT%H:%M:%S.000+00:00")
}
add(paste0('<',datatype,' key="',key,'" value="',value,'"/>'))
}
addExtensions <- function(attrs){
#add concept extension if any of the event attributes start with the "concept:" prefix
if(any(grepl("^concept:", names(attrs)))){
add('<extension name="Concept" prefix="concept" uri="http://www.xes-standard.org/concept.xesext" />')
}
#add time extension if any of the event attributes start with the "time:" prefix
if(any(grepl("^time:", names(attrs)))){
add('<extension name="Time" prefix="concept" uri="http://www.xes-standard.org/time.xesext" />')
}
}
addGlobals <- function(data, attrs, scope){
temp <- sapply(data[,names(attrs)],function(x){all(!is.na(x))})
globals <- names(temp[temp==TRUE])
add(paste0('<global scope="',scope,'">'))
for(key in globals){
datatype <- attrs[key]
addAttribute(datatype, key, defaultvalues[[datatype]])
}
add('</global>')
}
addClassifiers <- function(){
if(is.null(classifiers)){
return()
}
for(name in names(classifiers)){
add(paste0('<classifier name="',name,'" keys="', paste(classifiers[[name]], collapse=" "),'"/>'))
}
}
addLogAttributes <- function(){
if(is.null(logattributes)){
return()
}
for(name in names(logattributes)){
value = logattributes[[name]]
datatype = attribute_types[class(value)[1]]
if(datatype == "date"){
value = strftime(value,format="%Y-%m-%dT%H:%M:%S.000+00:00")
}
add(paste0('<', datatype,' key="',name,'" value="', value,'"/>'))
}
}
addTraces <- function(){
# apply(traces, 1, addTrace)
total = dim(traces)[1]
pb <- txtProgressBar(min=0, max = total, style = 3)
for(i in 1:total){
trace <- traces[i,,drop=FALSE]
addTrace(trace)
setTxtProgressBar(pb, i)
}
}
addTrace <- function(trace){
add('<trace>')
for(name in names(trace_attrs)){
addAttribute(trace_attrs[name], name, trace[name])
}
trace_id <- as.character(trace[[trace_caseid_field]])
addEvents(events_per_trace[[trace_id]])
add('</trace>')
}
addEvents <- function(trace_events){
apply(trace_events, 1, addEvent)
}
addEvent <- function(event){
add('<event>')
for(name in names(event_attrs)){
addAttribute(event_attrs[name], name, event[name])
}
add('</event>')
}
detectAttrType <- function(key, data){
detected = class(data[[key]])[1]
attribute_types[[detected]]
}
get_attr_info<- function(data){
sapply(colnames(data), detectAttrType, data)
}
############PRELIMINARIES##################
defaultvalues <- list("string"="default",
"int"="0",
"date"="1970-01-01T00:00:00.000+00:00",
"boolean" = "false")
attribute_types <- list("factor"="string",
"POSIXct"="date",
"integer"="int",
"ordered"="string",
"character"="string",
"logical" = "boolean")
trace_attrs <- get_attr_info(traces)
if(is.null(caseid_field)){
event_attrs <- get_attr_info(events[,-1])
}
else{
event_attrs <- get_attr_info(events[,names(events)!=caseid_field])
}
if(is.null(caseid_field)){
events_caseid_field <- colnames(events)[1]
}
else { #adjustment Gert
events_caseid_field <- caseid_field
}
events_per_trace <- split(events,events[events_caseid_field])
if("concept:name" %in% names(trace_attrs)){
trace_caseid_field <- "concept:name"
}
else if(events_caseid_field %in% names(trace_attrs)){
trace_caseid_field <- events_caseid_field
}
else{
trace_caseid_field <- colnames(traces)[1]
}
n_event_attrs = length(event_attrs)
n_trace_attrs = length(trace_attrs)
n_classifiers = length(classifiers)
n_logattributes = length(logattributes)
n_traces = dim(traces)[1]
n_events = dim(events)[1]
maxsize = 4+ 3*n_event_attrs + 3*n_trace_attrs + n_classifiers + n_logattributes + n_traces*(2+n_trace_attrs)+n_events*(2+n_event_attrs)
xml <- rep(NA,maxsize)
xml_i = 1
############GENERATE XML###################
#fileConn <- file(file, open="at")
add('<?xml version="1.0"?>')
#cat('<?xml version="1.0"?>', file=file)
add('<log xmlns="http://www.xes-standard.org/" xes.version="2.0">')
addExtensions(c(trace_attrs, event_attrs))
addGlobals(traces, trace_attrs, "trace")
addGlobals(events, event_attrs, "event")
addClassifiers()
addLogAttributes()
addTraces()
add('</log>')
xml <- na.omit(xml)
xml <- str_replace_all(xml, case_classifier, "concept:name")
writeLines(xml, file)
#close(fileConn)
}
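# Usage sketch (hypothetical column names; createXES() is defined above and needs
# the stringr package loaded for str_replace_all()):
# library(stringr)
# traces <- data.frame(case = c("c1", "c2"), stringsAsFactors = FALSE)
# events <- data.frame(case = c("c1", "c1", "c2"),
#                      activity = c("register", "pay", "register"),
#                      stringsAsFactors = FALSE)
# createXES(file = "log.xes", traces = traces, events = events,
#           caseid_field = "case", case_classifier = "activity")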
| /R/XES.r | no_license | cran/xesreadR | R | false | false | 6,434 | r |
event.type='se'
vals.m<-rep(0,length(tissue.sets))
vals.f<-rep(0,length(tissue.sets))
num.genes<-rep(0,length(tissue.sets))
de.genes.to.fem<-rep(0,length(tissue.sets))
de.genes.to.mal<-rep(0,length(tissue.sets))
for (tissue.set in lapply(tissue.sets,paste,collapse='.'))
{
for (tissue in unlist(strsplit(tissue.set,split = '\\.')))
{
if (!file.exists(paste0('/Users/karleg/Dimorph/gene_expression/all_genes/DE_result_',tissue,'.txt')))
next
de.tab<-read.table(paste0('/Users/karleg/Dimorph/gene_expression/all_genes/DE_result_',tissue,'.txt'))
# mart <- useMart(biomart = "ensembl", dataset = "hsapiens_gene_ensembl",host="www.ensembl.org")
# gene.loc<-getBM(attributes = c("hgnc_symbol","chromosome_name"),
# filters = "hgnc_symbol",
# values = gsub('\\..*','',rownames(de.tab)),
# mart = mart)
# y.genes<-gene.loc$hgnc_symbol[gene.loc$chromosome_name=='Y']
# de.tab<-de.tab[!(rownames(de.tab) %in% y.genes),]
de.genes.to.fem[which(unlist(lapply(tissue.sets,paste,collapse='.'))==tissue.set)]<-de.genes.to.fem[which(unlist(lapply(tissue.sets,paste,collapse='.'))==tissue.set)]+sum(de.tab$logFC>=log2(1.5) & de.tab$adj.P.Val<=0.05)
names(de.genes.to.fem)[which(unlist(lapply(tissue.sets,paste,collapse='.'))==tissue.set)]<-tissue.set
de.genes.to.mal[which(unlist(lapply(tissue.sets,paste,collapse='.'))==tissue.set)]<-de.genes.to.mal[which(unlist(lapply(tissue.sets,paste,collapse='.'))==tissue.set)]+sum(de.tab$logFC<=(-log2(1.5)) & de.tab$adj.P.Val<=0.05)
names(de.genes.to.mal)[which(unlist(lapply(tissue.sets,paste,collapse='.'))==tissue.set)]<-tissue.set
}
if (!file.exists(paste('/Users/karleg/Dimorph/other/',tissue.set,event.type,'.txt',sep='')))
{
vals.m[which(lapply(tissue.sets,paste,collapse='.')==tissue.set)]<-NA
names(vals.m)[which(lapply(tissue.sets,paste,collapse='.')==tissue.set)]<-tissue.set
vals.f[which(lapply(tissue.sets,paste,collapse='.')==tissue.set)]<-NA
names(vals.f)[which(lapply(tissue.sets,paste,collapse='.')==tissue.set)]<-tissue.set
num.genes[which(lapply(tissue.sets,paste,collapse='.')==tissue.set)]<-NA
names(num.genes)[which(lapply(tissue.sets,paste,collapse='.')==tissue.set)]<-tissue.set
de.genes.to.fem[which(lapply(tissue.sets,paste,collapse='.')==tissue.set)]<-NA
names(de.genes.to.fem)[which(lapply(tissue.sets,paste,collapse='.')==tissue.set)]<-tissue.set
de.genes.to.mal[which(lapply(tissue.sets,paste,collapse='.')==tissue.set)]<-NA
names(de.genes.to.mal)[which(lapply(tissue.sets,paste,collapse='.')==tissue.set)]<-tissue.set
next
}
events.tab<-read.table(paste('/Users/karleg/Dimorph/other/',tissue.set,event.type,'.txt',sep=''))
anno.tab<-read.table(paste0('/Users/karleg/Dimorph/fromGTF.',toupper(event.type),'.txt'),header = T)
anno.tab<-anno.tab[anno.tab$ID %in% rownames(events.tab[abs(events.tab$logFC)>=log2(1.5) & events.tab$adj.P.Val<=0.05,]),]
vals.m[which(unlist(lapply(tissue.sets,paste,collapse='.'))==tissue.set)]<-nrow(events.tab[events.tab$logFC>=log2(1.5) & events.tab$adj.P.Val<=0.05,])
names(vals.m)[which(unlist(lapply(tissue.sets,paste,collapse='.'))==tissue.set)]<-tissue.set
vals.f[which(unlist(lapply(tissue.sets,paste,collapse='.'))==tissue.set)]<-nrow(events.tab[events.tab$logFC<=(-log2(1.5)) & events.tab$adj.P.Val<=0.05,])
names(vals.f)[which(unlist(lapply(tissue.sets,paste,collapse='.'))==tissue.set)]<-tissue.set
num.genes[which(unlist(lapply(tissue.sets,paste,collapse='.'))==tissue.set)]<-ifelse(nrow(anno.tab)>0,length(unique(anno.tab$geneSymbol)),0)
names(num.genes)[which(unlist(lapply(tissue.sets,paste,collapse='.'))==tissue.set)]<-tissue.set
}
labels<-read.csv('/Users/karleg/Dimorph/labels.tsv',sep='\t')
library(ggplot2)
df <- data.frame(Log2DEGenes=log2(as.numeric(c(de.genes.to.mal,de.genes.to.fem))),Gender=c(rep('Male',length(de.genes.to.mal)),rep('Female',length(de.genes.to.fem))),Tissue=rep(labels[match(labels[,1],names(de.genes.to.fem)),2],2))
df<-df[!is.na(df$Log2DEGenes),]
df<-df[df$Log2DEGenes>0,]
ggplot(df, aes(fill=Gender, y=Log2DEGenes, x=Tissue)) +
geom_bar(position="dodge", stat="identity") + theme(axis.text.x=element_text(angle = -90, hjust = 0))+ scale_fill_manual("legend", values = c("Male" = "Blue", "Female" = "Red"))+ylab(bquote('Number of DE genes ('* ~Log[2]*')'))+ theme(axis.text.x=element_text(vjust=0.2))
df <- data.frame(Log2DEGenes=log2(as.numeric(c(vals.m,vals.f))),Gender=c(rep('Male',length(vals.m)),rep('Female',length(vals.f))),Tissue=rep(labels[match(labels[,1],names(vals.f)),2],2))
df<-df[!is.na(df$Log2DEGenes),]
df<-df[df$Log2DEGenes>0,]
ggplot(df, aes(fill=Gender, y=Log2DEGenes, x=Tissue)) +
geom_bar(position="dodge", stat="identity") + theme(axis.text.x=element_text(angle = -90, hjust = 0))+ scale_fill_manual("legend", values = c("Male" = "Blue", "Female" = "Red"))+ylab(bquote('Number of ASE events ('* ~Log[2]*')'))+ theme(axis.text.x=element_text(vjust=0.2))
save.image('/Users/karleg/Dimorph/RDATA/figureS1.RData')
#Before running the following, use the Session menu to set working directory to source file location
load('figureS1.RData')
library(ggplot2)
library(ggsci)
p<- ggplot(df, aes(fill=Gender, y=Log2DEGenes, x=Tissue)) + geom_bar(position="dodge", stat="identity") + theme_minimal() + scale_fill_npg()
p<-p+theme(axis.text.x=element_text(angle = -90, hjust = 0,vjust=0.2),
axis.title.x = element_blank(),
text = element_text(size=20),
axis.text = element_text(size=20, hjust=0.5),
axis.title.y = element_text(size=20),
legend.title = element_blank(),
legend.position="top")+
#scale_fill_manual("legend", values = c("Male", "Female"))+
ylab(bquote('Number of differentially expressed genes ('*log[2]*')'))
p
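# Note: the repeated index lookup in the loop above could be precomputed once;
# a sketch (not part of the original analysis):
# set.names <- unlist(lapply(tissue.sets, paste, collapse = '.'))
# idx <- which(set.names == tissue.set)
# vals.m[idx] <- ...; names(vals.m)[idx] <- tissue.set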
| /dimorphAS/figures/supplementalFigures/figureS1.R | no_license | cgpu/sbas-nf | R | false | false | 5,995 | r |
rm(list = ls())
source('~/CS6957/hw4/GP.r')
#plot.new();
#lamda = 0.1;
#plot(x,y)
eps = 1e-5;
x = seq(-5.0, 5.0, by=0.01);
y = drawsample(x, 0.01, eps);
plot(x,y,type='l', lwd=3,col="green",
ylim =c(-3,3),
main=c("Gaussian process samples"))
y = drawsample(x, 0.1, eps);
lines(x,y,col="red",lwd=3)
y = drawsample(x, 1, eps);
lines(x,y,col="blue",lwd=3)
y = drawsample(x, 10, eps);
lines(x,y,col="black",lwd=3)
#y = drawsample(x, 100, eps);
#lines(x,y,col="magenta",lwd=3)
legend('bottomright',
c(expression(paste(lambda,"= 0.01")),
expression(paste(lambda,"= 0.1")),
expression(paste(lambda,"= 1")),
expression(paste(lambda,"= 10"))),
col=c("green","red","blue","black"),lwd=3,cex=0.6)
#dev.copy2pdf(file = "p1.pdf")
| /hw4/p1.r | no_license | prateep03/CS6957 | R | false | false | 770 | r |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lambda.r
\name{lambda}
\alias{lambda}
\title{Lambda syntax for array iteration}
\usage{
lambda(fml, along, group = c(), simplify = TRUE, envir = parent.frame())
}
\arguments{
\item{fml}{A call prefixed with a tilde}
\item{along}{A named vector which objects to subset (eg: c(x=1))}
\item{group}{Not implemented}
\item{simplify}{Return array instead of index+result if scalar}
\item{envir}{Environment where variables can be found}
}
\description{
Lambda syntax for array iteration
}
}
| /man/lambda.Rd | permissive | haimeh/narray | R | false | true | 565 | rd |
\name{wblr.fit}
\alias{wblr.fit}
\title{
Add Fit Distributions to \code{wblr} Objects
}
\description{
This function fits probability distributions to \code{wblr} objects.
}
\usage{wblr.fit(x, modify.by.t0=FALSE, \dots)}
\arguments{
\item{x}{
Object of class \code{"wblr"}.
}
\item{modify.by.t0}{
A logical value signifying whether to revise the object data by subtracting the "t0" (translation) parameter of a 3-parameter fit.
A value of TRUE generates a linearized view of the fit on its base distribution canvas. It is recommended that
the resulting object have an altered name perhaps adding a ".3p" suffix to the original wblr object to preserve original data.
}
\item{\dots}{
Options for fitting the (life-)time observations, and for plotting the results.
}
}
\details{
This function calculates fits for the (life-)time observations in the
\code{wblr} object and adds them to the object alongside any
pre-existing fits.
Fitting options are passed with the \code{dist} and \code{method.fit}
arguments:
\describe{
\item{\code{dist}}{
A character string with the target distribution for fitting.
Possible values are \code{"weibull"}, \code{"weibull2p"},
\code{"weibull3p"} (three parameter Weibull), \code{"lognormal"}
, \code{"lognormal2p"}or \code{"lognormal3p"}.
Defaults to \code{"weibull"}.
}
\item{\code{in.legend}}{
Logical value controlling the inclusion of various elements in
the legend.
If \code{in.legend=FALSE} is passed,
the resulting fit calculations will be omitted from the legend,
leaving only observation summary data.
Defaults to \code{TRUE}.
}
\item{\code{method.fit}}{
A vector of class \code{"character"} with fitting options.
Defaults to \code{"rr-xony"}.
\itemize{
\item \code{"rr"}: Rank Regression (RR). Depending on the method for
calculating probability plot positions chosen during the creation of the
\code{wblr} object (see option \code{pp} and
function \code{\link{wblr}}), this can
either be "exact median rank regression" or
"Benard's approximate median rank regression".
If this method is used then it is mandatory to additionally specify
either X-on-Y or Y-on-X regression.
\item \code{"xony"},\code{"yonx"}: Differentiate between X-on-Y and Y-on-X
regression, respectively. For rank regression in lifetime analysis,
it is best practice to use the X values ((life-)time observations)
as the response variables whose horizontal distance to the fit line
must be minimized, and the Y values (unreliabilities) as the
explanatory variable.
\item \code{"mle"}: Maximum Likelihood Estimation (MLE), using
many functions of the \pkg{debias} package.
\item \code{"mle-rba"}: Maximum Likelihood Estimation with Reduced Bias Adjustment
as popularized by Abernethy based on the median bias of MLE fitted distributions.
\item \code{"mle-unbias"}: Maximum Likelihood Estimation with bias adjustment
as popularized by Reliasoft software based on the mean bias of MLE fitted distributions.
}
}
Additionally, one can pass any options available from \code{options.wblr},
such as \code{col} or \code{is.plot.legend}. The graphical options
will be used when plotting the (life-)time observations using \code{plot.wblr}.
Subsequent calls to \code{wblr.conf} will inherit these options.
Currently, there is no graceful error recovery after attempting to fit
lifetime data including negative time observations, for example
\code{wblr.fit(wblr(-5:10)).}
}
}
\value{
The function returns its argument object \code{x}, extended with the
calculated fit and the optional graphical and calculation arguments as
provided to the function.
}
\references{
William Q. Meeker and Luis A. Escobar, (1998) "Statistical Methods for Reliability Data", Wiley-Interscience, New York
Robert B. Abernethy, (2008) "The New Weibull Handbook, Fifth Edition"
John I. McCool, (2012) "Using the Weibull Distribution: Reliability, Modeling and Inference"
Marie Laure Delignette-Muller, Christophe Dutang (2015). "fitdistrplus: An R Package for Fitting Distributions".
Journal of Statistical Software, 64(4), 1-34. URL http://www.jstatsoft.org/v64/i04/.
}
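\examples{
## a minimal sketch (illustrative failure times, not from the package documentation):
## fit a 2-parameter Weibull by X-on-Y rank regression, then add an MLE fit
da1 <- wblr(c(30, 49, 82, 90, 96))
da1 <- wblr.fit(da1, dist = "weibull2p", method.fit = "rr-xony")
da1 <- wblr.fit(da1, dist = "weibull", method.fit = "mle", col = "blue")
}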
| /man/wblr.fit.Rd | no_license | cran/WeibullR | R | false | false | 4,956 | rd |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bandwidth_selection_CV.R
\name{get_criterion_CV}
\alias{get_criterion_CV}
\title{Optimisation criterion for bandwidth selection using the Cross Validation method.}
\usage{
get_criterion_CV(Kernel, data, maxEval)
}
\arguments{
\item{Kernel}{A real function. The kernel.}
\item{data}{A double vector of the sample data to use.}
\item{maxEval}{A double vector of length 1. The maximum number of function evaluations when integrating.}
}
\value{
A vectorised single-parameter function. The Cross Validation bandwidth selection
optimisation criterion.
}
\description{
Returns the Cross Validation criterion for fixed kernel and data, depending only on the bandwidth h.
Minimise it to find the optimal value for h.
}
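\examples{
## sketch with an assumed Gaussian kernel and simulated data (both hypothetical):
crit <- get_criterion_CV(Kernel = dnorm, data = rnorm(50), maxEval = 1000)
## minimise the criterion over a bandwidth grid
h.grid <- seq(0.1, 2, by = 0.1)
h.opt <- h.grid[which.min(crit(h.grid))]
}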
| /KDE.Rcheck/00_pkg_src/KDE/man/get_criterion_CV.Rd | no_license | CarlaSa/R-Project_KDE | R | false | true | 775 | rd |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/brainvoyager.R
\name{read.smp.brainvoyager.v3}
\alias{read.smp.brainvoyager.v3}
\title{Read Brainvoyager statistical surface results from v3 SMP file.}
\usage{
read.smp.brainvoyager.v3(filepath)
}
\arguments{
\item{filepath}{character string, path to file in Brainvoyager SMP file format}
}
\value{
named list of file contents
}
\description{
Read Brainvoyager statistical surface results from v3 SMP file.
}
\note{
Do not call this, call \code{read.smp.brainvoyager} instead, which will figure out the version and call the appropriate function.
}
\keyword{internal}
| /man/read.smp.brainvoyager.v3.Rd | permissive | dfsp-spirit/freesurferformats | R | false | true | 645 | rd |
\name{connectToServerApp}
\alias{connectToServerApp}
\title{connectToServerApp}
\usage{
connectToServerApp(port, timeout)
}
\arguments{
\item{port}{The port on which the server application listens.}
\item{timeout}{Timeout for the connection attempt.}
}
\value{
connection
}
\description{
connectToServerApp
}
\author{
Barnet Wagman
}
\keyword{internal}
| /man/connectToServerApp.Rd | no_license | cran/rreval | R | false | false | 283 | rd |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/class_AmGraph.R
\docType{class}
\name{AmGraph-class}
\alias{AmGraph-class}
\title{AmGraph class}
\description{
Creates the visualization of the data in following types:
line, column, step line, smoothed line, ohlc and candlestick.
}
\details{
Run \code{api("AmGraph")} for more details and all avalaible properties.
}
\section{Slots}{
\describe{
\item{\code{balloonText}}{\code{character}.
Balloon text. You can use tags like [[value]], [[description]], [[percents]], [[open]], [[category]]
or any other field name from your data provider. HTML tags can also be used.}
\item{\code{title}}{\code{character}. Graph title.}
\item{\code{type}}{\code{character}.
Type of the graph. Possible values are: "line", "column", "step", "smoothedLine", "candlestick", "ohlc".
XY and Radar charts can only display "line" type graphs.}
\item{\code{valueField}}{\code{character}.
Name of the value field in your dataProvider.}
\item{\code{listeners}}{\code{"list"} containing the listeners to add to the object.
The list must be named as in the official API. Each element must be a character string.
See examples for details.}
\item{\code{otherProperties}}{\code{"list"}
containing other avalaible properties not yet implemented in the package.}
\item{\code{value}}{\code{numeric}.}
}}
| /man/AmGraph-class.Rd | no_license | datastorm-open/rAmCharts | R | false | true | 1,368 | rd |
%
% Copyright 2007-2018 The OpenMx Project
%
% Licensed under the Apache License, Version 2.0 (the "License");
% you may not use this file except in compliance with the License.
% You may obtain a copy of the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS,
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
% See the License for the specific language governing permissions and
% limitations under the License.
\name{mxFitFunctionML}
\alias{mxFitFunctionML}
\alias{MxFitFunctionML-class}
\alias{print,MxFitFunctionML-method}
\alias{show,MxFitFunctionML-method}
\title{Create MxFitFunctionML Object}
\description{
This function creates a new MxFitFunctionML object.
}
\usage{
mxFitFunctionML(vector = FALSE, rowDiagnostics = FALSE, ..., fellner =
as.logical(NA), verbose=0L, profileOut=c(),
rowwiseParallel=as.logical(NA), jointConditionOn = c("auto", "ordinal", "continuous"))
}
\arguments{
\item{vector}{A logical value indicating whether the objective function result is the likelihood vector.}
\item{rowDiagnostics}{A logical value indicating whether the row-wise
results of the objective function should be returned as an attribute
of the fit function.}
\item{...}{Not used. Forces remaining arguments to be specified by name.}
\item{fellner}{Whether to fully expand the covariance matrix for
maximum flexibility.}
\item{verbose}{Level of diagnostic output}
\item{profileOut}{Character vector naming constant coefficients to
profile out of the likelihood (sometimes known as REML)}
\item{rowwiseParallel}{For raw data only, whether to use OpenMP to parallelize the
evaluation of rows}
\item{jointConditionOn}{The evaluation strategy when both continuous
and ordinal data are present.}
}
\details{
Fit functions are functions for which free parameter values are optimized such that the value of a cost function is minimized. The mxFitFunctionML function computes -2*(log likelihood) of the data given the current values of the free parameters and the expectation function (e.g., \link{mxExpectationNormal} or \link{mxExpectationRAM}) selected for the model.
The 'vector' argument is either TRUE or FALSE, and determines whether the objective function returns a column vector of the likelihoods, or a single -2*(log likelihood) value.
The 'rowDiagnostics' argument is either TRUE or FALSE, and determines whether the row likelihoods are returned as an attribute of the fit function. It is sometimes useful to inspect the likelihoods for outliers, diagnostics, or other anomalies.
When \code{vector=FALSE} and \code{rowDiagnostics=TRUE}, fitfunction can be referenced in the model and included in algebras as a scalar. The row likelihoods are an attribute of the fit function but are not accessible in the model during optimization. The row likelihoods are accessible to the user after the model has been run.
By default, \code{jointConditionOn='auto'} and a heuristic will be used
to select the fastest algorithm. Conditioning the continuous data on
ordinal will be superior when there are relatively few unique ordinal
patterns. Otherwise, conditioning the ordinal data on continuous will
perform better when there are relatively many ordinal patterns.
Usage Notes:
The results of the optimization can be reported using the \link{summary} function, or accessed directly in the 'output' slot of the resulting model (i.e., modelName$output). Components of the output may be referenced using the \link{Extract} functionality.
}
\value{
Returns a new MxFitFunctionML object. One and only one MxFitFunctionML object should be included in each model along with an associated \link{mxExpectationNormal} or \link{mxExpectationRAM} object.
}
\seealso{
Other fit functions:
\code{\link{mxFitFunctionMultigroup}},
\code{\link{mxFitFunctionWLS}}, \code{\link{mxFitFunctionAlgebra}},
\code{\link{mxFitFunctionGREML}}, \code{\link{mxFitFunctionR}},
\code{\link{mxFitFunctionRow}}
More information about the OpenMx package may be found \link[=OpenMx]{here}.
}
\references{
The OpenMx User's guide can be found at http://openmx.ssri.psu.edu/documentation.
}
\examples{
# Create and fit a model using mxMatrix, mxAlgebra, mxExpectationNormal, and mxFitFunctionML
library(OpenMx)
# Simulate some data
x=rnorm(1000, mean=0, sd=1)
y= 0.5*x + rnorm(1000, mean=0, sd=1)
tmpFrame <- data.frame(x, y)
tmpNames <- names(tmpFrame)
# Define the matrices
M <- mxMatrix(type = "Full", nrow = 1, ncol = 2, values=c(0,0),
free=c(TRUE,TRUE), labels=c("Mx", "My"), name = "M")
S <- mxMatrix(type = "Full", nrow = 2, ncol = 2, values=c(1,0,0,1),
free=c(TRUE,FALSE,FALSE,TRUE), labels=c("Vx", NA, NA, "Vy"), name = "S")
A <- mxMatrix(type = "Full", nrow = 2, ncol = 2, values=c(0,1,0,0),
free=c(FALSE,TRUE,FALSE,FALSE), labels=c(NA, "b", NA, NA), name = "A")
I <- mxMatrix(type="Iden", nrow=2, ncol=2, name="I")
# Define the expectation
expCov <- mxAlgebra(solve(I-A) \%*\% S \%*\% t(solve(I-A)), name="expCov")
expFunction <- mxExpectationNormal(covariance="expCov", means="M", dimnames=tmpNames)
# Choose a fit function
fitFunction <- mxFitFunctionML(rowDiagnostics=TRUE)
# also return row likelihoods, even though the fit function
# value is still 1x1
# Define the model
tmpModel <- mxModel(model="exampleModel", M, S, A, I, expCov, expFunction, fitFunction,
mxData(observed=tmpFrame, type="raw"))
# Fit the model and print a summary
tmpModelOut <- mxRun(tmpModel)
summary(tmpModelOut)
fitResOnly <- mxEval(fitfunction, tmpModelOut)
attributes(fitResOnly) <- NULL
fitResOnly
# Look at the row likelihoods alone
fitLikeOnly <- attr(mxEval(fitfunction, tmpModelOut), 'likelihoods')
head(fitLikeOnly)
}
| /man/mxFitFunctionML.Rd | no_license | mileysmiley/OpenMx | R | false | false | 5,940 | r |
library(PwrGSD)
### Name: mystack
### Title: Stack a dataset
### Aliases: mystack
### Keywords: data
### ** Examples
## none as yet
| /data/genthat_extracted_code/PwrGSD/examples/Ch14-mystack.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 141 | r |
# Matrix inversion is usually a costly computation and there may be some benefit
# to caching the inverse of a matrix rather than compute it repeatedly. The
# following two functions are used to cache the inverse of a matrix.
# makeCacheMatrix creates a list containing a function to
# 1. set the value of the matrix
# 2. get the value of the matrix
# 3. set the value of inverse of the matrix
# 4. get the value of inverse of the matrix
makeCacheMatrix <- function(x = matrix()) {
  # makeCacheMatrix creates a list containing the four functions above
  inv <- NULL
  # inv holds the cached inverse; NULL until it is computed
  set <- function(y) {
    x <<- y
    inv <<- NULL
    # a new matrix invalidates any previously cached inverse
  }
  get <- function() x
  setInverse <- function(invVal) {
    inv <<- invVal
  }
  getInverse <- function() inv
  # return the list of accessor functions
  list(set = set, get = get, setInverse = setInverse, getInverse = getInverse)
}
# The next function checks if the inverse has already been computed; if it has, it
# returns the stored value, else it calculates the inverse of the matrix.
# This function assumes that the matrix is always invertible.
cacheSolve <- function(x, ...) {
  inv <- x$getInverse()
  if (!is.null(inv)) {
    message("getting cached data.")
    # data was already cached, so return it without recomputing
    return(inv)
  }
  data <- x$get()
  inv <- solve(data, ...)
  x$setInverse(inv)
  # return the freshly computed inverse
  inv
}
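# Example usage (illustrative):
# m <- makeCacheMatrix(matrix(c(2, 0, 0, 2), 2, 2))
# cacheSolve(m)  # computes, caches, and returns the inverse
# cacheSolve(m)  # prints "getting cached data." and returns the cached inverse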
| /cachematrix.R | no_license | rishavdasgupta/ProgrammingAssignment2 | R | false | false | 1,429 | r |
a <- 0
b <- 0
phi <- 1.6180339887498949
a <- floor(runif(3500000)*1000)
run <- function() {
(phi^a - (-phi)^(-a))/sqrt(5)
}
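# sanity check (illustrative): Binet's formula reproduces small Fibonacci numbers,
# e.g. (phi^10 - (-phi)^(-10))/sqrt(5) is 55 up to floating-point error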
| /microbenchmarks/att/fibonacci.R | no_license | bedatadriven/renjin-benchmarks | R | false | false | 128 | r |
## First load the power data from 2007-02-01 to 2007-02-02
pwr <- read.table("household_power_consumption.txt", stringsAsFactors = FALSE,
header = TRUE, sep=";", na.strings = "?")
## Convert dates to Date objects for subsetting
pwr <- subset(pwr, as.Date(Date, "%d/%m/%Y") == "2007-02-01" |
as.Date(Date, "%d/%m/%Y") == "2007-02-02")
## Convert dates and times to DateTime col
pwr$DateTime <- paste(pwr$Date, pwr$Time)
pwr$DateTime <- strptime(pwr$DateTime, "%d/%m/%Y %H:%M:%S")
## We need to convert cols to numeric to plot them
pwr$Global_active_power <- as.numeric(pwr$Global_active_power)
## Set the output format and file name for the plot
png(file = "plot2.png", width = 480, height = 480, units = "px")
with(pwr, plot(DateTime, Global_active_power, xlab = "",
type = "n", ylab = "Global Active Power (kilowatts)"))
with(pwr, lines(DateTime, Global_active_power))
dev.off()
| /plot2.R | no_license | adamazoulay/ExData_Plotting1 | R | false | false | 955 | r |
# Reverse input string
library(stringr)
str_rev <- function(string){
paste(rev(str_split(string, "")[[1]]), collapse="")
}
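# e.g. str_rev("hello") returns "olleh"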
gz_unzip <- function(gzf){
fn1 <- gzf
fn2 <- sub('\\.gz$', '', fn1)
con <- gzfile(fn1, 'rb')
out <- file(fn2, 'wb')
while (length(buf <- readBin(con=con, what="raw", n=0x8000)) > 0){
writeBin(buf, out)
}
close(con)
close(out)
}
| /NLPUtil.R | no_license | tmyst/R_Functions | R | false | false | 381 | r |
# Main script
# load functions to read raw emails
source('source/readEmails.R')
# if spamdata is zipped uncomment
# unzip('spamdata.zip')
# list emails from directory where raw emails are located
# and parse with readMessage (from readEmails.R)
spamDir <- "spamdata"
paths <- list.files(spamDir, recursive = TRUE)
emails <- lapply(paths, readMessage, spamDir)
# save R object if you want
save(emails, file = "emails.rda")
load('emails.rda')
# remove the one bad email
# this one is returned as "NULL" from our function
emails <- emails[-4865]
# this gives a T/F vector indicating spam/no spam
isSpam <- do.call(c, lapply(emails, function(x)
grepl("spam",strsplit(x$path, "/")[[1]][1])))
# small script to list functions in 'spam_fns.R'
# and apply them to get our variables or 'features'
# from the emails
x <- new.env()
source('source/spam_fns.R', local=x)
fns <- ls(envir = x)
source('source/spam_fns.R')
apply.fns <- function(fn, emails)
{
f <- get(fn)
do.call(c, lapply(emails, f))
}
# data frame with results
results <- as.data.frame(do.call(cbind, lapply(fns, apply.fns, emails)))
colnames(results) <- fns
# partition data
part <- sample(1:nrow(results), round(nrow(results)*.1))
train <- results[-part,]
test <- results[part,]
isSpam.train <- isSpam[-part]
isSpam.test <- isSpam[part]
test <- impute.na(test)
train <- impute.na(train)
source('source/KNN.R')
ntest <- nrow(test)
spam.preds <- do.call(c, lapply(1:ntest, k.neighbor, test=test, train=train, isSpam=isSpam.train, k=10))
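# quick evaluation of the predictions against the held-out labels
# (a sketch; assumes spam.preds is a logical vector parallel to isSpam.test)
mean(spam.preds == isSpam.test)                      # overall accuracy
table(predicted = spam.preds, actual = isSpam.test)  # confusion table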
| /mainScript.R | no_license | ncbernstein/spam_prediction | R | false | false | 1,519 | r |
df1<-(iris)
df<- createDataFrame(df1)
class(df1)
class(df)
head(select(df, df$Sepal_Length, df$Species))
head(filter(df, df$Sepal_Length>5.5))
head(select(filter(df, df$Sepal_Length>5.5), df$Sepal_Length, df$Species))
head(summarize(groupBy(df, df$Species), mean=mean(df$Sepal_Length), count=n(df$Sepal_Length)))
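# summarize() aggregates grouped data; e.g. a per-species maximum (illustrative):
head(summarize(groupBy(df, df$Species), max_len=max(df$Sepal_Length)))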
| /ProblemSets/PS4/PS4b_Hoehne.R | permissive | Life-According-to-Jordan/DScourseS18 | R | false | false | 328 | r |
##################################################################
# bgtWidth() function #
##################################################################
"bgtWidth" <- function(n, s, p, conf.level = 0.95,
alternative = "two.sided", method = "CP") {
  if (any(n <= 3)) {
    stop("the number of groups n allowed in calculations must be integers greater than 3")
  }
if (any(s < 1)) {
stop("group size s must be specified as integers > 0")
}
if (length(conf.level) != 1 || conf.level < 0 || conf.level > 1) {
stop("conf.level must be a positive number between 0 and 1")
}
if (length(p) != 1 || p > 1 || p < 0) {
stop("true proportion p must be specified as a single number between 0 and 1")
}
  # note: the Wilson (score) interval is labelled "Wilson" to match the
  # method names handled in bgtWidthI() below
  method <- match.arg(method, choices = c("CP", "Blaker", "AC",
                                          "Wilson", "Wald", "soc"))
alternative <- match.arg(alternative, choices = c("two.sided",
"less", "greater"))
# calculations:
matnsp <- cbind(n, s, p)
matnsp <- cbind("ns" = matnsp[,1] * matnsp[,2], matnsp)
power <- numeric(length = nrow(matnsp))
bias <- numeric(length = nrow(matnsp))
expCIwidth <- numeric(length = nrow(matnsp))
for (i in 1:length(expCIwidth)) {
expCIwidth[i] <- bgtWidthI(n = matnsp[[i,2]], s = matnsp[[i,3]],
p = matnsp[[i,4]], conf.level = conf.level,
alternative = alternative,
method = method)$expCIWidth
}
return(as.matrix(cbind(matnsp,expCIwidth)))
}
# Brianna Hitt - 02-13-2020
# Changed class from "binWidth" to "gtWidth"
"bgtWidthI" <- function(n, s, p, conf.level = 0.95,
alternative = "two.sided", method = "CP") {
# indicator function for the CI length at a special event
# in one sided case: length is defined as absolute difference between
# estimator and confidence bound
L.Ind <- function(y, n, s, p, conf.level, alternative, method) {
if (method == "Wald") {
int <- bgtWald(y = y, n = n, s = s, conf.level = conf.level,
alternative = alternative)
}
if (method == "Wilson") {
int <- bgtWilson(y = y, n = n, s = s, conf.level = conf.level,
alternative = alternative)
}
if (method == "AC") {
int <- bgtAC(y = y, n = n, s = s, conf.level = conf.level,
alternative = alternative)
}
if (method == "soc") {
int <- bgtSOC(y = y, n = n, s = s, conf.level = conf.level,
alternative = alternative)
}
if (method == "CP") {
int <- bgtCP(y = y, n = n, s = s, conf.level = conf.level,
alternative = alternative)
}
if (method == "Blaker") {
int <- bgtBlaker(y = y, n = n, s = s, conf.level = conf.level)
}
if (alternative == "less") {
CIlength <- int[[2]] - p
}
if (alternative == "greater") {
CIlength <- p - int[[1]]
}
if (alternative == "two.sided") {
CIlength <- int[[2]] - int[[1]]
}
CIlength
}
# Probability of a single event, the group testing density:
bgt.prob <- function(n, y, s, p.tr) {
theta <- 1 - (1 - p.tr)^s
dbinom(x = y, size = n, prob = theta)
}
# calculate this for all possible events:
yvec <- 0:n
Lvec <- numeric(length = length(yvec))
probvec <- numeric(length = length(yvec))
for (i in 1:length(yvec)) {
Lvec[i] <- L.Ind(y = yvec[i], n = n, s = s, p = p, conf.level = conf.level,
alternative = alternative, method = method)
probvec[i] <- bgt.prob(y = yvec[i], n = n, s = s, p.tr = p)
}
expCILength <- sum(Lvec * probvec)
# E(X) = sum(Xi * prob(Xi))
out <- list(expCIWidth = expCILength, alternative = alternative,
n = n, s = s, p = p)
class(out) <- "gtWidth"
return(out)
}
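# Illustrative Monte Carlo check (a sketch, not part of the original
# package code): the expected width computed analytically above can be
# approximated by simulating y ~ Binomial(n, 1 - (1 - p)^s) and averaging
# the CI widths, e.g. for the two-sided "CP" method:
#
#   n <- 20; s <- 5; p <- 0.01; B <- 10000
#   theta <- 1 - (1 - p)^s
#   y <- rbinom(B, size = n, prob = theta)
#   widths <- vapply(y, function(yy) {
#     ci <- bgtCP(y = yy, n = n, s = s, conf.level = 0.95,
#                 alternative = "two.sided")
#     ci[[2]] - ci[[1]]
#   }, numeric(1))
#   mean(widths)  # should approximate bgtWidthI(n, s, p)$expCIWidth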
##################################################################
# gtWidth() function #
##################################################################
#' @title Expected width of confidence intervals in group testing
#'
#' @description Calculation of the expected value of the width of
#' confidence intervals for one proportion in group testing. Calculations
#' are available for the confidence interval methods in \code{\link{propCI}}.
#'
#' @param n integer specifying the number of groups. A vector of
#' integers is also allowed.
#' @param s integer specifying the common size of groups. A vector
#' of integers is also allowed.
#' @param p the assumed true proportion of individuals showing
#' the trait to be estimated. A vector is also allowed.
#' @param conf.level the required confidence level of the interval.
#' @param alternative character string specifying the alternative
#' hypothesis, either \kbd{"two.sided"}, \kbd{"less"}, or \kbd{"greater"}.
#' @param method character string specifying the confidence
#' interval method. Available options include those in \code{\link{propCI}}.
#'
#' @details The two-sided (\kbd{alternative="two.sided"}) option calculates the
#' expected width between the lower and upper bound of a two-sided
#' \eqn{conf.level*100} percent confidence interval. See Tebbs & Bilder (2004)
#' for expression. The one-sided (\kbd{alternative="less"} or
#' \kbd{alternative="greater"}) options calculate the expected distance between the
#' one-sided limit and the assumed true proportion \kbd{p} for a one-sided
#' \eqn{conf.level*100} percent confidence interval.
#'
#' @return A matrix containing the columns:
#' \item{ns}{the resulting total number of units, \eqn{n*s}.}
#' \item{n}{the number of groups.}
#' \item{s}{the group size.}
#' \item{p}{the assumed true proportion.}
#' \item{expCIWidth}{the expected value of the confidence
#' interval width as defined under the argument \kbd{alternative}.}
#'
#' @author This function was originally written as \code{bgtWidth} by Frank
#' Schaarschmidt for the \code{binGroup} package. Minor modifications have
#' been made for inclusion of the function in the \code{binGroup2} package.
#'
#' @references
#' \insertRef{Tebbs2004}{binGroup2}
#'
#' @seealso \code{\link{propCI}} for confidence intervals in
#' group testing.
#'
#' @family estimation functions
#'
#' @examples
#' # Examine different group sizes to determine
#' # the shortest expected width.
#' gtWidth(n = 20, s = seq(from = 1, to = 200, by = 10),
#' p = 0.01, alternative = "less", method = "CP")
#'
#' # Calculate the expected width of the confidence
#' # interval with a group size of 1 (individual testing).
#' gtWidth(n = 20, s = 1, p = 0.005, alternative = "less", method = "CP")
gtWidth <- bgtWidth
#
| /R/gtWidth.R | no_license | cran/binGroup2 | R | false | false | 6,975 | r |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/StyleValues.R
\name{getNodeProperty}
\alias{getNodeProperty}
\title{Get Node Property Values}
\usage{
getNodeProperty(node.names, visual.property, network = NULL,
base.url = .defaultBaseUrl)
}
\arguments{
\item{node.names}{List of node names}
\item{visual.property}{Name of a visual property. See \link{getVisualPropertyNames}.}
\item{network}{(optional) Name or SUID of the network. Default is the "current" network active in Cytoscape.}
\item{base.url}{(optional) Ignore unless you need to specify a custom domain,
port or version to connect to the CyREST API. Default is http://localhost:1234
and the latest version of the CyREST API supported by this version of RCy3.}
}
\value{
Property value
}
\description{
Get values for any node property of the specified nodes.
}
\details{
This method retrieves the actual property of the node, given the
current visual style, factoring together any default, mapping and bypass setting.
}
\examples{
\donttest{
getNodeProperty(c('node 0','node 1'),'NODE_SHAPE')
}
}
| /man/getNodeProperty.Rd | permissive | KUNJU-PITT/RCy3 | R | false | true | 1,093 | rd |
Rud.D <-
function(cutscore, quadrature, sem)
{
  # Expected classification accuracy and consistency given the score
  # points (quadrature[[1]]), their weights (quadrature[[2]]) and the
  # conditional standard errors of measurement (sem)
  os <- quadrature[[1]]
  we <- quadrature[[2]]
  nn <- length(os)
  nc <- length(cutscore)
  esacc <- escon <- matrix(NA, nc, nn,
                           dimnames = list(paste("cut at", round(cutscore, 3)),
                                           round(os, 3)))
  # cut-by-cut conditional accuracy and consistency
  for (j in 1:nc) {
    cuts <- c(-Inf, cutscore[j], Inf)
    categ <- cut(os, cuts, labels = FALSE, right = FALSE)
    for (i in 1:nn) {
      esacc[j, i] <- pnorm(cuts[categ[i] + 1], os[i], sem[i]) -
                     pnorm(cuts[categ[i]], os[i], sem[i])
      escon[j, i] <- (pnorm(cuts[2], os[i], sem[i]) - pnorm(cuts[1], os[i], sem[i]))^2 +
                     (pnorm(cuts[3], os[i], sem[i]) - pnorm(cuts[2], os[i], sem[i]))^2
    }
  }
  if (nc == 1) {
    ans <- list("Marginal" = cbind("Accuracy" = apply(esacc, 1, weighted.mean, we),
                                   "Consistency" = apply(escon, 1, weighted.mean, we)),
                "Conditional" = list("Accuracy" = t(esacc),
                                     "Consistency" = t(escon)))
    return(ans)
  } else {
    # simultaneous classification across all cutscores
    simul <- matrix(NA, nn, 2,
                    dimnames = list(round(os, 3), c("Accuracy", "Consistency")))
    cuts <- c(-Inf, cutscore, Inf)
    categ <- cut(os, cuts, labels = FALSE, right = FALSE)
    for (i in 1:nn) {
      simul[i, 1] <- pnorm(cuts[categ[i] + 1], os[i], sem[i]) -
                     pnorm(cuts[categ[i]], os[i], sem[i])
      sha <- 0
      for (j in 1:(nc + 1)) {
        sha <- sha + (pnorm(cuts[j + 1], os[i], sem[i]) - pnorm(cuts[j], os[i], sem[i]))^2
      }
      simul[i, 2] <- sha
    }
    ans <- list("Marginal" = rbind(cbind("Accuracy" = apply(esacc, 1, weighted.mean, we),
                                         "Consistency" = apply(escon, 1, weighted.mean, we)),
                                   "Simultaneous" = apply(simul, 2, weighted.mean, we)),
                "Conditional" = list("Accuracy" = cbind(t(esacc), "Simultaneous" = simul[, 1]),
                                     "Consistency" = cbind(t(escon), "Simultaneous" = simul[, 2])))
    return(ans)
  }
}
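# Usage sketch (illustrative values, not data from the package):
# theta <- seq(-4, 4, length.out = 41)            # score/quadrature points
# wts <- dnorm(theta); wts <- wts / sum(wts)      # quadrature weights
# csem <- rep(0.3, length(theta))                 # conditional SEMs
# Rud.D(cutscore = 0, quadrature = list(theta, wts), sem = csem)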
| /R/Rud.D.R | no_license | cran/cacIRT | R | false | false | 1,812 | r |
## A pair of functions that cache the inverse of a matrix
## Creates a special matrix object that can cache its inverse
makeCacheMatrix <- function( m = matrix() )
{
## Initialize the inverse property
i <- NULL
## Method to set the matrix
set <- function( matrix )
{
m <<- matrix
i <<- NULL
}
## Method the get the matrix
get <- function()
{
## Return the matrix
m
}
## Method to set the inverse of the matrix
setInverse <- function(inverse)
{
i <<- inverse
}
## Method to get the inverse of the matrix
getInverse <- function()
{
## Return the inverse property
i
}
## Return a list of the methods
list(set = set, get = get, setInverse = setInverse, getInverse = getInverse)
}
## Compute the inverse of the special matrix returned by "makeCacheMatrix"
## above. If the inverse has already been calculated (and the matrix has not
## changed), then the "cachesolve" should retrieve the inverse from the cache.
cacheSolve <- function(x, ...)
{
## Return a matrix that is the inverse of 'x'
m <- x$getInverse()
## Just return the inverse if its already set
if( !is.null(m) )
{
message("getting cached data")
return(m)
}
## Get the matrix from our object
data <- x$get()
    ## Calculate the inverse of the matrix
    m <- solve(data, ...)
## Set the inverse to the object
x$setInverse(m)
## Return the matrix
m
}
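## Usage sketch (behavior follows directly from the functions above):
## cm <- makeCacheMatrix(matrix(c(2, 0, 0, 2), nrow = 2))
## cacheSolve(cm)   # computes the inverse and stores it in the cache
## cacheSolve(cm)   # prints "getting cached data" and returns the cache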
| /cachematrix.R | no_license | psshanthi/ProgrammingAssignment2 | R | false | false | 1,469 | r |
context("Path Functions")
#' swap2DfCols function ---------------------------------
df_temp <- data.frame(cbind(rep(1, 100), rep(5, 100)))
df_temp2 <- data.frame(cbind(rep(5, 100), rep(10, 100), rep(15, 100)))
df_temp_swapped <- TCpredictionbands::swap2DfCols(df_temp)
test_that("Dimension and values are correct for the first example", {
expect_equal(dim(df_temp_swapped), c(100, 2))
expect_equal(df_temp_swapped, data.frame(cbind(rep(5, 100), rep(1, 100))))
})
df_temp_swapped2 <- TCpredictionbands::swap2DfCols(df_temp2)
test_that("Dimension and values are correct for the second example", {
expect_equal(dim(df_temp_swapped2), c(100, 2))
expect_equal(df_temp_swapped2,
data.frame(cbind(rep(10, 100), rep(5, 100))))
})
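# Additional check (an assumption extrapolated from the cases above:
# swap2DfCols() appears to swap the first two columns and drop any others)
df_temp3 <- data.frame(cbind(rep(2, 10), rep(3, 10)))
test_that("Dimension and values are correct for a plain two-column input", {
  df_temp3_swapped <- TCpredictionbands::swap2DfCols(df_temp3)
  expect_equal(dim(df_temp3_swapped), c(10, 2))
  expect_equal(df_temp3_swapped, data.frame(cbind(rep(3, 10), rep(2, 10))))
})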
| /TCpredictionbands/tests/testthat/test-path-functions.R | permissive | Mr8ND/TC-prediction-bands | R | false | false | 751 | r |
freq_to_unit <- function(freq_distr){
#' Individual rankings/orderings from the frequency distribution
#'
#' Construct the dataset of individual rankings/orderings from the frequency distribution of the distinct observed sequences.
#'
#' @param freq_distr Numeric matrix of the distinct observed sequences with the corresponding frequencies indicated in the last \eqn{(K+1)}-th column.
#'
#' @return Numeric \eqn{N}\eqn{\times}{x}\eqn{K} data matrix of observed individual sequences.
#'
#' @author Cristina Mollica and Luca Tardella
#' @examples
#'
#' library(gtools)
#' K <- 4
#' perm_matrix <- permutations(n=K, r=K)
#' freq_data <- cbind(perm_matrix, sample(1:factorial(K)))
#' freq_data
#' freq_to_unit(freq_distr=freq_data)
#'
#' @export
if(is.vector(freq_distr)){
freq_distr <- t(freq_distr)
}
K <- ncol(freq_distr)-1
r_seq <- fill_single_entries(data=freq_distr[,-(K+1)])
out <- r_seq[rep(1:nrow(r_seq),freq_distr[,(K+1)]),]
rownames(out) <- NULL
return(out)
######### ALL THE DIRECTIVES FOR CREATING THE NAMESPACE FILE
######### ARE PLACED BELOW
#'@useDynLib PLMIX, .registration = TRUE
#'@importFrom stats median
#'@importFrom stats var
#'@importFrom stats rgamma
#'@importFrom stats dgamma
#'@importFrom stats na.omit
#'@importFrom utils getFromNamespace
#'@importFrom abind adrop
#'@importFrom coda as.mcmc
#'@importFrom coda HPDinterval
#'@importFrom foreach foreach
#'@importFrom foreach %dopar%
#'@importFrom graphics plot
#'@importFrom gtools ddirichlet
#'@importFrom gtools permutations
#'@importFrom gridExtra grid.arrange
#'@importFrom ggmcmc ggmcmc
#'@importFrom ggmcmc ggs
#'@importFrom ggplot2 ggplot
#'@importFrom ggplot2 aes
#'@importFrom ggplot2 aes_string
#'@importFrom ggplot2 position_stack
#'@importFrom ggplot2 geom_bar
#'@importFrom ggplot2 coord_polar
#'@importFrom ggplot2 labs
#'@importFrom ggplot2 geom_tile
#'@importFrom ggplot2 scale_fill_brewer
#'@importFrom ggplot2 theme_void
#'@importFrom ggplot2 xlim
#'@importFrom ggplot2 scale_fill_gradient
#'@importFrom ggplot2 element_blank
#'@importFrom ggplot2 theme
#'@importFrom label.switching pra
#'@importFrom label.switching permute.mcmc
#'@importFrom MCMCpack rdirichlet
#'@importFrom reshape2 melt
#'@importFrom rcdd makeH
#'@importFrom rcdd scdd
#'@importFrom radarchart chartJSRadar
#'@importFrom Rcpp evalCpp
#'
}
unit_to_freq <- function(data){
#' Frequency distribution from the individual rankings/orderings
#'
#' Construct the frequency distribution of the distinct observed sequences from the dataset of individual rankings/orderings.
#'
#' @param data Numeric \eqn{N}\eqn{\times}{x}\eqn{K} data matrix of observed individual sequences.
#' @return Numeric matrix of the distinct observed sequences with the corresponding frequencies indicated in the last \eqn{(K+1)}-th column.
#'
#' @author Cristina Mollica and Luca Tardella
#' @examples
#'
#' ## Frequency distribution for the APA top-ordering dataset
#' data(d_apa)
#' unit_to_freq(data=d_apa)
#' @export
data <- fill_single_entries(data=data)
K <- ncol(data)
freq <- table(apply(data,1,paste,collapse="-"))
obs_seq <- matrix(as.numeric(unlist(strsplit(names(freq),split="-"))),nrow=length(freq),ncol=K,byrow=TRUE)
rownames(obs_seq) <- NULL
out <- cbind(obs_seq,freq=freq,deparse.level=0)
rownames(out) <- NULL
return(out)
}
fill_single_entries <- function(data){
#/' Utility to fill in single missing entries of top-(K-1) sequences in partial ordering/ranking datasets
#/'
#/' @param data Numeric data matrix of partial sequences.
#/'
#/' @return Numeric data matrix of partial sequences in the same format of the input \code{data} with possible single missing entries filled.
#/' @author Cristina Mollica and Luca Tardella
if(is.vector(data)){
data <- t(data)
}
K=ncol(data)
r_single_miss <- (rowSums(data==0)==1)
if(any(r_single_miss)){
w_row <- which(r_single_miss)
w_col <- apply(data[w_row,,drop=FALSE],1,function(x)which(x==0))
w_item <- apply(data[w_row,,drop=FALSE],1,setdiff,x=1:K)
data[cbind(w_row,w_col)] <- w_item
warning(paste(paste0("Top-",K-1,""),"sequencies correspond to full orderings. Single missing entries filled."), call. = FALSE)
}
return(data)
}
is.top_ordering <- function(data,...){
#' Top-ordering datasets
#'
#' Check the consistency of partial ordering data with a top-ordering dataset.
#'
#' The argument \code{data} requires the partial sequences expressed in ordering format. When the value of \code{is.top_ordering} is \code{FALSE}, the membership function also returns a message with the conditions that are not met for the \code{data} to be a top-ordering dataset. \code{NA}'s in the input \code{data} are tacitly converted into zero entries.
#'
#' @param data An object containing the partial orderings whose consistency with a top-ordering dataset has to be tested. The following classes are admissible for \code{data}: numeric \code{matrix}, \code{data.frame}, \code{RandData} from the \code{rankdist} package and \code{rankings} from the \code{PlackettLuce} package.
#' @param ... Further arguments passed to or from other methods (not used).
#'
#' @return Logical: \code{TRUE} if the \code{data} argument is consistent with a top-ordering dataset (with a possible warning message if the supplied data need a further treatment with the coercion function \code{\link{as.top_ordering}} before being processed with the core functions of \pkg{PLMIX}) and \code{FALSE} otherwise.
#'
#' @references
#' Turner, H., Kormidis, I. and Firth, D. (2018). PlackettLuce: Plackett-Luce Models for Rankings. R package version 0.2-3. \url{https://CRAN.R-project.org/package=PlackettLuce}
#'
#' Qian, Z. (2018). rankdist: Distance Based Ranking Models. R package version 1.1.3. \url{https://CRAN.R-project.org/package=rankdist}
#'
#' @author Cristina Mollica and Luca Tardella
#'
#' @seealso \code{\link[PlackettLuce]{rankings}} and \code{\link[PlackettLuce]{rankings}}
#'
#' @examples
#'
#' ## A toy example of data matrix not satisfying the conditions to be a top-ordering dataset
#' toy_data=rbind(1:5,
#' c(0,4,3,2,1),
#' c(4,3.4,2,1,5),
#' c(2,3,0,0,NA),
#' c(4,4,3,2,5),
#' c(3,5,4,2,6),
#' c(2,-3,1,4,5),
#' c(2,0,1,4,5),
#' c(2,3,1,1,1),
#' c(2,3,0,4,0))
#'
#' is.top_ordering(data=toy_data)
#'
#' ## A dataset from the StatRank package satisfying the conditions to be a top-ordering dataset
#' library(StatRank)
#' data(Data.Election9)
#' is.top_ordering(data=Data.Election9)
#'
#' @export is.top_ordering
#' @export
if(!(class(data)[1]%in%c("top_ordering","RankData","rankings","matrix","data.frame"))){
stop("Invalid 'type' of data argument.")
}
if(any(class(data)=="top_ordering")){
out=TRUE
}
if(class(data)[1]=="RankData"){
warning("Objects of class 'RankData' are compatible with top-ordering datasets, but need to be coerced with as.top_ordering() before using the other functions of the PLMIX package.")
out=TRUE
}
if(class(data)[1]=="rankings"){
ttt=try(as.top_ordering(data=data, format_input="ranking"),silent=TRUE)
if(inherits(ttt, "try-error")){
warning("The supplied data of class 'rankings' is not compatible with a top-ordering dataset because all rankings contain ties.")
out=FALSE
}else{
warning("The supplied data of class 'rankings' is compatible with a top-ordering dataset, but needs to be coerced with as.top_ordering() before using the other functions of the PLMIX package.")
out=TRUE
}
}
if(class(data)[1]=="matrix" | class(data)[1]=="data.frame"){
if(class(data)[1]=="data.frame"){
data <- as.matrix(data)
}
if(is.vector(data)){
data <- t(data)
}
data[which(is.na(data))]=0
K=ncol(data)
if(any(!(data%in%(0:K)))){
check1=FALSE
message(paste0("->> Only integers {", paste(0:K,collapse=", "), "} are allowed as entries of the top-ordering dataset:"))
if(any(data<0)){
message("* Some entries are negative.")
}
if(any(data>K)){
message(paste0("* Some entries are > ", K,"."))
}
if(any((data%%1)>0)){
message(paste("* Some entries are not integer."))
}
}else{
check1=TRUE
}
data_dupl <- t(apply(data,1,duplicated))
data_dupl[data==0] <- NA
if(any(data_dupl,na.rm=TRUE)){
check2=FALSE
message("->> Ties are not allowed.")
}else{
check2=TRUE
}
non_cons_zeros=apply(data,1,function(x) if(0%in%x) length(setdiff(min(which(x==0)):K,which(x==0)))>0 else FALSE )
if(any(non_cons_zeros)){
check3=FALSE
message("->> Non-consecutive zero are not allowed in the rows of a top-ordering dataset.")
}else{
check3=TRUE
}
if(any(data[,1]==0)){
check4=FALSE
message("->> Rows starting with zero entries are not allowed in a top-ordering dataset.")
}else{
check4=TRUE
}
out=all(c(check1,check2,check3,check4))
}
return(out)
}
as.top_ordering <- function(data,format_input=NULL,aggr=NULL,freq_col=NULL,ties_method="random",...){
#' Coercion into top-ordering datasets
#'
#' Attempt to coerce the input data into a top-ordering dataset.
#'
#' The coercion function \code{as.top_ordering} tries to coerce the input data into an object of class \code{top_ordering} after checking for possible partial sequences that do not satisfy the top-ordering requirements. If none of the supplied sequences satisfies the top-ordering conditions, an error message is returned. \code{NA}'s in the input \code{data} are tacitly converted into zero entries.
#'
#' @param data An object containing the partial sequences to be coerced into an object of class \code{top_ordering}. The following classes are admissible for \code{data}: numeric \code{matrix}, \code{data.frame}, \code{RandData} from the \code{rankdist} package and \code{rankings} from the \code{PlackettLuce} package.
#' @param format_input Character string indicating the format of the \code{data} input, namely \code{"ordering"} or \code{"ranking"}. Used only when the class of the \code{data} argument is matrix or data frame. Default is \code{NULL}.
#' @param aggr Logical: whether the \code{data} argument collects the distinct observed sequences with the corresponding frequencies (aggregated format). Used only when the class of the \code{data} argument is matrix or data frame. Default is \code{NULL}.
#' @param freq_col Integer indicating the column of the \code{data} argument containing the frequencies of the distinct observed sequences. Used only when the class of the \code{data} argument is matrix or data frame and \code{aggr} argument is \code{TRUE}. Default is \code{NULL}.
#' @param ties_method Character string indicating the treatment of sequences with ties (not used for data of class \code{RankData}). If \code{"remove"}, the sequences with ties are removed before acting the coercion; if \code{"random"} (default), tied positions are re-assigned at random before acting the coercion.
#' @param ... Further arguments passed to or from other methods (not used).
#'
#' @return An object of S3 class \code{c("top_ordering","matrix")}.
#'
#' @references
#' Turner, H., Kormidis, I. and Firth, D. (2018). PlackettLuce: Plackett-Luce Models for Rankings. R package version 0.2-3. \url{https://CRAN.R-project.org/package=PlackettLuce}
#'
#' Qian, Z. (2018). rankdist: Distance Based Ranking Models. R package version 1.1.3. \url{https://CRAN.R-project.org/package=rankdist}
#'
#' @author Cristina Mollica and Luca Tardella
#'
#' @seealso \code{\link{is.top_ordering}}, \code{\link[PlackettLuce]{as.rankings}} and \code{\link[PlackettLuce]{rankings}}
#'
#' @examples
#'
#' ## Coerce an object of class 'rankings' into an object of class 'top_ordering'
#' library(PlackettLuce)
#' RR <- matrix(c(1, 2, 0, 0,
#' 4, 1, 2, 3,
#' 2, 1, 1, 1,
#' 1, 2, 3, 0,
#' 2, 1, 1, 0,
#' 1, 0, 3, 2), nrow = 6, byrow = TRUE)
#' RR_rank=as.rankings(RR)
#' RR_rank
#' as.top_ordering(RR_rank, ties_method="random")
#'
#' ## Coerce an object of class 'RankData' into an object of class 'top_ordering'
#' library(rankdist)
#' data(apa_partial_obj)
#' d_apa_top_ord=as.top_ordering(data=apa_partial_obj)
#' identical(d_apa,d_apa_top_ord)
#'
#' ## Coerce a data frame from the package prefmod into an object of class 'top_ordering'
#' library(prefmod)
#' data(carconf)
#' carconf_rank=carconf[,1:6]
#' carconf_top_ord=as.top_ordering(data=carconf_rank,format_input="ranking",aggr=FALSE)
#' identical(d_carconf,carconf_top_ord)
#'
#' ## Coerce a data frame from the package pmr into an object of class 'top_ordering'
#' library(pmr)
#' data(big4)
#' head(big4)
#' big4_top_ord=as.top_ordering(data=big4,format_input="ranking",aggr=TRUE,freq_col=5)
#' head(big4_top_ord)
#'
#' @export as.top_ordering
#' @export
if(!(class(data)[1]%in%c("top_ordering","RankData","rankings","matrix","data.frame"))){
stop("Invalid 'type' of data argument (see 'Details').")
}
if(any(class(data)=="top_ordering")){
out=data
}
if(class(data)[1]=="RankData"){
K=data@nobj
dist_rankings=data@ranking
tied_rows=apply(dist_rankings,1,function(x)any(duplicated(x)))
dist_rankings[tied_rows,]=t(apply(dist_rankings[tied_rows,],1,rank,ties.method="max"))
temp_tied=dist_rankings[tied_rows,]
temp_tied[which(temp_tied==K)]=0
dist_rankings[tied_rows,]=temp_tied
dist_orderings=rank_ord_switch(data=dist_rankings,format_input="ranking")
n_dist=data@ndistinct
out=dist_orderings[rep(1:n_dist,times=data@count),]
}
if(class(data)[1]=="rankings"){
temp_rankings=unclass(data)
N=nrow(temp_rankings)
temp_rankings[temp_rankings==0] <- NA
tied_rows=which(apply(temp_rankings,1,function(x)any(duplicated(na.omit(x)))))
if(length(tied_rows)>0){
if(ties_method=="remove"){
if(length(tied_rows)<N){
warning("Rankings with ties are removed from the supplied dataset.")
temp_rankings=temp_rankings[-tied_rows,,drop=FALSE]
}else{
if(length(tied_rows)==N){
stop("Supplied data cannot be coerced into a top-ordering dataset because all rankings contain ties.")
}
}
}else{
warning("Tied positions are re-assigned at random to satisfy the top-ordering requirements.")
temp_rankings[tied_rows,]=t(apply(temp_rankings[tied_rows,],1,rank,na.last="keep",ties.method="random"))
}
}
temp_rankings[is.na(temp_rankings)] <- 0
out=rank_ord_switch(temp_rankings,format_input="ranking")
}
if(class(data)[1]=="matrix" | class(data)[1]=="data.frame"){
if(class(data)[1]=="data.frame"){
data <- as.matrix(data)
}
if(is.vector(data)){
data <- t(data)
}
if(aggr){
NN=nrow(data)
data_aggr=data[,-freq_col,drop=FALSE]
freq=data[,freq_col]
data=data_aggr[rep(1:NN,times=freq),]
}
K=ncol(data)
N=nrow(data)
data[which(is.na(data))]=0
if(format_input=="ordering"){
check1=which(apply(data,1,function(x)any(!(x%in%(0:K)))))
data_dupl <- t(apply(data,1,duplicated))
data_dupl[data==0] <- NA
check2=which(apply(data_dupl,1,any,na.rm=TRUE))
non_cons_zeros=apply(data,1,function(x) if(0%in%x) length(setdiff(min(which(x==0)):K,which(x==0)))>0 else FALSE )
check3=which(non_cons_zeros)
check4=which(data[,1]==0)
checks=unique(c(check1,check2,check3,check4))
if(length(checks)>0 & length(checks)<N){
warning("Rows not satisfying the requirements of a top-ordering dataset have been removed. Please, apply the function is.top_ordering() to the supplied data for more information.")
data=data[-checks,,drop=FALSE]
}else{
if(length(checks)==N){
stop("Supplied data cannot be coerced because the provided orderings do not satisfy the requirements of a top-ordering dataset.")
}
}
}else{
check1=which(apply(data,1,function(x)any(!(x%in%(0:K)))))
data_dupl <- t(apply(data,1,duplicated))
data_dupl[data==0] <- NA
check2=which(apply(data_dupl,1,any,na.rm=TRUE))
check3=which(apply(data,1,function(x)any(diff(sort(x))>1)))
if(ties_method=="remove"){
checks=unique(c(check1,check2,check3))
}else{
checks=unique(c(check1,check3))
if(length(check2)>0){
warning("Tied positions are re-assigned at random to satisfy the top-ordering requirements.")
temp=data[check2,]
temp[temp==0]=NA
data[check2,]=t(apply(temp,1,rank,na.last="keep",ties.method="random"))
}
}
if(length(checks)>0 & length(checks)<N){
warning("Rows not satisfying the requirements of a top-ordering dataset have been removed. Please, apply the function is.top_ordering() to the supplied data for more information.")
data=data[-checks,,drop=FALSE]
}else{
if(length(checks)==N){
stop("Supplied data cannot be coerced because the provided rankings do not satisfy the requirements of a top-ordering dataset.")
}
}
data=rank_ord_switch(data,format_input="ranking")
}
out=data
}
class(out) <- c("top_ordering","matrix")
return(out)
}
myorder <- function(x){
#/' Utility to switch from a partial ranking to a partial ordering (missing positions denoted with zero)
#/' @param x Numeric integer vector
#/'
#/' @author Cristina Mollica and Luca Tardella
k <- sum(is.na(x))
out <- c(order(x,na.last=NA),rep(0,k))
return(out)
}
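# e.g. myorder(c(2, NA, 1)) returns c(3, 1, 0): the items sorted by their
# assigned rank, padded with zeros for the unranked items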
rank_ord_switch <- function(data,format_input,nranked=NULL){
#' Switch from orderings to rankings and vice versa
#'
#' Convert the format of the input dataset from orderings to rankings and vice versa.
#'
#'
#' @param data Numeric \eqn{N}\eqn{\times}{x}\eqn{K} data matrix of partial sequences whose format has to be converted.
#' @param format_input Character string indicating the format of the \code{data} input, namely \code{"ordering"} or \code{"ranking"}.
#' @param nranked Optional numeric vector of length \eqn{N} with the number of items ranked by each sample unit.
#'
#' @return Numeric \eqn{N}\eqn{\times}{x}\eqn{K} data matrix of partial sequences with inverse format.
#'
#' @author Cristina Mollica and Luca Tardella
#'
#' @examples
#'
#' ## From orderings to rankings for the Dublin West dataset
#' data(d_dublinwest)
#' head(d_dublinwest)
#' rank_ord_switch(data=head(d_dublinwest), format_input="ordering")
#' @export
data <- fill_single_entries(data=data)
K <- ncol(data)
if(any(data==0)){
data[data==0] <- NA
if(format_input=="ranking"){
out <- t(apply(data,1,myorder))
colnames(out) <- paste0("Rank_",1:K)
}else{
N <- nrow(data)
if(is.null(nranked)) nranked=rowSums(!is.na(data))
out <- matrix(0,nrow=N,ncol=K)
out[cbind(rep(1:N,nranked),na.omit(c(t(data))))] <- unlist(sapply(nranked,seq,from=1))
}
}else{
out <- t(apply(data,1,order))
}
if(format_input=="ranking"){
colnames(out) <- paste0("Rank_",1:K)
}else{
colnames(out) <- paste0("Item_",1:K)
}
return(out)
}
rank_summaries <- function(data,format_input,mean_rank=TRUE,marginals=TRUE,pc=TRUE){
#' Descriptive summaries for a partial ordering/ranking dataset
#'
#' Compute rank summaries and censoring patterns for a partial ordering/ranking dataset.
#'
#' @param data Numeric \eqn{N}\eqn{\times}{x}\eqn{K} data matrix of partial sequences.
#' @param format_input Character string indicating the format of the \code{data} input, namely \code{"ordering"} or \code{"ranking"}.
#' @param mean_rank Logical: whether the mean rank vector has to be computed. Default is \code{TRUE}.
#' @param marginals Logical: whether the marginal rank distributions have to be computed. Default is \code{TRUE}.
#' @param pc Logical: whether the paired comparison matrix has to be computed. Default is \code{TRUE}.
#'
#' @return A list of named objects:
#'
#' \item{\code{nranked}}{ Numeric vector of length \eqn{N} with the number of items ranked by each sample unit.}
#' \item{\code{nranked_distr}}{ Frequency distribution of the \code{nranked} vector.}
#' \item{\code{na_or_not}}{ Numeric \eqn{3}\eqn{\times}{x}\eqn{K} matrix with the counts of sample units that ranked or not each item. The last row contains the total by column, corresponding to the sample size \eqn{N}.}
#' \item{\code{mean_rank}}{ Numeric vector of length \eqn{K} with the mean rank of each item.}
#' \item{\code{marginals}}{ Numeric \eqn{K}\eqn{\times}{x}\eqn{K} matrix of the marginal rank distributions: the \eqn{(i,j)}-th entry indicates the number of units that ranked item \eqn{i} in the \eqn{j}-th position.}
#' \item{\code{pc}}{ Numeric \eqn{K}\eqn{\times}{x}\eqn{K} paired comparison matrix: the \eqn{(i,i')}-th entry indicates the number of sample units that preferred item \eqn{i} to item \eqn{i'}.}
#'
#'
#' @references
#' Marden, J. I. (1995). Analyzing and modeling rank data. \emph{Monographs on Statistics and Applied Probability} (64). Chapman & Hall, ISSN: 0-412-99521-2. London.
#'
#' @author Cristina Mollica and Luca Tardella
#'
#' @examples
#'
#' data(d_carconf)
#' rank_summaries(data=d_carconf, format_input="ordering")
#' @export
data <- fill_single_entries(data=data)
N <- nrow(data)
K <- ncol(data)
if(format_input=="ordering"){
data <- rank_ord_switch(data=data,format_input=format_input,nranked=NULL)
format_input <- "ranking"
}
data[data==0] <- NA
isna_data <- is.na(data)
nranked <- rowSums(!isna_data)
#nranked_distr <- table(nranked,dnn=NULL,deparse.level=0)
nranked_distr <- table(factor(nranked,levels=1:K))
#names(nranked_distr) <- paste0("Top-",1:(K-1))
names(nranked_distr) <- paste0("Top-",names(nranked_distr))
na <- colSums(isna_data)
na_or_not <- rbind(na, N-na, rep(N, K))
dimnames(na_or_not) <- list(c("n.a.","not n.a.","total"),paste0("Item_",1:K))
if(mean_rank){
mean_rank <- colMeans(data,na.rm=TRUE)
names(mean_rank) <- paste0("Item_",1:K)
}else{
mean_rank <- NULL
}
if(marginals){
marginals <- apply(data,2,tabulate,nbins=K)
dimnames(marginals) <- list(paste0("Rank_",1:K),paste0("Item_",1:K))
}else{
marginals <- NULL
}
if(pc){
data[is.na(data)] <- 0
pc <- paired_comparisons(data=data,format_input=format_input,nranked=nranked)
rownames(pc) <- colnames(pc) <- paste0("Item_",1:K)
}else{
pc <- NULL
}
out <- list(nranked=nranked,nranked_distr=nranked_distr,
na_or_not=na_or_not,mean_rank=mean_rank,
marginals=marginals,pc=pc)
return(out)
}
paired_comparisons <- function(data,format_input,nranked=NULL){
#' Paired comparison matrix for a partial ordering/ranking dataset
#'
#' Construct the paired comparison matrix for a partial ordering/ranking dataset.
#'
#' @param data Numeric \eqn{N}\eqn{\times}{x}\eqn{K} data matrix of partial sequences.
#' @param format_input Character string indicating the format of the \code{data} input, namely \code{"ordering"} or \code{"ranking"}.
#' @param nranked Optional numeric vector of length \eqn{N} with the number of items ranked by each sample unit.
#'
#' @return Numeric \eqn{K}\eqn{\times}{x}\eqn{K} paired comparison matrix: the \eqn{(i,i')}-th entry indicates the number of sample units that preferred item \eqn{i} to item \eqn{i'}.
#'
#'
#' @references
#' Mollica, C. and Tardella, L. (2017). Bayesian Plackett-Luce mixture models for partially ranked data. \emph{Psychometrika}, \bold{82}(2), pages 442--458, ISSN: 0033-3123, DOI: 10.1007/s11336-016-9530-0.
#'
#' @author Cristina Mollica and Luca Tardella
#'
#' @seealso \code{\link{rank_summaries}}
#'
#' @examples
#'
#' data(d_dublinwest)
#' paired_comparisons(data=d_dublinwest, format_input="ordering")
#' @export
data <- fill_single_entries(data=data)
N <- nrow(data)
K <- ncol(data)
if(format_input=="ranking"){
if(is.null(nranked)) nranked <- rowSums(data!=0)
data <- rank_ord_switch(data,format_input=format_input,nranked=nranked)
}
pc <- tau(pi_inv=data)
rownames(pc) <- colnames(pc) <- paste0("Item_",1:K)
return(pc)
} # K*K matrix
make_partial <- function(data,format_input,nranked=NULL,probcens=rep(1,ncol(data)-1)){
#' Censoring of complete rankings/orderings
#'
#' Return partial top rankings/orderings from complete sequences obtained either with user-specified censoring patterns or with a random truncation.
#'
#' The censoring of the complete sequences can be performed in: (i) a deterministic way, by specifying the number of top positions to be retained for each sample unit in the \code{nranked} argument; (ii) a random way, by sequentially specifying the probabilities of the top-1, top-2, \eqn{...}, top-\eqn{(K-1)} censoring patterns in the \code{probcens} argument. Recall that a top-\eqn{(K-1)} sequence corresponds to a complete ordering/ranking.
#'
#' @param data Numeric \eqn{N}\eqn{\times}{x}\eqn{K} data matrix of complete sequences to be censored.
#' @param format_input Character string indicating the format of the \code{data} input, namely \code{"ordering"} or \code{"ranking"}.
#' @param nranked Numeric vector of length \eqn{N} with the desired number of items ranked by each sample unit after censoring. If not supplied (\code{NULL}), the censoring patterns are randomly generated according to the probabilities in the \code{probcens} argument.
#' @param probcens Numeric vector of length \eqn{(K-1)} with the probability of each censoring pattern to be employed for the random truncation of the complete sequences (normalization is not necessary). It works only if \code{nranked} argument is \code{NULL} (see 'Details'). Default is equal probabilities.
#'
#' @return A list of two named objects:
#'
#' \item{\code{partialdata}}{ Numeric \eqn{N}\eqn{\times}{x}\eqn{K} data matrix of partial (censored) sequences with the same format of the input \code{data} and missing positions/items denoted with zero entries.}
#' \item{\code{nranked}}{ Numeric vector of length \eqn{N} with the number of items ranked by each sample unit after censoring.}
#'
#' @author Cristina Mollica and Luca Tardella
#'
#' @examples
#'
#' data(d_german)
#' head(d_german)
#' d_german_cens <- make_partial(data=d_german, format_input="ordering",
#' probcens=c(0.3, 0.3, 0.4))
#' head(d_german_cens$partialdata)
#'
#' ## Check consistency with the nominal censoring probabilities
#' round(prop.table(table(d_german_cens$nranked)), 2)
#'
#' @export
data <- fill_single_entries(data=data)
K <- ncol(data)
if(format_input=="ranking"){
data <- rank_ord_switch(data,format_input=format_input)
}
if(is.null(nranked)){
N <- nrow(data)
nranked <- sample(c(1:(K-2),K),size=N,replace=TRUE,prob=probcens)
}
out <- data*t(sapply(nranked,function(x)rep(c(1,0),c(x,K-x))))
if(format_input=="ranking"){
out <- rank_ord_switch(out,format_input="ordering",nranked=nranked)
}
return(list(partialdata=out,nranked=nranked))
} # N*K censored data matrix
make_complete <- function(data,format_input,nranked=NULL,probitems=rep(1,ncol(data))){
#' Completion of partial rankings/orderings
#'
#' Return complete rankings/orderings from partial sequences relying on a random generation of the missing positions/items.
#'
#' The completion of the partial top rankings/orderings is performed according to the Plackett-Luce scheme, that is, with a sampling without replacement of the not-ranked items by using the positive values in the \code{probitems} argument as support parameters (normalization is not necessary).
#'
#' @param data Numeric \eqn{N}\eqn{\times}{x}\eqn{K} data matrix of partial sequences to be completed.
#' @param format_input Character string indicating the format of the \code{data} input, namely \code{"ordering"} or \code{"ranking"}.
#' @param nranked Optional numeric vector of length \eqn{N} with the number of items ranked by each sample unit.
#' @param probitems Numeric vector with the \eqn{K} item-specific probabilities to be employed for the random generation of the missing positions/items (see 'Details'). Default is equal probabilities.
#'
#' @return A list of two named objects:
#'
#' \item{\code{completedata}}{ Numeric \eqn{N}\eqn{\times}{x}\eqn{K} data matrix of complete sequences with the same format of the input \code{data}.}
#' \item{\code{nranked}}{ Numeric vector of length \eqn{N} with the number of items ranked by each sample unit of the input \code{data}.}
#'
#' @author Cristina Mollica and Luca Tardella
#'
#' @examples
#'
#' ## Completion based on the top item frequencies
#' data(d_dublinwest)
#' head(d_dublinwest)
#' top_item_freq <- rank_summaries(data=d_dublinwest, format_input="ordering", mean_rank=FALSE,
#' pc=FALSE)$marginals["Rank_1",]
#'
#' d_dublinwest_compl <- make_complete(data=d_dublinwest, format_input="ordering",
#' probitems=top_item_freq)
#' head(d_dublinwest_compl$completedata)
#'
#' @export
data <- fill_single_entries(data=data)
K <- ncol(data)
if(is.null(nranked)){
nranked <- rowSums(data!=0)
}
if(format_input=="ranking"){
data <- rank_ord_switch(data,format_input=format_input,nranked=nranked)
}
data[data==0] <- NA
out <- data
partialdata <- out[which(nranked!=K),]
out[which(nranked!=K),] <- t(apply(partialdata,1,function(x){ notrankeditems=setdiff(1:K,x); c(na.omit(x),sample(notrankeditems,prob=probitems[notrankeditems]))}))
if(format_input=="ranking"){
out <- rank_ord_switch(out,format_input="ordering")
}
return(list(completedata=out,nranked=nranked))
}
### Utility to simulate a single ordering from an EPL
mysample <- function(support, pr){
  sample(x = support, prob = pr)
}
rPLMIX <- function(n=1,K,G,p=t(matrix(1/K,nrow=K,ncol=G)),ref_order=t(matrix(1:K,nrow=K,ncol=G)),weights=rep(1/G,G),format_output="ordering"){
#' Random sample from a mixture of Plackett-Luce models
#'
#' Draw a random sample of complete orderings/rankings from a \eqn{G}-component mixture of Plackett-Luce models.
#'
#' Positive values are required for \code{p} and \code{weights} arguments (normalization is not necessary).
#'
#' The \code{ref_order} argument accommodates for the more general mixture of Extended Plackett-Luce models (EPL), involving the additional reference order parameters (Mollica and Tardella 2014). A permutation of the first \eqn{K} integers can be specified in each row of the \code{ref_order} argument to generate a sample from a \eqn{G}-component mixture of EPL. Since the Plackett-Luce model is a special instance of the EPL with the reference order equal to the identity permutation \eqn{(1,\dots,K)}, the default value of the \code{ref_order} argument is forward orders.
#'
#' @param n Number of observations to be sampled. Default is 1.
#' @param K Number of possible items.
#' @param G Number of mixture components.
#' @param p Numeric \eqn{G}\eqn{\times}{x}\eqn{K} matrix of component-specific support parameters. Default is equal support parameters (uniform mixture components).
#' @param ref_order Numeric \eqn{G}\eqn{\times}{x}\eqn{K} matrix of component-specific reference orders. Default is forward orders (identity permutations) in each row, corresponding to Plackett-Luce mixture components (see 'Details').
#' @param weights Numeric vector of \eqn{G} mixture weights. Default is equal weights.
#' @param format_output Character string indicating the format of the returned simulated dataset (\code{"ordering"} or \code{"ranking"}). Default is \code{"ordering"}.
#'
#' @return If \eqn{G=1}, a numeric \eqn{N}\eqn{\times}{x}\eqn{K} matrix of simulated complete sequences. If \eqn{G>1}, a list of two named objects:
#'
#' \item{\code{comp}}{ Numeric vector of \eqn{N} mixture component memberships.}
#' \item{\code{sim_data}}{ Numeric \eqn{N}\eqn{\times}{x}\eqn{K} matrix of simulated complete sequences.}
#'
#' @author Cristina Mollica and Luca Tardella
#'
#' @examples
#'
#' K <- 6
#' G <- 3
#' support_par <- matrix(1:(G*K), nrow=G, ncol=K)
#' weights_par <- c(0.50, 0.25, 0.25)
#'
#' set.seed(47201)
#' simulated_data <- rPLMIX(n=5, K=K, G=G, p=support_par, weights=weights_par)
#' simulated_data$comp
#' simulated_data$sim_data
#'
#' @export
if(G==1){
if(is.matrix(p)) p <- c(p)
if(is.matrix(ref_order)) ref_order <- c(ref_order)
p_par <- p/sum(p)
perm_par <- matrix(p_par,nrow=K,ncol=n)
out <- t(apply(perm_par,2,mysample,support=1:K))
out <- out[,order(ref_order)]
if(format_output=="ranking") out <- rank_ord_switch(out,format_input="ordering",nranked=rep(K,n))
return(out)
}else{
p_par <- p/rowSums(p)
comp <- sample(x=1:G,size=n,replace=TRUE,prob=weights)
perm_par <- p[comp,]
out <- t(apply(perm_par,1,mysample,support=1:K))
for(g in 1:G){
out[comp==g,] <- out[comp==g,order(ref_order[g,])]
}
if(format_output=="ranking"){
out <- rank_ord_switch(out,format_input="ordering",nranked=rep(K,n))
}
return(list(comp=comp,sim_data=out))
}
}
likPLMIX <- function(p,ref_order,weights,pi_inv){
#' @rdname loglikelihood
#' @name Loglikelihood
#' @aliases likPLMIX loglikPLMIX loglikelihood Likelihood likelihood
#' @title Likelihood and log-likelihood evaluation for a mixture of Plackett-Luce models
#'
#' @description Compute either the likelihood or the log-likelihood of the Plackett-Luce mixture model parameters for a partial ordering dataset.
#' @details The \code{ref_order} argument accommodates for the more general mixture of Extended Plackett-Luce models (EPL), involving the additional reference order parameters (Mollica and Tardella 2014). A permutation of the first \eqn{K} integers can be specified in each row of the \code{ref_order} argument. Since the Plackett-Luce model is a special instance of the EPL with the reference order equal to the identity permutation, the \code{ref_order} argument must be a matrix with \eqn{G} rows equal to \eqn{(1,\dots,K)} when dealing with Plackett-Luce mixtures.
#' @param p Numeric \eqn{G}\eqn{\times}{x}\eqn{K} matrix of component-specific support parameters.
#' @param ref_order Numeric \eqn{G}\eqn{\times}{x}\eqn{K} matrix of component-specific reference orders.
#' @param weights Numeric vector of \eqn{G} mixture weights.
#' @param pi_inv An object of class \code{top_ordering}, collecting the numeric \eqn{N}\eqn{\times}{x}\eqn{K} data matrix of partial orderings, or an object that can be coerced with \code{\link{as.top_ordering}}.
#' @return Either the likelihood or the log-likelihood value of the Plackett-Luce mixture model parameters for a partial ordering dataset.
#'
#' @references
#' Mollica, C. and Tardella, L. (2017). Bayesian Plackett-Luce mixture models for partially ranked data. \emph{Psychometrika}, \bold{82}(2), pages 442--458, ISSN: 0033-3123, DOI: 10.1007/s11336-016-9530-0.
#'
#' Mollica, C. and Tardella, L. (2014). Epitope profiling via mixture modeling for ranked data. \emph{Statistics in Medicine}, \bold{33}(21), pages 3738--3758, ISSN: 0277-6715, DOI: 10.1002/sim.6224.
#'
#' @author Cristina Mollica and Luca Tardella
#'
#' @examples
#'
#' data(d_apa)
#' K <- ncol(d_apa)
#' G <- 3
#' support_par <- matrix(1:(G*K), nrow=G, ncol=K)
#' weights_par <- c(0.50, 0.25, 0.25)
#' loglikPLMIX(p=support_par, ref_order=matrix(1:K, nrow=G, ncol=K, byrow=TRUE),
#' weights=weights_par, pi_inv=d_apa)
#'
#' @export
if(class(pi_inv)[1]!="top_ordering"){
if(class(pi_inv)[1]=="RankData"){
pi_inv=as.top_ordering(data=pi_inv)
}
if(class(pi_inv)[1]=="rankings"){
pi_inv=as.top_ordering(data=pi_inv)
}
if(class(pi_inv)[1]=="matrix" | class(pi_inv)[1]=="data.frame"){
pi_inv=as.top_ordering(data=pi_inv,format_input="ordering",aggr=FALSE)
}
}
pi_inv <- fill_single_entries(data=pi_inv)
lik <- exp(loglikPLMIX(p,ref_order,weights,pi_inv))
return(lik)
}
bicPLMIX <- function(max_log_lik,pi_inv,G,ref_known=TRUE,ref_vary=FALSE){
#' BIC for the MLE of a mixture of Plackett-Luce models
#'
#' Compute BIC value for the MLE of a mixture of Plackett-Luce models fitted to partial orderings.
#'
#' The \code{max_log_lik} and the BIC values can be straightforwardly obtained from the output of the \code{\link{mapPLMIX}} and \code{\link{mapPLMIX_multistart}} functions when the default noninformative priors are adopted in the MAP procedure. So, the \code{bicPLMIX} function is especially useful to compute the BIC value from the output of alternative MLE methods for mixtures of Plackett-Luce models implemented, for example, with other softwares.
#'
#' The \code{ref_known} and \code{ref_vary} arguments accommodate the more general mixture of Extended Plackett-Luce models (EPL), involving the additional reference order parameters (Mollica and Tardella 2014). Since the Plackett-Luce model is a special instance of the EPL with the reference order equal to the identity permutation \eqn{(1,\dots,K)}, the default values of \code{ref_known} and \code{ref_vary} are set equal, respectively, to \code{TRUE} and \code{FALSE}.
#'
#' @param max_log_lik Maximized log-likelihood value.
#' @param pi_inv An object of class \code{top_ordering}, collecting the numeric \eqn{N}\eqn{\times}{x}\eqn{K} data matrix of partial orderings, or an object that can be coerced with \code{\link{as.top_ordering}}.
#' @param G Number of mixture components.
#' @param ref_known Logical: whether the component-specific reference orders are known (not to be estimated). Default is \code{TRUE}.
#' @param ref_vary Logical: whether the reference orders vary across mixture components. Default is \code{FALSE}.
#'
#' @return A list of two named objects:
#'
#' \item{\code{max_log_lik}}{ The \code{max_log_lik} argument.}
#' \item{\code{bic}}{ BIC value.}
#'
#' @references
#' Mollica, C. and Tardella, L. (2017). Bayesian Plackett-Luce mixture models for partially ranked data. \emph{Psychometrika}, \bold{82}(2), pages 442--458, ISSN: 0033-3123, DOI: 10.1007/s11336-016-9530-0.
#'
#' Mollica, C. and Tardella, L. (2014). Epitope profiling via mixture modeling for ranked data. \emph{Statistics in Medicine}, \bold{33}(21), pages 3738--3758, ISSN: 0277-6715, DOI: 10.1002/sim.6224.
#'
#' Schwarz, G. (1978). Estimating the dimension of a model. \emph{Ann. Statist.}, \bold{6}(2), pages 461--464, ISSN: 0090-5364, DOI: 10.1214/aos/1176344136.
#'
#' @author Cristina Mollica and Luca Tardella
#'
#' @seealso \code{\link{mapPLMIX}} and \code{\link{mapPLMIX_multistart}}
#'
#' @examples
#'
#' data(d_carconf)
#' K <- ncol(d_carconf)
#' MAP_mult <- mapPLMIX_multistart(pi_inv=d_carconf, K=K, G=3, n_start=2, n_iter=400*3)
#' bicPLMIX(max_log_lik=MAP_mult$mod$max_objective, pi_inv=d_carconf, G=3)$bic
#'
#' ## Equivalently
#' MAP_mult$mod$bic
#'
#' @export
if(class(pi_inv)[1]!="top_ordering"){
if(class(pi_inv)[1]=="RankData"){
pi_inv=as.top_ordering(data=pi_inv)
}
if(class(pi_inv)[1]=="rankings"){
pi_inv=as.top_ordering(data=pi_inv)
}
if(class(pi_inv)[1]=="matrix" | class(pi_inv)[1]=="data.frame"){
pi_inv=as.top_ordering(data=pi_inv,format_input="ordering",aggr=FALSE)
}
}
pi_inv <- fill_single_entries(data=pi_inv)
N <- nrow(pi_inv)
K <- ncol(pi_inv)
if(!ref_known){
if(ref_vary){
bic <- -2*max_log_lik+(G*(K-1)+G+(G-1))*log(N)
}else{
bic <- -2*max_log_lik+(G*(K-1)+1+(G-1))*log(N)
}
}else{
bic <- -2*max_log_lik+(G*(K-1)+(G-1))*log(N)
}
return(list(max_log_lik=max_log_lik,bic=bic))
}
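## Sketch of the free-parameter count behind the three BIC branches above:
## G*(K-1) support parameters (each row of p sums to one) plus G-1 weights,
## plus G reference orders if estimated component-wise, or 1 if estimated
## but common to all components:
# n_params <- function(K, G, ref_known = TRUE, ref_vary = FALSE) {
#   G*(K-1) + (G-1) + if (ref_known) 0 else if (ref_vary) G else 1
# }
# # bic = -2*max_log_lik + n_params(K, G)*log(N)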
gammamat <- function(u_bin,z_hat){
# Aggregate the binary item indicators in u_bin (N x K) over sample units,
# weighting each unit by its posterior component memberships in z_hat (N x G);
# the result is a G x K matrix.
gam <- t(z_hat)%*%u_bin
return(gam)
}
binary_group_ind <- function(class,G){
#' Binary group membership matrix
#'
#' Construct the binary group membership matrix from the multinomial classification vector.
#'
#' @param class Numeric vector of class memberships.
#' @param G Number of possible different classes.
#'
#' @return Numeric \code{length(class)}\eqn{\times}{x}\eqn{G} matrix of binary group memberships.
#' @author Cristina Mollica and Luca Tardella
#' @examples
#'
#' binary_group_ind(class=c(3,1,5), G=6)
#'
#' @export
N <- length(class)
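# One-hot trick: compare the label grid 1:G with each unit's class label;
# each length-G block of indicators becomes one row of the N x G matrix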
temp <- (rep(1:G,length(class))==rep(class,each=G))*1
out <- matrix(temp,nrow=N,ncol=G,byrow=TRUE)
return(out)
} # N*G matrix
##########################################################
############# EM for MAP estimation #############################
mapPLMIX <- function(pi_inv,K,G,
init=list(p=NULL,omega=NULL),n_iter=1000,
hyper=list(shape0=matrix(1,nrow=G,ncol=K),rate0=rep(0,G),alpha0=rep(1,G)),
eps=10^(-6),
centered_start=FALSE,
plot_objective=FALSE){
#' MAP estimation for a Bayesian mixture of Plackett-Luce models
#'
#' Perform MAP estimation via EM algorithm for a Bayesian mixture of Plackett-Luce models fitted to partial orderings.
#'
#' Under noninformative (flat) prior setting, the EM algorithm for MAP estimation corresponds to the EMM algorithm described by Gormley and Murphy (2006) to perform frequentist inference. In this case, the MAP solution coincides with the MLE and the output vectors \code{log_lik} and \code{objective} coincide as well.
#'
#' The \code{\link{mapPLMIX}} function performs the MAP procedure with a single starting value. To address the issue of local maxima in the posterior distribution, see the \code{\link{mapPLMIX_multistart}} function.
#'
#' @param pi_inv An object of class \code{top_ordering}, collecting the numeric \eqn{N}\eqn{\times}{x}\eqn{K} data matrix of partial orderings, or an object that can be coerced with \code{\link{as.top_ordering}}.
#' @param K Number of possible items.
#' @param G Number of mixture components.
#' @param init List of named objects with initialization values: \code{p} is a numeric \eqn{G}\eqn{\times}{x}\eqn{K} matrix of component-specific support parameters; \code{omega} is a numeric vector of \eqn{G} mixture weights. If starting values are not supplied (\code{NULL}), they are randomly generated with a uniform distribution. Default is \code{NULL}.
#' @param n_iter Maximum number of EM iterations.
#' @param hyper List of named objects with hyperparameter values for the conjugate prior specification: \code{shape0} is a numeric \eqn{G}\eqn{\times}{x}\eqn{K} matrix of shape hyperparameters; \code{rate0} is a numeric vector of \eqn{G} rate hyperparameters; \code{alpha0} is a numeric vector of \eqn{G} Dirichlet hyperparameters. Default is noninformative (flat) prior setting.
#' @param eps Tolerance value for the convergence criterion.
#' @param centered_start Logical: whether the random starting values of the support parameters and weights should be centered around the observed relative frequencies with which each item has been ranked top. Default is \code{FALSE}. Ignored when \code{init} is not \code{NULL}.
#' @param plot_objective Logical: whether the objective function (that is the kernel of the log-posterior distribution) should be plotted. Default is \code{FALSE}.
#'
#' @return A list of S3 class \code{mpPLMIX} with named elements:
#'
#' \item{\code{W_map}}{ Numeric vector with the MAP estimates of the \eqn{G} mixture weights.}
#' \item{\code{P_map}}{ Numeric \eqn{G}\eqn{\times}{x}\eqn{K} matrix with the MAP estimates of the component-specific support parameters.}
#' \item{\code{z_hat}}{ Numeric \eqn{N}\eqn{\times}{x}\eqn{G} matrix of estimated posterior component membership probabilities.}
#' \item{\code{class_map}}{ Numeric vector of \eqn{N} mixture component memberships based on MAP allocation from the \code{z_hat} matrix.}
#' \item{\code{log_lik}}{ Numeric vector of the log-likelihood values at each iteration.}
#' \item{\code{objective}}{ Numeric vector of the objective function values (that is the kernel of the log-posterior distribution) at each iteration.}
#' \item{\code{max_objective}}{ Maximized objective function value.}
#' \item{\code{bic}}{ BIC value (only for the default flat priors, otherwise \code{NULL}).}
#' \item{\code{conv}}{ Binary convergence indicator: 1 = convergence has been achieved, 0 = otherwise.}
#' \item{\code{call}}{ The matched call.}
#'
#' @references
#' Mollica, C. and Tardella, L. (2017). Bayesian Plackett-Luce mixture models for partially ranked data. \emph{Psychometrika}, \bold{82}(2), pages 442--458, ISSN: 0033-3123, DOI: 10.1007/s11336-016-9530-0.
#'
#' Gormley, I. C. and Murphy, T. B. (2006). Analysis of Irish third-level college applications data. \emph{Journal of the Royal Statistical Society: Series A}, \bold{169}(2), pages 361--379, ISSN: 0964-1998, DOI: 10.1111/j.1467-985X.2006.00412.x.
#'
#' @author Cristina Mollica and Luca Tardella
#'
#' @seealso \code{\link{mapPLMIX_multistart}}
#'
#' @examples
#'
#' data(d_carconf)
#' MAP <- mapPLMIX(pi_inv=d_carconf, K=ncol(d_carconf), G=3, n_iter=400*3)
#' str(MAP)
#' MAP$P_map
#' MAP$W_map
#'
#' @export
cl <- match.call()
if(class(pi_inv)[1]!="top_ordering"){
if(class(pi_inv)[1]=="RankData"){
pi_inv=as.top_ordering(data=pi_inv)
}
if(class(pi_inv)[1]=="rankings"){
pi_inv=as.top_ordering(data=pi_inv)
}
if(class(pi_inv)[1]=="matrix" | class(pi_inv)[1]=="data.frame"){
pi_inv=as.top_ordering(data=pi_inv,format_input="ordering",aggr=FALSE)
}
}
pi_inv <- fill_single_entries(data=pi_inv)
N <- nrow(pi_inv)
n_rank <- howmanyranked(pi_inv)
rho <- matrix(1:K,nrow=G,ncol=K,byrow=TRUE)
ref_known <- TRUE
ref_vary <- FALSE
if(is.null(init$omega)){
#omega <- runif(G)
#omega <- omega/sum(omega)
omega <- rdirichlet(1,rep(1,G))
}else{
omega <- init$omega
if(sum(omega)!=1){
warning("Initial mixture weights must add to one ==> input arguments has been normalized!")
omega <- omega/sum(omega)
}
}
if(is.null(init$p)){
if(centered_start){
# print("CENTERED START !!")
mle1comp <- matrix(prop.table(table(factor(pi_inv[,1],levels=1:K))),nrow=1)
p <- random_start(mlesupp=mle1comp, givenweights=omega)
p <- p/rowSums(p)
}else{
# print("COMPLETELY RANDOM (uniform support, rescaled) START")
# p <- matrix(runif(G*K),nrow=G,ncol=K)
# p <- p/rowSums(p)
p <- rdirichlet(G,rep(1,K))
}
}else{
p <- init$p
if(is.vector(p)){
p <- t(p)
}
if(!all(rowSums(p)==1)){
warning("Initial support parameters for each mixture component must
add to one ==> input arguments has been normalized!")
p <- p/rowSums(p)
}
}
init <- list(p=p,omega=omega)
shape0 <- hyper$shape0
rate0 <- hyper$rate0
alpha0 <- hyper$alpha0
u_bin <- umat(pi_inv=pi_inv)
log_lik <- rep(NA,n_iter)
if(!(all(shape0==1) & all(rate0==0) & all(alpha0==1))){
# print("Non-flat prior input")
log_prior <- log_lik
}
objective <- log_lik
conv <- 0
l <- 1
while(l<=n_iter){
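# E-step: posterior component membership probabilities given the current parameter values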
z_hat <- Estep(p=p,ref_order=rho,weights=omega,pi_inv=pi_inv)
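# M-step: closed-form MAP updates of the mixture weights and of the support parameters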
omega <- UpWhet(z_hat=z_hat,alpha0=alpha0)
if(any(is.na(omega))){
print("==> PROBLEM WITH *omega* update")
print(omega)
}
p <- UpPhetpartial(p=p,ref_order=rho,pi_inv=pi_inv,z_hat=z_hat,shape0=shape0,
rate0=rate0,n_rank=n_rank,u_bin=u_bin)
if(any(is.na(p))){
print("==> PROBLEM WITH *p* update")
print(p)
}
log_lik[l] <- loglikPLMIX(p=p,ref_order=rho,weights=omega,pi_inv=pi_inv)
if(is.na(log_lik[l])){
print(p)
print(omega)
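# Numerical underflow guard: progressively floor tiny support values and re-evaluate the log-likelihood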
threshold <- -17
while(is.na(log_lik[l]) & threshold<(-3)){
p[p<=(10^threshold)] <- 10^threshold
log_lik[l] <- loglikPLMIX(p=p,ref_order=rho,weights=omega,pi_inv=pi_inv)
print(paste0("Likelihood/parameter approximation for support parameters <= 10^(",threshold,")"))
threshold <- threshold+1
}
}
if(!(all(shape0==1) & all(rate0==0) & all(alpha0==1))){
log_prior[l] <- log(ddirichlet(omega,alpha0))+sum(dgamma(p,shape=shape0,rate=rate0,log=TRUE))
objective[l] <- log_lik[l]+log_prior[l]
}else{
objective[l] <- log_lik[l]
}
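# Convergence check: stop when the relative increase of the objective falls below eps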
if(l>=2){
if((objective[l]-objective[l-1])/abs(objective[l-1])<eps |
((objective[l]-objective[l-1])==0 & objective[l-1]==0)){
conv <- 1
l <- n_iter+1
}
}
l <- l+1
}
P_map=p/rowSums(p)
dimnames(P_map)=list(paste0("g_",1:G),paste0("p_",1:K))
names(omega)=paste0("w_",1:G)
log_lik <- log_lik[!(is.na(log_lik))]
max_log_lik <- max(log_lik)
objective <- objective[!(is.na(objective))]
max_objective <- max(objective)
if(all(shape0==1) & all(rate0==0) & all(alpha0==1)){
bic <- bicPLMIX(max_log_lik=max_log_lik,pi_inv=pi_inv,
G=G,ref_known=ref_known,
ref_vary=ref_vary)$bic
}else{
bic <- NULL
}
if(plot_objective){
plot(objective,ylab="Log-joint distribution",xlab="Iteration",
main=paste("MAP estimation for PL mixture with",G,"components"),type="l")
}
out=list(W_map=omega,P_map=P_map,z_hat=z_hat,class_map=apply(z_hat,1,which.max),
log_lik=log_lik,objective=objective,max_objective=max_objective,bic=bic,conv=conv,call=cl)
class(out)="mpPLMIX"
return(out)
}
mapPLMIX_multistart <- function(pi_inv,K,G,n_start=1,
init=rep(list(list(p=NULL,omega=NULL)),times=n_start),
n_iter=200,
hyper=list(shape0=matrix(1,nrow=G,ncol=K),rate0=rep(0,G),alpha0=rep(1,G)),
eps=10^(-6),
plot_objective=FALSE,
init_index=1:n_start,
parallel=FALSE,
centered_start=FALSE){
#' MAP estimation for a Bayesian mixture of Plackett-Luce models with multiple starting values
#'
#' Perform MAP estimation via EM algorithm with multiple starting values for a Bayesian mixture of Plackett-Luce models fitted to partial orderings.
#'
#' Under noninformative (flat) prior setting, the EM algorithm for MAP estimation corresponds to the EMM algorithm described by Gormley and Murphy (2006) to perform frequentist inference. In this case, the MAP solution coincides with the MLE. The best model in terms of maximized posterior distribution is returned.
#'
#' @param pi_inv An object of class \code{top_ordering}, collecting the numeric \eqn{N}\eqn{\times}{x}\eqn{K} data matrix of partial orderings, or an object that can be coerced with \code{\link{as.top_ordering}}.
#' @param K Number of possible items.
#' @param G Number of mixture components.
#' @param n_start Number of starting values.
#' @param init List of \code{n_start} lists of named objects with initialization values: \code{p} is a numeric \eqn{G}\eqn{\times}{x}\eqn{K} matrix of component-specific support parameters; \code{omega} is a numeric vector of \eqn{G} mixture weights. If starting values are not supplied (\code{NULL}), they are randomly generated with a uniform distribution. Default is \code{NULL}.
#' @param n_iter Maximum number of EM iterations.
#' @param hyper List of named objects with hyperparameter values for the conjugate prior specification: \code{shape0} is a numeric \eqn{G}\eqn{\times}{x}\eqn{K} matrix of shape hyperparameters; \code{rate0} is a numeric vector of \eqn{G} rate hyperparameters; \code{alpha0} is a numeric vector of \eqn{G} Dirichlet hyperparameters. Default is noninformative (flat) prior setting.
#' @param eps Tolerance value for the convergence criterion.
#' @param plot_objective Logical: whether the objective function (that is the kernel of the log-posterior distribution) should be plotted. Default is \code{FALSE}.
#' @param init_index Numeric vector indicating the positions of the starting values in the \code{init} list to be actually launched. Useful to launch the most promising starting values identified after a preliminary run. Default is to run all the starting values in the \code{init} list.
#' @param parallel Logical: whether parallelization should be used. Default is \code{FALSE}.
#' @param centered_start Logical: whether the random starting values of the support parameters and weights should be centered around the observed relative frequencies with which each item has been ranked top. Default is \code{FALSE}. Ignored when \code{init} is not \code{NULL}.
#'
#' @return A list of S3 class \code{mpPLMIX} with named elements:
#'
#' \item{\code{mod}}{ List of named objects describing the best model in terms of maximized posterior distribution. See output values of the single-run \code{\link{mapPLMIX}} function for a detailed explanation of the list elements.}
#' \item{\code{max_objective}}{ Numeric vector of the maximized objective function values for each initialization.}
#' \item{\code{convergence}}{ Binary vector with \code{length(init_index)} convergence indicators for each initialization: 1 = convergence has been achieved, 0 = otherwise.}
#' \item{\code{call}}{ The matched call.}
#'
#' @references
#' Mollica, C. and Tardella, L. (2017). Bayesian Plackett-Luce mixture models for partially ranked data. \emph{Psychometrika}, \bold{82}(2), pages 442--458, ISSN: 0033-3123, DOI: 10.1007/s11336-016-9530-0.
#'
#' Gormley, I. C. and Murphy, T. B. (2006). Analysis of Irish third-level college applications data. \emph{Journal of the Royal Statistical Society: Series A}, \bold{169}(2), pages 361--379, ISSN: 0964-1998, DOI: 10.1111/j.1467-985X.2006.00412.x.
#'
#' @author Cristina Mollica and Luca Tardella
#'
#' @seealso \code{\link{mapPLMIX}}
#'
#' @examples
#'
#' data(d_carconf)
#' MAP_mult <- mapPLMIX_multistart(pi_inv=d_carconf, K=ncol(d_carconf), G=3,
#' n_start=2, n_iter=400*3)
#' str(MAP_mult)
#' MAP_mult$mod$P_map
#' MAP_mult$mod$W_map
#'
#' @export
cl <- match.call()
if(class(pi_inv)[1]!="top_ordering"){
if(class(pi_inv)[1]=="RankData"){
pi_inv=as.top_ordering(data=pi_inv)
}
if(class(pi_inv)[1]=="rankings"){
pi_inv=as.top_ordering(data=pi_inv)
}
if(class(pi_inv)[1]=="matrix" | class(pi_inv)[1]=="data.frame"){
pi_inv=as.top_ordering(data=pi_inv,format_input="ordering",aggr=FALSE)
}
}
pi_inv <- fill_single_entries(data=pi_inv)
for(i in 1:n_start){
# print(paste0("Multiple starting point #",i))
if(is.null(init[[i]]$omega)){
#omega <- runif(G)
#omega <- omega/sum(omega)
omega <- rdirichlet(1,rep(1,G))
}else{
omega <- init[[i]]$omega
if(sum(omega)!=1){
warning("Initial mixture weights must add to one ==> input arguments has been normalized!")
omega <- omega/sum(omega)
}
}
if(is.null(init[[i]]$p)){
if(centered_start){
# print("CENTERED START !!")
mle1comp <- matrix(prop.table(table(factor(pi_inv[,1],levels=1:K))),nrow=1)
p <- random_start(mlesupp=mle1comp, givenweights=omega)
p <- p/rowSums(p)
}else{
# print("COMPLETELY RANDOM (uniform support, rescaled) START")
#p <- matrix(runif(G*K),nrow=G,ncol=K)
#p <- p/rowSums(p)
p <- rdirichlet(G,rep(1,K))
}
}else{
p <- init[[i]]$p
if(is.vector(p)){
p <- t(p)
}
if(!all(rowSums(p)==1)){
warning("Initial support parameters for each mixture component must
add to one ==> input arguments has been normalized!")
p <- p/rowSums(p)
}
}
init[[i]] <- list(p=p,omega=omega)
}
if(!parallel){
mod <- vector(mode="list",length=length(init_index))
max_objective <- rep(NA,length(init_index))
convergence <- rep(NA,length(init_index))
record <- rep(NA,length(init_index))
l <- 0
for(i in init_index){
l <- l+1
# print(paste("INITIALIZATION",l))
mod[[l]] <- mapPLMIX(pi_inv=pi_inv,K=K,G=G,init=init[[i]],n_iter=n_iter,hyper=hyper,
eps=eps,centered_start=centered_start,plot_objective=plot_objective)
max_objective[l] <- mod[[l]]$max_objective
convergence[l] <- mod[[l]]$conv
record[l] <- max(max_objective[1:l])
print(paste("Starting value #",l," => best objective function value so far =",record[l]))
}
mod <- mod[[which.max(max_objective)]]
class(mod) <- "list"
mod <- mod[-length(mod)]
out=list(mod=mod,max_objective=max_objective,convergence=convergence,call=cl)
}else{
mod <- foreach(i=init_index) %dopar%{
tempmod <- mapPLMIX(pi_inv=pi_inv,K=K,G=G,init=init[[i]],n_iter=n_iter,hyper=hyper,
eps=eps,centered_start=centered_start,plot_objective=plot_objective)
}
max_objective <- sapply(mod,"[[","max_objective")
convergence <- sapply(mod,"[[","conv")
outmod <- mod[[which.max(max_objective)]]
class(outmod) <- "list"
outmod <- outmod[-length(outmod)]
out=list(mod=outmod,max_objective=max_objective,convergence=convergence,call=cl)
}
class(out)="mpPLMIX"
return(out)
}
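## The parallel=TRUE branch relies on foreach's %dopar%, so a parallel backend
## must be registered beforehand; a minimal sketch, assuming the doParallel
## backend (any foreach-compatible backend would do):
# library(doParallel)
# registerDoParallel(cores = 2)
# MAP_mult <- mapPLMIX_multistart(pi_inv = d_carconf, K = ncol(d_carconf),
#                                 G = 3, n_start = 4, parallel = TRUE)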
print.mpPLMIX <- function(x,...){
#' Print of the MAP estimation algorithm for a Bayesian mixture of Plackett-Luce models
#'
#' \code{print} method for class \code{mpPLMIX}. It shows some general information on the MAP estimation procedure for a Bayesian mixture of Plackett-Luce models.
#'
#'
#' @param x Object of class \code{mpPLMIX} returned by the \code{mapPLMIX} or \code{mapPLMIX_multistart} function.
#' @param ... Further arguments passed to or from other methods (not used).
#'
#' @author Cristina Mollica and Luca Tardella
#'
#' @seealso \code{\link{mapPLMIX}} and \code{\link{mapPLMIX_multistart}}
#'
#' @examples
#'
#' ## Print of the MAP procedure with a single starting point
#' data(d_carconf)
#' MAP <- mapPLMIX(pi_inv=d_carconf, K=ncol(d_carconf), G=3)
#' print(MAP)
#'
#' ## Print of the MAP procedure with 5 starting points
#' MAP_multi <- mapPLMIX_multistart(pi_inv=d_carconf, K=ncol(d_carconf), G=3, n_start=5)
#' print(MAP_multi)
#' @export print.mpPLMIX
#' @export
mpPLMIX_out=x
if(!inherits(mpPLMIX_out,"mpPLMIX")){
stop("The function requires an object of S3 class 'mpPLMIX' as its first argument.")
}
cat("\nCall:\n", paste(deparse(mpPLMIX_out$call), sep = "\n", collapse = "\n"),
"\n\n", sep = "")
if(is.null(mpPLMIX_out$convergence)){
G=length(mpPLMIX_out$W_map)
K=ncol(mpPLMIX_out$P_map)
L=length(mpPLMIX_out$log_lik)
cat("MAP estimation procedure for a Bayesian mixture of Plackett-Luce models:\n")
if(!is.null(mpPLMIX_out$bic)){
cat("Prior distribution used: flat (default) ====> MAP = MLE\n")
}else{
cat("Prior distribution used: subjective (see 'Call')\n")
}
cat("\n")
cat("No. of items:",K,"\n")
cat("No. of mixture components:",G,"\n")
cat("No. of iterations:",L,"\n")
cat("\n")
cat("Max. log-likelihood:",max(mpPLMIX_out$log_lik,na.rm=TRUE),"\n")
cat("Max. objective function:",mpPLMIX_out$max_objective,"\n")
if(!is.null(mpPLMIX_out$bic)){
cat("BIC:",mpPLMIX_out$bic,"\n")
}
cat("\n")
cat("Algorithm convergence check:",ifelse(mpPLMIX_out$conv,"Ok.","Failed."),"\n")
}else{
G=length(mpPLMIX_out$mod$W_map)
K=ncol(mpPLMIX_out$mod$P_map)
L=length(mpPLMIX_out$mod$log_lik)
cat("MAP estimation procedure for a Bayesian mixture of Plackett-Luce models with",length(mpPLMIX_out$convergence),"starting values ====> best solution reported:\n")
if(!is.null(mpPLMIX_out$mod$bic)){
cat("Prior distribution used: flat (default) ====> MAP = MLE\n")
}else{
cat("Prior distribution used: subjective (see 'Call')\n")
}
cat("\n")
cat("No. of items:",K,"\n")
cat("No. of mixture components:",G,"\n")
cat("No. of iterations:",L,"\n")
cat("\n")
cat("Max. log-likelihood:",max(mpPLMIX_out$mod$log_lik,na.rm=TRUE),"\n")
cat("Max. objective function:",mpPLMIX_out$mod$max_objective,"\n")
if(!is.null(mpPLMIX_out$mod$bic)){
cat("BIC:",mpPLMIX_out$mod$bic,"\n")
}
cat("\n")
cat("Algorithm convergence check:",ifelse(mpPLMIX_out$mod$conv,"Ok.","Failed."),"\n")
}
cat("\n")
cat("Use functions summary() and plot() to summarize and visualize the object of class 'mpPLMIX'.")
}
print.summary.mpPLMIX <- function(x,...){
#/' Print of the summary of MAP estimation for a Bayesian mixture of Plackett-Luce models
#/'
#/' \code{print} method for class \code{summary.mpPLMIX}. It provides summaries for the MAP estimation of a Bayesian mixture of Plackett-Luce models.
#/'
#/'
#/' @param x Object of class \code{summary.mpPLMIX} returned by the \code{summary.mpPLMIX} function.
#/' @param ... Further arguments passed to or from other methods (not used).
#/'
#/'
#/' @references
#/' Mollica, C. and Tardella, L. (2017). Bayesian Plackett-Luce mixture models for partially ranked data. \emph{Psychometrika}, \bold{82}(2), pages 442--458, ISSN: 0033-3123, DOI: 10.1007/s11336-016-9530-0.
#/'
#/' Mollica, C. and Tardella, L. (2014). Epitope profiling via mixture modeling for ranked data. \emph{Statistics in Medicine}, \bold{33}(21), pages 3738--3758, ISSN: 0277-6715, DOI: 10.1002/sim.6224.
#/'
#/' @author Cristina Mollica and Luca Tardella
summary.mpPLMIX_out=x
if(!inherits(summary.mpPLMIX_out,"summary.mpPLMIX")){
stop("The function requires an object of S3 class 'summary.mpPLMIX' as its first argument.")
}
G=length(summary.mpPLMIX_out$MAP_w)
cat("\nCall:\n", paste(deparse(summary.mpPLMIX_out$call), sep = "\n", collapse = "\n"),
"\n\n", sep = "")
if(G>1){
cat("MAP estimates of the mixture weghts:\n")
print(summary.mpPLMIX_out$MAP_w)
cat("\n")
}
cat("MAP estimates of the support parameters:\n")
print(summary.mpPLMIX_out$MAP_p)
cat("\n")
cat("Estimated component-specific modal orderings:\n")
print(summary.mpPLMIX_out$Modal_orderings)
cat("\n")
if(G>1){
cat("Relative frequency distribution of group memberships:\n")
print(summary.mpPLMIX_out$group_distr)
cat("\n")
}
if(!is.null(summary.mpPLMIX_out$perc_conv_rate)){
cat("Convergence percentage over multiple initialization:\n")
print(c(Conv=summary.mpPLMIX_out$perc_conv_rate))
}
}
summary.mpPLMIX <- function(object,digits=2,...){
#' Summary of the MAP estimation for a Bayesian mixture of Plackett-Luce models
#'
#' \code{summary} method for class \code{mpPLMIX}. It provides summaries for the MAP estimation of a Bayesian mixture of Plackett-Luce models.
#'
#' @param object Object of class \code{mpPLMIX} returned by the \code{mapPLMIX} or \code{mapPLMIX_multistart} function.
#' @param digits Number of decimal places for rounding the summaries.
#' @param ... Further arguments passed to or from other methods (not used).
#'
#' @return A list of summaries for the \code{mpPLMIX} class object:
#'
#' \item{\code{MAP_w}}{ Numeric vector with the MAP estimates of the \eqn{G} mixture weights. Returned only when \eqn{G>1}.}
#' \item{\code{MAP_p}}{ Numeric \eqn{G}\eqn{\times}{x}\eqn{K} matrix with the MAP estimates of the component-specific support parameters.}
#' \item{\code{MAP_modal_orderings}}{ Numeric \eqn{G}\eqn{\times}{x}\eqn{K} matrix with the estimated modal orderings of each mixture component.}
#' \item{\code{group_distr}}{ Numeric vector with the relative frequency distribution of the mixture component memberships based on MAP allocation. Returned only when \eqn{G>1}.}
#' \item{\code{perc_conv_rate}}{ Numeric scalar with the percentage of MAP algorithm convergence over the multiple starting points. Returned only when \code{summary.mpPLMIX} is applied to the output of the \code{mapPLMIX_multistart} function.}
#'
#' @author Cristina Mollica and Luca Tardella
#'
#' @examples
#'
#' ## Summary of the MAP procedure with a single starting point
#' data(d_carconf)
#' MAP <- mapPLMIX(pi_inv=d_carconf, K=ncol(d_carconf), G=3)
#' summary(MAP)
#'
#' ## Summary of the MAP procedure with 5 starting points
#' MAP_multi <- mapPLMIX_multistart(pi_inv=d_carconf, K=ncol(d_carconf), G=3, n_start=5)
#' summary(MAP_multi)
#' @export summary.mpPLMIX
#' @export
mpPLMIX_out=object
if(!inherits(mpPLMIX_out,"mpPLMIX")){
stop("The function requires an object of S3 class 'mpPLMIX' as its first argument.")
}
cl=mpPLMIX_out$call
if(is.null(mpPLMIX_out$convergence)){
G=length(mpPLMIX_out$W_map)
K=ncol(mpPLMIX_out$P_map)
out=list(MAP_w=mpPLMIX_out$W_map,MAP_p=mpPLMIX_out$P_map,
Modal_orderings=t(apply(matrix(mpPLMIX_out$P_map,nrow=G,ncol=K),1,order,decreasing=TRUE)),
group_distr=prop.table(table(factor(mpPLMIX_out$class_map,levels=1:G))),
call=cl)
out[c(1:2,4)]=lapply(out[c(1:2,4)],round,digits=digits)
dimnames(out$Modal_orderings) <- list(paste0("g_",1:G),paste0("Rank_",1:K))
}else{
G=length(mpPLMIX_out$mod$W_map)
K=ncol(mpPLMIX_out$mod$P_map)
out=list(MAP_w=mpPLMIX_out$mod$W_map,MAP_p=mpPLMIX_out$mod$P_map,
Modal_orderings=t(apply(matrix(mpPLMIX_out$mod$P_map,nrow=G,ncol=K),1,order,decreasing=TRUE)),
group_distr=prop.table(table(factor(mpPLMIX_out$mod$class_map,levels=1:G))),
perc_conv_rate=100*mean(mpPLMIX_out$convergence),
call=cl)
out[c(1:2,4:5)]=lapply(out[c(1:2,4:5)],round,digits=digits)
}
dimnames(out$Modal_orderings) <- list(paste0("g_",1:G),paste0("Rank_",1:K))
class(out)="summary.mpPLMIX"
out
}
plot.mpPLMIX <- function(x,max_scale_radar=NULL,...){
#' Plot the MAP estimates for a Bayesian mixture of Plackett-Luce models
#'
#' \code{plot} method for class \code{mpPLMIX}.
#'
#' By calling the \code{chartJSRadar} function from the \code{radarchart} package and the routines of the \code{ggplot2} package, \code{plot.mpPLMIX} produces a radar plot of the support parameters and, when \eqn{G>1}, a donut plot of the mixture weights and a heatmap of the component membership probabilities based on the MAP estimates. The radar chart is returned in the Viewer Pane.
#'
#' @param x Object of class \code{mpPLMIX} returned by the \code{mapPLMIX} or \code{mapPLMIX_multistart} function.
#' @param max_scale_radar Numeric scalar indicating the maximum value on each axis of the radar plot for the support parameter point estimates. Default is \code{NULL} meaning that the maximum of the estimated support parameters is used.
#' @param ... Further arguments passed to or from other methods (not used).
#'
#'
#' @references
#' Ashton, D. and Porter, S. (2016). radarchart: Radar Chart from 'Chart.js'. R package version 0.3.1. \url{https://CRAN.R-project.org/package=radarchart}
#'
#' Wickham, H. (2009). ggplot2: Elegant Graphics for Data Analysis. Springer-Verlag New York.
#'
#' @author Cristina Mollica and Luca Tardella
#'
#' @seealso \code{\link[radarchart]{chartJSRadar}} and \code{\link[ggplot2]{ggplot}}
#'
#' @examples
#'
#' # Not run:
#' data(d_carconf)
#' MAP <- mapPLMIX(pi_inv=d_carconf, K=ncol(d_carconf), G=3)
#' plot(MAP)
#'
#' # Not run:
#' MAP_multi <- mapPLMIX_multistart(pi_inv=d_carconf, K=ncol(d_carconf), G=3, n_start=5)
#' plot(MAP_multi)
#' @export plot.mpPLMIX
#' @export
mpPLMIX_out=x
if(!inherits(mpPLMIX_out,"mpPLMIX")){
stop("The function requires an object of S3 class 'mpPLMIX' as its first argument.")
}
if(is.null(mpPLMIX_out$convergence)){
G=length(mpPLMIX_out$W_map)
K=ncol(mpPLMIX_out$P_map)
N=nrow(mpPLMIX_out$z_hat)
}else{
G=length(mpPLMIX_out$mod$W_map)
K=ncol(mpPLMIX_out$mod$P_map)
N=nrow(mpPLMIX_out$mod$z_hat)
}
labs <- paste("Item",1:K)
if(is.null(mpPLMIX_out$convergence)){
scores=as.list(as.data.frame(t(mpPLMIX_out$P_map)))
}else{
scores=as.list(as.data.frame(t(mpPLMIX_out$mod$P_map)))
}
names(scores)=paste("Group",1:G)
main_radar="MAP estimates of the support parameters"
oo=chartJSRadar(scores = scores, labs = labs, main=main_radar,maxScale = ifelse(is.null(max_scale_radar),max(unlist(scores)),max_scale_radar))
print(oo)
if(G>1){
if(is.null(mpPLMIX_out$convergence)){
df_w <- data.frame(Composition = paste0(paste("Group",1:G),":"),value=mpPLMIX_out$W_map,label=paste(paste0(paste("Group",1:G),":"),paste0(round(mpPLMIX_out$W_map*100), "%")))
df_z=as.data.frame(mpPLMIX_out$z_hat)
}else{
df_w <- data.frame(Composition = paste0(paste("Group",1:G),":"),value=mpPLMIX_out$mod$W_map,label=paste(paste0(paste("Group",1:G),":"),paste0(round(mpPLMIX_out$mod$W_map*100), "%")))
df_z=as.data.frame(mpPLMIX_out$mod$z_hat)
}
pp=ggplot(df_w, aes_string(x=2, y = "value", fill = "label")) +
geom_bar(stat = "identity", color = "white") +
coord_polar(theta = "y", start = 0)+
labs(x = NULL, y = NULL, fill = NULL, title = "Sample composition by group membership")+
scale_fill_brewer(palette="Blues")+
theme_void()+
xlim(0.5, 2.5)
names(df_z)=paste("Group",1:G)
df_z=data.frame(Unit=paste("Unit",1:N),df_z)
df_z_m=melt(df_z,id.vars="Unit")
# zz=ggplot(df_z_m, aes_string("variable", "Unit")) +
# geom_tile(aes_string(fill = "value"), colour = "white") +
# labs(x = "", y = "Sample units", fill = NULL, title = "Component membership probabilities")+
# theme(axis.text.y = element_blank(),
# axis.ticks = element_blank())+
# scale_fill_gradient(low = "white", high = "steelblue")
zz=ggplot(df_z_m, aes_string("Unit", "variable")) +
geom_tile(aes_string(fill = "value"), colour = "white") +
labs(x = "Sample units", y = "", fill = NULL, title = "Component membership probabilities")+
theme(axis.text.x = element_blank(),
axis.ticks = element_blank())+
scale_fill_gradient(low = "white", high = "steelblue")
grid.arrange(pp,zz,nrow=2)
}
}
##########################################################
############# GIBBS SAMPLING #############################
gibbsPLMIX <- function(pi_inv,K,G,
init=list(z=NULL,p=NULL),
n_iter=1000,
n_burn=500,
hyper=list(shape0=matrix(1,nrow=G,ncol=K),rate0=rep(0.001,G),alpha0=rep(1,G)),
centered_start=FALSE){
#' Gibbs sampling for a Bayesian mixture of Plackett-Luce models
#'
#' Perform Gibbs sampling simulation for a Bayesian mixture of Plackett-Luce models fitted to partial orderings.
#'
#' The size \eqn{L} of the final MCMC sample is equal to \code{n_iter}-\code{n_burn}.
#'
#' @param pi_inv An object of class \code{top_ordering}, collecting the numeric \eqn{N}\eqn{\times}{x}\eqn{K} data matrix of partial orderings, or an object that can be coerced with \code{\link{as.top_ordering}}.
#' @param K Number of possible items.
#' @param G Number of mixture components.
#' @param init List of named objects with initialization values: \code{z} is a numeric \eqn{N}\eqn{\times}{x}\eqn{G} matrix of binary mixture component memberships; \code{p} is a numeric \eqn{G}\eqn{\times}{x}\eqn{K} matrix of component-specific support parameters. If starting values are not supplied (\code{NULL}), they are randomly generated with a uniform distribution. Default is \code{NULL}.
#' @param n_iter Total number of MCMC iterations.
#' @param n_burn Number of initial burn-in drawings removed from the returned MCMC sample.
#' @param hyper List of named objects with hyperparameter values for the conjugate prior specification: \code{shape0} is a numeric \eqn{G}\eqn{\times}{x}\eqn{K} matrix of shape hyperparameters; \code{rate0} is a numeric vector of \eqn{G} rate hyperparameters; \code{alpha0} is a numeric vector of \eqn{G} Dirichlet hyperparameters. Default is vague prior setting.
#' @param centered_start Logical: whether the random starting values of the support parameters and weights should be centered around the observed relative frequencies with which each item has been ranked top. Default is \code{FALSE}. Ignored when \code{init} is not \code{NULL}.
#'
#' @return A list of S3 class \code{gsPLMIX} with named elements:
#'
#' \item{\code{W}}{ Numeric \eqn{L}\eqn{\times}{x}\eqn{G} matrix with MCMC samples of the mixture weights.}
#' \item{\code{P}}{ Numeric \eqn{L}\eqn{\times}{x}\eqn{(G*K)} matrix with MCMC samples of the component-specific support parameters.}
#' \item{\code{log_lik}}{ Numeric vector of \eqn{L} posterior log-likelihood values.}
#' \item{\code{deviance}}{ Numeric vector of \eqn{L} posterior deviance values (\eqn{-2 * }\code{log_lik}).}
#' \item{\code{objective}}{ Numeric vector of \eqn{L} objective function values (that is the kernel of the log-posterior distribution).}
#' \item{\code{call}}{ The matched call.}
#'
#' @references
#' Mollica, C. and Tardella, L. (2017). Bayesian Plackett-Luce mixture models for partially ranked data. \emph{Psychometrika}, \bold{82}(2), pages 442--458, ISSN: 0033-3123, DOI: 10.1007/s11336-016-9530-0.
#'
#' @author Cristina Mollica and Luca Tardella
#'
#' @examples
#'
#' data(d_carconf)
#' GIBBS <- gibbsPLMIX(pi_inv=d_carconf, K=ncol(d_carconf), G=3, n_iter=30, n_burn=10)
#' str(GIBBS)
#' GIBBS$P
#' GIBBS$W
#'
#' @export
cl=match.call()
if(class(pi_inv)[1]!="top_ordering"){
if(class(pi_inv)[1]=="RankData"){
pi_inv=as.top_ordering(data=pi_inv)
}
if(class(pi_inv)[1]=="rankings"){
pi_inv=as.top_ordering(data=pi_inv)
}
if(class(pi_inv)[1]=="matrix" | class(pi_inv)[1]=="data.frame"){
pi_inv=as.top_ordering(data=pi_inv,format_input="ordering",aggr=FALSE)
}
}
pi_inv <- fill_single_entries(data=pi_inv)
N <- nrow(pi_inv)
n_rank <- howmanyranked(pi_inv)
rho <- matrix(1:K,nrow=G,ncol=K,byrow=TRUE)
if(is.null(init$z)){
z <- binary_group_ind(class=sample(1:G,size=N,replace=TRUE),G=G)
}else{
z <- init$z
}
omega <- colMeans(z)
if(is.null(init$p)){
if(centered_start){
print("CENTERED START !!")
# omega <- rdirichlet(1,rep(1,G))
mle1comp <- matrix(prop.table(table(factor(pi_inv[,1],levels=1:K))),nrow=1)
p <- random_start(mlesupp=mle1comp, givenweights=omega)
# p <- p/rowSums(p)
}else{
print("COMPLETELY RANDOM (uniform support, rescaled) START")
p <- matrix(rgamma(n=G*K,shape=1,rate=1),nrow=G,ncol=K)
}
}else{
p <- init$p
}
shape0 <- hyper$shape0
rate0 <- hyper$rate0
alpha0 <- hyper$alpha0
u_bin <- umat(pi_inv=pi_inv)
log_lik <- c(loglikPLMIX(p=p,ref_order=rho,weights=omega,pi_inv=pi_inv),
rep(NA,n_iter))
log_prior <- c(log(ddirichlet(omega,alpha0))+sum(dgamma(p,shape=shape0,rate=rate0,log=TRUE)),
rep(NA,n_iter))
objective <- log_lik+log_prior
Pi <- array(NA,dim=c(G,K,n_iter+1))
Pi[,,1] <- p
Zeta <- z
Omega <- matrix(NA,nrow=n_iter+1,ncol=G)
Omega[1,] <- omega
for(l in 1:n_iter){
if(l%%500==0){
print(paste("GIBBS iteration",l))
}
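# One Gibbs scan: draw the mixture weights, the latent auxiliary variables,
# the support parameters and the component memberships from their full conditionals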
Omega[l+1,] <- rdirichlet(n=1,alpha=alpha0+colSums(Zeta))
temprate <- CompRateYpartial(p=adrop(Pi[,,l,drop=FALSE],3),pi_inv=pi_inv,ref_order=rho,z=Zeta,n_rank=n_rank)
Ypsilon <- SimYpsilon(rate=temprate,n_rank=n_rank)
Pi[,,l+1] <- matrix(rgamma(n=G*K,shape=shape0+gammamat(u_bin=u_bin,z_hat=Zeta),
rate <- CompRateP(pi_inv=pi_inv, Y=Ypsilon, z=Zeta, u_bin=u_bin, n_rank=n_rank, rate0=rate0)),nrow=G,ncol=K)
Zeta <- binary_group_ind(apply(CompProbZpartial(p=adrop(Pi[,,l+1,drop=FALSE],3),pi_inv=pi_inv,Y=Ypsilon, u_bin=u_bin,n_rank,omega=Omega[l+1,]),1,FUN=sample,x=1:G,replace=TRUE,size=1),G=G)
log_lik[l+1] <- loglikPLMIX(p=adrop(Pi[,,l+1,drop=FALSE],3),ref_order=rho,weights=Omega[l+1,],
pi_inv=pi_inv)
log_prior[l+1] <- log(ddirichlet(Omega[l+1,],alpha0))+sum(dgamma(adrop(Pi[,,l+1,drop=FALSE],3),shape=shape0,rate=rate0,log=TRUE))
objective[l+1] <- log_lik[l+1]+log_prior[l+1]
}
log_lik <- log_lik[-c(1:(n_burn+1))]
objective <- objective[-c(1:(n_burn+1))]
Omega <- Omega[-c(1:(n_burn+1)),,drop=FALSE]
colnames(Omega) <- paste0("w_",1:G)
Pi <- array(apply(Pi,3,FUN=function(x)x/rowSums(x)),c(G,K,n_iter+1))
Pi=t(apply(Pi,3,c))[-c(1:(n_burn+1)),]
colnames(Pi) <- paste0("p_",rep(1:G,K),rep(1:K,each=G))
out=list(W=Omega,P=Pi,log_lik=log_lik,deviance=-2*log_lik,objective=objective,call=cl)
class(out)="gsPLMIX"
return(out)
}
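## Sketch: the returned chains have n_iter - n_burn rows, so quick posterior
## means can be read off the W and P traces (columns p_<g><k> follow the
## G x K layout documented above):
# GIBBS <- gibbsPLMIX(pi_inv = d_carconf, K = ncol(d_carconf), G = 3,
#                     n_iter = 30, n_burn = 10)
# nrow(GIBBS$P)      # 20 saved MCMC draws
# colMeans(GIBBS$W)  # posterior means of the mixture weights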
gsPLMIX_to_mcmc <- function(gsPLMIX_out){
#' MCMC class objects from the Gibbs sampling simulations of a Bayesian mixture of Plackett-Luce models
#'
#' Coerce the Gibbs sampling simulations for a Bayesian mixture of Plackett-Luce models into an \code{mcmc} class object.
#'
#' \code{gsPLMIX_to_mcmc} attempts to coerce its argument by calling the \code{as.mcmc} function of the \code{coda} package.
#'
#' @param gsPLMIX_out Object of class \code{gsPLMIX} returned by the \code{gibbsPLMIX} function.
#'
#' @return An \code{mcmc} class object.
#'
#' @references
#' Plummer, M., Best, N., Cowles, K. and Vines, K. (2006). CODA: Convergence Diagnosis and Output Analysis for MCMC, \emph{R News}, \bold{6}, pages 7--11, ISSN: 1609-3631.
#'
#' @author Cristina Mollica and Luca Tardella
#'
#' @seealso \code{\link[coda]{as.mcmc}}
#'
#' @examples
#'
#' data(d_carconf)
#' GIBBS <- gibbsPLMIX(pi_inv=d_carconf, K=ncol(d_carconf), G=3, n_iter=30, n_burn=10)
#'
#' ## Coerce the posterior samples into an mcmc class object
#' gsPLMIX_to_mcmc(GIBBS)
#'
#' @export
if(!inherits(gsPLMIX_out,"gsPLMIX")){
stop("The function requires an object of S3 class 'gsPLMIX' as its first argument.")
}
G=ncol(gsPLMIX_out$W)
K=ncol(gsPLMIX_out$P)/G
class(gsPLMIX_out)="list"
gsPLMIX_out=gsPLMIX_out[-length(gsPLMIX_out)] # to remove call element
out=as.data.frame(gsPLMIX_out)
colnames(out)=c(paste0("w_",1:G),paste0("p_",rep(1:G,K),rep(1:K,each=G)),"log_lik","deviance","objective")
out=as.mcmc(out)
return(out)
}
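## Once coerced, the usual coda toolkit applies; a minimal sketch:
# library(coda)
# post <- gsPLMIX_to_mcmc(GIBBS)
# effectiveSize(post)  # effective sample size per parameter
# geweke.diag(post)    # Geweke convergence z-scores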
summary.gsPLMIX <- function(object,quantiles=c(0.025, 0.25, 0.5, 0.75, 0.975),hpd_prob=0.95,digits=2,...){
#' Summary of the Gibbs sampling procedure for a Bayesian mixture of Plackett-Luce models
#'
#' \code{summary} method for class \code{gsPLMIX}. It provides summary statistics and credible intervals for the Gibbs sampling simulation of a Bayesian mixture of Plackett-Luce models.
#'
#' Posterior summaries include means, standard deviations, naive standard errors of the means (ignoring autocorrelation of the chain) and time-series standard errors based on an estimate of the spectral density at 0. They correspond to the \code{statistics} element of the output returned by the \code{summary.mcmc} function of the \code{coda} package. Highest posterior density (HPD) intervals are obtained by recalling the \code{HPDinterval} function of the \code{coda} package.
#'
#' @param object Object of class \code{gsPLMIX} returned by the \code{gibbsPLMIX} function.
#' @param quantiles Numeric vector of quantile probabilities.
#' @param hpd_prob Numeric scalar on the grid of values spanning the interval (0,1) with step 0.05, giving the posterior probability content of the HPD intervals. Supplied values outside the grid are rounded.
#' @param digits Number of decimal places for rounding the posterior summaries.
#' @param ... Further arguments passed to or from other methods (not used).
#'
#' @return A list of summary statistics for the \code{gsPLMIX} class object:
#'
#' \item{\code{statistics}}{ Numeric matrix with posterior summaries in each row (see 'Details').}
#' \item{\code{quantiles}}{ Numeric matrix with posterior quantiles at the given \code{quantiles} probabilities in each row.}
#' \item{\code{HPDintervals}}{ Numeric matrix with 100\eqn{*}\code{hpd_prob}\% HPD intervals in each row.}
#' \item{\code{Modal_orderings}}{ Numeric \eqn{G}\eqn{\times}{x}\eqn{K} matrix with the estimated posterior modal orderings of each mixture component.}
#' \item{\code{call}}{ The matched call.}
#'
#' @references
#' Plummer, M., Best, N., Cowles, K. and Vines, K. (2006). CODA: Convergence Diagnosis and Output Analysis for MCMC, \emph{R News}, \bold{6}, pages 7--11, ISSN: 1609-3631.
#'
#' @author Cristina Mollica and Luca Tardella
#'
#' @seealso \code{\link[coda]{summary.mcmc}} and \code{\link[coda]{HPDinterval}}
#'
#' @examples
#'
#' data(d_carconf)
#' GIBBS <- gibbsPLMIX(pi_inv=d_carconf, K=ncol(d_carconf), G=3, n_iter=30, n_burn=10)
#'
#' ## Summary of the Gibbs sampling procedure
#' summary(GIBBS)
#' @export summary.gsPLMIX
#' @export
gsPLMIX_out=object
G=ncol(gsPLMIX_out$W)
K=ncol(gsPLMIX_out$P)/G
cl=gsPLMIX_out$call
mcmc_obj=gsPLMIX_to_mcmc(gsPLMIX_out)
p_idx=grep(pattern="p_",x=colnames(mcmc_obj))
temp=getFromNamespace("summary.mcmc",ns="coda")(object=mcmc_obj,quantiles=quantiles)[1:2]
hpd_int=HPDinterval(mcmc_obj,prob=hpd_prob)
# attr(hpd_int,"Probability")=NULL
out=list(statistics=temp[[1]],quantiles=as.matrix(temp[[2]]),HPD_intervals=hpd_int,
Modal_orderings=t(apply(matrix(temp$statistics[p_idx,"Mean"],nrow=G,ncol=K),1,order,decreasing=TRUE)),
call=cl)
out[1:3]=lapply(out[1:3],round,digits=digits)
names(out)[3]=paste0(100*hpd_prob,"%_HPD_intervals")
if(length(quantiles)==1){
colnames(out$quantiles)=paste0(quantiles*100,"%")
}
dimnames(out$Modal_orderings) <- list(paste0("g_",1:G),paste0("Rank_",1:K))
class(out)="summary.gsPLMIX"
out
}
plot.gsPLMIX <- function(x,file="ggmcmc-output.pdf",family=NA,plot=NULL,param_page=5,width=7,height=10,dev_type_html="png",post_est="mean",max_scale_radar=NULL,...){
#' Plot the Gibbs sampling simulations for a Bayesian mixture of Plackett-Luce models
#'
#' \code{plot} method for class \code{gsPLMIX}. It builds a suite of plots, visual convergence diagnostics and credible intervals for the MCMC samples of a Bayesian mixture of Plackett-Luce models. Graphics can be plotted directly into the current working device or stored into an external file placed into the current working directory.
#'
#' Plots of the MCMC samples include histograms, densities, traceplots, running mean plots, overlapped densities comparing the complete and partial samples, autocorrelation functions, crosscorrelation plots and caterpillar plots of the 90 and 95\% equal-tail credible intervals. Note that the latter are created for the support parameters (when either \code{family=NA} or \code{family="p"}), for the mixture weights in the case \eqn{G>1} (when either \code{family=NA} or \code{family="w"}), for the log-likelihood values (when \code{family="log_lik"}), for the deviance values (when \code{family="deviance"}) and for the objective function values (when \code{family="objective"}). Convergence tools include the potential scale reduction factor and the Geweke z-score. These functionalities are implemented with a call to the \code{ggs} and \code{ggmcmc} functions of the \code{ggmcmc} package (see 'Examples' for the specification of the \code{plot} argument).
#'
#' By calling the \code{chartJSRadar} function from the \code{radarchart} package and the routines of the \code{ggplot2} package, \code{plot.gsPLMIX} additionally produces a radar plot of the support parameters and, when \eqn{G>1}, a donut plot of the mixture weights based on the posterior point estimates. The radar chart is returned in the Viewer Pane.
#'
#' @param x Object of class \code{gsPLMIX} returned by the \code{gibbsPLMIX} function.
#' @param file Character string with the name of the file to be created in the current working directory. Default is "ggmcmc-output.pdf". When \code{NULL}, plots are directly returned into the current working device (not recommended). This option also allows the user to work with an already opened pdf (or other) device. When the file has an html extension, the output is an Rmarkdown report with the figures embedded in the html file.
#' @param family Character string indicating the name of the family of parameters to be plotted. A family of parameters is considered to be any group of parameters with the same name but different numerical values (for example \code{w[1]}, \code{w[2]}, etc). Default is \code{NA} meaning that all the parameters in the chain are plotted. Alternatively, one can choose \code{"w"}, \code{"p"}, \code{"log_lik"}, \code{"deviance"} or \code{"objective"}.
#' @param plot Character vector containing the names of the desired plots. Default is \code{NULL} meaning that all the plots and convergence diagnostics are built (see 'Details').
#' @param param_page Number of parameters to be plotted in each page. Default is 5.
#' @param width Numeric scalar indicating the width of the pdf display in inches. Default is 7.
#' @param height Numeric scalar indicating the height of the pdf display in inches. Default is 10.
#' @param dev_type_html Character vector indicating the type of graphical device for the html output. Default is \code{"png"}. Alternatively, one can choose \code{"svg"}.
#' @param post_est Character string indicating the point estimates of the Plackett-Luce mixture parameters to be computed from the \code{gsPLMIX} class object and then plotted in the current working device. Default is \code{"mean"}. Alternatively, one can choose \code{"median"}.
#' @param max_scale_radar Numeric scalar indicating the maximum value on each axis of the radar plot for the support parameter point estimates. Default is \code{NULL} meaning that the maximum of the estimated support parameters is used.
#' @param ... Further arguments passed to or from other methods (not used).
#'
#'
#' @references
#' Ashton, D. and Porter, S. (2016). radarchart: Radar Chart from 'Chart.js'. R package version 0.3.1. \url{https://CRAN.R-project.org/package=radarchart}
#'
#' Wickham, H. (2009). ggplot2: Elegant Graphics for Data Analysis. Springer-Verlag New York.
#'
#' Fernandez-i-Marin, X. (2006). ggmcmc: Analysis of MCMC Samples and Bayesian Inference, \emph{Journal of Statistical Software}, \bold{70}(9), pages 1--20, DOI: 10.18637/jss.v070.i09.
#'
#' @author Cristina Mollica and Luca Tardella
#'
#' @seealso \code{\link[ggmcmc]{ggs}}, \code{\link[ggmcmc]{ggmcmc}}, \code{\link[radarchart]{chartJSRadar}} and \code{\link[ggplot2]{ggplot}}
#'
#' @examples
#'
#' # Not run:
#' data(d_carconf)
#' GIBBS <- gibbsPLMIX(pi_inv=d_carconf, K=ncol(d_carconf), G=5, n_iter=30, n_burn=10)
#'
#' # Not run:
#' # Plot posterior samples supplied as an gsPLMIX class object
#' # plot(GIBBS)
#'
#' # Selected plots of the posterior samples of the support parameters
#' # plot(GIBBS, family="p", plot=c("compare_partial","Rhat","caterpillar"), param_page=6)
#'
#' # Selected plots of the posterior samples of the mixture weights
#' # plot(GIBBS, family="w", plot=c("histogram","running","crosscorrelation","caterpillar"))
#'
#' # Selected plots of the posterior log-likelihood values
#' # plot(GIBBS, family="log_lik", plot=c("autocorrelation","geweke"), param_page=1)
#'
#' # Selected plots of the posterior deviance values
#' # plot(GIBBS, family="deviance", plot=c("traceplot","density"), param_page=1)
#' @export plot.gsPLMIX
#' @export
gsPLMIX_out=x
mcmc_obj=gsPLMIX_to_mcmc(gsPLMIX_out=gsPLMIX_out)
G=ncol(gsPLMIX_out$W)
K=ncol(gsPLMIX_out$P)/G
n_par=G+G*K
colnames(mcmc_obj)[1:n_par]=gsub("_","[",colnames(mcmc_obj)[1:n_par])
colnames(mcmc_obj)[1:n_par]=paste0(colnames(mcmc_obj)[1:n_par],"]")
if(G==1){
mcmc_obj=mcmc_obj[,-1]
}
tbl_obj = ggs(S=mcmc_obj)
if(!is.na(family) & family=="w" & G==1){
message(paste("No. of mixture components:",G, "====> w[1] = 1"))
}else{
simplify_traceplot=NULL
mcmc_plot=ggmcmc(tbl_obj,file=file,family=family,plot=plot,param_page=param_page,width=width,height=height,
simplify_traceplot=simplify_traceplot,dev_type_html=dev_type_html)
print(mcmc_plot)
}
# if(!is.na(family) & family=="p"){
labs <- paste("Item",1:K)
temp_radar=summary(object=gsPLMIX_out,quantiles=0.5)
if(post_est=="mean"){
scores=as.list(as.data.frame(t(matrix(temp_radar$statistics[grep("p",rownames(temp_radar$quantiles)),"Mean"],nrow=G,ncol=K))))
main_radar="Posterior means of the support parameters"
}else{
scores=as.list(as.data.frame(t(matrix(temp_radar$quantiles[grep("p",rownames(temp_radar$quantiles)),],nrow=G,ncol=K))))
main_radar="Posterior medians of the support parameters"
}
names(scores)=paste("Group",1:G)
oo=chartJSRadar(scores = scores, labs = labs, main=main_radar,maxScale = ifelse(is.null(max_scale_radar),max(unlist(scores)),max_scale_radar))
print(oo)
# }
# if(!is.na(family) & family=="w" & G>1){
if(G>1){
temp_radar=summary(object=gsPLMIX_out,quantiles=0.5)
if(post_est=="mean"){
temp_value=temp_radar$statistics[grep("w",rownames(temp_radar$quantiles)),"Mean"]
}else{
temp_value=temp_radar$quantiles[grep("w",rownames(temp_radar$quantiles)),]
}
df_w <- data.frame(Composition = paste0(paste("Group",1:G),":"),value=temp_value,label=paste(paste0(paste("Group",1:G),":"),paste0(round(temp_value*100), "%")))
pp=ggplot(df_w, aes_string(x = 2, y = "value", fill = "label")) +
geom_bar(stat = "identity", color = "white") +
coord_polar(theta = "y", start = 0)+
labs(x = NULL, y = NULL, fill = NULL, title = "Sample composition by group membership")+
# geom_text(aes(label = paste0(round(value*100), "%")), position = position_stack(vjust = 0.5))+
scale_fill_brewer(palette="Blues")+
theme_void()+
xlim(0.5, 2.5)
print(pp)
}
}
print.gsPLMIX <- function(x,...){
#' Print of the Gibbs sampling simulation of a Bayesian mixture of Plackett-Luce models
#'
#' \code{print} method for class \code{gsPLMIX}. It shows some general information on the Gibbs sampling simulation for a Bayesian mixture of Plackett-Luce models.
#'
#'
#' @param x Object of class \code{gsPLMIX} returned by the \code{gibbsPLMIX} function.
#' @param ... Further arguments passed to or from other methods (not used).
#'
#' @author Cristina Mollica and Luca Tardella
#'
#' @seealso \code{\link{gibbsPLMIX}}
#'
#' @examples
#'
#' ## Print of the Gibbs sampling procedure
#' data(d_carconf)
#' GIBBS <- gibbsPLMIX(pi_inv=d_carconf, K=ncol(d_carconf), G=3, n_iter=30, n_burn=10)
#' print(GIBBS)
#' @export print.gsPLMIX
#' @export
gsPLMIX_out=x
if(!inherits(gsPLMIX_out,"gsPLMIX")){
stop("The function requires an object of S3 class 'gsPLMIX' as its first argument.")
}
G=ncol(gsPLMIX_out$W)
K=ncol(gsPLMIX_out$P)/G
L=nrow(gsPLMIX_out$W)
cat("\nCall:\n", paste(deparse(gsPLMIX_out$call), sep = "\n", collapse = "\n"),
"\n\n", sep = "")
cat("Gibbs sampling procedure for a Bayesian mixture of Plackett-Luce models:\n")
cat("\n")
cat("No. of items:",K,"\n")
cat("No. of mixture components:",G,"\n")
cat("No. of saved MCMC samples:",L,"\n")
cat("\n")
cat("Max. posterior log-likelihood:",max(gsPLMIX_out$log_lik,na.rm=TRUE),"\n")
cat("Min. posterior deviance:",min(gsPLMIX_out$deviance,na.rm=TRUE),"\n")
cat("Max. objective function:",max(gsPLMIX_out$objective,na.rm=TRUE),"\n")
cat("\n")
cat("Use functions summary() and plot() to summarize and visualize the object of class 'gsPLMIX'.")
}
print.summary.gsPLMIX <- function(x,...){
#/' Print of the summary of Gibbs sampling simulation of a Bayesian mixture of Plackett-Luce models.
#/'
#/' \code{print} method for class \code{summary.gsPLMIX}. It shows some general information on the Gibbs sampling simulation of a Bayesian mixture of Plackett-Luce models.
#/'
#/'
#/' @param x Object of class \code{summary.gsPLMIX} returned by the \code{summary.gibbsPLMIX} function.
#/' @param ... Further arguments passed to or from other methods (not used).
#/'
#/'
#/' @references
#/' Mollica, C. and Tardella, L. (2017). Bayesian Plackett-Luce mixture models for partially ranked data. \emph{Psychometrika}, \bold{82}(2), pages 442--458, ISSN: 0033-3123, DOI: 10.1007/s11336-016-9530-0.
#/'
#/' Mollica, C. and Tardella, L. (2014). Epitope profiling via mixture modeling for ranked data. \emph{Statistics in Medicine}, \bold{33}(21), pages 3738--3758, ISSN: 0277-6715, DOI: 10.1002/sim.6224.
#/'
#/' @author Cristina Mollica and Luca Tardella
summary.gsPLMIX_out=x
if(!inherits(summary.gsPLMIX_out,"summary.gsPLMIX")){
stop("The function requires an object of S3 class 'summary.gsPLMIX' as its first argument.")
}
cat("\nCall:\n", paste(deparse(summary.gsPLMIX_out$call), sep = "\n", collapse = "\n"),
"\n\n", sep = "")
cat("Posterior statistics:\n")
print(summary.gsPLMIX_out$statistics)
cat("\n")
cat("Quantiles:\n")
print(summary.gsPLMIX_out$quantiles)
cat("\n")
pr=paste0(100*attr(summary.gsPLMIX_out[[grep("HPD",names(summary.gsPLMIX_out))]],"Probability"),"%")
cat(pr,"HPD intervals:\n")
attr(summary.gsPLMIX_out[[grep("HPD",names(summary.gsPLMIX_out))]],"Probability")=NULL
print(summary.gsPLMIX_out[[grep("HPD",names(summary.gsPLMIX_out))]])
cat("\n")
cat("Estimated component-specific modal orderings:\n")
print(summary.gsPLMIX_out$Modal_orderings)
}
random_start <- function(mlesupp, givenweights, alpha=rep(1,G)){
#/' Random generation of starting values of the component-specific support parameters for Gibbs sampling.
#/' Starting from the MLE support corresponding to the no-group structure (G=1), it randomly selects rescaled support points (each summing to one) for the G mixture components, such that their weighted average coincides with the G=1 MLE support.
#/'
#/' @param mlesupp MLE of support parameters
#/' @param givenweights A numeric vector of \eqn{G} mixture weights
#/' @param alpha A numeric vector of \eqn{G} positive reals to be used as Dirichlet parameters for the random start which corresponds to a convex combination of \eqn{G} support parameter vertices
#/'
#/' @return \code{out} A numeric \eqn{G}\eqn{\times}{x}\eqn{K} matrix with starting values of the component-specific support parameters
#/'
#/' @author Cristina Mollica and Luca Tardella
K <- length(mlesupp)
G <- length(givenweights)
out <- matrix(NA,nrow=G,ncol=K)
if(G==1){
out[1,] <- mlesupp
}else{
# for each item j:
# compute the H-representation of the simplex constrained so that the
# weighted mean of the component supports equals mlesupp[j],
# transform it into the V-representation and
# draw a random convex combination of its vertices
for( j in 1:K ) {
Aineq <- rbind(-diag(G))
bineq <- c(rep(0, G))
Aeq <- matrix(givenweights,nrow=1)
beq <- mlesupp[j]
hr <- makeH(Aineq,bineq,Aeq,beq)
vr <- scdd(hr)
Vertexes <- t(vr$output[,-c(1,2)]) # as column vectors
myrandomcomponentwithconstrainedmean <- Vertexes%*%t(rdirichlet(1,alpha))
out[,j] <- myrandomcomponentwithconstrainedmean
}
}
return(out)
}
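## Sketch of the defining property: the weight-averaged component supports
## reproduce the supplied one-component MLE, i.e. givenweights %*% out equals
## mlesupp up to numerical error (toy values below are hypothetical; requires
## the rcdd-based helpers used above):
# w <- c(0.6, 0.4)
# p0 <- c(0.5, 0.3, 0.2)
# P <- random_start(mlesupp = p0, givenweights = w)
# drop(w %*% P)  # approximately c(0.5, 0.3, 0.2)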
#### Selection criteria
selectPLMIX_single <- function(pi_inv,G,
MCMCsampleP=NULL,
MCMCsampleW=NULL,
MAPestP,
MAPestW,
deviance,
post_est="mean"){
#/' Bayesian selection criteria for mixtures of Plackett-Luce models
#/'
#/' Compute Bayesian comparison criteria for mixtures of Plackett-Luce models with a different number of components.
#/'
#/' Two versions of DIC and BPIC are returned corresponding to two alternative ways of computing the penalty term: the former was proposed by Spiegelhalter et al. (2002) and is denoted with \code{pD}, whereas the latter was proposed by Gelman et al. (2004) and is denoted with \code{pV}. DIC2 coincides with AICM, that is, the Bayesian counterpart of AIC introduced by Raftery et al. (2007).
#/'
#/' @param pi_inv An object of class \code{top_ordering}, collecting the numeric \eqn{N}\eqn{\times}{x}\eqn{K} data matrix of partial orderings, or an object that can be coerced with \code{\link{as.top_ordering}}.
#/' @param G Number of mixture components.
#/' @param MCMCsampleP Numeric \eqn{L}\eqn{\times}{x}\eqn{G*K} matrix with the MCMC samples of the component-specific support parameters.
#/' @param MCMCsampleW Numeric \eqn{L}\eqn{\times}{x}\eqn{G} matrix with the MCMC samples of the mixture weights.
#/' @param MAPestP Numeric \eqn{G}\eqn{\times}{x}\eqn{K} matrix of MAP component-specific support parameter estimates.
#/' @param MAPestW Numeric vector of the \eqn{G} MAP estimates of the mixture weights.
#/' @param deviance Numeric vector of posterior deviance values.
#/' @param post_est Character string indicating the point estimates of the Plackett-Luce mixture parameters to be computed from the MCMC sample. This argument is ignored when MAP estimates are supplied in the \code{MAPestP} and \code{MAPestW} arguments. Default is \code{"mean"}. Alternatively, one can choose \code{"median"}.
#/'
#/' @return A list of named objects:
#/'
#/' \item{\code{point_estP}}{ Numeric \eqn{G}\eqn{\times}{x}\eqn{K} matrix with the point estimates of the component-specific support parameters.}
#/' \item{\code{point_estW}}{ Numeric vector with the point estimates of the \eqn{G} mixture weights.}
#/' \item{\code{D_bar}}{ Posterior expected deviance.}
#/' \item{\code{D_hat}}{ Deviance function evaluated at \code{point_est}.}
#/' \item{\code{pD}}{ Effective number of parameters computed as \code{D_bar}-\code{D_hat}.}
#/' \item{\code{pV}}{ Effective number of parameters computed as half the posterior variance of the deviance.}
#/' \item{\code{DIC1}}{ Deviance Information Criterion with penalty term equal to \code{pD}.}
#/' \item{\code{DIC2}}{ Deviance Information Criterion with penalty term equal to \code{pV}.}
#/' \item{\code{BPIC1}}{ Bayesian Predictive Information Criterion obtained from \code{DIC1} by doubling its penalty term.}
#/' \item{\code{BPIC2}}{ Bayesian Predictive Information Criterion obtained from \code{DIC2} by doubling its penalty term.}
#/' \item{\code{BICM1}}{ Bayesian Information Criterion-Monte Carlo.}
#/' \item{\code{BICM2}}{ Bayesian Information Criterion-Monte Carlo based on the actual MAP estimate given in the \code{MAPestP} and \code{MAPestW} arguments (unlike \code{BICM1}, it does not approximate the MAP estimate from the MCMC sample).}
#/'
#/'
#/' @references
#/' Mollica, C. and Tardella, L. (2017). Bayesian Plackett-Luce mixture models for partially ranked data. \emph{Psychometrika}, \bold{82}(2), pages 442--458, ISSN: 0033-3123, DOI: 10.1007/s11336-016-9530-0.
#/'
#/' Ando, T. (2007). Bayesian predictive information criterion for the evaluation of hierarchical Bayesian and empirical Bayes models. \emph{Biometrika}, \bold{94}(2), pages 443--458.
#/'
#/' Raftery, A. E., Satagopan, J. M., Newton, M. A. and Krivitsky, P. N. (2007). \emph{Bayesian Statistics 8: Proceedings of the Eighth Valencia International Meeting 2006}, pages 371--416. Oxford University Press.
#/'
#/' Gelman, A., Carlin, J. B., Stern, H. S. and Rubin, D. B. (2004). Bayesian data analysis. Chapman & Hall/CRC, Second Edition, ISBN: 1-58488-388-X. New York.
#/'
#/' Spiegelhalter, D. J., Best, N. G., Carlin, B. P., Van Der Linde, A. (2002). Bayesian measures of model complexity and fit. \emph{Journal of the Royal Statistical Society: Series B (Statistical Methodology)}, \bold{64}(4), pages 583--639.
#/'
#/' @author Cristina Mollica and Luca Tardella
if(class(pi_inv)[1]!="top_ordering"){
if(class(pi_inv)[1]=="RankData"){
pi_inv=as.top_ordering(data=pi_inv)
}
if(class(pi_inv)[1]=="rankings"){
pi_inv=as.top_ordering(data=pi_inv)
}
if(class(pi_inv)[1]=="matrix" | class(pi_inv)[1]=="data.frame"){
pi_inv=as.top_ordering(data=pi_inv,format_input="ordering",aggr=FALSE)
}
}
pi_inv <- fill_single_entries(data=pi_inv)
N <- nrow(pi_inv)
K <- ncol(pi_inv)
D_bar <- mean(deviance)
if(!is.null(MAPestP) & !is.null(MAPestW)){
point_estP <- MAPestP
point_estW <- MAPestW
}else{
if(post_est=="mean"){
point_estP <- matrix(colMeans(MCMCsampleP),G,K)
point_estW <- colMeans(MCMCsampleW)
}else{
point_estP <- matrix(apply(MCMCsampleP,2,FUN=median),G,K)
point_estW <- apply(MCMCsampleW,2,FUN=median)
}
}
rho <- matrix(1:K,nrow=G,ncol=K,byrow=TRUE)
D_hat <- -2*loglikPLMIX(p=point_estP,weights=point_estW,ref_order=rho,pi_inv=pi_inv)
pD <- D_bar-D_hat
pV <- var(deviance)/2
return(list(point_estP=point_estP,point_estW=point_estW,D_bar=D_bar,D_hat=D_hat,pD=pD,pV=pV,DIC1=D_bar+pD,DIC2=D_bar+pV,
BPIC1=D_bar+2*pD,BPIC2=D_bar+2*pV,BICM1=D_bar+pV*(log(x=N)-1),BICM2=D_hat+pV*log(x=N)))
}
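## A minimal sketch (not run) of how the criteria above combine the fitting and
## penalty terms, using hypothetical deviance draws, point-estimate deviance and
## sample size:
if (FALSE) {
  deviance <- rnorm(1000, mean = 500, sd = 5)  # hypothetical posterior deviance draws
  D_hat <- 495                                 # hypothetical deviance at the point estimate
  N <- 100                                     # hypothetical sample size
  D_bar <- mean(deviance)                      # posterior expected deviance
  pD <- D_bar - D_hat                          # Spiegelhalter et al. (2002) penalty
  pV <- var(deviance)/2                        # Gelman et al. (2004) penalty
  c(DIC1 = D_bar + pD, DIC2 = D_bar + pV,
    BPIC1 = D_bar + 2*pD, BPIC2 = D_bar + 2*pV,
    BICM1 = D_bar + pV*(log(N) - 1), BICM2 = D_hat + pV*log(N))
}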
selectPLMIX <- function(pi_inv,seq_G,
MCMCsampleP=vector(mode="list",length=length(seq_G)),
MCMCsampleW=vector(mode="list",length=length(seq_G)),
MAPestP,
MAPestW,
deviance,
post_est="mean",
parallel=FALSE){
#' Bayesian selection criteria for mixtures of Plackett-Luce models
#'
#' Compute Bayesian comparison criteria for mixtures of Plackett-Luce models with a different number of components.
#'
#' The \code{selectPLMIX} function privileges the use of the MAP point estimates to compute the Bayesian model comparison criteria, since they are not affected by the label switching issue. By setting both the \code{MAPestP} and \code{MAPestW} arguments equal to NULL, the user can alternatively compute the selection measures by relying on a different posterior summary (\code{"mean"} or \code{"median"}) specified in the \code{post_est} argument. In the latter case, the MCMC samples for each Plackett-Luce mixture must be supplied in the lists \code{MCMCsampleP} and \code{MCMCsampleW}. The drawback of working with point estimates other than the MAP is that any label switching must first be removed from the traces to obtain meaningful results. See the \code{\link{label_switchPLMIX}} function to perform the label switching adjustment of the MCMC samples.
#'
#' Several model selection criteria are returned. The two versions of DIC correspond to alternative ways of computing the effective number of parameters: DIC1 was proposed by Spiegelhalter et al. (2002) with penalty named \code{pD}, whereas DIC2 was proposed by Gelman et al. (2004) with penalty named \code{pV}. The latter coincides with the AICM introduced by Raftery et al. (2007), that is, the Bayesian counterpart of the AIC. BPIC1 and BPIC2 are obtained from the two DICs by simply doubling the penalty term, as suggested by Ando (2007) to counteract the DIC's tendency to overfit. BICM1 is the Bayesian variant of the BIC, originally presented by Raftery et al. (2007) and entirely based on the MCMC sample. BICM2, instead, involves the MAP estimate directly, without the need to approximate it from the MCMC sample as done for BICM1.
#'
#' @param pi_inv An object of class \code{top_ordering}, collecting the numeric \eqn{N}\eqn{\times}{x}\eqn{K} data matrix of partial orderings, or an object that can be coerced with \code{\link{as.top_ordering}}.
#' @param seq_G Numeric vector with the number of components of the Plackett-Luce mixtures to be compared.
#' @param MCMCsampleP List of size \code{length(seq_G)}, whose generic element is a numeric \eqn{L}\eqn{\times}{x}\eqn{(G*K)} matrix with the MCMC samples of the component-specific support parameters. Default is list of \code{NULL} elements.
#' @param MCMCsampleW List of size \code{length(seq_G)}, whose generic element is a numeric \eqn{L}\eqn{\times}{x}\eqn{G} matrix with the MCMC samples of the mixture weights. Default is list of \code{NULL} elements.
#' @param MAPestP List of size \code{length(seq_G)}, whose generic element is a numeric \eqn{G}\eqn{\times}{x}\eqn{K} matrix with the MAP estimates of the component-specific support parameters.
#' @param MAPestW List of size \code{length(seq_G)}, whose generic element is a numeric vector with the MAP estimates of the \eqn{G} mixture weights.
#' @param deviance List of size \code{length(seq_G)}, whose generic element is a numeric vector of posterior deviance values.
#' @param post_est Character string indicating the point estimates of the Plackett-Luce mixture parameters to be computed from the MCMC sample. This argument is ignored when MAP estimates are supplied in the \code{MAPestP} and \code{MAPestW} arguments. Default is \code{"mean"}. Alternatively, one can choose \code{"median"} (see 'Details').
#' @param parallel Logical: whether parallelization should be used. Default is \code{FALSE}.
#'
#' @return A list of named objects:
#'
#' \item{\code{point_estP}}{ List of size \code{length(seq_G)}, whose generic element is a numeric \eqn{G}\eqn{\times}{x}\eqn{K} matrix with the point estimates of the component-specific support parameters employed for the computation of the criteria.}
#' \item{\code{point_estW}}{ List of size \code{length(seq_G)}, whose generic element is a numeric vector with the \eqn{G} point estimates of the mixture weights employed for the computation of the criteria.}
#' \item{\code{fitting}}{ Numeric \code{length(seq_G)}\eqn{\times}{x}\eqn{2} matrix with the fitting terms of the comparison measures, given by the posterior expected deviance \code{D_bar} and the deviance \code{D_hat} evaluated at the point estimate.}
#' \item{\code{penalties}}{ Numeric \code{length(seq_G)}\eqn{\times}{x}\eqn{2} matrix with the penalty terms \code{pD} and \code{pV} (effective number of parameters).}
#' \item{\code{criteria}}{ Numeric \code{length(seq_G)}\eqn{\times}{x}\eqn{6} matrix of Bayesian model selection criteria: \code{DIC1}, \code{DIC2}, \code{BPIC1}, \code{BPIC2}, \code{BICM1} and \code{BICM2} (see 'Details').}
#'
#' @references
#' Mollica, C. and Tardella, L. (2017). Bayesian Plackett-Luce mixture models for partially ranked data. \emph{Psychometrika}, \bold{82}(2), pages 442--458, ISSN: 0033-3123, DOI: 10.1007/s11336-016-9530-0.
#'
#' Ando, T. (2007). Bayesian predictive information criterion for the evaluation of hierarchical Bayesian and empirical Bayes models. \emph{Biometrika}, \bold{94}(2), pages 443--458.
#'
#' Raftery, A. E., Satagopan, J. M., Newton, M. A. and Krivitsky, P. N. (2007). \emph{Bayesian Statistics 8: Proceedings of the Eighth Valencia International Meeting 2006}, pages 371--416. Oxford University Press.
#'
#' Gelman, A., Carlin, J. B., Stern, H. S. and Rubin, D. B. (2004). Bayesian data analysis. Chapman & Hall/CRC, Second Edition, ISBN: 1-58488-388-X. New York.
#'
#' Spiegelhalter, D. J., Best, N. G., Carlin, B. P. and Van Der Linde, A. (2002). Bayesian measures of model complexity and fit. \emph{Journal of the Royal Statistical Society: Series B (Statistical Methodology)}, \bold{64}(4), pages 583--639.
#'
#' @author Cristina Mollica and Luca Tardella
#' @examples
#'
#' data(d_carconf)
#' K <- ncol(d_carconf)
#'
#' ## Fit 1- and 2-component PL mixtures via MAP estimation
#' MAP_1 <- mapPLMIX_multistart(pi_inv=d_carconf, K=K, G=1,
#' n_start=2, n_iter=400*1)
#'
#' MAP_2 <- mapPLMIX_multistart(pi_inv=d_carconf, K=K, G=2,
#' n_start=2, n_iter=400*2)
#'
#' mcmc_iter <- 30
#' burnin <- 10
#'
#' ## Fit 1- and 2-component PL mixtures via Gibbs sampling procedure
#' GIBBS_1 <- gibbsPLMIX(pi_inv=d_carconf, K=K, G=1, n_iter=mcmc_iter,
#' n_burn=burnin, init=list(p=MAP_1$mod$P_map,
#' z=binary_group_ind(MAP_1$mod$class_map,G=1)))
#' GIBBS_2 <- gibbsPLMIX(pi_inv=d_carconf, K=K, G=2, n_iter=mcmc_iter,
#' n_burn=burnin, init=list(p=MAP_2$mod$P_map,
#' z=binary_group_ind(MAP_2$mod$class_map,G=2)))
#' ## Select the optimal number of components
#' SELECT <- selectPLMIX(pi_inv=d_carconf, seq_G=1:2,
#' MAPestP=list(MAP_1$mod$P_map, MAP_2$mod$P_map),
#' MAPestW=list(MAP_1$mod$W_map, MAP_2$mod$W_map),
#' deviance=list(GIBBS_1$deviance, GIBBS_2$deviance))
#' SELECT$criteria
#'
#' @export
if(class(pi_inv)[1]!="top_ordering"){
if(class(pi_inv)[1]=="RankData"){
pi_inv=as.top_ordering(data=pi_inv)
}
if(class(pi_inv)[1]=="rankings"){
pi_inv=as.top_ordering(data=pi_inv)
}
if(class(pi_inv)[1]=="matrix" | class(pi_inv)[1]=="data.frame"){
pi_inv=as.top_ordering(data=pi_inv,format_input="ordering",aggr=FALSE)
}
}
pi_inv <- fill_single_entries(data=pi_inv)
ncomp <- length(seq_G)
if(!parallel){
selection <- vector(mode="list",length=ncomp)
for(l in 1:ncomp){
print(paste("SELECTION CRITERIA FOR G=",seq_G[l]))
selection[[l]] <- selectPLMIX_single(pi_inv=pi_inv,G=seq_G[l],MCMCsampleP=MCMCsampleP[[l]],
MCMCsampleW=MCMCsampleW[[l]],MAPestP=MAPestP[[l]],
MAPestW=MAPestW[[l]],deviance=deviance[[l]],post_est=post_est)
}
}else{
selection <- foreach(l=1:ncomp) %dopar%{
tempselection <- selectPLMIX_single(pi_inv=pi_inv,G=seq_G[l],MCMCsampleP=MCMCsampleP[[l]],
MCMCsampleW=MCMCsampleW[[l]],MAPestP=MAPestP[[l]],
MAPestW=MAPestW[[l]],deviance=deviance[[l]],post_est=post_est)
}
}
point_estP <- sapply(selection,"[[","point_estP")
point_estW <- sapply(selection,"[[","point_estW")
fitting <- t(sapply(lapply(selection,"[",c("D_bar","D_hat")),unlist))
penalties <- t(sapply(lapply(selection,"[",c("pD","pV")),unlist))
criteria <- t(sapply(lapply(selection,"[",c("DIC1","DIC2","BPIC1","BPIC2","BICM1","BICM2")),unlist))
names(point_estP) <- names(point_estW) <- rownames(fitting) <- rownames(penalties) <- rownames(criteria) <- paste0("G_",seq_G)
out <- list(point_estP=point_estP,point_estW=point_estW,fitting=fitting,
penalties=penalties,criteria=criteria)
return(out)
}
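## The parallel=TRUE branch of selectPLMIX (and of the other wrappers below)
## relies on a foreach backend registered by the user. A minimal sketch, assuming
## the doParallel package as the backend (any %dopar%-compatible backend works):
if (FALSE) {
  library(doParallel)
  cl <- makeCluster(2)        # two workers, chosen arbitrarily here
  registerDoParallel(cl)
  ## ... selectPLMIX(..., parallel=TRUE) ...
  stopCluster(cl)
}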
#### Label switching adjustment
label_switchPLMIX_single <- function(pi_inv,G,
MCMCsampleP,
MCMCsampleW,
MAPestP,
MAPestW){
#/' Label switching adjustment for mixtures of Plackett-Luce models
#/'
#/' Remove the label switching phenomenon from the MCMC samples of Bayesian mixtures of Plackett-Luce models with a different number of components.
#/'
#/' The function performs the label switching adjustment of the MCMC samples via the Pivotal Reordering Algorithm (PRA) described in Marin et al. (2005), by recalling the \code{\link[label.switching]{pra}} function from the \code{\link[label.switching]{label.switching}} package.
#/'
#/' @param pi_inv An object of class \code{top_ordering}, collecting the numeric \eqn{N}\eqn{\times}{x}\eqn{K} data matrix of partial orderings, or an object that can be coerced with \code{\link{as.top_ordering}}.
#/' @param G Number of mixture components.
#/' @param MCMCsampleP Numeric \eqn{L}\eqn{\times}{x}\eqn{G*K} matrix with the MCMC samples of the component-specific support parameters to be processed.
#/' @param MCMCsampleW Numeric \eqn{L}\eqn{\times}{x}\eqn{G} matrix with the MCMC samples of the mixture weights to be processed.
#/' @param MAPestP Numeric \eqn{G}\eqn{\times}{x}\eqn{K} matrix of MAP component-specific support parameter estimates to be used as pivot in the PRA method.
#/' @param MAPestW Numeric vector of the \eqn{G} MAP estimates of the mixture weights as pivot in the PRA method.
#/'
#/' @return A list of named objects:
#/'
#/' \item{\code{final_sampleP}}{ Numeric \eqn{G}\eqn{\times}{x}\eqn{K}\eqn{\times}{x}\eqn{L} array with the MCMC samples of the component-specific support parameters adjusted for label switching.}
#/' \item{\code{final_sampleW}}{ Numeric \eqn{L}\eqn{\times}{x}\eqn{G} matrix of MCMC samples of the mixture weights adjusted for label switching.}
#/'
#/' @author Cristina Mollica and Luca Tardella
if(class(pi_inv)[1]!="top_ordering"){
if(class(pi_inv)[1]=="RankData"){
pi_inv=as.top_ordering(data=pi_inv)
}
if(class(pi_inv)[1]=="rankings"){
pi_inv=as.top_ordering(data=pi_inv)
}
if(class(pi_inv)[1]=="matrix" | class(pi_inv)[1]=="data.frame"){
pi_inv=as.top_ordering(data=pi_inv,format_input="ordering",aggr=FALSE)
}
}
pi_inv <- fill_single_entries(data=pi_inv)
N <- nrow(pi_inv)
K <- ncol(pi_inv)
L <- nrow(MCMCsampleW)
mcmc.sample <- array(cbind(MCMCsampleP,MCMCsampleW),c(L,G,(K+1)))
if(G==1){
reordered.pra <- list(output=NULL)
reordered.pra$output <- mcmc.sample
}else{
print("LABEL SWITCHING ADJUSTMENT WITH PIVOTAL REORDERING ALGORITHM")
pivot.input <- cbind(MAPestP,MAPestW)
lab.pra <- pra(mcmc.pars=mcmc.sample,pivot=pivot.input)
reordered.pra <- permute.mcmc(mcmc=mcmc.sample,permutations=lab.pra$permutations)
}
final.sample <- matrix(reordered.pra$output,nrow=L,ncol=G*(K+1))
final_sampleP <- array(t(final.sample[,1:(G*K)]),c(G,K,L))
final_sampleW <- final.sample[,-c(1:(G*K)),drop=FALSE]
out <- list(final_sampleP=final_sampleP,final_sampleW=final_sampleW)
return(out)
}
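## A minimal sketch (not run) of the array reshaping performed above before
## calling pra(): assuming the columns of MCMCsampleP vectorize the G x K support
## matrix with the component index running fastest, slice [l, g, ] of the array
## holds the K supports of component g at iteration l followed by its weight.
## All the sizes below are hypothetical.
if (FALSE) {
  L <- 5; G <- 2; K <- 3
  MCMCsampleP <- matrix(seq_len(L*G*K), L, G*K)
  MCMCsampleW <- matrix(runif(L*G), L, G)
  mcmc.sample <- array(cbind(MCMCsampleP, MCMCsampleW), c(L, G, K + 1))
  mcmc.sample[1, 1, ]   # component 1 at iteration 1: K supports, then its weight
}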
label_switchPLMIX <- function(pi_inv,seq_G,
MCMCsampleP,
MCMCsampleW,
MAPestP,
MAPestW,
parallel=FALSE){
#' Label switching adjustment of the Gibbs sampling simulations for Bayesian mixtures of Plackett-Luce models
#'
#' Remove the label switching phenomenon from the MCMC samples of Bayesian mixtures of Plackett-Luce models with \eqn{G>1} components.
#'
#' The \code{label_switchPLMIX} function performs the label switching adjustment of the MCMC samples via the Pivotal Reordering Algorithm (PRA) described in Marin et al. (2005), by recalling the \code{\link[label.switching]{pra}} function from the \code{\link[label.switching]{label.switching}} package.
#'
#' @param pi_inv An object of class \code{top_ordering}, collecting the numeric \eqn{N}\eqn{\times}{x}\eqn{K} data matrix of partial orderings, or an object that can be coerced with \code{\link{as.top_ordering}}.
#' @param seq_G Numeric vector with the number of components of the Plackett-Luce mixtures to be assessed.
#' @param MCMCsampleP List of size \code{length(seq_G)}, whose generic element is a numeric \eqn{L}\eqn{\times}{x}\eqn{(G*K)} matrix with the MCMC samples of the component-specific support parameters to be processed.
#' @param MCMCsampleW List of size \code{length(seq_G)}, whose generic element is a numeric \eqn{L}\eqn{\times}{x}\eqn{G} matrix with the MCMC samples of the mixture weights to be processed.
#' @param MAPestP List of size \code{length(seq_G)}, whose generic element is a numeric \eqn{G}\eqn{\times}{x}\eqn{K} matrix with the MAP estimates of the component-specific support parameters to be used as a pivot in the PRA method (see 'Details').
#' @param MAPestW List of size \code{length(seq_G)}, whose generic element is a numeric vector with the MAP estimates of the \eqn{G} mixture weights to be used as a pivot in the PRA method (see 'Details').
#' @param parallel Logical: whether parallelization should be used. Default is \code{FALSE}.
#'
#' @return A list of named objects:
#'
#' \item{\code{final_sampleP}}{ List of size \code{length(seq_G)}, whose generic element is a numeric \eqn{G}\eqn{\times}{x}\eqn{K}\eqn{\times}{x}\eqn{L} array with the MCMC samples of the component-specific support parameters adjusted for label switching.}
#' \item{\code{final_sampleW}}{ List of size \code{length(seq_G)}, whose generic element is a numeric \eqn{L}\eqn{\times}{x}\eqn{G} matrix with the MCMC samples of the mixture weights adjusted for label switching.}
#'
#'
#' @references
#' Mollica, C. and Tardella, L. (2017). Bayesian Plackett-Luce mixture models for partially ranked data. \emph{Psychometrika}, \bold{82}(2), pages 442--458, ISSN: 0033-3123, DOI: 10.1007/s11336-016-9530-0.
#'
#' Papastamoulis, P. (2016). label.switching: An R Package for Dealing with the Label Switching Problem in MCMC Outputs. \emph{Journal of Statistical Software}, \bold{69}(1), pages 1--24, DOI: 10.18637/jss.v069.c01.
#'
#' Marin, J. M., Mengersen, K. and Robert, C.P. (2005). Bayesian modelling and inference on mixtures of distributions. \emph{Handbook of Statistics} (25), D. Dey and C.R. Rao (eds). Elsevier-Sciences.
#'
#' @author Cristina Mollica and Luca Tardella
#'
#' @seealso \code{\link[label.switching]{pra}}
#'
#' @examples
#'
#' data(d_carconf)
#' K <- ncol(d_carconf)
#'
#' ## Fit 1- and 2-component PL mixtures via MAP estimation
#' MAP_1 <- mapPLMIX_multistart(pi_inv=d_carconf, K=K, G=1,
#' n_start=2, n_iter=400*1)
#'
#' MAP_2 <- mapPLMIX_multistart(pi_inv=d_carconf, K=K, G=2,
#' n_start=2, n_iter=400*2)
#'
#' MAP_3 <- mapPLMIX_multistart(pi_inv=d_carconf, K=K, G=3,
#' n_start=2, n_iter=400*3)
#'
#' mcmc_iter <- 30
#' burnin <- 10
#'
#' ## Fit 1- and 2-component PL mixtures via Gibbs sampling procedure
#' GIBBS_1 <- gibbsPLMIX(pi_inv=d_carconf, K=K, G=1, n_iter=mcmc_iter,
#' n_burn=burnin, init=list(p=MAP_1$mod$P_map,
#' z=binary_group_ind(MAP_1$mod$class_map,G=1)))
#' GIBBS_2 <- gibbsPLMIX(pi_inv=d_carconf, K=K, G=2, n_iter=mcmc_iter,
#' n_burn=burnin, init=list(p=MAP_2$mod$P_map,
#' z=binary_group_ind(MAP_2$mod$class_map,G=2)))
#' GIBBS_3 <- gibbsPLMIX(pi_inv=d_carconf, K=K, G=3, n_iter=mcmc_iter,
#' n_burn=burnin, init=list(p=MAP_3$mod$P_map,
#' z=binary_group_ind(MAP_3$mod$class_map,G=3)))
#'
#' ## Adjusting the MCMC samples for label switching
#' LS <- label_switchPLMIX(pi_inv=d_carconf, seq_G=1:3,
#' MCMCsampleP=list(GIBBS_1$P, GIBBS_2$P, GIBBS_3$P),
#' MCMCsampleW=list(GIBBS_1$W, GIBBS_2$W, GIBBS_3$W),
#' MAPestP=list(MAP_1$mod$P_map, MAP_2$mod$P_map, MAP_3$mod$P_map),
#' MAPestW=list(MAP_1$mod$W_map, MAP_2$mod$W_map, MAP_3$mod$W_map))
#' str(LS)
#' @export
if(class(pi_inv)[1]!="top_ordering"){
if(class(pi_inv)[1]=="RankData"){
pi_inv=as.top_ordering(data=pi_inv)
}
if(class(pi_inv)[1]=="rankings"){
pi_inv=as.top_ordering(data=pi_inv)
}
if(class(pi_inv)[1]=="matrix" | class(pi_inv)[1]=="data.frame"){
pi_inv=as.top_ordering(data=pi_inv,format_input="ordering",aggr=FALSE)
}
}
pi_inv <- fill_single_entries(data=pi_inv)
ncomp <- length(seq_G)
if(!parallel){
adjust <- vector(mode="list",length=ncomp)
for(l in 1:ncomp){
adjust[[l]] <- label_switchPLMIX_single(pi_inv=pi_inv,G=seq_G[l],MCMCsampleP=MCMCsampleP[[l]],
MCMCsampleW=MCMCsampleW[[l]],MAPestP=MAPestP[[l]],
MAPestW=MAPestW[[l]])
}
}else{
adjust <- foreach(l=1:ncomp) %dopar%{
tempadjust <- label_switchPLMIX_single(pi_inv=pi_inv,G=seq_G[l],MCMCsampleP=MCMCsampleP[[l]],
MCMCsampleW=MCMCsampleW[[l]],MAPestP=MAPestP[[l]],
MAPestW=MAPestW[[l]])
}
}
# OLD final_sampleP <- sapply(adjust,"[[","final_sampleP")
# OLD final_sampleW <- sapply(adjust,"[[","final_sampleW")
final_sampleP <- drop(simplify2array(simplify2array(lapply(adjust,function(x){lapply("final_sampleP",function(y)do.call("[[",list(x,y)))}))))
final_sampleW <- drop(simplify2array(simplify2array(lapply(adjust,function(x){lapply("final_sampleW",function(y)do.call("[[",list(x,y)))}))))
if(length(seq_G)>1){
names(final_sampleP) <- names(final_sampleW) <- paste0("G_",seq_G)
}else{
final_sampleP <- list(final_sampleP)
final_sampleW <- list(final_sampleW)
names(final_sampleP) <- names(final_sampleW) <- paste0("G_",seq_G)
}
out <- list(final_sampleP=final_sampleP,final_sampleW=final_sampleW)
return(out)
}
#### Posterior predictive check
ppcheckPLMIX_single <- function(pi_inv,G,
MCMCsampleP,
MCMCsampleW,
top1=TRUE,
paired=TRUE){
#/' Posterior predictive check for a mixture of Plackett-Luce models
#/'
#/' Compute posterior predictive \eqn{p}-values based on top item and paired comparison frequencies to assess the goodness-of-fit of a Bayesian mixture of Plackett-Luce models for partial orderings.
#/'
#/' In the case of partial orderings, the same missingness patterns of the observed dataset, i.e., the number of items ranked by each sample unit, are reproduced on the replicated datasets.
#/'
#/' @param pi_inv An object of class \code{top_ordering}, collecting the numeric \eqn{N}\eqn{\times}{x}\eqn{K} data matrix of partial orderings, or an object that can be coerced with \code{\link{as.top_ordering}}.
#/' @param G Number of mixture components.
#/' @param MCMCsampleP Numeric \eqn{L}\eqn{\times}{x}\eqn{G*K} matrix with the MCMC samples of the component-specific support parameters.
#/' @param MCMCsampleW Numeric \eqn{L}\eqn{\times}{x}\eqn{G} matrix with the MCMC samples of the mixture weights.
#/' @param top1 Logical: whether the posterior predictive \eqn{p}-value based on top frequencies has to be computed. Default is \code{TRUE}.
#/' @param paired Logical: whether the posterior predictive \eqn{p}-value based on paired comparison frequencies has to be computed. Default is \code{TRUE}.
#/'
#/' @return A list of named objects:
#/'
#/' \item{\code{post_pred_pvalue_top1}}{ If \code{top1} is \code{TRUE}, posterior predictive \eqn{p}-value based on top frequencies, otherwise \code{NA}.}
#/' \item{\code{post_pred_pvalue_paired}}{ If \code{paired} is \code{TRUE}, posterior predictive \eqn{p}-value based on paired comparison frequencies, otherwise \code{NA}.}
#/'
#/' @author Cristina Mollica and Luca Tardella
if(class(pi_inv)[1]!="top_ordering"){
if(class(pi_inv)[1]=="RankData"){
pi_inv=as.top_ordering(data=pi_inv)
}
if(class(pi_inv)[1]=="rankings"){
pi_inv=as.top_ordering(data=pi_inv)
}
if(class(pi_inv)[1]=="matrix" | class(pi_inv)[1]=="data.frame"){
pi_inv=as.top_ordering(data=pi_inv,format_input="ordering",aggr=FALSE)
}
}
pi_inv <- fill_single_entries(data=pi_inv)
N <- nrow(pi_inv)
K <- ncol(pi_inv)
L <- nrow(MCMCsampleW)
final.sample <- cbind(MCMCsampleP,MCMCsampleW)
final_sampleP <- array(c(t(MCMCsampleP)),c(G,K,L))
final_sampleW <- MCMCsampleW
pi_inv_int <- pi_inv
mode(pi_inv_int) <- "integer"
rho <- matrix(1:K,nrow=G,ncol=K,byrow=TRUE)
if(top1){
print(paste("POSTERIOR PREDICTIVE CHECK FOR G=",G))
print("Top1 frequencies-based posterior predictive p-value")
chi.obs.top1 <- rep(NA,L)
chi.rep.top1 <- rep(NA,L)
for(l in 1:L){
(if((l%%200)==0) print(l))
chi.obs.top1[l] <- chisqmeasureobs1dim(pi_inv_int, p=matrix(final_sampleP[,,l],nrow=G), weights=final_sampleW[l,])
chi.rep.top1[l] <- chisqmeasuretheo1dim(N,ref_order=rho, p=matrix(final_sampleP[,,l],nrow=G), weights=final_sampleW[l,],pi_inv_int)
}
post_pred_pvalue_top1 <- mean(chi.rep.top1 >= chi.obs.top1)
}else{
post_pred_pvalue_top1 <- NA
}
if(paired){
print(paste("POSTERIOR PREDICTIVE CHECK FOR G=",G))
print("Paired comparison frequencies-based posterior predictive p-value")
chi.obs.paired <- rep(NA,L)
chi.rep.paired <- rep(NA,L)
for(l in 1:L){
(if((l%%200)==0) print(l))
chi.obs.paired[l] <- chisqmeasureobs(pi_inv_int, p=matrix(final_sampleP[,,l],nrow=G), weights=final_sampleW[l,])
chi.rep.paired[l] <- chisqmeasuretheo(N,ref_order=rho, p=matrix(final_sampleP[,,l],nrow=G), weights=final_sampleW[l,],pi_inv_int)
}
post_pred_pvalue_paired <- mean(chi.rep.paired >= chi.obs.paired)
}else{
post_pred_pvalue_paired <- NA
}
out <- list(post_pred_pvalue_top1=post_pred_pvalue_top1,post_pred_pvalue_paired=post_pred_pvalue_paired)
return(out)
}
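## A minimal sketch (not run) of the posterior predictive p-value logic used
## above: the p-value is the share of iterations in which the replicated
## discrepancy is at least as large as the observed one. The chi-squared draws
## below are purely illustrative placeholders for the discrepancy measures.
if (FALSE) {
  chi.obs <- rchisq(1000, df = 10)   # hypothetical observed discrepancies
  chi.rep <- rchisq(1000, df = 10)   # hypothetical replicated discrepancies
  mean(chi.rep >= chi.obs)           # values close to 0 or 1 signal misfit
}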
ppcheckPLMIX <- function(pi_inv,seq_G,
MCMCsampleP,
MCMCsampleW,
top1=TRUE,
paired=TRUE,
parallel=FALSE){
#' Posterior predictive check for Bayesian mixtures of Plackett-Luce models
#'
#' Perform posterior predictive check to assess the goodness-of-fit of Bayesian mixtures of Plackett-Luce models with a different number of components.
#'
#' The \code{ppcheckPLMIX} function returns two posterior predictive \eqn{p}-values based on two chi squared discrepancy variables involving: (i) the top item frequencies and (ii) the paired comparison frequencies. In the presence of partial sequences in the \code{pi_inv} matrix, the same missingness patterns observed in the dataset (i.e., the number of items ranked by each sample unit) are reproduced on the replicated datasets from the posterior predictive distribution.
#'
#'
#' @param pi_inv An object of class \code{top_ordering}, collecting the numeric \eqn{N}\eqn{\times}{x}\eqn{K} data matrix of partial orderings, or an object that can be coerced with \code{\link{as.top_ordering}}.
#' @param seq_G Numeric vector with the number of components of the Plackett-Luce mixtures to be assessed.
#' @param MCMCsampleP List of size \code{length(seq_G)}, whose generic element is a numeric \eqn{L}\eqn{\times}{x}\eqn{(G*K)} matrix with the MCMC samples of the component-specific support parameters.
#' @param MCMCsampleW List of size \code{length(seq_G)}, whose generic element is a numeric \eqn{L}\eqn{\times}{x}\eqn{G} matrix with the MCMC samples of the mixture weights.
#' @param top1 Logical: whether the posterior predictive \eqn{p}-value based on the top item frequencies has to be computed. Default is \code{TRUE}.
#' @param paired Logical: whether the posterior predictive \eqn{p}-value based on the paired comparison frequencies has to be computed. Default is \code{TRUE}.
#' @param parallel Logical: whether parallelization should be used. Default is \code{FALSE}.
#'
#' @return A list with a named element:
#'
#' \item{\code{post_pred_pvalue}}{ Numeric \code{length(seq_G)}\eqn{\times}{x}\eqn{2} matrix of posterior predictive \eqn{p}-values based on the top item and paired comparison frequencies. If either \code{top1} or \code{paired} argument is \code{FALSE}, the corresponding matrix entries are \code{NA}.}
#'
#'
#' @references
#' Mollica, C. and Tardella, L. (2017). Bayesian Plackett-Luce mixture models for partially ranked data. \emph{Psychometrika}, \bold{82}(2), pages 442--458, ISSN: 0033-3123, DOI: 10.1007/s11336-016-9530-0.
#'
#' @author Cristina Mollica and Luca Tardella
#'
#' @seealso \code{\link{ppcheckPLMIX_cond}}
#'
#' @examples
#'
#' data(d_carconf)
#' K <- ncol(d_carconf)
#'
#' ## Fit 1- and 2-component PL mixtures via MAP estimation
#' MAP_1 <- mapPLMIX_multistart(pi_inv=d_carconf, K=K, G=1,
#' n_start=2, n_iter=400*1)
#'
#' MAP_2 <- mapPLMIX_multistart(pi_inv=d_carconf, K=K, G=2,
#' n_start=2, n_iter=400*2)
#'
#' MAP_3 <- mapPLMIX_multistart(pi_inv=d_carconf, K=K, G=3,
#' n_start=2, n_iter=400*3)
#'
#' mcmc_iter <- 30
#' burnin <- 10
#'
#' ## Fit 1- and 2-component PL mixtures via Gibbs sampling procedure
#' GIBBS_1 <- gibbsPLMIX(pi_inv=d_carconf, K=K, G=1, n_iter=mcmc_iter,
#' n_burn=burnin, init=list(p=MAP_1$mod$P_map,
#' z=binary_group_ind(MAP_1$mod$class_map,G=1)))
#' GIBBS_2 <- gibbsPLMIX(pi_inv=d_carconf, K=K, G=2, n_iter=mcmc_iter,
#' n_burn=burnin, init=list(p=MAP_2$mod$P_map,
#' z=binary_group_ind(MAP_2$mod$class_map,G=2)))
#' GIBBS_3 <- gibbsPLMIX(pi_inv=d_carconf, K=K, G=3, n_iter=mcmc_iter,
#' n_burn=burnin, init=list(p=MAP_3$mod$P_map,
#' z=binary_group_ind(MAP_3$mod$class_map,G=3)))
#'
#' ## Checking goodness-of-fit of the estimated mixtures
#' CHECK <- ppcheckPLMIX(pi_inv=d_carconf, seq_G=1:3,
#' MCMCsampleP=list(GIBBS_1$P, GIBBS_2$P, GIBBS_3$P),
#' MCMCsampleW=list(GIBBS_1$W, GIBBS_2$W, GIBBS_3$W))
#' CHECK$post_pred_pvalue
#'
#' @export
if(class(pi_inv)[1]!="top_ordering"){
if(class(pi_inv)[1]=="RankData"){
pi_inv=as.top_ordering(data=pi_inv)
}
if(class(pi_inv)[1]=="rankings"){
pi_inv=as.top_ordering(data=pi_inv)
}
if(class(pi_inv)[1]=="matrix" | class(pi_inv)[1]=="data.frame"){
pi_inv=as.top_ordering(data=pi_inv,format_input="ordering",aggr=FALSE)
}
}
pi_inv <- fill_single_entries(data=pi_inv)
ncomp <- length(seq_G)
if(!parallel){
fitting <- vector(mode="list",length=ncomp)
for(l in 1:ncomp){
fitting[[l]] <- ppcheckPLMIX_single(pi_inv=pi_inv,G=seq_G[l],MCMCsampleP=MCMCsampleP[[l]],
MCMCsampleW=MCMCsampleW[[l]],top1=top1,paired=paired)
}
}else{
fitting <- foreach(l=1:ncomp) %dopar%{
tempfitting <- ppcheckPLMIX_single(pi_inv=pi_inv,G=seq_G[l],MCMCsampleP=MCMCsampleP[[l]],
MCMCsampleW=MCMCsampleW[[l]],top1=top1,paired=paired)
}
}
post_pred_pvalue <- t(sapply(lapply(fitting,"[",c("post_pred_pvalue_top1","post_pred_pvalue_paired")),unlist))
if(!is.numeric(post_pred_pvalue)){
post_pred_pvalue <- matrix(NA,nrow=length(seq_G),ncol=2)
}
attributes(post_pred_pvalue) <- attributes(post_pred_pvalue)[c("dim","dimnames")]
post_pred_pvalue <- as.matrix(post_pred_pvalue)
rownames(post_pred_pvalue) <- paste0("G_",seq_G)
out <- list(post_pred_pvalue=post_pred_pvalue)
return(out)
}
ppcheckPLMIX_cond_single <- function(pi_inv,G,
MCMCsampleP,
MCMCsampleW,
top1=TRUE,
paired=TRUE){
#/' Conditional posterior predictive \eqn{p}-values
#/'
#/' Compute conditional posterior predictive \eqn{p}-values based on top item and paired comparison frequencies to assess the goodness-of-fit of a Bayesian mixture of Plackett-Luce models for partial orderings.
#/'
#/' In the case of partial orderings, the same missingness patterns of the observed dataset, i.e., the number of items ranked by each sample unit, are reproduced on the replicated datasets.
#/'
#/' @param pi_inv An object of class \code{top_ordering}, collecting the numeric \eqn{N}\eqn{\times}{x}\eqn{K} data matrix of partial orderings, or an object that can be coerced with \code{\link{as.top_ordering}}.
#/' @param G Number of mixture components.
#/' @param MCMCsampleP Numeric \eqn{L}\eqn{\times}{x}\eqn{G*K} matrix with the MCMC samples of the component-specific support parameters.
#/' @param MCMCsampleW Numeric \eqn{L}\eqn{\times}{x}\eqn{G} matrix with the MCMC samples of the mixture weights.
#/' @param top1 Logical: whether the posterior predictive \eqn{p}-value based on top frequencies has to be computed. Default is \code{TRUE}.
#/' @param paired Logical: whether the posterior predictive \eqn{p}-value based on paired comparison frequencies has to be computed. Default is \code{TRUE}.
#/'
#/' @return A list of named objects:
#/'
#/' \item{\code{post_pred_pvalue_top1}}{ If \code{top1} is \code{TRUE}, posterior predictive \eqn{p}-value based on top frequencies, otherwise \code{NA}.}
#/' \item{\code{post_pred_pvalue_paired}}{ If \code{paired} is \code{TRUE}, posterior predictive \eqn{p}-value based on paired comparison frequencies, otherwise \code{NA}.}
#/'
#/' @author Cristina Mollica and Luca Tardella
if(class(pi_inv)[1]!="top_ordering"){
if(class(pi_inv)[1]=="RankData"){
pi_inv=as.top_ordering(data=pi_inv)
}
if(class(pi_inv)[1]=="rankings"){
pi_inv=as.top_ordering(data=pi_inv)
}
if(class(pi_inv)[1]=="matrix" | class(pi_inv)[1]=="data.frame"){
pi_inv=as.top_ordering(data=pi_inv,format_input="ordering",aggr=FALSE)
}
}
pi_inv <- fill_single_entries(data=pi_inv)
N <- nrow(pi_inv)
K <- ncol(pi_inv)
L <- nrow(MCMCsampleW)
final.sample <- cbind(MCMCsampleP,MCMCsampleW)
final_sampleP <- array(c(t(MCMCsampleP)),c(G,K,L))
final_sampleW <- MCMCsampleW
pi_inv_int <- pi_inv
mode(pi_inv_int) <- "integer"
rho <- matrix(1:K,nrow=G,ncol=K,byrow=TRUE)
if(top1){
print(paste("CONDITIONAL POSTERIOR PREDICTIVE CHECK FOR G=",G))
print("Conditional top1 frequencies-based posterior predictive p-value")
chi.obs.top1.cond <- rep(NA,L)
chi.rep.top1.cond <- rep(NA,L)
chi.obs.top1.mat <- array(NA,dim=c(K,K,L))
chi.rep.top1.mat <- array(NA,dim=c(K,K,L))
for(l in 1:L){
(if((l%%200)==0) print(l))
chi.obs.top1.mat[,,l] <- chisqmeasureobsmatrix1dim(pi_inv_int, p=matrix(final_sampleP[,,l],nrow=G), weights=final_sampleW[l,])
chi.rep.top1.mat[,,l] <- chisqmeasuretheomatrix1dim(N,ref_order=rho, p=matrix(final_sampleP[,,l],nrow=G), weights=final_sampleW[l,],pi_inv_int)
chi.obs.top1.cond[l] <- sum(chi.obs.top1.mat[,,l])
chi.rep.top1.cond[l] <- sum(chi.rep.top1.mat[,,l])
}
post_pred_pvalue_top1_cond <- mean(chi.rep.top1.cond >= chi.obs.top1.cond)
}else{
post_pred_pvalue_top1_cond <- NA
}
if(paired){
print(paste("CONDITIONAL POSTERIOR PREDICTIVE CHECK FOR G=",G))
print("Conditional paired comparison frequencies-based posterior predictive p-value")
chi.obs.paired.cond=rep(NA,L)
chi.rep.paired.cond=rep(NA,L)
for(l in 1:L){
(if((l%%200)==0) print(l))
chi.obs.paired.cond[l] <- chisqmeasureobscond(pi_inv_int, p=matrix(final_sampleP[,,l],nrow=G), weights=final_sampleW[l,])
chi.rep.paired.cond[l] <- chisqmeasuretheocond(N,ref_order=rho, p=matrix(final_sampleP[,,l],nrow=G), weights=final_sampleW[l,],pi_inv_int)
}
post_pred_pvalue_paired_cond <- mean(chi.rep.paired.cond >= chi.obs.paired.cond)
}else{
post_pred_pvalue_paired_cond <- NA
}
out <- list(post_pred_pvalue_top1_cond=post_pred_pvalue_top1_cond,post_pred_pvalue_paired_cond=post_pred_pvalue_paired_cond)
return(out)
}
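## A minimal sketch (not run) of the conditional variant above: the cell-wise
## discrepancies collected in a K x K matrix are summed before the observed and
## replicated totals are compared. The matrices below are hypothetical stand-ins
## for the outputs of the chisqmeasure*matrix* routines.
if (FALSE) {
  K <- 3
  chi.mat.obs <- matrix(rchisq(K*K, df = 1), K, K)
  chi.mat.rep <- matrix(rchisq(K*K, df = 1), K, K)
  sum(chi.mat.rep) >= sum(chi.mat.obs)   # one draw of the comparison event
}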
ppcheckPLMIX_cond <- function(pi_inv,seq_G,
MCMCsampleP,
MCMCsampleW,
top1=TRUE,
paired=TRUE,
parallel=FALSE){
#' Conditional posterior predictive check for Bayesian mixtures of Plackett-Luce models
#'
#' Perform conditional posterior predictive check to assess the goodness-of-fit of Bayesian mixtures of Plackett-Luce models with a different number of components.
#'
#' The \code{ppcheckPLMIX_cond} function returns two posterior predictive \eqn{p}-values based on two chi squared discrepancy variables involving: (i) the top item frequencies and (ii) the paired comparison frequencies. In the presence of partial sequences in the \code{pi_inv} matrix, the same missingness patterns observed in the dataset (i.e., the number of items ranked by each sample unit) are reproduced on the replicated datasets from the posterior predictive distribution. Unlike the \code{ppcheckPLMIX} function, the conditional discrepancy measures are obtained by summing up the chi squared discrepancies computed on the subsamples of observations with the same number of ranked items.
#'
#'
#' @param pi_inv An object of class \code{top_ordering}, collecting the numeric \eqn{N}\eqn{\times}{x}\eqn{K} data matrix of partial orderings, or an object that can be coerced with \code{\link{as.top_ordering}}.
#' @param seq_G Numeric vector with the number of components of the Plackett-Luce mixtures to be assessed.
#' @param MCMCsampleP List of size \code{length(seq_G)}, whose generic element is a numeric \eqn{L}\eqn{\times}{x}\eqn{(G*K)} matrix with the MCMC samples of the component-specific support parameters.
#' @param MCMCsampleW List of size \code{length(seq_G)}, whose generic element is a numeric \eqn{L}\eqn{\times}{x}\eqn{G} matrix with the MCMC samples of the mixture weights.
#' @param top1 Logical: whether the posterior predictive \eqn{p}-value based on the top item frequencies has to be computed. Default is \code{TRUE}.
#' @param paired Logical: whether the posterior predictive \eqn{p}-value based on the paired comparison frequencies has to be computed. Default is \code{TRUE}.
#' @param parallel Logical: whether parallelization should be used. Default is \code{FALSE}.
#'
#' @return A list with a named element:
#'
#' \item{\code{post_pred_pvalue_cond}}{ Numeric \code{length(seq_G)}\eqn{\times}{x}\eqn{2} matrix of posterior predictive \eqn{p}-values based on the top item and paired comparison frequencies. If either \code{top1} or \code{paired} argument is \code{FALSE}, the corresponding matrix entries are \code{NA}.}
#'
#'
#' @references
#' Mollica, C. and Tardella, L. (2017). Bayesian Plackett-Luce mixture models for partially ranked data. \emph{Psychometrika}, \bold{82}(2), pages 442--458, ISSN: 0033-3123, DOI: 10.1007/s11336-016-9530-0.
#'
#' @author Cristina Mollica and Luca Tardella
#'
#' @seealso \code{\link{ppcheckPLMIX}}
#'
#' @examples
#'
#' data(d_carconf)
#' K <- ncol(d_carconf)
#'
#' ## Fit 1- and 2-component PL mixtures via MAP estimation
#' MAP_1 <- mapPLMIX_multistart(pi_inv=d_carconf, K=K, G=1,
#' n_start=2, n_iter=400*1)
#'
#' MAP_2 <- mapPLMIX_multistart(pi_inv=d_carconf, K=K, G=2,
#' n_start=2, n_iter=400*2)
#'
#' MAP_3 <- mapPLMIX_multistart(pi_inv=d_carconf, K=K, G=3,
#' n_start=2, n_iter=400*3)
#'
#' mcmc_iter <- 30
#' burnin <- 10
#'
#' ## Fit 1- and 2-component PL mixtures via Gibbs sampling procedure
#' GIBBS_1 <- gibbsPLMIX(pi_inv=d_carconf, K=K, G=1, n_iter=mcmc_iter,
#' n_burn=burnin, init=list(p=MAP_1$mod$P_map,
#' z=binary_group_ind(MAP_1$mod$class_map,G=1)))
#' GIBBS_2 <- gibbsPLMIX(pi_inv=d_carconf, K=K, G=2, n_iter=mcmc_iter,
#' n_burn=burnin, init=list(p=MAP_2$mod$P_map,
#' z=binary_group_ind(MAP_2$mod$class_map,G=2)))
#' GIBBS_3 <- gibbsPLMIX(pi_inv=d_carconf, K=K, G=3, n_iter=mcmc_iter,
#' n_burn=burnin, init=list(p=MAP_3$mod$P_map,
#' z=binary_group_ind(MAP_3$mod$class_map,G=3)))
#'
#' ## Checking goodness-of-fit of the estimated mixtures
#' CHECKCOND <- ppcheckPLMIX_cond(pi_inv=d_carconf, seq_G=1:3,
#' MCMCsampleP=list(GIBBS_1$P, GIBBS_2$P, GIBBS_3$P),
#' MCMCsampleW=list(GIBBS_1$W, GIBBS_2$W, GIBBS_3$W))
#' CHECKCOND$post_pred_pvalue_cond
#'
#' @export
if(class(pi_inv)[1]!="top_ordering"){
if(class(pi_inv)[1]=="RankData"){
pi_inv=as.top_ordering(data=pi_inv)
}
if(class(pi_inv)[1]=="rankings"){
pi_inv=as.top_ordering(data=pi_inv)
}
if(class(pi_inv)[1]=="matrix" | class(pi_inv)[1]=="data.frame"){
pi_inv=as.top_ordering(data=pi_inv,format_input="ordering",aggr=FALSE)
}
}
pi_inv <- fill_single_entries(data=pi_inv)
ncomp <- length(seq_G)
if(!parallel){
fitting <- vector(mode="list",length=ncomp)
for(l in 1:ncomp){
fitting[[l]] <- ppcheckPLMIX_cond_single(pi_inv=pi_inv,G=seq_G[l],MCMCsampleP=MCMCsampleP[[l]],
MCMCsampleW=MCMCsampleW[[l]],top1=top1,paired=paired)
}
}else{
fitting <- foreach(l=1:ncomp) %dopar%{
tempfitting <- ppcheckPLMIX_cond_single(pi_inv=pi_inv,G=seq_G[l],MCMCsampleP=MCMCsampleP[[l]],
MCMCsampleW=MCMCsampleW[[l]],top1=top1,paired=paired)
}
}
post_pred_pvalue_cond <- t(sapply(lapply(fitting,"[",c("post_pred_pvalue_top1_cond","post_pred_pvalue_paired_cond")),unlist))
if(!is.numeric(post_pred_pvalue_cond)){
post_pred_pvalue_cond <- matrix(NA,nrow=length(seq_G),ncol=2)
}
attributes(post_pred_pvalue_cond) <- attributes(post_pred_pvalue_cond)[c("dim","dimnames")]
post_pred_pvalue_cond <- as.matrix(post_pred_pvalue_cond)
rownames(post_pred_pvalue_cond) <- paste0("G_",seq_G)
out <- list(post_pred_pvalue_cond=post_pred_pvalue_cond)
return(out)
}
freq_to_unit <- function(freq_distr){
#' Individual rankings/orderings from the frequency distribution
#'
#' Construct the dataset of individual rankings/orderings from the frequency distribution of the distinct observed sequences.
#'
#' @param freq_distr Numeric matrix of the distinct observed sequences with the corresponding frequencies indicated in the last \eqn{(K+1)}-th column.
#'
#' @return Numeric \eqn{N}\eqn{\times}{x}\eqn{K} data matrix of observed individual sequences.
#'
#' @author Cristina Mollica and Luca Tardella
#' @examples
#'
#' library(gtools)
#' K <- 4
#' perm_matrix <- permutations(n=K, r=K)
#' freq_data <- cbind(perm_matrix, sample(1:factorial(K)))
#' freq_data
#' freq_to_unit(freq_distr=freq_data)
#'
#' @export
if(is.vector(freq_distr)){
freq_distr <- t(freq_distr)
}
K <- ncol(freq_distr)-1
r_seq <- fill_single_entries(data=freq_distr[,-(K+1)])
out <- r_seq[rep(1:nrow(r_seq),freq_distr[,(K+1)]),]
rownames(out) <- NULL
return(out)
######### ALL THE DIRECTIVES NEEDED TO CREATE THE NAMESPACE FILE
######### ARE INSERTED BELOW
#'@useDynLib PLMIX, .registration = TRUE
#'@importFrom stats median
#'@importFrom stats var
#'@importFrom stats rgamma
#'@importFrom stats dgamma
#'@importFrom stats na.omit
#'@importFrom utils getFromNamespace
#'@importFrom abind adrop
#'@importFrom coda as.mcmc
#'@importFrom coda HPDinterval
#'@importFrom foreach foreach
#'@importFrom foreach %dopar%
#'@importFrom graphics plot
#'@importFrom gtools ddirichlet
#'@importFrom gtools permutations
#'@importFrom gridExtra grid.arrange
#'@importFrom ggmcmc ggmcmc
#'@importFrom ggmcmc ggs
#'@importFrom ggplot2 ggplot
#'@importFrom ggplot2 aes
#'@importFrom ggplot2 aes_string
#'@importFrom ggplot2 position_stack
#'@importFrom ggplot2 geom_bar
#'@importFrom ggplot2 coord_polar
#'@importFrom ggplot2 labs
#'@importFrom ggplot2 geom_tile
#'@importFrom ggplot2 scale_fill_brewer
#'@importFrom ggplot2 theme_void
#'@importFrom ggplot2 xlim
#'@importFrom ggplot2 scale_fill_gradient
#'@importFrom ggplot2 element_blank
#'@importFrom ggplot2 theme
#'@importFrom label.switching pra
#'@importFrom label.switching permute.mcmc
#'@importFrom MCMCpack rdirichlet
#'@importFrom reshape2 melt
#'@importFrom rcdd makeH
#'@importFrom rcdd scdd
#'@importFrom radarchart chartJSRadar
#'@importFrom Rcpp evalCpp
#'
}
unit_to_freq <- function(data){
#' Frequency distribution from the individual rankings/orderings
#'
#' Construct the frequency distribution of the distinct observed sequences from the dataset of individual rankings/orderings.
#'
#' @param data Numeric \eqn{N}\eqn{\times}{x}\eqn{K} data matrix of observed individual sequences.
#' @return Numeric matrix of the distinct observed sequences with the corresponding frequencies indicated in the last \eqn{(K+1)}-th column.
#'
#' @author Cristina Mollica and Luca Tardella
#' @examples
#'
#' ## Frequency distribution for the APA top-ordering dataset
#' data(d_apa)
#' unit_to_freq(data=d_apa)
#' @export
data <- fill_single_entries(data=data)
K <- ncol(data)
freq <- table(apply(data,1,paste,collapse="-"))
obs_seq <- matrix(as.numeric(unlist(strsplit(names(freq),split="-"))),nrow=length(freq),ncol=K,byrow=TRUE)
rownames(obs_seq) <- NULL
out <- cbind(obs_seq,freq=freq,deparse.level=0)
rownames(out) <- NULL
return(out)
}
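## A minimal sketch (not run) of the round trip between the two formats: the
## frequencies returned by unit_to_freq() add up to the number of rows of the
## original dataset, and freq_to_unit() disaggregates them back.
if (FALSE) {
  data(d_carconf)
  freq <- unit_to_freq(data = d_carconf)
  sum(freq[, ncol(freq)]) == nrow(d_carconf)   # TRUE: frequencies add up to N
  back <- freq_to_unit(freq_distr = freq)      # same sequences, possibly reordered
}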
fill_single_entries <- function(data){
#/' Utility to fill in single missing entries of top-(K-1) sequences in partial ordering/ranking datasets
#/'
#/' @param data Numeric data matrix of partial sequences.
#/'
#/' @return Numeric data matrix of partial sequences in the same format of the input \code{data} with possible single missing entries filled.
#/' @author Cristina Mollica and Luca Tardella
if(is.vector(data)){
data <- t(data)
}
K=ncol(data)
r_single_miss <- (rowSums(data==0)==1)
if(any(r_single_miss)){
w_row <- which(r_single_miss)
w_col <- apply(data[w_row,,drop=FALSE],1,function(x)which(x==0))
w_item <- apply(data[w_row,,drop=FALSE],1,setdiff,x=1:K)
data[cbind(w_row,w_col)] <- w_item
warning(paste(paste0("Top-",K-1,""),"sequencies correspond to full orderings. Single missing entries filled."), call. = FALSE)
}
return(data)
}
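## A minimal sketch (not run): in a K = 4 ordering, a top-3 sequence determines
## the last item, so the single zero entry is filled deterministically (with a
## warning). The input vector is hypothetical.
if (FALSE) {
  fill_single_entries(data = c(2, 4, 3, 0))   # becomes 2 4 3 1
}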
is.top_ordering <- function(data,...){
#' Top-ordering datasets
#'
#' Check the consistency of partial ordering data with a top-ordering dataset.
#'
#' The argument \code{data} requires the partial sequences expressed in ordering format. When the value of \code{is.top_ordering} is \code{FALSE}, the membership function also returns a message with the conditions that are not met for the \code{data} to be a top-ordering dataset. \code{NA}'s in the input \code{data} are tacitly converted into zero entries.
#'
#' @param data An object containing the partial orderings whose consistency with a top-ordering dataset has to be tested. The following classes are admissible for \code{data}: numeric \code{matrix}, \code{data.frame}, \code{RandData} from the \code{rankdist} package and \code{rankings} from the \code{PlackettLuce} package.
#' @param ... Further arguments passed to or from other methods (not used).
#'
#' @return Logical: \code{TRUE} if the \code{data} argument is consistent with a top-ordering dataset (with a possible warning message if the supplied data need further treatment with the coercion function \code{\link{as.top_ordering}} before being processed with the core functions of \pkg{PLMIX}) and \code{FALSE} otherwise.
#'
#' @references
#' Turner, H., Kosmidis, I. and Firth, D. (2018). PlackettLuce: Plackett-Luce Models for Rankings. R package version 0.2-3. \url{https://CRAN.R-project.org/package=PlackettLuce}
#'
#' Qian, Z. (2018). rankdist: Distance Based Ranking Models. R package version 1.1.3. \url{https://CRAN.R-project.org/package=rankdist}
#'
#' @author Cristina Mollica and Luca Tardella
#'
#' @seealso \code{\link[PlackettLuce]{rankings}} and \code{\link[PlackettLuce]{rankings}}
#'
#' @examples
#'
#' ## A toy example of data matrix not satisfying the conditions to be a top-ordering dataset
#' toy_data=rbind(1:5,
#' c(0,4,3,2,1),
#' c(4,3.4,2,1,5),
#' c(2,3,0,0,NA),
#' c(4,4,3,2,5),
#' c(3,5,4,2,6),
#' c(2,-3,1,4,5),
#' c(2,0,1,4,5),
#' c(2,3,1,1,1),
#' c(2,3,0,4,0))
#'
#' is.top_ordering(data=toy_data)
#'
#' ## A dataset from the StatRank package satisfying the conditions to be a top-ordering dataset
#' library(StatRank)
#' data(Data.Election9)
#' is.top_ordering(data=Data.Election9)
#'
#' @export is.top_ordering
#' @export
if(!(class(data)[1]%in%c("top_ordering","RankData","rankings","matrix","data.frame"))){
stop("Invalid 'type' of data argument.")
}
if(any(class(data)=="top_ordering")){
out=TRUE
}
if(class(data)[1]=="RankData"){
warning("Objects of class 'RankData' are compatible with top-ordering datasets, but need to be coerced with as.top_ordering() before using the other functions of the PLMIX package.")
out=TRUE
}
if(class(data)[1]=="rankings"){
ttt=try(as.top_ordering(data=data, format_input="ranking"),silent=TRUE)
if(class(ttt)=="try-error"){
warning("The supplied data of class 'rankings' is not compatible with a top-ordering dataset because all rankings contain ties.")
out=FALSE
}else{
warning("The supplied data of class 'rankings' is compatible with a top-ordering dataset, but needs to be coerced with as.top_ordering() before using the other functions of the PLMIX package.")
out=TRUE
}
}
if(class(data)[1]=="matrix" | class(data)[1]=="data.frame"){
if(class(data)[1]=="data.frame"){
data <- as.matrix(data)
}
if(is.vector(data)){
data <- t(data)
}
data[which(is.na(data))]=0
K=ncol(data)
if(any(!(data%in%(0:K)))){
check1=FALSE
message(paste0("->> Only integers {", paste(0:K,collapse=", "), "} are allowed as entries of the top-ordering dataset:"))
if(any(data<0)){
message("* Some entries are negative.")
}
if(any(data>K)){
message(paste0("* Some entries are > ", K,"."))
}
if(any((data%%1)>0)){
message(paste("* Some entries are not integer."))
}
}else{
check1=TRUE
}
data_dupl <- t(apply(data,1,duplicated))
data_dupl[data==0] <- NA
if(any(data_dupl,na.rm=TRUE)){
check2=FALSE
message("->> Ties are not allowed.")
}else{
check2=TRUE
}
non_cons_zeros=apply(data,1,function(x) if(0%in%x) length(setdiff(min(which(x==0)):K,which(x==0)))>0 else FALSE )
if(any(non_cons_zeros)){
check3=FALSE
message("->> Non-consecutive zero are not allowed in the rows of a top-ordering dataset.")
}else{
check3=TRUE
}
if(any(data[,1]==0)){
check4=FALSE
message("->> Rows starting with zero entries are not allowed in a top-ordering dataset.")
}else{
check4=TRUE
}
out=all(c(check1,check2,check3,check4))
}
return(out)
}
as.top_ordering <- function(data,format_input=NULL,aggr=NULL,freq_col=NULL,ties_method="random",...){
#' Coercion into top-ordering datasets
#'
#' Attempt to coerce the input data into a top-ordering dataset.
#'
#' The coercion function \code{as.top_ordering} tries to coerce the input data into an object of class \code{top_ordering} after checking for possible partial sequences that do not satisfy the top-ordering requirements. If none of the supplied sequences satisfies the top-ordering conditions, an error message is returned. \code{NA}'s in the input \code{data} are tacitly converted into zero entries.
#'
#' @param data An object containing the partial sequences to be coerced into an object of class \code{top_ordering}. The following classes are admissible for \code{data}: numeric \code{matrix}, \code{data.frame}, \code{RandData} from the \code{rankdist} package and \code{rankings} from the \code{PlackettLuce} package.
#' @param format_input Character string indicating the format of the \code{data} input, namely \code{"ordering"} or \code{"ranking"}. Used only when the class of the \code{data} argument is matrix or data frame. Default is \code{NULL}.
#' @param aggr Logical: whether the \code{data} argument collects the distinct observed sequences with the corresponding frequencies (aggregated format). Used only when the class of the \code{data} argument is matrix or data frame. Default is \code{NULL}.
#' @param freq_col Integer indicating the column of the \code{data} argument containing the frequencies of the distinct observed sequences. Used only when the class of the \code{data} argument is matrix or data frame and \code{aggr} argument is \code{TRUE}. Default is \code{NULL}.
#' @param ties_method Character string indicating the treatment of sequences with ties (not used for data of class \code{RankData}). If \code{"remove"}, the sequences with ties are removed before acting the coercion; if \code{"random"} (default), tied positions are re-assigned at random before acting the coercion.
#' @param ... Further arguments passed to or from other methods (not used).
#'
#' @return An object of S3 class \code{c("top_ordering","matrix")}.
#'
#' @references
#' Turner, H., Kosmidis, I. and Firth, D. (2018). PlackettLuce: Plackett-Luce Models for Rankings. R package version 0.2-3. \url{https://CRAN.R-project.org/package=PlackettLuce}
#'
#' Qian, Z. (2018). rankdist: Distance Based Ranking Models. R package version 1.1.3. \url{https://CRAN.R-project.org/package=rankdist}
#'
#' @author Cristina Mollica and Luca Tardella
#'
#' @seealso \code{\link{is.top_ordering}}, \code{\link[PlackettLuce]{as.rankings}} and \code{\link[PlackettLuce]{rankings}}
#'
#' @examples
#'
#' ## Coerce an object of class 'rankings' into an object of class 'top_ordering'
#' library(PlackettLuce)
#' RR <- matrix(c(1, 2, 0, 0,
#' 4, 1, 2, 3,
#' 2, 1, 1, 1,
#' 1, 2, 3, 0,
#' 2, 1, 1, 0,
#' 1, 0, 3, 2), nrow = 6, byrow = TRUE)
#' RR_rank=as.rankings(RR)
#' RR_rank
#' as.top_ordering(RR_rank, ties_method="random")
#'
#' ## Coerce an object of class 'RankData' into an object of class 'top_ordering'
#' library(rankdist)
#' data(apa_partial_obj)
#' d_apa_top_ord=as.top_ordering(data=apa_partial_obj)
#' identical(d_apa,d_apa_top_ord)
#'
#' ## Coerce a data frame from the package prefmod into an object of class 'top_ordering'
#' library(prefmod)
#' data(carconf)
#' carconf_rank=carconf[,1:6]
#' carconf_top_ord=as.top_ordering(data=carconf_rank,format_input="ranking",aggr=FALSE)
#' identical(d_carconf,carconf_top_ord)
#'
#' ## Coerce a data frame from the package pmr into an object of class 'top_ordering'
#' library(pmr)
#' data(big4)
#' head(big4)
#' big4_top_ord=as.top_ordering(data=big4,format_input="ranking",aggr=TRUE,freq_col=5)
#' head(big4_top_ord)
#'
#' @export as.top_ordering
#' @export
if(!(class(data)[1]%in%c("top_ordering","RankData","rankings","matrix","data.frame"))){
stop("Invalid 'type' of data argument (see 'Details').")
}
if(any(class(data)=="top_ordering")){
out=data
}
if(class(data)[1]=="RankData"){
K=data@nobj
dist_rankings=data@ranking
tied_rows=apply(dist_rankings,1,function(x)any(duplicated(x)))
dist_rankings[tied_rows,]=t(apply(dist_rankings[tied_rows,],1,rank,ties.method="max"))
temp_tied=dist_rankings[tied_rows,]
temp_tied[which(temp_tied==K)]=0
dist_rankings[tied_rows,]=temp_tied
dist_orderings=rank_ord_switch(data=dist_rankings,format_input="ranking")
n_dist=data@ndistinct
out=dist_orderings[rep(1:n_dist,times=data@count),]
}
if(class(data)[1]=="rankings"){
temp_rankings=unclass(data)
N=nrow(temp_rankings)
temp_rankings[temp_rankings==0] <- NA
tied_rows=which(apply(temp_rankings,1,function(x)any(duplicated(na.omit(x)))))
if(length(tied_rows)>0){
if(ties_method=="remove"){
if(length(tied_rows)<N){
warning("Rankings with ties are removed from the supplied dataset.")
temp_rankings=temp_rankings[-tied_rows,,drop=FALSE]
}else{
if(length(tied_rows)==N){
stop("Supplied data cannot be coerced into a top-ordering dataset because all rankings contain ties.")
}
}
}else{
warning("Tied positions are re-assigned at random to satisfy the top-ordering requirements.")
temp_rankings[tied_rows,]=t(apply(temp_rankings[tied_rows,],1,rank,na.last="keep",ties.method="random"))
}
}
temp_rankings[is.na(temp_rankings)] <- 0
out=rank_ord_switch(temp_rankings,format_input="ranking")
}
if(class(data)[1]=="matrix" | class(data)[1]=="data.frame"){
if(class(data)[1]=="data.frame"){
data <- as.matrix(data)
}
if(is.vector(data)){
data <- t(data)
}
if(aggr){
NN=nrow(data)
data_aggr=data[,-freq_col,drop=FALSE]
freq=data[,freq_col]
data=data_aggr[rep(1:NN,times=freq),]
}
K=ncol(data)
N=nrow(data)
data[which(is.na(data))]=0
if(format_input=="ordering"){
check1=which(apply(data,1,function(x)any(!(x%in%(0:K)))))
data_dupl <- t(apply(data,1,duplicated))
data_dupl[data==0] <- NA
check2=which(apply(data_dupl,1,any,na.rm=TRUE))
non_cons_zeros=apply(data,1,function(x) if(0%in%x) length(setdiff(min(which(x==0)):K,which(x==0)))>0 else FALSE )
check3=which(non_cons_zeros)
check4=which(data[,1]==0)
checks=unique(c(check1,check2,check3,check4))
if(length(checks)>0 & length(checks)<N){
warning("Rows not satisfying the requirements of a top-ordering dataset have been removed. Please, apply the function is.top_ordering() to the supplied data for more information.")
data=data[-checks,,drop=FALSE]
}else{
if(length(checks)==N){
stop("Supplied data cannot be coerced because the provided orderings do not satisfy the requirements of a top-ordering dataset.")
}
}
}else{
check1=which(apply(data,1,function(x)any(!(x%in%(0:K)))))
data_dupl <- t(apply(data,1,duplicated))
data_dupl[data==0] <- NA
check2=which(apply(data_dupl,1,any,na.rm=TRUE))
check3=which(apply(data,1,function(x)any(diff(sort(x))>1)))
if(ties_method=="remove"){
checks=unique(c(check1,check2,check3))
}else{
checks=unique(c(check1,check3))
if(length(check2)>0){
warning("Tied positions are re-assigned at random to satisfy the top-ordering requirements.")
temp=data[check2,]
temp[temp==0]=NA
data[check2,]=t(apply(temp,1,rank,na.last="keep",ties.method="random"))
}
}
if(length(checks)>0 & length(checks)<N){
warning("Rows not satisfying the requirements of a top-ordering dataset have been removed. Please, apply the function is.top_ordering() to the supplied data for more information.")
data=data[-checks,,drop=FALSE]
}else{
if(length(checks)==N){
stop("Supplied data cannot be coerced because the provided rankings do not satisfy the requirements of a top-ordering dataset.")
}
}
data=rank_ord_switch(data,format_input="ranking")
}
out=data
}
class(out) <- c("top_ordering","matrix")
return(out)
}
myorder <- function(x){
#/' Utility to switch from a partial ranking to a partial ordering (missing positions denoted with zero)
#/' @param x Numeric integer vector
#/'
#/' @author Cristina Mollica and Luca Tardella
k <- sum(is.na(x))
out <- c(order(x,na.last=NA),rep(0,k))
return(out)
}
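## A minimal sketch (not run) of the utility above on a single partial ranking:
## unranked items enter as NA and the resulting ordering is zero-padded.
if (FALSE) {
  myorder(c(2, NA, 1, NA))   # returns 3 1 0 0: item 3 ranked first, item 1 second
}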
rank_ord_switch <- function(data,format_input,nranked=NULL){
#' Switch from orderings to rankings and vice versa
#'
#' Convert the format of the input dataset from orderings to rankings and vice versa.
#'
#'
#' @param data Numeric \eqn{N}\eqn{\times}{x}\eqn{K} data matrix of partial sequences whose format has to be converted.
#' @param format_input Character string indicating the format of the \code{data} input, namely \code{"ordering"} or \code{"ranking"}.
#' @param nranked Optional numeric vector of length \eqn{N} with the number of items ranked by each sample unit.
#'
#' @return Numeric \eqn{N}\eqn{\times}{x}\eqn{K} data matrix of partial sequences with inverse format.
#'
#' @author Cristina Mollica and Luca Tardella
#'
#' @examples
#'
#' ## From orderings to rankings for the Dublin West dataset
#' data(d_dublinwest)
#' head(d_dublinwest)
#' rank_ord_switch(data=head(d_dublinwest), format_input="ordering")
#' @export
data <- fill_single_entries(data=data)
K <- ncol(data)
if(any(data==0)){
data[data==0] <- NA
if(format_input=="ranking"){
out <- t(apply(data,1,myorder))
colnames(out) <- paste0("Rank_",1:K)
}else{
N <- nrow(data)
if(is.null(nranked)) nranked=rowSums(!is.na(data))
out <- matrix(0,nrow=N,ncol=K)
out[cbind(rep(1:N,nranked),na.omit(c(t(data))))] <- unlist(sapply(nranked,seq,from=1))
}
}else{
out <- t(apply(data,1,order))
}
if(format_input=="ranking"){
colnames(out) <- paste0("Rank_",1:K)
}else{
colnames(out) <- paste0("Item_",1:K)
}
return(out)
}
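## The two formats are mutually inverse: switching twice recovers the input
## (rows with a single missing entry are completed first by fill_single_entries).
## Minimal round-trip sketch, inert in an if(FALSE) block:
if(FALSE){
data(d_dublinwest)
rnk <- rank_ord_switch(head(d_dublinwest), format_input="ordering")
rank_ord_switch(rnk, format_input="ranking")   # back to the original orderings
}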
rank_summaries <- function(data,format_input,mean_rank=TRUE,marginals=TRUE,pc=TRUE){
#' Descriptive summaries for a partial ordering/ranking dataset
#'
#' Compute rank summaries and censoring patterns for a partial ordering/ranking dataset.
#'
#' @param data Numeric \eqn{N}\eqn{\times}{x}\eqn{K} data matrix of partial sequences.
#' @param format_input Character string indicating the format of the \code{data} input, namely \code{"ordering"} or \code{"ranking"}.
#' @param mean_rank Logical: whether the mean rank vector has to be computed. Default is \code{TRUE}.
#' @param marginals Logical: whether the marginal rank distributions have to be computed. Default is \code{TRUE}.
#' @param pc Logical: whether the paired comparison matrix has to be computed. Default is \code{TRUE}.
#'
#' @return A list of named objects:
#'
#' \item{\code{nranked}}{ Numeric vector of length \eqn{N} with the number of items ranked by each sample unit.}
#' \item{\code{nranked_distr}}{ Frequency distribution of the \code{nranked} vector.}
#' \item{\code{na_or_not}}{ Numeric \eqn{3}\eqn{\times}{x}\eqn{K} matrix with the counts of sample units that did or did not rank each item. The last row reports the column totals, each equal to the sample size \eqn{N}.}
#' \item{\code{mean_rank}}{ Numeric vector of length \eqn{K} with the mean rank of each item.}
#' \item{\code{marginals}}{ Numeric \eqn{K}\eqn{\times}{x}\eqn{K} matrix of the marginal rank distributions: the \eqn{(i,j)}-th entry indicates the number of units that ranked item \eqn{i} in the \eqn{j}-th position.}
#' \item{\code{pc}}{ Numeric \eqn{K}\eqn{\times}{x}\eqn{K} paired comparison matrix: the \eqn{(i,i')}-th entry indicates the number of sample units that preferred item \eqn{i} to item \eqn{i'}.}
#'
#'
#' @references
#' Marden, J. I. (1995). Analyzing and modeling rank data. \emph{Monographs on Statistics and Applied Probability} (64). Chapman & Hall, ISSN: 0-412-99521-2. London.
#'
#' @author Cristina Mollica and Luca Tardella
#'
#' @examples
#'
#' data(d_carconf)
#' rank_summaries(data=d_carconf, format_input="ordering")
#' @export
data <- fill_single_entries(data=data)
N <- nrow(data)
K <- ncol(data)
if(format_input=="ordering"){
data <- rank_ord_switch(data=data,format_input=format_input,nranked=NULL)
format_input <- "ranking"
}
data[data==0] <- NA
isna_data <- is.na(data)
nranked <- rowSums(!isna_data)
#nranked_distr <- table(nranked,dnn=NULL,deparse.level=0)
nranked_distr <- table(factor(nranked,levels=1:K))
#names(nranked_distr) <- paste0("Top-",1:(K-1))
names(nranked_distr) <- paste0("Top-",names(nranked_distr))
na <- colSums(isna_data)
na_or_not <- rbind(na, N-na, rep(N, K))
dimnames(na_or_not) <- list(c("n.a.","not n.a.","total"),paste0("Item_",1:K))
if(mean_rank){
mean_rank <- colMeans(data,na.rm=TRUE)
names(mean_rank) <- paste0("Item_",1:K)
}else{
mean_rank <- NULL
}
if(marginals){
marginals <- apply(data,2,tabulate,nbins=K)
dimnames(marginals) <- list(paste0("Rank_",1:K),paste0("Item_",1:K))
}else{
marginals <- NULL
}
if(pc){
data[is.na(data)] <- 0
pc <- paired_comparisons(data=data,format_input=format_input,nranked=nranked)
rownames(pc) <- colnames(pc) <- paste0("Item_",1:K)
}else{
pc <- NULL
}
out <- list(nranked=nranked,nranked_distr=nranked_distr,
na_or_not=na_or_not,mean_rank=mean_rank,
marginals=marginals,pc=pc)
return(out)
}
paired_comparisons <- function(data,format_input,nranked=NULL){
#' Paired comparison matrix for a partial ordering/ranking dataset
#'
#' Construct the paired comparison matrix for a partial ordering/ranking dataset.
#'
#' @param data Numeric \eqn{N}\eqn{\times}{x}\eqn{K} data matrix of partial sequences.
#' @param format_input Character string indicating the format of the \code{data} input, namely \code{"ordering"} or \code{"ranking"}.
#' @param nranked Optional numeric vector of length \eqn{N} with the number of items ranked by each sample unit.
#'
#' @return Numeric \eqn{K}\eqn{\times}{x}\eqn{K} paired comparison matrix: the \eqn{(i,i')}-th entry indicates the number of sample units that preferred item \eqn{i} to item \eqn{i'}.
#'
#'
#' @references
#' Mollica, C. and Tardella, L. (2017). Bayesian Plackett-Luce mixture models for partially ranked data. \emph{Psychometrika}, \bold{82}(2), pages 442--458, ISSN: 0033-3123, DOI: 10.1007/s11336-016-9530-0.
#'
#' @author Cristina Mollica and Luca Tardella
#'
#' @seealso \code{\link{rank_summaries}}
#'
#' @examples
#'
#' data(d_dublinwest)
#' paired_comparisons(data=d_dublinwest, format_input="ordering")
#' @export
data <- fill_single_entries(data=data)
N <- nrow(data)
K <- ncol(data)
if(format_input=="ranking"){
if(is.null(nranked)) nranked <- rowSums(data!=0)
data <- rank_ord_switch(data,format_input=format_input,nranked=nranked)
}
pc <- tau(pi_inv=data)
rownames(pc) <- colnames(pc) <- paste0("Item_",1:K)
return(pc)
} # K*K matrix
make_partial <- function(data,format_input,nranked=NULL,probcens=rep(1,ncol(data)-1)){
#' Censoring of complete rankings/orderings
#'
#' Return partial top rankings/orderings from complete sequences obtained either with user-specified censoring patterns or with a random truncation.
#'
#' The censoring of the complete sequences can be performed either (i) deterministically, by specifying in the \code{nranked} argument the number of top positions to be retained for each sample unit, or (ii) randomly, by sequentially specifying in the \code{probcens} argument the probabilities of the top-1, top-2, \eqn{...}, top-\eqn{(K-1)} censoring patterns (a deterministic-censoring sketch follows the function definition below). Recall that a top-\eqn{(K-1)} sequence corresponds to a complete ordering/ranking.
#'
#' @param data Numeric \eqn{N}\eqn{\times}{x}\eqn{K} data matrix of complete sequences to be censored.
#' @param format_input Character string indicating the format of the \code{data} input, namely \code{"ordering"} or \code{"ranking"}.
#' @param nranked Numeric vector of length \eqn{N} with the desired number of items ranked by each sample unit after censoring. If not supplied (\code{NULL}), the censoring patterns are randomly generated according to the probabilities in the \code{probcens} argument.
#' @param probcens Numeric vector of length \eqn{(K-1)} with the probability of each censoring pattern to be employed for the random truncation of the complete sequences (normalization is not necessary). It works only if \code{nranked} argument is \code{NULL} (see 'Details'). Default is equal probabilities.
#'
#' @return A list of two named objects:
#'
#' \item{\code{partialdata}}{ Numeric \eqn{N}\eqn{\times}{x}\eqn{K} data matrix of partial (censored) sequences with the same format of the input \code{data} and missing positions/items denoted with zero entries.}
#' \item{\code{nranked}}{ Numeric vector of length \eqn{N} with the number of items ranked by each sample unit after censoring.}
#'
#' @author Cristina Mollica and Luca Tardella
#'
#' @examples
#'
#' data(d_german)
#' head(d_german)
#' d_german_cens <- make_partial(data=d_german, format_input="ordering",
#' probcens=c(0.3, 0.3, 0.4))
#' head(d_german_cens$partialdata)
#'
#' ## Check consistency with the nominal censoring probabilities
#' round(prop.table(table(d_german_cens$nranked)), 2)
#'
#' @export
data <- fill_single_entries(data=data)
K <- ncol(data)
if(format_input=="ranking"){
data <- rank_ord_switch(data,format_input=format_input)
}
if(is.null(nranked)){
N <- nrow(data)
# a top-(K-1) pattern is equivalent to a complete sequence, hence coded as K
nranked <- sample(c(1:(K-2),K),size=N,replace=TRUE,prob=probcens)
}
out <- data*t(sapply(nranked,function(x)rep(c(1,0),c(x,K-x))))
if(format_input=="ranking"){
out <- rank_ord_switch(out,format_input="ordering",nranked=nranked)
}
return(list(partialdata=out,nranked=nranked))
} # N*K censored data matrix
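## Minimal sketch of the deterministic censoring route described in 'Details'
## (inert if(FALSE) block; uses the d_german dataset as in the example above):
if(FALSE){
data(d_german)
## retain only the top-2 positions of the first six complete orderings
make_partial(data=head(d_german), format_input="ordering", nranked=rep(2,6))$partialdata
}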
make_complete <- function(data,format_input,nranked=NULL,probitems=rep(1,ncol(data))){
#' Completion of partial rankings/orderings
#'
#' Return complete rankings/orderings from partial sequences relying on a random generation of the missing positions/items.
#'
#' The completion of the partial top rankings/orderings is performed according to the Plackett-Luce scheme, that is, by sampling the not-ranked items without replacement with probabilities proportional to the positive values in the \code{probitems} argument (normalization is not necessary).
#'
#' @param data Numeric \eqn{N}\eqn{\times}{x}\eqn{K} data matrix of partial sequences to be completed.
#' @param format_input Character string indicating the format of the \code{data} input, namely \code{"ordering"} or \code{"ranking"}.
#' @param nranked Optional numeric vector of length \eqn{N} with the number of items ranked by each sample unit.
#' @param probitems Numeric vector with the \eqn{K} item-specific probabilities to be employed for the random generation of the missing positions/items (see 'Details'). Default is equal probabilities.
#'
#' @return A list of two named objects:
#'
#' \item{\code{completedata}}{ Numeric \eqn{N}\eqn{\times}{x}\eqn{K} data matrix of complete sequences with the same format of the input \code{data}.}
#' \item{\code{nranked}}{ Numeric vector of length \eqn{N} with the number of items ranked by each sample unit of the input \code{data}.}
#'
#' @author Cristina Mollica and Luca Tardella
#'
#' @examples
#'
#' ## Completion based on the top item frequencies
#' data(d_dublinwest)
#' head(d_dublinwest)
#' top_item_freq <- rank_summaries(data=d_dublinwest, format_input="ordering", mean_rank=FALSE,
#' pc=FALSE)$marginals["Rank_1",]
#'
#' d_dublinwest_compl <- make_complete(data=d_dublinwest, format_input="ordering",
#' probitems=top_item_freq)
#' head(d_dublinwest_compl$completedata)
#'
#' @export
data <- fill_single_entries(data=data)
K <- ncol(data)
if(is.null(nranked)){
nranked <- rowSums(data!=0)
}
if(format_input=="ranking"){
data <- rank_ord_switch(data,format_input=format_input,nranked=nranked)
}
data[data==0] <- NA
out <- data
partialdata <- out[which(nranked!=K),]
# complete each censored row: append the not-ranked items, drawn without
# replacement with probabilities proportional to probitems (PL scheme)
out[which(nranked!=K),] <- t(apply(partialdata,1,function(x){
notrankeditems=setdiff(1:K,x)
c(na.omit(x),sample(notrankeditems,prob=probitems[notrankeditems]))
}))
if(format_input=="ranking"){
out <- rank_ord_switch(out,format_input="ordering")
}
return(list(completedata=out,nranked=nranked))
}
### Utility to simulate from an EPL
mysample <- function(support,pr){
sample(x=support,prob=pr)
}
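## Sampling all items without replacement with probabilities proportional to the
## support parameters is exactly one Plackett-Luce ordering draw. Minimal sketch
## with hypothetical supports, inert in an if(FALSE) block:
if(FALSE){
set.seed(1)
mysample(support=1:4, pr=c(0.4,0.3,0.2,0.1))   # one complete ordering of 4 items
}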
rPLMIX <- function(n=1,K,G,p=t(matrix(1/K,nrow=K,ncol=G)),ref_order=t(matrix(1:K,nrow=K,ncol=G)),weights=rep(1/G,G),format_output="ordering"){
#' Random sample from a mixture of Plackett-Luce models
#'
#' Draw a random sample of complete orderings/rankings from a \eqn{G}-component mixture of Plackett-Luce models.
#'
#' Positive values are required for \code{p} and \code{weights} arguments (normalization is not necessary).
#'
#' The \code{ref_order} argument accommodates the more general mixture of Extended Plackett-Luce models (EPL), involving the additional reference order parameters (Mollica and Tardella 2014). A permutation of the first \eqn{K} integers can be specified in each row of the \code{ref_order} argument to generate a sample from a \eqn{G}-component mixture of EPL. Since the Plackett-Luce model is a special instance of the EPL with the reference order equal to the identity permutation \eqn{(1,\dots,K)}, the default value of the \code{ref_order} argument is forward orders.
#'
#' @param n Number of observations to be sampled. Default is 1.
#' @param K Number of possible items.
#' @param G Number of mixture components.
#' @param p Numeric \eqn{G}\eqn{\times}{x}\eqn{K} matrix of component-specific support parameters. Default is equal support parameters (uniform mixture components).
#' @param ref_order Numeric \eqn{G}\eqn{\times}{x}\eqn{K} matrix of component-specific reference orders. Default is forward orders (identity permutations) in each row, corresponding to Plackett-Luce mixture components (see 'Details').
#' @param weights Numeric vector of \eqn{G} mixture weights. Default is equal weights.
#' @param format_output Character string indicating the format of the returned simulated dataset (\code{"ordering"} or \code{"ranking"}). Default is \code{"ordering"}.
#'
#' @return If \eqn{G=1}, a numeric \eqn{N}\eqn{\times}{x}\eqn{K} matrix of simulated complete sequences. If \eqn{G>1}, a list of two named objects:
#'
#' \item{\code{comp}}{ Numeric vector of \eqn{N} mixture component memberships.}
#' \item{\code{sim_data}}{ Numeric \eqn{N}\eqn{\times}{x}\eqn{K} matrix of simulated complete sequences.}
#'
#' @author Cristina Mollica and Luca Tardella
#'
#' @examples
#'
#' K <- 6
#' G <- 3
#' support_par <- matrix(1:(G*K), nrow=G, ncol=K)
#' weights_par <- c(0.50, 0.25, 0.25)
#'
#' set.seed(47201)
#' simulated_data <- rPLMIX(n=5, K=K, G=G, p=support_par, weights=weights_par)
#' simulated_data$comp
#' simulated_data$sim_data
#'
#' @export
if(G==1){
if(is.matrix(p)) p <- c(p)
if(is.matrix(ref_order)) ref_order <- c(ref_order)
p_par <- p/sum(p)
perm_par <- matrix(p_par,nrow=K,ncol=n)
out <- t(apply(perm_par,2,mysample,support=1:K))
out <- out[,order(ref_order)]
if(format_output=="ranking") out <- rank_ord_switch(out,format_input="ordering",nranked=rep(K,n))
return(out)
}else{
p_par <- p/rowSums(p)
comp <- sample(x=1:G,size=n,replace=TRUE,prob=weights)
perm_par <- p_par[comp,]
out <- t(apply(perm_par,1,mysample,support=1:K))
for(g in 1:G){
out[comp==g,] <- out[comp==g,order(ref_order[g,])]
}
if(format_output=="ranking"){
out <- rank_ord_switch(out,format_input="ordering",nranked=rep(K,n))
}
return(list(comp=comp,sim_data=out))
}
}
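## Sanity sketch (inert if(FALSE) block): with a large n, the empirical component
## frequencies should approach the normalized mixture weights.
if(FALSE){
sim <- rPLMIX(n=5000, K=6, G=3, p=matrix(1:18, nrow=3), weights=c(0.50, 0.25, 0.25))
prop.table(table(sim$comp))
}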
likPLMIX <- function(p,ref_order,weights,pi_inv){
#' @rdname loglikelihood
#' @name Loglikelihood
#' @aliases likPLMIX loglikPLMIX loglikelihood Likelihood likelihood
#' @title Likelihood and log-likelihood evaluation for a mixture of Plackett-Luce models
#'
#' @description Compute either the likelihood or the log-likelihood of the Plackett-Luce mixture model parameters for a partial ordering dataset.
#' @details The \code{ref_order} argument accommodates the more general mixture of Extended Plackett-Luce models (EPL), involving the additional reference order parameters (Mollica and Tardella 2014). A permutation of the first \eqn{K} integers can be specified in each row of the \code{ref_order} argument. Since the Plackett-Luce model is a special instance of the EPL with the reference order equal to the identity permutation, the \code{ref_order} argument must be a matrix with \eqn{G} rows equal to \eqn{(1,\dots,K)} when dealing with Plackett-Luce mixtures.
#' @param p Numeric \eqn{G}\eqn{\times}{x}\eqn{K} matrix of component-specific support parameters.
#' @param ref_order Numeric \eqn{G}\eqn{\times}{x}\eqn{K} matrix of component-specific reference orders.
#' @param weights Numeric vector of \eqn{G} mixture weights.
#' @param pi_inv An object of class \code{top_ordering}, collecting the numeric \eqn{N}\eqn{\times}{x}\eqn{K} data matrix of partial orderings, or an object that can be coerced with \code{\link{as.top_ordering}}.
#' @return Either the likelihood or the log-likelihood value of the Plackett-Luce mixture model parameters for a partial ordering dataset.
#'
#' @references
#' Mollica, C. and Tardella, L. (2017). Bayesian Plackett-Luce mixture models for partially ranked data. \emph{Psychometrika}, \bold{82}(2), pages 442--458, ISSN: 0033-3123, DOI: 10.1007/s11336-016-9530-0.
#'
#' Mollica, C. and Tardella, L. (2014). Epitope profiling via mixture modeling for ranked data. \emph{Statistics in Medicine}, \bold{33}(21), pages 3738--3758, ISSN: 0277-6715, DOI: 10.1002/sim.6224.
#'
#' @author Cristina Mollica and Luca Tardella
#'
#' @examples
#'
#' data(d_apa)
#' K <- ncol(d_apa)
#' G <- 3
#' support_par <- matrix(1:(G*K), nrow=G, ncol=K)
#' weights_par <- c(0.50, 0.25, 0.25)
#' loglikPLMIX(p=support_par, ref_order=matrix(1:K, nrow=G, ncol=K, byrow=TRUE),
#' weights=weights_par, pi_inv=d_apa)
#'
#' @export
if(class(pi_inv)[1]!="top_ordering"){
if(class(pi_inv)[1]=="RankData"){
pi_inv=as.top_ordering(data=pi_inv)
}
if(class(pi_inv)[1]=="rankings"){
pi_inv=as.top_ordering(data=pi_inv)
}
if(class(pi_inv)[1]=="matrix" | class(pi_inv)[1]=="data.frame"){
pi_inv=as.top_ordering(data=pi_inv,format_input="ordering",aggr=FALSE)
}
}
pi_inv <- fill_single_entries(data=pi_inv)
lik <- exp(loglikPLMIX(p,ref_order,weights,pi_inv))
return(lik)
}
bicPLMIX <- function(max_log_lik,pi_inv,G,ref_known=TRUE,ref_vary=FALSE){
#' BIC for the MLE of a mixture of Plackett-Luce models
#'
#' Compute BIC value for the MLE of a mixture of Plackett-Luce models fitted to partial orderings.
#'
#' The \code{max_log_lik} and the BIC values can be straightforwardly obtained from the output of the \code{\link{mapPLMIX}} and \code{\link{mapPLMIX_multistart}} functions when the default noninformative priors are adopted in the MAP procedure. So, the \code{bicPLMIX} function is especially useful to compute the BIC value from the output of alternative MLE methods for mixtures of Plackett-Luce models implemented, for example, in other software.
#'
#' The \code{ref_known} and \code{ref_vary} arguments accommodate the more general mixture of Extended Plackett-Luce models (EPL), involving the additional reference order parameters (Mollica and Tardella 2014). Since the Plackett-Luce model is a special instance of the EPL with the reference order equal to the identity permutation \eqn{(1,\dots,K)}, the default values of \code{ref_known} and \code{ref_vary} are set equal, respectively, to \code{TRUE} and \code{FALSE}.
#'
#' @param max_log_lik Maximized log-likelihood value.
#' @param pi_inv An object of class \code{top_ordering}, collecting the numeric \eqn{N}\eqn{\times}{x}\eqn{K} data matrix of partial orderings, or an object that can be coerced with \code{\link{as.top_ordering}}.
#' @param G Number of mixture components.
#' @param ref_known Logical: whether the component-specific reference orders are known (not to be estimated). Default is \code{TRUE}.
#' @param ref_vary Logical: whether the reference orders vary across mixture components. Default is \code{FALSE}.
#'
#' @return A list of two named objects:
#'
#' \item{\code{max_log_lik}}{ The \code{max_log_lik} argument.}
#' \item{\code{bic}}{ BIC value.}
#'
#' @references
#' Mollica, C. and Tardella, L. (2017). Bayesian Plackett-Luce mixture models for partially ranked data. \emph{Psychometrika}, \bold{82}(2), pages 442--458, ISSN: 0033-3123, DOI: 10.1007/s11336-016-9530-0.
#'
#' Mollica, C. and Tardella, L. (2014). Epitope profiling via mixture modeling for ranked data. \emph{Statistics in Medicine}, \bold{33}(21), pages 3738--3758, ISSN: 0277-6715, DOI: 10.1002/sim.6224.
#'
#' Schwarz, G. (1978). Estimating the dimension of a model. \emph{Ann. Statist.}, \bold{6}(2), pages 461--464, ISSN: 0090-5364, DOI: 10.1214/aos/1176344136.
#'
#' @author Cristina Mollica and Luca Tardella
#'
#' @seealso \code{\link{mapPLMIX}} and \code{\link{mapPLMIX_multistart}}
#'
#' @examples
#'
#' data(d_carconf)
#' K <- ncol(d_carconf)
#' MAP_mult <- mapPLMIX_multistart(pi_inv=d_carconf, K=K, G=3, n_start=2, n_iter=400*3)
#' bicPLMIX(max_log_lik=MAP_mult$mod$max_objective, pi_inv=d_carconf, G=3)$bic
#'
#' ## Equivalently
#' MAP_mult$mod$bic
#'
#' @export
if(class(pi_inv)[1]!="top_ordering"){
if(class(pi_inv)[1]=="RankData"){
pi_inv=as.top_ordering(data=pi_inv)
}
if(class(pi_inv)[1]=="rankings"){
pi_inv=as.top_ordering(data=pi_inv)
}
if(class(pi_inv)[1]=="matrix" | class(pi_inv)[1]=="data.frame"){
pi_inv=as.top_ordering(data=pi_inv,format_input="ordering",aggr=FALSE)
}
}
pi_inv <- fill_single_entries(data=pi_inv)
N <- nrow(pi_inv)
K <- ncol(pi_inv)
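# Free parameters: G*(K-1) support parameters (each row of p sums to one),
# (G-1) mixture weights and, when estimated, either 1 common or G
# component-specific reference orders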
if(!ref_known){
if(ref_vary){
bic <- -2*max_log_lik+(G*(K-1)+G+(G-1))*log(N)
}else{
bic <- -2*max_log_lik+(G*(K-1)+1+(G-1))*log(N)
}
}else{
bic <- -2*max_log_lik+(G*(K-1)+(G-1))*log(N)
}
return(list(max_log_lik=max_log_lik,bic=bic))
}
gammamat <- function(u_bin,z_hat){
# G x K matrix of membership-weighted counts of the units that ranked each item
gam <- t(z_hat)%*%u_bin
return(gam)
}
binary_group_ind <- function(class,G){
#' Binary group membership matrix
#'
#' Construct the binary group membership matrix from the multinomial classification vector.
#'
#' @param class Numeric vector of class memberships.
#' @param G Number of possible different classes.
#'
#' @return Numeric \code{length(class)}\eqn{\times}{x}\eqn{G} matrix of binary group memberships.
#' @author Cristina Mollica and Luca Tardella
#' @examples
#'
#' binary_group_ind(class=c(3,1,5), G=6)
#'
#' @export
N <- length(class)
temp <- (rep(1:G,length(class))==rep(class,each=G))*1
out <- matrix(temp,nrow=N,ncol=G,byrow=TRUE)
return(out)
} # N*G matrix
##########################################################
############# EM for MAP estimation #############################
mapPLMIX <- function(pi_inv,K,G,
init=list(p=NULL,omega=NULL),n_iter=1000,
hyper=list(shape0=matrix(1,nrow=G,ncol=K),rate0=rep(0,G),alpha0=rep(1,G)),
eps=10^(-6),
centered_start=FALSE,
plot_objective=FALSE){
#' MAP estimation for a Bayesian mixture of Plackett-Luce models
#'
#' Perform MAP estimation via EM algorithm for a Bayesian mixture of Plackett-Luce models fitted to partial orderings.
#'
#' Under noninformative (flat) prior setting, the EM algorithm for MAP estimation corresponds to the EMM algorithm described by Gormley and Murphy (2006) to perform frequentist inference. In this case, the MAP solution coincides with the MLE and the output vectors \code{log_lik} and \code{objective} coincide as well.
#'
#' The \code{\link{mapPLMIX}} function performs the MAP procedure with a single starting value. To address the issue of local maxima in the posterior distribution, see the \code{\link{mapPLMIX_multistart}} function.
#'
#' @param pi_inv An object of class \code{top_ordering}, collecting the numeric \eqn{N}\eqn{\times}{x}\eqn{K} data matrix of partial orderings, or an object that can be coerced with \code{\link{as.top_ordering}}.
#' @param K Number of possible items.
#' @param G Number of mixture components.
#' @param init List of named objects with initialization values: \code{p} is a numeric \eqn{G}\eqn{\times}{x}\eqn{K} matrix of component-specific support parameters; \code{omega} is a numeric vector of \eqn{G} mixture weights. If starting values are not supplied (\code{NULL}), they are randomly generated with a uniform distribution. Default is \code{NULL}.
#' @param n_iter Maximum number of EM iterations.
#' @param hyper List of named objects with hyperparameter values for the conjugate prior specification: \code{shape0} is a numeric \eqn{G}\eqn{\times}{x}\eqn{K} matrix of shape hyperparameters; \code{rate0} is a numeric vector of \eqn{G} rate hyperparameters; \code{alpha0} is a numeric vector of \eqn{G} Dirichlet hyperparameters. Default is noninformative (flat) prior setting.
#' @param eps Tolerance value for the convergence criterion.
#' @param centered_start Logical: whether the random starting values of the support parameters and weights should be centered around the observed relative frequencies with which each item has been ranked top. Default is \code{FALSE}. Ignored when \code{init} is not \code{NULL}.
#' @param plot_objective Logical: whether the objective function (that is the kernel of the log-posterior distribution) should be plotted. Default is \code{FALSE}.
#'
#' @return A list of S3 class \code{mpPLMIX} with named elements:
#'
#' \item{\code{W_map}}{ Numeric vector with the MAP estimates of the \eqn{G} mixture weights.}
#' \item{\code{P_map}}{ Numeric \eqn{G}\eqn{\times}{x}\eqn{K} matrix with the MAP estimates of the component-specific support parameters.}
#' \item{\code{z_hat}}{ Numeric \eqn{N}\eqn{\times}{x}\eqn{G} matrix of estimated posterior component membership probabilities.}
#' \item{\code{class_map}}{ Numeric vector of \eqn{N} mixture component memberships based on MAP allocation from the \code{z_hat} matrix.}
#' \item{\code{log_lik}}{ Numeric vector of the log-likelihood values at each iteration.}
#' \item{\code{objective}}{ Numeric vector of the objective function values (that is the kernel of the log-posterior distribution) at each iteration.}
#' \item{\code{max_objective}}{ Maximized objective function value.}
#' \item{\code{bic}}{ BIC value (only for the default flat priors, otherwise \code{NULL}).}
#' \item{\code{conv}}{ Binary convergence indicator: 1 = convergence has been achieved, 0 = otherwise.}
#' \item{\code{call}}{ The matched call.}
#'
#' @references
#' Mollica, C. and Tardella, L. (2017). Bayesian Plackett-Luce mixture models for partially ranked data. \emph{Psychometrika}, \bold{82}(2), pages 442--458, ISSN: 0033-3123, DOI: 10.1007/s11336-016-9530-0.
#'
#' Gormley, I. C. and Murphy, T. B. (2006). Analysis of Irish third-level college applications data. \emph{Journal of the Royal Statistical Society: Series A}, \bold{169}(2), pages 361--379, ISSN: 0964-1998, DOI: 10.1111/j.1467-985X.2006.00412.x.
#'
#' @author Cristina Mollica and Luca Tardella
#'
#' @seealso \code{\link{mapPLMIX_multistart}}
#'
#' @examples
#'
#' data(d_carconf)
#' MAP <- mapPLMIX(pi_inv=d_carconf, K=ncol(d_carconf), G=3, n_iter=400*3)
#' str(MAP)
#' MAP$P_map
#' MAP$W_map
#'
#' @export
cl <- match.call()
if(class(pi_inv)[1]!="top_ordering"){
if(class(pi_inv)[1]=="RankData"){
pi_inv=as.top_ordering(data=pi_inv)
}
if(class(pi_inv)[1]=="rankings"){
pi_inv=as.top_ordering(data=pi_inv)
}
if(class(pi_inv)[1]=="matrix" | class(pi_inv)[1]=="data.frame"){
pi_inv=as.top_ordering(data=pi_inv,format_input="ordering",aggr=FALSE)
}
}
pi_inv <- fill_single_entries(data=pi_inv)
N <- nrow(pi_inv)
n_rank <- howmanyranked(pi_inv)
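# identity reference orders: plain Plackett-Luce components (PL = EPL special case)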
rho <- matrix(1:K,nrow=G,ncol=K,byrow=TRUE)
ref_known <- TRUE
ref_vary <- FALSE
if(is.null(init$omega)){
#omega <- runif(G)
#omega <- omega/sum(omega)
omega <- rdirichlet(1,rep(1,G))
}else{
omega <- init$omega
if(sum(omega)!=1){
warning("Initial mixture weights must add to one ==> input arguments has been normalized!")
omega <- omega/sum(omega)
}
}
if(is.null(init$p)){
if(centered_start){
# print("CENTERED START !!")
mle1comp <- matrix(prop.table(table(factor(pi_inv[,1],levels=1:K))),nrow=1)
p <- random_start(mlesupp=mle1comp, givenweights=omega)
p <- p/rowSums(p)
}else{
# print("COMPLETELY RANDOM (uniform support, rescaled) START")
# p <- matrix(runif(G*K),nrow=G,ncol=K)
# p <- p/rowSums(p)
p <- rdirichlet(G,rep(1,K))
}
}else{
p <- init$p
if(is.vector(p)){
p <- t(p)
}
if(!all(rowSums(p)==1)){
warning("Initial support parameters for each mixture component must
add to one ==> input arguments has been normalized!")
p <- p/rowSums(p)
}
}
init <- list(p=p,omega=omega)
shape0 <- hyper$shape0
rate0 <- hyper$rate0
alpha0 <- hyper$alpha0
u_bin <- umat(pi_inv=pi_inv)
log_lik <- rep(NA,n_iter)
if(!(all(shape0==1) & all(rate0==0) & all(alpha0==1))){
# print("Non-flat prior input")
log_prior <- log_lik
}
objective <- log_lik
conv <- 0
l <- 1
while(l<=n_iter){
z_hat <- Estep(p=p,ref_order=rho,weights=omega,pi_inv=pi_inv)
omega <- UpWhet(z_hat=z_hat,alpha0=alpha0)
if(any(is.na(omega))){
print("==> PROBLEM WITH *omega* update")
print(omega)
}
p <- UpPhetpartial(p=p,ref_order=rho,pi_inv=pi_inv,z_hat=z_hat,shape0=shape0,
rate0=rate0,n_rank=n_rank,u_bin=u_bin)
if(any(is.na(p))){
print("==> PROBLEM WITH *p* update")
print(p)
}
log_lik[l] <- loglikPLMIX(p=p,ref_order=rho,weights=omega,pi_inv=pi_inv)
if(is.na(log_lik[l])){
print(p)
print(omega)
threshold <- -17
while(is.na(log_lik[l]) & threshold<(-3)){
p[p<=(10^threshold)] <- 10^threshold
threshold <- threshold+1
log_lik[l] <- loglikPLMIX(p=p,ref_order=rho,weights=omega,pi_inv=pi_inv)
print(paste0("Likelihood/parameter approximation for support parameter <=10^(-",threshold,")"))
}
}
if(!(all(shape0==1) & all(rate0==0) & all(alpha0==1))){
log_prior[l] <- log(ddirichlet(omega,alpha0))+sum(dgamma(p,shape=shape0,rate=rate0,log=TRUE))
objective[l] <- log_lik[l]+log_prior[l]
}else{
objective[l] <- log_lik[l]
}
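# convergence check: stop when the relative increase of the objective falls
# below eps (or the objective is exactly flat at zero)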
if(l>=2){
if((objective[l]-objective[l-1])/abs(objective[l-1])<eps |
((objective[l]-objective[l-1])==0 & objective[l-1]==0)){
conv <- 1
l <- n_iter+1
}
}
l <- l+1
}
P_map=p/rowSums(p)
dimnames(P_map)=list(paste0("g_",1:G),paste0("p_",1:K))
names(omega)=paste0("w_",1:G)
log_lik <- log_lik[!(is.na(log_lik))]
max_log_lik <- max(log_lik)
objective <- objective[!(is.na(objective))]
max_objective <- max(objective)
if(all(shape0==1) & all(rate0==0) & all(alpha0==1)){
bic <- bicPLMIX(max_log_lik=max_log_lik,pi_inv=pi_inv,
G=G,ref_known=ref_known,
ref_vary=ref_vary)$bic
}else{
bic <- NULL
}
if(plot_objective){
plot(objective,ylab="Log-joint distribution",xlab="Iteration",
main=paste("MAP estimation for PL mixture with",G,"components"),type="l")
}
out=list(W_map=omega,P_map=P_map,z_hat=z_hat,class_map=apply(z_hat,1,which.max),
log_lik=log_lik,objective=objective,max_objective=max_objective,bic=bic,conv=conv,call=cl)
class(out)="mpPLMIX"
return(out)
}
mapPLMIX_multistart <- function(pi_inv,K,G,n_start=1,
init=rep(list(list(p=NULL,omega=NULL)),times=n_start),
n_iter=200,
hyper=list(shape0=matrix(1,nrow=G,ncol=K),rate0=rep(0,G),alpha0=rep(1,G)),
eps=10^(-6),
plot_objective=FALSE,
init_index=1:n_start,
parallel=FALSE,
centered_start=FALSE){
#' MAP estimation for a Bayesian mixture of Plackett-Luce models with multiple starting values
#'
#' Perform MAP estimation via EM algorithm with multiple starting values for a Bayesian mixture of Plackett-Luce models fitted to partial orderings.
#'
#' Under noninformative (flat) prior setting, the EM algorithm for MAP estimation corresponds to the EMM algorithm described by Gormley and Murphy (2006) to perform frequentist inference. In this case the MAP solution coincides with the MLE. The best model in terms of maximized posterior distribution is returned.
#'
#' @param pi_inv An object of class \code{top_ordering}, collecting the numeric \eqn{N}\eqn{\times}{x}\eqn{K} data matrix of partial orderings, or an object that can be coerced with \code{\link{as.top_ordering}}.
#' @param K Number of possible items.
#' @param G Number of mixture components.
#' @param n_start Number of starting values.
#' @param init List of \code{n_start} lists of named objects with initialization values: \code{p} is a numeric \eqn{G}\eqn{\times}{x}\eqn{K} matrix of component-specific support parameters; \code{omega} is a numeric vector of \eqn{G} mixture weights. If starting values are not supplied (\code{NULL}), they are randomly generated with a uniform distribution. Default is \code{NULL}.
#' @param n_iter Maximum number of EM iterations.
#' @param hyper List of named objects with hyperparameter values for the conjugate prior specification: \code{shape0} is a numeric \eqn{G}\eqn{\times}{x}\eqn{K} matrix of shape hyperparameters; \code{rate0} is a numeric vector of \eqn{G} rate hyperparameters; \code{alpha0} is a numeric vector of \eqn{G} Dirichlet hyperparameters. Default is noninformative (flat) prior setting.
#' @param eps Tolerance value for the convergence criterion.
#' @param plot_objective Logical: whether the objective function (that is the kernel of the log-posterior distribution) should be plotted. Default is \code{FALSE}.
#' @param init_index Numeric vector indicating the positions of the starting values in the \code{init} list to be actually launched. Useful to launch the most promising starting values identified after a preliminary run. Default is to run all the starting points in the \code{init} list.
#' @param parallel Logical: whether parallelization should be used. Default is \code{FALSE}.
#' @param centered_start Logical: whether the random starting values of the support parameters and weights should be centered around the observed relative frequencies with which each item has been ranked top. Default is \code{FALSE}. Ignored when \code{init} is not \code{NULL}.
#'
#' @return A list of S3 class \code{mpPLMIX} with named elements:
#'
#' \item{\code{mod}}{ List of named objects describing the best model in terms of maximized posterior distribution. See output values of the single-run \code{\link{mapPLMIX}} function for a detailed explanation of the list elements.}
#' \item{\code{max_objective}}{ Numeric vector of the maximized objective function values for each initialization.}
#' \item{\code{convergence}}{ Binary vector with \code{length(init_index)} convergence indicators for each initialization: 1 = convergence has been achieved, 0 = otherwise.}
#' \item{\code{call}}{ The matched call.}
#'
#' @references
#' Mollica, C. and Tardella, L. (2017). Bayesian Plackett-Luce mixture models for partially ranked data. \emph{Psychometrika}, \bold{82}(2), pages 442--458, ISSN: 0033-3123, DOI: 10.1007/s11336-016-9530-0.
#'
#' Gormley, I. C. and Murphy, T. B. (2006). Analysis of Irish third-level college applications data. \emph{Journal of the Royal Statistical Society: Series A}, \bold{169}(2), pages 361--379, ISSN: 0964-1998, DOI: 10.1111/j.1467-985X.2006.00412.x.
#'
#' @author Cristina Mollica and Luca Tardella
#'
#' @seealso \code{\link{mapPLMIX}}
#'
#' @examples
#'
#' data(d_carconf)
#' MAP_mult <- mapPLMIX_multistart(pi_inv=d_carconf, K=ncol(d_carconf), G=3,
#' n_start=2, n_iter=400*3)
#' str(MAP_mult)
#' MAP_mult$mod$P_map
#' MAP_mult$mod$W_map
#'
#' @export
cl <- match.call()
if(class(pi_inv)[1]!="top_ordering"){
if(class(pi_inv)[1]=="RankData"){
pi_inv=as.top_ordering(data=pi_inv)
}
if(class(pi_inv)[1]=="rankings"){
pi_inv=as.top_ordering(data=pi_inv)
}
if(class(pi_inv)[1]=="matrix" | class(pi_inv)[1]=="data.frame"){
pi_inv=as.top_ordering(data=pi_inv,format_input="ordering",aggr=FALSE)
}
}
pi_inv <- fill_single_entries(data=pi_inv)
for(i in 1:n_start){
# print(paste0("Multiple starting point #",i))
if(is.null(init[[i]]$omega)){
#omega <- runif(G)
#omega <- omega/sum(omega)
omega <- rdirichlet(1,rep(1,G))
}else{
omega <- init[[i]]$omega
if(sum(omega)!=1){
warning("Initial mixture weights must add to one ==> input arguments has been normalized!")
omega <- omega/sum(omega)
}
}
if(is.null(init[[i]]$p)){
if(centered_start){
# print("CENTERED START !!")
mle1comp <- matrix(prop.table(table(factor(pi_inv[,1],levels=1:K))),nrow=1)
p <- random_start(mlesupp=mle1comp, givenweights=omega)
p <- p/rowSums(p)
}else{
# print("COMPLETELY RANDOM (uniform support, rescaled) START")
#p <- matrix(runif(G*K),nrow=G,ncol=K)
#p <- p/rowSums(p)
p <- rdirichlet(G,rep(1,K))
}
}else{
p <- init[[i]]$p
if(is.vector(p)){
p <- t(p)
}
if(!all(rowSums(p)==1)){
warning("Initial support parameters for each mixture component must
add to one ==> input arguments has been normalized!")
p <- p/rowSums(p)
}
}
init[[i]] <- list(p=p,omega=omega)
}
if(!parallel){
mod <- vector(mode="list",length=length(init_index))
max_objective <- rep(NA,length(init_index))
convergence <- rep(NA,length(init_index))
record <- rep(NA,length(init_index))
l <- 0
for(i in init_index){
l <- l+1
# print(paste("INITIALIZATION",l))
mod[[l]] <- mapPLMIX(pi_inv=pi_inv,K=K,G=G,init=init[[i]],n_iter=n_iter,hyper=hyper,
eps=eps,centered_start=centered_start,plot_objective=plot_objective)
max_objective[l] <- mod[[l]]$max_objective
convergence[l] <- mod[[l]]$conv
record[l] <- max(max_objective[1:l])
print(paste("Starting value #",l," => best objective function value so far =",record[l]))
}
mod <- mod[[which.max(max_objective)]]
class(mod) <- "list"
mod <- mod[-length(mod)]
out=list(mod=mod,max_objective=max_objective,convergence=convergence,call=cl)
}else{
mod <- foreach(i=init_index) %dopar%{
tempmod <- mapPLMIX(pi_inv=pi_inv,K=K,G=G,init=init[[i]],n_iter=n_iter,hyper=hyper,
eps=eps,centered_start=centered_start,plot_objective=plot_objective)
}
max_objective <- sapply(mod,"[[","max_objective")
convergence <- sapply(mod,"[[","conv")
outmod <- mod[[which.max(max_objective)]]
class(outmod) <- "list"
outmod <- outmod[-length(outmod)]
out=list(mod=outmod,max_objective=max_objective,convergence=convergence,call=cl)
}
class(out)="mpPLMIX"
return(out)
}
print.mpPLMIX <- function(x,...){
#' Print of the MAP estimation algorithm for a Bayesian mixture of Plackett-Luce models
#'
#' \code{print} method for class \code{mpPLMIX}. It shows some general information on the MAP estimation procedure for a Bayesian mixture of Plackett-Luce models.
#'
#'
#' @param x Object of class \code{mpPLMIX} returned by the \code{mapPLMIX} or \code{mapPLMIX_multistart} function.
#' @param ... Further arguments passed to or from other methods (not used).
#'
#' @author Cristina Mollica and Luca Tardella
#'
#' @seealso \code{\link{mapPLMIX}} and \code{\link{mapPLMIX_multistart}}
#'
#' @examples
#'
#' ## Print of the MAP procedure with a single starting point
#' data(d_carconf)
#' MAP <- mapPLMIX(pi_inv=d_carconf, K=ncol(d_carconf), G=3)
#' print(MAP)
#'
#' ## Print of the MAP procedure with 5 starting points
#' MAP_multi <- mapPLMIX_multistart(pi_inv=d_carconf, K=ncol(d_carconf), G=3, n_start=5)
#' print(MAP_multi)
#' @export print.mpPLMIX
#' @export
mpPLMIX_out=x
if(!inherits(mpPLMIX_out,"mpPLMIX")){
stop("The function requires an object of S3 class 'mpPLMIX' as its first argument.")
}
cat("\nCall:\n", paste(deparse(mpPLMIX_out$call), sep = "\n", collapse = "\n"),
"\n\n", sep = "")
if(is.null(mpPLMIX_out$convergence)){
G=length(mpPLMIX_out$W_map)
K=ncol(mpPLMIX_out$P_map)
L=length(mpPLMIX_out$log_lik)
cat("MAP estimation procedure for a Bayesian mixture of Plackett-Luce models:\n")
if(!is.null(mpPLMIX_out$bic)){
cat("Prior distribution used: flat (default) ====> MAP = MLE\n")
}else{
cat("Prior distribution used: subjective (see 'Call')\n")
}
cat("\n")
cat("No. of items:",K,"\n")
cat("No. of mixture components:",G,"\n")
cat("No. of iterations:",L,"\n")
cat("\n")
cat("Max. log-likelihood:",max(mpPLMIX_out$log_lik,na.rm=TRUE),"\n")
cat("Max. objective function:",mpPLMIX_out$max_objective,"\n")
if(!is.null(mpPLMIX_out$bic)){
cat("BIC:",mpPLMIX_out$bic,"\n")
}
cat("\n")
cat("Algorithm convergence check:",ifelse(mpPLMIX_out$conv,"Ok.","Failed."),"\n")
}else{
G=length(mpPLMIX_out$mod$W_map)
K=ncol(mpPLMIX_out$mod$P_map)
L=length(mpPLMIX_out$mod$log_lik)
cat("MAP estimation procedure for a Bayesian mixture of Plackett-Luce models with",length(mpPLMIX_out$convergence),"starting values ====> best solution reported:\n")
if(!is.null(mpPLMIX_out$mod$bic)){
cat("Prior distribution used: flat (default) ====> MAP = MLE\n")
}else{
cat("Prior distribution used: subjective (see 'Call')\n")
}
cat("\n")
cat("No. of items:",K,"\n")
cat("No. of mixture components:",G,"\n")
cat("No. of iterations:",L,"\n")
cat("\n")
cat("Max. log-likelihood:",max(mpPLMIX_out$mod$log_lik,na.rm=TRUE),"\n")
cat("Max. objective function:",mpPLMIX_out$mod$max_objective,"\n")
if(!is.null(mpPLMIX_out$mod$bic)){
cat("BIC:",mpPLMIX_out$mod$bic,"\n")
}
cat("\n")
cat("Algorithm convergence check:",ifelse(mpPLMIX_out$mod$conv,"Ok.","Failed."),"\n")
}
cat("\n")
cat("Use functions summary() and plot() to summarize and visualize the object of class 'mpPLMIX'.")
}
print.summary.mpPLMIX <- function(x,...){
#/' Print of the summary of MAP estimation for a Bayesian mixture of Plackett-Luce models
#/'
#/' \code{print} method for class \code{summary.mpPLMIX}. It provides summaries for the MAP estimation of a Bayesian mixture of Plackett-Luce models.
#/'
#/'
#/' @param x Object of class \code{summary.mpPLMIX} returned by the \code{summary.mpPLMIX} function.
#/' @param ... Further arguments passed to or from other methods (not used).
#/'
#/'
#/' @references
#/' Mollica, C. and Tardella, L. (2017). Bayesian Plackett-Luce mixture models for partially ranked data. \emph{Psychometrika}, \bold{82}(2), pages 442--458, ISSN: 0033-3123, DOI: 10.1007/s11336-016-9530-0.
#/'
#/' Mollica, C. and Tardella, L. (2014). Epitope profiling via mixture modeling for ranked data. \emph{Statistics in Medicine}, \bold{33}(21), pages 3738--3758, ISSN: 0277-6715, DOI: 10.1002/sim.6224.
#/'
#/' @author Cristina Mollica and Luca Tardella
summary.mpPLMIX_out=x
if(!inherits(summary.mpPLMIX_out,"summary.mpPLMIX")){
stop("The function requires an object of S3 class 'summary.mpPLMIX' as its first argument.")
}
G=length(summary.mpPLMIX_out$MAP_w)
cat("\nCall:\n", paste(deparse(summary.mpPLMIX_out$call), sep = "\n", collapse = "\n"),
"\n\n", sep = "")
if(G>1){
cat("MAP estimates of the mixture weghts:\n")
print(summary.mpPLMIX_out$MAP_w)
cat("\n")
}
cat("MAP estimates of the support parameters:\n")
print(summary.mpPLMIX_out$MAP_p)
cat("\n")
cat("Estimated component-specific modal orderings:\n")
print(summary.mpPLMIX_out$Modal_orderings)
cat("\n")
if(G>1){
cat("Relative frequency distribution of group memberships:\n")
print(summary.mpPLMIX_out$group_distr)
cat("\n")
}
if(!is.null(summary.mpPLMIX_out$perc_conv_rate)){
cat("Convergence percentage over multiple initialization:\n")
print(c(Conv=summary.mpPLMIX_out$perc_conv_rate))
}
}
summary.mpPLMIX <- function(object,digits=2,...){
#' Summary of the MAP estimation for a Bayesian mixture of Plackett-Luce models
#'
#' \code{summary} method for class \code{mpPLMIX}. It provides summaries for the MAP estimation of a Bayesian mixture of Plackett-Luce models.
#'
#' @param object Object of class \code{mpPLMIX} returned by the \code{mapPLMIX} or \code{mapPLMIX_multistart} function.
#' @param digits Number of decimal places for rounding the summaries.
#' @param ... Further arguments passed to or from other methods (not used).
#'
#' @return A list of summaries for the \code{mpPLMIX} class object:
#'
#' \item{\code{MAP_w}}{ Numeric vector with the MAP estimates of the \eqn{G} mixture weights. Returned only when \eqn{G>1}.}
#' \item{\code{MAP_p}}{ Numeric \eqn{G}\eqn{\times}{x}\eqn{K} matrix with the MAP estimates of the component-specific support parameters.}
#' \item{\code{MAP_modal_orderings}}{ Numeric \eqn{G}\eqn{\times}{x}\eqn{K} matrix with the estimated modal orderings of each mixture component.}
#' \item{\code{group_distr}}{ Numeric vector with the relative frequency distribution of the mixture component memberships based on MAP allocation. Returned only when \eqn{G>1}.}
#' \item{\code{perc_conv_rate}}{ Numeric scalar with the percentage of MAP algorithm convergence over the multiple starting points. Returned only when \code{summary.mpPLMIX} is applied to the output of the \code{mapPLMIX_multistart} function.}
#'
#' @author Cristina Mollica and Luca Tardella
#'
#' @examples
#'
#' ## Summary of the MAP procedure with a single starting point
#' data(d_carconf)
#' MAP <- mapPLMIX(pi_inv=d_carconf, K=ncol(d_carconf), G=3)
#' summary(MAP)
#'
#' ## Summary of the MAP procedure with 5 starting points
#' MAP_multi <- mapPLMIX_multistart(pi_inv=d_carconf, K=ncol(d_carconf), G=3, n_start=5)
#' summary(MAP_multi)
#' @export summary.mpPLMIX
#' @export
mpPLMIX_out=object
if(!inherits(mpPLMIX_out,"mpPLMIX")){
stop("The function requires an object of S3 class 'mpPLMIX' as its first argument.")
}
cl=mpPLMIX_out$call
if(is.null(mpPLMIX_out$convergence)){
G=length(mpPLMIX_out$W_map)
K=ncol(mpPLMIX_out$P_map)
out=list(MAP_w=mpPLMIX_out$W_map,MAP_p=mpPLMIX_out$P_map,
Modal_orderings=t(apply(matrix(mpPLMIX_out$P_map,nrow=G,ncol=K),1,order,decreasing=TRUE)),
group_distr=prop.table(table(factor(mpPLMIX_out$class_map,levels=1:G))),
call=cl)
out[c(1:2,4)]=lapply(out[c(1:2,4)],round,digits=digits)
}else{
G=length(mpPLMIX_out$mod$W_map)
K=ncol(mpPLMIX_out$mod$P_map)
out=list(MAP_w=mpPLMIX_out$mod$W_map,MAP_p=mpPLMIX_out$mod$P_map,
Modal_orderings=t(apply(matrix(mpPLMIX_out$mod$P_map,nrow=G,ncol=K),1,order,decreasing=TRUE)),
group_distr=prop.table(table(factor(mpPLMIX_out$mod$class_map,levels=1:G))),
perc_conv_rate=100*mean(mpPLMIX_out$convergence),
call=cl)
out[c(1:2,4:5)]=lapply(out[c(1:2,4:5)],round,digits=digits)
}
dimnames(out$Modal_orderings) <- list(paste0("g_",1:G),paste0("Rank_",1:K))
class(out)="summary.mpPLMIX"
out
}
plot.mpPLMIX <- function(x,max_scale_radar=NULL,...){
#' Plot the MAP estimates for a Bayesian mixture of Plackett-Luce models
#'
#' \code{plot} method for class \code{mpPLMIX}.
#'
#' By calling the \code{chartJSRadar} function from the \code{radarchart} package and the routines of the \code{ggplot2} package, \code{plot.mpPLMIX} produces a radar plot of the support parameters and, when \eqn{G>1}, a donut plot of the mixture weights and a heatmap of the component membership probabilities based on the MAP estimates. The radar chart is returned in the Viewer Pane.
#'
#' @param x Object of class \code{mpPLMIX} returned by the \code{mapPLMIX} or \code{mapPLMIX_multistart} function.
#' @param max_scale_radar Numeric scalar indicating the maximum value on each axis of the radar plot for the support parameter point estimates. Default is \code{NULL} meaning that the maximum of the estimated support parameters is used.
#' @param ... Further arguments passed to or from other methods (not used).
#'
#'
#' @references
#' Ashton, D. and Porter, S. (2016). radarchart: Radar Chart from 'Chart.js'. R package version 0.3.1. \url{https://CRAN.R-project.org/package=radarchart}
#'
#' Wickham, H. (2009). ggplot2: Elegant Graphics for Data Analysis. Springer-Verlag New York.
#'
#' @author Cristina Mollica and Luca Tardella
#'
#' @seealso \code{\link[radarchart]{chartJSRadar}} and \code{\link[ggplot2]{ggplot}}
#'
#' @examples
#'
#' # Not run:
#' data(d_carconf)
#' MAP <- mapPLMIX(pi_inv=d_carconf, K=ncol(d_carconf), G=3)
#' plot(MAP)
#'
#' # Not run:
#' MAP_multi <- mapPLMIX_multistart(pi_inv=d_carconf, K=ncol(d_carconf), G=3, n_start=5)
#' plot(MAP_multi)
#' @export plot.mpPLMIX
#' @export
mpPLMIX_out=x
if(!inherits(mpPLMIX_out,"mpPLMIX")){
stop("The function requires an object of S3 class 'mpPLMIX' as its first argument.")
}
if(is.null(mpPLMIX_out$convergence)){
G=length(mpPLMIX_out$W_map)
K=ncol(mpPLMIX_out$P_map)
N=nrow(mpPLMIX_out$z_hat)
}else{
G=length(mpPLMIX_out$mod$W_map)
K=ncol(mpPLMIX_out$mod$P_map)
N=nrow(mpPLMIX_out$mod$z_hat)
}
labs <- paste("Item",1:K)
if(is.null(mpPLMIX_out$convergence)){
scores=as.list(as.data.frame(t(mpPLMIX_out$P_map)))
}else{
scores=as.list(as.data.frame(t(mpPLMIX_out$mod$P_map)))
}
names(scores)=paste("Group",1:G)
main_radar="MAP estimates of the support parameters"
oo=chartJSRadar(scores = scores, labs = labs, main=main_radar,maxScale = ifelse(is.null(max_scale_radar),max(unlist(scores)),max_scale_radar))
print(oo)
if(G>1){
if(is.null(mpPLMIX_out$convergence)){
df_w <- data.frame(Composition = paste0(paste("Group",1:G),":"),value=mpPLMIX_out$W_map,label=paste(paste0(paste("Group",1:G),":"),paste0(round(mpPLMIX_out$W_map*100), "%")))
df_z=as.data.frame(mpPLMIX_out$z_hat)
}else{
df_w <- data.frame(Composition = paste0(paste("Group",1:G),":"),value=mpPLMIX_out$mod$W_map,label=paste(paste0(paste("Group",1:G),":"),paste0(round(mpPLMIX_out$mod$W_map*100), "%")))
df_z=as.data.frame(mpPLMIX_out$mod$z_hat)
}
pp=ggplot(df_w, aes_string(x=2, y = "value", fill = "label")) +
geom_bar(stat = "identity", color = "white") +
coord_polar(theta = "y", start = 0)+
labs(x = NULL, y = NULL, fill = NULL, title = "Sample composition by group membership")+
scale_fill_brewer(palette="Blues")+
theme_void()+
xlim(0.5, 2.5)
names(df_z)=paste("Group",1:G)
df_z=data.frame(Unit=paste("Unit",1:N),df_z)
df_z_m=melt(df_z,id.vars="Unit")
# zz=ggplot(df_z_m, aes_string("variable", "Unit")) +
# geom_tile(aes_string(fill = "value"), colour = "white") +
# labs(x = "", y = "Sample units", fill = NULL, title = "Component membership probabilities")+
# theme(axis.text.y = element_blank(),
# axis.ticks = element_blank())+
# scale_fill_gradient(low = "white", high = "steelblue")
zz=ggplot(df_z_m, aes_string("Unit", "variable")) +
geom_tile(aes_string(fill = "value"), colour = "white") +
labs(x = "Sample units", y = "", fill = NULL, title = "Component membership probabilities")+
theme(axis.text.x = element_blank(),
axis.ticks = element_blank())+
scale_fill_gradient(low = "white", high = "steelblue")
grid.arrange(pp,zz,nrow=2)
}
}
##########################################################
############# GIBBS SAMPLING #############################
gibbsPLMIX <- function(pi_inv,K,G,
init=list(z=NULL,p=NULL),
n_iter=1000,
n_burn=500,
hyper=list(shape0=matrix(1,nrow=G,ncol=K),rate0=rep(0.001,G),alpha0=rep(1,G)),
centered_start=FALSE){
#' Gibbs sampling for a Bayesian mixture of Plackett-Luce models
#'
#' Perform Gibbs sampling simulation for a Bayesian mixture of Plackett-Luce models fitted to partial orderings.
#'
#' The size \eqn{L} of the final MCMC sample is equal to \code{n_iter}-\code{n_burn}.
#'
#' @param pi_inv An object of class \code{top_ordering}, collecting the numeric \eqn{N}\eqn{\times}{x}\eqn{K} data matrix of partial orderings, or an object that can be coerced with \code{\link{as.top_ordering}}.
#' @param K Number of possible items.
#' @param G Number of mixture components.
#' @param init List of named objects with initialization values: \code{z} is a numeric \eqn{N}\eqn{\times}{x}\eqn{G} matrix of binary mixture component memberships; \code{p} is a numeric \eqn{G}\eqn{\times}{x}\eqn{K} matrix of component-specific support parameters. If starting values are not supplied (\code{NULL}), they are randomly generated with a uniform distribution. Default is \code{NULL}.
#' @param n_iter Total number of MCMC iterations.
#' @param n_burn Number of initial burn-in drawings removed from the returned MCMC sample.
#' @param hyper List of named objects with hyperparameter values for the conjugate prior specification: \code{shape0} is a numeric \eqn{G}\eqn{\times}{x}\eqn{K} matrix of shape hyperparameters; \code{rate0} is a numeric vector of \eqn{G} rate hyperparameters; \code{alpha0} is a numeric vector of \eqn{G} Dirichlet hyperparameters. Default is vague prior setting.
#' @param centered_start Logical: whether the random starting values of the support parameters and weights should be centered around the observed relative frequencies with which each item has been ranked top. Default is \code{FALSE}. Ignored when \code{init} is not \code{NULL}.
#'
#' @return A list of S3 class \code{gsPLMIX} with named elements:
#'
#' \item{\code{W}}{ Numeric \eqn{L}\eqn{\times}{x}\eqn{G} matrix with MCMC samples of the mixture weights.}
#' \item{\code{P}}{ Numeric \eqn{L}\eqn{\times}{x}\eqn{(G*K)} matrix with MCMC samples of the component-specific support parameters.}
#' \item{\code{log_lik}}{ Numeric vector of \eqn{L} posterior log-likelihood values.}
#' \item{\code{deviance}}{ Numeric vector of \eqn{L} posterior deviance values (\eqn{-2 * }\code{log_lik}).}
#' \item{\code{objective}}{ Numeric vector of \eqn{L} objective function values (that is the kernel of the log-posterior distribution).}
#' \item{\code{call}}{ The matched call.}
#'
#' @references
#' Mollica, C. and Tardella, L. (2017). Bayesian Plackett-Luce mixture models for partially ranked data. \emph{Psychometrika}, \bold{82}(2), pages 442--458, ISSN: 0033-3123, DOI: 10.1007/s11336-016-9530-0.
#'
#' @author Cristina Mollica and Luca Tardella
#'
#' @examples
#'
#' data(d_carconf)
#' GIBBS <- gibbsPLMIX(pi_inv=d_carconf, K=ncol(d_carconf), G=3, n_iter=30, n_burn=10)
#' str(GIBBS)
#' GIBBS$P
#' GIBBS$W
#'
#' @export
cl=match.call()
if(class(pi_inv)[1]!="top_ordering"){
if(class(pi_inv)[1]=="RankData"){
pi_inv=as.top_ordering(data=pi_inv)
}
if(class(pi_inv)[1]=="rankings"){
pi_inv=as.top_ordering(data=pi_inv)
}
if(class(pi_inv)[1]=="matrix" | class(pi_inv)[1]=="data.frame"){
pi_inv=as.top_ordering(data=pi_inv,format_input="ordering",aggr=FALSE)
}
}
pi_inv <- fill_single_entries(data=pi_inv)
N <- nrow(pi_inv)
n_rank <- howmanyranked(pi_inv)
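# identity reference orders: plain Plackett-Luce components (PL = EPL special case)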
rho <- matrix(1:K,nrow=G,ncol=K,byrow=TRUE)
if(is.null(init$z)){
z <- binary_group_ind(class=sample(1:G,size=N,replace=TRUE),G=G)
}else{
z <- init$z
}
omega <- colMeans(z)
if(is.null(init$p)){
if(centered_start){
print("CENTERED START !!")
# omega <- rdirichlet(1,rep(1,G))
mle1comp <- matrix(prop.table(table(factor(pi_inv[,1],levels=1:K))),nrow=1)
p <- random_start(mlesupp=mle1comp, givenweights=omega)
# p <- p/rowSums(p)
}else{
print("COMPLETELY RANDOM (uniform support, rescaled) START")
p <- matrix(rgamma(n=G*K,shape=1,rate=1),nrow=G,ncol=K)
}
}else{
p <- init$p
}
shape0 <- hyper$shape0
rate0 <- hyper$rate0
alpha0 <- hyper$alpha0
u_bin <- umat(pi_inv=pi_inv)
log_lik <- c(loglikPLMIX(p=p,ref_order=rho,weights=omega,pi_inv=pi_inv),
rep(NA,n_iter))
log_prior <- c(log(ddirichlet(omega,alpha0))+sum(dgamma(p,shape=shape0,rate=rate0,log=TRUE)),
rep(NA,n_iter))
objective <- log_lik+log_prior
Pi <- array(NA,dim=c(G,K,n_iter+1))
Pi[,,1] <- p
Zeta <- z
Omega <- matrix(NA,nrow=n_iter+1,ncol=G)
Omega[1,] <- omega
for(l in 1:n_iter){
if(l%%500==0){
print(paste("GIBBS iteration",l))
}
# Draw the mixture weights from their Dirichlet full-conditional
Omega[l+1,] <- rdirichlet(n=1,alpha=alpha0+colSums(Zeta))
# Draw the latent auxiliary variables of the data-augmented PL representation
temprate <- CompRateYpartial(p=adrop(Pi[,,l,drop=FALSE],3),pi_inv=pi_inv,ref_order=rho,z=Zeta,n_rank=n_rank)
Ypsilon <- SimYpsilon(rate=temprate,n_rank=n_rank)
# Draw the support parameters from their Gamma full-conditionals
Pi[,,l+1] <- matrix(rgamma(n=G*K,shape=shape0+gammamat(u_bin=u_bin,z_hat=Zeta),
rate=CompRateP(pi_inv=pi_inv, Y=Ypsilon, z=Zeta, u_bin=u_bin, n_rank=n_rank, rate0=rate0)),nrow=G,ncol=K)
# Draw the component memberships from their multinomial full-conditionals
Zeta <- binary_group_ind(apply(CompProbZpartial(p=adrop(Pi[,,l+1,drop=FALSE],3),pi_inv=pi_inv,Y=Ypsilon, u_bin=u_bin,n_rank,omega=Omega[l+1,]),1,FUN=sample,x=1:G,replace=TRUE,size=1),G=G)
log_lik[l+1] <- loglikPLMIX(p=adrop(Pi[,,l+1,drop=FALSE],3),ref_order=rho,weights=Omega[l+1,],
pi_inv=pi_inv)
log_prior[l+1] <- log(ddirichlet(Omega[l+1,],alpha0))+sum(dgamma(adrop(Pi[,,l+1,drop=FALSE],3),shape=shape0,rate=rate0,log=TRUE))
objective[l+1] <- log_lik[l+1]+log_prior[l+1]
}
log_lik <- log_lik[-c(1:(n_burn+1))]
objective <- objective[-c(1:(n_burn+1))]
Omega <- Omega[-c(1:(n_burn+1)),,drop=FALSE]
colnames(Omega) <- paste0("w_",1:G)
Pi <- array(apply(Pi,3,FUN=function(x)x/rowSums(x)),c(G,K,n_iter+1))
Pi=t(apply(Pi,3,c))[-c(1:(n_burn+1)),]
colnames(Pi) <- paste0("p_",rep(1:G,K),rep(1:K,each=G))
out=list(W=Omega,P=Pi,log_lik=log_lik,deviance=-2*log_lik,objective=objective,call=cl)
class(out)="gsPLMIX"
return(out)
}
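## Minimal post-processing sketch (inert if(FALSE) block): ergodic means of the
## retained draws give posterior point estimates; the columns of P vary the
## component index fastest, so filling a G x K matrix column-major recovers the layout.
if(FALSE){
data(d_carconf)
GIBBS <- gibbsPLMIX(pi_inv=d_carconf, K=ncol(d_carconf), G=3, n_iter=30, n_burn=10)
colMeans(GIBBS$W)                     # posterior mean mixture weights
matrix(colMeans(GIBBS$P), nrow=3)     # posterior mean support parameters (G x K)
}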
gsPLMIX_to_mcmc <- function(gsPLMIX_out){
#' MCMC class objects from the Gibbs sampling simulations of a Bayesian mixture of Plackett-Luce models
#'
#' Coerce the Gibbs sampling simulations for a Bayesian mixture of Plackett-Luce models into an \code{mcmc} class object.
#'
#' \code{gsPLMIX_to_mcmc} attempts to coerce its argument by calling the \code{as.mcmc} function of the \code{coda} package.
#'
#' @param gsPLMIX_out Object of class \code{gsPLMIX} returned by the \code{gibbsPLMIX} function.
#'
#' @return An \code{mcmc} class object.
#'
#' @references
#' Plummer, M., Best, N., Cowles, K. and Vines, K. (2006). CODA: Convergence Diagnosis and Output Analysis for MCMC, \emph{R News}, \bold{6}, pages 7--11, ISSN: 1609-3631.
#'
#' @author Cristina Mollica and Luca Tardella
#'
#' @seealso \code{\link[coda]{as.mcmc}}
#'
#' @examples
#'
#' data(d_carconf)
#' GIBBS <- gibbsPLMIX(pi_inv=d_carconf, K=ncol(d_carconf), G=3, n_iter=30, n_burn=10)
#'
#' ## Coerce the posterior samples into an mcmc class object
#' gsPLMIX_to_mcmc(GIBBS)
#'
#' @export
if(!inherits(gsPLMIX_out,"gsPLMIX")){
stop("The function requires an object of S3 class 'gsPLMIX' as its first argument.")
}
G=ncol(gsPLMIX_out$W)
K=ncol(gsPLMIX_out$P)/G
class(gsPLMIX_out)="list"
gsPLMIX_out=gsPLMIX_out[-length(gsPLMIX_out)] # to remove call element
out=as.data.frame(gsPLMIX_out)
colnames(out)=c(paste0("w_",1:G),paste0("p_",rep(1:G,K),rep(1:K,each=G)),"log_lik","deviance","objective")
out=as.mcmc(out)
return(out)
}
summary.gsPLMIX <- function(object,quantiles=c(0.025, 0.25, 0.5, 0.75, 0.975),hpd_prob=0.95,digits=2,...){
#' Summary of the Gibbs sampling procedure for a Bayesian mixture of Plackett-Luce models
#'
#' \code{summary} method for class \code{gsPLMIX}. It provides summary statistics and credible intervals for the Gibbs sampling simulation of a Bayesian mixture of Plackett-Luce models.
#'
#' Posterior summaries include means, standard deviations, naive standard errors of the means (ignoring autocorrelation of the chain) and time-series standard errors based on an estimate of the spectral density at 0. They correspond to the \code{statistics} element of the output returned by the \code{summary.mcmc} function of the \code{coda} package. Highest posterior density (HPD) intervals are obtained by calling the \code{HPDinterval} function of the \code{coda} package.
#'
#' @param object Object of class \code{gsPLMIX} returned by the \code{gibbsPLMIX} function.
#' @param quantiles Numeric vector of quantile probabilities.
#' @param hpd_prob Numeric scalar in the grid of values spanning the interval (0,1) by 0.05, giving the posterior probability content of the HPD intervals. Supplied values outside the grid are rounded.
#' @param digits Number of decimal places for rounding the posterior summaries.
#' @param ... Further arguments passed to or from other methods (not used).
#'
#' @return A list of summary statistics for the \code{gsPLMIX} class object:
#'
#' \item{\code{statistics}}{ Numeric matrix with posterior summaries in each row (see 'Details').}
#' \item{\code{quantiles}}{ Numeric matrix with posterior quantiles at the given \code{quantiles} probabilities in each row.}
#' \item{\code{HPDintervals}}{ Numeric matrix with 100\eqn{*}\code{hpd_prob}\% HPD intervals in each row.}
#' \item{\code{Modal_orderings}}{ Numeric \eqn{G}\eqn{\times}{x}\eqn{K} matrix with the estimated posterior modal orderings of each mixture component.}
#' \item{\code{call}}{ The matched call.}
#'
#' @references
#' Plummer, M., Best, N., Cowles, K. and Vines, K. (2006). CODA: Convergence Diagnosis and Output Analysis for MCMC, \emph{R News}, \bold{6}, pages 7--11, ISSN: 1609-3631.
#'
#' @author Cristina Mollica and Luca Tardella
#'
#' @seealso \code{\link[coda]{summary.mcmc}} and \code{\link[coda]{HPDinterval}}
#'
#' @examples
#'
#' data(d_carconf)
#' GIBBS <- gibbsPLMIX(pi_inv=d_carconf, K=ncol(d_carconf), G=3, n_iter=30, n_burn=10)
#'
#' ## Summary of the Gibbs sampling procedure
#' summary(GIBBS)
#' @export summary.gsPLMIX
#' @export
gsPLMIX_out=object
G=ncol(gsPLMIX_out$W)
K=ncol(gsPLMIX_out$P)/G
cl=gsPLMIX_out$call
mcmc_obj=gsPLMIX_to_mcmc(gsPLMIX_out)
p_idx=grep(pattern="p_",x=colnames(mcmc_obj))
temp=getFromNamespace("summary.mcmc",ns="coda")(object=mcmc_obj,quantiles=quantiles)[1:2]
hpd_int=HPDinterval(mcmc_obj,prob=hpd_prob)
# attr(hpd_int,"Probability")=NULL
out=list(statistics=temp[[1]],quantiles=as.matrix(temp[[2]]),HPD_intervals=hpd_int,
Modal_orderings=t(apply(matrix(temp$statistics[p_idx,"Mean"],nrow=G,ncol=K),1,order,decreasing=TRUE)),
call=cl)
out[1:3]=lapply(out[1:3],round,digits=digits)
names(out)[3]=paste0(100*hpd_prob,"%_HPD_intervals")
if(length(quantiles)==1){
colnames(out$quantiles)=paste0(quantiles*100,"%")
}
dimnames(out$Modal_orderings) <- list(paste0("g_",1:G),paste0("Rank_",1:K))
class(out)="summary.gsPLMIX"
out
}
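## Illustrative: individual posterior summaries can be pulled from the
## returned list (assumes GIBBS as above):
# s <- summary(GIBBS)
# s$statistics        # means, sds, naive and time-series standard errors
# s$Modal_orderings   # estimated modal ordering of the items per component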
plot.gsPLMIX <- function(x,file="ggmcmc-output.pdf",family=NA,plot=NULL,param_page=5,width=7,height=10,dev_type_html="png",post_est="mean",max_scale_radar=NULL,...){
#' Plot the Gibbs sampling simulations for a Bayesian mixture of Plackett-Luce models
#'
#' \code{plot} method for class \code{gsPLMIX}. It builds a suite of plots, visual convergence diagnostics and credible intervals for the MCMC samples of a Bayesian mixture of Plackett-Luce models. Graphics can be plotted directly into the current working device or stored into an external file placed into the current working directory.
#'
#' Plots of the MCMC samples include histograms, densities, traceplots, running means plots, overlapped densities comparing the complete and partial samples, autocorrelation functions, crosscorrelation plots and caterpillar plots of the 90 and 95\% equal-tails credible intervals. Note that the latter are created for the support parameters (when either \code{family=NA} or \code{family="p"}), for the mixture weights in the case \eqn{G>1} (when either \code{family=NA} or \code{family="w"}), for the log-likelihood values (when \code{family="log_lik"}), for the deviance values (when \code{family="deviance"}) and for the objective function values (when \code{family="objective"}). Convergence tools include the potential scale reduction factor and the Geweke z-score. These functionalities are implemented with a call to the \code{ggs} and \code{ggmcmc} functions of the \code{ggmcmc} package (see 'Examples' for the specification of the \code{plot} argument).
#'
#' By recalling the \code{chartJSRadar} function from the \code{radarchart} package and the routines of the \code{ggplot2} package, \code{plot.gsPLMIX} additionally produces a radar plot of the support parameters and, when \eqn{G>1}, a donut plot of the mixture weights based on the posterior point estimates. The radar chart is returned in the Viewer Pane.
#'
#' @param x Object of class \code{gsPLMIX} returned by the \code{gibbsPLMIX} function.
#' @param file Character vector with the name of the file to be created in the current working directory. Default is "ggmcmc-output.pdf". When NULL, plots are directly returned into the current working device (not recommended). This option also allows the user to work with an already-opened pdf (or other) device. When the file has an html file extension, the output is an Rmarkdown report with the figures embedded in the html file.
#' @param family Character string indicating the name of the family of parameters to be plotted. A family of parameters is considered to be any group of parameters with the same name but different numerical values (for example \code{w[1]}, \code{w[2]}, etc). Default is \code{NA} meaning that all the parameters in the chain are plotted. Alternatively, one can choose \code{"w"}, \code{"p"}, \code{"log_lik"}, \code{"deviance"} or \code{"objective"}.
#' @param plot Character vector containing the names of the desired plots. Default is \code{NULL} meaning that all the plots and convergence diagnostics are built (see 'Details').
#' @param param_page Number of parameters to be plotted in each page. Default is 5.
#' @param width Numeric scalar indicating the width of the pdf display in inches. Default is 7.
#' @param height Numeric scalar indicating the height of the pdf display in inches. Default is 10.
#' @param dev_type_html Character vector indicating the type of graphical device for the html output. Default is \code{"png"}. Alternatively, one can choose \code{"svg"}.
#' @param post_est Character string indicating the point estimates of the Plackett-Luce mixture parameters to be computed from the \code{gsPLMIX} class object and then plotted in the current working device. Default is \code{"mean"}. Alternatively, one can choose \code{"median"}.
#' @param max_scale_radar Numeric scalar indicating the maximum value on each axis of the radar plot for the support parameter point estimates. Default is \code{NULL} meaning that the maximum of the estimated support parameters is used.
#' @param ... Further arguments passed to or from other methods (not used).
#'
#'
#' @references
#' Ashton, D. and Porter, S. (2016). radarchart: Radar Chart from 'Chart.js'. R package version 0.3.1. \url{https://CRAN.R-project.org/package=radarchart}
#'
#' Wickham, H. (2009). ggplot2: Elegant Graphics for Data Analysis. Springer-Verlag New York.
#'
#' Fernandez-i-Marin, X. (2006). ggmcmc: Analysis of MCMC Samples and Bayesian Inference, \emph{Journal of Statistical Software}, \bold{70}(9), pages 1--20, DOI: 10.18637/jss.v070.i09.
#'
#' @author Cristina Mollica and Luca Tardella
#'
#' @seealso \code{\link[ggmcmc]{ggs}}, \code{\link[ggmcmc]{ggmcmc}}, \code{\link[radarchart]{chartJSRadar}} and \code{\link[ggplot2]{ggplot}}
#'
#' @examples
#'
#' # Not run:
#' data(d_carconf)
#' GIBBS <- gibbsPLMIX(pi_inv=d_carconf, K=ncol(d_carconf), G=5, n_iter=30, n_burn=10)
#'
#' # Not run:
#' # Plot posterior samples supplied as an gsPLMIX class object
#' # plot(GIBBS)
#'
#' # Selected plots of the posterior samples of the support parameters
#' # plot(GIBBS, family="p", plot=c("compare_partial","Rhat","caterpillar"), param_page=6)
#'
#' # Selected plots of the posterior samples of the mixture weights
#' # plot(GIBBS, family="w", plot=c("histogram","running","crosscorrelation","caterpillar"))
#'
#' # Selected plots of the posterior log-likelihood values
#' # plot(GIBBS, family="log_lik", plot=c("autocorrelation","geweke"), param_page=1)
#'
#' # Selected plots of the posterior deviance values
#' # plot(GIBBS, family="deviance", plot=c("traceplot","density"), param_page=1)
#' @export plot.gsPLMIX
#' @export
gsPLMIX_out=x
mcmc_obj=gsPLMIX_to_mcmc(gsPLMIX_out=gsPLMIX_out)
G=ncol(gsPLMIX_out$W)
K=ncol(gsPLMIX_out$P)/G
n_par=G+G*K
colnames(mcmc_obj)[1:n_par]=gsub("_","[",colnames(mcmc_obj)[1:n_par])
colnames(mcmc_obj)[1:n_par]=paste0(colnames(mcmc_obj)[1:n_par],"]")
if(G==1){
mcmc_obj=mcmc_obj[,-1]
}
tbl_obj = ggs(S=mcmc_obj)
if(!is.na(family) & family=="w" & G==1){
message(paste("No. of mixture components:",G, "====> w[1] = 1"))
}else{
simplify_traceplot=NULL
mcmc_plot=ggmcmc(tbl_obj,file=file,family=family,plot=plot,param_page=param_page,width=width,height=height,
simplify_traceplot=simplify_traceplot,dev_type_html=dev_type_html)
print(mcmc_plot)
}
# if(!is.na(family) & family=="p"){
labs <- paste("Item",1:K)
temp_radar=summary(object=gsPLMIX_out,quantiles=0.5)
if(post_est=="mean"){
scores=as.list(as.data.frame(t(matrix(temp_radar$statistics[grep("^p_",rownames(temp_radar$quantiles)),"Mean"],nrow=G,ncol=K))))
main_radar="Posterior means of the support parameters"
}else{
scores=as.list(as.data.frame(t(matrix(temp_radar$quantiles[grep("^p_",rownames(temp_radar$quantiles)),],nrow=G,ncol=K))))
main_radar="Posterior medians of the support parameters"
}
names(scores)=paste("Group",1:G)
oo=chartJSRadar(scores = scores, labs = labs, main=main_radar,maxScale = ifelse(is.null(max_scale_radar),max(unlist(scores)),max_scale_radar))
print(oo)
# }
# if(!is.na(family) & family=="w" & G>1){
if(G>1){
temp_radar=summary(object=gsPLMIX_out,quantiles=0.5)
if(post_est=="mean"){
temp_value=temp_radar$statistics[grep("^w_",rownames(temp_radar$quantiles)),"Mean"]
}else{
temp_value=temp_radar$quantiles[grep("^w_",rownames(temp_radar$quantiles)),]
}
df_w <- data.frame(Composition = paste0(paste("Group",1:G),":"),value=temp_value,label=paste(paste0(paste("Group",1:G),":"),paste0(round(temp_value*100), "%")))
pp=ggplot(df_w, aes_string(x = 2, y = "value", fill = "label")) +
geom_bar(stat = "identity", color = "white") +
coord_polar(theta = "y", start = 0)+
labs(x = NULL, y = NULL, fill = NULL, title = "Sample composition by group membership")+
# geom_text(aes(label = paste0(round(value*100), "%")), position = position_stack(vjust = 0.5))+
scale_fill_brewer(palette="Blues")+
theme_void()+
xlim(0.5, 2.5)
print(pp)
}
}
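## Illustrative: route the diagnostic plots to an already-open device by
## passing file=NULL, as noted in the documentation above (assumes GIBBS):
# pdf("gibbs_diagnostics.pdf", width=7, height=10)
# plot(GIBBS, file=NULL)
# dev.off()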
print.gsPLMIX <- function(x,...){
#' Print of the Gibbs sampling simulation of a Bayesian mixture of Plackett-Luce models
#'
#' \code{print} method for class \code{gsPLMIX}. It shows some general information on the Gibbs sampling simulation for a Bayesian mixture of Plackett-Luce models.
#'
#'
#' @param x Object of class \code{gsPLMIX} returned by the \code{gibbsPLMIX} function.
#' @param ... Further arguments passed to or from other methods (not used).
#'
#' @author Cristina Mollica and Luca Tardella
#'
#' @seealso \code{\link{gibbsPLMIX}}
#'
#' @examples
#'
#' ## Print of the Gibbs sampling procedure
#' data(d_carconf)
#' GIBBS <- gibbsPLMIX(pi_inv=d_carconf, K=ncol(d_carconf), G=3, n_iter=30, n_burn=10)
#' print(GIBBS)
#' @export print.gsPLMIX
#' @export
gsPLMIX_out=x
if(!inherits(gsPLMIX_out,"gsPLMIX")){
stop("The function requires an object of S3 class 'gsPLMIX' as its first argument.")
}
G=ncol(gsPLMIX_out$W)
K=ncol(gsPLMIX_out$P)/G
L=nrow(gsPLMIX_out$W)
cat("\nCall:\n", paste(deparse(gsPLMIX_out$call), sep = "\n", collapse = "\n"),
"\n\n", sep = "")
cat("Gibbs sampling procedure for a Bayesian mixture of Plackett-Luce models:\n")
cat("\n")
cat("No. of items:",K,"\n")
cat("No. of mixture components:",G,"\n")
cat("No. of saved MCMC samples:",L,"\n")
cat("\n")
cat("Max. posterior log-likelihood:",max(gsPLMIX_out$log_lik,na.rm=TRUE),"\n")
cat("Min. posterior deviance:",min(gsPLMIX_out$deviance,na.rm=TRUE),"\n")
cat("Max. objective function:",max(gsPLMIX_out$objective,na.rm=TRUE),"\n")
cat("\n")
cat("Use functions summary() and plot() to summarize and visualize the object of class 'gsPLMIX'.")
}
print.summary.gsPLMIX <- function(x,...){
#/' Print of the summary of Gibbs sampling simulation of a Bayesian mixture of Plackett-Luce models.
#/'
#/' \code{print} method for class \code{summary.gsPLMIX}. It shows some general information on the Gibbs sampling simulation of a Bayesian mixture of Plackett-Luce models.
#/'
#/'
#/' @param x Object of class \code{summary.gsPLMIX} returned by the \code{summary.gsPLMIX} method.
#/' @param ... Further arguments passed to or from other methods (not used).
#/'
#/'
#/' @references
#/' Mollica, C. and Tardella, L. (2017). Bayesian Plackett-Luce mixture models for partially ranked data. \emph{Psychometrika}, \bold{82}(2), pages 442--458, ISSN: 0033-3123, DOI: 10.1007/s11336-016-9530-0.
#/'
#/' Mollica, C. and Tardella, L. (2014). Epitope profiling via mixture modeling for ranked data. \emph{Statistics in Medicine}, \bold{33}(21), pages 3738--3758, ISSN: 0277-6715, DOI: 10.1002/sim.6224.
#/'
#/' @author Cristina Mollica and Luca Tardella
summary.gsPLMIX_out=x
if(!inherits(summary.gsPLMIX_out,"summary.gsPLMIX")){
stop("The function requires an object of S3 class 'summary.gsPLMIX' as its first argument.")
}
cat("\nCall:\n", paste(deparse(summary.gsPLMIX_out$call), sep = "\n", collapse = "\n"),
"\n\n", sep = "")
cat("Posterior statistics:\n")
print(summary.gsPLMIX_out$statistics)
cat("\n")
cat("Quantiles:\n")
print(summary.gsPLMIX_out$quantiles)
cat("\n")
pr=paste0(100*attr(summary.gsPLMIX_out[[grep("HPD",names(summary.gsPLMIX_out))]],"Probability"),"%")
cat(pr,"HPD intervals:\n")
attr(summary.gsPLMIX_out[[grep("HPD",names(summary.gsPLMIX_out))]],"Probability")=NULL
print(summary.gsPLMIX_out[[grep("HPD",names(summary.gsPLMIX_out))]])
cat("\n")
cat("Estimated component-specific modal orderings:\n")
print(summary.gsPLMIX_out$Modal_orderings)
}
random_start <- function(mlesupp, givenweights, alpha=rep(1,G)){
#/' Random generation of starting values of the component-specific support parameters for Gibbs sampling
#/'
#/' It starts from the MLE of the support parameters under the no-group structure (G=1) and then randomly draws rescaled support points (summing to one) for the G mixture components, such that their weighted average coincides with the MLE support for G=1.
#/'
#/' @param mlesupp MLE of support parameters
#/' @param givenweights A numeric vector of \eqn{G} mixture weights
#/' @param alpha A numeric vector of \eqn{G} positive reals to be used as Dirichlet parameters for the random start which corresponds to a convex combination of \eqn{G} support parameter vertices
#/'
#/' @return \code{out} A numeric \eqn{G}\eqn{\times}{x}\eqn{K} matrix with starting values of the component-specific support parameters
#/'
#/' @author Cristina Mollica and Luca Tardella
K <- length(mlesupp)
G <- length(givenweights)
out <- matrix(NA,nrow=G,ncol=K)
if(G==1){
out[1,] <- mlesupp
}else{
# for each item j:
#   compute the H-representation of the constraint set,
#   transform it into the V-representation and
#   draw a random point from the simplex
for( j in 1:K ) {
Aineq <- rbind(-diag(G))
bineq <- c(rep(0, G))
Aeq <- matrix(givenweights,nrow=1)
beq <- mlesupp[j]
hr <- makeH(Aineq,bineq,Aeq,beq)
vr <- scdd(hr)
Vertexes <- t(vr$output[,-c(1,2)]) # as column vectors
myrandomcomponentwithconstrainedmean <- Vertexes%*%t(rdirichlet(1,alpha))
out[,j] <- myrandomcomponentwithconstrainedmean
}
}
return(out)
}
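## Sanity check (illustrative sketch, hypothetical inputs): the weighted
## average of the component supports returned by random_start() should
## reproduce the G=1 MLE support by construction.
# w <- c(0.3, 0.7)                        # given mixture weights
# p1 <- rep(1/4, 4)                       # MLE support for K=4 items under G=1
# P0 <- random_start(mlesupp=p1, givenweights=w)
# all.equal(as.numeric(w %*% P0), p1)     # TRUE up to numerical error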
#### Selection criteria
selectPLMIX_single <- function(pi_inv,G,
MCMCsampleP=NULL,
MCMCsampleW=NULL,
MAPestP,
MAPestW,
deviance,
post_est="mean"){
#/' Bayesian selection criteria for mixtures of Plackett-Luce models
#/'
#/' Compute Bayesian comparison criteria for mixtures of Plackett-Luce models with a different number of components.
#/'
#/' Two versions of DIC and BPIC are returned corresponding to two alternative ways of computing the penalty term: the former was proposed by Spiegelhalter et al. (2002) and is denoted with \code{pD}, whereas the latter was proposed by Gelman et al. (2004) and is denoted with \code{pV}. DIC2 coincides with AICM, that is, the Bayesian counterpart of AIC introduced by Raftery et al. (2007).
#/'
#/' @param pi_inv An object of class \code{top_ordering}, collecting the numeric \eqn{N}\eqn{\times}{x}\eqn{K} data matrix of partial orderings, or an object that can be coerced with \code{\link{as.top_ordering}}.
#/' @param G Number of mixture components.
#/' @param MCMCsampleP Numeric \eqn{L}\eqn{\times}{x}\eqn{G*K} matrix with the MCMC samples of the component-specific support parameters.
#/' @param MCMCsampleW Numeric \eqn{L}\eqn{\times}{x}\eqn{G} matrix with the MCMC samples of the mixture weights.
#/' @param MAPestP Numeric \eqn{G}\eqn{\times}{x}\eqn{K} matrix of MAP component-specific support parameter estimates.
#/' @param MAPestW Numeric vector of the \eqn{G} MAP estimates of the mixture weights.
#/' @param deviance Numeric vector of posterior deviance values.
#/' @param post_est Character string indicating the point estimates of the Plackett-Luce mixture parameters to be computed from the MCMC sample. This argument is ignored when MAP estimates are supplied in the \code{MAPestP} and \code{MAPestW} arguments. Default is \code{"mean"}. Alternatively, one can choose \code{"median"}.
#/'
#/' @return A list of named objects:
#/'
#/' \item{\code{point_estP}}{ Numeric \eqn{G}\eqn{\times}{x}\eqn{(K+1)} matrix with the point estimates of the Plackett-Luce mixture parameters. The \eqn{(K+1)}-th column contains estimates of the mixture weights.}
#/' \item{\code{point_estW}}{ Numeric \eqn{G}\eqn{\times}{x}\eqn{(K+1)} matrix with the point estimates of the Plackett-Luce mixture parameters. The \eqn{(K+1)}-th column contains estimates of the mixture weights.}
#/' \item{\code{D_bar}}{ Posterior expected deviance.}
#/' \item{\code{D_hat}}{ Deviance function evaluated at \code{point_est}.}
#/' \item{\code{pD}}{ Effective number of parameters computed as \code{D_bar}-\code{D_hat}.}
#/' \item{\code{pV}}{ Effective number of parameters computed as half the posterior variance of the deviance.}
#/' \item{\code{DIC1}}{ Deviance Information Criterion with penalty term equal to \code{pD}.}
#/' \item{\code{DIC2}}{ Deviance Information Criterion with penalty term equal to \code{pV}.}
#/' \item{\code{BPIC1}}{ Bayesian Predictive Information Criterion obtained from \code{DIC1} by doubling its penalty term.}
#/' \item{\code{BPIC2}}{ Bayesian Predictive Information Criterion obtained from \code{DIC2} by doubling its penalty term.}
#/' \item{\code{BICM1}}{ Bayesian Information Criterion-Monte Carlo.}
#/' \item{\code{BICM2}}{ Bayesian Information Criterion-Monte Carlo based on the actual MAP estimate given in the \code{MAPestP} and \code{MAPestW} arguments (unlike \code{BICM1}, no approximation of the MAP estimate from the MCMC sample).}
#/'
#/'
#/' @references
#/' Mollica, C. and Tardella, L. (2017). Bayesian Plackett-Luce mixture models for partially ranked data. \emph{Psychometrika}, \bold{82}(2), pages 442--458, ISSN: 0033-3123, DOI: 10.1007/s11336-016-9530-0.
#/'
#/' Ando, T. (2007). Bayesian predictive information criterion for the evaluation of hierarchical Bayesian and empirical Bayes models. \emph{Biometrika}, \bold{94}(2), pages 443--458.
#/'
#/' Raftery, A. E., Satagopan, J. M., Newton, M. A. and Krivitsky, P. N. (2007). Estimating the integrated likelihood via posterior simulation using the harmonic mean identity. In \emph{Bayesian Statistics 8, Proceedings of the eighth Valencia International Meeting 2006}, pages 371--416. Oxford University Press.
#/'
#/' Gelman, A., Carlin, J. B., Stern, H. S. and Rubin, D. B. (2004). Bayesian data analysis. Chapman & Hall/CRC, Second Edition, ISBN: 1-58488-388-X. New York.
#/'
#/' Spiegelhalter, D. J., Best, N. G., Carlin, B. P., Van Der Linde, A. (2002). Bayesian measures of model complexity and fit. \emph{Journal of the Royal Statistical Society: Series B (Statistical Methodology)}, \bold{64}(4), pages 583--639.
#/'
#/' @author Cristina Mollica and Luca Tardella
if(class(pi_inv)[1]!="top_ordering"){
if(class(pi_inv)[1]=="RankData"){
pi_inv=as.top_ordering(data=pi_inv)
}
if(class(pi_inv)[1]=="rankings"){
pi_inv=as.top_ordering(data=pi_inv)
}
if(class(pi_inv)[1]=="matrix" | class(pi_inv)[1]=="data.frame"){
pi_inv=as.top_ordering(data=pi_inv,format_input="ordering",aggr=FALSE)
}
}
pi_inv <- fill_single_entries(data=pi_inv)
N <- nrow(pi_inv)
K <- ncol(pi_inv)
D_bar <- mean(deviance)
if(!is.null(MAPestP) & !is.null(MAPestW)){
point_estP <- MAPestP
point_estW <- MAPestW
}else{
if(post_est=="mean"){
point_estP <- matrix(colMeans(MCMCsampleP),G,K)
point_estW <- colMeans(MCMCsampleW)
}else{
point_estP <- matrix(apply(MCMCsampleP,2,FUN=median),G,K)
point_estW <- apply(MCMCsampleW,2,FUN=median)
}
}
rho <- matrix(1:K,nrow=G,ncol=K,byrow=TRUE)
D_hat <- -2*loglikPLMIX(p=point_estP,weights=point_estW,ref_order=rho,pi_inv=pi_inv)
pD <- D_bar-D_hat
pV <- var(deviance)/2
return(list(point_estP=point_estP,point_estW=point_estW,D_bar=D_bar,D_hat=D_hat,pD=pD,pV=pV,DIC1=D_bar+pD,DIC2=D_bar+pV,
BPIC1=D_bar+2*pD,BPIC2=D_bar+2*pV,BICM1=D_bar+pV*(log(x=N)-1),BICM2=D_hat+pV*log(x=N)))
}
selectPLMIX <- function(pi_inv,seq_G,
MCMCsampleP=vector(mode="list",length=length(seq_G)),
MCMCsampleW=vector(mode="list",length=length(seq_G)),
MAPestP,
MAPestW,
deviance,
post_est="mean",
parallel=FALSE){
#' Bayesian selection criteria for mixtures of Plackett-Luce models
#'
#' Compute Bayesian comparison criteria for mixtures of Plackett-Luce models with a different number of components.
#'
#' The \code{selectPLMIX} function privileges the use of the MAP point estimates to compute the Bayesian model comparison criteria, since they are not affected by the label switching issue. By setting both the \code{MAPestP} and \code{MAPestW} arguments equal to NULL, the user can alternatively compute the selection measures from a different posterior summary (\code{"mean"} or \code{"median"}) specified in the \code{post_est} argument. In the latter case, the MCMC samples for each Plackett-Luce mixture must be supplied in the lists \code{MCMCsampleP} and \code{MCMCsampleW}. The drawback of working with point estimates other than the MAP is that any label switching must first be removed from the traces to obtain meaningful results. See the \code{\link{label_switchPLMIX}} function to perform the label switching adjustment of the MCMC samples.
#'
#' Several model selection criteria are returned. The two versions of DIC correspond to alternative ways of computing the effective number of parameters: DIC1 was proposed by Spiegelhalter et al. (2002) with penalty named \code{pD}, whereas DIC2 was proposed by Gelman et al. (2004) with penalty named \code{pV}. The latter coincides with the AICM introduced by Raftery et al. (2007), that is, the Bayesian counterpart of AIC. BPIC1 and BPIC2 are obtained from the two DIC by simply doubling the penalty term, as suggested by Ando (2007) to counter DIC's tendency to overfitting. BICM1 is the Bayesian variant of the BIC, originally presented by Raftery et al. (2007) and entirely based on the MCMC sample. BICM2, instead, involves the actual MAP estimate, without approximating it from the MCMC sample as BICM1 does.
#'
#' @param pi_inv An object of class \code{top_ordering}, collecting the numeric \eqn{N}\eqn{\times}{x}\eqn{K} data matrix of partial orderings, or an object that can be coerced with \code{\link{as.top_ordering}}.
#' @param seq_G Numeric vector with the number of components of the Plackett-Luce mixtures to be compared.
#' @param MCMCsampleP List of size \code{length(seq_G)}, whose generic element is a numeric \eqn{L}\eqn{\times}{x}\eqn{(G*K)} matrix with the MCMC samples of the component-specific support parameters. Default is list of \code{NULL} elements.
#' @param MCMCsampleW List of size \code{length(seq_G)}, whose generic element is a numeric \eqn{L}\eqn{\times}{x}\eqn{G} matrix with the MCMC samples of the mixture weights. Default is list of \code{NULL} elements.
#' @param MAPestP List of size \code{length(seq_G)}, whose generic element is a numeric \eqn{G}\eqn{\times}{x}\eqn{K} matrix with the MAP estimates of the component-specific support parameters.
#' @param MAPestW List of size \code{length(seq_G)}, whose generic element is a numeric vector with the MAP estimates of the \eqn{G} mixture weights.
#' @param deviance List of size \code{length(seq_G)}, whose generic element is a numeric vector of posterior deviance values.
#' @param post_est Character string indicating the point estimates of the Plackett-Luce mixture parameters to be computed from the MCMC sample. This argument is ignored when MAP estimates are supplied in the \code{MAPestP} and \code{MAPestW} arguments. Default is \code{"mean"}. Alternatively, one can choose \code{"median"} (see 'Details').
#' @param parallel Logical: whether parallelization should be used. Default is \code{FALSE}.
#'
#' @return A list of named objects:
#'
#' \item{\code{point_estP}}{ List of size \code{length(seq_G)}, whose generic element is a numeric \eqn{G}\eqn{\times}{x}\eqn{K} matrix with the point estimates of the component-specific support parameters employed for the computation of the criteria.}
#' \item{\code{point_estW}}{ List of size \code{length(seq_G)}, whose generic element is a numeric vector with the \eqn{G} point estimates of the mixture weights employed for the computation of the criteria.}
#' \item{\code{fitting}}{ Numeric \code{length(seq_G)}\eqn{\times}{x}\eqn{2} matrix with the fitting terms of the comparison measures, given by the posterior expected deviance \code{D_bar} and the deviance \code{D_hat} evaluated at the point estimate.}
#' \item{\code{penalties}}{ Numeric \code{length(seq_G)}\eqn{\times}{x}\eqn{2} matrix with the penalty terms \code{pD} and \code{pV} (effective number of parameters).}
#' \item{\code{criteria}}{ Numeric \code{length(seq_G)}\eqn{\times}{x}\eqn{6} matrix of Bayesian model selection criteria: \code{DIC1}, \code{DIC2}, \code{BPIC1}, \code{BPIC2}, \code{BICM1} and \code{BICM2} (see 'Details').}
#'
#' @references
#' Mollica, C. and Tardella, L. (2017). Bayesian Plackett-Luce mixture models for partially ranked data. \emph{Psychometrika}, \bold{82}(2), pages 442--458, ISSN: 0033-3123, DOI: 10.1007/s11336-016-9530-0.
#'
#' Ando, T. (2007). Bayesian predictive information criterion for the evaluation of hierarchical Bayesian and empirical Bayes models. \emph{Biometrika}, \bold{94}(2), pages 443--458.
#'
#' Raftery, A. E., Satagopan, J. M., Newton, M. A. and Krivitsky, P. N. (2007). Estimating the integrated likelihood via posterior simulation using the harmonic mean identity. In \emph{Bayesian Statistics 8, Proceedings of the eighth Valencia International Meeting 2006}, pages 371--416. Oxford University Press.
#'
#' Gelman, A., Carlin, J. B., Stern, H. S. and Rubin, D. B. (2004). Bayesian data analysis. Chapman & Hall/CRC, Second Edition, ISBN: 1-58488-388-X. New York.
#'
#' Spiegelhalter, D. J., Best, N. G., Carlin, B. P. and Van Der Linde, A. (2002). Bayesian measures of model complexity and fit. \emph{Journal of the Royal Statistical Society: Series B (Statistical Methodology)}, \bold{64}(4), pages 583--639.
#'
#' @author Cristina Mollica and Luca Tardella
#' @examples
#'
#' data(d_carconf)
#' K <- ncol(d_carconf)
#'
#' ## Fit 1- and 2-component PL mixtures via MAP estimation
#' MAP_1 <- mapPLMIX_multistart(pi_inv=d_carconf, K=K, G=1,
#' n_start=2, n_iter=400*1)
#'
#' MAP_2 <- mapPLMIX_multistart(pi_inv=d_carconf, K=K, G=2,
#' n_start=2, n_iter=400*2)
#'
#' mcmc_iter <- 30
#' burnin <- 10
#'
#' ## Fit 1- and 2-component PL mixtures via Gibbs sampling procedure
#' GIBBS_1 <- gibbsPLMIX(pi_inv=d_carconf, K=K, G=1, n_iter=mcmc_iter,
#' n_burn=burnin, init=list(p=MAP_1$mod$P_map,
#' z=binary_group_ind(MAP_1$mod$class_map,G=1)))
#' GIBBS_2 <- gibbsPLMIX(pi_inv=d_carconf, K=K, G=2, n_iter=mcmc_iter,
#' n_burn=burnin, init=list(p=MAP_2$mod$P_map,
#' z=binary_group_ind(MAP_2$mod$class_map,G=2)))
#' ## Select the optimal number of components
#' SELECT <- selectPLMIX(pi_inv=d_carconf, seq_G=1:2,
#' MAPestP=list(MAP_1$mod$P_map, MAP_2$mod$P_map),
#' MAPestW=list(MAP_1$mod$W_map, MAP_2$mod$W_map),
#' deviance=list(GIBBS_1$deviance, GIBBS_2$deviance))
#' SELECT$criteria
#'
#' @export
if(class(pi_inv)[1]!="top_ordering"){
if(class(pi_inv)[1]=="RankData"){
pi_inv=as.top_ordering(data=pi_inv)
}
if(class(pi_inv)[1]=="rankings"){
pi_inv=as.top_ordering(data=pi_inv)
}
if(class(pi_inv)[1]=="matrix" | class(pi_inv)[1]=="data.frame"){
pi_inv=as.top_ordering(data=pi_inv,format_input="ordering",aggr=FALSE)
}
}
pi_inv <- fill_single_entries(data=pi_inv)
ncomp <- length(seq_G)
if(!parallel){
selection <- vector(mode="list",length=ncomp)
for(l in 1:ncomp){
print(paste("SELECTION CRITERIA FOR G=",seq_G[l]))
selection[[l]] <- selectPLMIX_single(pi_inv=pi_inv,G=seq_G[l],MCMCsampleP=MCMCsampleP[[l]],
MCMCsampleW=MCMCsampleW[[l]],MAPestP=MAPestP[[l]],
MAPestW=MAPestW[[l]],deviance=deviance[[l]],post_est=post_est)
}
}else{
selection <- foreach(l=1:ncomp) %dopar%{
tempselection <- selectPLMIX_single(pi_inv=pi_inv,G=seq_G[l],MCMCsampleP=MCMCsampleP[[l]],
MCMCsampleW=MCMCsampleW[[l]],MAPestP=MAPestP[[l]],
MAPestW=MAPestW[[l]],deviance=deviance[[l]],post_est=post_est)
}
}
point_estP <- lapply(selection,"[[","point_estP")
point_estW <- lapply(selection,"[[","point_estW")
fitting <- t(sapply(lapply(selection,"[",c("D_bar","D_hat")),unlist))
penalties <- t(sapply(lapply(selection,"[",c("pD","pV")),unlist))
criteria <- t(sapply(lapply(selection,"[",c("DIC1","DIC2","BPIC1","BPIC2","BICM1","BICM2")),unlist))
names(point_estP) <- names(point_estW) <- rownames(fitting) <- rownames(penalties) <- rownames(criteria) <- paste0("G_",seq_G)
out <- list(point_estP=point_estP,point_estW=point_estW,fitting=fitting,
penalties=penalties,criteria=criteria)
return(out)
}
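## Illustrative follow-up (assumes SELECT from the example above): pick the
## number of components minimizing a chosen criterion, e.g. BPIC1.
# seq_G <- 1:2
# best_G <- seq_G[which.min(SELECT$criteria[, "BPIC1"])]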
#### Label switching adjustment
label_switchPLMIX_single <- function(pi_inv,G,
MCMCsampleP,
MCMCsampleW,
MAPestP,
MAPestW){
#/' Label switching adjustment for mixtures of Plackett-Luce models
#/'
#/' Remove the label switching phenomenon from the MCMC samples of Bayesian mixtures of Plackett-Luce models with a different number of components.
#/'
#/' The \code{label_switchPLMIX} function performs the label switching adjustment of the MCMC samples via the Pivotal Reordering Algorithm (PRA) described in Marin et al (2005), by recalling the \code{\link[label.switching]{pra}} function from the \code{\link[label.switching]{label.switching}} package.
#/'
#/' @param pi_inv An object of class \code{top_ordering}, collecting the numeric \eqn{N}\eqn{\times}{x}\eqn{K} data matrix of partial orderings, or an object that can be coerced with \code{\link{as.top_ordering}}.
#/' @param G Number of mixture components.
#/' @param MCMCsampleP Numeric \eqn{L}\eqn{\times}{x}\eqn{G*K} matrix with the MCMC samples of the component-specific support parameters to be processed.
#/' @param MCMCsampleW Numeric \eqn{L}\eqn{\times}{x}\eqn{G} matrix with the MCMC samples of the mixture weights to be processed.
#/' @param MAPestP Numeric \eqn{G}\eqn{\times}{x}\eqn{K} matrix of MAP component-specific support parameter estimates to be used as pivot in the PRA method.
#/' @param MAPestW Numeric vector of the \eqn{G} MAP estimates of the mixture weights as pivot in the PRA method.
#/'
#/' @return A list of named objects:
#/'
#/' \item{\code{final_sampleP}}{ Numeric \eqn{G}\eqn{\times}{x}\eqn{K}\eqn{\times}{x}\eqn{L} array MCMC samples of the component-specific support parameters adjusted for label switching.}
#/' \item{\code{final_sampleW}}{ Numeric \eqn{L}\eqn{\times}{x}\eqn{G} matrix of MCMC samples of the mixture weights adjusted for label switching.}
#/'
#/' @author Cristina Mollica and Luca Tardella
if(class(pi_inv)[1]!="top_ordering"){
if(class(pi_inv)[1]=="RankData"){
pi_inv=as.top_ordering(data=pi_inv)
}
if(class(pi_inv)[1]=="rankings"){
pi_inv=as.top_ordering(data=pi_inv)
}
if(class(pi_inv)[1]=="matrix" | class(pi_inv)[1]=="data.frame"){
pi_inv=as.top_ordering(data=pi_inv,format_input="ordering",aggr=FALSE)
}
}
pi_inv <- fill_single_entries(data=pi_inv)
N <- nrow(pi_inv)
K <- ncol(pi_inv)
L <- nrow(MCMCsampleW)
mcmc.sample <- array(cbind(MCMCsampleP,MCMCsampleW),c(L,G,(K+1)))
if(G==1){
reordered.pra <- list(output=NULL)
reordered.pra$output <- mcmc.sample
}else{
print("LABEL SWITCHING ADJUSTMENT WITH PIVOTAL REORDERING ALGORITHM")
pivot.input <- cbind(MAPestP,MAPestW)
lab.pra <- pra(mcmc.pars=mcmc.sample,pivot=pivot.input)
reordered.pra <- permute.mcmc(mcmc=mcmc.sample,permutations=lab.pra$permutations)
}
final.sample <- matrix(reordered.pra$output,nrow=L,ncol=G*(K+1))
final_sampleP <- array(t(final.sample[,1:(G*K)]),c(G,K,L))
final_sampleW <- final.sample[,-c(1:(G*K)),drop=FALSE]
out <- list(final_sampleP=final_sampleP,final_sampleW=final_sampleW)
return(out)
}
label_switchPLMIX <- function(pi_inv,seq_G,
MCMCsampleP,
MCMCsampleW,
MAPestP,
MAPestW,
parallel=FALSE){
#' Label switching adjustment of the Gibbs sampling simulations for Bayesian mixtures of Plackett-Luce models
#'
#' Remove the label switching phenomenon from the MCMC samples of Bayesian mixtures of Plackett-Luce models with \eqn{G>1} components.
#'
#' The \code{label_switchPLMIX} function performs the label switching adjustment of the MCMC samples via the Pivotal Reordering Algorithm (PRA) described in Marin et al (2005), by recalling the \code{\link[label.switching]{pra}} function from the \code{\link[label.switching]{label.switching}} package.
#'
#' @param pi_inv An object of class \code{top_ordering}, collecting the numeric \eqn{N}\eqn{\times}{x}\eqn{K} data matrix of partial orderings, or an object that can be coerced with \code{\link{as.top_ordering}}.
#' @param seq_G Numeric vector with the number of components of the Plackett-Luce mixtures to be assessed.
#' @param MCMCsampleP List of size \code{length(seq_G)}, whose generic element is a numeric \eqn{L}\eqn{\times}{x}\eqn{(G*K)} matrix with the MCMC samples of the component-specific support parameters to be processed.
#' @param MCMCsampleW List of size \code{length(seq_G)}, whose generic element is a numeric \eqn{L}\eqn{\times}{x}\eqn{G} matrix with the MCMC samples of the mixture weights to be processed.
#' @param MAPestP List of size \code{length(seq_G)}, whose generic element is a numeric \eqn{G}\eqn{\times}{x}\eqn{K} matrix with the MAP estimates of the component-specific support parameters to be used as a pivot in the PRA method (see 'Details').
#' @param MAPestW List of size \code{length(seq_G)}, whose generic element is a numeric vector with the MAP estimates of the \eqn{G} mixture weights to be used as a pivot in the PRA method (see 'Details').
#' @param parallel Logical: whether parallelization should be used. Default is \code{FALSE}.
#'
#' @return A list of named objects:
#'
#' \item{\code{final_sampleP}}{ List of size \code{length(seq_G)}, whose generic element is a numeric \eqn{G}\eqn{\times}{x}\eqn{K}\eqn{\times}{x}\eqn{L} array with the MCMC samples of the component-specific support parameters adjusted for label switching.}
#' \item{\code{final_sampleW}}{ List of size \code{length(seq_G)}, whose generic element is a numeric \eqn{L}\eqn{\times}{x}\eqn{G} matrix with the MCMC samples of the mixture weights adjusted for label switching.}
#'
#'
#' @references
#' Mollica, C. and Tardella, L. (2017). Bayesian Plackett-Luce mixture models for partially ranked data. \emph{Psychometrika}, \bold{82}(2), pages 442--458, ISSN: 0033-3123, DOI: 10.1007/s11336-016-9530-0.
#'
#' Papastamoulis, P. (2016). label.switching: An R Package for Dealing with the Label Switching Problem in MCMC Outputs. \emph{Journal of Statistical Software}, \bold{69}(1), pages 1--24, DOI: 10.18637/jss.v069.c01.
#'
#' Marin, J. M., Mengersen, K. and Robert, C.P. (2005). Bayesian modelling and inference on mixtures of distributions. \emph{Handbook of Statistics} (25), D. Dey and C.R. Rao (eds). Elsevier-Sciences.
#'
#' @author Cristina Mollica and Luca Tardella
#'
#' @seealso \code{\link[label.switching]{pra}}
#'
#' @examples
#'
#' data(d_carconf)
#' K <- ncol(d_carconf)
#'
#' ## Fit 1- and 2-component PL mixtures via MAP estimation
#' MAP_1 <- mapPLMIX_multistart(pi_inv=d_carconf, K=K, G=1,
#' n_start=2, n_iter=400*1)
#'
#' MAP_2 <- mapPLMIX_multistart(pi_inv=d_carconf, K=K, G=2,
#' n_start=2, n_iter=400*2)
#'
#' MAP_3 <- mapPLMIX_multistart(pi_inv=d_carconf, K=K, G=3,
#' n_start=2, n_iter=400*3)
#'
#' mcmc_iter <- 30
#' burnin <- 10
#'
#' ## Fit 1- and 2-component PL mixtures via Gibbs sampling procedure
#' GIBBS_1 <- gibbsPLMIX(pi_inv=d_carconf, K=K, G=1, n_iter=mcmc_iter,
#' n_burn=burnin, init=list(p=MAP_1$mod$P_map,
#' z=binary_group_ind(MAP_1$mod$class_map,G=1)))
#' GIBBS_2 <- gibbsPLMIX(pi_inv=d_carconf, K=K, G=2, n_iter=mcmc_iter,
#' n_burn=burnin, init=list(p=MAP_2$mod$P_map,
#' z=binary_group_ind(MAP_2$mod$class_map,G=2)))
#' GIBBS_3 <- gibbsPLMIX(pi_inv=d_carconf, K=K, G=3, n_iter=mcmc_iter,
#' n_burn=burnin, init=list(p=MAP_3$mod$P_map,
#' z=binary_group_ind(MAP_3$mod$class_map,G=3)))
#'
#' ## Adjusting the MCMC samples for label switching
#' LS <- label_switchPLMIX(pi_inv=d_carconf, seq_G=1:3,
#' MCMCsampleP=list(GIBBS_1$P, GIBBS_2$P, GIBBS_3$P),
#' MCMCsampleW=list(GIBBS_1$W, GIBBS_2$W, GIBBS_3$W),
#' MAPestP=list(MAP_1$mod$P_map, MAP_2$mod$P_map, MAP_3$mod$P_map),
#' MAPestW=list(MAP_1$mod$W_map, MAP_2$mod$W_map, MAP_3$mod$W_map))
#' str(LS)
#' @export
if(class(pi_inv)[1]!="top_ordering"){
if(class(pi_inv)[1]=="RankData"){
pi_inv=as.top_ordering(data=pi_inv)
}
if(class(pi_inv)[1]=="rankings"){
pi_inv=as.top_ordering(data=pi_inv)
}
if(class(pi_inv)[1]=="matrix" | class(pi_inv)[1]=="data.frame"){
pi_inv=as.top_ordering(data=pi_inv,format_input="ordering",aggr=FALSE)
}
}
pi_inv <- fill_single_entries(data=pi_inv)
ncomp <- length(seq_G)
if(!parallel){
adjust <- vector(mode="list",length=ncomp)
for(l in 1:ncomp){
adjust[[l]] <- label_switchPLMIX_single(pi_inv=pi_inv,G=seq_G[l],MCMCsampleP=MCMCsampleP[[l]],
MCMCsampleW=MCMCsampleW[[l]],MAPestP=MAPestP[[l]],
MAPestW=MAPestW[[l]])
}
}else{
adjust <- foreach(l=1:ncomp) %dopar%{
tempadjust <- label_switchPLMIX_single(pi_inv=pi_inv,G=seq_G[l],MCMCsampleP=MCMCsampleP[[l]],
MCMCsampleW=MCMCsampleW[[l]],MAPestP=MAPestP[[l]],
MAPestW=MAPestW[[l]])
}
}
# OLD final_sampleP <- sapply(adjust,"[[","final_sampleP")
# OLD final_sampleW <- sapply(adjust,"[[","final_sampleW")
final_sampleP <- drop(simplify2array(simplify2array(lapply(adjust,function(x){lapply("final_sampleP",function(y)do.call("[[",list(x,y)))}))))
final_sampleW <- drop(simplify2array(simplify2array(lapply(adjust,function(x){lapply("final_sampleW",function(y)do.call("[[",list(x,y)))}))))
if(length(seq_G)>1){
names(final_sampleP) <- names(final_sampleW) <- paste0("G_",seq_G)
}else{
final_sampleP <- list(final_sampleP)
final_sampleW <- list(final_sampleW)
names(final_sampleP) <- names(final_sampleW) <- paste0("G_",seq_G)
}
out <- list(final_sampleP=final_sampleP,final_sampleW=final_sampleW)
return(out)
}
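## Illustrative bridge to selectPLMIX() (a sketch assuming LS and GIBBS_* from
## the example above): reshape each relabelled G x K x L support array back to
## an L x (G*K) matrix (column-major, matching the p_gk ordering used by the
## samplers) and compute the criteria from posterior means instead of the MAP.
# relabP <- lapply(LS$final_sampleP, function(a) t(apply(a, 3, c)))
# SEL <- selectPLMIX(pi_inv=d_carconf, seq_G=1:3,
#                    MCMCsampleP=relabP, MCMCsampleW=LS$final_sampleW,
#                    MAPestP=vector("list", 3), MAPestW=vector("list", 3),
#                    deviance=list(GIBBS_1$deviance, GIBBS_2$deviance, GIBBS_3$deviance),
#                    post_est="mean")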
#### Posterior predictive check
ppcheckPLMIX_single <- function(pi_inv,G,
MCMCsampleP,
MCMCsampleW,
top1=TRUE,
paired=TRUE){
#/' Posterior predictive check for a mixture of Plackett-Luce models
#/'
#/' Compute posterior predictive \eqn{p}-values based on top item and paired comparison frequencies to assess the goodness-of-fit of a Bayesian mixture of Plackett-Luce models for partial orderings.
#/'
#/' In the case of partial orderings, the same missingness patterns of the observed dataset, i.e., the number of items ranked by each sample unit, are reproduced on the replicated datasets.
#/'
#/' @param pi_inv An object of class \code{top_ordering}, collecting the numeric \eqn{N}\eqn{\times}{x}\eqn{K} data matrix of partial orderings, or an object that can be coerced with \code{\link{as.top_ordering}}.
#/' @param G Number of mixture components.
#/' @param MCMCsampleP Numeric \eqn{L}\eqn{\times}{x}\eqn{G*K} matrix with the MCMC samples of the component-specific support parameters.
#/' @param MCMCsampleW Numeric \eqn{L}\eqn{\times}{x}\eqn{G} matrix with the MCMC samples of the mixture weights.
#/' @param top1 Logical: whether the posterior predictive \eqn{p}-value based on top frequencies has to be computed. Default is \code{TRUE}.
#/' @param paired Logical: whether the posterior predictive \eqn{p}-value based on paired comparison frequencies has to be computed. Default is \code{TRUE}.
#/'
#/' @return A list of named objects:
#/'
#/' \item{\code{post_pred_pvalue_top1}}{ If \code{top1} is \code{TRUE}, posterior predictive \eqn{p}-value based on top frequencies, otherwise \code{NA}.}
#/' \item{\code{post_pred_pvalue_paired}}{ If \code{paired} is \code{TRUE}, posterior predictive \eqn{p}-value based on paired comparison frequencies, otherwise \code{NA}.}
#/'
#/' @author Cristina Mollica and Luca Tardella
if(class(pi_inv)[1]!="top_ordering"){
if(class(pi_inv)[1]=="RankData"){
pi_inv=as.top_ordering(data=pi_inv)
}
if(class(pi_inv)[1]=="rankings"){
pi_inv=as.top_ordering(data=pi_inv)
}
if(class(pi_inv)[1]=="matrix" | class(pi_inv)[1]=="data.frame"){
pi_inv=as.top_ordering(data=pi_inv,format_input="ordering",aggr=FALSE)
}
}
pi_inv <- fill_single_entries(data=pi_inv)
N <- nrow(pi_inv)
K <- ncol(pi_inv)
L <- nrow(MCMCsampleW)
final.sample <- cbind(MCMCsampleP,MCMCsampleW)
final_sampleP <- array(c(t(MCMCsampleP)),c(G,K,L))
final_sampleW <- MCMCsampleW
pi_inv_int <- pi_inv
mode(pi_inv_int) <- "integer"
rho <- matrix(1:K,nrow=G,ncol=K,byrow=TRUE)
if(top1){
print(paste("POSTERIOR PREDICTIVE CHECK FOR G=",G))
print("Top1 frequencies-based posterior predictive p-value")
chi.obs.top1 <- rep(NA,L)
chi.rep.top1 <- rep(NA,L)
for(l in 1:L){
if((l%%200)==0) print(l) # progress every 200 draws
chi.obs.top1[l] <- chisqmeasureobs1dim(pi_inv_int, p=matrix(final_sampleP[,,l],nrow=G), weights=final_sampleW[l,])
chi.rep.top1[l] <- chisqmeasuretheo1dim(N,ref_order=rho, p=matrix(final_sampleP[,,l],nrow=G), weights=final_sampleW[l,],pi_inv_int)
}
post_pred_pvalue_top1 <- mean(chi.rep.top1 >= chi.obs.top1)
}else{
post_pred_pvalue_top1 <- NA
}
if(paired){
print(paste("POSTERIOR PREDICTIVE CHECK FOR G=",G))
print("Paired comparison frequencies-based posterior predictive p-value")
chi.obs.paired <- rep(NA,L)
chi.rep.paired <- rep(NA,L)
for(l in 1:L){
if((l%%200)==0) print(l) # progress every 200 draws
chi.obs.paired[l] <- chisqmeasureobs(pi_inv_int, p=matrix(final_sampleP[,,l],nrow=G), weights=final_sampleW[l,])
chi.rep.paired[l] <- chisqmeasuretheo(N,ref_order=rho, p=matrix(final_sampleP[,,l],nrow=G), weights=final_sampleW[l,],pi_inv_int)
}
post_pred_pvalue_paired <- mean(chi.rep.paired >= chi.obs.paired)
}else{
post_pred_pvalue_paired <- NA
}
out <- list(post_pred_pvalue_top1=post_pred_pvalue_top1,post_pred_pvalue_paired=post_pred_pvalue_paired)
return(out)
}
ppcheckPLMIX <- function(pi_inv,seq_G,
MCMCsampleP,
MCMCsampleW,
top1=TRUE,
paired=TRUE,
parallel=FALSE){
#' Posterior predictive check for Bayesian mixtures of Plackett-Luce models
#'
#' Perform posterior predictive check to assess the goodness-of-fit of Bayesian mixtures of Plackett-Luce models with a different number of components.
#'
#' The \code{ppcheckPLMIX} function returns two posterior predictive \eqn{p}-values based on two chi squared discrepancy variables involving: (i) the top item frequencies and (ii) the paired comparison frequencies. In the presence of partial sequences in the \code{pi_inv} matrix, the same missingness patterns observed in the dataset (i.e., the number of items ranked by each sample unit) are reproduced on the replicated datasets from the posterior predictive distribution.
#'
#'
#' @param pi_inv An object of class \code{top_ordering}, collecting the numeric \eqn{N}\eqn{\times}{x}\eqn{K} data matrix of partial orderings, or an object that can be coerced with \code{\link{as.top_ordering}}.
#' @param seq_G Numeric vector with the number of components of the Plackett-Luce mixtures to be assessed.
#' @param MCMCsampleP List of size \code{length(seq_G)}, whose generic element is a numeric \eqn{L}\eqn{\times}{x}\eqn{(G*K)} matrix with the MCMC samples of the component-specific support parameters.
#' @param MCMCsampleW List of size \code{length(seq_G)}, whose generic element is a numeric \eqn{L}\eqn{\times}{x}\eqn{G} matrix with the MCMC samples of the mixture weights.
#' @param top1 Logical: whether the posterior predictive \eqn{p}-value based on the top item frequencies has to be computed. Default is \code{TRUE}.
#' @param paired Logical: whether the posterior predictive \eqn{p}-value based on the paired comparison frequencies has to be computed. Default is \code{TRUE}.
#' @param parallel Logical: whether parallelization should be used. Default is \code{FALSE}.
#'
#' @return A list with a named element:
#'
#' \item{\code{post_pred_pvalue}}{ Numeric \code{length(seq_G)}\eqn{\times}{x}\eqn{2} matrix of posterior predictive \eqn{p}-values based on the top item and paired comparison frequencies. If either \code{top1} or \code{paired} argument is \code{FALSE}, the corresponding matrix entries are \code{NA}.}
#'
#'
#' @references
#' Mollica, C. and Tardella, L. (2017). Bayesian Plackett-Luce mixture models for partially ranked data. \emph{Psychometrika}, \bold{82}(2), pages 442--458, ISSN: 0033-3123, DOI: 10.1007/s11336-016-9530-0.
#'
#' @author Cristina Mollica and Luca Tardella
#'
#' @seealso \code{\link{ppcheckPLMIX_cond}}
#'
#' @examples
#'
#' data(d_carconf)
#' K <- ncol(d_carconf)
#'
#' ## Fit 1- and 2-component PL mixtures via MAP estimation
#' MAP_1 <- mapPLMIX_multistart(pi_inv=d_carconf, K=K, G=1,
#' n_start=2, n_iter=400*1)
#'
#' MAP_2 <- mapPLMIX_multistart(pi_inv=d_carconf, K=K, G=2,
#' n_start=2, n_iter=400*2)
#'
#' MAP_3 <- mapPLMIX_multistart(pi_inv=d_carconf, K=K, G=3,
#' n_start=2, n_iter=400*3)
#'
#' mcmc_iter <- 30
#' burnin <- 10
#'
#' ## Fit 1- and 2-component PL mixtures via Gibbs sampling procedure
#' GIBBS_1 <- gibbsPLMIX(pi_inv=d_carconf, K=K, G=1, n_iter=mcmc_iter,
#' n_burn=burnin, init=list(p=MAP_1$mod$P_map,
#' z=binary_group_ind(MAP_1$mod$class_map,G=1)))
#' GIBBS_2 <- gibbsPLMIX(pi_inv=d_carconf, K=K, G=2, n_iter=mcmc_iter,
#' n_burn=burnin, init=list(p=MAP_2$mod$P_map,
#' z=binary_group_ind(MAP_2$mod$class_map,G=2)))
#' GIBBS_3 <- gibbsPLMIX(pi_inv=d_carconf, K=K, G=3, n_iter=mcmc_iter,
#' n_burn=burnin, init=list(p=MAP_3$mod$P_map,
#' z=binary_group_ind(MAP_3$mod$class_map,G=3)))
#'
#' ## Checking goodness-of-fit of the estimated mixtures
#' CHECK <- ppcheckPLMIX(pi_inv=d_carconf, seq_G=1:3,
#' MCMCsampleP=list(GIBBS_1$P, GIBBS_2$P, GIBBS_3$P),
#' MCMCsampleW=list(GIBBS_1$W, GIBBS_2$W, GIBBS_3$W))
#' CHECK$post_pred_pvalue
#'
#' @export
if(class(pi_inv)[1]!="top_ordering"){
if(class(pi_inv)[1]=="RankData"){
pi_inv=as.top_ordering(data=pi_inv)
}
if(class(pi_inv)[1]=="rankings"){
pi_inv=as.top_ordering(data=pi_inv)
}
if(class(pi_inv)[1]=="matrix" | class(pi_inv)[1]=="data.frame"){
pi_inv=as.top_ordering(data=pi_inv,format_input="ordering",aggr=FALSE)
}
}
pi_inv <- fill_single_entries(data=pi_inv)
ncomp <- length(seq_G)
if(!parallel){
fitting <- vector(mode="list",length=ncomp)
for(l in 1:ncomp){
fitting[[l]] <- ppcheckPLMIX_single(pi_inv=pi_inv,G=seq_G[l],MCMCsampleP=MCMCsampleP[[l]],
MCMCsampleW=MCMCsampleW[[l]],top1=top1,paired=paired)
}
}else{
fitting <- foreach(l=1:ncomp) %dopar%{
tempfitting <- ppcheckPLMIX_single(pi_inv=pi_inv,G=seq_G[l],MCMCsampleP=MCMCsampleP[[l]],
MCMCsampleW=MCMCsampleW[[l]],top1=top1,paired=paired)
}
}
post_pred_pvalue <- t(sapply(lapply(fitting,"[",c("post_pred_pvalue_top1","post_pred_pvalue_paired")),unlist))
if(!is.numeric(post_pred_pvalue)){
post_pred_pvalue <- matrix(NA,nrow=length(seq_G),ncol=2)
}
attributes(post_pred_pvalue) <- attributes(post_pred_pvalue)[c("dim","dimnames")]
post_pred_pvalue <- as.matrix(post_pred_pvalue)
rownames(post_pred_pvalue) <- paste0("G_",seq_G)
out <- list(post_pred_pvalue=post_pred_pvalue)
return(out)
}
ppcheckPLMIX_cond_single <- function(pi_inv,G,
MCMCsampleP,
MCMCsampleW,
top1=TRUE,
paired=TRUE){
#/' Conditional posterior predictive \eqn{p}-values
#/'
#/' Compute conditional posterior predictive \eqn{p}-values based on top item and paired comparison frequencies to assess the goodness-of-fit of a Bayesian mixture of Plackett-Luce models for partial orderings.
#/'
#/' In the case of partial orderings, the same missingness patterns of the observed dataset, i.e., the number of items ranked by each sample unit, are reproduced on the replicated datasets.
#/'
#/' @param pi_inv An object of class \code{top_ordering}, collecting the numeric \eqn{N}\eqn{\times}{x}\eqn{K} data matrix of partial orderings, or an object that can be coerced with \code{\link{as.top_ordering}}.
#/' @param G Number of mixture components.
#/' @param MCMCsampleP Numeric \eqn{L}\eqn{\times}{x}\eqn{G*K} matrix with the MCMC samples of the component-specific support parameters.
#/' @param MCMCsampleW Numeric \eqn{L}\eqn{\times}{x}\eqn{G} matrix with the MCMC samples of the mixture weights.
#/' @param top1 Logical: whether the posterior predictive \eqn{p}-value based on top frequencies has to be computed. Default is \code{TRUE}.
#/' @param paired Logical: whether the posterior predictive \eqn{p}-value based on paired comparison frequencies has to be computed. Default is \code{TRUE}.
#/'
#/' @return A list of named objects:
#/'
#/' \item{\code{post_pred_pvalue_top1_cond}}{ If \code{top1} is \code{TRUE}, conditional posterior predictive \eqn{p}-value based on top frequencies, otherwise \code{NA}.}
#/' \item{\code{post_pred_pvalue_paired_cond}}{ If \code{paired} is \code{TRUE}, conditional posterior predictive \eqn{p}-value based on paired comparison frequencies, otherwise \code{NA}.}
#/'
#/' @author Cristina Mollica and Luca Tardella
if(class(pi_inv)[1]!="top_ordering"){
if(class(pi_inv)[1]=="RankData"){
pi_inv=as.top_ordering(data=pi_inv)
}
if(class(pi_inv)[1]=="rankings"){
pi_inv=as.top_ordering(data=pi_inv)
}
if(class(pi_inv)[1]=="matrix" | class(pi_inv)[1]=="data.frame"){
pi_inv=as.top_ordering(data=pi_inv,format_input="ordering",aggr=FALSE)
}
}
pi_inv <- fill_single_entries(data=pi_inv)
N <- nrow(pi_inv)
K <- ncol(pi_inv)
L <- nrow(MCMCsampleW)
final.sample <- cbind(MCMCsampleP,MCMCsampleW)
final_sampleP <- array(c(t(MCMCsampleP)),c(G,K,L))
final_sampleW <- MCMCsampleW
pi_inv_int <- pi_inv
mode(pi_inv_int) <- "integer"
rho <- matrix(1:K,nrow=G,ncol=K,byrow=TRUE)
if(top1){
print(paste("CONDITIONAL POSTERIOR PREDICTIVE CHECK FOR G=",G))
print("Conditional top1 frequencies-based posterior predictive p-value")
chi.obs.top1.cond <- rep(NA,L)
chi.rep.top1.cond <- rep(NA,L)
chi.obs.top1.mat <- array(NA,dim=c(K,K,L))
chi.rep.top1.mat <- array(NA,dim=c(K,K,L))
for(l in 1:L){
if((l%%200)==0) print(l) # progress every 200 draws
chi.obs.top1.mat[,,l] <- chisqmeasureobsmatrix1dim(pi_inv_int, p=matrix(final_sampleP[,,l],nrow=G), weights=final_sampleW[l,])
chi.rep.top1.mat[,,l] <- chisqmeasuretheomatrix1dim(N,ref_order=rho, p=matrix(final_sampleP[,,l],nrow=G), weights=final_sampleW[l,],pi_inv_int)
chi.obs.top1.cond[l] <- sum(chi.obs.top1.mat[,,l])
chi.rep.top1.cond[l] <- sum(chi.rep.top1.mat[,,l])
}
post_pred_pvalue_top1_cond <- mean(chi.rep.top1.cond >= chi.obs.top1.cond)
}else{
post_pred_pvalue_top1_cond <- NA
}
if(paired){
print(paste("CONDITIONAL POSTERIOR PREDICTIVE CHECK FOR G=",G))
print("Conditional paired comparison frequencies-based posterior predictive p-value")
chi.obs.paired.cond=rep(NA,L)
chi.rep.paired.cond=rep(NA,L)
for(l in 1:L){
if((l%%200)==0) print(l) # progress every 200 draws
chi.obs.paired.cond[l] <- chisqmeasureobscond(pi_inv_int, p=matrix(final_sampleP[,,l],nrow=G), weights=final_sampleW[l,])
chi.rep.paired.cond[l] <- chisqmeasuretheocond(N,ref_order=rho, p=matrix(final_sampleP[,,l],nrow=G), weights=final_sampleW[l,],pi_inv_int)
}
post_pred_pvalue_paired_cond <- mean(chi.rep.paired.cond >= chi.obs.paired.cond)
}else{
post_pred_pvalue_paired_cond <- NA
}
out <- list(post_pred_pvalue_top1_cond=post_pred_pvalue_top1_cond,post_pred_pvalue_paired_cond=post_pred_pvalue_paired_cond)
return(out)
}
ppcheckPLMIX_cond <- function(pi_inv,seq_G,
MCMCsampleP,
MCMCsampleW,
top1=TRUE,
paired=TRUE,
parallel=FALSE){
#' Conditional posterior predictive check for Bayesian mixtures of Plackett-Luce models
#'
#' Perform conditional posterior predictive check to assess the goodness-of-fit of Bayesian mixtures of Plackett-Luce models with a different number of components.
#'
#' The \code{ppcheckPLMIX_cond} function returns two posterior predictive \eqn{p}-values based on two chi squared discrepancy variables involving: (i) the top item frequencies and (ii) the paired comparison frequencies. In the presence of partial sequences in the \code{pi_inv} matrix, the same missingness patterns observed in the dataset (i.e., the number of items ranked by each sample unit) are reproduced on the replicated datasets from the posterior predictive distribution. Unlike the \code{ppcheckPLMIX} function, the conditional discrepancy measures are obtained by summing the chi squared discrepancies computed on subsamples of observations with the same number of ranked items.
#'
#'
#' @param pi_inv An object of class \code{top_ordering}, collecting the numeric \eqn{N}\eqn{\times}{x}\eqn{K} data matrix of partial orderings, or an object that can be coerced with \code{\link{as.top_ordering}}.
#' @param seq_G Numeric vector with the number of components of the Plackett-Luce mixtures to be assessed.
#' @param MCMCsampleP List of size \code{length(seq_G)}, whose generic element is a numeric \eqn{L}\eqn{\times}{x}\eqn{(G*K)} matrix with the MCMC samples of the component-specific support parameters.
#' @param MCMCsampleW List of size \code{length(seq_G)}, whose generic element is a numeric \eqn{L}\eqn{\times}{x}\eqn{G} matrix with the MCMC samples of the mixture weights.
#' @param top1 Logical: whether the posterior predictive \eqn{p}-value based on the top item frequencies has to be computed. Default is \code{TRUE}.
#' @param paired Logical: whether the posterior predictive \eqn{p}-value based on the paired comparison frequencies has to be computed. Default is \code{TRUE}.
#' @param parallel Logical: whether parallelization should be used. Default is \code{FALSE}.
#'
#' @return A list with a named element:
#'
#' \item{\code{post_pred_pvalue_cond}}{ Numeric \code{length(seq_G)}\eqn{\times}{x}\eqn{2} matrix of posterior predictive \eqn{p}-values based on the top item and paired comparison frequencies. If either \code{top1} or \code{paired} argument is \code{FALSE}, the corresponding matrix entries are \code{NA}.}
#'
#'
#' @references
#' Mollica, C. and Tardella, L. (2017). Bayesian Plackett-Luce mixture models for partially ranked data. \emph{Psychometrika}, \bold{82}(2), pages 442--458, ISSN: 0033-3123, DOI: 10.1007/s11336-016-9530-0.
#'
#' @author Cristina Mollica and Luca Tardella
#'
#' @seealso \code{\link{ppcheckPLMIX}}
#'
#' @examples
#'
#' data(d_carconf)
#' K <- ncol(d_carconf)
#'
#' ## Fit 1- and 2-component PL mixtures via MAP estimation
#' MAP_1 <- mapPLMIX_multistart(pi_inv=d_carconf, K=K, G=1,
#' n_start=2, n_iter=400*1)
#'
#' MAP_2 <- mapPLMIX_multistart(pi_inv=d_carconf, K=K, G=2,
#' n_start=2, n_iter=400*2)
#'
#' MAP_3 <- mapPLMIX_multistart(pi_inv=d_carconf, K=K, G=3,
#' n_start=2, n_iter=400*3)
#'
#' mcmc_iter <- 30
#' burnin <- 10
#'
#' ## Fit 1- and 2-component PL mixtures via Gibbs sampling procedure
#' GIBBS_1 <- gibbsPLMIX(pi_inv=d_carconf, K=K, G=1, n_iter=mcmc_iter,
#' n_burn=burnin, init=list(p=MAP_1$mod$P_map,
#' z=binary_group_ind(MAP_1$mod$class_map,G=1)))
#' GIBBS_2 <- gibbsPLMIX(pi_inv=d_carconf, K=K, G=2, n_iter=mcmc_iter,
#' n_burn=burnin, init=list(p=MAP_2$mod$P_map,
#' z=binary_group_ind(MAP_2$mod$class_map,G=2)))
#' GIBBS_3 <- gibbsPLMIX(pi_inv=d_carconf, K=K, G=3, n_iter=mcmc_iter,
#' n_burn=burnin, init=list(p=MAP_3$mod$P_map,
#' z=binary_group_ind(MAP_3$mod$class_map,G=3)))
#'
#' ## Checking goodness-of-fit of the estimated mixtures
#' CHECKCOND <- ppcheckPLMIX_cond(pi_inv=d_carconf, seq_G=1:3,
#' MCMCsampleP=list(GIBBS_1$P, GIBBS_2$P, GIBBS_3$P),
#' MCMCsampleW=list(GIBBS_1$W, GIBBS_2$W, GIBBS_3$W))
#' CHECKCOND$post_pred_pvalue
#'
#' @export
if(class(pi_inv)[1]!="top_ordering"){
if(class(pi_inv)[1]=="RankData"){
pi_inv=as.top_ordering(data=pi_inv)
}
if(class(pi_inv)[1]=="rankings"){
pi_inv=as.top_ordering(data=pi_inv)
}
if(class(pi_inv)[1]=="matrix" | class(pi_inv)[1]=="data.frame"){
pi_inv=as.top_ordering(data=pi_inv,format_input="ordering",aggr=FALSE)
}
}
pi_inv <- fill_single_entries(data=pi_inv)
ncomp <- length(seq_G)
if(!parallel){
fitting <- vector(mode="list",length=ncomp)
for(l in 1:ncomp){
fitting[[l]] <- ppcheckPLMIX_cond_single(pi_inv=pi_inv,G=seq_G[l],MCMCsampleP=MCMCsampleP[[l]],
MCMCsampleW=MCMCsampleW[[l]],top1=top1,paired=paired)
}
}else{
fitting <- foreach(l=1:ncomp) %dopar%{
tempfitting <- ppcheckPLMIX_cond_single(pi_inv=pi_inv,G=seq_G[l],MCMCsampleP=MCMCsampleP[[l]],
MCMCsampleW=MCMCsampleW[[l]],top1=top1,paired=paired)
}
}
post_pred_pvalue_cond <- t(sapply(lapply(fitting,"[",c("post_pred_pvalue_top1_cond","post_pred_pvalue_paired_cond")),unlist))
if(!is.numeric(post_pred_pvalue_cond)){
post_pred_pvalue_cond <- matrix(NA,nrow=length(seq_G),ncol=2)
}
attributes(post_pred_pvalue_cond) <- attributes(post_pred_pvalue_cond)[c("dim","dimnames")]
post_pred_pvalue_cond <- as.matrix(post_pred_pvalue_cond)
rownames(post_pred_pvalue_cond) <- paste0("G_",seq_G)
out <- list(post_pred_pvalue_cond=post_pred_pvalue_cond)
return(out)
}
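## Note (illustrative): the parallel=TRUE branches in selectPLMIX(),
## label_switchPLMIX(), ppcheckPLMIX() and ppcheckPLMIX_cond() rely on a
## registered foreach backend, which is not set up anywhere in this file.
## A minimal sketch, assuming the doParallel package is available:
# library(doParallel)
# cl <- parallel::makeCluster(2)
# registerDoParallel(cl)
# CHECKCOND <- ppcheckPLMIX_cond(pi_inv=d_carconf, seq_G=1:2,
#                                MCMCsampleP=list(GIBBS_1$P, GIBBS_2$P),
#                                MCMCsampleW=list(GIBBS_1$W, GIBBS_2$W),
#                                parallel=TRUE)
# parallel::stopCluster(cl)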
|
#!/usr/bin/env Rscript
# Usage: Rscript Plot_SigMutation_abs.R <project>
args = commandArgs(trailingOnly=TRUE)
if (length(args) != 1) {
  stop("Exactly 1 argument must be supplied: project", call.=FALSE)
}
library("SigProfilerPlottingR")
project = args[1]
# create the output plot directory if it does not exist yet
plot_path <- paste0("/data/",project,"/output/plot/")
if (! dir.exists(plot_path)){
  dir.create(plot_path, recursive=TRUE)
}
out_dir <- paste0("/data/",project,"/output")
# plot each doublet-base-substitution (DBS) context in absolute counts
DBS_type=c("1248","186","2976","78")
for (i in DBS_type) {
  plotDBS(paste0(out_dir,"/DBS/",project,".DBS",i,".all"), file.path(out_dir,"plot/"), project, i, percentage=FALSE)
}
# plot each single-base-substitution (SBS) context in absolute counts
SBS_type=c("1536","384","6144","24","6","96")
for (i in SBS_type) {
  plotSBS(paste0(out_dir,"/SBS/",project,".SBS",i,".all"), file.path(out_dir,"plot/"), project, i, percentage=FALSE)
}
# plot each indel (ID) context in absolute counts
ID_type=c("28","415","83","8628","96")
for (i in ID_type) {
  plotID(paste0(out_dir,"/ID/",project,".ID",i,".all"), file.path(out_dir,"plot/"), project, i, percentage=FALSE)
} | /SigMutations_combined/Plot_SigMutation_abs.R | no_license | luolingqi/MMRd.Project | R | false | false | 936 | r | #!/usr/bin/env Rscript
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/automl_objects.R
\name{TranslationEvaluationMetrics}
\alias{TranslationEvaluationMetrics}
\title{TranslationEvaluationMetrics Object}
\usage{
TranslationEvaluationMetrics(bleuScore = NULL, baseBleuScore = NULL)
}
\arguments{
\item{bleuScore}{Output only}
\item{baseBleuScore}{Output only}
}
\value{
TranslationEvaluationMetrics object
}
\description{
TranslationEvaluationMetrics Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
Evaluation metrics for the dataset.
}
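\examples{
## Minimal constructor sketch; the scores below are illustrative values only,
## not output from a real AutoML evaluation:
metrics <- TranslationEvaluationMetrics(bleuScore = 42.1, baseBleuScore = 38.7)
}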
\concept{TranslationEvaluationMetrics functions}
| /googleautomlv1beta1.auto/man/TranslationEvaluationMetrics.Rd | no_license | justinjm/autoGoogleAPI | R | false | true | 633 | rd |
# build lake station methods
# run Rmd to line 126
lakeStations <- filter(stationTable, str_detect(ID305B_1, 'L_') | str_detect(ID305B_2, 'L_') | str_detect(ID305B_3, 'L_') | str_detect(ID305B_4, 'L_') |
str_detect(ID305B_5, 'L_') | str_detect(ID305B_6, 'L_') | str_detect(ID305B_7, 'L_') | str_detect(ID305B_8, 'L_') |
str_detect(ID305B_9, 'L_') | str_detect(ID305B_10, 'L_') )
stationTable1 <- stationTable # save a copy
stationTable <- lakeStations # only deal with lake stations for now
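# pick one test station below; only the last uncommented assignment takes effect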
station <- '2-JKS046.40'
station <- '2-TRH000.40' # non 187 lake with WQS
station <- '2-JMS042.92' # for ammonia depth testing
#i = 18 # '2-JKS046.40'
#i = 5 # non 187 lake
# pull one station data
stationData <- filter(conventionals, FDT_STA_ID %in% station) %>% #stationTable$STATION_ID[i]) %>% #
left_join(stationTable, by = c('FDT_STA_ID' = 'STATION_ID')) %>%
pHSpecialStandardsCorrection() %>% # correct pH to special standards where necessary
# special lake steps
{if(station %in% lakeStations$STATION_ID) #if(stationTable$STATION_ID[i] %in% lakeStations$STATION_ID)
mutate(., lakeStation = TRUE) %>%
thermoclineDepth() # adds thermocline information and SampleDate
else mutate(., lakeStation = FALSE)}
x <- stationData
View(dplyr::select(stationTable, STATION_ID, LACUSTRINE, REGION, WQS_ID:`Total Phosphorus (ug/L)`))
| /3.automatedAssessment/lakeMethods.R | no_license | EmmaVJones/IR2022 | R | false | false | 1,399 | r |
testlist <- list(id = NULL, id = NULL, booklet_id = c(8168473L, 2127314835L, 171177770L, -1942759639L, -1815221204L, 601253144L, -804651186L, 2094281728L, 860713787L, -971707568L, -1475044502L, 870040598L, -1182814578L, -1415711445L, 1901326755L, -1882837573L, 1340545259L, 1156041943L, 823641812L, -1106109928L, -1048157941L), person_id = c(0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L))
result <- do.call(dexterMST:::is_person_booklet_sorted,testlist)
str(result)
| /dexterMST/inst/testfiles/is_person_booklet_sorted/AFL_is_person_booklet_sorted/is_person_booklet_sorted_valgrind_files/1615938001-test.R | no_license | akhikolla/updatedatatype-list1 | R | false | false | 826 | r |
.Random.seed <-
c(403L, 26L, 1079060254L, 631101813L, 1792243849L, 469870141L,
-491346856L, -46669809L, 907346788L, -2073377944L, -1715830637L,
-2051828330L, -1581115384L, 1759781727L, -624075574L, 677518528L,
-1534605181L, 965074941L, -1323524497L, -496360780L, 2139171120L,
348701467L, -1937054869L, 1872411521L, -1815564319L, -2097168728L,
-1520857228L, -1173757501L, -1487902446L, 1930384212L, 289273045L,
-1288592424L, -978507503L, -880488351L, 1493479884L, 1891903143L,
-800531837L, -1132190105L, 2137259799L, 924767708L, 299631356L,
-1210418972L, -1984549854L, -1973165470L, -637138560L, -1121270335L,
-811911508L, 158170961L, 1765251294L, -1887227185L, -1415423491L,
946851316L, 1409546807L, 1452558247L, 2071065449L, -900398494L,
-545771922L, -1415716213L, 1369913288L, 133010735L, 532742576L,
-795211473L, 980025171L, -455715575L, -1627675361L, 810068244L,
1901007404L, 2070660020L, 1670142289L, -1316809358L, -1122781268L,
906198049L, 601976043L, 215019894L, -408576012L, -1357189911L,
-1586634746L, -1755850335L, -2128682035L, 1346930404L, -1618350077L,
-953426305L, 2141650072L, 522709794L, 1446406299L, 1804326721L,
1530957977L, 793501046L, -2004607948L, -275789279L, -823747884L,
-495700094L, -1128717465L, 1430584417L, 670720225L, 934225487L,
-716942778L, 1899131932L, -418331163L, 692248867L, -1669088961L,
-1862557123L, -96944915L, -1341145644L, -435565977L, -1332329564L,
-2036033206L, -993595632L, -1484447852L, -1517484040L, -1502860047L,
-70922527L, 1163826078L, -686363220L, -875560854L, 2057290006L,
1427627790L, 552126638L, 1210442217L, 337471740L, -1774921984L,
465752188L, -69099856L, 739669259L, 778076781L, -130765251L,
-1925050662L, 892832165L, 257246139L, 870807559L, 1292645910L,
-673052609L, -593922425L, -1004120936L, -1395297831L, 110037204L,
-697048080L, 1879014959L, 753229924L, 450366683L, 338882373L,
-118386055L, 72145213L, 198297222L, 1382962145L, -178355679L,
1797788856L, -1679472923L, -1643795220L, -755693473L, 696826861L,
1436608299L, 307335600L, -1584181931L, -770016090L, -1662268850L,
-1892550299L, -1227524786L, -1711577264L, 765126753L, 933962101L,
1040983895L, 1590481413L, 824031335L, -1778554954L, 1322853385L,
1566460375L, 994764169L, -128796744L, -1816189277L, -400818957L,
2118750143L, -2125748687L, -1808400958L, -933333264L, 1461567041L,
1906378254L, -710315551L, 501904972L, -772896035L, 1646277772L,
-563927420L, -2079002804L, 300014270L, -602177782L, 1046783124L,
446442043L, 1160858777L, -1839050592L, 104403854L, 966529055L,
-293469321L, -191957665L, -2068903990L, 1936726780L, -956802784L,
-559485999L, 102605410L, 1283844974L, 1030738768L, -271182839L,
-948977616L, 1820337674L, -1253515573L, -1746371874L, -6924241L,
-2016745545L, 1774565373L, -1976966102L, -1014167259L, 757242346L,
545391547L, -578777356L, -31447147L, 572237407L, -134944388L,
800804208L, -2040810382L, 578272955L, -1940322297L, 1303883146L,
1480550556L, 1062184953L, 268708524L, -1468362059L, -1800469160L,
1071485182L, 1589535077L, 1956523894L, -302010451L, -55707889L,
1536841577L, 2097671870L, -992029547L, -1663466834L, 942979030L,
-833209501L, 625666632L, -2125786911L, -1411450524L, 1774337957L,
538272036L, -544624592L, 1469340049L, -1949828901L, -669831725L,
1561863571L, 816599433L, -1166046306L, 2051872325L, 236144043L,
-1854822488L, -1949942499L, -1310901541L, 1444527581L, 1891047398L,
820136785L, 1404231373L, -1432781758L, 1103633905L, -14767928L,
110936894L, -1655227290L, 1964697513L, 620659239L, -456799059L,
145076232L, 1562444105L, 1976461388L, 437986336L, 1486885307L,
-774477654L, 84108721L, 931341012L, -1657723165L, -1214187236L,
1394920950L, -1611320669L, 1289476165L, -1111227740L, -2047926171L,
-182918347L, -578803656L, -105995641L, -1113551769L, -703298914L,
1690554611L, -1500908949L, 138944776L, -223189620L, -1987335271L,
-2139865540L, 1880565999L, -1391046525L, -918967651L, 522589569L,
-519742199L, 212924611L, -1228762382L, -1482324265L, -429491L,
272372280L, -1357429509L, -203144070L, 1068040709L, -1537010950L,
-1008484343L, 1068378693L, 1383807253L, -1674914312L, -1339400916L,
-863237916L, -1247162838L, -116657924L, 1366569826L, 832342336L,
-1760039296L, -1620186737L, -1007657258L, -764708787L, 2026779735L,
1382904614L, -673150152L, -696386579L, 1056438488L, -259194269L,
1246562713L, 1593809010L, 608997775L, -1648804075L, 748094293L,
-469495478L, -708313351L, -1975049055L, 1421574648L, -1319923002L,
-1528586043L, 451405443L, -665998493L, -1141813522L, -1335674707L,
-2075936202L, -777191740L, 366765959L, -635236104L, -514104092L,
929878778L, 602460533L, -1108637928L, -1042022603L, -1152510101L,
889562505L, 970321786L, -2081717191L, 1485471639L, -1665181614L,
-992110867L, -1584178085L, -64873697L, -1301327564L, -1728743018L,
1606641544L, -745457039L, 952310332L, -446651020L, 1964811114L,
919417835L, 1144118062L, -775694267L, -230332004L, 1527788333L,
-2074355143L, -798107801L, 1848101440L, -978462142L, -1488291290L,
480598615L, -1848525914L, -1826592656L, -2000168331L, -796486178L,
-1718268345L, 1961000732L, -1870631843L, -1978886426L, -1790765042L,
845512272L, 868764261L, -176051644L, -1104437017L, -2117141317L,
846171573L, -1802327239L, 327076112L, 1751169301L, 1416211817L,
1318812211L, -526232715L, 565746625L, -1416044679L, 980561471L,
-329270505L, -1113370183L, -1706626482L, -674604833L, -1404967510L,
-1220918685L, 371039639L, 398289733L, 802458391L, -130015079L,
-442522723L, -532773018L, 950798874L, 1880413509L, 1026256690L,
-642248732L, 1502909432L, -985222070L, -1841028995L, 453504756L,
-297052769L, 408707622L, -1542932680L, -813207113L, -1271879355L,
982461741L, -1032353848L, -408796871L, -236472439L, 1222231673L,
1457286405L, 1444619560L, -467429925L, 2120324710L, 755646019L,
767434965L, 998123310L, -1496042887L, -597904707L, 5980822L,
-680904164L, -798937754L, 246307232L, -1539117013L, 1672607054L,
-1872743463L, -1138852060L, 1050742528L, -530007789L, -735851944L,
-78956778L, -22894050L, 1639323813L, -700126944L, -1738204444L,
2146334790L, 965975709L, 707185863L, -405057831L, 1550368085L,
202923197L, 645814520L, -138038281L, 114356696L, -1308671125L,
-1482730683L, -2117884212L, -676423380L, 1495711528L, 1434422019L,
1803755846L, 2024846823L, -983553446L, 1832443599L, 1675340013L,
-773882893L, 297406138L, 1798356501L, -694502319L, 516645126L,
-199936570L, 312854405L, 450720345L, -1818926090L, -2081666313L,
-1883332573L, 1642685147L, -485089831L, 1241630209L, 837616495L,
1680934247L, 276934507L, 873380646L, 1715435107L, -1218078803L,
942405585L, 632590136L, 1992147608L, 540445633L, 48893248L, -1256687487L,
811928177L, 880216344L, 1607621553L, -1825062812L, 402059149L,
-320745648L, -1045478898L, -528838921L, 1383738992L, 1344945255L,
-1191536738L, -716025062L, 1729396927L, -913613156L, -305797281L,
-351272547L, 1048396734L, -1682483660L, -765280870L, 601133887L,
-1799906525L, -216398368L, 652948899L, -1742509448L, -1122354081L,
-1635939014L, -2038399668L, -1100592376L, -578134701L, 1628377535L,
-382726383L, -1945657836L, -1223312502L, -1445026649L, 635607057L,
-1568716150L, -1931654499L, -606585452L, 536062430L, 418031572L,
-1333942401L, 1017292864L, 153680248L, -1440670050L, 2118644640L,
298464026L, 180403444L, 534147358L, -739693710L, 1816860441L,
-1114753078L, 306124926L, -739255879L, -62006362L, -2023678844L,
1230590350L, 1520998182L, 1800076401L, 953689300L, 171722912L,
-303843624L, -15864925L, 879381108L, 667589118L, 1103521364L,
37991603L, 466697570L, -328156625L, 1869079052L, -1644387501L,
526412145L, 2141862417L, -896219642L, 1523381971L, -805940623L,
-11463658L, -363516362L, 1816872769L, 367489961L, -1837011341L,
-941862754L, -30770504L, 1746081966L, 389723718L, -1581475109L,
1551223921L, -1745112824L, 2006931087L, -957639174L, -649543247L,
262275362L, -1099656217L, -1500279621L, -1885580679L, -387848022L,
-931203347L, 1669706655L, -377726274L, -1447457562L, -152427236L,
-2045002699L, -1122163647L, -2031280351L, 617074209L, 1431079088L,
-511538176L, 782805478L, -187015127L, 2686869L, -1436433186L,
-2036386032L, -1167238174L, -568510134L, 1587775141L, -470613196L,
-527492339L, 399118160L, 1531539539L, 1754894818L, -1457247283L,
1495242925L, -466455285L, 869207127L, 1099958350L)
| /lmf/R/lmf-internal.R | no_license | ingted/R-Examples | R | false | false | 8,399 | r |
library(plyr); library(readr)
library(readxl)  # provides read_excel()
library(xlsx)    # provides read.xlsx()
nds1<-read_excel("data/Enclosure_Stoichiometry.xlsx", sheet="NDSrawdata")
read.xlsx("./FEn17_data/Field Encl Stoich.xlsx", sheetIndex = 3, stringsAsFactors=F)  # alternative import of the stoichiometry data (result not stored)
nds<-nds1[-c(167,168),c(2,3,5,6,14)] #remove extra rows and columns
nds<-nds[,c(2:5,1)]#longform bar, treatment, chlorophyll
nd<-nds[nds$Bar.!="047",] #had to remove this because only had two C discs
#define some summary functions
SE<-function(x){sd(x)/sqrt(length(x))}  # standard error of the mean
trtlist<-unique(nd[,c(1:4)]) #site,mussel reach, treatment
ls2<-trtlist[trtlist$Treat!="C",]
a<-list() #start with an empty list
#creates a list that isolates each type of disc (bar,treat) and then gives outer table
for(i in unique(na.exclude(nd$Bar.))){
for(j in c("N", "P", "NP")){
n<-length(a)+1
a[[n]]<-list(i,j,(outer(nd$Chl.A..mg.m2.[nd$Bar.==i & nd$Treat==j],
nd$Chl.A..mg.m2.[nd$Bar.==i & nd$Treat=="C"],FUN="/")))
}
}
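# 'a' now holds one (bar, treatment, matrix) triple per nutrient treatment on each
# bar, where each matrix is every treated/control pairwise chlorophyll ratio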
#decompose list of lists into a list of N, P, NP
# split 'a' by treatment label, then row-bind the ratio matrices within each group
df_list <- tapply(a,
                  sapply(a,`[[`,2), FUN=function(x) do.call(rbind,lapply(x,`[[`,3)))
sapply(df_list, function(x){
list("mean"=mean(unlist(x), na.rm = T),
"median"=median(unlist(x), na.rm = T),
"se"=SE(x[is.finite(x)]),
"sd"=sd(unlist(x), na.rm = T),
"n"=length(x[is.finite(x)]))})
q<-ldply(a, .fun=function(x){data.frame(barID=x[[1]],
trt=x[[2]],
as.data.frame(sapply(x[[3]],unlist)))})
dnova1<-ddply(q, .variables=c("barID", "trt"),
.fun=function(x){data.frame(mean=mean(x[,3], na.rm = T),
median=median(x[,3], na.rm = T),
se=SE(x[is.finite(x[,3]),3]),
sd=sd(x[,3], na.rm = T),
n=length(x[is.finite(x[,3]),3]))})
dnova1$Type<-trtlist[match(dnova1$barID, trtlist$Bar.),2]
dnova1$Week<-trtlist[match(dnova1$barID, trtlist$Bar.),1]
mod1<-lm(log(mean)~Type*trt,data=dnova1)
mod4<-lm(mean~Type*trt,data=dnova1[!dnova1$Week==4,])
summary(mod4)
anova(mod4)
#####################################################################################
###Analysis with mean subtracted Chl-a samples
#####################################################################################
#'normalize' each bar by subtracting out the average chl-a control on that bar
dnova_norm<-ddply(nds, .variables=c("Week","Type","Bar."),
.fun=function(x){data.frame(RR=x$Chl.A..mg.m2.-mean(x$Chl.A..mg.m2.[x$Treat=="C"]), Treat=x$Treat)})
###Conduct anova at the site level after subtracting out the control values for each bar
mod2<-lm(RR~Bar.*Treat,data=dnova_norm[dnova_norm$Week!=4 & dnova_norm$Type=="AL",])
mod<-lm(Chl.A..mg.m2.~Type*Treat, data=nds[nds$Week!=4,])
mod3<-aov(RR~Type*Treat,data=dnova_norm[dnova_norm$Week!=4,])
mod4<-lm(RR~Type*Treat*Week,data=dnova_norm)
tuklet<-TukeyHSD(mod3)
HSm1<-lm(RR~Treat, data=dnova_norm[dnova_norm$Week!=4 & dnova_norm$Type=="HS",])
ALm1<-lm(RR~Treat, data=dnova_norm[dnova_norm$Week!=4 & dnova_norm$Type=="AL",])
library(car)
Anova(mod2,type=2)
summary(mod2)
hist(residuals(mod2), col="darkgray") #normal distribution assumption
plot(fitted(mod2), residuals(mod2)) #homoscedastic assumption
library(lsmeans)
leastm<-lsmeans(mod2, "Treat",adjust="tukey")
cld(leastm, alpha=.05, Letters=letters)
#### plots ####
library(ggplot2)
ggplot(nds[nds$Week!=4,], aes(x=Treat, y=Chl.A..mg.m2.)) + geom_boxplot() +facet_wrap(~Type)
| /NDS_analysis.R | no_license | TraciPopejoy/FEn17 | R | false | false | 3,561 | r |
nds1<-read_excel("data/Enclosure_Stoichiometry.xlsx", sheet="NDSrawdata")
read.xlsx("./FEn17_data/Field Encl Stoich.xlsx", sheetIndex = 3, stringAsFactors=F)
nds<-nds1[-c(167,168),c(2,3,5,6,14)] #remove extra rows and columns
nds<-nds[,c(2:5,1)]#longform bar, treatment, chlorophyll
nd<-nds[nds$Bar.!="047",] #had to remove this because only had two C discs
#define some summary functions
SE<-function(x){sem<-sd(x)/sqrt(length(x))}
trtlist<-unique(nd[,c(1:4)]) #site,mussel reach, treatment
ls2<-trtlist[trtlist$Treat!="C",]
a<-list() #start with an empty list
#creates a list that isolates each type of disc (bar,treat) and then gives outer table
for(i in unique(na.exclude(nd$Bar.))){
for(j in c("N", "P", "NP")){
n<-length(a)+1
a[[n]]<-list(i,j,(outer(nd$Chl.A..mg.m2.[nd$Bar.==i & nd$Treat==j],
nd$Chl.A..mg.m2.[nd$Bar.==i & nd$Treat=="C"],FUN="/")))
}
}
#decompose list of lists into a list of N, P, NP
df_list <- tapply(a,
sapply(a,`[[`,2), FUN=function(x) do.call(rbind,lapply(a,`[[`,3)))
sapply(df_list, function(x){
list("mean"=mean(unlist(x), na.rm = T),
"median"=median(unlist(x), na.rm = T),
"se"=SE(x[is.finite(x)]),
"sd"=sd(unlist(x), na.rm = T),
"n"=length(x[is.finite(x)]))})
q<-ldply(a, .fun=function(x){data.frame(barID=x[[1]],
trt=x[[2]],
as.data.frame(sapply(x[[3]],unlist)))})
dnova1<-ddply(q, .variables=c("barID", "trt"),
.fun=function(x){data.frame(mean=mean(x[,3], na.rm = T),
median=median(x[,3], na.rm = T),
se=SE(x[is.finite(x[,3]),3]),
sd=sd(x[,3], na.rm = T),
n=length(x[is.finite(x[,3]),3]))})
dnova1$Type<-trtlist[match(dnova1$barID, trtlist$Bar.),2]
dnova1$Week<-trtlist[match(dnova1$barID, trtlist$Bar.),1]
mod1<-lm(log(mean)~Type*trt,data=dnova1)
mod4<-lm(mean~Type*trt,data=dnova1[!dnova1$Week==4,])
summary(mod4)
anova(mod4)
#####################################################################################
###Analysis with mean subtracted Chl-a samples
#####################################################################################
#'normalize' each bar by subtracting out the average chl-a control on that bar
dnova_norm<-ddply(nds, .variables=c("Week","Type","Bar."),
.fun=function(x){data.frame(RR=x$Chl.A..mg.m2.-mean(x$Chl.A..mg.m2.[x$Treat=="C"]), Treat=x$Treat)})
###Conduct anova at the site level after subtracting out the control values for each bar
mod2<-lm(RR~Bar.*Treat,data=dnova_norm[dnova_norm$Week!=4 & dnova_norm$Type=="AL",])
mod<-lm(Chl.A..mg.m2.~Type*Treat, data=nds[nds$Week!=4,])
mod3<-aov(RR~Type*Treat,data=dnova_norm[dnova_norm$Week!=4,])
mod4<-lm(RR~Type*Treat*Week,data=dnova_norm)
tuklet<-TukeyHSD(mod3)
HSm1<-lm(RR~Treat, data=dnova_norm[dnova_norm$Week!=4 & dnova_norm$Type=="HS",])
ALm1<-lm(RR~Treat, data=dnova_norm[dnova_norm$Week!=4 & dnova_norm$Type=="AL",])
library(car)
Anova(mod2,type=2)
summary(mod2)
hist(residuals(mod), col="darkgray") #normal distribution assumption
plot(fitted(mod), residuals(mod2)) #homoscedastic assumption
library(lsmeans)
leastm<-lsmeans(mod2, "Treat",adjust="tukey")
cld(leastm, alpha=.05, Letters=letters)
#### plots ####
library(ggplot2)
ggplot(nds[nds$Week!=4,], aes(x=Treat, y=Chl.A..mg.m2.)) + geom_boxplot() +facet_wrap(~Type)
|
tbpgls <- function(phy, trait) {
require(ape)
require(caper)
# Calculate terminal edge lengths
n <- length(phy$tip.label)
# based on post on Liam Revell's blog:
invis <- setNames(phy$edge.length[sapply(1:n, function(x,y) which(y==x), y=phy$edge[,2])], phy$tip.label)
tb <- 1/invis
# Make phylo comparative data object with trait and inverse splits stat for each species
dframe <- data.frame(names(trait), trait, log(tb[as.vector(names(trait))]))
colnames(dframe) <- c("species", "trait", "tb")
data <- comparative.data(data=dframe, phy=phy, names.col="species")
  # PGLS of the relationship between (log) inverse terminal branch length and trait using caper
res <- pgls(tb ~ trait, data=data)
corr <- summary(res)$coefficients[2,1]
pval <- summary(res)$coefficients[2,4]
result <- as.vector(c(corr, pval))
names(result) <- c("PGLS Slope", "P Value")
return(result)
}
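
# Hypothetical usage sketch (simulated tree and trait, not data from the repo);
# 'trait' must be a vector named by the tree's tip labels:
# library(ape)
# phy <- rcoal(50)
# trait <- setNames(rnorm(50), phy$tip.label)
# tbpgls(phy, trait)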
| /R/other_scripts/tbpgls.R | permissive | mgharvey/ES-sim | R | false | false | 875 | r |
require(ape)
require(caper)
# Calculate terminal edge lengths
n <- length(phy$tip.label)
# based on post on Liam Revell's blog:
invis <- setNames(phy$edge.length[sapply(1:n, function(x,y) which(y==x), y=phy$edge[,2])], phy$tip.label)
tb <- 1/invis
# Make phylo comparative data object with trait and inverse splits stat for each species
dframe <- data.frame(names(trait), trait, log(tb[as.vector(names(trait))]))
colnames(dframe) <- c("species", "trait", "tb")
data <- comparative.data(data=dframe, phy=phy, names.col="species")
# PGLS of correlation between inverse splits statistic and trait using Caper
res <- pgls(tb ~ trait, data=data)
corr <- summary(res)$coefficients[2,1]
pval <- summary(res)$coefficients[2,4]
result <- as.vector(c(corr, pval))
names(result) <- c("PGLS Slope", "P Value")
return(result)
}
|
\name{importGTF}
\alias{importGTF}
\title{
Import Transcripts from a GTF file into R
}
\description{
Function for importing a (gzipped or unpacked) GTF/GFF file into R as a \code{switchAnalyzeRlist}. This approach is well suited if you just want to annotate a transcriptome and are not interested in expression. If you are interested in expression estimates it is easier to use \link{importRdata}.
}
\usage{
importGTF(
pathToGTF,
isoformNtFasta = NULL,
extractAaSeq = FALSE,
addAnnotatedORFs=TRUE,
onlyConsiderFullORF=FALSE,
removeNonConvensionalChr=FALSE,
ignoreAfterBar = TRUE,
ignoreAfterSpace = TRUE,
ignoreAfterPeriod=FALSE,
removeTECgenes = TRUE,
PTCDistance=50,
quiet=FALSE
)
}
\arguments{
\item{pathToGTF}{
Can either be:
\itemize{
\item{\code{1}: A string indicating the full path to the (gzipped or unpacked) GTF file which has been quantified. If supplied the exon structure and isoform annotation will be obtained from the GTF file. An example could be "myAnnotation/myGenome/isoformsQuantified.gtf")}
\item{\code{2}: A string indicating the full path to the (gzipped or unpacked) RefSeq GFF file which has been quantified. If supplied the exon structure and isoform annotation will be obtained from the GFF file. Please note only GFF files from RefSeq downloaded from \url{ftp://ftp.ncbi.nlm.nih.gov/genomes/} are supported (see database FAQ in vignette for more info). An example could be "refSeq/isoformsQuantified.gff")}
}
}
\item{isoformNtFasta}{
A (vector of) text string(s) providing the path(s) to a fasta file containing the nucleotide (genomic) sequence of all isoforms quantified. This is useful for: 1) people working with non-model organisms where extracting the sequence from a BSgenome might require extra work. 2) workflow speed-up for people who already have the fasta file (which most people running Salmon, Kallisto or RSEM for the quantification have, as that is used to build the index).
}
\item{extractAaSeq}{
A logic indicating whether the nucleotide sequence imported via \code{isoformNtFasta} should be translated to amino acid sequence and stored in the switchAnalyzeRlist. Requires that ORFs are imported, see \code{addAnnotatedORFs}. Default is TRUE if a fasta file is supplied.
}
\item{addAnnotatedORFs}{ A logic indicating whether the ORF from the GTF should be added to the \code{switchAnalyzeRlist}. This ORF is defined as the regions annotated as 'CDS' in the 'type' column (column 3). Default is TRUE.}
\item{onlyConsiderFullORF}{ A logic indicating whether the ORFs added should only be added if they are fully annotated. Here fully annotated is defined as those that have both an annotated 'start_codon' and 'stop_codon' in the 'type' column (column 3). This argument is only considered if addAnnotatedORFs=TRUE. Default is FALSE.}
\item{removeNonConvensionalChr}{ A logic indicating whether non-conventional chromosomes, here defined as chromosome names containing either a '_' or a period ('.'), should be removed. These regions are typically used to annotate regions that cannot be associated to a specific region (such as the human 'chr1_gl000191_random') or regions quite different due to different haplotypes (e.g. the 'chr6_cox_hap2'). Default is FALSE.}
\item{ignoreAfterBar}{A logic indicating whether to subset the isoform ids by ignoring everything after the first bar ("|"). Useful for analysis of GENCODE files. Default is TRUE.}
\item{ignoreAfterSpace}{A logic indicating whether to subset the isoform ids by ignoring everything after the first space (" "). Useful for analysis of gffutils generated GTF files. Default is TRUE.}
\item{ignoreAfterPeriod}{ A logic indicating whether to subset the gene/isoform ids by ignoring everything after the first period ("."). Should be used with care. Default is FALSE.}
\item{removeTECgenes}{A logic indicating whether to remove genes marked as "To be Experimentally Confirmed" (if annotation is available). The default is TRUE, i.e. to remove them, which is in line
with Gencode recommendations (TEC genes are not in Gencode annotations). For more info about TEC see \url{https://www.gencodegenes.org/pages/biotypes.html}.}
\item{PTCDistance}{ Only considered if \code{addAnnotatedORFs=TRUE}. A numeric giving the premature termination codon-distance: The minimum distance from the annotated STOP to the final exon-exon junction, for a transcript to be marked as NMD-sensitive. Default is 50}
\item{quiet}{ A logic indicating whether to avoid printing progress messages. Default is FALSE}
}
\details{
The GTF file must have the following 3 annotations in column 9: 'transcript_id', 'gene_id', and 'gene_name'. Furthermore, if addAnnotatedORFs is to be used, the 'type' column (column 3) must contain the features marked as 'CDS'. For the onlyConsiderFullORF argument to work, the GTF must also have 'start_codon' and 'stop_codon' annotated in the 'type' column (column 3).
}
\value{
A \code{switchAnalyzeRlist} containing all the gene and transcript information as well as the transcript models. See ?switchAnalyzeRlist for more details.
If \code{addAnnotatedORFs=TRUE} a \code{data.frame} containing the details of the ORF analysis is added to the switchAnalyzeRlist under the name 'orfAnalysis'.
The added data.frame has one row per isoform and contains 11 columns:
\itemize{
\item{\code{isoform_id}: The name of the isoform analyzed. Matches the 'isoform_id' entry in the 'isoformFeatures' entry of the switchAnalyzeRlist}
\item{\code{orfTransciptStart}: The start position of the ORF in transcript coordinates, here defined as the position of the 'A' in the 'AUG' start motif.}
\item{\code{orfTransciptEnd}: The end position of the ORF in transcript coordinates, here defined as the last nucleotide before the STOP codon (meaning the stop codon is not included in these coordinates).}
\item{\code{orfTransciptLength}: The length of the ORF}
\item{\code{orfStarExon}: The exon in which the start codon is}
\item{\code{orfEndExon}: The exon in which the stop codon is}
\item{\code{orfStartGenomic}: The start position of the ORF in genomic coordinates, here defined as the position of the 'A' in the 'AUG' start motif.}
\item{\code{orfEndGenomic}: The end position of the ORF in genomic coordinates, here defined as the last nucleotide before the STOP codon (meaning the stop codon is not included in these coordinates).}
\item{\code{stopDistanceToLastJunction}: Distance from stop codon to the last exon-exon junction}
\item{\code{stopIndex}: The index, counting from the last exon (which is 0), of which exon is the stop codon is in.}
\item{\code{PTC}: A logic indicating whether the isoform is classified as having a Premature Termination Codon. This is defined as having a stop codon more than \code{PTCDistance} (default is 50) nt upstream of the last exon-exon junction.}
}
NA means no information was available, i.e. no ORF (passing the \code{minORFlength} filter) was found.
}
\references{
Vitting-Seerup et al. The Landscape of Isoform Switches in Human Cancers. Mol. Cancer Res. (2017).
}
\author{
Kristoffer Vitting-Seerup
}
\seealso{
\code{\link{createSwitchAnalyzeRlist}}\cr
\code{\link{preFilter}}
}
\examples{
# Note the way of importing files in the following example with
# "system.file('pathToFile', package="IsoformSwitchAnalyzeR") is
# specialized way of accessing the example data in the IsoformSwitchAnalyzeR package
# and not something you need to do - just supply the string e.g.
# "myAnnotation/isoformsQuantified.gtf" to the functions
aSwitchList <- importGTF(pathToGTF=system.file("extdata/example.gtf.gz", package="IsoformSwitchAnalyzeR"))
aSwitchList
}
| /man/importGTF.Rd | no_license | johnalanwillis/IsoformSwitchAnalyzeR | R | false | false | 7,680 | rd |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cross_validation.R
\name{cross_validation}
\alias{cross_validation}
\title{A function that uses cross-validation to select the seed-wave combination for
estimation of a degree's frequency.}
\usage{
cross_validation(network, n.seeds, n.neigh, n.boot, kmax, proxyRep = 19,
proxyOrder = 30)
}
\arguments{
\item{network}{A network object that is a list containing:
\describe{
\item{edges}{The edgelist of the network. A two column
\code{matrix} where each row is an edge.}
\item{degree}{The degree sequence of the network, which is
an \code{integer} vector of length n.}
\item{n}{The network order.}
}
The object can be created by \code{\link{local.network.MR.new5}} or
it can be imported.}
\item{n.seeds}{A numeric vector of the different seed sample sizes to use
in cross-validation.}
\item{n.neigh}{A numeric vector of the different numbers of waves to use
in cross-validation.}
\item{n.boot}{The number of bootstrap samples.}
\item{kmax}{The largest degree to perform cross-validation on.}
\item{proxyRep}{The number of times to sample a proxy. Default is 19.}
\item{proxyOrder}{The size of the proxy sample. Default is 30.}
}
\value{
A list consisting of
\item{selected_seed_wave}{A list of 3 matrices (one per estimation method.
See supporting documentation \code{\link{bootdeg}}). Each matrix provides
the best seed-wave combinations (obtained via cross-validation) for
the respective estimation method.}
\item{selected_seed_wave}{A list of 3 matrices (one per estimation method.
See supporting documentation \code{\link{bootdeg}}). Each matrix provides
the 95 percent bootstrap confidence intervals for the estimated degree frequency
using the best seed-wave combinations (see above).}
}
\description{
The function's inputs are a network, a vector of possible seed sample-sizes,
a vector of possible waves, and a few tuning parameters. The output will
contain the best seed-wave combination for each degree and the width of the
95 percent bootstrap confidence intervals at each degree for
the best seed-wave combination.
}
\note{
Only one LSMI per seed-wave combination is currently supported.
}
\examples{
net <- artificial_networks[[1]]
a <- cross_validation(network = net, n.seeds = c(10, 20, 30), n.neigh = c(1, 2),
n.boot = 200, kmax = 30)
}
\references{
Efron, B. (1979). Bootstrap methods: another look at the
jackknife. The annals of Statistics, 1-26.
Thompson, M. E., Ramirez Ramirez, L. L., Lyubchich, V. and
Gel, Y. R. (2015), Using the bootstrap for statistical inference
on random graphs. Can J Statistics. doi: 10.1002/cjs.11271
}
| /man/cross_validation.Rd | no_license | cakcora/snowboot | R | false | true | 2,665 | rd |
% Please edit documentation in R/cross_validation.R
\name{cross_validation}
\alias{cross_validation}
\title{A function that uses cross-validation to select seed-wave combination for
estimation of a degree's frequency.}
\usage{
cross_validation(network, n.seeds, n.neigh, n.boot, kmax, proxyRep = 19,
proxyOrder = 30)
}
\arguments{
\item{network}{A network object that is list containing:
\describe{
\item{edges}{The edgelist of the network. A two column
\code{matrix} where each row is an edge.}
\item{degree}{The degree sequence of the network, which is
an \code{integer} vector of length n.}
\item{n}{The network order.}
}
The object can be created by \code{\link{local.network.MR.new5}} or
it can be imported.}
\item{n.seeds}{A numeric vector for the different sample sizes of seed to use
in cross-validation.}
\item{n.neigh}{A numeric vector for the different waves to use
in cross-validation.}
\item{n.boot}{The number of bootstrap sample.}
\item{kmax}{The largest degree to preform cross-validation on.}
\item{proxyRep}{The number of time to sample a proxy. Default is 19.}
\item{proxyOrder}{The size of the proxy sample. Default is 30.}
}
\value{
A list consisting of
\item{selected_seed_wave}{A list of 3 matrices (one per estimation method.
See supporting documentation \code{\link{bootdeg}}). Each matrix provides
the best seed-wave combinations (obtained via cross-validation) for
the respective estimation method.}
\item{selected_seed_wave}{A list of 3 matrices (one per estimation method.
See supporting documentation \code{\link{bootdeg}}). Each matrix provides
the 95 percent bootstrap confidence intervals for the estimated degree frequency
using the best seed-wave combinations (see above).}
}
\description{
The function's inputs are a network, a vector of possible seed sample-sizes,
a vector of possible waves, and a few tuning parameters. The output will
contain the best seed-wave combination for each degree and the width of the
95 percent bootstrap confidence intervals at each degree for
the best seed-wave combination.
}
\note{
Only one LSMI per seed-wave combination is currently supported.
}
\examples{
net <- artificial_networks[[1]]
a <- cross_validation(network = net, n.seeds = c(10, 20, 30), n.neigh = c(1, 2),
n.boot = 200, kmax = 30)
}
\references{
Efron, B. (1979). Bootstrap methods: another look at the
jackknife. The annals of Statistics, 1-26.
Thompson, M. E., Ramirez Ramirez, L. L., Lyubchich, V. and
Gel, Y. R. (2015), Using the bootstrap for statistical inference
on random graphs. Can J Statistics. doi: 10.1002/cjs.11271
}
|
####################################################################################
####################################################################################
#                              Starcraft project                                  #
####################################################################################
####################################################################################
####################################################################################
# Import data #
####################################################################################
starcraft_scouting<-read.csv(file = "00-DATA_BASE/starcraft_scouting.csv")
| /01-DATA_MANIPULATION.R | no_license | DamienHennom/Kaggle_Starcraft | R | false | false | 758 | r |
library(truncnorm)
library(mvtnorm)
getwd()
setwd("/Users/cheeseloveicecream/Documents/missouri/STAT4999/NBA")
n.raw<-data.frame(read.csv("ndata.csv",sep=",",quote=""))
name1<-data.frame(read.csv("name.csv",sep=",",quote=""))
n.raw1<-data.matrix(n.raw)
colnames(n.raw1)<-c("teamH","PtsH","teamA","PtsA")
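# build the 1230-game design matrix: column 1 is a constant (home-advantage)
# term; each game places +1/-1 in the columns of the two teams that played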
x<- matrix(0,1230,31)
x[,1]<-1
for(i in 1:1230){
if(n.raw1[i,2]<n.raw1[i,4]){
x[i,(n.raw1[i,1]+1)]<- 1
x[i,(n.raw1[i,3]+1)]<- -1
}
else{
x[i,(n.raw1[i,1]+1)]<- -1
x[i,(n.raw1[i,3]+1)]<- 1
}
}
set.seed(234567)
xtranspose<-t(x)
beta0<-matrix(0,ncol=1,nrow=31)
mu<-x%*%beta0
diagsigma<-diag(1,31,31)
Lambda<-diagsigma
Lambda1<-solve(Lambda)
phi<-rep(NA,1230)
z<-solve(Lambda1+xtranspose%*%x)
v<-z%*%xtranspose
N<-10000
output<-matrix(NA,N,31)
sum1<-sum(n.raw1[,2]>n.raw1[,4])
sum2<-sum(n.raw1[,2]<n.raw1[,4])
ptm<- proc.time()
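# Gibbs sampler (Albert-Chib data augmentation for the probit win/loss model):
# step 1 draws the latent game variables phi from truncated normals given beta;
# step 2 draws the team-ability vector beta from its multivariate normal full conditional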
for(n in 1:N){
#draw phi from N+(mu_g,1)
# phi[i]<-qnorm(runif(1)*(1-pnorm(mu[i],1)),mu[i],1)
# phi[i]<- mu[i]+qnorm((1-runif(1)*pnorm(mu[i],mu[i],1)),mu[i],1)
phi[n.raw1[,2]>n.raw1[,4]]<- rtruncnorm(sum1,a=0,b=Inf,mean=mu[(n.raw1[,2]>n.raw1[,4])],sd=1)
# a neutral-stadium game counts the winning team as the home team, since home advantage == 0 there
#draw phi from N-(mu_g,1)
# phi[i]<-qnorm((pnorm(mu[i],1)*runif(1)),mu[i],1)
# phi[i]<- mu[i]-qnorm(runif(1)*pnorm(mu[i],mu[i],1),mu[i],1)
phi[n.raw1[,2]<n.raw1[,4]]<- rtruncnorm(sum2,a=-Inf,b=0,mean=mu[(n.raw1[,2]< n.raw1[,4])],sd=1)
# draw beta from N(mubeta, sigmabeta)
mubeta <- v%*%phi
sigmabeta <- z
beta1<-rmvnorm(1,mubeta,sigmabeta)
output[n,]<-beta1
mu<-x%*%t(beta1)}
proc.time() - ptm  # elapsed sampling time
estbg <- as.matrix(apply(output, 2, mean))
estbg1<- data.frame(c("home",levels(name1[,1])),estbg)
names(estbg1)[1]<- paste("team")
names(estbg1)[2]<- paste("ability")
estbg2<- estbg1[order(estbg1$ability,estbg1$team,decreasing=TRUE),]
# write.table(estbg2,"nba_gibbs.txt",sep="\t")
# estbg2
| /NBA/NBA_Gibbs.R | no_license | cks1001652/R | R | false | false | 1,925 | r |
library(mvtnorm)
getwd()
setwd("/Users/cheeseloveicecream/Documents/missouri/STAT4999/NBA")
n.raw<-data.frame(read.csv("ndata.csv",sep=",",quote=""))
name1<-data.frame(read.csv("name.csv",sep=",",quote=""))
n.raw1<-data.matrix(n.raw)
colnames(n.raw1)<-c("teamH","PtsH","teamA","PtsA")
x<- matrix(0,1230,31)
x[,1]<-1
for(i in 1:1230){
if(n.raw1[i,2]<n.raw1[i,4]){
x[i,(n.raw1[i,1]+1)]<- 1
x[i,(n.raw1[i,3]+1)]<- -1
}
else{
x[i,(n.raw1[i,1]+1)]<- -1
x[i,(n.raw1[i,3]+1)]<- 1
}
}
set.seed(234567)
xtranspose<-t(x)
beta0<-matrix(0,ncol=1,nrow=31)
mu<-x%*%beta0
diagsigma<-diag(1,31,31)
Lambda<-diagsigma
Lambda1<-solve(Lambda)
phi<-rep(NA,1230)
z<-solve(Lambda1+xtranspose%*%x)
v<-z%*%xtranspose
N<-10000
output<-matrix(NA,N,31)
sum1<-sum(n.raw1[,2]>n.raw1[,4])
sum2<-sum(n.raw1[,2]<n.raw1[,4])
ptm<- proc.time()
for(n in 1:N){
#draw phi from N+(mu_g,1)
# phi[i]<-qnorm(runif(1)*(1-pnorm(mu[i],1)),mu[i],1)
# phi[i]<- mu[i]+qnorm((1-runif(1)*pnorm(mu[i],mu[i],1)),mu[i],1)
phi[n.raw1[,2]>n.raw1[,4]]<- rtruncnorm(sum1,a=0,b=Inf,mean=mu[(n.raw1[,2]>n.raw1[,4])],sd=1)
#neutral stadium counts as win team being home team cause homeadv==0
#draw phi from N-(mu_g,1)
# phi[i]<-qnorm((pnorm(mu[i],1)*runif(1)),mu[i],1)
# phi[i]<- mu[i]-qnorm(runif(1)*pnorm(mu[i],mu[i],1),mu[i],1)
phi[n.raw1[,2]<n.raw1[,4]]<- rtruncnorm(sum2,a=-Inf,b=0,mean=mu[(n.raw1[,2]< n.raw1[,4])],sd=1)
# #draw B from N(mubeta,sigmabeta)
mubeta <- v%*%phi
sigmabeta <- z
beta1<-rmvnorm(1,mubeta,sigmabeta)
output[n,]<-beta1
mu<-x%*%t(beta1)}
proc.time() -ptm
estbg <- as.matrix(apply(output, 2, mean))
estbg1<- data.frame(c("home",levels(name1[,1])),estbg)
names(estbg1)[1]<- paste("team")
names(estbg1)[2]<- paste("ability")
attach(estbg1)
estbg2<- estbg1[order(ability,team,decreasing=TRUE),]
# write.table(estbg2,"nba_gibbs.txt",sep="\t")
# estbg2 |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/oisst_data_funs.R
\name{env_data_extract}
\alias{env_data_extract}
\title{Satellite Data Extraction Function}
\usage{
env_data_extract(
data.set = "OISST",
dates = NULL,
box = c(-77, -60, 35, 46),
out.dir = here::here("data", "OISST_thredd_test"),
mask = NULL
)
}
\arguments{
\item{data.set}{Env data to extract (options = ERSST, OISST, MURSST)}
\item{dates}{If !NULL, subset full time series to specific dates. Dates should be specified as dates = c("YYYY-MM-DD", "YYYY-MM-DD")}
\item{box}{If !NULL, crop rasters to a specific box for faster downloading and processing. Box should be specified as box = c(xmin, xmax, ymin, ymax).}
\item{out.dir}{Directory to store resulting raster stack. Note, this will overwrite any rasters with the existing name.}
\item{mask}{optional mask to trim data with}
}
\value{
stack.out Stack of daily OISST files and also saves the raster stack as a .grd file in the output directory specified by out.dir
}
\description{
This function accesses webhosted satellite data and then downloads a subset of the data based
on dates and long/lat bounding box. After downloading the data, the function processes it and saves it as
one raster stack file. Installs "ncdf4" and "raster" packages if not installed.
This function is helpful for getting fresh downloads from thredds. In most cases it is faster to access the data that is stored in shared
resource locations.
}
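% A hypothetical usage sketch (added; the argument values below simply echo
% the documented defaults and are not from the original file):
\examples{
\dontrun{
sst <- env_data_extract(data.set = "OISST",
                        dates = c("2016-06-01", "2016-06-10"),
                        box = c(-77, -60, 35, 46))
}
}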
| /man/env_data_extract.Rd | permissive | dzaugis/gmRi | R | false | true | 1,480 | rd |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stats-lm-tidiers.R
\name{augment.lm}
\alias{augment.lm}
\title{Augment data with information from a(n) lm object}
\usage{
\method{augment}{lm}(x, data = stats::model.frame(x), newdata,
type.predict, type.residuals, ...)
}
\arguments{
\item{x}{An \code{lm} object created by \code{\link[stats:lm]{stats::lm()}}.}
\item{data}{A \code{\link[=data.frame]{data.frame()}} or \code{\link[tibble:tibble]{tibble::tibble()}} containing the original
data that was used to produce the object \code{x}. Defaults to
\code{stats::model.frame(x)} so that \code{augment(my_fit)} returns the augmented
original data. \strong{Do not} pass new data to the \code{data} argument.
Augment will report information such as influence and Cook's distance for
data passed to the \code{data} argument. These measures are only defined for
the original training data.}
\item{newdata}{A \code{\link[=data.frame]{data.frame()}} or \code{\link[tibble:tibble]{tibble::tibble()}} containing all
the original predictors used to create \code{x}. Defaults to \code{NULL}, indicating
that nothing has been passed to \code{newdata}. If \code{newdata} is specified,
the \code{data} argument will be ignored.}
\item{type.predict}{Type of predictions to use when \code{x} is a \code{glm} object.
Passed to \code{\link[stats:predict.glm]{stats::predict.glm()}}.}
\item{type.residuals}{Type of residuals to use when \code{x} is a \code{glm} object.
Passed to \code{\link[stats:residuals.glm]{stats::residuals.glm()}}.}
\item{...}{Additional arguments. Not used. Needed to match generic
signature only. \strong{Cautionary note:} Misspelled arguments will be
absorbed in \code{...}, where they will be ignored. If the misspelled
argument has a default value, the default value will be used.
For example, if you pass \code{conf.lvel = 0.9}, all computation will
proceed using \code{conf.level = 0.95}. Additionally, if you pass
\code{newdata = my_tibble} to an \code{\link[=augment]{augment()}} method that does not
accept a \code{newdata} argument, it will use the default value for
the \code{data} argument.}
}
\value{
When \code{newdata} is not supplied \code{augment.lm} returns
one row for each observation, with seven columns added to the original
data:
\item{.hat}{Diagonal of the hat matrix}
\item{.sigma}{Estimate of residual standard deviation when
corresponding observation is dropped from model}
\item{.cooksd}{Cook's distance, \code{\link[=cooks.distance]{cooks.distance()}}}
\item{.fitted}{Fitted values of model}
\item{.se.fit}{Standard errors of fitted values}
\item{.resid}{Residuals}
\item{.std.resid}{Standardised residuals}
Some unusual \code{lm} objects, such as \code{rlm} from MASS, may omit \code{.cooksd}
and \code{.std.resid}. \code{gam} from mgcv omits \code{.sigma}.
When \code{newdata} is supplied, returns one row for each observation, with
three columns added to the new data:
\item{.fitted}{Fitted values of model}
\item{.se.fit}{Standard errors of fitted values}
\item{.resid}{Residuals of fitted values on the new data}
}
\description{
Augment accepts a model object and a dataset and adds
information about each observation in the dataset. Most commonly, this
includes predicted values in the \code{.fitted} column, residuals in the
\code{.resid} column, and standard errors for the fitted values in a \code{.se.fit}
column. New columns always begin with a \code{.} prefix to avoid overwriting
columns in the original dataset.
Users may pass data to augment via either the \code{data} argument or the
\code{newdata} argument. If the user passes data to the \code{data} argument,
it \strong{must} be exactly the data that was used to fit the model
object. Pass datasets to \code{newdata} to augment data that was not used
during model fitting. This still requires that all columns used to fit
the model are present.
Augment will often behave differently depending on whether \code{data} or
\code{newdata} is specified. This is because there is often information
associated with training observations (such as influence or related
measures) that is not meaningfully defined for new observations.
For convenience, many augment methods provide default \code{data} arguments,
so that \code{augment(fit)} will return the augmented training data. In these
cases augment tries to reconstruct the original data based on the model
object, with some varying degrees of success.
The augmented dataset is always returned as a \link[tibble:tibble]{tibble::tibble} with the
\strong{same number of rows} as the passed dataset. This means that the
passed data must be coercible to a tibble. At this time, tibbles do not
support matrix-columns. This means you should not specify a matrix
of covariates in a model formula during the original model fitting
process, and that \code{\link[splines:ns]{splines::ns()}}, \code{\link[stats:poly]{stats::poly()}} and
\code{\link[survival:Surv]{survival::Surv()}} objects are not supported in input data. If you
encounter errors, try explicitly passing a tibble, or fitting the original
model on data in a tibble.
We are in the process of defining behaviors for models fit with various
\link{na.action} arguments, but make no guarantees about behavior when data is
missing at this time.
}
\details{
When the modeling was performed with \code{na.action = "na.omit"}
(as is the typical default), rows with NA in the initial data are omitted
entirely from the augmented data frame. When the modeling was performed
with \code{na.action = "na.exclude"}, one should provide the original data
as a second argument, at which point the augmented data will contain those
rows (typically with NAs in place of the new columns). If the original data
is not provided to \code{\link[=augment]{augment()}} and \code{na.action = "na.exclude"}, a
warning is raised and the incomplete rows are dropped.
}
\seealso{
\link{na.action}
\code{\link[=augment]{augment()}}, \code{\link[stats:predict.lm]{stats::predict.lm()}}
Other lm tidiers: \code{\link{augment.glm}},
\code{\link{glance.glm}}, \code{\link{glance.lm}},
\code{\link{tidy.glm}}, \code{\link{tidy.lm}}
}
\concept{lm tidiers}
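% A minimal usage sketch (added; the fit and the column behaviour follow the
% documentation above rather than the source code):
\examples{
library(broom)
fit <- lm(mpg ~ wt + qsec, data = mtcars)
augment(fit)                          # training data plus .fitted, .resid, ...
augment(fit, newdata = head(mtcars))  # adds .fitted, .se.fit and .resid
}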
| /man/augment.lm.Rd | no_license | sjewo/broom | R | false | true | 6,172 | rd |
| /ejercicio12.R | no_license | naimmanriquez/Estadistica_R_Tecmilenio | R | false | false | 2,417 | r |
## ----setup, include = FALSE----------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## ----echo=TRUE,eval = FALSE----------------------------------------------
# newData <- hydramse::lengthen_hydra_data(hydradata::hydraDataList,nYrs=150)
## ----echo=TRUE,eval =FALSE-----------------------------------------------
# outDirForDatPin <- here::here("darwin") # folder for dumping hydra output prior to analysis
# outPath <- here::here("successfulSims") # folder containing parameters of successful model runs
#
# if(!dir.exists(outDirForDatPin)){dir.create(outDirForDatPin)}
# if(!dir.exists(outPath)){dir.create(outPath)}
## ---- eval=F,echo=T------------------------------------------------------
# hydraVersion <- "hydra_sim"
## ----eval=F, echo=T------------------------------------------------------
# ipass <- 0
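# # NOTE (added): in the full vignette this simulate, run, and filter sequence
# # sits inside a loop; ipass counts the parameter sets that pass all three
# # historical-equivalence rules below.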
# simulationRules <- hydramse::darwinRules
# darwinD <- hydramse::darwinData
#
# # simulate a set of parameters
# simulatedValues <- hydramse::simulate_parameters(darwinD,simulationRules,SRFunctionChoice = NULL)
# # combine with extended time series data for darwinian use
# hydraD <- hydramse::update_hydra_data(hydradata::hydraDataList,simulatedValues) # update with simulated values
# hydraD <- hydramse::update_hydra_data(hydradata::hydraDataList,noFishingData) # update with no-fishing data
# # create dat and pin files
# inputOptions <- hydradata::setup_default_inputs(outDir=outDirForDatPin) # gets options for hydraRuns
# inputOptions$scenarioFlag <- "darwin"
# hydraD$flagMSE <- 2 # reduces output from hydra run
# hydraD$recStochastic <- hydraD$recStochastic*0 # no stochasticity used in model run
# hydraD <- hydradata::create_datpin_files(inputOptions,hydraD) # creates dat and pin file
#
# # run hydra
# #specify the path the input files and the Hydra model executable
# datPath <- paste0(outDirForDatPin,"/",inputOptions$outputFilename,".dat")
# pinPath <- paste0(outDirForDatPin,"/",inputOptions$outputFilename,".pin")
# exePath <- paste(pathToTPL,hydraVersion,sep="/")
# hydramse::run_single_hydra(iseed=1,exePath=exePath,datPath=datPath,pinPath=pinPath)
#
# # move output file
# if (Sys.info()['sysname']=="Windows") {
# shell(paste0("move *.text ",paste0(outDirForDatPin,"/")),intern=T) # move files to darwin folder
# } else if (Sys.info()['sysname']=="Linux") {
# system(paste0("mv *.text ",paste0(outDirForDatPin,"/")),intern=T) # move files to darwin folder
# }
## ----eval= F,echo=T------------------------------------------------------
# # processes output from model (1 file)
# output <- hydramse::process_darwin_output(outDirForDatPin)
# biomass <- output$biomass
# catch <- output$catch
# # Do these simulations satisfy the rules(historical equivalence)
# # No Fishing Biomass Rule. assumes equilibrium is reached
# rule1 <- hydramse::rule1_biomass(biomass,nYrsFishing,stockRecruitData$historicBounds,simulationRules)
# if(rule1$pass == F) { next}
# # Fishing Biomass Rule. use mean of last 10 years
# rule2 <- hydramse::rule2_biomass(biomass,nYrs,stockRecruitData$historicBounds,simulationRules)
# if(rule2$pass == F) { next}
# # Catch Rule
# rule3 <- hydramse::rule3_landings(catch,nYrs,stockRecruitData$historicBounds,simulationRules)
# if(rule3$pass == F) { next}
#
# ipass <- ipass + 1
# # save all data to RDA file for use in MSE
# darwinRes <- list(hydraDarwin=hydraD,simulatedValues=simulatedValues,simulationRules=simulationRules,inputOptions=inputOptions)
# saveRDS(darwinRes,file=paste0(outPath,"/success",ipass,".RDS"))
#
#
## ----eval=F,echo=T-------------------------------------------------------
# rootFolder <- "define_the_folder_where_output_for_all_scenario_runs_be_stored"
# darwinRes <- readRDS(file=paste0(outPath,"/success1.RDS")) # read in the RDS
# dataToUse <- hydramse::update_hydra_data(hydradata::hydraDataList,darwinRes$simulatedValues)# sets the simulated values to the hydra model data
# hcrTypes=c("Fixed","Ramp") # harvest control rules types
# hcrLevels=c("Complex","HTSpecies","LTSpecies") #harvest control rule levels
# scenarios <- apply(expand.grid(hcrTypes,hcrLevels),1,paste,collapse="")
# # Create harvest control rule levels
# dataToUse <- hydradata::set_hcr(dataToUse,minMaxExploitations = c(.05,0.4),increment=0.05)
# dataToUse$flagMSE <- 1
# exploitationRates = round(100*dataToUse$exploitationOptions[dataToUse$Nthresholds,])
# # create folder structure for run (given exploitation rates). This creates folders in users working directory
# folderStructure <- hydramse::create_folder_setup(rootFolder,exploitationRates = exploitationRates)
# scenarios <- unique(apply(as.matrix(subset(folderStructure,select=c("hcrType","hcrLevels"))),1,function(x) paste0(x,collapse = "")))
# folderDirs <- apply(folderStructure,1,function(x) paste0(x,collapse = "")) # vector of dirs
# outputScenarioDirs <- here::here(rootFolder,paste0("Exploitation",folderDirs))
#
# for (iscenario in 1:length(outputScenarioDirs)) {
# outputScenarioDir <- outputScenarioDirs[iscenario]
# print(outputScenarioDir)
# # set up scenario types to run
# if (grepl("LT",folderStructure$hcrLevels[iscenario])) speciesFlag <- "low"
# if (grepl("HT",folderStructure$hcrLevels[iscenario])) speciesFlag <- "high"
# if (grepl("omplex",folderStructure$hcrLevels[iscenario])) speciesFlag <- "none"
# scenarioInputs <- hydradata::setup_default_inputs(outDir=outputScenarioDir,scenarioFlag="assessment",temperatureFlag="mean",
# scenarioType=folderStructure$hcrType[iscenario],maxExploitationRate=as.numeric(folderStructure$exploitationRate[iscenario]),
# assessmentSpeciesFlag=speciesFlag)
#
# # create dat and pin files for scenario
# dataToPrint <- hydradata::create_datpin_files(listOfParameters=scenarioInputs, dataList=dataToUse)
# }
## ----eval=F, echo = T----------------------------------------------------
# #nCores <- parallel::detectCores()-1
# #cl <- parallel::makeCluster(nCores)
#
# nSims <- 100 # set number of simulation for each scenario
# exePath <- paste(pathToTPL,hydraVersion,sep="/")
# for (iscenario in 1:dim(folderStructure)[1]) {
# datpinPath <- here::here(rootFolder,paste0("Exploitation",paste0(folderStructure[iscenario,],collapse="")))
# datPath <- paste0(datpinPath,"/",hydraVersion,".dat")
# pinPath <- paste0(datpinPath,"/",hydraVersion,".pin")
# #parallel::parLapply(cl,1:nSims,hydramse::run_single_hydra,exePath=exePath,datPath=datPath,pinPath=pinPath)
# lapply(1:nSims,hydramse::run_single_hydra,exePath=exePath,datPath=datPath,pinPath=pinPath)
# # move files
# if (Sys.info()['sysname']=="Windows") {
# shell(paste0("move *.txt ",paste0(datpinPath,"/indices")),intern=T) # move files to assessment folder
# } else if (Sys.info()['sysname']=="Linux") {
# system(paste0("mv *.txt ",paste0(datpinPath,"/indices")),intern=T) # move files to assessment folder
# }
# }
# #parallel::stopCluster(cl)
#
## ---- eval = F,echo = T--------------------------------------------------
# indices <- c("index_LFI_Catch","index_LFI_Biomass","index_stdev_catch","avByr","est_catch_biomass","index_status_species","index_status_guild")
# rD <- read.csv("revenuePricePerPound2012.csv",header=TRUE)
# otherData <- list()
# otherData$name = rD[,1]
# otherData$ppp = rD[,2]
# hydramse::process_model_runs(dataToUse,indices,scenarios,rootFolder,outputScenarioDirs,otherData,outputType="indices")
#
## ---- eval=F, echo=T-----------------------------------------------------
# hydramse::plot_box_whiskers(here::here(),rootFolder,inputFile="species_bio_rate.rds")
| /vignettes/package_usage.R | no_license | andybeet/hydramse | R | false | false | 7,891 | r |
setwd("C:/Users/dyeany/Dropbox/2018_ShaleGas_Analysis/2018data")
coords <- read.csv("pointID_coord.csv", stringsAsFactors = FALSE)
bird <- read.csv("bird_veg_2017_test.csv", stringsAsFactors = FALSE)
coords <- coords[c(3,7:8)]
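# positions assumed from the source CSVs: column 3 is pt_id and columns 7:8
# are the x/y coordinates (added note)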
bird <- merge(bird,coords,by="pt_id")
bird <- bird[c(2:3,1,4:106)]
write.csv(bird,"bird_veg_2017_xy.csv", row.names=FALSE)
| /xy_join.R | no_license | PNHP/ShaleGasBird | R | false | false | 354 | r |
# Color housekeeping
library(RColorBrewer)
rf <- colorRampPalette(rev(brewer.pal(11,'Spectral')))
r <- rf(32)
# Create normally distributed data for plotting
x <- rnorm(5000, mean = 1.5)
y <- rnorm(5000, mean = 1.6)
df <- data.frame(x,y)
# Plot
plot(df, pch=16, col='black', cex=0.5)
##### OPTION 1: hexbin from package 'hexbin' #######
library(hexbin)
# Create hexbin object and plot
h <- hexbin(df)
plot(h)
plot(h, colramp=rf)
# hexbinplot function allows greater flexibility
hexbinplot(y~x, data=df, colramp=rf)
# Setting max and mins
hexbinplot(y~x, data=df, colramp=rf, mincnt=2, maxcnt=60)
# Scaling of legend - must provide both trans and inv functions
hexbinplot(y~x, data=df, colramp=rf, trans=log, inv=exp)
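##### OPTION 2 (added sketch): base-R smoothScatter #######
# Not part of the original file; shown for comparison with hexbin. It draws a
# kernel-density-smoothed scatter instead of hexagonal bins and needs no
# packages beyond base graphics.
smoothScatter(df$x, df$y, colramp = rf, nrpoints = 100)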
| /VIZ/histograms.r | no_license | Aurametrix/R | R | false | false | 718 | r |
library(greybox)
### Name: cramer
### Title: Calculate Cramer's V for categorical variables
### Aliases: cramer
### Keywords: htest
### ** Examples
cramer(mtcars$am, mtcars$gear)
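# Added note: Cramer's V lies in [0, 1]; 0 indicates no association between
# the two categorical variables and 1 a perfect association.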
| /data/genthat_extracted_code/greybox/examples/cramer.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 188 | r |
setwd("/home/hoagy/crime_regression/")
regression.coefs_burglary <- c()
standard.errors <- c()
for (i in 1:20) {
panel.data <- readRDS(paste("data/prepared_datasets/burglary_by_month_av_", i, ".RDS", sep=""))
panel.data <- panel.data[!is.na(panel.data[, ncol(panel.data)]), ]
fixed.effects.reg <- lm(as.formula(paste("crimerate ~ ", paste(tail(names(panel.data), -3), collapse=" + "))),
data=panel.data)
print(paste("completed coefficient estimation for", i, "months"))
regression.coefs_burglary[i] <- tail(fixed.effects.reg$coefficients, 1)
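  # Added completion: standard.errors is declared above but never filled in
  # the original script; record the SE of the same (last) coefficient here.
  coef.tab <- coef(summary(fixed.effects.reg))
  standard.errors[i] <- coef.tab[nrow(coef.tab), "Std. Error"]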
}
| /regressions_av.R | no_license | HoagyC/crime-regression | R | false | false | 588 | r |
timss.log.pv <-
function(pvlabel="BSMMAT", x, by, cutoff, data,
export=FALSE, name= "output", folder=getwd()) {
intsvy.log.pv(x=x, pvlabel=pvlabel, cutoff=cutoff, by=by, data=data, export=export,
name= name, folder=folder, config=timss8_conf)
}
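# Hypothetical usage (added; the data set and variable names here are
# assumptions, not from the source package):
# timss.log.pv(pvlabel = "BSMMAT", x = "ITSEX", by = "IDCNTRYL",
#              cutoff = 550, data = timss8)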
| /intsvy/R/timss.log.pv.R | no_license | ingted/R-Examples | R | false | false | 293 | r |
## Function translated automatically using 'matlab.to.r()'
## Author: Andrew Hooker
m1 <- function(model_switch,xt_ind,x,a,bpop,b_ind,bocc_ind,d,poped.db){
#
# function computes the derivative of the
# linearized model function w.r.t. bpop
# for an individual
#
# the output is a matrix with dimensions (ind_samps X nbpop)
df_dbeta = zeros(size(xt_ind,1),sum(poped.db$parameters$notfixed_bpop))
epsi0 = zeros(1,length(poped.db$parameters$notfixed_sigma))
h = poped.db$settings$hm1
# create linearized model
if((poped.db$settings$iApproximationMethod==0 || poped.db$settings$iApproximationMethod==3) ){#FO, FOI
b_ind=zeros(poped.db$parameters$NumRanEff,1)
}
if((poped.db$settings$m1_switch[1] == 1)){
#Central approximation
k=1
for(i in 1:poped.db$parameters$nbpop){
if((poped.db$parameters$notfixed_bpop[i]==1)){
bpop_plus=bpop
bpop_minus=bpop
bpop_plus[i]=bpop_plus[i]+h
bpop_minus[i]=bpop_minus[i]-h
if((poped.db$settings$bCalculateEBE)){
start_bind = t(b_ind)
b_ind_plus = ind_estimates(poped.db$mean_data,bpop_plus,d,poped.db$parameters$sigma,start_bind,(poped.db$settings$iApproximationMethod==2),FALSE,model_switch,xt_ind,x,a,b_ind,bocc_ind,poped.db)
b_ind_minus = ind_estimates(poped.db$mean_data,bpop_minus,d,poped.db$parameters$sigma,start_bind,(poped.db$settings$iApproximationMethod==2),FALSE,model_switch,xt_ind,x,a,b_ind,bocc_ind,poped.db)
} else {
b_ind_plus = b_ind
b_ind_minus = b_ind
}
g_plus=feval(poped.db$model$fg_pointer,x,a,bpop_plus,b_ind_plus,bocc_ind)
g_minus=feval(poped.db$model$fg_pointer,x,a,bpop_minus,b_ind_minus,bocc_ind)
if((poped.db$settings$iApproximationMethod==0 || poped.db$settings$iApproximationMethod==3 || (isempty(b_ind) && isempty(bocc_ind))) ){#FO, FOI
returnArgs <- feval(poped.db$model$ferror_pointer,model_switch,xt_ind,g_plus,epsi0,poped.db)
ferror_plus <- returnArgs[[1]]
poped.db <- returnArgs[[2]]
returnArgs <- feval(poped.db$model$ferror_pointer,model_switch,xt_ind,g_minus,epsi0,poped.db)
ferror_minus <- returnArgs[[1]]
poped.db <- returnArgs[[2]]
if((poped.db$settings$bUseSecondOrder)){
hess_eta_plus = zeros(length(xt_ind),1)
hess_eta_minus = zeros(length(xt_ind),1)
for(o in 1:length(xt_ind)){
hessian_eta_plus = hessian_eta_complex(model_switch[o],xt_ind[o],x,a,bpop_plus,b_ind,bocc_ind,poped.db)
hessian_eta_minus = hessian_eta_complex(model_switch[o],xt_ind[o],x,a,bpop_minus,b_ind,bocc_ind,poped.db)
hess_eta_plus[o] = 1/2*trace_matrix(hessian_eta_plus*d)
hess_eta_minus[o] = 1/2*trace_matrix(hessian_eta_minus*d)
}
ferror_plus = ferror_plus+hess_eta_plus
ferror_minus = ferror_minus+hess_eta_minus
}
df_dbeta[,k]=(ferror_plus-ferror_minus)/(2.0*h)
} else { #FOCE, FOCEI
returnArgs <- feval(poped.db$model$ferror_pointer,model_switch,xt_ind,g_plus,epsi0,poped.db)
ferror_plus <- returnArgs[[1]]
poped.db <- returnArgs[[2]]
returnArgs <- LinMatrixL(model_switch,xt_ind,x,a,bpop_plus,b_ind_plus,bocc_ind,poped.db)
l_plus <- returnArgs[[1]]
poped.db <- returnArgs[[2]]
returnArgs <- feval(poped.db$model$ferror_pointer,model_switch,xt_ind,g_minus,epsi0,poped.db)
ferror_minus <- returnArgs[[1]]
poped.db <- returnArgs[[2]]
returnArgs <- LinMatrixL(model_switch,xt_ind,x,a,bpop_minus,b_ind_minus,bocc_ind,poped.db)
l_minus <- returnArgs[[1]]
poped.db <- returnArgs[[2]]
occ_add_plus = zeros(size(xt_ind,1), 1)
occ_add_minus = zeros(size(xt_ind,1), 1)
if((isempty(b_ind)) ){#No IIV present
l_plus = zeros(size(xt_ind,1), 1)
l_minus = zeros(size(xt_ind,1),1)
} else {
l_plus = l_plus%*%b_ind_plus
l_minus = l_minus%*%b_ind_minus
}
if(poped.db$parameters$NumOcc!=0){
for(m in 1:poped.db$parameters$NumOcc){
returnArgs <- LinMatrixL_occ(model_switch,xt_ind,x,a,bpop_plus,b_ind,bocc_ind,m,poped.db)
l_plus_occ <- returnArgs[[1]]
poped.db <- returnArgs[[2]]
returnArgs <- LinMatrixL_occ(model_switch,xt_ind,x,a,bpop_minus,b_ind,bocc_ind,m,poped.db)
l_minus_occ <- returnArgs[[1]]
poped.db <- returnArgs[[2]]
occ_add_plus=occ_add_plus+l_plus_occ*(bocc_ind[,m])
occ_add_minus=occ_add_minus+l_minus_occ*(bocc_ind[,m])
}
}
df_dbeta[,k]=((ferror_plus-(l_plus+occ_add_plus))-(ferror_minus-(l_minus+occ_add_minus)))/(2*h)
}
k=k+1
}
}
} else {
#Complex derivative
if((poped.db$settings$m1_switch[1] == 0)){
k=1
for(i in 1:poped.db$parameters$nbpop){
if((poped.db$parameters$notfixed_bpop[i]==1)){
bpop_plus=bpop
bpop_plus[i] = complex(real=bpop_plus[i],imaginary=h)
g_plus=feval(poped.db$model$fg_pointer,x,a,bpop_plus,b_ind,bocc_ind)
if((poped.db$settings$iApproximationMethod==0 || poped.db$settings$iApproximationMethod==3) ){#FO, FOI
returnArgs <- feval(poped.db$model$ferror_pointer,model_switch,xt_ind,g_plus,epsi0,poped.db)
ferror_tmp <- returnArgs[[1]]
poped.db <- returnArgs[[2]]
df_dbeta[,k] = Im(ferror_tmp)/h
} else { #FOCE, FOCEI
returnArgs <- feval(poped.db$model$ferror_pointer,model_switch,xt_ind,g_plus,epsi0,poped.db)
ferror_tmp <- returnArgs[[1]]
poped.db <- returnArgs[[2]]
#dLinMatrixL/dbpop, dLinMatrixL_occ must be central difference to assure
#that complex step can be used within Linmatrix
bpop_plus_c = bpop
bpop_minus_c = bpop
bpop_plus_c[i]=bpop_plus_c[i]+h
bpop_minus_c[i]=bpop_minus_c[i]-h
returnArgs <- LinMatrixL(model_switch,xt_ind,x,a,bpop_plus_c,b_ind,bocc_ind,poped.db)
l_plus <- returnArgs[[1]]
poped.db <- returnArgs[[2]]
returnArgs <- LinMatrixL(model_switch,xt_ind,x,a,bpop_minus_c,b_ind,bocc_ind,poped.db)
l_minus <- returnArgs[[1]]
poped.db <- returnArgs[[2]]
dL_dbpop = ((l_plus-l_minus))/(2*h)
occ_add_plus = zeros(size(xt_ind,1), 1)
occ_add_minus = zeros(size(xt_ind,1), 1)
if((isempty(b_ind)) ){#No IIV present
dL_dbpop = zeros(size(xt_ind,1), 1)
} else {
dL_dbpop = dL_dbpop*b_ind
}
for(m in 1:poped.db$parameters$NumOcc){
returnArgs <- LinMatrixL_occ(model_switch,xt_ind,x,a,bpop_plus_c,b_ind,bocc_ind,m,poped.db)
l_plus_occ <- returnArgs[[1]]
poped.db <- returnArgs[[2]]
returnArgs <- LinMatrixL_occ(model_switch,xt_ind,x,a,bpop_minus_c,b_ind,bocc_ind,m,poped.db)
l_minus_occ <- returnArgs[[1]]
poped.db <- returnArgs[[2]]
occ_add_plus=occ_add_plus+l_plus_occ*(bocc_ind[,m])
occ_add_minus=occ_add_minus+l_minus_occ*(bocc_ind[,m])
}
df_dbeta[,k] = Im(ferror_tmp)/h-(dL_dbpop+(occ_add_plus-occ_add_minus)/(2*h))
}
k=k+1
}
}
} else {
if((poped.db$settings$m1_switch[1] == 20) ){#Analytic derivative
df_dbeta_tmp = zeros(size(xt_ind,1),length(poped.db$parameters$notfixed_bpop))
for(k in 1:size(xt_ind,1)){
df_dbeta_tmp[k,] = eval(parse(text=sprintf('analytic_dff_dbpop%d(model_switch,xt_ind[k],x,a,bpop,b_ind)',model_switch[k])))
}
m=1
for(i in 1:poped.db$parameters$nbpop){
if((poped.db$parameters$notfixed_bpop[i]==1)){
df_dbeta[,m] = df_dbeta_tmp[,i]
m=m+1
}
}
} else {
if((poped.db$settings$m1_switch[1] == 30) ){#Automatic differentiation using INTLab
if((poped.db$settings$Engine$Type==2) ){#FreeMat
stop(sprintf('Automatic differentiation is not available in PopED with FreeMat'))
}
if((poped.db$settings$iApproximationMethod==0 || poped.db$settings$iApproximationMethod==3 || (isempty(b_ind) && isempty(bocc_ind))) ){#FO, FOI
stop("Automatic differentiation not currently implemented in PopED for R")
# bpop_init = gradientinit(bpop)
# fg_init=feval(poped.db$model$fg_pointer,x,a,bpop_init,b_ind,bocc_ind)
# returnArgs <- feval(poped.db$model$ferror_pointer,model_switch,xt_ind,fg_init,epsi0,poped.db)
# val <- returnArgs[[1]]
# poped.db <- returnArgs[[2]]
# df_dbeta = val$dx
# for(i in poped.db$parameters$nbpop:-1:1){
# if((poped.db$parameters$notfixed_bpop[i]==0)){
# df_dbeta[,i]=matrix(0,0,0)
# }
# }
} else { #FOCE, FOCEI
stop("Automatic differentiation not currently implemented in PopED for R")
#bpop_init = gradientinit(bpop)
# fg_init=feval(poped.db$model$fg_pointer,x,a,bpop_init,b_ind,bocc_ind)
# returnArgs <- feval(poped.db$model$ferror_pointer,model_switch,xt_ind,fg_init,epsi0,poped.db)
# val <- returnArgs[[1]]
# poped.db <- returnArgs[[2]]
# returnArgs <- dLinMatrixL_dbpop[model_switch,xt_ind,x,a,bpop,b_ind,bocc_ind,poped.db]
# cellDeriv <- returnArgs[[1]]
# L <- returnArgs[[2]]
# poped.db <- returnArgs[[3]]
# returnArgs <- dLinMatrixL_occ_dbpop[model_switch,xt_ind,x,a,bpop,b_ind,bocc_ind,poped.db]
# cellDerivOcc <- returnArgs[[1]]
# L_occ <- returnArgs[[2]]
# poped.db <- returnArgs[[3]]
# o = 1
# for(k in 1:poped.db$parameters$nbpop){
# if((poped.db$parameters$notfixed_bpop[k]==1)){
# if((isempty(cellDeriv)) ){#Add linmatrix
# l_tmp = zeros(size(xt_ind,1),1)
# } else {
# l_tmp = cellDeriv[[k]]*b_ind
# }
# occ_add = zeros(size(xt_ind,1),1)
# for(m in 1:poped.db$parameters$NumOcc ){#Add occcasion
# occ_add=occ_add+cellDerivOcc[[m,k]]*(bocc_ind(,m))
# }
# df_dbeta[,o] = val$dx(,k) - (l_tmp+occ_add)
# o=o+1
# }
# }
}
} else {
stop(sprintf('Unknown derivative option for m1'))
}
}
}
}
return(list( df_dbeta= df_dbeta,poped.db=poped.db))
}
| /PopED/R/m1.R | no_license | ingted/R-Examples | R | false | false | 11,506 | r |
library(dash)
library(dashCoreComponents)
library(dashHtmlComponents)
utils <- new.env()
source('dash_docs/utils.R', local=utils)
layout <- htmlDiv(list(
htmlH1('Deploying Dash Apps'),
dccMarkdown("
By default, Dash apps run on `localhost` - you can only access them on your
own machine. To share a Dash app, you need to 'deploy' your Dash app to a
server and open up the server's firewall to the public or to a restricted
set of IP addresses.
## Dash Enterprise
[Dash Enterprise](https://plotly.com/dash/pricing/?_ga=2.249471751.1080104966.1578062860-1986131108.1567098614) is Plotly's commercial product for deploying Dash
Apps on your company's servers or on AWS, Google Cloud, or Azure. It
offers an enterprise-wide Dash App Portal, easy git-based deployment,
automatic URL namespacing, built-in SSL support, LDAP authentication, and
more. [Learn more about Dash Enterprise](https://plotly.com/dash/pricing?_ga=2.249471751.1080104966.1578062860-1986131108.1567098614) or [get in touch to start a
trial](https://go.plotly.com/dash-doc?_ga=2.48144423.1080104966.1578062860-1986131108.1567098614).
For existing customers, see the [Dash Enterprise Documentation](https://dash.plotly.com/dash-enterprise).
## Dash and Fiery
Dash apps are web applications. Dash uses Fiery as the web framework.
The underlying Fiery app is available as `app`, that is:
```r
library(dash)
app <- Dash$new()
```
## Heroku Example
Heroku is one of the easiest platforms for deploying and managing public web applications.
Here is a simple example. This example requires a Heroku account and `git`. We currently recommend using a Dockerfile-based
approach when deploying Dash for R applications to Heroku. You may use our base image (as below), or supply your own.
For more information about this deployment method, [please consult the Heroku documentation](https://devcenter.heroku.com/articles/build-docker-images-heroku-yml).
---
Step 1. Create a new folder for your project:
```
$ mkdir dash_app_example
$ cd dash_app_example
```
---
Step 2. Initialize the folder with `git`
```
$ git init # initializes an empty git repo
```
---
Step 3. Initialize the folder with a sample app (`app.R`), a `.gitignore` file (not required, but it avoids committing files that aren't necessary for your app to function), a `Dockerfile` and a `heroku.yml` for deployment, plus `init.R` and `apt-packages` for dependencies.
Create the following files in your project folder:
**`app.R`**
```r
library(dash)
library(dashCoreComponents)
library(dashHtmlComponents)

app <- Dash$new()
app$layout(htmlDiv(list(htmlH2('Hello World'),
dccDropdown(id = 'dropdown',
options = list(
list('label' = 'LA', 'value' = 'LA'),
list('label' = 'NYC', 'value' = 'NYC'),
list('label' = 'MTL', 'value' = 'MTL')
),
value = 'LA'),
htmlDiv(id = 'display-value'))
)
)
app$callback(output=list(id='display-value', property='children'),
params=list(
input(id='dropdown', property='value')),
function(value)
{
sprintf('You have selected %s', value)
}
)
app$run_server(host = '0.0.0.0', port = Sys.getenv('PORT', 8050))
```
---
**`Dockerfile`**
```
FROM plotly/heroku-docker-r:3.6.2_heroku18
# on build, copy application files
COPY . /app/
# for installing additional dependencies etc.
RUN if [ -f '/app/onbuild' ]; then bash /app/onbuild; fi;
# look for /app/apt-packages and if it exists, install the packages contained
RUN if [ -f '/app/apt-packages' ]; then apt-get update -q && cat apt-packages | xargs apt-get -qy install && rm -rf /var/lib/apt/lists/*; fi;
# look for /app/init.R and if it exists, execute it
RUN if [ -f '/app/init.R' ]; then /usr/bin/R --no-init-file --no-save --quiet --slave -f /app/init.R; fi;
# here app.R needs to match the name of the file which contains your app
CMD cd /app && /usr/bin/R --no-save -f /app/app.R
```
---
**`heroku.yml`**
```
build:
docker:
web: Dockerfile
```
---
**`init.R`**
`init.R` describes your R dependencies. Here is an example script. At minimum, you'll
want to install Dash for R to ensure that you're always using the latest version.
It's fairly trivial to install packages from both CRAN mirrors and GitHub repositories.
```r
# R script to run author supplied code, typically used to install additional R packages
# contains placeholders which are inserted by the compile script
# NOTE: this script is executed in the chroot context; check paths!
r <- getOption('repos')
r['CRAN'] <- 'http://cloud.r-project.org'
options(repos=r)
# ======================================================================
# packages go here
install.packages('remotes')
remotes::install_github('plotly/dashR', upgrade=TRUE)
```
---
**`apt-packages`**
`apt-packages` describes system-level dependencies. For example, one might add
three packages by including their names, one per line, within this file:
```
libcurl4-openssl-dev
libxml2-dev
libv8-3.14-dev
```
---
Step 4. Initialize Heroku, add files to Git, and deploy
```
$ heroku create --stack container my-dash-app # change my-dash-app to a unique name
$ git add . # add all files to git
$ git commit -m 'Initial app boilerplate'
$ git push heroku master # deploy code to Heroku
$ heroku ps:scale web=1 # run the app with one Heroku 'dyno'
```
You should be able to access your app at `https://my-dash-app.herokuapp.com` (changing `my-dash-app` to the name of your app).
---
Step 5. Update the code and redeploy
When you modify `app.R` with your own code, you will need to add the changes to git and push those changes to Heroku.
```
$ git status # view the changes
$ git add . # add all the changes
$ git commit -m 'a description of the changes'
$ git push heroku master
```
If you're ready to take your apps to the next level and deliver interactive analytics at scale, we invite you to learn more about Dash Enterprise.
[Click here for more information](https://plotly.com/dash/pricing/?_ga=2.176345125.1075922756.1562168385-916141078.1562168385) or [get in touch](https://plotly.typeform.com/to/rkO85m?_ga=2.176345125.1075922756.1562168385-916141078.1562168385).
"),
htmlHr(),
dccMarkdown("
[Back to the Table of Contents](/)
")
))
| /dash_docs/chapters/deployment/index.R | permissive | eddy-geek/dash-docs | R | false | false | 6,530 | r |
# ridge model fitting
library(glmnet)  # provides cv.glmnet() and predict() for penalised fits
# NOTE: project_data, sqrtshift() and print_glmnet_coefs() are user-defined
# objects assumed to be loaded before this script runs

# baseline linear model, dropping the 7th observation
model1 <- model2 <- lm(volact ~ 1 + race + fire + age + income, data = project_data[c(-7), ])
plot(model1)

# transformed response from the user-supplied sqrtshift() helper
y_tr <- sqrtshift(model1)$zt

# design matrix for the penalised fit
# (note: model1 drops row 7 while X uses all rows -- make sure y_tr and X
# end up with matching lengths)
X <- model.matrix(~ 1 + race + fire + age + theft + income, data = project_data)

# alpha = 0 selects the ridge penalty; cv.glmnet() defaults to alpha = 1 (lasso)
ridge_model <- cv.glmnet(X, y_tr, alpha = 0)
opt_lambda <- ridge_model$lambda.min
ridge_fit <- ridge_model$glmnet.fit

# predicted vs observed (transformed) values
y_ridge <- predict(ridge_fit, s = opt_lambda, newx = X)
plot(y_ridge, y_tr)
abline(0, 1)

# R squared
sst <- sum((y_tr - mean(y_tr))^2)
sse <- sum((y_ridge - y_tr)^2)
rsq <- 1 - sse / sst
rsq
print_glmnet_coefs(ridge_fit, opt_lambda)
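# A standard alternative to the custom print_glmnet_coefs() helper is
# glmnet's own coef() method on the cross-validated fit (a sketch using
# the ridge_model object fitted above):
coef(ridge_model, s = "lambda.min")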
| /ridge model fitting.R | no_license | pritam-dey3/Project-On-Regression | R | false | false | 604 | r |
library(data.table)
library(rvest)
library(lubridate)
library(stringr)
deleteslashes <- function(x) {str_replace_all(x, "[\n\t\r]", "")}  # strips newlines, tabs and carriage returns
extractnames <- function(names) {
if (length(grep("-THE-", names)) == 1) {
namesenglish <- str_extract(names, ".+-THE-")
} else if (length(grep("-The-", names)) == 1) {
namesenglish <- str_extract(names, ".+-The-")
} else if (length(grep("Limited", names)) == 1) {
namesenglish <- str_extract(names, ".+Limited")
} else if (length(grep("LTD", names)) == 1) {
namesenglish <- str_extract(names, ".+LTD")
} else {
namesenglish <- str_extract(names, ".+LIMITED")
}
nameschinese <- str_replace(names, fixed(namesenglish), "")
return(list(deleteslashes(namesenglish), deleteslashes(nameschinese)))
}
scrape <- function(crno, crdata) {
crnostr = sprintf("%07d", crno)
baseurl = "https://www.mobile-cr.gov.hk/mob/cps_criteria.do?queryCRNO="
url = paste0(baseurl, crnostr)
print(c("trying", crnostr))
  html <- read_html(url, encoding = "utf-8")  # rvest::html() is defunct; read_html() is the current API
  if (length(html_nodes(html, "img")) > 0) {
    print(c("sleeping, then trying again", crnostr))
    save(scrapelist, file = "scrapelist.Rdata")  # saves the global scrapelist, not crdata
    Sys.sleep(50)
    html <- read_html(url, encoding = "utf-8")
  }
if (!is.null(html_node(html, "body"))) {
if (length(grep('沒有紀錄與輸入的查詢資料相符', html_text(html_node(html, "body"))) > 0)) {
print('NO MATCHING RECORD FOUND FOR THE SEARCH INFORMATION INPUT!')
return(crdata)
}
}
tds <- html_text(html_nodes(html, "td:nth-child(2)"))
  crdata <- rbindlist(list(crdata, list(
    crno = tds[1],
    companytype = deleteslashes(tds[2]),
    incorporationdate = dmy(tds[3]),
    status = deleteslashes(tds[4]),
    windingupmode = deleteslashes(tds[5]),
    dissolutiondate = dmy(tds[6]),
    registeravailable = deleteslashes(tds[7]))), fill = TRUE)
setkey(crdata, crno)
companyname <- str_replace(deleteslashes(html_text(html_node(html, "td tr:nth-child(2) td"))), "公司名稱:", "")
companynameextract <- extractnames(companyname)
crdata[crnostr,currentnameenglish := companynameextract[[1]]]
crdata[crnostr,currentnamechinese := companynameextract[[2]]]
crdata[crnostr, remarks := deleteslashes(html_text(html_node(html, ".sameasbody")))]
crdata[crnostr, note := str_replace(html_text(html_node(html, "tr:nth-child(10) td")), fixed("重要事項:\r\n\t\t\t\t\t"), "")]
names <- html_nodes(html, ".data")
for (name in 1:length(names)) {
tempname <- html_text(names[name])
crdata[crnostr, paste0("namesdate", name) := dmy(str_sub(tempname, 1, 10))]
tempname <- str_sub(tempname, 11)
tempextract <- extractnames(tempname)
crdata[crnostr, paste0("namesenglish", name) := tempextract[[1]]]
crdata[crnostr, paste0("nameschinese", name) := tempextract[[2]]]
}
return(crdata)
}
if (file.exists("scrapelist.Rdata")) {
load("scrapelist.Rdata")
} else {
scrapelist <- data.table(crno = "0", companytype = "blah")
setkey(scrapelist, crno)
}
maxcr = 2120960
mincr = scrapelist[,max(as.numeric(crno))]
for (crnonumber in (mincr:maxcr)) {
if (is.na(scrapelist[sprintf("%07d", crnonumber), companytype])) {
scrapelist <- scrape(crnonumber, scrapelist)
}
save(scrapelist, file = "scrapelist.Rdata")
}
| /scraper.R | no_license | gfrmin/hong_kong_company_records | R | false | false | 3,473 | r |
library(iml)
### Name: FeatureImp
### Title: Feature importance
### Aliases: FeatureImp
### ** Examples
if (require("rpart")) {
# We train a tree on the Boston dataset:
data("Boston", package = "MASS")
tree = rpart(medv ~ ., data = Boston)
y = Boston$medv
X = Boston[-which(names(Boston) == "medv")]
mod = Predictor$new(tree, data = X, y = y)
# Compute feature importances as the performance drop in mean absolute error
imp = FeatureImp$new(mod, loss = "mae")
# Plot the results directly
plot(imp)
# Since the result is a ggplot object, you can extend it:
if (require("ggplot2")) {
plot(imp) + theme_bw()
# If you want to do your own thing, just extract the data:
imp.dat = imp$results
head(imp.dat)
ggplot(imp.dat, aes(x = feature, y = importance)) + geom_point() +
theme_bw()
}
# We can also look at the difference in model error instead of the ratio
imp = FeatureImp$new(mod, loss = "mae", compare = "difference")
# Plot the results directly
plot(imp)
# FeatureImp also works with multiclass classification.
# In this case, the importance measurement regards all classes
tree = rpart(Species ~ ., data = iris)
X = iris[-which(names(iris) == "Species")]
y = iris$Species
mod = Predictor$new(tree, data = X, y = y, type = "prob")
# For some models we have to specify additional arguments for the predict function
imp = FeatureImp$new(mod, loss = "ce")
plot(imp)
# For multiclass classification models, you can choose to only compute performance for one class.
# Make sure to adapt y
mod = Predictor$new(tree, data = X, y = y == "virginica",
type = "prob", class = "virginica")
imp = FeatureImp$new(mod, loss = "ce")
plot(imp)
}
| /data/genthat_extracted_code/iml/examples/FeatureImp.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 1,668 | r |
#' compare_two_years
#'
#' Returns scatter plot for 2 years for data elements of interest.
#' Option to narrow down data geographically
#'
#' @param w.use dataframe, the water use data
#' @param data.elements chr, vector of data elements to be plotted (1 plot per element)
#' @param areas chr, codes indicating HUCs, counties, states, aquifers, etc.
#' @param year.x.y int, 2-element vector specifying the two years to be plotted
#' @param area.column character that defines which column to use to specify area
#' @param legend is a logical function to include list of counties in a legend if manageable, default is FALSE
#'
#' @export
#'
#' @import ggplot2
#' @importFrom tidyr gather_
#'
#' @examples
#' w.use <- wUseSample
#' data.elements <- c("PS.TOPop", "PS.SWPop")
#' areas <- "10" # NA uses all areas
#' area.column <- "STATECODE"
#' year.x.y <- c(2005,2010)
#' compare_two_years(w.use, data.elements, year.x.y, area.column, areas)
#' compare_two_years(w.use, data.elements, year.x.y, area.column)
#' compare_two_years(w.use, "PS.TOPop", year.x.y, area.column)
compare_two_years <- function(w.use, data.elements, year.x.y, area.column, areas=NA, legend = FALSE){
w.use.sub <- subset_wuse(w.use, data.elements, area.column, areas)
w.use.sub <- w.use.sub[w.use.sub$YEAR %in% year.x.y,]
x <- gather_(w.use.sub, "Key", "Value", data.elements)
for(i in data.elements){
df_x<-x[x$YEAR == year.x.y[1] & x$Key == i,][["Value"]]
df_y<-x[x$YEAR == year.x.y[2] & x$Key == i,][["Value"]]
df_site <- x[x$YEAR == year.x.y[2] & x$Key == i,][[area.column]]
    if(length(df_x) != length(df_y)) { # I found this issue with Alaska between 2005 and 2010.
      stop('Different number of counties from one compilation year to the other is not supported yet.')
    }
if(all(is.na(df_x))) stop('No Data Available for First Year Selected')
if(all(is.na(df_y))) stop('No Data Available for Second Year Selected')
df <- data.frame(
x = df_x,
y = df_y,
site = df_site,
stringsAsFactors = FALSE)
df$Key <- i
if(i == data.elements[1]){
df_full <- df
} else {
df_full <- rbind(df_full, df)
}
}# i
compare.plot <- ggplot(data = df_full) +
geom_point(aes_string(x = "x", y = "y", color = "site"),
show.legend = legend, size = 3) +
geom_line(aes_string(x = "x", y = "x"),col="red") +
facet_wrap(~ Key, ncol = 1) +
xlab(year.x.y[1]) +
ylab(year.x.y[2])
compare.plot
return(compare.plot)
}# compare_two_years
| /R/compare_two_years.R | permissive | ralex-USGS/wateRuse | R | false | false | 2,568 | r |
# Text classification using a Naive Bayes scheme
# Data : 20 Newsgroups
# Download link : http://www.cs.umb.edu/~smimarog/textmining/datasets/
# Load all the required libraries. Note: packages need to be installed first.
library(dplyr)
library(caret)
library(tm)
library(RTextTools)
library(doMC)
library(e1071)
registerDoMC(cores=detectCores())
# Load data.
# We will use the 'train-all-terms' file which contains over 11300 messages.
# Read file as a dataframe
ng.df <- read.table("20ng-train-all-terms.txt", header=FALSE, sep="\t", quote="", stringsAsFactors=FALSE, col.names = c("topic", "text"))
# Preview the dataframe
# head(ng.df) # or use View(ng.df)
# How many messages does each of the 20 categories contain?
table(ng.df$topic)
# Read topic variable as a factor variable
ng.df$topic <- as.factor(ng.df$topic)
# Randomize : Shuffle rows randomly.
set.seed(2016)
ng.df <- ng.df[sample(nrow(ng.df)), ]
ng.df <- ng.df[sample(nrow(ng.df)), ]
# Create corpus of the entire text
corpus <- Corpus(VectorSource(ng.df$text))
# Total size of the corpus
length(corpus)
# Inspect the corpus
inspect(corpus[1:5])
# Tidy up the corpus using the 'tm_map' function. Make the following transformations on the corpus: change to lower case, remove numbers,
# punctuation and white space. We also eliminate common english stop words like "his", "our", "hadn't", "couldn't", etc. using the
# stopwords() function.
# Use 'dplyr' package's excellent pipe utility to do this neatly
corpus.clean <- corpus %>%
tm_map(content_transformer(tolower)) %>%
tm_map(removePunctuation) %>%
tm_map(removeNumbers) %>%
tm_map(removeWords, stopwords(kind="en")) %>%
tm_map(stripWhitespace)
# Create document term matrix
dtm <- DocumentTermMatrix(corpus.clean)
dim(dtm)
# Create a 75:25 train/test partition (8470 training and 2823 test messages)
ng.df.train <- ng.df[1:8470,]
ng.df.test <- ng.df[8471:11293,]
dtm.train <- dtm[1:8470,]
dtm.test <- dtm[8471:11293,]
dim(dtm.test)
corpus.train <- corpus.clean[1:8470]
corpus.test <- corpus.clean[8471:11293]
# Find frequent words which appear five times or more
fivefreq <- findFreqTerms(dtm.train, 5)
length(fivefreq)
dim(dtm.train)
# Build dtm using fivefreq words only. Reduce number of features to length(fivefreq)
system.time( dtm.train.five <- DocumentTermMatrix(corpus.train, control = list(dictionary=fivefreq)) )
system.time( dtm.test.five <- DocumentTermMatrix(corpus.test, control = list(dictionary=fivefreq)) )
# convert word counts (0 or more) to presence or absence (yes or no) for each word
convert_count <- function(x) {
y <- ifelse(x > 0, 1,0)
y <- factor(y, levels=c(0,1), labels=c("No", "Yes"))
y
}
# Apply yes/no function to get final training and testing dtms
system.time( ng.train <- apply(dtm.train.five, 2, convert_count) )
system.time ( ng.test <- apply(dtm.test.five, 2, convert_count) )
# Build the NB classifier
system.time (ngclassifier <- naiveBayes(ng.train, ng.df.train$topic))
# Make predictions on the test set
system.time( predictions <- predict(ngclassifier, newdata=ng.test) )
predictions
cm <- confusionMatrix(predictions, ng.df.test$topic )
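# Overall accuracy can be read directly off the caret confusionMatrix object;
# a quick check (cm is the object created above):
cm$overall["Accuracy"]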
cm
| /R/TextAndWebMining/nb_20newsgroups.R | no_license | Fr4nc3/code-hints | R | false | false | 3,201 | r |
## function to import R data objects from server
##' Import data from King's College London networks
##'
##' Function for importing hourly mean data from King's College London
##' networks. Files are imported from a remote server operated by King's
##' College London that provides air quality data files as R data objects.
##'
##' The \code{importKCL} function has been written to make it easy to import
##' data from the King's College London air pollution networks. KCL have
##' provided .RData files (R workspaces) of all individual sites and years for
##' the KCL networks. These files are updated on a weekly basis. This approach
##' requires a link to the Internet to work.
##'
##' There are several advantages over the web portal approach where .csv files
##' are downloaded. First, it is quick to select a range of sites, pollutants
##' and periods (see examples below). Second, storing the data as .RData
##' objects is very efficient as they are about four times smaller than .csv
##' files --- which means the data downloads quickly and saves bandwidth.
##' Third, the function completely avoids any need for data manipulation or
##' setting time formats, time zones etc. Finally, it is easy to import many
##' years of data beyond the current limit of about 64,000 lines. The final
##' point makes it possible to download several long time series in one go. The
##' function also has the advantage that the proper site name is imported and
##' used in \code{openair} functions.
##'
##' The site codes and pollutant names can be upper or lower case. The function
##' will issue a warning when data less than six months old is downloaded,
##' which may not be ratified.
##'
##' The data are imported by stacking sites on top of one another and will have
##' field names \code{date}, \code{site}, \code{code} (the site code) and
##' pollutant(s). Sometimes it is useful to have columns of site data. This can
##' be done using the \code{reshape} function --- see examples below.
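##'
##' For example, a minimal sketch (assuming a working Internet connection to
##' the KCL server; the site codes used here appear in the list below):
##'
##' \preformatted{
##' ## import NOx from two sites and reshape so each site becomes a column
##' kc <- importKCL(site = c("my1", "kc1"), year = 2010, pollutant = "nox")
##' wide <- reshape(kc[, c("date", "site", "nox")], idvar = "date",
##'                 timevar = "site", direction = "wide")
##' }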
##'
##' The situation for particle measurements is not straightforward
##' given the variety of methods used to measure particle mass and
##' changes in their use over time. The \code{importKCL} function
##' imports two measures of PM10 where available. \code{PM10_raw} are
##' TEOM measurements with a 1.3 factor applied to take account of
##' volatile losses. The \code{PM10} data is a current best estimate
##' of a gravimetric equivalent measure as described below. NOTE! many
##' sites have several instruments that measure PM10 or PM2.5. In the
##' case of FDMS measurements, these are given as separate site codes
##' (see below). For example "MY1" will be TEOM with VCM applied and
##' "MY7" is the FDMS data.
##'
##' Where FDMS data are used the volatile and non-volatile components
##' are separately reported i.e. v10 = volatile PM10, v2.5 = volatile
##' PM2.5, nv10 = non-volatile PM10 and nv2.5 = non-volatile
##' PM2.5. Therefore, PM10 = v10 + nv10 and PM2.5 = v2.5 + nv2.5.
##'
##' For the assessment of the EU Limit Values, PM10 needs to be measured using
##' the reference method or one shown to be equivalent to the reference method.
##' Defra carried out extensive trials between 2004 and 2006 to establish which
##' types of particulate analysers in use in the UK were equivalent. These
##' trials found that measurements made using Partisol, FDMS, BAM and SM200
##' instruments were shown to be equivalent to the PM10 reference method.
##' However, correction factors need to be applied to measurements from the
##' SM200 and BAM instruments. Importantly, the TEOM was demonstrated as not
##' being equivalent to the reference method due to the loss of volatile PM,
##' even when the 1.3 correction factor was applied. The Volatile Correction
##' Model (VCM) was developed for Defra at King's to allow measurements of PM10
##' from TEOM instruments to be converted to reference equivalent; it uses the
##' measurements of volatile PM made using nearby FDMS instruments to correct
##' the measurements made by the TEOM. It passed the equivalence testing using
##' the same methodology used in the Defra trials and is now the recommended
##' method for correcting TEOM measurements (Defra, 2009). VCM correction of
##' TEOM measurements can only be applied after 1st January 2004, when
##' sufficiently widespread measurements of volatile PM became available. The
##' 1.3 correction factor is now considered redundant for measurements of PM10
##' made after 1st January 2004. Further information on the VCM can be found
##' at \url{http://www.volatile-correction-model.info/}.
##'
##' All PM10 statistics on the LondonAir web site, including the bulletins and
##' statistical tools (and in the RData objects downloaded using
##' \code{importKCL}), now report PM10 results as reference equivalent. For
##' PM10 measurements made by BAM and SM200 analysers the applicable correction
##' factors have been applied. For measurements from TEOM analysers the 1.3
##' factor has been applied up to 1st January 2004, then the VCM method has
##' been used to convert to reference equivalent.
##'
##' The meteorological data are meant to represent 'typical' conditions in
##' London, but users may prefer to use their own data. The data provide a an
##' estimate of general meteorological conditions across Greater London. For
##' meteorological species (wd, ws, rain, solar) each data point is formed by
##' averaging measurements from a subset of LAQN monitoring sites that have
##' been identified as having minimal disruption from local obstacles and a
##' long term reliable dataset. The exact sites used varies between species,
##' but include between two and five sites per species. Therefore, the data
##' should represent 'London scale' meteorology, rather than local conditions.
##'
##' While the function is being developed, the following site codes should help
##' with selection. We will also make available other meta data such as site
##' type and location to make it easier to select sites based on other
##' information. Note that these codes need to be refined because only the
##' common species are available for export currently i.e. NOx, NO2, O3, CO,
##' SO2, PM10, PM2.5.
##'
##' \itemize{ \item A30 | Kingston - Kingston Bypass A3 | Roadside
##' \item AD1 | Shoreham-by-Sea | Kerbside \item AR1 | Chichester -
##' Lodsworth | Rural \item AR2 | Wealden - Isfield | Rural \item AS1
##' | Bath Aethalometer | Urban Background \item BA1 | Basildon -
##' Gloucester Park | Roadside \item BB1 | Broxbourne (Roadside) |
##' Roadside \item BE0 | Belfast - Carbon | Urban Background \item BE1
##' | Belfast Centre AURN | Urban Background \item BE3 | Belfast
##' Centre Aethalometer | Urban Background \item BE7 | Belfast Centre
##' FDMS trial | Urban Background \item BE8 | Belfast - Nitrate |
##' Urban Background \item BE9 | Belfast - Partisol SO4 | Urban
##' Background \item BF1 | Bedford Stewartby (Rural) | Industrial
##' \item BF3 | Bedford - Kempston | Industrial \item BF4 | Bedford -
##' Prebend Street | Roadside \item BF5 | Bedford - Lurke Street |
##' Roadside \item BG1 | Barking and Dagenham - Rush Green | Suburban
##' \item BG2 | Barking and Dagenham - Scrattons Farm | Suburban \item
##' BG3 | Barking and Dagenham - North Street | Kerbside \item BH0 |
##' Brighton Preston Park AURN | Urban Background \item BH1 | Brighton
##' Roadside | Roadside \item BH2 | Brighton and Hove - Hove Town Hall
##' | Roadside \item BH3 | Brighton and Hove - Foredown Tower | Urban
##' Background \item BH5 | Brighton Mobile (Preston Fire Station) |
##' Roadside \item BH6 | Brighton Mobile (Lewes Road) | Roadside \item
##' BH7 | Brighton Mobile (Gloucester Road) | Roadside \item BH8 |
##' Brighton and Hove - Stanmer Park | Rural \item BH9 | Brighton
##' Mobile Beaconsfield Road | Roadside \item BI1 | Birmingham Tyburn
##' CPC | Urban Background \item BL0 | Camden - Bloomsbury | Urban
##' Background \item BL1 | Bloomsbury AURN SMPS | Urban Background
##' \item BM1 | Ballymena - Ballykeel | Suburban \item BM2 | Ballymena
##' - North Road | Roadside \item BN1 | Barnet - Tally Ho Corner |
##' Kerbside \item BN2 | Barnet - Finchley | Urban Background \item
##' BN3 | Barnet - Strawberry Vale | Urban Background \item BO1 |
##' Ballymoney 1 | Suburban \item BP0 | Westminster - Bridge Place |
##' Urban Background \item BQ5 | Bexley - Manor Road West Gravimetric
##' | Industrial \item BQ6 | Bexley - Manor Road East Gravimetric |
##' Industrial \item BQ7 | Belvedere West | Urban Background \item BQ8
##' | Belvedere West FDMS | Urban Background \item BT1 | Brent -
##' Kingsbury | Suburban \item BT2 | Brent - Ikea Car Park | Roadside
##' \item BT3 | Brent - Harlesden | Roadside \item BT4 | Brent - Ikea
##' | Roadside \item BT5 | Brent - Neasden Lane | Industrial \item BT6
##' | Brent - John Keble Primary School | Roadside \item BT7 | Brent -
##' St Marys Primary School | Urban Background \item BW1 | Brentwood -
##' Brentwood Town Hall | Urban Background \item BX0 | Bexley -
##' Belvedere FDMS | Suburban \item BX1 | Bexley - Slade Green |
##' Suburban \item BX2 | Bexley - Belvedere | Suburban \item BX3 |
##' Bexley - Thamesmead | Suburban \item BX4 | Bexley - Erith |
##' Industrial \item BX5 | Bexley - Bedonwell | Suburban \item BX6 |
##' Bexley - Thames Road North FDMS | Roadside \item BX7 | Bexley -
##' Thames Road North | Roadside \item BX8 | Bexley - Thames Road
##' South | Roadside \item BX9 | Bexley - Slade Green FDMS | Suburban
##' \item BY1 | Bromley - Rent Office | Urban Background \item BY4 |
##' Bromley - Tweedy Rd | Roadside \item BY5 | Bromley - Biggin Hill |
##' Suburban \item BY7 | Bromley - Harwood Avenue | Roadside \item CA1
##' | Crawley Background | Urban Background \item CA2 | Crawley -
##' Gatwick Airport | Urban Background \item CB1 | Chelmsford - Fire
##' Station | Roadside \item CB2 | Chelmsford - Springfield Road |
##' Roadside \item CB3 | Chelmsford - Chignal St James | Urban
##' Background \item CB4 | Chelmsford - Baddow Road | Roadside \item
##' CC1 | Colchester - Lucy Lane South | Roadside \item CC2 |
##' Colchester - Brook Street | Roadside \item CC3 | Colchester -
##' Mersea Road | Roadside \item CD1 | Camden - Swiss Cottage |
##' Kerbside \item CD3 | Camden - Shaftesbury Avenue | Roadside \item
##' CD4 | Camden - St Martins College (NOX 1) | Urban Background \item
##' CD5 | Camden - St Martins College (NOX 2) | Urban Background \item
##' CD7 | Camden - Swiss Cottage Partisol | Kerbside \item CD9 |
##' Camden - Euston Road | Roadside \item CF1 | Cardiff Aethalometer |
##' Urban Background \item CH1 | Cheltenham | Urban Background \item
##' CI1 | Chichester - A27 Chichester Bypass | Roadside \item CI4 |
##' Chichester - Orchard Street | Roadside \item CK1 | Cookstown |
##' Suburban \item CP1 | Castle Point - Canvey Island | Urban
##' Background \item CR2 | Croydon - Purley Way | Roadside \item CR3 |
##' Croydon - Thornton Heath | Suburban \item CR4 | Croydon - George
##' Street | Roadside \item CR5 | Croydon - Norbury | Kerbside \item
##' CR6 | Croydon - Euston Road | Suburban \item CT1 | City of London
##' - Senator House | Urban Background \item CT2 | City of London -
##' Farringdon Street | Kerbside \item CT3 | City of London - Sir John
##' Cass School | Urban Background \item CT4 | City of London - Beech
##' Street | Roadside \item CT6 | City of London - Walbrook Wharf |
##' Roadside \item CT8 | City of London - Upper Thames Street |
##' Roadside \item CY1 | Crystal Palace - Crystal Palace Parade |
##' Roadside \item DC1 | Dacorum 1 Hemel Hempstead (Background) |
##' Urban Background \item DC2 | Dacorum 2 Hemel Hempstead
##' (Background) | Urban Background \item DC3 | High Street
##' Northchurch | Roadside \item DE1 | Derry City - Brandywell | Urban
##' Background \item DE2 | Derry City - Dales Corner | Roadside \item
##' DM1 | Dunmurry Aethalometer | Urban Background \item EA0 | Ealing
##' - Acton Town Hall FDMS | Roadside \item EA1 | Ealing - Ealing Town
##' Hall | Urban Background \item EA2 | Ealing - Acton Town Hall |
##' Roadside \item EA3 | Ealing 3 - A40 East Acton | Roadside \item
##' EA4 | Ealing Mobile - Hamilton Road | Roadside \item EA5 | Ealing
##' Mobile - Southall | Roadside \item EA6 | Ealing - Hanger Lane
##' Gyratory | Roadside \item EA7 | Ealing - Southall | Urban
##' Background \item EA8 | Ealing - Horn Lane | Industrial \item EA9 |
##' Ealing - Court Way | Roadside \item EB1 | Eastbourne - Devonshire
##' Park | Urban Background \item EB3 | Eastbourne - Holly Place |
##' Urban Background \item EH1 | E Herts Throcking (Rural) | Rural
##' \item EH2 | East Herts Sawbridgeworth (Background) | Urban
##' Background \item EH3 | East Herts Sawbridgeworth (Roadside) |
##' Roadside \item EH4 | East Herts Ware | Roadside \item EH5 | East
##' Herts Bishops Stortford | Roadside \item EI0 | Ealing - Greenford
##' | Urban Background \item EI1 | Ealing - Western Avenue | Roadside
##' \item EL1 | Elmbridge - Bell Farm Hersham | Urban Background \item
##' EL2 | Elmbridge - Esher High Street | Roadside \item EL3 |
##' Elmbridge - Hampton Court Parade | Roadside \item EL4 | Elmbridge
##' - Walton High Street | Kerbside \item EN1 | Enfield - Bushhill
##' Park | Suburban \item EN2 | Enfield - Church Street | Roadside
##' \item EN3 | Enfield - Salisbury School | Urban Background \item
##' EN4 | Enfield - Derby Road | Roadside \item EN5 | Enfield - Bowes
##' Primary School | Roadside \item FB1 | Rushmoor - Medway Drive |
##' Roadside \item GB0 | Greenwich and Bexley - Falconwood FDMS |
##' Roadside \item GB6 | Greenwich and Bexley - Falconwood | Roadside
##' \item GL1 | Glasgow Centre | Suburban \item GL4 | Glasgow Centre
##' Aethalometer | Suburban \item GN0 | Greenwich - A206 Burrage Grove
##' | Roadside \item GN2 | Greenwich - Millennium Village | Industrial
##' \item GN3 | Greenwich - Plumstead High Street | Roadside \item GN4
##' | Greenwich - Fiveways Sidcup Rd A20 | Roadside \item GR4 |
##' Greenwich - Eltham | Suburban \item GR5 | Greenwich - Trafalgar
##' Road | Roadside \item GR7 | Greenwich - Blackheath | Roadside
##' \item GR8 | Greenwich - Woolwich Flyover | Roadside \item GR9 |
##' Greenwich - Westhorne Avenue | Roadside \item HA0 | Harwell -
##' Carbon | Rural \item HA1 | Harwell Rural AURN | Rural \item HA2 |
##' Harwell Rural PARTISOL | Rural \item HA4 | Harwell Rural SMPS |
##' Rural \item HA9 | Harwell - Partisol SO4 | Urban Background \item
##' HF1 | Hammersmith and Fulham - Broadway | Roadside \item HF2 |
##' Hammersmith and Fulham - Brook Green | Urban Background \item HF3
##' | Hammersmith and Fulham - Scrubs Lane | Kerbside \item HG1 |
##' Haringey - Haringey Town Hall | Roadside \item HG2 | Haringey -
##' Priory Park | Urban Background \item HG3 | Haringey - Bounds Green
##' | Roadside \item HI0 | Hillingdon - Sipson Road | Suburban \item
##' HI1 | Hillingdon - South Ruislip | Roadside \item HI2 | Hillingdon
##' - Hillingdon Hospital | Roadside \item HI3 | Hillingdon - Oxford
##' Avenue | Roadside \item HK4 | Hackney - Clapton | Urban Background
##' \item HK6 | Hackney - Old Street | Roadside \item HL1 | Halifax
##' Aethalometer | Urban Background \item HM1 | Hertsmere Borehamwood
##' 1 (Background) | Urban Background \item HM4 | Hertsmere -
##' Borehamwood | Urban Background \item HO1 | Horsham Background |
##' Urban Background \item HO2 | Horsham - Park Way | Roadside \item
##' HO4 | Horsham - Storrington | Roadside \item HO5 | Horsham -
##' Cowfold | Roadside \item HR1 | Harrow - Stanmore | Urban
##' Background \item HR2 | Harrow - Pinner Road | Roadside \item HS1 |
##' Hounslow - Brentford | Roadside \item HS2 | Hounslow - Cranford |
##' Suburban \item HS3 | Hounslow - Brentford | Roadside \item HS4 |
##' Hounslow - Chiswick High Road | Roadside \item HS5 | Hounslow -
##' Brentford | Roadside \item HS6 | Hounslow - Heston Road | Roadside
##' \item HS7 | Hounslow - Hatton Cross | Urban Background \item HS9 |
##' Hounslow - Feltham | Roadside \item HT1 | Hastings - Bulverhythe |
##' Roadside \item HT2 | Hastings - Fresh Fields | Roadside \item HV1
##' | Havering - Rainham | Roadside \item HV2 | Havering - Harold Hill
##' | Suburban \item HV3 | Havering - Romford | Roadside \item HX0 |
##' Birmingham Tyburn Aethalometer | Urban Background \item IC6 | City
##' of London - Walbrook Wharf Indoor | Roadside \item IG4 | Greenwich
##' - Eltham Ecology Centre Indoor | Urban Background \item IS1 |
##' Islington - Upper Street | Urban Background \item IS2 | Islington
##' - Holloway Road | Roadside \item IS4 | Islington - Foxham Gardens
##' | Urban Background \item IS5 | Islington - Duncan Terrace |
##' Roadside \item IS6 | Islington - Arsenal | Urban Background \item
##' IT2 | Tower Hamlets - Mile End Road | Roadside \item KB1 | South
##' Kirkby Aethalometer | Urban Background \item KC0 | North
##' Kensington - Carbon | Urban Background \item KC1 | Kensington and
##' Chelsea - North Ken | Urban Background \item KC2 | Kensington and
##' Chelsea - Cromwell Road | Roadside \item KC3 | Kensington and
##' Chelsea - Knightsbridge | Roadside \item KC4 | Kensington and
##' Chelsea - Kings Road | Roadside \item KC5 | Kensington and Chelsea
##' - Earls Court Rd | Kerbside \item KC7 | Kensington and Chelsea -
##' North Ken FDMS | Urban Background \item KC9 | North Kensington -
##' Partisol SO4 | Urban Background \item KT1 | Kingston - Chessington
##' | Suburban \item KT2 | Kingston - Town Centre | Roadside \item LA1
##' | Luton Airport | Urban Background \item LB1 | Lambeth -
##' Christchurch Road | Roadside \item LB2 | Lambeth - Vauxhall Cross
##' | Roadside \item LB3 | Lambeth - Loughborough Junct | Urban
##' Background \item LB4 | Lambeth - Brixton Road | Kerbside \item LB5
##' | Lambeth - Bondway Interchange | Roadside \item LB6 | Lambeth -
##' Streatham Green | Urban Background \item LH0 | Hillingdon -
##' Harlington | Urban Background \item LH2 | Heathrow Airport | Urban
##' Background \item LL1 | Lullington Heath Rural AURN | Rural \item
##' LN1 | Luton - Challney Community College | Urban Background \item
##' LS1 | Lewes - Telscombe Cliffs | Roadside \item LS2 | Lewes -
##' Commercial Square | Roadside \item LS4 | Newhaven - Denton School
##' | Urban Background \item LW1 | Lewisham - Catford | Urban
##' Background \item LW2 | Lewisham - New Cross | Roadside \item LW3 |
##' Lewisham - Mercury Way | Industrial \item MA1 | Manchester
##' Piccadilly CPC | Urban Background \item MA2 | Manchester
##' Piccadilly | Urban Background \item MD1 | Mid Beds Biggleswade
##' (Roadside) | Roadside \item MD2 | Mid Beds Silsoe (Rural) | Rural
##' \item MD3 | Central Beds - Sandy | Roadside \item MD4 | Central
##' Beds - Marston Vale | Rural \item ME1 | Merton - Morden Civic
##' Centre | Roadside \item MP1 | Marchwood Power - Marchwood |
##' Industrial \item MP2 | Marchwood Power - Millbrook Rd Soton |
##' Industrial \item MR3 | Marylebone Road Aethalometer | Kerbside
##' \item MV1 | Mole Valley - Leatherhead | Rural \item MV2 | Mole
##' Valley - Lower Ashtead | Suburban \item MV3 | Mole Valley -
##' Dorking | Urban Background \item MW1 | Windsor and Maidenhead -
##' Frascati Way | Roadside \item MW2 | Windsor and Maidenhead -
##' Clarence Road | Roadside \item MW3 | Windsor and Maidenhead -
##' Ascot | Rural \item MY0 | Marylebone Road - Carbon | Kerbside
##' \item MY1 | Westminster - Marylebone Road | Kerbside \item MY7 |
##' Westminster - Marylebone Road FDMS | Kerbside \item NA5 |
##' Newtownabbey- Mallusk | Urban Background \item NA6 | Newtownabbey-
##' Shore Road | Roadside \item NE2 | Port Talbot TEOM and CPC | Urban
##' Background \item NF1 | New Forest - Holbury | Industrial \item NF2
##' | New Forest - Fawley | Industrial \item NF3 | New Forest -
##' Ringwood | Urban Background \item NF4 | New Forest - Totton |
##' Roadside \item NF5 | New Forest - Lyndhurst | Roadside \item NH1 |
##' North Herts Mobile - Baldock 1 | Roadside \item NH2 | North Herts
##' Mobile - Baldock 2 | Roadside \item NH3 | North Herts Mobile -
##' Royston | Urban Background \item NH4 | North Herts - Breechwood
##' Green | Urban Background \item NH5 | North Herts - Baldock
##' Roadside | Roadside \item NH6 | North Herts - Hitchin Library |
##' Roadside \item NK1 | North Kensington - CPC | Urban Background
##' \item NK3 | North Kensington Aethalometer | Urban Background \item
##' NK6 | North Kensington - URG | Urban Background \item NM1 | Newham
##' - Tant Avenue | Urban Background \item NM2 | Newham - Cam Road |
##' Roadside \item NM3 | Newham - Wren Close | Urban Background \item
##' NW1 | Norwich Centre Aethalometer | Urban Background \item OX0 |
##' Oxford Centre Roadside AURN | Urban Background \item OX1 | South
##' Oxfordshire - Henley | Roadside \item OX2 | South Oxfordshire -
##' Wallingford | Roadside \item OX3 | South Oxfordshire - Watlington
##' | Roadside \item OX4 | Oxford St Ebbes AURN | Urban Background
##' \item PO1 | Portsmouth Background AURN | Urban Background \item
##' PT6 | Port Talbot Dyffryn School | Industrial \item RB1 |
##' Redbridge - Perth Terrace | Urban Background \item RB2 | Redbridge
##' - Ilford Broadway | Kerbside \item RB3 | Redbridge - Fullwell
##' Cross | Kerbside \item RB4 | Redbridge - Gardner Close | Roadside
##' \item RB5 | Redbridge - South Woodford | Roadside \item RD0 |
##' Reading AURN - New Town | Urban Background \item RD1 | Reading -
##' Caversham Road | Roadside \item RD2 | Reading - Kings Road |
##' Roadside \item RD3 | Reading - Oxford Road | Roadside \item RG1 |
##' Reigate and Banstead - Horley | Suburban \item RG2 | Reigate and
##' Banstead - Horley South | Suburban \item RG3 | Reigate and
##' Banstead - Poles Lane | Rural \item RG4 | Reigate and Banstead -
##' Reigate High St | Kerbside \item RHA | Richmond - Lower Mortlake
##' Road | Roadside \item RHB | Richmond - Lower Mortlake Road |
##' Roadside \item RI1 | Richmond - Castelnau | Roadside \item RI2 |
##' Richmond - Barnes Wetlands | Suburban \item RI5 | Richmond Mobile
##' - St Margarets | Kerbside \item RI6 | Richmond Mobile - St
##' Margarets | Kerbside \item RI7 | Richmond Mobile - Richmond Park |
##' Suburban \item RI8 | Richmond Mobile - Richmond Park | Suburban
##' \item RIA | Richmond Mobile - George Street | Kerbside \item RIB |
##' Richmond Mobile - George Street | Kerbside \item RIC | Richmond
##' Mobile - Kew Rd | Kerbside \item RID | Richmond Mobile - Kew Rd |
##' Kerbside \item RIE | Richmond Mobile - Richmond Rd Twickenham |
##' Roadside \item RIF | Richmond Mobile - Richmond Rd Twickenham |
##' Roadside \item RIG | Richmond Mobile - Upper Teddington Rd |
##' Roadside \item RIH | Richmond Mobile - Upper Teddington Rd |
##' Roadside \item RII | Richmond Mobile - Somerset Rd Teddington |
##' Urban Background \item RIJ | Richmond Mobile - Somerset Rd
##' Teddington | Urban Background \item RIK | Richmond Mobile - St.
##' Margarets Grove | Urban Background \item RIL | Richmond Mobile -
##' St. Margarets Grove | Urban Background \item RIM | Richmond Mobile
##' - Petersham Rd Ham | Roadside \item RIN | Richmond Mobile -
##' Petersham Rd Ham | Roadside \item RIO | Richmond Mobile - Stanley
##' Rd Twickenham | Roadside \item RIP | Richmond Mobile - Stanley Rd
##' Twickenham | Roadside \item RIQ | Richmond Mobile - Richmond Rd
##' Twickenham | Roadside \item RIR | Richmond Mobile - Richmond Rd
##' Twickenham | Roadside \item RIS | Richmond Mobile - Lincoln Ave
##' Twickenham | Roadside \item RIU | Richmond Mobile - Mortlake Rd
##' Kew | Roadside \item RIW | Richmond - Upper Teddington Road |
##' Roadside \item RIY | Richmond - Hampton Court Road | Kerbside
##' \item RO1 | Rochford - Rayleigh High Street | Roadside \item RY1 |
##' Rother - Rye Harbour | Rural \item RY2 | Rother - De La Warr Road
##' | Roadside \item SA1 | St Albans - Fleetville | Urban Background
##' \item SB1 | South Beds - Dunstable | Urban Background \item SC1 |
##' Sevenoaks 1 | Suburban \item SD1 | Southend-on-Sea AURN | Urban
##' Background \item SE1 | Stevenage - Lytton Way | Roadside \item SH1
##' | Southampton Background AURN | Urban Background \item SH2 |
##' Southampton - Redbridge | Roadside \item SH3 | Southampton -
##' Onslow Road | Roadside \item SH4 | Southampton - Bitterne | Urban
##' Background \item SK1 | Southwark - Larcom Street | Urban
##' Background \item SK2 | Southwark - Old Kent Road | Roadside \item
##' SK5 | Southwark - A2 Old Kent Road | Roadside \item SL1 |
##' Sunderland Aethalometer | Urban Background \item ST1 | Sutton -
##' Robin Hood School | Roadside \item ST2 | Sutton - North Cheam |
##' Urban Background \item ST3 | Sutton - Carshalton | Suburban \item
##' ST4 | Sutton - Wallington | Kerbside \item ST5 | Sutton -
##' Beddington Lane | Industrial \item ST6 | Sutton - Worcester Park |
##' Kerbside \item ST7 | Sutton - Therapia Lane | Industrial \item SU1
##' | Sussex Mobile10 Stockbridge | Kerbside \item SU2 | Sussex
##' Mobile11 Jct Whitley Rd | Kerbside \item SU3 | Sussex Mobile12
##' Cowfold | Kerbside \item SU4 | Sussex Mobile 13 Newhaven |
##' Roadside \item SU5 | Sussex Mobile 14 Crawley | Roadside \item SU6
##' | Sussex Mobile15 Chichester County Hall | Urban Background \item
##' SU7 | Sussex Mobile 16 Warnham | Rural \item SU8 | Sussex Mobile
##' 17 Newhaven Paradise Park | Roadside \item SX1 | Sussex Mobile 1 |
##' Urban Background \item SX2 | Sussex Mobile 2 North Berstead |
##' Roadside \item SX3 | Sussex Mobile 3 | Roadside \item SX4 | Sussex
##' Mobile 4 Adur | Roadside \item SX5 | Sussex Mobile 5 Fresh Fields
##' Rd Hastings | Roadside \item SX6 | Sussex Mobile 6 Orchard St
##' Chichester | Roadside \item SX7 | Sussex Mobile 7 New Road
##' Newhaven | Roadside \item SX8 | Sussex Mobile 8 Arundel | Kerbside
##' \item SX9 | Sussex Mobile 9 Newhaven Kerbside | Kerbside \item TD0
##' | Richmond - National Physical Laboratory | Suburban \item TE0 |
##' Tendring St Osyth AURN | Rural \item TE1 | Tendring - Town Hall |
##' Roadside \item TH1 | Tower Hamlets - Poplar | Urban Background
##' \item TH2 | Tower Hamlets - Mile End Road | Roadside \item TH3 |
##' Tower Hamlets - Bethnal Green | Urban Background \item TH4 | Tower
##' Hamlets - Blackwall | Roadside \item TK1 | Thurrock - London Road
##' (Grays) | Urban Background \item TK2 | Thurrock - Purfleet |
##' Roadside \item TK3 | Thurrock - Stanford-le-Hope | Roadside \item
##' TK8 | Thurrock - London Road (Purfleet) | Roadside \item TR1 |
##' Three Rivers - Rickmansworth | Urban Background \item UT1 |
##' Uttlesford - Saffron Walden Fire Station | Roadside \item UT2 |
##' Uttlesford - Takeley | Urban Background \item UT3 | Uttlesford -
##' Broxted Farm | Rural \item VS1 | Westminster - Victoria Street |
##' Kerbside \item WA1 | Wandsworth - Garratt Lane | Roadside \item
##' WA2 | Wandsworth - Town Hall | Urban Background \item WA3 |
##' Wandsworth - Roehampton | Rural \item WA4 | Wandsworth - High
##' Street | Roadside \item WA6 | Wandsworth - Tooting | Roadside
##' \item WA7 | Wandsworth - Putney High Street | Kerbside \item WA8 |
##' Wandsworth - Putney High Street Facade | Roadside \item WA9 |
##' Wandsworth - Putney | Urban Background \item WE0 | Kensington and
##' Chelsea - Pembroke Road | Urban Background \item WF1 | Watford
##' (Roadside) | Roadside \item WF2 | Watford - Watford Town Hall |
##' Roadside \item WH1 | Welwyn Hatfield - Council Offices | Urban
##' Background \item WL1 | Waltham Forest - Dawlish Road | Urban
##' Background \item WL2 | Waltham Forest - Mobile | Roadside \item
##' WL3 | Waltham Forest - Chingford | Roadside \item WL4 | Waltham
##' Forest - Crooked Billet | Kerbside \item WL5 | Waltham Forest -
##' Leyton | Roadside \item WM0 | Westminster - Horseferry Road |
##' Urban Background \item WM3 | Westminster - Hyde Park Partisol |
##' Roadside \item WM4 | Westminster - Charing Cross Library |
##' Roadside \item WM5 | Westminster - Covent Garden | Urban
##' Background \item WM6 | Westminster - Oxford St | Kerbside \item
##' WR1 | Bradford Town Hall Aethalometer | Urban Background \item WT1
##' | Worthing - Grove Lodge | Kerbside \item XB1 | Bletchley | Rural
##' \item XS1 | Shukri Outdoor | Industrial \item XS2 | Shukri Indoor
##' | Industrial \item XS3 | Osiris mobile | Urban Background \item
##' YH1 | Harrogate Roadside | Roadside \item ZA1 | Ashford Rural -
##' Pluckley | Rural \item ZA2 | Ashford Roadside | Roadside \item ZA3
##' | Ashford Background | Urban Background \item ZA4 | Ashford M20
##' Background | Urban Background \item ZC1 | Chatham Roadside - A2 |
##' Roadside \item ZD1 | Dover Roadside - Town Hall | Roadside \item
##' ZD2 | Dover Roadside - Townwall Street | Roadside \item ZD3 |
##' Dover Background - Langdon Cliff | Urban Background \item ZD4 |
##' Dover Background - East Cliff | Urban Background \item ZD5 | Dover
##' Coast Guard Met | Urban Background \item ZD6 | Dover Docks |
##' Industrial \item ZF1 | Folkestone Suburban - Cheriton | Suburban
##' \item ZG1 | Gravesham Backgrnd - Northfleet | Urban Background
##' \item ZG2 | Gravesham Roadside - A2 | Roadside \item ZG3 |
##' Gravesham Ind Bgd - Northfleet | Urban Background \item ZH1 |
##' Thanet Rural - Minster | Rural \item ZH2 | Thanet Background -
##' Margate | Urban Background \item ZH3 | Thanet Airport - Manston |
##' Urban Background \item ZH4 | Thanet Roadside - Ramsgate | Roadside
##' \item ZL1 | Luton Background | Urban Background \item ZM1 |
##' Maidstone Meteorological | Urban Background \item ZM2 | Maidstone
##' Roadside - Fairmeadow | Kerbside \item ZM3 | Maidstone Rural -
##' Detling | Rural \item ZR1 | Dartford Roadside - St Clements |
##' Kerbside \item ZR2 | Dartford Roadside 2 - Town Centre | Roadside
##' \item ZR3 | Dartford Roadside 3 - Bean Interchange | Roadside
##' \item ZS1 | Stoke Rural AURN | Rural \item ZT1 | Tonbridge
##' Roadside - Town Centre | Roadside \item ZT2 | Tunbridge Wells
##' Background - Town Hall | Urban Background \item ZT3 | Tunbridge
##' Wells Rural - Southborough | Rural \item ZT4 | Tunbridge Wells
##' Roadside - St Johns | Roadside \item ZT5 | Tonbridge Roadside 2 -
##' High St | Roadside \item ZV1 | Sevenoaks - Greatness Park | Urban
##' Background \item ZV2 | Sevenoaks - Bat and Ball | Roadside \item
##' ZW1 | Swale Roadside - Ospringe A2 | Roadside \item ZW2 | Swale
##' Background - Sheerness | Urban Background \item ZW3 | Swale
##' Roadside 2 - Ospringe Street | Roadside \item ZY1 | Canterbury
##' Backgrnd - Chaucer TS | Urban Background \item ZY2 | Canterbury
##' Roadside - St Dunstans | Roadside \item ZY4 | Canterbury St Peters
##' Place | Roadside }
##'
##' @param site Site code of the network site to import e.g. "my1" is
##' Marylebone Road. Several sites can be imported with \code{site = c("my1",
##' "kc1")} --- to import Marylebone Road and North Kensignton for example.
##' @param year Year or years to import. To import a sequence of years from
##' 1990 to 2000 use \code{year = 1990:2000}. To import several specific years
##' use \code{year = c(1990, 1995, 2000)} for example.
##' @param pollutant Pollutants to import. If omitted will import all
##' pollutants from a site. To import only NOx and NO2 for example use
##' \code{pollutant = c("nox", "no2")}.
##' @param met Should meteorological data be added to the import data? The
##' default is \code{FALSE}. If \code{TRUE} wind speed (m/s), wind direction
##' (degrees), solar radiation and rain amount are available. See details
##' below.
##'
##' Access to reliable and free meteorological data is problematic.
##' @param units By default the returned data frame expresses the units in mass
##' terms (ug/m3 for NOx, NO2, O3, SO2; mg/m3 for CO). Use \code{units =
##' "volume"} to use ppb etc. PM10_raw TEOM data are multiplied by 1.3 and
##' PM2.5 have no correction applied. See details below concerning PM10
##' concentrations.
##' @param extra Not currently used.
##' @export
##' @return Returns a data frame of hourly mean values with date in POSIXct
##' class and time zone GMT.
##' @author David Carslaw and Ben Barratt
##' @seealso \code{\link{importAURN}}, \code{\link{importADMS}},
##' \code{\link{importSAQN}}
##' @keywords methods
##' @examples
##'
##'
##' ## import all pollutants from Marylebone Rd from 2000:2009
##' \dontrun{mary <- importKCL(site = "my1", year = 2000:2009)}
##'
##' ## import nox, no2, o3 from Marylebone Road and North Kensington for 2000
##' \dontrun{thedata <- importKCL(site = c("my1", "kc1"), year = 2000,
##' pollutant = c("nox", "no2", "o3"))}
##'
##' ## import met data too...
##' \dontrun{my1 <- importKCL(site = "my1", year = 2008, met = TRUE)}
##'
##' # see example in importAURN about how to reshape the output
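##'
##' ## a minimal sketch of that reshaping (an untested illustration, not from
##' ## the original documentation; it assumes a 'nox' column is present):
##' \dontrun{wide <- reshape(thedata[, c("date", "site", "nox")],
##' idvar = "date", timevar = "site", direction = "wide")}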
##'
importKCL <- function(site = "my1", year = 2009, pollutant = "all", met = FALSE,
                      units = "mass", extra = FALSE) {
  ## get rid of R check annoyances
  sites <- NULL; v10 <- NULL; v2.5 <- NULL
  site <- toupper(site)
  ## rows with these site codes
  ## this preserves order of site names
  con <- url(paste("http://www.londonair.org.uk/r_data/", "sites", ".RData", sep = ""))
  load(con)
  close(con)
  id <- sapply(site, function(x) which(sites$SiteCode %in% toupper(x)))
  site.name <- sites$SiteName[id]
  ## RData files to import
  files <- lapply(site, function(x) paste(x, "_", year, sep = ""))
  files <- do.call(c, files)
  loadData <- function(x) {
    tryCatch({
      fileName <- paste("http://www.londonair.org.uk/r_data/", x, ".RData", sep = "")
      con <- url(fileName)
      load(con)
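      ## NB: the remote .RData file evidently stores a data frame that is
      ## itself named 'x', so load() replaces the filename argument 'x' with
      ## the loaded data -- which is why 'x$date' works below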
      close(con)
      ## need to check the date starts at start of year...
      start <- ISOdatetime(year = as.numeric(format(x$date[1], "%Y")), month = 1,
                           day = 1, hour = 0, min = 0, sec = 0, tz = "GMT")
      if (x$date[1] != start) {
        ## add first row
        x1 <- data.frame(date = start, site = x$site[1])
        x <- plyr::rbind.fill(x1, x)
      }
      x <- date.pad(x) ## pad out missing dates
      x
    },
    error = function(ex) { cat(x, "does not exist - ignoring that one.\n") })
  }
  thedata <- lapply(files, loadData)
  thedata <- plyr::ldply(thedata, bind_rows)
  if (is.null(thedata)) {
    warning("No data to import - check site codes and year.", call. = FALSE)
    return()
  }
  if (nrow(thedata) < 1) {
    warning("No data to import - check site codes and year.", call. = FALSE)
    return()
  }
  thedata$code <- thedata$site
  thedata$site <- factor(thedata$site, labels = site.name, levels = site)
  ## change names
  names(thedata) <- tolower(names(thedata))
  ## if particular pollutants have been selected
  if (!missing(pollutant)) {
    ## use identical() so that a vector of several pollutants does not
    ## produce a length > 1 condition in the if()
    if (!identical(pollutant, "all")) {
      thedata <- thedata[, c("date", pollutant, "site", "code")]
    }
  }
  ## change units to mass units, use values in ugm3Conversion table
  if (units == "mass") {
if ("nox" %in% names(thedata)) thedata$nox <- thedata$nox * 1.91
if ("no2" %in% names(thedata)) thedata$no2 <- thedata$no2 * 1.91
if ("o3" %in% names(thedata)) thedata$o3 <- thedata$o3 * 2.00
if ("so2" %in% names(thedata)) thedata$so2 <- thedata$so2 * 2.66
if ("co" %in% names(thedata)) thedata$co <- thedata$co * 1.16
if ("pm10_raw" %in% names(thedata)) thedata$pm10_raw <- thedata$pm10_raw* 1.30
unitMessage <- "NOTE - mass units are used \nug/m3 for NOx, NO2, SO2, O3; mg/m3 for CO\nPM10_raw is raw data multiplied by 1.3\n"
}
## rename PM volatile/non volatile components if present
if ("pmfr" %in% names(thedata)) {
thedata <- rename_(thedata, v10 = "pmfr")
thedata <- transform(thedata, v10 = -1 * v10)
}
if ("p2fr" %in% names(thedata)) {
thedata <- rename_(thedata, v2.5 = "p2fr")
thedata <- transform(thedata, v2.5 = -1 * v2.5)
}
if ("pmfb" %in% names(thedata)) thedata <- rename_(thedata, nv10 = "pmfb")
if ("p2fb" %in% names(thedata)) thedata <- rename_(thedata, nv2.5 = "p2fb")
if (units != "mass") {
if ("pm10" %in% names(thedata)) thedata$pm10_raw <- thedata$pm10_raw* 1.30
unitMessage <- "NOTE - volume units are used \nppbv for NOx, NO2, SO2, O3; ppmv for CO\nPM10_raw is raw data multiplied by 1.3\n"
}
## don't add additional species
if (!extra) {
theNames <- c("date", "co", "nox", "no2", "no", "o3", "so2", "pm10", "pm10_raw", "pm25",
"v10", "v2.5", "nv10", "nv2.5", "code", "site")
thedata <- thedata[, which(names(thedata) %in% theNames)]
}
if (is.null(nrow(thedata))) return()
## warning about recent, possibly unratified data
timeDiff <- difftime(Sys.time(), max(thedata$date), units='days')
if (timeDiff < 180) {
warning("Some of the more recent data may not be ratified.")}
if (met) { ## merge met data
load(url(paste("http://www.londonair.org.uk/r_data/", "metData", ".RData", sep = "")))
#closeAllConnections()
thedata <- merge(thedata, met, by = "date")
}
## make sure it is in GMT
attr(thedata$date, "tzone") <- "GMT"
thedata <- thedata[order(thedata$site, thedata$date), ]
cat(unitMessage)
thedata
}
## ---- end of file: /R/importKCL.R (repo: willdrysdale/ggopenair, no license) ----
library(ggplot2)
### Name: qplot
### Title: Quick plot
### Aliases: qplot quickplot
### ** Examples
# Use data from data.frame
qplot(mpg, wt, data = mtcars)
qplot(mpg, wt, data = mtcars, colour = cyl)
qplot(mpg, wt, data = mtcars, size = cyl)
qplot(mpg, wt, data = mtcars, facets = vs ~ am)
## No test:
qplot(1:10, rnorm(10), colour = runif(10))
qplot(1:10, letters[1:10])
mod <- lm(mpg ~ wt, data = mtcars)
qplot(resid(mod), fitted(mod))
f <- function() {
a <- 1:10
b <- a ^ 2
qplot(a, b)
}
f()
# To set aesthetics, wrap in I()
qplot(mpg, wt, data = mtcars, colour = I("red"))
# qplot will attempt to guess what geom you want depending on the input
# both x and y supplied = scatterplot
qplot(mpg, wt, data = mtcars)
# just x supplied = histogram
qplot(mpg, data = mtcars)
# just y supplied = scatterplot, with x = seq_along(y)
qplot(y = mpg, data = mtcars)
# Use different geoms
qplot(mpg, wt, data = mtcars, geom = "path")
qplot(factor(cyl), wt, data = mtcars, geom = c("boxplot", "jitter"))
qplot(mpg, data = mtcars, geom = "dotplot")
## End(No test)
## ---- end of file: /data/genthat_extracted_code/ggplot2/examples/qplot.Rd.R (repo: surayaaramli/typeRrh, no license) ----
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fills.R
\name{fills}
\alias{fills}
\title{Get List of Most Recent Fills}
\usage{
fills(api.key, secret, passphrase)
}
\arguments{
\item{api.key}{Mandatory character value. This is the API key as generated by GDAX. Typically a 32 character value.}
\item{secret}{Mandatory character value. This is the API secret as generated by GDAX. Typically an 88 character value.}
\item{passphrase}{Mandatory character value. This is the passphrase as generated by GDAX. Typically an 11 character value.}
}
\value{
Data frame with fills for all products or for the provided products. The volume is quoted in USD.
}
\description{
This is an auth-based function. The user must have valid API keys generated by GDAX, which must be passed as mandatory arguments. The function takes no additional arguments and returns a list of all previously filled orders.
}
\examples{
\dontrun{
fills(api.key = your_key, secret = your_api_secret, passphrase = your_api_pass)
}
}
% ---- end of file: /man/fills.Rd (repo: QuickQuant/rgdax, permissive license) ----
\name{stat0026.ChenShapiro}
\alias{stat0026.ChenShapiro}
\alias{stat0026}
\title{The Chen-Shapiro test for normality}
\description{The Chen-Shapiro test for normality is used
\itemize{
\item to compute its statistic and p-value by calling function \code{\link{statcompute}};
\item to compute its quantiles by calling function \code{\link{compquant}} or \code{\link{many.crit}};
\item to compute its power by calling function \code{\link{powcomp.fast}} or \code{\link{powcomp.easy}}.
}
}
\references{
Pierre Lafaye de Micheaux, Viet Anh Tran (2016). PoweR: A
Reproducible Research Tool to Ease Monte Carlo Power Simulation
Studies for Goodness-of-fit Tests in R. \emph{Journal of Statistical Software}, \bold{69(3)}, 1--42. doi:10.18637/jss.v069.i03
Chen, L. and Shapiro, S.S. (1995), An alternative test for normality based on normalized spacings,
\emph{Journal of Statistical Computation and Simulation}, \bold{53}, 269--288.
}
\author{P. Lafaye de Micheaux, V. A. Tran}
\seealso{See \code{\link{Normality.tests}} for other goodness-of-fit tests for normality.
}
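\examples{
## A hypothetical sketch only: it assumes the statcompute() interface from the
## PoweR manual, with stat.index = 26 selecting the Chen-Shapiro statistic.
\dontrun{statcompute(stat.index = 26, data = rnorm(50), levels = c(0.05))}
}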
\keyword{statistic, test, normality, Chen-Shapiro}
% ---- end of file: /fuzzedpackages/PoweR/man/stat0026.ChenShapiro.Rd (repo: akhikolla/testpackages, no license) ----
#
# plot.ppp.R
#
# $Revision: 1.78 $ $Date: 2014/11/10 11:16:58 $
#
#
#--------------------------------------------------------------------------
plot.ppp <- local({
## determine symbol map for marks of points
default.symap.points <- function(x, ...,
chars=NULL, cols=NULL,
maxsize=NULL, meansize=NULL, markscale=NULL) {
marx <- marks(x)
if(is.null(marx)) {
## null or constant map
return(symbolmap(..., chars=chars, cols=cols))
}
if(!is.null(dim(marx)))
stop("Internal error: multivariate marks in default.symap.points")
argnames <- names(list(...))
shapegiven <- "shape" %in% argnames
chargiven <- (!is.null(chars)) || ("pch" %in% argnames)
assumecircles <- !(shapegiven || chargiven)
sizegiven <- ("size" %in% argnames) ||
(("cex" %in% argnames) && !shapegiven)
if(inherits(marx, c("Date", "POSIXt"))) {
## ......... marks are dates or date/times .....................
timerange <- range(marx, na.rm=TRUE)
shapedefault <- if(!assumecircles) list() else list(shape="circles")
if(sizegiven) {
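      ## resolve.defaults() merges these argument lists, with earlier lists
      ## taking precedence over later ones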
g <- do.call("symbolmap",
resolve.defaults(list(range=timerange),
list(...),
shapedefault,
list(chars=chars, cols=cols)))
return(g)
}
## attempt to determine a scale for the marks
y <- scaletointerval(marx, 0, 1, timerange)
y <- y[is.finite(y)]
if(length(y) == 0) return(symbolmap(..., chars=chars, cols=cols))
scal <- mark.scale.default(y, as.owin(x),
markscale=markscale, maxsize=maxsize,
meansize=meansize,
characters=chargiven)
if(is.na(scal)) return(symbolmap(..., chars=chars, cols=cols))
## scale determined
sizefun <- function(x, scal=1) {
(scal/2) * scaletointerval(x, 0, 1, timerange)
}
formals(sizefun)[[2]] <- scal ## ensures value of 'scal' is printed
##
g <- do.call("symbolmap",
resolve.defaults(list(range=timerange),
list(...),
shapedefault,
list(size=sizefun)))
return(g)
}
if(is.numeric(marx)) {
## ............. marks are numeric values ...................
marx <- marx[is.finite(marx)]
if(length(marx) == 0)
return(symbolmap(..., chars=chars, cols=cols))
markrange <- range(marx)
##
if(sizegiven) {
g <- do.call("symbolmap",
resolve.defaults(list(range=markrange),
list(...),
if(assumecircles) list(shape="circles") else list(),
list(chars=chars, cols=cols)))
return(g)
}
## attempt to determine a scale for the marks
if(all(markrange == 0))
return(symbolmap(..., chars=chars, cols=cols))
scal <- mark.scale.default(marx, as.owin(x),
markscale=markscale, maxsize=maxsize,
meansize=meansize,
characters=chargiven)
if(is.na(scal)) return(symbolmap(..., chars=chars, cols=cols))
## scale determined
if(markrange[1] >= 0) {
## all marks are nonnegative
shapedefault <-
if(!assumecircles) list() else list(shape="circles")
cexfun <- function(x, scal=1) { scal * x }
circfun <- function(x, scal=1) { scal * x/2 }
formals(cexfun)[[2]] <- formals(circfun)[[2]] <- scal
sizedefault <-
if(sizegiven) list() else
if(chargiven) list(cex=cexfun) else list(size=circfun)
} else {
## some marks are negative
shapedefault <-
if(!assumecircles) list() else
list(shape=function(x) { ifelse(x >= 0, "circles", "squares") })
cexfun <- function(x, scal=1) { scal * abs(x) }
circfun <- function(x, scal=1) { scal * ifelse(x >= 0, x/2, -x) }
formals(cexfun)[[2]] <- formals(circfun)[[2]] <- scal
sizedefault <-
if(sizegiven) list() else
if(chargiven) list(cex=cexfun) else list(size=circfun)
}
g <- do.call("symbolmap",
resolve.defaults(list(range=markrange),
list(...),
shapedefault,
sizedefault,
chars=chars, cols=cols))
return(g)
}
## ........... non-numeric marks .........................
um <- if(is.factor(marx)) levels(marx) else sort(unique(marx))
ntypes <- length(um)
## resolve parameters 'chars' and 'cols'
chars <- default.charmap(ntypes, chars)
if(!is.null(cols))
cols <- rep.int(cols, ntypes)[1:ntypes]
g <- symbolmap(inputs=um, ..., chars=chars, cols=cols)
return(g)
}
default.charmap <- function(n, ch=NULL) {
if(!is.null(ch))
return(rep.int(ch, n)[1:n])
if(n <= 25)
return(1:n)
ltr <- c(letters, LETTERS)
if(n <= 52)
return(ltr[1:n])
## wrapped sequence of letters
warning("Too many types to display every type as a different character")
return(ltr[1 + (0:(n - 1) %% 52)])
}
## main function
plot.ppp <-
function(x, main, ..., clipwin=NULL,
chars=NULL, cols=NULL, use.marks=TRUE,
which.marks=NULL, add=FALSE, type=c("p", "n"),
legend=TRUE, leg.side=c("left", "bottom", "top", "right"),
leg.args=list(),
symap=NULL, maxsize=NULL, meansize=NULL, markscale=NULL, zap=0.01,
show.window=show.all, show.all=!add, do.plot=TRUE,
multiplot=TRUE)
{
if(missing(main))
main <- short.deparse(substitute(x))
type <- match.arg(type)
if(!missing(maxsize) || !missing(markscale) || !missing(meansize))
warn.once("circlescale",
"Interpretation of arguments maxsize and markscale",
"has changed (in spatstat version 1.37-0 and later).",
"Size of a circle is now measured by its diameter.")
if(!is.null(clipwin))
x <- x[clipwin]
## sensible default position
legend <- legend && show.all
if(legend) {
leg.side <- match.arg(leg.side)
vertical <- (leg.side %in% c("left", "right"))
}
# if(type == "n" || npoints(x) == 0) {
# ## plot the window only
# xwindow <- x$window
# if(do.plot)
# do.call("plot.owin",
# resolve.defaults(list(xwindow),
# list(...),
# list(main=main, invert=TRUE, add=add,
# type=if(show.window) "w" else "n")))
# if(is.null(symap)) symap <- symbolmap()
# attr(symap, "bbox") <- as.rectangle(xwindow)
# return(invisible(symap))
# }
## ................................................................
## Handle multiple columns of marks as separate plots
## (unless add=TRUE or which.marks selects a single column
## or multipage = FALSE)
if(use.marks && is.data.frame(mx <- marks(x))) {
implied.all <- is.null(which.marks)
want.several <- implied.all || is.data.frame(mx <- mx[,which.marks])
do.several <- want.several && !add && multiplot
if(do.several) {
## generate one plot for each column of marks
y <- as.listof(lapply(mx, function(z, P) setmarks(P,z), P=x))
out <- do.call("plot",
resolve.defaults(list(x=y, main=main,
show.window=show.window,
do.plot=do.plot,
type=type),
list(...),
list(equal.scales=TRUE),
list(legend=legend,
leg.side=leg.side,
leg.args=leg.args),
list(chars=chars, cols=cols,
maxsize=maxsize,
meansize=meansize,
markscale=markscale,
zap=zap)))
return(invisible(out))
}
if(is.null(which.marks)) {
which.marks <- 1
if(do.plot) message("Plotting the first column of marks")
}
}
## ............... unmarked, or single column of marks ....................
## Determine symbol map and mark values to be used
y <- x
if(!is.marked(x, na.action="ignore") || !use.marks) {
## Marks are not mapped.
marx <- NULL
if(is.null(symap)) symap <- symbolmap(..., chars=chars, cols=cols)
} else {
## Marked point pattern
marx <- marks(y, dfok=TRUE)
if(is.data.frame(marx)) {
      ## select column or take first column
marx <- marx[, which.marks]
y <- setmarks(y, marx)
}
if(npoints(y) > 0) {
ok <- complete.cases(as.data.frame(y))
if(!any(ok)) {
warning("All mark values are NA; plotting locations only.")
if(is.null(symap)) symap <- symbolmap()
} else if(any(!ok)) {
warning(paste("Some marks are NA;",
"corresponding points are omitted."))
x <- x[ok]
y <- y[ok]
marx <- marks(y)
}
}
## apply default symbol map
if(is.null(symap))
symap <- default.symap.points(y, chars=chars, cols=cols,
maxsize=maxsize, meansize=meansize,
markscale=markscale,
...)
}
# gtype <- symbolmaptype(symap)
## Determine bounding box for main plot
BB <- as.rectangle(x)
sick <- inherits(x, "ppp") && !is.null(rejects <- attr(x, "rejects"))
if(sick) {
## Get relevant parameters
par.direct <- list(main=main, use.marks=use.marks,
maxsize=maxsize, meansize=meansize, markscale=markscale)
par.rejects <- resolve.1.default(list(par.rejects=list(pch="+")),
list(...))
par.all <- resolve.defaults(par.rejects, par.direct)
rw <- resolve.defaults(list(...), list(rejectwindow=NULL))$rejectwindow
## determine window for rejects
rwin <-
if(is.null(rw))
rejects$window
else if(is.logical(rw) && rw)
rejects$window
else if(inherits(rw, "owin"))
rw
else if(is.character(rw)) {
switch(rw,
box={boundingbox(rejects, x)},
ripras={ripras(c(rejects$x, x$x), c(rejects$y, x$y))},
stop(paste("Unrecognised option: rejectwindow=", rw)))
} else stop("Unrecognised format for rejectwindow")
if(is.null(rwin))
stop("Selected window for rejects pattern is NULL")
BB <- boundingbox(BB, as.rectangle(rwin))
}
## Augment bounding box with space for legend, if appropriate
legend <- legend && (symbolmaptype(symap) != "constant")
if(legend) {
## guess maximum size of symbols
maxsize <- invoke.symbolmap(symap, marx,
corners(as.rectangle(x)),
add=add, do.plot=FALSE)
sizeguess <- if(maxsize <= 0) NULL else (1.5 * maxsize)
leg.args <- append(list(side=leg.side, vertical=vertical), leg.args)
## draw up layout
legbox <- do.call.matched(plan.legend.layout,
append(list(B=BB, size = sizeguess,
started=FALSE, map=symap),
leg.args))
## bounding box for everything
BB <- legbox$A
}
## return now if not plotting
attr(symap, "bbox") <- BB
if(!do.plot)
return(invisible(symap))
## ............. start plotting .......................
pt <- prepareTitle(main)
main <- pt$main
nlines <- pt$nlines
blankmain <- if(nlines == 0) "" else rep(" ", nlines)
cex.main <- resolve.1.default(list(cex.main=1), list(...))
plot(BB, type="n", add=add, main=blankmain, show.all=show.all,
cex.main=cex.main)
if(sick) {
if(show.window) {
## plot windows
if(!is.null(rw)) {
## plot window for rejects
rwinpardefault <- list(lty=2,lwd=1,border=1)
rwinpars <-
resolve.defaults(par.rejects, rwinpardefault)[names(rwinpardefault)]
do.call("plot.owin", append(list(rwin, add=TRUE), rwinpars))
}
## plot window of main pattern
do.call("plot.owin",
resolve.defaults(list(x$window, add=TRUE),
list(...),
list(invert=TRUE)))
}
if(type != "n") {
## plot reject points
do.call("plot.ppp", append(list(rejects, add=TRUE), par.all))
warning(paste(rejects$n, "illegal points also plotted"))
}
## the rest is added
add <- TRUE
}
## Now convert to bona fide point pattern
x <- as.ppp(x)
xwindow <- x$window
## Plot observation window (or at least the main title)
do.call("plot.owin",
resolve.defaults(list(x=xwindow,
add=TRUE,
main=main,
type=if(show.window) "w" else "n",
show.all=show.all),
list(...),
list(invert=TRUE)))
# else if(show.all) fakemaintitle(as.rectangle(xwindow), main, ...)
if(type != "n") {
## plot symbols ##
invoke.symbolmap(symap, marx, x, add=TRUE)
}
## add legend
if(legend) {
b <- legbox$b
legendmap <- if(length(leg.args) == 0) symap else
do.call("update", append(list(object=symap), leg.args))
do.call("plot",
append(list(x=legendmap, main="", add=TRUE,
xlim=b$xrange, ylim=b$yrange),
leg.args))
}
return(invisible(symap))
}
plot.ppp
})
mark.scale.default <- function(marx, w, markscale=NULL,
maxsize=NULL, meansize=NULL,
characters=FALSE) {
## establish values of markscale, maxsize, meansize
ngiven <- (!is.null(markscale)) +
(!is.null(maxsize)) +
(!is.null(meansize))
if(ngiven > 1)
stop("Only one of the arguments markscale, maxsize, meansize",
" should be given", call.=FALSE)
if(ngiven == 0) {
## if ALL are absent, enforce the spatstat defaults
## (which could also be null)
pop <- spatstat.options("par.points")
markscale <- pop$markscale
maxsize <- pop$maxsize
meansize <- pop$meansize
}
## Now check whether markscale is fixed
if(!is.null(markscale)) {
stopifnot(markscale > 0)
return(markscale)
}
# Usual case: markscale is to be determined from maximum/mean physical size
if(is.null(maxsize) && is.null(meansize)) {
## compute default value of 'maxsize'
## guess appropriate max physical size of symbols
bb <- as.rectangle(w)
maxsize <- 1.4/sqrt(pi * length(marx)/area(bb))
maxsize <- min(maxsize, diameter(bb) * 0.07)
## updated: maxsize now represents *diameter*
maxsize <- 2 * maxsize
} else {
if(!is.null(maxsize)) stopifnot(maxsize > 0) else stopifnot(meansize > 0)
}
# Examine mark values
absmarx <- abs(marx)
maxabs <- max(absmarx)
tiny <- (maxabs < 4 * .Machine$double.eps)
if(tiny)
return(NA)
## finally determine physical scale for symbols
if(!is.null(maxsize)) {
scal <- maxsize/maxabs
} else {
meanabs <- mean(absmarx)
scal <- meansize/meanabs
}
if(!characters) return(scal)
## if using characters ('pch') we need to
## convert physical sizes to 'cex' values
charsize <- max(sidelengths(as.rectangle(w)))/40
return(scal/charsize)
}
fakemaintitle <- function(bb, main, ...) {
## Try to imitate effect of 'title(main=main)' above a specified box
if(!any(nzchar(main))) return(invisible(NULL))
bb <- as.rectangle(bb)
x0 <- mean(bb$xrange)
y0 <- bb$yrange[2] + length(main) * diff(bb$yrange)/12
parnames <- c('cex.main', 'col.main', 'font.main')
parlist <- par(parnames)
parlist <- resolve.defaults(list(...), parlist)[parnames]
names(parlist) <- c('cex', 'col', 'font')
do.call.matched("text.default",
resolve.defaults(list(x=x0, y=y0, labels=main),
parlist, list(...)))
return(invisible(NULL))
}
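## (Illustrative sketch, not part of the spatstat source) Typical use is via
## the S3 generic; assumes the spatstat package is loaded and uses made-up data:
## X <- rpoispp(100)                    # unmarked Poisson point pattern
## plot(X)
## Y <- setmarks(X, runif(npoints(X)))  # numeric marks -> circles by default
## plot(Y, markscale = 0.1)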
| /R/plot.ppp.R | no_license | jmetz/spatstat | R | false | false | 16,728 | r |
#................................
#....Data Preprocessing Part ....
#................................
#Packages used below (dbscan and proxy are called via '::' and only need to be installed)
library(ggplot2) #plotting
library(caret)   #confusionMatrix
library(ROCR)    #prediction()/performance() for AUC
#library(R.matlab) #readMat, only needed for the real .mat datasets
#Choose either a real dataset or a synthetic dataset
#Select a synthetic data set from Synthetic Datasets folder
Y = read.csv('Synthetic Datasets/data1.csv')
#Select a real data set from Real Datasets folder.
#Remove # from 3 lines below to select a real dataset
#data <- readMat('Real Datasets/mnist.mat')
#data = lapply(data, unlist, use.names=FALSE)
#Y <- as.data.frame(data)
#Choose the number of columns(features) from the Y
X = Y[,c(1:2)]
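#(Hedged alternative) If neither file is at hand, a synthetic stand-in with the
#same shape - two feature columns plus a 0/1 outlier label in column 3 - can be
#generated as below; the cluster locations and counts are assumptions:
#set.seed(42)
#normal_pts <- cbind(rnorm(200, 0, 1), rnorm(200, 0, 1), 0)
#outlier_pts <- cbind(runif(10, 4, 6), runif(10, 4, 6), 1)
#Y <- as.data.frame(rbind(normal_pts, outlier_pts))
#X <- Y[,c(1:2)]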
#.......................
#....Functions Part ....
#.......................
#get k from natural neighbor
NaN_k <- function(dataset)
{
dataset <- as.matrix(dataset)
if(!is.numeric(dataset)){
stop('dataset input is not numeric')
}
n <- nrow(dataset)
r <- 1
nn <- ceiling(sqrt(n))
dist.obj <- dbscan::kNN(dataset, nn)
  # grow the neighbourhood size r until the number of points with no reverse
  # neighbours stabilizes (the natural-neighbour stopping rule)
  while(TRUE){
if(r>nn){
nn <- r + 10
dist.obj <- dbscan::kNN(dataset, nn)
}
    nb_0 <- tabulate(dist.obj$id[,1:r]) #nb_0[i] = reverse-neighbour count of point i
    numb <- length(nb_0[nb_0==0])       #points that are nobody's neighbour yet
if(r==1){
numb_upd <- numb
}
if(r!=1 & numb_upd==numb){
break
}
numb_upd <- length(nb_0[nb_0==0])
r=r+1
}
#max_nb <- max(nb_0)
#return(max_nb)
return(r)
}
#RDos_gaussian_function
RDOS_gaussian <- function(dataset, k=5, h=1)
{
n <- nrow(dataset)
d <- ncol(dataset)
dataset <- as.matrix(dataset)
if(!is.numeric(k))
{
stop('k input must be numeric')
}
if(k>=n||k<1)
{
stop('k input must be less than number of observations and greater than 0')
}
if(!is.numeric(h))
{
stop('h input must be numeric')
}
if(!is.numeric(dataset))
{
stop('dataset input is not numeric')
}
distMatrix <- as.matrix(dist(dataset))
dist.obj <- dbscan::kNN(dataset, k)
  #sNN matrix: entry (i,j) = number of shared k-nearest neighbours of points i and j
  func.dist <- function(x1, x2)
  {
    length(intersect(x1, x2))
  }
sNN_matrix <- as.matrix(proxy::dist(x = dist.obj$id, method = func.dist, diag = T, upper = T))
neighborhood <- list()
  #neighborhood loop: for each point take the union of its k-nearest (kNN),
  #reverse-nearest (rNN) and shared-nearest (sNN) neighbours
for(i in 1:n)
{
kNN <- dist.obj$id[i,]
rNN <- as.numeric(which(dist.obj$id==i, arr.ind = TRUE)[,1])
sNN <- as.numeric(names(sNN_matrix[i,][sNN_matrix[i,]>0]))
neighborhood[[i]] <- union(kNN, c(rNN, sNN))
}
px <- NULL
#gaussian kernel loop
for(i in 1:n)
{
Kgaussian <- 1/((2*pi)^(d/2))*exp(-((distMatrix[i, neighborhood[[i]]])/(h^2)))
px[i] <- (1/(length(neighborhood[[i]])+1))*sum((1/(h^d))*Kgaussian)
}
#RDOS <- NULL
RDOS <- vector()
#RDOS
for(i in 1:n)
{
RDOS[i] <- (sum(px[neighborhood[[i]]]))/(length(neighborhood[[i]])*px[i])
}
return(RDOS)
}
#RDos_laplace function
RDOS_laplace <- function(dataset, k=5, h=1)
{
n <- nrow(dataset)
d <- ncol(dataset)
dataset <- as.matrix(dataset)
if(!is.numeric(k))
{
stop('k input must be numeric')
}
if(k>=n||k<1)
{
stop('k input must be less than number of observations and greater than 0')
}
if(!is.numeric(h))
{
stop('h input must be numeric')
}
if(!is.numeric(dataset))
{
stop('dataset input is not numeric')
}
distMatrix <- as.matrix(dist(dataset))
dist.obj <- dbscan::kNN(dataset, k)
#sNN matrix
func.dist <- function(x1, x2)
{
length(intersect(x1, x2))
}
sNN_matrix <- as.matrix(proxy::dist(x = dist.obj$id, method = func.dist, diag = T, upper = T))
neighborhood <- list()
#neighborhood loop
for(i in 1:n)
{
kNN <- dist.obj$id[i,]
rNN <- as.numeric(which(dist.obj$id==i, arr.ind = TRUE)[,1])
sNN <- as.numeric(names(sNN_matrix[i,][sNN_matrix[i,]>0]))
neighborhood[[i]] <- union(kNN, c(rNN, sNN))
}
px <- NULL
  #laplace kernel loop (note: the bandwidth is hard-coded as 0.5 below;
  #the h argument is accepted but not used in this kernel)
for(i in 1:n)
{
Klaplace <- exp(-(distMatrix[i, neighborhood[[i]]]) * 0.5)
px[i] <- (1/(length(neighborhood[[i]])+1))*sum(Klaplace)
}
#RDOS <- NULL
RDOS <- vector()
#RDOS
for(i in 1:n)
{
RDOS[i] <- (sum(px[neighborhood[[i]]]))/(length(neighborhood[[i]])*px[i])
}
return(RDOS)
}
#get data frame from numeric
getDataFrame <- function(v)
{
res <- NULL
for(i in v)
{
res <- rbind(res, data.frame(i))
}
return(res)
}
#Function to convert numeric to Factor
getFactor <- function(v)
{
res <- factor(v)
return(res)
}
#Function to plot the AUC vs h curve
plotAUC_h <- function(X, Y, k)
{
t <- 0;
X_h <- NULL;
for(i in 1:10)
{
t <- t + 0.2;
X_h <- c(X_h, t)
}
true_values <- Y[,c(3)]
actual_values <- ifelse(true_values==0,0,1)
Auc <- NULL
for(h in X_h)
{
outlier_score <- RDOS_gaussian(dataset=X, k, h)
names(outlier_score) <- 1:nrow(X)
outlier_value <- ifelse(outlier_score>1.2, 1, 0)
predicted_values <- outlier_value
pred.obj <- prediction(predicted_values, actual_values)
auc.tmp <- performance(pred.obj,"auc")
auc <- as.numeric(auc.tmp@y.values)
Auc <- c(Auc, auc)
}
X_h <- getDataFrame(X_h)
#df dataframe will contain two columns: col1 - h_values, col2 - auc_values
df <- cbind(X_h, Auc)
colnames(df) <- c("h", "AUC")
#View(df)
p <- ggplot(df, aes(h)) + geom_line(aes(y = AUC)) +
ylab(label = "AUC") + xlab(label = "h") +ggtitle("AUC vs h")
print(p)
#savePlot(p)
}
#Function to plot points
plotPoints <- function(df, outlier_val, k)
{
df <- cbind(df, outlier_val)
colnames(df) <- c("x", "y", "outlier")
p <- ggplot(df, aes(x, y)) + scale_shape_identity() +
geom_point(aes(color = factor(outlier)), position = "jitter", size = 2) +
ggtitle("Data 8") + scale_color_discrete(name="outlier")
p <- p + theme(
axis.title.x = element_blank(),
axis.title.y = element_blank()
)
#print(p)
return(p)
}
#Function to save the graph in the plots folder
savePlot <- function(myPlot)
{
#enter name of plot in here
#plot will be saved under the name dummy_1 in plots folder
jpeg("plots/dummy_1.jpeg")
print(myPlot)
dev.off()
}
getPerformanceMetrics <- function(o_v, t_v)
{
predicted_values = getFactor(o_v)
actual_values = getFactor(t_v)
  result <- confusionMatrix(predicted_values, actual_values)
  #print(result)
  #outliers are labeled 1, but caret treats "0" as the positive class, so the
  #outlier-class precision and recall come from Neg Pred Value and Specificity
  precision <- result$byClass['Neg Pred Value']
  recall <- result$byClass['Specificity']
  fmeasure <- 2 * ((precision * recall)/(precision + recall))
  accuracy <- result$overall['Accuracy'] #Accuracy lives in $overall, not $byClass
#auc <- result$byClass['Balanced Accuracy']
pred.obj <- prediction(o_v, t_v)
auc.tmp <- performance(pred.obj,"auc");
auc <- as.numeric(auc.tmp@y.values)
print("Precision")
print(precision)
print("Recall")
print(recall)
print("Fmeasure")
print(fmeasure)
print("AUC")
print(auc)
}
#..................................
#....Running the functions Part....
#..................................
#..Get value of k from Natural Neighbor..
k <- NaN_k(dataset = X)
print(k)
plotAUC_h(X, Y, k)
outlier_score <- RDOS_gaussian(dataset=X, k, h=1.2)
#outlier_score <- RDOS_laplace(dataset=X, k, h=1.2)
names(outlier_score) <- 1:nrow(X)
#The outlier-score threshold is user defined, chosen by inspecting the score distribution of the dataset
outlier_score1 <- ifelse(outlier_score>1.1, 1, 0)
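#(Hedged alternative) A data-driven cutoff flags, say, the top 5% of RDOS
#scores instead of a fixed value; the 5% figure is an assumption:
#cutoff <- quantile(outlier_score, 0.95)
#outlier_score1 <- ifelse(outlier_score > cutoff, 1, 0)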
#.........................................
#........Performance metrics..............
#.........................................
#the third column of Y is the label column showing whether an object is an outlier or not
true_values <- Y[,c(3)]
true_values <- ifelse(true_values==0,0,1)
getPerformanceMetrics(outlier_score1, true_values)
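#A ROC curve gives a threshold-free view of the raw scores; this sketch uses
#the same ROCR functions already used above for the AUC:
roc.pred <- prediction(outlier_score, true_values)
roc.perf <- performance(roc.pred, "tpr", "fpr")
plot(roc.perf, main = "ROC curve of RDOS scores")
abline(0, 1, lty = 2) #chance line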
#.........................................
#.............Plot points.................
#.........................................
#plot points based on outlier
#0 means point is a normal point and 1 means point is an outlier
outlier_val <- getDataFrame(outlier_score1)
p <- plotPoints(X, outlier_val, k)
print(p)
#savePlot(p)
| /NaN_OD.R | no_license | abdul1617/Outlier-Detection | R | false | false | 8,164 | r |
#-------------------------------------------------------------------------------
# Copyright (c) 2012 University of Illinois, NCSA.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the
# University of Illinois/NCSA Open Source License
# which accompanies this distribution, and is available at
# http://opensource.ncsa.illinois.edu/license.html
#-------------------------------------------------------------------------------
.datatable.aware=TRUE
##-------------------------------------------------------------------------------------------------#
##' Converts a met CF file to a model specific met file. The input
##' files are called <in.path>/<in.prefix>.YYYY.cf
##'
##' @name met2model.BIOCRO
##' @title Write BioCro met files
##' @param in.path path on disk where CF file lives
##' @param in.prefix prefix for each file
##' @param outfolder location where model specific output is written.
##' @param ... can pass lat, lon, start.date and end.date
##' @return a data.table containing the combined BioCro met data.
##' @export
##' @author Rob Kooper, David LeBauer
##-------------------------------------------------------------------------------------------------#
met2model.BIOCRO <- function(in.path, in.prefix, outfolder, overwrite=FALSE, ...) {
  ## lat, lon, start.date and end.date are documented above as passed via '...'
  dots <- list(...)
  lat <- dots$lat; lon <- dots$lon
  start.date <- dots$start.date; end.date <- dots$end.date
  ncfiles = dir(in.path, full.names = TRUE, pattern = paste0(in.prefix, ".*\\.nc$"),
                all.files = FALSE, recursive = FALSE)
metlist <- list()
for(file in ncfiles){
met.nc <- nc_open(file)
tmp.met <- load.cfmet(met.nc, lat = lat, lon = lon, start.date = start.date, end.date = end.date)
metlist[[file]] <- cf2biocro(tmp.met)
}
met <- rbindlist(metlist)
return(met)
}
##-------------------------------------------------------------------------------------------------#
##' Converts a CF data frame into a BioCro met input
##'
##' @name cf2biocro
##' @title Convert CF-formatted met data to BioCro met
##' @param met data.table object with met for a single site; output from \code{\link{load.cfmet}}
##' \itemize{
##' \item year int
##' \item month int
##' \item day int: day of month (1-31)
##' \item doy int: day of year (1-366)
##' \item hour int (0-23)
##' \item date YYYY-MM-DD HH:MM:SS POSIXct
##' \item wind_speed num m/s
##' \item northward_wind
##' \item eastward_wind
##' \item ppfd (optional; if missing, requires surface_downwelling_shortwave_flux_in_air)
##' \item surface_downwelling_shortwave_flux_in_air
##' \item air_pressure (Pa) (optional; if missing, requires relative_humidity)
##' \item specific_humidity (optional; if missing, requires relative_humidity)
##' \item relative_humidity (optional; if missing, requires air_pressure and specific_humidity)
##' \item precipitation_flux
##' \item air_temperature
##' }
##' @return data.table / data.frame with fields
##' \itemize{
##' \item doy day of year
##' \item hr hour
##' \item solar solar radiation (PPFD)
##' \item temp temperature, degrees celsius
##' \item rh relative humidity, as fraction (0-1)
##' \item windspeed m/s
##' \item precip cm/h
##' }
##' @export cf2biocro
##' @author David LeBauer
cf2biocro <- function(met, longitude = NULL, zulu2solarnoon = FALSE){
if((!is.null(longitude)) & zulu2solarnoon){
solarnoon_offset <- ud.convert(longitude / 360, 'day', 'minute')
met[, `:=` (solardate = date + minutes(solarnoon_offset))]
}
if(!"relative_humidity" %in% colnames(met)){
if(all(c("air_temperature", "air_pressure", "specific_humidity") %in% colnames(met))){
rh <- qair2rh(qair = met$specific_humidity,
temp = ud.convert(met$air_temperature, "Kelvin", "Celsius"),
pres = ud.convert(met$air_pressure, "Pa", "hPa"))
met <- cbind(met, relative_humidity = rh * 100)
} else {
logger.error("neither relative_humidity nor [air_temperature, air_pressure, and specific_humidity]",
"are in met data")
}
}
if(!"ppfd" %in% colnames(met)){
if("surface_downwelling_shortwave_flux_in_air" %in% colnames(met)){
par <- sw2par(met$surface_downwelling_shortwave_flux_in_air)
ppfd <- par2ppfd(par)
} else {
logger.error("Need either ppfd or surface_downwelling_shortwave_flux_in_air in met dataset")
}
}
if(!"wind_speed" %in% colnames(met)){
if(all(c("northward_wind", "eastward_wind") %in% colnames(met))){
wind_speed <- sqrt(met$northward_wind^2 + met$eastward_wind^2)
} else {
logger.error("neither wind_speed nor both eastward_wind and northward_wind are present in met data")
}
}
## Convert RH from percent to fraction
## BioCro functions
  if(met[, max(relative_humidity) > 1]){ ## i.e. RH was given in percent
met[, `:=` (relative_humidity = relative_humidity/100)]
}
newmet <- met[, list(year = year(date), doy = yday(date),
hour = round(hour(date) + minute(date) / 60, 1),
SolarR = ppfd,
Temp = ud.convert(air_temperature, "Kelvin", "Celsius"),
RH = relative_humidity,
WS = wind_speed,
precip = ud.convert(precipitation_flux, "s-1", "h-1"))]
return(as.data.frame(newmet))
}
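## (Illustrative sketch, not part of PEcAn) A minimal cf2biocro call on a
## fabricated one-row CF table; all values below are assumptions:
## demo.met <- data.table(date = as.POSIXct("2010-06-01 12:00:00", tz = "UTC"),
##                        ppfd = 1500, air_temperature = 298.15,
##                        relative_humidity = 60, wind_speed = 2.5,
##                        precipitation_flux = 1e-4)
## cf2biocro(demo.met)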
| /models/biocro/R/met2model.BIOCRO.R | permissive | davidjpmoore/pecan | R | false | false | 5,252 | r |
#' reclass_data
#' function to create a reclassification matrix from a min and max value and then create a reclassified raster
#'
#' @param raster the binary raster file created in data_processing.Rmd
#' @param min_thresh the minimum threshold value
#' @param max_thresh max threshold value
#' @param one_thresh TRUE if raster is only min SST or max SST, FALSE if using raster that already has a range in it
#' @param which_thresh MAX if using a maximum raster like max SST MIN if using a min raster
#'
#' @return a reclassified raster of suitable areas from thresholds
reclass_data = function(raster, min_thresh, max_thresh,
one_thresh = FALSE, which_thresh = ""){
# create reclassification matrix
if (one_thresh == TRUE & which_thresh == "MAX"){
# if using a max raster layer
rcl_matrix <- c(-Inf, max_thresh, 1,
max_thresh, Inf, 0)
} else if (one_thresh == TRUE & which_thresh == "MIN"){
# if using a min raster layer
rcl_matrix <- c(-Inf, min_thresh, 0,
min_thresh, Inf, 1)
} else { # if raster has both min and max values
rcl_matrix <- c(-Inf, min_thresh, 0,
min_thresh, max_thresh, 1,
max_thresh, Inf, 0)
}
# reclassify
rcl_raster <- reclassify(raster, rcl = rcl_matrix)
return(rcl_raster)
} | /functions/reclass_data.R | no_license | annaclairemarley/gulf_seaweed_suit | R | false | false | 1,370 | r | #' reclass_data
#' function to create a reclassification matrix from a min and max value and then create a reclassified raster
#'
#' @param raster the binary raster file created in data_processing.Rmd
#' @param min_thresh the minimum threshold value
#' @param max_thresh max threshold value
#' @param one_thresh TRUE if raster is only min SST or max SST, FALSE if using raster that already has a range in it
#' @param which_thresh MAX if using a maximum raster like max SST MIN if using a min raster
#'
#' @return a reclassified raster of suitable areas from thresholds
reclass_data = function(raster, min_thresh, max_thresh,
one_thresh = FALSE, which_thresh = ""){
# create reclassification matrix
if (one_thresh == TRUE & which_thresh == "MAX"){
# if using a max raster layer
rcl_matrix <- c(-Inf, max_thresh, 1,
max_thresh, Inf, 0)
} else if (one_thresh == TRUE & which_thresh == "MIN"){
# if using a min raster layer
rcl_matrix <- c(-Inf, min_thresh, 0,
min_thresh, Inf, 1)
} else { # if raster has both min and max values
rcl_matrix <- c(-Inf, min_thresh, 0,
min_thresh, max_thresh, 1,
max_thresh, Inf, 0)
}
# reclassify
rcl_raster <- reclassify(raster, rcl = rcl_matrix)
return(rcl_raster)
} |
# We are going to use the GA package
# Make sure that the package is installed.
# You install a package in R with the function install.packages():
#
# install.packages("GA")
library(GA)
#
# To install packages without root access:
#
# install.packages("GA", lib="/mylibs/Rpackages/") ## or some other path, e.g., C:\yourFolder
# library(GA, lib.loc="/mylibs/Rpackages/")
#
#
# EXAMPLE 1: One-dimensional function optimization
#
#
# The asymmetric double claw is difficult to maximize because there are many local solutions.
# Standard derivative-based optimizers would simply climb up the hill closest to the starting value.
f <- function(x)
{
y <- (0.46 * (dnorm(x, -1, 2/3) + dnorm(x, 1, 2/3)) +
(1/300) * (dnorm(x, -0.5, 0.01) + dnorm(x, -1, 0.01) +
dnorm(x, -1.5, 0.01)) +
(7/300) * (dnorm(x, 0.5, 0.07) + dnorm(x, 1, 0.07) +
dnorm(x, 1.5, 0.07)))
  y  # implicit return, equivalent to return(y)
}
# Plot the double claw
curve(f, from = -3, to = 3, n = 1000)
# For the maximization of this function we may use f directly as the fitness function
GA <- ga(type = "real-valued", fitness = f, lower = -3, upper = 3)
# type = "real-valued" means the candidate solutions are continuous values
# lower and upper bound the search interval
# The object returned can be plotted
plot(GA)
summary(GA)
# plot the solution
curve(f, from = -3, to = 3, n = 1000)
points(GA@solution, f(GA@solution), col="red")
# draw a red circle at the solution point (x, f(x))
# The evolution of the population units and the corresponding functions values at each
# generation can be obtained by defining a new monitor function and then passing this
# function as an optional argument to ga
myMonitor <- function(obj)
{
curve(f, obj@lower, obj@upper, n = 1000, main = paste("iteration =", obj@iter))
points(obj@population, obj@fitness, pch = 20, col = 2)
rug(obj@population, col = 2)
Sys.sleep(0.2)
}
GA <- ga(type = "real-valued", fitness = f, lower = -3, upper = 3, monitor = myMonitor)
## Inspect fitness across generations
plot(GA)
#
#
# EXAMPLE 2: Model fitting
#
#
# We consider a data on the growth of trees
# The age at which the tree was measured
Age <- c(2.44, 12.44, 22.44, 32.44, 42.44, 52.44, 62.44, 72.44, 82.44, 92.44, 102.44, 112.44)
# The bole volume of the tree
Vol <- c(2.2, 20.0, 93.0, 262.0, 476.0, 705.0, 967.0, 1203.0, 1409.0, 1659.0, 1898.0, 2106.0)
plot(Age, Vol)
# An ecological model for the plant size (measured by volume) as a function of age is the Richards curve:
# f(x) = a*(1-exp(-b*x))^c, where a, b, in c are the model parameters
# Let's fit the Richards curve using genetic algorithms
# We first define our model function (argument params represents a vector of the parameters a, b, and c)
model <- function(params)
{
params[1] * (1 - exp(-params[2] * Age))^params[3]
}
# We define the fitness function as the sum of squares of the differences between estimated and observed data
myFitness2 <- function(params)
{
-sum((Vol - model(params))^2)
}
# The fitness function needs to be maximized with respect to the model's parameters, given the observed data in x and y.
# A blend crossover is used for improving the search over the parameter space: for two parents x1 and x2 (assume x1 < x2)
# it randomly picks a solution in the range [x1 - k*(x2-x1), x2 + k*(x2-x1)], where k represents a constant between 0 and 1.
# We restrict the search interval for a,b, and c to [1000.0, 5000.0], [0.0, 5.0], and [0.0, 5.0], respectively.
GA2 <- ga(type = "real-valued", fitness = myFitness2, lower = c(1000, 0, 0), upper = c(5000, 5, 5),
popSize = 500, crossover = gareal_blxCrossover, maxiter = 5000, run = 200, names = c("a", "b", "c"))
# check what gareal_blxCrossover is by writing ?gareal_blxCrossover
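# A toy illustration of the blend-crossover sampling interval described above
# (this mimics the idea, it is not the package's internal implementation):
blx_child <- function(x1, x2, k = 0.5) {
  lo <- min(x1, x2) - k * abs(x2 - x1)
  hi <- max(x1, x2) + k * abs(x2 - x1)
  runif(1, lo, hi)  # child drawn uniformly from the blended interval
}
blx_child(2, 4)  # sampled from [1, 5] for k = 0.5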
summary(GA2)
# Let's plot our solution
plot(Age, Vol)
lines(Age, model(GA2@solution))
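# Since the fitness was defined as the negative sum of squared errors, the
# residual sum of squares of the fitted curve is simply:
-myFitness2(GA2@solution[1, ])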
# we can use a monitor function to plot the current solution
myMonitor2 <- function(obj)
{
i <- which.max(obj@fitness)
plot(Age, Vol)
lines(Age, model(obj@population[i,]), col="red")
title(paste("iteration =", obj@iter), font.main = 1)
Sys.sleep(0.1)
}
GA2 <- ga(type = "real-valued", fitness = myFitness2, lower = c(1000, 0, 0), upper = c(5000, 5, 5),
popSize = 500, crossover = gareal_blxCrossover, maxiter = 500, run = 200, names = c("a", "b", "c"), monitor=myMonitor2)
#
#
# EXAMPLE 3: The Knapsack problem
#
#
# The Knapsack problem is defined as follows: given a set of items, each with a mass and a value, determine the subset
# of items to be included in a collection so that the total weight is less than or equal to a given limit and the total value
# is as large as possible.
# a vector of the items' values
values <- c(5, 8, 3, 4, 6, 5, 4, 3, 2)
# a vector of the item's weights
weights <- c(1, 3, 2, 4, 2, 1, 3, 4, 5)
# the knapsack capacity
Capacity <- 10
# This will be our constraint
# A binary GA can be used to solve the knapsack problem. The solution to this problem is a binary string equal to the number
# of items where the ith bit is 1 if the ith item is in the subset and 0 otherwise. The fitness function should penalize
# unfeasible solutions.
knapsack <- function(x)
{
  f <- sum(x * values)   # total value of the selected items (bits set to 1)
  w <- sum(x * weights)  # total weight of the selected items
  if (w > Capacity) 
    f <- Capacity - w    # negative penalty so infeasible solutions are rejected
f
}
GA3 <- ga(type = "binary", fitness = knapsack, nBits = length(weights), maxiter = 1000, run = 200, popSize = 100)
# type = "binary": candidate solutions are binary vectors of length nBits
# the GA maximizes the fitness function defined above
summary(GA3)
GA3@solution
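# Decode the best chromosome: which items are packed, and their totals
best <- GA3@solution[1, ]
which(best == 1)    # indices of the selected items
sum(best * values)  # total value achieved
sum(best * weights) # total weight (must not exceed Capacity)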
#
# Example 4: ESTABLISHING A TIMETABLE
#
# A small football club has a youth team and a senior team. The player
# training program has seven components: stamina training, strength training,
# technique, tactics, psychological preparation, teamwork, and regeneration.
# Due to lack of funds, for each component, a single staff member is responsible
# for both the youth and the senior team, with the exceptions of tactics and
# stamina training, where two staff members are assigned, one to each team.
#
# The weekly training regime is summarized in the following table:
#
#+----------+---------------------+-----------------+-----------------+
#| Coach | Component | Senior team | Youth team |
#+----------+---------------------+-----------------+-----------------+
#| Anze | Strength training | 1 time a week | 1 time a week |
#| Bojan | Technique | 3 times a week | 3 times a week |
#| Ciril | Regeneration | 2 times a week | 2 times a week |
#| Dusan | Stamina training | doesn't conduct | 4 times a week |
#| Erik | Stamina training | 4 times a week | doesn't conduct |
#| Filip | Teamwork | 3 times a week | 3 times a week |
#| Gasper | Psychological prep. | 1 time a week | 1 time a week |
#| Hugo | Tactics | 1 time a week | doesn't conduct |
#| Iztok | Tactics | doesn't conduct | 1 time a week |
#+----------+---------------------+-----------------+-----------------+
#
# Training is performed from Monday to Friday in four different time slots:
# 8:00 - 10:00, 10:15 - 12:15, 14:00 - 16:00, and 16:15 - 18:15.
#
# Constraints:
#
# - each time slot can hold only one component for the youth team and one component
# for the senior team (the youth and senior teams train separately, so a single
# staff member can only train one of the two teams in a single time slot).
#
# - a team is not allowed to train the same component 2 or more times within one day.
#
# - the main purpose of the Tactics training component is to prepare the team for
# the upcoming match. Matches are usually played during the weekend, so Tactics
# training should be scheduled for Thursday in the 16:15 - 18:15 time slot.
#
# - after a match, the players need to rest. Therefore, there is no training in
# the Monday 8:00 - 10:00 time slot.
#
# - the stamina training coach Dusan is not available on Monday mornings
# (8:00 - 10:00 in 10:15 - 12:15 time slots)
#
# - there can be no Technique training on Wednesdays, because coach Bojan is
# not available.
#
#
# Produce a training schedule that takes all of the above restrictions
# into account!
#
#
# VARIABLES
#
# senior - number of sessions per component for the senior team
# youth - number of sessions per component for the youth team
# staff - coaching staff -> the staff's actual occupacy is solved for, how much a certain coach can handle is (hard) coded in senior and youth variables!
# slots - possible slots
senior = c(1, 3, 2, 0, 4, 3, 1, 1, 0)
youth = c(1, 3, 2, 4, 0, 3, 1, 0, 1)
slots = 4*5
valueBin <- function(timetable)
{
# organize data into a multi-dimensional array
# days, time slots, staff, teams
t <- array(as.integer(timetable), c(5,4,9,2))
violations <- 0 # minimize the violation to a specific timetable
# check all the conditions
# check the number of sessions per component
for (i in 1:9)
{
violations <- violations + abs(sum(t[,,i,1]) - senior[i])
violations <- violations + abs(sum(t[,,i,2]) - youth[i])
}
# it is not allowed to train the same component 2 or more times within one day
for (i in 1:9)
{
violations <- violations + sum(apply(t[,,i,1], 1, sum) > 1)
violations <- violations + sum(apply(t[,,i,2], 1, sum) > 1)
}
# a single staff member can only train one of the two teams in a single time slot
violations <- violations + sum(t[,,,1] == t[,,,2] & t[,,,1] != 0)
# each time slot can hold only one component for the youth team and one component
# for the senior team
for (i in 1:5)
for (j in 1:4)
{
violations <- violations + max(0, sum(t[i,j,,1]) - 1)
violations <- violations + max(0, sum(t[i,j,,2]) - 1)
}
  # Tactics training should be scheduled for Thursday (day 4) in the
  # 16:15 - 18:15 time slot (slot 4)
  violations <- violations + (t[4,4,8,1] != 1)
  violations <- violations + (t[4,4,9,2] != 1)
# there is no training in the Monday 8:00 - 10:00 time slot
violations <- violations + sum(t[1,1,,])
# the stamina training coach Dusan is not available on Monday mornings
violations <- violations + sum(t[1,1:2,4,] == 1)
# there can be no Technique training on Wednesdays
violations <- violations + sum(t[3,,2,] == 1)
  -violations # the GA maximizes fitness, so returning -violations drives the number of violations toward 0
} # the algorithm maximizes this negative value (i.e., it wants as few violations as possible)
myInitPopulation <- function(object)
{
p <- gabin_Population(object)
for (i in 1:nrow(p))
{
t <- array(p[i,], c(5,4,9,2))
    # Tactics training on Thursdays (day 4) in the 16:15 - 18:15 time slot (slot 4)
    t[4,4,8,1]=1
    t[4,4,9,2]=1
# there is no training in the Monday 8:00 - 10:00 time slot
t[1,1,,] = 0
# there is no Stamina training on Monday mornings
t[1,1:2,4,] = 0
# there is no Technique training on Wednesdays
t[3,,2,] = 0
p[i,] <- as.vector(t)
}
p
}
GA4 <- ga(type = "binary", fitness = valueBin, nBits = 4*5*9*2,
popSize = 500, maxiter = 10, run = 200, population = myInitPopulation)
timetable2 <- function(solution,coach,team){
  t <- array(solution, c(5,4,9,2))
  t[,,coach,team] # the 5x4 day-by-slot matrix for the given coach and team
}
## timetable of coach 2 (Bojan) for team 1 (the senior team)
t <- timetable2(GA4@solution[1,],2,1)
t
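# To make the 0/1 matrix easier to read, label its day and time-slot
# dimensions (a sketch; the day/slot ordering is the one assumed throughout):
dimnames(t) <- list(c("Mon","Tue","Wed","Thu","Fri"),
                    c("8:00-10:00","10:15-12:15","14:00-16:00","16:15-18:15"))
t # a 1 marks a session held by coach 2 (Bojan) with the senior team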
#
#
# EXAMPLE 5: Traveling salesman problem
#
#
# a permutation-type genetic optimization problem
# Given a list of cities and the distances between each pair of cities, what is the shortest possible route that visits
# each city exactly once and returns to the origin city?
data("eurodist", package = "datasets")
D <- as.matrix(eurodist)
D
# An individual round tour is represented as a permutation of a default numbering of the cities defining the current order
# in which the cities are to be visited
# Calculation of the tour length
tourLength <- function(tour)
{
N <- length(tour)
dist <- 0
for (i in 2:N)
dist <- dist + D[tour[i-1],tour[i]]
dist <- dist + D[tour[N],tour[1]]
dist
}
# The fitness function to be maximized is defined as the reciprocal of the tour length.
tspFitness <- function(tour)
{
1/tourLength(tour)
} # the GA maximizes fitness, so maximizing the reciprocal 1/tourLength minimizes the tour length
GA5 <- ga(type = "permutation", fitness = tspFitness, lower = 1, upper = ncol(D), popSize = 50, maxiter = 5000, run = 500, pmutation = 0.2)
# these algorithms are stochastic, so different runs may return different results
summary(GA5)
# Reconstruct the solution found
tour <- GA5@solution[1, ]
tour <- c(tour, tour[1])
tourLength(tour)
colnames(D)[tour]
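# A possible way to visualize the tour: project the distance matrix into 2D
# with classical multidimensional scaling (an illustrative sketch; cmdscale()
# gives only approximate city coordinates):
mds <- cmdscale(eurodist)
xy <- cbind(mds[, 1], -mds[, 2]) # flip y so the map is oriented north-up
plot(xy, type = "n", asp = 1, xlab = "", ylab = "", main = "Best tour found")
text(xy, labels = colnames(D), cex = 0.8)
n <- length(tour) # tour is already closed (the first city is repeated at the end)
segments(xy[tour[-n], 1], xy[tour[-n], 2], xy[tour[-1], 1], xy[tour[-1], 2], col = "steelblue")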
 | /practicals/02_GA&data_visualization/lab2_code.R | permissive | jakobudovic/IS2020 | R | false | false | 12,745 | r |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/androidenterprise_objects.R
\name{CollectionsListResponse}
\alias{CollectionsListResponse}
\title{CollectionsListResponse Object}
\usage{
CollectionsListResponse(collection = NULL)
}
\arguments{
\item{collection}{An ordered collection of products which can be made visible on the Google Play Store to a selected group of users}
}
\value{
CollectionsListResponse object
}
\description{
CollectionsListResponse Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
The collection resources for the enterprise.
}
 | /googleandroidenterprisev1.auto/man/CollectionsListResponse.Rd | permissive | Phippsy/autoGoogleAPI | R | false | true | 622 | rd |
#Load the irtpp library
library(IRTpp)
#Simulate 100 tests of 10 items and 1000 individuals
x = simulateTest(model="2PL",items=10,individuals=1000,reps=100,threshold=0.05,seed=1)
#Calibrate these tests
ip=lapply(x$test,function(t)irtpp(t,2))
#Mean of the estimations
mn=Reduce("+",ip)/100
par(mfrow=c(1,3))
flat = lapply(ip,c)
pa = unlist(lapply(flat,function(x)x[1:10]))
pb = unlist(lapply(flat,function(x)x[11:20]))
pc = unlist(lapply(flat,function(x)x[21:30]))
hist(pa,breaks=30);rug(pa);
hist(pb,breaks=30);rug(pb);
hist(pc,breaks=30);rug(pc);
# Population parameters
matrix(c(unlist(x$itempars)),ncol=3)
# Mean of the estimates across the 100 replications
mn
# sum of column 3 per replication (the guessing column; expected near 0 for a 2PL fit)
lapply(ip,function(x){sum(x[,3])})
ip[[98]] # inspect a single replication
# re-calibrate the last simulated test and compare with the stored fit
est = irtpp(x$test[[100]],2)
ip[[100]]
est
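# A rough accuracy summary across the 100 replications (a sketch; it assumes
# every element of ip is an items x 3 matrix in the same column order as
# x$itempars):
true.pars <- matrix(c(unlist(x$itempars)), ncol = 3)
bias <- mn - true.pars
rmse <- sqrt(Reduce("+", lapply(ip, function(e) (e - true.pars)^2)) / 100)
round(bias, 3)
round(rmse, 3)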
 | /irtppbenchmark.R | permissive | jcliberatol/irtppscripts | R | false | false | 718 | r |
## R Code for Plot 2
# Total PM2.5 emissions from all sources in Baltimore City, Maryland (fips == "24510")
library(dplyr)
if (!exists("NEI")) NEI <- readRDS("summarySCC_PM25.rds") # course dataset; filename assumed
baltcitymary.emissions<-summarise(group_by(filter(NEI, fips == "24510"), year), Emissions=sum(Emissions))
clrs <- c("red", "green", "blue", "yellow")
x2<-barplot(height=baltcitymary.emissions$Emissions/1000, names.arg=baltcitymary.emissions$year,
xlab="years", ylab=expression('total PM'[2.5]*' emission in kilotons'),ylim=c(0,4),
main=expression('Total PM'[2.5]*' emissions in Baltimore City-MD in kilotons'),col=clrs)
## Add text at top of bars
text(x = x2, y = round(baltcitymary.emissions$Emissions/1000,2), label = round(baltcitymary.emissions$Emissions/1000,2), pos = 3, cex = 0.8, col = "black")
dev.copy(png, file="plot2.png", width=480, height=480)
dev.off()
 | /plot2.R | no_license | aclyrics65M/Exploratory-Data-Analysis-Project-02 | R | false | false | 725 | r |
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/add_lags_and_leads.R
\name{add_lags_and_leads}
\alias{add_lags_and_leads}
\title{Return data.table with lag}
\usage{
add_lags_and_leads(dt, varnames, tvar = "date", by = NULL, n = 3,
typenames = c("lag", "lead"))
}
\arguments{
\item{dt}{data.table}
\item{varnames}{Character vector of variables to lag or lead}
\item{tvar}{Name of time variable. Default is "date".}
\item{by}{(Optional) Name of variable to run lags/leads by}
\item{n}{(Optional) Number of lags/leads to create}
\item{typenames}{(Optional) Character vector equal to c("lag"), c("lead")", or c("lag", "lead") (default)}
}
\value{
data.table with lags and leads added
}
\description{
Return data.table with lag
}
| /man/add_lags_and_leads.Rd | no_license | astraetech/baylisR | R | false | true | 762 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/add_lags_and_leads.R
\name{add_lags_and_leads}
\alias{add_lags_and_leads}
\title{Return data.table with lag}
\usage{
add_lags_and_leads(dt, varnames, tvar = "date", by = NULL, n = 3,
typenames = c("lag", "lead"))
}
\arguments{
\item{dt}{data.table}
\item{varnames}{Character vector of variables to lag or lead}
\item{tvar}{Name of time variable. Default is "date".}
\item{by}{(Optional) Name of variable to run lags/leads by}
\item{n}{(Optional) Number of lags/leads to create}
\item{typenames}{(Optional) Character vector equal to c("lag"), c("lead")", or c("lag", "lead") (default)}
}
\value{
data.table with lags and leads added
}
\description{
Return data.table with lag
}
|
library(SingleR)
library(Seurat)
library(magrittr)
library(pheatmap)
library(kableExtra)
library(dplyr)
library(tidyr)
library(ggpubr)
source("https://raw.githubusercontent.com/nyuhuyang/SeuratExtra/master/R/Seurat3_functions.R")
source("https://raw.githubusercontent.com/nyuhuyang/SeuratExtra/master/R/SingleR_functions.R")
source("R/util.R")
path <- paste0("output/",gsub("-","",Sys.Date()),"/")
if(!dir.exists(path)) dir.create(path, recursive = T)
#====== 3.2 SingleR specifications ==========================================
# Step 1: Spearman coefficient
(load(file = "data/MCL_41_harmony_20191231.Rda"))
(load(file="output/singlerT_MCL_41_20200225.Rda"))
# if singler didn't find all cell labels
length(singler$singler[[1]]$SingleR.single$labels) == ncol(object)
if(length(singler$singler[[1]]$SingleR.single$labels) < ncol(object)){
all.cell = colnames(object);length(all.cell)
know.cell = names(singler$singler[[1]]$SingleR.single$labels);length(know.cell)
object = subset(object, cells = know.cell)
}
table(rownames(singler$singler[[1]]$SingleR.single$labels) == colnames(object))
if(!all(rownames(singler$singler[[1]]$SingleR.single$labels) %in% colnames(object))){
cells = rownames(singler$singler[[1]]$SingleR.single$labels)
cells = cells[!cells %in% colnames(object)]
unique(gsub("_.*","",cells))
rownames(singler$singler[[1]]$SingleR.single$labels) %<>% gsub("Pt-28-PB-C25D1",
"Pt-28-PB-C28D1",.)
}
singler$meta.data$orig.ident = object$orig.ident # the original identities, if not supplied in 'annot'
singler$meta.data$xy = object@reductions$tsne@cell.embeddings # the tSNE coordinates
singler$meta.data$clusters = Idents(object) # the Seurat clusters (if 'clusters' not provided)
save(singler,file="output/singlerT_MCL_41_20200225.Rda")
##############################
# check the spearman correlation
###############################
#Or by all cell types (showing the top 50 cell types):
jpeg(paste0(path,"DrawHeatmap_sub1.jpeg"), units="in", width=10, height=7,
res=600)
print(SingleR.DrawHeatmap(singler$singler[[1]]$SingleR.single, top.n = 50,normalize = F))
dev.off()
jpeg(paste0(path,"DrawHeatmap_sub1_N.jpeg"), units="in", width=10, height=7,
res=600)
print(SingleR.DrawHeatmap(singler$singler[[1]]$SingleR.single,top.n = 50,normalize = T))
dev.off()
#Finally, we can also view the labeling as a table compared to the original identities:
singlerDF = data.frame("singler1sub" = singler$singler[[1]]$SingleR.single$labels,
"orig.ident" = gsub("_.*","",singler$singler[[1]]$SingleR.single$cell.names))
singlerDF = singlerDF[colnames(object),]
table(singlerDF$singler1sub, singlerDF$orig.ident) %>% kable %>%
kable_styling()
##############################
# adjust cell label
##############################
# combine cell types
singlerDF$singler1sub = gsub("MCL:.*","MCL",singlerDF$singler1sub)
singlerDF$singler1sub = gsub("B_cells:PB","B_cells:Plasma_cells",singlerDF$singler1sub)
singlerDF$cell.types = gsub("B_cells:.*","B_cells",singlerDF$singler1sub)
singlerDF$cell.types = gsub("MEP|CLP|HSC|CMP|GMP|MPP","HSC/progenitors",singlerDF$cell.types)
singlerDF$cell.types = gsub("T_cells:CD4\\+_.*","T_cells:CD4+",singlerDF$cell.types)
singlerDF$cell.types = gsub("T_cells:CD8\\+_.*","T_cells:CD8+",singlerDF$cell.types)
singlerDF$cell.types = gsub("T_cells:Tregs","T_cells:CD4+",singlerDF$cell.types)
singlerDF$cell.types = gsub("DC|Macrophages|Macrophages:M1","Myeloid cells",singlerDF$cell.types)
singlerDF$cell.types = gsub("Erythrocytes","Myeloid cells",singlerDF$cell.types)
singlerDF$cell.types = gsub("Eosinophils|Megakaryocytes|Monocytes","Myeloid cells",singlerDF$cell.types)
singlerDF$cell.types = gsub("Adipocytes|Fibroblasts|mv_Endothelial_cells","Nonhematopoietic cells",singlerDF$cell.types)
table(singlerDF$cell.types, singlerDF$orig.ident) %>% kable() %>% kable_styling()
# reduce false positive results (B cells are labeled as MCL in normal samples)
# and false negative results (MCL cells are labeled as B cells in MCL samples)
# singler1sub false negative results =========
CCND1 = FetchData(object,"CCND1")
singlerDF$CCND1 = CCND1$CCND1
singlerDF[(singlerDF$CCND1 >0 & singlerDF$cell.types %in% "B_cells"),"cell.types"] = "MCL"
# cell.types false positive results ========
table(singlerDF$cell.types, object@meta.data$orig.ident) %>% kable %>% kable_styling()
normal_cells <- rownames(singlerDF)[object$sample %in% c("BH","DJ","MD","NZ")]
singlerDF[normal_cells,"cell.types"] %<>% gsub("MCL","B_cells",.)
# singler1sub false positive results =========
table(singlerDF$singler1sub, object$orig.ident) %>% kable %>% kable_styling()
singlerDF[normal_cells,"singler1sub"] %<>% gsub("MCL:.*$","B_cells:Memory",.)
table(singlerDF$cell.types, object$orig.ident) %>% kable %>% kable_styling()
table(singlerDF$singler1sub, object$orig.ident)%>% kable %>% kable_styling()
table(singlerDF$cell.types %>% sort)%>% kable %>% kable_styling()
table(singlerDF$cell.types) %>% kable() %>% kable_styling()
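# The same counts expressed as percentages of all labeled cells (illustrative):
round(100 * prop.table(table(singlerDF$cell.types)), 2) %>% kable() %>% kable_styling()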
#======================================
# subset Monocytes
Idents(object) = "SCT_snn_res.0.8"
c_5 <- subset(object, idents = c(5,16))
DefaultAssay(c_5) = "SCT"
c_5 %<>% FindNeighbors(reduction = "harmony",dims = 1:85)
c_5 %<>% FindClusters(resolution = 0.1)
TSNEPlot.1(c_5,do.print = T, title = "re-cluster cluster 5 ")
features <- FilterGenes(object,c("FCN1","ITGAL","ITGAM","FCGR1A",
"MS4A7","CDKN1C", "CSF1R","FCGR3A",
"VCAN","S100A8","CD14","CSF3R"))
FeaturePlot.1(c_5,features = features, pt.size = 0.005, cols = c("gray90", "red"),
alpha = 1,reduction = "tsne",
threshold = 1, text.size = 20, border = T,do.print = T, do.return = F,ncol = 4,
units = "in",width=12, height=9, no.legend = T)
CD14 <- colnames(c_5)[c_5$SCT_snn_res.0.1 %in% c(0,2)]
CD16 <- colnames(c_5)[c_5$SCT_snn_res.0.1 == 1]
singlerDF[CD14,"cell.types"] = "Monocytes:CD14+"
singlerDF[CD16,"cell.types"] = "Monocytes:CD16+"
##############################
# process color scheme
##############################
#singler_colors <- readxl::read_excel("doc/singler.colors.xlsx")
#singler_colors1 = as.vector(singler_colors$singler.color1[!is.na(singler_colors$singler.color1)])
#singler_colors1[duplicated(singler_colors1)]
#singler_colors2 = as.vector(singler_colors$singler.color2[!is.na(singler_colors$singler.color2)])
singler_colors2 = c("#E6AB02","#6A3D9A", "#2055da","#ADDFEE","#FB9A99","#FF0000", "#A65628", "#B3B3B3", "#B3DE69", "#F0027F")
object <- AddMetaData(object = object,metadata = singlerDF["cell.types"])
object <- AddMetaColor(object = object, label= "cell.types", colors = singler_colors2)
Idents(object) <- "cell.types"
lapply(c(UMAPPlot.1,TSNEPlot.1), function(fun)
fun(object = object, label = F, group.by = "cell.types",
cols = ExtractMetaColor(object),no.legend = F,
pt.size = 0.1,label.size = 3, do.print = T,do.return = F,
title = "Cell type labeling by Blueprint + Encode + MCL"))
save(object,file="data/MCL_41_harmony_20200225.Rda")
##############################
# draw tsne plot
##############################
object <- subset(object,idents = c("HSC/progenitors","Nonhematopoietic cells"), invert = TRUE)
table(Idents(object))
Idents(object)="Doublets" # switch identities to the doublet-call metadata column
object %<>% subset(idents = "Singlet")
cell_Freq <- table(object$cell.types) %>% as.data.frame
cell_Freq = cell_Freq[order(cell_Freq$Var1),]
cell_Freq$col = ExtractMetaColor(object)
cell_Freq = cell_Freq[order(cell_Freq$Freq,decreasing = T),]
cell_Freq$Var1 %<>% factor(levels = as.character(cell_Freq$Var1))
colnames(cell_Freq)[1:2] = c("Cell_Type", "Cell_Number")
jpeg(paste0(path,"cell_type_numbers.jpeg"), units="in", width=10, height=7,res=600)
ggbarplot(cell_Freq, "Cell_Type", "Cell_Number",
fill = "Cell_Type", color = "Cell_Type",xlab = "",
palette = cell_Freq$col,x.text.angle = 90,
title = "Numbers of major cell types in total 43 samples")+NoLegend()+
theme(plot.title = element_text(hjust = 0.5,size=15))
dev.off()
cell_num <- table(object$orig.ident,object$cell.types)
cell_Freq <- round(prop.table(cell_num,margin = 1)*100,digits = 3) %>%
as.data.frame.matrix()
cell_num %<>% as.data.frame.matrix()
res <- list("cell.types.num"=cell_num,
"cell.types.perc" = cell_Freq)
openxlsx::write.xlsx(res,
file = paste0(path,"20210426_PALIBR_cell.types.xlsx"),
colNames = TRUE, rowNames = TRUE,
borders = "surrounding",colWidths = c(NA, "auto", "auto"))
 | /R/Seurat3/Phase_I/SingleR_figures.R | no_license | nyuhuyang/scRNAseq-MCL | R | false | false | 8,662 | r |
library(ggplot2)
library(dplyr)
library(tidyr)
weath = read.csv("~/Downloads/knb-lter-nwt.413.10/sdltdayv.ml.data.csv")
nrow(weath)
head(weath)
weath = read.csv("~/Downloads/knb-lter-nwt.413.10/sdltdayv.ml.data.csv") %>%
mutate(year = date %>% as.character() %>% strsplit(split = '\\-') %>%
sapply(function(x) unlist(x)[1] %>% as.numeric()),
mnth = date %>% as.character() %>% strsplit(split = '\\-') %>%
sapply(function(x) unlist(x)[2] %>% as.numeric()),
dymn = date %>% as.character() %>% strsplit(split = '\\-') %>%
sapply(function(x) unlist(x)[3] %>% as.numeric()),
seas = year + as.numeric(mnth > 9),
date.origin = as.Date.character(paste0(year, '-10-01'), format = '%Y-%m-%d'),
i = 1:nrow(.)) %>%
group_by(i) %>%
mutate(jd = date %>%
as.Date('%Y-%m-%d') %>%
julian.Date(origin = date.origin) %>%
(function(x) x[1]),
jd = jd + ifelse(jd < 0, 365 + as.numeric(year %% 4), 0)) %>%
ungroup() %>%
select(-c(i, date.origin))
head(weath)
# No plot substructure.
# These are daily records!
weath %>% group_by(year) %>% summarise(n.obs = n()) %>% print(n = 30)
weath %>%
ggplot() +
geom_line(aes(x = jd, y = airtemp_avg, group = seas), size = 0.1)
weath.c = weath %>%
group_by(jd) %>%
mutate(c.mean = airtemp_avg - mean(airtemp_avg, na.rm = TRUE))
weath.c %>%
filter(year > 1993) %>%
ggplot() +
geom_segment(x = 0, xend = 365, y = 0, yend = 0, colour = 'blue') +
geom_line(aes(x = jd, y = c.mean, group = seas), size = 0.4) +
facet_wrap(~ seas)
lm(c.mean ~ year, weath.c) %>% summary() # trend of increasing over time (weak)
weath %>%
filter(seas > 1993) %>%
ggplot() +
geom_line(aes(x = jd, y = airtemp_avg, group = seas), size = 0.1) +
geom_segment(x = 0, xend = 365, y = 0, yend = 0, colour = 'blue') +
facet_wrap(~ seas)
# THIS data could be splined very easily.
weath.thaw = weath %>%
group_by(seas) %>%
mutate(thaw = airtemp_avg > 0 & jd > 100,
thaw = ifelse(is.na(thaw), FALSE, thaw),
cuml.thaw = cumsum(thaw))
weath.thaw %>%
filter(seas > 1993) %>%
ggplot() +
geom_line(aes(x = jd, y = cuml.thaw, group = seas))
weath.thaw %>%
filter(cuml.thaw %in% 5) %>%
distinct(seas, .keep_all = TRUE) %>%
ggplot() +
geom_histogram(aes(x = jd, fill = seas), binwidth = 1)
####
# Advice from Cliff:
# Growing season starts (?) with three consec. days of min. temp above -3C
weath %>%
select(-c(LTER_site, local_site, flag_airtemp_max, flag_airtemp_min)) %>%
filter(seas > 1981) %>%
arrange(seas, jd) %>%
group_by(seas) %>%
mutate(a3 = airtemp_min > -3,
c3 = cumsum(a3),
d1 = c(0, diff(c3)),
d2 = c(0, diff(d1)),
d3 = c(0, diff(d2)),
flag = d1 & d2 & d3) %>% View()
# This is hard to do with a dplyr chain.
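# A sketch of one workaround: run-length encode the warm/cold indicator within
# each season and take the first run of >= 3 consecutive warm days (assumes
# rows are already in day order; NAs are treated as cold days).
first_warm_run <- function(warm) {
  warm <- !is.na(warm) & warm
  r <- rle(warm)
  i <- which(r$values & r$lengths >= 3)[1]
  if (is.na(i)) return(NA_integer_)
  sum(r$lengths[seq_len(i - 1)]) + 1 # index of the run's first day
}
weath %>%
  filter(seas > 1981) %>%
  arrange(seas, jd) %>%
  group_by(seas) %>%
  summarise(start_jd = jd[first_warm_run(airtemp_min > -3)])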
# "Growing degree days" here is computed simply as the number of days with minimum daily temperature above 5 deg C
wggdn = weath %>%
filter(seas > 1981) %>%
arrange(seas, jd) %>%
group_by(seas) %>%
mutate(gddn = cumsum(airtemp_min > 5)) %>%
ungroup()
ggplot(wggdn) +
geom_line(aes(x = jd, y = gddn, group = seas))
# There are NAs here. There's also another data file.
##### Try to merge together various datasets.
# loggr is new data
loggr = read.csv('00_raw_data/temp_new_loggers/sdlcr23x-cr1000.daily.ml.data.csv')
head(loggr)
table(loggr$LTER_site)
table(loggr$local_site)
table(loggr$logger)
loggr = loggr %>%
select(-c(LTER_site, local_site)) %>%
mutate(year = date %>% as.character() %>% strsplit(split = '\\-') %>%
sapply(function(x) unlist(x)[1] %>% as.numeric()),
mnth = date %>% as.character() %>% strsplit(split = '\\-') %>%
sapply(function(x) unlist(x)[2] %>% as.numeric()),
dymn = date %>% as.character() %>% strsplit(split = '\\-') %>%
sapply(function(x) unlist(x)[3] %>% as.numeric()),
seas = year + as.numeric(mnth > 9),
jdseas = jday - (274 + as.numeric(!year %% 4)),
jdseas = jdseas + ifelse(jdseas < 0, 365, 0))
table(loggr$jdseas)
# Plot: relative humidity
loggr %>%
group_by(seas) %>%
mutate(ann_mean_rhs = mean(rh_avg, na.rm = TRUE)) %>%
ggplot(aes(x = jdseas)) +
geom_segment(aes(x = 0, xend = 365,
y = ann_mean_rhs,
yend = ann_mean_rhs),
colour = 'purple') +
geom_line(aes(y = rh_avg), size = 0.2) +
geom_ribbon(aes(ymin = rh_min, ymax = rh_max),
alpha = 0.2) +
facet_wrap(~ seas)
# Not much annual difference in relative humidity.
# Look at barometric pressure
loggr %>%
group_by(seas) %>%
mutate(ann_mean_bp = mean(bp_avg, na.rm = TRUE)) %>%
ggplot(aes(x = jdseas)) +
geom_segment(aes(x = 0, xend = 365,
y = ann_mean_bp,
yend = ann_mean_bp),
colour = 'purple') +
geom_line(aes(y = bp_avg), size = 0.2) +
geom_ribbon(aes(ymin = bp_min, ymax = bp_max),
alpha = 0.2) +
facet_wrap(~ seas)
# Not much annual difference in BP.
# Look at solar radiation
loggr %>%
group_by(seas) %>%
mutate(ann_mean_rad = mean(solrad_tot, na.rm = TRUE)) %>%
ggplot(aes(x = jdseas)) +
geom_segment(aes(x = 0, xend = 365,
y = ann_mean_rad,
yend = ann_mean_rad),
colour = 'purple') +
geom_line(aes(y = solrad_tot), size = 0.2) +
facet_wrap(~ seas)
# Detrend
loggr %>%
group_by(jdseas) %>%
mutate(dtrnd_mean_rad = solrad_tot - mean(solrad_tot, na.rm = TRUE)) %>%
group_by(seas) %>%
mutate(ann_mean_rad = mean(dtrnd_mean_rad[jdseas %in% 200:350], na.rm = TRUE)) %>%
ggplot(aes(x = jdseas)) +
geom_line(aes(y = dtrnd_mean_rad, group = seas), size = 0.25) +
geom_segment(aes(x = 200, xend = 350,
y = ann_mean_rad,
yend = ann_mean_rad),
colour = 'purple') +
facet_wrap(~ seas)
# No real evidence for differences in solar radiation
##### Okay. Look at the daily temperature data.
daily = read.csv('01_process_data/output/daily_airtemp_all.csv')
head(daily)
# Goddamnit
# Need julian date
# Now, want water date for each year.
daily = daily %>%
mutate(jd = paste('1970', month, day, sep = '-') %>% as.Date() %>% julian(),
wyear = year + as.numeric(month > 9),
wd = jd - 273 + ifelse(wyear == year, 365, 0))
head(daily)
daily %>%
ggplot() +
geom_line(aes(x = wd, y = avg_temp)) +
facet_wrap(~ wyear)
# Looks good.
# Try first: simple means from the previous year.
ann.means = daily %>%
group_by(wyear) %>%
summarise(jja_mean = mean(avg_temp[month %in% 6:8])) %>%
mutate(p1y = wyear + 1,
p2y = wyear + 2,
p3y = wyear + 3)
rolling.means = merge(x = ann.means %>% select(-c(p1y, p2y, p3y)),
y = ann.means %>% select(-c(wyear, p2y, p3y)),
by.x = 'wyear', by.y = 'p1y',
suffixes = c('', '_p1')) %>%
merge(y = ann.means %>% select(-c(wyear, p1y, p3y)),
by.x = 'wyear', by.y = 'p2y',
suffixes = c('', '_p2')) %>%
merge(y = ann.means %>% select(-c(wyear, p1y, p2y)),
by.x = 'wyear', by.y = 'p3y',
suffixes = c('', '_p3')) %>%
  mutate(jja_mean1 = (jja_mean + jja_mean_p1)/2,
         jja_mean2 = (jja_mean + jja_mean_p1 + jja_mean_p2)/3,
         jja_mean3 = (jja_mean + jja_mean_p1 + jja_mean_p2 + jja_mean_p3)/4) # average of the current and three previous years
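# The same rolling means can be had without the repeated merges, at the cost
# of assuming the years are consecutive with no gaps (a sketch using lag()):
rolling.means.alt = ann.means %>%
  arrange(wyear) %>%
  mutate(jja_mean1 = (jja_mean + lag(jja_mean, 1)) / 2,
         jja_mean2 = (jja_mean + lag(jja_mean, 1) + lag(jja_mean, 2)) / 3,
         jja_mean3 = (jja_mean + lag(jja_mean, 1) + lag(jja_mean, 2) + lag(jja_mean, 3)) / 4)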
# Also want: maybe the cumulative number of days above 5 degrees (i.e., growing
# degree days) by the sampling date.
# GDD for spring of _that year_ (april 1 - june 30)
# (earliest samples are june ~20, but most samples are july/august)
gdd.spring = daily %>%
filter(month %in% 4:6) %>%
group_by(wyear) %>%
summarise(spring.gdd = sum(min_temp > 5))
# Now, let's make moving averages of GDD
# Go up to three years prevous
gdd.allyrs = daily %>%
group_by(wyear) %>%
summarise(season.gdd = sum(min_temp > 5)) %>%
mutate(p1y = wyear + 1,
p2y = wyear + 2,
p3y = wyear + 3)
# Note: we don't want the year-of value, so we clip it out with select(-season.gdd)
rolling.gdds = merge(x = gdd.allyrs %>% select(-c(p1y, p2y, p3y)),
y = gdd.allyrs %>% select(-c(wyear, p2y, p3y)),
by.x = 'wyear', by.y = 'p1y',
suffixes = c('', '_p1')) %>%
merge(y = gdd.allyrs %>% select(-c(wyear, p1y, p3y)),
by.x = 'wyear', by.y = 'p2y',
suffixes = c('', '_p2')) %>%
merge(y = gdd.allyrs %>% select(-c(wyear, p1y, p2y)),
by.x = 'wyear', by.y = 'p3y',
suffixes = c('', '_p3')) %>%
select(-season.gdd) %>%
mutate(season.gdd2 = (season.gdd_p1 + season.gdd_p2)/2,
season.gdd3 = (season.gdd_p1 + season.gdd_p2 + season.gdd_p3)/3) %>%
rename(season.gdd1 = season.gdd_p1)
head(rolling.gdds)
# Okay, a tricky one. How to get start of growing season.
# This data might be kinda confounding with the snowmelt GAMs.
# diff(cumsum(min_temp > -3), lag = 3) gives the number of days out of the last
# three (including the current day) which have had min_temp above -3
seas.start = daily %>%
  filter(month %in% 1:8) %>%
  select(wyear, year, month, day, jd, min_temp) %>%
  arrange(wyear, jd) %>%
  group_by(wyear) %>%
  mutate(three.day.lag = c(0, 0, 0, diff(cumsum(min_temp > -3), lag = 3))) %>%
filter(three.day.lag %in% 3) %>%
distinct(wyear, .keep_all = TRUE) %>%
select(wyear, jd, year, month, day)
seas.start %>% print(n = 35)
# Okay. Cool and good!
hist(seas.start$jd)
seas.start %>% filter(jd < 100)
# March date... are these okay? | /01_process_data/exploration/temperature_exploration.R | no_license | EBIO6100Spring2020/saddle-plants | R | false | false | 9,542 | r |
makeCacheMatrix <- function( myMatrix = matrix() ) {
inv <- NULL
# first a function to set the myMatrix
setMatrix <- function( newMatrix ) {
myMatrix <<- newMatrix
# inverse would change
inv <<- NULL
}
# function to get the myMatrix
getMatrix <- function() myMatrix
# function to set the value of inverse
setInverse <- function( inverse ) {
inv <<- inverse
}
# finally, a function to get inverse
getInverse <- function() inv
# create a list of functions to get inverse of a myMatrix and set it
list( setMatrix = setMatrix, getMatrix = getMatrix,
setInverse = setInverse, getInverse = getInverse )
}
cacheSolve <- function( x, ... ) {
# get the inverse for this matrix object
inverse <- x$getInverse()
# see if it's sane
if( !is.null( inverse ) ) {
# inverse found in cache, let's get it from there and be done
return( inverse )
}
  # can't find inverse in cache, calculate it and save to the cache
  mat <- x$getMatrix()
  inverse <- solve( mat )
  x$setInverse( inverse )
inverse
}
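# Illustrative usage (a sketch, not part of the original assignment file):
m <- makeCacheMatrix(matrix(c(2, 0, 0, 2), 2, 2))
cacheSolve(m)                    # computes the inverse and caches it
cacheSolve(m)                    # second call returns the cached inverse
m$getMatrix() %*% cacheSolve(m)  # should print the 2x2 identity matrix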
 | /cachematrix.R | no_license | connecttoneha08/ProgrammingAssignment2 | R | false | false | 1,092 | r |
set.seed(1993,kind=NULL)
library(VIM)
## model setting ##
nsim=1000 #number of simulation runs
N=10000 #Population size
n=200 #sample size
x=1+rexp(N)
e=rnorm(N)
y=4+x+e
muy=mean(y)
mux=mean(x)
## missing data ##
eta_i<-cbind(rep(1,N),x) %*% t(t(c(2,0.1)))
expit<-function(x){
return(exp(x)/(1+exp(x)))
}
R<-unlist(lapply(eta_i,FUN = function(x){
return(rbinom(1,1,expit(x)))
}))
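# Note: rbinom() is vectorized over its prob argument, so an equivalent
# one-liner would be R <- rbinom(N, 1, expit(eta_i)); the lapply() form above
# is kept as written.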
Y<-R*y
Y[Y==0]<-NA # safe here: y = 4 + x + e > 0 almost surely, so 0 flags nonresponse
muY<-mean(Y,na.rm = T)
muY
muy
z=cbind(x,Y)
miss.vals = which(apply(z,1,function(z){return(sum(is.na(z)))})!=0)
c(nrow(z), nrow(z) - length(miss.vals), length(miss.vals)/nrow(z) )
## take a sample by PPS ##
z=x/sum(x)
syspps=function(x,n){
N=length(x)
U=sample(N,N)
xx=x[U]
z=rep(0,N)
for(i in 1:N) z[i]=n*sum(xx[1:i])/sum(x)
r=runif(1)
s=numeric()
for(i in 1:N){
if(z[i]>=r){
s=c(s,U[i])
r=r+1
}
}
return(s[order(s)])
}
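# Quick sanity check (sketch): since z = x/sum(x), the inclusion
# probabilities n*z sum to n, e.g. stopifnot(isTRUE(all.equal(sum(n*z), n)))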
s=syspps(z,n)
zs=z[s]
Ys=Y[s]
xs=x[s]
pis=n*zs
zz=cbind(xs,Ys)
miss.vals = which(apply(zz,1,function(zz){return(sum(is.na(zz)))})!=0)
c(nrow(zz), nrow(zz) - length(miss.vals), length(miss.vals)/nrow(zz) )
si.nni=kNN(zz,k=1)
si.nni=subset(si.nni,select=xs:Ys)
plot(zz,pch=20,main="Figure 1")
points(si.nni[miss.vals,],pch=4,col=c(4))
ys=si.nni$Ys
muyhat_ht=sum(ys/pis)/N
muxhat_ht=sum(xs/pis)/N
muyhat_gr=(muyhat_ht/muxhat_ht)*mux
s2x=var(xs)
sxy=cov(xs,ys)
betahat=sxy/s2x
muyhat_greg = muyhat_ht+betahat*(mux-muxhat_ht)
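# For reference, the estimators computed above are:
# HT mean: muyhat_ht = (1/N) * sum(ys/pis)
# generalized ratio: muyhat_gr = (muyhat_ht/muxhat_ht) * mux
# GREG: muyhat_greg = muyhat_ht + betahat*(mux - muxhat_ht), betahat = sxy/s2x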
muyhat_gr
muyhat_greg
## variance estimation for generalized ratio estimator ##
r=muyhat_ht/muxhat_ht
li=ys-r*xs
Ri=li/zs
s2=sum((Ri-mean(Ri))^2)/(n-1)
VL_gr=s2/n/N^2
VBoot_gr=function(xs,ys,zs,mux,n,B)
{
V=rep(0,B)
for(i in 1:B)
{
bsam=sample(n,n,replace=T)
bx=xs[bsam]
by=ys[bsam]
bz=zs[bsam]
bpi=n*bz
muyhat_ht=sum(by/bpi)/N
muxhat_ht=sum(bx/bpi)/N
V[i]=(muyhat_ht/muxhat_ht)*mux
}
VB_gr=(B-1)*var(V)/B
return(VB_gr)
}
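# Note: (B-1)*var(V)/B rescales var()'s (B-1)-denominator estimate to the
# divide-by-B variance of the B bootstrap replicates.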
VB_gr=VBoot_gr(xs,ys,zs,mux,n,1000)
VJack_gr=function(xs,ys,pis,mux,n)
{
V=rep(0,n)
for (i in 1:n)
{
muyhat_ht=sum(ys[-i]/pis[-i])/N
muxhat_ht=sum(xs[-i]/pis[-i])/N
V[i]=(muyhat_ht/muxhat_ht)*mux
}
VJ_gr=((n-1)^2/n)*var(V)
return(VJ_gr)
}
VJ_gr=VJack_gr(xs,ys,pis,mux,n)
VL_gr
VB_gr
VJ_gr
## CIs for generalized ratio estimator ##
mu1=muyhat_gr-1.96*sqrt(VL_gr)
mu2=muyhat_gr+1.96*sqrt(VL_gr)
mu3=muyhat_gr-1.96*sqrt(VB_gr)
mu4=muyhat_gr+1.96*sqrt(VB_gr)
mu5=muyhat_gr-1.96*sqrt(VJ_gr)
mu6=muyhat_gr+1.96*sqrt(VJ_gr)
CI_gr1=c(mu1,mu2)
CI_gr2=c(mu3,mu4)
CI_gr3=c(mu5,mu6)
CI_gr1
CI_gr2
CI_gr3
## variance estimation for generalized regression estimator ##
ei=ys-betahat*xs
Ri=ei/zs
s2=sum((Ri-mean(Ri))^2)/(n-1)
VL_greg=s2/n/N^2
VBoot_greg=function(xs,ys,zs,mux,n,B)
{
V=rep(0,B)
for(i in 1:B)
{
bsam=sample(n,n,replace=T)
bx=xs[bsam]
by=ys[bsam]
bz=zs[bsam]
bpi=n*bz
muyhat_ht=sum(by/bpi)/N
muxhat_ht=sum(bx/bpi)/N
sxy=cov(bx,by)
s2x=var(bx)
betahat=sxy/s2x
V[i]=muyhat_ht+betahat*(mux-muxhat_ht)
}
VB_greg=(B-1)*var(V)/B
return(VB_greg)
}
VB_greg=VBoot_greg(xs,ys,zs,mux,n,1000)
VJack_greg=function(xs,ys,pis,mux,n)
{
V=rep(0,n)
for (i in 1:n)
{
sxyj=cov(xs[-i],ys[-i])
s2xj=var(xs[-i])
betahatj=sxyj/s2xj
muyhat_ht=sum(ys[-i]/pis[-i])/N
muxhat_ht=sum(xs[-i]/pis[-i])/N
V[i]=muyhat_ht+betahatj*(mux-muxhat_ht)
}
VJ_greg=((n-1)^2/n)*var(V)
return(VJ_greg)
}
VJ_greg=VJack_greg(xs,ys,pis,mux,n)
VL_greg
VB_greg
VJ_greg
## CIs for generalized regression estimator ##
mu1=muyhat_greg-1.96*sqrt(VL_greg)
mu2=muyhat_greg+1.96*sqrt(VL_greg)
mu3=muyhat_greg-1.96*sqrt(VB_greg)
mu4=muyhat_greg+1.96*sqrt(VB_greg)
mu5=muyhat_greg-1.96*sqrt(VJ_greg)
mu6=muyhat_greg+1.96*sqrt(VJ_greg)
CI_greg1=c(mu1,mu2)
CI_greg2=c(mu3,mu4)
CI_greg3=c(mu5,mu6)
CI_greg1
CI_greg2
CI_greg3
## bias, MSE, length, cp for generalized ratio ##
bias=c(0)
mse=c(0)
cp=matrix(0,3,3)
len=c(0,0,0)
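# Bookkeeping for the simulation loops below: rows of cp index the variance
# methods (1 = linearization, 2 = bootstrap, 3 = jackknife); columns count how
# often muy falls below, inside, or above the corresponding 95% CI.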
for(m in 1:nsim){
s=syspps(z,n)
zs=z[s]
Ys=Y[s]
xs=x[s]
pis=n*zs
zz=cbind(xs,Ys)
si.nni=kNN(zz,k=1)
si.nni=subset(si.nni,select=xs:Ys)
ys=si.nni$Ys
muyhat_ht=sum(ys/pis)/N
muxhat_ht=sum(xs/pis)/N
muyhat_gr=(muyhat_ht/muxhat_ht)*mux
r=muyhat_ht/muxhat_ht
li=ys-r*xs
Ri=li/zs
s2=sum((Ri-mean(Ri))^2)/(n-1)
VL_gr=s2/n/N^2
VB_gr=VBoot_gr(xs,ys,zs,mux,n,1000)
VJ_gr=VJack_gr(xs,ys,pis,mux,n)
mu1=muyhat_gr-1.96*sqrt(VL_gr)
mu2=muyhat_gr+1.96*sqrt(VL_gr)
mu3=muyhat_gr-1.96*sqrt(VB_gr)
mu4=muyhat_gr+1.96*sqrt(VB_gr)
mu5=muyhat_gr-1.96*sqrt(VJ_gr)
mu6=muyhat_gr+1.96*sqrt(VJ_gr)
bias[1]=bias[1]+muyhat_gr-muy
mse[1]=mse[1]+(muyhat_gr-muy)^2
len[1]=len[1]+mu2-mu1
len[2]=len[2]+mu4-mu3
len[3]=len[3]+mu6-mu5
cp[1,1]=cp[1,1]+(muy<=mu1)
cp[1,2]=cp[1,2]+(muy>mu1)*(muy<mu2)
cp[1,3]=cp[1,3]+(muy>=mu2)
cp[2,1]=cp[2,1]+(muy<=mu3)
cp[2,2]=cp[2,2]+(muy>mu3)*(muy<mu4)
cp[2,3]=cp[2,3]+(muy>=mu4)
cp[3,1]=cp[3,1]+(muy<=mu5)
cp[3,2]=cp[3,2]+(muy>mu5)*(muy<mu6)
cp[3,3]=cp[3,3]+(muy>=mu6)
}
bias_gr=bias/(muy*nsim)
mse_gr=mse/nsim
len_gr=len/nsim
cp_gr=cp/nsim
bias_gr
mse_gr
len_gr
cp_gr
## bias, MSE, length, cp for generalized regression ##
bias=c(0)
mse=c(0)
cp=matrix(0,3,3)
len=c(0,0,0)
for(m in 1:nsim){
s=syspps(z,n)
zs=z[s]
Ys=Y[s]
xs=x[s]
pis=n*zs
zz=cbind(xs,Ys)
si.nni=kNN(zz,k=1)
si.nni=subset(si.nni,select=xs:Ys)
ys=si.nni$Ys
muyhat_ht=sum(ys/pis)/N
muxhat_ht=sum(xs/pis)/N
s2x=var(xs)
sxy=cov(xs,ys)
betahat=sxy/s2x
muyhat_greg=muyhat_ht+betahat*(mux-muxhat_ht)
ei=ys-betahat*xs
Ri=ei/zs
s2=sum((Ri-mean(Ri))^2)/(n-1)
VL_greg=s2/n/N^2
VB_greg=VBoot_greg(xs,ys,zs,mux,n,1000)
VJ_greg=VJack_greg(xs,ys,pis,mux,n)
mu1=muyhat_greg-1.96*sqrt(VL_greg)
mu2=muyhat_greg+1.96*sqrt(VL_greg)
mu3=muyhat_greg-1.96*sqrt(VB_greg)
mu4=muyhat_greg+1.96*sqrt(VB_greg)
mu5=muyhat_greg-1.96*sqrt(VJ_greg)
mu6=muyhat_greg+1.96*sqrt(VJ_greg)
bias[1]=bias[1]+muyhat_greg-muy
mse[1]=mse[1]+(muyhat_greg-muy)^2
len[1]=len[1]+mu2-mu1
len[2]=len[2]+mu4-mu3
len[3]=len[3]+mu6-mu5
cp[1,1]=cp[1,1]+(muy<=mu1)
cp[1,2]=cp[1,2]+(muy>mu1)*(muy<mu2)
cp[1,3]=cp[1,3]+(muy>=mu2)
cp[2,1]=cp[2,1]+(muy<=mu3)
cp[2,2]=cp[2,2]+(muy>mu3)*(muy<mu4)
cp[2,3]=cp[2,3]+(muy>=mu4)
cp[3,1]=cp[3,1]+(muy<=mu5)
cp[3,2]=cp[3,2]+(muy>mu5)*(muy<mu6)
cp[3,3]=cp[3,3]+(muy>=mu6)
}
bias_greg=bias/(muy*nsim)
mse_greg=mse/nsim
len_greg=len/nsim
cp_greg=cp/nsim
bias_greg
mse_greg
len_greg
cp_greg | /RP2.R | no_license | WenqiangGao/MRP | R | false | false | 6,345 | r | set.seed(1993,kind=NULL)
library(VIM)
## model setting ##
nsim=1000 #number of simulation runs
N=10000 #Population size
n=200 #sample size
x=1+rexp(N)
e=rnorm(N)
y=4+x+e ##
muy=mean(y)
mux=mean(x)
## missing data ##
eta_i<-cbind(rep(1,N),x) %*% t(t(c(2,0.1)))
expit<-function(x){
return(exp(x)/(1+exp(x)))
}
R<-unlist(lapply(eta_i,FUN = function(x){
return(rbinom(1,1,expit(x)))
}))
Y<-R*y
Y[Y==0]<-NA
muY<-mean(Y,na.rm = T)
muY
muy
z=cbind(x,Y)
miss.vals = which(apply(z,1,function(z){return(sum(is.na(z)))})!=0)
c(nrow(z), nrow(z) - length(miss.vals), length(miss.vals)/nrow(z) )
## take a sample by PPS ##
z=x/sum(x)
syspps=function(x,n){
N=length(x)
U=sample(N,N)
xx=x[U]
z=rep(0,N)
for(i in 1:N) z[i]=n*sum(xx[1:i])/sum(x)
r=runif(1)
s=numeric()
for(i in 1:N){
if(z[i]>=r){
s=c(s,U[i])
r=r+1
}
}
return(s[order(s)])
}
s=syspps(z,n)
zs=z[s]
Ys=Y[s]
xs=x[s]
pis=n*zs
zz=cbind(xs,Ys)
miss.vals = which(apply(zz,1,function(zz){return(sum(is.na(zz)))})!=0)
c(nrow(zz), nrow(zz) - length(miss.vals), length(miss.vals)/nrow(zz) )
si.nni=kNN(zz,k=1)
si.nni=subset(si.nni,select=xs:Ys)
plot(zz,pch=20,main="Figure 1")
points(si.nni[miss.vals,],pch=4,col=c(4))
ys=si.nni$Ys
muyhat_ht=sum(ys/pis)/N
muxhat_ht=sum(xs/pis)/N
muyhat_gr=(muyhat_ht/muxhat_ht)*mux
s2x=var(xs)
sxy=cov(xs,ys)
betahat=sxy/s2x
muyhat_greg = muyhat_ht+betahat*(mux-muxhat_ht)
muyhat_gr
muyhat_greg
##variance estimation for generalized ratio estimator ##
r=muyhat_ht/muxhat_ht
li=ys-r*xs
Ri=li/zs
s2=sum((Ri-mean(Ri))^2)/(n-1)
VL_gr=s2/n/N^2
VBoot_gr=function(xs,ys,zs,mux,n,B)
{
V=rep(0,B)
for(i in 1:B)
{
bsam=sample(n,n,replace=T)
bx=xs[bsam]
by=ys[bsam]
bz=zs[bsam]
bpi=n*bz
muyhat_ht=sum(by/bpi)/N
muxhat_ht=sum(bx/bpi)/N
V[i]=(muyhat_ht/muxhat_ht)*mux
}
VB_gr=(B-1)*var(V)/B
return(VB_gr)
}
VB_gr=VBoot_gr(xs,ys,zs,mux,n,1000)
VJack_gr=function(xs,ys,pis,mux,n)
{
V=rep(0,n)
for (i in 1:n)
{
muyhat_ht=sum(ys[-i]/pis[-i])/N
muxhat_ht=sum(xs[-i]/pis[-i])/N
V[i]=(muyhat_ht/muxhat_ht)*mux
}
VJ_gr=((n-1)^2/n)*var(V)
return(VJ_gr)
}
VJ_gr=VJack_gr(xs,ys,pis,mux,n)
VL_gr
VB_gr
VJ_gr
## CIs for generalized ratio estimator ##
mu1=muyhat_gr-1.96*sqrt(VL_gr)
mu2=muyhat_gr+1.96*sqrt(VL_gr)
mu3=muyhat_gr-1.96*sqrt(VB_gr)
mu4=muyhat_gr+1.96*sqrt(VB_gr)
mu5=muyhat_gr-1.96*sqrt(VJ_gr)
mu6=muyhat_gr+1.96*sqrt(VJ_gr)
CI_gr1=c(mu1,mu2)
CI_gr2=c(mu3,mu4)
CI_gr3=c(mu5,mu6)
CI_gr1
CI_gr2
CI_gr3
## variance estimation for generalized regression estimator ##
ei=ys-betahat*xs
Ri=ei/zs
s2=sum((Ri-mean(Ri))^2)/(n-1)
VL_greg=s2/n/N^2
VBoot_greg=function(xs,ys,zs,mux,n,B)
{
V=rep(0,B)
for(i in 1:B)
{
bsam=sample(n,n,replace=T)
bx=xs[bsam]
by=ys[bsam]
bz=zs[bsam]
bpi=n*bz
muyhat_ht=sum(by/bpi)/N
muxhat_ht=sum(bx/bpi)/N
sxy=cov(bx,by)
s2x=var(bx)
betahat=sxy/s2x
V[i]=muyhat_ht+betahat*(mux-muxhat_ht)
}
VB_greg=(B-1)*var(V)/B
return(VB_greg)
}
VB_greg=VBoot_greg(xs,ys,zs,mux,n,1000)
VJack_greg=function(xs,ys,pis,mux,n)
{
V=rep(0,n)
for (i in 1:n)
{
sxyj=cov(xs[-i],ys[-i])
s2xj=var(xs[-i])
betahatj=sxyj/s2xj
muyhat_ht=sum(ys[-i]/pis[-i])/N
muxhat_ht=sum(xs[-i]/pis[-i])/N
V[i]=muyhat_ht+betahatj*(mux-muxhat_ht)
}
VJ_greg=((n-1)^2/n)*var(V)
return(VJ_greg)
}
VJ_greg=VJack_greg(xs,ys,pis,mux,n)
VL_greg
VB_greg
VJ_greg
## CIs for generalized regression estimator ##
mu1=muyhat_greg-1.96*sqrt(VL_greg)
mu2=muyhat_greg+1.96*sqrt(VL_greg)
mu3=muyhat_greg-1.96*sqrt(VB_greg)
mu4=muyhat_greg+1.96*sqrt(VB_greg)
mu5=muyhat_greg-1.96*sqrt(VJ_greg)
mu6=muyhat_greg+1.96*sqrt(VJ_greg)
CI_greg1=c(mu1,mu2)
CI_greg2=c(mu3,mu4)
CI_greg3=c(mu5,mu6)
CI_greg1
CI_greg2
CI_greg3
## bias, MSE, length, cp for generalized ratio ##
bias=c(0)
mse=c(0)
cp=matrix(0,3,3)
len=c(0,0,0)
for(m in 1:nsim){
s=syspps(z,n)
zs=z[s]
Ys=Y[s]
xs=x[s]
pis=n*zs
zz=cbind(xs,Ys)
si.nni=kNN(zz,k=1)
si.nni=subset(si.nni,select=xs:Ys)
ys=si.nni$Ys
muyhat_ht=sum(ys/pis)/N
muxhat_ht=sum(xs/pis)/N
muyhat_gr=(muyhat_ht/muxhat_ht)*mux
r=muyhat_ht/muxhat_ht
li=ys-r*xs
Ri=li/zs
s2=sum((Ri-mean(Ri))^2)/(n-1)
VL_gr=s2/n/N^2
VB_gr=VBoot_gr(xs,ys,zs,mux,n,1000)
VJ_gr=VJack_gr(xs,ys,pis,mux,n)
mu1=muyhat_gr-1.96*sqrt(VL_gr)
mu2=muyhat_gr+1.96*sqrt(VL_gr)
mu3=muyhat_gr-1.96*sqrt(VB_gr)
mu4=muyhat_gr+1.96*sqrt(VB_gr)
mu5=muyhat_gr-1.96*sqrt(VJ_gr)
mu6=muyhat_gr+1.96*sqrt(VJ_gr)
bias[1]=bias[1]+muyhat_gr-muy
mse[1]=mse[1]+(muyhat_gr-muy)^2
len[1]=len[1]+mu2-mu1
len[2]=len[2]+mu4-mu3
len[3]=len[3]+mu6-mu5
cp[1,1]=cp[1,1]+(muy<=mu1)
cp[1,2]=cp[1,2]+(muy>mu1)*(muy<mu2)
cp[1,3]=cp[1,3]+(muy>=mu2)
cp[2,1]=cp[2,1]+(muy<=mu3)
cp[2,2]=cp[2,2]+(muy>mu3)*(muy<mu4)
cp[2,3]=cp[2,3]+(muy>=mu4)
cp[3,1]=cp[3,1]+(muy<=mu5)
cp[3,2]=cp[3,2]+(muy>mu5)*(muy<mu6)
cp[3,3]=cp[3,3]+(muy>=mu6)
}
bias_gr=bias/(muy*nsim)
mse_gr=mse/nsim
len_gr=len/nsim
cp_gr=cp/nsim
bias_gr
mse_gr
len_gr
cp_gr
## bias, MSE, length, cp for generalized regression ##
bias=c(0)
mse=c(0)
cp=matrix(0,3,3)
len=c(0,0,0)
for(m in 1:nsim){
s=syspps(z,n)
zs=z[s]
Ys=Y[s]
xs=x[s]
pis=n*zs
zz=cbind(xs,Ys)
si.nni=kNN(zz,k=1)
si.nni=subset(si.nni,select=xs:Ys)
ys=si.nni$Ys
muyhat_ht=sum(ys/pis)/N
muxhat_ht=sum(xs/pis)/N
s2x=var(xs)
sxy=cov(xs,ys)
betahat=sxy/s2x
muyhat_greg=muyhat_ht+betahat*(mux-muxhat_ht)
ei=ys-betahat*xs
Ri=ei/zs
s2=sum((Ri-mean(Ri))^2)/(n-1)
VL_greg=s2/n/N^2
VB_greg=VBoot_greg(xs,ys,zs,mux,n,1000)
VJ_greg=VJack_greg(xs,ys,pis,mux,n)
mu1=muyhat_greg-1.96*sqrt(VL_greg)
mu2=muyhat_greg+1.96*sqrt(VL_greg)
mu3=muyhat_greg-1.96*sqrt(VB_greg)
mu4=muyhat_greg+1.96*sqrt(VB_greg)
mu5=muyhat_greg-1.96*sqrt(VJ_greg)
mu6=muyhat_greg+1.96*sqrt(VJ_greg)
bias[1]=bias[1]+muyhat_greg-muy
mse[1]=mse[1]+(muyhat_greg-muy)^2
len[1]=len[1]+mu2-mu1
len[2]=len[2]+mu4-mu3
len[3]=len[3]+mu6-mu5
cp[1,1]=cp[1,1]+(muy<=mu1)
cp[1,2]=cp[1,2]+(muy>mu1)*(muy<mu2)
cp[1,3]=cp[1,3]+(muy>=mu2)
cp[2,1]=cp[2,1]+(muy<=mu3)
cp[2,2]=cp[2,2]+(muy>mu3)*(muy<mu4)
cp[2,3]=cp[2,3]+(muy>=mu4)
cp[3,1]=cp[3,1]+(muy<=mu5)
cp[3,2]=cp[3,2]+(muy>mu5)*(muy<mu6)
cp[3,3]=cp[3,3]+(muy>=mu6)
}
bias_greg=bias/(muy*nsim)
mse_greg=mse/nsim
len_greg=len/nsim
cp_greg=cp/nsim
bias_greg
mse_greg
len_greg
cp_greg |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/comparison_functions.R
\name{combined_results_graph}
\alias{combined_results_graph}
\title{Combined Results Graph}
\usage{
combined_results_graph(
combined_df,
selected_terms = "common",
use_description = FALSE,
layout = "auto",
node_size = "num_genes"
)
}
\arguments{
\item{combined_df}{Data frame of combined pathfindR enrichment results}
\item{selected_terms}{the vector of selected terms for creating the graph
(either IDs or term descriptions). If set to \code{"common"}, all of the
common terms are used. (default = "common")}
\item{use_description}{Boolean argument to indicate whether term descriptions
(in the "Term_Description" column) should be used. (default = \code{FALSE})}
\item{layout}{The type of layout to create (see \code{\link[ggraph]{ggraph}} for details). Default = "auto"}
\item{node_size}{Argument to indicate whether to use number of significant genes ("num_genes")
or the -log10(lowest p value) ("p_val") for adjusting the node sizes (default = "num_genes")}
}
\value{
a \code{\link[ggraph]{ggraph}} object containing the combined term-gene graph.
Each node corresponds to an enriched term (orange if common, different shades of blue otherwise),
an up-regulated gene (green), a down-regulated gene (red) or
a conflicting (i.e. up in one analysis, down in the other or vice versa) gene
(gray). An edge between a term and a gene indicates
that the given term involves the gene. Size of a term node is proportional
to either the number of genes (if \code{node_size = "num_genes"}) or
the -log10(lowest p value) (if \code{node_size = "p_val"}).
}
\description{
Combined Results Graph
}
\examples{
combined_results <- combine_pathfindR_results(RA_output, RA_comparison_output, plot_common = FALSE)
g <- combined_results_graph(combined_results, selected_terms = sample(combined_results$ID, 3))
}
| /man/combined_results_graph.Rd | permissive | aj12aj/pathfindR | R | false | true | 1,937 | rd |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/correctionMethods.R
\name{correctGC}
\alias{correctGC}
\title{GC correction}
\usage{
correctGC(binned.data.list, GC.BSgenome, same.binsize = FALSE,
method = "loess", return.plot = FALSE, bins = NULL)
}
\arguments{
\item{binned.data.list}{A \code{list} with \code{\link{binned.data}} objects or a list of filenames containing such objects.}
\item{GC.BSgenome}{A \code{BSgenome} object which contains the DNA sequence that is used for the GC correction.}
\item{same.binsize}{If \code{TRUE} the GC content will only be calculated once. Set this to \code{TRUE} if all \code{\link{binned.data}} objects describe the same genome at the same binsize and stepsize.}
\item{method}{One of \code{c('quadratic', 'loess')}. Option \code{method='quadratic'} uses the method described in the Supplementary of \code{citation("AneuFinder")}. Option \code{method='loess'} uses a loess fit to adjust the read count.}
\item{return.plot}{Set to \code{TRUE} if plots should be returned for visual assessment of the GC correction.}
\item{bins}{A \code{\link{binned.data}} object with meta-data column 'GC'. If this is specified, \code{GC.BSgenome} is ignored. Beware, no format checking is done.}
}
\value{
A \code{list()} with \code{\link{binned.data}} objects with adjusted read counts. Alternatively a \code{list()} with \code{\link[ggplot2]{ggplot}} objects if \code{return.plot=TRUE}.
}
\description{
Correct a list of \code{\link{binned.data}} by GC content.
}
\details{
Two methods are available for GC correction: Option \code{method='quadratic'} uses the method described in the Supplementary of \code{citation("AneuFinder")}. Option \code{method='loess'} uses a loess fit to adjust the read count.
}
\examples{
## Get a BED file, bin it and run GC correction
bedfile <- system.file("extdata", "KK150311_VI_07.bam.bed.gz", package="AneuFinderData")
binned <- binReads(bedfile, assembly='mm10', binsize=1e6,
chromosomes=c(1:19,'X','Y'))
plot(binned[[1]], type=1)
if (require(BSgenome.Mmusculus.UCSC.mm10)) {
binned.GC <- correctGC(list(binned[[1]]), GC.BSgenome=BSgenome.Mmusculus.UCSC.mm10)
plot(binned.GC[[1]], type=1)
}
}
\author{
Aaron Taudt
}
| /man/correctGC.Rd | no_license | ataudt/aneufinder | R | false | true | 2,240 | rd |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/NCEP.R
\docType{class}
\name{NCEPRefClass-class}
\alias{NCEPRefClass-class}
\alias{NCEPRefClass}
\title{A subclass of SPNCRefClass for NCEP/NCM}
\description{
This NetCDF data is projected and can be read by the \code{raster} package. Unlike
other members of the SPNCRefClass, this class stores the data as a raster. The
\code{get_raster()} and \code{get_points()} functions are still exposed for consistency.
}
\section{Fields}{
\describe{
\item{\code{R}}{a Raster* class object}
}}
| /man/NCEPRefClass-class.Rd | permissive | BigelowLab/spnc | R | false | true | 569 | rd |
context("hitman")
test_that("E numeric", {
ee <- rnorm(length(pheno.v))
expect_message(hm <- hitman(E=ee, M=M, Y=pheno.v, verbose = TRUE))
expect_lt(mean(hm$comb.p < 0.05), 0.1)
covar.tmp <- rnorm(length(pheno.v))
hm2 <- hitman(E=ee, M=M, Y=pheno.v, covariates=covar.tmp)
expect_lte(mean(hm$comb.p==hm2[rownames(hm), "comb.p"]), 0.01)
expect_error(hitman(E=numeric(length(pheno.v)), M=M, Y=pheno.v))
})
test_that("E binary", {
expect_message(hm <- hitman(E=grp, M=M, Y=pheno.v, verbose = TRUE))
expect_lt(mean(hm$comb.p < 0.05), 0.2)
#same as factor but not numeric
hm2 <- hitman(E=factor(grp), M=M, Y=pheno.v)
expect_equal(hm$comb.p, hm2[rownames(hm), "comb.p"])
covar.tmp <- rnorm(length(pheno.v))
hm3 <- hitman(E=grp, M=M, Y=pheno.v, covariates=covar.tmp)
expect_lte(mean(hm$comb.p==hm3[rownames(hm), "comb.p"]), 0.01)
#try to get ey.t=0, but EY.t~=1e-16
# y <- rep(1:3, times=2)
# limma_dep(object=y, Y=grp, prefix="EY")
# hm4 <- hitman(E=grp, M=M, Y=rep(1:3, times=2))
})
test_that("E nominal", {
grp.tmp <- rep(letters[1:3], each=2)
hm <- hitman(E=grp.tmp, M=M, Y=pheno.v)
expect_lt(mean(hm$comb.p < 0.05), 0.2)
#same as factor but not numeric
hm2 <- hitman(E=factor(grp.tmp), M=M, Y=pheno.v)
expect_equal(hm$comb.p, hm2[rownames(hm), "comb.p"])
covar.tmp <- rnorm(length(pheno.v))
hm3 <- hitman(E=grp.tmp, M=M, Y=pheno.v, covariates=covar.tmp)
expect_lte(mean(hm$comb.p==hm3[rownames(hm), "comb.p"]), 0.01)
expect_error(hitman(E=rep("a", length(pheno.v)), M=M, Y=pheno.v))
expect_error(hitman(E=c(rep("a", length(pheno.v)-1), NA), M=M, Y=pheno.v))
expect_error(hitman(E=rep(NA, length(pheno.v)-1), M=M, Y=pheno.v))
})
test_that("gene1", {
hm <- hitman(E=grp, M=M, Y=M[1,])
expect_equal(rownames(hm)[1], "gene1")
expect_equal(hm["gene1", "MY.p"], hm["gene1", "MY2.p"])
expect_equal(hm["gene1", "EM.p"], hm["gene1", "EM2.p"])
expect_equal(hm["gene1", "comb.p"], max(hm["gene1", "EM.p"], hm["gene1", "MY.p"])^2)
}) | /tests/testthat/test_hitman.R | permissive | gdaher/ezlimma | R | false | false | 2,024 | r |
test_that("E numeric", {
ee <- rnorm(length(pheno.v))
expect_message(hm <- hitman(E=ee, M=M, Y=pheno.v, verbose = TRUE))
expect_lt(mean(hm$comb.p < 0.05), 0.1)
covar.tmp <- rnorm(length(pheno.v))
hm2 <- hitman(E=ee, M=M, Y=pheno.v, covariates=covar.tmp)
expect_lte(mean(hm$comb.p==hm2[rownames(hm), "comb.p"]), 0.01)
expect_error(hitman(E=numeric(length(pheno.v)), M=M, Y=pheno.v))
})
test_that("E binary", {
expect_message(hm <- hitman(E=grp, M=M, Y=pheno.v, verbose = TRUE))
expect_lt(mean(hm$comb.p < 0.05), 0.2)
#same as factor but not numeric
hm2 <- hitman(E=factor(grp), M=M, Y=pheno.v)
expect_equal(hm$comb.p, hm2[rownames(hm), "comb.p"])
covar.tmp <- rnorm(length(pheno.v))
hm3 <- hitman(E=grp, M=M, Y=pheno.v, covariates=covar.tmp)
expect_lte(mean(hm$comb.p==hm3[rownames(hm), "comb.p"]), 0.01)
#try to get ey.t=0, but EY.t~=1e-16
# y <- rep(1:3, times=2)
# limma_dep(object=y, Y=grp, prefix="EY")
# hm4 <- hitman(E=grp, M=M, Y=rep(1:3, times=2))
})
test_that("E nominal", {
grp.tmp <- rep(letters[1:3], each=2)
hm <- hitman(E=grp.tmp, M=M, Y=pheno.v)
expect_lt(mean(hm$comb.p < 0.05), 0.2)
#same as factor but not numeric
hm2 <- hitman(E=factor(grp.tmp), M=M, Y=pheno.v)
expect_equal(hm$comb.p, hm2[rownames(hm), "comb.p"])
covar.tmp <- rnorm(length(pheno.v))
hm3 <- hitman(E=grp.tmp, M=M, Y=pheno.v, covariates=covar.tmp)
expect_lte(mean(hm$comb.p==hm3[rownames(hm), "comb.p"]), 0.01)
expect_error(hitman(E=rep("a", length(pheno.v)), M=M, Y=pheno.v))
expect_error(hitman(E=c(rep("a", length(pheno.v)-1), NA), M=M, Y=pheno.v))
expect_error(hitman(E=rep(NA, length(pheno.v)-1), M=M, Y=pheno.v))
})
test_that("gene1", {
hm <- hitman(E=grp, M=M, Y=M[1,])
expect_equal(rownames(hm)[1], "gene1")
expect_equal(hm["gene1", "MY.p"], hm["gene1", "MY2.p"])
expect_equal(hm["gene1", "EM.p"], hm["gene1", "EM2.p"])
expect_equal(hm["gene1", "comb.p"], max(hm["gene1", "EM.p"], hm["gene1", "MY.p"])^2)
}) |
library(Rga4gh)
### Name: search_individuals
### Title: Search for Individuals
### Aliases: search_individuals
### ** Examples
ref_client <- ga4gh_client("http://1kgenomes.ga4gh.org", api_location = "")
## Not run:
##D library(magrittr)
##D ## Find a dataset to search in
##D datasets <- ref_client %>% search_datasets() %>% content()
##D d_id <- datasets$datasets[[1]]$id
##D
##D ## Search for individuals in the dataset
##D individuals <- ref_client %>% search_individuals(d_id) %>% content()
##D individuals
##D
## End(Not run)
| /data/genthat_extracted_code/Rga4gh/examples/search_individuals.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 544 | r |
test_that("get_events_plots() generates plots for github event type and payload",{
  y <- get_events_plots()
expect_equal(length(y[[1]]$grobs), 3)
expect_identical(y[[1]]$layout$clip[1], "off")
expect_identical(y[[1]]$layout$name[1], "arrange")
}) | /tests/testthat/test-get-events-plots.R | permissive | jasmine2chen/gitevents | R | false | false | 252 | r |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/io_export.R
\name{export_scores_to_mongodb}
\alias{export_scores_to_mongodb}
\title{Export scores to a mongodb collection}
\usage{
export_scores_to_mongodb(
  formatted_data,
  algo,
  batch,
  f_scores,
  database,
  collection,
  mongodb_uri
)
}
\arguments{
\item{formatted_data}{\code{data.frame()} \cr Data with the fields "siret",
"periode", "score" and "score_diff". This is the case for the data formatted by
\code{\link{format_for_export}}.}
\item{algo}{\code{character(1)} \cr Name of the algorithm recorded in the
exported objects}
\item{batch}{\code{character(1)} \cr Name of the batch recorded in the
exported objects}
\item{f_scores}{\code{character(2)} \cr Vector of F1 and F2 scores. Must be
named, with the names "F1" and "F2"}
\item{database}{\code{character(1)} \cr Name of the database to export
to.}
\item{collection}{\code{character(1)} \cr Name of the collection to export
to.}
\item{mongodb_uri}{\code{character(1)} \cr URL to the database in mongodb uri
format.}
}
\value{
Returns TRUE. \cr The objects inserted into the database have the
fields:
\itemize{
\item "_id" (generated ObjectIds),
\item "alert", which can take the values \emph{Alerte seuil F1}, \emph{Alerte seuil F2} and \emph{Pas d'alerte},
\item "algo" and "batch" as passed in the parameters,
\item "siret", "periode", "score" and "score_diff" as extracted from the \code{formatted_data} table,
\item "timestamp", which gives the date and time.
}
}
\description{
Exports scores to a mongodb collection from data formatted by the
\code{\link{format_for_export}} function.
}
% Please edit documentation in R/io_export.R
\name{export_scores_to_mongodb}
\alias{export_scores_to_mongodb}
\title{Export des scores dans une collection mongodb}
\usage{
export_scores_to_mongodb(
formatted_data,
algo,
batch,
f_scores,
database,
collection,
mongodb_uri
)
}
\arguments{
\item{formatted_data}{\code{data.frame()} \cr Données avec les champs "siret",
"periode", "score" et "score_diff". C'est le cas des données formatées par
\code{\link{format_for_export}}.}
\item{algo}{\code{character(1)} \cr Nom de l'algo qui figurera dans les objets
exportés}
\item{batch}{\code{character(1)} \cr Nom du batch qui figurera dans les objets
exportés}
\item{f_scores}{\code{character(2)} \cr Vecteur de scores F1 et F2. Doit être
nommé avec comme noms "F1" et "F2"}
\item{database}{\code{character(1)} \cr Nom de la base de données vers laquelle
param exporter.}
\item{collection}{`character(1)' \cr Nom de la collection vers laquelle
exporter.}
\item{mongodb_uri}{\code{character(1)} \cr url to the database in mongodb uri
format.}
}
\value{
Retourne TRUE. \cr Les objets insérés dans la base de données ont les
champs:
\itemize{
\item "_id" (ObjectId générés),
\item "alert", qui peut prendre les valeurs \emph{Alerte seuil F1}, \emph{Alerte seuil F2} et \emph{Pas d'alerte},
\item "algo" et "batch" tel qu'entrés en paramètres,
\item "siret", "periode", "score" et "score_diff" tel qu'extraits de la table \code{formatted_data},
\item "timestamp" qui donne la date et l'heure.
}
}
\description{
Exporte les scores vers une collection mongodb à partir des données formattées par la fonction
\code{\link{format_for_export}}.
}
|
#' Text Justification
#'
#' \code{left.just} - Left justifies a text/character column.
#'
#' @param dataframe A data.frame object with the text column.
#' @param column The column to be justified. If \code{NULL} all columns are
#' justified.
#' @param keep.class logical. If \code{TRUE} will attempt to keep the original
#' classes of the dataframe if the justification is not altered (i.e., numeric
#' will not be honored but factor may be).
#' @return Returns a dataframe with selected text column left/right justified.
#' @rdname justification
#' @note \code{\link[qdap]{left.just}} inserts spaces to achieve the
#' justification. This could interfere with analysis and therefore the output
#' from \code{\link[qdap]{left.just}} should only be used for visualization
#' purposes, not analysis.
#' @keywords justify, justification
#' @export
#' @examples
#' \dontrun{
#' left.just(DATA)
#' left.just(DATA, "state")
#' left.just(CO2[1:15,])
#' right.just(left.just(CO2[1:15,]))
#' }
left.just <-
function(dataframe, column = NULL, keep.class = FALSE) {
df.class <- function(dataframe) {
sapply(1:ncol(dataframe), function(i) {
x <- class(dataframe[, i])
x[length(x)]
})
}
CLASS <- df.class(dataframe)
left.j <- function(x) {
n <- max(nchar(x))
return(sprintf(paste("%-", n, "s", sep = ""), x))
}
if (is.null(column)) column <- colnames(dataframe)
lj <- function(DF2, column) {
if (is.null(column)) column <- colnames(DF2)
Q <- max(nchar(c(as.character(DF2[, column]), names(DF2)[column])))
DF2 <- data.frame(rbind(colnames(DF2), do.call(cbind,
lapply(DF2, as.character))), check.names = FALSE)
DF2[, column] <- left.j(as.character(DF2[, column]))
if (is.character(column)) {
col <- names(DF2)[which(names(DF2) == column)]
names(DF2)[which(names(DF2) == column)] <- sprintf(paste("%-",
Q, "s", sep = ""), col)
} else {
if (is.numeric(column)) {
col <- names(DF2)[column]
names(DF2)[column] <- sprintf(paste("%-", Q, "s",
sep = ""), col)
}
}
DF2 <- data.frame(DF2[-1, , drop = FALSE], check.names = FALSE)
rownames(DF2) <- NULL
return(DF2)
}
if (length(column) < 2) {
if (!is.data.frame(dataframe)) {
y <- as.character(substitute(dataframe))
dataframe <- data.frame(dataframe, check.names = FALSE)
y <- if (y[1]%in%c("[", "$")) y[2] else y[1]
names(dataframe) <- y
}
DF3 <- lj(DF2=dataframe, column=column)
} else {
if (!is.numeric(column)) column <- match(column, names(dataframe))
dat <- dataframe[, -c(column), drop=FALSE]
ndf <- colnames(dataframe)
LIST <- lapply(column, function(x) {
lj(DF2=dataframe[, x, drop=FALSE], column = NULL)
})
        dat2 <- data.frame(cbind(do.call('cbind', LIST), dat), check.names=FALSE)
NAMES <- colnames(dat2)
STrim <- function (x) gsub("^\\s+|\\s+$|\\.+$", "", x)
newloc <- match(ndf, STrim(NAMES))
DF3 <- dat2[, newloc]
}
if (keep.class) {
colClasses <- function(d, colClasses) {
colClasses <- rep(colClasses, len=length(d))
d[] <- lapply(seq_along(d), function(i) switch(colClasses[i],
numeric=as.numeric(d[[i]]),
character=as.character(d[[i]]),
Date=as.Date(d[[i]], origin='1970-01-01'),
POSIXct=as.POSIXct(d[[i]], origin='1970-01-01'),
factor=as.factor(d[[i]]),
as(d[[i]], colClasses[i]) ))
d
}
DF3 <- colClasses(DF3, CLASS)
}
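    # The look-ahead regex below turns the run of trailing dots (produced when
    # data.frame() converts padding spaces in column names) back into spaces.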
colnames(DF3) <- gsub("\\.(?=\\.*$)", " ", colnames(DF3), perl=TRUE)
return(DF3)
}
#' Right Justify Text
#'
#' \code{right.just} - A means of undoing a left justification.
#'
#' @rdname justification
#' @export
right.just <-
function(dataframe){
NAMES <- Trim(names(dataframe))
x <- data.frame(sapply(dataframe, Trim))
names(x) <- NAMES
return(x)
}
| /R/left.just.R | no_license | abresler/qdap | R | false | false | 4,208 | r |
\name{crcpp-package}
\alias{crcpp-package}
\alias{crcpp}
\docType{package}
\title{
A short title line describing what the package does
}
\description{
A more detailed description of what the package does. A length
of about one to five lines is recommended.
}
\details{
This section should provide a more detailed overview of how to use the
package, including the most important functions.
}
\author{
Your Name, email optional.
Maintainer: Your Name <your@email.com>
}
\references{
This optional section can contain literature or other references for
background information.
}
\keyword{ package }
\seealso{
Optional links to other man pages
}
\examples{
\dontrun{
## Optional simple examples of the most important functions
## These can be in \dontrun{} and \donttest{} blocks.
}
}
| /man/crcpp-package.Rd | no_license | SymbolixAU/crcpp | R | false | false | 815 | rd |
## script for marker finding and proportion analysis DDTx project
# May 16 2023
# WTC
# open filtered dataset
library(Seurat)
data<-readRDS("/source/ddtx_merged_demultiplexed_clustered_compartment_azi_elmentaiteadultileum_below60pctmito_withoutEpithelialR.rds")
#Vlnplots to check donor/recipient cellorigin
library(ggplot2)
VlnPlot(data, "CCR6", assay="RNA", group.by="compartment_final", split.by="donor_recipient") # CCR6 higher in donor immune cells (as expected)
ggsave("CCR6_donor_recipient.png", width = 20, height = 5, dpi = 600)
VlnPlot(data, "SELL", assay="RNA", group.by="compartment_final", split.by="donor_recipient") # CD62L higher in recipient immune cells (as expected)
ggsave("SELL_donor_recipient.png", width = 20, height = 5, dpi = 600)
#find general markers for donor and recipient, all cells and datapoints taken together
Idents(data)<-"donor_recipient"
markers_data <- FindAllMarkers(data, only.pos = TRUE, min.pct = 0.25)
write.csv(markers_data, "markers_donor_recipient_mito_epi_filtered_data.csv")
#find general markers per cell type, all participants and datapoints taken together
Idents(data)<-"predicted.celltype.elmentaiteadultileum"
markers_data <- FindAllMarkers(data, only.pos = TRUE, min.pct = 0.25)
write.csv(markers_data, "markers_allcelltypes_mito_epi_filtered_data.csv")
#downsampling to equal cell numbers per sample for cell proportion analysis
library(Seurat)
Idents(data)<-"sample"
data_1949<-subset(data, downsample=1949)
saveRDS(data_1949, "DDTX_sample_1949cells.rds")
#generate CD4 T subsets
## find subsets T cells
Idents(data)<-"predicted.celltype.elmentaiteadultileum"
cd4T<-subset(data,idents="Activated CD4 T")
DimPlot(cd4T)
ggsave("cd4T_before_reclustering.png", width = 5, height = 5, dpi = 600)
DefaultAssay(object = cd4T) <- "RNA"
cd4T <- NormalizeData(cd4T, normalization.method = "LogNormalize", scale.factor = 10000)
cd4T <- FindVariableFeatures(cd4T, selection.method = "vst", nfeatures = 2000)
# Identify the 10 most highly variable genes
top10 <- head(VariableFeatures(cd4T), 10)
all.genes <- rownames(cd4T)
cd4T <- ScaleData(cd4T, features = all.genes)
cd4T <- RunPCA(cd4T, features = VariableFeatures(object = cd4T))
ElbowPlot(cd4T)
ggsave("Elbowplot_cd4T_after_reclustering.png", width = 5, height = 5, dpi = 600)
cd4T <- FindNeighbors(cd4T, dims = 1:10)
cd4T <- FindClusters(cd4T, resolution = 0.4)
head(Idents(cd4T), 5)
cd4T <- RunUMAP(cd4T, dims = 1:10)
DimPlot(cd4T, reduction = "umap")
ggsave("cd4T_after_reclustering.png", width = 5, height = 5, dpi = 600)
markers_cd4T <- FindAllMarkers(cd4T, only.pos = TRUE, min.pct = 0.25)
write.csv(markers_cd4T, "markers_cd4T_mito_epi_filtered_data.csv")
FeaturePlot(cd4T, c("IL17A"))
ggsave("cd4T_after_reclustering_il17.png", width = 5, height = 5, dpi = 600)
## markers for different timepoints epithelial cells in patient 1,2,3
Idents(data)<-"compartment_final"
epi<-subset(data, ident="Epithelial")
Idents(epi)<-"patient"
epi_pt3<-subset(epi, ident="UMCGDDtx00005")
Idents(epi_pt3)<-"Timepoint_days"
markers_epi_pt3<-FindAllMarkers(epi_pt3, only.pos=T)
write.csv(markers_epi_pt3, "/groups/umcg-weersma/tmp01/projects/ddtx/ongoing/seurat_preprocess_samples/DE/markers_epi_pt3_timepoints.csv")
epi_pt2<-subset(epi, ident="UMCGDDtx00004")
Idents(epi_pt2)<-"Timepoint_days"
markers_epi_pt2<-FindAllMarkers(epi_pt2, only.pos=T)
write.csv(markers_epi_pt2, "/groups/umcg-weersma/tmp01/projects/ddtx/ongoing/seurat_preprocess_samples/DE/markers_epi_pt2_timepoints.csv")
epi_pt1<-subset(epi, ident="UMCGDDtx00003")
Idents(epi_pt1)<-"Timepoint_days"
markers_epi_pt1<-FindAllMarkers(epi_pt1, only.pos=T)
write.csv(markers_epi_pt1, "/groups/umcg-weersma/tmp01/projects/ddtx/ongoing/seurat_preprocess_samples/DE/markers_epi_pt1_timepoints.csv")
## find endothelial markers
Idents(data)<-"major_celltype"
endo<-subset(data, ident="Endothelial")
Idents(endo)<-"patient"
endo_pt3<-subset(endo, ident="UMCGDDtx00005")
Idents(endo_pt3)<-"Timepoint_days"
markers_endo_pt3<-FindAllMarkers(endo_pt3, only.pos=T)
write.csv(markers_endo_pt3, "/groups/umcg-weersma/tmp01/projects/ddtx/ongoing/seurat_preprocess_samples/DE/markers_endo_pt3_timepoints.csv")
endo_pt2<-subset(endo, ident="UMCGDDtx00004")
Idents(endo_pt2)<-"Timepoint_days"
markers_endo_pt2<-FindAllMarkers(endo_pt2, only.pos=T)
write.csv(markers_endo_pt2, "/groups/umcg-weersma/tmp01/projects/ddtx/ongoing/seurat_preprocess_samples/DE/markers_endo_pt2_timepoints.csv")
endo_pt1<-subset(endo, ident="UMCGDDtx00003")
Idents(endo_pt1)<-"Timepoint_days"
markers_endo_pt1<-FindAllMarkers(endo_pt1, only.pos=T)
write.csv(markers_endo_pt1, "/groups/umcg-weersma/tmp01/projects/ddtx/ongoing/seurat_preprocess_samples/DE/markers_endo_pt1_timepoints.csv")
## find CD4T markers
Idents(data)<-"predicted.celltype.elmentaiteadultileum"
CD4<-subset(data, ident="Activated CD4 T")
Idents(CD4)<-"patient"
CD4_pt3<-subset(CD4, ident="UMCGDDtx00005")
Idents(CD4_pt3)<-"Timepoint_days"
markers_CD4_pt3<-FindAllMarkers(CD4_pt3, only.pos=T)
write.csv(markers_CD4_pt3, "/groups/umcg-weersma/tmp01/projects/ddtx/ongoing/seurat_preprocess_samples/DE/markers_CD4_pt3_timepoints.csv")
CD4_pt2<-subset(CD4, ident="UMCGDDtx00004")
Idents(CD4_pt2)<-"Timepoint_days"
markers_CD4_pt2<-FindAllMarkers(CD4_pt2, only.pos=T)
write.csv(markers_CD4_pt2, "/groups/umcg-weersma/tmp01/projects/ddtx/ongoing/seurat_preprocess_samples/DE/markers_CD4_pt2_timepoints.csv")
CD4_pt1<-subset(CD4, ident="UMCGDDtx00003")
Idents(CD4_pt1)<-"Timepoint_days"
markers_CD4_pt1<-FindAllMarkers(CD4_pt1, only.pos=T)
write.csv(markers_CD4_pt1, "/groups/umcg-weersma/tmp01/projects/ddtx/ongoing/seurat_preprocess_samples/DE/markers_CD4_pt1_timepoints.csv")
## find CD8T markers
CD8<-subset(data, ident="Activated CD8 T")
Idents(CD8)<-"patient"
CD8_pt3<-subset(CD8, ident="UMCGDDtx00005")
Idents(CD8_pt3)<-"Timepoint_days"
markers_CD8_pt3<-FindAllMarkers(CD8_pt3, only.pos=T)
write.csv(markers_CD8_pt3, "/groups/umcg-weersma/tmp01/projects/ddtx/ongoing/seurat_preprocess_samples/DE/markers_CD8_pt3_timepoints.csv")
CD8_pt2<-subset(CD8, ident="UMCGDDtx00004")
Idents(CD8_pt2)<-"Timepoint_days"
markers_CD8_pt2<-FindAllMarkers(CD8_pt2, only.pos=T)
write.csv(markers_CD8_pt2, "/groups/umcg-weersma/tmp01/projects/ddtx/ongoing/seurat_preprocess_samples/DE/markers_CD8_pt2_timepoints.csv")
CD8_pt1<-subset(CD8, ident="UMCGDDtx00003")
Idents(CD8_pt1)<-"Timepoint_days"
markers_CD8_pt1<-FindAllMarkers(CD8_pt1, only.pos=T)
write.csv(markers_CD8_pt1, "/groups/umcg-weersma/tmp01/projects/ddtx/ongoing/seurat_preprocess_samples/DE/markers_CD8_pt1_timepoints.csv")
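# Note: the per-celltype/per-patient blocks above could be collapsed into one
# helper; a hedged sketch (hypothetical function name, same Seurat calls as
# used in this script):
# markers_by_patient <- function(obj, celltype, patients, out_dir) {
#   Idents(obj) <- "predicted.celltype.elmentaiteadultileum"
#   ct <- subset(obj, ident = celltype)
#   Idents(ct) <- "patient"
#   for (p in patients) {
#     pt <- subset(ct, ident = p)
#     Idents(pt) <- "Timepoint_days"
#     m <- FindAllMarkers(pt, only.pos = TRUE)
#     write.csv(m, file.path(out_dir, paste0("markers_", celltype, "_", p, "_timepoints.csv")))
#   }
# }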
# check markers on gutcellatlas
| /DDTx/find_markers_and_analyze_proportions.R | no_license | WeersmaLabIBD/SingleCell | R | false | false | 6,650 | r |
# ----------------------
# Author: Andreas Alfons
# KU Leuven
# ----------------------
#' Fast implementation of the median
#'
#' Compute the sample median with a fast C++ implementation.
#'
#' @param x a numeric vector.
#'
#' @return The sample median.
#'
#' @note Functionality for removing observations with missing values is
#' currently not implemented.
#'
#' @author Andreas Alfons
#'
#' @seealso \code{\link{fastMAD}}, \code{\link[stats]{median}}
#'
#' @examples
#' set.seed(1234) # for reproducibility
#' x <- rnorm(100)
#' fastMedian(x)
#'
#' @keywords multivariate robust
#'
#' @importFrom Rcpp evalCpp
#' @useDynLib ccaPP, .registration = TRUE
#' @export
fastMedian <- function(x) {
# initializations
x <- as.numeric(x)
if(length(x) == 0) return(NA) # zero length vector
# call C++ function
.Call("R_fastMedian", R_x=x, PACKAGE="ccaPP")
}
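## Spot-check sketch (not run; assumes odd-length input so conventions agree):
## x <- rnorm(101); stopifnot(isTRUE(all.equal(fastMedian(x), stats::median(x))))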
| /fuzzedpackages/ccaPP/R/fastMedian.R | no_license | akhikolla/testpackages | R | false | false | 883 | r |
### Libraries ###
library(MALDIquant)
library(MALDIquantForeign)
library(dplyr)
library(purrr)
library(tidyr)
library(binda)
library(broom)
### Custom Functions ###
Binary.analisis<-function(feature.matrix,Peaks,spot,
sensi,other,title) {
Ytrain1<-sensi
categorias<-table(Ytrain1)
total.de.NA<-sum(is.na(intensityMatrix(Peaks)))
promedio.de.NA<-mean(apply(is.na(intensityMatrix(Peaks)), 1, sum))
mz.full <- as.double(colnames(feature.matrix))
mz <- round( mz.full )
  picos.duplicados<-any(duplicated(mz))
thr <- optimizeThreshold(feature.matrix, Ytrain1)
Xtrain.b <- dichotomize(feature.matrix, thr)
colnames(Xtrain.b) <-mz
rownames(Xtrain.b) <- paste(spot,other,sensi, sep=".")
Chequeo.dicotomico<-is.binaryMatrix(Xtrain.b)
Xtrain.b.naive <- ifelse(is.na(intensityMatrix(Peaks)), as.integer(0), as.integer(1))
rownames(Xtrain.b.naive) <-paste(spot,other,sensi, sep=".")
colnames(Xtrain.b.naive) <-mz
Chequeo.binario<-is.binaryMatrix(Xtrain.b.naive)
Feature.dicho3<-Xtrain.b
Feature.bina3<-Xtrain.b.naive
save(Feature.dicho3, Feature.bina3,
file=paste("Matrix", title, "rda", sep="."))
print(categorias)
print(paste("Total NA",total.de.NA,sep=" "))
print(paste("Average NA",promedio.de.NA,sep=" "))
print(paste("Any duplicate peak",picos.ducplicados,sep=" "))
print(paste("Dicho is binary",Chequeo.dicotomico,sep=" "))
print(paste("Bina is binary",Chequeo.binario,sep=" "))
}
# Rank peaks with binary discriminant analysis (binda) and save the top
# Num.peaks together with their per-class t-scores.
Peaks_extraction_BDA <- function(Matrix, Clasif, Num.peaks,
Name.Class1, Name.Class2, title) {
br <- binda.ranking(Matrix, Clasif, verbose=FALSE)
br2 <- br[1:Num.peaks, c(1:4)]
selPeaks23<-br2[,2]
Most.dif2<-tidy(selPeaks23)
BDA.Rank<-Most.dif2
names(BDA.Rank)<-c("Picos","BDA.score")
selPeaks232<-br2[,3]
Most.dif22<-tidy(selPeaks232)
names(Most.dif22)<-c("Picos",Name.Class1)
selPeaks233<-br2[,4]
Most.dif23<-tidy(selPeaks233)
names(Most.dif23)<-c("Picos",Name.Class2)
BDA.Rank.2<-BDA.Rank%>%
left_join(Most.dif22,by="Picos")%>%
left_join(Most.dif23,by="Picos")
BDA.Rank<-gather(BDA.Rank.2,Class,t.score,c(3:4))
Picos.significativos<-BDA.Rank
save(Picos.significativos,
file=paste("Peak", title, "rda", sep="."))
}
### Read Bruker data ###
so2<-importBrukerFlex("Path_to_spectra_folder",
verbose=FALSE)
# Labeling spectra
Newman.table<-read.csv2("Path_to_csv_data")
Newman.table<-Newman.table%>%
mutate(spot.a.1=paste(ID,Bacteria,Medio.Cultivo,sep="."))
names<-Newman.table$spot.a.1
repeticion<-Newman.table$n.esp
Names.Sa.S.Pocillo<- rep(names,repeticion)
Names.arr<-array(Names.Sa.S.Pocillo)
for (h in 1:length(so2)) {
metaData(so2[[h]])$spot<-Names.arr[[h]]
}
# Spectra pre-processing
so5<-so2
spot.factor <- factor(sapply(so5,function(x)metaData(x)$spot))
so5 <- trim(so5)
so5 <- transformIntensity(so5, method="sqrt")
so5 <- smoothIntensity(so5, method="SavitzkyGolay",
halfWindowSize=30)
so5 <- removeBaseline(so5, method="SNIP", iterations=100)
so5 <- calibrateIntensity(so5, method="TIC")
so5 <- alignSpectra(so5, halfWindowSize=30,SNR=5,
tolerance=0.2, warpingMethod="quadratic")
avgSpectra.Newman.HCCA <-averageMassSpectra(so5, labels=spot.factor, method="sum")
peaks <- detectPeaks(avgSpectra.Newman.HCCA, SNR=5, method="MAD", halfWindowSize=30)
peaks <- binPeaks(peaks,tolerance=0.2)
species.Ave<-factor(Newman.table$Growth.medium)
spot.factor.Avera<-factor(Newman.table$spot.a.1)
peaks <- filterPeaks(peaks, minFrequency=c(rep(1/2,2)),
labels = species.Ave,
mergeWhitelists=TRUE)
featureMatrix <- intensityMatrix(peaks, avgSpectra.Newman.HCCA)
# Binarization
Binary.analisis(feature.matrix= featureMatrix,
Peaks=peaks,
spot= spot.factor.Avera,
sensi=species.Ave,
other="",
title="NewmanData")
load("Matrix.NewmanData.rda")
# Discriminant-peak selection: top 30 peaks separating the two growth media
Peaks_extraction_BDA(Feature.dicho3, species.Ave, 30, "Sal", "Sin sal", "30.BDA")
load("Peak.30.BDA.rda")
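# A hedged follow-up sketch (not in the original script): inspect the saved
# BDA ranking, ordering peaks by their binda score.
# head(Picos.significativos[order(-Picos.significativos$BDA.score), ])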
| /NewmanScript.R | no_license | MarManLed/Newman | R | false | false | 4,238 | r |
## Construct a custom theme based on supplied colors. Defaults to
## colors from RColorBrewer
custom.theme <-
function(symbol = brewer.pal(n = 8, name = "Dark2"),
fill = brewer.pal(n = 12, name = "Set3"),
region = brewer.pal(n = 11, name = "Spectral"),
reference = "#e8e8e8",
bg = "transparent",
fg = "black",
...)
{
theme <-
list(plot.polygon = list(col = fill[1], border = fg[1]),
box.rectangle = list(col= symbol[1]),
box.umbrella = list(col= symbol[1]),
dot.line = list(col = reference),
dot.symbol = list(col = symbol[1]),
plot.line = list(col = symbol[1]),
plot.symbol = list(col= symbol[1]),
regions = list(col = colorRampPalette(region)(100)),
reference.line = list(col = reference),
superpose.line = list(col = symbol),
superpose.symbol = list(col = symbol),
superpose.polygon = list(col = fill, border = fg),
background = list(col = bg),
add.line = list(col = fg),
add.text = list(col = fg),
box.dot = list(col = fg),
axis.line = list(col = fg),
axis.text = list(col = fg),
strip.border = list(col = fg),
box.3d = list(col = fg),
par.xlab.text = list(col = fg),
par.ylab.text = list(col = fg),
par.zlab.text = list(col = fg),
par.main.text = list(col = fg),
par.sub.text = list(col = fg))
modifyList(modifyList(standard.theme("pdf"), theme), simpleTheme(...))
}
custom.theme.2 <- function(...)
{
doit <-
function(symbol = brewer.pal(n = 9, name = "Set1")[c(2:1, 3:5, 7:9)], ## blue first
fill = brewer.pal(n = 8, name = "Accent"),
region = brewer.pal(n = 11, name = "RdBu"),
...)
{
custom.theme(symbol = symbol, fill = fill, region = region, ...)
}
doit(...)
}
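## A hedged usage sketch (not part of the original file): these themes are
## meant to be passed to lattice via par.settings, e.g.
## library(lattice); library(RColorBrewer)
## xyplot(Sepal.Length ~ Petal.Length, groups = Species, data = iris,
##        par.settings = custom.theme.2())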
| /latticeExtra/R/custom.theme.R | no_license | ingted/R-Examples | R | false | false | 2,231 | r |
/fuzzedpackages/surveillance/man/algo.hmm.Rd | no_license | akhikolla/testpackages | R | false | false | 7,132 | rd | ||
/separate_scripts/01.glm.model.R | no_license | jjvanderwal/MQ_JCU_work | R | false | false | 9,633 | r | ||
#' An R6 object for creating municipio-level choropleths.
#'
#' @export
#' @importFrom R6 R6Class
#' @param show_states draw state borders
#' @importFrom stringr str_sub
#' @importFrom ggplot2 ggplot aes geom_polygon scale_fill_brewer ggtitle theme theme_grey element_blank geom_text coord_map
#' @importFrom ggplot2 scale_fill_continuous scale_colour_brewer ggplotGrob annotation_custom
#'
#' @examples
#' library(viridis)
#' library(scales)
#'
#' df_mxmunicipio$value <- df_mxmunicipio$indigenous / df_mxmunicipio$pop
#' gg = MXMunicipioChoropleth$new(df_mxmunicipio)
#' gg$title <- "Percentage of the population that self-identifies as indigenous"
#' gg$set_num_colors(1)
#' gg$ggplot_scale <- scale_fill_viridis("percent", labels = percent)
#' gg$render()
MXMunicipioChoropleth = R6Class("MXMunicipioChoropleth",
inherit = choroplethr:::Choropleth,
public = list(
#' @field show_states boolean, draw state borders
show_states = TRUE,
#' @description
#' Render the map of Mexico
#' @return A new ggplot2 object with a map of Mexico.
render = function()
{
self$prepare_map()
gg <- ggplot(self$choropleth.df, aes(long, lat, group = group)) +
geom_polygon(aes(fill = value), color = "dark grey", size = 0.08) +
self$get_scale() +
self$theme_clean() +
ggtitle(self$title)
state_zoom <- unique(str_sub(private$zoom, start = 1, end = 2))
if(self$show_states) {
data(mxstate.map, package="mxmaps", envir=environment())
gg <- gg + geom_polygon(
data = subset(mxstate.map, region %in% state_zoom),
fill = "transparent",
color = "#333333",
size = .15)
}
xmin <- min(self$choropleth.df$long)
xmax <- max(self$choropleth.df$long)
ymin <- min(self$choropleth.df$lat)
ymax <- max(self$choropleth.df$lat)
xpad <- (xmax - xmin) * .05
ypad <- (ymax - ymin) * .05
return(gg + coord_map(xlim = c(xmin - xpad,
xmax + xpad),
ylim = c(ymin - ypad,
ymax + ypad)
)
)
},
#' @description
#' Initialize the map of Mexico
#' @param user.df a data.frame with columns "region" and "value"
#' @return A new `MXMunicipioChoropleth` object.
initialize = function(user.df)
{
#if (!requireNamespace("mxmapsData", quietly = TRUE)) {
# stop("Package mxmapsData is needed for this function to work. Please install it.", call. = FALSE)
#}
data(mxmunicipio.map, package="mxmaps", envir=environment())
super$initialize(mxmunicipio.map, user.df)
if (private$has_invalid_regions)
{
warning("Please see df_mxmunicipio for a list of mappable regions")
}
}
)
)
#' Create a municipio-level choropleth
#'
#' The map used is mxmunicipio.map. See ?mxmunicipio.map for
#' more information.
#'
#' @param df A data.frame with a column named "region" and a column named "value". Elements in
#' the "region" column must exactly match how regions are named in the "region" column in ?df_mxmunicipio
#' @param title An optional title for the map.
#' @param legend An optional name for the legend.
#' @param num_colors The number of colors to use on the map. A value of 1
#' will use a continuous scale, and a value in [2, 9] will use that many colors.
#' @param zoom An optional vector of regions to zoom in on. Elements of this vector must exactly
#' match the region codes as they appear in the "region" column of ?df_mxmunicipio
#' @param show_states Whether to draw state borders.
#' @examples
#' df <- df_mxmunicipio
#' df$value <- df$indigenous
#' mxmunicipio_choropleth(df)
#' @export
mxmunicipio_choropleth <- function(df, title="", legend="", num_colors=7, zoom=NULL,
show_states = TRUE)
{
if("region" %in% colnames(df)) {
df$region <- str_mxmunicipio(df$region)
}
if(!is.null(zoom)) {
zoom <- str_mxmunicipio(zoom)
}
c = MXMunicipioChoropleth$new(df)
c$title = title
c$legend = legend
c$set_num_colors(num_colors)
c$set_zoom(zoom)
c$show_states = show_states
c$render()
}
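## A hedged usage sketch (not in the original source): zoom in on one state,
## assuming region codes are 5 digits whose first 2 identify the state (as
## in the str_sub() call inside render above).
## library(stringr)
## df <- df_mxmunicipio
## df$value <- df$pop
## mxmunicipio_choropleth(df, num_colors = 1,
##                        zoom = df$region[str_sub(df$region, 1, 2) == "09"])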
| /R/municipio.R | permissive | chabeliita/mxmaps | R | false | false | 5,650 | r |
#############################################
## Plot 1 (histogram of Global Active Power)
#############################################
##########################
## Download and read data
##########################
##setwd("~/Developer/DataScience/ExploratoryDataAnalysis")
urlPath <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
temp <- tempfile()
download.file(urlPath, destfile=temp, method="curl")
data <- read.table(unz(temp,"household_power_consumption.txt"),
header=TRUE, sep = ";", na.strings="?", stringsAsFactors=FALSE, dec=".")
unlink(temp)
##########################
## Data cleanup
##########################
data <- data[data$Date == "1/2/2007" | data$Date == "2/2/2007",]
global_active_power <- as.numeric(as.character(data$Global_active_power))
rm(data)
##########################
## Generate plot
##########################
par(mfcol = c(1,1))
hist(global_active_power, col="red", freq = TRUE, main="Global Active Power", xlab = "Global Active Power (kilowatts)")
##########################
## Save plot
##########################
dev.copy(png, file="plot1.png", height=480, width=480)
dev.off()
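##########################
## Alternative (a sketch, not the original code): write straight to a
## png device instead of copying from the screen device
##########################
## png("plot1.png", height=480, width=480)
## hist(global_active_power, col="red", main="Global Active Power",
##      xlab="Global Active Power (kilowatts)")
## dev.off()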
| /plot1.R | no_license | Stephane5/ExData_Plotting1 | R | false | false | 1,186 | r |
# Crawl public user reviews of the movie 'Spiderman' from NAVER Movies
library(rvest)
library(stringr)
library(dplyr)
trim <- function (x) gsub("^\\s+|\\s+$", "", x)  # strip leading/trailing whitespace
url_base <- 'https://movie.naver.com'
start_url <- "/movie/bi/mi/point.nhn?code=173123#tab"
url <- paste0(url_base, start_url)
html <- read_html(url)
# the review list is served inside an iframe; grab its src URL
html %>%
html_node('iframe.ifr') %>%
html_attr('src') -> url2
url2
page <- "&page="
score <- c()
review <- c()
writer <- c()
time <- c()
for(i in 1:250){
ifr_url <- paste0(url_base, url2, page, i)  # review iframe URL for page i
html2 <- read_html(ifr_url)
html2 %>%
html_node('div.score_result') %>%
html_nodes('li') -> lis
for (li in lis) {
score <- c(score, html_node(li, '.star_score') %>% html_text() %>% trim())
li %>%
html_node('.score_reple') %>%
html_text() %>%
trim() -> tmp
# the reply block is "review\rwriter\rdate"; peel fields off at each "\r"
idx <- str_locate(tmp, "\r")
review <- c(review, str_sub(tmp, 1, idx[1]-1))
tmp <- trim(str_sub(tmp, idx[1], -1))
idx <- str_locate(tmp, "\r")
writer <- c(writer, str_sub(tmp, 1, idx[1]-1))
tmp <- trim(str_sub(tmp, idx[1], -1))
idx <- str_locate(tmp, "\r")
time <- c(time, str_sub(tmp, 1, idx[1]-1))
#print(time)
}
}
review <- data.frame(score=score, review=review, writer=writer, time=time)
View(review)
class(review$score)
review$score_asnum <- as.numeric(as.character(review$score))
View(review$score_asnum)
mean(review$score_asnum)
setwd('d:/workspace/R_Crawling/')
write.csv(review,"spiderman_review.csv")
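# A hedged follow-up sketch (not in the original script): quick look at the
# distribution of the crawled scores.
# hist(review$score_asnum, breaks = 0:10, main = "NAVER review scores",
#      xlab = "score (1-10)")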
| /Spiderman_Review.R | no_license | joeychoi12/R_Crawling | R | false | false | 1,538 | r |
# Format a proportion as a percentage string, e.g. jf.pct(0.1234) -> "12.34%".
# Relies on magrittr for %>% and multiply_by().
jf.pct <- function(numeric) {
if(!is.numeric(numeric)) stop('Function requires numeric value.')
numeric %>%
multiply_by(100) %>%
round(2) %>%
format(nsmall=2) %>%
paste0('%')
}
| /R/jf.pct.R | no_license | jfreels/r_jfreels | R | false | false | 191 | r |
library(linkcomm)
### Name: getNestedHierarchies
### Title: Find Nested Structures in Communities
### Aliases: getNestedHierarchies
### ** Examples
## Generate graph and extract link communities.
g <- swiss[,3:4]
lc <- getLinkCommunities(g)
## Determine if community 1 is nested in any other communities.
getNestedHierarchies(lc, clusid = 1)
| /data/genthat_extracted_code/linkcomm/examples/getNestedHierarchies.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 350 | r | library(linkcomm)
### Name: getNestedHierarchies
### Title: Find Nested Structures in Communities
### Aliases: getNestedHierarchies
### ** Examples
## Generate graph and extract link communities.
g <- swiss[,3:4]
lc <- getLinkCommunities(g)
## Determine if community 1 is nested in any other communities.
getNestedHierarchies(lc, clusid = 1)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.