content
large_stringlengths 0
6.46M
| path
large_stringlengths 3
331
| license_type
large_stringclasses 2
values | repo_name
large_stringlengths 5
125
| language
large_stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.46M
| extension
large_stringclasses 75
values | text
stringlengths 0
6.46M
|
|---|---|---|---|---|---|---|---|---|---|
berechne <- function(coocTerm, numberOfCoocs, binDTM) {
  # Build a two-level co-occurrence graph around `coocTerm` and render it
  # interactively with visNetwork.
  #
  # Args:
  #   coocTerm      - search term whose co-occurrence network is drawn
  #   numberOfCoocs - how many top-ranked co-occurrences to keep per term
  #   binDTM        - binary document-term matrix, passed through to
  #                   calculateCoocStatistics() (defined elsewhere)
  #
  # Returns: a visNetwork htmlwidget.
  coocs <- calculateCoocStatistics(coocTerm, binDTM, measure = "LOGLIK")
  # Display the numberOfCoocs main terms.
  print(coocs[seq_len(numberOfCoocs)])

  # Helper: (from, to, sig) triples for the top co-occurrences of `term`.
  makeTriples <- function(term, stats) {
    data.frame(from = term,
               to   = names(stats)[seq_len(numberOfCoocs)],
               sig  = unname(stats[seq_len(numberOfCoocs)]),
               stringsAsFactors = FALSE)
  }

  # Collect every level in a list and bind once at the end instead of
  # growing a data frame with rbind() inside the loop.
  parts <- vector("list", numberOfCoocs + 1L)
  parts[[1L]] <- makeTriples(coocTerm, coocs)

  # Expand each first-level co-occurrence by its own top co-occurrences.
  for (i in seq_len(numberOfCoocs)) {
    newCoocTerm <- names(coocs)[i]
    coocs2 <- calculateCoocStatistics(newCoocTerm, binDTM, measure = "LOGLIK")
    # Drop the first triple, as the original code did.  The original used
    # `2:nrow(...)` indexing, which mis-indexes (2:1) when
    # numberOfCoocs == 1; negative indexing handles that edge correctly.
    parts[[i + 1L]] <- makeTriples(newCoocTerm, coocs2)[-1L, ]
  }
  resultGraph <- do.call(rbind, parts)

  library(igraph)  # library(), not require(): fail loudly if missing
  # Build the undirected graph from the (from, to, sig) triples.
  graphNetwork <- graph.data.frame(resultGraph, directed = FALSE)
  # Remove all nodes with fewer than 2 edges.
  graphVs <- V(graphNetwork)[degree(graphNetwork) < 2]
  graphNetwork <- delete.vertices(graphNetwork, graphVs)
  # Search term blue, every other node orange.
  V(graphNetwork)$color <- ifelse(V(graphNetwork)$name == coocTerm,
                                  "cornflowerblue", "orange")
  # Edges above 50% of the maximum significance are highlighted.
  halfMaxSig <- max(E(graphNetwork)$sig) * 0.5
  E(graphNetwork)$color <- ifelse(E(graphNetwork)$sig > halfMaxSig,
                                  "coral", "azure3")
  E(graphNetwork)$curved <- 0   # straight edges
  # Scale node size by (log) degree, with a fallback size for small nodes.
  V(graphNetwork)$size <- log(degree(graphNetwork)) * 5
  V(graphNetwork)$size[V(graphNetwork)$size < 5] <- 3
  E(graphNetwork)$width <- 2    # edge thickness
  # Frame and spacing for any base-graphics plot that may follow.
  par(mai = c(0, 0, 1, 0))

  library(visNetwork)
  library(geomnet)
  data <- toVisNetworkData(graphNetwork)
  visNetwork(nodes = data$nodes, edges = data$edges, physics = TRUE,
             height = "1000px", width = "1000px") %>%
    visPhysics(stabilization = TRUE, barnesHut = list(avoidOverlap = 1)) %>%
    visEdges(smooth = FALSE)
}
|
/berechne.R
|
no_license
|
tkatzer/blackrockTextAnalytics
|
R
| false
| false
| 3,342
|
r
|
berechne <- function(coocTerm, numberOfCoocs, binDTM) {
  # Build a two-level co-occurrence graph around `coocTerm` and render it
  # interactively with visNetwork.
  #
  # Args:
  #   coocTerm      - search term whose co-occurrence network is drawn
  #   numberOfCoocs - how many top-ranked co-occurrences to keep per term
  #   binDTM        - binary document-term matrix, passed through to
  #                   calculateCoocStatistics() (defined elsewhere)
  #
  # Returns: a visNetwork htmlwidget.
  coocs <- calculateCoocStatistics(coocTerm, binDTM, measure = "LOGLIK")
  # Display the numberOfCoocs main terms.
  print(coocs[seq_len(numberOfCoocs)])

  # Helper: (from, to, sig) triples for the top co-occurrences of `term`.
  makeTriples <- function(term, stats) {
    data.frame(from = term,
               to   = names(stats)[seq_len(numberOfCoocs)],
               sig  = unname(stats[seq_len(numberOfCoocs)]),
               stringsAsFactors = FALSE)
  }

  # Collect every level in a list and bind once at the end instead of
  # growing a data frame with rbind() inside the loop.
  parts <- vector("list", numberOfCoocs + 1L)
  parts[[1L]] <- makeTriples(coocTerm, coocs)

  # Expand each first-level co-occurrence by its own top co-occurrences.
  for (i in seq_len(numberOfCoocs)) {
    newCoocTerm <- names(coocs)[i]
    coocs2 <- calculateCoocStatistics(newCoocTerm, binDTM, measure = "LOGLIK")
    # Drop the first triple, as the original code did.  The original used
    # `2:nrow(...)` indexing, which mis-indexes (2:1) when
    # numberOfCoocs == 1; negative indexing handles that edge correctly.
    parts[[i + 1L]] <- makeTriples(newCoocTerm, coocs2)[-1L, ]
  }
  resultGraph <- do.call(rbind, parts)

  library(igraph)  # library(), not require(): fail loudly if missing
  # Build the undirected graph from the (from, to, sig) triples.
  graphNetwork <- graph.data.frame(resultGraph, directed = FALSE)
  # Remove all nodes with fewer than 2 edges.
  graphVs <- V(graphNetwork)[degree(graphNetwork) < 2]
  graphNetwork <- delete.vertices(graphNetwork, graphVs)
  # Search term blue, every other node orange.
  V(graphNetwork)$color <- ifelse(V(graphNetwork)$name == coocTerm,
                                  "cornflowerblue", "orange")
  # Edges above 50% of the maximum significance are highlighted.
  halfMaxSig <- max(E(graphNetwork)$sig) * 0.5
  E(graphNetwork)$color <- ifelse(E(graphNetwork)$sig > halfMaxSig,
                                  "coral", "azure3")
  E(graphNetwork)$curved <- 0   # straight edges
  # Scale node size by (log) degree, with a fallback size for small nodes.
  V(graphNetwork)$size <- log(degree(graphNetwork)) * 5
  V(graphNetwork)$size[V(graphNetwork)$size < 5] <- 3
  E(graphNetwork)$width <- 2    # edge thickness
  # Frame and spacing for any base-graphics plot that may follow.
  par(mai = c(0, 0, 1, 0))

  library(visNetwork)
  library(geomnet)
  data <- toVisNetworkData(graphNetwork)
  visNetwork(nodes = data$nodes, edges = data$edges, physics = TRUE,
             height = "1000px", width = "1000px") %>%
    visPhysics(stabilization = TRUE, barnesHut = list(avoidOverlap = 1)) %>%
    visEdges(smooth = FALSE)
}
|
# EXP001: run one cycle of the inverse-model experiment.
# Usage: Rscript EXP001.R <cycle>
suppressMessages(library(CCELIM))
options(echo = TRUE)  # echo commands into the batch output file

args <- commandArgs(trailingOnly = TRUE)
if (length(args) < 1L)
  stop("Usage: Rscript EXP001.R <cycle>", call. = FALSE)

# settings.r is expected to define jmp, iter, out.length, burn.length,
# resp.f and friends -- TODO confirm against that file.
source('settings.r')

name  <- paste0('EXP001-', args[1])
cycle <- as.integer(args[1])
if (is.na(cycle))
  stop("The <cycle> argument must be an integer.", call. = FALSE)
jmp <- jmp[cycle]

model <- ReadModel("./input/Model.xls", "./input/Constraints.xls",
                   model.name = "Subduction", constraint.name = 'Data001',
                   cycle, 141, 24, 18, 138)
# 24.24 / 27.27 act as sentinel placeholders in G marking the entries to
# be replaced by the cycle-specific respiration value; the exact float
# comparison is intentional for matching those sentinels.
model$G[model$G == 24.24] <- -resp.f[cycle]
model$G[model$G == 27.27] <- -resp.f[cycle]

SaveModel(model, name)
res <- RunModel(model, iter = iter, out.length = out.length,
                burn.length = burn.length, jmp = jmp)
SaveSolution(res, name)
notify(name)
|
/EXP001.R
|
permissive
|
tbrycekelly/Inverse_DVM
|
R
| false
| false
| 656
|
r
|
# EXP001: run one cycle of the inverse-model experiment.
# Usage: Rscript EXP001.R <cycle>
suppressMessages(library(CCELIM))
options(echo = TRUE)  # echo commands into the batch output file

args <- commandArgs(trailingOnly = TRUE)
if (length(args) < 1L)
  stop("Usage: Rscript EXP001.R <cycle>", call. = FALSE)

# settings.r is expected to define jmp, iter, out.length, burn.length,
# resp.f and friends -- TODO confirm against that file.
source('settings.r')

name  <- paste0('EXP001-', args[1])
cycle <- as.integer(args[1])
if (is.na(cycle))
  stop("The <cycle> argument must be an integer.", call. = FALSE)
jmp <- jmp[cycle]

model <- ReadModel("./input/Model.xls", "./input/Constraints.xls",
                   model.name = "Subduction", constraint.name = 'Data001',
                   cycle, 141, 24, 18, 138)
# 24.24 / 27.27 act as sentinel placeholders in G marking the entries to
# be replaced by the cycle-specific respiration value; the exact float
# comparison is intentional for matching those sentinels.
model$G[model$G == 24.24] <- -resp.f[cycle]
model$G[model$G == 27.27] <- -resp.f[cycle]

SaveModel(model, name)
res <- RunModel(model, iter = iter, out.length = out.length,
                burn.length = burn.length, jmp = jmp)
SaveSolution(res, name)
notify(name)
|
# Gnome R Data Miner: GNOME interface to R for Data Mining
#
# Time-stamp: <2015-11-15 09:02:15 gjw>
#
# DATA TAB
#
# Copyright (c) 2009 Togaware Pty Ltd
#
# This file is part of Rattle.
#
# Rattle is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# Rattle is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Rattle. If not, see <http://www.gnu.org/licenses/>.
#
########################################################################
#
# I've removed the Data Entry radio button because why, really, would
# anyone be interested in manually entering some data - use Gnumeric
# or some other spreadsheet to do that.
#
########################################################################
# TODO
#
# 100308 Consider using vcdExtras for displaying categoric data.
########################################################################
# UTILITIES
overwriteModel <- function()
{
  # Loading (or reloading) a dataset clears the current project,
  # including any models built so far.  When models exist, ask the user
  # to confirm before going ahead; with nothing to lose, just proceed.
  if (is.null(listBuiltModels()))
    return(TRUE)
  questionDialog(Rtxt("You have chosen to load/reload the dataset.",
                      "This will clear the current project",
                      "(dataset and models).",
                      "If you choose not to continue",
                      "you can then save the current project before",
                      "loading the dataset.",
                      "\n\nDo you wish to continue and so overwrite",
                      "the current project?"))
}
dataTabShow <- function(...)
{
  # Show exactly the widgets named in `...` on the Data tab and hide
  # every other widget under this routine's control.  Any widget added
  # to the glade XML must also be appended to `known` below, or it will
  # never be shown/hidden here.
  visible <- c(...)
  known <- c("data_filename_label",
             "data_filechooserbutton",
             "data_separator_label",
             "data_separator_entry",
             "data_decimal_label",
             "data_decimal_entry",
             "data_header_checkbutton",
             "data_name_label",
             "data_name_combobox",
             "data_odbc_dsn_label",
             "data_odbc_dsn_entry",
             "data_odbc_table_label",
             "data_odbc_table_combobox",
             "data_odbc_limit_label",
             "data_odbc_limit_spinbutton",
             "data_odbc_believeNRows_checkbutton")
  lapply(visible, function(w) theWidget(w)$show())
  lapply(setdiff(known, visible), function(w) theWidget(w)$hide())
  invisible(NULL)
}
showDataViewButtons <- function(action=TRUE)
{
  # Make the Data tab's View and Edit buttons (in)sensitive.  Rattle
  # starts with them insensitive; they are enabled once a dataset is
  # loaded.  action=FALSE is kept for future use (e.g. New Project).
  if (! is.logical(action)) warning(Rtxt("action must be a logical"))
  for (btn in c("data_view_button", "data_edit_button"))
    theWidget(btn)$setSensitive(action)
}
urlModTime <- function(filename)
{
  # Modification time of `filename`, given either as a plain path or as
  # a "file://" URL.  The substitution is anchored (the original used an
  # unanchored gsub), so only a single leading "file:///" is stripped
  # and a path merely containing that text elsewhere is left intact.
  # Note: this does not work for http:// URLs -- file.info() returns NA.
  file.info(sub("^file:///", "/", filename))$mtime
}
dataNeedsLoading <- function()
{
# Return TRUE when the data source selected on the Data tab differs
# from (or appears newer than) what is recorded in crs, so the caller
# knows to (re)load the dataset.  Only the currently active data
# source radio button is inspected.
# 080520 Determine whether any of the data source aspects of the
# Data tab have changed. This is probably limited to checking things
# relevant to the currently selected data source radio button.
# 080712 If there is no dataname stored, then don't bother testing
# any other conditions. The dataset should be loaded. 090315 Never
# reload unless there is nothing loaded - that won't work when user
# changes Filename we want to load.
if (is.null(crs$dataname)) return(TRUE)
# 080712 Check what data source is active, and act
# appropriately. For those I have yet to work on, simply return TRUE
# so that at least the data always gets loaded. But this does then
# wipe out any changes the user makes to selections.
if (theWidget("data_csv_radiobutton")$getActive() ||
theWidget("data_arff_radiobutton")$getActive())
{
# 100409 Do the URLdecode here, then encode as UTF-8. Previously
# no UTF-8 and the URLdecode was done 5 separate times below. The
# mtime below did not URLdecode, but do so now, and make sure it
# still works. Seems okay.
filename <- theWidget("data_filechooserbutton")$getUri()
if (is.null(filename)) return(TRUE)
filename <- URLdecode(filename)
Encoding(filename) <- "UTF-8"
if (is.null(crs$dwd)) return(TRUE)
if (isWindows())
{
# MS/Windows is not case sensitive.
if (tolower(basename(filename))
!= tolower(crs$dataname) ||
tolower(dirname(filename)) != tolower(crs$dwd))
return(TRUE)
}
else
{
# Elsewhere compare file name and directory exactly.
if (basename(filename) != crs$dataname ||
dirname(filename) != crs$dwd)
return(TRUE)
}
# 080606 TODO Test if file date has changed, and if so, return
# TRUE. Note that file.info does not handle URLs so have to
# specially handle this. Note that under MS/Windows this returns
# NA so we don't get a chance to notice updated files.
now.mtime <- urlModTime(filename)
if (not.null(crs$mtime) && not.null(now.mtime) && now.mtime > crs$mtime)
return(TRUE)
}
# An R dataset is identified purely by its name in the combobox.
if (theWidget("data_rdataset_radiobutton")$getActive())
{
dataname <- theWidget("data_name_combobox")$getActiveText()
if (is.null(dataname) || crs$dataname != dataname)
return(TRUE)
}
# A library dataset is displayed as "name:package ..."; split that back
# into its dataset name and package and compare both against crs.
if (theWidget("data_library_radiobutton")$getActive())
{
dataname <- theWidget("data_name_combobox")$getActiveText()
if (is.null(crs$datapkg) || is.null(dataname))
return(TRUE)
adsname <- gsub('([^ :]*).*$', '\\1', unlist(strsplit(dataname, ":"))[1])
dspkg <- unlist(strsplit(dataname, ":"))[2]
if (crs$dataname != adsname
|| crs$datapkg != dspkg)
return(TRUE)
}
if (theWidget("data_rdata_radiobutton")$getActive())
{
dataname <- theWidget("data_name_combobox")$getActiveText()
if (is.null(dataname) || crs$dataname != dataname) return(TRUE)
}
if (theWidget("data_odbc_radiobutton")$getActive())
{
table <- theWidget("data_odbc_table_combobox")$getActiveText()
if (is.null(table) || crs$dataname != table) return(TRUE)
}
# Corpus and Script sources always trigger a reload.
if (theWidget("data_corpus_radiobutton")$getActive())
{
filename <- theWidget("data_corpus_location_filechooserbutton")$getUri()
if (is.null(filename)) return(TRUE)
return(TRUE) # Always reload for now.
}
if (theWidget("data_script_radiobutton")$getActive())
{
return(TRUE)
}
# Return FALSE if we did not detect any changes.
return(FALSE)
}
updateFilenameFilters <- function(button, fname)
{
  # Install the filename filters appropriate for the data source `fname`
  # ("CSV", "ARFF" or "Rdata") on a file chooser button.  `button` may be
  # the widget itself or its name.  The original had the add-filter logic
  # copied three times; it is now driven by one table of (name, pattern)
  # pairs, with identical behavior.
  if (is.character(button)) button <- theWidget(button)
  filters <- button$listFilters()

  # (display name, glob pattern) pairs per data source.  The first entry
  # doubles as the marker used to detect whether these filters are
  # already installed (same check as the original code).
  specs <- switch(fname,
                  CSV = list(c(Rtxt("CSV Files"), "*.csv"),
                             c(Rtxt("TXT Files"), "*.txt"),
                             c(Rtxt("Excel Files"), "*.xls"),
                             c(Rtxt("Excel 2007 Files"), "*.xlsx"),
                             c(Rtxt("All Files"), "*")),
                  ARFF = list(c(Rtxt("ARFF Files"), "*.arff"),
                              c(Rtxt("All Files"), "*")),
                  Rdata = list(c(Rtxt("Rdata Files"), "*.R[Dd]ata"),
                               c(Rtxt("All Files"), "*")),
                  NULL)

  if (! is.null(specs) &&
      ! (length(filters) && filters[[1]]$getName() == specs[[1]][1]))
  {
    # Replace whatever filters are currently installed.
    lapply(filters, function(x) button$removeFilter(x))
    for (sp in specs)
    {
      ff <- RGtk2::gtkFileFilterNew()
      ff$setName(sp[1])
      ff$addPattern(sp[2])
      button$addFilter(ff)
    }
  }
  # Kick the GTK event loop otherwise you end up waiting until the
  # mouse is moved, for example.
  while (RGtk2::gtkEventsPending()) RGtk2::gtkMainIterationDo(blocking=FALSE)
}
newSampling <- function()
{
  # The partition-style sampling UI is used everywhere except the
  # "RStat" build of the application.
  crv$appname != "RStat"
}
validateSampleEntry <- function()
{
  # Validate the train/validate/test partition entered on the Data tab:
  # a non-zero training share, no negative shares, and a total of
  # exactly 100.  Pops an error dialog and returns FALSE on the first
  # violation; returns TRUE when everything checks out.
  p <- parseSampleEntry()
  if (p[1] == 0)
  {
    errorDialog(Rtxt("A training set partition of 0 does not make sense.",
                     "\n\nPlease choose a non-zero, positive percentage, up to 100."))
    return(FALSE)
  }
  if (any(p < 0))
  {
    errorDialog(Rtxt("A percentage of less than 0 for the partition",
                     "does not make sense.",
                     "\n\nPlease choose percentages in the range 0-100."))
    return(FALSE)
  }
  if (sum(p) != 100)
  {
    errorDialog(sprintf(Rtxt("The sum of the partition proportions does not add",
                             "to 100 (percent): %d + %d + %d = %d.",
                             "\n\nPlease rectify."),
                        p[1], p[2], p[3], sum(p)))
    return(FALSE)
  }
  TRUE
}
parseSampleEntry <- function()
{
  # Parse the "train/validate/test" percentages from the sample entry
  # widget, falling back to crv$default.sample when the entry is empty.
  # A single number N becomes N/0/(100-N); two numbers a/b become
  # a/(100-a-b)/b.  Returns an integer vector of length three.
  ptext <- theWidget("data_sample_entry")$getText()
  if (nchar(ptext) == 0) ptext <- crv$default.sample
  partition <- as.integer(strsplit(ptext, "/")[[1]])
  if (length(partition) == 1)
    partition <- c(partition, 0, 100 - partition)
  else if (length(partition) == 2)
    partition <- c(partition[1], 100 - sum(partition), partition[2])
  partition
}
getTrainingPercent <- function()
{
  # The first element of the parsed partition is the training share.
  parseSampleEntry()[1]
}
#-----------------------------------------------------------------------
# These are for handling protos (or envs for now). Moved into package
# container.
whichNumerics <- function(data)
{
  # Names of the numeric columns of `data`.
  # vapply (not sapply) so a zero-column data frame yields character(0)
  # instead of the list-subscript error sapply's empty-list result caused.
  names(data)[vapply(data, is.numeric, logical(1))]
}
setupDataset <- function(env, seed=NULL)
{
# Populate `env` with derived modelling variables (vars, numerics,
# nobs, ninputs, form, a random 70/30 train/test split, NA-free
# variants of the split, and a time stamp) computed from the dataset
# variables already present in `env`.  Pass `seed` for a reproducible
# split.  NOTE(review): evalq() evaluates directly inside `env`, so
# every assignment below becomes part of `env`.
# We assume the following dataset specific variables exist in env
# data This is the actual data frame containing the dataset
# target The single target variable for prediction
# [risk] The single risk variable
# [inputs] The other variables used as inputs to predictive model
# [ignore] This overrides inputs if it is given.
# Then we add the following variables to env
# vars Variables used for modelling
# numerics The numeric vars within inputs
# nobs The number of observations
# ninputs The number of input variables
# form Formula for building models
# train A 70% training dataset
if (! is.null(seed)) set.seed(seed)
evalq({
if (! exists("risk", inherits=FALSE))
risk <- NULL
# `ignore` is only honored when no explicit `inputs` was provided.
if (exists("ignore", inherits=FALSE) && ! exists("inputs", inherits=FALSE))
inputs <- setdiff(names(data), c(target, risk, ignore))
if (! exists("inputs", inherits=FALSE))
inputs <- setdiff(names(data), c(target, risk))
vars <- c(inputs, target)
ninputs <- length(inputs)
nobs <- nrow(data)
numerics <- whichNumerics(data[inputs])
form <- as.formula(paste(target, "~ ."))
train <- sample(nobs, 0.7*nobs)
test <- setdiff(1:nobs, train)
# Indices of rows that na.omit() would drop, used to derive the
# NA-free train/test index vectors below.
na.obs <- attr(na.omit(data[vars]), "na.action")
train.na.omit <- setdiff(train, na.obs)
test.na.omit <- setdiff(test, na.obs)
time.stamp <- date()
}, env)
}
########################################################################
# CALLBACKS
on_data_csv_radiobutton_toggled <- function(button)
{
  # When the CSV source becomes active: expose the CSV-related widgets,
  # install CSV filename filters and restore the last remembered CSV
  # filename.  On deactivation: remember the current filename so it can
  # be restored next time.
  if (button$getActive())
  {
    dataTabShow("data_filename_label",
                "data_filechooserbutton",
                "data_separator_label",
                "data_separator_entry",
                "data_decimal_label",
                "data_decimal_entry",
                "data_header_checkbutton")
    updateFilenameFilters("data_filechooserbutton", "CSV")
    last <- crs$data.tab.csv.filename
    if (not.null(last))
      theWidget("data_filechooserbutton")$setUri(last)
  }
  else
  {
    crs$data.tab.csv.filename <- theWidget("data_filechooserbutton")$getUri()
  }
}
on_data_arff_radiobutton_toggled <- function(button)
{
  # ARFF source active: show only the filename widgets, install ARFF
  # filters and restore the remembered file.  Inactive: remember the
  # currently chosen file.
  if (button$getActive())
  {
    dataTabShow("data_filename_label",
                "data_filechooserbutton")
    updateFilenameFilters("data_filechooserbutton", "ARFF")
    last <- crs$data.tab.arff.filename
    if (not.null(last))
      theWidget("data_filechooserbutton")$setUri(last)
  }
  else
  {
    crs$data.tab.arff.filename <- theWidget("data_filechooserbutton")$getUri()
  }
}
on_data_rdata_radiobutton_toggled <- function(button)
{
  # Rdata source active: show the filename and dataset-name widgets,
  # install Rdata filters, clear the name combobox and restore the
  # remembered filename/selection.  Inactive: remember both choices.
  if (button$getActive())
  {
    dataTabShow("data_filename_label",
                "data_filechooserbutton",
                "data_name_label",
                "data_name_combobox")
    updateFilenameFilters("data_filechooserbutton", "Rdata")
    cbox <- theWidget("data_name_combobox")
    cbox$getModel()$clear()
    if (not.null(crs$data.tab.rdata.filename))
      theWidget("data_filechooserbutton")$setUri(crs$data.tab.rdata.filename)
    if (not.null(crs$data.tab.rdata.active))
      cbox$setActive(crs$data.tab.rdata.active)
  }
  else
  {
    crs$data.tab.rdata.filename <- theWidget("data_filechooserbutton")$getUri()
    crs$data.tab.rdata.active <- theWidget("data_name_combobox")$getActive()
  }
}
on_data_rdataset_radiobutton_toggled <- function(button)
{
  # R Dataset source active: show the name widgets and repopulate the
  # combobox, restoring the remembered selection.  Inactive: remember
  # the current selection.
  if (button$getActive())
  {
    dataTabShow("data_name_label", "data_name_combobox")
    updateRDatasets(current=crs$data.tab.rdataset.name)
  }
  else
  {
    crs$data.tab.rdataset.name <- theWidget("data_name_combobox")$getActiveText()
  }
}
on_data_corpus_radiobutton_toggled <- function(button)
{
  # Flip the data notebook to the corpus page while the corpus source
  # is active, and back to the default CSV page when it is not.
  page <- if (button$getActive()) crv$DATA.CORPUS.TAB else crv$DATA.CSV.TAB
  crv$DATA.NOTEBOOK$setCurrentPage(page)
}
# 080907 Trying to get an event that will auto update the combobox
# without having to move to another radio button and then back again.
on_data_name_combobox_button_press_event <- function(button)
{
  # Debug hook: trace the event, then refresh the dataset list so the
  # combobox picks up newly created data frames.
  print("Button Press")
  updateRDatasets()
}
on_data_name_combobox_enter_notify_event <- function(button)
{
  # Debug hook: trace the event, then refresh the dataset list.
  print("Enter Notify")
  updateRDatasets()
}
on_data_name_combobox_focus <- function(button)
{
  # Debug hook: trace the event, then refresh the dataset list.
  print("Focus")
  updateRDatasets()
}
on_data_name_combobox_set_focus_child <- function(direction, data)
{
  # Debug hook: trace the event and its data argument.  The refresh
  # call is deliberately left disabled here, as it was originally.
  print("Focus Child")
  #print(direction)
  print(data)
  #updateRDatasets()
}
on_data_name_combobox_focus_in_event <- function(direction, data)
{
  # Debug hook: trace the event; the refresh call stays disabled, as in
  # the original.
  print("Focus In")
  #print(direction)
  #updateRDatasets()
}
#
on_data_library_radiobutton_toggled <- function(button)
{
  # Library source active: show the dataset-name widgets and repopulate
  # the combobox from the package library, restoring the remembered
  # selection.  Inactive: remember the current selection.
  if (button$getActive())
  {
    dataTabShow("data_name_label", "data_name_combobox")
    updateDataLibrary(crs$data.tab.library.name)
  }
  else
  {
    crs$data.tab.library.name <- theWidget("data_name_combobox")$getActiveText()
  }
}
on_data_odbc_radiobutton_toggled <- function(button)
{
  # ODBC source active: show only the ODBC connection widgets.  Nothing
  # is done on deactivation (the entries retain their own state).
  if (! button$getActive()) return(invisible(NULL))
  dataTabShow("data_odbc_dsn_label",
              "data_odbc_dsn_entry",
              "data_odbc_table_label",
              "data_odbc_table_combobox",
              "data_odbc_limit_label",
              "data_odbc_limit_spinbutton",
              "data_odbc_believeNRows_checkbutton")
}
updateRDatasets <- function(current=NULL, cbox.name="data_name_combobox")
{
# Update a combo box with just the available data frames and matrices.
# Scans the user workspace (sys.frame(0)) for objects that are data
# frames or sqlite.data.frame objects, lists their names in the
# combobox named by `cbox.name`, and restores the selection given in
# `current` when it is still among them.
set.cursor("watch", Rtxt("Determining the available datasets...."))
# 130126 We might be able to use get.objects("data.frame") here?
# NOTE(review): eval(parse(text=...)) over workspace object names is
# fragile (names needing backquotes will fail to parse); the try()
# below papers over such failures by silently dropping the object.
dl <- unlist(sapply(ls(sys.frame(0)),
function(x)
{
cmd <- sprintf(paste("is.data.frame(%s) ||",
'inherits(%s,',
'"sqlite.data.frame")'), x, x)
var <- try(ifelse(eval(parse(text=cmd), sys.frame(0)),
x, NULL), silent=TRUE)
if (inherits(var, "try-error"))
var <- NULL
return(var)
}))
cbox <- theWidget(cbox.name)
cbox$getModel()$clear()
if (not.null(dl))
{
lapply(dl, cbox$appendText)
# Set the selection to that which was supplied.
if (not.null(current) && current %in% dl)
cbox$setActive(which(sapply(dl, function(x) x==current))[1]-1)
}
set.cursor(message=Rtxt("Data Names updated."))
}
on_data_target_survival_radiobutton_toggled <- function(button)
{
  # 091206 Switching to the Survival paradigm relabels the Target/Risk
  # columns of the variable treeview as Time/Status; switching away
  # restores the standard labels.
  target <- theWidget("select_treeview")$getColumn(crv$COLUMN["target"])
  risk <- theWidget("select_treeview")$getColumn(crv$COLUMN["risk"])
  if (button$getActive())
  {
    labels <- c(Rtxt("Time"), Rtxt("Status"))
  }
  else
  {
    labels <- c(Rtxt("Target"), Rtxt("Risk"))
  }
  target$setTitle(labels[1])
  risk$setTitle(labels[2])
}
########################################################################
# EXECUTE
executeDataTab <- function(csvname=NULL)
{
# Execute the Data tab: load the dataset for the active data source
# (if one needs loading), then refresh the variable roles, sampling
# widgets and dependent tabs.  Returns FALSE when loading fails or the
# sample entry is invalid.
# Dispatch to the task indicated by the selected radio button within
# the Data tab. 090315 Previously I tested if there was a change
# to the data source (with dataNeedsLoading) but this continually
# got complicated between different OS and different data sources,
# etc. So now we never reload a dataset, unless no dataset is
# loaded. To load a new dataset, click New project first. Unless the
# data type label is not sensitive (i.e., we have loaded a project),
# simply update the variable roles without reloading the data.
# if (not.null(csvname))
# {
# if (! executeDataCSV(csvname)) return(FALSE)
# }
if (theWidget("data_type_label")$isSensitive() && dataNeedsLoading())
{
if (theWidget("data_csv_radiobutton")$getActive())
{
if (! executeDataCSV(csvname)) return(FALSE)
}
else if (theWidget("data_arff_radiobutton")$getActive())
{
if (! executeDataARFF()) return(FALSE)
}
else if (theWidget("data_odbc_radiobutton")$getActive())
{
if (! executeDataODBC()) return(FALSE)
}
else if (theWidget("data_rdata_radiobutton")$getActive())
{
if (! executeDataRdata()) return()
}
else if (theWidget("data_rdataset_radiobutton")$getActive())
{
if (! executeDataRdataset()) return()
}
else if (theWidget("data_library_radiobutton")$getActive())
{
if (! executeDataLibrary()) return()
}
else if (theWidget("data_corpus_radiobutton")$getActive())
{
if (! executeDataCorpus()) return()
}
else if (theWidget("data_script_radiobutton")$getActive())
{
if (! executeDataScript()) return()
}
else
return()
# Update the select treeview. This is done on a Data execute only
# when a new dataset has been loaded. If the user has simply
# changed some of the roles or the sampling then we do not do a
# reset, just an update.
createVariablesModel(colnames(crs$dataset))
# Whether we have changed the dataset or not we need to generate the
# sample and then record the variable roles.
# Turn sampling on, set range bounds and generate the default 70%
# sample. Do the range bounds first since otherwise the value gets
# set back to 1. Also, need to set both the percentage and the count
# since if the old percentage is 70 and the new is 70, then no
# change in value is noticed, and thus the count is not
# automatically updated.
# 090315 Sampling should be on by default. I had a test here
# "!is.null(RATTLE.SCORE.IN)" which, after cleaning up the
# handling of global variables, is now FALSE, whereas previously
# it must have been TRUE. Simply set to TRUE here until we find
# why that was being done. Might need another crv tuning
# parameter.
theWidget("data_sample_checkbutton")$setActive(TRUE)
# 090513 Reset the default sample size percentage and ensure it
# holds (hence we need more than just setting the percentage spin
# button.
nrows <- nrow(crs$dataset)
per <- crv$default.train.percentage
srows <- round(nrows * per / 100)
theWidget("sample_count_spinbutton")$setRange(1,nrows)
theWidget("sample_count_spinbutton")$setValue(srows)
theWidget("sample_percentage_spinbutton")$setValue(per)
theWidget("data_sample_entry")$setText(crv$default.sample)
}
else
resetRattle(new.dataset=FALSE)
# 090416 Move the following from the above if branch to here. Reset
# the sampling options here, except for whether sampling is
# on/off. Thus, on loading a new dataset, sampling is set on
# above. But if we modify the dataset external to Rattle, we want to
# set new parameters here, yet leave the sampling checkbutton as it
# was. The extra settings here are often redundant, but needed for
# the "modified in R" case. 090513 Though now that I have this code
# both here and above, we might need to revisit the logic!
#
# We set range bounds and generate the default 70% sample. Do the
# range bounds first since otherwise the value gets set back to
# 1. Also, need to set both the percentage and the count since if
# the old percentage is 70 and the new is 70, then no change in
# value is noticed, and thus the count is not automatically updated,
# even if the number of rows has been changed.
nrows <- nrow(crs$dataset)
# 090513 Remove the resetting of the sample size to 70 from here,
# but get the current value. Otherwise, the sample size is always
# reset to 70 on each Execute of the Data tab - not desired. Now
# need to only reset it to 70 on loading a new dataset.
if (newSampling())
per <- getTrainingPercent()
else
per <- theWidget("sample_percentage_spinbutton")$getValue()
srows <- round(nrows * per / 100)
theWidget("sample_count_spinbutton")$setRange(1,nrows)
theWidget("sample_count_spinbutton")$setValue(srows)
theWidget("sample_percentage_spinbutton")$setValue(per)
crv$DATA.DISPLAY.NOTEBOOK$setCurrentPage(crv$DATA.DISPLAY.TREEVIEW.TAB)
# else
# {
# resetRattle(new.dataset=FALSE)
#
# if (dataNeedsLoading())
# {
#
# # Just duplicate above for now to get this working.
# createVariablesModel(colnames(crs$dataset)) # BUT THIS REVERTS TO DEFAULTS
# nrows <- nrow(crs$dataset)
# per <- 70
# srows <- round(nrows * per / 100)
# theWidget("data_sample_checkbutton")$setActive(not.null(RATTLE.SCORE.IN))
# theWidget("sample_count_spinbutton")$setRange(1,nrows)
# theWidget("sample_count_spinbutton")$setValue(srows)
# theWidget("sample_percentage_spinbutton")$setValue(per)
# }
#
# }
# TODO 080520 Change the name to updateSample.
## 080603 NOT NEEDED AS DONE IN executeSelectTab
## executeSelectSample()
# Execute the SELECT tab. Changes have been made and we need to
# ensure the cached role variables are updated, or else we might see
# unexpected warnings about changes having been made but not
# EXECUTEd. [071125]
if (theWidget("data_sample_checkbutton")$getActive() &&
! validateSampleEntry()) return(FALSE)
# TODO 080520 Change the name to updateRoles.
setGuiDefaultsSurvival() # 100505 Moved here from below
executeSelectTab()
resetTestTab()
resetExploreTab()
# 100505 Move to before executeSelectTab, otherwise the labels get set
# back to stating no variables selected.
# setGuiDefaultsSurvival()
# Set the risk label appropriately.
theWidget("evaluate_risk_label")$setText(crs$risk)
# Enable the Data View and Edit buttons.
showDataViewButtons()
return()
}
#-----------------------------------------------------------------------
# EXECUTE DATA CSV
executeDataCSV <- function(filename=NULL)
{
# Either a filename is supplied in the function call or a filename
# is expected to be available in the data_filechooserbutton. This
# could be either a CSV or TXT file. If no filename is supplied,
# then give the user the option to load a sample dataset (for now,
# the weather dataset).
supplied <- filename
# Begin by collecting the relevant data from the interface. 080511
# The file chooser button has a getFilename to retrieve the
# filename. The getUri also retrieves the file name, but as a
# URL. So we use this, since R can handle the
# "file:///home/kayon/audit.csv" just fine. Thus I have now allowed
# the filechooser button to accept non-local files (i.e.,
# URLs). Unfortunately I can't yet get the basename of the URL to be
# displayed in the button text. 080512 The URLdecode will replace
# the %3F with "?" and %3D with "=", etc, as is required for using
# this with the read.csv function.
if (is.null(filename))
filename <- theWidget("data_filechooserbutton")$getUri()
# If no filename has been supplied give the user the option to use
# the Rattle supplied sample dataset.
use.sample.dataset <- FALSE
if (not.null(supplied))
{
# 090314 Trying to get the scenario of a supplied filename
# working, so that it is displayed in the Filename box and
# dataNeedsLoading does not think a new file needs loading on the
# next Execute.
if (substr(filename, 1, 5) != "file:")
{
if (substr(filename, 1, 1) == "/")
filename <- paste("file://", filename, sep="")
else
filename <- paste("file:///", filename, sep="")
}
# 090314 Added to ensure we get the filename listed properly. This
# seems to be relevant only if a filename was supplied (it is also
# done below for the case when the rattle supplied dataset is
# laoded. Perhaps this should be done up there?
theWidget("data_filechooserbutton")$setUri(filename)
# 090314 Do this because it was done below.
while (RGtk2::gtkEventsPending()) RGtk2::gtkMainIterationDo(blocking=FALSE)
}
else if (is.null(filename))
{
if (! questionDialog(sprintf(Rtxt("No CSV filename has been provided.",
"\n\nWe require a dataset to be loaded.",
"\n\nWould you like to use the example",
"%s dataset?"),
Rtxt(crv$sample.dataset))))
# If no filename is given and the user decides not to go with
# the sample dataset then return without doing anything.
return(FALSE)
else
{
# 080515 Use the Rattle provided sample dataset.
use.sample.dataset <- TRUE
filename <- system.file("csv", paste(crv$sample.dataset, ".csv", sep=""),
package="rattle")
theWidget("data_filechooserbutton")$setFilename(filename)
# 130825 This does not get reflected in the GUI? Can't work out
# how to make it so. For now it stays as None.
# Make sure we end up with a URI since a URI is otherwise used
# when retrieving the information from the filechooserbutton
# widget. If we don't do this then the crs$dwd does not include
# the "file://" bit, and thus dataNeedsLoading returns TRUE the
# next time, which is not right! 090214 This does not work for
# MS/Windows. The filename is something like "C:/..." and this
# ends up adding "file://" but it should be "file:///". So check
# for this.
if (substr(filename, 1, 1) == "/")
filename <- paste("file://", filename, sep="")
else
filename <- paste("file:///", filename, sep="")
# 080713 We still need the events flush with tootiphack set
# since otherwise we have to lose focus before the screen gets
# updated.
while (RGtk2::gtkEventsPending()) RGtk2::gtkMainIterationDo(blocking=FALSE)
#gtkmainquit_handler(NULL, NULL)
#gtkmain_handler(NULL, NULL)
}
}
else
{
filename <- URLdecode(filename)
Encoding(filename) <- "UTF-8" # 100408 Japanese otherwise dirname fails. Try for all.
}
crs$dwd <- dirname(filename)
crs$mtime <- urlModTime(filename)
# If there is a model warn about losing it.
if (! overwriteModel()) return(FALSE)
# Fix filename for MS - otherwise eval/parse strip the \\.
if (isWindows()) filename <- gsub("\\\\", "/", filename)
# Get the separator and decimal to use.
sep = theWidget("data_separator_entry")$getText()
if (sep != ",")
sep <- sprintf(', sep="%s"', sep)
else
sep <- ""
dec = theWidget("data_decimal_entry")$getText()
if (dec != ".")
dec <- sprintf(', dec="%s"', dec)
else
dec <- ""
# Check whether we expect a header or not.
if (theWidget("data_header_checkbutton")$getActive())
hdr <- ""
else
hdr <- ", header=FALSE"
nastring <- ', na.strings=c(".", "NA", "", "?")'
stripwhite <- ', strip.white=TRUE'
# Generate commands to read the data. 091130 Add encoding to use the
# configured encoding.
if (use.sample.dataset)
read.cmd <- sprintf(paste('crs$dataset <-',
'read.csv(system.file("csv",',
'"%s.csv", package="rattle"),',
'encoding="%s")'),
crv$sample.dataset, crv$csv.encoding)
else if (tolower(get.extension(filename)) %in% c("xls", "xlsx"))
{
if (! packageIsAvailable("readxl", Rtxt("read .xls or .xlsx files"))) return(FALSE)
# 100114 A quick hack to allow reading MS/Excel files. 150517
# Notice the use of library() rather than require(). We really
# need to attach the package not try to attach the package.
read.cmd <- sprintf(paste("library(readxl, quietly=TRUE)",
'crs$dataset <- read_excel("%s")',
# Make sure we return the actual dataset
# as the result as that is assumed.
"crs$dataset",
sep="\n"),
sub("file:///", ifelse(isWindows(), "", "/"), filename))
# 130612 Still needed for isWindows? sub("file:///", "", filename))
}
else
# 100428 With read.csv("...", encoding="UTF-8") column names that
# are purely UTF-8 see the trailing comma as part of the column
# name, and so get merged with the next column. Need to ensure the
# encodng option is included in the file argument instead. I think
# that readTableHeader might be the culprit., but not tested. TODO
# This will need fixing everywhere that read.csv is used.
# 10429 Only use file(..., encoding) for Japanese. Otherwise
# put the encoding as argument to read.csv which always works on
# Linux?
if (isJapanese())
read.cmd <- sprintf('crs$dataset <- read.csv(file("%s", encoding="%s")%s%s%s%s%s)',
filename, crv$csv.encoding, hdr, sep, dec, nastring,
stripwhite)
else
read.cmd <- sprintf('crs$dataset <- read.csv("%s"%s%s%s%s%s, encoding="%s")',
filename, hdr, sep, dec, nastring, stripwhite,
crv$csv.encoding)
# Start logging and executing the R code.
startLog()
appendLog(Rtxt("Load the data."), read.cmd)
resetRattle()
result <- try(eval(parse(text=read.cmd)), silent=TRUE)
if (inherits(result, "try-error"))
{
if (any(grep("cannot open the connection", result)))
{
errorDialog(sprintf(Rtxt("The file you specified could not be found:",
"\n\n\t%s",
"\n\nPlease check the filename and try again."),
filename))
return(FALSE)
}
else if (any(grep("no lines available in input", result))
| any(grep("first five rows are empty: giving up", result)))
{
errorDialog(sprintf(Rtxt("The file you specified is empty:",
"\n\n\t%s",
"\n\nPlease check the file and try again."),
filename))
return(FALSE)
}
else if (any(grep("duplicate", result)))
{
errorDialog(sprintf(Rtxt("The dataset loaded from the file:",
"\n\n\t%s",
"\n\nhas duplicate columns.",
"This is sometimes due to using an incorrect",
"separator (%s) or decimal point (%s) in the file.",
"Or it might be because the file has no header line.",
"\n\nThe actual error message was: %s",
"\nPlease check the file format and the defaults",
"set in the Data tab and try again."),
filename, theWidget("data_separator_entry")$getText(),
theWidget("data_decimal_entry")$getText(), result))
return(FALSE)
}
else
return(errorReport(read.cmd, result))
}
if (ncol(result) < 2)
{
errorDialog(sprintf(Rtxt("The data from the file:",
"\n\n\t%s",
"\n\ncontains only a single column.",
"This is not usually what is expected and",
"is often due to using something other than the specified",
"separator (%s) and decimal point (%s) in the file.",
"\n\nPlease check the file format and the defaults",
"set in the Data tab and try again."),
filename, theWidget("data_separator_entry")$getText(),
theWidget("data_decimal_entry")$getText()))
return(FALSE)
}
crs$dataname <- basename(filename)
# 110306 Encoding(crs$dataname) <- "UTF-8"
# 110306 For Japanese hopefully this works better:
if (isJapanese()) crs$dataname <- iconv(crs$dataname, from="UTF-8")
setMainTitle(crs$dataname)
# Update the Data Tab Treeview and Samples.
## resetVariableRoles(colnames(crs$dataset), nrow(crs$dataset))
# Enable the Data View and Edit buttons.
## showDataViewButtons()
setStatusBar(sprintf(Rtxt("The CSV file has been loaded: %s.",
"Please wait whilst we extract its structure..."),
crs$dataname))
return(TRUE)
}
########################################################################
# OLD DATA TAB STUFF MIGRATING TO THE ABOVE
#
# Callback for the Data tab's View button: display the current dataset
# in a read-only viewer (see viewData()).
on_data_view_button_clicked <- function(button)
{
  viewData()
}
# Callback for the Data tab's Edit button: open the current dataset in
# an editor and replace it with the edited result (see editData()).
on_data_edit_button_clicked <- function(button)
{
  editData()
}
# Callback invoked when a new file is chosen on the Data tab.
#
# When the filename has been changed on the Data tab check if further
# action is required. If the RData File source is active, then load
# the corresponding .Rdata file and extract the dataset names so the
# user can choose one from the Data Name combobox.
on_data_filechooserbutton_file_set <- function(button)
{
  if (theWidget("data_rdata_radiobutton")$getActive())
    updateRDataNames()
}
updateRDataNames <- function(filename=NULL)
{
  # Load an RData file and populate the Data Name combobox with the
  # names of the R objects it contains.
  #
  # filename: path of the RData file to load. If NULL (the default)
  #   the filename is obtained from the Data tab's file chooser
  #   button.
  #
  # Side effects: loads the file's objects into the global
  # environment, records crs$dwd/crs$mtime, and rebuilds the
  # data_name_combobox entries.

  # Honour a caller-supplied filename; otherwise query the GUI. (The
  # original code unconditionally overwrote the argument, leaving the
  # parameter dead.)
  if (is.null(filename))
    filename <- theWidget("data_filechooserbutton")$getFilename()

  # Record the working directory and modification time so later code
  # can detect whether the data needs reloading.
  crs$dwd <- dirname(filename)
  crs$mtime <- urlModTime(filename)

  # Fix filename for MS - otherwise eval/parse strip the \\.
  if (isWindows()) filename <- gsub("\\\\", "/", filename)

  # Generate the command to load the file. load() returns the names
  # of the objects it restored, recorded in crs$rdata.datasets.
  load.cmd <- sprintf('crs$rdata.datasets <- load("%s")', filename)

  # Start logging and executing the R code.
  startLog()
  appendLog(Rtxt("Load an RData file containing R objects."), load.cmd)
  set.cursor("watch", Rtxt("Loading the RData file..."))
  eval(parse(text=load.cmd), .GlobalEnv) # Env so datasets are globally available.

  # Add the newly available datasets to the combo box.
  combobox <- theWidget("data_name_combobox")
  if (not.null(crs$rdata.datasets))
  {
    combobox$getModel()$clear()
    lapply(crs$rdata.datasets, combobox$appendText)
  }

  setStatusBar(Rtxt("The list of available datasets has been updated",
                    "from the supplied data file.",
                    "Choose one dataset from the Data Name box."))
}
#-----------------------------------------------------------------------
# DATA LIBRARY
#
# 080522 Migrated this from the old interface to the new
# interface. Maybe this is now called whenever the Library radio
# button is activated.
#
# OLD: Update the library combo box with all of the available
# datasets. Can take a little time the first time to generate the
# list. I've associated this with the focus callback, but then it is
# called also when it loses focus!!!
updateDataLibrary <- function(current=NULL)
{
  # Populate the Data Name combobox with every dataset available from
  # any installed package. Can take a little time on first use.
  #
  # current: the combobox entry to leave selected. If NULL, the
  #   current selection (if any) is preserved.
  #
  # OLD: TODO How to tell that this is a "gain focus" action and not a
  # "lose focus" action, since we only want to build the list on
  # gaining focus.
  data.name.combobox <- theWidget("data_name_combobox")

  # Record the current selection so that we can keep it as the default.
  if (is.null(current)) current <- data.name.combobox$getActiveText()

  # This could take a little while, so use the watch cursor to
  # indicate we are busy.
  set.cursor("watch", Rtxt("Determining the available datasets from all packages...."))

  # 090418 Suppress warnings about datasets having moved to 'datasets'.
  opt <- options(warn=-1)
  da <- data(package = .packages(all.available = TRUE))
  options(opt)

  # Each entry has the form "Item:Package:Title", sorted alphabetically.
  dl <- sort(paste(da$results[,'Item'], ":", da$results[,'Package'],
                   ":", da$results[,'Title'], sep=""))

  # Add the entries to the combo box.
  data.name.combobox$getModel()$clear()
  if (not.null(dl))
  {
    lapply(dl, data.name.combobox$appendText)
    # Restore the previous selection, if possible. match() returns the
    # first matching index directly, replacing the original
    # which(sapply(...)) linear scan. Combobox indices are zero based.
    if (not.null(current) && current %in% dl)
      data.name.combobox$setActive(match(current, dl) - 1L)
  }
  set.cursor(message="")
}
#-----------------------------------------------------------------------
# Callback wrapper: (re)connect to the specified ODBC DSN and
# repopulate the list of available tables (see openODBCSetTables()).
open_odbc_set_combo <- function(button)
{
  openODBCSetTables()
}
openODBCSetTables <- function()
{
  # Populate the table combobox from an ODBC data source.
  #
  # This is for use in the callback for when the ODBC DSN name has
  # changed (associated with the "activate" signal). The ODBC
  # connection is opened and queried for its list of tables, which are
  # then loaded into data_odbc_table_combobox.
  #
  # Returns TRUE on success, FALSE on any failure (RODBC missing,
  # connection failure, or table query failure).

  # Obtain the name of the DSN from the GUI.
  DSNname <- theWidget("data_odbc_dsn_entry")$getText()

  # Check if we should believe the number of rows.
  bnumrows <- sprintf(", believeNRows=%s",
                      ifelse(theWidget("data_odbc_believeNRows_checkbutton")$getActive(),
                             "TRUE", "FALSE"))

  # Generate commands to connect to the database and retrieve the
  # tables. (Plain strings suffice where no format specifiers are
  # needed - the original wrapped them in sprintf() unnecessarily.)
  lib.cmd <- "library(RODBC)"
  connect.cmd <- sprintf('crs$odbc <- odbcConnect("%s"%s)', DSNname, bnumrows)
  tables.cmd <- 'crs$odbc.tables <- sqlTables(crs$odbc)$TABLE_NAME'

  # Ensure the RODBC library is available or else we can not support ODBC.
  if (! packageIsAvailable("RODBC", Rtxt("connect to an ODBC database"))) return(FALSE)

  startLog(Rtxt("Open an ODBC connection."))
  appendLog(Rtxt("Require the RODBC package."), lib.cmd)

  # 140906 Use namespaces within the code, though still expose the
  # interactive commands through the log.

  # Close all currently open channels. This assumes that the user is
  # not opening channels themselves. It could be a bad choice, but
  # assume we are addressing the usual Rattle user.
  RODBC::odbcCloseAll()

  appendLog(Rtxt("Open the connection to the ODBC service."), connect.cmd)
  result <- try(eval(parse(text=connect.cmd)))
  if (inherits(result, "try-error"))
  {
    errorDialog(Rtxt("The attempt to open the ODBC connection failed.",
                     "Please check that the DSN is correct.",
                     "See the R Console for further details."))
    return(FALSE)
  }

  appendLog(Rtxt("Load the names of available tables."), tables.cmd)
  set.cursor("watch")
  result <- try(eval(parse(text=tables.cmd)))
  set.cursor()
  if (inherits(result, "try-error"))
  {
    errorDialog(Rtxt("The attempt to query the ODBC connection failed.",
                     "Please check that the DSN is correct.",
                     "See the R Console for further details."))
    return(FALSE)
  }

  # Add the list of tables to the combo box.
  combobox <- theWidget("data_odbc_table_combobox")
  if (not.null(crs$odbc.tables))
  {
    combobox$getModel()$clear()
    lapply(crs$odbc.tables, combobox$appendText)
  }

  setStatusBar(Rtxt("ODBC connection to database established. Now select a table."))
  return(TRUE)
}
#----------------------------------------------------------------------
#
# Execution
#
# Rebuild the variable-roles treeview for the current dataset and
# optionally regenerate the default 70% training sample.
#
# variables: character vector of column names of the dataset.
# nrows: number of rows in the dataset, used to size the sample.
# input/target/risk/ident/ignore/weight: variables to pre-assign to
#   each role (NULL means none pre-assigned for that role).
# zero/mean/boxplot/hisplot/cumplot/benplot/barplot/dotplot/mosplot/
#   paiplot: per-variable settings forwarded to createVariablesModel().
# resample: if TRUE, enable sampling and generate the default sample.
# autoroles: if TRUE, heuristically determine roles for variables not
#   explicitly assigned one.
resetVariableRoles <- function(variables, nrows, input=NULL, target=NULL,
                               risk=NULL, ident=NULL, ignore=NULL, weight=NULL,
                               zero=NULL, mean=NULL,
                               boxplot=NULL,
                               hisplot=NULL, cumplot=NULL, benplot=NULL,
                               barplot=NULL, dotplot=NULL, mosplot=NULL, paiplot=NULL,
                               resample=TRUE, autoroles=TRUE)
{
  # Update the SELECT treeview with the dataset variables.
  createVariablesModel(variables, input, target, risk, ident, ignore,
                       weight, zero, mean, boxplot, hisplot, cumplot,
                       benplot, barplot, dotplot, mosplot, paiplot,
                       autoroles=autoroles)
  if (resample)
  {
    # Turn sampling on, set range bounds and generate the default 70%
    # sample. Do the range bounds first since otherwise the value gets
    # set back to 1. Also, need to set both the percentage and the
    # count since if the old percentage is 70 and the new is 70, then
    # no change in value is noticed, and thus the count is not
    # automatically updated.
    per <- 70
    srows <- round(nrows * per / 100)
    theWidget("data_sample_checkbutton")$setActive(TRUE)
    theWidget("sample_count_spinbutton")$setRange(1,nrows)
    theWidget("sample_count_spinbutton")$setValue(srows)
    theWidget("sample_percentage_spinbutton")$setValue(per)
    theWidget("data_sample_entry")$setText(crv$default.sample)
    executeSelectSample()
  }
  # Execute the SELECT tab. Changes have been made and we need to
  # ensure the cached role variables are updated, or else we might see
  # unexpected warnings about changes having been made but not
  # EXECUTEd. [071125]
  executeSelectTab(resample)
  # Set the risk label appropriately.
  theWidget("evaluate_risk_label")$setText(crs$risk)
}
resetDatasetViews <- function(input, target, risk, ident, ignore, weight=NULL)
{
  # Clear each of the variable-role treeviews, then rebuild them with
  # the supplied role assignments, without resampling or automatic
  # role detection.
  for (view in c("select_treeview", "impute_treeview",
                 "categorical_treeview", "continuous_treeview"))
    theWidget(view)$getModel()$clear()

  # Recreate the treeviews, setting the roles as provided.
  resetVariableRoles(colnames(crs$dataset), nrow(crs$dataset),
                     input=input, target=target, risk=risk,
                     ident=ident, ignore=ignore, weight=weight,
                     resample=FALSE, autoroles=FALSE)
}
# Placeholder for loading a dataset via a user-supplied R script.
# Currently unimplemented: reports this in the status bar and returns
# FALSE so the caller knows no data was loaded.
executeDataScript <- function()
{
  setStatusBar(Rtxt("The script option is not yet implemented."))
  return(FALSE)
}
executeDataARFF <- function()
{
  # Load an ARFF (Weka) data file, chosen through the Data tab's file
  # chooser, into crs$dataset.
  #
  # Returns TRUE on success and FALSE if the load cannot proceed (old
  # R version, no filename supplied, foreign package missing, or the
  # user declines to overwrite an existing model).

  if (!exists("getRversion", baseenv()) || getRversion() <= "2.4.0")
  {
    infoDialog(Rtxt("Support for ARFF is only available in R 2.5.0 and beyond."))
    return(FALSE)
  }

  # Collect the chosen filename (as a URI) from the GUI.
  filename <- theWidget("data_filechooserbutton")$getUri()

  # If no filename is given then return without doing anything.
  if (is.null(filename))
  {
    errorDialog(Rtxt("No ARFF Filename has been chosen yet.",
                     "You must choose one before execution."))
    return(FALSE)
  }

  filename <- URLdecode(filename)
  crs$dwd <- dirname(filename)
  crs$mtime <- urlModTime(filename)

  # We need the foreign package to read ARFF data.
  if (! packageIsAvailable("foreign", Rtxt("read an ARFF dataset"))) return(FALSE)
  lib.cmd <- "library(foreign, quietly=TRUE)"

  # If there is a model warn about losing it.
  if (! overwriteModel()) return(FALSE)

  # Fix filename for MS - otherwise eval/parse strip the \\.
  if (isWindows()) filename <- gsub("\\\\", "/", filename)

  # Generate the command to read the data. (The unused str.cmd local
  # has been removed: the structure display it supported was long
  # since disabled.)
  read.cmd <- sprintf('crs$dataset <- read.arff("%s")', filename)

  # Start logging and executing the R code.
  startLog()
  appendLog(packageProvides("foreign", "read.arff"), lib.cmd)
  eval(parse(text=lib.cmd))
  appendLog(Rtxt("Load an ARFF file."), read.cmd)
  resetRattle()
  eval(parse(text=read.cmd))

  crs$dataname <- basename(filename)
  setMainTitle(crs$dataname)

  setStatusBar(sprintf(Rtxt("The ARFF data has been loaded: %s."), crs$dataname))
  return(TRUE)
}
executeDataODBC <- function()
{
  # Retrieve a dataset from an ODBC data source into crs$dataset.
  #
  # The data source name (DSN) is taken from data_odbc_dsn_entry and
  # the table from the table combobox (populated by
  # openODBCSetTables()). Note that there is no standard LIMIT option
  # in SQL, but it is LIMIT in Teradata, so perhaps we go with that
  # for now.
  #
  # Returns TRUE on success, FALSE on error, or NULL if the user
  # aborts a large extraction.

  dsn.name <- theWidget("data_odbc_dsn_entry")$getText()
  table <- theWidget("data_odbc_table_combobox")$getActiveText()
  row.limit <- theWidget("data_odbc_limit_spinbutton")$getValue()
  believe.nrows <- theWidget("data_odbc_believeNRows_checkbutton")$getActive()
  sql.query <- "" # theWidget("odbc_sql_entry")$getText()

  # If the ODBC channel has not been opened, tell the user how to do
  # so. Use inherits() rather than comparing class() with "==", since
  # class() can return a vector of classes.
  if (! inherits(crs$odbc, "RODBC"))
  {
    errorDialog(Rtxt("A connection to an ODBC data source name (DSN) has not been",
                     "established. Please enter the DSN and press the Enter key.",
                     "This will also populate the list of tables to choose from.",
                     "After establishing the connection you can choose a table",
                     "or else enter a specific SQL query to retrieve a dataset."))
    return(FALSE)
  }

  # Error if no table from the database has been chosen.
  if (sql.query == "" && is.null(table))
  {
    errorDialog(Rtxt("No table nor SQL query has been specified.",
                     "Please identify the name of the table you wish to load.",
                     "All tables in the connected database are listed",
                     "once a connection is made.",
                     "\n\nAlternatively, enter a query to retrieve a dataset."))
    return(FALSE)
  }

  # If there is a model warn about losing it.
  if (! overwriteModel()) return(FALSE)

  # Build the SQL: either the user-supplied query or a SELECT * with
  # an optional LIMIT.
  if (sql.query != "")
    sql <- sql.query
  else
  {
    sql <- sprintf("SELECT * FROM %s", table)
    if (row.limit > 0) sql <- paste(sql, "LIMIT", row.limit)
  }

  assign.cmd <- paste("crs$dataset <- sqlQuery(crs$odbc, ", '"', sql, '"',
                      ifelse(believe.nrows, "", ", believeNRows=FALSE"),
                      ")", sep="")
  str.cmd <- "str(crs$dataset)"

  if (row.limit == 0)
  {
    # Double check with the user if we are about to extract a large
    # number of rows.
    numRows <- RODBC::sqlQuery(crs$odbc, sprintf("SELECT count(*) FROM %s", table))
    if (crv$odbc.large != 0 && numRows > crv$odbc.large)
      if (! questionDialog(sprintf(Rtxt("You are about to extract %s",
                                        "rows from the table %s",
                                        "of the %s ODBC connection.",
                                        "\n\nDo you wish to continue?"),
                                   numRows, table, dsn.name)))
        return()
  }

  # Start logging and executing the R code.
  startLog()
  appendLog(Rtxt("Load dataset from ODBC database table."), assign.cmd)
  resetRattle()
  eval(parse(text=assign.cmd))

  crs$dataname <- table
  setMainTitle(crs$dataname)
  appendLog(Rtxt("Display a simple summary (structure) of the dataset."), str.cmd)
  setStatusBar(sprintf(Rtxt("The ODBC data has been loaded: %s."), crs$dataname))
  return(TRUE)
}
executeDataRdata <- function()
{
  # Load a named dataset from a previously loaded RData file into
  # crs$dataset. The RData file is chosen via the file chooser and the
  # object name via the Data Name combobox (populated by
  # updateRDataNames()).
  #
  # Returns TRUE on success, FALSE otherwise.

  filename <- theWidget("data_filechooserbutton")$getFilename()
  dataset <- theWidget("data_name_combobox")$getActiveText()

  # Error exit if no filename is given.
  if (is.null(filename))
  {
    errorDialog(Rtxt("No Rdata filename has been chosen yet.",
                     "You must choose one before execution."))
    return(FALSE)
  }

  crs$dwd <- dirname(filename)
  crs$mtime <- urlModTime(filename)

  # Error if no dataset from the Rdata file has been chosen.
  if (is.null(dataset))
  {
    errorDialog(Rtxt("No R dataset name has been specified.",
                     "Please identify the name of the R dataset.",
                     "Any data frames that were found in the loaded Rdata",
                     "file are available to choose from in the Data Name",
                     "combo box."))
    return(FALSE)
  }

  # If there is a model warn about losing it.
  if (! overwriteModel()) return(FALSE)

  # Generate the command to copy the chosen (already loaded) object
  # into crs$dataset. (The unused str.cmd local has been removed.)
  assign.cmd <- sprintf('crs$dataset <- %s', dataset)

  # Start logging and executing the R code.
  startLog()
  appendLog(Rtxt("Load an RData file."), assign.cmd)
  resetRattle()
  eval(parse(text=assign.cmd))

  crs$dataname <- dataset
  setMainTitle(crs$dataname)
  setStatusBar(sprintf(Rtxt("The data has been loaded: %s.",
                            "Please wait whilst we extract its structure..."),
                       crs$dataname))
  return(TRUE)
}
executeDataRdataset <- function()
{
  # Load a data frame that already exists in the R session into
  # crs$dataset, as selected in the Data Name combobox.
  # Returns TRUE on success, FALSE otherwise.

  chosen <- theWidget("data_name_combobox")$getActiveText()

  # 080907 Refresh the list of candidate data frames on each Execute.
  # We have no hook to update the combobox when it is clicked (which
  # would be best), so at least refresh it here.
  updateRDatasets(current=chosen)

  if (is.null(chosen))
  {
    errorDialog(Rtxt("No R dataset name has been specified.",
                     "Please identify the name of the R dataset.",
                     "Any data frames that exist in the R Console",
                     "are available from the Data Name combo box."))
    return(FALSE)
  }

  # If there is a model then warn about losing it.
  if (! overwriteModel()) return(FALSE)

  # Generate commands.
  assign.cmd <- sprintf('crs$dataset <- %s', chosen)
  str.cmd <- "str(crs$dataset)"

  # Start logging and executing the R code.
  startLog()
  appendLog(Rtxt("Load an R data frame."), assign.cmd)
  resetRattle()
  eval(parse(text=assign.cmd))

  crs$dataname <- chosen
  setMainTitle(crs$dataname)

  # 080328 Sanitise any non-supported characters in the variable
  # names, otherwise they cause problems: e.g. "a-b" used as ds$a-b is
  # interpreted as (ds$a - b)!
  names(crs$dataset) <- make.names(names(crs$dataset))

  appendLog(Rtxt("Display a simple summary (structure) of the dataset."), str.cmd)
  setStatusBar(Rtxt("The R dataset has been loaded.",
                    "Please wait whilst we extract its structure..."))
  return(TRUE)
}
executeDataLibrary <- function()
{
  # 080521 Load a dataset from an installed R package into
  # crs$dataset. The combobox entry has the form
  # "dataset:package:title", possibly with "name (loadname)" for
  # datasets loaded under a different name.
  #
  # Returns TRUE on success, FALSE otherwise.

  dataset <- theWidget("data_name_combobox")$getActiveText()
  if (is.null(dataset))
  {
    errorDialog(Rtxt("No dataset from the R libraries has been specified.",
                     "\n\nPlease identify the name of the dataset",
                     "you wish to load using the Data Name chooser."))
    return(FALSE)
  }

  # Actual dataset name as known once loaded.
  adsname <- gsub('([^ :]*).*$', '\\1', unlist(strsplit(dataset, ":"))[1])

  # Some datasets are loaded through loading another name (which
  # appears in parentheses). Extract the name that must be given to
  # data() for the load to work.
  dsname <- gsub('.* \\((.*)\\)$', '\\1', unlist(strsplit(dataset, ":"))[1])

  # Extract the name of the package from which the dataset is loaded.
  dspkg <- unlist(strsplit(dataset, ":"))[2]

  # If there is a model then warn about losing it. Return FALSE (not
  # bare NULL) for consistency with the other executeData* functions.
  if (! overwriteModel()) return(FALSE)

  # Generate commands. 090321 Also fix the variable names: some
  # datasets, like AdultUCI in arules, have names like education-num,
  # which in some cases looks like a subtraction in R. Without
  # changing it here we would need to quote the variable name
  # everywhere it is used.
  assign.cmd <- sprintf(paste('data(list = "%s", package = "%s")\n',
                              'crs$dataset <- %s\n',
                              'names(crs$dataset) <- ',
                              'gsub("-", ".", names(crs$dataset))',
                              sep=""),
                        dsname, dspkg, adsname)

  # Start logging and executing the R code.
  startLog()
  appendLog(Rtxt("Load an R dataset."), assign.cmd)
  resetRattle()
  eval(parse(text=assign.cmd))

  # Use inherits() rather than class() == "data.frame" so objects
  # carrying multiple classes (e.g. subclasses of data.frame) are
  # handled correctly.
  if (! inherits(crs$dataset, "data.frame"))
  {
    errorDialog(sprintf(Rtxt("The selected dataset, '%s', from the '%s' package",
                             "is not of class data frame (the data type).",
                             "Its data class is '%s.'",
                             "This is not currently supported by %s",
                             "and so it can not be loaded. Perhaps choose a different",
                             "dataset from the library."),
                        adsname, dspkg, class(crs$dataset), crv$appname))
    return(FALSE)
  }

  crs$dataname <- adsname
  crs$datapkg <- dspkg
  setMainTitle(crs$dataname)
  setStatusBar(Rtxt("The R package data is now available."))
  return(TRUE)
}
# Display the current dataset read-only: use RGtk2Extras::dfedit as a
# spreadsheet-like viewer when available, otherwise fall back to a
# plain textview window showing a printed rendering of the dataset.
viewData <- function()
{
  startLog(Rtxt("View the dataset."))
  if (packageIsAvailable("RGtk2Extras", Rtxt("view data in a spreadsheet")))
  {
    # 151115 We currently get the issue:
    #
    # Error in MakeDFEditWindow(.local, .local$theFrame, size.request, col.width) (from <text>#1) :
    # could not find function "gtkTreePathNewFromString"
    #
    # This is a NAMESPACE issue and a workaround is to
    # require(RGtk2Extras). Eventually need to work out the correct
    # solution.
    lib.cmd <- sprintf("library(RGtk2Extras)")
    appendLog(packageProvides("RGtk2Extras", "dfedit"), lib.cmd)
    eval(parse(text=lib.cmd))
    view.cmd <- paste('RGtk2Extras::dfedit(crs$dataset,\n',
                      ' ',
                      'dataset.name=Rtxt("Rattle Dataset"),\n',
                      ' ',
                      'size=c(800, 400))')
    appendLog(Rtxt("Please note that any edits will be ignored."), view.cmd)
    eval(parse(text=view.cmd))
  }
  else
  {
    # Locate rattle's glade interface description, falling back to the
    # current directory when the installed package path is unavailable.
    result <- try(etc <- file.path(path.package(package="rattle")[1], "etc"),
                  silent=TRUE)
    if (inherits(result, "try-error"))
      crs$viewdataGUI <- gladeXMLNew("rattle.glade", root="viewdata_window")
    else
      crs$viewdataGUI <- gladeXMLNew(file.path(etc,"rattle.glade"),
                                     root="viewdata_window")
    gladeXMLSignalAutoconnect(crs$viewdataGUI)
    # Render the whole dataset into the textview. A very wide line
    # width avoids wrapping rows mid-record.
    tv <- crs$viewdataGUI$getWidget("viewdata_textview")
    tv$modifyFont(RGtk2::pangoFontDescriptionFromString(crv$textview.font))
    op <- options(width=10000)
    tv$getBuffer()$setText(collectOutput("print(crs$dataset)"))
    options(op)
    crs$viewdataGUI$getWidget("viewdata_window")$
      setTitle(paste(crv$appname, ": ", Rtxt("Data Viewer"), sep=""))
  }
}
# Open the dataset in an editor (dfedit if available, otherwise base
# edit()) and replace crs$dataset with the edited result. All current
# selections and models are reset.
editData <- function()
{
  # Check if there is a model first and then warn about losing it.
  if (! overwriteModel()) return()
  # Start logging.
  startLog(Rtxt("Edit the dataset."))
  # Generate the command to execute: start a fresh data frame if no
  # dataset is currently loaded, otherwise edit the existing one.
  assign.cmd <- if (is.null(crs$dataset))
                  'crs$dataset <- edit(data.frame())'
                else if (packageIsAvailable("RGtk2Extras"))
                  paste('crs$dataset <- RGtk2Extras::dfedit(crs$dataset,\n',
                        ' ',
                        'dataset.name=Rtxt("Rattle Dataset"),\n',
                        ' ',
                        'size=c(800, 400))')
                else
                  'crs$dataset <- edit(crs$dataset)'
  # Update the log with the command that is run.
  appendLog(Rtxt("Note that edits overwrite the current dataset."), assign.cmd)
  # Preserve the dataset across the reset, because resetRattle clears
  # everything.
  ds <- crs$dataset
  resetRattle()
  crs$dataset <- ds
  eval(parse(text=assign.cmd))
  crs$dataname <- "dataset"
  # TODO fn <- theWidget("data_filechooserbutton")$getValue()
  setMainTitle(crs$dataname)
  # Update the select treeview and samples.
  createVariablesModel(colnames(crs$dataset))
  # Ensure we are viewing the treeview tab rather than the Welcome
  # message.
  crv$DATA.DISPLAY.NOTEBOOK$setCurrentPage(crv$DATA.DISPLAY.TREEVIEW.TAB)
  setStatusBar(Rtxt("The supplied data is now available."))
  set.cursor()
}
exportDataTab <- function()
{
  # Export the current dataset (or just the training sample when
  # partitioning is active) to a user-chosen CSV file.

  # Nothing to export without a dataset.
  if (is.null(crs$dataset))
  {
    errorDialog(Rtxt("There is no dataset loaded, and so",
                     "there is nothing to export."))
    return(FALSE)
  }

  sampling <- theWidget("data_sample_checkbutton")$getActive()

  # Ask the user where to save the CSV, with an overwrite check.
  dialog <- RGtk2::gtkFileChooserDialog("Export Dataset", NULL, "save",
                                        "gtk-cancel", RGtk2::GtkResponseType["cancel"],
                                        "gtk-save", RGtk2::GtkResponseType["accept"])
  dialog$setDoOverwriteConfirmation(TRUE)

  if (not.null(crs$dataname))
    dialog$setCurrentName(paste(get.stem(crs$dataname), "_",
                                ifelse(sampling, "sample", "saved"),
                                ".csv", sep=""))

  # 081222 Avoid dialog$setCurrentFolder(crs$dwd): it triggers a
  # libgnomevfs assertion (g_path_is_absolute failed), and crs$dwd may
  # be a file:// URI which is not what we want anyhow.

  # Offer a CSV filter and a catch-all filter.
  for (spec in list(c(Rtxt("CSV Files"), "*.csv"),
                    c(Rtxt("All Files"), "*")))
  {
    ff <- RGtk2::gtkFileFilterNew()
    ff$setName(spec[1])
    ff$addPattern(spec[2])
    dialog$addFilter(ff)
  }

  if (dialog$run() == RGtk2::GtkResponseType["accept"])
  {
    save.name <- dialog$getFilename()
    dialog$destroy()
  }
  else
  {
    dialog$destroy()
    return()
  }

  # Ensure the saved file carries a .csv extension.
  if (tolower(get.extension(save.name)) != "csv")
    save.name <- sprintf("%s.csv", save.name)

  # Write either just the training sample or the full dataset.
  if (sampling)
  {
    writeCSV(crs$dataset[crs$sample,], save.name)
    msg <- Rtxt("The training dataset has been exported to %s.")
  }
  else
  {
    writeCSV(crs$dataset, save.name)
    msg <- Rtxt("The dataset has been exported to %s.")
  }

  setStatusBar(sprintf(msg, save.name))
}
########################################################################
# DATA ROLES
#
# The DATA Execute will perform a sampling of the data and stores
# the indices in crs$sample. It will also build the list of variable
# roles and stores these in crs$input, crs$ident, crs$ignore,
# crs$target, and crs$risk. This is then used in MODEL to limit the
# dataset in the call to rpart to just the crs$input variables. In
# EVALUATE the crs$risk is used for the Risk Chart.
#------------------------------------------------------------------------
# Interface
on_data_sample_checkbutton_toggled <- function(button)
{
  # Enable or disable the sampling-related widgets to match the state
  # of the sample checkbutton, and invalidate any existing partition
  # so it will be regenerated on the next Execute.
  active <- button$getActive()

  sample.widgets <- c("sample_percentage_spinbutton",
                      "sample_percentage_label",
                      "sample_count_spinbutton",
                      "sample_count_label",
                      "sample_seed_spinbutton",
                      "sample_seed_button",
                      "data_sample_entry")
  for (w in sample.widgets)
    theWidget(w)$setSensitive(active)

  # 090617 Deliberately do not show/hide explore_sample_label in the
  # tool bar - it mixes information with actions and thus is
  # conceptually not a good thing to do. [Rado]

  crs$sample <- crs$train <- crs$validate <- crs$test <- NULL
  setStatusBar()
}
on_sample_percentage_spinbutton_changed <- function(action, window)
{
  # Keep the sample-count spinbutton synchronised with the percentage.
  if (is.null(crs$dataset)) return()

  pct <- theWidget("sample_percentage_spinbutton")$getValue()
  wanted <- round(nrow(crs$dataset) * pct / 100)

  # Only push the new value when it differs, avoiding signal loops
  # between the two spinbuttons.
  if (wanted != theWidget("sample_count_spinbutton")$getValue())
    theWidget("sample_count_spinbutton")$setValue(wanted)

  setStatusBar()
}
on_sample_count_spinbutton_changed <- function(action, window)
{
  # Keep the percentage spinbutton synchronised with the row count.
  if (is.null(crs$dataset)) return()

  count <- theWidget("sample_count_spinbutton")$getValue()
  pct <- round(100 * count / nrow(crs$dataset))

  # Only push the new value when it differs, avoiding signal loops
  # between the two spinbuttons.
  if (pct != theWidget("sample_percentage_spinbutton")$getValue())
    theWidget("sample_percentage_spinbutton")$setValue(pct)

  setStatusBar()
}
# Callback for the sample Seed button: choose a new random seed in
# [0, 1000000) and display it in the seed spinbutton.
on_sample_seed_button_clicked <- function(button)
{
  rseed <- as.integer(runif(1, 0, 1000000))
  theWidget("sample_seed_spinbutton")$setValue(rseed)
}
# Callback for toggling a role checkbox cell in the variables
# treeview. The roles behave like radio buttons: checking one role
# unchecks every other role on the same row, and a checked role
# cannot be unchecked directly.
item.toggled <- function(cell, path.str, model)
{
  # The data passed in is the model used in the treeview.
  RGtk2::checkPtrType(model, "GtkTreeModel")
  # Extract the column number of the model that has changed.
  column <- cell$getData("column")
  # Get the current value of the corresponding flag.
  path <- RGtk2::gtkTreePathNewFromString(path.str) # Current row
  iter <- model$getIter(path)$iter # Iter for the row
  current <- model$get(iter, column)[[1]] # Get data from specific column
  # Only invert the current value if it is FALSE - work like a radio button.
  if (! current)
  {
    model$set(iter, column, !current)
    # Uncheck all other Roles for this row, acting like radio buttons.
    columns <- crv$COLUMNstart:crv$COLUMNend
    lapply(setdiff(columns, column), function(x) model$set(iter, x, FALSE))
    # TODO Now fix up other buttons. Any in the same column, if it is
    # Target, must be unchecked and the corresponding row made
    # Ignore. Currently, just check this on Execute and complain. Can
    # we use groups?
  }
  # 100829 Check if we need to toggle the Weight Calculator - note
  # that this is done each time an item is toggled because we don't
  # get called when weight is untoggled?
  # if (names(column) == "weight")
  if (length(getSelectedVariables("weight")) > 0)
  {
    # A weight variable is selected, so disable the manual weight
    # formula entry.
    theWidget("weight_label")$setSensitive(FALSE)
    theWidget("weight_entry")$setSensitive(FALSE)
  }
  else
  {
    theWidget("weight_label")$setSensitive(TRUE)
    theWidget("weight_entry")$setSensitive(TRUE)
  }
}
# Callback for the "Ignore" toolbar button on the Data tab: set the
# ignore role for every variable currently selected in the treeview,
# and uncheck all other roles on those rows.
on_variables_toggle_ignore_button_clicked <- function(action, window)
{
  #ptm <- proc.time()
  set.cursor("watch")
  tree.selection <- theWidget("select_treeview")$getSelection()
  # Under MS/Windows with Terminal Services to the host we get very
  # slow redraws? Tried fixing it with freezeUpdates and thawUpdates
  # but it had no impact. Changing 500 variables takes 5
  # seconds. When connected over terminal services the elapsed time
  # is 16 seconds, still with 5 seconds user time.
  # theWidget("rattle_window")$getWindow()$freezeUpdates()
  # 071113 Use the data parameter to avoid an RGtk2 bug in 2.12.1,
  # fixed in next release.
  tree.selection$selectedForeach(function(model, path, iter, data)
  {
    # Check the ignore column, then clear any other role column that
    # is currently set on this row.
    model$set(iter, crv$COLUMN[["ignore"]], TRUE)
    columns <- setdiff(crv$COLUMNstart:crv$COLUMNend,
                       crv$COLUMN[["ignore"]])
    # Timing indicates the for loop is slower on GNU/Linux but faster
    # on MS/Windows 500! But the extra test also slows things down,
    # so best not to conditionalise for now.
    #if (isWindows())
    for (c in columns)
      if (model$get(iter, c)[[1]]) model$set(iter, c, FALSE)
    #else
    # lapply(columns, function(x) model$set(iter, x, FALSE))
    return(FALSE) # Keep going through all rows
  }, data=TRUE)
  #cat("->Ig", proc.time() - ptm, "\n")
  set.cursor()
  # theWidget("rattle_window")$getWindow()$thawUpdates()
}
on_variables_toggle_input_button_clicked <- function(action, window)
{
  # Mark every currently selected variable within the Select tab as an
  # Input, clearing whatever other role each row may have had.
  set.cursor("watch")
  selection <- theWidget("select_treeview")$getSelection()
  # The data argument works around an RGtk2 2.12.1 bug (fixed in the
  # following release).
  selection$selectedForeach(function(model, path, iter, data)
  {
    # Switch on the Input role for this row.
    model$set(iter, crv$COLUMN[["input"]], TRUE)
    # Clear every other role column that is currently set. A plain for
    # loop with the extra get() test keeps this acceptably fast on
    # both GNU/Linux and MS/Windows.
    others <- setdiff(crv$COLUMNstart:crv$COLUMNend,
                      crv$COLUMN[["input"]])
    for (col in others)
      if (model$get(iter, col)[[1]]) model$set(iter, col, FALSE)
    FALSE # Continue through the remaining selected rows.
  }, data=TRUE)
  set.cursor()
}
#----------------------------------------------------------------------
# Execution
executeSelectTab <- function(resample=TRUE)
{
  # Note the user's variable role selections (input, target, risk,
  # ident, ignore, weight) from the Data tab, validate them (single
  # target/risk/weight, numeric risk, known weight-formula variables),
  # record them in crs, write them to the log, and update the
  # sensitivity and labels of the modelling widgets to suit the
  # detected target type.
  #
  # resample = if TRUE, also regenerate the train/validate/test split
  #            via executeSelectSample().
  # 080520 TODO May want to rename this as SELECT is no longer a tab
  # but is now part of the DATA tab. Perhaps we call it
  # resetSelections.
  # Check for pre-requisites.
  # Can not do any preparation if there is no dataset.
  if (noDatasetLoaded()) return()
  set.cursor("watch", Rtxt("Determining variable roles and characteristics..."))
  startLog(Rtxt("Note the user selections."))
  if (resample) executeSelectSample()
  # Collect the variables currently assigned to each role, plus any
  # formula typed into the Weight Calculator entry.
  input <- getSelectedVariables("input")
  target <- getSelectedVariables("target")
  risk <- getSelectedVariables("risk")
  ident <- getSelectedVariables("ident")
  ignore <- getSelectedVariables("ignore")
  weight <- getSelectedVariables("weight")
  weights <- theWidget("weight_entry")$getText()
  if (weights == "") weights <- NULL
  # Fail if there is more than one target.
  if (length(target) > 1)
  {
    errorDialog(sprintf(Rtxt("Multiple Targets have been identified (%s).",
                             "Only a single Target is allowed."),
                        paste(getSelectedVariables("target", FALSE), target,
                              sep=":", collapse=", ")))
    return()
  }
  # Ask if the Target does not look like a target.
  # (target.levels is used further below to sanity check a categoric
  # target against crv$max.categories.)
  if (length(target))
    target.levels <- length(levels(as.factor(crs$dataset[[target]])))
  else
    target.levels <- 0
  # Fail if there is more than one risk.
  if (length(risk) > 1)
  {
    errorDialog(sprintf(Rtxt("More than a single %s",
                             "variable has been identified (%s).",
                             "Only a single variable is allowed.\n",
                             "\nPlease change the role of one of the variables."),
                        ifelse(survivalTarget(), "Status", "Risk"),
                        paste(getSelectedVariables("risk", FALSE), risk,
                              sep=":", collapse=", ")))
    return()
  }
  # Fail if the Risk column is not numeric.
  if (length(risk) && ! is.numeric(crs$dataset[[risk]]))
  {
    errorDialog(sprintf(Rtxt("The variable selected for your %s (%s)",
                             "is not numeric.",
                             "\n\nPlease select a numeric variable."),
                        ifelse(survivalTarget(), "Status", "Risk"), risk))
    return()
  }
  # Deal with weights.
  # 100829 Fail if there is more than one weight selected. Note that
  # once a weight is selected the Weight Calculator is not sensitive
  # and so any Weight formula there will be ignored.
  if (length(weight) > 1)
  {
    errorDialog(sprintf(Rtxt("Multiple Weights have been identified (%s).",
                             "Only a single Weight is allowed.\n",
                             "\nPlease reconfigure the roles."),
                        paste(getSelectedVariables("weight", FALSE), weight,
                              sep=":", collapse=", ")))
    return()
  }
  else if (length(weight) == 1)
  {
    weights <- sprintf("crs$dataset$%s", weight)
  }
  else if (theWidget("weight_entry")$isSensitive() &&
           not.null(weights) &&
           nchar(weights) > 0)
  {
    # Split the formula text on non-name characters to extract the
    # candidate variable/function identifiers it mentions.
    identifiers <- unlist(strsplit(weights, "[^a-zA-Z._]"))
    identifiers <- identifiers[nchar(identifiers) > 0]
    identifiers <- union(identifiers,identifiers) # Each var/id just once
    # Classify each identifier: those whose class() evaluates to
    # "function" are treated as functions, the rest as variables.
    funs <- unlist(lapply(identifiers,
                          function(x)
                          {
                            try(eval(parse(text=sprintf("class(%s)", x))),
                                silent=TRUE) == "function"}))
    vars <- ! funs
    allvars <- union(input, union(target, union(risk, union(ident, ignore))))
    for (i in seq_len(sum(vars)))
    {
      # Check for any missing variables
      if (identifiers[vars][i] %notin% allvars)
      {
        errorDialog(sprintf(Rtxt("The Weight Calculator contains the variable %s",
                                 "which is not known in the dataset."),
                            identifiers[vars][i]))
        return()
      }
      # Check if Weight variables are not ignored, and inform user if not
      if (identifiers[vars][i] %notin%
          union(ident, union(target, union(ignore, risk))))
      {
        infoDialog(sprintf(Rtxt("You have used the variable %s",
                                "in the weights formula but it is an input.",
                                "This is unusual since it is both an input variable",
                                "and used to weight the outputs.",
                                "It is suggested that you ignore this variable."),
                           identifiers[vars][i]))
      }
      # For each Weights variable, replace with full reference to
      # crs$dataset, since the variable is ignored.
      weights <- gsub(identifiers[vars][i],
                      sprintf("crs$dataset$%s", identifiers[vars][i]),
                      weights)
    }
  }
  #------------------------------------------------------------------------
  # Record appropriate information.
  crs$input <- input
  crs$target <- target
  crs$risk <- risk
  crs$ident <- ident
  crs$ignore <- ignore
  crs$weights <- weights
  crs$numeric <- colnames(crs$dataset)[getNumericVariables(type="indicies")]
  crs$categoric <- getCategoricVariables(type="names")
  # 091206 Add the information to the Log tab
  # convertOneMany renders a character vector for the log as NULL (no
  # elements), a single quoted string, or a c("...", ...) expression.
  convertOneMany <- function(x)
    switch(min(length(x)+1, 3), 'NULL', sprintf('"%s"', x),
           sprintf('c("%s")', paste(x, collapse='", "')))
  appendLog(Rtxt("The following variable selections have been noted."),
            'crs$input <- ', gsub("(([^,]*,){4})", "\\1\n ",
                                  convertOneMany(input)),
            '\n\ncrs$numeric <- ', gsub("(([^,]*,){4})", "\\1\n ",
                                        convertOneMany(crs$numeric)),
            '\n\ncrs$categoric <- ', gsub("(([^,]*,){4})", "\\1\n ",
                                          convertOneMany(crs$categoric)),
            '\n\ncrs$target <- ', convertOneMany(target),
            '\ncrs$risk <- ', convertOneMany(risk),
            '\ncrs$ident <- ', convertOneMany(ident),
            '\ncrs$ignore <- ', convertOneMany(ignore),
            '\ncrs$weights <- ', convertOneMany(weights))
  # 090801 Update the transforms list, so that any transforms that are
  # not ignore/ident will be noted as active. The status is used when
  # exporting to XML since we want to keep ignored transforms (since
  # they might be used in other transforms) but don't want them
  # exported unnecessarily.
  for (i in seq_along(crs$transforms))
    if (names(crs$transforms)[i] %in% union(ident, ignore))
      crs$transforms[[i]]$status <- "inactive"
    else
      crs$transforms[[i]]$status <- "active"
  # Update MODEL targets
  the.target <- ifelse(length(target), sprintf(Rtxt("Target: %s"), target),
                       Rtxt("No Target"))
  the.risk <- ifelse(length(risk), sprintf(Rtxt("Status: %s"), risk),
                     Rtxt("No Risk"))
  theWidget("explot_target_label")$setText(the.target)
  theWidget("test_groupby_target_label")$setText(the.target)
  theWidget("rpart_target_label")$setText(the.target)
  theWidget("rf_target_label")$setText(the.target)
  theWidget("svm_target_label")$setText(the.target)
  # theWidget("gbm_target_label")$setText(the.target)
  theWidget("ada_target_label")$setText(the.target)
  theWidget("glm_target_label")$setText(the.target)
  theWidget("nnet_target_label")$setText(the.target)
  theWidget("model_survival_radiobutton")$setSensitive(TRUE)
  theWidget("model_survival_time_var_label")$setText(sub(Rtxt("Target:"),
                                                         Rtxt("Time:"), the.target))
  theWidget("model_survival_status_var_label")$setText(the.risk)
  # Update MODEL weights
  if (not.null(weights))
  {
    weights.display <- gsub('crs\\$dataset\\$', '', weights)
    the.weight <- sprintf(Rtxt("Weights: %s"), weights.display)
    # 080815 Just display Weights if there is a weights value, and
    # empty otherwise.
    # theWidget("model_tree_rpart_weights_label")$setText(the.weight)
    theWidget("model_tree_rpart_weights_label")$setText(Rtxt("Weights in use."))
  }
  else
  {
    theWidget("model_tree_rpart_weights_label")$
    setText("")
  }
  # 080413 Update MODEL types that are available.
  # With more than two classes we can't use AdaBoost since the current
  # package does not support more than 2 classes.
  if (categoricTarget() && target.levels <= 2)
    theWidget("boost_radiobutton")$setSensitive(TRUE)
  else
    theWidget("boost_radiobutton")$setSensitive(FALSE)
  # Update various MODEL options
  if (survivalTarget())
  {
    # Survival models: only the survival builder remains available.
    theWidget("model_survival_radiobutton")$setSensitive(TRUE)
    theWidget("model_survival_radiobutton")$setActive(TRUE)
    theWidget("rpart_radiobutton")$setSensitive(FALSE)
    theWidget("boost_radiobutton")$setSensitive(FALSE)
    theWidget("rf_radiobutton")$setSensitive(FALSE)
    theWidget("svm_radiobutton")$setSensitive(FALSE)
    theWidget("model_linear_radiobutton")$setSensitive(FALSE)
    theWidget("nnet_radiobutton")$setSensitive(FALSE)
    theWidget("all_models_radiobutton")$setSensitive(FALSE)
  }
  else if (categoricTarget())
  {
    # Classification: enable the classification builders.
    theWidget("model_survival_radiobutton")$setSensitive(FALSE)
    theWidget("rpart_radiobutton")$setSensitive(TRUE)
    theWidget("rf_radiobutton")$setSensitive(TRUE)
    theWidget("svm_radiobutton")$setSensitive(TRUE)
    theWidget("model_linear_radiobutton")$setSensitive(TRUE)
    theWidget("all_models_radiobutton")$setSensitive(TRUE)
    # For linear models, if it is categoric and binomial then assume
    # logistic regression (default to binmoial distribution and the
    # logit link function) otherwise it is multinomial so assume
    # poisson regression (default to poisson distribution and log link
    # function).
    theWidget("model_linear_poisson_radiobutton")$setSensitive(FALSE)
    if (binomialTarget())
    {
      theWidget("model_linear_builder_label")$setText("glm (Logistic)")
      theWidget("glm_linear_radiobutton")$setSensitive(FALSE)
      theWidget("glm_gaussian_radiobutton")$setSensitive(FALSE)
      theWidget("glm_logistic_radiobutton")$setSensitive(TRUE)
      theWidget("glm_logistic_radiobutton")$setActive(TRUE)
      theWidget("model_linear_probit_radiobutton")$setSensitive(TRUE)
      theWidget("glm_multinomial_radiobutton")$setSensitive(FALSE)
      theWidget("nnet_radiobutton")$setSensitive(TRUE)
      theWidget("nnet_hidden_nodes_label")$setSensitive(TRUE)
      theWidget("nnet_hidden_nodes_spinbutton")$setSensitive(TRUE)
      theWidget("nnet_builder_label")$setText("nnet (0/1)")
    }
    else
    {
      theWidget("model_linear_builder_label")$setText("multinom")
      theWidget("glm_linear_radiobutton")$setSensitive(FALSE)
      theWidget("glm_gaussian_radiobutton")$setSensitive(FALSE)
      theWidget("glm_logistic_radiobutton")$setSensitive(FALSE)
      theWidget("model_linear_probit_radiobutton")$setSensitive(FALSE)
      theWidget("glm_multinomial_radiobutton")$setSensitive(TRUE)
      theWidget("glm_multinomial_radiobutton")$setActive(TRUE)
      theWidget("nnet_radiobutton")$setSensitive(FALSE)
      # I don't think these need tgo be done. We can't see the options
      # when the nnet button is not sensitive
      #theWidget("nnet_hidden_nodes_label")$setSensitive(FALSE)
      #theWidget("nnet_hidden_nodes_spinbutton")$setSensitive(FALSE)
      #theWidget("nnet_builder_label")$setText("")
    }
  }
  else if (numericTarget())
  {
    # Regression: enable the regression builders.
    theWidget("model_survival_radiobutton")$setSensitive(FALSE)
    theWidget("rpart_radiobutton")$setSensitive(TRUE)
    theWidget("rf_radiobutton")$setSensitive(TRUE) # 090301 Support regression
    theWidget("svm_radiobutton")$setSensitive(FALSE)
    # For linear models, if it is numeric we are probably going to use
    # a lm so set the default family to nothing! This is becasue lm
    # simply does gaussian and an identity link function.
    # theWidget("glm_family_comboboxentry")$setActive(0)
    theWidget("model_linear_radiobutton")$setSensitive(TRUE)
    theWidget("model_linear_builder_label")$setText("lm")
    theWidget("glm_linear_radiobutton")$setSensitive(TRUE)
    theWidget("glm_linear_radiobutton")$setActive(TRUE)
    theWidget("glm_gaussian_radiobutton")$setSensitive(TRUE)
    theWidget("glm_logistic_radiobutton")$setSensitive(FALSE)
    theWidget("model_linear_probit_radiobutton")$setSensitive(FALSE)
    if (countTarget())
      theWidget("model_linear_poisson_radiobutton")$setSensitive(TRUE)
    else
      theWidget("model_linear_poisson_radiobutton")$setSensitive(FALSE)
    theWidget("glm_multinomial_radiobutton")$setSensitive(FALSE)
    theWidget("nnet_radiobutton")$setSensitive(TRUE)
    theWidget("nnet_hidden_nodes_label")$setSensitive(TRUE)
    theWidget("nnet_hidden_nodes_spinbutton")$setSensitive(TRUE)
    theWidget("nnet_builder_label")$setText("nnet (Regression)")
    theWidget("all_models_radiobutton")$setSensitive(TRUE)
  }
  else # What else could it be? No target!
  {
    theWidget("rpart_radiobutton")$setSensitive(FALSE)
    theWidget("rf_radiobutton")$setSensitive(FALSE)
    theWidget("svm_radiobutton")$setSensitive(FALSE)
    theWidget("model_linear_radiobutton")$setSensitive(FALSE)
    theWidget("nnet_radiobutton")$setSensitive(FALSE)
    theWidget("all_models_radiobutton")$setSensitive(FALSE)
    theWidget("nnet_hidden_nodes_label")$setSensitive(FALSE)
    theWidget("nnet_hidden_nodes_spinbutton")$setSensitive(FALSE)
    # 080719 - remove, or else we can't sample and cluster!!
    # theWidget("data_sample_checkbutton")$setActive(FALSE)
    theWidget("glm_linear_radiobutton")$setSensitive(FALSE)
    theWidget("glm_gaussian_radiobutton")$setSensitive(FALSE)
    theWidget("model_linear_poisson_radiobutton")$setSensitive(FALSE)
    theWidget("glm_logistic_radiobutton")$setSensitive(FALSE)
    theWidget("model_linear_probit_radiobutton")$setSensitive(FALSE)
    theWidget("glm_multinomial_radiobutton")$setSensitive(FALSE)
    theWidget("model_survival_radiobutton")$setSensitive(FALSE)
  }
  # Update EVALUATE risk variable
  # NOTE(review): crs$risk may be NULL here when no risk variable is
  # chosen - confirm setText handles NULL as intended.
  theWidget("evaluate_risk_label")$setText(crs$risk)
  # Update defaults that rely on the number of variables.
  crv$rf.mtry.default <- floor(sqrt(length(crs$input)))
  theWidget("rf_mtry_spinbutton")$setValue(crv$rf.mtry.default)
  # 080505 We auto decide whether the target looks like a categoric
  # or numeric, but if it ends up being a categoric (the user
  # overrides with the type radio button) with very many classes,
  # then complain!
  if (not.null(target)
      && categoricTarget()
      && target.levels > crv$max.categories)
  {
    if (! questionDialog(sprintf(Rtxt("The column selected as a Target (%s)",
                                      "will be treated as a categoric variable",
                                      "since Target Type is set to Categoric.",
                                      "\n\nThe variable has %d distinct values",
                                      "whch is greater than the threshold of %d.",
                                      "That is unusual and some algorithms will",
                                      "take a long time.\n\nYou may like to",
                                      "consider using fewer classes for the",
                                      "target categoric variable or select",
                                      "Target Type as Numeric.",
                                      "\n\nDo you want to continue anyhow?"),
                                 target, target.levels, crv$max.categories)))
      return()
  }
  # 091206 Check that we have both a target and risk for a survival
  # model.
  if (not.null(target)
      && !length(risk)
      && survivalTarget())
  {
    errorDialog(Rtxt("You have chosen Survial models as the target type,",
                     "but no Status variable has been identified.",
                     "Survival models require both a Time and a Status",
                     "variable.\n",
                     "\nPlease identify the Status variable and then",
                     "Execute this tab once again."))
    return(FALSE)
  }
  # Finished - update the status bar.
  roles.msg <- sprintf(Rtxt("Roles noted. %d observations",
                            "and %d input variables."),
                       nrow(crs$dataset), length(crs$input))
  if (length(crs$target) == 0)
    model.msg <- Rtxt("No target thus no predictive",
                      "modelling nor sampling.")
  else if (survivalTarget())
    model.msg <- sprintf(Rtxt("The target is %s with %s. Survival models enabled."),
                         crs$target, crs$risk)
  else if (categoricTarget())
    model.msg <- sprintf(Rtxt("The target is %s. Categoric %d.",
                              "Classification models enabled."),
                         crs$target, target.levels)
  else
    model.msg <- sprintf(Rtxt("The target is %s. Numeric.",
                              "Regression models enabled."),
                         crs$target)
  setStatusBar(roles.msg, model.msg)
}
executeSelectSample <- function()
{
  # Build (or clear) the training/validation/testing row partition of
  # the dataset according to the Data tab's sampling widgets. The row
  # indices are stored in crs$sample, crs$train, crs$validate and
  # crs$test, and the equivalent R commands are written to the log.
  # Identify if there are observations without a target value. TODO
  # 080426. I started looking at noting those observations with missing
  # target values. This is recorded in crs$nontargets. Currently I'm
  # not using it. The intention was to only sample from those with
  # targets, etc. But the impacts need to be carefuly thought through.
  #
  # Perhaps the philosophy should go back to the fact that the user
  # can split the dataset up themselves quite easily, and I do
  # provide a mechanism for them to load their dataset for scoring.
  #target <- getSelectedVariables("target")
  #print(target)
  #crs$nontargets <- which(is.na(crs$dataset[[target]]))
  # Record that a random sample of the dataset is desired and the
  # random sample itself is loaded into crs$sample. 080425 Whilst we
  # are at it we also set the variable crs$targeted to be those row
  # indicies that have a non NA target.
  if (theWidget("data_sample_checkbutton")$getActive())
  {
    if (newSampling())
    {
      # Three-way percentage split: ssize/vsize/tsize are the
      # train/validate/test observation counts.
      ssizes <- parseSampleEntry()
      ssize <- floor(nrow(crs$dataset) * ssizes[1] / 100)
      vsize <- floor(nrow(crs$dataset) * ssizes[2] / 100)
      if (ssizes[3] == 0)
        tsize <- 0
      else
        tsize <- nrow(crs$dataset) - ssize - vsize
    }
    else
      #ssize <- theWidget("sample_percentage_spinbutton")$getValue()
      #ssize <- floor(nrow(crs$dataset)*ssize/100)
      ssize <- theWidget("sample_count_spinbutton")$getValue()
    seed <- theWidget("sample_seed_spinbutton")$getValue()
    # Log the default seed symbolically as crv$seed rather than its
    # numeric value.
    if (seed == crv$seed) seed <- "crv$seed"
    if (newSampling())
    {
      # Construct the R commands that perform the split; they are both
      # logged and executed (via eval below).
      sample.cmd <- sprintf(paste("set.seed(%s)",
                                  "\ncrs$nobs <- nrow(crs$dataset) # %d observations",
                                  "\ncrs$sample <- crs$train <-",
                                  "sample(nrow(crs$dataset),",
                                  "%s*crs$nobs) # %d observations"),
                            seed, nrow(crs$dataset),
                            round(ssize/nrow(crs$dataset), 2), ssize)
      if (vsize > 0)
        sample.cmd <- sprintf(paste("%s\ncrs$validate <-",
                                    "sample(setdiff(seq_len(nrow(crs$dataset)),",
                                    "crs$train),",
                                    "%s*crs$nobs) # %d observations"),
                              sample.cmd, round(vsize/nrow(crs$dataset), 2), vsize)
      else
        sample.cmd <- sprintf("%s\ncrs$validate <- NULL", sample.cmd)
      if (tsize > 0)
        sample.cmd <- sprintf(paste("%s\ncrs$test <-",
                                    "setdiff(setdiff(seq_len(nrow(crs$dataset)),",
                                    "crs$train), crs$validate)",
                                    "# %d observations"), sample.cmd,
                              nrow(crs$dataset)-ssize-vsize)
      else
        sample.cmd <- sprintf("%s\ncrs$test <- NULL", sample.cmd)
    }
    else
    {
      # 100417 Even for RStat make sure we maintain crs$train as it is
      # now starting to be used.
      sample.cmd <- paste(sprintf("set.seed(%s)\n", seed),
                          "crs$sample <- crs$train <- sample(nrow(crs$dataset), ", ssize,
                          ")", sep="")
    }
    appendLog(Rtxt("Build the training/validate/test datasets."), sample.cmd)
    # Run the same command in this session so crs is updated.
    eval(parse(text=sample.cmd))
  }
  else
  {
    # Sampling disabled: clear the partition and fall back to
    # evaluating on the full training data (or a CSV score file).
    crs$sample <- crs$train <- crs$validate <- crs$test <- NULL
    theWidget("evaluate_validation_radiobutton")$setSensitive(FALSE)
    theWidget("evaluate_testing_radiobutton")$setSensitive(FALSE)
    if (exists("RATTLE.SCORE.IN") && not.null(RATTLE.SCORE.IN))
      theWidget("evaluate_csv_radiobutton")$setActive(TRUE)
    else
      theWidget("evaluate_training_radiobutton")$setActive(TRUE)
  }
  # Reset crs$smodel. NOTE(review): presumably the record of models
  # built on the current sample - confirm against its other uses.
  crs$smodel <- vector()
  # TODO For test/train, use sample,split from caTools?
  ## Set some defaults that depend on sample size.
  #if (is.null(crs$sample))
  # crv$rf.sampsize.default <- length(crs$dataset)
  #else
  # crv$rf.sampsize.default <- length(crs$sample)
  #theWidget("rf_sampsize_spinbutton")$setValue(crv$rf.sampsize.default)
  ## 080520 Don't set the status bar - it is overwritten by the
  ## message about variable roles being noted.
  ## setStatusBar()
  ## if (theWidget("data_sample_checkbutton")$getActive())
  ## setStatusBar("The sample has been generated.",
  ## "There are", length(crs$sample), "observations.")
  ## else
  ## setStatusBar("Sampling is inactive.")
}
getSelectedVariables <- function(role, named=TRUE)
{
  # DESCRIPTION
  # Generate a list of variables marked with the specified role.
  #
  # ARGUMENTS
  # role = a string naming the role to query on
  # named = if TRUE return variable names as strings, if FALSE, numbers
  #
  # DETAILS The select_treeview, categorical_treeview and
  # continuous_treeview are places where a variable can be identified
  # as having a given role. Whilst the role of "ignore" is common
  # across all three treeviews, only the ignore from the main
  # select_treeview is considered. If a role is not found, simply
  # return NULL, rather than an error (for no particular reason).
  #
  # ASSUMPTIONS The variable and number columns are assumed to be the
  # same in each of crv$COLUMNS, crv$CATEGORICAL, and crv$CONTINUOUS.
  variables <- NULL
  # type is only referenced by the commented-out character handling
  # below.
  type <- "logical"
  # Select the treeview model and the role's column index based on
  # which treeview hosts the given role.
  if (role %in% c("input", "target", "risk", "ident", "ignore", "weight"))
  {
    model <- theWidget("select_treeview")$getModel()
    rcol <- crv$COLUMN[[role]]
  }
  else if (role %in% c("boxplot", "hisplot", "cumplot", "benplot"))
  {
    model <- theWidget("continuous_treeview")$getModel()
    rcol <- crv$CONTINUOUS[[role]]
  }
  else if (role %in% c("barplot", "dotplot", "mosplot"))
  {
    model <- theWidget("categorical_treeview")$getModel()
    rcol <- crv$CATEGORICAL[[role]]
  }
  else if (role %in% c("paiplot"))
  {
    # Pairs plots draw variables from both the continuous and the
    # categorical treeviews, so track a second model/column too.
    model <- theWidget("continuous_treeview")$getModel()
    rcol <- crv$CONTINUOUS[[role]]
    model2 <- theWidget("categorical_treeview")$getModel()
    rcol2 <- crv$CATEGORICAL[[role]]
  }
  else
    return(NULL)
  vcol <- crv$COLUMN[["variable"]]
  ncol <- crv$COLUMN[["number"]]
  # Walk every row of the model, accumulating (via <<- into the
  # enclosing variables vector) those rows whose role flag is set.
  model$foreach(function(model, path, iter, data)
  {
    flag <- model$get(iter, rcol)[[1]]
    if (named)
      variable <- model$get(iter, vcol)[[1]]
    else
      variable <- model$get(iter, ncol)[[1]]
    # if (type=="character")
    # {
    # if (role == "zero" && flag == "Zero/Missing")
    # variables <<- c(variables, variable)
    # if (role == "mean" && flag == "Mean")
    # variables <<- c(variables, variable)
    # if (role == "median" && flag == "Median")
    # variables <<- c(variables, variable)
    # }
    # else
    if (flag) variables <<- c(variables, variable)
    return(FALSE) # Keep going through all rows
  }, TRUE)
  if (role %in% c("paiplot")) # we need to collect the categorical variables too
  {
    model2$foreach(function(model2, path, iter, data)
    {
      flag <- model2$get(iter, rcol2)[[1]]
      if (named)
        variable <- model2$get(iter, vcol)[[1]]
      else
        variable <- model2$get(iter, ncol)[[1]]
      if (flag) variables <<- c(variables, variable)
      return(FALSE) # Keep going through all rows
    }, TRUE)
  }
  # Set the data parameter to TRUE to avoid an RGtk2 bug in 2.12.1, fixed in
  # next release. 071117
  # 091130 Apparently Gtk always returns UTF-8 strings (Acken
  # Sakakibara). Thus we convert to the locale of the system.
  variables <- iconv(variables, "UTF-8", localeToCharset()[1])
  return(variables)
}
initialiseVariableViews <- function()
{
  # Create the list-store models behind the variable treeviews (Data
  # tab's select view, the Impute view, and the Explore tab's
  # categorical and continuous views), attach each model to its
  # treeview, and add all the display columns.
  #
  # Store layouts (in column order):
  #   select:      number, variable, type, input, target, risk, ident,
  #                ignore, weight, comment
  #   impute:      number, variable, comment
  #   continuous:  number, variable, boxplot, hisplot, cumplot,
  #                benplot, paiplot, comment
  #   categorical: number, variable, barplot, dotplot, mosplot,
  #                paiplot, comment

  # Local helper: append a left-aligned text column to a treeview,
  # rendering the store column given by col.
  addTextColumn <- function(view, title, col)
  {
    renderer <- RGtk2::gtkCellRendererTextNew()
    renderer$set(xalign = 0.0)
    view$insertColumnWithAttributes(-1, title, renderer, text=col)
  }

  # Local helper: append a toggle button column. colmap is one of
  # crv$COLUMN/crv$CATEGORICAL/crv$CONTINUOUS and key names the role;
  # the single-bracket subset colmap[key] is stored on the renderer
  # (retrieved by the handler to know which column was toggled) while
  # the double-bracket colmap[[key]] provides the store column index.
  # handler is the "toggled" callback and store is passed to it as
  # user data. radio=TRUE renders radio-style buttons, used for the
  # mutually exclusive variable roles on the select view.
  addToggleColumn <- function(view, title, colmap, key, handler, store,
                              radio=FALSE)
  {
    renderer <- RGtk2::gtkCellRendererToggleNew()
    renderer$set(xalign = 0.0)
    if (radio) renderer$set(radio = TRUE)
    renderer$set(width = 60)
    renderer$setData("column", colmap[key])
    RGtk2::connectSignal(renderer, "toggled", handler, store)
    view$insertColumnWithAttributes(-1, title, renderer,
                                    active=colmap[[key]])
  }

  # Define the data models for the various treeviews.
  model <- RGtk2::gtkListStoreNew("gchararray", "gchararray", "gchararray",
                                  "gboolean", "gboolean", "gboolean",
                                  "gboolean", "gboolean", "gboolean",
                                  "gchararray")
  impute <- RGtk2::gtkListStoreNew("gchararray", "gchararray", "gchararray")
  continuous <- RGtk2::gtkListStoreNew("gchararray", "gchararray",
                                       "gboolean", "gboolean",
                                       "gboolean", "gboolean", "gboolean",
                                       "gchararray")
  categorical <- RGtk2::gtkListStoreNew("gchararray", "gchararray",
                                        "gboolean", "gboolean", "gboolean",
                                        "gboolean", "gchararray")

  # Attach each model to its treeview.
  treeview <- theWidget("select_treeview")
  treeview$setModel(model)
  impview <- theWidget("impute_treeview")
  impview$setModel(impute)
  catview <- theWidget("categorical_treeview")
  catview$setModel(categorical)
  conview <- theWidget("continuous_treeview")
  conview$setModel(continuous)

  # Row NUMBER column for each view.
  addTextColumn(treeview, Rtxt("No."), crv$COLUMN[["number"]])
  addTextColumn(impview, Rtxt("No."), crv$IMPUTE[["number"]])
  addTextColumn(catview, Rtxt("No."), crv$CATEGORICAL[["number"]])
  addTextColumn(conview, Rtxt("No."), crv$CONTINUOUS[["number"]])

  # VARIABLE NAME column for each view.
  addTextColumn(treeview, Rtxt("Variable"), crv$COLUMN[["variable"]])
  addTextColumn(impview, Rtxt("Variable"), crv$IMPUTE[["variable"]])
  addTextColumn(catview, Rtxt("Variable"), crv$CATEGORICAL[["variable"]])
  addTextColumn(conview, Rtxt("Variable"), crv$CONTINUOUS[["variable"]])

  # Data TYPE column (select view only).
  addTextColumn(treeview, Rtxt("Data Type"), crv$COLUMN[["type"]])

  # Role columns on the select view. These act like radio buttons and
  # item.toggled enforces a single role per variable.
  addToggleColumn(treeview, Rtxt("Input"), crv$COLUMN, "input",
                  item.toggled, model, radio=TRUE)
  addToggleColumn(treeview, Rtxt("Target"), crv$COLUMN, "target",
                  item.toggled, model, radio=TRUE)
  addToggleColumn(treeview, Rtxt("Risk"), crv$COLUMN, "risk",
                  item.toggled, model, radio=TRUE)
  addToggleColumn(treeview, Rtxt("Ident"), crv$COLUMN, "ident",
                  item.toggled, model, radio=TRUE)
  addToggleColumn(treeview, Rtxt("Ignore"), crv$COLUMN, "ignore",
                  item.toggled, model, radio=TRUE)
  addToggleColumn(treeview, Rtxt("Weight"), crv$COLUMN, "weight",
                  item.toggled, model, radio=TRUE)

  # Plot selection columns on the categorical view.
  addToggleColumn(catview, Rtxt("Bar Plot"), crv$CATEGORICAL, "barplot",
                  cat_toggled, categorical)
  addToggleColumn(catview, Rtxt("Dot Plot"), crv$CATEGORICAL, "dotplot",
                  cat_toggled, categorical)
  addToggleColumn(catview, Rtxt("Mosaic"), crv$CATEGORICAL, "mosplot",
                  cat_toggled, categorical)
  addToggleColumn(catview, Rtxt("Pairs"), crv$CATEGORICAL, "paiplot",
                  cat_toggled, categorical)

  # Plot selection columns on the continuous view.
  addToggleColumn(conview, Rtxt("Box Plot"), crv$CONTINUOUS, "boxplot",
                  con_toggled, continuous)
  addToggleColumn(conview, Rtxt("Histogram"), crv$CONTINUOUS, "hisplot",
                  con_toggled, continuous)
  addToggleColumn(conview, Rtxt("Cumulative"), crv$CONTINUOUS, "cumplot",
                  con_toggled, continuous)
  addToggleColumn(conview, Rtxt("Benford"), crv$CONTINUOUS, "benplot",
                  con_toggled, continuous)
  addToggleColumn(conview, Rtxt("Pairs"), crv$CONTINUOUS, "paiplot",
                  con_toggled, continuous)

  # COMMENT/summary column for each view.
  addTextColumn(treeview, Rtxt("Comment"), crv$COLUMN[["comment"]])
  addTextColumn(impview, Rtxt("Data Type and Number Missing"),
                crv$IMPUTE[["comment"]])
  addTextColumn(catview, Rtxt("Levels"), crv$CATEGORICAL[["comment"]])
  addTextColumn(conview, Rtxt("Min; Median/Mean; Max"),
                crv$CONTINUOUS[["comment"]])

  # Allow multiple selections in every view.
  treeview$getSelection()$setMode("multiple")
  impview$getSelection()$setMode("multiple")
  catview$getSelection()$setMode("multiple")
  conview$getSelection()$setMode("multiple")
}
createVariablesModel <- function(variables, input=NULL, target=NULL,
risk=NULL, ident=NULL, ignore=NULL, weight=NULL,
zero=NULL, mean=NULL,
boxplot=NULL,
hisplot=NULL, cumplot=NULL, benplot=NULL,
barplot=NULL, dotplot=NULL, mosplot=NULL,
paiplot=NULL,
autoroles=TRUE)
{
# Set up the initial information about variables for use throughout
# Rattle, including the Data tab's variable model, the Explore tab's
# categorical and continuous models, and the Modelling tab defaults
# where they depend on the dataset sizes.
#
# Any values supplied for input, target, risk, ident, ignore,
# boxplot, hisplot, cumplot, benplot, barplot, dotplot, and
# mosplot, arguments should be lists of variable names (list of
# strings).
set.cursor("watch", Rtxt("Summarising the variables..."))
# Retrieve the models.
model <- theWidget("select_treeview")$getModel()
impute <- theWidget("impute_treeview")$getModel()
categorical <- theWidget("categorical_treeview")$getModel()
continuous <- theWidget("continuous_treeview")$getModel()
# 080303 Automatically identify a default target if none are
# identified as a target (by beginning with TARGET or TIME for
# SURVIVAL data) in the variables. Heuristic is - the last or first
# if it's a factor with few levels, or has only a few values. Then
# the treeview model will record this choice, and we set the
# appropriate labels with this, and record it in crs.
survival.model <- theWidget("model_survival_radiobutton")$getActive()
given.target <- c(which(substr(variables, 1, 6) == "TARGET"),
if (survival.model) which(substr(variables, 1, 4) == "TIME"))
if (autoroles && length(given.target) > 0) target <- variables[given.target[1]]
if (autoroles && is.null(target))
{
# Find the last variable that is not an IMP (imputed). This is
# just a general heuristic, and works particularly for imputation
# performed in Rattle. Should also do this for first, and also for
# IGNORE variables.
last.var <- length(variables)
while (last.var > 1 && substr(variables[last.var], 1, 4) == "IMP_")
{
last.var <- last.var - 1
}
target <- -1
if ((is.factor(crs$dataset[,last.var]) &&
length(levels(crs$dataset[,last.var])) > 1 &&
length(levels(crs$dataset[,last.var])) < 11)
|| (length(levels(as.factor(crs$dataset[,last.var]))) < 11
&& length(levels(as.factor(crs$dataset[,last.var]))) > 1))
target <- last.var
else if ((is.factor(crs$dataset[,1]) &&
length(levels(crs$dataset[,1])) > 1 &&
length(levels(crs$dataset[,1])) < 11)
|| (length(levels(as.factor(crs$dataset[,1]))) < 11
&& length(levels(as.factor(crs$dataset[,1]))) > 1))
target <- 1
else
for (i in 2:(length(variables)-1))
{
if ((is.factor(crs$dataset[,i]) &&
length(levels(crs$dataset[,i])) > 1 &&
length(levels(crs$dataset[,i])) < 11)
|| (length(levels(as.factor(crs$dataset[,i]))) < 11
&& length(levels(as.factor(crs$dataset[,i]))) > 1))
{
target <- i
break
}
}
if (target != -1)
target <- variables[target]
else
target <- NULL
}
# Determine the list of input variables so far (i.e., not dealing
# with ignore and risk yet).
if (is.null(input)) input <- variables
input <- setdiff(input, target)
# Update the Model tab with the selected default target
the.target <- ifelse(length(target), sprintf(Rtxt("Target: %s"), target),
Rtxt("No Target"))
theWidget("explot_target_label")$setText(the.target)
theWidget("glm_target_label")$setText(the.target)
theWidget("rpart_target_label")$setText(the.target)
## theWidget("gbm_target_label")$setText(the.target)
theWidget("ada_target_label")$setText(the.target)
theWidget("rf_target_label")$setText(the.target)
theWidget("svm_target_label")$setText(the.target)
theWidget("nnet_target_label")$setText(the.target)
plots <- union(boxplot,
union(hisplot,
union(cumplot,
union(benplot,
union(barplot,
union(paiplot,
union(dotplot, mosplot)))))))
## Build the Variables treeview model with each variable's INPUT set
## to TRUE and all else FALSE. If the variable has only a single
## value then it defaults to IGNORE, and if it is a factor and has
## as many distinct values as there are rows, then also default to
## IGNORE.
for (i in seq_along(variables))
{
#used <- union(target, union(risk, union(ident, ignore)))
iter <- model$append()$iter
cl <- class(crs$dataset[[variables[i]]])
# 110312 There is a case where cl might be "character". This was
# noticed, for example, when loading a .RData file with a column
# which was character. Seems like simply converting this to factor
# is appropriate.
if ("character" %in% cl)
{
crs$dataset[[variables[i]]] <- as.factor(crs$dataset[[variables[i]]])
cl <- class(crs$dataset[[variables[i]]])
}
# 090320 Change "ordered" to Categoric below, so maybe don't need
# this change. 101004 Reinstate this change to cl since ordered
# factors in weather AUS were being dropped from the Descriptions
# option of Explore.
if (length(cl) == 2 && cl[1] == "ordered" && cl[2] == "factor")
cl <- "factor"
# First check for special variable names.
if (autoroles)
{
if (paste("IMP_", variables[i], sep="") %in% variables)
{
# This works with SAS/EM IMPutations and Rattle's imputations,
# which add the IMP_ at the beginning of the name of any
# imputed variables. These will be ignored as they will have
# been replaced by another variable.
ignore <- c(ignore, variables[i])
# Be sure to also remove any other role for the original
# variable?
}
else if (substr(variables[i], 1, 2) == "ID")
{
ident <- c(ident, variables[i])
}
# 080303 No longer needed as this is handled prior to the target
# heuristics. Remove this code eventually if all looks okay.
#
# else if (substr(variables[i], 1, 6) == "TARGET")
# {
# target <- variables[i]
# }
else if (substr(variables[i], 1, 6) == "IGNORE")
{
ignore <- c(ignore, variables[i])
}
else if (variables[i] == "risk" ||
substr(variables[i], 1, 4) == "RISK" ||
substr(variables[i], 1, 6) == "STATUS" ||
substr(variables[i], 1, 5) == "EVENT")
{
risk <- c(risk, variables[i])
}
else if ("factor" %in% cl)
{
lv <- length(levels(crs$dataset[[variables[i]]]))
if (nrow(crs$dataset) > crv$ident.min.rows && lv == nrow(crs$dataset))
{
cl <- "ident"
ident <- c(ident, variables[i])
}
else if (lv == 1)
{
cl <- "constant"
ignore <- c(ignore, variables[i])
}
}
else
{
lv <- length(levels(as.factor(crs$dataset[[variables[i]]])))
# 090704 Start supporting a Date format
if (length(intersect(c("integer", "POSIXt"), cl)) &&
nrow(crs$dataset) > crv$ident.min.rows &&
lv == nrow(crs$dataset))
{
cl <- "ident"
ident <- c(ident, variables[i])
}
else if (all(is.na(crs$dataset[[variables[i]]])))
{
cl <- "missing"
ignore <- c(ignore, variables[i])
}
else if (sd(crs$dataset[[variables[i]]], na.rm=TRUE) %in% c(NA, 0))
{
# sd is NA if all data items are NA.
cl <- "constant"
ignore <- c(ignore, variables[i])
}
}
}
# Fix any doubling up
input <- setdiff(input, target)
if (length(target) && length(ident) && target %in% ident)
target <- NULL
# 090110 We used to include the number of levels in the Data Type
# column, but since we now include Unique in the comment column,
# no longer include this redundant information.
## if ("factor" %in% cl)
## {
## lv <- length(levels(crs$dataset[[variables[i]]]))
## if (lv > 1)
## cl <- paste(cl, lv)
## }
input <- setdiff(setdiff(setdiff(input, ignore), ident), risk)
missing.count <- sum(is.na(crs$dataset[[variables[i]]]))
unique.count <- length(unique(na.omit(crs$dataset[[variables[i]]])))
unique.value <- unique(crs$dataset[[variables[i]]])
numeric.var <- is.numeric(crs$dataset[[variables[i]]])
possible.categoric <- (unique.count <= crv$max.categories ||
theWidget("data_target_categoric_radiobutton")$
getActive())
# Convert internal class to printable form.
prcl <- cl[1]
prcl <- gsub("constant", Rtxt("Constant"), prcl)
prcl <- gsub("ident", Rtxt("Ident"), prcl)
prcl <- gsub("factor", Rtxt("Categoric"), prcl)
prcl <- gsub("ordered", Rtxt("Categoric"), prcl)
prcl <- gsub("integer", Rtxt("Numeric"), prcl)
prcl <- gsub("numeric", Rtxt("Numeric"), prcl)
# Every variable goes into the VARIABLES treeview.
model$set(iter,
crv$COLUMN["number"], i,
crv$COLUMN["variable"], variables[i],
crv$COLUMN["type"], prcl,
crv$COLUMN["input"], variables[i] %in% input,
crv$COLUMN["target"], variables[i] %in% target,
crv$COLUMN["risk"], variables[i] %in% risk,
crv$COLUMN["ident"], variables[i] %in% ident,
crv$COLUMN["ignore"], variables[i] %in% ignore,
crv$COLUMN["weight"], variables[i] %in% weight,
crv$COLUMN["comment"], paste(sprintf(Rtxt("Unique: %d "),
unique.count),## ""),
ifelse(missing.count > 0,
sprintf(Rtxt("Missing: %d "),
missing.count), ""),
ifelse(prcl == "constant",
sprintf(Rtxt("Value: %s "),
unique.value), ""),
sep=""))
# Selected variables go into the other treeviews.
if (missing.count > -1)# Ignore IGNOREd variables. But crs$ignore
# is not yet set. Need to remove
# later. Also, this treeview has become
# used for all TRANSFORM operations, so
# must include all variables, not just ones
# with missing values.
{
# Check if it can be exported to PMML. 131020 Assume now that
# all can be exported (i.e., do not include a message). The test
# is removed from pmml and it was ugly anyhow.
etype <- ""
# Generate correct Rattle terminology for the variable
# class. 090731 We denote an integer as Numeric, to be
# consistent throughout Rattle.
dtype <- paste("A ", cl, " variable")
if (cl == "integer")
dtype <- sprintf(Rtxt("Numeric [%d to %d; unique=%d; mean=%d; median=%d%s%s]"),
min(crs$dataset[[variables[i]]], na.rm=TRUE),
max(crs$dataset[[variables[i]]], na.rm=TRUE),
unique.count,
as.integer(mean(crs$dataset[[variables[i]]],
na.rm=TRUE)),
as.integer(median(crs$dataset[[variables[i]]],
na.rm=TRUE)),
ifelse(sum(is.na(crs$dataset[[variables[i]]])),
sprintf(Rtxt("; miss=%d"),
sum(is.na(crs$dataset[[variables[i]]]))),
""),
ifelse(variables[i] %in% ignore, Rtxt("; ignored"), ""))
else if (cl == "numeric")
dtype <- sprintf(Rtxt("Numeric [%.2f to %.2f; unique=%d; mean=%.2f; median=%.2f%s%s]"),
min(crs$dataset[[variables[i]]], na.rm=TRUE),
max(crs$dataset[[variables[i]]], na.rm=TRUE),
unique.count,
mean(crs$dataset[[variables[i]]], na.rm=TRUE),
median(crs$dataset[[variables[i]]], na.rm=TRUE),
ifelse(missing.count > 0,
sprintf(Rtxt("; miss=%d"), missing.count), ""),
ifelse(variables[i] %in% ignore, Rtxt("; ignored"), ""))
else if (substr(cl, 1, 6) == "factor")
dtype <- sprintf(Rtxt("Categorical [%s levels%s%s]"),
length(levels(crs$dataset[[variables[i]]])),
ifelse(missing.count > 0,
sprintf(Rtxt("; miss=%d"), missing.count), ""),
ifelse(variables[i] %in% ignore, Rtxt("; ignored"), ""))
# Generate text for the missing values bit.
if (missing.count > 0)
mtext <- sprintf(Rtxt(" %d missing values"), missing.count)
else
mtext <- ""
imp.options <- RGtk2::gtkListStoreNew("gchararray")
imp.options.iter <- imp.options$append()$iter
imp.options$set(imp.options.iter, 0, "xx")
combo <- RGtk2::gtkComboBoxNewWithModel(imp.options, 0)
impiter <- impute$append()$iter
impute$set(impiter,
crv$IMPUTE["number"], i,
crv$IMPUTE["variable"], variables[i],
#crv$IMPUTE["comment"], sprintf("%s%s%s.", etype, dtype, mtext))
crv$IMPUTE["comment"], sprintf("%s%s.", dtype, etype))
}
if (strsplit(cl, " ")[[1]][1] == "factor")
{
## For the IMP_ and IGNORE_ variables we don't get a chance
## above to add in the number of levels, so do it here.
if (cl == "factor")
cl <- paste(cl, length(levels(crs$dataset[[variables[i]]])))
catiter <- categorical$append()$iter
categorical$set(catiter,
crv$CATEGORICAL["number"], i,
crv$CATEGORICAL["variable"], variables[i],
crv$CATEGORICAL["barplot"], variables[i] %in% barplot,
crv$CATEGORICAL["dotplot"], variables[i] %in% dotplot,
crv$CATEGORICAL["mosplot"], variables[i] %in% mosplot,
crv$CATEGORICAL["paiplot"], variables[i] %in% paiplot,
crv$CATEGORICAL["comment"],
sprintf("%s", strsplit(cl, " ")[[1]][2]))
}
if (cl == "integer" || cl == "numeric")
{
coniter <- continuous$append()$iter
continuous$set(coniter,
crv$CONTINUOUS["number"], i,
crv$CONTINUOUS["variable"], variables[i],
crv$CONTINUOUS["boxplot"], variables[i] %in% boxplot,
crv$CONTINUOUS["hisplot"], variables[i] %in% hisplot,
crv$CONTINUOUS["cumplot"], variables[i] %in% cumplot,
crv$CONTINUOUS["benplot"], variables[i] %in% benplot,
crv$CONTINUOUS["paiplot"], variables[i] %in% paiplot,
crv$CONTINUOUS["comment"],
sprintf("%.2f; %.2f/%.2f; %.2f",
min(crs$dataset[,i], na.rm=TRUE),
median(crs$dataset[,i], na.rm=TRUE),
mean(crs$dataset[,i], na.rm=TRUE),
max(crs$dataset[,i], na.rm=TRUE)))
}
}
crs$target <- target
crs$input <- input
crs$ident <- ident
crs$ignore <- ignore
crs$risk <- risk
# 091206 Set the default target type.
# 091206 If the target is TIME... and risk is STATUS... or
# EVENT... then enable the Survival radiobutton.
if (! length(target))
theWidget("data_target_auto_radiobutton")$setActive(TRUE)
else if (length(target) && length(risk) &&
substr(target, 1, 4) == "TIME" &&
(substr(risk, 1, 6) == "STATUS" ||
substr(variables[i], 1, 5) == "EVENT"))
theWidget("data_target_survival_radiobutton")$setActive(TRUE)
# else if (is.numeric(crs$dataset[[crs$target]]) &&
# # 080505 TODO we should put 10 as a global CONST
# length(levels(as.factor(crs$dataset[[crs$target]]))) > 10)
# theWidget("data_target_numeric_radiobutton")$setActive(TRUE)
# else if (is.factor(crs$dataset[[crs$target]]) ||
# (is.numeric(crs$dataset[[crs$target]]) &&
# length(levels(as.factor(crs$dataset[[crs$target]]))) <= 10))
# theWidget("data_target_categoric_radiobutton")$setActive(TRUE)
else
# Unset them all - not sure we should be here ever? 091223 Resume
# to this being the default.
theWidget("data_target_auto_radiobutton")$setActive(TRUE)
# Perform other setups associated with a new dataset
crv$rf.mtry.default <- floor(sqrt(ncol(crs$dataset)))
theWidget("rf_mtry_spinbutton")$setValue(crv$rf.mtry.default)
#crv$rf.sampsize.default <- nrow(crs$dataset)
#theWidget("rf_sampsize_spinbutton")$setValue(crv$rf.sampsize.default)
}
#----------------------------------------------------------------------
#
# Support
#
getIncludedVariables <- function(numonly=FALSE, listall=FALSE, risk=FALSE, target=TRUE)
{
  # Generate a string listing the indices of the variables that are
  # not ignored: the inputs plus, optionally, the target and the risk.
  #
  # numonly  Restrict the candidates to numeric variables only.
  # listall  If FALSE, a selection covering every candidate collapses
  #          to NULL (the dataset then needs no subsetting).
  # risk     Include the risk variable index.
  # target   Include the target variable index.
  #
  # Returns a comma separated string of indices, or NULL.
  #
  # 20110102 TODO Stop using this function, or else have this function
  # always return the string "c(crs$input, crs$target)" etc, as
  # appropriate, so we use symbolic names rather than lists of
  # variable numbers.
  #
  # TODO The NULL return is ambiguous: we cannot tell whether it means
  # all variables or no variables!
  input.idx  <- getVariableIndicies(crs$input)
  target.idx <- if (target) getVariableIndicies(crs$target) else NULL
  risk.idx   <- if (risk) getVariableIndicies(crs$risk) else NULL
  # The candidate columns: every column, or just the numeric ones.
  if (numonly)
    candidates <- seq(1,ncol(crs$dataset))[as.logical(sapply(crs$dataset, is.numeric))]
  else
    candidates <- 1:ncol(crs$dataset)
  selected <- union(input.idx, union(target.idx, risk.idx))
  if (! listall && setequal(selected, candidates))
    return(NULL)
  return(simplifyNumberList(intersect(candidates, selected)))
}
inputVariables <- function(numonly=FALSE)
{
  # Return, as a comma separated list (as a string), the indices of
  # the input variables. If the inputs cover all (relevant) columns
  # then return NULL, since the dataset then needs no subsetting.
  #
  # numonly  Restrict consideration to numeric columns only.
  #
  # Pops up an error dialog and stops when no input variables have
  # been selected.
  #
  # Validate before doing any work with crs$input (the original
  # validated only after computing the indices).
  if (is.null(crs$input))
  {
    errorDialog(Rtxt("No input variables have been selected.",
                     "This doesn't make a lot of sense.",
                     "Please choose some input variables before proceeding."))
    stop(Rtxt("no input variables specified"))
  }
  fi <- getVariableIndicies(crs$input)
  # NOTE(review): the original also computed the target variable
  # indices here but never used them; that dead assignment is removed.
  if (numonly)
    fl <- seq(1,ncol(crs$dataset))[as.logical(sapply(crs$dataset, is.numeric))]
  else
    fl <- 1:ncol(crs$dataset)
  if (setequal(fi, fl))
    return(NULL)
  return(simplifyNumberList(intersect(fl, fi)))
}
used.variables <- function(numonly=FALSE)
{
  # Return, as a comma separated string, the indices of every variable
  # that is not ignored (i.e., not in the ignore or ident roles). When
  # nothing remains after exclusion... actually, when the exclusions
  # cover all candidate columns, return NULL.
  #
  # numonly  Restrict the candidates to numeric columns only.
  excluded <- union(getVariableIndicies(crs$ignore), getVariableIndicies(crs$ident))
  if (numonly)
    candidates <- seq(1,ncol(crs$dataset))[as.logical(sapply(crs$dataset, is.numeric))]
  else
    candidates <- 1:ncol(crs$dataset)
  if (setequal(candidates, excluded))
    return(NULL)
  return(simplifyNumberList(setdiff(candidates, excluded)))
}
getCategoricVariables <- function(type="string", include.target=FALSE)
{
  # Return the categoric (factor) variables from amongst those with an
  # INPUT role, optionally also considering the target variable.
  #
  # type            "names" returns the variable names; anything else
  #                 returns a simplified number list (string).
  # include.target  Also consider the target variable.
  #
  # Returns NULL when the dataset has no factor columns.
  #
  # Fixes: FALSE instead of the reassignable F in the signature;
  # seq_len() so a zero-column dataset yields integer(0) rather than
  # c(1, 0); vapply() for a type-stable logical mask.
  include <- NULL
  cats <- seq_len(ncol(crs$dataset))[vapply(crs$dataset, is.factor, logical(1))]
  if (length(cats) > 0)
  {
    indicies <- getVariableIndicies(crs$input)
    if (include.target)
      indicies <- c(indicies, getVariableIndicies(crs$target))
    included <- intersect(cats, indicies)
    if (type == "names")
      include <- names(crs$dataset)[included]
    else
      include <- simplifyNumberList(included)
  }
  return(include)
}
getNumericVariables <- function(type="string")
{
  # Return the numeric variables having an INPUT role. 080803 Add
  # support to return a list of indices rather than the default string
  # that needs to be executed to identify the indices.
  #
  # type  "string" (default) returns a simplified number list; any
  #       other value returns the raw indices.
  nums <- seq(1,ncol(crs$dataset))[as.logical(sapply(crs$dataset, is.numeric))]
  if (length(nums) == 0)
    return(NULL)
  indicies <- intersect(nums, getVariableIndicies(crs$input))
  if (type == "string")
    indicies <- simplifyNumberList(indicies)
  return(indicies)
}
|
/rattle/R/data.R
|
no_license
|
ingted/R-Examples
|
R
| false
| false
| 128,889
|
r
|
# Gnome R Data Miner: GNOME interface to R for Data Mining
#
# Time-stamp: <2015-11-15 09:02:15 gjw>
#
# DATA TAB
#
# Copyright (c) 2009 Togaware Pty Ltd
#
# This file is part of Rattle.
#
# Rattle is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# Rattle is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Rattle. If not, see <http://www.gnu.org/licenses/>.
#
########################################################################
#
# I've removed the Data Entry radio button because why, really, would
# anyone be interested in manually entering some data - use Gnumeric
# or some other spreadsheet to do that.
#
########################################################################
# TODO
#
# 100308 Consider using vcdExtras for displaying categoric data.
########################################################################
# UTILITIES
overwriteModel <- function()
{
  # 080523 If any model has been built, confirm with the user before
  # loading/reloading a dataset, since doing so clears the current
  # project. Perhaps this could be generalised to any kind of
  # operation that replaces the current model.
  #
  # Returns TRUE when it is safe to proceed, otherwise the result of
  # the question dialog.
  if (not.null(listBuiltModels()))
    return(questionDialog(Rtxt("You have chosen to load/reload the dataset.",
                               "This will clear the current project",
                               "(dataset and models).",
                               "If you choose not to continue",
                               "you can then save the current project before",
                               "loading the dataset.",
                               "\n\nDo you wish to continue and so overwrite",
                               "the current project?")))
  TRUE
}
dataTabShow <- function(...)
{
  # Display the nominated widgets within the Data tab and hide all of
  # the other known widgets. When new widgets are added to the tab
  # through editing the XML file with glade, be sure to add them to
  # the `known' list here.
  show.widgets <- c(...)
  known <- c("data_filename_label",
             "data_filechooserbutton",
             "data_separator_label",
             "data_separator_entry",
             "data_decimal_label",
             "data_decimal_entry",
             "data_header_checkbutton",
             "data_name_label",
             "data_name_combobox",
             "data_odbc_dsn_label",
             "data_odbc_dsn_entry",
             "data_odbc_table_label",
             "data_odbc_table_combobox",
             "data_odbc_limit_label",
             "data_odbc_limit_spinbutton",
             "data_odbc_believeNRows_checkbutton")
  lapply(show.widgets, function(w) theWidget(w)$show())
  lapply(setdiff(known, show.widgets), function(w) theWidget(w)$hide())
  invisible(NULL)
}
showDataViewButtons <- function(action=TRUE)
{
  # Rattle starts up with the View (081228 but not now the Edit)
  # buttons of the Data tab not sensitive. Once data has been loaded
  # we make these tabs sensitive. The ACTION option allows for the
  # case where we might want to make them not sensitive
  # (action=FALSE); not currently used but could be in the future,
  # probably when we click New project.
  if (! is.logical(action)) warning(Rtxt("action must be a logical"))
  for (b in c("data_view_button", "data_edit_button"))
    theWidget(b)$setSensitive(action)
}
urlModTime <- function(filename)
{
  # Modification time of FILENAME, which may be given as a "file://"
  # URI; the prefix is stripped first. We note that this will not work
  # for http:// urls.
  path <- gsub("file:///", "/", filename)
  file.info(path)$mtime
}
dataNeedsLoading <- function()
{
  # Decide whether the dataset nominated on the Data tab must be
  # (re)loaded. Returns TRUE when a load is required, FALSE when the
  # currently loaded dataset still matches the tab's settings.
  #
  # 080520 Determine whether any of the data source aspects of the
  # Data tab have changed. This is probably limited to checking things
  # relevant to the currently selected data source radio button.
  # 080712 If there is no dataname stored, then don't bother testing
  # any other conditions. The dataset should be loaded. 090315 Never
  # reload unless there is nothing loaded - that won't work when user
  # changes Filename we want to load.
  if (is.null(crs$dataname)) return(TRUE)
  # 080712 Check what data source is active, and act
  # appropriately. For those I have yet to work on, simply return TRUE
  # so that at least the data always gets loaded. But this does then
  # wipe out any changes the user makes to selections.
  if (theWidget("data_csv_radiobutton")$getActive() ||
      theWidget("data_arff_radiobutton")$getActive())
  {
    # 100409 Do the URLdecode here, then encode as UTF-8. Previously
    # no UTF-8 and the URLdecode was done 5 separate times below. The
    # mtime below did not URLdecode, but do so now, and make sure it
    # still works. Seems okay.
    filename <- theWidget("data_filechooserbutton")$getUri()
    if (is.null(filename)) return(TRUE)
    filename <- URLdecode(filename)
    Encoding(filename) <- "UTF-8"
    if (is.null(crs$dwd)) return(TRUE)
    if (isWindows())
    {
      # MS/Windows is not case sensitive.
      if (tolower(basename(filename))
          != tolower(crs$dataname) ||
          tolower(dirname(filename)) != tolower(crs$dwd))
        return(TRUE)
    }
    else
    {
      # Elsewhere, compare the file and directory names exactly.
      if (basename(filename) != crs$dataname ||
          dirname(filename) != crs$dwd)
        return(TRUE)
    }
    # 080606 TODO Test if file date has changed, and if so, return
    # TRUE. Note that file.info does not handle URLs so have to
    # specially handle this. Note that under MS/Windows this returns
    # NA so we don't get a chance to notice updated files.
    now.mtime <- urlModTime(filename)
    if (not.null(crs$mtime) && not.null(now.mtime) && now.mtime > crs$mtime)
      return(TRUE)
  }
  if (theWidget("data_rdataset_radiobutton")$getActive())
  {
    # Reload when the selected R dataset name has changed.
    dataname <- theWidget("data_name_combobox")$getActiveText()
    if (is.null(dataname) || crs$dataname != dataname)
      return(TRUE)
  }
  if (theWidget("data_library_radiobutton")$getActive())
  {
    dataname <- theWidget("data_name_combobox")$getActiveText()
    if (is.null(crs$datapkg) || is.null(dataname))
      return(TRUE)
    # Split the combobox text on ":" — presumably of the form
    # "dataset : package : description" — TODO confirm against
    # updateDataLibrary. The first token is the dataset, the second
    # its package.
    adsname <- gsub('([^ :]*).*$', '\\1', unlist(strsplit(dataname, ":"))[1])
    dspkg <- unlist(strsplit(dataname, ":"))[2]
    if (crs$dataname != adsname
        || crs$datapkg != dspkg)
      return(TRUE)
  }
  if (theWidget("data_rdata_radiobutton")$getActive())
  {
    # Reload when the selected name within the Rdata file has changed.
    dataname <- theWidget("data_name_combobox")$getActiveText()
    if (is.null(dataname) || crs$dataname != dataname) return(TRUE)
  }
  if (theWidget("data_odbc_radiobutton")$getActive())
  {
    # Reload when the selected ODBC table has changed.
    table <- theWidget("data_odbc_table_combobox")$getActiveText()
    if (is.null(table) || crs$dataname != table) return(TRUE)
  }
  if (theWidget("data_corpus_radiobutton")$getActive())
  {
    filename <- theWidget("data_corpus_location_filechooserbutton")$getUri()
    if (is.null(filename)) return(TRUE)
    return(TRUE) # Always reload for now.
  }
  if (theWidget("data_script_radiobutton")$getActive())
  {
    # Scripts are always re-run.
    return(TRUE)
  }
  # Return FALSE if we did not detect any changes.
  return(FALSE)
}
updateFilenameFilters <- function(button, fname)
{
  # Install the file-type filters appropriate to FNAME ("CSV", "ARFF"
  # or "Rdata") on the file chooser BUTTON (a widget, or the name of
  # one). As in the original, the filters are only rebuilt when the
  # first currently-installed filter is not already the expected one.
  #
  # The three near-identical construction branches are replaced by a
  # single data-driven loop over (label, glob) pairs.
  if (is.character(button)) button <- theWidget(button)

  # Filter specifications per source type. An "All Files" catch-all is
  # always appended last.
  specs <- list(
    CSV   = list(c(Rtxt("CSV Files"), "*.csv"),
                 c(Rtxt("TXT Files"), "*.txt"),
                 c(Rtxt("Excel Files"), "*.xls"),
                 c(Rtxt("Excel 2007 Files"), "*.xlsx")),
    ARFF  = list(c(Rtxt("ARFF Files"), "*.arff")),
    Rdata = list(c(Rtxt("Rdata Files"), "*.R[Dd]ata")))

  if (fname %in% names(specs))
  {
    wanted <- c(specs[[fname]], list(c(Rtxt("All Files"), "*")))
    filters <- button$listFilters()
    # Rebuild only when the leading filter is not already correct.
    if (! (length(filters) && filters[[1]]$getName() == wanted[[1]][1]))
    {
      lapply(filters, function(x) button$removeFilter(x))
      for (spec in wanted)
      {
        ff <- RGtk2::gtkFileFilterNew()
        ff$setName(spec[1])
        ff$addPattern(spec[2])
        button$addFilter(ff)
      }
    }
  }

  # Kick the GTK event loop otherwise you end up waiting until the
  # mouse is moved, for example.
  while (RGtk2::gtkEventsPending()) RGtk2::gtkMainIterationDo(blocking=FALSE)
}
newSampling <- function()
{
  # TRUE unless this build is the RStat variant, which keeps the old
  # sampling behaviour.
  crv$appname != "RStat"
}
validateSampleEntry <- function()
{
  # Validate the train/validate/test partition entered by the user:
  # the training share must be non-zero, no share may be negative, and
  # the shares must sum to 100. Pops up an error dialog and returns
  # FALSE on the first failure; TRUE when all checks pass.
  sampling <- parseSampleEntry()
  if (sampling[1] == 0)
  {
    errorDialog(Rtxt("A training set partition of 0 does not make sense.",
                     "\n\nPlease choose a non-zero, positive percentage, up to 100."))
    return(FALSE)
  }
  if (any(sampling < 0))
  {
    errorDialog(Rtxt("A percentage of less than 0 for the partition",
                     "does not make sense.",
                     "\n\nPlease choose percentages in the range 0-100."))
    return(FALSE)
  }
  if (sum(sampling) != 100)
  {
    errorDialog(sprintf(Rtxt("The sum of the partition proportions does not add",
                             "to 100 (percent): %d + %d + %d = %d.",
                             "\n\nPlease rectify."),
                        sampling[1], sampling[2], sampling[3], sum(sampling)))
    return(FALSE)
  }
  TRUE
}
parseSampleEntry <- function()
{
  # Parse the "70/15/15" style text of the sample entry into a length
  # three integer vector, defaulting to crv$default.sample when the
  # entry is empty. A single value N expands to N/0/(100-N); two
  # values A/B expand to A/(100-A-B)/B.
  ptext <- theWidget("data_sample_entry")$getText()
  split.ints <- function(x) as.integer(strsplit(x, "/")[[1]])
  partition <- if (nchar(ptext)) split.ints(ptext) else split.ints(crv$default.sample)
  if (length(partition) == 1)
    partition <- c(partition, 0, 100-partition)
  else if (length(partition) == 2)
    partition <- c(partition[1], 100-sum(partition), partition[2])
  partition
}
getTrainingPercent <- function()
{
  # The first element of the parsed sample entry is the training
  # percentage.
  parseSampleEntry()[1]
}
#-----------------------------------------------------------------------
# These are for handling protos (or envs for now). Moved into package
# container.
whichNumerics <- function(data)
{
  # Return the names of the numeric columns of DATA (a data frame or
  # list of columns).
  #
  # Uses vapply rather than sapply: sapply returns an empty list for a
  # zero-column data frame, and indexing names() by a list is an
  # error; vapply's logical(0) makes the empty case return
  # character(0) cleanly.
  names(data)[vapply(data, is.numeric, logical(1))]
}
setupDataset <- function(env, seed=NULL)
{
  # Augment the dataset environment ENV with derived modelling
  # variables, including a random 70/30 train/test split. SEED, when
  # supplied, makes the split reproducible.
  #
  # We assume the following dataset specific variables exist in env
  #   data     This is the actual data frame containing the dataset
  #   target   The single target variable for prediction
  #   [risk]   The single risk variable
  #   [inputs] The other variables used as inputs to predictive model
  #   [ignore] This overrides inputs if it is given.
  # Then we add the following variables to env
  #   vars          Variables used for modelling
  #   numerics      The numeric vars within inputs
  #   nobs          The number of observations
  #   ninputs       The number of input variables
  #   form          Formula for building models
  #   train         A 70% training dataset
  if (! is.null(seed)) set.seed(seed)
  evalq({
    # Default the optional variables that are absent from env.
    if (! exists("risk", inherits=FALSE))
      risk <- NULL
    # An explicit ignore list overrides any missing inputs list.
    if (exists("ignore", inherits=FALSE) && ! exists("inputs", inherits=FALSE))
      inputs <- setdiff(names(data), c(target, risk, ignore))
    if (! exists("inputs", inherits=FALSE))
      inputs <- setdiff(names(data), c(target, risk))
    vars <- c(inputs, target)
    ninputs <- length(inputs)
    nobs <- nrow(data)
    numerics <- whichNumerics(data[inputs])
    form <- as.formula(paste(target, "~ ."))
    # Random 70/30 split by row index.
    train <- sample(nobs, 0.7*nobs)
    test <- setdiff(1:nobs, train)
    # Row indices removed by na.omit over the modelling variables,
    # used to form complete-case variants of the split.
    na.obs <- attr(na.omit(data[vars]), "na.action")
    train.na.omit <- setdiff(train, na.obs)
    test.na.omit <- setdiff(test, na.obs)
    time.stamp <- date()
  }, env)
}
########################################################################
# CALLBACKS
on_data_csv_radiobutton_toggled <- function(button)
{
  # On activation show the CSV-specific widgets, install the CSV file
  # filters, and restore any previously chosen filename; on
  # deactivation remember the current filename for later restoration.
  if (! button$getActive())
  {
    crs$data.tab.csv.filename <- theWidget("data_filechooserbutton")$getUri()
    return()
  }
  dataTabShow("data_filename_label",
              "data_filechooserbutton",
              "data_separator_label",
              "data_separator_entry",
              "data_decimal_label",
              "data_decimal_entry",
              "data_header_checkbutton")
  updateFilenameFilters("data_filechooserbutton", "CSV")
  if (not.null(crs$data.tab.csv.filename))
    theWidget("data_filechooserbutton")$setUri(crs$data.tab.csv.filename)
}
on_data_arff_radiobutton_toggled <- function(button)
{
  # On activation show the filename chooser with ARFF filters and
  # restore any previously chosen filename; on deactivation remember
  # the current filename.
  if (! button$getActive())
  {
    crs$data.tab.arff.filename <- theWidget("data_filechooserbutton")$getUri()
    return()
  }
  dataTabShow("data_filename_label",
              "data_filechooserbutton")
  updateFilenameFilters("data_filechooserbutton", "ARFF")
  if (not.null(crs$data.tab.arff.filename))
    theWidget("data_filechooserbutton")$setUri(crs$data.tab.arff.filename)
}
on_data_rdata_radiobutton_toggled <- function(button)
{
  # On activation show the Rdata-specific widgets, clear the dataset
  # name combobox, and restore the previously chosen file and dataset
  # selection; on deactivation remember both for later.
  if (! button$getActive())
  {
    crs$data.tab.rdata.filename <- theWidget("data_filechooserbutton")$getUri()
    crs$data.tab.rdata.active <- theWidget("data_name_combobox")$getActive()
    return()
  }
  dataTabShow("data_filename_label",
              "data_filechooserbutton",
              "data_name_label",
              "data_name_combobox")
  updateFilenameFilters("data_filechooserbutton", "Rdata")
  theWidget("data_name_combobox")$getModel()$clear()
  if (not.null(crs$data.tab.rdata.filename))
    theWidget("data_filechooserbutton")$setUri(crs$data.tab.rdata.filename)
  if (not.null(crs$data.tab.rdata.active))
    theWidget("data_name_combobox")$setActive(crs$data.tab.rdata.active)
}
on_data_rdataset_radiobutton_toggled <- function(button)
{
  # On activation offer the data frames available in the R session,
  # preselecting any prior choice; on deactivation remember the
  # current choice.
  if (button$getActive())
  {
    dataTabShow("data_name_label", "data_name_combobox")
    updateRDatasets(current=crs$data.tab.rdataset.name)
  }
  else
    crs$data.tab.rdataset.name <- theWidget("data_name_combobox")$getActiveText()
}
on_data_corpus_radiobutton_toggled <- function(button)
{
  # Flip the data notebook between the corpus page (when active) and
  # the default CSV page (when not).
  page <- if (button$getActive()) crv$DATA.CORPUS.TAB else crv$DATA.CSV.TAB
  crv$DATA.NOTEBOOK$setCurrentPage(page)
}
# 080907 Trying to get an event that will auto update the combobox
# without having to move to another radio button and then back again.
on_data_name_combobox_button_press_event <- function(button)
{
  # 080907 Experimental handler: trace the event and refresh the list
  # of available R datasets on a button press, so the combobox updates
  # without a radio-button round trip.
  print("Button Press")
  updateRDatasets()
}
on_data_name_combobox_enter_notify_event <- function(button)
{
  # Experimental handler: trace the enter-notify event and refresh the
  # list of available R datasets.
  print("Enter Notify")
  updateRDatasets()
}
on_data_name_combobox_focus <- function(button)
{
  # Experimental handler: trace the focus event and refresh the list
  # of available R datasets.
  print("Focus")
  updateRDatasets()
}
on_data_name_combobox_set_focus_child<- function(direction, data)
{
  # Experimental handler: trace the set-focus-child event. The dataset
  # refresh is currently disabled.
  print("Focus Child")
  #print(direction)
  print(data)
  #updateRDatasets()
}
on_data_name_combobox_focus_in_event<- function(direction, data)
{
  # Experimental handler: trace the focus-in event. The dataset
  # refresh is currently disabled.
  print("Focus In")
  #print(direction)
  #updateRDatasets()
}
#
on_data_library_radiobutton_toggled <- function(button)
{
  # On activation show the package-data chooser, restoring any prior
  # selection; on deactivation remember the current selection.
  if (button$getActive())
  {
    dataTabShow("data_name_label", "data_name_combobox")
    updateDataLibrary(crs$data.tab.library.name)
  }
  else
    crs$data.tab.library.name <- theWidget("data_name_combobox")$getActiveText()
}
on_data_odbc_radiobutton_toggled <- function(button)
{
  # Reveal only the ODBC-related widgets when this source is selected;
  # do nothing on deactivation.
  if (! button$getActive()) return()
  dataTabShow("data_odbc_dsn_label",
              "data_odbc_dsn_entry",
              "data_odbc_table_label",
              "data_odbc_table_combobox",
              "data_odbc_limit_label",
              "data_odbc_limit_spinbutton",
              "data_odbc_believeNRows_checkbutton")
}
updateRDatasets <- function(current=NULL, cbox.name="data_name_combobox")
{
  # Populate the named combo box with the data frames (and
  # sqlite.data.frame proxies) visible in the user's workspace,
  # optionally restoring CURRENT as the active selection.
  #
  # current    Name of the dataset to preselect, if still available.
  # cbox.name  Name of the combo box widget to refresh.
  set.cursor("watch", Rtxt("Determining the available datasets...."))
  # 130126 We might be able to use get.objects("data.frame") here?
  #
  # Use get() rather than the original eval(parse(text=sprintf(...)))
  # so that datasets with non-syntactic names (e.g., names containing
  # spaces) are found instead of being silently dropped by a parse
  # failure.
  dl <- unlist(sapply(ls(sys.frame(0)),
                      function(x)
                      {
                        obj <- tryCatch(get(x, envir=sys.frame(0)),
                                        error=function(e) NULL)
                        if (is.data.frame(obj) || inherits(obj, "sqlite.data.frame"))
                          x
                        else
                          NULL
                      }))
  cbox <- theWidget(cbox.name)
  cbox$getModel()$clear()
  if (not.null(dl))
  {
    lapply(dl, cbox$appendText)
    # Set the selection to that which was is supplied.
    if (not.null(current) && current %in% dl)
      cbox$setActive(which(sapply(dl, function(x) x==current))[1]-1)
  }
  set.cursor(message=Rtxt("Data Names updated."))
}
# 091206 When the Survival radio button is toggled, change the names
# of the Target/Risk columns to match the paradigm: Time/Status for
# survival modelling, Target/Risk otherwise.
on_data_target_survival_radiobutton_toggled <- function(button)
{
  treeview <- theWidget("select_treeview")
  target <- treeview$getColumn(crv$COLUMN["target"])
  risk <- treeview$getColumn(crv$COLUMN["risk"])

  titles <- if (button$getActive())
              c(Rtxt("Time"), Rtxt("Status"))
            else
              c(Rtxt("Target"), Rtxt("Risk"))

  target$setTitle(titles[1])
  risk$setTitle(titles[2])
}
########################################################################
# EXECUTE
# Execute the Data tab: load (or reuse) the dataset from the selected
# source, then refresh the variable roles, sampling widgets, and the
# dependent tabs (Select, Test, Explore).
#
# csvname : optionally, a CSV filename passed through to
#           executeDataCSV() when the CSV source is active.
#
# Returns FALSE when loading or sample validation fails; otherwise the
# (invisible) NULL from the final return().
executeDataTab <- function(csvname=NULL)
{
  # Dispatch to the task indicated by the selected radio button within
  # the Data tab. 090315 Previously I tested if there is was a change
  # to the data source (with dataNeedsLoading) but this continually
  # got complicated between different OS and different data sources,
  # etc. So now we never reload a dataset, unless no dataset is
  # loaded. To load a new dataset, click New project first. Unless the
  # data type label is not sensitive (i.e., we have loaded a project),
  # simply update the variable roles without reloading the data.

  # if (not.null(csvname))
  # {
  #   if (! executeDataCSV(csvname)) return(FALSE)
  # }

  if (theWidget("data_type_label")$isSensitive() && dataNeedsLoading())
  {
    # Dispatch on the active data source radio button. Each
    # executeData*() returns FALSE (or NULL) on failure, aborting the
    # Execute.
    if (theWidget("data_csv_radiobutton")$getActive())
    {
      if (! executeDataCSV(csvname)) return(FALSE)
    }
    else if (theWidget("data_arff_radiobutton")$getActive())
    {
      if (! executeDataARFF()) return(FALSE)
    }
    else if (theWidget("data_odbc_radiobutton")$getActive())
    {
      if (! executeDataODBC()) return(FALSE)
    }
    else if (theWidget("data_rdata_radiobutton")$getActive())
    {
      if (! executeDataRdata()) return()
    }
    else if (theWidget("data_rdataset_radiobutton")$getActive())
    {
      if (! executeDataRdataset()) return()
    }
    else if (theWidget("data_library_radiobutton")$getActive())
    {
      if (! executeDataLibrary()) return()
    }
    else if (theWidget("data_corpus_radiobutton")$getActive())
    {
      if (! executeDataCorpus()) return()
    }
    else if (theWidget("data_script_radiobutton")$getActive())
    {
      if (! executeDataScript()) return()
    }
    else
      return()

    # Update the select treeview. This is done on a Data execute only
    # when a new dataset has been loaded. If the user has simply
    # changed some of the roles or the sampling then we do not do a
    # reset, just an update.

    createVariablesModel(colnames(crs$dataset))

    # Whether we have changed the dataset or not we need to generate the
    # sample and then record the variable roles.

    # Turn sampling on, set range bounds and generate the default 70%
    # sample. Do the range bounds first since otherwise the value gets
    # set back to 1. Also, need to set both the percentage and the count
    # since if the old percentage is 70 and the new is 70, then no
    # change in value is noticed, and thus the count is not
    # automatically updated.

    # 090315 Sampling should be on by default. I had a test here
    # "!is.null(RATTLE.SCORE.IN)" which, after cleaning up the
    # handling of global variables, is now FALSE, whereas previously
    # it must have been TRUE. Simply set to TRUE here until we find
    # why that was being done. Might need another crv tuning
    # parameter.

    theWidget("data_sample_checkbutton")$setActive(TRUE)

    # 090513 Reset the default sample size percentage and ensure it
    # holds (hence we need more than just setting the percentage spin
    # button.

    nrows <- nrow(crs$dataset)
    per <- crv$default.train.percentage
    srows <- round(nrows * per / 100)
    theWidget("sample_count_spinbutton")$setRange(1,nrows)
    theWidget("sample_count_spinbutton")$setValue(srows)
    theWidget("sample_percentage_spinbutton")$setValue(per)
    theWidget("data_sample_entry")$setText(crv$default.sample)
  }
  else
    resetRattle(new.dataset=FALSE)

  # 090416 Move the following from the above if branch to here. Reset
  # the sampling options here, except for whether sampling is
  # on/off. Thus, on loading a new dataset, sampling is set on
  # above. But if we modify the dataset external to Rattle, we want to
  # set new parameters here, yet leave the sampling checkbutton as it
  # was. The extra settings here are often redundant, but needed for
  # the "modified in R" case. 090513 Though now that I have this code
  # both here and above, we might need to revist the logic!
  #
  # We set range bounds and generate the default 70% sample. Do the
  # range bounds first since otherwise the value gets set back to
  # 1. Also, need to set both the percentage and the count since if
  # the old percentage is 70 and the new is 70, then no change in
  # value is noticed, and thus the count is not automatically updated,
  # even if the number of rows has been changed.

  nrows <- nrow(crs$dataset)

  # 090513 Remove the resetting of the sample size to 70 from here,
  # but get the current value. Otherwise, the sample size is always
  # reset to 70 on each Execute of the Data tab - not desired. Now
  # need to only reset it to 70 on loading a new dataset.

  if (newSampling())
    per <- getTrainingPercent()
  else
    per <- theWidget("sample_percentage_spinbutton")$getValue()
  srows <- round(nrows * per / 100)
  theWidget("sample_count_spinbutton")$setRange(1,nrows)
  theWidget("sample_count_spinbutton")$setValue(srows)
  theWidget("sample_percentage_spinbutton")$setValue(per)

  crv$DATA.DISPLAY.NOTEBOOK$setCurrentPage(crv$DATA.DISPLAY.TREEVIEW.TAB)

  # else
  # {
  #   resetRattle(new.dataset=FALSE)
  #
  #   if (dataNeedsLoading())
  #   {
  #
  #     # Just duplicate above for now to get this working.
  #     createVariablesModel(colnames(crs$dataset)) # BUT THIS REVERTS TO DEFAULTS
  #     nrows <- nrow(crs$dataset)
  #     per <- 70
  #     srows <- round(nrows * per / 100)
  #     theWidget("data_sample_checkbutton")$setActive(not.null(RATTLE.SCORE.IN))
  #     theWidget("sample_count_spinbutton")$setRange(1,nrows)
  #     theWidget("sample_count_spinbutton")$setValue(srows)
  #     theWidget("sample_percentage_spinbutton")$setValue(per)
  #   }
  #
  # }

  # TODO 080520 Change the name to updateSample.

  ## 080603 NOT NEEDED AS DONE IN executeSelectTab
  ## executeSelectSample()

  # Execute the SELECT tab. Changes have bene made and we need to
  # ensure the cached role variables are updated, or else we might see
  # unexpected warnings about changes having been made but not
  # EXECTUEd. [071125]

  if (theWidget("data_sample_checkbutton")$getActive() &&
      ! validateSampleEntry()) return(FALSE)

  # TODO 080520 Change the name to updateRoles.

  setGuiDefaultsSurvival() # 100505 Moved here from below
  executeSelectTab()
  resetTestTab()
  resetExploreTab()

  # 100505 Move to before executeSelectTab, ohterwise the labels get set
  # back to stating no variables selected.
  # setGuiDefaultsSurvival()

  # Set the risk label appropriately.
  theWidget("evaluate_risk_label")$setText(crs$risk)

  # Enable the Data View and Edit buttons.
  showDataViewButtons()

  return()
}
#-----------------------------------------------------------------------
# EXECUTE DATA CSV
# Load a CSV (or TXT, or xls/xlsx via readxl) dataset into crs$dataset.
#
# filename : optionally, the file to load. When NULL, the filename is
#            taken from the data_filechooserbutton; if none is set
#            there either, the user is offered the bundled sample
#            dataset.
#
# Returns TRUE on success and FALSE if loading was declined or failed.
executeDataCSV <- function(filename=NULL)
{
  # Either a filename is supplied in the function call or a filename
  # is expected to be available in the data_filechooserbutton. This
  # could be either a CSV or TXT file. If no filename is supplied,
  # then give the user the option to load a sample dataset (for now,
  # the weather dataset).

  # Remember whether a filename was explicitly supplied - this drives
  # which of the three branches below applies.
  supplied <- filename

  # Begin by collecting the relevant data from the interface. 080511
  # The file chooser button has a getFilename to retrieve the
  # filename. The getUri also retrieves the file name, but as a
  # URL. So we use this, since R can handle the
  # "file:///home/kayon/audit.csv" just fine. Thus I have now allowed
  # the filechooser button to accept non-local files (i.e.,
  # URLs). Unfortunately I can't yet get the basename of the URL to be
  # displayed in the button text. 080512 The URLdecode will replace
  # the %3F with "?" and %3D with "=", etc, as is required for using
  # this with the read.csv function.

  if (is.null(filename))
    filename <- theWidget("data_filechooserbutton")$getUri()

  # If no filename has been supplied give the user the option to use
  # the Rattle supplied sample dataset.

  use.sample.dataset <- FALSE

  if (not.null(supplied))
  {
    # 090314 Trying to get the scenario of a supplied filename
    # working, so that it is displayed in the Filename box and
    # dataNeedsLoading does not think a new file needs loading on the
    # next Execute.

    # Normalise the supplied name into a file:// URI (MS/Windows paths
    # like "C:/..." need "file:///").
    if (substr(filename, 1, 5) != "file:")
    {
      if (substr(filename, 1, 1) == "/")
        filename <- paste("file://", filename, sep="")
      else
        filename <- paste("file:///", filename, sep="")
    }

    # 090314 Added to ensure we get the filename listed properly. This
    # seems to be relevant only if a filename was supplied (it is also
    # done below for the case when the rattle supplied dataset is
    # laoded. Perhaps this should be done up there?

    theWidget("data_filechooserbutton")$setUri(filename)

    # 090314 Do this because it was done below.

    while (RGtk2::gtkEventsPending()) RGtk2::gtkMainIterationDo(blocking=FALSE)
  }
  else if (is.null(filename))
  {
    if (! questionDialog(sprintf(Rtxt("No CSV filename has been provided.",
                                      "\n\nWe require a dataset to be loaded.",
                                      "\n\nWould you like to use the example",
                                      "%s dataset?"),
                                 Rtxt(crv$sample.dataset))))

      # If no filename is given and the user decides not to go with
      # the sample dataset then return without doing anything.

      return(FALSE)

    else
    {
      # 080515 Use the Rattle provided sample dataset.

      use.sample.dataset <- TRUE

      filename <- system.file("csv", paste(crv$sample.dataset, ".csv", sep=""),
                              package="rattle")

      theWidget("data_filechooserbutton")$setFilename(filename)

      # 130825 This does not get reflected in the GUI? Can't work out
      # how to make it so. For now it stays as None.

      # Make sure we end up with a URI since a URI is otherwise used
      # when retrieving the information from the filechooserbutton
      # widget. If we don't do this then the crs$dwd does not include
      # the "file://" bit, and thus dataNeedsLoading returns TRUE the
      # next time, which is not right! 090214 This does not work for
      # MS/Windows. The filename is something like "C:/..." and this
      # ends up adding "file://" but it should be "file:///". So check
      # for this.

      if (substr(filename, 1, 1) == "/")
        filename <- paste("file://", filename, sep="")
      else
        filename <- paste("file:///", filename, sep="")

      # 080713 We still need the events flush with tootiphack set
      # since otherwise we have to lose focus before the screen gets
      # updated.

      while (RGtk2::gtkEventsPending()) RGtk2::gtkMainIterationDo(blocking=FALSE)

      #gtkmainquit_handler(NULL, NULL)
      #gtkmain_handler(NULL, NULL)
    }
  }
  else
  {
    # Filename came from the filechooser as a URI - decode it.
    filename <- URLdecode(filename)
    Encoding(filename) <- "UTF-8" # 100408 Japanese otherwise dirname fails. Try for all.
  }

  # Record the working directory and modification time so that
  # dataNeedsLoading can later detect changes.
  crs$dwd <- dirname(filename)
  crs$mtime <- urlModTime(filename)

  # If there is a model warn about losing it.

  if (! overwriteModel()) return(FALSE)

  # Fix filename for MS - otherwise eval/parse strip the \\.

  if (isWindows()) filename <- gsub("\\\\", "/", filename)

  # Get the separator and decimal to use. Non-default values are
  # turned into extra read.csv() argument strings.

  sep = theWidget("data_separator_entry")$getText()
  if (sep != ",")
    sep <- sprintf(', sep="%s"', sep)
  else
    sep <- ""

  dec = theWidget("data_decimal_entry")$getText()
  if (dec != ".")
    dec <- sprintf(', dec="%s"', dec)
  else
    dec <- ""

  # Check whether we expect a header or not.

  if (theWidget("data_header_checkbutton")$getActive())
    hdr <- ""
  else
    hdr <- ", header=FALSE"

  nastring <- ', na.strings=c(".", "NA", "", "?")'
  stripwhite <- ', strip.white=TRUE'

  # Generate commands to read the data. 091130 Add encoding to use the
  # configured encoding.

  if (use.sample.dataset)
    read.cmd <- sprintf(paste('crs$dataset <-',
                              'read.csv(system.file("csv",',
                              '"%s.csv", package="rattle"),',
                              'encoding="%s")'),
                        crv$sample.dataset, crv$csv.encoding)
  else if (tolower(get.extension(filename)) %in% c("xls", "xlsx"))
  {
    if (! packageIsAvailable("readxl", Rtxt("read .xls or .xlsx files"))) return(FALSE)

    # 100114 A quick hack to allow reading MS/Excel files. 150517
    # Notice the use of library() rather than require(). We really
    # need to attach the package not try to attach the package.

    read.cmd <- sprintf(paste("library(readxl, quietly=TRUE)",
                              'crs$dataset <- read_excel("%s")',
                              # Make sure we return the actual dataset
                              # as the result as that is assumed.
                              "crs$dataset",
                              sep="\n"),
                        sub("file:///", ifelse(isWindows(), "", "/"), filename))
    # 130612 Still needed for isWindows? sub("file:///", "", filename))
  }
  else
    # 100428 With read.csv("...", encoding="UTF-8") column names that
    # are purely UTF-8 see the trailing comma as part of the column
    # name, and so get merged with the next column. Need to ensure the
    # encodng option is included in the file argument instead. I think
    # that readTableHeader might be the culprit., but not tested. TODO
    # This will need fixing everywhere that read.csv is used.
    # 10429 Only use file(..., encoding) for Japanese. Otherwise
    # put the encoding as argument to read.csv which always works on
    # Linux?

    if (isJapanese())
      read.cmd <- sprintf('crs$dataset <- read.csv(file("%s", encoding="%s")%s%s%s%s%s)',
                          filename, crv$csv.encoding, hdr, sep, dec, nastring,
                          stripwhite)
    else
      read.cmd <- sprintf('crs$dataset <- read.csv("%s"%s%s%s%s%s, encoding="%s")',
                          filename, hdr, sep, dec, nastring, stripwhite,
                          crv$csv.encoding)

  # Start logging and executing the R code.

  startLog()
  appendLog(Rtxt("Load the data."), read.cmd)
  resetRattle()
  result <- try(eval(parse(text=read.cmd)), silent=TRUE)

  # Translate the common read failures into user-friendly dialogs by
  # matching on the error message text.
  if (inherits(result, "try-error"))
  {
    if (any(grep("cannot open the connection", result)))
    {
      errorDialog(sprintf(Rtxt("The file you specified could not be found:",
                               "\n\n\t%s",
                               "\n\nPlease check the filename and try again."),
                          filename))
      return(FALSE)
    }
    else if (any(grep("no lines available in input", result))
             | any(grep("first five rows are empty: giving up", result)))
    {
      errorDialog(sprintf(Rtxt("The file you specified is empty:",
                               "\n\n\t%s",
                               "\n\nPlease check the file and try again."),
                          filename))
      return(FALSE)
    }
    else if (any(grep("duplicate", result)))
    {
      errorDialog(sprintf(Rtxt("The dataset loaded from the file:",
                               "\n\n\t%s",
                               "\n\nhas duplicate columns.",
                               "This is sometimes due to using an incorrect",
                               "separator (%s) or decimal point (%s) in the file.",
                               "Or it might be because the file has no header line.",
                               "\n\nThe actual error message was: %s",
                               "\nPlease check the file format and the defaults",
                               "set in the Data tab and try again."),
                          filename, theWidget("data_separator_entry")$getText(),
                          theWidget("data_decimal_entry")$getText(), result))
      return(FALSE)
    }
    else
      return(errorReport(read.cmd, result))
  }

  # A single-column result usually means the wrong separator was used.
  if (ncol(result) < 2)
  {
    errorDialog(sprintf(Rtxt("The data from the file:",
                             "\n\n\t%s",
                             "\n\ncontains only a single column.",
                             "This is not usually what is expected and",
                             "is often due to using something other than the specified",
                             "separator (%s) and decimal point (%s) in the file.",
                             "\n\nPlease check the file format and the defaults",
                             "set in the Data tab and try again."),
                        filename, theWidget("data_separator_entry")$getText(),
                        theWidget("data_decimal_entry")$getText()))
    return(FALSE)
  }

  crs$dataname <- basename(filename)
  # 110306 Encoding(crs$dataname) <- "UTF-8"
  # 110306 For Japanese hopefully this works better:
  if (isJapanese()) crs$dataname <- iconv(crs$dataname, from="UTF-8")

  setMainTitle(crs$dataname)

  # Update the Data Tab Treeview and Samples.

  ## resetVariableRoles(colnames(crs$dataset), nrow(crs$dataset))

  # Enable the Data View and Edit buttons.

  ## showDataViewButtons()

  setStatusBar(sprintf(Rtxt("The CSV file has been loaded: %s.",
                            "Please wait whilst we extract its structure..."),
                       crs$dataname))

  return(TRUE)
}
########################################################################
# OLD DATA TAB STUFF MIGRATING TO THE ABOVE
#
# Callback for the Data tab's View button: display the current dataset.
on_data_view_button_clicked <- function(button)
{
  viewData()
}
# Callback for the Data tab's Edit button: open the dataset editor.
on_data_edit_button_clicked <- function(button)
{
  editData()
}
# Callback for when a new file is chosen via the Data tab filechooser.
on_data_filechooserbutton_file_set <- function(button)
{
  # When the filename has been changed on the Data tab check if
  # further action is required. If RData File is active, then load the
  # corresponding .Rdata file and extract the dataset names to be
  # chosen from.

  if (theWidget("data_rdata_radiobutton")$getActive())
    updateRDataNames()
}
# Load the chosen .RData file and populate the data name combo box with
# the names of the R objects it contains (recorded in
# crs$rdata.datasets).
#
# filename : path to the .RData file to load. Previously this argument
#            was accepted but silently ignored - the filechooser widget
#            was always consulted. Now the widget is only used as a
#            fallback when no filename is supplied, so the parameter
#            actually works while all existing no-argument callers are
#            unaffected.
updateRDataNames <- function(filename=NULL)
{
  # Collect relevant data, consulting the filechooser only when no
  # filename was supplied.
  if (is.null(filename))
    filename <- theWidget("data_filechooserbutton")$getFilename()

  # Record the working directory and modification time so changes can
  # be detected later.
  crs$dwd <- dirname(filename)
  crs$mtime <- urlModTime(filename)

  # Fix filename for MS - otherwise eval/parse strip the \\.

  if (isWindows()) filename <- gsub("\\\\", "/", filename)

  # Generate commands to read the data and then display the structure.

  load.cmd <- sprintf('crs$rdata.datasets <- load("%s")', filename)

  # Start logging and executing the R code.

  startLog()
  appendLog(Rtxt("Load an RData file containing R objects."), load.cmd)
  set.cursor("watch", Rtxt("Loading the RData file..."))
  eval(parse(text=load.cmd), .GlobalEnv) # Env so datasets are globally available.

  # Add new data frames to the combo box.

  combobox <- theWidget("data_name_combobox")
  if (not.null(crs$rdata.datasets))
  {
    combobox$getModel()$clear()
    lapply(crs$rdata.datasets, combobox$appendText)
  }

  setStatusBar(Rtxt("The list of available datasets has been updated",
                    "from the supplied data file.",
                    "Choose one dataset from the Data Name box."))
}
#-----------------------------------------------------------------------
# DATA LIBRAY
#
# 080522 Migrated this from the old interface to the new
# interface. Maybe this is now called whenever the Library radio
# button is activated.
#
# OLD: Update the library combo box with all of the available
# datasets. Can take a little time the first time to generate the
# list. I've associated this with the focus callback, but then it is
# called also when it loses focus!!!
# Populate the data name combo box with every dataset available from
# every installed package, formatted as "Item:Package:Title". Building
# the list can take a little time the first time around.
#
# current : optionally, the entry to re-select once the list is built;
#           when NULL the currently selected entry (if any) is kept.
updateDataLibrary <- function(current=NULL)
{
  # OLD: TODO How to tell that this is a "gain focus" action and not a
  # "lose focus" action, since we only want to build the list on
  # gaining focus.

  cbox <- theWidget("data_name_combobox")

  # Remember the current selection so that it can be restored below.
  if (is.null(current)) current <- cbox$getActiveText()

  ## if (not.null(current)) return()

  # Building the list may be slow, so show the busy cursor meanwhile.
  set.cursor("watch", Rtxt("Determining the available datasets from all packages...."))

  # 090418 Suppress warnings about datasets having moved to 'datasets'.
  opt <- options(warn=-1)
  found <- data(package = .packages(all.available = TRUE))
  options(opt)

  entries <- sort(paste(found$results[, 'Item'], ":",
                        found$results[, 'Package'], ":",
                        found$results[, 'Title'], sep=""))

  # Replace the combo box contents with the sorted entries.
  cbox$getModel()$clear()
  if (not.null(entries))
  {
    lapply(entries, cbox$appendText)

    # Restore the previous selection if it is still present.
    if (not.null(current) && current %in% entries)
      cbox$setActive(which(sapply(entries, function(x) x == current))[1] - 1)
  }

  set.cursor(message="")
}
#-----------------------------------------------------------------------
# Callback wrapper: refresh the ODBC table list for the current DSN.
open_odbc_set_combo <- function(button)
{
  openODBCSetTables()
}
# Open an ODBC connection for the DSN named in the GUI and populate the
# table combo box with the tables found in that database.
#
# Returns TRUE when the table list was populated, FALSE on any failure
# (RODBC unavailable, connection error, or table query error).
openODBCSetTables <- function()
{
  # This is for use in the callback for when the ODBC DSN name has
  # changed (associated with the "activate" signal). Load the known
  # tables from the specified ODBC database. The ODBC connection will
  # be opened and queried for the list of tables.

  # Obtain the name of the DSN.

  DSNname <- theWidget("data_odbc_dsn_entry")$getText()

  # Check if we should believe the number of rows.

  bnumrows <- sprintf(", believeNRows=%s",
                      ifelse(theWidget("data_odbc_believeNRows_checkbutton")$getActive(),
                             "TRUE", "FALSE"))

  # Generate commands to connect to the database and retrieve the
  # tables. These are built as strings so they can be logged verbatim
  # and then eval/parsed.

  lib.cmd <- sprintf("library(RODBC)")
  connect.cmd <- sprintf('crs$odbc <- odbcConnect("%s"%s)', DSNname, bnumrows)
  tables.cmd <- sprintf('crs$odbc.tables <- sqlTables(crs$odbc)$TABLE_NAME')

  # Ensure the RODBC library is available or else we can not support ODBC.

  if (! packageIsAvailable("RODBC", Rtxt("connect to an ODBC database"))) return(FALSE)

  startLog(Rtxt("Open an ODBC connection."))
  appendLog(Rtxt("Require the RODBC package."), lib.cmd)

  # 140906 Move to using namespaces within the code, though still
  # expose the interactive commands.
  #set.cursor("watch")
  #eval(parse(text=lib.cmd))
  #set.cursor()

  # Close all currently open channels. This assumes that the user is
  # not openning channels themselves. It could be a bad choice, but
  # assume we are addressing the usual Rattle user.

  RODBC::odbcCloseAll()

  appendLog(Rtxt("Open the connection to the ODBC service."), connect.cmd)
  result <- try(eval(parse(text=connect.cmd)))
  if (inherits(result, "try-error"))
  {
    errorDialog(Rtxt("The attempt to open the ODBC connection failed.",
                     "Please check that the DSN is correct.",
                     "See the R Console for further details."))
    return(FALSE)
  }

  appendLog(Rtxt("Load the names of available tables."), tables.cmd)
  set.cursor("watch")
  result <- try(eval(parse(text=tables.cmd)))
  set.cursor()
  if (inherits(result, "try-error"))
  {
    errorDialog(Rtxt("The attempt to query the ODBC connection failed.",
                     "Please check that the DSN is correct.",
                     "See the R Console for further details."))
    return(FALSE)
  }

  # Add list of tables to the combo box.

  combobox <- theWidget("data_odbc_table_combobox")
  if (not.null(crs$odbc.tables))
  {
    combobox$getModel()$clear()
    lapply(crs$odbc.tables, combobox$appendText)
  }

  setStatusBar(Rtxt("ODBC connection to database established. Now select a table."))

  return(TRUE)
}
#----------------------------------------------------------------------
#
# Execution
#
# Rebuild the SELECT treeview with the given variables and roles, and
# optionally regenerate the default 70% training sample.
#
# variables : character vector of variable (column) names.
# nrows     : number of rows in the dataset (used to size the sample).
# input, target, risk, ident, ignore, weight : role assignments passed
#             through to createVariablesModel().
# zero, mean, boxplot, hisplot, cumplot, benplot, barplot, dotplot,
# mosplot, paiplot : additional per-variable settings passed through;
#             semantics are defined by createVariablesModel().
# resample  : when TRUE, reset the sampling widgets and regenerate the
#             sample.
# autoroles : when TRUE, let createVariablesModel() guess roles.
resetVariableRoles <- function(variables, nrows, input=NULL, target=NULL,
                               risk=NULL, ident=NULL, ignore=NULL, weight=NULL,
                               zero=NULL, mean=NULL,
                               boxplot=NULL,
                               hisplot=NULL, cumplot=NULL, benplot=NULL,
                               barplot=NULL, dotplot=NULL, mosplot=NULL, paiplot=NULL,
                               resample=TRUE, autoroles=TRUE)
{
  # Update the SELECT treeview with the dataset variables.

  createVariablesModel(variables, input, target, risk, ident, ignore,
                       weight, zero, mean, boxplot, hisplot, cumplot,
                       benplot, barplot, dotplot, mosplot, paiplot,
                       autoroles=autoroles)

  if (resample)
  {
    # Turn sampling on, set range bounds and generate the default 70%
    # sample. Do the range bounds first since otherwise the value gets
    # set back to 1. Also, need to set both the percentage and the
    # count since if the old percentage is 70 and the new is 70, then
    # no change in value is noticed, and thus the count is not
    # automatically updated.

    per <- 70
    srows <- round(nrows * per / 100)
    theWidget("data_sample_checkbutton")$setActive(TRUE)
    theWidget("sample_count_spinbutton")$setRange(1,nrows)
    theWidget("sample_count_spinbutton")$setValue(srows)
    theWidget("sample_percentage_spinbutton")$setValue(per)
    theWidget("data_sample_entry")$setText(crv$default.sample)
    executeSelectSample()
  }

  # Execute the SELECT tab. Changes have bene made and we need to
  # ensure the cached role variables are updated, or else we might see
  # unexpected warnings about changes having been made but not
  # EXECTUEd. [071125]

  executeSelectTab(resample)

  # Set the risk label appropriately.

  theWidget("evaluate_risk_label")$setText(crs$risk)
}
# Clear all dataset-related treeviews and rebuild them with the
# supplied role assignments, without resampling and without automatic
# role guessing.
resetDatasetViews <- function(input, target, risk, ident, ignore, weight=NULL)
{
  # Reset each of the treeviews in turn.
  for (view in c("select_treeview", "impute_treeview",
                 "categorical_treeview", "continuous_treeview"))
    theWidget(view)$getModel()$clear()

  # Recreate the treeviews, setting the roles as provided.
  resetVariableRoles(colnames(crs$dataset), nrow(crs$dataset),
                     input=input, target=target, risk=risk,
                     ident=ident, ignore=ignore, weight=weight,
                     resample=FALSE, autoroles=FALSE)
}
# Placeholder for loading a dataset via a user-supplied script - not
# yet implemented. Reports this on the status bar and returns FALSE so
# the Data tab Execute aborts.
executeDataScript <- function()
{
  setStatusBar(Rtxt("The script option is not yet implemented."))
  FALSE
}
# Load an ARFF (Weka) file, chosen via the filechooser, into
# crs$dataset using foreign::read.arff.
#
# Returns TRUE on success, FALSE when no filename is set, R is too old,
# the foreign package is missing, or the user declines to overwrite an
# existing model.
executeDataARFF <- function()
{
  # ARFF support relies on foreign::read.arff, first shipped with
  # R 2.5.0.
  if (!exists("getRversion", baseenv()) || getRversion() <= "2.4.0")
  {
    infoDialog(Rtxt("Support for ARFF is only available in R 2.5.0 and beyond."))
    return(FALSE)
  }

  # Collect relevant data

  filename <- theWidget("data_filechooserbutton")$getUri()

  # If no filename is given then return without doing anything.

  if (is.null(filename))
  {
    errorDialog(Rtxt("No ARFF Filename has been chosen yet.",
                     "You must choose one before execution."))
    return(FALSE)
  }

  # Decode the URI and record working directory and modification time.
  filename <- URLdecode(filename)
  crs$dwd <- dirname(filename)
  crs$mtime <- urlModTime(filename)

  # We need the foreign package to read ARFF data.

  if (! packageIsAvailable("foreign", Rtxt("read an ARFF dataset"))) return(FALSE)
  lib.cmd <- "library(foreign, quietly=TRUE)"

  # If there is a model warn about losing it.

  if (! overwriteModel()) return(FALSE)

  # Fix filename for MS - otherwise eval/parse strip the \\.

  if (isWindows()) filename <- gsub("\\\\", "/", filename)

  # Generate commands to read the data and then display the structure.

  read.cmd <- sprintf('crs$dataset <- read.arff("%s")', filename)
  str.cmd <- "str(crs$dataset)"

  # Start logging and executing the R code.

  startLog()
  ##theWidget(TV)$setWrapMode("none") # On for welcome msg
  ##resetTextview(TV)
  appendLog(packageProvides("foreign", "read.arff"), lib.cmd)
  eval(parse(text=lib.cmd))
  appendLog(Rtxt("Load an ARFF file."), read.cmd)
  resetRattle()
  eval(parse(text=read.cmd))

  crs$dataname <- basename(filename)
  setMainTitle(crs$dataname)

  # appendLog(Rtxt("Display a simple summary (structure) of the dataset."), str.cmd)
  ##appendTextview(TV, sprintf("Structure of %s.\n\n", filename),
  ##               collectOutput(str.cmd))

  ## Update the select treeview and samples.
  ## resetVariableRoles(colnames(crs$dataset), nrow(crs$dataset))

  # Enable the Data View button.
  ## showDataViewButtons()

  setStatusBar(sprintf(Rtxt("The ARFF data has been loaded: %s."), crs$dataname))

  return(TRUE)
}
# Load a dataset into crs$dataset from the currently open ODBC
# connection (crs$odbc): either a full SELECT of the chosen table
# (optionally with a LIMIT) or, when present, a user-supplied SQL
# query.
#
# Returns TRUE on success, FALSE on a reportable failure, and NULL
# (early return()) when the user declines a large extraction.
executeDataODBC <- function()
{
  # Retrieve data from a data source name (DSN) as provided through
  # the data_odbc_dsn_entry. Note that there is no standard LIMIT
  # option in SQL, but it is LIMIT in Teradata, so perhaps we go with
  # that for now?

  dsn.name <- theWidget("data_odbc_dsn_entry")$getText()
  table <- theWidget("data_odbc_table_combobox")$getActiveText()
  row.limit <- theWidget("data_odbc_limit_spinbutton")$getValue()
  believe.nrows <- theWidget("data_odbc_believeNRows_checkbutton")$getActive()
  # warn.many <- theWidget("data_odbc_warnmany_checkbutton")$getActive()
  sql.query <- "" # theWidget("odbc_sql_entry")$getText()

  # If the ODBC channel has not been openned, then tell the user how
  # to do so. Use inherits() rather than comparing class() with "!=":
  # class() may return a vector of classes, in which case the "!="
  # comparison yields a non-scalar condition (an error in recent R).

  if (! inherits(crs$odbc, "RODBC"))
  {
    errorDialog(Rtxt("A connection to an ODBC data source name (DSN) has not been",
                     "established. Please enter the DSN and press the Enter key.",
                     "This will also populate the list of tables to choose from.",
                     "After establishing the connection you can choose a table",
                     "or else enter a specific SQL query to retrieve a dataset."))
    return(FALSE)
  }

  # Error if no table from the database has been chosen.

  if (sql.query == "" && is.null(table))
  {
    errorDialog(Rtxt("No table nor SQL query has been specified.",
                     "Please identify the name of the table you wish to load.",
                     "All tables in the connected database are listed",
                     "once a connection is made.",
                     "\n\nAlternatively, enter a query to retrieve a dataset."))
    return(FALSE)
  }

  # If there is a model warn about losing it.

  if (! overwriteModel()) return(FALSE)

  # Build the SQL: either the user's query verbatim or a full table
  # scan, with a LIMIT when a positive row limit is set.

  if (sql.query != "")
    sql <- sql.query
  else
  {
    sql <- sprintf("SELECT * FROM %s", table)
    if (row.limit > 0) sql <- paste(sql, "LIMIT", row.limit)
  }

  #assign.cmd <- "crs$dataset <- sqlFetch(crs$odbc, table)"
  assign.cmd <- paste("crs$dataset <- sqlQuery(crs$odbc, ", '"', sql, '"',
                      ifelse(believe.nrows, "", ", believeNRows=FALSE"),
                      ")", sep="")
  str.cmd <- "str(crs$dataset)"

  if (row.limit == 0)
  {
    # Double check with the user if we are about to extract a large
    # number of rows.

    numRows <- RODBC::sqlQuery(crs$odbc, sprintf("SELECT count(*) FROM %s", table))
    if (crv$odbc.large != 0 && numRows > crv$odbc.large)
      if (! questionDialog(sprintf(Rtxt("You are about to extract %s",
                                        "rows from the table %s",
                                        "of the %s ODBC connection.",
                                        "\n\nDo you wish to continue?"),
                                   numRows, table, dsn.name)))
        return()
  }

  # Start logging and executing the R code.

  startLog()
  appendLog(Rtxt("Load dataset from ODBC database table."), assign.cmd)
  resetRattle()
  eval(parse(text=assign.cmd))

  crs$dataname <- table
  setMainTitle(crs$dataname)

  appendLog(Rtxt("Display a simple summary (structure) of the dataset."), str.cmd)

  setStatusBar(sprintf(Rtxt("The ODBC data has been loaded: %s."), crs$dataname))

  return(TRUE)
}
# Assign to crs$dataset the object chosen from a previously loaded
# .RData file (the file itself is loaded by updateRDataNames; here we
# only pick one of its objects by name).
#
# Returns TRUE on success, FALSE when the filename or dataset name is
# missing or the user declines to overwrite an existing model.
executeDataRdata <- function()
{
  # Collect relevant data.

  filename <- theWidget("data_filechooserbutton")$getFilename()
  dataset <- theWidget("data_name_combobox")$getActiveText()

  # Error exit if no filename is given.

  if (is.null(filename))
  {
    errorDialog(Rtxt("No Rdata filename has been chosen yet.",
                     "You must choose one before execution."))
    return(FALSE)
  }

  # Record working directory and modification time for change
  # detection.
  crs$dwd <- dirname(filename)
  crs$mtime <- urlModTime(filename)

  # Error if no dataset from the Rdata file has been chosen.

  if (is.null(dataset))
  {
    errorDialog(Rtxt("No R dataset name has been specified.",
                     "Please identify the name of the R dataset.",
                     "Any data frames that were found in the loaded Rdata",
                     "file are available to choose from in the Data Name",
                     "combo box."))
    return(FALSE)
  }

  # If there is a model warn about losing it.

  if (! overwriteModel()) return(FALSE)

  # Generate commands.

  assign.cmd <- sprintf('crs$dataset <- %s', dataset)
  str.cmd <- "str(crs$dataset)"

  # Start logging and executing the R code.

  startLog()
  appendLog(Rtxt("Load an RData file."), assign.cmd)
  resetRattle()
  eval(parse(text=assign.cmd))

  crs$dataname <- dataset
  setMainTitle(crs$dataname)

  setStatusBar(sprintf(Rtxt("The data has been loaded: %s.",
                            "Please wait whilst we extract its structure..."),
                       crs$dataname))

  return(TRUE)
}
# Assign to crs$dataset a data frame already present in the R console
# workspace, chosen by name from the data name combo box.
#
# Returns TRUE on success, FALSE when no dataset name is selected or
# the user declines to overwrite an existing model.
executeDataRdataset <- function()
{
  # Collect relevant data

  .dataset <- theWidget("data_name_combobox")$getActiveText()

  # 080907 Can we do this here each time? I haven't work out a way to
  # update the combobox when it is clicked - this is what would be
  # best! But at least having it in here means we can update it when
  # it is executed.

  updateRDatasets(current=.dataset)

  if (is.null(.dataset))
  {
    errorDialog(Rtxt("No R dataset name has been specified.",
                     "Please identify the name of the R dataset.",
                     "Any data frames that exist in the R Console",
                     "are available from the Data Name combo box."))
    return(FALSE)
  }

  # If there is a model then warn about losing it.

  if (! overwriteModel()) return(FALSE)

  # Generate commands.

  assign.cmd <- sprintf('crs$dataset <- %s', .dataset)
  str.cmd <- "str(crs$dataset)"

  # Start logging and executing the R code.

  startLog()
  #theWidget(TV)$setWrapMode("none") # On for welcome msg
  #resetTextview(TV)
  appendLog(Rtxt("Load an R data frame."), assign.cmd)
  resetRattle()
  eval(parse(text=assign.cmd))

  crs$dataname <- .dataset
  setMainTitle(crs$dataname)

  # 080328 Fix up any non-supported characters in the variable names,
  # otherwise they cause problems, e.g. "a-b" when used as ds$a-b is
  # interpreted as (ds$a - b)!

  names(crs$dataset) <- make.names(names(crs$dataset))

  appendLog(Rtxt("Display a simple summary (structure) of the dataset."), str.cmd)

  setStatusBar(Rtxt("The R dataset has been loaded.",
                    "Please wait whilst we extract its structure..."))

  return(TRUE)
}
executeDataLibrary <- function()
{
  # 080521 Load a dataset from a particular R package, as chosen in the
  # Data Name combobox. The combobox text has the form
  # "name : package : description", possibly "name (loadname) : ...".
  # Returns TRUE on success, FALSE (after an error dialog) otherwise.

  # Collect relevant data.
  dataset <- theWidget("data_name_combobox")$getActiveText()
  if (is.null(dataset))
  {
    errorDialog(Rtxt("No dataset from the R libraries has been specified.",
                     "\n\nPlease identify the name of the dataset",
                     "you wish to load using the Data Name chooser."))
    return(FALSE)
  }

  # Split the combobox text once rather than three times.
  parts <- unlist(strsplit(dataset, ":"))

  # Actual dataset name as known when loaded.
  adsname <- gsub('([^ :]*).*$', '\\1', parts[1])

  # Some datasets are loaded through loading another name (which
  # appears in parentheses). Extract the actual name of the dataset
  # that has to be named to be loaded.
  dsname <- gsub('.* \\((.*)\\)$', '\\1', parts[1])

  # Extract the name of the package from which the dataset is loaded.
  dspkg <- parts[2]

  # If there is a model then warn about losing it.
  if (! overwriteModel()) return()

  # Generate commands. 090321 Add a command to fix the variable
  # names. Some datasets, like AdultUCI in arules, have names like
  # education-num, which in some cases looks like a subtraction in
  # R. Without changing it here I would need to fix other code up to
  # quote the use of the variable name, and it might be that rpart has
  # an issue with it also (but not confirmed).
  assign.cmd <- sprintf(paste('data(list = "%s", package = "%s")\n',
                              'crs$dataset <- %s\n',
                              'names(crs$dataset) <- ',
                              'gsub("-", ".", names(crs$dataset))',
                              sep=""),
                        dsname, dspkg, adsname)

  # Start logging and executing the R code.
  startLog()
  appendLog(Rtxt("Load an R dataset."), assign.cmd)
  resetRattle()
  eval(parse(text=assign.cmd))

  # Use inherits() rather than class(...) != "data.frame": class() can
  # return a vector of length > 1 (e.g. c("tbl_df", "data.frame")),
  # which makes the original equality test a multi-element condition
  # (an error in R >= 4.2) and would vectorise the sprintf() below.
  if (! inherits(crs$dataset, "data.frame"))
  {
    errorDialog(sprintf(Rtxt("The selected dataset, '%s', from the '%s' package",
                             "is not of class data frame (the data type).",
                             "Its data class is '%s.'",
                             "This is not currently supported by %s",
                             "and so it can not be loaded. Perhaps choose a different",
                             "dataset from the library."),
                        adsname, dspkg, class(crs$dataset)[1], crv$appname))
    return(FALSE)
  }
  crs$dataname <- adsname
  crs$datapkg <- dspkg
  setMainTitle(crs$dataname)
  setStatusBar(Rtxt("The R package data is now available."))
  return(TRUE)
}
viewData <- function()
{
  # Display crs$dataset read-only: in an RGtk2Extras spreadsheet widget
  # when that package is available, otherwise printed into the textview
  # of the viewdata window described in rattle.glade.
  startLog(Rtxt("View the dataset."))
  if (packageIsAvailable("RGtk2Extras", Rtxt("view data in a spreadsheet")))
  {
    # 151115 We currently get the issue:
    #
    # Error in MakeDFEditWindow(.local, .local$theFrame, size.request, col.width) (from <text>#1) :
    #   could not find function "gtkTreePathNewFromString"
    #
    # This is a NAMESPACE issue and a workaround is to load
    # RGtk2Extras. Eventually need to work out the correct solution.
    lib.cmd <- "library(RGtk2Extras)"  # sprintf() with no format directives was redundant.
    appendLog(packageProvides("RGtk2Extras", "dfedit"), lib.cmd)
    eval(parse(text=lib.cmd))
    view.cmd <- paste('RGtk2Extras::dfedit(crs$dataset,\n',
                      ' ',
                      'dataset.name=Rtxt("Rattle Dataset"),\n',
                      ' ',
                      'size=c(800, 400))')
    appendLog(Rtxt("Please note that any edits will be ignored."), view.cmd)
    eval(parse(text=view.cmd))
  }
  else
  {
    # Locate the installed glade file, falling back to the current
    # directory when the package path lookup fails.
    result <- try(etc <- file.path(path.package(package="rattle")[1], "etc"),
                  silent=TRUE)
    if (inherits(result, "try-error"))
      crs$viewdataGUI <- gladeXMLNew("rattle.glade", root="viewdata_window")
    else
      crs$viewdataGUI <- gladeXMLNew(file.path(etc, "rattle.glade"),
                                     root="viewdata_window")
    gladeXMLSignalAutoconnect(crs$viewdataGUI)
    tv <- crs$viewdataGUI$getWidget("viewdata_textview")
    tv$modifyFont(RGtk2::pangoFontDescriptionFromString(crv$textview.font))
    # Widen the print width so rows are not wrapped, and restore the
    # option via on.exit so it is reset even if rendering fails.
    op <- options(width=10000)
    on.exit(options(op), add=TRUE)
    tv$getBuffer()$setText(collectOutput("print(crs$dataset)"))
    crs$viewdataGUI$getWidget("viewdata_window")$
      setTitle(paste(crv$appname, ": ", Rtxt("Data Viewer"), sep=""))
  }
}
editData <- function()
{
# Interactively edit (or create, when none is loaded) crs$dataset,
# preferring the RGtk2Extras spreadsheet editor over base edit().
# Edits overwrite the current dataset in place.
# Check if there is a model first and then warn about losing it.
if (! overwriteModel()) return()
# Start logging.
startLog(Rtxt("Edit the dataset."))
# Generate command to execute.
assign.cmd <- if (is.null(crs$dataset))
'crs$dataset <- edit(data.frame())'
else if (packageIsAvailable("RGtk2Extras"))
paste('crs$dataset <- RGtk2Extras::dfedit(crs$dataset,\n',
' ',
'dataset.name=Rtxt("Rattle Dataset"),\n',
' ',
'size=c(800, 400))')
else
'crs$dataset <- edit(crs$dataset)'
# Update the log with the command that is run.
appendLog(Rtxt("Note that edits overwrite the current dataset."), assign.cmd)
# These are needed because resetRattle clears everything - save the
# dataset, reset all state, then restore the dataset before editing.
ds <- crs$dataset
resetRattle()
crs$dataset <- ds
eval(parse(text=assign.cmd))
crs$dataname <- "dataset"
# TODO fn <- theWidget("data_filechooserbutton")$getValue()
setMainTitle(crs$dataname)
# Update the select treeview and samples.
createVariablesModel(colnames(crs$dataset))
# Ensure we are viewing the treeview tab rather than the Welcome
# message.
crv$DATA.DISPLAY.NOTEBOOK$setCurrentPage(crv$DATA.DISPLAY.TREEVIEW.TAB)
setStatusBar(Rtxt("The supplied data is now available."))
set.cursor()
}
exportDataTab <- function()
{
# Export the current dataset (or just the training sample, when
# sampling is active) to a CSV file chosen via a GTK save dialog.
# Don't export an empty dataset.
if (is.null(crs$dataset))
{
errorDialog(Rtxt("There is no dataset loaded, and so",
"there is nothing to export."))
return(FALSE)
}
sampling <- theWidget("data_sample_checkbutton")$getActive()
# Obtain filename to write the dataset as CSV to.
dialog <- RGtk2::gtkFileChooserDialog("Export Dataset", NULL, "save",
"gtk-cancel", RGtk2::GtkResponseType["cancel"],
"gtk-save", RGtk2::GtkResponseType["accept"])
dialog$setDoOverwriteConfirmation(TRUE)
# Suggest a default filename: "<dataname>_sample.csv" or
# "<dataname>_saved.csv" depending on whether sampling is active.
if(not.null(crs$dataname))
dialog$setCurrentName(paste(get.stem(crs$dataname), "_",
ifelse(sampling, "sample", "saved"),
".csv", sep=""))
# 081222 I get an error on doing the following:
#
### dialog$setCurrentFolder(crs$dwd)
#
# (R:14058): libgnomevfs-CRITICAL **:
# gnome_vfs_get_uri_from_local_path: assertion `g_path_is_absolute
# (local_full_path)' failed
#
# I note that crs$dwd is
# "file:///usr/local/lib/R/site-library/rattle/csv" which is not
# what I want anyhow!
ff <- RGtk2::gtkFileFilterNew()
ff$setName(Rtxt("CSV Files"))
ff$addPattern("*.csv")
dialog$addFilter(ff)
ff <- RGtk2::gtkFileFilterNew()
ff$setName(Rtxt("All Files"))
ff$addPattern("*")
dialog$addFilter(ff)
# The dialog must be destroyed on both paths before continuing.
if (dialog$run() == RGtk2::GtkResponseType["accept"])
{
save.name <- dialog$getFilename()
dialog$destroy()
}
else
{
dialog$destroy()
return()
}
# Force a .csv extension if the user did not supply one.
if (tolower(get.extension(save.name)) != "csv")
save.name <- sprintf("%s.csv", save.name)
# If sample is active then only save the sample.
if (sampling)
writeCSV(crs$dataset[crs$sample,], save.name)
else
writeCSV(crs$dataset, save.name)
if (sampling)
msg <- Rtxt("The training dataset has been exported to %s.")
else
msg <- Rtxt("The dataset has been exported to %s.")
setStatusBar(sprintf(msg, save.name))
}
########################################################################
# DATA ROLES
#
# The DATA Execute will perform a sampling of the data and stores
# the indices in crs$sample. It will also build the list of variable
# roles and stores these in crs$input, crs$ident, crs$ignore,
# crs$target, and crs$risk. This is then used in MODEL to limit the
# dataset in the call to rpart to just the crs$input variables. In
# EVALUATE the crs$risk is used for the Risk Chart.
#------------------------------------------------------------------------
# Interface
on_data_sample_checkbutton_toggled <- function(button)
{
  # Enable or disable every sampling-related widget to follow the
  # checkbutton state, then discard any existing sample/partition
  # indices since they no longer apply.
  active <- button$getActive()
  sampling.widgets <- c("sample_percentage_spinbutton",
                        "sample_percentage_label",
                        "sample_count_spinbutton",
                        "sample_count_label",
                        "sample_seed_spinbutton",
                        "sample_seed_button",
                        "data_sample_entry")
  for (w in sampling.widgets) theWidget(w)$setSensitive(active)
  # 090617 Deliberately do not show/hide "explore_sample_label" in the
  # tool bar - mixing information with actions there is conceptually
  # not a good thing to do. [Rado]
  crs$sample <- crs$train <- crs$validate <- crs$test <- NULL
  setStatusBar()
}
on_sample_percentage_spinbutton_changed <- function(action, window)
{
  # Keep the sample count spinbutton in step with the percentage
  # spinbutton: count = round(nrow(dataset) * percentage / 100).
  if (is.null(crs$dataset)) return()
  pct <- theWidget("sample_percentage_spinbutton")$getValue()
  wanted <- round(nrow(crs$dataset) * pct / 100)
  shown <- theWidget("sample_count_spinbutton")$getValue()
  # Only push the value when it differs, avoiding a signal loop with
  # the count spinbutton's own changed handler.
  if (wanted != shown)
    theWidget("sample_count_spinbutton")$setValue(wanted)
  setStatusBar()
}
on_sample_count_spinbutton_changed <- function(action, window)
{
  # Keep the sample percentage spinbutton in step with the count
  # spinbutton: percentage = round(100 * count / nrow(dataset)).
  if (is.null(crs$dataset)) return()
  count <- theWidget("sample_count_spinbutton")$getValue()
  wanted <- round(100 * count / nrow(crs$dataset))
  shown <- theWidget("sample_percentage_spinbutton")$getValue()
  # Only push the value when it differs, avoiding a signal loop with
  # the percentage spinbutton's own changed handler.
  if (wanted != shown)
    theWidget("sample_percentage_spinbutton")$setValue(wanted)
  setStatusBar()
}
on_sample_seed_button_clicked <- function(button)
{
  # Load a fresh random seed (an integer in [0, 1000000)) into the
  # sample seed spinbutton.
  new.seed <- as.integer(runif(1, 0, 1000000))
  theWidget("sample_seed_spinbutton")$setValue(new.seed)
}
item.toggled <- function(cell, path.str, model)
{
# Handler for toggling a role checkbox in the variables treeview.
# Setting a role unchecks all other roles on the same row, so the
# role columns behave like radio buttons.
# The data passed in is the model used in the treeview.
RGtk2::checkPtrType(model, "GtkTreeModel")
# Extract the column number of the model that has changed.
column <- cell$getData("column")
# Get the current value of the corresponding flag
path <- RGtk2::gtkTreePathNewFromString(path.str) # Current row
iter <- model$getIter(path)$iter # Iter for the row
current <- model$get(iter, column)[[1]] # Get data from specific column
# Only invert the current value if it is False - work like a radio button
if (! current)
{
model$set(iter, column, !current)
# Uncheck all other Roles for this row, acting like radio buttons.
columns <- crv$COLUMNstart:crv$COLUMNend
lapply(setdiff(columns, column), function(x) model$set(iter, x, FALSE))
# TODO Now fix up other buttons. Any in the same column, if it is
# Target, must be unchecked and the corresponding row made
# Ignore. Currently, just check this on Execute and complain. Can
# we use groups?
}
# 100829 Check if we need to toggle the Weight Calculator - note
# that this is done each time an item is toggled because we don't
# get called when weight is untoggled?
# if (names(column) == "weight")
# The Weight Calculator entry is only usable when no variable has
# the weight role; a selected weight variable takes precedence.
if (length(getSelectedVariables("weight")) > 0)
{
theWidget("weight_label")$setSensitive(FALSE)
theWidget("weight_entry")$setSensitive(FALSE)
}
else
{
theWidget("weight_label")$setSensitive(TRUE)
theWidget("weight_entry")$setSensitive(TRUE)
}
}
on_variables_toggle_ignore_button_clicked <- function(action, window)
{
# Set the ignore flag for all selected variables, and ensure all
# other roles are unchecked.
#ptm <- proc.time()
set.cursor("watch")
tree.selection <- theWidget("select_treeview")$getSelection()
# Under MS/Windows with Terminal Services to the host we get very
# slow redraws? Tried fixing it with freezeUpdates and thawUpdates
# but it had no impact. Changing 500 variables takes 5
# seconds. When connected over terminal services the elapsed time
# is 16 seconds, still with 5 seconds user time.
# theWidget("rattle_window")$getWindow()$freezeUpdates()
# 071113 Use the data parameter to avoid an RGtk2 bug in 2.12.1,
# fixed in next release.
tree.selection$selectedForeach(function(model, path, iter, data)
{
model$set(iter, crv$COLUMN[["ignore"]], TRUE)
columns <- setdiff(crv$COLUMNstart:crv$COLUMNend,
crv$COLUMN[["ignore"]])
# Timing indicates the for loop is slower on GNU/Linux but faster
# on MS/Windows 500! But the extra test also slows things down,
# so best not to conditionalise for now.
#if (isWindows())
for (c in columns)
if (model$get(iter, c)[[1]]) model$set(iter, c, FALSE)
#else
# lapply(columns, function(x) model$set(iter, x, FALSE))
return(FALSE) # Keep going through all rows
}, data=TRUE)
#cat("->Ig", proc.time() - ptm, "\n")
set.cursor()
# theWidget("rattle_window")$getWindow()$thawUpdates()
}
on_variables_toggle_input_button_clicked <- function(action, window)
{
# Set the input flag for all selected variables within the Select
# tab, and ensure all other roles for these variables are unchecked.
# Mirrors on_variables_toggle_ignore_button_clicked but for "input".
#ptm <- proc.time()
set.cursor("watch")
treeview <- theWidget("select_treeview")
tree.selection <- treeview$getSelection()
#theWidget("rattle_window")$getWindow()$freezeUpdates()
# Use the data parameter to avoid an RGtk2 bug in 2.12.1, fixed in
# next release. 071113
tree.selection$selectedForeach(function(model, path, iter, data)
{
model$set(iter, crv$COLUMN[["input"]], TRUE)
columns <- setdiff(crv$COLUMNstart:crv$COLUMNend,
crv$COLUMN[["input"]])
#if (isWindows())
for (c in columns)
if (model$get(iter, c)[[1]]) model$set(iter, c, FALSE)
#else
# lapply(columns, function(x) model$set(iter, x, FALSE))
return(FALSE) # Keep going through all rows
}, data=TRUE)
#cat("->In", proc.time() - ptm, "\n")
set.cursor()
#theWidget("rattle_window")$getWindow()$thawUpdates()
}
#----------------------------------------------------------------------
# Execution
executeSelectTab <- function(resample=TRUE)
{
# Note the user's variable role selections (input/target/risk/ident/
# ignore/weight), validate them, record them in crs$*, and update the
# sensitivity of the model-building widgets accordingly.
#
# resample = when TRUE, also regenerate the train/validate/test
# partitions via executeSelectSample().
# 080520 TODO May want to rename this as SELECT is no longer a tab
# but is now part of the DATA tab. Perhaps we call it
# resetSelections.
# Check for pre-requisites.
# Can not do any preparation if there is no dataset.
if (noDatasetLoaded()) return()
set.cursor("watch", Rtxt("Determining variable roles and characteristics..."))
startLog(Rtxt("Note the user selections."))
if (resample) executeSelectSample()
input <- getSelectedVariables("input")
target <- getSelectedVariables("target")
risk <- getSelectedVariables("risk")
ident <- getSelectedVariables("ident")
ignore <- getSelectedVariables("ignore")
weight <- getSelectedVariables("weight")
weights <- theWidget("weight_entry")$getText()
if (weights == "") weights <- NULL
# Fail if there is more than one target.
if (length(target) > 1)
{
errorDialog(sprintf(Rtxt("Multiple Targets have been identified (%s).",
"Only a single Target is allowed."),
paste(getSelectedVariables("target", FALSE), target,
sep=":", collapse=", ")))
return()
}
# Ask if the Target does not look like a target.
if (length(target))
target.levels <- length(levels(as.factor(crs$dataset[[target]])))
else
target.levels <- 0
# Fail if there is more than one risk.
if (length(risk) > 1)
{
errorDialog(sprintf(Rtxt("More than a single %s",
"variable has been identified (%s).",
"Only a single variable is allowed.\n",
"\nPlease change the role of one of the variables."),
ifelse(survivalTarget(), "Status", "Risk"),
paste(getSelectedVariables("risk", FALSE), risk,
sep=":", collapse=", ")))
return()
}
# Fail if the Risk column is not numeric.
if (length(risk) && ! is.numeric(crs$dataset[[risk]]))
{
errorDialog(sprintf(Rtxt("The variable selected for your %s (%s)",
"is not numeric.",
"\n\nPlease select a numeric variable."),
ifelse(survivalTarget(), "Status", "Risk"), risk))
return()
}
# Deal with weights.
# 100829 Fail if there is more than one weight selected. Note that
# once a weight is selected the Weight Calculator is not sensitive
# and so any Weight formula there will be ignored.
if (length(weight) > 1)
{
errorDialog(sprintf(Rtxt("Multiple Weights have been identified (%s).",
"Only a single Weight is allowed.\n",
"\nPlease reconfigure the roles."),
paste(getSelectedVariables("weight", FALSE), weight,
sep=":", collapse=", ")))
return()
}
else if (length(weight) == 1)
{
weights <- sprintf("crs$dataset$%s", weight)
}
else if (theWidget("weight_entry")$isSensitive() &&
not.null(weights) &&
nchar(weights) > 0)
{
# Parse the Weight Calculator formula: split it into identifier-like
# tokens, separate function names from variable names, and validate
# the variables against the dataset.
identifiers <- unlist(strsplit(weights, "[^a-zA-Z._]"))
identifiers <- identifiers[nchar(identifiers) > 0]
identifiers <- union(identifiers,identifiers) # Each var/id just once
# An identifier is treated as a function if evaluating class(<id>)
# in the current session yields "function"; everything else is
# assumed to be a variable name.
funs <- unlist(lapply(identifiers,
function(x)
{
try(eval(parse(text=sprintf("class(%s)", x))),
silent=TRUE) == "function"}))
vars <- ! funs
allvars <- union(input, union(target, union(risk, union(ident, ignore))))
for (i in seq_len(sum(vars)))
{
# Check for any missing variables
if (identifiers[vars][i] %notin% allvars)
{
errorDialog(sprintf(Rtxt("The Weight Calculator contains the variable %s",
"which is not known in the dataset."),
identifiers[vars][i]))
return()
}
# Check if Weight variables are not ignored, and inform user if not
if (identifiers[vars][i] %notin%
union(ident, union(target, union(ignore, risk))))
{
infoDialog(sprintf(Rtxt("You have used the variable %s",
"in the weights formula but it is an input.",
"This is unusual since it is both an input variable",
"and used to weight the outputs.",
"It is suggested that you ignore this variable."),
identifiers[vars][i]))
}
# For each Weights variable, replace with full reference to
# crs$dataset, since the variable is ignored.
weights <- gsub(identifiers[vars][i],
sprintf("crs$dataset$%s", identifiers[vars][i]),
weights)
}
}
#------------------------------------------------------------------------
# Record appropriate information.
crs$input <- input
crs$target <- target
crs$risk <- risk
crs$ident <- ident
crs$ignore <- ignore
crs$weights <- weights
crs$numeric <- colnames(crs$dataset)[getNumericVariables(type="indicies")]
crs$categoric <- getCategoricVariables(type="names")
# 091206 Add the information to the Log tab
# convertOneMany renders a character vector as R source: NULL when
# empty, a quoted string for one element, c("a", "b") otherwise.
convertOneMany <- function(x)
switch(min(length(x)+1, 3), 'NULL', sprintf('"%s"', x),
sprintf('c("%s")', paste(x, collapse='", "')))
appendLog(Rtxt("The following variable selections have been noted."),
'crs$input <- ', gsub("(([^,]*,){4})", "\\1\n ",
convertOneMany(input)),
'\n\ncrs$numeric <- ', gsub("(([^,]*,){4})", "\\1\n ",
convertOneMany(crs$numeric)),
'\n\ncrs$categoric <- ', gsub("(([^,]*,){4})", "\\1\n ",
convertOneMany(crs$categoric)),
'\n\ncrs$target <- ', convertOneMany(target),
'\ncrs$risk <- ', convertOneMany(risk),
'\ncrs$ident <- ', convertOneMany(ident),
'\ncrs$ignore <- ', convertOneMany(ignore),
'\ncrs$weights <- ', convertOneMany(weights))
# 090801 Update the transforms list, so that any transforms that are
# not ignore/ident will be noted as active. The status is used when
# exporting to XML since we want to keep ignored transforms (since
# they might be used in other transforms) but don't want them
# exported unnecessarily.
for (i in seq_along(crs$transforms))
if (names(crs$transforms)[i] %in% union(ident, ignore))
crs$transforms[[i]]$status <- "inactive"
else
crs$transforms[[i]]$status <- "active"
# Update MODEL targets
the.target <- ifelse(length(target), sprintf(Rtxt("Target: %s"), target),
Rtxt("No Target"))
the.risk <- ifelse(length(risk), sprintf(Rtxt("Status: %s"), risk),
Rtxt("No Risk"))
theWidget("explot_target_label")$setText(the.target)
theWidget("test_groupby_target_label")$setText(the.target)
theWidget("rpart_target_label")$setText(the.target)
theWidget("rf_target_label")$setText(the.target)
theWidget("svm_target_label")$setText(the.target)
# theWidget("gbm_target_label")$setText(the.target)
theWidget("ada_target_label")$setText(the.target)
theWidget("glm_target_label")$setText(the.target)
theWidget("nnet_target_label")$setText(the.target)
theWidget("model_survival_radiobutton")$setSensitive(TRUE)
theWidget("model_survival_time_var_label")$setText(sub(Rtxt("Target:"),
Rtxt("Time:"), the.target))
theWidget("model_survival_status_var_label")$setText(the.risk)
# Update MODEL weights
if (not.null(weights))
{
weights.display <- gsub('crs\\$dataset\\$', '', weights)
the.weight <- sprintf(Rtxt("Weights: %s"), weights.display)
# 080815 Just display Weights if there is a weights value, and
# empty otherwise.
# theWidget("model_tree_rpart_weights_label")$setText(the.weight)
theWidget("model_tree_rpart_weights_label")$setText(Rtxt("Weights in use."))
}
else
{
theWidget("model_tree_rpart_weights_label")$
setText("")
}
# 080413 Update MODEL types that are available.
# With more than two classes we can't use AdaBoost since the current
# package does not support more than 2 classes.
if (categoricTarget() && target.levels <= 2)
theWidget("boost_radiobutton")$setSensitive(TRUE)
else
theWidget("boost_radiobutton")$setSensitive(FALSE)
# Update various MODEL options
if (survivalTarget())
{
theWidget("model_survival_radiobutton")$setSensitive(TRUE)
theWidget("model_survival_radiobutton")$setActive(TRUE)
theWidget("rpart_radiobutton")$setSensitive(FALSE)
theWidget("boost_radiobutton")$setSensitive(FALSE)
theWidget("rf_radiobutton")$setSensitive(FALSE)
theWidget("svm_radiobutton")$setSensitive(FALSE)
theWidget("model_linear_radiobutton")$setSensitive(FALSE)
theWidget("nnet_radiobutton")$setSensitive(FALSE)
theWidget("all_models_radiobutton")$setSensitive(FALSE)
}
else if (categoricTarget())
{
theWidget("model_survival_radiobutton")$setSensitive(FALSE)
theWidget("rpart_radiobutton")$setSensitive(TRUE)
theWidget("rf_radiobutton")$setSensitive(TRUE)
theWidget("svm_radiobutton")$setSensitive(TRUE)
theWidget("model_linear_radiobutton")$setSensitive(TRUE)
theWidget("all_models_radiobutton")$setSensitive(TRUE)
# For linear models, if it is categoric and binomial then assume
# logistic regression (default to binmoial distribution and the
# logit link function) otherwise it is multinomial so assume
# poisson regression (default to poisson distribution and log link
# function).
theWidget("model_linear_poisson_radiobutton")$setSensitive(FALSE)
if (binomialTarget())
{
theWidget("model_linear_builder_label")$setText("glm (Logistic)")
theWidget("glm_linear_radiobutton")$setSensitive(FALSE)
theWidget("glm_gaussian_radiobutton")$setSensitive(FALSE)
theWidget("glm_logistic_radiobutton")$setSensitive(TRUE)
theWidget("glm_logistic_radiobutton")$setActive(TRUE)
theWidget("model_linear_probit_radiobutton")$setSensitive(TRUE)
theWidget("glm_multinomial_radiobutton")$setSensitive(FALSE)
theWidget("nnet_radiobutton")$setSensitive(TRUE)
theWidget("nnet_hidden_nodes_label")$setSensitive(TRUE)
theWidget("nnet_hidden_nodes_spinbutton")$setSensitive(TRUE)
theWidget("nnet_builder_label")$setText("nnet (0/1)")
}
else
{
theWidget("model_linear_builder_label")$setText("multinom")
theWidget("glm_linear_radiobutton")$setSensitive(FALSE)
theWidget("glm_gaussian_radiobutton")$setSensitive(FALSE)
theWidget("glm_logistic_radiobutton")$setSensitive(FALSE)
theWidget("model_linear_probit_radiobutton")$setSensitive(FALSE)
theWidget("glm_multinomial_radiobutton")$setSensitive(TRUE)
theWidget("glm_multinomial_radiobutton")$setActive(TRUE)
theWidget("nnet_radiobutton")$setSensitive(FALSE)
# I don't think these need tgo be done. We can't see the options
# when the nnet button is not sensitive
#theWidget("nnet_hidden_nodes_label")$setSensitive(FALSE)
#theWidget("nnet_hidden_nodes_spinbutton")$setSensitive(FALSE)
#theWidget("nnet_builder_label")$setText("")
}
}
else if (numericTarget())
{
theWidget("model_survival_radiobutton")$setSensitive(FALSE)
theWidget("rpart_radiobutton")$setSensitive(TRUE)
theWidget("rf_radiobutton")$setSensitive(TRUE) # 090301 Support regression
theWidget("svm_radiobutton")$setSensitive(FALSE)
# For linear models, if it is numeric we are probably going to use
# a lm so set the default family to nothing! This is becasue lm
# simply does gaussian and an identity link function.
# theWidget("glm_family_comboboxentry")$setActive(0)
theWidget("model_linear_radiobutton")$setSensitive(TRUE)
theWidget("model_linear_builder_label")$setText("lm")
theWidget("glm_linear_radiobutton")$setSensitive(TRUE)
theWidget("glm_linear_radiobutton")$setActive(TRUE)
theWidget("glm_gaussian_radiobutton")$setSensitive(TRUE)
theWidget("glm_logistic_radiobutton")$setSensitive(FALSE)
theWidget("model_linear_probit_radiobutton")$setSensitive(FALSE)
if (countTarget())
theWidget("model_linear_poisson_radiobutton")$setSensitive(TRUE)
else
theWidget("model_linear_poisson_radiobutton")$setSensitive(FALSE)
theWidget("glm_multinomial_radiobutton")$setSensitive(FALSE)
theWidget("nnet_radiobutton")$setSensitive(TRUE)
theWidget("nnet_hidden_nodes_label")$setSensitive(TRUE)
theWidget("nnet_hidden_nodes_spinbutton")$setSensitive(TRUE)
theWidget("nnet_builder_label")$setText("nnet (Regression)")
theWidget("all_models_radiobutton")$setSensitive(TRUE)
}
else # What else could it be? No target!
{
theWidget("rpart_radiobutton")$setSensitive(FALSE)
theWidget("rf_radiobutton")$setSensitive(FALSE)
theWidget("svm_radiobutton")$setSensitive(FALSE)
theWidget("model_linear_radiobutton")$setSensitive(FALSE)
theWidget("nnet_radiobutton")$setSensitive(FALSE)
theWidget("all_models_radiobutton")$setSensitive(FALSE)
theWidget("nnet_hidden_nodes_label")$setSensitive(FALSE)
theWidget("nnet_hidden_nodes_spinbutton")$setSensitive(FALSE)
# 080719 - remove, or else we can't sample and cluster!!
# theWidget("data_sample_checkbutton")$setActive(FALSE)
theWidget("glm_linear_radiobutton")$setSensitive(FALSE)
theWidget("glm_gaussian_radiobutton")$setSensitive(FALSE)
theWidget("model_linear_poisson_radiobutton")$setSensitive(FALSE)
theWidget("glm_logistic_radiobutton")$setSensitive(FALSE)
theWidget("model_linear_probit_radiobutton")$setSensitive(FALSE)
theWidget("glm_multinomial_radiobutton")$setSensitive(FALSE)
theWidget("model_survival_radiobutton")$setSensitive(FALSE)
}
# Update EVALUATE risk variable
theWidget("evaluate_risk_label")$setText(crs$risk)
# Update defaults that rely on the number of variables.
crv$rf.mtry.default <- floor(sqrt(length(crs$input)))
theWidget("rf_mtry_spinbutton")$setValue(crv$rf.mtry.default)
# 080505 We auto decide whether the target looks like a categoric
# or numeric, but if it ends up being a categoric (the user
# overrides with the type radio button) with very many classes,
# then complain!
if (not.null(target)
&& categoricTarget()
&& target.levels > crv$max.categories)
{
if (! questionDialog(sprintf(Rtxt("The column selected as a Target (%s)",
"will be treated as a categoric variable",
"since Target Type is set to Categoric.",
"\n\nThe variable has %d distinct values",
"whch is greater than the threshold of %d.",
"That is unusual and some algorithms will",
"take a long time.\n\nYou may like to",
"consider using fewer classes for the",
"target categoric variable or select",
"Target Type as Numeric.",
"\n\nDo you want to continue anyhow?"),
target, target.levels, crv$max.categories)))
return()
}
# 091206 Check that we have both a target and risk for a survival
# model.
if (not.null(target)
&& !length(risk)
&& survivalTarget())
{
errorDialog(Rtxt("You have chosen Survial models as the target type,",
"but no Status variable has been identified.",
"Survival models require both a Time and a Status",
"variable.\n",
"\nPlease identify the Status variable and then",
"Execute this tab once again."))
return(FALSE)
}
# Finished - update the status bar.
roles.msg <- sprintf(Rtxt("Roles noted. %d observations",
"and %d input variables."),
nrow(crs$dataset), length(crs$input))
if (length(crs$target) == 0)
model.msg <- Rtxt("No target thus no predictive",
"modelling nor sampling.")
else if (survivalTarget())
model.msg <- sprintf(Rtxt("The target is %s with %s. Survival models enabled."),
crs$target, crs$risk)
else if (categoricTarget())
model.msg <- sprintf(Rtxt("The target is %s. Categoric %d.",
"Classification models enabled."),
crs$target, target.levels)
else
model.msg <- sprintf(Rtxt("The target is %s. Numeric.",
"Regression models enabled."),
crs$target)
setStatusBar(roles.msg, model.msg)
}
executeSelectSample <- function()
{
# Build (or clear) the training/validation/test partition indices
# crs$sample, crs$train, crs$validate, crs$test from the sampling
# widgets, constructing and logging the R command that does so.
# Identify if there are observations without a target value. TODO
# 080426. I started looking at noting those observations with missing
# target values. This is recorded in crs$nontargets. Currently I'm
# not using it. The intention was to only sample from those with
# targets, etc. But the impacts need to be carefuly thought through.
#
# Perhaps the philosophy should go back to the fact that the user
# can split the dataset up themselves quite easily, and I do
# provide a mechanism for them to load their dataset for scoring.
#target <- getSelectedVariables("target")
#print(target)
#crs$nontargets <- which(is.na(crs$dataset[[target]]))
# Record that a random sample of the dataset is desired and the
# random sample itself is loaded into crs$sample. 080425 Whilst we
# are at it we also set the variable crs$targeted to be those row
# indicies that have a non NA target.
if (theWidget("data_sample_checkbutton")$getActive())
{
# Two sampling regimes: newSampling() uses a percentage triple
# (train/validate/test) from the sample entry; otherwise a single
# count from the spinbutton. Note vsize/tsize are only defined on
# the newSampling() path and are only used there.
if (newSampling())
{
ssizes <- parseSampleEntry()
ssize <- floor(nrow(crs$dataset) * ssizes[1] / 100)
vsize <- floor(nrow(crs$dataset) * ssizes[2] / 100)
if (ssizes[3] == 0)
tsize <- 0
else
tsize <- nrow(crs$dataset) - ssize - vsize
}
else
#ssize <- theWidget("sample_percentage_spinbutton")$getValue()
#ssize <- floor(nrow(crs$dataset)*ssize/100)
ssize <- theWidget("sample_count_spinbutton")$getValue()
seed <- theWidget("sample_seed_spinbutton")$getValue()
# Log the symbolic name when the seed is the application default.
if (seed == crv$seed) seed <- "crv$seed"
if (newSampling())
{
sample.cmd <- sprintf(paste("set.seed(%s)",
"\ncrs$nobs <- nrow(crs$dataset) # %d observations",
"\ncrs$sample <- crs$train <-",
"sample(nrow(crs$dataset),",
"%s*crs$nobs) # %d observations"),
seed, nrow(crs$dataset),
round(ssize/nrow(crs$dataset), 2), ssize)
if (vsize > 0)
sample.cmd <- sprintf(paste("%s\ncrs$validate <-",
"sample(setdiff(seq_len(nrow(crs$dataset)),",
"crs$train),",
"%s*crs$nobs) # %d observations"),
sample.cmd, round(vsize/nrow(crs$dataset), 2), vsize)
else
sample.cmd <- sprintf("%s\ncrs$validate <- NULL", sample.cmd)
if (tsize > 0)
sample.cmd <- sprintf(paste("%s\ncrs$test <-",
"setdiff(setdiff(seq_len(nrow(crs$dataset)),",
"crs$train), crs$validate)",
"# %d observations"), sample.cmd,
nrow(crs$dataset)-ssize-vsize)
else
sample.cmd <- sprintf("%s\ncrs$test <- NULL", sample.cmd)
}
else
{
# 100417 Even for RStat make sure we maintain crs$train as it is
# now starting to be used.
sample.cmd <- paste(sprintf("set.seed(%s)\n", seed),
"crs$sample <- crs$train <- sample(nrow(crs$dataset), ", ssize,
")", sep="")
}
appendLog(Rtxt("Build the training/validate/test datasets."), sample.cmd)
eval(parse(text=sample.cmd))
}
else
{
# Sampling disabled: clear all partitions and fall back to
# evaluating on the training data (or a scoring CSV if supplied).
crs$sample <- crs$train <- crs$validate <- crs$test <- NULL
theWidget("evaluate_validation_radiobutton")$setSensitive(FALSE)
theWidget("evaluate_testing_radiobutton")$setSensitive(FALSE)
if (exists("RATTLE.SCORE.IN") && not.null(RATTLE.SCORE.IN))
theWidget("evaluate_csv_radiobutton")$setActive(TRUE)
else
theWidget("evaluate_training_radiobutton")$setActive(TRUE)
}
crs$smodel <- vector()
# TODO For test/train, use sample,split from caTools?
## Set some defaults that depend on sample size.
#if (is.null(crs$sample))
# crv$rf.sampsize.default <- length(crs$dataset)
#else
# crv$rf.sampsize.default <- length(crs$sample)
#theWidget("rf_sampsize_spinbutton")$setValue(crv$rf.sampsize.default)
## 080520 Don't set the status bar - it is overwritten by the
## message about variable roles being noted.
## setStatusBar()
## if (theWidget("data_sample_checkbutton")$getActive())
## setStatusBar("The sample has been generated.",
## "There are", length(crs$sample), "observations.")
## else
## setStatusBar("Sampling is inactive.")
}
getSelectedVariables <- function(role, named=TRUE)
{
# DESCRIPTION
# Generate a list of variables marked with the specified role.
#
# ARGUMENTS
# role = a string naming the role to query on
# named = if TRUE return variable names as strings, if FALSE, numbers
#
# DETAILS The select_treeview, categorical_treeview and
# continuous_treeview are places where a variable can be identified
# as having a given role. Whilst the role of "ignore" is common
# across all three treeviews, only the ignore from the main
# select_treeview is considered. If a role is not found, simply
# return NULL, rather than an error (for no particular reason).
#
# ASSUMPTIONS The variable and number columns are assumed to be the
# same in each of crv$COLUMNS, crv$CATEGORICAL, and crv$CONTINUOUS.
variables <- NULL
type <- "logical"
# Map the role to the treeview model and flag column that record it.
if (role %in% c("input", "target", "risk", "ident", "ignore", "weight"))
{
model <- theWidget("select_treeview")$getModel()
rcol <- crv$COLUMN[[role]]
}
else if (role %in% c("boxplot", "hisplot", "cumplot", "benplot"))
{
model <- theWidget("continuous_treeview")$getModel()
rcol <- crv$CONTINUOUS[[role]]
}
else if (role %in% c("barplot", "dotplot", "mosplot"))
{
model <- theWidget("categorical_treeview")$getModel()
rcol <- crv$CATEGORICAL[[role]]
}
else if (role %in% c("paiplot"))
{
# Pairs plots draw on both the continuous and categorical treeviews.
model <- theWidget("continuous_treeview")$getModel()
rcol <- crv$CONTINUOUS[[role]]
model2 <- theWidget("categorical_treeview")$getModel()
rcol2 <- crv$CATEGORICAL[[role]]
}
else
return(NULL)
vcol <- crv$COLUMN[["variable"]]
ncol <- crv$COLUMN[["number"]]
# Walk every row of the model, accumulating (via <<-) the name or
# number of each variable whose role flag is set.
model$foreach(function(model, path, iter, data)
{
flag <- model$get(iter, rcol)[[1]]
if (named)
variable <- model$get(iter, vcol)[[1]]
else
variable <- model$get(iter, ncol)[[1]]
# if (type=="character")
# {
# if (role == "zero" && flag == "Zero/Missing")
# variables <<- c(variables, variable)
# if (role == "mean" && flag == "Mean")
# variables <<- c(variables, variable)
# if (role == "median" && flag == "Median")
# variables <<- c(variables, variable)
# }
# else
if (flag) variables <<- c(variables, variable)
return(FALSE) # Keep going through all rows
}, TRUE)
if (role %in% c("paiplot")) # we need to collect the categorical variables too
{
model2$foreach(function(model2, path, iter, data)
{
flag <- model2$get(iter, rcol2)[[1]]
if (named)
variable <- model2$get(iter, vcol)[[1]]
else
variable <- model2$get(iter, ncol)[[1]]
if (flag) variables <<- c(variables, variable)
return(FALSE) # Keep going through all rows
}, TRUE)
}
# Set the data parameter to TRUE to avoid an RGtk2 bug in 2.12.1, fixed in
# next release. 071117
# 091130 Apparently Gtk always returns UTF-8 strings (Acken
# Sakakibara). Thus we convert to the locale of the system.
variables <- iconv(variables, "UTF-8", localeToCharset()[1])
return(variables)
}
initialiseVariableViews <- function()
{
  # Define the data models for the various treeviews (the Data tab's
  # variable list, the Transform tab's impute list, and the Explore
  # tab's categorical and continuous lists) and populate each view
  # with its columns.
  #
  # The original implementation repeated the renderer/insert sequence
  # for every column; the two local helpers below capture the two
  # patterns (a plain text column and a toggle column) once.

  # Append a text column titled 'title' to 'view', displaying model
  # column 'col'.
  addTextColumn <- function(view, title, col)
  {
    renderer <- RGtk2::gtkCellRendererTextNew()
    renderer$set(xalign = 0.0)
    view$insertColumnWithAttributes(-1, title, renderer, text = col)
  }

  # Append a toggle column titled 'title' to 'view'.  'coldef' is the
  # single-bracket (named) column entry stored on the renderer for the
  # toggle handler to read; 'colnum' is the model column index driving
  # the active state; 'handler' is connected to "toggled" with 'data'
  # (the backing model) as user data.  radio=TRUE renders the toggle
  # as a radio button (mutually exclusive roles on the Data tab).
  addToggleColumn <- function(view, title, coldef, colnum, handler, data,
                              radio=FALSE)
  {
    renderer <- RGtk2::gtkCellRendererToggleNew()
    renderer$set(xalign = 0.0)
    if (radio) renderer$set(radio = TRUE)
    renderer$set(width = 60)
    renderer$setData("column", coldef)
    RGtk2::connectSignal(renderer, "toggled", handler, data)
    view$insertColumnWithAttributes(-1, title, renderer, active = colnum)
  }

  # The list stores backing each treeview.  Column types must match
  # the crv$COLUMN / crv$IMPUTE / crv$CATEGORICAL / crv$CONTINUOUS
  # layouts used throughout Rattle.
  model <- RGtk2::gtkListStoreNew("gchararray", "gchararray", "gchararray",
                                  "gboolean", "gboolean", "gboolean", "gboolean",
                                  "gboolean", "gboolean", "gchararray")
  impute <- RGtk2::gtkListStoreNew("gchararray", "gchararray", "gchararray")
  continuous <- RGtk2::gtkListStoreNew("gchararray", "gchararray",
                                       "gboolean", "gboolean",
                                       "gboolean", "gboolean", "gboolean",
                                       "gchararray")
  categorical <- RGtk2::gtkListStoreNew("gchararray", "gchararray",
                                        "gboolean", "gboolean", "gboolean",
                                        "gboolean", "gchararray")

  # View the models through the corresponding treeviews.
  treeview <- theWidget("select_treeview")
  treeview$setModel(model)
  impview <- theWidget("impute_treeview")
  impview$setModel(impute)
  catview <- theWidget("categorical_treeview")
  catview$setModel(categorical)
  conview <- theWidget("continuous_treeview")
  conview$setModel(continuous)

  # Data tab: row number, variable name, data type, one radio toggle
  # per role, then the comment column.  Column insertion order defines
  # display order.
  addTextColumn(treeview, Rtxt("No."), crv$COLUMN[["number"]])
  addTextColumn(treeview, Rtxt("Variable"), crv$COLUMN[["variable"]])
  addTextColumn(treeview, Rtxt("Data Type"), crv$COLUMN[["type"]])
  roles <- c(input="Input", target="Target", risk="Risk",
             ident="Ident", ignore="Ignore", weight="Weight")
  for (role in names(roles))
    addToggleColumn(treeview, Rtxt(roles[[role]]), crv$COLUMN[role],
                    crv$COLUMN[[role]], item.toggled, model, radio=TRUE)
  addTextColumn(treeview, Rtxt("Comment"), crv$COLUMN[["comment"]])

  # Impute view: number, name, and a descriptive comment.
  addTextColumn(impview, Rtxt("No."), crv$IMPUTE[["number"]])
  addTextColumn(impview, Rtxt("Variable"), crv$IMPUTE[["variable"]])
  addTextColumn(impview, Rtxt("Data Type and Number Missing"),
                crv$IMPUTE[["comment"]])

  # Categorical explore view: the barplot/dotplot/mosaic/pairs toggles.
  addTextColumn(catview, Rtxt("No."), crv$CATEGORICAL[["number"]])
  addTextColumn(catview, Rtxt("Variable"), crv$CATEGORICAL[["variable"]])
  cat.plots <- c(barplot="Bar Plot", dotplot="Dot Plot",
                 mosplot="Mosaic", paiplot="Pairs")
  for (plot in names(cat.plots))
    addToggleColumn(catview, Rtxt(cat.plots[[plot]]), crv$CATEGORICAL[plot],
                    crv$CATEGORICAL[[plot]], cat_toggled, categorical)
  addTextColumn(catview, Rtxt("Levels"), crv$CATEGORICAL[["comment"]])

  # Continuous explore view: box/histogram/cumulative/benford/pairs.
  addTextColumn(conview, Rtxt("No."), crv$CONTINUOUS[["number"]])
  addTextColumn(conview, Rtxt("Variable"), crv$CONTINUOUS[["variable"]])
  con.plots <- c(boxplot="Box Plot", hisplot="Histogram",
                 cumplot="Cumulative", benplot="Benford", paiplot="Pairs")
  for (plot in names(con.plots))
    addToggleColumn(conview, Rtxt(con.plots[[plot]]), crv$CONTINUOUS[plot],
                    crv$CONTINUOUS[[plot]], con_toggled, continuous)
  addTextColumn(conview, Rtxt("Min; Median/Mean; Max"),
                crv$CONTINUOUS[["comment"]])

  ## Allow multiple selections in every view.
  treeview$getSelection()$setMode("multiple")
  impview$getSelection()$setMode("multiple")
  catview$getSelection()$setMode("multiple")
  conview$getSelection()$setMode("multiple")
}
createVariablesModel <- function(variables, input=NULL, target=NULL,
                                 risk=NULL, ident=NULL, ignore=NULL, weight=NULL,
                                 zero=NULL, mean=NULL,
                                 boxplot=NULL,
                                 hisplot=NULL, cumplot=NULL, benplot=NULL,
                                 barplot=NULL, dotplot=NULL, mosplot=NULL,
                                 paiplot=NULL,
                                 autoroles=TRUE)
{
  # Set up the initial information about variables for use throughout
  # Rattle, including the Data tab's variable model, the Explore tab's
  # categorical and continuous models, and the Modelling tab defaults
  # where they depend on the dataset sizes.
  #
  # Any values supplied for input, target, risk, ident, ignore,
  # boxplot, hisplot, cumplot, benplot, barplot, dotplot, and
  # mosplot arguments should be lists of variable names (list of
  # strings).  With autoroles=TRUE, roles not supplied are guessed
  # from variable-name prefixes (TARGET, TIME, IMP_, ID, IGNORE,
  # RISK, STATUS, EVENT) and from data heuristics.

  set.cursor("watch", Rtxt("Summarising the variables..."))

  # Retrieve the models backing the treeviews.

  model <- theWidget("select_treeview")$getModel()
  impute <- theWidget("impute_treeview")$getModel()
  categorical <- theWidget("categorical_treeview")$getModel()
  continuous <- theWidget("continuous_treeview")$getModel()

  # 080303 Automatically identify a default target if none are
  # identified as a target (by beginning with TARGET or TIME for
  # SURVIVAL data) in the variables. Heuristic is - the last or first
  # if it's a factor with few levels, or has only a few values. Then
  # the treeview model will record this choice, and we set the
  # appropriate labels with this, and record it in crs.

  survival.model <- theWidget("model_survival_radiobutton")$getActive()

  given.target <- c(which(substr(variables, 1, 6) == "TARGET"),
                    if (survival.model) which(substr(variables, 1, 4) == "TIME"))

  if (autoroles && length(given.target) > 0) target <- variables[given.target[1]]

  if (autoroles && is.null(target))
  {
    # Find the last variable that is not an IMP (imputed). This is
    # just a general heuristic, and works particularly for imputation
    # performed in Rattle. Should also do this for first, and also for
    # IGNORE variables.

    last.var <- length(variables)
    while (last.var > 1 && substr(variables[last.var], 1, 4) == "IMP_")
    {
      last.var <- last.var - 1
    }

    # A candidate target is a column with between 2 and 10 distinct
    # values: try the last non-imputed column, then the first column,
    # then each remaining column in turn.  -1 flags "none found".
    target <- -1
    if ((is.factor(crs$dataset[,last.var]) &&
         length(levels(crs$dataset[,last.var])) > 1 &&
         length(levels(crs$dataset[,last.var])) < 11)
        || (length(levels(as.factor(crs$dataset[,last.var]))) < 11
            && length(levels(as.factor(crs$dataset[,last.var]))) > 1))
      target <- last.var
    else if ((is.factor(crs$dataset[,1]) &&
              length(levels(crs$dataset[,1])) > 1 &&
              length(levels(crs$dataset[,1])) < 11)
             || (length(levels(as.factor(crs$dataset[,1]))) < 11
                 && length(levels(as.factor(crs$dataset[,1]))) > 1))
      target <- 1
    else
      for (i in 2:(length(variables)-1))
      {
        if ((is.factor(crs$dataset[,i]) &&
             length(levels(crs$dataset[,i])) > 1 &&
             length(levels(crs$dataset[,i])) < 11)
            || (length(levels(as.factor(crs$dataset[,i]))) < 11
                && length(levels(as.factor(crs$dataset[,i]))) > 1))
        {
          target <- i
          break
        }
      }

    if (target != -1)
      target <- variables[target]
    else
      target <- NULL
  }

  # Determine the list of input variables so far (i.e., not dealing
  # with ignore and risk yet).

  if (is.null(input)) input <- variables
  input <- setdiff(input, target)

  # Update the Model tab with the selected default target.

  the.target <- ifelse(length(target), sprintf(Rtxt("Target: %s"), target),
                       Rtxt("No Target"))
  theWidget("explot_target_label")$setText(the.target)
  theWidget("glm_target_label")$setText(the.target)
  theWidget("rpart_target_label")$setText(the.target)
  ## theWidget("gbm_target_label")$setText(the.target)
  theWidget("ada_target_label")$setText(the.target)
  theWidget("rf_target_label")$setText(the.target)
  theWidget("svm_target_label")$setText(the.target)
  theWidget("nnet_target_label")$setText(the.target)

  ## Build the Variables treeview model with each variable's INPUT set
  ## to TRUE and all else FALSE. If the variable has only a single
  ## value then it defaults to IGNORE, and if it is a factor and has
  ## as many distinct values as there are rows, then also default to
  ## IGNORE.

  for (i in seq_along(variables))
  {
    iter <- model$append()$iter
    cl <- class(crs$dataset[[variables[i]]])

    # 110312 There is a case where cl might be "character". This was
    # noticed, for example, when loading a .RData file with a column
    # which was character. Seems like simply converting this to factor
    # is appropriate.

    if ("character" %in% cl)
    {
      crs$dataset[[variables[i]]] <- as.factor(crs$dataset[[variables[i]]])
      cl <- class(crs$dataset[[variables[i]]])
    }

    # 101004 Treat ordered factors as plain factors since ordered
    # factors in weather AUS were being dropped from the Descriptions
    # option of Explore.

    if (length(cl) == 2 && cl[1] == "ordered" && cl[2] == "factor")
      cl <- "factor"

    # First check for special variable names, then fall back to data
    # heuristics, accumulating the role lists across iterations.

    if (autoroles)
    {
      if (paste("IMP_", variables[i], sep="") %in% variables)
      {
        # This works with SAS/EM IMPutations and Rattle's imputations,
        # which add the IMP_ at the beginning of the name of any
        # imputed variables. These will be ignored as they will have
        # been replaced by another variable.

        ignore <- c(ignore, variables[i])
      }
      else if (substr(variables[i], 1, 2) == "ID")
      {
        ident <- c(ident, variables[i])
      }
      else if (substr(variables[i], 1, 6) == "IGNORE")
      {
        ignore <- c(ignore, variables[i])
      }
      else if (variables[i] == "risk" ||
               substr(variables[i], 1, 4) == "RISK" ||
               substr(variables[i], 1, 6) == "STATUS" ||
               substr(variables[i], 1, 5) == "EVENT")
      {
        risk <- c(risk, variables[i])
      }
      else if ("factor" %in% cl)
      {
        lv <- length(levels(crs$dataset[[variables[i]]]))
        if (nrow(crs$dataset) > crv$ident.min.rows && lv == nrow(crs$dataset))
        {
          # As many levels as rows: treat as an identifier.
          cl <- "ident"
          ident <- c(ident, variables[i])
        }
        else if (lv == 1)
        {
          cl <- "constant"
          ignore <- c(ignore, variables[i])
        }
      }
      else
      {
        lv <- length(levels(as.factor(crs$dataset[[variables[i]]])))
        # 090704 Start supporting a Date format.
        if (length(intersect(c("integer", "POSIXt"), cl)) &&
            nrow(crs$dataset) > crv$ident.min.rows &&
            lv == nrow(crs$dataset))
        {
          cl <- "ident"
          ident <- c(ident, variables[i])
        }
        else if (all(is.na(crs$dataset[[variables[i]]])))
        {
          cl <- "missing"
          ignore <- c(ignore, variables[i])
        }
        else if (sd(crs$dataset[[variables[i]]], na.rm=TRUE) %in% c(NA, 0))
        {
          # sd is NA if all data items are NA.
          cl <- "constant"
          ignore <- c(ignore, variables[i])
        }
      }
    }

    # Fix any doubling up of roles.

    input <- setdiff(input, target)
    if (length(target) && length(ident) && target %in% ident)
      target <- NULL
    input <- setdiff(setdiff(setdiff(input, ignore), ident), risk)

    missing.count <- sum(is.na(crs$dataset[[variables[i]]]))
    unique.count <- length(unique(na.omit(crs$dataset[[variables[i]]])))
    unique.value <- unique(crs$dataset[[variables[i]]])
    numeric.var <- is.numeric(crs$dataset[[variables[i]]])
    possible.categoric <- (unique.count <= crv$max.categories ||
                           theWidget("data_target_categoric_radiobutton")$
                           getActive())

    # Convert internal class to printable form.

    prcl <- cl[1]
    prcl <- gsub("constant", Rtxt("Constant"), prcl)
    prcl <- gsub("ident", Rtxt("Ident"), prcl)
    prcl <- gsub("factor", Rtxt("Categoric"), prcl)
    prcl <- gsub("ordered", Rtxt("Categoric"), prcl)
    prcl <- gsub("integer", Rtxt("Numeric"), prcl)
    prcl <- gsub("numeric", Rtxt("Numeric"), prcl)

    # Every variable goes into the VARIABLES treeview.

    model$set(iter,
              crv$COLUMN["number"], i,
              crv$COLUMN["variable"], variables[i],
              crv$COLUMN["type"], prcl,
              crv$COLUMN["input"], variables[i] %in% input,
              crv$COLUMN["target"], variables[i] %in% target,
              crv$COLUMN["risk"], variables[i] %in% risk,
              crv$COLUMN["ident"], variables[i] %in% ident,
              crv$COLUMN["ignore"], variables[i] %in% ignore,
              crv$COLUMN["weight"], variables[i] %in% weight,
              crv$COLUMN["comment"], paste(sprintf(Rtxt("Unique: %d "),
                                                   unique.count),
                                           ifelse(missing.count > 0,
                                                  sprintf(Rtxt("Missing: %d "),
                                                          missing.count), ""),
                                           ifelse(prcl == "constant",
                                                  sprintf(Rtxt("Value: %s "),
                                                          unique.value), ""),
                                           sep=""))

    # Selected variables go into the other treeviews.

    if (missing.count > -1) # Always TRUE: this treeview has become
                            # used for all TRANSFORM operations, so it
                            # must include all variables, not just
                            # ones with missing values.
    {
      # Check if it can be exported to PMML. 131020 Assume now that
      # all can be exported (i.e., do not include a message). The test
      # is removed from pmml and it was ugly anyhow.

      etype <- ""

      # Generate correct Rattle terminology for the variable
      # class. 090731 We denote an integer as Numeric, to be
      # consistent throughout Rattle.

      dtype <- paste("A ", cl, " variable")
      if (cl == "integer")
        dtype <- sprintf(Rtxt("Numeric [%d to %d; unique=%d; mean=%d; median=%d%s%s]"),
                         min(crs$dataset[[variables[i]]], na.rm=TRUE),
                         max(crs$dataset[[variables[i]]], na.rm=TRUE),
                         unique.count,
                         as.integer(mean(crs$dataset[[variables[i]]],
                                         na.rm=TRUE)),
                         as.integer(median(crs$dataset[[variables[i]]],
                                           na.rm=TRUE)),
                         ifelse(sum(is.na(crs$dataset[[variables[i]]])),
                                sprintf(Rtxt("; miss=%d"),
                                        sum(is.na(crs$dataset[[variables[i]]]))),
                                ""),
                         ifelse(variables[i] %in% ignore, Rtxt("; ignored"), ""))
      else if (cl == "numeric")
        dtype <- sprintf(Rtxt("Numeric [%.2f to %.2f; unique=%d; mean=%.2f; median=%.2f%s%s]"),
                         min(crs$dataset[[variables[i]]], na.rm=TRUE),
                         max(crs$dataset[[variables[i]]], na.rm=TRUE),
                         unique.count,
                         mean(crs$dataset[[variables[i]]], na.rm=TRUE),
                         median(crs$dataset[[variables[i]]], na.rm=TRUE),
                         ifelse(missing.count > 0,
                                sprintf(Rtxt("; miss=%d"), missing.count), ""),
                         ifelse(variables[i] %in% ignore, Rtxt("; ignored"), ""))
      else if (substr(cl, 1, 6) == "factor")
        dtype <- sprintf(Rtxt("Categorical [%s levels%s%s]"),
                         length(levels(crs$dataset[[variables[i]]])),
                         ifelse(missing.count > 0,
                                sprintf(Rtxt("; miss=%d"), missing.count), ""),
                         ifelse(variables[i] %in% ignore, Rtxt("; ignored"), ""))

      # Generate text for the missing values bit.

      if (missing.count > 0)
        mtext <- sprintf(Rtxt(" %d missing values"), missing.count)
      else
        mtext <- ""

      imp.options <- RGtk2::gtkListStoreNew("gchararray")
      imp.options.iter <- imp.options$append()$iter
      imp.options$set(imp.options.iter, 0, "xx")
      combo <- RGtk2::gtkComboBoxNewWithModel(imp.options, 0)

      impiter <- impute$append()$iter
      impute$set(impiter,
                 crv$IMPUTE["number"], i,
                 crv$IMPUTE["variable"], variables[i],
                 crv$IMPUTE["comment"], sprintf("%s%s.", dtype, etype))
    }

    if (strsplit(cl, " ")[[1]][1] == "factor")
    {
      ## For the IMP_ and IGNORE_ variables we don't get a chance
      ## above to add in the number of levels, so do it here.

      if (cl == "factor")
        cl <- paste(cl, length(levels(crs$dataset[[variables[i]]])))

      catiter <- categorical$append()$iter
      categorical$set(catiter,
                      crv$CATEGORICAL["number"], i,
                      crv$CATEGORICAL["variable"], variables[i],
                      crv$CATEGORICAL["barplot"], variables[i] %in% barplot,
                      crv$CATEGORICAL["dotplot"], variables[i] %in% dotplot,
                      crv$CATEGORICAL["mosplot"], variables[i] %in% mosplot,
                      crv$CATEGORICAL["paiplot"], variables[i] %in% paiplot,
                      crv$CATEGORICAL["comment"],
                      sprintf("%s", strsplit(cl, " ")[[1]][2]))
    }

    if (cl == "integer" || cl == "numeric")
    {
      coniter <- continuous$append()$iter
      continuous$set(coniter,
                     crv$CONTINUOUS["number"], i,
                     crv$CONTINUOUS["variable"], variables[i],
                     crv$CONTINUOUS["boxplot"], variables[i] %in% boxplot,
                     crv$CONTINUOUS["hisplot"], variables[i] %in% hisplot,
                     crv$CONTINUOUS["cumplot"], variables[i] %in% cumplot,
                     crv$CONTINUOUS["benplot"], variables[i] %in% benplot,
                     crv$CONTINUOUS["paiplot"], variables[i] %in% paiplot,
                     crv$CONTINUOUS["comment"],
                     sprintf("%.2f; %.2f/%.2f; %.2f",
                             min(crs$dataset[,i], na.rm=TRUE),
                             median(crs$dataset[,i], na.rm=TRUE),
                             mean(crs$dataset[,i], na.rm=TRUE),
                             max(crs$dataset[,i], na.rm=TRUE)))
    }
  }

  # Record the resolved roles for use throughout Rattle.

  crs$target <- target
  crs$input <- input
  crs$ident <- ident
  crs$ignore <- ignore
  crs$risk <- risk

  # 091206 Set the default target type.
  # 091206 If the target is TIME... and risk is STATUS... or
  # EVENT... then enable the Survival radiobutton.

  if (! length(target))
    theWidget("data_target_auto_radiobutton")$setActive(TRUE)
  else if (length(target) && length(risk) &&
           substr(target, 1, 4) == "TIME" &&
           (substr(risk, 1, 6) == "STATUS" ||
            # BUGFIX: this previously tested substr(variables[i], ...)
            # using the stale loop index i; the parallel STATUS test
            # and the comment above show it must inspect risk.
            substr(risk, 1, 5) == "EVENT"))
    theWidget("data_target_survival_radiobutton")$setActive(TRUE)
  else
    # Unset them all - not sure we should be here ever? 091223 Resume
    # to this being the default.
    theWidget("data_target_auto_radiobutton")$setActive(TRUE)

  # Perform other setups associated with a new dataset.

  crv$rf.mtry.default <- floor(sqrt(ncol(crs$dataset)))
  theWidget("rf_mtry_spinbutton")$setValue(crv$rf.mtry.default)
}
#----------------------------------------------------------------------
#
# Support
#
getIncludedVariables <- function(numonly=FALSE, listall=FALSE, risk=FALSE, target=TRUE)
{
  # DESCRIPTION
  # Generate a numeric list of variables not ignored.
  #
  # ARGUMENTS
  # numonly = only include numeric variables
  # listall = don't simplify a full list to NULL
  # risk    = include any risk variable in the returned list
  # target  = include any target variable in the returned list
  #
  # RETURNS
  # A string of comma separated numbers, or NULL when the selection
  # covers every candidate column (so the dataset does not need to be
  # indexed to subset the variables).
  #
  # 20110102 TODO Stop using this function, or else have this function
  # always return the string "c(crs$input, crs$target)" etc, as
  # appropriate, so we use symbolic names rather than lists of
  # variable numbers.
  #
  # TODO The NULL convention is ambiguous: we can't tell whether it
  # means all variables or no variables!

  # Gather the indicies of the inputs plus, when requested, the
  # target and risk variables.
  wanted <- union(getVariableIndicies(crs$input),
                  union(if (target) getVariableIndicies(crs$target),
                        if (risk) getVariableIndicies(crs$risk)))

  # Candidate columns: every column, or just the numeric ones.
  candidates <- seq_len(ncol(crs$dataset))
  if (numonly)
    candidates <- candidates[as.logical(sapply(crs$dataset, is.numeric))]

  if (! listall && setequal(wanted, candidates))
    NULL
  else
    simplifyNumberList(intersect(candidates, wanted))
}
inputVariables <- function(numonly=FALSE)
{
  # Return, as a comma separated list (as a string), the list of input
  # variable indicies.  If the inputs cover every candidate column
  # then return NULL (as the dataset does not then need to be indexed
  # to subset the variables).
  #
  # numonly = only consider numeric columns as candidates.
  #
  # Raises an error (after an error dialog) when no input variables
  # have been selected.

  # Validate before doing any work.  (The previous version also
  # computed the target indicies here, but never used them.)
  if (is.null(crs$input))
  {
    errorDialog(Rtxt("No input variables have been selected.",
                     "This doesn't make a lot of sense.",
                     "Please choose some input variables before proceeding."))
    stop(Rtxt("no input variables specified"))
  }

  fi <- getVariableIndicies(crs$input)

  if (numonly)
    fl <- seq(1, ncol(crs$dataset))[as.logical(sapply(crs$dataset, is.numeric))]
  else
    fl <- 1:ncol(crs$dataset)

  if (setequal(fi, fl))
    return(NULL)
  else
    return(simplifyNumberList(intersect(fl, fi)))
}
used.variables <- function(numonly=FALSE)
{
  # Return, as a comma separated list (as a string), the indicies of
  # all variables that are not ignored (and not idents).  If nothing
  # remains excluded from the candidate columns then return NULL.
  #
  # numonly = only consider numeric columns as candidates.

  # Columns excluded from use: the ignored and the identifiers.
  excluded <- union(getVariableIndicies(crs$ignore),
                    getVariableIndicies(crs$ident))

  # Candidate columns: every column, or just the numeric ones.
  candidates <- seq_len(ncol(crs$dataset))
  if (numonly)
    candidates <- candidates[as.logical(sapply(crs$dataset, is.numeric))]

  if (setequal(candidates, excluded))
    NULL
  else
    simplifyNumberList(setdiff(candidates, excluded))
}
getCategoricVariables <- function(type="string", include.target=FALSE)
{
  # Return a list of categoric variables from amongst those with an
  # INPUT role.  If type is "names" then return the list of variable
  # names, otherwise a simplified string of column indicies.
  #
  # include.target = also consider the target variable.
  #
  # IDIOM FIX: the default was the abbreviation F, which is a
  # reassignable binding in R; always spell out FALSE.

  include <- NULL
  # Indicies of the factor columns in the dataset.
  cats <- seq(1, ncol(crs$dataset))[as.logical(sapply(crs$dataset, is.factor))]
  if (length(cats) > 0)
  {
    indicies <- getVariableIndicies(crs$input)
    if (include.target)
      indicies <- c(indicies, getVariableIndicies(crs$target))
    included <- intersect(cats, indicies)
    if (type == "names")
      include <- names(crs$dataset)[included]
    else
      include <- simplifyNumberList(included)
  }
  return(include)
}
getNumericVariables <- function(type="string")
{
  # Return the numeric variables among those with an INPUT role.
  # 080803 Add support to return a list of indicies rather than the
  # default string that needs to be executed to identfy the indicies.
  #
  # type = "string" for a parseable string of indicies; any other
  #        value returns the integer indicies themselves.

  # Indicies of the numeric columns in the dataset.
  numeric.cols <- seq_len(ncol(crs$dataset))[as.logical(sapply(crs$dataset,
                                                               is.numeric))]

  if (length(numeric.cols) == 0) return(NULL)

  chosen <- intersect(numeric.cols, getVariableIndicies(crs$input))
  if (type == "string")
    chosen <- simplifyNumberList(chosen)
  chosen
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/output.R
\name{interval.prob}
\alias{interval.prob}
\title{Estimate the probability of a change point in a specified interval}
\usage{
interval.prob(object, start, end)
}
\arguments{
\item{object}{the result of a call to \code{bcp()}.}
\item{start}{the starting index of the interval.}
\item{end}{the ending index of the interval.}
}
\description{
The function \code{interval.prob()} estimates the probability of at least one
change point in the specified interval of sequential observations; it may only be used when \code{return.mcmc=TRUE}.
}
\details{
For sequential data only, the function returns an estimate of the posterior probability of at least one change point in the specified interval.
}
\note{
\code{return.mcmc} must be \code{TRUE}.
}
\examples{
##### A random sample from a few normal distributions #####
testdata <- c(rnorm(50), rnorm(50, 5, 1), rnorm(50))
bcp.0 <- bcp(testdata, return.mcmc=TRUE)
plot(bcp.0, main="Univariate Change Point Example")
interval.prob(bcp.0, 45, 55)
}
\seealso{
\code{\link{bcp}} and \code{\link{plot.bcp}}.
}
\author{
Xiaofei Wang, Chandra Erdman, and John W. Emerson
}
\keyword{datasets}
|
/fuzzedpackages/bcp/man/interval.prob.Rd
|
no_license
|
akhikolla/testpackages
|
R
| false
| true
| 1,217
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/output.R
\name{interval.prob}
\alias{interval.prob}
\title{Estimate the probability of a change point in a specified interval}
\usage{
interval.prob(object, start, end)
}
\arguments{
\item{object}{the result of a call to \code{bcp()}.}
\item{start}{the starting index of the interval.}
\item{end}{the ending index of the interval.}
}
\description{
The function \code{interval.prob()} estimates the probability of at least one
change point in the specified interval of sequential observations; it may only be used when \code{return.mcmc=TRUE}.
}
\details{
For sequential data only, the function returns an estimate of the posterior probability of at least one change point in the specified interval.
}
\note{
\code{return.mcmc} must be \code{TRUE}.
}
\examples{
##### A random sample from a few normal distributions #####
testdata <- c(rnorm(50), rnorm(50, 5, 1), rnorm(50))
bcp.0 <- bcp(testdata, return.mcmc=TRUE)
plot(bcp.0, main="Univariate Change Point Example")
interval.prob(bcp.0, 45, 55)
}
\seealso{
\code{\link{bcp}} and \code{\link{plot.bcp}}.
}
\author{
Xiaofei Wang, Chandra Erdman, and John W. Emerson
}
\keyword{datasets}
|
# Split the article text (textoNoticia, defined elsewhere) on spaces
# to obtain individual words.
splitEspacioNoticia <- strsplit(textoNoticia, " ")[[1]]
# Lower-case every word so counts are case-insensitive.
splitEspacioNoticia <- tolower(splitEspacioNoticia)
# Count word frequencies.
unlisNoticias <- unlist(splitEspacioNoticia)
# BUGFIX: was table(unlisNoticia), an undefined name.
tablaPalabra <- table(unlisNoticias)
# Move the counts into a data frame.
dfPalabrasNoticia <- as.data.frame(tablaPalabra)
# Save the counts as CSV.  BUGFIX: the file name must be a quoted string.
write.csv(dfPalabrasNoticia, file="palabrasNoticia.csv")
# Save the counts as a semicolon separated text file.
write.table(dfPalabrasNoticia, file="palabrasNoticia.txt", sep=";")

# Extract the nodes containing the product tables from the page
# (webpage, read elsewhere).
tablaProducto <- html_nodes(webpage, ".productos")
# Extract table 1.  BUGFIX: the node list is tablaProducto; the
# original referenced the undefined contenedorDeTabla.
tabla1 <- html_table(tablaProducto[[1]])
# Show the content of cell (1, 2) of table 1.
print(tabla1[1, 2])
# Extract table 2.  BUGFIX: was the undefined contenedorDeTabalas.
tabla2 <- html_table(tablaProducto[[2]])
# Show the content of cell (1, 2) of table 2.
print(tabla2[1, 2])

# Clean "$" signs and thousands separators ("." in Chilean formatting)
# from the value column.
tabla1$Valor <- gsub("\\$", "", tabla1$Valor)
tabla1$Valor <- gsub("[.]", "", tabla1$Valor)
# as.numeric converts the cleaned columns to numbers.
tabla1$Valor <- as.numeric(tabla1$Valor)
tabla2$Valor <- as.numeric(tabla2$Valor)
# Combine the two data frames (the merge itself is not part of this
# fragment; tablaMerge is assumed to hold the result).
#----------------------------------------------------------------------
# Example of filtering rows on a column value.
# BUGFIX: "whict" -> "which", and the stray "(" inside the string
# literal removed.
dfVariable[which(dfVariable$Columna == "loqbuscamos"), ]
# Bar chart of value per supermarket product.
# BUGFIX: aes() must be an argument of ggplot() and the layers joined
# with "+"; the original fragment did not parse.
tablaMerge %>%
  ggplot(aes(x = ProductoSupermercado, y = Valor)) +
  geom_bar(stat = "identity")
# Scatter plot -------------------------

# =========== Feria chilena del libro ================================
paginaChilenaDelLibro <- "https://www.feriachilenadellibro.cl/"
paginaChilenaRead <- read_html(paginaChilenaDelLibro)
paginaChilenaNodesReferencias <- html_nodes(paginaChilenaRead, ".product-item-photo")
# Collect the href of each product link, then visit each product page
# and print its price.
referencias <- html_attr(paginaChilenaNodesReferencias, "href")
for (refe in referencias) {
  print(refe)
  lecturaLibro <- read_html(refe)
  precio <- html_text(html_nodes(lecturaLibro, ".price"))
  print(precio)
}
|
/Funcion de extraccion.R
|
no_license
|
angela2020/Tarea_4.4
|
R
| false
| false
| 2,084
|
r
|
# Split the article text into words on spaces.
splitEspacioNoticia <- strsplit(textoNoticia, " ")[[1]]
# Lower-case every word so counts are case-insensitive.
splitEspacioNoticia <- tolower(splitEspacioNoticia)
# Count word frequencies.
unlisNoticias <- unlist(splitEspacioNoticia)
# FIX: the original referenced the undefined `unlisNoticia` (missing "s").
tablaPalabra <- table(unlisNoticias)
# Move the frequency table into a data frame.
dfPalabrasNoticia <- as.data.frame(tablaPalabra)
# Persist as CSV.  FIX: the file name must be a quoted string, not a symbol.
write.csv(dfPalabrasNoticia, file = "palabrasNoticia.csv")
# Persist as a semicolon-separated text file.
write.table(dfPalabrasNoticia, file = "palabrasNoticia.txt", sep = ";")
# Extract the table container nodes from the page.
tablaProducto <- html_nodes(webpage, ".productos")
# Extract table 1.
tabla1 <- html_table(contenedorDeTabla[1][[1]])
# Inspect cell [1, 2] of table 1.
print(tabla1[1, 2])
# Extract table 2.  FIX: original used the misspelled `contenedorDeTabalas`.
tabla2 <- html_table(contenedorDeTabla[2][[1]])
# Inspect cell [1, 2] of table 2.
print(tabla2[1, 2])
# Strip "$" signs and "." thousand separators from the Valor column.
tabla1$Valor <- gsub("\\$", "", tabla1$Valor)
tabla1$Valor <- gsub("[.]", "", tabla1$Valor)
# Coerce the cleaned columns to numeric.
tabla1$Valor <- as.numeric(tabla1$Valor)
tabla2$Valor <- as.numeric(tabla2$Valor)
# Combine the two data frames.
# ----------------------------------------------------------------------
# FIX: original called the non-existent `whict()`.
dfVariable[which(dfVariable$Columna == "(loqbuscamos"), ]
# Bar chart.  FIX: original had `aes()` outside ggplot() plus a stray ")",
# which is a syntax error; this is the intended pipeline.
tablaMerge %>%
  ggplot(aes(x = ProductoSupermercado, y = Valor)) +
  geom_bar(stat = "identity")
# Scatter plot -------------------------
# =========== Feria chilena del libro ================================ #
paginaChilenaDelLibro <- "https://www.feriachilenadellibro.cl/"
paginaChilenaRead <- read_html(paginaChilenaDelLibro)
paginaChilenaNodesReferencias <- html_nodes(paginaChilenaRead, ".product-item-photo")
# html_attr(paginaChilenaNodesReferencias, "href")
referencias <- html_attr(paginaChilenaNodesReferencias, "href")
# Visit each product link and print its price.
for (refe in referencias) {
  print(refe)
  lecturaLibro <- read_html(refe)
  precio <- html_text(html_nodes(lecturaLibro, ".price"))
  print(precio)
}
|
% Generated by roxygen2 (4.1.0.9001): do not edit by hand
% Please edit documentation in R/wrap_col.R
\name{wrap_col}
\alias{wrap_col}
\title{Replicate Vector to N}
\usage{
wrap_col(col, n)
}
\arguments{
\item{col}{A vector of colors.}
\item{n}{The desired length for the returned vector of colors.}
}
\description{
This function extends a vector of colors to specified length.
}
\examples{
wrap_col(c('red','blue'), 8)
}
|
/EnzymeAssay/man/wrap_col.Rd
|
no_license
|
alisandra/enzyme_assay
|
R
| false
| false
| 424
|
rd
|
% Generated by roxygen2 (4.1.0.9001): do not edit by hand
% Please edit documentation in R/wrap_col.R
\name{wrap_col}
\alias{wrap_col}
\title{Replicate Vector to N}
\usage{
wrap_col(col, n)
}
\arguments{
\item{col}{A vector of colors.}
\item{n}{The desired length for the returned vector of colors.}
}
\description{
This function extends a vector of colors to specified length.
}
\examples{
wrap_col(c('red','blue'), 8)
}
|
#
# This module contains functions, implementing RR-related functionality:
#
# 1. General functions (dynamic chunks, etc.)
# 2. Specialized functions (creating tables/figures for various analyses)
# 3. Miscellaneous (utility) functions
##### GENERAL FUNCTIONS
## CHUNKS
# Dynamically generates knit-ready chunk code for objects (figures/tables),
# ready for cross-referencing in a report document (supports .Rmd to LaTeX).
# Currently supports figures only,
# as tables generation is context- and target-specific (TBD).
# Dynamically generate knit-ready chunk code for the figure objects listed
# in `refInfo` (the list produced by genObjRefs); each chunk prints one
# object and carries a LaTeX \label so it can be cross-referenced.
# Args:
#   refInfo    - list with $objs, $objType, $objTypePrefix (see genObjRefs)
#   multi,hold - currently unreferenced in the body -- TODO confirm intent
#   fig.height, fig.width - optional figure dimensions; when supplied they
#     are scaled by a `myScale` variable assumed to exist in the knitting
#     environment (not defined here)
# Returns: a character vector of expanded chunk text for knitting.
genDynChunks <- function (refInfo, multi = FALSE, hold = TRUE,
fig.height, fig.width) {
figOptions <- ""
# Caption template; {{caption}} and {{varName}} are filled by knit_expand.
latexObjLabel <- paste0("{{caption}}", "\\\\label{", refInfo$objType, ":{{varName}}", "}")
chunkName <- "{{name2}}"
chunkHeader <- paste0("```{r ", chunkName, ",")
# If either dimension was passed, build the fig-size options (note: the
# paste below references BOTH, so callers should supply both together).
if (!(missing(fig.height) && missing(fig.width)))
figOptions <- paste0("fig.height=myScale*", fig.height, ", fig.width=myScale*", fig.width)
chunkOptions <- paste0("include=TRUE, results='asis', ", figOptions, ", fig.cap='", latexObjLabel, "'")
chunkHeaderFull <- paste(chunkHeader, chunkOptions, "}")
chunkBody <- "print(get('{{name}}'))"
chunkText <- c(chunkHeaderFull,
chunkBody,
"```", "\n")
# Expand one chunk per object; chunk names may not contain dots, so dots
# in object names are replaced by underscores for the chunk label.
objChunks <- lapply(refInfo$objs, function (x)
knit_expand(text = chunkText,
name = x,
name2 = gsub('\\.', '_', x),
varName = strsplit(x, refInfo$objTypePrefix)[[1]][2],
caption = attr(get(x), 'title')))
return (unlist(objChunks))
}
## REFERENCES
# Produces string with LaTeX ref. labels for figures/tables of specific type
# Produce a human-readable string of LaTeX \ref{} cross-references for all
# global objects whose names match `objTypePrefix`, e.g.
# "Figures  \ref{fig:a}, \ref{fig:b}  and  \ref{fig:c}".
# Returns a list (objs, str, objType, objTypePrefix) consumed by
# genDynChunks().
# NOTE(review): object discovery scans the *global environment*; callers
# must have created the figure/table objects before invoking this.
genObjRefs <- function (objType, objTypePrefix) {
objs <- ls(pattern = objTypePrefix, envir = .GlobalEnv)
if (length(objs) == 0)
stop(paste("No objects of type", objTypePrefix, "found!"))
# The part of each object name after the prefix becomes the LaTeX label.
split <- strsplit(objs, objTypePrefix)
objRefs <- sapply(split, `[[`, 2)
#objRefs <- split[[seq(split)]][2]
objAllRefs <- c()
for (i in seq(objRefs)) objAllRefs <- c(objAllRefs, objRefs[[i]])
refKeyword <- ifelse(objType == "fig", "\\ref{fig:", "\\ref{tab:")
refStr <- sapply(objAllRefs, function (x) {paste0(refKeyword, x, "}")})
colFlag <- ""; refStrTemp <- ""
objWord <- ifelse(objType == "fig", "Figures ", "Tables ")
if (length(refStr) < 2) {
# Exactly one reference: singular wording.
objWord <- ifelse(objType == "fig", "Figure ", "Table")
refStrFinal <- paste(objWord, refStr[length(refStr)])
}
else {
# Join all but the last with ", " (for 3+) and append the final
# reference after " and ".
if (length(refStr) == 2) colFlag <- " and "
else if (length(refStr) > 2) colFlag <- ", "
refStrTemp <- paste(refStr[-length(refStr)], collapse = colFlag)
refStrFinal <- paste(objWord, refStrTemp, " and ", refStr[length(refStr)])
}
list(objs = objs, str = refStrFinal,
objType = objType, objTypePrefix = objTypePrefix)
}
##### SPECIALIZED FUNCTIONS - EDA
## TABLES
# Render a LaTeX longtable of EDA descriptive statistics for the numeric
# and factor columns of `df`, using psych::describe() and the 'tables'
# package.  The first row of the describe() output is dropped -- assumed
# to be a Project.ID column (see inline note) -- TODO confirm for other
# inputs.
genEDAdescStatsTable <- function (df, label = "edaDescStats",
caption = "EDA descriptive statistics",
digits = 2) {
is.numericORfactor <- function (x) { is.numeric(x) || is.factor(x) }
# Keep only columns describe() can summarize.
df <- df[, sapply(df, is.numericORfactor)]
df <- psych::describe(df)
df <- as.data.frame(round(df, digits))
df$vars <- rownames(df)
# Subset/rename to the presentation columns.
colsToInclude <- c("n", "mean", "sd", "median",
"min", "max", "skew", "kurtosis")
tableCols <- c("N", "Mean", "SD", "Median",
"Min", "Max", "Skew", "Kurtosis")
df <- df[-1, ] # remove Project.ID
df <- df[, colsToInclude]
names(df) <- tableCols
edaDescStatsTable <- as.tabular(df)
# set the caption (specific for 'tables' package); the \label enables
# \ref{tab:<label>} cross-references
latexCap <- paste0("\\caption{", caption, ".}\\\\", "\n",
"\\toprule",
"\\label{tab:", label, "}")
# set tabular settings
booktabs()
# output LaTeX table
latex(edaDescStatsTable,
mathmode = FALSE, # output dash instead of LaTeX minus sign character
options = list(tabular = "longtable",
toprule = latexCap))
}
##### SPECIALIZED FUNCTIONS - EFA
##### SPECIALIZED FUNCTIONS - CFA
##### SPECIALIZED FUNCTIONS - SEM
## TABLES
# Generate R Markdown table with results of SEM analysis ('pander' 2nd ver.)
# Render the inner-model (structural) coefficients of a plspm result as a
# pander table: one row per path with Estimate, SE, t-value and p-value.
# `digits` defaults to a global DIGITS constant -- assumed to be defined
# by the surrounding report environment (TODO confirm).
plspm.innerprint <- function(object, digits = DIGITS) {
# Stack the per-outcome coefficient tables, tagging each row with the
# outcome (dependent) variable name.
res1 <- do.call(rbind, lapply(names(object$inner_model), function(n) {
data.frame(Outcome = n, object$inner_model[[n]])
}))
colnames(res1)[3:5] <- c("SE", "Tvalue", "Pvalue")
# Pretty-print tiny p-values (e.g. "<2e-16") rather than rounding to 0.
res1$Pvalue <- format.pval(res1$Pvalue, digits = digits)
pander(res1, split.tables = 200, round = digits)
}
# Generate R Markdown table with results of SEM analysis ('pander' 1st ver.)
# currently uses pandoc.table(); using methods is TBD:
# print.pander <- function (x, ...) UseMethod("pander")
# Build an R Markdown / LaTeX table entry for SEM results.
# For LaTeX output a \label{tab:<label>} is appended to the caption so the
# table can be cross-referenced; the caption is then registered with
# pander for the next table only.
# Returns a list holding the (possibly labelled) caption and the table.
genSEMtable <- function (obj, caption, label,
type = "1", format = "latex") {
  if (format == "latex") {
    caption <- paste0(caption, "\\label{tab:", label, "}")
  }
  # One-shot caption: do not reuse it for subsequent tables.
  set.caption(caption, permanent = FALSE)
  list(caption = caption, table = obj)
}
## FIGURES
# Attach a LaTeX cross-reference label (\label{fig:<label>}) to a figure
# caption and bundle the caption together with the plot object.
genSEMfigure <- function (obj, caption, label) {
  labelled <- paste0(caption, "\\label{fig:", label, "}")
  list(caption = labelled, plot = obj)
}
##### MISC FUNCTIONS #####
# Escape characters that are special to LaTeX in `str`.
# Backslashes are swapped for a placeholder first so the escapes inserted
# for the other characters are not themselves re-escaped; the placeholder
# is resolved to $\backslash$ at the very end.  Replacement order matters
# and is preserved exactly.
sanitize <- function(str) {
  out <- gsub("\\\\", "SANITIZE.BACKSLASH", str)
  replacements <- list(
    c("$", "\\$"),
    c(">", "$>$"),
    c("<", "$<$"),
    c("|", "$|$"),
    c("{", "\\{"),
    c("}", "\\}"),
    c("%", "\\%"),
    c("&", "\\&"),
    c("_", "\\_"),
    c("#", "\\#"),
    c("^", "\\verb|^|"),
    c("~", "\\~{}"),
    c("SANITIZE.BACKSLASH", "$\\backslash$")
  )
  for (pair in replacements) {
    out <- gsub(pair[[1]], pair[[2]], out, fixed = TRUE)
  }
  out
}
|
/utils/knit.R
|
permissive
|
abnova/diss-floss-official
|
R
| false
| false
| 6,671
|
r
|
#
# This module contains functions, implementing RR-related functionality:
#
# 1. General functions (dynamic chunks, etc.)
# 2. Specialized functions (creating tables/figures for various analyses)
# 3. Miscellaneous (utility) functions
##### GENERAL FUNCTIONS
## CHUNKS
# Dynamically generates knit-ready chunk code for objects (figures/tables),
# ready for cross-referencing in a report document (supports .Rmd to LaTeX).
# Currently supports figures only,
# as tables generation is context- and target-specific (TBD).
genDynChunks <- function (refInfo, multi = FALSE, hold = TRUE,
fig.height, fig.width) {
figOptions <- ""
latexObjLabel <- paste0("{{caption}}", "\\\\label{", refInfo$objType, ":{{varName}}", "}")
chunkName <- "{{name2}}"
chunkHeader <- paste0("```{r ", chunkName, ",")
if (!(missing(fig.height) && missing(fig.width)))
figOptions <- paste0("fig.height=myScale*", fig.height, ", fig.width=myScale*", fig.width)
chunkOptions <- paste0("include=TRUE, results='asis', ", figOptions, ", fig.cap='", latexObjLabel, "'")
chunkHeaderFull <- paste(chunkHeader, chunkOptions, "}")
chunkBody <- "print(get('{{name}}'))"
chunkText <- c(chunkHeaderFull,
chunkBody,
"```", "\n")
objChunks <- lapply(refInfo$objs, function (x)
knit_expand(text = chunkText,
name = x,
name2 = gsub('\\.', '_', x),
varName = strsplit(x, refInfo$objTypePrefix)[[1]][2],
caption = attr(get(x), 'title')))
return (unlist(objChunks))
}
## REFERENCES
# Produces string with LaTeX ref. labels for figures/tables of specific type
genObjRefs <- function (objType, objTypePrefix) {
objs <- ls(pattern = objTypePrefix, envir = .GlobalEnv)
if (length(objs) == 0)
stop(paste("No objects of type", objTypePrefix, "found!"))
split <- strsplit(objs, objTypePrefix)
objRefs <- sapply(split, `[[`, 2)
#objRefs <- split[[seq(split)]][2]
objAllRefs <- c()
for (i in seq(objRefs)) objAllRefs <- c(objAllRefs, objRefs[[i]])
refKeyword <- ifelse(objType == "fig", "\\ref{fig:", "\\ref{tab:")
refStr <- sapply(objAllRefs, function (x) {paste0(refKeyword, x, "}")})
colFlag <- ""; refStrTemp <- ""
objWord <- ifelse(objType == "fig", "Figures ", "Tables ")
if (length(refStr) < 2) {
objWord <- ifelse(objType == "fig", "Figure ", "Table")
refStrFinal <- paste(objWord, refStr[length(refStr)])
}
else {
if (length(refStr) == 2) colFlag <- " and "
else if (length(refStr) > 2) colFlag <- ", "
refStrTemp <- paste(refStr[-length(refStr)], collapse = colFlag)
refStrFinal <- paste(objWord, refStrTemp, " and ", refStr[length(refStr)])
}
list(objs = objs, str = refStrFinal,
objType = objType, objTypePrefix = objTypePrefix)
}
##### SPECIALIZED FUNCTIONS - EDA
## TABLES
genEDAdescStatsTable <- function (df, label = "edaDescStats",
caption = "EDA descriptive statistics",
digits = 2) {
is.numericORfactor <- function (x) { is.numeric(x) || is.factor(x) }
df <- df[, sapply(df, is.numericORfactor)]
df <- psych::describe(df)
df <- as.data.frame(round(df, digits))
df$vars <- rownames(df)
colsToInclude <- c("n", "mean", "sd", "median",
"min", "max", "skew", "kurtosis")
tableCols <- c("N", "Mean", "SD", "Median",
"Min", "Max", "Skew", "Kurtosis")
df <- df[-1, ] # remove Project.ID
df <- df[, colsToInclude]
names(df) <- tableCols
edaDescStatsTable <- as.tabular(df)
# set the caption (specific for 'tables' package)
latexCap <- paste0("\\caption{", caption, ".}\\\\", "\n",
"\\toprule",
"\\label{tab:", label, "}")
# set tabular settings
booktabs()
# output LaTeX table
latex(edaDescStatsTable,
mathmode = FALSE, # output dash instead of LaTeX minus sign character
options = list(tabular = "longtable",
toprule = latexCap))
}
##### SPECIALIZED FUNCTIONS - EFA
##### SPECIALIZED FUNCTIONS - CFA
##### SPECIALIZED FUNCTIONS - SEM
## TABLES
# Generate R Markdown table with results of SEM analysis ('pander' 2nd ver.)
plspm.innerprint <- function(object, digits = DIGITS) {
res1 <- do.call(rbind, lapply(names(object$inner_model), function(n) {
data.frame(Outcome = n, object$inner_model[[n]])
}))
colnames(res1)[3:5] <- c("SE", "Tvalue", "Pvalue")
res1$Pvalue <- format.pval(res1$Pvalue, digits = digits)
pander(res1, split.tables = 200, round = digits)
}
# Generate R Markdown table with results of SEM analysis ('pander' 1st ver.)
# currently uses pandoc.table(); using methods is TBD:
# print.pander <- function (x, ...) UseMethod("pander")
genSEMtable <- function (obj, caption, label,
type = "1", format = "latex") {
# if LaTeX, add label to the caption for cross-referencing
if (format == "latex")
caption <- paste0(caption, "\\label{tab:", label, "}")
# set the caption, but don't re-use for next table(s)
set.caption(caption, permanent = FALSE)
# don't split tables
##panderOptions("table.split.table", Inf)
# create table in R Markdown format
##pandoc.table(obj) # more flexible alternative: pander()
##pander(obj, split.tables = 200, round = DIGITS)
# return both caption/label and table in a list
list(caption = caption, table = obj)
}
## FIGURES
# Attach a LaTeX cross-reference label (\label{fig:<label>}) to a figure
# caption and bundle the caption together with the plot object.
genSEMfigure <- function (obj, caption, label) {
  labelled <- paste0(caption, "\\label{fig:", label, "}")
  list(caption = labelled, plot = obj)
}
##### MISC FUNCTIONS #####
# Escape characters that are special to LaTeX in `str`.
# Backslashes are swapped for a placeholder first so the escapes inserted
# for the other characters are not themselves re-escaped; the placeholder
# is resolved to $\backslash$ at the very end.  Replacement order matters
# and is preserved exactly.
sanitize <- function(str) {
  out <- gsub("\\\\", "SANITIZE.BACKSLASH", str)
  replacements <- list(
    c("$", "\\$"),
    c(">", "$>$"),
    c("<", "$<$"),
    c("|", "$|$"),
    c("{", "\\{"),
    c("}", "\\}"),
    c("%", "\\%"),
    c("&", "\\&"),
    c("_", "\\_"),
    c("#", "\\#"),
    c("^", "\\verb|^|"),
    c("~", "\\~{}"),
    c("SANITIZE.BACKSLASH", "$\\backslash$")
  )
  for (pair in replacements) {
    out <- gsub(pair[[1]], pair[[2]], out, fixed = TRUE)
  }
  out
}
|
# Auto-generated fuzz harness: extreme/denormal numeric inputs exercising
# the compiled meteor:::ET0_Makkink entry point (run under valgrind).
testlist <- list(Rs = c(-1.9577272327571e+276, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), atmp = numeric(0), relh = c(7.41896642122422e-304, -4.29227809743625e-307, 1.81037701089217e+87, -2.93112217825115e-158, 9.03412394302482e-46, 7.31195213563656e+256, -1.93925524631599e-68, 2.08343441298214e-168, 1.39098956557385e-309 ), temp = 1.11231963688461e-307)
# Call with the fuzzed argument list and show the structure of the result.
result <- do.call(meteor:::ET0_Makkink,testlist)
str(result)
|
/meteor/inst/testfiles/ET0_Makkink/AFL_ET0_Makkink/ET0_Makkink_valgrind_files/1615862022-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false
| false
| 473
|
r
|
# Auto-generated fuzz harness: extreme/denormal numeric inputs exercising
# the compiled meteor:::ET0_Makkink entry point (run under valgrind).
testlist <- list(Rs = c(-1.9577272327571e+276, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), atmp = numeric(0), relh = c(7.41896642122422e-304, -4.29227809743625e-307, 1.81037701089217e+87, -2.93112217825115e-158, 9.03412394302482e-46, 7.31195213563656e+256, -1.93925524631599e-68, 2.08343441298214e-168, 1.39098956557385e-309 ), temp = 1.11231963688461e-307)
# Call with the fuzzed argument list and show the structure of the result.
result <- do.call(meteor:::ET0_Makkink,testlist)
str(result)
|
# Extracted example listing from the 'visualize' package documentation:
# plots lower-tail, bounded, and upper-tail chi-squared probabilities.
library(visualize)
### Name: visualize.chisq
### Title: Visualize Chi-squared Distribution
### Aliases: visualize.chisq
### Keywords: visualize
### ** Examples
# Evaluates lower tail.
visualize.chisq(stat = 1, df = 3, section = "lower")
# Evaluates bounded region.
visualize.chisq(stat = c(1,2), df = 6, section = "bounded")
# Evaluates upper tail.
visualize.chisq(stat = 1, df = 3, section = "upper")
|
/data/genthat_extracted_code/visualize/examples/visualize.chisq.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 412
|
r
|
# Extracted example listing from the 'visualize' package documentation:
# plots lower-tail, bounded, and upper-tail chi-squared probabilities.
library(visualize)
### Name: visualize.chisq
### Title: Visualize Chi-squared Distribution
### Aliases: visualize.chisq
### Keywords: visualize
### ** Examples
# Evaluates lower tail.
visualize.chisq(stat = 1, df = 3, section = "lower")
# Evaluates bounded region.
visualize.chisq(stat = c(1,2), df = 6, section = "bounded")
# Evaluates upper tail.
visualize.chisq(stat = 1, df = 3, section = "upper")
|
#
# Copyright 2007-2015 The OpenMx Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Locate every occurrence of the requested free parameters in `model`
# (and, when `indep`, its independent submodels too), returning one data
# frame row per matrix cell, ordered by parameter label.
omxLocateParameters <- function(model, labels = NULL, indep = FALSE) {
  found <- locateParametersInternal(model, labels, indep)
  if (nrow(found) > 0) {
    found <- found[order(found$label), ]
  }
  rownames(found) <- NULL
  found
}
# Recursive worker for omxLocateParameters(): gather parameter locations
# from this model's matrices, then from its submodels (all submodels when
# `indep` is TRUE, otherwise only the dependent ones).
locateParametersInternal <- function(model, labels, indep) {
# `labels` may be NULL (match everything) or a character vector; an NA
# element inside the vector matches anonymous (unlabeled) parameters.
if (!is.null(labels) && !single.na(labels) && !is.character(labels)) {
stop("'labels' argument must be NULL or a character vector")
}
# One data frame per matrix, stacked into a single result.
retval <- lapply(model@matrices, locateParametersHelper, model@name, labels)
retval <- do.call(rbind, retval)
if(indep) {
submodels <- model@submodels
} else {
submodels <- imxDependentModels(model)
}
if (length(submodels) > 0) {
# Recurse and append the submodels' rows.
subparams <- lapply(submodels, locateParametersInternal, labels, indep)
subparams <- do.call(rbind, subparams)
retval <- rbind(retval, subparams)
}
return(retval)
}
# Build a data frame describing each free cell of one MxMatrix whose label
# matches `target` (NULL target = every free cell; an NA in target matches
# anonymous, i.e. unlabeled, parameters).
locateParametersHelper <- function(matrix, modelname, target) {
# Empty prototype fixes the column names/types used for rbind'ing.
retval <- data.frame(label = character(0), model = character(0),
matrix = character(0), row = numeric(0),
col = numeric(0), value = numeric(0),
lbound = numeric(0), ubound = numeric(0),
stringsAsFactors = FALSE)
free <- matrix@free
count <- sum(free)
if (count == 0) {
return(retval)
} else {
# Per-cell attributes of every free location, in column-major order.
labels <- matrix@labels[free]
rows <- row(free)[free]
cols <- col(free)[free]
values <- matrix@values[free]
lbound <- matrix@lbound[free]
ubound <- matrix@ubound[free]
for(i in 1:count) {
pname <- labels[[i]]
# Keep the row when no filter is given, when an NA filter element
# matches this anonymous parameter, or when the label is requested.
if (is.null(target) || (is.na(pname) && any(is.na(target))) ||
pname %in% target) {
nextentry <- nrow(retval) + 1
retval[nextentry,'label'] <- pname
retval[nextentry,'model'] <- modelname
retval[nextentry,'matrix'] <- matrix@name
retval[nextentry,'row'] <- rows[[i]]
retval[nextentry,'col'] <- cols[[i]]
retval[nextentry,'value'] <- values[[i]]
retval[nextentry,'lbound'] <- lbound[[i]]
retval[nextentry,'ubound'] <- ubound[[i]]
}
}
}
return(retval)
}
# Collect the parameters of `model` (and its submodels) as a named vector.
# Args:
#   model - an MxModel
#   indep - descend into all submodels (TRUE) or only dependent ones
#   free  - TRUE: free parameters only; FALSE: fixed-but-labeled only;
#           NA: both
#   fetch - attribute to return: 'values', 'free', 'lbound', 'ubound',
#           or 'all' (a data frame of all four)
# Duplicate labels across matrices/models are one parameter: the first
# occurrence wins.
omxGetParameters <- function(model, indep = FALSE, free = c(TRUE, FALSE, NA),
fetch = c('values', 'free', 'lbound', 'ubound', 'all')) {
# Emulate match.arg-style defaults: an untouched default collapses to
# its first element.
if (identical(free, c(TRUE, FALSE, NA))) {
free <- TRUE
}
if (identical(fetch, c('values', 'free', 'lbound', 'ubound', 'all'))) {
fetch <- 'values'
}
if (!is.logical(free) || length(free) != 1) {
stop("argument 'free' must be a 'TRUE', 'FALSE', or NA")
}
if (!is.character(fetch) || length(fetch) != 1 ||
!(fetch %in% c('values', 'free', 'lbound', 'ubound', 'all'))) {
stop("argument 'fetch' must be one of c('values', 'free', 'lbound', 'ubound', 'all')")
}
if (fetch == 'all') {
# Recurse once per attribute and assemble them into a data frame.
values <- omxGetParameters(model, indep, free, 'values')
lbound <- omxGetParameters(model, indep, free, 'lbound')
ubound <- omxGetParameters(model, indep, free, 'ubound')
if (!is.na(free) && free) {
free <- rep.int(TRUE, length(values))
} else if (!is.na(free) && !free) {
free <- rep.int(FALSE, length(values))
} else {
free <- omxGetParameters(model, indep, free, 'free')
}
return(data.frame(values, free, lbound, ubound))
}
parameters <- lapply(model@matrices, getParametersHelper, model@name, free, fetch)
# Drop matrices that contributed nothing, then flatten to one vector.
plen <- lapply(parameters, length)
parameters[plen == 0] <- NULL
names(parameters) <- NULL
parameters <- unlist(parameters)
if(indep) {
submodels <- model@submodels
} else {
submodels <- imxDependentModels(model)
}
if (length(submodels) > 0) {
# Same collection procedure, recursively, for each submodel.
subparams <- lapply(submodels, omxGetParameters, indep, free, fetch)
plen <- lapply(subparams, length)
subparams[plen == 0] <- NULL
names(subparams) <- NULL
subparams <- unlist(subparams)
parameters <- c(parameters, subparams)
}
# A label shared across matrices/models is one parameter: keep first hit.
parameters <- parameters[!duplicated(names(parameters), incomparables = NA)]
return(parameters)
}
# Validate one optional replacement-vector argument of omxSetParameters():
# it must be NULL (argument absent) or satisfy the type predicate `test`;
# otherwise stop with a descriptive, caller-attributed error.
setParametersCheckVector <- function(values, test, argname, typename) {
  if (is.null(values)) {
    return()
  }
  if (!test(values)) {
    stop(paste(omxQuotes(argname),
               "argument must either be NA or a",
               typename, "vector"), call. = FALSE)
  }
}
# Modify attributes (free/values/labels/bounds) of the named parameters.
# Args:
#   model  - an MxModel
#   labels - character vector of parameter labels to modify; labels may
#            use square-bracket "model.matrix[row,col]" addressing
#   free, values, newlabels, lbound, ubound - optional replacement
#            vectors, recycled across the selected labels
#   indep  - also descend into independent submodels
#   strict - error if any requested label is absent from the model
#   name   - optionally rename the resulting model via mxRename()
omxSetParameters <- function(model, labels, free = NULL, values = NULL,
newlabels = NULL, lbound = NULL, ubound = NULL, indep = FALSE,
strict = TRUE, name = NULL) {
if (missing(labels) || !is.character(labels) || length(labels) == 0) {
stop("'labels' argument must be a character vector")
}
if (any(is.na(labels))) {
stop("'labels' argument must not contain NA values")
}
if (any(duplicated(labels))) {
stop("'labels' argument must not contain duplicate values")
}
if (!is.null(name) && length(name) != 1 && !is.character(name)) {
stop("'name' argument must be a character string")
}
if (strict) {
# Verify every requested label exists somewhere in the model tree
# (NA selection = both free and fixed labeled parameters).
pnames <- names(omxGetParameters(model, indep, NA))
missing <- setdiff(labels, pnames)
if (length(missing) > 0) {
msg <- paste("The following labels are",
"not present in the model",
"(use 'strict' = FALSE to ignore):",
omxQuotes(missing))
stop(msg)
}
}
# An all-NA bound vector (e.g. c(NA, NA)) is logical by default; coerce
# to numeric so the type check below accepts it.
if (is.vector(lbound) && length(lbound) > 0 && all(sapply(lbound, is.na))) {
lbound <- as.numeric(lbound)
}
if (is.vector(ubound) && length(ubound) > 0 && all(sapply(ubound, is.na))) {
ubound <- as.numeric(ubound)
}
setParametersCheckVector(free, is.logical, 'free', 'logical')
setParametersCheckVector(values, is.numeric, 'values', 'numeric')
setParametersCheckVector(newlabels, is.character, 'newlabels', 'character')
setParametersCheckVector(lbound, is.numeric, 'lbound', 'numeric')
setParametersCheckVector(ubound, is.numeric, 'ubound', 'numeric')
retval <- setParametersHelper(model, labels, free, values,
newlabels, lbound, ubound, indep)
if (!is.null(name)) {
retval <- mxRename(retval, name)
}
return(retval)
}
# Does `label` use OpenMx square-bracket substitution syntax AND refer to
# a matrix belonging to the model named `modelname`?
SBMatchHelper <- function(label, modelname) {
if (!hasSquareBrackets(label)) {
return(FALSE)
}
# splitSubstitution() yields c(objectName, row, col).
components <- splitSubstitution(label)
fullname <- unlist(strsplit(components[[1]], imxSeparatorChar, fixed = TRUE))
# The first path component is the owning model's name.
return(fullname[[1]] == modelname)
}
# Return the indices of `labels` that are square-bracket substitutions
# referring to matrices inside this particular model.
detectSBMatches <- function(model, labels) {
  hits <- sapply(labels, SBMatchHelper, model@name)
  which(hits)
}
# Recursive worker for omxSetParameters(): update this model's matrices,
# handle square-bracket-addressed labels, then recurse into submodels
# (all of them when `indep`, otherwise dependent submodels only).
setParametersHelper <- function(model, labels, free, values,
newlabels, lbound, ubound, indep) {
# Indices of labels of the form "model.matrix[row,col]" targeting THIS
# model.
squarebrackets <- detectSBMatches(model, labels)
model@matrices <- lapply(model@matrices, setParametersMatrix,
labels, free, values, newlabels, lbound, ubound)
if (length(squarebrackets) > 0) {
model <- setSquareBracketsHelper(model, squarebrackets, labels, free, values, newlabels, lbound, ubound)
}
if(indep) {
if (length(model@submodels) == 0) {
return(model)
}
model@submodels <- lapply(model@submodels, setParametersHelper,
labels, free, values, newlabels, lbound, ubound, indep)
} else {
select <- imxDependentModels(model)
if (length(select) == 0) {
return(model)
}
select <- lapply(select, setParametersHelper,
labels, free, values, newlabels, lbound, ubound, indep)
# Recurse only into dependent models; re-attach the untouched
# independent models afterwards.
model@submodels <- c(select, imxIndependentModels(model))
}
return(model)
}
# Pull the first element out of a (model, names) pair.
extractFirst <- function(x) {
  x[[1]]
}
# Pull the second element out of a (model, names) pair.
extractSecond <- function(x) {
  x[[2]]
}
##' omxNameAnonymousParameters
##'
##' Assign new names to the unnamed parameters
##'
##' @param model the MxModel
##' @param indep whether models are independent
##' @return
##' a list with components for the new MxModel with named parameters, and the new names.
omxNameAnonymousParameters <- function(model, indep = FALSE) {
  # Find the free, label-less cells of every matrix and generate fresh
  # names for them.
  rows <- lapply(model@matrices, getAnonymousRows)
  cols <- lapply(model@matrices, getAnonymousCols)
  newnames <- mapply(getAnonymousNames, rows)
  model@matrices <- mapply(assignAnonymousNames, model@matrices, rows, cols, newnames)
  newnames <- unlist(newnames)
  if(indep) {
    if (length(model@submodels) == 0) {
      return(list(model, newnames))
    }
    # Recurse into every submodel, collecting renamed models and names.
    pairs <- lapply(model@submodels, omxNameAnonymousParameters, indep)
    submodels <- lapply(pairs, extractFirst)
    subnames <- unlist(lapply(pairs, extractSecond))
    names(submodels) <- names(model@submodels)
    model@submodels <- submodels
    newnames <- c(newnames, subnames)
  } else {
    select <- imxDependentModels(model)
    if (length(select) == 0) {
      return(list(model, newnames))
    }
    pairs <- lapply(select, omxNameAnonymousParameters, indep)
    submodels <- lapply(pairs, extractFirst)
    subnames <- unlist(lapply(pairs, extractSecond))
    names(submodels) <- names(select)
    # BUG FIX: the original re-attached the *unmodified* dependent models
    # (`select`), silently discarding the freshly named submodels; use
    # `submodels` instead, mirroring setParametersHelper().
    model@submodels <- c(submodels, imxIndependentModels(model))
    newnames <- c(newnames, subnames)
  }
  return(list(model, newnames))
}
# Row indices of the free, label-less cells of an MxMatrix; symmetric
# matrices report only the upper triangle (incl. diagonal) so each
# parameter is counted once.
getAnonymousRows <- function(matrix) {
  anon <- matrix@free & is.na(matrix@labels)
  if (imxSymmetricMatrix(matrix)) {
    anon <- anon & upper.tri(matrix@labels, diag = TRUE)
  }
  row(matrix@free)[anon]
}
# Column indices of the free, label-less cells of an MxMatrix; symmetric
# matrices report only the upper triangle (incl. diagonal), matching
# getAnonymousRows().
getAnonymousCols <- function(matrix) {
  anon <- matrix@free & is.na(matrix@labels)
  if (imxSymmetricMatrix(matrix)) {
    anon <- anon & upper.tri(matrix@labels, diag = TRUE)
  }
  col(matrix@free)[anon]
}
# Write the generated `newnames` into matrix@labels at the anonymous
# (row, col) locations; symmetric matrices get the mirrored cell updated
# too, keeping both triangles consistent.
assignAnonymousNames <- function(matrix, rows, cols, newnames) {
symmetry <- imxSymmetricMatrix(matrix)
if (length(rows) > 0) {
for(i in 1:length(rows)) {
row <- rows[[i]]
col <- cols[[i]]
newname <- newnames[[i]]
matrix@labels[row,col] <- newname
if (symmetry) {
# Mirror the label into the lower triangle.
matrix@labels[col,row] <- newname
}
}
}
return(matrix)
}
# Produce one fresh, untitled parameter name per anonymous location.
getAnonymousNames <- function(rows) {
  replicate(length(rows), imxUntitledName())
}
# Make starting values consistent: every free parameter sharing a label
# is set to the value of that label's first occurrence in the model.
omxAssignFirstParameters <- function(model, indep = FALSE) {
  params <- omxGetParameters(model, indep)
  if (!length(params)) return(model)
  labelled <- !is.na(names(params))
  omxSetParameters(model, names(params)[labelled],
                   values = params[labelled], indep = indep)
}
# Extract one attribute (values/free/lbound/ubound) for the parameters of
# a single MxMatrix, as a named vector keyed by label; anonymous cells
# are keyed "model.matrix[row,col]".
# `selection`: TRUE = free cells, FALSE = fixed-but-labeled cells,
# NA = both.
getParametersHelper <- function(amatrix, modelname, selection, fetch) {
if (single.na(selection)) {
select <- amatrix@free | !apply(amatrix@labels, c(1,2), is.na)
} else if (selection) {
select <- amatrix@free
} else {
select <- !amatrix@free & !apply(amatrix@labels, c(1,2), is.na)
}
if (all(!select)) {
return(numeric())
}
# Symmetric matrices: count each off-diagonal parameter only once.
if (imxSymmetricMatrix(amatrix)) {
triangle <- upper.tri(select, diag=TRUE)
select <- select & triangle
}
theNames <- amatrix@labels[select]
if (any(is.na(theNames))) {
# Synthesize a positional name for every anonymous parameter.
rows <- row(amatrix@labels)[select]
cols <- col(amatrix@labels)[select]
for(i in 1:length(theNames)) {
if (is.na(theNames[[i]])) {
theNames[[i]] <- paste(modelname, ".", amatrix@name,
"[", rows[i], ",", cols[i], "]", sep ="")
}
}
}
if (fetch == "values") {
theValues <- amatrix@values[select]
} else if (fetch == "lbound") {
theValues <- amatrix@lbound[select]
} else if (fetch == "ubound") {
theValues <- amatrix@ubound[select]
} else if (fetch == "free") {
theValues <- amatrix@free[select]
}
names(theValues) <- theNames
# Repeated labels within one matrix are one parameter: keep first hit.
return(theValues[!duplicated(theNames)])
}
# Apply the replacement vectors to every cell of `amatrix` whose label is
# in `names`.  Each replacement vector is recycled by the label's index
# within `names` (modulo the vector's length), so a single scalar applies
# to all selected cells.
setParametersMatrix <- function(amatrix, names, free, values, newlabels, lbound, ubound) {
labels <- amatrix@labels
locations <- which(labels %in% names)
# For each matching cell, the index of its label within `names`.
indices <- match(labels[locations], names)
if (!is.null(free)) {
index2 <- ((indices - 1) %% length(free)) + 1
amatrix@free[locations] <- as.logical(free[index2])
}
if (!is.null(values)) {
index2 <- ((indices - 1) %% length(values)) + 1
amatrix@values[locations] <- as.numeric(values[index2])
}
if (!is.null(newlabels)) {
index2 <- ((indices - 1) %% length(newlabels)) + 1
amatrix@labels[locations] <- as.character(newlabels[index2])
}
if (!is.null(lbound)) {
index2 <- ((indices - 1) %% length(lbound)) + 1
amatrix@lbound[locations] <- as.numeric(lbound[index2])
}
if (!is.null(ubound)) {
index2 <- ((indices - 1) %% length(ubound)) + 1
amatrix@ubound[locations] <- as.numeric(ubound[index2])
}
return(amatrix)
}
# Apply free/values/newlabels/lbound/ubound updates addressed via
# square-bracket labels ("model.matrix[row,col]") directly to the target
# matrix cells.  Replacement vectors are recycled against the label's
# position in `labels` (1-based, modulo length), matching
# setParametersMatrix().  Symmetric matrices are updated on both sides of
# the diagonal.
setSquareBracketsHelper <- function(model, squarebrackets, labels,
                                    free, values, newlabels, lbound, ubound) {
  # seq_along also handles an empty index vector safely (1:0 would not).
  for (i in seq_along(squarebrackets)) {
    nextbracket <- squarebrackets[[i]]
    nextlabel <- labels[[nextbracket]]
    # splitSubstitution() yields c("model.matrix", row, col).
    components <- splitSubstitution(nextlabel)
    fullname <- unlist(strsplit(components[[1]], imxSeparatorChar, fixed = TRUE))
    matrixname <- fullname[[2]]
    row <- as.numeric(components[[2]])
    col <- as.numeric(components[[3]])
    amatrix <- model[[matrixname]]
    # BUG FIX: the original condition was
    #   !is.null(amatrix) || !is(amatrix, "MxMatrix")
    # which is TRUE for every input (and crashes on NULL); the intent is
    # to proceed only when the matrix exists and really is an MxMatrix.
    if (!is.null(amatrix) && is(amatrix, "MxMatrix")) {
      isSymmetric <- imxSymmetricMatrix(amatrix)
      if (!is.null(free)) {
        index2 <- ((nextbracket - 1) %% length(free)) + 1
        amatrix@free[row, col] <- as.logical(free[index2])
        if (isSymmetric) {
          amatrix@free[col, row] <- as.logical(free[index2])
        }
      }
      if (!is.null(values)) {
        index2 <- ((nextbracket - 1) %% length(values)) + 1
        amatrix@values[row, col] <- as.numeric(values[index2])
        if (isSymmetric) {
          amatrix@values[col, row] <- as.numeric(values[index2])
        }
      }
      if (!is.null(newlabels)) {
        index2 <- ((nextbracket - 1) %% length(newlabels)) + 1
        amatrix@labels[row, col] <- as.character(newlabels[index2])
        if (isSymmetric) {
          amatrix@labels[col, row] <- as.character(newlabels[index2])
        }
      }
      if (!is.null(lbound)) {
        index2 <- ((nextbracket - 1) %% length(lbound)) + 1
        amatrix@lbound[row, col] <- as.numeric(lbound[index2])
        if (isSymmetric) {
          amatrix@lbound[col, row] <- as.numeric(lbound[index2])
        }
      }
      if (!is.null(ubound)) {
        index2 <- ((nextbracket - 1) %% length(ubound)) + 1
        amatrix@ubound[row, col] <- as.numeric(ubound[index2])
        if (isSymmetric) {
          amatrix@ubound[col, row] <- as.numeric(ubound[index2])
        }
      }
      model[[matrixname]] <- amatrix
    }
  }
  return(model)
}
|
/R/MxModelParameters.R
|
permissive
|
trbrick/OpenMx
|
R
| false
| false
| 14,073
|
r
|
#
# Copyright 2007-2015 The OpenMx Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Locate every matching parameter in a model (and its submodels), returning
# one data-frame row per occurrence, sorted by label.
omxLocateParameters <- function(model, labels = NULL, indep = FALSE) {
  located <- locateParametersInternal(model, labels, indep)
  if (nrow(located) > 0) {
    ordering <- order(located$label)
    located <- located[ordering, ]
  }
  rownames(located) <- NULL
  return(located)
}
# Recursive worker for omxLocateParameters: gathers parameter locations from
# this model's matrices, then from its submodels (all submodels when indep,
# otherwise dependent submodels only).
locateParametersInternal <- function(model, labels, indep) {
  if (!is.null(labels) && !single.na(labels) && !is.character(labels)) {
    stop("'labels' argument must be NULL or a character vector")
  }
  perMatrix <- lapply(model@matrices, locateParametersHelper, model@name, labels)
  found <- do.call(rbind, perMatrix)
  if (indep) {
    submodels <- model@submodels
  } else {
    submodels <- imxDependentModels(model)
  }
  if (length(submodels) > 0) {
    children <- lapply(submodels, locateParametersInternal, labels, indep)
    found <- rbind(found, do.call(rbind, children))
  }
  return(found)
}
# Collect the free parameters of one MxMatrix as data-frame rows.
#
# @param matrix    an MxMatrix; its 'free' slot marks estimated cells
# @param modelname name of the owning model, recorded in each row
# @param target    NULL keeps every free cell; otherwise a character vector
#                  of labels to keep, where an NA entry matches anonymous
#                  (unlabeled) parameters
# @return data.frame(label, model, matrix, row, col, value, lbound, ubound),
#         one row per matching free cell (possibly zero rows)
locateParametersHelper <- function(matrix, modelname, target) {
retval <- data.frame(label = character(0), model = character(0),
matrix = character(0), row = numeric(0),
col = numeric(0), value = numeric(0),
lbound = numeric(0), ubound = numeric(0),
stringsAsFactors = FALSE)
free <- matrix@free
count <- sum(free)
if (count == 0) {
return(retval)
} else {
# Per-cell metadata vectors, all aligned with the free cells.
labels <- matrix@labels[free]
rows <- row(free)[free]
cols <- col(free)[free]
values <- matrix@values[free]
lbound <- matrix@lbound[free]
ubound <- matrix@ubound[free]
for(i in 1:count) {
pname <- labels[[i]]
# Keep the cell when no filter was given, when the caller asked for
# anonymous parameters (NA in target) and this cell is unlabeled, or
# when the cell's label is explicitly listed.
if (is.null(target) || (is.na(pname) && any(is.na(target))) ||
pname %in% target) {
nextentry <- nrow(retval) + 1
retval[nextentry,'label'] <- pname
retval[nextentry,'model'] <- modelname
retval[nextentry,'matrix'] <- matrix@name
retval[nextentry,'row'] <- rows[[i]]
retval[nextentry,'col'] <- cols[[i]]
retval[nextentry,'value'] <- values[[i]]
retval[nextentry,'lbound'] <- lbound[[i]]
retval[nextentry,'ubound'] <- ubound[[i]]
}
}
}
return(retval)
}
# Fetch the model's parameters as a named vector (or data.frame for 'all').
#
# @param model an MxModel
# @param indep also descend into independent submodels?
# @param free  TRUE = free parameters, FALSE = fixed-but-labeled cells,
#              NA = both; the full choice vector is a "not supplied" sentinel
#              meaning TRUE
# @param fetch which slot to return: 'values' (default), 'free', 'lbound',
#              'ubound', or 'all' (a data.frame of all four)
# @return named vector (or data.frame) of the requested quantity,
#         de-duplicated by parameter label
omxGetParameters <- function(model, indep = FALSE, free = c(TRUE, FALSE, NA),
fetch = c('values', 'free', 'lbound', 'ubound', 'all')) {
# The untouched default choice vectors act as "argument omitted" sentinels.
if (identical(free, c(TRUE, FALSE, NA))) {
free <- TRUE
}
if (identical(fetch, c('values', 'free', 'lbound', 'ubound', 'all'))) {
fetch <- 'values'
}
if (!is.logical(free) || length(free) != 1) {
stop("argument 'free' must be a 'TRUE', 'FALSE', or NA")
}
if (!is.character(fetch) || length(fetch) != 1 ||
!(fetch %in% c('values', 'free', 'lbound', 'ubound', 'all'))) {
stop("argument 'fetch' must be one of c('values', 'free', 'lbound', 'ubound', 'all')")
}
# 'all' is assembled from three recursive single-slot fetches; when 'free'
# is not NA the free column is constant and can be filled directly.
if (fetch == 'all') {
values <- omxGetParameters(model, indep, free, 'values')
lbound <- omxGetParameters(model, indep, free, 'lbound')
ubound <- omxGetParameters(model, indep, free, 'ubound')
if (!is.na(free) && free) {
free <- rep.int(TRUE, length(values))
} else if (!is.na(free) && !free) {
free <- rep.int(FALSE, length(values))
} else {
free <- omxGetParameters(model, indep, free, 'free')
}
return(data.frame(values, free, lbound, ubound))
}
# Gather per-matrix parameter vectors, dropping matrices that contributed
# nothing so unlist() does not produce stray names.
parameters <- lapply(model@matrices, getParametersHelper, model@name, free, fetch)
plen <- lapply(parameters, length)
parameters[plen == 0] <- NULL
names(parameters) <- NULL
parameters <- unlist(parameters)
if(indep) {
submodels <- model@submodels
} else {
submodels <- imxDependentModels(model)
}
if (length(submodels) > 0) {
subparams <- lapply(submodels, omxGetParameters, indep, free, fetch)
plen <- lapply(subparams, length)
subparams[plen == 0] <- NULL
names(subparams) <- NULL
subparams <- unlist(subparams)
parameters <- c(parameters, subparams)
}
# Equated parameters (same label in several matrices/models) appear once;
# anonymous (NA-named) entries are never treated as duplicates.
parameters <- parameters[!duplicated(names(parameters), incomparables = NA)]
return(parameters)
}
# Validate an optional replacement vector for omxSetParameters.
# A NULL argument means "leave this attribute untouched" and passes;
# otherwise the vector must satisfy the supplied type predicate.
#
# @param values   candidate vector, or NULL
# @param test     type predicate such as is.numeric
# @param argname  argument name quoted in the error message
# @param typename human-readable type name for the error message
setParametersCheckVector <- function(values, test, argname, typename) {
  if (is.null(values)) return()
  wellTyped <- test(values)
  if (!wellTyped) {
    stop(paste(omxQuotes(argname),
               "argument must either be NA or a",
               typename, "vector"), call. = FALSE)
  }
}
# Set the attributes (free, values, labels, bounds) of the parameters named
# in 'labels' throughout a model and, optionally, its independent submodels.
#
# @param model     an MxModel
# @param labels    character vector of parameter labels to modify
# @param free, values, newlabels, lbound, ubound
#                  replacement vectors; NULL leaves the attribute untouched
# @param indep     descend into independent submodels too?
# @param strict    error when a requested label is absent from the model?
# @param name      optional new model name (passed to mxRename)
# @return the modified model
omxSetParameters <- function(model, labels, free = NULL, values = NULL,
    newlabels = NULL, lbound = NULL, ubound = NULL, indep = FALSE,
    strict = TRUE, name = NULL) {
  if (missing(labels) || !is.character(labels) || length(labels) == 0) {
    stop("'labels' argument must be a character vector")
  }
  if (any(is.na(labels))) {
    stop("'labels' argument must not contain NA values")
  }
  if (any(duplicated(labels))) {
    stop("'labels' argument must not contain duplicate values")
  }
  # BUG FIX: the original chained the checks with '&&', so the error only
  # fired when BOTH failed -- e.g. name = 42 or name = c("a", "b") slipped
  # through to mxRename(). Reject anything that is not a single string.
  if (!is.null(name) && (length(name) != 1 || !is.character(name))) {
    stop("'name' argument must be a character string")
  }
  if (strict) {
    pnames <- names(omxGetParameters(model, indep, NA))
    absent <- setdiff(labels, pnames)
    if (length(absent) > 0) {
      msg <- paste("The following labels are",
        "not present in the model",
        "(use 'strict' = FALSE to ignore):",
        omxQuotes(absent))
      stop(msg)
    }
  }
  # An all-NA bounds vector arrives as logical NA; coerce it so the numeric
  # type check below accepts "unset the bound" requests.
  if (is.vector(lbound) && length(lbound) > 0 && all(sapply(lbound, is.na))) {
    lbound <- as.numeric(lbound)
  }
  if (is.vector(ubound) && length(ubound) > 0 && all(sapply(ubound, is.na))) {
    ubound <- as.numeric(ubound)
  }
  setParametersCheckVector(free, is.logical, 'free', 'logical')
  setParametersCheckVector(values, is.numeric, 'values', 'numeric')
  setParametersCheckVector(newlabels, is.character, 'newlabels', 'character')
  setParametersCheckVector(lbound, is.numeric, 'lbound', 'numeric')
  setParametersCheckVector(ubound, is.numeric, 'ubound', 'numeric')
  retval <- setParametersHelper(model, labels, free, values,
    newlabels, lbound, ubound, indep)
  if (!is.null(name)) {
    retval <- mxRename(retval, name)
  }
  return(retval)
}
# Does 'label' contain a square-bracket reference ("model.matrix[r,c]")
# whose model part equals 'modelname'?
SBMatchHelper <- function(label, modelname) {
  # Plain labels can never be substitution references.
  if (!hasSquareBrackets(label)) {
    return(FALSE)
  }
  pieces <- splitSubstitution(label)
  path <- unlist(strsplit(pieces[[1]], imxSeparatorChar, fixed = TRUE))
  path[[1]] == modelname
}
# Indices of 'labels' that are square-bracket references into this model.
detectSBMatches <- function(model, labels) {
  matches <- sapply(labels, SBMatchHelper, model@name)
  which(matches)
}
# Recursive worker for omxSetParameters: updates every matrix of this model,
# resolves square-bracket label references addressed to it, then descends
# into submodels (all of them when indep = TRUE, dependent ones only
# otherwise).
setParametersHelper <- function(model, labels, free, values,
newlabels, lbound, ubound, indep) {
# Indices of labels that are "model.matrix[row,col]" references to us.
squarebrackets <- detectSBMatches(model, labels)
model@matrices <- lapply(model@matrices, setParametersMatrix,
labels, free, values, newlabels, lbound, ubound)
if (length(squarebrackets) > 0) {
model <- setSquareBracketsHelper(model, squarebrackets, labels, free, values, newlabels, lbound, ubound)
}
if(indep) {
if (length(model@submodels) == 0) {
return(model)
}
model@submodels <- lapply(model@submodels, setParametersHelper,
labels, free, values, newlabels, lbound, ubound, indep)
} else {
select <- imxDependentModels(model)
if (length(select) == 0) {
return(model)
}
select <- lapply(select, setParametersHelper,
labels, free, values, newlabels, lbound, ubound, indep)
# Recombine the updated dependent submodels with the untouched
# independent ones.
model@submodels <- c(select, imxIndependentModels(model))
}
return(model)
}
# First element of a pair (used to unzip (model, names) result pairs).
extractFirst <- function(x) {
  x[[1]]
}
# Second element of a pair (used to unzip (model, names) result pairs).
extractSecond <- function(x) {
  x[[2]]
}
##' omxNameAnonymousParameters
##'
##' Assign fresh unique names to every anonymous (unlabeled) free parameter.
##'
##' @param model the MxModel
##' @param indep whether to recurse into independent submodels as well
##' @return
##' a list with components for the new MxModel with named parameters, and the new names.
omxNameAnonymousParameters <- function(model, indep = FALSE) {
  rows <- lapply(model@matrices, getAnonymousRows)
  cols <- lapply(model@matrices, getAnonymousCols)
  newnames <- mapply(getAnonymousNames, rows)
  model@matrices <- mapply(assignAnonymousNames, model@matrices, rows, cols, newnames)
  newnames <- unlist(newnames)
  if (indep) {
    if (length(model@submodels) == 0) {
      return(list(model, newnames))
    }
    pairs <- lapply(model@submodels, omxNameAnonymousParameters, indep)
    submodels <- lapply(pairs, extractFirst)
    subnames <- unlist(lapply(pairs, extractSecond))
    names(submodels) <- names(model@submodels)
    model@submodels <- submodels
    newnames <- c(newnames, subnames)
  } else {
    select <- imxDependentModels(model)
    if (length(select) == 0) {
      return(list(model, newnames))
    }
    pairs <- lapply(select, omxNameAnonymousParameters, indep)
    submodels <- lapply(pairs, extractFirst)
    subnames <- unlist(lapply(pairs, extractSecond))
    names(submodels) <- names(select)
    # BUG FIX: the original reattached 'select' (the un-renamed dependent
    # submodels), discarding the recursive renaming just computed; the
    # processed 'submodels' must be combined with the independent ones.
    model@submodels <- c(submodels, imxIndependentModels(model))
    newnames <- c(newnames, subnames)
  }
  return(list(model, newnames))
}
# Row indices of the free, unlabeled cells of an MxMatrix; for symmetric
# matrices only the upper triangle is reported so each parameter appears once.
getAnonymousRows <- function(matrix) {
  anonymous <- matrix@free & is.na(matrix@labels)
  if (imxSymmetricMatrix(matrix)) {
    anonymous <- anonymous & upper.tri(matrix@labels, diag = TRUE)
  }
  row(matrix@free)[anonymous]
}
# Column indices of the free, unlabeled cells of an MxMatrix; mirrors
# getAnonymousRows so the two vectors index the same cells.
getAnonymousCols <- function(matrix) {
  anonymous <- matrix@free & is.na(matrix@labels)
  if (imxSymmetricMatrix(matrix)) {
    anonymous <- anonymous & upper.tri(matrix@labels, diag = TRUE)
  }
  col(matrix@free)[anonymous]
}
# Write the generated names into the label slot at the given cells,
# mirroring across the diagonal for symmetric matrices.
assignAnonymousNames <- function(matrix, rows, cols, newnames) {
  mirror <- imxSymmetricMatrix(matrix)
  for (i in seq_along(rows)) {
    r <- rows[[i]]
    c <- cols[[i]]
    freshname <- newnames[[i]]
    matrix@labels[r, c] <- freshname
    if (mirror) {
      matrix@labels[c, r] <- freshname
    }
  }
  matrix
}
# One freshly generated untitled name per anonymous parameter location.
getAnonymousNames <- function(rows) {
  howmany <- length(rows)
  replicate(howmany, imxUntitledName())
}
# Resolve conflicting start values: re-assign each labeled parameter the
# first value omxGetParameters reports for it, so equated parameters agree.
omxAssignFirstParameters <- function(model, indep = FALSE) {
  params <- omxGetParameters(model, indep)
  if (!length(params)) return(model)
  pnames <- names(params)
  labeled <- !is.na(pnames)
  model <- omxSetParameters(model, pnames[labeled],
                            values = params[labeled], indep = indep)
  return(model)
}
# Extract parameters from one MxMatrix for omxGetParameters.
#
# @param amatrix   the MxMatrix
# @param modelname owning model name, used to synthesise names for anonymous
#                  parameters ("model.matrix[row,col]")
# @param selection TRUE = free cells, FALSE = fixed-but-labeled cells,
#                  NA = both
# @param fetch     which slot to read: 'values', 'lbound', 'ubound', 'free'
# @return named vector of the requested slot, one entry per distinct label
getParametersHelper <- function(amatrix, modelname, selection, fetch) {
if (single.na(selection)) {
select <- amatrix@free | !apply(amatrix@labels, c(1,2), is.na)
} else if (selection) {
select <- amatrix@free
} else {
select <- !amatrix@free & !apply(amatrix@labels, c(1,2), is.na)
}
if (all(!select)) {
return(numeric())
}
# For symmetric matrices only the upper triangle is reported, so each
# parameter appears exactly once.
if (imxSymmetricMatrix(amatrix)) {
triangle <- upper.tri(select, diag=TRUE)
select <- select & triangle
}
theNames <- amatrix@labels[select]
# Anonymous cells get a synthetic square-bracket name built from their
# model, matrix, and coordinates.
if (any(is.na(theNames))) {
rows <- row(amatrix@labels)[select]
cols <- col(amatrix@labels)[select]
for(i in 1:length(theNames)) {
if (is.na(theNames[[i]])) {
theNames[[i]] <- paste(modelname, ".", amatrix@name,
"[", rows[i], ",", cols[i], "]", sep ="")
}
}
}
if (fetch == "values") {
theValues <- amatrix@values[select]
} else if (fetch == "lbound") {
theValues <- amatrix@lbound[select]
} else if (fetch == "ubound") {
theValues <- amatrix@ubound[select]
} else if (fetch == "free") {
theValues <- amatrix@free[select]
}
names(theValues) <- theNames
# Equated (same-label) parameters within this matrix are reported once.
return(theValues[!duplicated(theNames)])
}
# Apply parameter substitutions to the cells of one MxMatrix whose labels
# appear in 'names'. Replacement vectors are recycled modularly by the
# position of each cell's label within 'names', so callers may pass a single
# value or one value per requested label. NULL replacement vectors leave the
# corresponding slot untouched.
setParametersMatrix <- function(amatrix, names, free, values, newlabels, lbound, ubound) {
labels <- amatrix@labels
locations <- which(labels %in% names)
# Position of each matched cell's label within the request vector.
indices <- match(labels[locations], names)
if (!is.null(free)) {
index2 <- ((indices - 1) %% length(free)) + 1
amatrix@free[locations] <- as.logical(free[index2])
}
if (!is.null(values)) {
index2 <- ((indices - 1) %% length(values)) + 1
amatrix@values[locations] <- as.numeric(values[index2])
}
if (!is.null(newlabels)) {
index2 <- ((indices - 1) %% length(newlabels)) + 1
amatrix@labels[locations] <- as.character(newlabels[index2])
}
if (!is.null(lbound)) {
index2 <- ((indices - 1) %% length(lbound)) + 1
amatrix@lbound[locations] <- as.numeric(lbound[index2])
}
if (!is.null(ubound)) {
index2 <- ((indices - 1) %% length(ubound)) + 1
amatrix@ubound[locations] <- as.numeric(ubound[index2])
}
return(amatrix)
}
# Apply free/values/newlabels/lbound/ubound substitutions addressed by
# square-bracket labels of the form "model.matrix[row,col]".
#
# @param model          MxModel whose matrices are updated in place
# @param squarebrackets integer indices into 'labels' that hold
#                       square-bracket references belonging to 'model'
# @param labels         full character vector of requested labels
# @param free, values, newlabels, lbound, ubound
#                       replacement vectors (NULL leaves the slot untouched);
#                       recycled modularly by position in the label list
# @return the modified model
setSquareBracketsHelper <- function(model, squarebrackets, labels,
                                    free, values, newlabels, lbound, ubound) {
  for (i in seq_along(squarebrackets)) {
    nextbracket <- squarebrackets[[i]]
    nextlabel <- labels[[nextbracket]]
    components <- splitSubstitution(nextlabel)
    fullname <- unlist(strsplit(components[[1]], imxSeparatorChar, fixed = TRUE))
    matrixname <- fullname[[2]]
    row <- as.numeric(components[[2]])
    col <- as.numeric(components[[3]])
    amatrix <- model[[matrixname]]
    # BUG FIX: the original guard '!is.null(amatrix) || !is(amatrix, "MxMatrix")'
    # was always TRUE -- it proceeded (and crashed on slot access) for NULL
    # lookups and for non-matrix objects. Only proceed for a real MxMatrix.
    if (!is.null(amatrix) && is(amatrix, "MxMatrix")) {
      isSymmetric <- imxSymmetricMatrix(amatrix)
      # Write one value into [row, col], mirroring to [col, row] for
      # symmetric matrices so symmetry is preserved.
      setCell <- function(slotmat, value) {
        slotmat[row, col] <- value
        if (isSymmetric) {
          slotmat[col, row] <- value
        }
        slotmat
      }
      if (!is.null(free)) {
        index2 <- ((nextbracket - 1) %% length(free)) + 1
        amatrix@free <- setCell(amatrix@free, as.logical(free[index2]))
      }
      if (!is.null(values)) {
        index2 <- ((nextbracket - 1) %% length(values)) + 1
        amatrix@values <- setCell(amatrix@values, as.numeric(values[index2]))
      }
      if (!is.null(newlabels)) {
        index2 <- ((nextbracket - 1) %% length(newlabels)) + 1
        amatrix@labels <- setCell(amatrix@labels, as.character(newlabels[index2]))
      }
      if (!is.null(lbound)) {
        index2 <- ((nextbracket - 1) %% length(lbound)) + 1
        amatrix@lbound <- setCell(amatrix@lbound, as.numeric(lbound[index2]))
      }
      if (!is.null(ubound)) {
        index2 <- ((nextbracket - 1) %% length(ubound)) + 1
        amatrix@ubound <- setCell(amatrix@ubound, as.numeric(ubound[index2]))
      }
      model[[matrixname]] <- amatrix
    }
  }
  return(model)
}
|
# Data-cleaning script for the Gaming Study survey (decision-tree project).
# Reads the raw CSV, drops unused columns and NA rows, derives categorical
# summaries for the SWL (satisfaction with life), GAD (anxiety) and SPIN
# (social phobia) scores, trims Hours outliers, and writes the result to
# xlsx. NOTE(review): interactive analysis script -- setwd() and absolute
# paths tie it to the author's machine.
#importing libraries
library(Amelia)
library(outliers)
#setting working environment
setwd("/home/rajesh/Desktop/DMML_project/Datasets/1/")
#reading data and saving it into a data frame
gaming_data <- read.csv("GamingStudy_data.csv")
#getting the structure of the data frame
str(gaming_data)
#plotting missing percentage
missmap(gaming_data,main = "missing in gaming data")
#computing the total missing values in each column of the dataframe
sapply(gaming_data, function(x) sum(is.na(x)))
#removing unnecessary columns
gaming_data <- gaming_data[c(1:2,10,16:20,23,41:49,51:55)]
str(gaming_data)
#omitting the rows with null values
gaming_data <- na.omit(gaming_data)
#again removing unnecessary columns
gaming_data <- gaming_data[-c(1,2,3,22,23)]
length(unique(gaming_data))
#adding column to the data frame, creating categorical data from
#the existing data in the dataframe
# NOTE(review): SWL_T values outside 5-35 are silently skipped, which would
# make length(values) != nrow(gaming_data) and break the column assignment
# below; this assumes every score lies in 5-35 -- confirm. cut() would be the
# idiomatic, length-safe way to bin these scores instead of a growing vector.
values <- c()
for(value in gaming_data$SWL_T) {
if(value >= 5 && value <= 9){
values <- append(values,"Extremely Disatisfied")
}
else if (value >= 10 && value <= 14 ) {
values <-append(values,"Disatisfied")
}
else if (value >= 15 && value <= 19 ) {
values <-append(values,"Slightly Disatisfied")
}
else if (value == 20) {
values <-append(values,"Neutral")
}
else if (value >= 21 && value <= 25 ) {
values <-append(values,"Slightly Satisfied")
}
else if (value >= 26 && value <= 30 ) {
values <-append(values,"Satisfied")
}
else if (value >= 31 && value <= 35 ) {
values <-append(values,"Extremely Satisfied")
}
}
values
gaming_data$SWL_summary <- values
length(values)
length(gaming_data$SWL_summary)
unique(gaming_data$SWL_summary)
min(gaming_data$GAD_T)
max(gaming_data$GAD_T)
GAD_values <- c()
#setting again values for GAD as categorical
# NOTE(review): same length-mismatch risk as above if any GAD_T falls
# outside 0-21.
for(value in gaming_data$GAD_T) {
if(value >= 0 && value <= 4){
GAD_values <- append(GAD_values,"Normal")
}
else if (value >= 5 && value <= 9 ) {
GAD_values <-append(GAD_values,"Mild Anxiety")
}
else if (value >= 10 && value <= 14 ) {
GAD_values <-append(GAD_values,"Moderate Anxiety")
}
else if (value >= 15 && value <= 21 ) {
GAD_values <-append(GAD_values,"Severe Anxiety")
}
}
gaming_data$GAD_summary <- GAD_values
min(gaming_data$SPIN_T)
max(gaming_data$SPIN_T)
SPIN_values <- c()
#adding column for SPIN_T as categorical
for(value in gaming_data$SPIN_T) {
if(value >= 0 && value <= 18){
SPIN_values <- append(SPIN_values,"Normal")
}
else if (value >= 19 && value <= 30 ) {
SPIN_values <-append(SPIN_values,"Mild Social Phobia")
}
else if (value >= 31 && value <= 40 ) {
SPIN_values <-append(SPIN_values,"Moderate Social Phobia")
}
else if (value >= 41 && value <= 49 ) {
SPIN_values <-append(SPIN_values,"Severe Social Phobia")
}
else if (value >= 50 ){
SPIN_values <- append(SPIN_values,"Very Severe Social Phobia")
}
}
gaming_data$SPIN_summary <- SPIN_values
boxplot(gaming_data$Hours)
summary(gaming_data$Hours)
#handling outliers: counting the records with outlier values, substituting
#the threshold and repeating until the outliers are removed
count <- 0
for (value in gaming_data$Hours) {
if (value>46) {
count <- count + 1
}
}
count
str(gaming_data)
summary(gaming_data$Hours)
#getting inter quartile range (Q1 = 12, Q3 = 28 read off summary() above)
IOR <- 28 - 12
# NOTE(review): the upper whisker is conventionally Q3 + 1.5*IQR = 28 + 24
# = 52; "52 + 1.5*IOR" (= 76) looks like a typo for that formula -- confirm.
upper_wisker <- 52 + 1.5*IOR
head(gaming_data,10)
IQR(gaming_data$Hours)
outlier(gaming_data$Hours)
boxplot(gaming_data$Hours)
#getting values with no outliers
gaming_data <- subset(gaming_data,gaming_data$Hours <= 44)
max(gaming_data$Hours)
hour_values <- c()
summary(gaming_data$Age)
IQR(gaming_data$Age)
# NOTE(review): hour_values is never populated, so this assignment fails
# (replacement has length 0); the binning code for per-day playing time
# appears to be missing here -- TODO restore before running end-to-end.
gaming_data$Per_Day_Playing_time <- hour_values
gaming_data$Per_Day_Playing_time <- as.factor(gaming_data$Per_Day_Playing_time)
# writing the cleaned dataset to a file for training
writexl::write_xlsx(gaming_data,"/home/rajesh/cleaned_gaming_data.xlsx")
|
/Project/Dataset1_Decision_trees/gaming_data_cleaning_code.R
|
no_license
|
rajesh95cs/machine-learning-project
|
R
| false
| false
| 3,878
|
r
|
# Data-cleaning script for the Gaming Study survey (duplicate copy of the
# same file in this dump; annotations mirror the first copy).
# NOTE(review): interactive analysis script -- setwd() and absolute paths
# tie it to the author's machine.
#importing libraries
library(Amelia)
library(outliers)
#setting working environment
setwd("/home/rajesh/Desktop/DMML_project/Datasets/1/")
#reading data and saving it into a data frame
gaming_data <- read.csv("GamingStudy_data.csv")
#getting the structure of the data frame
str(gaming_data)
#plotting missing percentage
missmap(gaming_data,main = "missing in gaming data")
#computing the total missing values in each column of the dataframe
sapply(gaming_data, function(x) sum(is.na(x)))
#removing unnecessary columns
gaming_data <- gaming_data[c(1:2,10,16:20,23,41:49,51:55)]
str(gaming_data)
#omitting the rows with null values
gaming_data <- na.omit(gaming_data)
#again removing unnecessary columns
gaming_data <- gaming_data[-c(1,2,3,22,23)]
length(unique(gaming_data))
#adding column to the data frame, creating categorical data from
#the existing data in the dataframe
# NOTE(review): SWL_T values outside 5-35 are silently skipped, which would
# make length(values) != nrow(gaming_data); cut() would be length-safe.
values <- c()
for(value in gaming_data$SWL_T) {
if(value >= 5 && value <= 9){
values <- append(values,"Extremely Disatisfied")
}
else if (value >= 10 && value <= 14 ) {
values <-append(values,"Disatisfied")
}
else if (value >= 15 && value <= 19 ) {
values <-append(values,"Slightly Disatisfied")
}
else if (value == 20) {
values <-append(values,"Neutral")
}
else if (value >= 21 && value <= 25 ) {
values <-append(values,"Slightly Satisfied")
}
else if (value >= 26 && value <= 30 ) {
values <-append(values,"Satisfied")
}
else if (value >= 31 && value <= 35 ) {
values <-append(values,"Extremely Satisfied")
}
}
values
gaming_data$SWL_summary <- values
length(values)
length(gaming_data$SWL_summary)
unique(gaming_data$SWL_summary)
min(gaming_data$GAD_T)
max(gaming_data$GAD_T)
GAD_values <- c()
#setting again values for GAD as categorical
# NOTE(review): same length-mismatch risk if any GAD_T falls outside 0-21.
for(value in gaming_data$GAD_T) {
if(value >= 0 && value <= 4){
GAD_values <- append(GAD_values,"Normal")
}
else if (value >= 5 && value <= 9 ) {
GAD_values <-append(GAD_values,"Mild Anxiety")
}
else if (value >= 10 && value <= 14 ) {
GAD_values <-append(GAD_values,"Moderate Anxiety")
}
else if (value >= 15 && value <= 21 ) {
GAD_values <-append(GAD_values,"Severe Anxiety")
}
}
gaming_data$GAD_summary <- GAD_values
min(gaming_data$SPIN_T)
max(gaming_data$SPIN_T)
SPIN_values <- c()
#adding column for SPIN_T as categorical
for(value in gaming_data$SPIN_T) {
if(value >= 0 && value <= 18){
SPIN_values <- append(SPIN_values,"Normal")
}
else if (value >= 19 && value <= 30 ) {
SPIN_values <-append(SPIN_values,"Mild Social Phobia")
}
else if (value >= 31 && value <= 40 ) {
SPIN_values <-append(SPIN_values,"Moderate Social Phobia")
}
else if (value >= 41 && value <= 49 ) {
SPIN_values <-append(SPIN_values,"Severe Social Phobia")
}
else if (value >= 50 ){
SPIN_values <- append(SPIN_values,"Very Severe Social Phobia")
}
}
gaming_data$SPIN_summary <- SPIN_values
boxplot(gaming_data$Hours)
summary(gaming_data$Hours)
#handling outliers: counting the records with outlier values, substituting
#the threshold and repeating until the outliers are removed
count <- 0
for (value in gaming_data$Hours) {
if (value>46) {
count <- count + 1
}
}
count
str(gaming_data)
summary(gaming_data$Hours)
#getting inter quartile range (Q1 = 12, Q3 = 28 read off summary() above)
IOR <- 28 - 12
# NOTE(review): upper whisker is conventionally Q3 + 1.5*IQR = 52;
# "52 + 1.5*IOR" (= 76) looks like a typo for that formula -- confirm.
upper_wisker <- 52 + 1.5*IOR
head(gaming_data,10)
IQR(gaming_data$Hours)
outlier(gaming_data$Hours)
boxplot(gaming_data$Hours)
#getting values with no outliers
gaming_data <- subset(gaming_data,gaming_data$Hours <= 44)
max(gaming_data$Hours)
hour_values <- c()
summary(gaming_data$Age)
IQR(gaming_data$Age)
# NOTE(review): hour_values is never populated, so this assignment fails
# (replacement has length 0) -- the binning code appears to be missing.
gaming_data$Per_Day_Playing_time <- hour_values
gaming_data$Per_Day_Playing_time <- as.factor(gaming_data$Per_Day_Playing_time)
# writing the cleaned dataset to a file for training
writexl::write_xlsx(gaming_data,"/home/rajesh/cleaned_gaming_data.xlsx")
|
/Stepik2/Task1-3.R
|
no_license
|
venkaDaria/rlang-demo
|
R
| false
| false
| 847
|
r
| ||
# Semantic-overlap analysis between translation word pairs (columns c and i)
# using pretrained word embeddings. Loads the translation table, lower-cases
# it, and patches tokens that are absent from the embedding vocabulary.
library(tidyverse)
library(wordVectors)
library(tictoc)
# library(furrr)
#
# plan(multicore(workers = 3L))
# options(future.globals.maxSize = 10522669875)
# plan(sequential)
options(scipen = 99)
experiments <- read_csv("data/ms_final_experiments.csv")
# Normalise case and replace out-of-vocabulary tokens (apostrophes/hyphens)
# with spellings the embedding actually contains.
translations <- read_csv("data/final_translations.csv") %>%
mutate(c = str_to_lower(c), i = str_to_lower(i)) %>%
filter(!c == "i've") %>%
mutate(
c = case_when(
c == "hadn't" ~ "hadnt",
c == "word's" ~ "words",
c == "be-loved" ~ "beloved",
TRUE ~ c
),
i = case_when(
i == "hadn't" ~ "hadnt",
i == "word's" ~ "words",
i == "be-loved" ~ "beloved",
TRUE ~ i
)
)
translations %>%
write_csv("data/experiment_final.csv")
english_words <- unique(c(translations$c, translations$i))
# english_words[which(!english_words %in% rownames(english_vectors))]
## I've -> ive, hadn't, word's, be-loved
# Nearest-neighbour words of `word` in the embedding space, excluding the
# query word itself (the first hit returned by closest_to()).
closest_words <- function(word, vector_space, n = 10) {
  query <- vector_space[word, ]
  neighbours <- closest_to(vector_space, query, n = n + 1)
  neighbours <- slice(neighbours, -1)
  neighbours$word
}
# Scalar cosine similarity between two word vectors (wordVectors package).
cosine_similarity <- function(word1, word2, vector_space = glove_vectors) {
  similarity <- cosineSimilarity(vector_space[word1, ], vector_space[word2, ])
  as.double(similarity)
}
# Neighbourhood-overlap measure between two words: the mean cosine
# similarity of word1's n nearest neighbours to word2, likewise in the
# other direction, then the mean of the two directional scores.
# Side effect: prints the word pair (progress logging for long map2 runs).
semantic_overlap <- function(word1, word2, n = 10, vector_space) {
cat(paste0("word1: ", word1, ", word2: ", word2), sep = "\n")
word1_neighbors = closest_words(word1, n = n, vector_space = vector_space)
word2_neighbors = closest_words(word2, n = n, vector_space = vector_space)
neighbor1_vectors <- vector_space[word1_neighbors, ]
neighbor2_vectors <- vector_space[word2_neighbors, ]
overlap1 = mean(cosineSimilarity(neighbor1_vectors, vector_space[word2, ]))
overlap2 = mean(cosineSimilarity(neighbor2_vectors, vector_space[word1, ]))
overlap = mean(c(overlap1, overlap2))
return(overlap)
}
# english_vectors <- read.vectors("data/pretrained_embeddings/cc.en.300.vec", binary = F)
english_vectors_wiki <- read.vectors("../pretrained_vectors/fasttext/wiki-news-300d-1M-subword.vec", binary= F)
# NOTE(review): english_vectors is only created by the commented-out line
# above, so this call (and the map2 below) fails as written -- presumably
# the common-crawl vectors were loaded in an earlier session; confirm.
closest_words("propose", english_vectors)
# english overlaps, n = 10
# plan(sequential)
# tic()
# translations %>%
# slice(1:8) %>%
# mutate(english_overlap = future_map2_dbl(i, c, semantic_overlap, vector_space = english_vectors))
# toc()
tic()
english_overlaps <- translations %>%
mutate(
english_overlap = map2(i, c, semantic_overlap, vector_space = english_vectors)
)
toc()
english_overlaps %>% unnest() %>% write_csv("data/english_overlaps.csv")
english_overlaps %>% unnest() %>% arrange(-english_overlap) %>%
View()
## WIKI WORD VECTORS
# Further vocabulary patches needed for the wiki/subword embedding.
wiki_translations <- translations %>%
mutate(
c = str_replace_all(c, "'", ""),
i = str_replace_all(i, "'", ""),
i = case_when(
i == "sun-burn" ~ "sunburn",
TRUE ~ i
)
)
wiki_translation_words <- unique(c(wiki_translations$c, wiki_translations$i))
wiki_translation_words[which(!wiki_translation_words %in% rownames(english_vectors_wiki))]
tic()
english_overlaps_wiki <- wiki_translations %>%
mutate(
# NOTE(review): future_map2 needs furrr + plan(), both commented out at
# the top of the script -- either restore them or use purrr::map2.
english_overlap = future_map2(i, c, semantic_overlap, vector_space = english_vectors_wiki)
)
toc()
english_overlaps_wiki %>% unnest() %>% write_csv("data/english_overlaps_wiki.csv")
english_overlaps_wiki %>% unnest() %>% arrange(-english_overlap) %>%
View()
tic()
cosine_similarity("propose", "suggest", english_vectors_wiki)
toc()
english_overlaps <- read_csv("data/english_overlaps.csv")
english_overlaps
# Reshape the precomputed experiment table: one row per (pair, neighbour
# count), with the numeric neighbour count extracted from the column name.
english_overlap_experiment <- read_csv("data/ms_final_exnglish_experiments.csv") %>%
select(-c, -i, -l1_c, -l1_i) %>%
gather(l2_sim_cc_10:l2_sim_cc_100, key = "neighbors", value = "overlap") %>%
mutate(neighbors = as.numeric(str_extract(neighbors, "(?<=cc_).+$*")))
english_overlap_experiment %>% write_csv("data/english_overlap_experiment.csv")
# Density of overlap scores per language, one curve per neighbour count.
english_overlap_experiment %>%
ggplot(aes(overlap, color = as.factor(neighbors), group = neighbors)) +
geom_density() +
facet_wrap(~language) +
theme_light() +
theme(
legend.position = "top"
) +
labs(
color = "Number of Nearest Neighbors"
)
# Mean overlap versus neighbour count, per language.
english_overlap_experiment %>%
group_by(language, neighbors) %>%
summarize(
overlap = mean(overlap)
) %>%
ggplot(aes(as.factor(neighbors), overlap, group = 1)) +
geom_point() +
geom_line() +
facet_wrap(~language) +
scale_y_continuous(limits = c(0, 0.4)) +
theme_light(base_family = "CMU Serif") +
labs(
x = "Nearest Neighbors in Vector Space"
)
korean <- read.vectors("data/pretrained_embeddings/cc.ko.vec", binary = F)
|
/R/semantic_overlaps.R
|
permissive
|
kanishkamisra/cogsci2019
|
R
| false
| false
| 4,608
|
r
|
# Semantic-overlap analysis (duplicate copy of the same file in this dump).
# Loads the translation table, lower-cases it, and patches out-of-vocabulary
# tokens before computing embedding-neighbourhood overlaps.
library(tidyverse)
library(wordVectors)
library(tictoc)
# library(furrr)
#
# plan(multicore(workers = 3L))
# options(future.globals.maxSize = 10522669875)
# plan(sequential)
options(scipen = 99)
experiments <- read_csv("data/ms_final_experiments.csv")
# Normalise case and replace tokens the embedding does not contain.
translations <- read_csv("data/final_translations.csv") %>%
mutate(c = str_to_lower(c), i = str_to_lower(i)) %>%
filter(!c == "i've") %>%
mutate(
c = case_when(
c == "hadn't" ~ "hadnt",
c == "word's" ~ "words",
c == "be-loved" ~ "beloved",
TRUE ~ c
),
i = case_when(
i == "hadn't" ~ "hadnt",
i == "word's" ~ "words",
i == "be-loved" ~ "beloved",
TRUE ~ i
)
)
translations %>%
write_csv("data/experiment_final.csv")
english_words <- unique(c(translations$c, translations$i))
# english_words[which(!english_words %in% rownames(english_vectors))]
## I've -> ive, hadn't, word's, be-loved
# Nearest-neighbour words of `word` in the embedding space, excluding the
# query word itself (the first hit returned by closest_to()).
closest_words <- function(word, vector_space, n = 10) {
  query <- vector_space[word, ]
  neighbours <- closest_to(vector_space, query, n = n + 1)
  neighbours <- slice(neighbours, -1)
  neighbours$word
}
# Scalar cosine similarity between two word vectors (wordVectors package).
cosine_similarity <- function(word1, word2, vector_space = glove_vectors) {
  similarity <- cosineSimilarity(vector_space[word1, ], vector_space[word2, ])
  as.double(similarity)
}
# Neighbourhood-overlap measure between two words: the mean cosine
# similarity of word1's n nearest neighbours to word2, likewise in the
# other direction, then the mean of the two directional scores.
# Side effect: prints the word pair (progress logging for long map2 runs).
semantic_overlap <- function(word1, word2, n = 10, vector_space) {
cat(paste0("word1: ", word1, ", word2: ", word2), sep = "\n")
word1_neighbors = closest_words(word1, n = n, vector_space = vector_space)
word2_neighbors = closest_words(word2, n = n, vector_space = vector_space)
neighbor1_vectors <- vector_space[word1_neighbors, ]
neighbor2_vectors <- vector_space[word2_neighbors, ]
overlap1 = mean(cosineSimilarity(neighbor1_vectors, vector_space[word2, ]))
overlap2 = mean(cosineSimilarity(neighbor2_vectors, vector_space[word1, ]))
overlap = mean(c(overlap1, overlap2))
return(overlap)
}
# english_vectors <- read.vectors("data/pretrained_embeddings/cc.en.300.vec", binary = F)
english_vectors_wiki <- read.vectors("../pretrained_vectors/fasttext/wiki-news-300d-1M-subword.vec", binary= F)
# NOTE(review): english_vectors is only created by the commented-out line
# above, so this call (and the map2 below) fails as written -- confirm.
closest_words("propose", english_vectors)
# english overlaps, n = 10
# plan(sequential)
# tic()
# translations %>%
# slice(1:8) %>%
# mutate(english_overlap = future_map2_dbl(i, c, semantic_overlap, vector_space = english_vectors))
# toc()
tic()
english_overlaps <- translations %>%
mutate(
english_overlap = map2(i, c, semantic_overlap, vector_space = english_vectors)
)
toc()
english_overlaps %>% unnest() %>% write_csv("data/english_overlaps.csv")
english_overlaps %>% unnest() %>% arrange(-english_overlap) %>%
View()
## WIKI WORD VECTORS
# Further vocabulary patches needed for the wiki/subword embedding.
wiki_translations <- translations %>%
mutate(
c = str_replace_all(c, "'", ""),
i = str_replace_all(i, "'", ""),
i = case_when(
i == "sun-burn" ~ "sunburn",
TRUE ~ i
)
)
wiki_translation_words <- unique(c(wiki_translations$c, wiki_translations$i))
wiki_translation_words[which(!wiki_translation_words %in% rownames(english_vectors_wiki))]
tic()
english_overlaps_wiki <- wiki_translations %>%
mutate(
# NOTE(review): future_map2 needs furrr + plan(), both commented out at
# the top of the script -- either restore them or use purrr::map2.
english_overlap = future_map2(i, c, semantic_overlap, vector_space = english_vectors_wiki)
)
toc()
english_overlaps_wiki %>% unnest() %>% write_csv("data/english_overlaps_wiki.csv")
english_overlaps_wiki %>% unnest() %>% arrange(-english_overlap) %>%
View()
tic()
cosine_similarity("propose", "suggest", english_vectors_wiki)
toc()
english_overlaps <- read_csv("data/english_overlaps.csv")
english_overlaps
# Reshape the precomputed experiment table: one row per (pair, neighbour
# count), with the numeric neighbour count extracted from the column name.
english_overlap_experiment <- read_csv("data/ms_final_exnglish_experiments.csv") %>%
select(-c, -i, -l1_c, -l1_i) %>%
gather(l2_sim_cc_10:l2_sim_cc_100, key = "neighbors", value = "overlap") %>%
mutate(neighbors = as.numeric(str_extract(neighbors, "(?<=cc_).+$*")))
english_overlap_experiment %>% write_csv("data/english_overlap_experiment.csv")
# Density of overlap scores per language, one curve per neighbour count.
english_overlap_experiment %>%
ggplot(aes(overlap, color = as.factor(neighbors), group = neighbors)) +
geom_density() +
facet_wrap(~language) +
theme_light() +
theme(
legend.position = "top"
) +
labs(
color = "Number of Nearest Neighbors"
)
# Mean overlap versus neighbour count, per language.
english_overlap_experiment %>%
group_by(language, neighbors) %>%
summarize(
overlap = mean(overlap)
) %>%
ggplot(aes(as.factor(neighbors), overlap, group = 1)) +
geom_point() +
geom_line() +
facet_wrap(~language) +
scale_y_continuous(limits = c(0, 0.4)) +
theme_light(base_family = "CMU Serif") +
labs(
x = "Nearest Neighbors in Vector Space"
)
korean <- read.vectors("data/pretrained_embeddings/cc.ko.vec", binary = F)
|
# Auto-generated fuzzer/valgrind reproduction case for
# UniIsoRegression:::pre_2d_l2_inc (2-D L2 isotonic regression, increasing).
# NOTE(review): the dims of 'data' (1x1) and 'w' (10x6) disagree -- this is
# a crash-path input from fuzzing, not a meaningful regression call.
testlist <- list(data = structure(0, .Dim = c(1L, 1L)), w = structure(c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(10L, 6L)))
result <- do.call(UniIsoRegression:::pre_2d_l2_inc,testlist)
str(result)
|
/UniIsoRegression/inst/testfiles/pre_2d_l2_inc/libFuzzer_pre_2d_l2_inc/pre_2d_l2_inc_valgrind_files/1612736688-test.R
|
no_license
|
akhikolla/updatedatatype-list1
|
R
| false
| false
| 349
|
r
|
# Auto-generated fuzzer/valgrind reproduction case for
# UniIsoRegression:::pre_2d_l2_inc (duplicate copy in this dump).
# NOTE(review): the dims of 'data' (1x1) and 'w' (10x6) disagree -- this is
# a crash-path input from fuzzing, not a meaningful regression call.
testlist <- list(data = structure(0, .Dim = c(1L, 1L)), w = structure(c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(10L, 6L)))
result <- do.call(UniIsoRegression:::pre_2d_l2_inc,testlist)
str(result)
|
############## COMMENTS ####################
### REGIMES
# 1 - EXPANSION
# 0 - RECESSION
set.seed(42)
# preparing matryx Y for GDP in USA
dftemp<-USA_GDP_ch
Y<-dftemp%>% select(c(ID,Period, Value)) %>% pivot_wider(names_from = ID,values_from = Value)
Y <- as.matrix(Y[,-1])
W<-W_USA
table(is.na(Y))
######################### PARAMETRY DLA USA GDP ##########
N <- n_states
theta0 <- list(rho = 0.5,
mu_1 = rep(4, N),
mu_0 = rep(2.3, N),
omega_d = rep(1, N), #VARIANCES (already squared)
p_00 = rep(0.8, N),
p_11 = rep(0.8, N))
hyperpar0 = list(alpha_prior = matrix(c(8, 2, 1, 9), nrow = 2, byrow = TRUE),
v_prior = 6,
delta_prior = 2, ## changed from 0.4
m_prior = matrix(c(2.3,4), nrow = 2),
M_prior = diag(2))
start <- Sys.time()
posterior_a <- sample_posterior(initial = theta0, hyperpar = hyperpar0, S = 5000, S0 = 1000, S_rho = 1000, S0_rho = 100, Y = Y, W = W)
end <- Sys.time()
print(end - start)
save.image(paste0("~/Desktop/Magisterka/Master_git/post_simul/posterior_USA_GDP_", format(Sys.time(), "%b%d"), ".RData"))
############ POSTERIOR SIMULATION #################################
posterior_a <- list()
start <- Sys.time()
posterior_a <- sample_posterior(initial = theta0, hyperpar = hyperpar0, S = 5000, S0 = 1000, S_rho = 10000, S0_rho = 2000, Y = Y, W = W)
end <- Sys.time()
print(end - start)
save.image(paste0("~/Desktop/Magisterka/Master_git/post_simul/posterior_USA_GDP", format(Sys.time(), "%b%d"), ".RData"))
#save.image(paste0("~/Desktop/Magisterka/Master_git/post_simul/posterior_PL_GDP", format(Sys.time(), "%b%d"), ".RData"))
#save.image(paste0("~/Desktop/Magisterka/Master_git/post_simul/posterior_USA_UE", format(Sys.time(), "%b%d"), ".RData"))
#save.image(paste0("~/Desktop/Magisterka/Master_git/post_simul/posterior_PL_UE", format(Sys.time(), "%b%d"), ".RData"))
########### PRIORS for illustration
load("~/Desktop/Magisterka/Master_git/post_simul/posterior_USA_GDP_Apr06.RData")
library(RColorBrewer)
library(classInt)
path<-"~/Desktop/Magisterka/Master_git/raw_maps/map"
path2<-"~/Desktop/Magisterka/Master_git/raw_maps/"
path3<-"~/Desktop/Magisterka/Master_git/output/"
posterior <- posterior_a
n<-n_states
setwd("~/Desktop/Magisterka/Master_git/output")
# NOTE(review): attach() puts hyperpar0's elements (v_prior, delta_prior,
# m_prior, M_prior, alpha_prior) on the search path and is never detach()ed;
# prefer with(hyperpar0, ...) or explicit hyperpar0$... access.
attach(hyperpar0)
# Grids over which the prior densities are evaluated, spanning the range of
# the corresponding posterior draws. Columns of `posterior` are laid out as
# rho | mu_1 (n) | mu_0 (n) | omega (n) | p_00 (n) | p_11 (n), matching the
# v_* extractions further below.
sigma_domain <- seq(from = 0, to = max(posterior[,(2*n+2):(3*n+1)]), by = 0.01)
sigma_prior <- dinvgamma(sigma_domain, shape = v_prior/2, scale = delta_prior/2)
m1_domain <- seq(from = min(posterior[,2:(n+1)]), to = max(posterior[,2:(n+1)]), by = 0.01)
m0_domain <- seq(from = min(posterior[,(n+2):(2*n+1)]), to = max(posterior[,(n+2):(2*n+1)]), by = 0.01)
# Common domain covering both regime means, for overlaid plots.
m_domain <- seq(from = min(c(m0_domain, m1_domain)), to = max(c(m1_domain, m0_domain)), by = 0.01)
m1_prior <- dnorm(m_domain, mean = m_prior[2], sd = M_prior[2,2]^0.5)
m0_prior <- dnorm(m_domain, mean = m_prior[1], sd = M_prior[1,1]^0.5)
p_domain <- seq(from = 0, to = 1, by = 0.01)
p11_prior <- dbeta(p_domain, alpha_prior[2,2], alpha_prior[2,1])
p00_prior <- dbeta(p_domain, alpha_prior[1,1], alpha_prior[1,2])
# Theoretical lower bound for rho from the smallest eigenvalue of W.
lowerbound_rho <- 1/min(eigen(W)$values)
lowerbound_rho2 <- -0.5
rho_domain <- seq(from = lowerbound_rho2, to = 1, by = 0.01)
rho_prior <- rep(1/(1-lowerbound_rho2), length(rho_domain))
v_m1<-posterior[,2:(n+1)]
v_m0<-posterior[,(n+2):(2*n+1)]
v_omega<-posterior[,(2*n+2):(3*n+1)]
v_p0<-posterior[,(3*n+2):(4*n+1)]
v_p1<-posterior[,(4*n+2):(5*n+1)]
v_rho<-posterior[,1]
########### ILLUSTRATE POSTERIORS (TOTAL) ##############
main_colour <- "navy"
main_colour2<- "deeppink3"
variable<-'GDP'
country<-'USA'
cex<-1
n_col<-4
n_row<-7
m<- n_col*n_row
names<-USA_states
N<-length(colnames(Y))
pages<-ceiling(N/m)
### ILLUSTRATE M
for (i in 1:pages){
page<-i
#m1+m0
png(file = paste0("m1m0_",country,variable,"_",page,".png"), width = 8.27, height = 11.69, units ="in",res=300)
par(mfrow = c(n_row, n_col), family="serif",mar=c(3, 2, 2, 0)+ 0.1,mgp=c(1.5,0.2,0))
for (pp in 1:m) {
pp<-pp+(page-1)*m
if (pp<=N){
hist(v_m1[,pp], freq = FALSE, main = colnames(Y)[pp], border=rgb(1, 1, 1, 0, maxColorValue = 1),
xlab = NULL, ylab = NULL, nclass = 20, col="skyblue4",#col = rgb(0, 0, 0, 0.5, maxColorValue = 1),
xlim = c(min(m_domain), max(m_domain)), #ylim = c(0,1.5),
cex.main = cex, cex.axis = cex/1.2,tck=-0.02)
hist(v_m0[,pp], freq = FALSE, main = colnames(Y)[pp], border=rgb(1, 1, 1, 0, maxColorValue = 1),
xlab = NULL, ylab = NULL, nclass = 20, col = rgb(1, 0, 0, 0.5, maxColorValue = 1),
xlim = c(min(m_domain), max(m_domain)), #ylim = c(0,1.5),
cex.main = cex, cex.axis = cex/1.2, add = TRUE)
lines(x=m_domain, y=m1_prior, lwd = 2, col = "steelblue4")
lines(x=m_domain, y=m0_prior, lwd = 2, col = main_colour2)
legend(x="topleft", legend = c("m1 a priori", "m1 a posteriori", "m0 a priori", "m0 a posteriori"),
fill = c("steelblue4", "skyblue4", main_colour2, rgb(1, 0, 0, 0.5, maxColorValue = 1)),
bty = "n", cex = cex/1.4)}}
dev.off()
}
### ILLUSTRATE P
for (i in 1:pages){
page<-i
#p11+p00
png(file = paste0("p1p0_",country,variable,"_",page,".png"), width = 8.27,
height = 11.69, units ="in", res=300)
par(mfrow = c(n_row, n_col),family="serif",mar=c(3, 1, 2, 1)+ 0.1,mgp=c(1.5,0.2,0))
for (pp in 1:m) {
pp<-pp+(page-1)*m
if (pp<=N){
hist(v_p1[,pp], freq = FALSE, main = names[pp], col="skyblue4",
xlab = NULL, ylab = NULL, nclass = 10, #col = rgb(0, 0, 0, 0.5, maxColorValue = 1),
xlim = c(min(p_domain), max(p_domain)), #ylim = c(0, 8),
cex.main = cex, cex.axis = cex/1.2,tck=-0.02)
hist(v_p0[,pp], freq = FALSE, main = names[pp], border=main_colour2,
xlab = NULL, ylab = NULL, nclass = 10, col = rgb(1, 0, 0, 0.5, maxColorValue = 1),
xlim = c(min(p_domain), max(p_domain)), #ylim = c(0, 8),
add = TRUE, cex.main = cex, cex.axis = cex/1.2)
lines(x=p_domain, y=p11_prior, lwd = 2, col = "steelblue4")
lines(x=p_domain, y=p00_prior, lwd = 2, col = main_colour2)
legend(x="topleft", legend = c("p11 a priori", "p11 a posteriori", "p00 a priori", "p00 a posteriori"),
fill = c("steelblue4", "skyblue4", main_colour2, rgb(1, 0, 0, 0.5, maxColorValue = 1)),
bty = "n", cex = cex/1.4)
}}
dev.off()
}
### ILLUSTRATE RHO
title="Stopa bezrobocia w Polsce"
#rho
png(file = paste0("rho_",country,"_",variable,".png"), width = 400, height = 400)
hist(v_rho, freq = FALSE, #main = title,
xlab = NULL, ylab = NULL, nclass = 20, col = rgb(0, 0, 0, 0.5, maxColorValue = 1),
#xlim = c(lowerbound_rho2, 1), #ylim = c(0, 15),
cex.main = cex, cex.axis = cex/1.7)
lines(x = rho_domain, y = rho_prior, lwd = 2, col = "grey")
legend(x = "topleft", legend = c("rho a priori", "rho a posteriori"),
fill = c("grey", rgb(0, 0, 0, 0.5, maxColorValue = 1)),
bty = "n", cex = cex/2)
dev.off()
############# TABLES #######################
post <- posterior
post.sum <- matrix(NA, nrow = 4, ncol = ncol(posterior))
v_m1<-posterior[,2:(n+1)]
v_m0<-posterior[,(n+2):(2*n+1)]
v_omega<-posterior[,(2*n+2):(3*n+1)]
v_p0<-posterior[,(3*n+2):(4*n+1)]
v_p1<-posterior[,(4*n+2):(5*n+1)]
# Posterior summary statistics; each column of `post` is one model parameter.
# BUG FIX: the original used vapply(post, 2, FUN = sd) -- a mis-call, since
# vapply's second argument is FUN (here the constant 2), not a margin.
# apply() over columns is what was intended (post is a draws-by-parameters matrix).
post.sum[1,] <- colMeans(post)                           # posterior means
post.sum[2,] <- apply(post, 2, sd)                       # posterior SDs
post.sum[3,] <- apply(post, 2, quantile, probs = 0.025)  # lower 95% bound
post.sum[4,] <- apply(post, 2, quantile, probs = 0.975)  # upper 95% bound
post2 <- cbind(t(post.sum[,(n+2):(2*n+1)]),
t(post.sum[, 2:(n+1)]),
t(post.sum[,(3*n+2):(4*n+1)]),
t(post.sum[,(4*n+2):(5*n+1)]),
t(post.sum[,(2*n+2):(3*n+1)]))
rownames(post2) <- names
colnames(post2) <- paste0(rep(c("m0", "m1", "p00", "p11", "sigma"), each = 4), " ", rep(c("post mean", "post SD", "HPDI 95 L", "HPDI 95 U"), 5))
post2 <- round(post2,3)
write.table(post2, file = paste0(country,variable, "_results.csv"), sep = ";", dec = ",")
rho.sum <- post.sum[,1]
theta_posterior_means <- list(rho = post.sum[1,1],
mu_1 = as.vector(post2[,5]),
mu_0 = as.vector(post2[,1]),
omega_d = as.vector(post2[,17]),
p_00 = as.vector(post2[,9]),
p_11 = as.vector(post2[,13]))
p_Hamilton <- Hamilton_filter(Y, theta_posterior_means, W)
p_Hamilton <- p_Hamilton$p_1
for (i in 1:pages){
page<-i
png(file = paste0("Hamilton_", country,variable,"_",page,".png"), width = 8.27, height = 11.69, units ="in",res=300)
par(mfrow = c(n_row, n_col), family="serif",mar=c(2, 2, 2, 0.5)+ 0.1,mgp=c(1,0,0))
dates<-unique(USA_GDP_ch$Period)
for (pp in 1:m) {
pp<-pp+(page-1)*m
if (pp<=N){
plot(x=1:length(dates), y=p_Hamilton[,pp],type="l",lwd=2,xlab="",
ylab="p-stwo ekspansji",main=names[pp],col="navy",
cex.axis=cex/2,cex.main=cex,cex.lab=cex/2,xaxt="none",yaxt="none")
axis(2, cex.axis=cex/1.5, tck=-0.015)
axis(1, seq(1,60,4), cex.axis=cex/1.5, srt = 45,tck=-0.015, #col.axis="red",
labels=c("2006","2007","2008","2009","2010","2011","2012",
"2013","2014","2015","2016","2017","2018","2019","2020"))
}}
dev.off()
}
############### TWORZENEI RYSUNKÓW IMPULSU - indywidualnie dla każdego zestawu
nclr<-9
e<-c()
impulse <- theta_posterior_means$mu_1 - theta_posterior_means$mu_0
for (pp in 1:N) {
impulse2 <- as.matrix(rep(0,N))
impulse2[pp] <- impulse[pp]
effect <- solve(diag(N) - theta_posterior_means$rho * W) %*% impulse2
e<-cbind(e,effect)}
effect_mean<-mean(e)
vec_e<-c(e)
breaks_qt <- classIntervals(vec_e,2, n = nclr, style = "quantile")
r <- breaks_qt$brks
# choice of folder to keep maps
n_col<-4
n_row<-5
m<- n_col*n_row
pages<-ceiling(N/m)
pal<-c()
# Draw a choropleth of the spatial response to an impulse in region i.
#
# map    - sp SpatialPolygonsDataFrame to draw
# N      - number of regions (kept for interface compatibility; unused)
# n      - character vector of region names (used for the panel title)
# theta  - parameter list (kept for interface compatibility; unused)
# W      - spatial weight matrix (kept for interface compatibility; unused)
# ef     - matrix of effects; column i is the response to impulse i
# r      - break points used to bin the response into colour brackets
# legend - value passed through to spplot's colorkey argument
# i      - index of the impulsed region
#
# The original body also recomputed the (never used) impulse vector from
# theta$mu_1 - theta$mu_0; that dead code is removed here.
draw_impulse2<-function(map,N,n,theta,W,ef,r,legend,i){
pal <- brewer.pal(9, "PuBuGn")
# Bin the i-th effect column into the pre-computed brackets and plot it.
map@data$response <- as.vector(ef[,i])
map@data$bracket <- cut(map@data$response, r)
spplot(map, "bracket", lwd=0.1, col.regions=pal,colorkey=legend,
par.settings = list(axis.line = list(col = 'transparent')),
main = list(label=n[i],cex=0.8,fontfamily="serif"))
}
# Render only the colour-key legend for the impulse maps as a standalone plot.
# Draws the map invisibly (lwd = 0) and re-uses the colour key that spplot
# builds for the bracketed response, placing it in the centre of the device.
# Arguments mirror draw_impulse2 (minus `legend`); the impulse computed from
# theta below is never used afterwards -- presumably kept for symmetry with
# draw_impulse2. TODO confirm.
draw_impulse_empty<-function(map,N,n,theta,W,ef,r,i){
#pp<-1
#theta<-theta_posterior_means
nclr<-9
#pal <- colorRampPalette(c("white", "black"), bias = 1)
impulse <- theta$mu_1 - theta$mu_0
pal <- brewer.pal(9, "PuBuGn")
impulse2 <- as.matrix(rep(0,N))
impulse2[i] <- impulse[i]
# Bin the i-th effect column exactly as in draw_impulse2 so the legend
# brackets match the maps it accompanies.
map@data$response <- as.vector(ef[,i])
map@data$bracket <- cut(map@data$response, r)
s<-spplot(map, "bracket", lwd=0, col.regions=pal,
colorkey=list(space='left',height = 2,width =4),
par.settings = list(axis.line = list(col = 'transparent')),
main = list(label='',cex=0.8,fontfamily="serif"))
library(grid)
library(lattice)
# Extract the colour-key specification that spplot attached to its trellis
# object, so it can be re-drawn on its own via draw.colorkey.
args <- s$legend$left$args$key
## Prepare list of arguments needed by `legend=` argument (as described in ?xyplot)
legendArgs <- list(fun = draw.colorkey,
args = list(key = args),
corner = c(0.5,.5),
fontfamily="serif")
## Call spplot() again, this time passing in to legend the arguments
## needed to print a color key
# NOTE(review): this second call references the global USA_map rather than the
# `map` argument, so the function only works for the USA data set -- confirm
# whether that is intended.
spplot(USA_map, "ID", colorkey =FALSE,
panel=function(x, y, ...){
panel.rect(xleft=180000, ybottom=330000,
xright=181000, ytop=330500, alpha=1)},lwd=0, par.settings = list(axis.line = list(col = 'transparent')),
legend = list(inside = legendArgs))
}
png(file = paste0("legend_effect_",country,"_",variable,".png"), width = 3, height = 4, units ="in",res=300)
draw_impulse_empty(USA_map,48,names,theta_posterior_means,W,e,r,1)
dev.off()
setwd(path3)
## UNEMPLOYMENT RATE IN POLAND
for (page in 1:pages){
if (m+(page-1)*(m-1)>N){
dif<- m - (N-(m+(page-1)*(m-1))+1)
temp<-seq(1+(page-1)*(m-1),N)
png(file = paste0("effect_",country,variable,"_",page,".png"), width = 8.27, height = 11.69, units ="in",res=300)
plots = lapply(temp, function(.x) draw_impulse2(USA_map,48,names,theta_posterior_means, W, e, r, FALSE, .x))
p<-marrangeGrob(plots, nrow=n_row, ncol=n_col)
print(p)
dev.off()
}else{
temp<-seq(1+(page-1)*(m-1), m+(page-1)*(m-1)-1)
png(file = paste0("effect_",country,variable,"_",page,".png"), width = 8.27, height = 11.69, units ="in",res=300)
plots = lapply(temp, function(.x) draw_impulse2(USA_map,48,names,theta_posterior_means,W,e,r,FALSE,.x))
do.call(grid.arrange,plots)
dev.off()
}
}
write.table(rho.sum, file = paste0("rho_results_",country,"_",variable,".csv"), sep = ";", dec = ",")
|
/USA_GDP_analysis.R
|
no_license
|
CocoChanelno5/Master_git
|
R
| false
| false
| 13,036
|
r
|
############## COMMENTS ####################
### REGIMES
# 1 - EXPANSION
# 0 - RECESSION
set.seed(42)
# preparing matryx Y for GDP in USA
dftemp<-USA_GDP_ch
Y<-dftemp%>% select(c(ID,Period, Value)) %>% pivot_wider(names_from = ID,values_from = Value)
Y <- as.matrix(Y[,-1])
W<-W_USA
table(is.na(Y))
######################### PARAMETRY DLA USA GDP ##########
N <- n_states
theta0 <- list(rho = 0.5,
mu_1 = rep(4, N),
mu_0 = rep(2.3, N),
omega_d = rep(1, N), #VARIANCES (already squared)
p_00 = rep(0.8, N),
p_11 = rep(0.8, N))
hyperpar0 = list(alpha_prior = matrix(c(8, 2, 1, 9), nrow = 2, byrow = TRUE),
v_prior = 6,
delta_prior = 2, ## changed from 0.4
m_prior = matrix(c(2.3,4), nrow = 2),
M_prior = diag(2))
start <- Sys.time()
posterior_a <- sample_posterior(initial = theta0, hyperpar = hyperpar0, S = 5000, S0 = 1000, S_rho = 1000, S0_rho = 100, Y = Y, W = W)
end <- Sys.time()
print(end - start)
save.image(paste0("~/Desktop/Magisterka/Master_git/post_simul/posterior_USA_GDP_", format(Sys.time(), "%b%d"), ".RData"))
############ POSTERIOR SIMULATION #################################
posterior_a <- list()
start <- Sys.time()
posterior_a <- sample_posterior(initial = theta0, hyperpar = hyperpar0, S = 5000, S0 = 1000, S_rho = 10000, S0_rho = 2000, Y = Y, W = W)
end <- Sys.time()
print(end - start)
save.image(paste0("~/Desktop/Magisterka/Master_git/post_simul/posterior_USA_GDP", format(Sys.time(), "%b%d"), ".RData"))
#save.image(paste0("~/Desktop/Magisterka/Master_git/post_simul/posterior_PL_GDP", format(Sys.time(), "%b%d"), ".RData"))
#save.image(paste0("~/Desktop/Magisterka/Master_git/post_simul/posterior_USA_UE", format(Sys.time(), "%b%d"), ".RData"))
#save.image(paste0("~/Desktop/Magisterka/Master_git/post_simul/posterior_PL_UE", format(Sys.time(), "%b%d"), ".RData"))
########### PRIORS for illustration
load("~/Desktop/Magisterka/Master_git/post_simul/posterior_USA_GDP_Apr06.RData")
library(RColorBrewer)
library(classInt)
path<-"~/Desktop/Magisterka/Master_git/raw_maps/map"
path2<-"~/Desktop/Magisterka/Master_git/raw_maps/"
path3<-"~/Desktop/Magisterka/Master_git/output/"
posterior <- posterior_a
n<-n_states
setwd("~/Desktop/Magisterka/Master_git/output")
attach(hyperpar0)
sigma_domain <- seq(from = 0, to = max(posterior[,(2*n+2):(3*n+1)]), by = 0.01)
sigma_prior <- dinvgamma(sigma_domain, shape = v_prior/2, scale = delta_prior/2)
m1_domain <- seq(from = min(posterior[,2:(n+1)]), to = max(posterior[,2:(n+1)]), by = 0.01)
m0_domain <- seq(from = min(posterior[,(n+2):(2*n+1)]), to = max(posterior[,(n+2):(2*n+1)]), by = 0.01)
m_domain <- seq(from = min(c(m0_domain, m1_domain)), to = max(c(m1_domain, m0_domain)), by = 0.01)
m1_prior <- dnorm(m_domain, mean = m_prior[2], sd = M_prior[2,2]^0.5)
m0_prior <- dnorm(m_domain, mean = m_prior[1], sd = M_prior[1,1]^0.5)
p_domain <- seq(from = 0, to = 1, by = 0.01)
p11_prior <- dbeta(p_domain, alpha_prior[2,2], alpha_prior[2,1])
p00_prior <- dbeta(p_domain, alpha_prior[1,1], alpha_prior[1,2])
lowerbound_rho <- 1/min(eigen(W)$values)
lowerbound_rho2 <- -0.5
rho_domain <- seq(from = lowerbound_rho2, to = 1, by = 0.01)
rho_prior <- rep(1/(1-lowerbound_rho2), length(rho_domain))
v_m1<-posterior[,2:(n+1)]
v_m0<-posterior[,(n+2):(2*n+1)]
v_omega<-posterior[,(2*n+2):(3*n+1)]
v_p0<-posterior[,(3*n+2):(4*n+1)]
v_p1<-posterior[,(4*n+2):(5*n+1)]
v_rho<-posterior[,1]
########### ILLUSTRATE POSTERIORS (TOTAL) ##############
main_colour <- "navy"
main_colour2<- "deeppink3"
variable<-'GDP'
country<-'USA'
cex<-1
n_col<-4
n_row<-7
m<- n_col*n_row
names<-USA_states
N<-length(colnames(Y))
pages<-ceiling(N/m)
### ILLUSTRATE M
for (i in 1:pages){
page<-i
#m1+m0
png(file = paste0("m1m0_",country,variable,"_",page,".png"), width = 8.27, height = 11.69, units ="in",res=300)
par(mfrow = c(n_row, n_col), family="serif",mar=c(3, 2, 2, 0)+ 0.1,mgp=c(1.5,0.2,0))
for (pp in 1:m) {
pp<-pp+(page-1)*m
if (pp<=N){
hist(v_m1[,pp], freq = FALSE, main = colnames(Y)[pp], border=rgb(1, 1, 1, 0, maxColorValue = 1),
xlab = NULL, ylab = NULL, nclass = 20, col="skyblue4",#col = rgb(0, 0, 0, 0.5, maxColorValue = 1),
xlim = c(min(m_domain), max(m_domain)), #ylim = c(0,1.5),
cex.main = cex, cex.axis = cex/1.2,tck=-0.02)
hist(v_m0[,pp], freq = FALSE, main = colnames(Y)[pp], border=rgb(1, 1, 1, 0, maxColorValue = 1),
xlab = NULL, ylab = NULL, nclass = 20, col = rgb(1, 0, 0, 0.5, maxColorValue = 1),
xlim = c(min(m_domain), max(m_domain)), #ylim = c(0,1.5),
cex.main = cex, cex.axis = cex/1.2, add = TRUE)
lines(x=m_domain, y=m1_prior, lwd = 2, col = "steelblue4")
lines(x=m_domain, y=m0_prior, lwd = 2, col = main_colour2)
legend(x="topleft", legend = c("m1 a priori", "m1 a posteriori", "m0 a priori", "m0 a posteriori"),
fill = c("steelblue4", "skyblue4", main_colour2, rgb(1, 0, 0, 0.5, maxColorValue = 1)),
bty = "n", cex = cex/1.4)}}
dev.off()
}
### ILLUSTRATE P
for (i in 1:pages){
page<-i
#p11+p00
png(file = paste0("p1p0_",country,variable,"_",page,".png"), width = 8.27,
height = 11.69, units ="in", res=300)
par(mfrow = c(n_row, n_col),family="serif",mar=c(3, 1, 2, 1)+ 0.1,mgp=c(1.5,0.2,0))
for (pp in 1:m) {
pp<-pp+(page-1)*m
if (pp<=N){
hist(v_p1[,pp], freq = FALSE, main = names[pp], col="skyblue4",
xlab = NULL, ylab = NULL, nclass = 10, #col = rgb(0, 0, 0, 0.5, maxColorValue = 1),
xlim = c(min(p_domain), max(p_domain)), #ylim = c(0, 8),
cex.main = cex, cex.axis = cex/1.2,tck=-0.02)
hist(v_p0[,pp], freq = FALSE, main = names[pp], border=main_colour2,
xlab = NULL, ylab = NULL, nclass = 10, col = rgb(1, 0, 0, 0.5, maxColorValue = 1),
xlim = c(min(p_domain), max(p_domain)), #ylim = c(0, 8),
add = TRUE, cex.main = cex, cex.axis = cex/1.2)
lines(x=p_domain, y=p11_prior, lwd = 2, col = "steelblue4")
lines(x=p_domain, y=p00_prior, lwd = 2, col = main_colour2)
legend(x="topleft", legend = c("p11 a priori", "p11 a posteriori", "p00 a priori", "p00 a posteriori"),
fill = c("steelblue4", "skyblue4", main_colour2, rgb(1, 0, 0, 0.5, maxColorValue = 1)),
bty = "n", cex = cex/1.4)
}}
dev.off()
}
### ILLUSTRATE RHO
title="Stopa bezrobocia w Polsce"
#rho
png(file = paste0("rho_",country,"_",variable,".png"), width = 400, height = 400)
hist(v_rho, freq = FALSE, #main = title,
xlab = NULL, ylab = NULL, nclass = 20, col = rgb(0, 0, 0, 0.5, maxColorValue = 1),
#xlim = c(lowerbound_rho2, 1), #ylim = c(0, 15),
cex.main = cex, cex.axis = cex/1.7)
lines(x = rho_domain, y = rho_prior, lwd = 2, col = "grey")
legend(x = "topleft", legend = c("rho a priori", "rho a posteriori"),
fill = c("grey", rgb(0, 0, 0, 0.5, maxColorValue = 1)),
bty = "n", cex = cex/2)
dev.off()
############# TABLES #######################
post <- posterior
post.sum <- matrix(NA, nrow = 4, ncol = ncol(posterior))
v_m1<-posterior[,2:(n+1)]
v_m0<-posterior[,(n+2):(2*n+1)]
v_omega<-posterior[,(2*n+2):(3*n+1)]
v_p0<-posterior[,(3*n+2):(4*n+1)]
v_p1<-posterior[,(4*n+2):(5*n+1)]
# Posterior summary statistics; each column of `post` is one model parameter.
# BUG FIX: the original used vapply(post, 2, FUN = sd) -- a mis-call, since
# vapply's second argument is FUN (here the constant 2), not a margin.
# apply() over columns is what was intended (post is a draws-by-parameters matrix).
post.sum[1,] <- colMeans(post)                           # posterior means
post.sum[2,] <- apply(post, 2, sd)                       # posterior SDs
post.sum[3,] <- apply(post, 2, quantile, probs = 0.025)  # lower 95% bound
post.sum[4,] <- apply(post, 2, quantile, probs = 0.975)  # upper 95% bound
post2 <- cbind(t(post.sum[,(n+2):(2*n+1)]),
t(post.sum[, 2:(n+1)]),
t(post.sum[,(3*n+2):(4*n+1)]),
t(post.sum[,(4*n+2):(5*n+1)]),
t(post.sum[,(2*n+2):(3*n+1)]))
rownames(post2) <- names
colnames(post2) <- paste0(rep(c("m0", "m1", "p00", "p11", "sigma"), each = 4), " ", rep(c("post mean", "post SD", "HPDI 95 L", "HPDI 95 U"), 5))
post2 <- round(post2,3)
write.table(post2, file = paste0(country,variable, "_results.csv"), sep = ";", dec = ",")
rho.sum <- post.sum[,1]
theta_posterior_means <- list(rho = post.sum[1,1],
mu_1 = as.vector(post2[,5]),
mu_0 = as.vector(post2[,1]),
omega_d = as.vector(post2[,17]),
p_00 = as.vector(post2[,9]),
p_11 = as.vector(post2[,13]))
p_Hamilton <- Hamilton_filter(Y, theta_posterior_means, W)
p_Hamilton <- p_Hamilton$p_1
for (i in 1:pages){
page<-i
png(file = paste0("Hamilton_", country,variable,"_",page,".png"), width = 8.27, height = 11.69, units ="in",res=300)
par(mfrow = c(n_row, n_col), family="serif",mar=c(2, 2, 2, 0.5)+ 0.1,mgp=c(1,0,0))
dates<-unique(USA_GDP_ch$Period)
for (pp in 1:m) {
pp<-pp+(page-1)*m
if (pp<=N){
plot(x=1:length(dates), y=p_Hamilton[,pp],type="l",lwd=2,xlab="",
ylab="p-stwo ekspansji",main=names[pp],col="navy",
cex.axis=cex/2,cex.main=cex,cex.lab=cex/2,xaxt="none",yaxt="none")
axis(2, cex.axis=cex/1.5, tck=-0.015)
axis(1, seq(1,60,4), cex.axis=cex/1.5, srt = 45,tck=-0.015, #col.axis="red",
labels=c("2006","2007","2008","2009","2010","2011","2012",
"2013","2014","2015","2016","2017","2018","2019","2020"))
}}
dev.off()
}
############### TWORZENEI RYSUNKÓW IMPULSU - indywidualnie dla każdego zestawu
nclr<-9
e<-c()
impulse <- theta_posterior_means$mu_1 - theta_posterior_means$mu_0
for (pp in 1:N) {
impulse2 <- as.matrix(rep(0,N))
impulse2[pp] <- impulse[pp]
effect <- solve(diag(N) - theta_posterior_means$rho * W) %*% impulse2
e<-cbind(e,effect)}
effect_mean<-mean(e)
vec_e<-c(e)
breaks_qt <- classIntervals(vec_e,2, n = nclr, style = "quantile")
r <- breaks_qt$brks
# choice of folder to keep maps
n_col<-4
n_row<-5
m<- n_col*n_row
pages<-ceiling(N/m)
pal<-c()
# Draw a choropleth of the spatial response to an impulse in region i.
#
# map    - sp SpatialPolygonsDataFrame to draw
# N      - number of regions (kept for interface compatibility; unused)
# n      - character vector of region names (used for the panel title)
# theta  - parameter list (kept for interface compatibility; unused)
# W      - spatial weight matrix (kept for interface compatibility; unused)
# ef     - matrix of effects; column i is the response to impulse i
# r      - break points used to bin the response into colour brackets
# legend - value passed through to spplot's colorkey argument
# i      - index of the impulsed region
#
# The original body also recomputed the (never used) impulse vector from
# theta$mu_1 - theta$mu_0; that dead code is removed here.
draw_impulse2<-function(map,N,n,theta,W,ef,r,legend,i){
pal <- brewer.pal(9, "PuBuGn")
# Bin the i-th effect column into the pre-computed brackets and plot it.
map@data$response <- as.vector(ef[,i])
map@data$bracket <- cut(map@data$response, r)
spplot(map, "bracket", lwd=0.1, col.regions=pal,colorkey=legend,
par.settings = list(axis.line = list(col = 'transparent')),
main = list(label=n[i],cex=0.8,fontfamily="serif"))
}
draw_impulse_empty<-function(map,N,n,theta,W,ef,r,i){
#pp<-1
#theta<-theta_posterior_means
nclr<-9
#pal <- colorRampPalette(c("white", "black"), bias = 1)
impulse <- theta$mu_1 - theta$mu_0
pal <- brewer.pal(9, "PuBuGn")
impulse2 <- as.matrix(rep(0,N))
impulse2[i] <- impulse[i]
map@data$response <- as.vector(ef[,i])
map@data$bracket <- cut(map@data$response, r)
s<-spplot(map, "bracket", lwd=0, col.regions=pal,
colorkey=list(space='left',height = 2,width =4),
par.settings = list(axis.line = list(col = 'transparent')),
main = list(label='',cex=0.8,fontfamily="serif"))
library(grid)
library(lattice)
args <- s$legend$left$args$key
## Prepare list of arguments needed by `legend=` argument (as described in ?xyplot)
legendArgs <- list(fun = draw.colorkey,
args = list(key = args),
corner = c(0.5,.5),
fontfamily="serif")
## Call spplot() again, this time passing in to legend the arguments
## needed to print a color key
spplot(USA_map, "ID", colorkey =FALSE,
panel=function(x, y, ...){
panel.rect(xleft=180000, ybottom=330000,
xright=181000, ytop=330500, alpha=1)},lwd=0, par.settings = list(axis.line = list(col = 'transparent')),
legend = list(inside = legendArgs))
}
png(file = paste0("legend_effect_",country,"_",variable,".png"), width = 3, height = 4, units ="in",res=300)
draw_impulse_empty(USA_map,48,names,theta_posterior_means,W,e,r,1)
dev.off()
setwd(path3)
## UNEMPLOYMENT RATE IN POLAND
for (page in 1:pages){
if (m+(page-1)*(m-1)>N){
dif<- m - (N-(m+(page-1)*(m-1))+1)
temp<-seq(1+(page-1)*(m-1),N)
png(file = paste0("effect_",country,variable,"_",page,".png"), width = 8.27, height = 11.69, units ="in",res=300)
plots = lapply(temp, function(.x) draw_impulse2(USA_map,48,names,theta_posterior_means, W, e, r, FALSE, .x))
p<-marrangeGrob(plots, nrow=n_row, ncol=n_col)
print(p)
dev.off()
}else{
temp<-seq(1+(page-1)*(m-1), m+(page-1)*(m-1)-1)
png(file = paste0("effect_",country,variable,"_",page,".png"), width = 8.27, height = 11.69, units ="in",res=300)
plots = lapply(temp, function(.x) draw_impulse2(USA_map,48,names,theta_posterior_means,W,e,r,FALSE,.x))
do.call(grid.arrange,plots)
dev.off()
}
}
write.table(rho.sum, file = paste0("rho_results_",country,"_",variable,".csv"), sep = ";", dec = ",")
|
# Load the Open Data Index scores per place and normalize to two columns:
# country name and its open_data score.
library(tidyverse)
open_data <- read_csv('data/open_data_index_places.csv') %>%
select(name,score) %>%
rename(country=name,open_data=score) %>%
fix_adm0
# NOTE(review): fix_adm0 is a project helper not shown here; presumably it
# harmonizes country names to a standard admin-0 list -- confirm against its
# definition.
|
/open_data.R
|
no_license
|
ccjolley/DECA
|
R
| false
| false
| 160
|
r
|
library(tidyverse)
open_data <- read_csv('data/open_data_index_places.csv') %>%
select(name,score) %>%
rename(country=name,open_data=score) %>%
fix_adm0
|
## Let's evaluate some of the logistic model submissions
# 1. Do the priors make sense?
# 2. Do the MCMCs converge?
# 3. Do the posteriors make sense?
# 4. Do they produce good predictions?
# Names are anonymized to protect the innocent...
library(tidyverse)
library(rethinking)
## Hubert, Mack, and Chad ---------------------------------
# load and clean the data
d <- read_csv('data/CCES-Train.csv') %>%
mutate(Y = democratic2016,
famincome_quart = case_when(faminc_new %in% c(1:4) ~ 1,
faminc_new %in% c(5:8) ~ 2,
faminc_new %in% c(9:12) ~ 3,
faminc_new %in% c(13:16) ~ 4),
gender_num = case_when(gender == "Male" ~ 1,
gender == "Female" ~ 2),
educ_num_v2 = case_when(educ == 'No HS' ~ 1,
educ == 'High school graduate' ~ 2,
educ == 'Some college' ~ 3,
educ == '2-year' ~ 4,
educ == '4-year' ~ 5,
educ == 'Post-grad' ~ 6))
dat <- list(
Y = d$Y,
# index variables for family income, gender, and education
famincome_quart = d$famincome_quart,
gender_num = d$gender_num,
educ_num_v2 = d$educ_num_v2
)
# fit the Hubert, Mack, and Chad model
model_HMC <- ulam(
alist(
Y ~ dbinom(1, p), # binomial likelihood
logit(p) <- aF[famincome_quart] + aG[gender_num] + aE[educ_num_v2],
aF[famincome_quart] ~ dnorm(2, 1),
aG[gender_num] ~ dnorm(1.5, 1),
aE[educ_num_v2] ~ dnorm(10, 2.5)
), data = dat, chains = 1, iter = 3000, log_lik = TRUE
)
# took...about 9 minutes to estimate
# save the model
save(model_HMC, file = 'models/model_HMC.RData')
# plot out the posterior intervals for each parameter
plot(model_HMC, depth = 2)
# really? no difference between males and females?
# no difference between respondents with different educational levels?
# MCMC diagnostics
traceplot( model_HMC )
# clearly some autocorrelation in the draws; and strangely small
# effective sample sizes for such a long chain!
# The problem is those priors. Let's do a prior predictive simulation...
# the guts of their model is...
# logit(p_democrat) = aF + aG + aE
p_democrat <- inv_logit(rnorm(1e5, 0, 0.2) +
rnorm(1e5, 0, 0.2) +
rnorm(1e5, 0, 0.2))
dens(p_democrat)
# plot prior on difference between men and women
prior_aF <- rnorm(1e5, 0, 0.2)
prior_aE <- rnorm(1e5, 0, 0.2)
prior_aG1 <- rnorm(1e5, 0, 0.2)
prior_aG2 <- rnorm(1e5, 0, 0.2)
prior_democrat_men <- inv_logit(prior_aF + prior_aE + prior_aG1)
prior_democrat_women <- inv_logit(prior_aF + prior_aE + prior_aG2)
# plot the difference between those two priors
dens(prior_democrat_men - prior_democrat_women)
# plot the logistic function, to get a sense for how log odds map onto probabilities
plot(seq(-10,10,0.1), inv_logit(seq(-10,10,0.1)), type = 'l',
xlab = 'Log-Odds', ylab = 'Probability')
## Challenge: redefine some sensible priors and refit the model ---------------
model_HMC2 <- ulam(
alist(
Y ~ dbinom(1, p), # binomial likelihood
logit(p) <- aF[famincome_quart] + aG[gender_num] + aE[educ_num_v2],
aF[famincome_quart] ~ dnorm(0, 0.2),
aG[gender_num] ~ dnorm(0, 0.2),
aE[educ_num_v2] ~ dnorm(0, 0.2)
), data = dat, chains = 1, iter = 5000, log_lik = TRUE
)
# save the model
save(model_HMC2, file = 'models/model_HMC2.RData')
# plot the posteriors
plot(model_HMC2, depth = 2)
precis(model_HMC2, depth = 2)
traceplot( model_HMC2 )
# try dropping a predictor and see how MCMC does
# (worried about collinearity between income and education)
# Reduced model without the education predictor (checking for collinearity
# between income and education, per the comment above).
# BUG FIX: the original passed `ter = 5000`, a typo for `iter = 5000`; the
# misspelled argument would not set the chain length as intended.
model_HMC3 <- ulam(
alist(
Y ~ dbinom(1, p), # binomial likelihood
logit(p) <- aF[famincome_quart] + aG[gender_num],
aF[famincome_quart] ~ dnorm(0, 0.2),
aG[gender_num] ~ dnorm(0, 0.2)
), data = dat, chains = 1, iter = 5000, log_lik = TRUE
)
save(model_HMC3, file = 'models/model_HMC3.RData')
precis(model_HMC3, depth = 2)
## Here's a model from Sally & Mason -----------------------------
d <- read_csv('data/CCES-Train.csv') %>%
# reformat variables for model
mutate(Y = democratic2016,
R = case_when(region == 'Midwest' ~ 1,
region == 'Northeast' ~ 2,
region == 'South' ~ 3,
region == 'West' ~ 4),
A = 2016 - birthyr,
Fe = case_when(gender == 'Female' ~ 1,
gender == 'Male' ~ 0),
C = case_when(child18 == 'Yes' ~ 1,
child18 == 'No' ~ 0),
Em = case_when(employ == 'Full-Time' ~ 1,
employ == 'Unemployed' ~ 0,
employ == 'Retired' ~ 0,
employ == 'Part-Time' ~ 1,
employ == 'Permanently disabled' ~ 0,
employ == 'Other' ~ 0,
employ == 'Homemaker' ~ 0,
employ == 'Temporarily laid off' ~ 0,
TRUE ~ 0),
Race = case_when (race == 'Black' ~ 1,
race == 'White' ~ 2,
race == 'Hispanic' ~ 3,
race == 'Asain' ~ 4,
race == 'Mixed' ~ 5,
race == 'Other' ~ 6,
TRUE ~ 6),
Educ = case_when (educ == 'High school graduate' ~ 0,
educ == 'Some college' ~ 1,
educ == '2-year' ~ 2,
educ == '4-year' ~ 3,
educ == 'Post-grad' ~ 4,
educ == 'No HS' ~ 0),
LGBTQ = case_when(sexuality == 'Heterosexual' ~ 0,
sexuality == 'Gay' ~ 1,
sexuality == 'Lesbian' ~ 1,
sexuality == 'Bisexual' ~ 1,
sexuality == 'Prefer not to say' ~ 0,
sexuality == 'Other' ~ 1))
# despite the warnings of our sage Richard McElreath,
# they each attempted a version of the model with dummy variables
# let's see what this implies for our priors.....
dat <- list(
Y = d$Y, # outcome variable (Democratic vote in 2016)
R = d$R, # index variable for region
A = d$A,
Fe = d$Fe,
C = d$C,
Em = d$Em,
Race = d$Race,
Educ = d$Educ,
LGBTQ = d$LGBTQ)
# here's their model
model_SM <- ulam(
alist(
Y ~ dbinom(1, p), # binomial likeliood
logit(p) <- a[R] + b1[R]*Fe + b2[R]*A + b3[R]*Educ + b4[R]*C + b5[R]*Em + b6[R]*Race + b7[R]*LGBTQ, # log-odds vary by region
a[R] ~ dnorm(0, 1.5),
b1[R] ~ dnorm(0, .5),
b2[R] ~ dnorm(0, .5),
b3[R] ~ dnorm(0, .5),
b4[R] ~ dnorm(0, .5),
b5[R] ~ dnorm(0, .5),
b6[R] ~ dnorm(0, .5),
b7[R] ~ dnorm(0, .5)
), data = dat, chains = 4, log_lik=TRUE
)
# BUG FIX: save()'s file path must be passed as `file =`; an unnamed second
# argument is interpreted as the name of another object to save, so the
# original call would error ("object 'models/model_SM.RData' not found").
save(model_SM, file = 'models/model_SM.RData')
# plot the priors for various groups
p_democratic_male_unemployed_nochildren_straight <- inv_logit(rnorm(1e5, 0, 1.5))
dens(p_democratic_male_unemployed_nochildren_straight)
p_democratic_female_employed_children_LGBTQ <- inv_logit(rnorm(1e5, 0, 1.5) +
rnorm(1e5, 0, 0.5) +
rnorm(1e5, 0, 0.5) +
rnorm(1e5, 0, 0.5) +
rnorm(1e5, 0, 0.5))
dens(p_democratic_female_employed_children_LGBTQ)
p_democratic_female_employed_children_LGBTQ_40yo <- inv_logit(rnorm(1e5, 0, 1.5) +
rnorm(1e5, 0, 0.5) +
rnorm(1e5, 0, 0.5) +
rnorm(1e5, 0, 0.5) +
rnorm(1e5, 0, 0.5) +
rnorm(1e5, 0, 0.5) * 40)
dens(p_democratic_female_employed_children_LGBTQ_40yo)
## Updated model ---------------------------------------------
d <- read_csv('data/CCES-Train.csv') %>%
# reformat variables for model
mutate(Y = democratic2016,
R = case_when(region == 'Midwest' ~ 1,
region == 'Northeast' ~ 2,
region == 'South' ~ 3,
region == 'West' ~ 4),
A = scale(2018 - birthyr),
gender = case_when(gender == 'Female' ~ 1,
gender == 'Male' ~ 2),
children = case_when(child18 == 'Yes' ~ 2,
child18 == 'No' ~ 1),
employment = case_when(employ == 'Full-time' ~ 1,
employ == 'Unemployed' ~ 2,
employ == 'Retired' ~ 3,
employ == 'Part-time' ~ 1,
employ == 'Permanently disabled' ~ 2,
employ == 'Other' ~ 2,
employ == 'Homemaker' ~ 4,
employ == 'Temporarily laid off' ~ 2,
TRUE ~ 2),
Race = case_when (race == 'Black' ~ 1,
race == 'White' ~ 2,
race == 'Hispanic' ~ 3,
race == 'Asian' ~ 4,
race == 'Mixed' ~ 5,
race == 'Other' ~ 6,
TRUE ~ 6),
Educ = case_when (educ == 'High school graduate' ~ 2,
educ == 'Some college' ~ 3,
educ == '2-year' ~ 4,
educ == '4-year' ~ 5,
educ == 'Post-grad' ~ 6,
educ == 'No HS' ~ 1),
LGBTQ = case_when(sexuality == 'Heterosexual' ~ 1,
sexuality == 'Gay' ~ 2,
sexuality == 'Lesbian' ~ 2,
sexuality == 'Bisexual' ~ 2,
sexuality == 'Prefer not to say' ~ 2,
sexuality == 'Other' ~ 2))
# make it a nice list for ulam()
dat <- list(
Y = d$Y,
R = d$R,
gender = d$gender,
A = as.numeric(d$A),
Educ = d$Educ,
children = d$children,
employment = d$employment,
Race = d$Race,
LGBTQ = d$LGBTQ
)
# prior predictive simulation
(rnorm(1e5, 0, 0.2) +
rnorm(1e5, 0, 0.2) +
rnorm(1e5, 0, 0.2) +
rnorm(1e5, 0, 0.2) +
rnorm(1e5, 0, 0.2) +
rnorm(1e5, 0, 0.2) +
rnorm(1e5, 0, 0.2) +
rnorm(1e5, 0, 0.2)) %>%
inv_logit %>%
dens
model_SM2 <- ulam(
alist(
Y ~ dbinom(1, p), # binomial likelihood
logit(p) <- a1[R] + a2[gender] + b1[R]*A + a3[Educ] + a4[children] + a5[employment] + a6[Race] + a7[LGBTQ],
a1[R] ~ dnorm(0, 0.2),
a2[gender] ~ dnorm(0, .2),
b1[R] ~ dnorm(0, .2),
a3[Educ] ~ dnorm(0, .2),
a4[children] ~ dnorm(0, .2),
a5[employment] ~ dnorm(0, .2),
a6[Race] ~ dnorm(0, .2),
a7[LGBTQ] ~ dnorm(0, .2)
), data = dat, chains = 1, log_lik=TRUE
)
# took about 6 minutes to fit
# save the model
save(model_SM2, file = 'models/model_SM2.RData')
plot(model_SM2, depth = 2)
traceplot( model_SM2 )
|
/day-13-model-competition.R
|
permissive
|
colt-jensen/maymester-bayes-2021
|
R
| false
| false
| 11,341
|
r
|
## Let's evaluate some of the logistic model submissions
# 1. Do the priors make sense?
# 2. Do the MCMCs converge?
# 3. Do the posteriors make sense?
# 4. Do they produce good predictions?
# Names are anonymized to protect the innocent...
library(tidyverse)
library(rethinking)
## Hubert, Mack, and Chad ---------------------------------
# load and clean the data
d <- read_csv('data/CCES-Train.csv') %>%
mutate(Y = democratic2016,
famincome_quart = case_when(faminc_new %in% c(1:4) ~ 1,
faminc_new %in% c(5:8) ~ 2,
faminc_new %in% c(9:12) ~ 3,
faminc_new %in% c(13:16) ~ 4),
gender_num = case_when(gender == "Male" ~ 1,
gender == "Female" ~ 2),
educ_num_v2 = case_when(educ == 'No HS' ~ 1,
educ == 'High school graduate' ~ 2,
educ == 'Some college' ~ 3,
educ == '2-year' ~ 4,
educ == '4-year' ~ 5,
educ == 'Post-grad' ~ 6))
dat <- list(
Y = d$Y,
# index variables for family income, gender, and education
famincome_quart = d$famincome_quart,
gender_num = d$gender_num,
educ_num_v2 = d$educ_num_v2
)
# fit the Hubert, Mack, and Chad model
# (student submission kept verbatim so the critique below applies to it)
model_HMC <- ulam(
  alist(
    Y ~ dbinom(1, p), # binomial likelihood
    logit(p) <- aF[famincome_quart] + aG[gender_num] + aE[educ_num_v2],
    # NOTE: these priors live on the log-odds scale, so means of 2, 1.5 and
    # especially 10 push p toward 1 almost deterministically; this is the
    # prior problem diagnosed by the prior predictive simulation further down
    aF[famincome_quart] ~ dnorm(2, 1),
    aG[gender_num] ~ dnorm(1.5, 1),
    aE[educ_num_v2] ~ dnorm(10, 2.5)
  ), data = dat, chains = 1, iter = 3000, log_lik = TRUE
)
# took...about 9 minutes to estimate
# save the model
save(model_HMC, file = 'models/model_HMC.RData')
# plot out the posterior intervals for each parameter
plot(model_HMC, depth = 2)
# really? no difference between males and females?
# no difference between respondents with different educational levels?
# MCMC diagnostics
traceplot( model_HMC )
# clearly some autocorrelation in the draws; and strangely small
# effective sample sizes for such a long chain!
# The problem is those priors. Let's do a prior predictive simulation...
# the guts of their model is...
# logit(p_democrat) = aF + aG + aE
p_democrat <- inv_logit(rnorm(1e5, 0, 0.2) +
rnorm(1e5, 0, 0.2) +
rnorm(1e5, 0, 0.2))
dens(p_democrat)
# plot prior on difference between men and women
prior_aF <- rnorm(1e5, 0, 0.2)
prior_aE <- rnorm(1e5, 0, 0.2)
prior_aG1 <- rnorm(1e5, 0, 0.2)
prior_aG2 <- rnorm(1e5, 0, 0.2)
prior_democrat_men <- inv_logit(prior_aF + prior_aE + prior_aG1)
prior_democrat_women <- inv_logit(prior_aF + prior_aE + prior_aG2)
# plot the difference between those two priors
dens(prior_democrat_men - prior_democrat_women)
# plot the logistic function, to get a sense for how log odds map onto probabilities
plot(seq(-10,10,0.1), inv_logit(seq(-10,10,0.1)), type = 'l',
xlab = 'Log-Odds', ylab = 'Probability')
## Challenge: redefine some sensible priors and refit the model ---------------
model_HMC2 <- ulam(
alist(
Y ~ dbinom(1, p), # binomial likelihood
logit(p) <- aF[famincome_quart] + aG[gender_num] + aE[educ_num_v2],
aF[famincome_quart] ~ dnorm(0, 0.2),
aG[gender_num] ~ dnorm(0, 0.2),
aE[educ_num_v2] ~ dnorm(0, 0.2)
), data = dat, chains = 1, iter = 5000, log_lik = TRUE
)
# save the model
save(model_HMC2, file = 'models/model_HMC2.RData')
# plot the posteriors
plot(model_HMC2, depth = 2)
precis(model_HMC2, depth = 2)
traceplot( model_HMC2 )
# try dropping a predictor and see how MCMC does
# (worried about collinearity between income and education)
# Refit without the education predictor (collinearity concern noted above),
# keeping the tight dnorm(0, 0.2) log-odds priors from model_HMC2.
# Bug fix: the iteration argument was misspelled 'ter = 5000'; ulam() does
# not recognise 'ter', so the chain would not run the intended 5000 draws.
model_HMC3 <- ulam(
  alist(
    Y ~ dbinom(1, p), # binomial likelihood
    logit(p) <- aF[famincome_quart] + aG[gender_num],
    aF[famincome_quart] ~ dnorm(0, 0.2),
    aG[gender_num] ~ dnorm(0, 0.2)
  ), data = dat, chains = 1, iter = 5000, log_lik = TRUE
)
save(model_HMC3, file = 'models/model_HMC3.RData')
precis(model_HMC3, depth = 2)
## Here's a model from Sally & Mason -----------------------------
# Recode the raw CCES variables into the dummy/index variables used by the
# Sally & Mason model: Y = Democratic vote in 2016, R = region index,
# A = age in years, 0/1 dummies for gender (Fe), children (C), employment
# (Em) and LGBTQ status, plus Race and Educ codes.
d <- read_csv('data/CCES-Train.csv') %>%
  # reformat variables for model
  mutate(Y = democratic2016,
         R = case_when(region == 'Midwest' ~ 1,
                       region == 'Northeast' ~ 2,
                       region == 'South' ~ 3,
                       region == 'West' ~ 4),
         A = 2016 - birthyr,
         Fe = case_when(gender == 'Female' ~ 1,
                        gender == 'Male' ~ 0),
         C = case_when(child18 == 'Yes' ~ 1,
                       child18 == 'No' ~ 0),
         # NOTE(review): the refitted model below matches 'Full-time' /
         # 'Part-time' (lower-case t); if that is the spelling in the data,
         # these two branches never match and fall through to TRUE ~ 0 --
         # verify against the CCES coding
         Em = case_when(employ == 'Full-Time' ~ 1,
                        employ == 'Unemployed' ~ 0,
                        employ == 'Retired' ~ 0,
                        employ == 'Part-Time' ~ 1,
                        employ == 'Permanently disabled' ~ 0,
                        employ == 'Other' ~ 0,
                        employ == 'Homemaker' ~ 0,
                        employ == 'Temporarily laid off' ~ 0,
                        TRUE ~ 0),
         Race = case_when (race == 'Black' ~ 1,
                           race == 'White' ~ 2,
                           race == 'Hispanic' ~ 3,
                           # bug fix: was misspelled 'Asain', so Asian
                           # respondents fell through to the TRUE ~ 6
                           # catch-all ('Other'); the refitted model below
                           # uses the correct spelling
                           race == 'Asian' ~ 4,
                           race == 'Mixed' ~ 5,
                           race == 'Other' ~ 6,
                           TRUE ~ 6),
         # NOTE: 'No HS' and 'High school graduate' both map to 0 here
         # (they get distinct codes 1 and 2 in the refit below)
         Educ = case_when (educ == 'High school graduate' ~ 0,
                           educ == 'Some college' ~ 1,
                           educ == '2-year' ~ 2,
                           educ == '4-year' ~ 3,
                           educ == 'Post-grad' ~ 4,
                           educ == 'No HS' ~ 0),
         LGBTQ = case_when(sexuality == 'Heterosexual' ~ 0,
                           sexuality == 'Gay' ~ 1,
                           sexuality == 'Lesbian' ~ 1,
                           sexuality == 'Bisexual' ~ 1,
                           sexuality == 'Prefer not to say' ~ 0,
                           sexuality == 'Other' ~ 1))
# despite the warnings of our sage Richard McElreath,
# they each attempted a version of the model with dummy variables
# let's see what this implies for our priors.....
dat <- list(
Y = d$Y, # outcome variable (Democratic vote in 2016)
R = d$R, # index variable for region
A = d$A,
Fe = d$Fe,
C = d$C,
Em = d$Em,
Race = d$Race,
Educ = d$Educ,
LGBTQ = d$LGBTQ)
# here's their model
# (kept verbatim: every dummy-coded predictor gets a region-specific
# coefficient, so the log-odds vary by region for each term)
model_SM <- ulam(
  alist(
    Y ~ dbinom(1, p), # binomial likelihood
    logit(p) <- a[R] + b1[R]*Fe + b2[R]*A + b3[R]*Educ + b4[R]*C + b5[R]*Em + b6[R]*Race + b7[R]*LGBTQ, # log-odds vary by region
    a[R] ~ dnorm(0, 1.5),
    b1[R] ~ dnorm(0, .5),
    # NOTE: b2 multiplies raw (unstandardised) age, so dnorm(0, .5) implies
    # extreme log-odds for adults -- see the 40-year-old prior predictive
    # plot further down
    b2[R] ~ dnorm(0, .5),
    b3[R] ~ dnorm(0, .5),
    b4[R] ~ dnorm(0, .5),
    b5[R] ~ dnorm(0, .5),
    b6[R] ~ dnorm(0, .5),
    b7[R] ~ dnorm(0, .5)
  ), data = dat, chains = 4, log_lik=TRUE
)
# bug fix: 'file' must be named -- save()'s second positional argument is
# 'list', so save(model_SM, 'models/model_SM.RData') tries to save an
# object literally named 'models/model_SM.RData' and errors
save(model_SM, file = 'models/model_SM.RData')
# plot the priors for various groups
p_democratic_male_unemployed_nochildren_straight <- inv_logit(rnorm(1e5, 0, 1.5))
dens(p_democratic_male_unemployed_nochildren_straight)
p_democratic_female_employed_children_LGBTQ <- inv_logit(rnorm(1e5, 0, 1.5) +
rnorm(1e5, 0, 0.5) +
rnorm(1e5, 0, 0.5) +
rnorm(1e5, 0, 0.5) +
rnorm(1e5, 0, 0.5))
dens(p_democratic_female_employed_children_LGBTQ)
p_democratic_female_employed_children_LGBTQ_40yo <- inv_logit(rnorm(1e5, 0, 1.5) +
rnorm(1e5, 0, 0.5) +
rnorm(1e5, 0, 0.5) +
rnorm(1e5, 0, 0.5) +
rnorm(1e5, 0, 0.5) +
rnorm(1e5, 0, 0.5) * 40)
dens(p_democratic_female_employed_children_LGBTQ_40yo)
## Updated model ---------------------------------------------
d <- read_csv('data/CCES-Train.csv') %>%
# reformat variables for model
mutate(Y = democratic2016,
R = case_when(region == 'Midwest' ~ 1,
region == 'Northeast' ~ 2,
region == 'South' ~ 3,
region == 'West' ~ 4),
A = scale(2018 - birthyr),
gender = case_when(gender == 'Female' ~ 1,
gender == 'Male' ~ 2),
children = case_when(child18 == 'Yes' ~ 2,
child18 == 'No' ~ 1),
employment = case_when(employ == 'Full-time' ~ 1,
employ == 'Unemployed' ~ 2,
employ == 'Retired' ~ 3,
employ == 'Part-time' ~ 1,
employ == 'Permanently disabled' ~ 2,
employ == 'Other' ~ 2,
employ == 'Homemaker' ~ 4,
employ == 'Temporarily laid off' ~ 2,
TRUE ~ 2),
Race = case_when (race == 'Black' ~ 1,
race == 'White' ~ 2,
race == 'Hispanic' ~ 3,
race == 'Asian' ~ 4,
race == 'Mixed' ~ 5,
race == 'Other' ~ 6,
TRUE ~ 6),
Educ = case_when (educ == 'High school graduate' ~ 2,
educ == 'Some college' ~ 3,
educ == '2-year' ~ 4,
educ == '4-year' ~ 5,
educ == 'Post-grad' ~ 6,
educ == 'No HS' ~ 1),
LGBTQ = case_when(sexuality == 'Heterosexual' ~ 1,
sexuality == 'Gay' ~ 2,
sexuality == 'Lesbian' ~ 2,
sexuality == 'Bisexual' ~ 2,
sexuality == 'Prefer not to say' ~ 2,
sexuality == 'Other' ~ 2))
# make it a nice list for ulam()
dat <- list(
Y = d$Y,
R = d$R,
gender = d$gender,
A = as.numeric(d$A),
Educ = d$Educ,
children = d$children,
employment = d$employment,
Race = d$Race,
LGBTQ = d$LGBTQ
)
# prior predictive simulation
(rnorm(1e5, 0, 0.2) +
rnorm(1e5, 0, 0.2) +
rnorm(1e5, 0, 0.2) +
rnorm(1e5, 0, 0.2) +
rnorm(1e5, 0, 0.2) +
rnorm(1e5, 0, 0.2) +
rnorm(1e5, 0, 0.2) +
rnorm(1e5, 0, 0.2)) %>%
inv_logit %>%
dens
# refit with index (not dummy) coding and tight dnorm(0, 0.2) log-odds
# priors, matching the prior predictive simulation immediately above
model_SM2 <- ulam(
  alist(
    Y ~ dbinom(1, p), # binomial likelihood
    # standardised age A gets a region-specific slope b1[R]; all other
    # terms are index-variable intercepts
    logit(p) <- a1[R] + a2[gender] + b1[R]*A + a3[Educ] + a4[children] + a5[employment] + a6[Race] + a7[LGBTQ],
    a1[R] ~ dnorm(0, 0.2),
    a2[gender] ~ dnorm(0, .2),
    b1[R] ~ dnorm(0, .2),
    a3[Educ] ~ dnorm(0, .2),
    a4[children] ~ dnorm(0, .2),
    a5[employment] ~ dnorm(0, .2),
    a6[Race] ~ dnorm(0, .2),
    a7[LGBTQ] ~ dnorm(0, .2)
  ), data = dat, chains = 1, log_lik=TRUE
)
# took about 6 minutes to fit
# save the model
save(model_SM2, file = 'models/model_SM2.RData')
plot(model_SM2, depth = 2)
traceplot( model_SM2 )
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/routines.R
\name{investigateAIC}
\alias{investigateAIC}
\title{Plot of simulation study}
\usage{
investigateAIC(nsim = 10000, Nsamp = 1000, seed = 1001)
}
\arguments{
\item{nsim}{The number of simulation replications; default 10000}
\item{Nsamp}{The expected value of the total population within each simulation; default 1000}
\item{seed}{The random number seed; default 1001}
}
\value{
An \code{nsim} \eqn{\times} 2 matrix giving the changes in deviance for each replication for each of the two models
}
\description{
This routine produces Figure 1 of Chan, Silverman and Vincent (2019).
}
\details{
Simulations are carried out for two different three-list models.
In one model, the probabilities of capture are 0.01, 0.04 and 0.2 for the three lists respectively, while in the other the probability
is 0.3 on all three lists. In both cases, there are no interaction
effects, so that captures on the lists occur independently of each other.
The first model is chosen to be somewhat more typical of the sparse capture
case, of the kind which often occurs in the human trafficking context,
while the second is a more classical multiple systems estimate.
The probability of an individual having each possible capture history is first evaluated.
Then these probabilities are multiplied by \code{Nsamp = 1000} and, for each simulation
replicate, Poisson random values with expectations equal to these values are generated
to give a full set of observed capture histories;
together with the null capture history the expected number of counts
(population size) is equal to \code{Nsamp}.
Inference was carried out both for the model with main effects only, and for the model with the addition of an interaction effect between the first two lists.
The reduction in deviance between the two models was determined.
Checking for compliance with the conditions for existence and identifiability of the
estimates shows that a very small number of the simulations for the sparse model (two out of
ten thousand) fail the checks for existence even within the extended maximum likelihood context.
Detailed investigation shows that in neither of these cases is the dark figure itself not estimable;
although the parameters themselves cannot all be estimated, there is a maximum likelihood estimate
of the expected capture frequencies, and hence the deviance can still be calculated.
The routine produces QQ-plots
of the resulting deviance reductions against quantiles of the \eqn{\chi^2_1} distribution,
for \code{nsim} simulation replications.
}
\references{
Chan, L., Silverman, B. W., and Vincent, K. (2019).
Multiple systems estimation for Sparse Capture Data: Inferential Challenges when there are Non-Overlapping Lists. Available from \url{https://arxiv.org/abs/1902.05156}.
}
|
/man/investigateAIC.Rd
|
no_license
|
SparseMSE/sparsemse
|
R
| false
| true
| 2,903
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/routines.R
\name{investigateAIC}
\alias{investigateAIC}
\title{Plot of simulation study}
\usage{
investigateAIC(nsim = 10000, Nsamp = 1000, seed = 1001)
}
\arguments{
\item{nsim}{The number of simulation replications; default 10000}
\item{Nsamp}{The expected value of the total population within each simulation; default 1000}
\item{seed}{The random number seed; default 1001}
}
\value{
An \code{nsim} \eqn{\times} 2 matrix giving the changes in deviance for each replication for each of the two models
}
\description{
This routine produces Figure 1 of Chan, Silverman and Vincent (2019).
}
\details{
Simulations are carried out for two different three-list models.
In one model, the probabilities of capture are 0.01, 0.04 and 0.2 for the three lists respectively, while in the other the probability
is 0.3 on all three lists. In both cases, there are no interaction
effects, so that captures on the lists occur independently of each other.
The first model is chosen to be somewhat more typical of the sparse capture
case, of the kind which often occurs in the human trafficking context,
while the second is a more classical multiple systems estimate.
The probability of an individual having each possible capture history is first evaluated.
Then these probabilities are multiplied by \code{Nsamp = 1000} and, for each simulation
replicate, Poisson random values with expectations equal to these values are generated
to give a full set of observed capture histories;
together with the null capture history the expected number of counts
(population size) is equal to \code{Nsamp}.
Inference was carried out both for the model with main effects only, and for the model with the addition of an interaction effect between the first two lists.
The reduction in deviance between the two models was determined.
Checking for compliance with the conditions for existence and identifiability of the
estimates shows that a very small number of the simulations for the sparse model (two out of
ten thousand) fail the checks for existence even within the extended maximum likelihood context.
Detailed investigation shows that in neither of these cases is the dark figure itself not estimable;
although the parameters themselves cannot all be estimated, there is a maximum likelihood estimate
of the expected capture frequencies, and hence the deviance can still be calculated.
The routine produces QQ-plots
of the resulting deviance reductions against quantiles of the \eqn{\chi^2_1} distribution,
for \code{nsim} simulation replications.
}
\references{
Chan, L., Silverman, B. W., and Vincent, K. (2019).
Multiple systems estimation for Sparse Capture Data: Inferential Challenges when there are Non-Overlapping Lists. Available from \url{https://arxiv.org/abs/1902.05156}.
}
|
\name{filter_feature_selection}
\alias{filter_feature_selection}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Perform selection by filter
}
\description{
Perform selection by filter using univariate filters, from caret's package.
}
\usage{
filter_feature_selection(datamat, samples.class,
functions = caret::rfSBF, method = "cv", repeats = 5)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{datamat}{
data matrix from dataset.
}
\item{samples.class}{
string or index indicating what metadata to use.
}
\item{functions}{
a list of functions for model fitting, prediction and variable filtering.
}
\item{method}{
the external resampling method: boot, cv, LOOCV or LGOCV (for repeated training/test splits).
}
\item{repeats}{
for repeated k-fold cross-validation only: the number of complete sets of folds to compute.
}
}
\value{
A caret's sbf object with the result of selection by filter.
}
\examples{
\dontrun{
## Example of selection by filter
data(cachexia)
library(caret)
rfe.result = filter_feature_selection(cachexia$data,
cachexia$metadata$Muscle.loss, functions = caret::rfSBF,
method = "cv")
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ sbf }
\keyword{ filters }% __ONLY ONE__ keyword per line
|
/man/filter_feature_selection.Rd
|
no_license
|
Neal050617/specmine
|
R
| false
| false
| 1,377
|
rd
|
\name{filter_feature_selection}
\alias{filter_feature_selection}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Perform selection by filter
}
\description{
Perform selection by filter using univariate filters, from caret's package.
}
\usage{
filter_feature_selection(datamat, samples.class,
functions = caret::rfSBF, method = "cv", repeats = 5)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{datamat}{
data matrix from dataset.
}
\item{samples.class}{
string or index indicating what metadata to use.
}
\item{functions}{
a list of functions for model fitting, prediction and variable filtering.
}
\item{method}{
the external resampling method: boot, cv, LOOCV or LGOCV (for repeated training/test splits).
}
\item{repeats}{
for repeated k-fold cross-validation only: the number of complete sets of folds to compute.
}
}
\value{
A caret's sbf object with the result of selection by filter.
}
\examples{
\dontrun{
## Example of selection by filter
data(cachexia)
library(caret)
rfe.result = filter_feature_selection(cachexia$data,
cachexia$metadata$Muscle.loss, functions = caret::rfSBF,
method = "cv")
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ sbf }
\keyword{ filters }% __ONLY ONE__ keyword per line
|
library(pscl)
### Name: ideal
### Title: analysis of educational testing data and roll call data with IRT
### models, via Markov chain Monte Carlo methods
### Aliases: ideal
### Keywords: models
### ** Examples
## Not run:
##D ## long run, many iterations
##D data(s109)
##D n <- dim(s109$legis.data)[1]
##D x0 <- rep(0,n)
##D x0[s109$legis.data$party=="D"] <- -1
##D x0[s109$legis.data$party=="R"] <- 1
##D
##D id1 <- ideal(s109,
##D d=1,
##D startvals=list(x=x0),
##D normalize=TRUE,
##D store.item=TRUE,
##D maxiter=260E3,
##D burnin=10E3,
##D thin=100)
## End(Not run)
|
/data/genthat_extracted_code/pscl/examples/ideal.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 677
|
r
|
library(pscl)
### Name: ideal
### Title: analysis of educational testing data and roll call data with IRT
### models, via Markov chain Monte Carlo methods
### Aliases: ideal
### Keywords: models
### ** Examples
## Not run:
##D ## long run, many iterations
##D data(s109)
##D n <- dim(s109$legis.data)[1]
##D x0 <- rep(0,n)
##D x0[s109$legis.data$party=="D"] <- -1
##D x0[s109$legis.data$party=="R"] <- 1
##D
##D id1 <- ideal(s109,
##D d=1,
##D startvals=list(x=x0),
##D normalize=TRUE,
##D store.item=TRUE,
##D maxiter=260E3,
##D burnin=10E3,
##D thin=100)
## End(Not run)
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{CH25PR17}
\alias{CH25PR17}
\title{CH25PR17}
\format{\preformatted{'data.frame': 48 obs. of 4 variables:
$ V1: num 72 74.6 67.4 72.8 72.1 76.9 74.8 73.3 75.2 73.8 ...
$ V2: int 1 1 1 1 1 1 1 1 1 1 ...
$ V3: int 1 1 1 1 2 2 2 2 3 3 ...
$ V4: int 1 2 3 4 1 2 3 4 1 2 ...
}}
\usage{
CH25PR17
}
\description{
CH25PR17
}
\keyword{datasets}
|
/man/CH25PR17.Rd
|
no_license
|
bryangoodrich/ALSM
|
R
| false
| false
| 458
|
rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{CH25PR17}
\alias{CH25PR17}
\title{CH25PR17}
\format{\preformatted{'data.frame': 48 obs. of 4 variables:
$ V1: num 72 74.6 67.4 72.8 72.1 76.9 74.8 73.3 75.2 73.8 ...
$ V2: int 1 1 1 1 1 1 1 1 1 1 ...
$ V3: int 1 1 1 1 2 2 2 2 3 3 ...
$ V4: int 1 2 3 4 1 2 3 4 1 2 ...
}}
\usage{
CH25PR17
}
\description{
CH25PR17
}
\keyword{datasets}
|
library(tseries)
library(forecast)
library(magrittr)
set.seed(0)
#seed beállítása, hogy ugyanazok a pszeudorandom számaink legyenek
#1. Storage function: bundles the simulation parameters into one numeric
# vector, in the order expected by ts_gen():
# c(tslength, nofts, noise, p, d, q)
dgp_parameterek <- function(tslength, nofts, noise, p, d, q) {
  c(tslength, nofts, noise, p, d, q)
}
#2. Data-generating function
# Simulates `nofts` independent ARIMA(p,d,q) series of length `tslength`,
# one per column of the returned matrix.
#
# x: parameter vector from dgp_parameterek():
#    c(tslength, nofts, noise, p, d, q), where noise is the innovation sd
# Returns: a tslength x nofts numeric matrix (one series per column).
ts_gen <- function(x) {
  tslength <- x[1]
  nofts <- x[2]
  noise <- x[3]
  p <- x[4]
  d <- x[5]
  q <- x[6]
  idosorok <- array(numeric(), c(tslength, nofts))
  for (i in 1:nofts) {
    # AR/MA coefficients drawn uniformly from (-0.5, 0.5) for robustness.
    # NOTE(review): for p >= 3 this does not guarantee a stationary AR
    # polynomial, in which case arima.sim() stops with an error -- confirm
    # whether a retry loop is wanted for such specifications.
    sim <- arima.sim(model = list(order = c(p, d, q),
                                  ar = runif(p, min = -0.5, max = 0.5),
                                  ma = runif(q, min = -0.5, max = 0.5)),
                     n = tslength, sd = noise)
    # Bug fix: when d > 0, arima.sim() un-differences via diffinv() and so
    # returns tslength + d values; assigning that directly into a
    # tslength-long column raised a recycling warning and silently dropped
    # values. Truncate explicitly instead.
    idosorok[, i] <- as.numeric(sim)[seq_len(tslength)]
  }
  return(idosorok)
}
#3. Storage for the fitting parameters: the largest AR order (maxp) and
# MA order (maxq) that fitmodel() will scan
fit_input <- function(maxp, maxq) {
  c(maxp, maxq)
}
#4. Model fitting
# Scans every ARMA order pair (p, q) with 0 <= p <= maxp, 0 <= q <= maxq,
# choosing the differencing order d once via a KPSS test
# (forecast::ndiffs), and reports the (p, q) pair minimising AIC and the
# pair minimising BIC.
#
# x:  c(maxp, maxq), e.g. as produced by fit_input()
# ts: the series to fit
# Returns: list(aic_opt, bic_opt); each element is the 0-based (p, q)
# location of the minimum of the corresponding criterion matrix.
fitmodel <- function(x, ts) {
  maxp <- x[1]
  maxq <- x[2]
  # one cell per candidate (p, q); row/column 1 corresponds to order 0
  aic_tab <- array(numeric(), c(maxp + 1, maxq + 1))
  bic_tab <- array(numeric(), c(maxp + 1, maxq + 1))
  # differencing order, decided once for all candidate models
  d <- ndiffs(ts, test = "kpss")
  for (ar_order in 0:maxp) {
    for (ma_order in 0:maxq) {
      candidate <- arima(ts, order = c(ar_order, d, ma_order))
      aic_tab[ar_order + 1, ma_order + 1] <- AIC(candidate)
      bic_tab[ar_order + 1, ma_order + 1] <- BIC(candidate)
    }
  }
  # location of the smallest criterion value; the -1 converts the 1-based
  # array index back to the (p, q) order pair
  aic_opt <- which(aic_tab == min(aic_tab), arr.ind = TRUE) - 1
  bic_opt <- which(bic_tab == min(bic_tab), arr.ind = TRUE) - 1
  list(aic_opt, bic_opt)
}
#########################################
#5. The simulation function
# For a fixed data-generating order pdq = c(p, d, q), runs the Monte Carlo
# experiment over a 5x5 grid of (series length, noise sd) settings with
# 1000 replications per cell, recording how often AIC and BIC recover the
# true (p, q) order.
#
# pdq: true ARIMA order of the data-generating process, c(p, d, q)
# max: c(maxp, maxq) search bounds passed to fitmodel()
# Returns: list(aic_grid, bic_grid) of hit rates in [0, 1];
#          rows index tslength, columns index noise.
result_table<-function(pdq,max){
aic_grid<-array(numeric(),c(5,5))
bic_grid<-array(numeric(),c(5,5))
noise<-c(0.01,0.1,1,10,100)
tslength<-c(100,500,1000,5000,10000)
#define the two result grids and their axis parameters
#NOTE(review): the replication count 1000 is hard-coded below (loop bound,
#nofts, and the divisors) -- keep all four in sync if it is changed
for (i in 1:5) {
for (j in 1:5) {
parameterek <- c(tslength[i],1000,noise[j],pdq[1],pdq[2],pdq[3])
#collect the parameter values for grid cell (i, j) into one vector
ts <- ts_gen(parameterek)
#generate the time series (one per column)
aicszamlalo <- 0
bicszamlalo <- 0
for (k in 1:1000){
ered <- try(fitmodel(max,ts[,k]),silent=TRUE)
#search for the optimal model; try() is needed because arima()'s
#numerical optimisation can occasionally fail
if(is.numeric(ered[[1]][1])==TRUE){
#checks that the result is numeric, i.e. not a try-error object
if(ered[[1]][1] == pdq[1] & ered[[1]][2] == pdq[3]){
aicszamlalo <- aicszamlalo+1}
if(ered[[2]][1] == pdq[1] & ered[[2]][2] == pdq[3]){
bicszamlalo <- bicszamlalo+1}
#counts whether AIC / BIC recovered the true (p, q) order
#NOTE(review): on criterion ties fitmodel() returns a multi-row matrix and
#[1]/[2] then read the first column only -- confirm ties are negligible
}
}
aic_grid[i,j] <- aicszamlalo/1000
bic_grid[i,j] <- bicszamlalo/1000
#store the hit rates for this (sample size, noise) cell
}
}
output <- list(aic_grid,bic_grid)
#return the AIC and BIC results together
return(output)
}
######### Testing the function ###########
#WARNING! Each run takes a very long time!
#Spec 1: plain simple AR
#NOTE(review): c(0,0,1) is (p, d, q) = MA(1), although the label says AR --
#confirm whether c(1,0,0) was intended
spec1<-result_table(c(0,0,1),c(3,3))
#Spec 2: simple ARMA
spec2<-result_table(c(1,0,1),c(3,3))
#Spec 3: complex ARIMA
spec3<-result_table(c(2,1,1),c(4,4))
#Spec 4: an even more complex ARIMA
spec4<-result_table(c(4,2,3),c(5,5))
|
/EFRP_EconometricswithR_Paulovics-Plesz.R
|
no_license
|
pleszboldi/EFRP_EconometricswithR_Plesz
|
R
| false
| false
| 4,232
|
r
|
library(tseries)
library(forecast)
library(magrittr)
set.seed(0)
#seed beállítása, hogy ugyanazok a pszeudorandom számaink legyenek
#1. Storage function: bundles the simulation parameters into one numeric
# vector, in the order expected by ts_gen():
# c(tslength, nofts, noise, p, d, q)
dgp_parameterek <- function(tslength, nofts, noise, p, d, q) {
  c(tslength, nofts, noise, p, d, q)
}
#2. Data-generating function
# Simulates `nofts` independent ARIMA(p,d,q) series of length `tslength`,
# one per column of the returned matrix.
#
# x: parameter vector from dgp_parameterek():
#    c(tslength, nofts, noise, p, d, q), where noise is the innovation sd
# Returns: a tslength x nofts numeric matrix (one series per column).
ts_gen <- function(x) {
  tslength <- x[1]
  nofts <- x[2]
  noise <- x[3]
  p <- x[4]
  d <- x[5]
  q <- x[6]
  idosorok <- array(numeric(), c(tslength, nofts))
  for (i in 1:nofts) {
    # AR/MA coefficients drawn uniformly from (-0.5, 0.5) for robustness.
    # NOTE(review): for p >= 3 this does not guarantee a stationary AR
    # polynomial, in which case arima.sim() stops with an error -- confirm
    # whether a retry loop is wanted for such specifications.
    sim <- arima.sim(model = list(order = c(p, d, q),
                                  ar = runif(p, min = -0.5, max = 0.5),
                                  ma = runif(q, min = -0.5, max = 0.5)),
                     n = tslength, sd = noise)
    # Bug fix: when d > 0, arima.sim() un-differences via diffinv() and so
    # returns tslength + d values; assigning that directly into a
    # tslength-long column raised a recycling warning and silently dropped
    # values. Truncate explicitly instead.
    idosorok[, i] <- as.numeric(sim)[seq_len(tslength)]
  }
  return(idosorok)
}
#3. Storage for the fitting parameters: the largest AR order (maxp) and
# MA order (maxq) that fitmodel() will scan
fit_input <- function(maxp, maxq) {
  c(maxp, maxq)
}
#4. Model fitting
# Scans every ARMA order pair (p, q) with 0 <= p <= maxp, 0 <= q <= maxq,
# choosing the differencing order d once via a KPSS test
# (forecast::ndiffs), and reports the (p, q) pair minimising AIC and the
# pair minimising BIC.
#
# x:  c(maxp, maxq), e.g. as produced by fit_input()
# ts: the series to fit
# Returns: list(aic_opt, bic_opt); each element is the 0-based (p, q)
# location of the minimum of the corresponding criterion matrix.
fitmodel <- function(x, ts) {
  maxp <- x[1]
  maxq <- x[2]
  # one cell per candidate (p, q); row/column 1 corresponds to order 0
  aic_tab <- array(numeric(), c(maxp + 1, maxq + 1))
  bic_tab <- array(numeric(), c(maxp + 1, maxq + 1))
  # differencing order, decided once for all candidate models
  d <- ndiffs(ts, test = "kpss")
  for (ar_order in 0:maxp) {
    for (ma_order in 0:maxq) {
      candidate <- arima(ts, order = c(ar_order, d, ma_order))
      aic_tab[ar_order + 1, ma_order + 1] <- AIC(candidate)
      bic_tab[ar_order + 1, ma_order + 1] <- BIC(candidate)
    }
  }
  # location of the smallest criterion value; the -1 converts the 1-based
  # array index back to the (p, q) order pair
  aic_opt <- which(aic_tab == min(aic_tab), arr.ind = TRUE) - 1
  bic_opt <- which(bic_tab == min(bic_tab), arr.ind = TRUE) - 1
  list(aic_opt, bic_opt)
}
#########################################
#5. The simulation function
# For a fixed data-generating order pdq = c(p, d, q), runs the Monte Carlo
# experiment over a 5x5 grid of (series length, noise sd) settings with
# 1000 replications per cell, recording how often AIC and BIC recover the
# true (p, q) order.
#
# pdq: true ARIMA order of the data-generating process, c(p, d, q)
# max: c(maxp, maxq) search bounds passed to fitmodel()
# Returns: list(aic_grid, bic_grid) of hit rates in [0, 1];
#          rows index tslength, columns index noise.
result_table<-function(pdq,max){
aic_grid<-array(numeric(),c(5,5))
bic_grid<-array(numeric(),c(5,5))
noise<-c(0.01,0.1,1,10,100)
tslength<-c(100,500,1000,5000,10000)
#define the two result grids and their axis parameters
#NOTE(review): the replication count 1000 is hard-coded below (loop bound,
#nofts, and the divisors) -- keep all four in sync if it is changed
for (i in 1:5) {
for (j in 1:5) {
parameterek <- c(tslength[i],1000,noise[j],pdq[1],pdq[2],pdq[3])
#collect the parameter values for grid cell (i, j) into one vector
ts <- ts_gen(parameterek)
#generate the time series (one per column)
aicszamlalo <- 0
bicszamlalo <- 0
for (k in 1:1000){
ered <- try(fitmodel(max,ts[,k]),silent=TRUE)
#search for the optimal model; try() is needed because arima()'s
#numerical optimisation can occasionally fail
if(is.numeric(ered[[1]][1])==TRUE){
#checks that the result is numeric, i.e. not a try-error object
if(ered[[1]][1] == pdq[1] & ered[[1]][2] == pdq[3]){
aicszamlalo <- aicszamlalo+1}
if(ered[[2]][1] == pdq[1] & ered[[2]][2] == pdq[3]){
bicszamlalo <- bicszamlalo+1}
#counts whether AIC / BIC recovered the true (p, q) order
#NOTE(review): on criterion ties fitmodel() returns a multi-row matrix and
#[1]/[2] then read the first column only -- confirm ties are negligible
}
}
aic_grid[i,j] <- aicszamlalo/1000
bic_grid[i,j] <- bicszamlalo/1000
#store the hit rates for this (sample size, noise) cell
}
}
output <- list(aic_grid,bic_grid)
#return the AIC and BIC results together
return(output)
}
######### Testing the function ###########
#WARNING! Each run takes a very long time!
#Spec 1: plain simple AR
#NOTE(review): c(0,0,1) is (p, d, q) = MA(1), although the label says AR --
#confirm whether c(1,0,0) was intended
spec1<-result_table(c(0,0,1),c(3,3))
#Spec 2: simple ARMA
spec2<-result_table(c(1,0,1),c(3,3))
#Spec 3: complex ARIMA
spec3<-result_table(c(2,1,1),c(4,4))
#Spec 4: an even more complex ARIMA
spec4<-result_table(c(4,2,3),c(5,5))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getGraphIdsInCorpus.R
\name{getGraphIdsInCorpus}
\alias{getGraphIdsInCorpus}
\title{Deprecated synonym for getTranscriptIdsInCorpus.}
\usage{
getGraphIdsInCorpus(labbcat.url, id)
}
\arguments{
\item{labbcat.url}{URL to the LaBB-CAT instance}
\item{id}{The ID (name) of the corpus}
}
\value{
A list of transcript IDs (transcript names) in the given corpus
}
\description{
Deprecated synonym for \code{getTranscriptIdsInCorpus}: returns a list of
transcript IDs in the given corpus of the 'LaBB-CAT' instance.
}
\examples{
\dontrun{
## List transcripts in the QB corpus
transcripts <- getGraphIdsInCorpus("https://labbcat.canterbury.ac.nz/demo/", "QB")
}
}
\seealso{
\code{\link{getTranscriptIdsInCorpus}}
}
\keyword{corpora}
\keyword{corpus}
|
/man/getGraphIdsInCorpus.Rd
|
no_license
|
cran/nzilbb.labbcat
|
R
| false
| true
| 696
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getGraphIdsInCorpus.R
\name{getGraphIdsInCorpus}
\alias{getGraphIdsInCorpus}
\title{Deprecated synonym for getTranscriptIdsInCorpus.}
\usage{
getGraphIdsInCorpus(labbcat.url, id)
}
\arguments{
\item{labbcat.url}{URL to the LaBB-CAT instance}
\item{id}{The ID (name) of the corpus}
}
\value{
A list of transcript IDs (transcript names) in the given corpus
}
\description{
Deprecated synonym for \code{getTranscriptIdsInCorpus}: returns a list of
transcript IDs in the given corpus of the 'LaBB-CAT' instance.
}
\examples{
\dontrun{
## List transcripts in the QB corpus
transcripts <- getGraphIdsInCorpus("https://labbcat.canterbury.ac.nz/demo/", "QB")
}
}
\seealso{
\code{\link{getTranscriptIdsInCorpus}}
}
\keyword{corpora}
\keyword{corpus}
|
# Setup dependencies ------------------------------------------
library(shiny)
source("../predictions.R")
highest_order_ngram = 5
ngram.rankings = 5
# Load each cached n-gram table into the global environment once per R
# session; the exists() guard skips the (slow) readRDS when a previous
# source of this file already loaded the object. The loop over the
# configured highest_order_ngram implements the TODO that replaced five
# hand-written if-blocks, and stays in sync if the model order changes.
for (ngram_order in seq_len(highest_order_ngram)) {
  object_name <- paste0("ngram_", ngram_order)
  if (!exists(object_name)) {
    assign(object_name,
           readRDS(paste0("../files/", object_name, "_3_005_3_5.rds")),
           envir = globalenv())
  }
}
# Reactive Code ----------------------------------------------------
# Shiny server: whenever input$inputString changes, re-run the n-gram
# prediction (make_prediction() comes from ../predictions.R, sourced above)
# and render the top-ranked next-word candidates as a table.
shinyServer(function(input, output) {
  # Open questions from review:
  # - How does it handle weird characters?,
  # - i/I => all lowercased is a problem
  # - Word cloud of most likely words (?) => need an estimated likelihood for that...
  output$tableResult <- renderTable({
    result <- make_prediction(input$inputString, highest_order_ngram, ngram.rankings)
    result
  })
})
|
/Ngram/server.R
|
no_license
|
FreddieK/Coursera-JHU-Capstone
|
R
| false
| false
| 1,076
|
r
|
# Setup dependencies ------------------------------------------
library(shiny)
source("../predictions.R")
highest_order_ngram = 5
ngram.rankings = 5
# Load each cached n-gram table into the global environment once per R
# session; the exists() guard skips the (slow) readRDS when a previous
# source of this file already loaded the object. The loop over the
# configured highest_order_ngram implements the TODO that replaced five
# hand-written if-blocks, and stays in sync if the model order changes.
for (ngram_order in seq_len(highest_order_ngram)) {
  object_name <- paste0("ngram_", ngram_order)
  if (!exists(object_name)) {
    assign(object_name,
           readRDS(paste0("../files/", object_name, "_3_005_3_5.rds")),
           envir = globalenv())
  }
}
# Reactive Code ----------------------------------------------------
# Shiny server: whenever input$inputString changes, re-run the n-gram
# prediction (make_prediction() comes from ../predictions.R, sourced above)
# and render the top-ranked next-word candidates as a table.
shinyServer(function(input, output) {
  # Open questions from review:
  # - How does it handle weird characters?,
  # - i/I => all lowercased is a problem
  # - Word cloud of most likely words (?) => need an estimated likelihood for that...
  output$tableResult <- renderTable({
    result <- make_prediction(input$inputString, highest_order_ngram, ngram.rankings)
    result
  })
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tree_numbering.R
\name{RenumberTree}
\alias{RenumberTree}
\alias{RenumberEdges}
\alias{Reorder}
\alias{Cladewise}
\alias{Cladewise.phylo}
\alias{Cladewise.list}
\alias{Cladewise.multiPhylo}
\alias{Cladewise.matrix}
\alias{ApePostorder}
\alias{ApePostorder.phylo}
\alias{ApePostorder.list}
\alias{ApePostorder.multiPhylo}
\alias{Postorder}
\alias{Postorder.phylo}
\alias{Postorder.list}
\alias{Postorder.multiPhylo}
\alias{Postorder.numeric}
\alias{PostorderEdges}
\alias{Pruningwise}
\alias{Pruningwise.phylo}
\alias{Pruningwise.list}
\alias{Pruningwise.multiPhylo}
\alias{Preorder}
\alias{Preorder.phylo}
\alias{Preorder.numeric}
\alias{Preorder.multiPhylo}
\alias{Preorder.list}
\title{Reorder trees}
\usage{
RenumberTree(parent, child)
RenumberEdges(parent, child, ...)
Cladewise(tree, nTip, edge)
\method{Cladewise}{phylo}(tree, nTip = length(tree$tip.label), edge = tree$edge)
\method{Cladewise}{list}(tree, nTip, edge)
\method{Cladewise}{multiPhylo}(tree, nTip, edge)
\method{Cladewise}{matrix}(tree, nTip = min(tree[, 1]) - 1L, edge)
ApePostorder(tree, nTip, edge)
\method{ApePostorder}{phylo}(tree, nTip = length(tree$tip.label), edge = tree$edge)
\method{ApePostorder}{list}(tree, nTip, edge)
\method{ApePostorder}{multiPhylo}(tree, nTip, edge)
Postorder(tree, force = FALSE, renumber = FALSE)
\method{Postorder}{phylo}(tree, force = FALSE, renumber = FALSE)
\method{Postorder}{list}(tree, force = FALSE, renumber = FALSE)
\method{Postorder}{multiPhylo}(tree, force = FALSE, renumber = FALSE)
\method{Postorder}{numeric}(tree, force = FALSE, renumber = FALSE)
PostorderEdges(edge, renumber = FALSE)
Pruningwise(tree, nTip, edge)
\method{Pruningwise}{phylo}(tree, nTip = length(tree$tip.label), edge = tree$edge)
\method{Pruningwise}{list}(tree, nTip, edge)
\method{Pruningwise}{multiPhylo}(tree, nTip, edge)
Preorder(tree)
\method{Preorder}{phylo}(tree)
\method{Preorder}{numeric}(tree)
\method{Preorder}{multiPhylo}(tree)
\method{Preorder}{list}(tree)
}
\arguments{
\item{parent}{Integer vector corresponding to the first column of the edge
matrix of a tree of class \code{\link{phylo}}, i.e. \code{tree$edge[, 1]}}
\item{child}{Integer vector corresponding to the second column of the edge
matrix of a tree of class \code{\link{phylo}}, i.e. \code{tree$edge[, 2]}.}
\item{\dots}{Deprecated; included for compatibility with previous versions.}
\item{tree}{A tree of class \code{\link[ape:read.tree]{phylo}}.}
\item{nTip}{Integer specifying number of tips (leaves).}
\item{edge}{Two-column matrix listing the parent and child of each edge in a
tree, corresponding to \code{tree$edge}. Optional in \code{Cladewise()}.}
\item{force}{Logical specifying whether to rearrange trees already in
postorder, in order to ensure edges are ordered in the 'TreeTools' fashion.}
\item{renumber}{Logical specifying whether to renumber nodes such that they
increase in number away from the root.}
}
\value{
\code{RenumberTree()} returns an edge matrix for a tree of class \code{phylo}
following the preorder convention for edge and node numbering.
\code{RenumberEdges()} formats the output of \code{RenumberTree()} into a list
whose two entries correspond to the new parent and child vectors.
\code{ApePostorder()}, \code{Cladewise()}, \code{Postorder()}, \code{Preorder()} and
\code{Pruningwise()} each return a tree of class \code{phylo} with nodes following the
specified numbering scheme.
\code{Postorder.numeric} accepts a numeric matrix corresponding to the
\code{edge} entry of a tree of class \code{phylo}, and returns a two-column array
corresponding to \code{tree}, with edges listed in postorder.
}
\description{
\code{Reorder()} is a wrapper for \code{ape:::.reorder_ape}.
Calling this C function directly is approximately twice as fast as using
\code{ape::\link[ape:reorder.phylo]{cladewise}} or
\code{ape::\link[ape:reorder.phylo]{postorder}}
}
\details{
\code{Cladewise()}, \code{ApePostorder()} and \code{Pruningwise()} are convenience
functions to the corresponding functions in 'ape'. Single nodes may
need to be collapsed using \link[ape:collapse.singles]{ape::collapse.singles} first. 'ape' functions
can cause crashes if nodes are numbered unconventionally -- sometimes
encountered after using tree rearrangement functions, e.g. \code{phangorn::SPR}.
\code{Preorder()} is more robust: it supports polytomies, nodes can be numbered
in any sequence, and edges can be listed in any order in the input tree.
Its output is guaranteed to be identical for any tree of an equivalent
topology, allowing unique trees to be detected by comparing sorted edge
matrices alone.
A tree in preorder is numbered starting from the root node.
Each node is numbered in the sequence in which it is encountered, and
each edge is listed in the sequence in which it is visited.
At each node, child edges are sorted from left to right in order of the
lowest-numbered leaf in the subtree subtended by each edge; i.e. an edge
that leads eventually to tip 1 will be to the left of an edge leading to a
subtree containing tip 2.
Numbering begins by following the leftmost edge of the root node,
and sorting its descendant subtree into preorder.
Then, the next edge at the root node is followed, and its descendants
sorted into preorder, until each edge has been visited.
\code{RenumberTree()} and \code{RenumberEdges()} are wrappers for the C function
\code{preorder_edges_and_nodes()}; they do not perform the same checks on input
as \code{Preorder()} and are intended for use where performance is at a premium.
\code{Postorder()} is modified from the 'ape' function to return a specific
order: edges are listed from the node that subtends the smallest
subtree to the one that subtends the largest (i.e. the root node), with
all of a node's descendant edges listed adjacently. If a tree is already
in postorder, it will not be rearranged unless \code{force = TRUE}.
Methods applied to numeric inputs do not check input for sanity, so should
be used with caution: malformed input may cause undefined results, including
crashing R.
Trees with >8191 leaves require additional memory and are not handled
at present. If you need to process such large trees, please contact the
maintainer for advice.
}
\section{Functions}{
\itemize{
\item \code{Cladewise}: Reorder tree cladewise.
\item \code{ApePostorder}: Reorder tree in Postorder using ape's \code{postorder}
function, which is robust to unconventional node numbering.
\item \code{Postorder}: Reorder tree in Postorder. Edge lengths are not retained.
\item \code{Pruningwise}: Reorder tree Pruningwise.
\item \code{Preorder}: Reorder tree in Preorder (special case of cladewise).
}}
\seealso{
Rotate each node into a consistent orientation with \code{\link[=SortTree]{SortTree()}}.
Other tree manipulation:
\code{\link{AddTip}()},
\code{\link{CollapseNode}()},
\code{\link{ConsensusWithout}()},
\code{\link{DropTip}()},
\code{\link{EnforceOutgroup}()},
\code{\link{LeafLabelInterchange}()},
\code{\link{MakeTreeBinary}()},
\code{\link{RenumberTips}()},
\code{\link{Renumber}()},
\code{\link{RootTree}()},
\code{\link{SingleTaxonTree}()},
\code{\link{SortTree}()},
\code{\link{Subtree}()}
Other C wrappers:
\code{\link{Neworder}}
}
\author{
\code{Preorder()} and \code{Postorder()}: Martin R. Smith.
\code{Cladewise()}, \code{ApePostorder()} and \code{Pruningwise()}: modified by Martin R.
Smith from \code{.reorder_ape()} in \pkg{ape} (Emmanuel Paradis).
}
\concept{C wrappers}
\concept{tree manipulation}
\keyword{internal}
|
/fuzzedpackages/TreeTools/man/Reorder.Rd
|
no_license
|
akhikolla/testpackages
|
R
| false
| true
| 7,854
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tree_numbering.R
\name{RenumberTree}
\alias{RenumberTree}
\alias{RenumberEdges}
\alias{Reorder}
\alias{Cladewise}
\alias{Cladewise.phylo}
\alias{Cladewise.list}
\alias{Cladewise.multiPhylo}
\alias{Cladewise.matrix}
\alias{ApePostorder}
\alias{ApePostorder.phylo}
\alias{ApePostorder.list}
\alias{ApePostorder.multiPhylo}
\alias{Postorder}
\alias{Postorder.phylo}
\alias{Postorder.list}
\alias{Postorder.multiPhylo}
\alias{Postorder.numeric}
\alias{PostorderEdges}
\alias{Pruningwise}
\alias{Pruningwise.phylo}
\alias{Pruningwise.list}
\alias{Pruningwise.multiPhylo}
\alias{Preorder}
\alias{Preorder.phylo}
\alias{Preorder.numeric}
\alias{Preorder.multiPhylo}
\alias{Preorder.list}
\title{Reorder trees}
\usage{
RenumberTree(parent, child)
RenumberEdges(parent, child, ...)
Cladewise(tree, nTip, edge)
\method{Cladewise}{phylo}(tree, nTip = length(tree$tip.label), edge = tree$edge)
\method{Cladewise}{list}(tree, nTip, edge)
\method{Cladewise}{multiPhylo}(tree, nTip, edge)
\method{Cladewise}{matrix}(tree, nTip = min(tree[, 1]) - 1L, edge)
ApePostorder(tree, nTip, edge)
\method{ApePostorder}{phylo}(tree, nTip = length(tree$tip.label), edge = tree$edge)
\method{ApePostorder}{list}(tree, nTip, edge)
\method{ApePostorder}{multiPhylo}(tree, nTip, edge)
Postorder(tree, force = FALSE, renumber = FALSE)
\method{Postorder}{phylo}(tree, force = FALSE, renumber = FALSE)
\method{Postorder}{list}(tree, force = FALSE, renumber = FALSE)
\method{Postorder}{multiPhylo}(tree, force = FALSE, renumber = FALSE)
\method{Postorder}{numeric}(tree, force = FALSE, renumber = FALSE)
PostorderEdges(edge, renumber = FALSE)
Pruningwise(tree, nTip, edge)
\method{Pruningwise}{phylo}(tree, nTip = length(tree$tip.label), edge = tree$edge)
\method{Pruningwise}{list}(tree, nTip, edge)
\method{Pruningwise}{multiPhylo}(tree, nTip, edge)
Preorder(tree)
\method{Preorder}{phylo}(tree)
\method{Preorder}{numeric}(tree)
\method{Preorder}{multiPhylo}(tree)
\method{Preorder}{list}(tree)
}
\arguments{
\item{parent}{Integer vector corresponding to the first column of the edge
matrix of a tree of class \code{\link{phylo}}, i.e. \code{tree$edge[, 1]}}
\item{child}{Integer vector corresponding to the second column of the edge
matrix of a tree of class \code{\link{phylo}}, i.e. \code{tree$edge[, 2]}.}
\item{\dots}{Deprecated; included for compatibility with previous versions.}
\item{tree}{A tree of class \code{\link[ape:read.tree]{phylo}}.}
\item{nTip}{Integer specifying number of tips (leaves).}
\item{edge}{Two-column matrix listing the parent and child of each edge in a
tree, corresponding to \code{tree$edge}. Optional in \code{Cladewise()}.}
\item{force}{Logical specifying whether to rearrange trees already in
postorder, in order to ensure edges are ordered in the 'TreeTools' fashion.}
\item{renumber}{Logical specifying whether to renumber nodes such that they
increase in number away from the root.}
}
\value{
\code{RenumberTree()} returns an edge matrix for a tree of class \code{phylo}
following the preorder convention for edge and node numbering.
\code{RenumberEdges()} formats the output of \code{RenumberTree()} into a list
whose two entries correspond to the new parent and child vectors.
\code{ApePostorder()}, \code{Cladewise()}, \code{Postorder()}, \code{Preorder()} and
\code{Pruningwise()} each return a tree of class \code{phylo} with nodes following the
specified numbering scheme.
\code{Postorder.numeric} accepts a numeric matrix corresponding to the
\code{edge} entry of a tree of class \code{phylo}, and returns a two-column array
corresponding to \code{tree}, with edges listed in postorder.
}
\description{
\code{Reorder()} is a wrapper for \code{ape:::.reorder_ape}.
Calling this C function directly is approximately twice as fast as using
\code{ape::\link[ape:reorder.phylo]{cladewise}} or
\code{ape::\link[ape:reorder.phylo]{postorder}}
}
\details{
\code{Cladewise()}, \code{ApePostorder()} and \code{Pruningwise()} are convenience
functions to the corresponding functions in 'ape'. Single nodes may
need to be collapsed using \link[ape:collapse.singles]{ape::collapse.singles} first. 'ape' functions
can cause crashes if nodes are numbered unconventionally -- sometimes
encountered after using tree rearrangement functions, e.g. \code{phangorn::SPR}.
\code{Preorder()} is more robust: it supports polytomies, nodes can be numbered
in any sequence, and edges can be listed in any order in the input tree.
Its output is guaranteed to be identical for any tree of an equivalent
topology, allowing unique trees to be detected by comparing sorted edge
matrices alone.
A tree in preorder is numbered starting from the root node.
Each node is numbered in the sequence in which it is encountered, and
each edge is listed in the sequence in which it is visited.
At each node, child edges are sorted from left to right in order of the
lowest-numbered leaf in the subtree subtended by each edge; i.e. an edge
that leads eventually to tip 1 will be to the left of an edge leading to a
subtree containing tip 2.
Numbering begins by following the leftmost edge of the root node,
and sorting its descendant subtree into preorder.
Then, the next edge at the root node is followed, and its descendants
sorted into preorder, until each edge has been visited.
\code{RenumberTree()} and \code{RenumberEdges()} are wrappers for the C function
\code{preorder_edges_and_nodes()}; they do not perform the same checks on input
as \code{Preorder()} and are intended for use where performance is at a premium.
\code{Postorder()} is modified from the 'ape' function to return a specific
order: edges are listed from the node that subtends the smallest
subtree to the one that subtends the largest (i.e. the root node), with
all of a node's descendant edges listed adjacently. If a tree is already
in postorder, it will not be rearranged unless \code{force = TRUE}.
Methods applied to numeric inputs do not check input for sanity, so should
be used with caution: malformed input may cause undefined results, including
crashing R.
Trees with >8191 leaves require additional memory and are not handled
at present. If you need to process such large trees, please contact the
maintainer for advice.
}
\section{Functions}{
\itemize{
\item \code{Cladewise}: Reorder tree cladewise.
\item \code{ApePostorder}: Reorder tree in Postorder using ape's \code{postorder}
function, which is robust to unconventional node numbering.
\item \code{Postorder}: Reorder tree in Postorder. Edge lengths are not retained.
\item \code{Pruningwise}: Reorder tree Pruningwise.
\item \code{Preorder}: Reorder tree in Preorder (special case of cladewise).
}}
\seealso{
Rotate each node into a consistent orientation with \code{\link[=SortTree]{SortTree()}}.
Other tree manipulation:
\code{\link{AddTip}()},
\code{\link{CollapseNode}()},
\code{\link{ConsensusWithout}()},
\code{\link{DropTip}()},
\code{\link{EnforceOutgroup}()},
\code{\link{LeafLabelInterchange}()},
\code{\link{MakeTreeBinary}()},
\code{\link{RenumberTips}()},
\code{\link{Renumber}()},
\code{\link{RootTree}()},
\code{\link{SingleTaxonTree}()},
\code{\link{SortTree}()},
\code{\link{Subtree}()}
Other C wrappers:
\code{\link{Neworder}}
}
\author{
\code{Preorder()} and \code{Postorder()}: Martin R. Smith.
\code{Cladewise()}, \code{ApePostorder()} and \code{Pruningwise()}: modified by Martin R.
Smith from \code{.reorder_ape()} in \pkg{ape} (Emmanuel Paradis).
}
\concept{C wrappers}
\concept{tree manipulation}
\keyword{internal}
|
# Shiny server logic for the visNetwork basic examples.
# Each rendered network is paired with a code_* renderText output whose
# single-quoted string literal reproduces the source shown in the UI;
# those strings are runtime output and must stay byte-identical.

# Minimal example: three nodes, two edges (one of them a self-loop 1 -> 1).
output$network_hello <- renderVisNetwork({
# minimal example
nodes <- data.frame(id = 1:3)
edges <- data.frame(from = c(1,2), to = c(1,3))
visNetwork(nodes, edges)
})
# Source snippet for the minimal example, displayed as text in the UI.
output$code_network_hello <- renderText({
'
# in server.R :
output$network_hello <- renderVisNetwork({
# minimal example
nodes <- data.frame(id = 1:3)
edges <- data.frame(from = c(1,2), to = c(1,3))
visNetwork(nodes, edges)
})
# in ui.R
visNetworkOutput("network_hello",height = "200px")
'
})
# Icon example: groups "A" and "B" rendered as FontAwesome icons
# (codes f0c0 / f007), with a fully custom legend (useGroups = FALSE).
output$network_icon <- renderVisNetwork({
nodes <- data.frame(id = 1:3, group = c("B", "A", "B"))
edges <- data.frame(from = c(1,2), to = c(2,3))
visNetwork(nodes, edges) %>%
visGroups(groupname = "A", shape = "icon", icon = list(code = "f0c0", size = 75)) %>%
visGroups(groupname = "B", shape = "icon", icon = list(code = "f007", color = "red")) %>%
addFontAwesome() %>%
visLegend(addNodes = list(
list(label = "A", shape = "icon", icon = list(code = "f0c0", size = 25)),
list(label = "B", shape = "icon", icon = list(code = "f007", size = 50, color = "red"))
),
addEdges = data.frame(label = "link"), useGroups = FALSE)
})
# Source snippet for the icon example, displayed as text in the UI.
output$code_network_icon <- renderText({
'
nodes <- data.frame(id = 1:3, group = c("B", "A", "B"))
edges <- data.frame(from = c(1,2), to = c(2,3))
visNetwork(nodes, edges) %>%
visGroups(groupname = "A", shape = "icon", icon = list(code = "f0c0", size = 75)) %>%
visGroups(groupname = "B", shape = "icon", icon = list(code = "f007", color = "red")) %>%
addFontAwesome() %>%
visLegend(addNodes = list(
list(label = "A", shape = "icon", icon = list(code = "f0c0", size = 25)),
list(label = "B", shape = "icon", icon = list(code = "f007", size = 50, color = "red"))
),
addEdges = data.frame(label = "link"), useGroups = FALSE)
'
})
|
/packrat/lib/x86_64-pc-linux-gnu/3.2.5/visNetwork/shiny/src/server/basic_server.R
|
permissive
|
harryprince/seamonster
|
R
| false
| false
| 1,913
|
r
|
# Shiny server logic for the visNetwork basic examples (duplicate copy).
# Each rendered network is paired with a code_* renderText output whose
# single-quoted string literal reproduces the source shown in the UI;
# those strings are runtime output and must stay byte-identical.

# Minimal example: three nodes, two edges (one of them a self-loop 1 -> 1).
output$network_hello <- renderVisNetwork({
# minimal example
nodes <- data.frame(id = 1:3)
edges <- data.frame(from = c(1,2), to = c(1,3))
visNetwork(nodes, edges)
})
# Source snippet for the minimal example, displayed as text in the UI.
output$code_network_hello <- renderText({
'
# in server.R :
output$network_hello <- renderVisNetwork({
# minimal example
nodes <- data.frame(id = 1:3)
edges <- data.frame(from = c(1,2), to = c(1,3))
visNetwork(nodes, edges)
})
# in ui.R
visNetworkOutput("network_hello",height = "200px")
'
})
# Icon example: groups "A" and "B" rendered as FontAwesome icons
# (codes f0c0 / f007), with a fully custom legend (useGroups = FALSE).
output$network_icon <- renderVisNetwork({
nodes <- data.frame(id = 1:3, group = c("B", "A", "B"))
edges <- data.frame(from = c(1,2), to = c(2,3))
visNetwork(nodes, edges) %>%
visGroups(groupname = "A", shape = "icon", icon = list(code = "f0c0", size = 75)) %>%
visGroups(groupname = "B", shape = "icon", icon = list(code = "f007", color = "red")) %>%
addFontAwesome() %>%
visLegend(addNodes = list(
list(label = "A", shape = "icon", icon = list(code = "f0c0", size = 25)),
list(label = "B", shape = "icon", icon = list(code = "f007", size = 50, color = "red"))
),
addEdges = data.frame(label = "link"), useGroups = FALSE)
})
# Source snippet for the icon example, displayed as text in the UI.
output$code_network_icon <- renderText({
'
nodes <- data.frame(id = 1:3, group = c("B", "A", "B"))
edges <- data.frame(from = c(1,2), to = c(2,3))
visNetwork(nodes, edges) %>%
visGroups(groupname = "A", shape = "icon", icon = list(code = "f0c0", size = 75)) %>%
visGroups(groupname = "B", shape = "icon", icon = list(code = "f007", color = "red")) %>%
addFontAwesome() %>%
visLegend(addNodes = list(
list(label = "A", shape = "icon", icon = list(code = "f0c0", size = 25)),
list(label = "B", shape = "icon", icon = list(code = "f007", size = 50, color = "red"))
),
addEdges = data.frame(label = "link"), useGroups = FALSE)
'
})
|
# Downloads quarterly GDP series (total, agriculture, industry, services)
# from the Brazilian Central Bank (BCB) SGS API and exports the combined
# table as CSV and XLSX in the current working directory.

# Load required packages
library(tidyverse)
library(lubridate)
library(jsonlite)
library(writexl)

# SGS codes of the desired series (quarterly GDP, observed data)
Cod_Serie <- c(22099, # GDP at market prices
               22083, # Agriculture (total), SCN 2010
               22084, # Industry (total)
               22089  # Services (total)
)

# Column names assigned to each series, in the same order as Cod_Serie
Nome_Serie <- c("PIB_tri",  # GDP at market prices
                "PIB_agro", # Agriculture (total)
                "PIB_ind",  # Industry (total)
                "PIB_ser"   # Services (total)
)

# Request URLs, one per series (built once; paste0 is vectorized over
# Cod_Serie, so this no longer needs to be rebuilt on every loop iteration)
url <- paste0("https://api.bcb.gov.br/dados/serie/bcdata.sgs.",
              Cod_Serie, "/dados?formato=json")

# Download each series and rename its "valor" column to the chosen name
series <- vector(mode = "list", length = length(Cod_Serie))
for (i in seq_along(Cod_Serie)) {
  series[[i]] <- fromJSON(url[i])
  names(series[[i]])[names(series[[i]]) == "valor"] <- Nome_Serie[i]
}

# Bind all series into one data frame
t_df <- as.data.frame(series)
# Keep a single date column
t_df_data <- t_df %>% select(data)
# Keep only the value columns, in the declared order (all_of() makes the
# external-vector selection explicit and errors on missing columns)
t_df <- t_df %>% select(all_of(Nome_Serie))
# Recombine: date column followed by the value columns
t_df_data <- t_df_data %>% add_column(t_df)
df <- t_df_data

# Export as CSV (semicolon-separated / decimal comma)
write.csv2(df, file = "dados_bcb", row.names = FALSE)

# Export as XLSX.
# Fix: the original computed paste0(local, '/dados_bcb.xlsx') before `local`
# existed, which errors at runtime; the intended base path is `pasta`.
pasta <- getwd()
local <- paste0(pasta, "/dados_bcb.xlsx")
write_xlsx(df, local)
|
/Get_data.R
|
no_license
|
FernandoAlvesSilveira/Get_BCB_data
|
R
| false
| false
| 2,165
|
r
|
# Downloads quarterly GDP series (total, agriculture, industry, services)
# from the Brazilian Central Bank (BCB) SGS API and exports the combined
# table as CSV and XLSX in the current working directory.

# Load required packages
library(tidyverse)
library(lubridate)
library(jsonlite)
library(writexl)

# SGS codes of the desired series (quarterly GDP, observed data)
Cod_Serie <- c(22099, # GDP at market prices
               22083, # Agriculture (total), SCN 2010
               22084, # Industry (total)
               22089  # Services (total)
)

# Column names assigned to each series, in the same order as Cod_Serie
Nome_Serie <- c("PIB_tri",  # GDP at market prices
                "PIB_agro", # Agriculture (total)
                "PIB_ind",  # Industry (total)
                "PIB_ser"   # Services (total)
)

# Request URLs, one per series (built once; paste0 is vectorized over
# Cod_Serie, so this no longer needs to be rebuilt on every loop iteration)
url <- paste0("https://api.bcb.gov.br/dados/serie/bcdata.sgs.",
              Cod_Serie, "/dados?formato=json")

# Download each series and rename its "valor" column to the chosen name
series <- vector(mode = "list", length = length(Cod_Serie))
for (i in seq_along(Cod_Serie)) {
  series[[i]] <- fromJSON(url[i])
  names(series[[i]])[names(series[[i]]) == "valor"] <- Nome_Serie[i]
}

# Bind all series into one data frame
t_df <- as.data.frame(series)
# Keep a single date column
t_df_data <- t_df %>% select(data)
# Keep only the value columns, in the declared order (all_of() makes the
# external-vector selection explicit and errors on missing columns)
t_df <- t_df %>% select(all_of(Nome_Serie))
# Recombine: date column followed by the value columns
t_df_data <- t_df_data %>% add_column(t_df)
df <- t_df_data

# Export as CSV (semicolon-separated / decimal comma)
write.csv2(df, file = "dados_bcb", row.names = FALSE)

# Export as XLSX.
# Fix: the original computed paste0(local, '/dados_bcb.xlsx') before `local`
# existed, which errors at runtime; the intended base path is `pasta`.
pasta <- getwd()
local <- paste0(pasta, "/dados_bcb.xlsx")
write_xlsx(df, local)
|
## library("graph")

# Deterministic seed so the random graphs built by these tests are
# reproducible across runs.
set.seed(0x12a9b)

# Build a random directed graphBAM with the requested numbers of nodes and
# edges; edge weights are simply 1..numNodes.
randBAMGraph <- function(numNodes = 10, numEdges = 10)
{
  spec <- graph:::randFromTo(numNodes, numEdges)
  spec$ft$weight = seq_len(numNodes)
  graphBAM(spec$ft, nodes = spec$nodes, edgemode = "directed")
}
# Construct the small directed test graph (nodes a, b, c, x, y) used by
# most of the tests below.
make_smallBAM <- function() {
  edge_df <- data.frame(
    from   = c("a", "a", "a", "x", "x", "c"),
    to     = c("b", "c", "x", "y", "c", "a"),
    weight = c(3.4, 2.6, 1.7, 5.3, 1.6, 7.9)
  )
  graphBAM(edge_df, edgemode = "directed")
}
# Construct the small undirected test graph (nodes a, b, c, d, x, y) used
# by the undirected-mode tests below.
make_unDirectedBAM <- function() {
  edge_df <- data.frame(
    from   = c("a", "a", "a", "x", "x", "c"),
    to     = c("b", "c", "x", "y", "c", "d"),
    weight = c(3.4, 2.6, 1.7, 5.3, 1.6, 7.9)
  )
  graphBAM(edge_df, edgemode = "undirected")
}
# Build a 100-node / 100-edge random directed graphBAM.
# Fix: call graph:::randFromTo() explicitly, as randBAMGraph() above does --
# randFromTo() is an unexported helper of the 'graph' package, so the
# unqualified call fails unless the symbol happens to be attached.
create_bigBAM <- function()
{
  r1 <- graph:::randFromTo(100, 100)
  r1$ft$weight <- seq_len(100)
  g1 <- graphBAM(r1$ft, r1$nodes, edgemode="directed")
  g1
}
# Tests graphBAM construction from a data.frame plus explicit node set, in
# both directed and undirected mode: edge count, adjacency, edge lists and
# per-node weights.
test_create_graphBAMSmall <- function() {
from = c("a", "d", "d", "b")
to = c("b", "a", "d", "c")
weight= c(1.5, 3.1, 5.4, 1)
nodes = c("a","b","c","d")
df <- data.frame(from, to, weight)
g1 <- graphBAM(df, nodes, edgemode = "directed")
g2 <- graphBAM(df, nodes, edgemode = "undirected")
checkEquals(4L, numEdges(g1))
checkEquals(isDirected(g1), TRUE)
checkEquals(isAdjacent(g1, c("a", "d", "b"), c("b", "d", "c") ), c(TRUE,TRUE,TRUE))
checkEquals(names(edges(g1)), c("a", "b", "c", "d"))
k <- edges(g1)
checkEquals(list(k$a, k$b, k$c, k$d), list("b", "c", character(0), c("a", "d")))
w <- edgeWeights(g1)
checkEquals(names(w), c("a", "b", "c", "d"))
checkEquals(list(w$a, w$b, w$c, w$d), list(structure(1.5, names="b"),
structure(1, names="c"), numeric(0), structure(c(3.1, 5.4),
names= c("a", "d"))))
checkEquals(4L, numNodes(g2))
checkEquals(4L, numEdges(g2))
checkEquals(isDirected(g2), FALSE)
# NOTE(review): this last check queries g1 again although the surrounding
# checks are about g2 -- possibly a typo for isAdjacent(g2, ...); confirm.
checkEquals(isAdjacent(g1, c("a","d","b"), c("b","d","c") ), c(TRUE,TRUE,TRUE))
}
# Tests that graphBAM() derives its node set from the from/to columns and
# that the requested edgemode is preserved.
test_BAMNodes <- function() {
from = c("a", "a", "a", "x", "x", "c")
to = c("b", "c", "x", "y", "c", "a")
weight=c(3.4, 2.6, 1.7, 5.3, 1.6, 7.9)
df <- data.frame(from, to, weight)
g1 <- graphBAM(df, edgemode = "directed")
nds <- nodes(g1)
checkIdentical(all(nds %in% unique(c(from,to))),TRUE)
checkIdentical(isDirected(g1),TRUE)
}
# Helper: verify that subG contains exactly those edges of g whose endpoints
# both survive in subG, with matching weights. Decodes the bit-vector edge
# representation via the internal graph_bitarray_rowColPos C routine.
checkBAMSubGraph <- function(g, subG) {
nds <- nodes(g)
subNodes <- nodes(subG)
# Reconstruct (from, to, weight) triples for the full graph
w1 <- g@edgeSet@weights
ft1 <- .Call(graph:::graph_bitarray_rowColPos, g@edgeSet@bit_vector)
origFromTo <- data.frame(from=nds[ft1[,"from"]], to = nds[ft1[,"to"]], weights = w1)
# ... and for the subgraph
w2 <- subG@edgeSet@weights
ft2 <- .Call(graph:::graph_bitarray_rowColPos, subG@edgeSet@bit_vector)
subFromTo <- data.frame(from = subNodes[ft2[,"from"]], to = subNodes[ft2[,"to"]], weights = w2)
# Edges of g with both endpoints inside the subgraph's node set
indx <- (origFromTo$from %in% subNodes) &
(origFromTo$to %in% subNodes)
want <- origFromTo[(origFromTo$from %in% subNodes) & (origFromTo$to %in% subNodes),]
checkEquals(as.character(want$from), as.character(subFromTo$from))
checkIdentical(as.character(want$to), as.character(subFromTo$to))
checkEquals(g@edgeSet@weights[indx], subG@edgeSet@weights)
}
# subGraph() on a small fixed directed graph: node set, directedness, and
# edge/weight preservation (via checkBAMSubGraph).
test_BAMSubGraph_Small <- function() {
g1 <- make_smallBAM()
sg <- subGraph(c("a","x", "y"), g1)
checkIdentical(isDirected(sg), TRUE)
checkIdentical(nodes(sg), c("a", "x", "y"))
checkBAMSubGraph(g1,sg)
}
# subGraph() on a larger random graph with a random 55-node subset.
test_BAMSubGraph_Large <- function() {
g1 <- randBAMGraph(100,100)
sn <- sample(nodes(g1), 55)
sg <- subGraph( sn, g1)
checkIdentical(isDirected(sg), TRUE)
checkBAMSubGraph(g1,sg)
}
# edgeWeights() on a directed graph: full listing, plus subsetting by
# character and by numeric index (both should select the same nodes).
test_BAM_edgeWeights <- function() {
g1 <- make_smallBAM()
ew1 <- edgeWeights(g1)
checkEquals(names(ew1), c("a", "b", "c", "x", "y"))
checkEquals(list(ew1$a, ew1$b, ew1$c, ew1$x, ew1$y),
list(structure( c(3.4, 2.6, 1.7), names = c("b","c","x")),
numeric(0), structure(c(7.9), names = "a"),
structure(c(1.6, 5.3), names= c("c", "y")), numeric(0)))
ew2 <- edgeWeights(g1,c("a","b")) ##index = char
checkEquals(names(ew2), c("a","b"))
checkEquals(list(ew2$a, ew2$b), list(structure( c(3.4, 2.6, 1.7),
names = c("b","c","x")), numeric(0)))
ew2 <- edgeWeights(g1, 1:2) ##index = numeric
checkEquals(names(ew2), c("a","b"))
checkEquals(list(ew2$a, ew2$b), list(structure( c(3.4, 2.6, 1.7),
names = c("b","c","x")), numeric(0)))
}
# edgeWeights() on an undirected graph: every edge must appear from both
# endpoints, self-loops (d-d) once, and an isolated node ("e") yields an
# empty numeric vector.
test_BAM_edgeWeights_undirected <- function()
{
from = c("a", "d", "d", "b", "a")
to = c("b", "a", "d", "c", "c")
weight = c(1.5, 2.1, 3.4, 4.1, 5.6)
df <- data.frame(from, to, weight)
gu <- graphBAM(df, nodes="e", edgemode = "undirected")
want <- list(a=c(b=1.5, c=5.6, d=2.1),
b=c(a=1.5, c=4.1),
c=c(a=5.6, b=4.1),
d=c(a=2.1, d=3.4),
e=numeric(0))
checkEquals(want, edgeWeights(gu))
checkEquals(want[c("c", "a")], edgeWeights(gu, c("c", "a")))
}
# edges() on a directed graph: full listing and subsetting by node name
# (result order follows the requested names).
test_BAM_edges <- function() {
g1 <- make_smallBAM()
ew1 <- edges(g1)
checkEquals(names(ew1), c("a", "b", "c", "x", "y"))
checkEquals(list(ew1$a, ew1$b, ew1$c, ew1$x, ew1$y),
list( c("b","c","x"), character(0), "a", c("c", "y"), character(0)))
ew2 <- edges(g1, c("c", "b"))
checkEquals(names(ew2), c("c","b"))
checkEquals(list(ew2$c, ew2$b), list("a", character(0)))
}
# adj() should agree with edges() for the requested nodes.
test_BAM_adj <- function() {
g1 <- make_smallBAM()
ew <- adj(g1, c("c", "b"))
checkEquals(names(ew), c("c","b"))
checkEquals(list(ew$c, ew$b), list("a", character(0)))
}
# edgeMatrix() returns a 2-row matrix of node indices (row 1 = from,
# row 2 = to) in the graph's internal edge order.
test_BAM_edgeMatrix <- function() {
g1 <- make_smallBAM()
em <- edgeMatrix(g1)
checkEquals(em[1,], c(3, 1, 1, 4, 1, 4))
checkEquals(em[2,], c(1, 2, 3, 3, 4, 5))
}
# removeEdge() must raise an error when any endpoint names a node that is
# not in the graph, in either argument position.
test_BAM_removeEdge_unknown_nodes <- function()
{
g1 <- make_smallBAM()
checkException(removeEdge("a", "q", g1))
checkException(removeEdge("q", "a", g1))
checkException(removeEdge("a", c("q", "aa", "tt"), g1))
checkException(removeEdge(c("a", "q", "tt", "aa"),
c("a", "q", "aa", "tt"), g1))
}
# removeEdge() behavior: empty input is a no-op, removing a nonexistent
# edge errors, and single / one-to-many / many-to-one removals delete
# exactly the named edges.
test_BAM_removeEdge <- function()
{
g1 <- make_smallBAM()
## removing nothing does nothing
c0 <- character(0)
checkEquals(edges(g1), edges(removeEdge(c0, c0, g1)))
## there is no y => a edge, throw error
checkException(removeEdge("y", "a", g1))
g2 <- removeEdge("c", "a", g1)
checkEquals(list(c=character(0)), edges(g2, "c"))
em <- edgeMatrix(g2)
checkEquals(em[1,], c(1, 1, 4, 1, 4))
checkEquals(em[2,], c(2, 3, 3, 4, 5))
# one "from", several "to": drop a->b and a->x, keep a->c
g3 <- removeEdge("a", c("b", "x"), g1)
checkEquals(list(a="c"), edges(g3, "a"))
checkEquals(edges(g1)[-1], edges(g3)[-1])
# several "from", one "to": drop a->c and x->c
g4 <- removeEdge(c("a", "x"), "c", g1)
checkEquals(list(a=c("b", "x")), edges(g4, "a"))
checkEquals(list(x="y"), edges(g4, "x"))
}
# edgeData() access: full attribute listing (names of the form "from|to"),
# per-node and per-edge subsetting with attr="weight", and an error for an
# unknown attribute name.
test_BAMSmall_edgeData <- function(){
g1 <- make_smallBAM()
eg <- edgeData(g1)
tmp <- paste(c("c", "a", "a", "x", "a", "x"), c("a","b","c","c","x","y"),sep="|")
checkEquals(names(eg), tmp)
vals <- sapply( names(eg),function(k){
eg[[k]]$weight
})
checkEquals(names(vals), tmp)
checkEquals( as.numeric(vals),c(7.9, 3.4, 2.6, 1.6, 1.7, 5.3))
# Subset by "from" node only
eg <- edgeData(g1, "a", attr="weight")
tmp <- paste( c("a", "a", "a"), c("b", "c", "x"), sep = "|")
checkEquals(names(eg), tmp)
vals <- sapply( names(eg),function(k){
eg[[k]]
})
checkEquals(names(vals), tmp)
checkEquals( as.numeric(vals), c(3.4, 2.6, 1.7))
# Unknown attribute name must raise
checkException(eg <- edgeData(g1, "a", attr="weightsss"))
# Subset by a single (from, to) pair
eg <- edgeData(g1, "a", "b", attr="weight")
tmp <- paste("a", "b", sep = "|")
checkEquals(names(eg), tmp)
vals <- sapply( names(eg),function(k){
eg[[k]]
})
checkEquals(names(vals), tmp)
checkEquals( as.numeric(vals),3.4)
}
test_BAM_extractFromToUndirected <- function() {
g1 <- make_unDirectedBAM()
ft <- extractFromTo(g1)
checkEquals(as.character(ft$from), c("a", "a", "c", "a", "c", "x"))
checkEquals(as.character(ft$to), c("b", "c", "d", "x", "x", "y"))
checkEquals(ft$weight, c(3.4, 2.6, 7.9, 1.7, 1.6, 5.3))
}
test_BAM_extractFromToDirected <- function() {
g1 <- make_smallBAM()
ft <- extractFromTo(g1)
checkEquals(as.character(ft$from), c("c", "a", "a", "x", "a", "x"))
checkEquals(as.character(ft$to), c("a", "b", "c", "c", "x", "y"))
checkEquals(ft$weight, c(7.9, 3.4, 2.6, 1.6, 1.7, 5.3))
}
# as(graphBAM, "matrix") for an undirected graph yields a symmetric
# adjacency matrix with weights as entries and sorted node dimnames.
test_BAM_bamToMatrix_UnDirected <- function() {
g1 <- make_unDirectedBAM()
mat <- as(g1, "matrix")
checkEquals(isSymmetric(mat), TRUE)
checkEquals(mat[upper.tri(mat)],
c(3.4, 2.6, 0.0, 0.0, 0.0, 7.9, 1.7, 0.0,
1.6, 0.0, 0.0, 0.0, 0.0, 0.0, 5.3))
checkEquals(rownames(mat),colnames(mat))
checkEquals(rownames(mat), c("a", "b", "c", "d", "x", "y"))
}
# as(graphBAM, "matrix") for a directed graph: entries in column-major
# order match the directed weights (rows = from, columns = to).
test_BAM_bamToMatrix_Directed <- function() {
g1 <- make_smallBAM()
mat <- as(g1, "matrix")
checkEquals(as.numeric(mat), c(0.0, 0.0, 7.9, 0.0,
0.0, 3.4, 0.0, 0.0, 0.0, 0.0, 2.6, 0.0,
0.0, 1.6, 0.0, 1.7, 0.0, 0.0, 0.0,0.0,
0.0, 0.0, 0.0, 5.3, 0.0))
checkEquals(rownames(mat),colnames(mat))
checkEquals(rownames(mat), c("a","b", "c", "x","y"))
}
# Coercions graphBAM -> graphAM / graphNEL, directed and undirected:
# nodes, edgemode, edge lists and (order-insensitive for nodes c and x)
# edge weights must all survive the conversion.
test_BAM_bamTographAM_unDirected <- function() {
g1 <- make_unDirectedBAM()
am <- as(g1,"graphAM")
checkEquals(nodes(g1), nodes(am))
checkEquals(edgemode(g1), edgemode(am))
checkEquals(edges(g1), edges(am))
w1 <- edgeWeights(g1)
w2 <- edgeWeights(am)
checkEquals(names(w1), names(w2))
checkEquals( w1$a, w2$a)
checkEquals( w1$b, w2$b)
checkEquals( sort(w1$c), sort(w2$c))
checkEquals( w1$d, w2$d)
checkEquals( sort(w1$x), sort(w2$x))
checkEquals( w1$y, w2$y)
}
test_BAM_bamTographAM_Directed <- function() {
g1 <- make_smallBAM()
am <- as(g1,"graphAM")
checkEquals(nodes(g1), nodes(am))
checkEquals(edgemode(g1), edgemode(am))
checkEquals(edges(g1), edges(am))
w1 <- edgeWeights(g1)
w2 <- edgeWeights(am)
checkEquals(names(w1), names(w2))
checkEquals( w1$a, w2$a)
checkEquals( w1$b, w2$b)
checkEquals( sort(w1$c), sort(w2$c))
checkEquals( w1$d, w2$d)
checkEquals( sort(w1$x), sort(w2$x))
checkEquals( w1$y, w2$y)
}
test_BAM_bamTographNEL_UnDirected <- function() {
g1 <- make_unDirectedBAM()
nel <- as(g1,"graphNEL")
checkEquals(nodes(g1), nodes(nel))
checkEquals(edgemode(g1), edgemode(nel))
checkEquals(edges(g1), edges(nel))
w1 <- edgeWeights(g1)
w2 <- edgeWeights(nel)
checkEquals(names(w1), names(w2))
checkEquals( w1$a, w2$a)
checkEquals( w1$b, w2$b)
checkEquals( sort(w1$c), sort(w2$c))
checkEquals( w1$d, w2$d)
checkEquals( sort(w1$x), sort(w2$x))
checkEquals( w1$y, w2$y)
}
test_BAM_bamTographNEL_Directed <- function() {
g1 <- make_smallBAM()
nel <- as(g1,"graphNEL")
checkEquals(nodes(g1), nodes(nel))
checkEquals(edgemode(g1), edgemode(nel))
checkEquals(edges(g1), edges(nel))
w1 <- edgeWeights(g1)
w2 <- edgeWeights(nel)
checkEquals(names(w1), names(w2))
checkEquals( w1$a, w2$a)
checkEquals( w1$b, w2$b)
checkEquals( sort(w1$c), sort(w2$c))
checkEquals( w1$d, w2$d)
checkEquals( sort(w1$x), sort(w2$x))
checkEquals( w1$y, w2$y)
}
# Fixture: a 4-node directed graphNEL with weighted edge lists, used to
# test conversions into graphBAM/graphAM.
create_GraphNEL_Directed <- function() {
set.seed(123)
V <- letters[1:4]
edL <- vector("list", length=4)
names(edL) <- V
edL[["a"]] <- list(edges=c(3, 4), weights=c(.13, .14))
edL[["b"]] <- list(edges=c(3), weights=.23)
edL[["c"]] <- list(edges=numeric(0), weights=numeric(0))
edL[["d"]] <- list(edges=c(2, 3), weights=c(.42, .43))
gR <- new("graphNEL", nodes = V, edgeL = edL, edgemode = "directed" )
gR
}
# Fixture: the undirected counterpart (each edge listed from both
# endpoints, node "d" isolated).
create_GraphNEL_UnDirected <- function() {
set.seed(123)
V <- letters[1:4]
edL <- vector("list", length=4)
names(edL) <- V
edL[["a"]] <- list(edges=c(2, 3), weights=c(.13, .14))
edL[["b"]] <- list(edges=c(1), weights=.13)
edL[["c"]] <- list(edges=c(1), weights=0.14)
edL[["d"]] <- list(edges= numeric(0), weights=numeric(0))
gR <- new("graphNEL", nodes = V, edgeL = edL, edgemode = "undirected" )
gR
}
# Conversion graphNEL (directed) -> graphBAM preserves nodes, edgemode,
# edge lists and weights.
test_graphNEL_Directed_To_graphBAM <-function() {
nel <- create_GraphNEL_Directed()
bam <- as(nel, "graphBAM")
checkEquals(nodes(nel), nodes(bam))
checkEquals(edgemode(nel), edgemode(bam))
checkEquals(edges(nel), edges(bam))
w1 <- edgeWeights(nel)
w2 <- edgeWeights(bam)
checkEquals(w1,w2)
}
# NOTE(review): this is an exact duplicate of the definition immediately
# above and silently overwrites it when the file is sourced. One of the two
# was presumably meant to be deleted or to cover a different conversion --
# confirm intent before removing.
test_graphNEL_Directed_To_graphBAM <- function() {
nel <- create_GraphNEL_Directed()
bam <- as(nel, "graphBAM")
checkEquals(nodes(nel), nodes(bam))
checkEquals(edgemode(nel), edgemode(bam))
checkEquals(edges(nel), edges(bam))
w1 <- edgeWeights(nel)
w2 <- edgeWeights(bam)
checkEquals(w1,w2)
}
# Conversion graphNEL (undirected) -> graphBAM preserves nodes, edgemode,
# edge lists and weights.
test_graphNEL_UnDirected_To_graphBAM <- function() {
nel <- create_GraphNEL_UnDirected()
bam <- as(nel, "graphBAM")
checkEquals(nodes(nel), nodes(bam))
checkEquals(edgemode(nel), edgemode(bam))
checkEquals(edges(nel), edges(bam))
w1 <- edgeWeights(nel)
w2 <- edgeWeights(bam)
checkEquals(w1,w2)
}
# Conversion graphAM (directed, built via graphNEL) -> graphBAM.
test_graphAM_Directed_To_graphBAM <- function() {
nel <- create_GraphNEL_Directed()
am <- as(nel, "graphAM")
bam <- as(am, "graphBAM")
checkEquals(nodes(am), nodes(bam))
checkEquals(edgemode(am), edgemode(bam))
checkEquals(edges(am), edges(bam))
w1 <- edgeWeights(am)
w2 <- edgeWeights(bam)
checkEquals(w1,w2)
}
# Conversion graphAM (undirected, built via graphNEL) -> graphBAM.
test_graphAM_UnDirected_To_graphBAM<- function() {
nel <- create_GraphNEL_UnDirected()
am <- as(nel, "graphAM")
bam <- as(am, "graphBAM")
checkEquals(nodes(am), nodes(bam))
checkEquals(edgemode(am), edgemode(bam))
checkEquals(edges(am), edges(bam))
w1 <- edgeWeights(am)
w2 <- edgeWeights(bam)
checkEquals(w1, w2)
}
# edgeData<-() with attr="weight": setting a single edge's weight and a
# one-from/many-to batch only changes the targeted entries.
test_BAM_set_edge_weights <- function()
{
# Flatten all weights into one named vector for easy comparison
getw <- function(x) unlist(edgeWeights(x))
g <- make_smallBAM()
weight0 <- unlist(edgeWeights(g))
# Single edge c -> a
edgeData(g, "c", "a", attr="weight") <- 123.0
want <- weight0
want["c.a"] <- 123.0
checkEquals(want, getw(g))
# Batch: a -> {b, c, x} in one assignment
g <- make_smallBAM()
edgeData(g, "a", c("b", "c", "x"), attr="weight") <- c(10, 11, 12)
want <- weight0
want[c("a.b", "a.c", "a.x")] <- c(10, 11, 12)
checkEquals(want, getw(g))
}
test_BAM_Intersect_UnDirected <- function() {
    ## graphIntersect() of two undirected graphBAMs: the result keeps the
    ## common nodes and the edges present in both inputs.  No edgeFun is
    ## supplied, so edges whose weights differ between the inputs get
    ## weight NA.
    ## nodes a b c d x y
    from = c("a", "b", "d", "d")
    to = c("b", "c", "x", "y")
    weight=c(1.2, 2.4, 3.2, 5.4)
    df <- data.frame(from, to, weight)
    g1 <- graphBAM(df, edgemode = "undirected")
    ## nodes a b c d x y z
    from = c("a", "b", "b", "d", "d")
    to = c("b", "c", "d", "c", "x")
    weight=c(3.2, 1.2, 2.1, 3.2, 3.5)
    df <- data.frame(from, to, weight)
    g2 <- graphBAM(df, nodes = c("a","b","c", "d", "x", "y", "z"),
                   edgemode = "undirected")
    g <- graphIntersect(g1,g2)
    checkEquals(intersect(nodes(g1), nodes(g2)), nodes(g))
    checkEquals(FALSE, isDirected(g))
    eg <- edgeData(g)
    vals <- sapply( names(eg),function(k){
        eg[[k]]$weight
    })
    ## undirected edges are reported in both orientations, hence six
    ## names for the three shared edges a-b, b-c and d-x
    tmp <- paste(c("a", "b", "d", "b", "c", "x"), c("b", "c", "x", "a", "b", "d"), sep= "|")
    checkEquals(tmp, names(vals))
    ## all three shared edges carry different weights in g1 and g2 -> NA
    checkEquals(as.numeric(rep(NA, 6)), as.numeric(vals))
}
test_BAM_Intersect_Directed <- function() {
    ## graphIntersect() of two directed graphBAMs: shared edges a->b,
    ## b->c and d->x survive.  a->b has weight 1.2 in both inputs so the
    ## weight is kept; the other two differ and collapse to NA.
    ## nodes a b c d x y
    from = c("a", "b", "d", "d")
    to = c("b", "c", "x", "y")
    weight=c(1.2, 2.4, 3.2, 5.4)
    df <- data.frame(from, to, weight)
    g1 <- graphBAM(df, edgemode = "directed")
    ## nodes a b c d x y z
    from = c("a", "b", "b", "d", "d")
    to = c("b", "c", "d", "c", "x")
    weight=c(1.2, 1.2, 2.1, 3.2, 3.5)
    df <- data.frame(from, to, weight)
    g2 <- graphBAM(df, nodes = c("a","b","c", "d", "x", "y", "z"),
                   edgemode = "directed")
    g <- graphIntersect(g1,g2)
    checkEquals(intersect(nodes(g1), nodes(g2)), nodes(g))
    checkEquals(TRUE, isDirected(g))
    eg <- edgeData(g)
    vals <- sapply( names(eg),function(k){
        eg[[k]]$weight
    })
    tmp <- paste(c("a", "b", "d"), c("b", "c", "x"), sep= "|")
    checkEquals(tmp, names(vals))
    checkEquals(c(1.2, NA, NA), as.numeric(vals))
}
test_BAM_Intersect_UnDirected2 <- function() {
    ## Undirected intersection where only one shared edge has matching
    ## weights: a-b differs (1.2 vs 3.2) -> NA, while d-x is 3.2 in both
    ## inputs and keeps its weight.
    ## nodes a b d x y
    from = c("a", "d", "d")
    to = c("b", "x", "y")
    weight=c(1.2, 3.2, 5.4)
    df <- data.frame(from, to, weight)
    g1 <- graphBAM(df, edgemode = "undirected")
    ## nodes a b c d x y z
    from = c("a", "b", "b", "d", "d")
    to = c("b", "c", "d", "c", "x")
    weight=c(3.2, 1.2, 2.1, 5.2, 3.2)
    df <- data.frame(from, to, weight)
    g2 <- graphBAM(df, nodes = c("a","b","c", "d", "x", "y", "z"),
                   edgemode = "undirected")
    g <- graphIntersect(g1,g2)
    checkEquals(intersect(nodes(g1), nodes(g2)), nodes(g))
    checkEquals(FALSE, isDirected(g))
    eg <- edgeData(g)
    vals <- sapply( names(eg),function(k){
        eg[[k]]$weight
    })
    ## both orientations of the two surviving edges a-b and d-x
    tmp <- paste(c("a", "d", "b", "x"), c("b", "x", "a", "d"), sep= "|")
    checkEquals(tmp, names(vals))
    checkEquals(rep(c(NA,3.2),2), as.numeric(vals))
}
test_BAM_Intersect_EmptyEdges <- function() {
    ## Two directed graphs that share the nodes b, x, y but no edges:
    ## the intersection keeps the common nodes and has no edges at all.
    g1 <- graphBAM(data.frame(from = c("a", "d", "d"),
                              to = c("b", "x", "y"),
                              weight = c(1.2, 3.2, 5.4)),
                   edgemode = "directed")
    g2 <- graphBAM(data.frame(from = c("h", "i", "j"),
                              to = c("b", "x", "y"),
                              weight = c(1.2, 3.2, 5.4)),
                   edgemode = "directed")
    res <- graphIntersect(g1, g2)
    checkEquals(intersect(nodes(g1), nodes(g2)), nodes(res))
    checkEquals(TRUE, isDirected(res))
    ew <- edgeWeights(res)
    checkEquals(c("b", "x", "y"), names(ew))
    checkEquals(list(numeric(0), numeric(0), numeric(0)),
                list(ew$b, ew$x, ew$y))
}
test_BAM_Intersect_EmptyNodes <- function() {
    ## Two undirected graphs with disjoint node sets: the intersection is
    ## the empty graph (no nodes, no edge weights).
    from = c("a", "d", "d")
    to = c("b", "x", "y")
    weight=c(1.2, 3.2, 5.4)
    df <- data.frame(from, to, weight)
    ## BUG FIX: edgemode was misspelled "unirected"; the test expects an
    ## undirected graph (see the isDirected() check below).
    g1 <- graphBAM(df, edgemode = "undirected")
    from = c("h", "i", "j")
    to = c("s", "h", "l")
    weight=c(1.2, 3.2, 5.4)
    df <- data.frame(from, to, weight)
    g2 <- graphBAM(df, edgemode = "undirected")
    g <- graphIntersect(g1,g2)
    checkEquals(intersect(nodes(g1), nodes(g2)), nodes(g))
    checkEquals(FALSE, isDirected(g))
    eg <- edgeWeights(g)
    checkEquals(list(), eg)
}
test_BAM_isAdjacent <- function() {
    ## isAdjacent() must report TRUE for every stored edge and must
    ## support vectorized queries in both argument positions.
    from <- c("a", "d", "d", "b", "a")
    to <- c("b", "a", "d", "c", "c")
    gd <- graphBAM(data.frame(from, to,
                              weight = c(1.5, 2.1, 3.4, 4.1, 5.6)),
                   nodes = "e", edgemode = "directed")
    ## every stored edge, one at a time
    for (k in seq_along(from)) {
        checkEquals(TRUE, isAdjacent(gd, from[k], to[k]))
    }
    ## vectorized queries against a fixed source / fixed target
    checkEquals(c(FALSE, TRUE, TRUE, FALSE, FALSE),
                isAdjacent(gd, "a", letters[1:5]))
    checkEquals(c(FALSE, FALSE, FALSE, TRUE, FALSE),
                isAdjacent(gd, letters[1:5], "a"))
}
test_BAM_Union_UnDirected <- function() {
    ## graphUnion() of two undirected graphBAMs: nodes and edges are the
    ## union of the inputs.  With no edgeFun, edges present in both
    ## inputs keep their weight when equal (d-x: 3.5) and become NA when
    ## the weights differ (a-b, b-c); edges in only one input keep their
    ## weight unchanged.
    ## nodes a b c d x y
    from = c("a", "b", "d", "d")
    to = c("b", "c", "x", "y")
    weight=c(1.2, 2.4, 3.5, 5.4)
    df <- data.frame(from, to, weight)
    g1 <- graphBAM(df, edgemode = "undirected")
    ## nodes a b c d x y z
    from = c("a", "b", "b", "d", "d")
    to = c("b", "c", "d", "c", "x")
    weight=c(3.2, 1.2, 2.1, 3.2, 3.5)
    df <- data.frame(from, to, weight)
    g2 <- graphBAM(df, nodes = c("a","b","c", "d", "x", "y", "z"),
                   edgemode = "undirected")
    g <- graphUnion(g1,g2)
    checkEquals(union(nodes(g1), nodes(g2)), nodes(g))
    checkEquals(FALSE, isDirected(g))
    df <- extractFromTo(g)
    tmp <- data.frame(from = c("a", "b", "b", "c", "d", "d"),
                      to = c("b", "c", "d", "d", "x", "y"),
                      weight = c( NA, NA, 2.1, 3.2, 3.5, 5.4))
    checkEquals(tmp, df)
}
test_BAM_Union_Directed <- function() {
    ## graphUnion() of two directed graphBAMs.  With no edgeFun: a->b is
    ## 1.2 in both inputs and is kept; b->c differs (2.4 vs 1.2) -> NA;
    ## d->x is 3.5 in both; the remaining edges come from only one input
    ## and keep their weights.
    ## nodes a b c d x y
    from = c("a", "b", "d", "d")
    to = c("b", "c", "x", "y")
    weight=c(1.2, 2.4, 3.5, 5.4)
    df <- data.frame(from, to, weight)
    g1 <- graphBAM(df, edgemode = "directed")
    ## nodes a b c d x y z
    from = c("a", "b", "b", "d", "d")
    to = c("b", "c", "d", "c", "x")
    weight=c(1.2, 1.2, 2.1, 3.2, 3.5)
    df <- data.frame(from, to, weight)
    g2 <- graphBAM(df, nodes = c("a","b","c", "d", "x", "y", "z"),
                   edgemode = "directed")
    g <- graphUnion(g1,g2)
    checkEquals(union(nodes(g1), nodes(g2)), nodes(g))
    checkEquals(TRUE, isDirected(g))
    df <- extractFromTo(g)
    tmp <- data.frame(from = c("a", "b", "d", "b", "d", "d"),
                      to = c("b", "c", "c", "d", "x", "y"),
                      weight = c( 1.2, NA, 3.2, 2.1, 3.5, 5.4))
    checkEquals(tmp, df)
}
test_BAM_Union_Mixed <- function() {
    ## The union of an undirected and a directed graphBAM is undefined
    ## and must raise an error.
    undirected <- graphBAM(data.frame(from = c("a", "d", "d"),
                                      to = c("b", "x", "y"),
                                      weight = c(1.2, 3.2, 5.4)),
                           edgemode = "undirected")
    directed <- graphBAM(data.frame(from = c("a", "b", "b", "d", "d"),
                                    to = c("b", "c", "d", "c", "x"),
                                    weight = c(3.2, 1.2, 2.1, 3.2, 3.5)),
                         nodes = c("a", "b", "c", "d", "x", "y", "z"),
                         edgemode = "directed")
    checkException(graphUnion(undirected, directed))
}
test_BAM_inEdges <- function()
{
    ## inEdges() returns, for each queried node, the nodes that have an
    ## edge pointing at it.  The isolated node "e" must map to an empty
    ## character vector.  For an undirected graph inEdges() must agree
    ## with edges().
    from = c("a", "d", "d", "b", "a")
    to = c("b", "a", "d", "c", "c")
    weight = c(1.5, 2.1, 3.4, 4.1, 5.6)
    df <- data.frame(from, to, weight)
    ## directed
    gd <- graphBAM(df, nodes="e", edgemode = "directed")
    want <- list(a="d",
                 b="a",
                 c=c("a", "b"),
                 d="d",
                 e=character(0))
    checkEquals(want, inEdges(nodes(gd), gd))
    ## undirected
    gu <- graphBAM(df, nodes="e", edgemode = "undirected")
    checkEquals(edges(gu), inEdges(nodes(gu), gu))
}
test_BAM_directed_attrs <- function() {
    ## Edge attributes on a directed graphBAM: an attribute must be
    ## declared with edgeDataDefaults() before it can be queried; values
    ## can then be read/assigned by source node only, by source+target,
    ## or by target node only.
    from = c("a", "a", "a", "x", "x", "c")
    to = c("b", "c", "x", "y", "c", "a")
    weight = c(2, 1, 3, 4, 5, 6)
    df <- data.frame(from, to, weight)
    bam <- graphBAM(df, edgemode = "directed")
    ## "code" has not been declared yet -> error
    checkException(edgeData(bam,from="a", attr="code"))
    edgeDataDefaults(bam, attr ="weight") <- 1
    edgeDataDefaults(bam, attr = "code") <- "plain"
    ## all edges out of "a" start at the default value
    res <- unlist(edgeData(bam,from="a", attr="code"))
    nmres <- paste(c("a","a","a"), c ("b", "c", "x"), sep="|")
    checkEquals(names(res), nmres)
    checkEquals(as.character(res), c("plain", "plain", "plain"))
    edgeData(bam,from = "a", to = "x", attr= "code") <- "red"
    res <- unlist(edgeData(bam, from = "a", attr = "code"))
    checkEquals(names(res), nmres)
    checkEquals(as.character(res), c("plain", "plain", "red"))
    ## assignment by target only updates every edge ending in "c"
    edgeData(bam,to = "c", attr= "code") <- "yellow"
    res <- unlist(edgeData(bam, to= "c", attr = "code"))
    nmres <- paste(c("a", "x"), c("c", "c"), sep = "|")
    checkEquals(names(res), nmres)
    checkEquals(as.character(res), c("yellow", "yellow"))
}
test_BAM_undirected_attrs <- function() {
    ## Same attribute exercises as test_BAM_directed_attrs, but on an
    ## undirected graphBAM: declaration via edgeDataDefaults() is still
    ## required, and assignment works by from, from+to, or to.
    from = c("a", "a", "a", "x", "x")
    to = c("b", "c", "x", "y", "c")
    weight = c(2, 1, 3, 4, 5)
    df <- data.frame(from, to, weight)
    bam <- graphBAM(df, edgemode = "undirected")
    ## "code" has not been declared yet -> error
    checkException(edgeData(bam,from="a", attr="code"))
    edgeDataDefaults(bam, attr = "weight") <- 1
    edgeDataDefaults(bam, attr = "code") <- "plain"
    res <- unlist(edgeData(bam,from="a", attr="code"))
    nmres <- paste(c("a","a","a"), c ("b", "c", "x"), sep="|")
    checkEquals(names(res), nmres)
    checkEquals(as.character(res), c("plain", "plain", "plain"))
    edgeData(bam,from = "a", to = "x", attr= "code") <- "red"
    res <- unlist(edgeData(bam, from = "a", attr = "code"))
    checkEquals(names(res), nmres)
    checkEquals(as.character(res), c("plain", "plain", "red"))
    ## assignment by target only updates every edge touching "c"
    edgeData(bam,to = "c", attr= "code") <- "yellow"
    res <- unlist(edgeData(bam, to= "c", attr = "code"))
    nmres <- paste(c("a", "x"), c("c", "c"), sep = "|")
    checkEquals(names(res), nmres)
    checkEquals(as.character(res), c("yellow", "yellow"))
}
test_graphBAM_detailed_Attribute_Intersection <- function() {
    ## graphIntersect() attribute handling with plain character/numeric
    ## values: first without combine functions (equal values kept,
    ## unequal -> NA, one-sided attributes dropped), then with user
    ## supplied edgeFun/nodeFun combiners.
    ## nodes a b c d x y
    from = c("a", "b", "d", "d")
    to = c("b", "c", "y", "x")
    weight=c(1.2, 2.4, 5.4, 3.2)
    df <- data.frame(from, to, weight)
    g1 <- graphBAM(df, edgemode = "directed")
    edgeData(g1, from = from, to = to ,attr = "weight") <- c(1.2, 2.4, 5.4, 3.2)
    edgeDataDefaults(g1, attr = "color") <- "unknown"
    edgeDataDefaults(g1, attr ="type") <- "unknown"
    edgeData(g1, from = from, to = to ,attr = "color") <- c("red", "blue", NA, "green")
    edgeData(g1, from = from, to = to , attr = "type") <- c("high", "low", "high", NA)
    ## nodes a b c d x y z
    from = c("a", "b", "b", "d", "d")
    to = c("b", "c", "d", "c", "x")
    weight=c(1.2, 4.2, 5.6, 2.1, 3.2)
    df <- data.frame(from, to, weight)
    g2 <- graphBAM(df, nodes = c("a","b","c", "d", "x", "y", "z"),
                   edgemode = "directed")
    edgeDataDefaults(g2, attr = "color") <- "unknown"
    edgeData(g2, from = from, to = to, attr = "color") <- c("red", "blue", NA, "red",
                    "yellow")
    ## no combine functions: weights equal in both inputs are kept,
    ## unequal ones become NA
    g <- graphIntersect(g1, g2)
    df <- extractFromTo(g)
    tmp <- data.frame( from = c("a", "b", "d"),
                       to = c("b", "c", "x"),
                       weight = c(1.2, NA, 3.2))
    checkEquals(tmp, df)
    attColor <- edgeData(g, attr = "color")
    nms <- paste(c("a", "b", "d"), c("b", "c", "x"), sep = "|")
    target <- structure( c("red", "blue", NA), names = nms)
    checkEquals(target, unlist(attColor))
    ## "type" exists only in g1, so the intersection drops it
    checkException(edgeData(g, attr = "type"))
    weightFun <- function(x, y) {
        return(x + y)
    }
    colorFun <- function(x,y) {
        if(x=="red" && y == "red")
            return("white")
        else
            return("black")
    }
    setClass("myType", representation = representation(typ ="character"))
    myType <- function(typ){ new("myType", typ = typ)}
    typeFun <- function(x,y) {
        if(is(x, "myType") && is(y, "myType")){
            if(x@typ =="low" || y@typ == "med")
                return("low")
            else
                return("high")
        }
        else {return (NA)}
    }
    nodeDataDefaults(g1, attr ="color") <- "unknown"
    nodeDataDefaults(g1, attr ="type") <- "unknown"
    nodeDataDefaults(g2, attr ="color") <- "unknown"
    nodeDataDefaults(g2, attr ="type") <- "unknown"
    nodeData(g1,n = c("a", "b", "c"), attr ="color") <- c("red", "green", "blue")
    nodeData(g1,n = c("b", "c"), attr ="type") <- c(myType("low"), myType("high"))
    nodeData(g2,n = c("a", "b", "c"), attr ="color") <- c("red", "green", "red")
    nodeData(g2,n = c("b", "c"), attr ="type") <- c(myType("med"), myType("low"))
    g <- graphIntersect(g1, g2, nodeFun = list(type = typeFun),
                        edgeFun = list(weight = weightFun, color = colorFun))
    attWeight <- edgeData(g, attr = "weight")
    nms <- paste(c("a", "b", "d"), c("b", "c", "x"), sep = "|")
    target <- structure( c( 2.4, 6.6, 6.4), names = nms)
    checkEquals(target, unlist(attWeight))
    attColor <- edgeData(g, attr = "color")
    nms <- paste(c("a", "b", "d"), c("b", "c", "x"), sep = "|")
    ## BUG FIX: this check previously re-asserted attWeight against the
    ## numeric weight target (copy-paste).  It now verifies the colors
    ## combined by colorFun (red/red -> "white", otherwise "black"),
    ## matching the analogous S4 test below.
    target <- structure(c("white", "black", "black"), names = nms)
    checkEquals(target, unlist(attColor))
    ## node attrs: equal values are kept, differing values with no
    ## nodeFun for "color" become NA, nodes set in neither input keep
    ## the default
    nodeColor <- nodeData(g, attr = "color")
    target <- as.list(structure(c("red", "green", NA, "unknown", "unknown",
                    "unknown"), names = c("a", "b", "c", "d", "x", "y")))
    checkEquals(target, nodeColor)
    nodeType <- nodeData(g, attr = "type")
    target <- as.list(structure(c("unknown", "low", "high", "unknown",
                   "unknown", "unknown"), names = c("a", "b", "c", "d", "x", "y")))
    checkEquals(target, nodeType)
}
test_graphBAM_detailed_Attribute_Union <- function() {
    ## graphUnion() attribute handling with plain character/numeric
    ## values: first without combine functions, then with user supplied
    ## edgeFun combiners for "weight" and "color".
    ## nodes a b c d x y
    from = c("a", "b", "d", "d")
    to = c("b", "c", "y", "x")
    weight=c(1.2, 2.4, 5.4, 3.2)
    df <- data.frame(from, to, weight)
    g1 <- graphBAM(df, edgemode = "directed")
    edgeData(g1, from = from, to = to ,attr = "weight") <- c(1.2, 2.4, 5.4, 3.2)
    edgeDataDefaults(g1, attr = "color") <- "cyan"
    edgeDataDefaults(g1, attr = "type") <- "unknown"
    edgeData(g1, from = from, to = to ,attr = "color") <- c("red", "blue", NA, "green")
    edgeData(g1, from = from, to = to , attr = "type") <- c("high", "low", "high", NA)
    ## nodes a b c d x y z
    from = c("a", "b", "b", "d", "d")
    to = c("b", "c", "d", "c", "x")
    weight=c(1.2, 4.2, 5.6, 2.1, 3.2)
    df <- data.frame(from, to, weight)
    g2 <- graphBAM(df, nodes = c("a","b","c", "d", "x", "y", "z"),
                   edgemode = "directed")
    edgeDataDefaults(g2, attr = "color") <- "cyan"
    edgeData(g2, from = from, to = to, attr = "color") <- c("red", "blue", NA, "red",
                    "yellow")
    ## no combine functions: shared edges with equal attribute values
    ## keep them, unequal ones collapse to NA
    g <- graphUnion(g1, g2)
    df <- extractFromTo(g)
    tmp <- data.frame( from = c("a", "b", "d", "b", "d", "d"),
                       to = c("b", "c", "c", "d", "x", "y"),
                       weight = c(1.2, NA, 2.1, 5.6, 3.2, 5.4))
    checkEquals(tmp, df)
    attColor <- edgeData(g, attr = "color")
    nms <- paste(c("a", "b", "d", "b", "d", "d"), c("b", "c", "c", "d", "x", "y"), sep = "|")
    target <- structure( c("red", "blue", "red", NA, NA, NA), names = nms)
    checkEquals(target, unlist(attColor))
    attType <- edgeData(g, attr = "type")
    nms <- paste(c("a", "b", "d", "b", "d", "d"), c("b", "c", "c", "d", "x", "y"), sep = "|")
    target <- structure( c("high", "low", NA, NA, NA, "high"), names = nms)
    checkEquals(target, unlist(attType))
    weightFun <- function(x, y) {
        return(x + y )
    }
    colorFun <- function(x,y) {
        if(x=="red" || y == "red")
            return("white")
        else
            return("black")
    }
    setClass("myType", representation = representation(typ ="character"))
    myType <- function(typ){ new("myType", typ = typ)}
    typeFun <- function(x,y) {
        if(is(x, "myType") && is(y, "myType")){
            if(x@typ =="low" || y@typ == "med")
                return("low")
            else
                return("high")
        }
        else {return (NA)}
    }
    nodeDataDefaults(g1, attr ="color") <- "cyan"
    nodeDataDefaults(g1, attr="type") <- "unknown"
    nodeData(g1,n = c("a", "b", "c"), attr ="color") <- c("red", "green", "blue")
    nodeData(g1,n = c("b", "c"), attr ="type") <- c(myType("low"), myType("high"))
    nodeDataDefaults(g2, attr ="color") <- "cyan"
    nodeDataDefaults(g2, attr="type") <- "unknown"
    nodeDataDefaults(g2, attr="test") <- "missing"
    nodeData(g2,n = c("a", "b", "c", "z"), attr ="color") <- c("red", "green", "red","pink")
    nodeData(g2,n = c("b", "c"), attr ="type") <- c(myType("med"), myType("low"))
    nodeData(g2,n = c("a", "b", "c"), attr = "test") <- c("pass", "fail", "pass")
    ## with combiners for "weight" and "color"; "type" still has no
    ## combiner, so differing values stay NA
    g <- graphUnion(g1, g2, edgeFun = list(weight = weightFun, color = colorFun))
    attWeight <- edgeData(g, attr = "weight")
    nms <- paste(c("a", "b", "d", "b", "d", "d"), c("b", "c", "c", "d", "x", "y"), sep = "|")
    target <- structure( c( 2.4, 6.6, 2.1, 5.6, 6.4, 5.4), names = nms)
    checkEquals(target, unlist(attWeight))
    attColor <- edgeData(g, attr = "color")
    nms <- paste(c("a", "b", "d", "b", "d", "d"), c("b", "c", "c", "d", "x", "y"), sep = "|")
    target <- structure(c( "white", "black", "red", NA, "black", NA), names = nms)
    checkEquals( target, unlist(attColor))
    ## NOTE: the original file checked attType twice with identical
    ## code; the redundant duplicate has been removed.
    attType <- edgeData(g, attr = "type")
    nms <- paste(c("a", "b", "d", "b", "d", "d"), c("b", "c", "c", "d", "x", "y"), sep = "|")
    target <- structure( c("high", "low", NA, NA, NA, "high"), names = nms)
    checkEquals(target, unlist(attType))
}
test_graphBAM_removeEdgesByWeight <- function() {
    ## removeEdgesByWeight(): edges with weight strictly below `lessThan`
    ## and/or strictly above `greaterThan` are removed; surviving edges
    ## keep their other attributes.  The expectations below inspect
    ## internal slots (edgeSet@bit_vector/@weights/@edge_attrs) directly.
    from = c("a", "b", "d", "d")
    to = c("b", "c", "y", "x")
    weight=c(2.2, 2.0, 0.4, 0.2)
    df <- data.frame(from, to, weight)
    g <- graphBAM(df, edgemode = "directed")
    edgeDataDefaults(g, attr="color") <- "pink"
    edgeData(g, from = from, to = to ,attr = "color") <- c("red", "blue", NA, "green")
    ## drop weights < 2.0 -> keeps 2.2 and 2.0
    res <- removeEdgesByWeight(g, lessThan = 2.0)
    checkEquals(attr(res@edgeSet@bit_vector, "nbitset"), 2)
    checkEquals(res@edgeSet@weights, c(2.2, 2.0))
    current <- unlist( edgeData(res, attr = "color"))
    target <- structure(c("red", "blue"),
                        names = paste(c("a", "b"), c("b", "c"), sep = "|"))
    checkEquals(target, current)
    ## drop weights > 1.9 -> keeps 0.2 and 0.4
    res <- removeEdgesByWeight(g, greaterThan = 1.9)
    checkEquals(attr(res@edgeSet@bit_vector, "nbitset"), 2)
    checkEquals(res@edgeSet@weights, c(0.2, 0.4))
    current <- unlist( edgeData(res, attr = "color"))
    target <- structure(c("green", NA),
                        names = paste(c("d", "d"), c("x", "y"), sep = "|"))
    checkEquals(target, current)
    ## both bounds at once -> only the 2.0 edge survives
    res <- removeEdgesByWeight(g, lessThan =1.0, greaterThan = 2)
    checkEquals(res@edgeSet@weights, c(2.0))
    current <- unlist( edgeData(res, attr = "color"))
    target <- structure(c("blue"),
                        names = paste( "b", "c", sep = "|"))
    checkEquals(target, current)
    ## bound below every weight -> all edges (and their attrs) removed
    res <- removeEdgesByWeight(g, greaterThan = 0.1)
    checkEquals(res@edgeSet@weights, numeric(0))
    checkEquals(res@edgeSet@edge_attrs$color, character(0))
}
test_graphBAM_nodeAttributes <- function(){
    ## Node attributes on graphBAM: defaults, per-node assignment,
    ## non-atomic attribute values (a data.frame), and survival of
    ## attribute values through subGraph().
    df <- data.frame(from = c("a", "b", "d", "d"),
                     to = c("b", "c", "y", "x"),
                     weight = c(2.2, 2.0, 0.4, 0.2))
    g <- graphBAM(df, edgemode = "directed")
    nodeDataDefaults(g, attr = "color") <- "blue"
    nodeData(g, n = c("d", "a"), attr = "color") <- c("red", "green")
    want <- as.list(structure(c("green", "blue", "blue", "red", "blue", "blue"),
                              names = c("a", "b", "c", "d", "x", "y")))
    checkEquals(want, nodeData(g, attr = "color"))
    ## a non-atomic value: the same data.frame stored on two nodes
    nodeDataDefaults(g, attr = "mat") <- NA
    nodeData(g, n = c("x", "y"), attr = "mat") <- df
    checkEquals(list(x = df, y = df),
                nodeData(g, n = c("x", "y"), attr = "mat"))
    ## subGraph keeps attribute values for the retained nodes
    sub <- subGraph(c("d", "b"), g)
    want <- as.list(structure(c("blue", "red"), names = c("b", "d")))
    checkEquals(want, nodeData(sub, attr = "color"))
}
test_BAM_directed_attrs_s4 <- function() {
    ## Edge attributes on a directed graphBAM may hold arbitrary R
    ## objects (a data.frame, a matrix), not just scalars.  Checks
    ## defaults plus partial assignment by from, from+to, and to.
    from = c("a", "a", "a", "x", "x", "c")
    to = c("b", "c", "x", "y", "c", "a")
    weight = c(2, 1, 3, 4, 5, 6)
    df <- data.frame(from, to, weight)
    bam <- graphBAM(df, edgemode = "directed")
    edgeDataDefaults(bam, attr = "weight") <- 1.3
    edgeDataDefaults (bam, attr = "vals") <- df
    ## edges out of "a" are overridden; all others keep the default df
    edgeData(bam, from = "a", attr= "vals") <- "unknown"
    res <- edgeData(bam, attr="vals")
    nmres <- c("c|a", "a|b", "a|c", "x|c", "a|x", "x|y")
    target <- structure(list(df, "unknown", "unknown", df, "unknown",df), names = nmres)
    checkEquals(res, target)
    ## a matrix stored on a single edge a->x
    edgeDataDefaults(bam, attr = "mat") <- NA
    edgeData(bam,from = "a", to = "x", attr= "mat") <- matrix(1)
    res <- edgeData(bam, from = "a", attr = "mat")
    nmres <- paste(c("a", "a", "a"), c("b", "c", "x"), sep = "|")
    target <- structure( list(NA, NA, matrix(1)), names = nmres)
    checkEquals(res, target)
    ## assignment by target node only: every edge ending in "c"
    edgeDataDefaults(bam, attr = "mk") <- NA
    edgeData(bam,to = "c", attr= "mk") <- matrix(1)
    res <- edgeData(bam, attr = "mk")
    nmres <- paste(c("c", "a", "a", "x", "a", "x"), c("a", "b", "c", "c", "x", "y"), sep ="|")
    target <- structure( list(NA, NA, matrix(1), matrix(1), NA ,NA), names = nmres)
    checkEquals(res, target)
}
test_BAM_undirected_attrs_s4 <- function() {
    ## Arbitrary R objects as edge-attribute values on an UNDIRECTED
    ## graphBAM: edgeData() reports every edge in both orientations, and
    ## setting a value through either orientation updates both.
    from = c("a", "a", "a", "x")
    to = c("b", "c", "x", "y")
    weight = c(2, 1, 3, 4)
    df <- data.frame(from, to, weight)
    bam <- graphBAM(df, edgemode = "undirected")
    edgeDataDefaults(bam, attr = "weight") <- 1.3
    edgeDataDefaults(bam, attr = "vals") <- df
    # edgeData(bam, attr = "weight") <- 1.3
    # edgeData(bam, attr = "vals") <- df
    ## edges touching "x" get overridden in both orientations
    edgeData(bam, from = "x", attr = "vals") <- "unknown"
    res <- edgeData(bam, attr="vals")
    nmres <- c("a|b", "a|c", "a|x", "x|y", "b|a", "c|a", "x|a", "y|x")
    target <- structure(list(df, df, "unknown", "unknown", df, df, "unknown",
                        "unknown"), names = nmres)
    checkEquals(res, target)
    ## a matrix on the single edge a-x (visible as a|x and x|a)
    edgeDataDefaults(bam, attr ="mat") <- NA
    edgeData(bam,from = "a", to = "x", attr= "mat") <- matrix(1)
    res <- edgeData(bam, attr = "mat")
    target <- structure(list(NA, NA, matrix(1), NA, NA, NA, matrix(1), NA),
                        names = nmres)
    checkEquals(res, target)
    ## assignment by target node only: edge a-c in both orientations
    edgeDataDefaults(bam, attr = "mk") <- NA
    edgeData(bam,to = "c", attr= "mk") <- matrix(1)
    res <- edgeData(bam, attr = "mk")
    target <- structure( list(NA, matrix(1), NA, NA, NA, matrix(1), NA ,NA),
                        names = nmres)
    checkEquals(res, target)
}
test_graphBAM_S4_Attribute_Intersection <- function() {
    ## graphIntersect() with S4 objects as edge-attribute values.  With
    ## no edgeFun, equal S4 values are kept and unequal ones become NA;
    ## with an edgeFun the supplied function combines the two values.
    ## "type" exists only in g1, so it is absent from the intersection
    ## and querying it must raise an error.
    setClass("myColor", representation = representation(col ="character"))
    setClass("myType", representation = representation(typ ="character"))
    myColor <- function(col){ new("myColor", col = col)}
    myType <- function(typ){ new("myType", typ = typ)}
    ## nodes a b c d x y
    from = c("a", "b", "d", "d")
    to = c("b", "c", "y", "x")
    weight=c(1.2, 2.4, 5.4, 3.2)
    df <- data.frame(from, to, weight)
    g1 <- graphBAM(df, edgemode = "directed")
    edgeData(g1, from = from, to = to ,attr = "weight") <- c(1.2, 2.4, 5.4, 3.2)
    edgeDataDefaults(g1, attr="color") <- "cyan"
    edgeDataDefaults(g1, attr="type") <- "unknown"
    edgeData(g1, from = from, to = to ,attr = "color") <- c(myColor("red"),
                 myColor("blue"), NA, myColor("green"))
    edgeData(g1, from = from, to = to , attr = "type") <- c(myType("high"),
                 myType("low"), myType("high"), NA)
    ## nodes a b c d x y z
    from = c("a", "b", "b", "d", "d")
    to = c("b", "c", "d", "c", "x")
    weight=c(1.2, 4.2, 5.6, 2.1, 3.2)
    df <- data.frame(from, to, weight)
    g2 <- graphBAM(df, nodes = c("a","b","c", "d", "x", "y", "z"),
                   edgemode = "directed")
    edgeDataDefaults(g2, attr ="color") <- "cyan"
    edgeData(g2, from = from, to = to, attr = "color") <- c(myColor("red"),
                 myColor("blue"), NA, myColor("red"), myColor("yellow"))
    ## no combiners: a|b and b|c colors match and are kept, d|x differs
    g <- graphIntersect(g1, g2)
    df <- extractFromTo(g)
    tmp <- data.frame( from = c("a", "b", "d"), to = c("b", "c", "x"),
                      weight = c(1.2, NA, 3.2))
    checkEquals(tmp, df)
    attColor <- edgeData(g, attr = "color")
    nms <- paste(c("a", "b", "d"), c("b", "c", "x"), sep = "|")
    target <- structure( c(myColor("red"), myColor("blue"), NA), names = nms)
    checkEquals(target, unlist(attColor))
    checkException(edgeData(g, attr = "type"))
    weightFun <- function(x, y) {
        return(x + y )
    }
    colorFun <- function(x,y) {
        if(x@col=="red" && y@col == "red")
            return("white")
        else
            return("black")
    }
    ## with combiners: weights add up, colors map to "white"/"black"
    g <- graphIntersect(g1, g2, edgeFun =list(weight = weightFun, color = colorFun))
    df <- extractFromTo(g)
    tmp <- data.frame( from = c("a", "b", "d"),
                      to = c("b", "c", "x"),
                      weight = c(2.4, 6.6 , 6.4))
    checkEquals(tmp, df)
    attColor <- edgeData(g, attr = "color")
    nms <- paste(c("a", "b", "d"), c("b", "c", "x"), sep = "|")
    target <- structure( c("white", "black", "black"), names = nms)
    checkEquals(target, unlist(attColor))
    checkException(edgeData(g, attr = "type"))
}
test_graphBAM_S4_Attribute_Union <- function() {
    ## graphUnion() with S4 objects as edge-attribute values, first
    ## without combine functions, then with edgeFun combiners for
    ## "weight" and "color" ("type" keeps the no-combiner semantics).
    setClass("myColor", representation = representation(col ="character"))
    setClass("myType", representation = representation(typ ="character"))
    myColor <- function(col){ new("myColor", col = col)}
    myType <- function(typ){ new("myType", typ = typ)}
    ## nodes a b c d x y
    from = c("a", "b", "d", "d")
    to = c("b", "c", "y", "x")
    weight=c(1.2, 2.4, 5.4, 3.2)
    df <- data.frame(from, to, weight)
    g1 <- graphBAM(df, edgemode = "directed")
    edgeData(g1, from = from, to = to ,attr = "weight") <- c(1.2, 2.4, 5.4, 3.2)
    edgeDataDefaults(g1, attr = "color") <- "cyan"
    edgeDataDefaults(g1, attr = "type") <- "missing"
    edgeData(g1, from = from, to = to ,attr = "color") <- c(myColor("red"),
                 myColor("blue"), NA, myColor("green"))
    edgeData(g1, from = from, to = to , attr = "type") <- c(myType("high"),
                 myType("low"), myType("high"), NA)
    ## nodes a b c d x y z
    from = c("a", "b", "b", "d", "d")
    to = c("b", "c", "d", "c", "x")
    weight=c(1.2, 4.2, 5.6, 2.1, 3.2)
    df <- data.frame(from, to, weight)
    g2 <- graphBAM(df, nodes = c("a","b","c", "d", "x", "y", "z"),
                   edgemode = "directed")
    edgeDataDefaults(g2, attr = "color") <- "cyan"
    edgeData(g2, from = from, to = to, attr = "color") <- c(myColor("red"),
                 myColor("blue"), NA, myColor("red"), myColor("yellow"))
    ## no combiners: equal S4 values kept, unequal -> NA
    g <- graphUnion(g1, g2)
    df <- extractFromTo(g)
    tmp <- data.frame( from = c("a", "b", "d", "b", "d", "d"),
                       to = c("b", "c", "c", "d", "x", "y"),
                       weight = c(1.2, NA, 2.1, 5.6, 3.2, 5.4))
    checkEquals(tmp, df)
    attColor <- edgeData(g, attr = "color")
    nms <- paste(c("a", "b", "d", "b", "d", "d"), c("b", "c", "c", "d", "x", "y"), sep = "|")
    target <- structure( c(myColor("red"), myColor("blue"), myColor("red"), NA, NA, NA), names = nms)
    checkEquals(target, unlist(attColor))
    attType <- edgeData(g, attr = "type")
    nms <- paste(c("a", "b", "d", "b", "d", "d"), c("b", "c", "c", "d", "x", "y"), sep = "|")
    target <- structure( c(myType("high"), myType("low"), NA, NA, NA, myType("high")), names = nms)
    checkEquals(target, unlist(attType))
    weightFun <- function(x, y) {
        return(x + y )
    }
    colorFun <- function(x,y) {
        if(x@col =="red" || y@col == "red")
            return("white")
        else
            return("black")
    }
    ## with combiners for weight/color
    g <- graphUnion(g1, g2, edgeFun = list(weight = weightFun, color = colorFun))
    attWeight <- edgeData(g, attr = "weight")
    nms <- paste(c("a", "b", "d", "b", "d", "d"), c("b", "c", "c", "d", "x", "y"), sep = "|")
    target <- structure( c( 2.4, 6.6, 2.1, 5.6, 6.4, 5.4), names = nms)
    checkEquals(target, unlist(attWeight))
    attColor <- edgeData(g, attr = "color")
    nms <- paste(c("a", "b", "d", "b", "d", "d"), c("b", "c", "c", "d", "x", "y"), sep = "|")
    target <- structure(c( "white", "black", myColor("red"), NA, "black", NA), names = nms)
    checkEquals( target, unlist(attColor))
    ## NOTE: the original file checked attType twice with identical
    ## code; the redundant duplicate has been removed.
    attType <- edgeData(g, attr = "type")
    nms <- paste(c("a", "b", "d", "b", "d", "d"), c("b", "c", "c", "d", "x", "y"), sep = "|")
    target <- structure( c(myType("high"), myType("low"), NA, NA, NA, myType("high")), names = nms)
    checkEquals(target, unlist(attType))
}
test_graphBAM_addNode1 <- function(){
    ## addNode() must give newly added nodes the default value of every
    ## declared node attribute while leaving existing values untouched.
    df <- data.frame(from = c("a", "b", "d", "d"),
                     to = c("b", "c", "y", "x"),
                     weight = c(2.2, 2.0, 0.4, 0.2))
    g <- graphBAM(df, edgemode = "directed")
    nodeDataDefaults(g, attr = "color") <- "pink"
    nodeData(g, n = c("d", "a"), attr = "color") <- c("red", "green")
    nodeDataDefaults(g, attr = "type") <- "unknown"
    nodeData(g, n = c("a", "b", "y", "d"), attr = "type") <-
        c("high", "med", "high", "low")
    grown <- addNode(c("q", "ss"), g)
    allNodes <- c("a", "b", "c", "d", "q", "ss", "x", "y")
    want <- as.list(structure(
        c("green", "pink", "pink", "red", "pink", "pink", "pink", "pink"),
        names = allNodes))
    checkEquals(want, nodeData(grown, attr = "color"))
    want <- as.list(structure(
        c("high", "med", "unknown", "low", "unknown", "unknown",
          "unknown", "high"),
        names = allNodes))
    checkEquals(want, nodeData(grown, attr = "type"))
}
test_graphBAM_addNode2 <- function(){
    ## addEdge() with weights: edges added later get the requested
    ## weight and the default value of every other edge attribute, while
    ## previously assigned attribute values are preserved.
    from = c("a", "b", "d", "d")
    to = c("b", "c", "y", "x")
    weight=c(2.2, 2.0, 0.4, 0.2)
    df <- data.frame(from, to, weight)
    g <- graphBAM(df, edgemode = "directed")
    edgeDataDefaults(g, attr="color") <- "blue"
    edgeDataDefaults(g, attr="type") <- "unknown"
    edgeData(g, from = c("d","a"), to = c("y", "b"), attr = "color") <- c("red", "green")
    edgeData(g, from = c("a", "b"), to = c("b", "c") , attr = "type") <- c("low", "high")
    ## add d->c (weight 4) and b->x (weight 10)
    g1 <- addEdge(from = c("d", "b"), to = c("c", "x"), g, weights = c(4.0, 10.0))
    current <- edgeData(g1, attr ="weight")
    lbl <- paste(c("a", "b", "d", "b", "d", "d"), c( "b", "c", "c", "x", "x", "y") , sep ="|")
    target <- as.list( structure(c(2.2, 2, 4, 10, 0.2, 0.4), names = lbl))
    checkEquals(target, current)
    current <- edgeData(g1, attr ="color")
    lbl <- paste(c("a", "b", "d", "b", "d", "d"),
                 c( "b", "c", "c", "x", "x", "y"), sep ="|")
    target <- as.list( structure(c("green", "blue", "blue", "blue", "blue", "red"),
                      names = lbl))
    checkEquals(target, current)
    current <- edgeData(g1, attr ="type")
    lbl <- paste(c("a", "b", "d", "b", "d", "d"),
                 c( "b", "c", "c", "x", "x", "y") , sep ="|")
    target <- as.list( structure(c("low", "high", "unknown", "unknown", "unknown", "unknown"),
                       names = lbl))
    checkEquals(target, current)
}
test_graphBAM_nodeUnion_Attributes <- function(use.factors=TRUE){
    ## Node-attribute merging in graphUnion() with a custom combiner for
    ## the "gene" attribute only.  Expected semantics exercised below:
    ## values equal in both inputs are kept; differing values without a
    ## combiner become NA; attributes/nodes present in only one input
    ## keep that input's value or default.
    setClass("myType", representation = representation(typ ="character"))
    myType <- function(typ){ new("myType", typ = typ)}
    testFun <- function(x,y) {
        if(is(x, "myType") && is(y, "myType")){
            if(x@typ =="aa" || y@typ == "ac")
                return("ax")
            else
                return("ab")
        } else return(as.character(NA))
    }
    funList <- structure(list(testFun), names ="gene")
    ft1 <- data.frame(from=c("a", "a", "a", "b", "b"),
                      to =c("b", "c", "d", "a", "d"),
                      weight=c(1, 3.1, 5.4, 1, 2.2),
                      stringsAsFactors = use.factors)
    g1 <- graphBAM(ft1, edgemode="directed")
    nodeDataDefaults(g1, attr="color") <- "cyan"
    nodeDataDefaults(g1, attr="type") <- "missing"
    nodeDataDefaults(g1, attr="kp") <- "missing"
    nodeDataDefaults(g1, attr="gene") <- "unknown"
    nodeData(g1, n = c("a", "b", "c") , attr = "color") <- c("red", "green", "blue")
    nodeData(g1, n = c("a", "b"), attr = "type") <- c("low", "high")
    nodeData(g1, n = c("a", "b"), attr = "kp") <- c("kplow", "kphigh")
    nodeData(g1, n = c("a", "b"), attr = "gene") <- c(myType("aa"), myType("bt"))
    ft1 <- data.frame(from=c("a", "a", "b"),
                      to=c("b", "x", "z"),
                      weight=c(6, 5, 2),
                      stringsAsFactors = use.factors)
    g2 <- graphBAM(ft1,nodes = c("a","b", "c", "d", "x", "y", "z"), edgemode = "directed")
    nodeDataDefaults(g2, attr ="color") <- "cyan"
    nodeDataDefaults(g2, attr="type") <- "missing"
    nodeDataDefaults(g2, attr="gene") <- "unknown"
    nodeData(g2, n = c("a", "b", "x", "y", "z") , attr = "color") <- c("red", "red", "green", "pink", "yellow")
    nodeData(g2, n = c("a", "b"), attr = "type") <- c("low", "high")
    nodeData(g2, n = c("a", "b"), attr = "gene") <- c(myType("at"), myType("kt"))
    res <- graphUnion(g1, g2, nodeFun = funList)
    ## "color" has no combiner: a is "red" in both -> kept; b and c
    ## differ between inputs -> NA; x/y/z only set in g2 -> g2 values
    current <- nodeData(res, attr = "color")
    cn <- as.character(NA)
    target <- as.list( structure(c("red", cn, cn, "cyan", "green", "pink", "yellow"),
                   names = c("a", "b", "c", "d", "x", "y", "z")))
    checkEquals(target, current)
    current <- nodeData(res, attr = "type")
    target <- as.list( structure(c("low", "high", "missing", "missing", "missing", "missing", "missing"),
                   names = c("a", "b", "c", "d", "x", "y", "z")))
    checkEquals(target, current)
    ## "kp" is declared only in g1; its values/defaults carry over
    current <- nodeData(res, attr = "kp")
    target <- as.list( structure(c("kplow", "kphigh", "missing", "missing", "missing",
                                   "missing", "missing"),
                   names = c("a", "b", "c", "d", "x", "y", "z")))
    checkEquals(target, current)
    ## "gene" uses testFun to combine the S4 values set on a and b
    current <- nodeData(res, n = c("a", "b", "c", "d"), attr ="gene")
    target <- as.list( structure(c("ax", "ab", cn ,cn), names = c("a", "b", "c", "d")))
    checkEquals(target, current)
    current <- nodeData(res, n= c( "x", "y", "z"), attr ="gene")
    target <- as.list( structure(c("unknown","unknown", "unknown"),
                    names = c("x", "y", "z")))
    checkEquals(target, current)
}
test_graphBAM_removeNode <- function(){
    ## removeNode() must drop the removed nodes' attribute entries and
    ## the attributes of every edge incident to a removed node.
    from = c("a", "b", "d", "d")
    to = c("b", "c", "y", "x")
    weight=c(2.2, 2.0, 0.4, 0.2)
    df <- data.frame(from, to, weight)
    g <- graphBAM(df, edgemode = "directed")
    nodeDataDefaults(g, attr="name") <- "NN"
    nodeData(g, n = c("a","b", "c", "d", "x", "y"), attr = "name") <-
        c("a", "b", "c", "d", "x", "y")
    edgeDataDefaults(g, attr="name") <- "EE"
    edgeData(g, from = from, to = to , attr = "name") <- paste(from, to , sep= "")
    ## removing x and b leaves only edge d->y
    res <- removeNode(c("x","b"), g)
    current <- nodeData(res, attr = "name")
    target <- as.list(structure( c("a", "c", "d", "y"), names = c("a", "c",
                      "d", "y")))
    checkEquals(target, current)
    current <- edgeData(res, attr = "name")
    target <- as.list(structure( "dy", names = paste("d", "y", sep = "|")))
    checkEquals(current, target)
    ## removing x and a leaves edges b->c and d->y
    res <- removeNode(c("x", "a"), g)
    current <- edgeData(res, attr = "name")
    target <- as.list(structure( c("bc", "dy"), names = paste(c("b", "d"),
                      c("c","y"), sep = "|")))
    checkEquals(target, current)
}
## -------------------------------------------------------------------
## Source-file metadata (recovered from garbled dataset-extraction
## markup that was spliced into this file):
##   path:     /inst/unitTests/graphBAM_test.R
##   license:  no_license
##   repo:     vgpprasad91/graph
##   language: R (50,650 bytes)
## -------------------------------------------------------------------
## library("graph")
## Fixed RNG seed so the randFromTo-based fixtures below are
## reproducible across test runs.
set.seed(0x12a9b)
randBAMGraph <- function(numNodes = 10, numEdges = 10)
{
    ## Build a random directed graphBAM with `numNodes` nodes and
    ## `numEdges` edges; the edge weights are simply 1..numNodes.
    rft <- graph:::randFromTo(numNodes, numEdges)
    rft$ft$weight <- seq_len(numNodes)
    graphBAM(rft$ft, nodes = rft$nodes, edgemode = "directed")
}
make_smallBAM <- function() {
    ## Shared fixture: a small directed graphBAM on nodes a, b, c, x, y.
    edgeTable <- data.frame(from = c("a", "a", "a", "x", "x", "c"),
                            to = c("b", "c", "x", "y", "c", "a"),
                            weight = c(3.4, 2.6, 1.7, 5.3, 1.6, 7.9))
    graphBAM(edgeTable, edgemode = "directed")
}
make_unDirectedBAM <- function() {
    ## Six-edge undirected graphBAM fixture over nodes a, b, c, d, x, y.
    edge_df <- data.frame(from   = c("a", "a", "a", "x", "x", "c"),
                          to     = c("b", "c", "x", "y", "c", "d"),
                          weight = c(3.4, 2.6, 1.7, 5.3, 1.6, 7.9))
    graphBAM(edge_df, edgemode = "undirected")
}
create_bigBAM <- function()
{
    ## Random 100-node / 100-edge directed graphBAM with weights 1..100.
    ## FIX: qualify randFromTo with graph::: for consistency with
    ## randBAMGraph() above — the unqualified call fails when the helper
    ## is not exported from the `graph` namespace.
    r1 <- graph:::randFromTo(100, 100)
    r1$ft$weight <- seq_len(100)
    g1 <- graphBAM(r1$ft, r1$nodes, edgemode="directed")
    g1
}
## Basic graphBAM construction: edge/weight bookkeeping for both directed
## and undirected variants built from the same 4-edge data frame.
test_create_graphBAMSmall <- function() {
from = c("a", "d", "d", "b")
to = c("b", "a", "d", "c")
weight= c(1.5, 3.1, 5.4, 1)
nodes = c("a","b","c","d")
df <- data.frame(from, to, weight)
g1 <- graphBAM(df, nodes, edgemode = "directed")
g2 <- graphBAM(df, nodes, edgemode = "undirected")
checkEquals(4L, numEdges(g1))
checkEquals(isDirected(g1), TRUE)
checkEquals(isAdjacent(g1, c("a", "d", "b"), c("b", "d", "c") ), c(TRUE,TRUE,TRUE))
checkEquals(names(edges(g1)), c("a", "b", "c", "d"))
k <- edges(g1)
## Directed out-edges: note d has both the d->a edge and the d->d self loop.
checkEquals(list(k$a, k$b, k$c, k$d), list("b", "c", character(0), c("a", "d")))
w <- edgeWeights(g1)
checkEquals(names(w), c("a", "b", "c", "d"))
checkEquals(list(w$a, w$b, w$c, w$d), list(structure(1.5, names="b"),
structure(1, names="c"), numeric(0), structure(c(3.1, 5.4),
names= c("a", "d"))))
checkEquals(4L, numNodes(g2))
checkEquals(4L, numEdges(g2))
checkEquals(isDirected(g2), FALSE)
checkEquals(isAdjacent(g1, c("a","d","b"), c("b","d","c") ), c(TRUE,TRUE,TRUE))
}
## nodes() must return exactly the vertices mentioned in the edge list,
## and edgemode = "directed" must be reflected by isDirected().
test_BAMNodes <- function() {
    v_from <- c("a", "a", "a", "x", "x", "c")
    v_to <- c("b", "c", "x", "y", "c", "a")
    edge_df <- data.frame(from = v_from, to = v_to,
                          weight = c(3.4, 2.6, 1.7, 5.3, 1.6, 7.9))
    g <- graphBAM(edge_df, edgemode = "directed")
    checkIdentical(all(nodes(g) %in% unique(c(v_from, v_to))), TRUE)
    checkIdentical(isDirected(g), TRUE)
}
## Helper: verify subG is the induced subgraph of g on nodes(subG).
## Decodes both graphs' bit-vector edge sets into from/to data frames and
## checks that the subgraph keeps exactly those edges of g whose endpoints
## both survive, with their original weights and in the original order.
checkBAMSubGraph <- function(g, subG) {
nds <- nodes(g)
subNodes <- nodes(subG)
w1 <- g@edgeSet@weights
## graph_bitarray_rowColPos maps set bits to (from, to) index pairs.
ft1 <- .Call(graph:::graph_bitarray_rowColPos, g@edgeSet@bit_vector)
origFromTo <- data.frame(from=nds[ft1[,"from"]], to = nds[ft1[,"to"]], weights = w1)
w2 <- subG@edgeSet@weights
ft2 <- .Call(graph:::graph_bitarray_rowColPos, subG@edgeSet@bit_vector)
subFromTo <- data.frame(from = subNodes[ft2[,"from"]], to = subNodes[ft2[,"to"]], weights = w2)
indx <- (origFromTo$from %in% subNodes) &
(origFromTo$to %in% subNodes)
want <- origFromTo[(origFromTo$from %in% subNodes) & (origFromTo$to %in% subNodes),]
checkEquals(as.character(want$from), as.character(subFromTo$from))
checkIdentical(as.character(want$to), as.character(subFromTo$to))
checkEquals(g@edgeSet@weights[indx], subG@edgeSet@weights)
}
## subGraph() on a small directed fixture: node set, directedness, and
## induced edges are validated via checkBAMSubGraph().
test_BAMSubGraph_Small <- function() {
g1 <- make_smallBAM()
sg <- subGraph(c("a","x", "y"), g1)
checkIdentical(isDirected(sg), TRUE)
checkIdentical(nodes(sg), c("a", "x", "y"))
checkBAMSubGraph(g1,sg)
}
## subGraph() on a random 100-node graph restricted to 55 random nodes.
test_BAMSubGraph_Large <- function() {
g1 <- randBAMGraph(100,100)
sn <- sample(nodes(g1), 55)
sg <- subGraph( sn, g1)
checkIdentical(isDirected(sg), TRUE)
checkBAMSubGraph(g1,sg)
}
## edgeWeights() on a directed graph: full listing plus subsetting by
## character names and by numeric index must agree.
test_BAM_edgeWeights <- function() {
g1 <- make_smallBAM()
ew1 <- edgeWeights(g1)
checkEquals(names(ew1), c("a", "b", "c", "x", "y"))
checkEquals(list(ew1$a, ew1$b, ew1$c, ew1$x, ew1$y),
list(structure( c(3.4, 2.6, 1.7), names = c("b","c","x")),
numeric(0), structure(c(7.9), names = "a"),
structure(c(1.6, 5.3), names= c("c", "y")), numeric(0)))
ew2 <- edgeWeights(g1,c("a","b")) ##index = char
checkEquals(names(ew2), c("a","b"))
checkEquals(list(ew2$a, ew2$b), list(structure( c(3.4, 2.6, 1.7),
names = c("b","c","x")), numeric(0)))
ew2 <- edgeWeights(g1, 1:2) ##index = numeric
checkEquals(names(ew2), c("a","b"))
checkEquals(list(ew2$a, ew2$b), list(structure( c(3.4, 2.6, 1.7),
names = c("b","c","x")), numeric(0)))
}
## edgeWeights() on an undirected graph: each edge is reported from both
## endpoints, isolated node "e" yields numeric(0), and subsetting keeps
## the requested order.
test_BAM_edgeWeights_undirected <- function()
{
from = c("a", "d", "d", "b", "a")
to = c("b", "a", "d", "c", "c")
weight = c(1.5, 2.1, 3.4, 4.1, 5.6)
df <- data.frame(from, to, weight)
gu <- graphBAM(df, nodes="e", edgemode = "undirected")
want <- list(a=c(b=1.5, c=5.6, d=2.1),
b=c(a=1.5, c=4.1),
c=c(a=5.6, b=4.1),
d=c(a=2.1, d=3.4),
e=numeric(0))
checkEquals(want, edgeWeights(gu))
checkEquals(want[c("c", "a")], edgeWeights(gu, c("c", "a")))
}
## edges() on a directed graph: out-edge lists for all nodes and for an
## explicit subset in the requested order.
test_BAM_edges <- function() {
g1 <- make_smallBAM()
ew1 <- edges(g1)
checkEquals(names(ew1), c("a", "b", "c", "x", "y"))
checkEquals(list(ew1$a, ew1$b, ew1$c, ew1$x, ew1$y),
list( c("b","c","x"), character(0), "a", c("c", "y"), character(0)))
ew2 <- edges(g1, c("c", "b"))
checkEquals(names(ew2), c("c","b"))
checkEquals(list(ew2$c, ew2$b), list("a", character(0)))
}
## adj() honours the order of the requested nodes and returns each node's
## outgoing neighbours ("b" has none in the fixture).
test_BAM_adj <- function() {
    g <- make_smallBAM()
    res <- adj(g, c("c", "b"))
    checkEquals(names(res), c("c","b"))
    checkEquals(list(res$c, res$b), list("a", character(0)))
}
## edgeMatrix(): row 1 = from-node indices, row 2 = to-node indices, in
## the graph's internal (column-major bit order) edge ordering.
test_BAM_edgeMatrix <- function() {
g1 <- make_smallBAM()
em <- edgeMatrix(g1)
checkEquals(em[1,], c(3, 1, 1, 4, 1, 4))
checkEquals(em[2,], c(1, 2, 3, 3, 4, 5))
}
## removeEdge() must raise an error whenever either endpoint vector
## mentions a node that is not in the graph.
test_BAM_removeEdge_unknown_nodes <- function()
{
    g <- make_smallBAM()
    checkException(removeEdge("a", "q", g))
    checkException(removeEdge("q", "a", g))
    checkException(removeEdge("a", c("q", "aa", "tt"), g))
    checkException(removeEdge(c("a", "q", "tt", "aa"),
                              c("a", "q", "aa", "tt"), g))
}
## removeEdge(): no-op on empty input, error on a non-existent edge, and
## correct edge removal for single and vectorized from/to arguments.
test_BAM_removeEdge <- function()
{
g1 <- make_smallBAM()
## removing nothing does nothing
c0 <- character(0)
checkEquals(edges(g1), edges(removeEdge(c0, c0, g1)))
## there is no y => a edge, throw error
checkException(removeEdge("y", "a", g1))
g2 <- removeEdge("c", "a", g1)
checkEquals(list(c=character(0)), edges(g2, "c"))
em <- edgeMatrix(g2)
checkEquals(em[1,], c(1, 1, 4, 1, 4))
checkEquals(em[2,], c(2, 3, 3, 4, 5))
## one from-node, several to-nodes
g3 <- removeEdge("a", c("b", "x"), g1)
checkEquals(list(a="c"), edges(g3, "a"))
checkEquals(edges(g1)[-1], edges(g3)[-1])
## several from-nodes, one to-node
g4 <- removeEdge(c("a", "x"), "c", g1)
checkEquals(list(a=c("b", "x")), edges(g4, "a"))
checkEquals(list(x="y"), edges(g4, "x"))
}
## edgeData(): full listing, per-from-node subsetting, attribute name
## validation, and single-edge lookup; keys are "from|to" strings.
test_BAMSmall_edgeData <- function(){
g1 <- make_smallBAM()
eg <- edgeData(g1)
tmp <- paste(c("c", "a", "a", "x", "a", "x"), c("a","b","c","c","x","y"),sep="|")
checkEquals(names(eg), tmp)
vals <- sapply( names(eg),function(k){
eg[[k]]$weight
})
checkEquals(names(vals), tmp)
checkEquals( as.numeric(vals),c(7.9, 3.4, 2.6, 1.6, 1.7, 5.3))
eg <- edgeData(g1, "a", attr="weight")
tmp <- paste( c("a", "a", "a"), c("b", "c", "x"), sep = "|")
checkEquals(names(eg), tmp)
vals <- sapply( names(eg),function(k){
eg[[k]]
})
checkEquals(names(vals), tmp)
checkEquals( as.numeric(vals), c(3.4, 2.6, 1.7))
## unknown attribute name must raise an error
checkException(eg <- edgeData(g1, "a", attr="weightsss"))
eg <- edgeData(g1, "a", "b", attr="weight")
tmp <- paste("a", "b", sep = "|")
checkEquals(names(eg), tmp)
vals <- sapply( names(eg),function(k){
eg[[k]]
})
checkEquals(names(vals), tmp)
checkEquals( as.numeric(vals),3.4)
}
## extractFromTo() on an undirected graph: each edge appears once, in the
## internal storage order.
test_BAM_extractFromToUndirected <- function() {
g1 <- make_unDirectedBAM()
ft <- extractFromTo(g1)
checkEquals(as.character(ft$from), c("a", "a", "c", "a", "c", "x"))
checkEquals(as.character(ft$to), c("b", "c", "d", "x", "x", "y"))
checkEquals(ft$weight, c(3.4, 2.6, 7.9, 1.7, 1.6, 5.3))
}
## extractFromTo() on a directed graph: edges in internal storage order
## with their weights.
test_BAM_extractFromToDirected <- function() {
g1 <- make_smallBAM()
ft <- extractFromTo(g1)
checkEquals(as.character(ft$from), c("c", "a", "a", "x", "a", "x"))
checkEquals(as.character(ft$to), c("a", "b", "c", "c", "x", "y"))
checkEquals(ft$weight, c(7.9, 3.4, 2.6, 1.6, 1.7, 5.3))
}
## as(graphBAM, "matrix") for an undirected graph: symmetric weighted
## adjacency matrix with matching, sorted dimnames.
test_BAM_bamToMatrix_UnDirected <- function() {
g1 <- make_unDirectedBAM()
mat <- as(g1, "matrix")
checkEquals(isSymmetric(mat), TRUE)
checkEquals(mat[upper.tri(mat)],
c(3.4, 2.6, 0.0, 0.0, 0.0, 7.9, 1.7, 0.0,
1.6, 0.0, 0.0, 0.0, 0.0, 0.0, 5.3))
checkEquals(rownames(mat),colnames(mat))
checkEquals(rownames(mat), c("a", "b", "c", "d", "x", "y"))
}
## as(graphBAM, "matrix") for a directed graph: full (non-symmetric)
## weighted adjacency matrix in column-major order.
test_BAM_bamToMatrix_Directed <- function() {
g1 <- make_smallBAM()
mat <- as(g1, "matrix")
checkEquals(as.numeric(mat), c(0.0, 0.0, 7.9, 0.0,
0.0, 3.4, 0.0, 0.0, 0.0, 0.0, 2.6, 0.0,
0.0, 1.6, 0.0, 1.7, 0.0, 0.0, 0.0,0.0,
0.0, 0.0, 0.0, 5.3, 0.0))
checkEquals(rownames(mat),colnames(mat))
checkEquals(rownames(mat), c("a","b", "c", "x","y"))
}
## Coercion graphBAM -> graphAM (undirected): nodes, edgemode, edges and
## per-node edge weights must survive (weight order may differ, so the
## multi-neighbour nodes are compared sorted).
test_BAM_bamTographAM_unDirected <- function() {
g1 <- make_unDirectedBAM()
am <- as(g1,"graphAM")
checkEquals(nodes(g1), nodes(am))
checkEquals(edgemode(g1), edgemode(am))
checkEquals(edges(g1), edges(am))
w1 <- edgeWeights(g1)
w2 <- edgeWeights(am)
checkEquals(names(w1), names(w2))
checkEquals( w1$a, w2$a)
checkEquals( w1$b, w2$b)
checkEquals( sort(w1$c), sort(w2$c))
checkEquals( w1$d, w2$d)
checkEquals( sort(w1$x), sort(w2$x))
checkEquals( w1$y, w2$y)
}
## Coercion graphBAM -> graphAM (directed): same invariants as the
## undirected case above.
test_BAM_bamTographAM_Directed <- function() {
g1 <- make_smallBAM()
am <- as(g1,"graphAM")
checkEquals(nodes(g1), nodes(am))
checkEquals(edgemode(g1), edgemode(am))
checkEquals(edges(g1), edges(am))
w1 <- edgeWeights(g1)
w2 <- edgeWeights(am)
checkEquals(names(w1), names(w2))
checkEquals( w1$a, w2$a)
checkEquals( w1$b, w2$b)
checkEquals( sort(w1$c), sort(w2$c))
checkEquals( w1$d, w2$d)
checkEquals( sort(w1$x), sort(w2$x))
checkEquals( w1$y, w2$y)
}
## Coercion graphBAM -> graphNEL (undirected): nodes, edgemode, edges and
## per-node edge weights must survive the conversion.
test_BAM_bamTographNEL_UnDirected <- function() {
g1 <- make_unDirectedBAM()
nel <- as(g1,"graphNEL")
checkEquals(nodes(g1), nodes(nel))
checkEquals(edgemode(g1), edgemode(nel))
checkEquals(edges(g1), edges(nel))
w1 <- edgeWeights(g1)
w2 <- edgeWeights(nel)
checkEquals(names(w1), names(w2))
checkEquals( w1$a, w2$a)
checkEquals( w1$b, w2$b)
checkEquals( sort(w1$c), sort(w2$c))
checkEquals( w1$d, w2$d)
checkEquals( sort(w1$x), sort(w2$x))
checkEquals( w1$y, w2$y)
}
## Coercion graphBAM -> graphNEL (directed): same invariants as the
## undirected case above.
test_BAM_bamTographNEL_Directed <- function() {
g1 <- make_smallBAM()
nel <- as(g1,"graphNEL")
checkEquals(nodes(g1), nodes(nel))
checkEquals(edgemode(g1), edgemode(nel))
checkEquals(edges(g1), edges(nel))
w1 <- edgeWeights(g1)
w2 <- edgeWeights(nel)
checkEquals(names(w1), names(w2))
checkEquals( w1$a, w2$a)
checkEquals( w1$b, w2$b)
checkEquals( sort(w1$c), sort(w2$c))
checkEquals( w1$d, w2$d)
checkEquals( sort(w1$x), sort(w2$x))
checkEquals( w1$y, w2$y)
}
create_GraphNEL_Directed <- function() {
    ## Four-node directed graphNEL fixture with weighted edge lists
    ## (edges are given as node indices into V = a, b, c, d).
    set.seed(123)
    V <- letters[1:4]
    edL <- list(
        a = list(edges = c(3, 4), weights = c(.13, .14)),
        b = list(edges = c(3), weights = .23),
        c = list(edges = numeric(0), weights = numeric(0)),
        d = list(edges = c(2, 3), weights = c(.42, .43))
    )
    new("graphNEL", nodes = V, edgeL = edL, edgemode = "directed")
}
create_GraphNEL_UnDirected <- function() {
    ## Four-node undirected graphNEL fixture; each edge is listed from
    ## both endpoints with the same weight, "d" is isolated.
    set.seed(123)
    V <- letters[1:4]
    edL <- list(
        a = list(edges = c(2, 3), weights = c(.13, .14)),
        b = list(edges = c(1), weights = .13),
        c = list(edges = c(1), weights = 0.14),
        d = list(edges = numeric(0), weights = numeric(0))
    )
    new("graphNEL", nodes = V, edgeL = edL, edgemode = "undirected")
}
## Coercion graphNEL (directed) -> graphBAM preserves nodes, edgemode,
## edges and weights.
## NOTE(review): this definition is shadowed by an identical redefinition
## of the same name immediately below, so only one copy ever runs; the
## duplicate should probably be deleted or renamed.
test_graphNEL_Directed_To_graphBAM <-function() {
nel <- create_GraphNEL_Directed()
bam <- as(nel, "graphBAM")
checkEquals(nodes(nel), nodes(bam))
checkEquals(edgemode(nel), edgemode(bam))
checkEquals(edges(nel), edges(bam))
w1 <- edgeWeights(nel)
w2 <- edgeWeights(bam)
checkEquals(w1,w2)
}
## Coercion graphNEL (directed) -> graphBAM preserves nodes, edgemode,
## edges and weights. (Byte-for-byte duplicate of the definition above;
## it overwrites that one when the file is sourced.)
test_graphNEL_Directed_To_graphBAM <- function() {
nel <- create_GraphNEL_Directed()
bam <- as(nel, "graphBAM")
checkEquals(nodes(nel), nodes(bam))
checkEquals(edgemode(nel), edgemode(bam))
checkEquals(edges(nel), edges(bam))
w1 <- edgeWeights(nel)
w2 <- edgeWeights(bam)
checkEquals(w1,w2)
}
## Coercion graphNEL (undirected) -> graphBAM preserves nodes, edgemode,
## edges and weights.
test_graphNEL_UnDirected_To_graphBAM <- function() {
nel <- create_GraphNEL_UnDirected()
bam <- as(nel, "graphBAM")
checkEquals(nodes(nel), nodes(bam))
checkEquals(edgemode(nel), edgemode(bam))
checkEquals(edges(nel), edges(bam))
w1 <- edgeWeights(nel)
w2 <- edgeWeights(bam)
checkEquals(w1,w2)
}
## Coercion chain graphNEL -> graphAM -> graphBAM (directed) preserves
## nodes, edgemode, edges and weights.
test_graphAM_Directed_To_graphBAM <- function() {
nel <- create_GraphNEL_Directed()
am <- as(nel, "graphAM")
bam <- as(am, "graphBAM")
checkEquals(nodes(am), nodes(bam))
checkEquals(edgemode(am), edgemode(bam))
checkEquals(edges(am), edges(bam))
w1 <- edgeWeights(am)
w2 <- edgeWeights(bam)
checkEquals(w1,w2)
}
## Coercion chain graphNEL -> graphAM -> graphBAM (undirected) preserves
## nodes, edgemode, edges and weights.
test_graphAM_UnDirected_To_graphBAM<- function() {
nel <- create_GraphNEL_UnDirected()
am <- as(nel, "graphAM")
bam <- as(am, "graphBAM")
checkEquals(nodes(am), nodes(bam))
checkEquals(edgemode(am), edgemode(bam))
checkEquals(edges(am), edges(bam))
w1 <- edgeWeights(am)
w2 <- edgeWeights(bam)
checkEquals(w1, w2)
}
## Setting the "weight" edge attribute via edgeData<- updates exactly the
## addressed edges, for a single edge and for a vectorized to-argument.
test_BAM_set_edge_weights <- function()
{
getw <- function(x) unlist(edgeWeights(x))
g <- make_smallBAM()
weight0 <- unlist(edgeWeights(g))
edgeData(g, "c", "a", attr="weight") <- 123.0
want <- weight0
want["c.a"] <- 123.0
checkEquals(want, getw(g))
## one from-node, three to-nodes in a single assignment
g <- make_smallBAM()
edgeData(g, "a", c("b", "c", "x"), attr="weight") <- c(10, 11, 12)
want <- weight0
want[c("a.b", "a.c", "a.x")] <- c(10, 11, 12)
checkEquals(want, getw(g))
}
## graphIntersect (undirected): node set is the intersection; common
## edges are kept with both orientations listed; weights that differ
## between the inputs become NA.
test_BAM_Intersect_UnDirected <- function() {
## nodes a b c d x y
from = c("a", "b", "d", "d")
to = c("b", "c", "x", "y")
weight=c(1.2, 2.4, 3.2, 5.4)
df <- data.frame(from, to, weight)
g1 <- graphBAM(df, edgemode = "undirected")
## nodes a b c d x y z
from = c("a", "b", "b", "d", "d")
to = c("b", "c", "d", "c", "x")
weight=c(3.2, 1.2, 2.1, 3.2, 3.5)
df <- data.frame(from, to, weight)
g2 <- graphBAM(df, nodes = c("a","b","c", "d", "x", "y", "z"),
edgemode = "undirected")
g <- graphIntersect(g1,g2)
checkEquals(intersect(nodes(g1), nodes(g2)), nodes(g))
checkEquals(FALSE, isDirected(g))
eg <- edgeData(g)
vals <- sapply( names(eg),function(k){
eg[[k]]$weight
})
tmp <- paste(c("a", "b", "d", "b", "c", "x"), c("b", "c", "x", "a", "b", "d"), sep= "|")
checkEquals(tmp, names(vals))
## all three common edges have conflicting weights, hence NA
checkEquals(as.numeric(rep(NA, 6)), as.numeric(vals))
}
## graphIntersect (directed): common edges keep their weight only when it
## agrees in both inputs (a->b: 1.2 in both); otherwise NA.
test_BAM_Intersect_Directed <- function() {
## nodes a b c d x y
from = c("a", "b", "d", "d")
to = c("b", "c", "x", "y")
weight=c(1.2, 2.4, 3.2, 5.4)
df <- data.frame(from, to, weight)
g1 <- graphBAM(df, edgemode = "directed")
## nodes a b c d x y z
from = c("a", "b", "b", "d", "d")
to = c("b", "c", "d", "c", "x")
weight=c(1.2, 1.2, 2.1, 3.2, 3.5)
df <- data.frame(from, to, weight)
g2 <- graphBAM(df, nodes = c("a","b","c", "d", "x", "y", "z"),
edgemode = "directed")
g <- graphIntersect(g1,g2)
checkEquals(intersect(nodes(g1), nodes(g2)), nodes(g))
checkEquals(TRUE, isDirected(g))
eg <- edgeData(g)
vals <- sapply( names(eg),function(k){
eg[[k]]$weight
})
tmp <- paste(c("a", "b", "d"), c("b", "c", "x"), sep= "|")
checkEquals(tmp, names(vals))
checkEquals(c(1.2, NA, NA), as.numeric(vals))
}
## graphIntersect (undirected) with one agreeing weight: d-x is 3.2 in
## both inputs and is kept; a-b differs and becomes NA.
test_BAM_Intersect_UnDirected2 <- function() {
## nodes a b d x y
from = c("a", "d", "d")
to = c("b", "x", "y")
weight=c(1.2, 3.2, 5.4)
df <- data.frame(from, to, weight)
g1 <- graphBAM(df, edgemode = "undirected")
## nodes a b c d x y z
from = c("a", "b", "b", "d", "d")
to = c("b", "c", "d", "c", "x")
weight=c(3.2, 1.2, 2.1, 5.2, 3.2)
df <- data.frame(from, to, weight)
g2 <- graphBAM(df, nodes = c("a","b","c", "d", "x", "y", "z"),
edgemode = "undirected")
g <- graphIntersect(g1,g2)
checkEquals(intersect(nodes(g1), nodes(g2)), nodes(g))
checkEquals(FALSE, isDirected(g))
eg <- edgeData(g)
vals <- sapply( names(eg),function(k){
eg[[k]]$weight
})
tmp <- paste(c("a", "d", "b", "x"), c("b", "x", "a", "d"), sep= "|")
checkEquals(tmp, names(vals))
checkEquals(rep(c(NA,3.2),2), as.numeric(vals))
}
## graphIntersect when the inputs share nodes (b, x, y) but no edges:
## the result has the common nodes and an empty edge set.
test_BAM_Intersect_EmptyEdges <- function() {
from = c("a", "d", "d")
to = c("b", "x", "y")
weight=c(1.2, 3.2, 5.4)
df <- data.frame(from, to, weight)
g1 <- graphBAM(df, edgemode = "directed")
from = c("h", "i", "j")
to = c("b", "x", "y")
weight=c(1.2, 3.2, 5.4)
df <- data.frame(from, to, weight)
g2 <- graphBAM(df, edgemode = "directed")
g <- graphIntersect(g1,g2)
checkEquals(nodes(g), intersect(nodes(g1), nodes(g2)))
checkEquals(isDirected(g), TRUE)
eg <- edgeWeights(g)
checkEquals(c("b", "x", "y"), names(eg))
checkEquals(list(numeric(0), numeric(0), numeric(0)),list(eg$b, eg$x, eg$y))
}
## graphIntersect of two graphs with disjoint node sets: the result has
## no nodes and therefore an empty edgeWeights() list.
test_BAM_Intersect_EmptyNodes <- function() {
from = c("a", "d", "d")
to = c("b", "x", "y")
weight=c(1.2, 3.2, 5.4)
df <- data.frame(from, to, weight)
## FIX: edgemode was misspelled "unirected"; the test intends an
## undirected graph (matching g2 and the isDirected(g) == FALSE check).
g1 <- graphBAM(df, edgemode = "undirected")
from = c("h", "i", "j")
to = c("s", "h", "l")
weight=c(1.2, 3.2, 5.4)
df <- data.frame(from, to, weight)
g2 <- graphBAM(df, edgemode = "undirected")
g <- graphIntersect(g1,g2)
checkEquals(intersect(nodes(g1), nodes(g2)), nodes(g))
checkEquals(FALSE, isDirected(g))
eg <- edgeWeights(g)
checkEquals(list(), eg)
}
## isAdjacent() on a directed graph: TRUE for every stored edge in its
## stored direction, and correct vectorized behaviour in both argument
## positions (direction matters: a->b is TRUE, b->a is FALSE).
test_BAM_isAdjacent <- function()
{
from = c("a", "d", "d", "b", "a")
to = c("b", "a", "d", "c", "c")
weight= c(1.5, 2.1, 3.4, 4.1, 5.6)
df <- data.frame(from, to, weight)
gd <- graphBAM(df, nodes="e", edgemode = "directed")
## single edges
for (i in seq_len(nrow(df))) {
checkEquals(TRUE, isAdjacent(gd, from[i], to[i]))
}
## vectorized
checkEquals(c(FALSE, TRUE, TRUE, FALSE, FALSE),
isAdjacent(gd, "a", letters[1:5]))
checkEquals(c(FALSE, FALSE, FALSE, TRUE, FALSE),
isAdjacent(gd, letters[1:5], "a"))
}
## graphUnion (undirected): node set is the union; edges present in both
## inputs with conflicting weights get NA, edges unique to one input keep
## their weight.
test_BAM_Union_UnDirected <- function() {
## nodes a b c d x y
from = c("a", "b", "d", "d")
to = c("b", "c", "x", "y")
weight=c(1.2, 2.4, 3.5, 5.4)
df <- data.frame(from, to, weight)
g1 <- graphBAM(df, edgemode = "undirected")
## nodes a b c d x y z
from = c("a", "b", "b", "d", "d")
to = c("b", "c", "d", "c", "x")
weight=c(3.2, 1.2, 2.1, 3.2, 3.5)
df <- data.frame(from, to, weight)
g2 <- graphBAM(df, nodes = c("a","b","c", "d", "x", "y", "z"),
edgemode = "undirected")
g <- graphUnion(g1,g2)
checkEquals(union(nodes(g1), nodes(g2)), nodes(g))
checkEquals(FALSE, isDirected(g))
df <- extractFromTo(g)
tmp <- data.frame(from = c("a", "b", "b", "c", "d", "d"),
to = c("b", "c", "d", "d", "x", "y"),
weight = c( NA, NA, 2.1, 3.2, 3.5, 5.4))
checkEquals(tmp, df)
}
## graphUnion (directed): shared edge a->b with equal weight (1.2) keeps
## it; shared edge b->c with conflicting weights becomes NA.
test_BAM_Union_Directed <- function() {
## nodes a b c d x y
from = c("a", "b", "d", "d")
to = c("b", "c", "x", "y")
weight=c(1.2, 2.4, 3.5, 5.4)
df <- data.frame(from, to, weight)
g1 <- graphBAM(df, edgemode = "directed")
## nodes a b c d x y z
from = c("a", "b", "b", "d", "d")
to = c("b", "c", "d", "c", "x")
weight=c(1.2, 1.2, 2.1, 3.2, 3.5)
df <- data.frame(from, to, weight)
g2 <- graphBAM(df, nodes = c("a","b","c", "d", "x", "y", "z"),
edgemode = "directed")
g <- graphUnion(g1,g2)
checkEquals(union(nodes(g1), nodes(g2)), nodes(g))
checkEquals(TRUE, isDirected(g))
df <- extractFromTo(g)
tmp <- data.frame(from = c("a", "b", "d", "b", "d", "d"),
to = c("b", "c", "c", "d", "x", "y"),
weight = c( 1.2, NA, 3.2, 2.1, 3.5, 5.4))
checkEquals(tmp, df)
}
## graphUnion of an undirected and a directed graph is not defined and
## must raise an exception.
test_BAM_Union_Mixed <- function() {
## nodes a b d x y
from = c("a", "d", "d")
to = c("b", "x", "y")
weight=c(1.2, 3.2, 5.4)
df <- data.frame(from, to, weight)
g1 <- graphBAM(df, edgemode = "undirected")
## nodes a b c d x y z
from = c("a", "b", "b", "d", "d")
to = c("b", "c", "d", "c", "x")
weight=c(3.2, 1.2, 2.1, 3.2, 3.5)
df <- data.frame(from, to, weight)
g2 <- graphBAM(df, nodes = c("a","b","c", "d", "x", "y", "z"),
edgemode = "directed")
checkException(g <- graphUnion(g1,g2))
}
## inEdges(): incoming-edge lists per node for a directed graph (isolated
## node "e" yields character(0)); for an undirected graph inEdges() must
## coincide with edges().
test_BAM_inEdges <- function()
{
from = c("a", "d", "d", "b", "a")
to = c("b", "a", "d", "c", "c")
weight = c(1.5, 2.1, 3.4, 4.1, 5.6)
df <- data.frame(from, to, weight)
## directed
gd <- graphBAM(df, nodes="e", edgemode = "directed")
want <- list(a="d",
b="a",
c=c("a", "b"),
d="d",
e=character(0))
checkEquals(want, inEdges(nodes(gd), gd))
## undirected
gu <- graphBAM(df, nodes="e", edgemode = "undirected")
checkEquals(edges(gu), inEdges(nodes(gu), gu))
}
## Scalar edge attributes on a directed graph: undeclared attribute
## errors, defaults apply to all edges, and assignment by from-only /
## from+to / to-only addressing updates the expected edges.
test_BAM_directed_attrs <- function() {
from = c("a", "a", "a", "x", "x", "c")
to = c("b", "c", "x", "y", "c", "a")
weight = c(2, 1, 3, 4, 5, 6)
df <- data.frame(from, to, weight)
bam <- graphBAM(df, edgemode = "directed")
## "code" not declared yet -> error
checkException(edgeData(bam,from="a", attr="code"))
edgeDataDefaults(bam, attr ="weight") <- 1
edgeDataDefaults(bam, attr = "code") <- "plain"
res <- unlist(edgeData(bam,from="a", attr="code"))
nmres <- paste(c("a","a","a"), c ("b", "c", "x"), sep="|")
checkEquals(names(res), nmres)
checkEquals(as.character(res), c("plain", "plain", "plain"))
edgeData(bam,from = "a", to = "x", attr= "code") <- "red"
res <- unlist(edgeData(bam, from = "a", attr = "code"))
checkEquals(names(res), nmres)
checkEquals(as.character(res), c("plain", "plain", "red"))
## to-only addressing hits every edge entering "c"
edgeData(bam,to = "c", attr= "code") <- "yellow"
res <- unlist(edgeData(bam, to= "c", attr = "code"))
nmres <- paste(c("a", "x"), c("c", "c"), sep = "|")
checkEquals(names(res), nmres)
checkEquals(as.character(res), c("yellow", "yellow"))
}
## Scalar edge attributes on an undirected graph: same contract as the
## directed test above; attribute addressing is orientation-insensitive.
test_BAM_undirected_attrs <- function() {
from = c("a", "a", "a", "x", "x")
to = c("b", "c", "x", "y", "c")
weight = c(2, 1, 3, 4, 5)
df <- data.frame(from, to, weight)
bam <- graphBAM(df, edgemode = "undirected")
checkException(edgeData(bam,from="a", attr="code"))
edgeDataDefaults(bam, attr = "weight") <- 1
edgeDataDefaults(bam, attr = "code") <- "plain"
res <- unlist(edgeData(bam,from="a", attr="code"))
nmres <- paste(c("a","a","a"), c ("b", "c", "x"), sep="|")
checkEquals(names(res), nmres)
checkEquals(as.character(res), c("plain", "plain", "plain"))
edgeData(bam,from = "a", to = "x", attr= "code") <- "red"
res <- unlist(edgeData(bam, from = "a", attr = "code"))
checkEquals(names(res), nmres)
checkEquals(as.character(res), c("plain", "plain", "red"))
edgeData(bam,to = "c", attr= "code") <- "yellow"
res <- unlist(edgeData(bam, to= "c", attr = "code"))
nmres <- paste(c("a", "x"), c("c", "c"), sep = "|")
checkEquals(names(res), nmres)
checkEquals(as.character(res), c("yellow", "yellow"))
}
## graphIntersect with attributes: default combination (equal values kept,
## conflicts -> NA; attributes missing from one input error), then a
## second intersection with user-supplied nodeFun/edgeFun combiners.
test_graphBAM_detailed_Attribute_Intersection <- function() {
## nodes a b c d x y
from = c("a", "b", "d", "d")
to = c("b", "c", "y", "x")
weight=c(1.2, 2.4, 5.4, 3.2)
df <- data.frame(from, to, weight)
g1 <- graphBAM(df, edgemode = "directed")
edgeData(g1, from = from, to = to ,attr = "weight") <- c(1.2, 2.4, 5.4, 3.2)
edgeDataDefaults(g1, attr = "color") <- "unknown"
edgeDataDefaults(g1, attr ="type") <- "unknown"
edgeData(g1, from = from, to = to ,attr = "color") <- c("red", "blue", NA, "green")
edgeData(g1, from = from, to = to , attr = "type") <- c("high", "low", "high", NA)
## nodes a b c d x y z
from = c("a", "b", "b", "d", "d")
to = c("b", "c", "d", "c", "x")
weight=c(1.2, 4.2, 5.6, 2.1, 3.2)
df <- data.frame(from, to, weight)
g2 <- graphBAM(df, nodes = c("a","b","c", "d", "x", "y", "z"),
edgemode = "directed")
edgeDataDefaults(g2, attr = "color") <- "unknown"
edgeData(g2, from = from, to = to, attr = "color") <- c("red", "blue", NA, "red",
"yellow")
g <- graphIntersect(g1, g2)
df <- extractFromTo(g)
tmp <- data.frame( from = c("a", "b", "d"),
to = c("b", "c", "x"),
weight = c(1.2, NA, 3.2))
checkEquals(tmp, df)
attColor <- edgeData(g, attr = "color")
nms <- paste(c("a", "b", "d"), c("b", "c", "x"), sep = "|")
target <- structure( c("red", "blue", NA), names = nms)
checkEquals(target, unlist(attColor))
## "type" exists only on g1, so it is not carried into the intersection
checkException(edgeData(g, attr = "type"))
weightFun <- function(x, y) {
return(x +y )
}
colorFun <- function(x,y) {
if(x=="red" && y == "red")
return("white")
else
return("black")
}
setClass("myType", representation = representation(typ ="character"))
myType <- function(typ){ new("myType", typ = typ)}
typeFun <- function(x,y) {
if(is(x, "myType") && is(y, "myType")){
if(x@typ =="low" || y@typ == "med")
return("low")
else
return("high")
}
else {return (NA)}
}
nodeDataDefaults(g1, attr ="color") <- "unknown"
nodeDataDefaults(g1, attr ="type") <- "unknown"
nodeDataDefaults(g2, attr ="color") <- "unknown"
nodeDataDefaults(g2, attr ="type") <- "unknown"
nodeData(g1,n = c("a", "b", "c"), attr ="color") <- c("red", "green", "blue")
nodeData(g1,n = c("b", "c"), attr ="type") <- c(myType("low"), myType("high"))
nodeData(g2,n = c("a", "b", "c"), attr ="color") <- c("red", "green", "red")
nodeData(g2,n = c("b", "c"), attr ="type") <- c(myType("med"), myType("low"))
g <- graphIntersect(g1, g2, nodeFun = list(type = typeFun),
edgeFun = list(weight = weightFun, color = colorFun))
attWeight <- edgeData(g, attr = "weight")
nms <- paste(c("a", "b", "d"), c("b", "c", "x"), sep = "|")
target <- structure( c( 2.4, 6.6, 6.4), names = nms)
checkEquals(target, unlist(attWeight))
## FIX: this block previously re-checked attWeight against the numeric
## weight target (copy-paste), so the combined color attribute was never
## verified. colorFun: both "red" -> "white", anything else -> "black".
attColor <- edgeData(g, attr = "color")
nms <- paste(c("a", "b", "d"), c("b", "c", "x"), sep = "|")
target <- structure(c("white", "black", "black"), names = nms)
checkEquals(target, unlist(attColor))
nodeColor <- nodeData(g, attr = "color")
target <- as.list(structure(c("red", "green", NA, "unknown", "unknown",
"unknown"), names = c("a", "b", "c", "d", "x", "y")))
checkEquals(target, nodeColor)
nodeType <- nodeData(g, attr = "type")
target <- as.list(structure(c("unknown", "low", "high", "unknown",
"unknown", "unknown"), names = c("a", "b", "c", "d", "x", "y")))
checkEquals(target, nodeType)
}
## graphUnion with attributes: default combination (shared-and-equal kept,
## conflicts/one-sided special cases -> NA), then a second union with
## user-supplied edgeFun combiners for weight and color.
test_graphBAM_detailed_Attribute_Union <- function() {
## nodes a b c d x y
from = c("a", "b", "d", "d")
to = c("b", "c", "y", "x")
weight=c(1.2, 2.4, 5.4, 3.2)
df <- data.frame(from, to, weight)
g1 <- graphBAM(df, edgemode = "directed")
edgeData(g1, from = from, to = to ,attr = "weight") <- c(1.2, 2.4, 5.4, 3.2)
edgeDataDefaults(g1, attr = "color") <- "cyan"
edgeDataDefaults(g1, attr = "type") <- "unknown"
edgeData(g1, from = from, to = to ,attr = "color") <- c("red", "blue", NA, "green")
edgeData(g1, from = from, to = to , attr = "type") <- c("high", "low", "high", NA)
## nodes a b c d x y z
from = c("a", "b", "b", "d", "d")
to = c("b", "c", "d", "c", "x")
weight=c(1.2, 4.2, 5.6, 2.1, 3.2)
df <- data.frame(from, to, weight)
g2 <- graphBAM(df, nodes = c("a","b","c", "d", "x", "y", "z"),
edgemode = "directed")
edgeDataDefaults(g2, attr = "color") <- "cyan"
edgeData(g2, from = from, to = to, attr = "color") <- c("red", "blue", NA, "red",
"yellow")
g <- graphUnion(g1, g2)
df <- extractFromTo(g)
tmp <- data.frame( from = c("a", "b", "d", "b", "d", "d"),
to = c("b", "c", "c", "d", "x", "y"),
weight = c(1.2, NA, 2.1, 5.6, 3.2, 5.4))
checkEquals(tmp, df)
attColor <- edgeData(g, attr = "color")
nms <- paste(c("a", "b", "d", "b", "d", "d"), c("b", "c", "c", "d", "x", "y"), sep = "|")
target <- structure( c("red", "blue", "red", NA, NA, NA), names = nms)
checkEquals(target, unlist(attColor))
attType <- edgeData(g, attr = "type")
nms <- paste(c("a", "b", "d", "b", "d", "d"), c("b", "c", "c", "d", "x", "y"), sep = "|")
target <- structure( c("high", "low", NA, NA, NA, "high"), names = nms)
checkEquals(target, unlist(attType))
weightFun <- function(x, y) {
return(x + y )
}
colorFun <- function(x,y) {
if(x=="red" || y == "red")
return("white")
else
return("black")
}
setClass("myType", representation = representation(typ ="character"))
myType <- function(typ){ new("myType", typ = typ)}
typeFun <- function(x,y) {
if(is(x, "myType") && is(y, "myType")){
if(x@typ =="low" || y@typ == "med")
return("low")
else
return("high")
}
else {return (NA)}
}
nodeDataDefaults(g1, attr ="color") <- "cyan"
nodeDataDefaults(g1, attr="type") <- "unknown"
nodeData(g1,n = c("a", "b", "c"), attr ="color") <- c("red", "green", "blue")
nodeData(g1,n = c("b", "c"), attr ="type") <- c(myType("low"), myType("high"))
nodeDataDefaults(g2, attr ="color") <- "cyan"
nodeDataDefaults(g2, attr="type") <- "unknown"
nodeDataDefaults(g2, attr="test") <- "missing"
nodeData(g2,n = c("a", "b", "c", "z"), attr ="color") <- c("red", "green", "red","pink")
nodeData(g2,n = c("b", "c"), attr ="type") <- c(myType("med"), myType("low"))
nodeData(g2,n = c("a", "b", "c"), attr = "test") <- c("pass", "fail", "pass")
g <- graphUnion(g1, g2, edgeFun = list(weight = weightFun, color = colorFun))
attWeight <- edgeData(g, attr = "weight")
nms <- paste(c("a", "b", "d", "b", "d", "d"), c("b", "c", "c", "d", "x", "y"), sep = "|")
target <- structure( c( 2.4, 6.6, 2.1, 5.6, 6.4, 5.4), names = nms)
checkEquals(target, unlist(attWeight))
attColor <- edgeData(g, attr = "color")
nms <- paste(c("a", "b", "d", "b", "d", "d"), c("b", "c", "c", "d", "x", "y"), sep = "|")
target <- structure(c( "white", "black", "red", NA, "black", NA), names = nms)
checkEquals( target, unlist(attColor))
## FIX: the identical attType check appeared twice in a row (copy-paste);
## the redundant second copy has been removed.
attType <- edgeData(g, attr = "type")
nms <- paste(c("a", "b", "d", "b", "d", "d"), c("b", "c", "c", "d", "x", "y"), sep = "|")
target <- structure( c("high", "low", NA, NA, NA, "high"), names = nms)
checkEquals(target, unlist(attType))
}
## removeEdgesByWeight(): lessThan / greaterThan thresholds (alone and
## combined) drop the expected edges along with their attribute values;
## removing everything empties the weight and attribute vectors.
test_graphBAM_removeEdgesByWeight <- function() {
from = c("a", "b", "d", "d")
to = c("b", "c", "y", "x")
weight=c(2.2, 2.0, 0.4, 0.2)
df <- data.frame(from, to, weight)
g <- graphBAM(df, edgemode = "directed")
edgeDataDefaults(g, attr="color") <- "pink"
edgeData(g, from = from, to = to ,attr = "color") <- c("red", "blue", NA, "green")
## drop weights < 2.0 -> edges a->b (2.2) and b->c (2.0) remain
res <- removeEdgesByWeight(g, lessThan = 2.0)
checkEquals(attr(res@edgeSet@bit_vector, "nbitset"), 2)
checkEquals(res@edgeSet@weights, c(2.2, 2.0))
current <- unlist( edgeData(res, attr = "color"))
target <- structure(c("red", "blue"),
names = paste(c("a", "b"), c("b", "c"), sep = "|"))
checkEquals(target, current)
## drop weights > 1.9 -> only the two light d-edges remain
res <- removeEdgesByWeight(g, greaterThan = 1.9)
checkEquals(attr(res@edgeSet@bit_vector, "nbitset"), 2)
checkEquals(res@edgeSet@weights, c(0.2, 0.4))
current <- unlist( edgeData(res, attr = "color"))
target <- structure(c("green", NA),
names = paste(c("d", "d"), c("x", "y"), sep = "|"))
checkEquals(target, current)
## both bounds: keep only weights in [1.0, 2.0]
res <- removeEdgesByWeight(g, lessThan =1.0, greaterThan = 2)
checkEquals(res@edgeSet@weights, c(2.0))
current <- unlist( edgeData(res, attr = "color"))
target <- structure(c("blue"),
names = paste( "b", "c", sep = "|"))
checkEquals(target, current)
## drop everything
res <- removeEdgesByWeight(g, greaterThan = 0.1)
checkEquals(res@edgeSet@weights, numeric(0))
checkEquals(res@edgeSet@edge_attrs$color, character(0))
}
## Node attributes: defaults plus per-node assignment (including a
## non-scalar data.frame value), and attribute survival under subGraph().
test_graphBAM_nodeAttributes <- function(){
from = c("a", "b", "d", "d")
to = c("b", "c", "y", "x")
weight=c(2.2, 2.0, 0.4, 0.2)
df <- data.frame(from, to, weight)
g <- graphBAM(df, edgemode = "directed")
nodeDataDefaults(g, attr ="color") <- "blue"
nodeData(g, n = c("d","a"), attr = "color") <- c("red", "green")
current <- nodeData(g, attr = "color")
target <- as.list(structure( c("green", "blue", "blue", "red", "blue", "blue"),
names = c("a", "b", "c", "d", "x", "y")))
checkEquals(target, current)
## a data.frame stored as a single attribute value per node
nodeDataDefaults(g, attr="mat") <- NA
nodeData(g, n= c("x", "y"), attr = "mat") <- df
current <- nodeData(g, n= c("x", "y"), attr = "mat")
target <- list(x = df, y = df)
checkEquals(target, current)
sg <- subGraph(c("d","b"), g)
current <- nodeData(sg, attr = "color")
target <- as.list(structure(c("blue", "red"), names = c("b", "d")))
checkEquals(target, current)
}
## Non-scalar (S4 / data.frame / matrix) edge-attribute values on a
## directed graph: defaults and addressing by from, from+to, and to.
test_BAM_directed_attrs_s4 <- function() {
from = c("a", "a", "a", "x", "x", "c")
to = c("b", "c", "x", "y", "c", "a")
weight = c(2, 1, 3, 4, 5, 6)
df <- data.frame(from, to, weight)
bam <- graphBAM(df, edgemode = "directed")
edgeDataDefaults(bam, attr = "weight") <- 1.3
## a data.frame as the default attribute value
edgeDataDefaults (bam, attr = "vals") <- df
edgeData(bam, from = "a", attr= "vals") <- "unknown"
res <- edgeData(bam, attr="vals")
nmres <- c("c|a", "a|b", "a|c", "x|c", "a|x", "x|y")
target <- structure(list(df, "unknown", "unknown", df, "unknown",df), names = nmres)
checkEquals(res, target)
edgeDataDefaults(bam, attr = "mat") <- NA
edgeData(bam,from = "a", to = "x", attr= "mat") <- matrix(1)
res <- edgeData(bam, from = "a", attr = "mat")
nmres <- paste(c("a", "a", "a"), c("b", "c", "x"), sep = "|")
target <- structure( list(NA, NA, matrix(1)), names = nmres)
checkEquals(res, target)
edgeDataDefaults(bam, attr = "mk") <- NA
edgeData(bam,to = "c", attr= "mk") <- matrix(1)
res <- edgeData(bam, attr = "mk")
nmres <- paste(c("c", "a", "a", "x", "a", "x"), c("a", "b", "c", "c", "x", "y"), sep ="|")
target <- structure( list(NA, NA, matrix(1), matrix(1), NA ,NA), names = nmres)
checkEquals(res, target)
}
## Non-scalar edge-attribute values on an undirected graph: results are
## reported for both orientations of each edge ("a|b" and "b|a").
test_BAM_undirected_attrs_s4 <- function() {
from = c("a", "a", "a", "x")
to = c("b", "c", "x", "y")
weight = c(2, 1, 3, 4)
df <- data.frame(from, to, weight)
bam <- graphBAM(df, edgemode = "undirected")
edgeDataDefaults(bam, attr = "weight") <- 1.3
edgeDataDefaults(bam, attr = "vals") <- df
# edgeData(bam, attr = "weight") <- 1.3
# edgeData(bam, attr = "vals") <- df
edgeData(bam, from = "x", attr = "vals") <- "unknown"
res <- edgeData(bam, attr="vals")
nmres <- c("a|b", "a|c", "a|x", "x|y", "b|a", "c|a", "x|a", "y|x")
target <- structure(list(df, df, "unknown", "unknown", df, df, "unknown",
"unknown"), names = nmres)
checkEquals(res, target)
edgeDataDefaults(bam, attr ="mat") <- NA
edgeData(bam,from = "a", to = "x", attr= "mat") <- matrix(1)
res <- edgeData(bam, attr = "mat")
target <- structure(list(NA, NA, matrix(1), NA, NA, NA, matrix(1), NA),
names = nmres)
checkEquals(res, target)
edgeDataDefaults(bam, attr = "mk") <- NA
edgeData(bam,to = "c", attr= "mk") <- matrix(1)
res <- edgeData(bam, attr = "mk")
target <- structure( list(NA, matrix(1), NA, NA, NA, matrix(1), NA ,NA),
names = nmres)
checkEquals(res, target)
}
## graphIntersect(): keeps only edges present in both directed graphs, sets
## an attribute to NA where the two graphs disagree, drops attributes that
## exist on only one input, and applies user-supplied edgeFun resolvers
## when provided.
test_graphBAM_S4_Attribute_Intersection <- function() {
## S4 classes used as edge-attribute values, plus small constructors.
setClass("myColor", representation = representation(col ="character"))
setClass("myType", representation = representation(typ ="character"))
myColor <- function(col){ new("myColor", col = col)}
myType <- function(typ){ new("myType", typ = typ)}
## nodes a b c d x y
from = c("a", "b", "d", "d")
to = c("b", "c", "y", "x")
weight=c(1.2, 2.4, 5.4, 3.2)
df <- data.frame(from, to, weight)
g1 <- graphBAM(df, edgemode = "directed")
edgeData(g1, from = from, to = to ,attr = "weight") <- c(1.2, 2.4, 5.4, 3.2)
edgeDataDefaults(g1, attr="color") <- "cyan"
## "type" is defined on g1 only; the intersection must drop it (see the
## checkException calls below).
edgeDataDefaults(g1, attr="type") <- "unknown"
edgeData(g1, from = from, to = to ,attr = "color") <- c(myColor("red"),
myColor("blue"), NA, myColor("green"))
edgeData(g1, from = from, to = to , attr = "type") <- c(myType("high"),
myType("low"), myType("high"), NA)
## nodes a b c d x y z
from = c("a", "b", "b", "d", "d")
to = c("b", "c", "d", "c", "x")
weight=c(1.2, 4.2, 5.6, 2.1, 3.2)
df <- data.frame(from, to, weight)
g2 <- graphBAM(df, nodes = c("a","b","c", "d", "x", "y", "z"),
edgemode = "directed")
edgeDataDefaults(g2, attr ="color") <- "cyan"
edgeData(g2, from = from, to = to, attr = "color") <- c(myColor("red"),
myColor("blue"), NA, myColor("red"), myColor("yellow"))
## Default resolution: shared edges a->b (weights equal -> kept), b->c
## (2.4 vs 4.2 -> NA), d->x (equal -> kept).
g <- graphIntersect(g1, g2)
df <- extractFromTo(g)
tmp <- data.frame( from = c("a", "b", "d"), to = c("b", "c", "x"),
weight = c(1.2, NA, 3.2))
checkEquals(tmp, df)
## Colors agree on a|b and b|c; d|x disagrees (green vs yellow) -> NA.
attColor <- edgeData(g, attr = "color")
nms <- paste(c("a", "b", "d"), c("b", "c", "x"), sep = "|")
target <- structure( c(myColor("red"), myColor("blue"), NA), names = nms)
checkEquals(target, unlist(attColor))
## "type" existed only on g1, so it must not survive the intersection.
checkException(edgeData(g, attr = "type"))
## Custom resolvers: weights summed, colors combined to white/black.
weightFun <- function(x, y) {
return(x + y )
}
colorFun <- function(x,y) {
if(x@col=="red" && y@col == "red")
return("white")
else
return("black")
}
g <- graphIntersect(g1, g2, edgeFun =list(weight = weightFun, color = colorFun))
df <- extractFromTo(g)
## Summed weights: 1.2+1.2, 2.4+4.2, 3.2+3.2.
tmp <- data.frame( from = c("a", "b", "d"),
to = c("b", "c", "x"),
weight = c(2.4, 6.6 , 6.4))
checkEquals(tmp, df)
attColor <- edgeData(g, attr = "color")
nms <- paste(c("a", "b", "d"), c("b", "c", "x"), sep = "|")
target <- structure( c("white", "black", "black"), names = nms)
checkEquals(target, unlist(attColor))
checkException(edgeData(g, attr = "type"))
}
## graphUnion(): the union keeps every edge from either graph, NAs out
## attribute values where the inputs disagree, and applies edgeFun
## resolvers when supplied.
## FIX: the final attType assertion block was a verbatim copy-paste
## duplicate (it re-ran the identical check); the duplicate has been
## removed. The repeated assignments of the identical `nms` vector are
## also collapsed into one.
test_graphBAM_S4_Attribute_Union <- function() {
    ## S4 classes used as edge-attribute values, plus constructors.
    setClass("myColor", representation = representation(col ="character"))
    setClass("myType", representation = representation(typ ="character"))
    myColor <- function(col){ new("myColor", col = col)}
    myType <- function(typ){ new("myType", typ = typ)}
    ## nodes a b c d x y
    from = c("a", "b", "d", "d")
    to = c("b", "c", "y", "x")
    weight=c(1.2, 2.4, 5.4, 3.2)
    df <- data.frame(from, to, weight)
    g1 <- graphBAM(df, edgemode = "directed")
    edgeData(g1, from = from, to = to ,attr = "weight") <- c(1.2, 2.4, 5.4, 3.2)
    edgeDataDefaults(g1, attr = "color") <- "cyan"
    ## "type" exists on g1 only; in the union it survives with NA on the
    ## edges g1 does not have.
    edgeDataDefaults(g1, attr = "type") <- "missing"
    edgeData(g1, from = from, to = to ,attr = "color") <- c(myColor("red"),
        myColor("blue"), NA, myColor("green"))
    edgeData(g1, from = from, to = to , attr = "type") <- c(myType("high"),
        myType("low"), myType("high"), NA)
    ## nodes a b c d x y z
    from = c("a", "b", "b", "d", "d")
    to = c("b", "c", "d", "c", "x")
    weight=c(1.2, 4.2, 5.6, 2.1, 3.2)
    df <- data.frame(from, to, weight)
    g2 <- graphBAM(df, nodes = c("a","b","c", "d", "x", "y", "z"),
        edgemode = "directed")
    edgeDataDefaults(g2, attr = "color") <- "cyan"
    edgeData(g2, from = from, to = to, attr = "color") <- c(myColor("red"),
        myColor("blue"), NA, myColor("red"), myColor("yellow"))
    ## Default resolution: shared edges with conflicting values become NA
    ## (b->c weight 2.4 vs 4.2); edges unique to one graph keep their value.
    g <- graphUnion(g1, g2)
    df <- extractFromTo(g)
    tmp <- data.frame( from = c("a", "b", "d", "b", "d", "d"),
        to = c("b", "c", "c", "d", "x", "y"),
        weight = c(1.2, NA, 2.1, 5.6, 3.2, 5.4))
    checkEquals(tmp, df)
    ## Edge-name order shared by all attribute checks below.
    nms <- paste(c("a", "b", "d", "b", "d", "d"),
                 c("b", "c", "c", "d", "x", "y"), sep = "|")
    attColor <- edgeData(g, attr = "color")
    target <- structure( c(myColor("red"), myColor("blue"), myColor("red"), NA, NA, NA), names = nms)
    checkEquals(target, unlist(attColor))
    attType <- edgeData(g, attr = "type")
    target <- structure( c(myType("high"), myType("low"), NA, NA, NA, myType("high")), names = nms)
    checkEquals(target, unlist(attType))
    ## Custom resolvers: weights summed; colors -> "white" when either side
    ## is red, else "black".
    weightFun <- function(x, y) {
        return(x + y )
    }
    colorFun <- function(x,y) {
        if(x@col =="red" || y@col == "red")
            return("white")
        else
            return("black")
    }
    g <- graphUnion(g1, g2, edgeFun = list(weight = weightFun, color = colorFun))
    attWeight <- edgeData(g, attr = "weight")
    target <- structure( c( 2.4, 6.6, 2.1, 5.6, 6.4, 5.4), names = nms)
    checkEquals(target, unlist(attWeight))
    ## Resolver runs only on shared edges; unique edges keep their own value
    ## (d|c keeps myColor("red")) and NA inputs stay NA.
    attColor <- edgeData(g, attr = "color")
    target <- structure(c( "white", "black", myColor("red"), NA, "black", NA), names = nms)
    checkEquals( target, unlist(attColor))
    ## "type" has no resolver entry, so it behaves as in the plain union.
    attType <- edgeData(g, attr = "type")
    target <- structure( c(myType("high"), myType("low"), NA, NA, NA, myType("high")), names = nms)
    checkEquals(target, unlist(attType))
}
test_graphBAM_addNode1 <- function(){
    ## Node attributes survive addNode(); the newly added nodes pick up
    ## each attribute's default value.
    g <- graphBAM(data.frame(from = c("a", "b", "d", "d"),
                             to = c("b", "c", "y", "x"),
                             weight = c(2.2, 2.0, 0.4, 0.2)),
                  edgemode = "directed")
    nodeDataDefaults(g, attr = "color") <- "pink"
    nodeData(g, n = c("d", "a"), attr = "color") <- c("red", "green")
    nodeDataDefaults(g, attr = "type") <- "unknown"
    nodeData(g, n = c("a", "b", "y", "d"), attr = "type") <-
        c("high", "med", "high", "low")
    grown <- addNode(c("q", "ss"), g)
    all_nodes <- c("a", "b", "c", "d", "q", "ss", "x", "y")
    ## "q" and "ss" get the defaults; the explicit settings are untouched.
    want <- as.list(structure(c("green", "pink", "pink", "red",
                                "pink", "pink", "pink", "pink"),
                              names = all_nodes))
    checkEquals(want, nodeData(grown, attr = "color"))
    want <- as.list(structure(c("high", "med", "unknown", "low",
                                "unknown", "unknown", "unknown", "high"),
                              names = all_nodes))
    checkEquals(want, nodeData(grown, attr = "type"))
}
test_graphBAM_addNode2 <- function(){
    ## addEdge() keeps existing edge attributes; the added edges carry the
    ## supplied weights and the defaults for every other attribute.
    g <- graphBAM(data.frame(from = c("a", "b", "d", "d"),
                             to = c("b", "c", "y", "x"),
                             weight = c(2.2, 2.0, 0.4, 0.2)),
                  edgemode = "directed")
    edgeDataDefaults(g, attr = "color") <- "blue"
    edgeDataDefaults(g, attr = "type") <- "unknown"
    edgeData(g, from = c("d", "a"), to = c("y", "b"), attr = "color") <-
        c("red", "green")
    edgeData(g, from = c("a", "b"), to = c("b", "c"), attr = "type") <-
        c("low", "high")
    g1 <- addEdge(from = c("d", "b"), to = c("c", "x"), g,
                  weights = c(4.0, 10.0))
    ## Edge-name order shared by all three attribute checks.
    keys <- paste(c("a", "b", "d", "b", "d", "d"),
                  c("b", "c", "c", "x", "x", "y"), sep = "|")
    want <- as.list(structure(c(2.2, 2, 4, 10, 0.2, 0.4), names = keys))
    checkEquals(want, edgeData(g1, attr = "weight"))
    want <- as.list(structure(c("green", "blue", "blue", "blue",
                                "blue", "red"),
                              names = keys))
    checkEquals(want, edgeData(g1, attr = "color"))
    want <- as.list(structure(c("low", "high", "unknown", "unknown",
                                "unknown", "unknown"),
                              names = keys))
    checkEquals(want, edgeData(g1, attr = "type"))
}
## graphUnion() with a node-attribute resolver (nodeFun): for nodes present
## in both graphs, conflicting values become NA unless the attribute has a
## resolver entry; nodes unique to one graph keep their values/defaults.
test_graphBAM_nodeUnion_Attributes <- function(use.factors=TRUE){
setClass("myType", representation = representation(typ ="character"))
myType <- function(typ){ new("myType", typ = typ)}
## Resolver for "gene": combines two myType values into "ax"/"ab";
## anything else collapses to character NA.
testFun <- function(x,y) {
if(is(x, "myType") && is(y, "myType")){
if(x@typ =="aa" || y@typ == "ac")
return("ax")
else
return("ab")
} else return(as.character(NA))
}
funList <- structure(list(testFun), names ="gene")
ft1 <- data.frame(from=c("a", "a", "a", "b", "b"),
to =c("b", "c", "d", "a", "d"),
weight=c(1, 3.1, 5.4, 1, 2.2),
stringsAsFactors = use.factors)
g1 <- graphBAM(ft1, edgemode="directed")
nodeDataDefaults(g1, attr="color") <- "cyan"
nodeDataDefaults(g1, attr="type") <- "missing"
## "kp" exists only on g1; the union keeps it with g1's values and the
## default elsewhere.
nodeDataDefaults(g1, attr="kp") <- "missing"
nodeDataDefaults(g1, attr="gene") <- "unknown"
nodeData(g1, n = c("a", "b", "c") , attr = "color") <- c("red", "green", "blue")
nodeData(g1, n = c("a", "b"), attr = "type") <- c("low", "high")
nodeData(g1, n = c("a", "b"), attr = "kp") <- c("kplow", "kphigh")
nodeData(g1, n = c("a", "b"), attr = "gene") <- c(myType("aa"), myType("bt"))
ft1 <- data.frame(from=c("a", "a", "b"),
to=c("b", "x", "z"),
weight=c(6, 5, 2),
stringsAsFactors = use.factors)
g2 <- graphBAM(ft1,nodes = c("a","b", "c", "d", "x", "y", "z"), edgemode = "directed")
nodeDataDefaults(g2, attr ="color") <- "cyan"
nodeDataDefaults(g2, attr="type") <- "missing"
nodeDataDefaults(g2, attr="gene") <- "unknown"
nodeData(g2, n = c("a", "b", "x", "y", "z") , attr = "color") <- c("red", "red", "green", "pink", "yellow")
nodeData(g2, n = c("a", "b"), attr = "type") <- c("low", "high")
nodeData(g2, n = c("a", "b"), attr = "gene") <- c(myType("at"), myType("kt"))
res <- graphUnion(g1, g2, nodeFun = funList)
## color (no resolver): "a" agrees (red); "b" (green vs red) and "c"
## (blue vs default cyan) disagree -> NA; the rest come from whichever
## graph set them, or the default.
current <- nodeData(res, attr = "color")
cn <- as.character(NA)
target <- as.list( structure(c("red", cn, cn, "cyan", "green", "pink", "yellow"),
names = c("a", "b", "c", "d", "x", "y", "z")))
checkEquals(target, current)
## type: both graphs agree on a/b; everything else is the default.
current <- nodeData(res, attr = "type")
target <- as.list( structure(c("low", "high", "missing", "missing", "missing", "missing", "missing"),
names = c("a", "b", "c", "d", "x", "y", "z")))
checkEquals(target, current)
## kp: g1-only attribute, preserved as-is.
current <- nodeData(res, attr = "kp")
target <- as.list( structure(c("kplow", "kphigh", "missing", "missing", "missing",
"missing", "missing"),
names = c("a", "b", "c", "d", "x", "y", "z")))
checkEquals(target, current)
## gene: resolved by testFun -- "a" (aa + at -> "ax"), "b" (bt + kt ->
## "ab"); c/d mix myType with the character default -> NA.
current <- nodeData(res, n = c("a", "b", "c", "d"), attr ="gene")
target <- as.list( structure(c("ax", "ab", cn ,cn), names = c("a", "b", "c", "d")))
checkEquals(target, current)
## Nodes absent from g1 keep g2's default for "gene".
current <- nodeData(res, n= c( "x", "y", "z"), attr ="gene")
target <- as.list( structure(c("unknown","unknown", "unknown"),
names = c("x", "y", "z")))
checkEquals(target, current)
}
test_graphBAM_removeNode <- function(){
    ## removeNode() drops the nodes, their incident edges, and the
    ## corresponding node/edge attribute entries.
    nfrom <- c("a", "b", "d", "d")
    nto <- c("b", "c", "y", "x")
    g <- graphBAM(data.frame(from = nfrom, to = nto,
                             weight = c(2.2, 2.0, 0.4, 0.2)),
                  edgemode = "directed")
    nodeDataDefaults(g, attr = "name") <- "NN"
    all_nodes <- c("a", "b", "c", "d", "x", "y")
    nodeData(g, n = all_nodes, attr = "name") <- all_nodes
    edgeDataDefaults(g, attr = "name") <- "EE"
    edgeData(g, from = nfrom, to = nto, attr = "name") <- paste0(nfrom, nto)
    ## Removing x and b leaves only the d->y edge.
    pruned <- removeNode(c("x", "b"), g)
    want <- as.list(structure(c("a", "c", "d", "y"),
                              names = c("a", "c", "d", "y")))
    checkEquals(want, nodeData(pruned, attr = "name"))
    want <- as.list(structure("dy", names = "d|y"))
    checkEquals(edgeData(pruned, attr = "name"), want)
    ## Removing x and a (from the original graph) leaves b->c and d->y.
    pruned <- removeNode(c("x", "a"), g)
    want <- as.list(structure(c("bc", "dy"), names = c("b|c", "d|y")))
    checkEquals(want, edgeData(pruned, attr = "name"))
}
|
## Exploratory analysis of stock-status priors (B/Bmsy, U/Umsy) from the
## RAM Legacy stock assessment database (RLSADB v4.25).
## NOTE(review): rm(list=ls()) wipes the user's workspace when sourced;
## generally discouraged in shared scripts.
rm(list=ls())
########################################
## packages
########################################
library(ggplot2)   # figures
library(dplyr)     # data manipulation
library(reshape2)  # melt() for wide -> long reshaping
# library(nlme)
########################################
## directories
########################################
## Hard-coded Windows project root; figures/ is created if absent.
wd <- "C:\\merrill\\status_priors"
fig_dir <- file.path(wd, "figures")
dir.create(fig_dir, showWarnings=FALSE)
data_dir <- file.path(wd, "data")
########################################
## figure theme
########################################
theme_lsd <- function (base_size = 14, base_family = "")
{
    ## Shared ggplot2 theme for every figure in this script: theme_grey
    ## adjusted to a white panel with grey gridlines/border, black tick
    ## marks, slightly smaller axis text, and a padded x-axis title.
    base_theme <- theme_grey(base_size = base_size, base_family = base_family)
    overrides <- theme(
        axis.title.x = element_text(margin = margin(10, 0, 0, 0)),
        axis.text = element_text(size = rel(0.8)),
        axis.ticks = element_line(colour = "black"),
        legend.key = element_rect(colour = "grey80"),
        panel.background = element_rect(fill = "white", colour = NA),
        panel.border = element_rect(fill = NA, colour = "grey50"),
        panel.grid.major = element_line(colour = "grey90", size = 0.2),
        panel.grid.minor = element_line(colour = "grey98", size = 0.5),
        strip.background = element_rect(fill = "grey80", colour = "grey50", size = 0.2)
    )
    base_theme %+replace% overrides
}
########################################
## combine information
########################################
## Load the RAM Legacy (v4.25) tables and join them into one long data
## frame with a row per stock-year-variable (B/Bmsy or U/Umsy).
timeseries <- read.csv(file.path(data_dir, "RLSADB_v4.25_timeseries.csv"), stringsAsFactors=FALSE, header=TRUE)
## Wide -> long: one row per stock-year for each status measure.
timeseries <- melt(timeseries, measure.vars=c("BdivBmsypref", "UdivUmsypref"))
## Drop missing status values.
## BUGFIX: the original used timeseries[-which(is.na(...)), ], which
## returns ZERO rows whenever there are no NAs (-integer(0) selects
## nothing); logical indexing is safe in both cases.
timeseries <- timeseries[!is.na(timeseries$value), ]
stockinfo <- read.csv(file.path(data_dir, "RLSADB_v4.25_stockinfo.csv"), stringsAsFactors=FALSE, header=TRUE)
## NOTE(review): assumes column 1 of stockinfo is the stock identifier
## (the original renamed it unconditionally) -- confirm against the csv.
colnames(stockinfo)[1] <- "stockid"
stockinfo <- stockinfo %>% dplyr::select("stockid", "region", "scientificname")
stockinfo <- stockinfo[which(stockinfo$stockid %in% timeseries$stockid),]
bioparams <- read.csv(file.path(data_dir, "RLSADB_v4.25_bioparams.csv"), stringsAsFactors=FALSE)
## Keep only stocks present in the time series; M (natural mortality) is
## read as character, so coerce it (non-numeric entries become NA).
bioparams <- bioparams[which(bioparams$stockid %in% timeseries$stockid),] %>%
dplyr::mutate(M = as.numeric(M))
taxonomy <- read.csv(file.path(data_dir, "RLSADB_v4.25_taxonomy.csv"), stringsAsFactors=FALSE)
taxonomy <- taxonomy[which(taxonomy$scientificname %in% stockinfo$scientificname),]
## Join everything, relabel the status variables, cap extreme ratios at
## 10, and drop rows with any missing field.
data <- timeseries %>%
dplyr::select(stockid, stocklong, year, variable, value) %>%
dplyr::mutate(variable = ifelse(variable=="BdivBmsypref","B/Bmsy", ifelse(variable=="UdivUmsypref","U/Umsy",NA))) %>%
dplyr::full_join(stockinfo, by="stockid") %>%
dplyr::full_join(bioparams %>% dplyr::select(stockid, M), by="stockid") %>%
dplyr::full_join(taxonomy %>% dplyr::select(scientificname, FisheryType, taxGroup), by="scientificname") %>%
dplyr::mutate(value = ifelse(value > 10, 10, value)) %>%
na.omit()
## Factor versions of the grouping variables, integer-coded copies
## (region2 etc.) used in the regressions below, and log transforms.
data$region <- as.factor(data$region)
data$FisheryType <- as.factor(data$FisheryType)
data$taxGroup <- as.factor(data$taxGroup)
region_vec <- unique(data$region)
fishtype_vec <- unique(data$FisheryType)
taxgroup_vec <- unique(data$taxGroup)
data <- data %>%
dplyr::mutate(region2 = as.factor(match(region, region_vec))) %>%
dplyr::mutate(FisheryType2 = as.factor(match(FisheryType, fishtype_vec))) %>%
dplyr::mutate(taxGroup2 = as.factor(match(taxGroup, taxgroup_vec))) %>%
dplyr::mutate(logvalue = log(value)) %>%
dplyr::mutate(logM = log(M))
## Per-stock status summaries: B/Bmsy and U/Umsy collapsed to the mean
## over each stock's last (up to) 5 years of data.
## Helper extracted from two verbatim-duplicated lapply bodies: for each
## stock, averages `value` over the final years, stores it (and its log)
## on every remaining row, drops `year`, and collapses to one row.
## NOTE(review): assumes each stock's rows are already ordered by year --
## confirm upstream sorting.
summarize_last5 <- function(vdata, stocks){
lapply(seq_along(stocks), function(i){
sub <- vdata %>% filter(stockid == stocks[i])
yr <- sub$year
if(length(yr) >= 5) sub2 <- sub %>% filter(year %in% yr[(length(yr)-4):length(yr)])
if(length(yr) < 5) sub2 <- sub
avg <- mean(sub2$value)
sub3 <- sub2 %>% mutate(value = avg) %>%
mutate(logvalue = log(avg)) %>%
select(-year)
## With year removed and value replaced by the mean, the yearly rows are
## identical; unique() keeps a single per-stock record.
return(unique(sub3))
})
}
## B/Bmsy averaged across last 5 years
bdata <- data %>% filter(variable == 'B/Bmsy')
bstocks <- unique(bdata$stockid)
sbdata <- do.call(rbind, summarize_last5(bdata, bstocks))
## U/Umsy averaged across last 5 years
udata <- data %>% filter(variable == 'U/Umsy')
ustocks <- unique(udata$stockid)
sudata <- do.call(rbind, summarize_last5(udata, ustocks))
## Combined per-stock summary plus the per-stock covariate table.
sdata <- rbind(sbdata, sudata)
allstocks <- unique(sdata %>% select(stockid, M, logM, region, FisheryType, taxGroup))
#########################################
## plot the data against single factors
#########################################
## factors -- region of ocean, type of fish, natural mortality rate
## scatterplots
## Status ratio vs natural mortality with an lm smoother, on the natural
## and log scales; the dashed line marks the Bmsy/Umsy reference.
p <- ggplot(sdata) +
geom_point(aes(x=M, y=value)) +
stat_smooth(aes(x=M, y=value), method="lm") +
facet_grid(variable ~ ., scales="free") +
geom_hline(yintercept=1, color="black", lty=2) +
theme_lsd()
ggsave(file.path(fig_dir, "scatterplots_M.png"), p)
p <- ggplot(sdata) +
geom_point(aes(x=logM, y=logvalue)) +
stat_smooth(aes(x=logM, y=logvalue), method="lm") +
facet_grid(variable ~ ., scales="free") +
geom_hline(yintercept=0, color="black", lty=2) +
theme_lsd()
ggsave(file.path(fig_dir, "scatterplots_logM.png"), p)
## boxplots
## NOTE(review): the file names say "violin" but these draw geom_boxplot.
p <- ggplot(sdata) +
geom_boxplot(aes(x=variable, y=value, color=variable, fill=variable)) +
geom_hline(yintercept=1, color="black", lty=2) +
guides(fill=FALSE, color=FALSE) +
theme_lsd()
ggsave(file.path(fig_dir, "violin_all.png"), p)
p <- ggplot(sdata) +
geom_boxplot(aes(x=variable, y=logvalue, color=variable, fill=variable)) +
geom_hline(yintercept=0, color="black", lty=2) +
guides(fill=FALSE, color=FALSE) +
theme_lsd()
ggsave(file.path(fig_dir, "violin_logall.png"), p)
## Distribution of each status ratio by region, natural and log scale.
p <- ggplot(sdata) +
geom_boxplot(aes(x=region, y=value, color=region, fill=region)) +
facet_grid(variable ~ ., scales="free") +
geom_hline(yintercept=1, color="black", lty=2) +
theme_lsd() +
theme(axis.text.x=element_blank())
ggsave(file.path(fig_dir, "violin_region.png"), p, width=10)
p <- ggplot(sdata) +
geom_boxplot(aes(x=region, y=logvalue, color=region, fill=region)) +
facet_grid(variable ~ ., scales="free") +
geom_hline(yintercept=0, color="black", lty=2) +
theme_lsd() +
theme(axis.text.x=element_blank())
ggsave(file.path(fig_dir, "violin_logregion.png"), p, width=10)
## Same by fishery type.
p <- ggplot(sdata) +
geom_boxplot(aes(x=FisheryType, y=value, color=FisheryType, fill=FisheryType)) +
facet_grid(variable ~ ., scales="free") +
geom_hline(yintercept=1, color="black", lty=2) +
theme_lsd() +
theme(axis.text.x=element_blank())
ggsave(file.path(fig_dir, "violin_fishtype.png"), p, width=10)
p <- ggplot(sdata) +
geom_boxplot(aes(x=FisheryType, y=logvalue, color=FisheryType, fill=FisheryType)) +
facet_grid(variable ~ ., scales="free") +
geom_hline(yintercept=0, color="black", lty=2) +
theme_lsd() +
theme(axis.text.x=element_blank())
ggsave(file.path(fig_dir, "violin_logfishtype.png"), p, width=10)
## correlation between factors
## How natural mortality relates to the categorical predictors (checks for
## confounding between the candidate regression covariates).
p <- ggplot(allstocks) +
geom_boxplot(aes(x=region, y=M, color=region, fill=region)) +
theme_lsd() +
theme(axis.text.x=element_blank())
ggsave(file.path(fig_dir, "region_by_M.png"), p, width=10)
## BUGFIX: this log-scale plot previously saved to "region_by_M.png" as
## well, overwriting the natural-scale figure above.
p <- ggplot(allstocks) +
geom_boxplot(aes(x=region, y=logM, color=region, fill=region)) +
theme_lsd() +
theme(axis.text.x=element_blank())
ggsave(file.path(fig_dir, "region_by_logM.png"), p, width=10)
p <- ggplot(allstocks) +
geom_boxplot(aes(x=FisheryType, y=M, color=FisheryType, fill=FisheryType)) +
theme_lsd() +
theme(axis.text.x=element_blank())
ggsave(file.path(fig_dir, "fishtype_by_M.png"), p, width=10)
p <- ggplot(allstocks) +
geom_boxplot(aes(x=FisheryType, y=logM, color=FisheryType, fill=FisheryType)) +
theme_lsd() +
theme(axis.text.x=element_blank())
ggsave(file.path(fig_dir, "fishtype_by_logM.png"), p, width=10)
## Pairwise scatterplot matrices of the three covariates.
png(file.path(fig_dir, "factor_correlation_logM.png"), height=8, width=10, units="in", res=200)
pairs(data.frame("logM"=allstocks$logM, "FishType"=allstocks$FisheryType, "Region"=allstocks$region))
dev.off()
png(file.path(fig_dir, "factor_correlation.png"), height=8, width=10, units="in", res=200)
## BUGFIX: this panel plots M on the natural scale but was labelled "logM".
pairs(data.frame("M"=allstocks$M, "FishType"=allstocks$FisheryType, "Region"=allstocks$region))
dev.off()
#########################################
## regression
#########################################
## U/UMSY
## Candidate linear models for log(U/Umsy): single factors first, then
## additive combinations.
## NOTE(review): u1 uses logM but u5-u7/u10-u11 use M, while the
## cross-validation section below uses logM throughout -- confirm which
## scale was intended here.
u1 <- lm(logvalue ~ logM, data=sudata)
summary(u1)
## NOTE(review): the points are on the natural scale while fitted(u1) is
## on the log scale, so this quick-look overlay mixes scales.
plot(sudata$value, col="gray", lwd=2)
lines(fitted(u1))
boxplot(resid(u1))
u2 <- lm(logvalue ~ region2, data=sudata)
summary(u2)
plot(sudata$logvalue, col="gray", lwd=2)
lines(fitted(u2))
u3 <- lm(logvalue ~ FisheryType2, data=sudata)
summary(u3)
plot(sudata$logvalue, col="gray", lwd=2)
lines(fitted(u3))
boxplot(resid(u3))
u4 <- lm(logvalue ~ taxGroup2, data=sudata)
summary(u4)
u5 <- lm(logvalue ~ region2 + M, data=sudata)
summary(u5)
plot(sudata$logvalue, col="gray", lwd=2)
lines(fitted(u5))
boxplot(resid(u5))
u6 <- lm(logvalue ~ FisheryType2 + M, data=sudata)
summary(u6)
plot(sudata$logvalue, col="gray", lwd=2)
lines(fitted(u6))
boxplot(resid(u6))
u7 <- lm(logvalue ~ taxGroup2 + M, data=sudata)
summary(u7)
u8 <- lm(logvalue ~ region2 + FisheryType2, data=sudata)
summary(u8)
plot(sudata$logvalue, col="gray", lwd=2)
lines(fitted(u8))
u9 <- lm(logvalue ~ region2 + taxGroup2, data=sudata)
summary(u9)
u10 <- lm(logvalue ~ M + region2 + FisheryType2, data=sudata)
summary(u10)
plot(sudata$logvalue, col="gray", lwd=2)
lines(fitted(u10))
u11 <- lm(logvalue ~ M + region2 + taxGroup2, data=sudata)
summary(u11)
## Model comparison: adjusted R^2 plus AIC/deltaAIC across all models,
## and an AIC table restricted to the single-factor models.
umods <- list("M"=u1, "region"=u2, "fishtype"=u3, "taxagroup"=u4,
"region_M"=u5, "fishtype_M"=u6, "taxagroup_M"=u7,
"region_fishtype"=u8, "region_taxagroup"=u9,
"all_fishtype"=u10, "all_taxagroup"=u11)
ur2 <- sapply(1:length(umods), function(x) summary(umods[[x]])$adj.r.squared)
aicu <- AIC(u1, u2, u3, u4, u5, u6, u7, u8, u9, u10, u11) %>%
dplyr::mutate(deltaAIC = AIC - min(AIC))
aicu1 <- AIC(u1, u2, u3, u4) %>%
dplyr::mutate(deltaAIC = AIC - min(AIC))
aicu$R2 <- ur2
aicu$ModelName <- names(umods)
aicu1$ModelName <- names(umods)[1:4]
write.csv(aicu, file.path(fig_dir, "UUmsy_AIC.csv"))
### B/Bmsy
## Same candidate model set fit to log(B/Bmsy); quick-look fitted/residual
## plots interleaved as for the U/Umsy models above.
b1 <- lm(logvalue ~ logM, data=sbdata)
summary(b1)
## NOTE(review): natural-scale points vs log-scale fitted line (same
## scale-mixing as the U/Umsy overlay).
plot(sbdata$value, col="gray", lwd=2)
lines(fitted(b1))
boxplot(resid(b1))
b2 <- lm(logvalue ~ region2, data=sbdata)
summary(b2)
plot(sbdata$logvalue, col="gray", lwd=2)
lines(fitted(b2))
b3 <- lm(logvalue ~ FisheryType2, data=sbdata)
summary(b3)
plot(sbdata$logvalue, col="gray", lwd=2)
lines(fitted(b3))
boxplot(resid(b3))
b4 <- lm(logvalue ~ taxGroup2, data=sbdata)
summary(b4)
b5 <- lm(logvalue ~ region2 + M, data=sbdata)
summary(b5)
plot(sbdata$logvalue, col="gray", lwd=2)
lines(fitted(b5))
boxplot(resid(b5))
b6 <- lm(logvalue ~ FisheryType2 + M, data=sbdata)
summary(b6)
plot(sbdata$logvalue, col="gray", lwd=2)
lines(fitted(b6))
boxplot(resid(b6))
b7 <- lm(logvalue ~ taxGroup2 + M, data=sbdata)
summary(b7)
b8 <- lm(logvalue ~ region2 + FisheryType2, data=sbdata)
summary(b8)
plot(sbdata$logvalue, col="gray", lwd=2)
lines(fitted(b8))
b9 <- lm(logvalue ~ region2 + taxGroup2, data=sbdata)
summary(b9)
b10 <- lm(logvalue ~ M + region2 + FisheryType2, data=sbdata)
summary(b10)
plot(sbdata$logvalue, col="gray", lwd=2)
lines(fitted(b10))
b11 <- lm(logvalue ~ M + region2 + taxGroup2, data=sbdata)
summary(b11)
## B/Bmsy model comparison: adjusted R^2 plus AIC/deltaAIC tables,
## mirroring the U/Umsy tables above.
bmods <- list("M"=b1, "region"=b2, "fishtype"=b3, "taxagroup"=b4,
"region_M"=b5, "fishtype_M"=b6, "taxagroup_M"=b7,
"region_fishtype"=b8, "region_taxagroup"=b9,
"all_fishtype"=b10, "all_taxagroup"=b11)
br2 <- sapply(1:length(bmods), function(x) summary(bmods[[x]])$adj.r.squared)
aicb <- AIC(b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11) %>%
dplyr::mutate(deltaAIC = AIC - min(AIC))
aicb1 <- AIC(b1, b2, b3, b4) %>%
dplyr::mutate(deltaAIC = AIC - min(AIC))
aicb$R2 <- br2
aicb$ModelName <- names(bmods)
aicb1$ModelName <- names(bmods)[1:4]
## BUGFIX: this previously wrote to "UUmsy_AIC.csv", overwriting the
## U/Umsy AIC table saved above; the B/Bmsy table gets its own file.
write.csv(aicb, file.path(fig_dir, "BBmsy_AIC.csv"))
############################
## diagnostic plots
############################
## Standard lm diagnostic panels (plot.lm, 2x2 layout) for selected
## B/Bmsy and U/Umsy models, each saved to its own png.
png(file.path(fig_dir, "diagnostic_BM.png"), height=6, width=8, units="in", res=200)
par(mfrow=c(2,2))
plot(b1)
dev.off()
png(file.path(fig_dir, "diagnostic_Bregion.png"), height=6, width=8, units="in", res=200)
par(mfrow=c(2,2))
plot(b2)
dev.off()
png(file.path(fig_dir, "diagnostic_Bfishtype.png"), height=6, width=8, units="in", res=200)
par(mfrow=c(2,2))
plot(b3)
dev.off()
png(file.path(fig_dir, "diagnostic_B_all_fishtype.png"), height=6, width=8, units="in", res=200)
par(mfrow=c(2,2))
plot(b10)
dev.off()
png(file.path(fig_dir, "diagnostic_UM.png"), height=6, width=8, units="in", res=200)
par(mfrow=c(2,2))
plot(u1)
dev.off()
png(file.path(fig_dir, "diagnostic_Uregion.png"), height=6, width=8, units="in", res=200)
par(mfrow=c(2,2))
plot(u2)
dev.off()
png(file.path(fig_dir, "diagnostic_Ufishtype.png"), height=6, width=8, units="in", res=200)
par(mfrow=c(2,2))
plot(u3)
dev.off()
png(file.path(fig_dir, "diagnostic_U_fishtype_M.png"), height=6, width=8, units="in", res=200)
par(mfrow=c(2,2))
plot(u6)
dev.off()
png(file.path(fig_dir, "diagnostic_U_all_fishtype.png"), height=6, width=8, units="in", res=200)
par(mfrow=c(2,2))
plot(u10)
dev.off()
## Per-region lm fits of log status vs log M, faceted by variable.
p <- ggplot(sdata, aes(x=logM, y=logvalue , colour=factor(region))) +
geom_point() +
stat_smooth(method=lm, fullrange=FALSE) +
facet_grid(variable ~ .)
ggsave(file.path(fig_dir, "logM_logvalue_byregion_lm.png"), p, width=10)
############################
## residual plots
############################
### natural mortality
## Violin plots of residuals from the M-only models, side by side for the
## two status ratios.
bres1 <- data.frame(variable="B/BMSY", value=b1$residuals)
ures1 <- data.frame(variable="U/UMSY", value=u1$residuals)
mres1 <- rbind(bres1, ures1)
p <- ggplot(mres1) +
geom_violin(aes(x=variable, y=value, color=variable, fill=variable)) +
theme_lsd() +
guides(color=FALSE, fill=FALSE) +
geom_hline(yintercept=0, color="black", lty=2) +
xlab("") + ylab("Residuals")
ggsave(file.path(fig_dir, "M_residuals.png"), p, width=6)
### region
## NOTE(review): fortify() REPLACES the fitted lm objects b2/u2 (and b3/u3
## below) with data frames, so those models are no longer usable as models
## after this point.
b2 <- fortify(b2)
u2 <- fortify(u2)
bres2 <- data.frame(variable="B/BMSY", value=b2$.resid, region=b2$region)
ures2 <- data.frame(variable="U/UMSY", value=u2$.resid, region=u2$region)
mres2 <- rbind(bres2, ures2)
p <- ggplot(mres2) +
geom_violin(aes(x=region, y=value, color=region, fill=region)) +
facet_grid(variable ~ .) +
theme_lsd() +
geom_hline(yintercept=0, color="black", lty=2) +
ylab("Residuals") +
theme(axis.text.x=element_blank())
ggsave(file.path(fig_dir, "region_residuals.png"), p, width=10)
### fish type
b3 <- fortify(b3)
u3 <- fortify(u3)
bres3 <- data.frame(variable="B/BMSY", value=b3$.resid, FisheryType=b3$FisheryType)
ures3 <- data.frame(variable="U/UMSY", value=u3$.resid, FisheryType=u3$FisheryType)
mres3 <- rbind(bres3, ures3)
p <- ggplot(mres3) +
geom_violin(aes(x=FisheryType, y=value, color=FisheryType, fill=FisheryType)) +
facet_grid(variable ~ .) +
theme_lsd() +
geom_hline(yintercept=0, color="black", lty=2) +
ylab("Residuals") +
theme(axis.text.x=element_blank())
ggsave(file.path(fig_dir, "fishtype_residuals.png"), p, width=10)
############################
## cross-validation
############################
# ## randomly select half the stocks
## Split the B/Bmsy stocks 50/50 into train/test; the test set is
## restricted to factor levels seen in training so predict() only meets
## estimable coefficients.
## NOTE(review): sample() is not seeded, so the split (and all downstream
## cross-validation results) differ between runs.
bchoose_raw <- sample(1:length(bstocks), size=length(bstocks)/2)
bchoose <- bchoose_raw[order(bchoose_raw)]
bdata_train <- sbdata %>% dplyr::filter(stockid %in% bstocks[bchoose])
bregions_choose <- unique(bdata_train$region)
bfishtype_choose <- unique(bdata_train$FisheryType)
btaxatype_choose <- unique(bdata_train$taxGroup)
bdata_test <- sbdata %>% dplyr::filter(stockid %in% bstocks[bchoose] == FALSE) %>%
dplyr::filter(region %in% bregions_choose) %>%
dplyr::filter(FisheryType %in% bfishtype_choose) %>%
dplyr::filter(taxGroup %in% btaxatype_choose)
### run models with training data
## Same candidate model set as the full-data fits (here using logM
## throughout, unlike u5-u11/b5-b11 above).
cb1 <- lm(logvalue ~ logM, data=bdata_train)
summary(cb1)
cb2 <- lm(logvalue ~ region2, data=bdata_train)
summary(cb2)
cb3 <- lm(logvalue ~ FisheryType2, data=bdata_train)
summary(cb3)
cb4 <- lm(logvalue ~ taxGroup2, data=bdata_train)
summary(cb4)
cb5 <- lm(logvalue ~ region2 + logM, data=bdata_train)
summary(cb5)
cb6 <- lm(logvalue ~ FisheryType2 + logM, data=bdata_train)
summary(cb6)
cb7 <- lm(logvalue ~ taxGroup2 + logM, data=bdata_train)
summary(cb7)
cb8 <- lm(logvalue ~ region2 + FisheryType2, data=bdata_train)
summary(cb8)
cb9 <- lm(logvalue ~ region2 + taxGroup2, data=bdata_train)
summary(cb9)
cb10 <- lm(logvalue ~ logM + region2 + FisheryType2, data=bdata_train)
summary(cb10)
cb11 <- lm(logvalue ~ logM + region2 + taxGroup2, data=bdata_train)
summary(cb11)
## AIC / adjusted-R^2 comparison of the B/Bmsy training-set models.
cbmods <- list("M"=cb1, "region"=cb2, "fishtype"=cb3, "taxagroup"=cb4,
"region_M"=cb5, "fishtype_M"=cb6, "taxagroup_M"=cb7,
"region_fishtype"=cb8, "region_taxagroup"=cb9,
"all_fishtype"=cb10, "all_taxagroup"=cb11)
cbr2 <- sapply(1:length(cbmods), function(x) summary(cbmods[[x]])$adj.r.squared)
aicbc <- AIC(cb1, cb2, cb3, cb4, cb5, cb6, cb7, cb8, cb9, cb10, cb11) %>%
dplyr::mutate(deltaAIC = AIC - min(AIC))
aicbc1 <- AIC(cb1, cb2, cb3, cb4) %>%
dplyr::mutate(deltaAIC = AIC - min(AIC))
aicbc$R2 <- cbr2
aicbc$ModelName <- names(cbmods)
## Consistency: label the single-factor table like aicu1/aicb1 above.
aicbc1$ModelName <- names(cbmods)[1:4]
## BUGFIX: this previously wrote the full-data table `aicb` instead of the
## cross-validation table `aicbc` computed here.
write.csv(aicbc, file.path(fig_dir, "cross_BBmsy_AIC.csv"))
# ## randomly select half the stocks
## Same 50/50 train/test split for the U/Umsy stocks, with the test set
## restricted to factor levels present in training.
uchoose_raw <- sample(1:length(ustocks), size=length(ustocks)/2)
uchoose <- uchoose_raw[order(uchoose_raw)]
udata_train <- sudata %>% dplyr::filter(stockid %in% ustocks[uchoose])
uregions_choose <- unique(udata_train$region)
ufishtype_choose <- unique(udata_train$FisheryType)
utaxatype_choose <- unique(udata_train$taxGroup)
udata_test <- sudata %>% dplyr::filter(stockid %in% ustocks[uchoose] == FALSE) %>%
dplyr::filter(region %in% uregions_choose) %>%
dplyr::filter(FisheryType %in% ufishtype_choose) %>%
dplyr::filter(taxGroup %in% utaxatype_choose)
### run models with training data
## BUGFIX: every U/Umsy cross-validation model below was fit to
## bdata_train (the B/Bmsy training set); they now use udata_train, the
## split constructed immediately above.
cu1 <- lm(logvalue ~ logM, data=udata_train)
summary(cu1)
cu2 <- lm(logvalue ~ region2, data=udata_train)
summary(cu2)
cu3 <- lm(logvalue ~ FisheryType2, data=udata_train)
summary(cu3)
cu4 <- lm(logvalue ~ taxGroup2, data=udata_train)
summary(cu4)
cu5 <- lm(logvalue ~ region2 + logM, data=udata_train)
summary(cu5)
cu6 <- lm(logvalue ~ FisheryType2 + logM, data=udata_train)
summary(cu6)
cu7 <- lm(logvalue ~ taxGroup2 + logM, data=udata_train)
summary(cu7)
cu8 <- lm(logvalue ~ region2 + FisheryType2, data=udata_train)
summary(cu8)
cu9 <- lm(logvalue ~ region2 + taxGroup2, data=udata_train)
summary(cu9)
cu10 <- lm(logvalue ~ logM + region2 + FisheryType2, data=udata_train)
summary(cu10)
cu11 <- lm(logvalue ~ logM + region2 + taxGroup2, data=udata_train)
summary(cu11)
## AIC / adjusted-R^2 comparison of the U/Umsy training-set models.
cumods <- list("M"=cu1, "region"=cu2, "fishtype"=cu3, "taxagroup"=cu4,
"region_M"=cu5, "fishtype_M"=cu6, "taxagroup_M"=cu7,
"region_fishtype"=cu8, "region_taxagroup"=cu9,
"all_fishtype"=cu10, "all_taxagroup"=cu11)
cur2 <- sapply(1:length(cumods), function(x) summary(cumods[[x]])$adj.r.squared)
aicuc <- AIC(cu1, cu2, cu3, cu4, cu5, cu6, cu7, cu8, cu9, cu10, cu11) %>%
dplyr::mutate(deltaAIC = AIC - min(AIC))
aicuc1 <- AIC(cu1, cu2, cu3, cu4) %>%
dplyr::mutate(deltaAIC = AIC - min(AIC))
aicuc$R2 <- cur2
aicuc$ModelName <- names(cumods)
## Consistency with the other AIC tables.
aicuc1$ModelName <- names(cumods)[1:4]
## BUGFIX: previously wrote the full-data `aicu` to "cross_BBmsy_AIC.csv",
## clobbering the B/Bmsy cross-validation file; write the cross-validation
## table `aicuc` to its own U/Umsy file instead.
write.csv(aicuc, file.path(fig_dir, "cross_UUmsy_AIC.csv"))
## Cross-validation plot for B/Bmsy: predict the held-out stocks from the
## full training model (cb10) with 95% intervals, then overlay the
## observations. Filled overlay points fall inside the interval; red
## points are on the wrong side of the B/Bmsy = 1 line relative to the
## prediction.
png(file.path(fig_dir, "cross_val_B.png"), height=8, width=10, res=200, units="in")
pred <- predict(cb10, bdata_test, se.fit=TRUE)
## Back-transform predictions and the 95% interval from the log scale.
val <- exp(pred$fit)
low <- exp(pred$fit - 1.96 * pred$se.fit)
up <- exp(pred$fit + 1.96 * pred$se.fit)
plot(val, col="black", pch=19, ylim=c(0, max(up)), xlab="Stock", ylab="B/Bmsy")
segments(x0=1:length(val), x1=1:length(val), y0=low, y1=up, col="black")
obs <- bdata_test$value
within <- sapply(1:length(obs), function(x){
ifelse(obs[x] >= low[x] & obs[x] <= up[x], 19, 1)
})
col <- sapply(1:length(obs), function(x){
ifelse(obs[x] >= 1 & val[x] >=1, "blue", ifelse(obs[x] < 1 & val[x] < 1, "blue", "red"))
})
## BUGFIX: was bdata_test$val, which only resolved via `$` partial
## matching of "val" to the "value" column.
points(bdata_test$value, pch=within, col=col)
## Coverage / status-misclassification rates (auto-printed only when run
## interactively).
length(which(within==19))/length(within)
length(which(col=="red"))/length(col)
length(which(within==1 & col=="red"))/length(within)
dev.off()
## Same cross-validation plot for U/Umsy, using cu10 and the held-out
## U/Umsy test set.
png(file.path(fig_dir, "cross_val_U.png"), height=8, width=10, res=200, units="in")
pred <- predict(cu10, udata_test, se.fit=TRUE)
val <- exp(pred$fit)
low <- exp(pred$fit - 1.96 * pred$se.fit)
up <- exp(pred$fit + 1.96 * pred$se.fit)
## BUGFIX: the y-axis was labelled "B/Bmsy" on this U/Umsy plot.
plot(val, col="black", pch=19, ylim=c(0, max(up)), xlab="Stock", ylab="U/Umsy")
segments(x0=1:length(val), x1=1:length(val), y0=low, y1=up, col="black")
obs <- udata_test$value
within <- sapply(1:length(obs), function(x){
ifelse(obs[x] >= low[x] & obs[x] <= up[x], 19, 1)
})
col <- sapply(1:length(obs), function(x){
ifelse(obs[x] >= 1 & val[x] >=1, "blue", ifelse(obs[x] < 1 & val[x] < 1, "blue", "red"))
})
## BUGFIX: was points(bdata_test$val, ...) -- the B/Bmsy test set -- on
## the U/Umsy plot; plot the U observations.
points(udata_test$value, pch=within, col=col)
length(which(within==19))/length(within)
length(which(col=="red"))/length(col)
length(which(within==1 & col=="red"))/length(within)
dev.off()
|
/life_history.R
|
no_license
|
merrillrudd/status_priors
|
R
| false
| false
| 21,254
|
r
|
## Exploratory analysis of stock-status priors (B/Bmsy, U/Umsy) from the
## RAM Legacy stock assessment database (RLSADB v4.25).
## NOTE(review): rm(list=ls()) wipes the user's workspace when sourced;
## generally discouraged in shared scripts.
rm(list=ls())
########################################
## packages
########################################
library(ggplot2)   # figures
library(dplyr)     # data manipulation
library(reshape2)  # melt() for wide -> long reshaping
# library(nlme)
########################################
## directories
########################################
## Hard-coded Windows project root; figures/ is created if absent.
wd <- "C:\\merrill\\status_priors"
fig_dir <- file.path(wd, "figures")
dir.create(fig_dir, showWarnings=FALSE)
data_dir <- file.path(wd, "data")
########################################
## figure theme
########################################
## Shared ggplot2 theme for every figure: theme_grey adjusted to a white
## panel with grey gridlines/border, black tick marks, slightly smaller
## axis text, and a padded x-axis title.
theme_lsd <- function (base_size = 14, base_family = "")
{
theme_grey(base_size = base_size, base_family = base_family) %+replace%
theme(axis.title.x = element_text(margin = margin(10,0,0,0)),
#axis.title.x = element_text(vjust = -1.5),
#axis.title.y = element_text(margin = margin(0,20,0,0)),
#axis.title.y = element_text(vjust = -0.1),
axis.text = element_text(size = rel(0.8)),
axis.ticks = element_line(colour = "black"),
legend.key = element_rect(colour = "grey80"),
panel.background = element_rect(fill = "white", colour = NA),
panel.border = element_rect(fill = NA, colour = "grey50"),
panel.grid.major = element_line(colour = "grey90", size = 0.2),
panel.grid.minor = element_line(colour = "grey98", size = 0.5),
strip.background = element_rect(fill = "grey80", colour = "grey50", size = 0.2))
}
########################################
## combine information
########################################
# RAM Legacy Stock Assessment Database v4.25: reshape the B/Bmsy and U/Umsy
# time series to long format and join stock info, life history (M), and taxonomy.
timeseries <- read.csv(file.path(data_dir, "RLSADB_v4.25_timeseries.csv"), stringsAsFactors=FALSE, header=TRUE)
timeseries <- melt(timeseries, measure.vars=c("BdivBmsypref", "UdivUmsypref"))
# NOTE(review): if no NA values exist, -which(...) drops EVERY row
# (x[-integer(0),] is empty); `timeseries[!is.na(timeseries$value),]` is safer.
timeseries <- timeseries[-which(is.na(timeseries$value)),]
stockinfo <- read.csv(file.path(data_dir, "RLSADB_v4.25_stockinfo.csv"), stringsAsFactors=FALSE, header=TRUE)
# First column holds the stock identifier; normalise its name for joins
colnames(stockinfo)[1] <- "stockid"
stockinfo <- stockinfo %>% dplyr::select("stockid", "region", "scientificname")
stockinfo <- stockinfo[which(stockinfo$stockid %in% timeseries$stockid),]
bioparams <- read.csv(file.path(data_dir, "RLSADB_v4.25_bioparams.csv"), stringsAsFactors=FALSE)
bioparams <- bioparams[which(bioparams$stockid %in% timeseries$stockid),] %>%
dplyr::mutate(M = as.numeric(M))
taxonomy <- read.csv(file.path(data_dir, "RLSADB_v4.25_taxonomy.csv"), stringsAsFactors=FALSE)
taxonomy <- taxonomy[which(taxonomy$scientificname %in% stockinfo$scientificname),]
# Assemble the analysis table; cap status values at 10 and drop incomplete rows
data <- timeseries %>%
dplyr::select(stockid, stocklong, year, variable, value) %>%
dplyr::mutate(variable = ifelse(variable=="BdivBmsypref","B/Bmsy", ifelse(variable=="UdivUmsypref","U/Umsy",NA))) %>%
dplyr::full_join(stockinfo, by="stockid") %>%
dplyr::full_join(bioparams %>% dplyr::select(stockid, M), by="stockid") %>%
dplyr::full_join(taxonomy %>% dplyr::select(scientificname, FisheryType, taxGroup), by="scientificname") %>%
dplyr::mutate(value = ifelse(value > 10, 10, value)) %>%
na.omit()
data$region <- as.factor(data$region)
data$FisheryType <- as.factor(data$FisheryType)
data$taxGroup <- as.factor(data$taxGroup)
region_vec <- unique(data$region)
fishtype_vec <- unique(data$FisheryType)
taxgroup_vec <- unique(data$taxGroup)
# Recode the categorical predictors as integer-level factors (region2,
# FisheryType2, taxGroup2) and add log-transformed response and M
data <- data %>%
dplyr::mutate(region2 = as.factor(match(region, region_vec))) %>%
dplyr::mutate(FisheryType2 = as.factor(match(FisheryType, fishtype_vec))) %>%
dplyr::mutate(taxGroup2 = as.factor(match(taxGroup, taxgroup_vec))) %>%
dplyr::mutate(logvalue = log(value)) %>%
dplyr::mutate(logM = log(M))
## Collapse each stock's time series to a single row whose value (and
## logvalue) is the mean over the final 5 years, or over all years when the
## series is shorter. Extracted as a helper because the B/Bmsy and U/Umsy
## branches were identical copy-pasted lapply bodies.
summarise_last5 <- function(df, stocks){
lapply(seq_along(stocks), function(x){
sub <- df %>% filter(stockid == stocks[x])
yr <- sub$year
# keep only the terminal 5 years when the series is long enough
if(length(yr)>=5) sub2 <- sub %>% filter(year %in% yr[(length(yr)-4):length(yr)])
if(length(yr)<5) sub2 <- sub
avg <- mean(sub2$value)
sub3 <- sub2 %>% mutate(value = avg) %>%
mutate(logvalue = log(avg)) %>%
select(-year)
# dropping `year` makes the averaged rows identical, so unique() leaves one row
return(unique(sub3))
})
}
## B/Bmsy averaged across last 5 years
bdata <- data %>% filter(variable == 'B/Bmsy')
bstocks <- unique(bdata$stockid)
bdata_sum <- summarise_last5(bdata, bstocks)
sbdata <- do.call(rbind, bdata_sum)
## U/Umsy averaged across last 5 years
udata <- data %>% filter(variable == 'U/Umsy')
ustocks <- unique(udata$stockid)
udata_sum <- summarise_last5(udata, ustocks)
sudata <- do.call(rbind, udata_sum)
## Stack the two summaries (one row per stock x variable) and keep one
## life-history/covariate record per stock for factor-correlation plots
sdata <- rbind(sbdata, sudata)
allstocks <- unique(sdata %>% select(stockid, M, logM, region, FisheryType, taxGroup))
#########################################
## plot the data against single factors
#########################################
## factors -- region of ocean, type of fish, natural mortality rate
# NOTE(review): files below are named violin_*.png but geom_boxplot() is
# used -- confirm whether violins or boxplots were intended.
## scatterplots
p <- ggplot(sdata) +
geom_point(aes(x=M, y=value)) +
stat_smooth(aes(x=M, y=value), method="lm") +
facet_grid(variable ~ ., scales="free") +
geom_hline(yintercept=1, color="black", lty=2) +
theme_lsd()
ggsave(file.path(fig_dir, "scatterplots_M.png"), p)
p <- ggplot(sdata) +
geom_point(aes(x=logM, y=logvalue)) +
stat_smooth(aes(x=logM, y=logvalue), method="lm") +
facet_grid(variable ~ ., scales="free") +
geom_hline(yintercept=0, color="black", lty=2) +
theme_lsd()
ggsave(file.path(fig_dir, "scatterplots_logM.png"), p)
## boxplots
# B/Bmsy and U/Umsy distributions pooled across all stocks
p <- ggplot(sdata) +
geom_boxplot(aes(x=variable, y=value, color=variable, fill=variable)) +
geom_hline(yintercept=1, color="black", lty=2) +
guides(fill=FALSE, color=FALSE) +
theme_lsd()
ggsave(file.path(fig_dir, "violin_all.png"), p)
p <- ggplot(sdata) +
geom_boxplot(aes(x=variable, y=logvalue, color=variable, fill=variable)) +
geom_hline(yintercept=0, color="black", lty=2) +
guides(fill=FALSE, color=FALSE) +
theme_lsd()
ggsave(file.path(fig_dir, "violin_logall.png"), p)
# Status by region (raw and log scale)
p <- ggplot(sdata) +
geom_boxplot(aes(x=region, y=value, color=region, fill=region)) +
facet_grid(variable ~ ., scales="free") +
geom_hline(yintercept=1, color="black", lty=2) +
theme_lsd() +
theme(axis.text.x=element_blank())
ggsave(file.path(fig_dir, "violin_region.png"), p, width=10)
p <- ggplot(sdata) +
geom_boxplot(aes(x=region, y=logvalue, color=region, fill=region)) +
facet_grid(variable ~ ., scales="free") +
geom_hline(yintercept=0, color="black", lty=2) +
theme_lsd() +
theme(axis.text.x=element_blank())
ggsave(file.path(fig_dir, "violin_logregion.png"), p, width=10)
# Status by fishery type (raw and log scale)
p <- ggplot(sdata) +
geom_boxplot(aes(x=FisheryType, y=value, color=FisheryType, fill=FisheryType)) +
facet_grid(variable ~ ., scales="free") +
geom_hline(yintercept=1, color="black", lty=2) +
theme_lsd() +
theme(axis.text.x=element_blank())
ggsave(file.path(fig_dir, "violin_fishtype.png"), p, width=10)
p <- ggplot(sdata) +
geom_boxplot(aes(x=FisheryType, y=logvalue, color=FisheryType, fill=FisheryType)) +
facet_grid(variable ~ ., scales="free") +
geom_hline(yintercept=0, color="black", lty=2) +
theme_lsd() +
theme(axis.text.x=element_blank())
ggsave(file.path(fig_dir, "violin_logfishtype.png"), p, width=10)
## correlation between factors
# Region vs natural mortality (raw M)
p <- ggplot(allstocks) +
geom_boxplot(aes(x=region, y=M, color=region, fill=region)) +
theme_lsd() +
theme(axis.text.x=element_blank())
ggsave(file.path(fig_dir, "region_by_M.png"), p, width=10)
# Region vs log(M)
p <- ggplot(allstocks) +
geom_boxplot(aes(x=region, y=logM, color=region, fill=region)) +
theme_lsd() +
theme(axis.text.x=element_blank())
# Bug fix: this previously saved to "region_by_M.png" again, overwriting
# the raw-M figure above.
ggsave(file.path(fig_dir, "region_by_logM.png"), p, width=10)
# Fishery type vs M (raw and log)
p <- ggplot(allstocks) +
geom_boxplot(aes(x=FisheryType, y=M, color=FisheryType, fill=FisheryType)) +
theme_lsd() +
theme(axis.text.x=element_blank())
ggsave(file.path(fig_dir, "fishtype_by_M.png"), p, width=10)
p <- ggplot(allstocks) +
geom_boxplot(aes(x=FisheryType, y=logM, color=FisheryType, fill=FisheryType)) +
theme_lsd() +
theme(axis.text.x=element_blank())
ggsave(file.path(fig_dir, "fishtype_by_logM.png"), p, width=10)
# Pairwise scatter of the candidate predictors
png(file.path(fig_dir, "factor_correlation_logM.png"), height=8, width=10, units="in", res=200)
pairs(data.frame("logM"=allstocks$logM, "FishType"=allstocks$FisheryType, "Region"=allstocks$region))
dev.off()
png(file.path(fig_dir, "factor_correlation.png"), height=8, width=10, units="in", res=200)
# Bug fix: this panel uses raw M, so label it "M" (was mislabelled "logM")
pairs(data.frame("M"=allstocks$M, "FishType"=allstocks$FisheryType, "Region"=allstocks$region))
dev.off()
#########################################
## regression
#########################################
## U/UMSY
# Candidate linear models for log(U/Umsy) with single factors and
# combinations of natural mortality, region, fishery type, and taxa group.
# NOTE(review): u1 uses logM but u5-u7 and u10-u11 use raw M, while the
# cross-validation models further below use logM throughout -- confirm
# whether raw M here is intentional.
u1 <- lm(logvalue ~ logM, data=sudata)
summary(u1)
plot(sudata$value, col="gray", lwd=2)
lines(fitted(u1))
boxplot(resid(u1))
u2 <- lm(logvalue ~ region2, data=sudata)
summary(u2)
plot(sudata$logvalue, col="gray", lwd=2)
lines(fitted(u2))
u3 <- lm(logvalue ~ FisheryType2, data=sudata)
summary(u3)
plot(sudata$logvalue, col="gray", lwd=2)
lines(fitted(u3))
boxplot(resid(u3))
u4 <- lm(logvalue ~ taxGroup2, data=sudata)
summary(u4)
u5 <- lm(logvalue ~ region2 + M, data=sudata)
summary(u5)
plot(sudata$logvalue, col="gray", lwd=2)
lines(fitted(u5))
boxplot(resid(u5))
u6 <- lm(logvalue ~ FisheryType2 + M, data=sudata)
summary(u6)
plot(sudata$logvalue, col="gray", lwd=2)
lines(fitted(u6))
boxplot(resid(u6))
u7 <- lm(logvalue ~ taxGroup2 + M, data=sudata)
summary(u7)
u8 <- lm(logvalue ~ region2 + FisheryType2, data=sudata)
summary(u8)
plot(sudata$logvalue, col="gray", lwd=2)
lines(fitted(u8))
u9 <- lm(logvalue ~ region2 + taxGroup2, data=sudata)
summary(u9)
u10 <- lm(logvalue ~ M + region2 + FisheryType2, data=sudata)
summary(u10)
plot(sudata$logvalue, col="gray", lwd=2)
lines(fitted(u10))
u11 <- lm(logvalue ~ M + region2 + taxGroup2, data=sudata)
summary(u11)
# Collect models for AIC/R^2 comparison
umods <- list("M"=u1, "region"=u2, "fishtype"=u3, "taxagroup"=u4,
"region_M"=u5, "fishtype_M"=u6, "taxagroup_M"=u7,
"region_fishtype"=u8, "region_taxagroup"=u9,
"all_fishtype"=u10, "all_taxagroup"=u11)
ur2 <- sapply(1:length(umods), function(x) summary(umods[[x]])$adj.r.squared)
aicu <- AIC(u1, u2, u3, u4, u5, u6, u7, u8, u9, u10, u11) %>%
dplyr::mutate(deltaAIC = AIC - min(AIC))
# AIC table restricted to the single-factor models
aicu1 <- AIC(u1, u2, u3, u4) %>%
dplyr::mutate(deltaAIC = AIC - min(AIC))
aicu$R2 <- ur2
aicu$ModelName <- names(umods)
aicu1$ModelName <- names(umods)[1:4]
# NOTE(review): aicu1 is built but never written out -- confirm whether
# it should be saved as well.
write.csv(aicu, file.path(fig_dir, "UUmsy_AIC.csv"))
### B/Bmsy
# Same candidate model set as for U/Umsy, fit to the B/Bmsy summaries.
b1 <- lm(logvalue ~ logM, data=sbdata)
summary(b1)
plot(sbdata$value, col="gray", lwd=2)
lines(fitted(b1))
boxplot(resid(b1))
b2 <- lm(logvalue ~ region2, data=sbdata)
summary(b2)
plot(sbdata$logvalue, col="gray", lwd=2)
lines(fitted(b2))
b3 <- lm(logvalue ~ FisheryType2, data=sbdata)
summary(b3)
plot(sbdata$logvalue, col="gray", lwd=2)
lines(fitted(b3))
boxplot(resid(b3))
b4 <- lm(logvalue ~ taxGroup2, data=sbdata)
summary(b4)
# NOTE(review): b5-b7 and b10-b11 use raw M while b1 uses logM -- confirm.
b5 <- lm(logvalue ~ region2 + M, data=sbdata)
summary(b5)
plot(sbdata$logvalue, col="gray", lwd=2)
lines(fitted(b5))
boxplot(resid(b5))
b6 <- lm(logvalue ~ FisheryType2 + M, data=sbdata)
summary(b6)
plot(sbdata$logvalue, col="gray", lwd=2)
lines(fitted(b6))
boxplot(resid(b6))
b7 <- lm(logvalue ~ taxGroup2 + M, data=sbdata)
summary(b7)
b8 <- lm(logvalue ~ region2 + FisheryType2, data=sbdata)
summary(b8)
plot(sbdata$logvalue, col="gray", lwd=2)
lines(fitted(b8))
b9 <- lm(logvalue ~ region2 + taxGroup2, data=sbdata)
summary(b9)
b10 <- lm(logvalue ~ M + region2 + FisheryType2, data=sbdata)
summary(b10)
plot(sbdata$logvalue, col="gray", lwd=2)
lines(fitted(b10))
b11 <- lm(logvalue ~ M + region2 + taxGroup2, data=sbdata)
summary(b11)
# Collect the B/Bmsy models and compare them by AIC and adjusted R^2
bmods <- list("M"=b1, "region"=b2, "fishtype"=b3, "taxagroup"=b4,
"region_M"=b5, "fishtype_M"=b6, "taxagroup_M"=b7,
"region_fishtype"=b8, "region_taxagroup"=b9,
"all_fishtype"=b10, "all_taxagroup"=b11)
br2 <- sapply(1:length(bmods), function(x) summary(bmods[[x]])$adj.r.squared)
aicb <- AIC(b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11) %>%
dplyr::mutate(deltaAIC = AIC - min(AIC))
# AIC table restricted to the single-factor models
aicb1 <- AIC(b1, b2, b3, b4) %>%
dplyr::mutate(deltaAIC = AIC - min(AIC))
aicb$R2 <- br2
aicb$ModelName <- names(bmods)
aicb1$ModelName <- names(bmods)[1:4]
# Bug fix: this is the B/Bmsy table; it was previously written to
# "UUmsy_AIC.csv", clobbering the U/Umsy results saved above.
write.csv(aicb, file.path(fig_dir, "BBmsy_AIC.csv"))
############################
## diagnostic plots
############################
# Standard lm diagnostic panels (residuals vs fitted, QQ, scale-location,
# leverage) for selected B and U models.
png(file.path(fig_dir, "diagnostic_BM.png"), height=6, width=8, units="in", res=200)
par(mfrow=c(2,2))
plot(b1)
dev.off()
png(file.path(fig_dir, "diagnostic_Bregion.png"), height=6, width=8, units="in", res=200)
par(mfrow=c(2,2))
plot(b2)
dev.off()
png(file.path(fig_dir, "diagnostic_Bfishtype.png"), height=6, width=8, units="in", res=200)
par(mfrow=c(2,2))
plot(b3)
dev.off()
png(file.path(fig_dir, "diagnostic_B_all_fishtype.png"), height=6, width=8, units="in", res=200)
par(mfrow=c(2,2))
plot(b10)
dev.off()
png(file.path(fig_dir, "diagnostic_UM.png"), height=6, width=8, units="in", res=200)
par(mfrow=c(2,2))
plot(u1)
dev.off()
png(file.path(fig_dir, "diagnostic_Uregion.png"), height=6, width=8, units="in", res=200)
par(mfrow=c(2,2))
plot(u2)
dev.off()
png(file.path(fig_dir, "diagnostic_Ufishtype.png"), height=6, width=8, units="in", res=200)
par(mfrow=c(2,2))
plot(u3)
dev.off()
png(file.path(fig_dir, "diagnostic_U_fishtype_M.png"), height=6, width=8, units="in", res=200)
par(mfrow=c(2,2))
plot(u6)
dev.off()
png(file.path(fig_dir, "diagnostic_U_all_fishtype.png"), height=6, width=8, units="in", res=200)
par(mfrow=c(2,2))
plot(u10)
dev.off()
# Per-region regression lines of log status vs log M
p <- ggplot(sdata, aes(x=logM, y=logvalue , colour=factor(region))) +
geom_point() +
stat_smooth(method=lm, fullrange=FALSE) +
facet_grid(variable ~ .)
ggsave(file.path(fig_dir, "logM_logvalue_byregion_lm.png"), p, width=10)
############################
## residual plots
############################
### natural mortality
# Residual distributions from the logM-only models for each response
bres1 <- data.frame(variable="B/BMSY", value=b1$residuals)
ures1 <- data.frame(variable="U/UMSY", value=u1$residuals)
mres1 <- rbind(bres1, ures1)
p <- ggplot(mres1) +
geom_violin(aes(x=variable, y=value, color=variable, fill=variable)) +
theme_lsd() +
guides(color=FALSE, fill=FALSE) +
geom_hline(yintercept=0, color="black", lty=2) +
xlab("") + ylab("Residuals")
ggsave(file.path(fig_dir, "M_residuals.png"), p, width=6)
### region
# NOTE(review): fortify() replaces the lm objects b2/u2 (and b3/u3 below)
# with data frames, so the model objects are no longer usable afterwards.
b2 <- fortify(b2)
u2 <- fortify(u2)
bres2 <- data.frame(variable="B/BMSY", value=b2$.resid, region=b2$region)
ures2 <- data.frame(variable="U/UMSY", value=u2$.resid, region=u2$region)
mres2 <- rbind(bres2, ures2)
p <- ggplot(mres2) +
geom_violin(aes(x=region, y=value, color=region, fill=region)) +
facet_grid(variable ~ .) +
theme_lsd() +
geom_hline(yintercept=0, color="black", lty=2) +
ylab("Residuals") +
theme(axis.text.x=element_blank())
ggsave(file.path(fig_dir, "region_residuals.png"), p, width=10)
### fish type
b3 <- fortify(b3)
u3 <- fortify(u3)
bres3 <- data.frame(variable="B/BMSY", value=b3$.resid, FisheryType=b3$FisheryType)
ures3 <- data.frame(variable="U/UMSY", value=u3$.resid, FisheryType=u3$FisheryType)
mres3 <- rbind(bres3, ures3)
p <- ggplot(mres3) +
geom_violin(aes(x=FisheryType, y=value, color=FisheryType, fill=FisheryType)) +
facet_grid(variable ~ .) +
theme_lsd() +
geom_hline(yintercept=0, color="black", lty=2) +
ylab("Residuals") +
theme(axis.text.x=element_blank())
ggsave(file.path(fig_dir, "fishtype_residuals.png"), p, width=10)
############################
## cross-validation
############################
# ## randomly select half the stocks
# NOTE(review): no set.seed() call, so the train/test split (and everything
# downstream) differs between runs -- confirm whether that is intended.
bchoose_raw <- sample(1:length(bstocks), size=length(bstocks)/2)
bchoose <- bchoose_raw[order(bchoose_raw)]
bdata_train <- sbdata %>% dplyr::filter(stockid %in% bstocks[bchoose])
# Keep only test stocks whose factor levels appear in the training set,
# otherwise predict() would fail on unseen levels
bregions_choose <- unique(bdata_train$region)
bfishtype_choose <- unique(bdata_train$FisheryType)
btaxatype_choose <- unique(bdata_train$taxGroup)
bdata_test <- sbdata %>% dplyr::filter(stockid %in% bstocks[bchoose] == FALSE) %>%
dplyr::filter(region %in% bregions_choose) %>%
dplyr::filter(FisheryType %in% bfishtype_choose) %>%
dplyr::filter(taxGroup %in% btaxatype_choose)
### run models with training data
# Same candidate set as the full-data models, but fit to the B training half
# (note these use logM where the full-data versions used raw M).
cb1 <- lm(logvalue ~ logM, data=bdata_train)
summary(cb1)
cb2 <- lm(logvalue ~ region2, data=bdata_train)
summary(cb2)
cb3 <- lm(logvalue ~ FisheryType2, data=bdata_train)
summary(cb3)
cb4 <- lm(logvalue ~ taxGroup2, data=bdata_train)
summary(cb4)
cb5 <- lm(logvalue ~ region2 + logM, data=bdata_train)
summary(cb5)
cb6 <- lm(logvalue ~ FisheryType2 + logM, data=bdata_train)
summary(cb6)
cb7 <- lm(logvalue ~ taxGroup2 + logM, data=bdata_train)
summary(cb7)
cb8 <- lm(logvalue ~ region2 + FisheryType2, data=bdata_train)
summary(cb8)
cb9 <- lm(logvalue ~ region2 + taxGroup2, data=bdata_train)
summary(cb9)
cb10 <- lm(logvalue ~ logM + region2 + FisheryType2, data=bdata_train)
summary(cb10)
cb11 <- lm(logvalue ~ logM + region2 + taxGroup2, data=bdata_train)
summary(cb11)
# Collect the B/Bmsy cross-validation models and compare by AIC / adjusted R^2
cbmods <- list("M"=cb1, "region"=cb2, "fishtype"=cb3, "taxagroup"=cb4,
"region_M"=cb5, "fishtype_M"=cb6, "taxagroup_M"=cb7,
"region_fishtype"=cb8, "region_taxagroup"=cb9,
"all_fishtype"=cb10, "all_taxagroup"=cb11)
cbr2 <- sapply(1:length(cbmods), function(x) summary(cbmods[[x]])$adj.r.squared)
aicbc <- AIC(cb1, cb2, cb3, cb4, cb5, cb6, cb7, cb8, cb9, cb10, cb11) %>%
dplyr::mutate(deltaAIC = AIC - min(AIC))
# AIC table restricted to the single-factor models
aicbc1 <- AIC(cb1, cb2, cb3, cb4) %>%
dplyr::mutate(deltaAIC = AIC - min(AIC))
aicbc$R2 <- cbr2
aicbc$ModelName <- names(cbmods)
# Bug fix: write the cross-validation table (aicbc); previously the
# full-data table aicb was written here instead.
write.csv(aicbc, file.path(fig_dir, "cross_BBmsy_AIC.csv"))
# ## randomly select half the stocks
# Same train/test construction as for B/Bmsy, applied to the U/Umsy summaries
bchoose_raw <- sample(1:length(ustocks), size=length(ustocks)/2)
uchoose_raw <- sample(1:length(ustocks), size=length(ustocks)/2)
uchoose <- uchoose_raw[order(uchoose_raw)]
udata_train <- sudata %>% dplyr::filter(stockid %in% ustocks[uchoose])
# Restrict the test set to factor levels present in the training set
uregions_choose <- unique(udata_train$region)
ufishtype_choose <- unique(udata_train$FisheryType)
utaxatype_choose <- unique(udata_train$taxGroup)
udata_test <- sudata %>% dplyr::filter(stockid %in% ustocks[uchoose] == FALSE) %>%
dplyr::filter(region %in% uregions_choose) %>%
dplyr::filter(FisheryType %in% ufishtype_choose) %>%
dplyr::filter(taxGroup %in% utaxatype_choose)
### run models with training data
# Bug fix: every cu* model below was fit to bdata_train (the B/Bmsy training
# set) instead of udata_train, so the "U cross-validation" was actually
# refitting the B models. Fit them to the U/Umsy training half.
cu1 <- lm(logvalue ~ logM, data=udata_train)
summary(cu1)
cu2 <- lm(logvalue ~ region2, data=udata_train)
summary(cu2)
cu3 <- lm(logvalue ~ FisheryType2, data=udata_train)
summary(cu3)
cu4 <- lm(logvalue ~ taxGroup2, data=udata_train)
summary(cu4)
cu5 <- lm(logvalue ~ region2 + logM, data=udata_train)
summary(cu5)
cu6 <- lm(logvalue ~ FisheryType2 + logM, data=udata_train)
summary(cu6)
cu7 <- lm(logvalue ~ taxGroup2 + logM, data=udata_train)
summary(cu7)
cu8 <- lm(logvalue ~ region2 + FisheryType2, data=udata_train)
summary(cu8)
cu9 <- lm(logvalue ~ region2 + taxGroup2, data=udata_train)
summary(cu9)
cu10 <- lm(logvalue ~ logM + region2 + FisheryType2, data=udata_train)
summary(cu10)
cu11 <- lm(logvalue ~ logM + region2 + taxGroup2, data=udata_train)
summary(cu11)
# Collect models and compare by AIC / adjusted R^2
cumods <- list("M"=cu1, "region"=cu2, "fishtype"=cu3, "taxagroup"=cu4,
"region_M"=cu5, "fishtype_M"=cu6, "taxagroup_M"=cu7,
"region_fishtype"=cu8, "region_taxagroup"=cu9,
"all_fishtype"=cu10, "all_taxagroup"=cu11)
cur2 <- sapply(1:length(cumods), function(x) summary(cumods[[x]])$adj.r.squared)
aicuc <- AIC(cu1, cu2, cu3, cu4, cu5, cu6, cu7, cu8, cu9, cu10, cu11) %>%
dplyr::mutate(deltaAIC = AIC - min(AIC))
# AIC table restricted to the single-factor models
aicuc1 <- AIC(cu1, cu2, cu3, cu4) %>%
dplyr::mutate(deltaAIC = AIC - min(AIC))
aicuc$R2 <- cur2
aicuc$ModelName <- names(cumods)
# Bug fix: write the U cross-validation table; previously the full-data aicu
# table was written to "cross_BBmsy_AIC.csv", clobbering the B results.
write.csv(aicuc, file.path(fig_dir, "cross_UUmsy_AIC.csv"))
## Cross-validation plot for B/Bmsy: predict the held-out test stocks from the
## training-set model cb10 (logM + region + fishery type) and overlay observed
## values with a 95% prediction interval.
png(file.path(fig_dir, "cross_val_B.png"), height=8, width=10, res=200, units="in")
pred <- predict(cb10, bdata_test, se.fit=TRUE)
val <- exp(pred$fit)                       # back-transform from log scale
low <- exp(pred$fit - 1.96 * pred$se.fit)  # lower 95% prediction bound
up <- exp(pred$fit + 1.96 * pred$se.fit)   # upper 95% prediction bound
plot(val, col="black", pch=19, ylim=c(0, max(up)), xlab="Stock", ylab="B/Bmsy")
segments(x0=1:length(val), x1=1:length(val), y0=low, y1=up, col="black")
obs <- bdata_test$value
# pch 19 (solid) when the observation falls inside the interval, 1 (open) otherwise
within <- ifelse(obs >= low & obs <= up, 19, 1)
# blue when prediction and observation agree on status relative to 1, red otherwise
col <- ifelse((obs >= 1) == (val >= 1), "blue", "red")
# Fix: reference the `value` column explicitly (was `$val`, which only worked
# via data.frame partial matching)
points(obs, pch=within, col=col)
length(which(within==19))/length(within)   # coverage of the 95% interval
length(which(col=="red"))/length(col)      # status misclassification rate
length(which(within==1 & col=="red"))/length(within)
dev.off()
## Cross-validation plot for U/Umsy: predict the held-out test stocks from the
## training-set model cu10 (logM + region + fishery type) and overlay observed
## values with a 95% prediction interval.
png(file.path(fig_dir, "cross_val_U.png"), height=8, width=10, res=200, units="in")
pred <- predict(cu10, udata_test, se.fit=TRUE)
val <- exp(pred$fit)                       # back-transform from log scale
low <- exp(pred$fit - 1.96 * pred$se.fit)  # lower 95% prediction bound
up <- exp(pred$fit + 1.96 * pred$se.fit)   # upper 95% prediction bound
# Bug fix: y-axis label said "B/Bmsy" on the U/Umsy plot
plot(val, col="black", pch=19, ylim=c(0, max(up)), xlab="Stock", ylab="U/Umsy")
segments(x0=1:length(val), x1=1:length(val), y0=low, y1=up, col="black")
obs <- udata_test$value
# pch 19 (solid) when the observation falls inside the interval, 1 (open) otherwise
within <- ifelse(obs >= low & obs <= up, 19, 1)
# blue when prediction and observation agree on status relative to 1, red otherwise
col <- ifelse((obs >= 1) == (val >= 1), "blue", "red")
# Bug fix: overlay the U/Umsy test-set observations (was bdata_test$val,
# i.e. the B/Bmsy test set)
points(obs, pch=within, col=col)
length(which(within==19))/length(within)   # coverage of the 95% interval
length(which(col=="red"))/length(col)      # status misclassification rate
length(which(within==1 & col=="red"))/length(within)
dev.off()
|
# Building a Prod-Ready, Robust Shiny Application.
#
# README: each step of the dev files is optional, and you don't have to
# fill every dev scripts before getting started.
# 01_start.R should be filled at start.
# 02_dev.R should be used to keep track of your development during the project.
# 03_deploy.R should be used once you need to deploy your app.
#
#
# NOTE(review): this script performs file-system side effects (writing
# DESCRIPTION, README, license, etc.) and is meant to be run interactively
# once, at project creation -- not sourced repeatedly.
########################################
#### CURRENT FILE: ON START SCRIPT #####
########################################
## Fill the DESCRIPTION ----
## Add meta data about your application
golem::fill_desc(
pkg_name = "amzreviewer", # The Name of the package containing the App
pkg_title = "Amazon Reviews Analyser", # The Title of the package containing the App
pkg_description = "Provides a simple app to analyse product reviews from Amazon.", # The Description of the package containing the App
author_first_name = "David", # Your First Name
author_last_name = "Mateos", # Your Last Name
author_email = "cldav.privmath@gmail.com", # Your Email
repo_url = "https://github.com/alberto-mateos-mo/amzreviewer.git" # The URL of the GitHub Repo (optional)
)
## Set {golem} options ----
golem::set_golem_options()
## Create Common Files ----
## See ?usethis for more information
usethis::use_ccby_license( name = "David Mateos" ) # You can set another license here
usethis::use_readme_rmd( open = TRUE )
usethis::use_code_of_conduct()
usethis::use_lifecycle_badge( "Experimental" )
## Use Recommended Packages ----
golem::use_recommended_deps()
## Favicon ----
# If you want to change the favicon (default is golem's one)
golem::remove_favicon()
# golem::use_favicon() # path = "path/to/ico". Can be an online file.
## Add helper functions ----
golem::use_utils_ui()
golem::use_utils_server()
# You're now set! ----
# go to dev/02_dev.R
# Requires an active RStudio session (rstudioapi)
rstudioapi::navigateToFile( "dev/02_dev.R" )
|
/dev/01_start.R
|
permissive
|
alberto-mateos-mo/amzreviewer
|
R
| false
| false
| 1,877
|
r
|
# Building a Prod-Ready, Robust Shiny Application.
#
# README: each step of the dev files is optional, and you don't have to
# fill every dev scripts before getting started.
# 01_start.R should be filled at start.
# 02_dev.R should be used to keep track of your development during the project.
# 03_deploy.R should be used once you need to deploy your app.
#
#
# NOTE(review): this script performs file-system side effects (writing
# DESCRIPTION, README, license, etc.) and is meant to be run interactively
# once, at project creation -- not sourced repeatedly.
########################################
#### CURRENT FILE: ON START SCRIPT #####
########################################
## Fill the DESCRIPTION ----
## Add meta data about your application
golem::fill_desc(
pkg_name = "amzreviewer", # The Name of the package containing the App
pkg_title = "Amazon Reviews Analyser", # The Title of the package containing the App
pkg_description = "Provides a simple app to analyse product reviews from Amazon.", # The Description of the package containing the App
author_first_name = "David", # Your First Name
author_last_name = "Mateos", # Your Last Name
author_email = "cldav.privmath@gmail.com", # Your Email
repo_url = "https://github.com/alberto-mateos-mo/amzreviewer.git" # The URL of the GitHub Repo (optional)
)
## Set {golem} options ----
golem::set_golem_options()
## Create Common Files ----
## See ?usethis for more information
usethis::use_ccby_license( name = "David Mateos" ) # You can set another license here
usethis::use_readme_rmd( open = TRUE )
usethis::use_code_of_conduct()
usethis::use_lifecycle_badge( "Experimental" )
## Use Recommended Packages ----
golem::use_recommended_deps()
## Favicon ----
# If you want to change the favicon (default is golem's one)
golem::remove_favicon()
# golem::use_favicon() # path = "path/to/ico". Can be an online file.
## Add helper functions ----
golem::use_utils_ui()
golem::use_utils_server()
# You're now set! ----
# go to dev/02_dev.R
# Requires an active RStudio session (rstudioapi)
rstudioapi::navigateToFile( "dev/02_dev.R" )
|
# Verify that last_modification_date() returns the mtime of the most recently
# touched file in the directory.
test_that("last modification date", {
  fs::file_create("foo")
  # Robustness fix: clean up even when an expectation below fails; the
  # original trailing unlink("foo") was skipped on test failure.
  on.exit(unlink("foo"), add = TRUE)
  # Notes: Had to set this to GMT otherwise I was getting a one off date error
  today <- lubridate::ymd(lubridate::today(tz = "GMT"))
  fs::file_touch("foo", today)
  touch_date <- holepunch:::last_modification_date(".")
  expect_identical(today, touch_date)
})
|
/tests/testthat/test-last_modification_date.R
|
permissive
|
choldgraf/holepunch
|
R
| false
| false
| 342
|
r
|
# Verify that last_modification_date() returns the mtime of the most recently
# touched file in the directory.
test_that("last modification date", {
  fs::file_create("foo")
  # Robustness fix: clean up even when an expectation below fails; the
  # original trailing unlink("foo") was skipped on test failure.
  on.exit(unlink("foo"), add = TRUE)
  # Notes: Had to set this to GMT otherwise I was getting a one off date error
  today <- lubridate::ymd(lubridate::today(tz = "GMT"))
  fs::file_touch("foo", today)
  touch_date <- holepunch:::last_modification_date(".")
  expect_identical(today, touch_date)
})
|
## ---- echo = FALSE-------------------------------------------------------
# Vignette setup chunk: tidy code at a 60-character width and collapse chunk
# output into the source block with "#>" prefixes.
library(knitr)
opts_chunk$set(tidy.opts=list(width.cutoff=60),tidy=TRUE)
knitr::opts_chunk$set(comment = "#>", collapse = TRUE)
|
/vignettes/CLI_guide.R
|
permissive
|
billchenxi/BaMORC
|
R
| false
| false
| 205
|
r
|
## ---- echo = FALSE-------------------------------------------------------
# Vignette setup chunk: tidy code at a 60-character width and collapse chunk
# output into the source block with "#>" prefixes.
library(knitr)
opts_chunk$set(tidy.opts=list(width.cutoff=60),tidy=TRUE)
knitr::opts_chunk$set(comment = "#>", collapse = TRUE)
|
# Fit a 4th-degree polynomial to cumulative confirmed COVID-19 cases
# (confirmados_totais vs day index) and forecast 12 days ahead with a
# 95% prediction interval.
dados <- read.csv("C:/Users/James Bond/Desktop/Gitlab/dsl/programs/python/Leitos_OP/tabela_micro_OP.csv")
grafico <- lm(formula = confirmados_totais ~ poly(index_data, degree = 4, raw = TRUE), data = dados)
summary(grafico)
# Making predictions
plot(dados$index_data, dados$confirmados_totais)
inicio <- 109  # first day index to forecast
fim <- 120     # last day index to forecast
novo_dado <-
data.frame(index_data = seq(inicio, fim, 1))
#predict(grafico, newdata = novo_dado,interval = "prediction")
# Forecasts with 95% prediction intervals for the future days
previsao <-
data.frame(predict(grafico, newdata = novo_dado, interval = 'prediction'))
previsao$index <- novo_dado$index_data
# Fitted values (with intervals) for the observed days
y_fitted <- data.frame(predict(grafico, interval = 'prediction'))
y_fitted$index <- dados$index_data
library(ggplot2)
# Assign the plot so it can be saved below, then display it
p <- ggplot(data = y_fitted, aes(x = index, y = fit))+ # observed (past) data
geom_path(size = 0.1, colour = 'black')+ # line for past data
geom_ribbon(data = previsao, aes(ymin = lwr, ymax = upr), fill = 'gray70')+ # forecast error band
geom_path(data = previsao, mapping = aes(x = index, fit), color = 'red')+ # line for forecast
ylim(-100, 2000)+ # y-axis limits
xlim(0, 120) # x-axis limits
print(p)
# Save graph in high resolution
# Bug fix: `oi` was never defined; save the plot object built above instead
ggsave(p, filename = 'C:/Users/James Bond/Desktop/Gitlab/dsl/programs/python/Leitos_OP/oi.png',
dpi = 1200, width = 6, height = 4.5, units = 'in')
|
/Códigos boletins/Micro_OP_R.R
|
no_license
|
Gabrieldomal/Covid-19-Quadril-tero-Ferr-fero-MG
|
R
| false
| false
| 1,304
|
r
|
# Fit a 4th-degree polynomial to cumulative confirmed COVID-19 cases
# (confirmados_totais vs day index) and forecast 12 days ahead with a
# 95% prediction interval.
dados <- read.csv("C:/Users/James Bond/Desktop/Gitlab/dsl/programs/python/Leitos_OP/tabela_micro_OP.csv")
grafico <- lm(formula = confirmados_totais ~ poly(index_data, degree = 4, raw = TRUE), data = dados)
summary(grafico)
# Making predictions
plot(dados$index_data, dados$confirmados_totais)
inicio <- 109  # first day index to forecast
fim <- 120     # last day index to forecast
novo_dado <-
data.frame(index_data = seq(inicio, fim, 1))
#predict(grafico, newdata = novo_dado,interval = "prediction")
# Forecasts with 95% prediction intervals for the future days
previsao <-
data.frame(predict(grafico, newdata = novo_dado, interval = 'prediction'))
previsao$index <- novo_dado$index_data
# Fitted values (with intervals) for the observed days
y_fitted <- data.frame(predict(grafico, interval = 'prediction'))
y_fitted$index <- dados$index_data
library(ggplot2)
# Assign the plot so it can be saved below, then display it
p <- ggplot(data = y_fitted, aes(x = index, y = fit))+ # observed (past) data
geom_path(size = 0.1, colour = 'black')+ # line for past data
geom_ribbon(data = previsao, aes(ymin = lwr, ymax = upr), fill = 'gray70')+ # forecast error band
geom_path(data = previsao, mapping = aes(x = index, fit), color = 'red')+ # line for forecast
ylim(-100, 2000)+ # y-axis limits
xlim(0, 120) # x-axis limits
print(p)
# Save graph in high resolution
# Bug fix: `oi` was never defined; save the plot object built above instead
ggsave(p, filename = 'C:/Users/James Bond/Desktop/Gitlab/dsl/programs/python/Leitos_OP/oi.png',
dpi = 1200, width = 6, height = 4.5, units = 'in')
|
################################################################################
# Author: Petr Keil
# Email: pkeil@seznam.cz
# Date: April 26 2018
################################################################################
# Description: Here is where model SMOOTH is used to generate predictions to the
# regular global network of 1 ha plots, and to the grid of large hexagons.
################################################################################
# clean the workspace and load the libraries
source("0_libraries_functions_settings.r")
################################################################################
### Read, transform and scale the data
# read the data
pts <- read.csv(file="../Data/GRIDS/Fine_points_with_environment.csv")
grid5 <- readOGR(dsn = "../Data/GRIDS", layer = "hex5_with_environment")
grid5 <- spTransform(x = grid5, CRSobj = WGS84)
# -----------------------------------------
# The +1 avoids zero densities (and hence log(0) later) for treeless plots
pts$Tree_dens <- (pts$TREE_DENS + 1) / pts$A # calculate tree density (note the x+1 step!!)
# Constant plot-level covariates: 1 ha plots, no minimum DBH, no elongation
pts <- data.frame(pts,
Area_km = 0.01,
min_DBH = 0,
ELONG = 1,
DAT_TYPE = "Plot")
# tree density at the grid level
grid5$Tree_dens <- (grid5$TREE_DENS + 1) / grid5$LandArea
grid5@data <- data.frame(grid5@data,
min_DBH = 0,
ELONG = 1,
DAT_TYPE = "Country")
# -----------------------------------------
# Keep the predictor columns used by model SMOOTH; log-transform area and density
pts <- dplyr::select(pts, Area_km, Tree_dens, min_DBH,
GPP, ANN_T, ISO_T, MIN_P, P_SEAS, ALT_DIF, ELONG,
ISLAND = ISL_LS, Lat, Lon, DAT_TYPE) %>%
mutate(Area_km = log(Area_km), Tree_dens=log(Tree_dens))
grid5.dat <- dplyr::select(grid5@data, Area_km = LandArea, Tree_dens, min_DBH,
GPP, ANN_T, ISO_T, MIN_P, P_SEAS, ALT_DIF, ELONG,
ISLAND = ISL_LS, Lat, Lon, DAT_TYPE) %>%
mutate(Area_km = log(Area_km), Tree_dens=log(Tree_dens))
# get the scaling constants that were used to scale the raw plot and country data:
scal.tab <- read.csv("scale_tab.csv")
scal.tab <- scal.tab[scal.tab$var %in% c("ET","WARM_T") == FALSE,]
# scale the grid data in the same way as the original data
# NOTE(review): assumes columns 1:10 line up row-for-row with scal.tab -- confirm
pts[,1:10] <- scale(pts[,1:10],
center = scal.tab$centr,
scale = scal.tab$scale)
grid5.dat[,1:10] <- scale(grid5.dat[,1:10],
center = scal.tab$centr,
scale = scal.tab$scale)
################################################################################
### Make the predictions
# load the saved SMOOTH model that will be used for the global predictions
library(mgcv)
load("../Models/gam_SMOOTH.Rdata")
################################################################################
### Predictions in hexagons
# predict S from the model SMOOTH
grid.pred.S <- predict(gam.SMOOTH,
newdata = grid5.dat,
type="response")
grid.preds.S <- round(grid.pred.S, 2)
# predict the regional effect from model SMOOTH
grid.pred.smth <- predict.gam(gam.SMOOTH,
type = "terms",
newdata = grid5.dat)[,"s(Lat,Lon):DAT_TYPECountry"]
# merge with the original grid
grid5@data <- data.frame(grid5@data, S = grid.pred.S, smooth.country = grid.pred.smth)
grid5@data$id <- as.character(grid5@data$id)
# remove cells with little land area
good.cells <- grid5@data$LandArea / grid5@data$CellArea > 0.5
good.cells[is.na(good.cells)] <- FALSE
grid5 <- grid5[good.cells,]
# remove cells with 0 or NA species predicted
good.cells <- grid5@data$S > 1
good.cells[is.na(good.cells)] <- FALSE
grid5 <- grid5[good.cells, ]
################################################################################
### Predictions in 1 ha plots
# predict S in the plots from the SMOOTH model
plot.pred.S <- predict(gam.SMOOTH,
newdata = pts,
type="response")
plot.pred.S <- round(plot.pred.S, 2)
# predict region effects in the plots from the SMOOTH model
plot.pred.smth <- predict.gam(gam.SMOOTH,
type = "terms",
newdata = pts)[,"s(Lat,Lon):DAT_TYPEPlot"]
# put all together
plot.preds <- data.frame(pts,
S = as.numeric(plot.pred.S),
smooth.plot = plot.pred.smth)
# remove predictions of S < 0.8 (an arbitrarily selected threshold)
plot.preds$S[plot.preds$S < 0.8] <- NA
plot.preds <- plot.preds[rowSums(is.na(plot.preds)) == 0,]
# put predictions to a spatial object
plot.preds <- SpatialPointsDataFrame(coords = data.frame(plot.preds$Lon, plot.preds$Lat),
data = plot.preds,
proj4string = CRS(WGS84))
# ------------------------------------------------------------------------------
# calculate BETA DIVERSITY for PLOTS
# extract S values from the hexagonal grid to the points
gamma <- over(x=plot.preds, y=grid5)[,c("S", "ALT_DIF", "smooth.country")]
names(gamma) <- c("gamma", "ALT_DIF_grid", "smooth.country")
# calculate beta diversity per plot
plot.preds@data <- data.frame(plot.preds@data, gamma) %>%
mutate(beta = gamma/S, reg.beta = exp(smooth.country)/exp(smooth.plot))
# ------------------------------------------------------------------------------
# write out data with no NA values
write.csv(plot.preds@data,
file="../Data/GRIDS/Fine_points_with_predictions.csv", row.names=FALSE)
# ------------------------------------------------------------------------------
# transform the data for fancy plotting
plot.preds.ml <- spTransform(plot.preds, CRSobj = MOLLWEIDE)
plot.preds.ml <- data.frame(plot.preds.ml@data,
data.frame(X=coordinates(plot.preds.ml)[,1],
Y=coordinates(plot.preds.ml)[,2]))
grid5.ml <- spTransform(grid5, CRSobj=MOLLWEIDE)
grid5.mlf <- tidy(grid5.ml, region="id")
grid5.mlf <- left_join(x=grid5.mlf, y=grid5.ml@data, by="id")
################################################################################
# PLOTTING THE MAPS
# Read the shapefiles
# country boundaries (polygons; one feature per country)
CNTR <- readOGR(dsn="../Data/COUNTRIES", layer="COUNTRIES")
CNTRml <- spTransform(CNTR, CRSobj=MOLLWEIDE)
CNTRml <- tidy(CNTRml, region="NAME")  # fortify to a plain data frame for ggplot2
# global mainlands (not divided by country boundaries)
MAINL <- readOGR(dsn = "../Data/COUNTRIES", layer = "GSHHS_i_L1_simple")
MAINL <- spTransform(MAINL, CRSobj = CRS(MOLLWEIDE))
MAINL <- tidy(MAINL, region="id")
# equator, tropics, and polar circles (reference lines for the maps)
LINES <- readOGR(dsn = "../Data/COUNTRIES", layer = "ne_110m_geographic_lines")
LINES <- spTransform(LINES, CRSobj = CRS(MOLLWEIDE))
LINES <- tidy(LINES, region="name")
blank.theme <- theme(axis.line=element_blank(),axis.text.x=element_blank(),
axis.text.y=element_blank(),axis.ticks=element_blank(),
axis.title.x=element_blank(),
axis.title.y=element_blank(),
legend.position=c(0.63, 0.09),
legend.direction = "horizontal",
legend.title = element_blank(),
legend.title.align = 0,
#plot.title = element_text(hjust = 0),
plot.subtitle = element_text(vjust=-3),
panel.background=element_blank(),
panel.border=element_blank(),panel.grid.major=element_blank(),
panel.grid.minor=element_blank(),plot.background=element_blank())
# predicted S in hexagons
plot.gr.S <- ggplot(grid5.mlf, aes(long, lat, group=group)) +
geom_polygon(data=LINES, aes(long, lat, group=group),
colour="darkgrey", size=0.2) +
geom_polygon(data=MAINL, aes(long, lat, group=group),
fill="white", colour=NA, size=.2) +
geom_polygon(aes(fill=S)) +
geom_polygon(data=MAINL, aes(long, lat, group=group),
fill=NA, colour="black", size=.2) +
scale_fill_distiller(palette = "Spectral",
name=expression(S[hex]),
#limits=c(1,5000),
trans="log10") +
scale_x_continuous(limits = c(-12000000, 16000000)) +
scale_y_continuous(limits = c(-6.4e+06, 8.8e+06)) +
xlab("") + ylab("") +
#ggtitle("A") +
labs(subtitle = expression(hat(S)[hex] ~ "- predicted richness in 209,903" ~ km^2 ~ "hexagons")) +
theme_minimal() + blank.theme
plot.gr.S
# predicted S in plots
plot.pl.S <- ggplot(MAINL, aes(long, lat, group=group)) +
geom_polygon(data=LINES, aes(long, lat, group=group),
colour="darkgrey", size=0.2) +
geom_polygon(colour=NA, fill="white", size=.2) +
geom_point(data=plot.preds.ml, size=0.01,
aes(x=X, y=Y, group=NULL, colour=S)) +
geom_polygon(colour="black", fill=NA, size=.2) +
scale_colour_distiller(palette = "Spectral",
name=expression(S[plot]),
#limits=c(1,5000),
trans="log10") +
scale_x_continuous(limits = c(-12000000, 16000000)) +
scale_y_continuous(limits = c(-6.4e+06, 8.8e+06)) +
xlab("") + ylab("") +
#ggtitle("B") +
labs(subtitle = expression(hat(S)[plot] ~ "- predicted richness in 1 ha plots")) +
theme_minimal() + blank.theme
# predicted beta in plots
plot.beta.S <- ggplot(MAINL, aes(long, lat, group=group)) +
geom_polygon(data=LINES, aes(long, lat, group=group),
colour="darkgrey", size=0.2) +
geom_polygon(colour=NA, fill="white", size=.2) +
geom_point(data=plot.preds.ml, size=0.01,
aes(x=X, y=Y, group=NULL, colour=beta)) +
geom_polygon(colour="black", fill=NA, size=.2) +
scale_colour_distiller(palette = "Spectral",
name=expression(beta),
trans="log10") +
scale_x_continuous(limits = c(-12000000, 16000000)) +
scale_y_continuous(limits = c(-6.4e+06, 8.8e+06)) +
xlab("") + ylab("") +
#ggtitle("C") +
labs(subtitle = expression(beta ~ "=" ~ hat(S)[hex]/hat(S)[plot])) +
theme_minimal() + blank.theme
# predicted region effects in the hexagons
plot.gr.smth <- ggplot(grid5.mlf, aes(long, lat, group=group)) +
geom_polygon(data=LINES, aes(long, lat, group=group),
colour="darkgrey", size=0.2) +
geom_polygon(data=MAINL, aes(long, lat, group=group),
fill="white", colour=NA, size=.2) +
geom_polygon(aes(fill=smooth.country)) +
geom_polygon(data=MAINL, aes(long, lat, group=group),
fill=NA, colour="black", size=.2) +
scale_fill_distiller(palette = "Spectral",
name="Region effect",
limits=c(-2, 2)) +
scale_x_continuous(limits = c(-12000000, 16000000)) +
scale_y_continuous(limits = c(-6.4e+06, 8.8e+06)) +
xlab("") + ylab("") +
#ggtitle("D") +
labs(subtitle = expression(s[2](Lat, Lon) ~ "- smooth region effects in 209,903" ~ km^2 ~ "hexagons")) +
theme_minimal() + blank.theme
# predicted region effects in the plots
plot.pl.smth <- ggplot(MAINL, aes(long, lat, group=group)) +
geom_polygon(data=LINES, aes(long, lat, group=group),
colour="darkgrey", size=0.2) +
geom_polygon(colour=NA, fill="white", size=.2) +
geom_point(data=plot.preds.ml, size=0.01,
aes(x=X, y=Y, group=NULL, colour=smooth.plot)) +
geom_polygon(colour="black", fill=NA, size=.2) +
scale_colour_distiller(palette = "Spectral",
limits=c(-2, 2),
name="Region effect") +
scale_x_continuous(limits = c(-12000000, 16000000)) +
scale_y_continuous(limits = c(-6.4e+06, 8.8e+06)) +
xlab("") + ylab("") +
labs(subtitle = expression(s[1](Lat, Lon) ~ "- smooth region effects in 1 ha plots)")) +
theme_minimal() + blank.theme
plot.pl.smth
# predicted ratios of region effects between local and hexagon grains
plot.beta.smth <- ggplot(MAINL, aes(long, lat, group=group)) +
geom_polygon(data=LINES, aes(long, lat, group=group),
colour="darkgrey", size=0.2) +
geom_polygon(colour=NA, fill="white", size=.2) +
geom_point(data=plot.preds.ml, size=0.01,
aes(x=X, y=Y, group=NULL, colour=reg.beta)) +
geom_polygon(colour="black", fill=NA, size=.2) +
scale_colour_distiller(palette = "Spectral",
name=expression("Region" ~ beta)) +
scale_x_continuous(limits = c(-12000000, 16000000)) +
scale_y_continuous(limits = c(-6.4e+06, 8.8e+06)) +
xlab("") + ylab("") +
#ggtitle("F") +
labs(subtitle = expression(Delta ~ "=" ~ e^s[2](Lat, Lon) / e^s[1](Lat, Lon) )) +
theme_minimal() + blank.theme
plot.beta.smth
# ------------------------------------------------------------------------------
# write to file
library(cowplot)
tiff("../Figures/SMOOTH_prediction_grids.tif", width=4000, height=3400, res=350,
compression = "lzw")
plot_grid(plot.gr.S, plot.gr.smth,
plot.pl.S, plot.pl.smth,
plot.beta.S, plot.beta.smth,
nrow=3, ncol=2,
labels = c("a", "d", "b", "e", "c", "f"), vjust = 1.1, hjust = -0.1)
dev.off()
################################################################################
# LATITUDINAL GRADIENTS OF THE PREDICTIONS
################################################################################
# data for latitudinal gradient plots
LG.data <-
rbind(
data.frame(Latitude = grid5.ml@data$Lat, S = grid5.ml@data$S,
Grain = "hexagons", Longitude = grid5.ml$Lon),
data.frame(Latitude = plot.preds$Lat, S = plot.preds$S,
Grain = "plots", Longitude = plot.preds$Lon)
)
# plot the latitudinal gradients
LG.plot <- ggplot(LG.data, aes(x=Latitude, y=S)) +
geom_vline(xintercept = 0, size=.2) +
geom_vline(xintercept = 23.5, size=.2) +
geom_vline(xintercept = -23.5, size=.2) +
geom_point(aes(shape=Grain), alpha=0.3) +
geom_smooth(colour="red", aes(linetype=Grain), method="loess", span=0.3) +
scale_y_log10() +
theme_bw() +
scale_shape(solid = FALSE) +
coord_flip()
# write to file
png(file="../Figures/latitudinal_gradient.png", width=1500, height=1200, res=250)
LG.plot
dev.off()
################################################################################
# RELATIONSHIP BETWEEN BETA DIVERSITY AND ELEVATION SPAN
################################################################################
DAT <- plot.preds@data[is.na(plot.preds@data$beta) == FALSE, ]
DAT <- DAT[is.na(DAT$ALT_DIF_grid) == FALSE, ]
DAT <- DAT[is.na(DAT$ALT_DIF) == FALSE, ]
m1 <- lm(log10(beta)~ poly(ALT_DIF_grid,2) + poly(ALT_DIF, 2),
data=DAT,
na.action=na.omit)
par(mfrow=c(1,2))
termplot(m1, se=T)
# Scatter plot of beta diversity against the elevation span within each hexagon,
# with a quadratic linear-model smoother and a log10 y axis.
ggplot(data = plot.preds@data, aes(x = ALT_DIF_grid, y = beta)) +
  geom_point(shape = 1) +
  geom_smooth(method = "lm", formula = y ~ poly(x, 2)) +
  scale_y_continuous(trans = "log10") +
  xlab("Elevation span within hexagon [m]") +  # FIX: typo "Elavation"
  ylab(expression(gamma / alpha)) +
  theme_bw()
|
/R/8.0_GAM_make_predictions_to_regular_grids_at_two_grains.r
|
no_license
|
lrcai/global_tree_S
|
R
| false
| false
| 15,237
|
r
|
################################################################################
# Author: Petr Keil
# Email: pkeil@seznam.cz
# Date: April 26 2018
################################################################################
# Description: Here is where model SMOOTH is used to generate predictions to the
# regular global network of 1 ha plots, and to the grid of large hexagons.
################################################################################
# clean the workspace and load the libraries
source("0_libraries_functions_settings.r")
################################################################################
### Read, transform and scale the data
# read the data
pts <- read.csv(file="../Data/GRIDS/Fine_points_with_environment.csv")
grid5 <- readOGR(dsn = "../Data/GRIDS", layer = "hex5_with_environment")
grid5 <- spTransform(x = grid5, CRSobj = WGS84)
# -----------------------------------------
pts$Tree_dens <- (pts$TREE_DENS + 1) / pts$A # calculate tree density (note the x+1 step!!)
pts <- data.frame(pts,
Area_km = 0.01,
min_DBH = 0,
ELONG = 1,
DAT_TYPE = "Plot")
# tree density at the grid level
grid5$Tree_dens <- (grid5$TREE_DENS + 1) / grid5$LandArea
grid5@data <- data.frame(grid5@data,
min_DBH = 0,
ELONG = 1,
DAT_TYPE = "Country")
# -----------------------------------------
pts <- dplyr::select(pts, Area_km, Tree_dens, min_DBH,
GPP, ANN_T, ISO_T, MIN_P, P_SEAS, ALT_DIF, ELONG,
ISLAND = ISL_LS, Lat, Lon, DAT_TYPE) %>%
mutate(Area_km = log(Area_km), Tree_dens=log(Tree_dens))
grid5.dat <- dplyr::select(grid5@data, Area_km = LandArea, Tree_dens, min_DBH,
GPP, ANN_T, ISO_T, MIN_P, P_SEAS, ALT_DIF, ELONG,
ISLAND = ISL_LS, Lat, Lon, DAT_TYPE) %>%
mutate(Area_km = log(Area_km), Tree_dens=log(Tree_dens))
# get the scaling constants that were used to scale the raw plot and country data:
scal.tab <- read.csv("scale_tab.csv")
scal.tab <- scal.tab[scal.tab$var %in% c("ET","WARM_T") == FALSE,]
# scale the grid data in the same way as the original data
pts[,1:10] <- scale(pts[,1:10],
center = scal.tab$centr,
scale = scal.tab$scale)
grid5.dat[,1:10] <- scale(grid5.dat[,1:10],
center = scal.tab$centr,
scale = scal.tab$scale)
################################################################################
### Make the predictions
# load the saved SMOOTH model that will be used for the global predictions
library(mgcv)
load("../Models/gam_SMOOTH.Rdata")
################################################################################
### Predictions in hexagons
# predict S from the model SMOOTH
grid.pred.S <- predict(gam.SMOOTH,
                       newdata = grid5.dat,
                       type = "response")
# Round predictions to 2 decimal places.
# FIX: the original assigned the rounded values to a new, never-used variable
# 'grid.preds.S' (note the extra "s"), so the rounding silently had no effect.
# Assign back to 'grid.pred.S', consistent with the plot-level code below.
grid.pred.S <- round(grid.pred.S, 2)
# predict the regional (smooth spatial) effect from model SMOOTH
grid.pred.smth <- predict.gam(gam.SMOOTH,
                              type = "terms",
                              newdata = grid5.dat)[, "s(Lat,Lon):DAT_TYPECountry"]
# merge predictions with the original hexagon grid attributes
grid5@data <- data.frame(grid5@data, S = grid.pred.S, smooth.country = grid.pred.smth)
grid5@data$id <- as.character(grid5@data$id)
# remove cells with little land area (less than 50% land)
good.cells <- grid5@data$LandArea / grid5@data$CellArea > 0.5
good.cells[is.na(good.cells)] <- FALSE
grid5 <- grid5[good.cells, ]
# remove cells with <= 1 or NA species predicted
good.cells <- grid5@data$S > 1
good.cells[is.na(good.cells)] <- FALSE
grid5 <- grid5[good.cells, ]
################################################################################
### Predictions in 1 ha plots
# predict S in the plots from the SMOOTH model
plot.pred.S <- predict(gam.SMOOTH,
newdata = pts,
type="response")
plot.pred.S <- round(plot.pred.S, 2)
# predict region effects in the plots from the SMOOTH model
plot.pred.smth <- predict.gam(gam.SMOOTH,
type = "terms",
newdata = pts)[,"s(Lat,Lon):DAT_TYPEPlot"]
# put all together
plot.preds <- data.frame(pts,
S = as.numeric(plot.pred.S),
smooth.plot = plot.pred.smth)
# remove predictions of S < 0.8 (an arbitrarily selected threshold)
plot.preds$S[plot.preds$S < 0.8] <- NA
plot.preds <- plot.preds[rowSums(is.na(plot.preds)) == 0,]
# put predictions to a spatial object
plot.preds <- SpatialPointsDataFrame(coords = data.frame(plot.preds$Lon, plot.preds$Lat),
data = plot.preds,
proj4string = CRS(WGS84))
# ------------------------------------------------------------------------------
# calculate BETA DIVERSITY for PLOTS
# extract S values from the hexagonal grid to the points
gamma <- over(x=plot.preds, y=grid5)[,c("S", "ALT_DIF", "smooth.country")]
names(gamma) <- c("gamma", "ALT_DIF_grid", "smooth.country")
# calculate beta diversity per plot
plot.preds@data <- data.frame(plot.preds@data, gamma) %>%
mutate(beta = gamma/S, reg.beta = exp(smooth.country)/exp(smooth.plot))
# ------------------------------------------------------------------------------
# write out data with no NA values
write.csv(plot.preds@data,
file="../Data/GRIDS/Fine_points_with_predictions.csv", row.names=FALSE)
# ------------------------------------------------------------------------------
# transform the data for fancy plotting
plot.preds.ml <- spTransform(plot.preds, CRSobj = MOLLWEIDE)
plot.preds.ml <- data.frame(plot.preds.ml@data,
data.frame(X=coordinates(plot.preds.ml)[,1],
Y=coordinates(plot.preds.ml)[,2]))
grid5.ml <- spTransform(grid5, CRSobj=MOLLWEIDE)
grid5.mlf <- tidy(grid5.ml, region="id")
grid5.mlf <- left_join(x=grid5.mlf, y=grid5.ml@data, by="id")
################################################################################
# PLOTTING THE MAPS
# Read the shapefiles
# country boundaries (polygons; one feature per country)
CNTR <- readOGR(dsn="../Data/COUNTRIES", layer="COUNTRIES")
CNTRml <- spTransform(CNTR, CRSobj=MOLLWEIDE)
CNTRml <- tidy(CNTRml, region="NAME")  # fortify to a plain data frame for ggplot2
# global mainlands (not divided by country boundaries)
MAINL <- readOGR(dsn = "../Data/COUNTRIES", layer = "GSHHS_i_L1_simple")
MAINL <- spTransform(MAINL, CRSobj = CRS(MOLLWEIDE))
MAINL <- tidy(MAINL, region="id")
# equator, tropics, and polar circles (reference lines for the maps)
LINES <- readOGR(dsn = "../Data/COUNTRIES", layer = "ne_110m_geographic_lines")
LINES <- spTransform(LINES, CRSobj = CRS(MOLLWEIDE))
LINES <- tidy(LINES, region="name")
blank.theme <- theme(axis.line=element_blank(),axis.text.x=element_blank(),
axis.text.y=element_blank(),axis.ticks=element_blank(),
axis.title.x=element_blank(),
axis.title.y=element_blank(),
legend.position=c(0.63, 0.09),
legend.direction = "horizontal",
legend.title = element_blank(),
legend.title.align = 0,
#plot.title = element_text(hjust = 0),
plot.subtitle = element_text(vjust=-3),
panel.background=element_blank(),
panel.border=element_blank(),panel.grid.major=element_blank(),
panel.grid.minor=element_blank(),plot.background=element_blank())
# predicted S in hexagons
plot.gr.S <- ggplot(grid5.mlf, aes(long, lat, group=group)) +
geom_polygon(data=LINES, aes(long, lat, group=group),
colour="darkgrey", size=0.2) +
geom_polygon(data=MAINL, aes(long, lat, group=group),
fill="white", colour=NA, size=.2) +
geom_polygon(aes(fill=S)) +
geom_polygon(data=MAINL, aes(long, lat, group=group),
fill=NA, colour="black", size=.2) +
scale_fill_distiller(palette = "Spectral",
name=expression(S[hex]),
#limits=c(1,5000),
trans="log10") +
scale_x_continuous(limits = c(-12000000, 16000000)) +
scale_y_continuous(limits = c(-6.4e+06, 8.8e+06)) +
xlab("") + ylab("") +
#ggtitle("A") +
labs(subtitle = expression(hat(S)[hex] ~ "- predicted richness in 209,903" ~ km^2 ~ "hexagons")) +
theme_minimal() + blank.theme
plot.gr.S
# predicted S in plots
plot.pl.S <- ggplot(MAINL, aes(long, lat, group=group)) +
geom_polygon(data=LINES, aes(long, lat, group=group),
colour="darkgrey", size=0.2) +
geom_polygon(colour=NA, fill="white", size=.2) +
geom_point(data=plot.preds.ml, size=0.01,
aes(x=X, y=Y, group=NULL, colour=S)) +
geom_polygon(colour="black", fill=NA, size=.2) +
scale_colour_distiller(palette = "Spectral",
name=expression(S[plot]),
#limits=c(1,5000),
trans="log10") +
scale_x_continuous(limits = c(-12000000, 16000000)) +
scale_y_continuous(limits = c(-6.4e+06, 8.8e+06)) +
xlab("") + ylab("") +
#ggtitle("B") +
labs(subtitle = expression(hat(S)[plot] ~ "- predicted richness in 1 ha plots")) +
theme_minimal() + blank.theme
# predicted beta in plots
plot.beta.S <- ggplot(MAINL, aes(long, lat, group=group)) +
geom_polygon(data=LINES, aes(long, lat, group=group),
colour="darkgrey", size=0.2) +
geom_polygon(colour=NA, fill="white", size=.2) +
geom_point(data=plot.preds.ml, size=0.01,
aes(x=X, y=Y, group=NULL, colour=beta)) +
geom_polygon(colour="black", fill=NA, size=.2) +
scale_colour_distiller(palette = "Spectral",
name=expression(beta),
trans="log10") +
scale_x_continuous(limits = c(-12000000, 16000000)) +
scale_y_continuous(limits = c(-6.4e+06, 8.8e+06)) +
xlab("") + ylab("") +
#ggtitle("C") +
labs(subtitle = expression(beta ~ "=" ~ hat(S)[hex]/hat(S)[plot])) +
theme_minimal() + blank.theme
# predicted region effects in the hexagons
plot.gr.smth <- ggplot(grid5.mlf, aes(long, lat, group=group)) +
geom_polygon(data=LINES, aes(long, lat, group=group),
colour="darkgrey", size=0.2) +
geom_polygon(data=MAINL, aes(long, lat, group=group),
fill="white", colour=NA, size=.2) +
geom_polygon(aes(fill=smooth.country)) +
geom_polygon(data=MAINL, aes(long, lat, group=group),
fill=NA, colour="black", size=.2) +
scale_fill_distiller(palette = "Spectral",
name="Region effect",
limits=c(-2, 2)) +
scale_x_continuous(limits = c(-12000000, 16000000)) +
scale_y_continuous(limits = c(-6.4e+06, 8.8e+06)) +
xlab("") + ylab("") +
#ggtitle("D") +
labs(subtitle = expression(s[2](Lat, Lon) ~ "- smooth region effects in 209,903" ~ km^2 ~ "hexagons")) +
theme_minimal() + blank.theme
# predicted region effects in the plots
plot.pl.smth <- ggplot(MAINL, aes(long, lat, group=group)) +
geom_polygon(data=LINES, aes(long, lat, group=group),
colour="darkgrey", size=0.2) +
geom_polygon(colour=NA, fill="white", size=.2) +
geom_point(data=plot.preds.ml, size=0.01,
aes(x=X, y=Y, group=NULL, colour=smooth.plot)) +
geom_polygon(colour="black", fill=NA, size=.2) +
scale_colour_distiller(palette = "Spectral",
limits=c(-2, 2),
name="Region effect") +
scale_x_continuous(limits = c(-12000000, 16000000)) +
scale_y_continuous(limits = c(-6.4e+06, 8.8e+06)) +
xlab("") + ylab("") +
labs(subtitle = expression(s[1](Lat, Lon) ~ "- smooth region effects in 1 ha plots)")) +
theme_minimal() + blank.theme
plot.pl.smth
# predicted ratios of region effects between local and hexagon grains
plot.beta.smth <- ggplot(MAINL, aes(long, lat, group=group)) +
geom_polygon(data=LINES, aes(long, lat, group=group),
colour="darkgrey", size=0.2) +
geom_polygon(colour=NA, fill="white", size=.2) +
geom_point(data=plot.preds.ml, size=0.01,
aes(x=X, y=Y, group=NULL, colour=reg.beta)) +
geom_polygon(colour="black", fill=NA, size=.2) +
scale_colour_distiller(palette = "Spectral",
name=expression("Region" ~ beta)) +
scale_x_continuous(limits = c(-12000000, 16000000)) +
scale_y_continuous(limits = c(-6.4e+06, 8.8e+06)) +
xlab("") + ylab("") +
#ggtitle("F") +
labs(subtitle = expression(Delta ~ "=" ~ e^s[2](Lat, Lon) / e^s[1](Lat, Lon) )) +
theme_minimal() + blank.theme
plot.beta.smth
# ------------------------------------------------------------------------------
# write to file
library(cowplot)
tiff("../Figures/SMOOTH_prediction_grids.tif", width=4000, height=3400, res=350,
compression = "lzw")
plot_grid(plot.gr.S, plot.gr.smth,
plot.pl.S, plot.pl.smth,
plot.beta.S, plot.beta.smth,
nrow=3, ncol=2,
labels = c("a", "d", "b", "e", "c", "f"), vjust = 1.1, hjust = -0.1)
dev.off()
################################################################################
# LATITUDINAL GRADIENTS OF THE PREDICTIONS
################################################################################
# data for latitudinal gradient plots
LG.data <-
rbind(
data.frame(Latitude = grid5.ml@data$Lat, S = grid5.ml@data$S,
Grain = "hexagons", Longitude = grid5.ml$Lon),
data.frame(Latitude = plot.preds$Lat, S = plot.preds$S,
Grain = "plots", Longitude = plot.preds$Lon)
)
# plot the latitudinal gradients
LG.plot <- ggplot(LG.data, aes(x=Latitude, y=S)) +
geom_vline(xintercept = 0, size=.2) +
geom_vline(xintercept = 23.5, size=.2) +
geom_vline(xintercept = -23.5, size=.2) +
geom_point(aes(shape=Grain), alpha=0.3) +
geom_smooth(colour="red", aes(linetype=Grain), method="loess", span=0.3) +
scale_y_log10() +
theme_bw() +
scale_shape(solid = FALSE) +
coord_flip()
# write to file
png(file="../Figures/latitudinal_gradient.png", width=1500, height=1200, res=250)
LG.plot
dev.off()
################################################################################
# RELATIONSHIP BETWEEN BETA DIVERSITY AND ELEVATION SPAN
################################################################################
DAT <- plot.preds@data[is.na(plot.preds@data$beta) == FALSE, ]
DAT <- DAT[is.na(DAT$ALT_DIF_grid) == FALSE, ]
DAT <- DAT[is.na(DAT$ALT_DIF) == FALSE, ]
m1 <- lm(log10(beta)~ poly(ALT_DIF_grid,2) + poly(ALT_DIF, 2),
data=DAT,
na.action=na.omit)
par(mfrow=c(1,2))
termplot(m1, se=T)
# Scatter plot of beta diversity against the elevation span within each hexagon,
# with a quadratic linear-model smoother and a log10 y axis.
ggplot(data = plot.preds@data, aes(x = ALT_DIF_grid, y = beta)) +
  geom_point(shape = 1) +
  geom_smooth(method = "lm", formula = y ~ poly(x, 2)) +
  scale_y_continuous(trans = "log10") +
  xlab("Elevation span within hexagon [m]") +  # FIX: typo "Elavation"
  ylab(expression(gamma / alpha)) +
  theme_bw()
|
# Auto-generated fuzzer regression test: calls the internal compiled routine
# MatchIt:::pairdistsubC with degenerate (NULL / zero-length) arguments and
# prints the structure of whatever it returns.
testlist <- list(sub = NULL, NULL, NULL, NULL, num_sub = 0L, s_ = integer(0), t_ = integer(0), x_ = numeric(0))
result <- do.call(MatchIt:::pairdistsubC,testlist)
str(result)
|
/MatchIt/inst/testfiles/pairdistsubC/libFuzzer_pairdistsubC/pairdistsubC_valgrind_files/1612738329-test.R
|
no_license
|
akhikolla/updatedatatype-list2
|
R
| false
| false
| 179
|
r
|
# Auto-generated fuzzer regression test: calls the internal compiled routine
# MatchIt:::pairdistsubC with degenerate (NULL / zero-length) arguments and
# prints the structure of whatever it returns.
testlist <- list(sub = NULL, NULL, NULL, NULL, num_sub = 0L, s_ = integer(0), t_ = integer(0), x_ = numeric(0))
result <- do.call(MatchIt:::pairdistsubC,testlist)
str(result)
|
04928cc1604ff79fcc4cf52c20bede6b tlc01-nonuniform-depth-88.qdimacs 31240 83550
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/A1/Database/Miller-Marin/trafficlight-controller/tlc01-nonuniform-depth-88/tlc01-nonuniform-depth-88.R
|
no_license
|
arey0pushpa/dcnf-autarky
|
R
| false
| false
| 78
|
r
|
04928cc1604ff79fcc4cf52c20bede6b tlc01-nonuniform-depth-88.qdimacs 31240 83550
|
##########################
#                        #
#      Exercise 14       #
#                        #
##########################
# Load libraries
library(haven)     # read_sas()
library(forecast)  # Arima(), forecast()
# Import the dataset
dataset <- read_sas("/your_path/quarterly.sas7bdat")
# Generate the spread as: r5 - Tbill (column 5 minus column 3)
spread = dataset[,5] - dataset[,3]
spread <- ts(spread)
# Estimate an AR(7) and an ARMA(1,1)
arSeven <- Arima(spread, order = c(7,0,0), fixed=c(NA,NA,NA,NA,NA,NA,NA,NA))
arSeven # AIC=289.32 BIC=319.53
armaOneOne <- Arima(spread, order = c(1,0,1), fixed=c(NA,NA,NA))
armaOneOne # AIC=293.5 BIC=306.92
# We estimate the AR(7) and the ARMA(1, 1) over the period 1960Q1–2000Q3.
# We use the rows 1:163
arSevenReduced <- Arima(spread[1:163,], order = c(7,0,0), fixed=c(NA,NA,NA,NA,NA,NA,NA,NA))
arSevenReduced # AIC=235.74 BIC=263.59
armaOneOneReduced <- Arima(spread[1:163,], order = c(1,0,1), fixed=c(NA,NA,NA))
armaOneOneReduced # AIC=241.56 BIC=253.93
# We compute the out-of-sample forecast with rolling origin and we form the erroor forecast.
# AR(7)
error <- 0
forecastArSeven <- 0
for (i in 1:49) {
model <- arima(window(spread, end=162+i),order=c(7,0,0),fixed=c(NA,NA,NA,NA,NA,NA,NA,NA))
prediction <- predict(model, n.ahead=1)
error[i] <- spread[163+i] - prediction$pred
forecastArSeven[i] = prediction$pred
}
errorArSeven = error
mspeErrorArSeven = mean(errorArSeven^2, na.rm=TRUE) # MSPE: Mean Square Prediction Error
# AR(7): alternative method
k <- 163 # minimum size for training set
n <- length(spread) # total number of observations
e <- spread*NA # vector to record one-step forecast errors
for(i in 163:(n-1))
{
train <- ts(spread[1:i],freq=1)
fit <- arima(train, order=c(7,0,0),fixed=c(NA,NA,NA,NA,NA,NA,NA,NA))
fc <- forecast(fit,h=1)$mean
e[i] <- spread[i+1]-fc
}
# ARMA(1,1): rolling-origin one-step-ahead forecasts over the 49-quarter
# hold-out period, refitting on an expanding window each step.
error <- numeric(49)                # one-step forecast errors
forecastArimaOneOne <- numeric(49)  # one-step point forecasts
for (i in 1:49) {
  # refit on the expanding window ending at observation 162 + i
  model <- arima(window(spread, end = 162 + i), order = c(1, 0, 1), fixed = c(NA, NA, NA))
  # FIX: the original called predict(mod, n.ahead=1), but no object 'mod'
  # exists -- the fitted object is 'model'; this loop would error at runtime.
  prediction <- predict(model, n.ahead = 1)
  error[i] <- spread[163 + i] - prediction$pred
  forecastArimaOneOne[i] <- prediction$pred
}
errorArimaOneOne = error
# MSPE: Mean Square Prediction Error
mspeerrorArimaOneOne = mean(errorArimaOneOne^2, na.rm = TRUE)
# From the MSPE seems that the ARIMA(1,1) fit better the data.
# We check if the forecast are unbiased or not.
lrArSeven = lm(spread[164:212,] ~ forecastArSeven)
summary(lrArSeven)
lrArimaOneOne = lm(spread[164:212,] ~ forecastArimaOneOne)
summary(lrArimaOneOne)
|
/chapter2/excercise14.R
|
no_license
|
XiaoShiliu611/time-series-enders-R
|
R
| false
| false
| 2,453
|
r
|
##########################
#                        #
#      Exercise 14       #
#                        #
##########################
# Load libraries
library(haven)     # read_sas()
library(forecast)  # Arima(), forecast()
# Import the dataset
dataset <- read_sas("/your_path/quarterly.sas7bdat")
# Generate the spread as: r5 - Tbill (column 5 minus column 3)
spread = dataset[,5] - dataset[,3]
spread <- ts(spread)
# Estimate an AR(7) and an ARMA(1,1)
arSeven <- Arima(spread, order = c(7,0,0), fixed=c(NA,NA,NA,NA,NA,NA,NA,NA))
arSeven # AIC=289.32 BIC=319.53
armaOneOne <- Arima(spread, order = c(1,0,1), fixed=c(NA,NA,NA))
armaOneOne # AIC=293.5 BIC=306.92
# We estimate the AR(7) and the ARMA(1, 1) over the period 1960Q1–2000Q3.
# We use the rows 1:163
arSevenReduced <- Arima(spread[1:163,], order = c(7,0,0), fixed=c(NA,NA,NA,NA,NA,NA,NA,NA))
arSevenReduced # AIC=235.74 BIC=263.59
armaOneOneReduced <- Arima(spread[1:163,], order = c(1,0,1), fixed=c(NA,NA,NA))
armaOneOneReduced # AIC=241.56 BIC=253.93
# We compute the out-of-sample forecast with rolling origin and we form the erroor forecast.
# AR(7)
error <- 0
forecastArSeven <- 0
for (i in 1:49) {
model <- arima(window(spread, end=162+i),order=c(7,0,0),fixed=c(NA,NA,NA,NA,NA,NA,NA,NA))
prediction <- predict(model, n.ahead=1)
error[i] <- spread[163+i] - prediction$pred
forecastArSeven[i] = prediction$pred
}
errorArSeven = error
mspeErrorArSeven = mean(errorArSeven^2, na.rm=TRUE) # MSPE: Mean Square Prediction Error
# AR(7): alternative method
k <- 163 # minimum size for training set
n <- length(spread) # total number of observations
e <- spread*NA # vector to record one-step forecast errors
for(i in 163:(n-1))
{
train <- ts(spread[1:i],freq=1)
fit <- arima(train, order=c(7,0,0),fixed=c(NA,NA,NA,NA,NA,NA,NA,NA))
fc <- forecast(fit,h=1)$mean
e[i] <- spread[i+1]-fc
}
# ARMA(1,1): rolling-origin one-step-ahead forecasts over the 49-quarter
# hold-out period, refitting on an expanding window each step.
error <- numeric(49)                # one-step forecast errors
forecastArimaOneOne <- numeric(49)  # one-step point forecasts
for (i in 1:49) {
  # refit on the expanding window ending at observation 162 + i
  model <- arima(window(spread, end = 162 + i), order = c(1, 0, 1), fixed = c(NA, NA, NA))
  # FIX: the original called predict(mod, n.ahead=1), but no object 'mod'
  # exists -- the fitted object is 'model'; this loop would error at runtime.
  prediction <- predict(model, n.ahead = 1)
  error[i] <- spread[163 + i] - prediction$pred
  forecastArimaOneOne[i] <- prediction$pred
}
errorArimaOneOne = error
# MSPE: Mean Square Prediction Error
mspeerrorArimaOneOne = mean(errorArimaOneOne^2, na.rm = TRUE)
# From the MSPE seems that the ARIMA(1,1) fit better the data.
# We check if the forecast are unbiased or not.
lrArSeven = lm(spread[164:212,] ~ forecastArSeven)
summary(lrArSeven)
lrArimaOneOne = lm(spread[164:212,] ~ forecastArimaOneOne)
summary(lrArimaOneOne)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/package-HDclust.R
\docType{package}
\name{HDclust-package}
\alias{HDclust-package}
\alias{HDclust}
\title{Clustering high dimensional data with Hidden Markov Model on Variable Blocks}
\description{
Clustering of high dimensional data with Hidden Markov Model on Variable Blocks (HMM-VB)
fitted via Baum-Welch algorithm. Clustering is performed by the Modal Baum-Welch
algorithm (MBW), which finds modes of the density function.
}
\details{
For a quick introduction to \pkg{HDclust} see the vignette \href{../doc/HDclust.html}{\code{vignette("HDclust")}}.
}
\examples{
data("sim3")
set.seed(12345)
Vb <- vb(2, dim=40, bdim=c(10,30), numst=c(3,5), varorder=list(c(1:10),c(11:40)))
hmmvb <- hmmvbTrain(sim3[,1:40], VbStructure=Vb)
clust <- hmmvbClust(sim3[,1:40], model=hmmvb)
show(clust)
}
\references{
Lin Lin and Jia Li, "Clustering with hidden Markov model on variable blocks," \strong{Journal of Machine Learning Research}, 18(110):1-49, 2017.
}
\seealso{
\code{\link{hmmvbTrain}}, \code{\link{hmmvbClust}}
}
\author{
{ Jia Li, Lin Lin and Yevhen Tupikov.
Maintainer: Yevhen Tupikov \email{yzt116@psu.edu}
}
}
|
/fuzzedpackages/HDclust/man/HDclust-package.Rd
|
no_license
|
akhikolla/testpackages
|
R
| false
| true
| 1,191
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/package-HDclust.R
\docType{package}
\name{HDclust-package}
\alias{HDclust-package}
\alias{HDclust}
\title{Clustering high dimensional data with Hidden Markov Model on Variable Blocks}
\description{
Clustering of high dimensional data with Hidden Markov Model on Variable Blocks (HMM-VB)
fitted via Baum-Welch algorithm. Clustering is performed by the Modal Baum-Welch
algorithm (MBW), which finds modes of the density function.
}
\details{
For a quick introduction to \pkg{HDclust} see the vignette \href{../doc/HDclust.html}{\code{vignette("HDclust")}}.
}
\examples{
data("sim3")
set.seed(12345)
Vb <- vb(2, dim=40, bdim=c(10,30), numst=c(3,5), varorder=list(c(1:10),c(11:40)))
hmmvb <- hmmvbTrain(sim3[,1:40], VbStructure=Vb)
clust <- hmmvbClust(sim3[,1:40], model=hmmvb)
show(clust)
}
\references{
Lin Lin and Jia Li, "Clustering with hidden Markov model on variable blocks," \strong{Journal of Machine Learning Research}, 18(110):1-49, 2017.
}
\seealso{
\code{\link{hmmvbTrain}}, \code{\link{hmmvbClust}}
}
\author{
{ Jia Li, Lin Lin and Yevhen Tupikov.
Maintainer: Yevhen Tupikov \email{yzt116@psu.edu}
}
}
|
computeQTrigram <- function(word2, word3, frqTblTokenz, Trigram, biGms,
                            totalTokens, listTokens) {
  # Katz-style backoff estimate of P(w | word2, word3) over the vocabulary
  # `listTokens`, combining discounted trigram probabilities with the bigram
  # backoff distribution obtained from computeQBigram().
  #
  # Args:
  #   word2, word3  : the two context words of the trigram "word2_word3_?"
  #   frqTblTokenz  : unigram frequency table (forwarded to computeQBigram)
  #   Trigram       : quanteda dfm of trigram counts (features like "a_b_c")
  #   biGms         : bigram dfm (forwarded to computeQBigram)
  #   totalTokens   : total token count (forwarded to computeQBigram)
  #   listTokens    : vocabulary; the returned vector is parallel to it
  #
  # Returns: list(statusFlag, probVector) where probVector is the sum of the
  # discounted trigram probabilities (mass PA1) and the renormalized bigram
  # backoff probabilities (mass 1 - PA1). statusFlag is NULL or a message
  # describing missing n-gram evidence.
  searchPtn <- paste0("(^|[^a-z])(", word2, "_", word3, "_)+")
  # Subset trigrams that start with "word2_word3_" from the trigram dfm
  frqTblKnownTrigram <- dfm_select(Trigram, pattern = searchPtn,
                                   selection = "keep", valuetype = "regex")
  # Total count of known trigrams (denominator for the ML probabilities)
  totalKnownTrigram <- rowSums(frqTblKnownTrigram)
  # Trigram-level probabilities, parallel to listTokens
  returnVector1 <- rep(0, length(listTokens))
  # Backoff distribution from the lower-order bigram model
  returnList <- computeQBigram(word3, frqTblTokenz, biGms, totalTokens, listTokens)
  returnStatus <- returnList[[1]]
  returnVector2 <- returnList[[2]]
  returnStatusFlag <- NULL
  TrigramFrqVec <- as.vector(frqTblKnownTrigram[1, ])
  if (length(TrigramFrqVec) < 2) {
    if (length(TrigramFrqVec) == 0) {
      # No trigram evidence at all: rely entirely on the bigram backoff
      # (PA1 = 0) and tell the caller via the status flag.
      PA1 <- 0
      returnStatusFlag <- "No trigrams with word found"
    }
    # FIX: the original condition was `length(TrigramFrqVec == 1)`, which
    # measures the length of a logical vector and only worked by accident
    # inside this branch (length 0 -> if(0), length 1 -> if(1)).
    if (length(TrigramFrqVec) == 1) {
      # A single known trigram: too little data for Good-Turing, so manually
      # discount with a coin-toss factor of 0.5.
      splitTrigramList <- strsplit(featnames(frqTblKnownTrigram), split = "_")
      # Trailing word of the single known trigram and its vocabulary index
      tWord <- splitTrigramList[[1]][3]
      indx <- which(listTokens == tWord)
      # Zero the known word out of the backoff distribution and renormalize
      returnVector2[indx] <- 0
      totalReturnVector2 <- sum(returnVector2)
      returnVector2 <- returnVector2 / totalReturnVector2
      # Reserve 0.5 of the probability mass for the trigram estimate
      PA1 <- 0.5
      returnVector1[indx] <- as.vector(frqTblKnownTrigram[1, ])
      returnVector1 <- returnVector1 / totalKnownTrigram
      returnVector1 <- returnVector1 * PA1
    }
  }
  else {
    # Two or more known trigrams: simple Good-Turing discounting may be
    # possible. Extract the trailing word of every known trigram.
    splitTrigramList <- as.vector(sapply(
      featnames(frqTblKnownTrigram), function(x) strsplit(x, "_")[[1]][3]))
    # Vocabulary indexes of the known trailing words
    wordIndxs <- integer(length(splitTrigramList))
    for (ctr in seq_along(splitTrigramList)) {
      wordIndxs[ctr] <- which(listTokens == splitTrigramList[ctr])
    }
    # Remove known trailing words from the backoff distribution, renormalize
    returnVector2[wordIndxs] <- 0
    totalReturnVector2 <- sum(returnVector2)
    returnVector2 <- returnVector2 / totalReturnVector2
    simpleGoodTuring <- FALSE
    # Fit log(nC) = nA + nB * log(C) through the two smallest distinct counts
    # so missing frequencies-of-frequencies can be smoothed below.
    freqKnownTrigram <- sort(unique(TrigramFrqVec))
    if (length(freqKnownTrigram) > 1) {
      cA <- freqKnownTrigram[1]
      nVA <- length(which(as.vector(frqTblKnownTrigram[1, ]) == cA))
      cB <- freqKnownTrigram[2]
      nVB <- length(which(as.vector(frqTblKnownTrigram[1, ]) == cB))
      matA <- matrix(c(1, log(cA), 1, log(cB)), nrow = 2, ncol = 2, byrow = TRUE)
      matY <- matrix(c(log(nVA), log(nVB)), nrow = 2, ncol = 1, byrow = TRUE)
      invMatA <- solve(matA)
      matX <- invMatA %*% matY
      nA <- matX[1, 1]
      nB <- matX[2, 1]
      simpleGoodTuring <- TRUE
    }
    # indx1..indx6: positions whose raw count is 1..6; n1..n6: frequencies of
    # frequencies (how many trigrams occur exactly k times).
    indx1 <- which(as.vector(frqTblKnownTrigram[1, ]) == 1)
    indx2 <- which(as.vector(frqTblKnownTrigram[1, ]) == 2)
    indx3 <- which(as.vector(frqTblKnownTrigram[1, ]) == 3)
    indx4 <- which(as.vector(frqTblKnownTrigram[1, ]) == 4)
    indx5 <- which(as.vector(frqTblKnownTrigram[1, ]) == 5)
    indx6 <- which(as.vector(frqTblKnownTrigram[1, ]) == 6)
    n1 <- length(indx1)
    n2 <- length(indx2)
    n3 <- length(indx3)
    n4 <- length(indx4)
    n5 <- length(indx5)
    n6 <- length(indx6)
    if ((n1 == 0) && (n2 == 0) && (n3 == 0) && (n4 == 0) && (n5 == 0)) {
      # Only large counts (> 5) occur; Good-Turing cannot discount them, so
      # fall back to the manual 0.5 discount.
      simpleGoodTuring <- FALSE
      returnVector1[wordIndxs] <- as.vector(frqTblKnownTrigram[1, ])
      returnVector1 <- returnVector1 / totalKnownTrigram
      PA1 <- 0.5
      returnVector1 <- returnVector1 * PA1
    }
    if (simpleGoodTuring) {
      # Smooth any zero n_k from the fitted line log(nC) = nA + nB*log(C)
      if (n1 == 0) n1 <- 1 * exp(nA)       # 1^nB == 1
      if (n2 == 0) n2 <- (2^nB) * exp(nA)
      if (n3 == 0) n3 <- (3^nB) * exp(nA)
      if (n4 == 0) n4 <- (4^nB) * exp(nA)
      if (n5 == 0) n5 <- (5^nB) * exp(nA)
      if (n6 == 0) n6 <- (6^nB) * exp(nA)
      # Replace raw counts with Good-Turing modified counts.
      # NOTE(review): computeMC receives indx1..indx5 but not indx6 —
      # presumably counts >= 6 are left undiscounted; confirm in computeMC.
      modifiedCount <- as.vector(frqTblKnownTrigram[1, ])
      modifiedCount <- computeMC(n1, n2, n3, n4, n5, n6, indx1, indx2, indx3,
                                 indx4, indx5, modifiedCount)
      probabilityCount <- modifiedCount / totalKnownTrigram
      returnVector1[wordIndxs] <- probabilityCount
      # PA1 = total discounted trigram mass; the remainder goes to backoff
      PA1 <- sum(probabilityCount)
    }
    else {
      # nA/nB are unavailable (all observed counts identical), so simple
      # Good-Turing is impossible; use the manual 0.5 discount instead.
      PA1 <- 0.5
      returnVector1[wordIndxs] <- as.vector(frqTblKnownTrigram[1, ])
      returnVector1 <- returnVector1 / totalKnownTrigram
      returnVector1 <- returnVector1 * PA1
    }
  }
  # Scale the backoff distribution by the leftover probability mass
  returnVector2 <- returnVector2 * (1 - PA1)
  # Propagate/compose status messages coming back from the bigram backoff
  if (!is.null(returnStatus)) {
    if (returnStatus == "No bigrams with word found") {
      returnStatusFlag <- "No bigrams with word found"
    }
    else {
      returnStatusFlag <- paste(returnStatusFlag, returnStatus)
    }
  }
  return(list(returnStatusFlag, (returnVector1 + returnVector2)))
}
|
/capstoneProject/computeQTrigram.R
|
no_license
|
Vulcan-Logic/DSC10_Capstone_Project
|
R
| false
| false
| 6,460
|
r
|
computeQTrigram <- function(word2, word3, frqTblTokenz, Trigram, biGms,
                            totalTokens, listTokens) {
  # Katz-style backoff estimate of P(w | word2, word3) over the vocabulary
  # `listTokens`, combining discounted trigram probabilities with the bigram
  # backoff distribution obtained from computeQBigram().
  #
  # Args:
  #   word2, word3  : the two context words of the trigram "word2_word3_?"
  #   frqTblTokenz  : unigram frequency table (forwarded to computeQBigram)
  #   Trigram       : quanteda dfm of trigram counts (features like "a_b_c")
  #   biGms         : bigram dfm (forwarded to computeQBigram)
  #   totalTokens   : total token count (forwarded to computeQBigram)
  #   listTokens    : vocabulary; the returned vector is parallel to it
  #
  # Returns: list(statusFlag, probVector) where probVector is the sum of the
  # discounted trigram probabilities (mass PA1) and the renormalized bigram
  # backoff probabilities (mass 1 - PA1). statusFlag is NULL or a message
  # describing missing n-gram evidence.
  searchPtn <- paste0("(^|[^a-z])(", word2, "_", word3, "_)+")
  # Subset trigrams that start with "word2_word3_" from the trigram dfm
  frqTblKnownTrigram <- dfm_select(Trigram, pattern = searchPtn,
                                   selection = "keep", valuetype = "regex")
  # Total count of known trigrams (denominator for the ML probabilities)
  totalKnownTrigram <- rowSums(frqTblKnownTrigram)
  # Trigram-level probabilities, parallel to listTokens
  returnVector1 <- rep(0, length(listTokens))
  # Backoff distribution from the lower-order bigram model
  returnList <- computeQBigram(word3, frqTblTokenz, biGms, totalTokens, listTokens)
  returnStatus <- returnList[[1]]
  returnVector2 <- returnList[[2]]
  returnStatusFlag <- NULL
  TrigramFrqVec <- as.vector(frqTblKnownTrigram[1, ])
  if (length(TrigramFrqVec) < 2) {
    if (length(TrigramFrqVec) == 0) {
      # No trigram evidence at all: rely entirely on the bigram backoff
      # (PA1 = 0) and tell the caller via the status flag.
      PA1 <- 0
      returnStatusFlag <- "No trigrams with word found"
    }
    # FIX: the original condition was `length(TrigramFrqVec == 1)`, which
    # measures the length of a logical vector and only worked by accident
    # inside this branch (length 0 -> if(0), length 1 -> if(1)).
    if (length(TrigramFrqVec) == 1) {
      # A single known trigram: too little data for Good-Turing, so manually
      # discount with a coin-toss factor of 0.5.
      splitTrigramList <- strsplit(featnames(frqTblKnownTrigram), split = "_")
      # Trailing word of the single known trigram and its vocabulary index
      tWord <- splitTrigramList[[1]][3]
      indx <- which(listTokens == tWord)
      # Zero the known word out of the backoff distribution and renormalize
      returnVector2[indx] <- 0
      totalReturnVector2 <- sum(returnVector2)
      returnVector2 <- returnVector2 / totalReturnVector2
      # Reserve 0.5 of the probability mass for the trigram estimate
      PA1 <- 0.5
      returnVector1[indx] <- as.vector(frqTblKnownTrigram[1, ])
      returnVector1 <- returnVector1 / totalKnownTrigram
      returnVector1 <- returnVector1 * PA1
    }
  }
  else {
    # Two or more known trigrams: simple Good-Turing discounting may be
    # possible. Extract the trailing word of every known trigram.
    splitTrigramList <- as.vector(sapply(
      featnames(frqTblKnownTrigram), function(x) strsplit(x, "_")[[1]][3]))
    # Vocabulary indexes of the known trailing words
    wordIndxs <- integer(length(splitTrigramList))
    for (ctr in seq_along(splitTrigramList)) {
      wordIndxs[ctr] <- which(listTokens == splitTrigramList[ctr])
    }
    # Remove known trailing words from the backoff distribution, renormalize
    returnVector2[wordIndxs] <- 0
    totalReturnVector2 <- sum(returnVector2)
    returnVector2 <- returnVector2 / totalReturnVector2
    simpleGoodTuring <- FALSE
    # Fit log(nC) = nA + nB * log(C) through the two smallest distinct counts
    # so missing frequencies-of-frequencies can be smoothed below.
    freqKnownTrigram <- sort(unique(TrigramFrqVec))
    if (length(freqKnownTrigram) > 1) {
      cA <- freqKnownTrigram[1]
      nVA <- length(which(as.vector(frqTblKnownTrigram[1, ]) == cA))
      cB <- freqKnownTrigram[2]
      nVB <- length(which(as.vector(frqTblKnownTrigram[1, ]) == cB))
      matA <- matrix(c(1, log(cA), 1, log(cB)), nrow = 2, ncol = 2, byrow = TRUE)
      matY <- matrix(c(log(nVA), log(nVB)), nrow = 2, ncol = 1, byrow = TRUE)
      invMatA <- solve(matA)
      matX <- invMatA %*% matY
      nA <- matX[1, 1]
      nB <- matX[2, 1]
      simpleGoodTuring <- TRUE
    }
    # indx1..indx6: positions whose raw count is 1..6; n1..n6: frequencies of
    # frequencies (how many trigrams occur exactly k times).
    indx1 <- which(as.vector(frqTblKnownTrigram[1, ]) == 1)
    indx2 <- which(as.vector(frqTblKnownTrigram[1, ]) == 2)
    indx3 <- which(as.vector(frqTblKnownTrigram[1, ]) == 3)
    indx4 <- which(as.vector(frqTblKnownTrigram[1, ]) == 4)
    indx5 <- which(as.vector(frqTblKnownTrigram[1, ]) == 5)
    indx6 <- which(as.vector(frqTblKnownTrigram[1, ]) == 6)
    n1 <- length(indx1)
    n2 <- length(indx2)
    n3 <- length(indx3)
    n4 <- length(indx4)
    n5 <- length(indx5)
    n6 <- length(indx6)
    if ((n1 == 0) && (n2 == 0) && (n3 == 0) && (n4 == 0) && (n5 == 0)) {
      # Only large counts (> 5) occur; Good-Turing cannot discount them, so
      # fall back to the manual 0.5 discount.
      simpleGoodTuring <- FALSE
      returnVector1[wordIndxs] <- as.vector(frqTblKnownTrigram[1, ])
      returnVector1 <- returnVector1 / totalKnownTrigram
      PA1 <- 0.5
      returnVector1 <- returnVector1 * PA1
    }
    if (simpleGoodTuring) {
      # Smooth any zero n_k from the fitted line log(nC) = nA + nB*log(C)
      if (n1 == 0) n1 <- 1 * exp(nA)       # 1^nB == 1
      if (n2 == 0) n2 <- (2^nB) * exp(nA)
      if (n3 == 0) n3 <- (3^nB) * exp(nA)
      if (n4 == 0) n4 <- (4^nB) * exp(nA)
      if (n5 == 0) n5 <- (5^nB) * exp(nA)
      if (n6 == 0) n6 <- (6^nB) * exp(nA)
      # Replace raw counts with Good-Turing modified counts.
      # NOTE(review): computeMC receives indx1..indx5 but not indx6 —
      # presumably counts >= 6 are left undiscounted; confirm in computeMC.
      modifiedCount <- as.vector(frqTblKnownTrigram[1, ])
      modifiedCount <- computeMC(n1, n2, n3, n4, n5, n6, indx1, indx2, indx3,
                                 indx4, indx5, modifiedCount)
      probabilityCount <- modifiedCount / totalKnownTrigram
      returnVector1[wordIndxs] <- probabilityCount
      # PA1 = total discounted trigram mass; the remainder goes to backoff
      PA1 <- sum(probabilityCount)
    }
    else {
      # nA/nB are unavailable (all observed counts identical), so simple
      # Good-Turing is impossible; use the manual 0.5 discount instead.
      PA1 <- 0.5
      returnVector1[wordIndxs] <- as.vector(frqTblKnownTrigram[1, ])
      returnVector1 <- returnVector1 / totalKnownTrigram
      returnVector1 <- returnVector1 * PA1
    }
  }
  # Scale the backoff distribution by the leftover probability mass
  returnVector2 <- returnVector2 * (1 - PA1)
  # Propagate/compose status messages coming back from the bigram backoff
  if (!is.null(returnStatus)) {
    if (returnStatus == "No bigrams with word found") {
      returnStatusFlag <- "No bigrams with word found"
    }
    else {
      returnStatusFlag <- paste(returnStatusFlag, returnStatus)
    }
  }
  return(list(returnStatusFlag, (returnVector1 + returnVector2)))
}
|
#' qPCR data table from Jimenez-Dominguez et al, Sci Rep, 2021; replicate 3_A
#' (perturbations of the ERs, RARs, LCoR, and RIP140 transcriptional network)
#'
#'@format A data frame with 8 rows (modules) and 17 variables (perturbations):
#' \describe{
#' \item{Modules}{Names of the modules}
#' \item{Et}{Ethanol}
#' \item{E2}{Estradiol}
#' \item{RA}{Retinoic Acid}
#' \item{E2+RA}{Estradiol and retinoic acid}
#' \item{siLCoR}{LCoR gene silencing RNA}
#' \item{E2+siLCoR}{Estradiol and LCoR gene silencing RNA}
#' \item{RA+siLCoR}{Retinoic acid and LCoR gene silencing RNA}
#' \item{E2+RA+siLCoR}{Estradiol, retinoic acid, and LCoR gene silencing RNA}
#' \item{siRIP140}{RIP140 gene silencing RNA}
#' \item{E2+siRIP140}{Estradiol and RIP140 gene silencing RNA}
#' \item{RA+siRIP140}{Retinoic acid and RIP140 gene silencing RNA}
#' \item{E2+RA+siRIP140}{Estradiol, retinoic acid, and RIP140 gene
#' silencing RNA}
#' \item{siLCoR+siRIP140}{LCoR gene silencing RNA and RIP140 gene silencing
#' RNA}
#' \item{E2+siLCoR+siRIP140}{Estradiol, LCoR gene silencing RNA,
#' and RIP140 gene silencing RNA}
#' \item{RA+siLCoR+siRIP140}{Retinoic acid, LCoR gene silencing RNA,
#' and RIP140 gene silencing RNA}
#' \item{E2+RA+siLCoR+siRIP140}{Estradiol, retinoic acid, LCoR gene silencing
#' RNA, and RIP140 gene silencing RNA}
#' ...
#'}
#'@source \url{EDF R\&D}
"estr3_A"
|
/R/estr3_A.R
|
no_license
|
bioinfo-ircm/aiMeRA
|
R
| false
| false
| 1,467
|
r
|
#' qPCR data table from Jimenez-Dominguez et al, Sci Rep, 2021; replicate 3_A
#' (perturbations of the ERs, RARs, LCoR, and RIP140 transcriptional network)
#'
#'@format A data frame with 8 rows (modules) and 17 variables (perturbations):
#' \describe{
#' \item{Modules}{Names of the modules}
#' \item{Et}{Ethanol}
#' \item{E2}{Estradiol}
#' \item{RA}{Retinoic Acid}
#' \item{E2+RA}{Estradiol and retinoic acid}
#' \item{siLCoR}{LCoR gene silencing RNA}
#' \item{E2+siLCoR}{Estradiol and LCoR gene silencing RNA}
#' \item{RA+siLCoR}{Retinoic acid and LCoR gene silencing RNA}
#' \item{E2+RA+siLCoR}{Estradiol, retinoic acid, and LCoR gene silencing RNA}
#' \item{siRIP140}{RIP140 gene silencing RNA}
#' \item{E2+siRIP140}{Estradiol and RIP140 gene silencing RNA}
#' \item{RA+siRIP140}{Retinoic acid and RIP140 gene silencing RNA}
#' \item{E2+RA+siRIP140}{Estradiol, retinoic acid, and RIP140 gene
#' silencing RNA}
#' \item{siLCoR+siRIP140}{LCoR gene silencing RNA and RIP140 gene silencing
#' RNA}
#' \item{E2+siLCoR+siRIP140}{Estradiol, LCoR gene silencing RNA,
#' and RIP140 gene silencing RNA}
#' \item{RA+siLCoR+siRIP140}{Retinoic acid, LCoR gene silencing RNA,
#' and RIP140 gene silencing RNA}
#' \item{E2+RA+siLCoR+siRIP140}{Estradiol, retinoic acid, LCoR gene silencing
#' RNA, and RIP140 gene silencing RNA}
#' ...
#'}
#'@source \url{EDF R\&D}
"estr3_A"
|
test_that("rbindAll() works", {
  # Binding a list of plain vectors, asking for a name column
  vec_list <- list(1:3, 4:6)
  bound <- rbindAll(vec_list, nameColumn = "Name")
  expect_is(bound, "data.frame")
  expect_true("Name" %in% names(bound))
  expect_identical(dim(bound), c(2L, 4L))

  # Binding named and unnamed lists of data frames
  dfs <- list(
    A = data.frame(x = 1:2, y = 2:3),
    B = data.frame(x = 1:3, y = 2:4)
  )
  dfs_unnamed <- unname(dfs)

  res_plain <- rbindAll(dfs)
  res_named <- rbindAll(dfs, nameColumn = "group")
  res_int <- rbindAll(dfs_unnamed, nameColumn = "group", namesAsFactor = FALSE)
  res_fac <- rbindAll(dfs_unnamed, nameColumn = "group")

  # Expected results, built independently of rbindAll()
  want_plain <- data.frame(
    x = c(dfs$A$x, dfs$B$x),
    y = c(dfs$A$y, dfs$B$y)
  )
  want_named <- cbind(
    want_plain,
    group = as.factor(c(rep("A", nrow(dfs$A)), rep("B", nrow(dfs$B)))),
    stringsAsFactors = FALSE
  )
  want_int <- cbind(
    want_plain,
    group = c(rep(1L, nrow(dfs$A)), rep(2L, nrow(dfs$B)))
  )
  want_fac <- want_int
  want_fac$group <- as.factor(want_fac$group)

  expect_identical(res_plain, want_plain)
  expect_identical(res_named, want_named)
  expect_identical(res_int, want_int)
  expect_identical(res_fac, want_fac)
})
|
/tests/testthat/test-function-rbindAll.R
|
permissive
|
KWB-R/kwb.utils
|
R
| false
| false
| 1,053
|
r
|
test_that("rbindAll() works", {
  # Binding a list of plain vectors, asking for a name column
  vec_list <- list(1:3, 4:6)
  bound <- rbindAll(vec_list, nameColumn = "Name")
  expect_is(bound, "data.frame")
  expect_true("Name" %in% names(bound))
  expect_identical(dim(bound), c(2L, 4L))

  # Binding named and unnamed lists of data frames
  dfs <- list(
    A = data.frame(x = 1:2, y = 2:3),
    B = data.frame(x = 1:3, y = 2:4)
  )
  dfs_unnamed <- unname(dfs)

  res_plain <- rbindAll(dfs)
  res_named <- rbindAll(dfs, nameColumn = "group")
  res_int <- rbindAll(dfs_unnamed, nameColumn = "group", namesAsFactor = FALSE)
  res_fac <- rbindAll(dfs_unnamed, nameColumn = "group")

  # Expected results, built independently of rbindAll()
  want_plain <- data.frame(
    x = c(dfs$A$x, dfs$B$x),
    y = c(dfs$A$y, dfs$B$y)
  )
  want_named <- cbind(
    want_plain,
    group = as.factor(c(rep("A", nrow(dfs$A)), rep("B", nrow(dfs$B)))),
    stringsAsFactors = FALSE
  )
  want_int <- cbind(
    want_plain,
    group = c(rep(1L, nrow(dfs$A)), rep(2L, nrow(dfs$B)))
  )
  want_fac <- want_int
  want_fac$group <- as.factor(want_fac$group)

  expect_identical(res_plain, want_plain)
  expect_identical(res_named, want_named)
  expect_identical(res_int, want_int)
  expect_identical(res_fac, want_fac)
})
|
# plot3.R — energy sub-metering over 2007-02-01/02.
# Reads the full household power consumption data set; "?" and "" are NA.
dt <- read.table(file = "data/household_power_consumption.txt", header = TRUE, sep = ";",
                 colClasses = c("character", "character", "numeric", "numeric", "numeric",
                                "numeric", "numeric", "numeric", "numeric"),
                 nrows = 2075259, na.strings = c("?", ""))
# FIX: use library() (errors loudly) instead of require() (returns FALSE silently)
library("lubridate")
# Combine Date and Time into a single POSIXct column
datetime <- parse_date_time(paste(dt$Date, dt$Time), "dmyhms", tz = "UTC", truncated = 3)
dt <- cbind(datetime, dt[, 3:9])
rm(datetime)
# Keep only the two target days: 2007-02-01 and 2007-02-02
feb2days <- dt[dt$datetime >= strptime("02/01/2007 00:00:00", "%m/%d/%Y %H:%M:%S", tz = "UTC")
               & dt$datetime < strptime("02/03/2007 00:00:00", "%m/%d/%Y %H:%M:%S", tz = "UTC"), ]
# NOTE(review): type = "windows" is platform-specific; confirm this script only runs on Windows
png(filename = "plot3.png", width = 480, height = 480, type = "windows", pointsize = 10)
# Series 1 = black, series 2 = red, series 3 = blue
with(feb2days,
     plot(Sub_metering_1 ~ datetime, type = "l", xlab = NA, ylab = "Energy sub metering"))
with(feb2days,
     points(Sub_metering_2 ~ datetime, type = "l", col = "red"))
with(feb2days,
     points(Sub_metering_3 ~ datetime, type = "l", col = "blue"))
# FIX: legend labels were all "Sub_metering_1" and the colour order
# c("black","blue","red") did not match the plotted series (1=black, 2=red, 3=blue)
legend("topright", lty = 1, lwd = 1, col = c("black", "red", "blue"),
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
dev.off()
|
/plot3.R
|
no_license
|
mcassidy04/ExData_Plotting1
|
R
| false
| false
| 1,030
|
r
|
# plot3.R — energy sub-metering over 2007-02-01/02.
# Reads the full household power consumption data set; "?" and "" are NA.
dt <- read.table(file = "data/household_power_consumption.txt", header = TRUE, sep = ";",
                 colClasses = c("character", "character", "numeric", "numeric", "numeric",
                                "numeric", "numeric", "numeric", "numeric"),
                 nrows = 2075259, na.strings = c("?", ""))
# FIX: use library() (errors loudly) instead of require() (returns FALSE silently)
library("lubridate")
# Combine Date and Time into a single POSIXct column
datetime <- parse_date_time(paste(dt$Date, dt$Time), "dmyhms", tz = "UTC", truncated = 3)
dt <- cbind(datetime, dt[, 3:9])
rm(datetime)
# Keep only the two target days: 2007-02-01 and 2007-02-02
feb2days <- dt[dt$datetime >= strptime("02/01/2007 00:00:00", "%m/%d/%Y %H:%M:%S", tz = "UTC")
               & dt$datetime < strptime("02/03/2007 00:00:00", "%m/%d/%Y %H:%M:%S", tz = "UTC"), ]
# NOTE(review): type = "windows" is platform-specific; confirm this script only runs on Windows
png(filename = "plot3.png", width = 480, height = 480, type = "windows", pointsize = 10)
# Series 1 = black, series 2 = red, series 3 = blue
with(feb2days,
     plot(Sub_metering_1 ~ datetime, type = "l", xlab = NA, ylab = "Energy sub metering"))
with(feb2days,
     points(Sub_metering_2 ~ datetime, type = "l", col = "red"))
with(feb2days,
     points(Sub_metering_3 ~ datetime, type = "l", col = "blue"))
# FIX: legend labels were all "Sub_metering_1" and the colour order
# c("black","blue","red") did not match the plotted series (1=black, 2=red, 3=blue)
legend("topright", lty = 1, lwd = 1, col = c("black", "red", "blue"),
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
dev.off()
|
# --- Session setup -----------------------------------------------------------
# NOTE(review): `rm(list = ls())` and `setwd()` in scripts are discouraged —
# they clobber the user's session and assume a fixed directory layout.
rm(list=ls()) #clear all variables
library(ggplot2); library(plyr); library(dplyr); library(car); library(reshape); library(lme4); library(cowplot); library(stringi); library(scales); library(ggrepel)
# Restores objects produced by an earlier stage (e.g. lexicon.*, metrics)
load("~/friends-and-enemies/PostScript02.RData")
setwd("~/friends-and-enemies")
# Initial setup of VOISeR data ----
## VOISeR was trained on the ELP_groupdata.csv file, which was generated using the following code
# nmgData.correct.groupItem <- ddply(nmgData.correct, .(Word), summarise, latency=mean(latency))
# nmgData.correct.groupItem <- merge(lexicon, nmgData.correct.groupItem, by = 'Word')
# accuracy.groupItem <- ddply(nmgData, .(Word), summarise, accuracy=mean(accuracy))
# nmgData.correct.groupItem <- merge(nmgData.correct.groupItem, accuracy.groupItem, by = 'Word')
# groupData <- subset(nmgData.correct.groupItem, select = c("Word","Pron_NoStress","Length","Log_Freq_HAL","latency","accuracy"))
# colnames(groupData) <- c("Ortho","Phono","Length","LogFreq","meanRT","accuracy")
# write.csv(groupData, file = "ELP_groupData.csv")
# We then train VOISeR and read in the output data here
VOISeR <- read.csv('VOISeR-data-FrequencyWeighted-FINAL.csv', header = TRUE, sep = ',', na.strings = "#N/A")
#Select correct trials from epoch 10000
VOISeR$Epoch <- as.factor(VOISeR$Epoch)
# Overall pronunciation accuracy (%) across all epochs
VOISeR.correct <- droplevels(subset(VOISeR, Accuracy_Pronunciation == "TRUE"))
accuracy.VOISeR <- 100 * nrow(VOISeR.correct)/nrow(VOISeR)
accuracy.VOISeR
# Accuracy (%) restricted to the final training epoch (10000)
VOISeR.trained <- droplevels(subset(VOISeR, Epoch == "10000"))
VOISeR.trained.correct <- droplevels(subset(VOISeR.trained, Accuracy_Pronunciation == "TRUE"))
accuracy.VOISeR <- 100 * nrow(VOISeR.trained.correct)/nrow(VOISeR.trained)
accuracy.VOISeR
#Confirm that VOISeR was trained on words in degree proportional to logFreq
cor.test(VOISeR.trained.correct$Probability, VOISeR.trained.correct$Trained_Count)
# Natural-log transform the subject RTs and every model error/similarity
# metric (output-layer and hidden-layer versions alike).
logCols <- c("MeanRT", "Cosine_Similarity", "Mean_Squared_Error",
             "Euclidean_Distance", "Cross_Entropy",
             "Hidden_Cosine_Similarity", "Hidden_Mean_Squared_Error",
             "Hidden_Euclidean_Distance", "Hidden_Cross_Entropy")
VOISeR.trained.correct.LN <- VOISeR.trained.correct
for (cl in logCols) {
  VOISeR.trained.correct.LN[[cl]] <- log(VOISeR.trained.correct.LN[[cl]])
}
#Create data frames for each word length
# Restrict the model output to lexicon words and attach lexicon predictors.
# Column 2 of the model output is the orthographic form ("Ortho"); rename it
# to "Word" so merge() can key on it.
VOISeR.trained.correct.LN <- VOISeR.trained.correct.LN[which(VOISeR.trained.correct.LN$Ortho %in% lexicon$Word), ]
colnames(VOISeR.trained.correct.LN)[2] <- "Word"
# Model-output columns carried into every merged frame
modelCols <- c("Epoch", "Word", "Phono", "MeanRT", "Trained_Count",
               "Cosine_Similarity", "Mean_Squared_Error", "Euclidean_Distance",
               "Cross_Entropy", "Exported_Pronunciation", "Accuracy_Pronunciation")
VOISeR.trained.correct.LN <- merge(lexicon, VOISeR.trained.correct.LN[modelCols], by = 'Word')
# FIX: the original per-length blocks re-ran `colnames(...)[2] <- "Word"` on the
# already-merged frame, renaming a lexicon column and creating a duplicate
# "Word" column (harmless only because `[` matches the first name). The helper
# below drops that redundant rename and removes the six-fold copy-paste.
mergeLengthLexicon <- function(lex) {
  merge(lex,
        VOISeR.trained.correct.LN[VOISeR.trained.correct.LN$Word %in% lex$Word, modelCols],
        by = "Word")
}
VOISeR.trained.correct.LN.3letter <- mergeLengthLexicon(lexicon.3letter)
VOISeR.trained.correct.LN.4letter <- mergeLengthLexicon(lexicon.4letter)
VOISeR.trained.correct.LN.5letter <- mergeLengthLexicon(lexicon.5letter)
VOISeR.trained.correct.LN.6letter <- mergeLengthLexicon(lexicon.6letter)
VOISeR.trained.correct.LN.7letter <- mergeLengthLexicon(lexicon.7letter)
VOISeR.trained.correct.LN.8letter <- mergeLengthLexicon(lexicon.8letter)
#Visualize correlations between model parameters and subject RT
# NOTE(review): my.pairscor() and pcor.test() are not defined in this chunk —
# presumably loaded from PostScript02.RData or a helper file; confirm.
my.pairscor(VOISeR.trained.correct.LN[c(6,8:11)])
cor.test(VOISeR.trained.correct.LN$MeanRT, VOISeR.trained.correct.LN$Cross_Entropy)
#Correlation with cross-entropy is highest
# Do a few other simple tests on VOISeR
# Factor out word length, word frequency, and neighborhood size before doing RT-CE correlation
# (partial correlation of RT with ln(CE), controlling for the cbind()-ed covariates)
pcor.test(VOISeR.trained.correct.LN$MeanRT, VOISeR.trained.correct.LN$Cross_Entropy,
cbind(VOISeR.trained.correct.LN$Length, VOISeR.trained.correct.LN$Log_Freq_HAL,
VOISeR.trained.correct.LN$Ortho_N))
# Test for word length effect
# Scatter of ln(CE) against word length with a linear fit
ggplot(VOISeR.trained.correct.LN, aes(Length, Cross_Entropy)) +
geom_smooth(method=lm, color = "black", alpha = 0.15) + geom_point() +
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.background = element_rect(fill = "transparent", color = NA)) +
labs(x="Word Length", y = "ln(CE)")
cor.test(VOISeR.trained.correct.LN$Length, VOISeR.trained.correct.LN$Cross_Entropy)
# Test for word frequency effect
# Scatter of ln(CE) against log HAL frequency with a linear fit
ggplot(VOISeR.trained.correct.LN, aes(Log_Freq_HAL, Cross_Entropy)) +
geom_smooth(method=lm, color = "black", alpha = 0.15) + geom_point() +
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.background = element_rect(fill = "transparent", color = NA)) +
labs(x="Word Frequency", y = "ln(CE)")
cor.test(VOISeR.trained.correct.LN$Log_Freq_HAL, VOISeR.trained.correct.LN$Cross_Entropy)
# Test for orthographic neighbor effect
ggplot(VOISeR.trained.correct.LN, aes(Ortho_N, Cross_Entropy)) +
geom_smooth(method=lm, color = "black", alpha = 0.15) + geom_point() +
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.background = element_rect(fill = "transparent", color = NA)) +
# FIX: the x-axis label said "Word Frequency" (copy-paste from the previous
# plot) even though the x variable here is orthographic neighborhood size.
labs(x="Orthographic Neighborhood Size", y = "ln(CE)")
cor.test(VOISeR.trained.correct.LN$Ortho_N, VOISeR.trained.correct.LN$Cross_Entropy)
# Cross entropy regressions with Enemies ####
# For each word length, regress ln(CE) on log frequency plus the per-position
# Enemies predictors, restricted to words with at least one orthographic
# neighbor (Ortho_N > 0). Coefficient tables are printed for inspection.
#3-letter words
VOISeR.CE.3letter.Freq.Enemies <- lm(Cross_Entropy ~ Log_Freq_HAL + Enemies1 + Enemies2 + Enemies3, data = subset(VOISeR.trained.correct.LN.3letter, Ortho_N>0))
summary(VOISeR.CE.3letter.Freq.Enemies)[["coefficients"]]
#4-letter words
VOISeR.CE.4letter.Freq.Enemies <- lm(Cross_Entropy ~ Log_Freq_HAL + Enemies1 + Enemies2 + Enemies3 + Enemies4, data = subset(VOISeR.trained.correct.LN.4letter, Ortho_N>0))
summary(VOISeR.CE.4letter.Freq.Enemies)[["coefficients"]]
#5-letter words
VOISeR.CE.5letter.Freq.Enemies <- lm(Cross_Entropy ~ Log_Freq_HAL + Enemies1 + Enemies2 + Enemies3 + Enemies4 + Enemies5, data = subset(VOISeR.trained.correct.LN.5letter, Ortho_N>0))
summary(VOISeR.CE.5letter.Freq.Enemies)[["coefficients"]]
#6-letter words
VOISeR.CE.6letter.Freq.Enemies <- lm(Cross_Entropy ~ Log_Freq_HAL + Enemies1 + Enemies2 + Enemies3 + Enemies4 + Enemies5 + Enemies6, data = subset(VOISeR.trained.correct.LN.6letter, Ortho_N>0))
summary(VOISeR.CE.6letter.Freq.Enemies)[["coefficients"]]
#7-letter words
VOISeR.CE.7letter.Freq.Enemies <- lm(Cross_Entropy ~ Log_Freq_HAL + Enemies1 + Enemies2 + Enemies3 + Enemies4 + Enemies5 + Enemies6 + Enemies7, data = subset(VOISeR.trained.correct.LN.7letter, Ortho_N>0))
summary(VOISeR.CE.7letter.Freq.Enemies)[["coefficients"]]
#8-letter words
VOISeR.CE.8letter.Freq.Enemies <- lm(Cross_Entropy ~ Log_Freq_HAL + Enemies1 + Enemies2 + Enemies3 + Enemies4 + Enemies5 + Enemies6 + Enemies7 + Enemies8 , data = subset(VOISeR.trained.correct.LN.8letter, Ortho_N>0))
summary(VOISeR.CE.8letter.Freq.Enemies)[["coefficients"]]
#Unadjusted b estimates
# The Enemies coefficients occupy slots 3..(k+2) of each model's coefficient
# vector (after Intercept and Log_Freq_HAL). Extract them as an unnamed 1-row
# matrix, matching the cbind(scalar, scalar, ...) layout of the original
# copy-pasted blocks; as.numeric() strips the coefficient names.
enemiesCoefs <- function(model) {
  t(as.numeric(model$coefficients[-(1:2)]))
}
VOISeR.Coef.Freq.Enemies.3letter <- enemiesCoefs(VOISeR.CE.3letter.Freq.Enemies)
VOISeR.Coef.Freq.Enemies.4letter <- enemiesCoefs(VOISeR.CE.4letter.Freq.Enemies)
VOISeR.Coef.Freq.Enemies.5letter <- enemiesCoefs(VOISeR.CE.5letter.Freq.Enemies)
VOISeR.Coef.Freq.Enemies.6letter <- enemiesCoefs(VOISeR.CE.6letter.Freq.Enemies)
VOISeR.Coef.Freq.Enemies.7letter <- enemiesCoefs(VOISeR.CE.7letter.Freq.Enemies)
VOISeR.Coef.Freq.Enemies.8letter <- enemiesCoefs(VOISeR.CE.8letter.Freq.Enemies)
# Concatenate the 3-8 letter coefficients into one vector and append to metrics
VOISeR.Coef.Freq.Enemies.allLetter <- as.vector(cbind(VOISeR.Coef.Freq.Enemies.3letter,VOISeR.Coef.Freq.Enemies.4letter,VOISeR.Coef.Freq.Enemies.5letter,VOISeR.Coef.Freq.Enemies.6letter,VOISeR.Coef.Freq.Enemies.7letter,VOISeR.Coef.Freq.Enemies.8letter))
metrics <- cbind(metrics, VOISeR.Coef.Freq.Enemies.allLetter)
#Adjusted b estimates
# Inverse-variance-weighted ("adjusted") b estimates. For each word length the
# enemy slopes are weighted by 1/SE^2, and scalFac rescales those weights so
# they sum to the number of words that entered that regression (rows with
# Ortho_N > 0 in the length-specific data frame).
# NOTE(review): scalFac is overwritten before each length's block, so each
# scalFac/cbind statement pair below must stay in this exact order.
# 3-letter words
scalFac <- nrow(subset(VOISeR.trained.correct.LN.3letter, Ortho_N>0)) / (
  1/((summary(VOISeR.CE.3letter.Freq.Enemies)[["coefficients"]][3,2])^2) +
  1/((summary(VOISeR.CE.3letter.Freq.Enemies)[["coefficients"]][4,2])^2) +
  1/((summary(VOISeR.CE.3letter.Freq.Enemies)[["coefficients"]][5,2])^2)
)
VOISeR.CoefAdj.Freq.Enemies.3letter <- cbind(
  summary(VOISeR.CE.3letter.Freq.Enemies)[["coefficients"]][3,1] * scalFac/((summary(VOISeR.CE.3letter.Freq.Enemies)[["coefficients"]][3,2])^2),
  summary(VOISeR.CE.3letter.Freq.Enemies)[["coefficients"]][4,1] * scalFac/((summary(VOISeR.CE.3letter.Freq.Enemies)[["coefficients"]][4,2])^2),
  summary(VOISeR.CE.3letter.Freq.Enemies)[["coefficients"]][5,1] * scalFac/((summary(VOISeR.CE.3letter.Freq.Enemies)[["coefficients"]][5,2])^2)
)
# 4-letter words
scalFac <- nrow(subset(VOISeR.trained.correct.LN.4letter, Ortho_N>0)) / (
  1/((summary(VOISeR.CE.4letter.Freq.Enemies)[["coefficients"]][3,2])^2) +
  1/((summary(VOISeR.CE.4letter.Freq.Enemies)[["coefficients"]][4,2])^2) +
  1/((summary(VOISeR.CE.4letter.Freq.Enemies)[["coefficients"]][5,2])^2) +
  1/((summary(VOISeR.CE.4letter.Freq.Enemies)[["coefficients"]][6,2])^2)
)
VOISeR.CoefAdj.Freq.Enemies.4letter <- cbind(
  summary(VOISeR.CE.4letter.Freq.Enemies)[["coefficients"]][3,1] * scalFac/((summary(VOISeR.CE.4letter.Freq.Enemies)[["coefficients"]][3,2])^2),
  summary(VOISeR.CE.4letter.Freq.Enemies)[["coefficients"]][4,1] * scalFac/((summary(VOISeR.CE.4letter.Freq.Enemies)[["coefficients"]][4,2])^2),
  summary(VOISeR.CE.4letter.Freq.Enemies)[["coefficients"]][5,1] * scalFac/((summary(VOISeR.CE.4letter.Freq.Enemies)[["coefficients"]][5,2])^2),
  summary(VOISeR.CE.4letter.Freq.Enemies)[["coefficients"]][6,1] * scalFac/((summary(VOISeR.CE.4letter.Freq.Enemies)[["coefficients"]][6,2])^2)
)
# 5-letter words
scalFac <- nrow(subset(VOISeR.trained.correct.LN.5letter, Ortho_N>0)) / (
  1/((summary(VOISeR.CE.5letter.Freq.Enemies)[["coefficients"]][3,2])^2) +
  1/((summary(VOISeR.CE.5letter.Freq.Enemies)[["coefficients"]][4,2])^2) +
  1/((summary(VOISeR.CE.5letter.Freq.Enemies)[["coefficients"]][5,2])^2) +
  1/((summary(VOISeR.CE.5letter.Freq.Enemies)[["coefficients"]][6,2])^2) +
  1/((summary(VOISeR.CE.5letter.Freq.Enemies)[["coefficients"]][7,2])^2)
)
VOISeR.CoefAdj.Freq.Enemies.5letter <- cbind(
  summary(VOISeR.CE.5letter.Freq.Enemies)[["coefficients"]][3,1] * scalFac/((summary(VOISeR.CE.5letter.Freq.Enemies)[["coefficients"]][3,2])^2),
  summary(VOISeR.CE.5letter.Freq.Enemies)[["coefficients"]][4,1] * scalFac/((summary(VOISeR.CE.5letter.Freq.Enemies)[["coefficients"]][4,2])^2),
  summary(VOISeR.CE.5letter.Freq.Enemies)[["coefficients"]][5,1] * scalFac/((summary(VOISeR.CE.5letter.Freq.Enemies)[["coefficients"]][5,2])^2),
  summary(VOISeR.CE.5letter.Freq.Enemies)[["coefficients"]][6,1] * scalFac/((summary(VOISeR.CE.5letter.Freq.Enemies)[["coefficients"]][6,2])^2),
  summary(VOISeR.CE.5letter.Freq.Enemies)[["coefficients"]][7,1] * scalFac/((summary(VOISeR.CE.5letter.Freq.Enemies)[["coefficients"]][7,2])^2)
)
# 6-letter words
scalFac <- nrow(subset(VOISeR.trained.correct.LN.6letter, Ortho_N>0)) / (
  1/((summary(VOISeR.CE.6letter.Freq.Enemies)[["coefficients"]][3,2])^2) +
  1/((summary(VOISeR.CE.6letter.Freq.Enemies)[["coefficients"]][4,2])^2) +
  1/((summary(VOISeR.CE.6letter.Freq.Enemies)[["coefficients"]][5,2])^2) +
  1/((summary(VOISeR.CE.6letter.Freq.Enemies)[["coefficients"]][6,2])^2) +
  1/((summary(VOISeR.CE.6letter.Freq.Enemies)[["coefficients"]][7,2])^2) +
  1/((summary(VOISeR.CE.6letter.Freq.Enemies)[["coefficients"]][8,2])^2)
)
VOISeR.CoefAdj.Freq.Enemies.6letter <- cbind(
  summary(VOISeR.CE.6letter.Freq.Enemies)[["coefficients"]][3,1] * scalFac/((summary(VOISeR.CE.6letter.Freq.Enemies)[["coefficients"]][3,2])^2),
  summary(VOISeR.CE.6letter.Freq.Enemies)[["coefficients"]][4,1] * scalFac/((summary(VOISeR.CE.6letter.Freq.Enemies)[["coefficients"]][4,2])^2),
  summary(VOISeR.CE.6letter.Freq.Enemies)[["coefficients"]][5,1] * scalFac/((summary(VOISeR.CE.6letter.Freq.Enemies)[["coefficients"]][5,2])^2),
  summary(VOISeR.CE.6letter.Freq.Enemies)[["coefficients"]][6,1] * scalFac/((summary(VOISeR.CE.6letter.Freq.Enemies)[["coefficients"]][6,2])^2),
  summary(VOISeR.CE.6letter.Freq.Enemies)[["coefficients"]][7,1] * scalFac/((summary(VOISeR.CE.6letter.Freq.Enemies)[["coefficients"]][7,2])^2),
  summary(VOISeR.CE.6letter.Freq.Enemies)[["coefficients"]][8,1] * scalFac/((summary(VOISeR.CE.6letter.Freq.Enemies)[["coefficients"]][8,2])^2)
)
# 7-letter words
scalFac <- nrow(subset(VOISeR.trained.correct.LN.7letter, Ortho_N>0)) / (
  1/((summary(VOISeR.CE.7letter.Freq.Enemies)[["coefficients"]][3,2])^2) +
  1/((summary(VOISeR.CE.7letter.Freq.Enemies)[["coefficients"]][4,2])^2) +
  1/((summary(VOISeR.CE.7letter.Freq.Enemies)[["coefficients"]][5,2])^2) +
  1/((summary(VOISeR.CE.7letter.Freq.Enemies)[["coefficients"]][6,2])^2) +
  1/((summary(VOISeR.CE.7letter.Freq.Enemies)[["coefficients"]][7,2])^2) +
  1/((summary(VOISeR.CE.7letter.Freq.Enemies)[["coefficients"]][8,2])^2) +
  1/((summary(VOISeR.CE.7letter.Freq.Enemies)[["coefficients"]][9,2])^2)
)
VOISeR.CoefAdj.Freq.Enemies.7letter <- cbind(
  summary(VOISeR.CE.7letter.Freq.Enemies)[["coefficients"]][3,1] * scalFac/((summary(VOISeR.CE.7letter.Freq.Enemies)[["coefficients"]][3,2])^2),
  summary(VOISeR.CE.7letter.Freq.Enemies)[["coefficients"]][4,1] * scalFac/((summary(VOISeR.CE.7letter.Freq.Enemies)[["coefficients"]][4,2])^2),
  summary(VOISeR.CE.7letter.Freq.Enemies)[["coefficients"]][5,1] * scalFac/((summary(VOISeR.CE.7letter.Freq.Enemies)[["coefficients"]][5,2])^2),
  summary(VOISeR.CE.7letter.Freq.Enemies)[["coefficients"]][6,1] * scalFac/((summary(VOISeR.CE.7letter.Freq.Enemies)[["coefficients"]][6,2])^2),
  summary(VOISeR.CE.7letter.Freq.Enemies)[["coefficients"]][7,1] * scalFac/((summary(VOISeR.CE.7letter.Freq.Enemies)[["coefficients"]][7,2])^2),
  summary(VOISeR.CE.7letter.Freq.Enemies)[["coefficients"]][8,1] * scalFac/((summary(VOISeR.CE.7letter.Freq.Enemies)[["coefficients"]][8,2])^2),
  summary(VOISeR.CE.7letter.Freq.Enemies)[["coefficients"]][9,1] * scalFac/((summary(VOISeR.CE.7letter.Freq.Enemies)[["coefficients"]][9,2])^2)
)
# 8-letter words
scalFac <- nrow(subset(VOISeR.trained.correct.LN.8letter, Ortho_N>0)) / (
  1/((summary(VOISeR.CE.8letter.Freq.Enemies)[["coefficients"]][3,2])^2) +
  1/((summary(VOISeR.CE.8letter.Freq.Enemies)[["coefficients"]][4,2])^2) +
  1/((summary(VOISeR.CE.8letter.Freq.Enemies)[["coefficients"]][5,2])^2) +
  1/((summary(VOISeR.CE.8letter.Freq.Enemies)[["coefficients"]][6,2])^2) +
  1/((summary(VOISeR.CE.8letter.Freq.Enemies)[["coefficients"]][7,2])^2) +
  1/((summary(VOISeR.CE.8letter.Freq.Enemies)[["coefficients"]][8,2])^2) +
  1/((summary(VOISeR.CE.8letter.Freq.Enemies)[["coefficients"]][9,2])^2) +
  1/((summary(VOISeR.CE.8letter.Freq.Enemies)[["coefficients"]][10,2])^2)
)
VOISeR.CoefAdj.Freq.Enemies.8letter <- cbind(
  summary(VOISeR.CE.8letter.Freq.Enemies)[["coefficients"]][3,1] * scalFac/((summary(VOISeR.CE.8letter.Freq.Enemies)[["coefficients"]][3,2])^2),
  summary(VOISeR.CE.8letter.Freq.Enemies)[["coefficients"]][4,1] * scalFac/((summary(VOISeR.CE.8letter.Freq.Enemies)[["coefficients"]][4,2])^2),
  summary(VOISeR.CE.8letter.Freq.Enemies)[["coefficients"]][5,1] * scalFac/((summary(VOISeR.CE.8letter.Freq.Enemies)[["coefficients"]][5,2])^2),
  summary(VOISeR.CE.8letter.Freq.Enemies)[["coefficients"]][6,1] * scalFac/((summary(VOISeR.CE.8letter.Freq.Enemies)[["coefficients"]][6,2])^2),
  summary(VOISeR.CE.8letter.Freq.Enemies)[["coefficients"]][7,1] * scalFac/((summary(VOISeR.CE.8letter.Freq.Enemies)[["coefficients"]][7,2])^2),
  summary(VOISeR.CE.8letter.Freq.Enemies)[["coefficients"]][8,1] * scalFac/((summary(VOISeR.CE.8letter.Freq.Enemies)[["coefficients"]][8,2])^2),
  summary(VOISeR.CE.8letter.Freq.Enemies)[["coefficients"]][9,1] * scalFac/((summary(VOISeR.CE.8letter.Freq.Enemies)[["coefficients"]][9,2])^2),
  summary(VOISeR.CE.8letter.Freq.Enemies)[["coefficients"]][10,1] * scalFac/((summary(VOISeR.CE.8letter.Freq.Enemies)[["coefficients"]][10,2])^2)
)
# Flatten the per-length 1-row matrices of adjusted b estimates into one
# 33-element vector (ordered by word length, then letter position). c() on
# 1-row matrices concatenates left to right, identical to as.vector(cbind(...)).
VOISeR.CoefAdj.Freq.Enemies.allLetter <- c(
  VOISeR.CoefAdj.Freq.Enemies.3letter, VOISeR.CoefAdj.Freq.Enemies.4letter,
  VOISeR.CoefAdj.Freq.Enemies.5letter, VOISeR.CoefAdj.Freq.Enemies.6letter,
  VOISeR.CoefAdj.Freq.Enemies.7letter, VOISeR.CoefAdj.Freq.Enemies.8letter
)
metrics <- cbind(metrics, VOISeR.CoefAdj.Freq.Enemies.allLetter)
#Plots
# Scatterplots relating the adjusted VOISeR b estimates to (a) positional
# entropy and (b) the corresponding b estimates from the human naming data.
# NOTE(review): entropy.allLetter, nmgCoefAdj.Freq.Enemies.allLetter, Labels
# and Colors are not defined in this script — presumably columns of `metrics`
# and/or globals restored from PostScript02.RData; confirm against script 02.
ggplot(metrics, aes(entropy.allLetter, VOISeR.CoefAdj.Freq.Enemies.allLetter)) +
  geom_smooth(method=lm, color = "black", alpha = 0.15) + geom_point(color = metrics$Colors) +
  geom_text_repel(box.padding = 1, aes(label=Labels), color = metrics$Colors,
                  size = 6, force = 3, max.iter = 5000) +
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
        panel.background = element_rect(fill = "transparent", color = NA)) +
  labs(x="Entropy", y = "Influence of Enemies \n on VOISeR Latency (b estimate)")
cor.test(entropy.allLetter,VOISeR.CoefAdj.Freq.Enemies.allLetter)
# Indices 1,4,8,13,19,26 are the first-position slopes for word lengths 3-8
# (cumulative offsets into the 33-element length-then-position vector).
cor.test(entropy.allLetter[-c(1,4,8,13,19,26)], VOISeR.CoefAdj.Freq.Enemies.allLetter[-c(1,4,8,13,19,26)]) #look at this without first-position values
ggplot(metrics, aes(nmgCoefAdj.Freq.Enemies.allLetter, VOISeR.CoefAdj.Freq.Enemies.allLetter)) +
  geom_smooth(method=lm, color = "black", alpha = 0.15) + geom_point(color = metrics$Colors) +
  geom_text_repel(box.padding = 1, aes(label=Labels), color = metrics$Colors,
                  size = 6, force = 3, max.iter = 5000) +
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
        panel.background = element_rect(fill = "transparent", color = NA)) +
  labs(x="b estimates from \n human subjects data", y = "b estimates from \n VOISeR model")
cor.test(nmgCoefAdj.Freq.Enemies.allLetter,VOISeR.CoefAdj.Freq.Enemies.allLetter)
cor.test(nmgCoefAdj.Freq.Enemies.allLetter[-c(1,4,8,13,19,26)], VOISeR.CoefAdj.Freq.Enemies.allLetter[-c(1,4,8,13,19,26)]) #look at this without first-position values
# Persist the whole workspace for the next script in the pipeline.
save.image("~/friends-and-enemies/PostScript03.RData")
|
/03-VOISeR-Naming-FINAL.R
|
no_license
|
sahil-luthra/friends-and-enemies
|
R
| false
| false
| 22,222
|
r
|
# Setup: start from a clean workspace, attach packages (same order as before,
# so any masking between plyr/dplyr/reshape is unchanged), then restore the
# objects produced by analysis script 02.
rm(list = ls()) # clear all variables
library(ggplot2)
library(plyr)
library(dplyr)
library(car)
library(reshape)
library(lme4)
library(cowplot)
library(stringi)
library(scales)
library(ggrepel)
load("~/friends-and-enemies/PostScript02.RData")
setwd("~/friends-and-enemies")
# Initial setup of VOISeR data ----
## VOISeR was trained on the ELP_groupdata.csv file, which was generated using the following code
# nmgData.correct.groupItem <- ddply(nmgData.correct, .(Word), summarise, latency=mean(latency))
# nmgData.correct.groupItem <- merge(lexicon, nmgData.correct.groupItem, by = 'Word')
# accuracy.groupItem <- ddply(nmgData, .(Word), summarise, accuracy=mean(accuracy))
# nmgData.correct.groupItem <- merge(nmgData.correct.groupItem, accuracy.groupItem, by = 'Word')
# groupData <- subset(nmgData.correct.groupItem, select = c("Word","Pron_NoStress","Length","Log_Freq_HAL","latency","accuracy"))
# colnames(groupData) <- c("Ortho","Phono","Length","LogFreq","meanRT","accuracy")
# write.csv(groupData, file = "ELP_groupData.csv")
# We then train VOISeR and read in the output data here
VOISeR <- read.csv('VOISeR-data-FrequencyWeighted-FINAL.csv', header = TRUE, sep = ',', na.strings = "#N/A")
#Select correct trials from epoch 10000
VOISeR$Epoch <- as.factor(VOISeR$Epoch)
# NOTE(review): Accuracy_Pronunciation is compared against the string "TRUE";
# this works whether the column was read as character or logical (logicals are
# coerced to "TRUE"/"FALSE" for ==), but confirm the column's type in the CSV.
VOISeR.correct <- droplevels(subset(VOISeR, Accuracy_Pronunciation == "TRUE"))
# Percent of trials pronounced correctly, across all training epochs
accuracy.VOISeR <- 100 * nrow(VOISeR.correct)/nrow(VOISeR)
accuracy.VOISeR
# Restrict to the final epoch (10000) and recompute accuracy there;
# accuracy.VOISeR is deliberately overwritten with the end-of-training value.
VOISeR.trained <- droplevels(subset(VOISeR, Epoch == "10000"))
VOISeR.trained.correct <- droplevels(subset(VOISeR.trained, Accuracy_Pronunciation == "TRUE"))
accuracy.VOISeR <- 100 * nrow(VOISeR.trained.correct)/nrow(VOISeR.trained)
accuracy.VOISeR
#Confirm that VOISeR was trained on words in degree proportional to logFreq
cor.test(VOISeR.trained.correct$Probability, VOISeR.trained.correct$Trained_Count)
#Compute the LN of subject RTs and of all model parameters
# One mutate() call, log-transforming each measure in place. Identical result
# to the original chain of single-column mutates: expressions are evaluated in
# order and no column here refers to another.
VOISeR.trained.correct.LN <- VOISeR.trained.correct %>%
  mutate(
    MeanRT                    = log(MeanRT),
    Cosine_Similarity         = log(Cosine_Similarity),
    Mean_Squared_Error        = log(Mean_Squared_Error),
    Euclidean_Distance        = log(Euclidean_Distance),
    Cross_Entropy             = log(Cross_Entropy),
    Hidden_Cosine_Similarity  = log(Hidden_Cosine_Similarity),
    Hidden_Mean_Squared_Error = log(Hidden_Mean_Squared_Error),
    Hidden_Euclidean_Distance = log(Hidden_Euclidean_Distance),
    Hidden_Cross_Entropy      = log(Hidden_Cross_Entropy)
  )
#Create data frames for each word length
# Restrict VOISeR output to words present in the experiment lexicon, then
# attach the lexicon's item-level predictors (Length, Log_Freq_HAL, Ortho_N,
# Enemies1..k, ...) by merging on the orthographic form.
VOISeR.trained.correct.LN <- VOISeR.trained.correct.LN[which(VOISeR.trained.correct.LN$Ortho %in% lexicon$Word), ]
# Column 2 is the orthographic form ("Ortho"); rename so merge() can key on it.
colnames(VOISeR.trained.correct.LN)[2]<-"Word"
VOISeR.trained.correct.LN <- merge(lexicon, VOISeR.trained.correct.LN[c("Epoch","Word","Phono","MeanRT","Trained_Count","Cosine_Similarity","Mean_Squared_Error","Euclidean_Distance","Cross_Entropy","Exported_Pronunciation","Accuracy_Pronunciation")], by = 'Word')
# NOTE(review): in each per-length block below, column 2 of an already-merged
# data frame is renamed to "Word" even though "Word" is already column 1 (the
# merge key) — this creates a duplicate name. The subsequent [c(...)] selection
# matches the first "Word" column, so results appear unaffected, but the
# rename looks vestigial (copied from the block above) — confirm.
# 3-letter words
VOISeR.trained.correct.LN.3letter <- VOISeR.trained.correct.LN[which(VOISeR.trained.correct.LN$Word %in% lexicon.3letter$Word), ]
colnames(VOISeR.trained.correct.LN.3letter)[2]<-"Word"
VOISeR.trained.correct.LN.3letter <- merge(lexicon.3letter, VOISeR.trained.correct.LN.3letter[c("Epoch","Word","Phono","MeanRT","Trained_Count","Cosine_Similarity","Mean_Squared_Error","Euclidean_Distance","Cross_Entropy","Exported_Pronunciation","Accuracy_Pronunciation")], by = 'Word')
# 4-letter words
VOISeR.trained.correct.LN.4letter <- VOISeR.trained.correct.LN[which(VOISeR.trained.correct.LN$Word %in% lexicon.4letter$Word), ]
colnames(VOISeR.trained.correct.LN.4letter)[2]<-"Word"
VOISeR.trained.correct.LN.4letter <- merge(lexicon.4letter, VOISeR.trained.correct.LN.4letter[c("Epoch","Word","Phono","MeanRT","Trained_Count","Cosine_Similarity","Mean_Squared_Error","Euclidean_Distance","Cross_Entropy","Exported_Pronunciation","Accuracy_Pronunciation")], by = 'Word')
# 5-letter words
VOISeR.trained.correct.LN.5letter <- VOISeR.trained.correct.LN[which(VOISeR.trained.correct.LN$Word %in% lexicon.5letter$Word), ]
colnames(VOISeR.trained.correct.LN.5letter)[2]<-"Word"
VOISeR.trained.correct.LN.5letter <- merge(lexicon.5letter, VOISeR.trained.correct.LN.5letter[c("Epoch","Word","Phono","MeanRT","Trained_Count","Cosine_Similarity","Mean_Squared_Error","Euclidean_Distance","Cross_Entropy","Exported_Pronunciation","Accuracy_Pronunciation")], by = 'Word')
# 6-letter words
VOISeR.trained.correct.LN.6letter <- VOISeR.trained.correct.LN[which(VOISeR.trained.correct.LN$Word %in% lexicon.6letter$Word), ]
colnames(VOISeR.trained.correct.LN.6letter)[2]<-"Word"
VOISeR.trained.correct.LN.6letter <- merge(lexicon.6letter, VOISeR.trained.correct.LN.6letter[c("Epoch","Word","Phono","MeanRT","Trained_Count","Cosine_Similarity","Mean_Squared_Error","Euclidean_Distance","Cross_Entropy","Exported_Pronunciation","Accuracy_Pronunciation")], by = 'Word')
# 7-letter words
VOISeR.trained.correct.LN.7letter <- VOISeR.trained.correct.LN[which(VOISeR.trained.correct.LN$Word %in% lexicon.7letter$Word), ]
colnames(VOISeR.trained.correct.LN.7letter)[2]<-"Word"
VOISeR.trained.correct.LN.7letter <- merge(lexicon.7letter, VOISeR.trained.correct.LN.7letter[c("Epoch","Word","Phono","MeanRT","Trained_Count","Cosine_Similarity","Mean_Squared_Error","Euclidean_Distance","Cross_Entropy","Exported_Pronunciation","Accuracy_Pronunciation")], by = 'Word')
# 8-letter words
VOISeR.trained.correct.LN.8letter <- VOISeR.trained.correct.LN[which(VOISeR.trained.correct.LN$Word %in% lexicon.8letter$Word), ]
colnames(VOISeR.trained.correct.LN.8letter)[2]<-"Word"
VOISeR.trained.correct.LN.8letter <- merge(lexicon.8letter, VOISeR.trained.correct.LN.8letter[c("Epoch","Word","Phono","MeanRT","Trained_Count","Cosine_Similarity","Mean_Squared_Error","Euclidean_Distance","Cross_Entropy","Exported_Pronunciation","Accuracy_Pronunciation")], by = 'Word')
#Visualize correlations between model parameters and subject RT
# NOTE(review): my.pairscor() is not defined or attached in this script —
# presumably a custom pairs-plot helper restored from PostScript02.RData; confirm.
my.pairscor(VOISeR.trained.correct.LN[c(6,8:11)])
cor.test(VOISeR.trained.correct.LN$MeanRT, VOISeR.trained.correct.LN$Cross_Entropy)
#Correlation with cross-entropy is highest
# Do a few other simple tests on VOISeR
# Factor out word length, word frequency, and neighborhood size before doing RT-CE correlation
# NOTE(review): pcor.test() (partial correlation) is likewise not attached here
# — presumably from the ppcor package or the loaded workspace; confirm.
pcor.test(VOISeR.trained.correct.LN$MeanRT, VOISeR.trained.correct.LN$Cross_Entropy,
          cbind(VOISeR.trained.correct.LN$Length, VOISeR.trained.correct.LN$Log_Freq_HAL,
                VOISeR.trained.correct.LN$Ortho_N))
# Test for word length effect
# ln(cross-entropy) as a function of word length, with a linear fit.
ggplot(VOISeR.trained.correct.LN, aes(Length, Cross_Entropy)) +
  geom_smooth(method=lm, color = "black", alpha = 0.15) + geom_point() +
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
        panel.background = element_rect(fill = "transparent", color = NA)) +
  labs(x="Word Length", y = "ln(CE)")
cor.test(VOISeR.trained.correct.LN$Length, VOISeR.trained.correct.LN$Cross_Entropy)
# Test for word frequency effect
# ln(cross-entropy) as a function of log HAL frequency, with a linear fit.
ggplot(VOISeR.trained.correct.LN, aes(Log_Freq_HAL, Cross_Entropy)) +
  geom_smooth(method=lm, color = "black", alpha = 0.15) + geom_point() +
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
        panel.background = element_rect(fill = "transparent", color = NA)) +
  labs(x="Word Frequency", y = "ln(CE)")
cor.test(VOISeR.trained.correct.LN$Log_Freq_HAL, VOISeR.trained.correct.LN$Cross_Entropy)
# Test for orthographic neighbor effect
# ln(cross-entropy) as a function of orthographic neighborhood size (Ortho_N),
# with a linear fit.
ggplot(VOISeR.trained.correct.LN, aes(Ortho_N, Cross_Entropy)) +
  geom_smooth(method=lm, color = "black", alpha = 0.15) + geom_point() +
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
        panel.background = element_rect(fill = "transparent", color = NA)) +
  # Bug fix: the x-axis label previously read "Word Frequency" (copy-pasted
  # from the frequency plot above), but this plot shows Ortho_N.
  labs(x="Orthographic Neighborhood Size", y = "ln(CE)")
cor.test(VOISeR.trained.correct.LN$Ortho_N, VOISeR.trained.correct.LN$Cross_Entropy)
# Cross entropy regressions with Enemies ####
# For each word length k (3-8): regress ln(CE) on log HAL frequency plus the
# k positional enemy counts (Enemies1..Enemiesk), restricted to words with at
# least one orthographic neighbor (Ortho_N > 0). The fitted coefficient vector
# is [1] (Intercept), [2] Log_Freq_HAL, [3..k+2] Enemies1..Enemiesk — the
# extraction code further down indexes positions 3..(k+2) on this basis.
#3-letter words
VOISeR.CE.3letter.Freq.Enemies <- lm(Cross_Entropy ~ Log_Freq_HAL + Enemies1 + Enemies2 + Enemies3, data = subset(VOISeR.trained.correct.LN.3letter, Ortho_N>0))
summary(VOISeR.CE.3letter.Freq.Enemies)[["coefficients"]]
#4-letter words
VOISeR.CE.4letter.Freq.Enemies <- lm(Cross_Entropy ~ Log_Freq_HAL + Enemies1 + Enemies2 + Enemies3 + Enemies4, data = subset(VOISeR.trained.correct.LN.4letter, Ortho_N>0))
summary(VOISeR.CE.4letter.Freq.Enemies)[["coefficients"]]
#5-letter words
VOISeR.CE.5letter.Freq.Enemies <- lm(Cross_Entropy ~ Log_Freq_HAL + Enemies1 + Enemies2 + Enemies3 + Enemies4 + Enemies5, data = subset(VOISeR.trained.correct.LN.5letter, Ortho_N>0))
summary(VOISeR.CE.5letter.Freq.Enemies)[["coefficients"]]
#6-letter words
VOISeR.CE.6letter.Freq.Enemies <- lm(Cross_Entropy ~ Log_Freq_HAL + Enemies1 + Enemies2 + Enemies3 + Enemies4 + Enemies5 + Enemies6, data = subset(VOISeR.trained.correct.LN.6letter, Ortho_N>0))
summary(VOISeR.CE.6letter.Freq.Enemies)[["coefficients"]]
#7-letter words
VOISeR.CE.7letter.Freq.Enemies <- lm(Cross_Entropy ~ Log_Freq_HAL + Enemies1 + Enemies2 + Enemies3 + Enemies4 + Enemies5 + Enemies6 + Enemies7, data = subset(VOISeR.trained.correct.LN.7letter, Ortho_N>0))
summary(VOISeR.CE.7letter.Freq.Enemies)[["coefficients"]]
#8-letter words
VOISeR.CE.8letter.Freq.Enemies <- lm(Cross_Entropy ~ Log_Freq_HAL + Enemies1 + Enemies2 + Enemies3 + Enemies4 + Enemies5 + Enemies6 + Enemies7 + Enemies8 , data = subset(VOISeR.trained.correct.LN.8letter, Ortho_N>0))
summary(VOISeR.CE.8letter.Freq.Enemies)[["coefficients"]]
#Unadjusted b estimates
# Raw b estimates for the positional enemy predictors. In each length-k model
# the coefficient vector is (Intercept), Log_Freq_HAL, Enemies1..Enemiesk, so
# positions 3:(k + 2) hold the k enemy slopes. t(as.numeric(...)) yields the
# same unnamed 1-row matrix as the original element-by-element cbind() of
# as.numeric() scalars.
VOISeR.Coef.Freq.Enemies.3letter <- t(as.numeric(VOISeR.CE.3letter.Freq.Enemies$coefficients[3:5]))
VOISeR.Coef.Freq.Enemies.4letter <- t(as.numeric(VOISeR.CE.4letter.Freq.Enemies$coefficients[3:6]))
VOISeR.Coef.Freq.Enemies.5letter <- t(as.numeric(VOISeR.CE.5letter.Freq.Enemies$coefficients[3:7]))
VOISeR.Coef.Freq.Enemies.6letter <- t(as.numeric(VOISeR.CE.6letter.Freq.Enemies$coefficients[3:8]))
VOISeR.Coef.Freq.Enemies.7letter <- t(as.numeric(VOISeR.CE.7letter.Freq.Enemies$coefficients[3:9]))
VOISeR.Coef.Freq.Enemies.8letter <- t(as.numeric(VOISeR.CE.8letter.Freq.Enemies$coefficients[3:10]))
# Flatten the six 1-row matrices into one 33-element vector (3+4+5+6+7+8 = 33
# positional enemy slopes, ordered by word length then letter position). c()
# applied to 1-row matrices concatenates their elements left to right, which
# is exactly what as.vector(cbind(...)) produced.
VOISeR.Coef.Freq.Enemies.allLetter <- c(
  VOISeR.Coef.Freq.Enemies.3letter, VOISeR.Coef.Freq.Enemies.4letter,
  VOISeR.Coef.Freq.Enemies.5letter, VOISeR.Coef.Freq.Enemies.6letter,
  VOISeR.Coef.Freq.Enemies.7letter, VOISeR.Coef.Freq.Enemies.8letter
)
metrics <- cbind(metrics, VOISeR.Coef.Freq.Enemies.allLetter)
#Adjusted b estimates
# Inverse-variance-weighted ("adjusted") b estimates. For each word length the
# enemy slopes are weighted by 1/SE^2, and scalFac rescales those weights so
# they sum to the number of words that entered that regression (rows with
# Ortho_N > 0 in the length-specific data frame).
# NOTE(review): scalFac is overwritten before each length's block, so each
# scalFac/cbind statement pair below must stay in this exact order.
# 3-letter words
scalFac <- nrow(subset(VOISeR.trained.correct.LN.3letter, Ortho_N>0)) / (
  1/((summary(VOISeR.CE.3letter.Freq.Enemies)[["coefficients"]][3,2])^2) +
  1/((summary(VOISeR.CE.3letter.Freq.Enemies)[["coefficients"]][4,2])^2) +
  1/((summary(VOISeR.CE.3letter.Freq.Enemies)[["coefficients"]][5,2])^2)
)
VOISeR.CoefAdj.Freq.Enemies.3letter <- cbind(
  summary(VOISeR.CE.3letter.Freq.Enemies)[["coefficients"]][3,1] * scalFac/((summary(VOISeR.CE.3letter.Freq.Enemies)[["coefficients"]][3,2])^2),
  summary(VOISeR.CE.3letter.Freq.Enemies)[["coefficients"]][4,1] * scalFac/((summary(VOISeR.CE.3letter.Freq.Enemies)[["coefficients"]][4,2])^2),
  summary(VOISeR.CE.3letter.Freq.Enemies)[["coefficients"]][5,1] * scalFac/((summary(VOISeR.CE.3letter.Freq.Enemies)[["coefficients"]][5,2])^2)
)
# 4-letter words
scalFac <- nrow(subset(VOISeR.trained.correct.LN.4letter, Ortho_N>0)) / (
  1/((summary(VOISeR.CE.4letter.Freq.Enemies)[["coefficients"]][3,2])^2) +
  1/((summary(VOISeR.CE.4letter.Freq.Enemies)[["coefficients"]][4,2])^2) +
  1/((summary(VOISeR.CE.4letter.Freq.Enemies)[["coefficients"]][5,2])^2) +
  1/((summary(VOISeR.CE.4letter.Freq.Enemies)[["coefficients"]][6,2])^2)
)
VOISeR.CoefAdj.Freq.Enemies.4letter <- cbind(
  summary(VOISeR.CE.4letter.Freq.Enemies)[["coefficients"]][3,1] * scalFac/((summary(VOISeR.CE.4letter.Freq.Enemies)[["coefficients"]][3,2])^2),
  summary(VOISeR.CE.4letter.Freq.Enemies)[["coefficients"]][4,1] * scalFac/((summary(VOISeR.CE.4letter.Freq.Enemies)[["coefficients"]][4,2])^2),
  summary(VOISeR.CE.4letter.Freq.Enemies)[["coefficients"]][5,1] * scalFac/((summary(VOISeR.CE.4letter.Freq.Enemies)[["coefficients"]][5,2])^2),
  summary(VOISeR.CE.4letter.Freq.Enemies)[["coefficients"]][6,1] * scalFac/((summary(VOISeR.CE.4letter.Freq.Enemies)[["coefficients"]][6,2])^2)
)
# 5-letter words
scalFac <- nrow(subset(VOISeR.trained.correct.LN.5letter, Ortho_N>0)) / (
  1/((summary(VOISeR.CE.5letter.Freq.Enemies)[["coefficients"]][3,2])^2) +
  1/((summary(VOISeR.CE.5letter.Freq.Enemies)[["coefficients"]][4,2])^2) +
  1/((summary(VOISeR.CE.5letter.Freq.Enemies)[["coefficients"]][5,2])^2) +
  1/((summary(VOISeR.CE.5letter.Freq.Enemies)[["coefficients"]][6,2])^2) +
  1/((summary(VOISeR.CE.5letter.Freq.Enemies)[["coefficients"]][7,2])^2)
)
VOISeR.CoefAdj.Freq.Enemies.5letter <- cbind(
  summary(VOISeR.CE.5letter.Freq.Enemies)[["coefficients"]][3,1] * scalFac/((summary(VOISeR.CE.5letter.Freq.Enemies)[["coefficients"]][3,2])^2),
  summary(VOISeR.CE.5letter.Freq.Enemies)[["coefficients"]][4,1] * scalFac/((summary(VOISeR.CE.5letter.Freq.Enemies)[["coefficients"]][4,2])^2),
  summary(VOISeR.CE.5letter.Freq.Enemies)[["coefficients"]][5,1] * scalFac/((summary(VOISeR.CE.5letter.Freq.Enemies)[["coefficients"]][5,2])^2),
  summary(VOISeR.CE.5letter.Freq.Enemies)[["coefficients"]][6,1] * scalFac/((summary(VOISeR.CE.5letter.Freq.Enemies)[["coefficients"]][6,2])^2),
  summary(VOISeR.CE.5letter.Freq.Enemies)[["coefficients"]][7,1] * scalFac/((summary(VOISeR.CE.5letter.Freq.Enemies)[["coefficients"]][7,2])^2)
)
# 6-letter words
scalFac <- nrow(subset(VOISeR.trained.correct.LN.6letter, Ortho_N>0)) / (
  1/((summary(VOISeR.CE.6letter.Freq.Enemies)[["coefficients"]][3,2])^2) +
  1/((summary(VOISeR.CE.6letter.Freq.Enemies)[["coefficients"]][4,2])^2) +
  1/((summary(VOISeR.CE.6letter.Freq.Enemies)[["coefficients"]][5,2])^2) +
  1/((summary(VOISeR.CE.6letter.Freq.Enemies)[["coefficients"]][6,2])^2) +
  1/((summary(VOISeR.CE.6letter.Freq.Enemies)[["coefficients"]][7,2])^2) +
  1/((summary(VOISeR.CE.6letter.Freq.Enemies)[["coefficients"]][8,2])^2)
)
VOISeR.CoefAdj.Freq.Enemies.6letter <- cbind(
  summary(VOISeR.CE.6letter.Freq.Enemies)[["coefficients"]][3,1] * scalFac/((summary(VOISeR.CE.6letter.Freq.Enemies)[["coefficients"]][3,2])^2),
  summary(VOISeR.CE.6letter.Freq.Enemies)[["coefficients"]][4,1] * scalFac/((summary(VOISeR.CE.6letter.Freq.Enemies)[["coefficients"]][4,2])^2),
  summary(VOISeR.CE.6letter.Freq.Enemies)[["coefficients"]][5,1] * scalFac/((summary(VOISeR.CE.6letter.Freq.Enemies)[["coefficients"]][5,2])^2),
  summary(VOISeR.CE.6letter.Freq.Enemies)[["coefficients"]][6,1] * scalFac/((summary(VOISeR.CE.6letter.Freq.Enemies)[["coefficients"]][6,2])^2),
  summary(VOISeR.CE.6letter.Freq.Enemies)[["coefficients"]][7,1] * scalFac/((summary(VOISeR.CE.6letter.Freq.Enemies)[["coefficients"]][7,2])^2),
  summary(VOISeR.CE.6letter.Freq.Enemies)[["coefficients"]][8,1] * scalFac/((summary(VOISeR.CE.6letter.Freq.Enemies)[["coefficients"]][8,2])^2)
)
# 7-letter words
scalFac <- nrow(subset(VOISeR.trained.correct.LN.7letter, Ortho_N>0)) / (
  1/((summary(VOISeR.CE.7letter.Freq.Enemies)[["coefficients"]][3,2])^2) +
  1/((summary(VOISeR.CE.7letter.Freq.Enemies)[["coefficients"]][4,2])^2) +
  1/((summary(VOISeR.CE.7letter.Freq.Enemies)[["coefficients"]][5,2])^2) +
  1/((summary(VOISeR.CE.7letter.Freq.Enemies)[["coefficients"]][6,2])^2) +
  1/((summary(VOISeR.CE.7letter.Freq.Enemies)[["coefficients"]][7,2])^2) +
  1/((summary(VOISeR.CE.7letter.Freq.Enemies)[["coefficients"]][8,2])^2) +
  1/((summary(VOISeR.CE.7letter.Freq.Enemies)[["coefficients"]][9,2])^2)
)
VOISeR.CoefAdj.Freq.Enemies.7letter <- cbind(
  summary(VOISeR.CE.7letter.Freq.Enemies)[["coefficients"]][3,1] * scalFac/((summary(VOISeR.CE.7letter.Freq.Enemies)[["coefficients"]][3,2])^2),
  summary(VOISeR.CE.7letter.Freq.Enemies)[["coefficients"]][4,1] * scalFac/((summary(VOISeR.CE.7letter.Freq.Enemies)[["coefficients"]][4,2])^2),
  summary(VOISeR.CE.7letter.Freq.Enemies)[["coefficients"]][5,1] * scalFac/((summary(VOISeR.CE.7letter.Freq.Enemies)[["coefficients"]][5,2])^2),
  summary(VOISeR.CE.7letter.Freq.Enemies)[["coefficients"]][6,1] * scalFac/((summary(VOISeR.CE.7letter.Freq.Enemies)[["coefficients"]][6,2])^2),
  summary(VOISeR.CE.7letter.Freq.Enemies)[["coefficients"]][7,1] * scalFac/((summary(VOISeR.CE.7letter.Freq.Enemies)[["coefficients"]][7,2])^2),
  summary(VOISeR.CE.7letter.Freq.Enemies)[["coefficients"]][8,1] * scalFac/((summary(VOISeR.CE.7letter.Freq.Enemies)[["coefficients"]][8,2])^2),
  summary(VOISeR.CE.7letter.Freq.Enemies)[["coefficients"]][9,1] * scalFac/((summary(VOISeR.CE.7letter.Freq.Enemies)[["coefficients"]][9,2])^2)
)
# 8-letter words
scalFac <- nrow(subset(VOISeR.trained.correct.LN.8letter, Ortho_N>0)) / (
  1/((summary(VOISeR.CE.8letter.Freq.Enemies)[["coefficients"]][3,2])^2) +
  1/((summary(VOISeR.CE.8letter.Freq.Enemies)[["coefficients"]][4,2])^2) +
  1/((summary(VOISeR.CE.8letter.Freq.Enemies)[["coefficients"]][5,2])^2) +
  1/((summary(VOISeR.CE.8letter.Freq.Enemies)[["coefficients"]][6,2])^2) +
  1/((summary(VOISeR.CE.8letter.Freq.Enemies)[["coefficients"]][7,2])^2) +
  1/((summary(VOISeR.CE.8letter.Freq.Enemies)[["coefficients"]][8,2])^2) +
  1/((summary(VOISeR.CE.8letter.Freq.Enemies)[["coefficients"]][9,2])^2) +
  1/((summary(VOISeR.CE.8letter.Freq.Enemies)[["coefficients"]][10,2])^2)
)
VOISeR.CoefAdj.Freq.Enemies.8letter <- cbind(
  summary(VOISeR.CE.8letter.Freq.Enemies)[["coefficients"]][3,1] * scalFac/((summary(VOISeR.CE.8letter.Freq.Enemies)[["coefficients"]][3,2])^2),
  summary(VOISeR.CE.8letter.Freq.Enemies)[["coefficients"]][4,1] * scalFac/((summary(VOISeR.CE.8letter.Freq.Enemies)[["coefficients"]][4,2])^2),
  summary(VOISeR.CE.8letter.Freq.Enemies)[["coefficients"]][5,1] * scalFac/((summary(VOISeR.CE.8letter.Freq.Enemies)[["coefficients"]][5,2])^2),
  summary(VOISeR.CE.8letter.Freq.Enemies)[["coefficients"]][6,1] * scalFac/((summary(VOISeR.CE.8letter.Freq.Enemies)[["coefficients"]][6,2])^2),
  summary(VOISeR.CE.8letter.Freq.Enemies)[["coefficients"]][7,1] * scalFac/((summary(VOISeR.CE.8letter.Freq.Enemies)[["coefficients"]][7,2])^2),
  summary(VOISeR.CE.8letter.Freq.Enemies)[["coefficients"]][8,1] * scalFac/((summary(VOISeR.CE.8letter.Freq.Enemies)[["coefficients"]][8,2])^2),
  summary(VOISeR.CE.8letter.Freq.Enemies)[["coefficients"]][9,1] * scalFac/((summary(VOISeR.CE.8letter.Freq.Enemies)[["coefficients"]][9,2])^2),
  summary(VOISeR.CE.8letter.Freq.Enemies)[["coefficients"]][10,1] * scalFac/((summary(VOISeR.CE.8letter.Freq.Enemies)[["coefficients"]][10,2])^2)
)
# Flatten the per-length 1-row matrices of adjusted b estimates into one
# 33-element vector (ordered by word length, then letter position). c() on
# 1-row matrices concatenates left to right, identical to as.vector(cbind(...)).
VOISeR.CoefAdj.Freq.Enemies.allLetter <- c(
  VOISeR.CoefAdj.Freq.Enemies.3letter, VOISeR.CoefAdj.Freq.Enemies.4letter,
  VOISeR.CoefAdj.Freq.Enemies.5letter, VOISeR.CoefAdj.Freq.Enemies.6letter,
  VOISeR.CoefAdj.Freq.Enemies.7letter, VOISeR.CoefAdj.Freq.Enemies.8letter
)
metrics <- cbind(metrics, VOISeR.CoefAdj.Freq.Enemies.allLetter)
#Plots
ggplot(metrics, aes(entropy.allLetter, VOISeR.CoefAdj.Freq.Enemies.allLetter)) +
geom_smooth(method=lm, color = "black", alpha = 0.15) + geom_point(color = metrics$Colors) +
geom_text_repel(box.padding = 1, aes(label=Labels), color = metrics$Colors,
size = 6, force = 3, max.iter = 5000) +
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.background = element_rect(fill = "transparent", color = NA)) +
labs(x="Entropy", y = "Influence of Enemies \n on VOISeR Latency (b estimate)")
cor.test(entropy.allLetter,VOISeR.CoefAdj.Freq.Enemies.allLetter)
cor.test(entropy.allLetter[-c(1,4,8,13,19,26)], VOISeR.CoefAdj.Freq.Enemies.allLetter[-c(1,4,8,13,19,26)]) #look at this without first-position values
ggplot(metrics, aes(nmgCoefAdj.Freq.Enemies.allLetter, VOISeR.CoefAdj.Freq.Enemies.allLetter)) +
geom_smooth(method=lm, color = "black", alpha = 0.15) + geom_point(color = metrics$Colors) +
geom_text_repel(box.padding = 1, aes(label=Labels), color = metrics$Colors,
size = 6, force = 3, max.iter = 5000) +
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.background = element_rect(fill = "transparent", color = NA)) +
labs(x="b estimates from \n human subjects data", y = "b estimates from \n VOISeR model")
cor.test(nmgCoefAdj.Freq.Enemies.allLetter,VOISeR.CoefAdj.Freq.Enemies.allLetter)
cor.test(nmgCoefAdj.Freq.Enemies.allLetter[-c(1,4,8,13,19,26)], VOISeR.CoefAdj.Freq.Enemies.allLetter[-c(1,4,8,13,19,26)]) #look at this without first-position values
save.image("~/friends-and-enemies/PostScript03.RData")
|
#' Data.frame of parameter values
#'
#' Gathers the simulation parameters (plus the derived number of sample-point
#' pairs) into a two-column long-format table.
#'
#' @inheritParams simmiad
#' @param habitat_width Length of the edge of the habitat square
#' @param population_size Number of individual plants in the population,
#' derived from the habitat size and population density
#' @author Tom Ellis
#' @return A data.frame giving parameter names and values
parameter_table <- function(
mean_dispersal_distance,
outcrossing_rate,
n_generations,
n_starting_genotypes,
density,
nsims,
n_sample_points,
sample_spacing,
range_limit,
years_to_sample,
dormancy,
habitat_width = (n_sample_points * sample_spacing * range_limit),
population_size = density * habitat_width^2,
var_w
){
  # Collect every parameter as a named value; 'npairs' (the number of
  # pairwise comparisons between sample points) is derived here.
  values <- c(
    mean_dispersal_distance = mean_dispersal_distance,
    outcrossing_rate = outcrossing_rate,
    n_generations = n_generations,
    n_starting_genotypes = n_starting_genotypes,
    density = density,
    dormancy = dormancy,
    nsims = nsims,
    n_sample_points = n_sample_points,
    sample_spacing = sample_spacing,
    range_limit = range_limit,
    habitat_width = habitat_width,
    population_size = population_size,
    years_to_sample = years_to_sample,
    npairs = n_sample_points * (n_sample_points - 1) / 2,
    var_w = var_w
  )
  # Long format: one row per parameter, names paired with their values.
  data.frame(parameter = names(values), value = unname(values))
}
|
/R/parameter_table.R
|
permissive
|
ellisztamas/simmiad
|
R
| false
| false
| 1,424
|
r
|
#' Data.frame of parameter values
#'
#' @inheritParams simmiad
#' @param habitat_width Length of the edge of the habitat square
#' @param population_size Number of individual plants in the population,
#' derived from the habitat size and population density
#' @author Tom Ellis
#' @return A data.frame giving parameter names and values
parameter_table <- function(
mean_dispersal_distance,
outcrossing_rate,
n_generations,
n_starting_genotypes,
density,
nsims,
n_sample_points,
sample_spacing,
range_limit,
years_to_sample,
dormancy,
habitat_width = (n_sample_points * sample_spacing * range_limit),
population_size = density * habitat_width^2,
var_w
){
# Long-format table: the 'parameter' and 'value' vectors must stay in the
# same order (note dormancy is listed before nsims in both).
data.frame(
parameter = c(
'mean_dispersal_distance',
'outcrossing_rate',
'n_generations',
'n_starting_genotypes',
'density',
'dormancy',
'nsims',
'n_sample_points',
'sample_spacing',
'range_limit',
'habitat_width',
'population_size',
'years_to_sample',
'npairs',
'var_w'
),
value = c(
mean_dispersal_distance,
outcrossing_rate,
n_generations,
n_starting_genotypes,
density,
dormancy,
nsims,
n_sample_points,
sample_spacing,
range_limit,
habitat_width,
population_size,
years_to_sample,
# npairs: number of pairwise comparisons between sample points
(n_sample_points * (n_sample_points-1))/2,
var_w
)
)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/retype.R
\name{retype}
\alias{retype}
\title{Retype Variable}
\usage{
retype(df, ...)
}
\arguments{
\item{df}{data.frame with values to typecast}
\item{...}{unquoted list alternating variables and datatypes to convert to}
}
\value{
Data.frame \code{df} with listed variables updated to specified data types
}
\description{
retype is a typecasting function which can be used
in the tidy workflow to change the type of multiple
variables within a data.frame in-line.
}
\examples{
iris2 <- iris \%>\% select(Species, Sepal.Length) \%>\% retype(Species, character)
str(iris2)
}
\author{
Samuel Callisto \email{calli055@umn.edu}
}
|
/man/retype.Rd
|
no_license
|
ftuhin2828/dataTools
|
R
| false
| true
| 708
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/retype.R
\name{retype}
\alias{retype}
\title{Retype Variable}
\usage{
retype(df, ...)
}
\arguments{
\item{df}{data.frame with values to typecast}
\item{...}{unquoted list alternating variables and datatypes to convert to}
}
\value{
Data.frame \code{df} with listed variables updated to specified data types
}
\description{
retype is a typecasting function which can be used
in the tidy workflow to change the type of multiple
variables within a data.frame in-line.
}
\examples{
iris2 <- iris \%>\% select(Species, Sepal.Length) \%>\% retype(Species, character)
str(iris2)
}
\author{
Samuel Callisto \email{calli055@umn.edu}
}
|
#' Return the average timestep in days
#'
#' @param datetimes a vector of date-times in POSIXct format from which to
#'   compute the average timestep
#' @param format the format in which to return the timestep. 'mean' always
#'   returns one value; 'unique' may return more than one depending on the
#'   variation in timesteps and the value of \code{tol}; 'modal' returns the
#'   single most common timestep (to within \code{tol}).
#' @param require_unique logical. should it be required that there is exactly
#'   one unique timestep (within the given tolerance \code{tol})?
#' @param tol if \code{format == 'unique'}, unique values are first calculated
#'   to machine precision, but then subsetted to those that differ from one
#'   another by at least tol, where tol is a time difference in units of days
#'   (and thus 1/(24*60*60) is one second).
#' @return A timestep in units of days (a vector of timesteps when
#'   \code{format == 'unique'}), or \code{NA} when fewer than 2 datetimes are
#'   supplied and \code{require_unique} is \code{FALSE}.
#' @importFrom unitted v
#' @importFrom stats approx
#' @examples {
#' datetimes <- Sys.time()+ as.difftime(c(0,304,600,900.2,1200,1500,1800), units='secs')
#' mm_get_timestep(datetimes, 'unique', tol=1/(24*60*60))
#' mm_get_timestep(datetimes, 'unique', tol=5/(24*60*60))
#' mm_get_timestep(datetimes, 'unique', tol=10/(24*60*60))
#' mm_get_timestep(datetimes, 'unique', tol=300/(24*60*60))
#' mm_get_timestep(datetimes, 'mean')
#' mm_get_timestep(datetimes, 'mean', require_unique=TRUE, tol=300/(24*60*60))
#' datetimes <- Sys.time()+ as.difftime(c(-1,0,2,4,5,6,8,10), units='days')
#' mm_get_timestep(datetimes, 'modal')
#' mm_get_timestep(c(), 'mean')
#' mm_get_timestep(c(), 'unique')
#' mm_get_timestep(c(), 'modal')
#' \dontrun{
#' # all of these should and do give errors:
#' mm_get_timestep(datetimes, 'mean', require_unique=TRUE, tol=1/(24*60*60))
#' mm_get_timestep(datetimes, 'unique', tol=5/(24*60*60), require_unique=TRUE)
#' mm_get_timestep(c(), 'mean', require_unique=TRUE)
#' mm_get_timestep(c(), 'unique', require_unique=TRUE)
#' mm_get_timestep(c(), 'modal', require_unique=TRUE)
#' }
#' }
#' @export
mm_get_timestep <- function(datetimes, format=c('mean','unique','modal'), require_unique=FALSE, tol=60/(24*60*60)) {
# fewer than 2 datetimes means there are no differences to compute
if(length(datetimes) < 2) {
if(require_unique) stop('!=1 unique timestep') else return(NA)
}
# consecutive differences converted to days (v() strips unitted attributes)
timesteps <- as.numeric(diff(v(datetimes)), units="days")
timestep <- switch(
match.arg(format),
mean = {
# when uniqueness is required, 'mean' must behave like 'unique'
# (error unless exactly one timestep survives the tolerance filter)
if(require_unique == TRUE)
mm_get_timestep(datetimes, format='unique', require_unique=TRUE, tol=tol)
else
mean(timesteps, na.rm=TRUE)
},
unique = {
# greedy thinning: walk the sorted timesteps, keeping a value only if
# it exceeds the last kept value by more than tol
all_unique <- sort(unique(timesteps))
sufficiently_unique <- c()
while(length(all_unique) > 0) {
sufficiently_unique <- c(sufficiently_unique, all_unique[1])
all_unique <- all_unique[which(all_unique > tail(sufficiently_unique, 1) + tol)]
}
if(require_unique == TRUE && length(sufficiently_unique) != 1)
stop('!=1 unique timestep')
sufficiently_unique
},
modal = {
# count how often each exact timestep occurs ...
all_unique <- sort(unique(timesteps))
tbl_unique <- sapply(all_unique, function(tstep) sum(timesteps == tstep), USE.NAMES=FALSE)
# ... then bin the exact values into tol-separated representatives by
# reusing the 'unique' logic on a synthetic datetime vector
sufficiently_unique <- mm_get_timestep(
datetimes=cumsum(c(0,all_unique)), format='unique', require_unique=require_unique, tol=tol)
if(length(sufficiently_unique) == 0) return(c())
# snap each exact timestep onto its representative (step function, rule=2
# extends the ends so every value gets a bin)
round_unique <- approx(sufficiently_unique, sufficiently_unique, xout=all_unique, method='constant', rule=2)$y
# total count per bin, and the best exact value within each bin
tbl_suff_unique <- sapply(sufficiently_unique, function(ru) sum(tbl_unique[which(round_unique == ru)]))
all_suff_unique <- sapply(sufficiently_unique, function(ru) {
matches <- which(round_unique == ru)
best_match <- matches[which.max(tbl_unique[matches])]
all_unique[best_match]
})
# report the representative of the most frequent bin
all_suff_unique[which.max(tbl_suff_unique)]
})
timestep
}
|
/R/mm_get_timestep.R
|
permissive
|
lsdeel/streamMetabolizer
|
R
| false
| false
| 3,717
|
r
|
#' Return the average timestep in days
#'
#' @param datetimes a vector of date-times in POSIXct format from which to
#'   compute the average timestep
#' @param format the format in which to return the timestep. 'mean' always
#'   returns one value; 'unique' may return more than one depending on the
#'   variation in timesteps and the value of \code{tol}; 'modal' returns the
#'   single most common timestep (to within \code{tol}).
#' @param require_unique logical. should it be required that there is exactly
#'   one unique timestep (within the given tolerance \code{tol})?
#' @param tol if \code{format == 'unique'}, unique values are first calculated
#'   to machine precision, but then subsetted to those that differ from one
#'   another by at least tol, where tol is a time difference in units of days
#'   (and thus 1/(24*60*60) is one second).
#' @return A timestep in units of days (a vector of timesteps when
#'   \code{format == 'unique'}), or \code{NA} when fewer than 2 datetimes are
#'   supplied and \code{require_unique} is \code{FALSE}.
#' @importFrom unitted v
#' @importFrom stats approx
#' @examples {
#' datetimes <- Sys.time()+ as.difftime(c(0,304,600,900.2,1200,1500,1800), units='secs')
#' mm_get_timestep(datetimes, 'unique', tol=1/(24*60*60))
#' mm_get_timestep(datetimes, 'unique', tol=5/(24*60*60))
#' mm_get_timestep(datetimes, 'unique', tol=10/(24*60*60))
#' mm_get_timestep(datetimes, 'unique', tol=300/(24*60*60))
#' mm_get_timestep(datetimes, 'mean')
#' mm_get_timestep(datetimes, 'mean', require_unique=TRUE, tol=300/(24*60*60))
#' datetimes <- Sys.time()+ as.difftime(c(-1,0,2,4,5,6,8,10), units='days')
#' mm_get_timestep(datetimes, 'modal')
#' mm_get_timestep(c(), 'mean')
#' mm_get_timestep(c(), 'unique')
#' mm_get_timestep(c(), 'modal')
#' \dontrun{
#' # all of these should and do give errors:
#' mm_get_timestep(datetimes, 'mean', require_unique=TRUE, tol=1/(24*60*60))
#' mm_get_timestep(datetimes, 'unique', tol=5/(24*60*60), require_unique=TRUE)
#' mm_get_timestep(c(), 'mean', require_unique=TRUE)
#' mm_get_timestep(c(), 'unique', require_unique=TRUE)
#' mm_get_timestep(c(), 'modal', require_unique=TRUE)
#' }
#' }
#' @export
mm_get_timestep <- function(datetimes, format=c('mean','unique','modal'), require_unique=FALSE, tol=60/(24*60*60)) {
# fewer than 2 datetimes means there are no differences to compute
if(length(datetimes) < 2) {
if(require_unique) stop('!=1 unique timestep') else return(NA)
}
# consecutive differences converted to days (v() strips unitted attributes)
timesteps <- as.numeric(diff(v(datetimes)), units="days")
timestep <- switch(
match.arg(format),
mean = {
# when uniqueness is required, 'mean' must behave like 'unique'
# (error unless exactly one timestep survives the tolerance filter)
if(require_unique == TRUE)
mm_get_timestep(datetimes, format='unique', require_unique=TRUE, tol=tol)
else
mean(timesteps, na.rm=TRUE)
},
unique = {
# greedy thinning: walk the sorted timesteps, keeping a value only if
# it exceeds the last kept value by more than tol
all_unique <- sort(unique(timesteps))
sufficiently_unique <- c()
while(length(all_unique) > 0) {
sufficiently_unique <- c(sufficiently_unique, all_unique[1])
all_unique <- all_unique[which(all_unique > tail(sufficiently_unique, 1) + tol)]
}
if(require_unique == TRUE && length(sufficiently_unique) != 1)
stop('!=1 unique timestep')
sufficiently_unique
},
modal = {
# count how often each exact timestep occurs ...
all_unique <- sort(unique(timesteps))
tbl_unique <- sapply(all_unique, function(tstep) sum(timesteps == tstep), USE.NAMES=FALSE)
# ... then bin the exact values into tol-separated representatives by
# reusing the 'unique' logic on a synthetic datetime vector
sufficiently_unique <- mm_get_timestep(
datetimes=cumsum(c(0,all_unique)), format='unique', require_unique=require_unique, tol=tol)
if(length(sufficiently_unique) == 0) return(c())
# snap each exact timestep onto its representative (step function, rule=2
# extends the ends so every value gets a bin)
round_unique <- approx(sufficiently_unique, sufficiently_unique, xout=all_unique, method='constant', rule=2)$y
# total count per bin, and the best exact value within each bin
tbl_suff_unique <- sapply(sufficiently_unique, function(ru) sum(tbl_unique[which(round_unique == ru)]))
all_suff_unique <- sapply(sufficiently_unique, function(ru) {
matches <- which(round_unique == ru)
best_match <- matches[which.max(tbl_unique[matches])]
all_unique[best_match]
})
# report the representative of the most frequent bin
all_suff_unique[which.max(tbl_suff_unique)]
})
timestep
}
|
#' Generation of a Square-wave Burst Signal
#'
#' This function takes in numeric arguments for a customizable, square-wave burst shape. Each oscillation cycle is separated into three phases: a primary active phase, in which the oscillator resides at peak concentration, a secondary active phase, in which the oscillator stays at secondary peak concentration and an inactive phase, in which the oscillator is fixed to baseline concentration. A discretized time course is returned.
#'
#' @details Standards:
#' \itemize{
#' \item{\code{peak} and \code{sec_peak} must be larger than \code{baseline}}
#' \item{\code{duration} must be larger than \code{resolution}}
#' \item{\code{duration} must be a multiple of the \code{resolution}}
#' \item{\code{period} must be a multiple of \code{resolution}}
#' \item{\code{duration}, \code{resolution}, \code{peak}, \code{sec_peak} and \code{period} must be larger than 0}
#' \item{\code{baseline} must be larger or equal to 0}
#' \item{\code{duty_cycle} must be larger than 0 and smaller or equal to 1}
#' \item{\code{sec_duty_cycle} must be larger than 0 and smaller or equal to 1}
#' \item{\code{trend} must be larger than 0}
#' }
#'
#' @param baseline minimal oscillation value
#' @param peak maximal oscillation value
#' @param period oscillation period of the oscillating species (reciprocal of the frequency)
#' @param duty_cycle ratio of the active phase (oscillator above baseline) to the total oscillation period
#' @param sec_duty_cycle ratio of the primary active phase (time interval from cycle start till reaching of the secondary peak level) to the total active phase
#' @param sec_peak intermediary value reached after the end of the primary active phase
#' @param trend percental decrease or increase in the peak and secondary peak values for the successive oscillation cycles; if set to 1, values remain unchanged
#' @param duration duration of the generated time course
#' @param resolution temporal resolution of the generated time course
#' @examples
#' # test effect of changes in period
#' m1 = SquareBurst(baseline = 200, peak = 1000, period = 50, duty_cycle = 0.6,
#' sec_duty_cycle = 0.5, sec_peak = 700, trend = 1, duration = 500, resolution = 0.1)
#' m2 = SquareBurst(baseline = 200, peak = 1000, period = 100, duty_cycle = 0.6,
#' sec_duty_cycle = 0.5, sec_peak = 700, trend = 1, duration = 500, resolution = 0.1)
#' m3 = SquareBurst(baseline = 200, peak = 1000, period = 200, duty_cycle = 0.6,
#' sec_duty_cycle = 0.5, sec_peak = 700, trend = 1, duration = 500, resolution = 0.1)
#'
#' par(mfrow = c(3,1))
#' plot(m1, type = "l", xlab = "time", ylab = "abundance")
#' plot(m2, type = "l", xlab = "time", ylab = "abundance")
#' plot(m3, type = "l", xlab = "time", ylab = "abundance")
#' @return Returns a matrix with two columns: a time vector and an oscillator abundance vector.
#' @export
SquareBurst <- function(baseline, peak, period, duty_cycle, sec_duty_cycle, sec_peak, trend, duration, resolution) {
# ---- input validation ----
if(peak < baseline || sec_peak < baseline) {
stop("(secondary) peak must be larger than baseline!")
}
if(duration <= resolution ) {
stop("duration must be longer than resolution!")
}
# tolerance-based multiple checks to tolerate floating-point rounding
if((abs(duration/resolution - round(duration/resolution))) > 1e-10) {
stop("duration must be a multiple of resolution!")
}
if((abs(period/resolution - round(period/resolution))) > 1e-10) {
stop("period must be a multiple of resolution!")
}
if(duration <= 0 || resolution <= 0 || peak <= 0 || period <= 0 || sec_peak <= 0) {
# Report every offending argument by name.
# BUG FIX: the lookup matrix previously had nrow = 6 with only 5 row names
# and 4 values (sec_peak missing), so this branch itself raised a
# dimnames/recycling error instead of the intended message.
ID=matrix(ncol=1,nrow=5)
rownames(ID)=c("duration","resolution","peak","sec_peak","period")
ID[,1]=c(duration,resolution,peak,sec_peak,period)
Ind=which(ID[,1]<=0)
stop(paste0(rownames(ID)[Ind]," must be larger than 0! "))
}
if(baseline < 0) {
stop("baseline must be larger than or equal to 0!")
}
if(duty_cycle <= 0 || duty_cycle > 1) {
stop("duty cycle must be larger than 0 and smaller or equal to 1!")
}
if(sec_duty_cycle <= 0 || sec_duty_cycle > 1) {
stop("secondary duty cycle must be larger than 0 and smaller or equal to 1!")
}
if(trend <=0) {
stop("trend must be larger than 0!")
}
# ---- build the discretized time course ----
Osc=matrix(ncol=2,nrow=length(seq(0,duration,resolution)))
colnames(Osc)=c("time","osc")
Osc[,1]=seq(0,duration,resolution)
active_tot=period*duty_cycle       # total active phase length per cycle
active_prim=active_tot*sec_duty_cycle  # primary (peak-level) part of the active phase
Osc[,2]=baseline
# Fill each complete cycle: primary rows at peak, then secondary rows at
# sec_peak; the rest of the cycle stays at baseline.  peak/sec_peak are
# multiplied by 'trend' after every cycle, clamped at baseline from below.
# NOTE(review): if duration < period the loop bound nrow*resolution/period
# is < 1 and 1:x counts downward -- confirm intended behavior for signals
# shorter than one period.
for (i in 1:(nrow(Osc)*resolution/period)) {
Osc[round((((i-1)*(period/resolution))+2):(((i-1)*(period/resolution))+active_prim/resolution)),2]=peak
Osc[round(((((i-1)*(period/resolution))+active_prim/resolution)+1):(((i-1)*(period/resolution))+active_tot/resolution)),2]=sec_peak
peak=peak*trend
if(peak <= baseline) {
peak=baseline
}
sec_peak=sec_peak*trend
if(sec_peak <= baseline) {
sec_peak=baseline
}
}
# ---- trailing (possibly partial) cycle ----
# 'i' keeps its final loop value here (R retains the loop variable after the
# loop), so these indices start right after the last complete cycle.
if ((nrow(Osc)-(((i)*(period/resolution)))) <= (active_prim/resolution)) {
Osc[round((((i)*(period/resolution))+1):nrow(Osc)),2]=peak
} else {
Osc[round((((i)*(period/resolution))+1):(((i)*(period/resolution))+active_prim/resolution)),2]=peak
if ((nrow(Osc)-(((i)*(period/resolution))+active_prim/resolution)) <= ((active_tot-active_prim)/resolution)) {
Osc[round(floor(((i)*(period/resolution))+active_prim/resolution+1):nrow(Osc)),2]=sec_peak
} else {
Osc[round((((i)*(period/resolution))+active_prim/resolution+1):(((i)*(period/resolution))+active_tot/resolution)),2]=sec_peak
}
}
# the first row of the trailing cycle is pinned back to baseline
Osc[round((((i)*(period/resolution))+1)),2] = baseline
return(Osc)
}
|
/R/SquareBurst.R
|
no_license
|
cran/OscillatorGenerator
|
R
| false
| false
| 7,826
|
r
|
#' Generation of a Square-wave Burst Signal
#'
#' This function takes in numeric arguments for a customizable, square-wave burst shape. Each oscillation cycle is separated into three phases: a primary active phase, in which the oscillator resides at peak concentration, a secondary active phase, in which the oscillator stays at secondary peak concentration and an inactive phase, in which the oscillator is fixed to baseline concentration. A discretized time course is returned.
#'
#' @details Standards:
#' \itemize{
#' \item{\code{peak} and \code{sec_peak} must be larger than \code{baseline}}
#' \item{\code{duration} must be larger than \code{resolution}}
#' \item{\code{duration} must be a multiple of the \code{resolution}}
#' \item{\code{period} must be a multiple of \code{resolution}}
#' \item{\code{duration}, \code{resolution}, \code{peak}, \code{sec_peak} and \code{period} must be larger than 0}
#' \item{\code{baseline} must be larger or equal to 0}
#' \item{\code{duty_cycle} must be larger than 0 and smaller or equal to 1}
#' \item{\code{sec_duty_cycle} must be larger than 0 and smaller or equal to 1}
#' \item{\code{trend} must be larger than 0}
#' }
#'
#' @param baseline minimal oscillation value
#' @param peak maximal oscillation value
#' @param period oscillation period of the oscillating species (reciprocal of the frequency)
#' @param duty_cycle ratio of the active phase (oscillator above baseline) to the total oscillation period
#' @param sec_duty_cycle ratio of the primary active phase (time interval from cycle start till reaching of the secondary peak level) to the total active phase
#' @param sec_peak intermediary value reached after the end of the primary active phase
#' @param trend percental decrease or increase in the peak and secondary peak values for the successive oscillation cycles; if set to 1, values remain unchanged
#' @param duration duration of the generated time course
#' @param resolution temporal resolution of the generated time course
#' @examples
#' # test effect of changes in period
#' m1 = SquareBurst(baseline = 200, peak = 1000, period = 50, duty_cycle = 0.6,
#' sec_duty_cycle = 0.5, sec_peak = 700, trend = 1, duration = 500, resolution = 0.1)
#' m2 = SquareBurst(baseline = 200, peak = 1000, period = 100, duty_cycle = 0.6,
#' sec_duty_cycle = 0.5, sec_peak = 700, trend = 1, duration = 500, resolution = 0.1)
#' m3 = SquareBurst(baseline = 200, peak = 1000, period = 200, duty_cycle = 0.6,
#' sec_duty_cycle = 0.5, sec_peak = 700, trend = 1, duration = 500, resolution = 0.1)
#'
#' par(mfrow = c(3,1))
#' plot(m1, type = "l", xlab = "time", ylab = "abundance")
#' plot(m2, type = "l", xlab = "time", ylab = "abundance")
#' plot(m3, type = "l", xlab = "time", ylab = "abundance")
#' @return Returns a matrix with two columns: a time vector and an oscillator abundance vector.
#' @export
SquareBurst <- function(baseline, peak, period, duty_cycle, sec_duty_cycle, sec_peak, trend, duration, resolution) {
# ---- input validation ----
if(peak < baseline || sec_peak < baseline) {
stop("(secondary) peak must be larger than baseline!")
}
if(duration <= resolution ) {
stop("duration must be longer than resolution!")
}
# tolerance-based multiple checks to tolerate floating-point rounding
if((abs(duration/resolution - round(duration/resolution))) > 1e-10) {
stop("duration must be a multiple of resolution!")
}
if((abs(period/resolution - round(period/resolution))) > 1e-10) {
stop("period must be a multiple of resolution!")
}
if(duration <= 0 || resolution <= 0 || peak <= 0 || period <= 0 || sec_peak <= 0) {
# Report every offending argument by name.
# BUG FIX: the lookup matrix previously had nrow = 6 with only 5 row names
# and 4 values (sec_peak missing), so this branch itself raised a
# dimnames/recycling error instead of the intended message.
ID=matrix(ncol=1,nrow=5)
rownames(ID)=c("duration","resolution","peak","sec_peak","period")
ID[,1]=c(duration,resolution,peak,sec_peak,period)
Ind=which(ID[,1]<=0)
stop(paste0(rownames(ID)[Ind]," must be larger than 0! "))
}
if(baseline < 0) {
stop("baseline must be larger than or equal to 0!")
}
if(duty_cycle <= 0 || duty_cycle > 1) {
stop("duty cycle must be larger than 0 and smaller or equal to 1!")
}
if(sec_duty_cycle <= 0 || sec_duty_cycle > 1) {
stop("secondary duty cycle must be larger than 0 and smaller or equal to 1!")
}
if(trend <=0) {
stop("trend must be larger than 0!")
}
# ---- build the discretized time course ----
Osc=matrix(ncol=2,nrow=length(seq(0,duration,resolution)))
colnames(Osc)=c("time","osc")
Osc[,1]=seq(0,duration,resolution)
active_tot=period*duty_cycle       # total active phase length per cycle
active_prim=active_tot*sec_duty_cycle  # primary (peak-level) part of the active phase
Osc[,2]=baseline
# Fill each complete cycle: primary rows at peak, then secondary rows at
# sec_peak; the rest of the cycle stays at baseline.  peak/sec_peak are
# multiplied by 'trend' after every cycle, clamped at baseline from below.
# NOTE(review): if duration < period the loop bound nrow*resolution/period
# is < 1 and 1:x counts downward -- confirm intended behavior for signals
# shorter than one period.
for (i in 1:(nrow(Osc)*resolution/period)) {
Osc[round((((i-1)*(period/resolution))+2):(((i-1)*(period/resolution))+active_prim/resolution)),2]=peak
Osc[round(((((i-1)*(period/resolution))+active_prim/resolution)+1):(((i-1)*(period/resolution))+active_tot/resolution)),2]=sec_peak
peak=peak*trend
if(peak <= baseline) {
peak=baseline
}
sec_peak=sec_peak*trend
if(sec_peak <= baseline) {
sec_peak=baseline
}
}
# ---- trailing (possibly partial) cycle ----
# 'i' keeps its final loop value here (R retains the loop variable after the
# loop), so these indices start right after the last complete cycle.
if ((nrow(Osc)-(((i)*(period/resolution)))) <= (active_prim/resolution)) {
Osc[round((((i)*(period/resolution))+1):nrow(Osc)),2]=peak
} else {
Osc[round((((i)*(period/resolution))+1):(((i)*(period/resolution))+active_prim/resolution)),2]=peak
if ((nrow(Osc)-(((i)*(period/resolution))+active_prim/resolution)) <= ((active_tot-active_prim)/resolution)) {
Osc[round(floor(((i)*(period/resolution))+active_prim/resolution+1):nrow(Osc)),2]=sec_peak
} else {
Osc[round((((i)*(period/resolution))+active_prim/resolution+1):(((i)*(period/resolution))+active_tot/resolution)),2]=sec_peak
}
}
# the first row of the trailing cycle is pinned back to baseline
Osc[round((((i)*(period/resolution))+1)),2] = baseline
return(Osc)
}
|
# Chib-style multiple change-point linear regression fitted by MCMC.
# Each regime k has its own intercept (mu), slope (mu2) and variance (sigma2).
# The latent state sequence st is non-decreasing in time (a regime, once
# left, is never revisited): state k either stays at k with probability pi[k]
# or advances to k+1, and the last regime K is absorbing.
#
# Arguments:
#   data     - data.frame with columns $x (covariate, e.g. time index) and
#              $y (response)
#   K        - number of regimes
#   start    - list of starting values: mu, mu2, sigma2 (length-K), st
#              (length-nT state sequence), beta, pi
#   prior    - list of prior hyperparameters; mu, mu2 and beta are
#              (mean, variance) pairs, sigma2 parameterizes the
#              inverse-gamma update below
#   MCMC_par - iter, burnin, thin; (iter-burnin)/thin draws are saved
#   sd_prop  - sd of the log-scale random-walk proposal for beta
#
# NOTE(review): the defaults reference objects that must exist in the calling
# environment (ySIM, stSIM, K, and rdirichlet from a Dirichlet package).
#
# Returns a list of posterior draws (muOUT, mu2OUT, sigma2OUT, piOUT, stOUT,
# nvecOUT, betaOUT) plus BIC = max over draws of the BIC-style criterion.
CPmodelChibReg = function(
data = ySIM,
K = K,
start = list(
mu = rnorm(K,0,1),
mu2 = rnorm(K,0,1),
sigma2 = abs(rnorm(K,0,1)),
#sigma2 = c(sigma2SIM,rep(20,Kmax)),
#st = rep(1:2,each=1000)[1:1500],
st = stSIM,
beta = 0.01,
pi = rdirichlet(1,rep(1,K))
),
prior = list(
mu = c(0,1000),
mu2 = c(0,1000),
sigma2 = c(1,1),
beta = c(0,1000)
),
MCMC_par = list(
iter = 15000,
burnin = 11000,
thin = 10
),
sd_prop = 0.5
)
{
rmnorm=function(n = 1, mean = rep(0, d), varcov) # draw n samples from a multivariate normal with the given mean and covariance
{
d <- if (is.matrix(varcov))
ncol(varcov)
else 1
z <- matrix(rnorm(n * d), n, d) %*% chol(varcov) # scale standard normals by the Cholesky factor
y <- t(mean + t(z)) # shift by the mean (d x 1)
return(y)
}
##### DATA
nT = nrow(data)
#################
# MCMCpar: (iter, burnin, thin, number of saved draws)
MCMCpar = c(
MCMC_par$iter,
MCMC_par$burnin,
MCMC_par$thin,
round((MCMC_par$iter-MCMC_par$burnin)/MCMC_par$thin)
)
sdprop = sd_prop
# indexMCMC: inner iterations before the next saved draw (burnin first, then thin)
indexMCMC = MCMCpar[2]
stMCMC = start$st
# nvec[k]: number of observations currently assigned to regime k
TT = table(stMCMC)
nvec = rep(0,K )
nvec[as.numeric(names(TT))] = TT
muMCMC = rep(0,K)
muMCMC[1:K] = start$mu[1:K]
mu2MCMC = rep(0,K)
mu2MCMC[1:K] = start$mu2[1:K]
sigma2MCMC = rep(1,K)
sigma2MCMC[1:K] = start$sigma2[1:K]
piMCMC = rep(1,K)
piMCMC[1:K] = start$pi[1:K]
piMCMC[K] = 1
betaMCMC = start$beta
# storage for posterior draws (one row per saved draw)
muOUT = matrix(NA, nrow=MCMCpar[4], ncol=K)
mu2OUT = matrix(NA, nrow=MCMCpar[4], ncol=K)
sigma2OUT = matrix(NA, nrow=MCMCpar[4], ncol=K)
betaOUT = matrix(NA, nrow=MCMCpar[4], ncol=1)
stOUT = matrix(NA, nrow=MCMCpar[4], ncol=nT)
piOUT = matrix(NA, nrow=MCMCpar[4], ncol=K)
nvecOUT = matrix(NA, nrow=MCMCpar[4], ncol=K)
appst = matrix(0, nrow=nT,ncol=K)
MatrixP = matrix(0,nrow=K,ncol=K)
MatrixB = matrix(0,nrow=K,ncol=K)
iterations = 0
BIC = c()
AIC = c()
DIC = c()
#dataT = c(1:nT)
dataT=data$x
data=data$y
for(iMCMC in 1:MCMCpar[4])
{
#browser()
for(jMCMC in 1:indexMCMC)
{
iterations = iterations+1
##### beta MCMC
# Metropolis-Hastings step for beta on the log scale; the log(betaProp) -
# log(betaMCMC) term is the Jacobian of the log transform.
betaProp = exp(rnorm(1,log(betaMCMC), sdprop))
MHratio = -0.5*(betaProp-prior$beta[1])^2/prior$beta[2]
MHratio = MHratio-(-0.5*(betaMCMC-prior$beta[1])^2/prior$beta[2])
MHratio = MHratio+log(betaProp)-log(betaMCMC)
MHratio = MHratio+(K-1)*log(betaProp)-(K-1)*log(betaMCMC)
for(k in 1:K)
{
MHratio = MHratio+(lgamma(betaProp+1)-lgamma(nvec[k]+betaProp+1-(k==K)) )-(lgamma(betaMCMC+1)-lgamma(nvec[k]+betaMCMC+1-(k==K)))
}
if(runif(1,0,1)<exp(MHratio))
{
#browser()
betaMCMC = betaProp
}
#print(iterations)
## 1. parameter<THETA: mu, sigma>
##### mu
# Gibbs update of (mu[k], mu2[k]) jointly from their conjugate normal
# full conditional, given the observations currently in regime k.
for(k in 1:K)
{
# browser()
w = stMCMC==k
# V = matrix(ncol=2, nrow=2)
# V[1,1] = sum(w) # number of observations in state k
# V[1,2] = sum(dataT[w]) # dataT holds the covariate (time) values
# V[2,1] = sum(dataT[w])
# V[2,2] = sum(dataT[w]^2)
bigX = cbind(rep(1,sum(w)),dataT[w])
V = t(bigX)%*%bigX
V = solve(V/sigma2MCMC[k]+diag(c(1/prior$mu[2],1/prior$mu2[2])))
M = matrix(ncol=1, nrow=2)
# M[1,1] = sum(data[w])
# M[2,1] = sum(dataT[w]*data[w])
M = t(bigX)%*%data[w]
M = V%*%(M/sigma2MCMC[k]+ c(prior$mu[1]/prior$mu[2],prior$mu2[1]/prior$mu2[2]) )
# meanMU = (prior$mu[2]*sum(data[xiMCMC==k])+sigma2MCMC[k]*prior$mu[1])/( nvec[k]*prior$mu[2]+sigma2MCMC[k])
#
# varMU = sigma2MCMC[k]*prior$mu[2]/( nvec[k]*prior$mu[2]+sigma2MCMC[k])
# muMCMC[k] = rnorm(1, meanMU,varMU^0.5)
simm = rmnorm(1,M,V)
muMCMC[k] = simm[1]
mu2MCMC[k] = simm[2]
}
####### sigma2
# Gibbs update of sigma2[k]: inverse-gamma draw (1/rgamma) from the
# conjugate full conditional given the regime-k residuals.
for(k in 1:K)
{
Asigma2 = prior$sigma2[1]+nvec[k]/2
w = stMCMC==k
Bsigma2 = 1/prior$sigma2[2]+ sum( (data[w]-muMCMC[k]-mu2MCMC[k]*dataT[w])^2 )/2
sigma2MCMC[k] = 1.0/rgamma(1,shape=Asigma2,rate=Bsigma2)
}
######### ######### ######### #########
######### sampling st
######### ######### ######### ######### k
if(K>1)
{
#browser()
# Forward filtering: appst[t,] holds the filtered state probabilities.
# MatrixP is the left-to-right transition matrix (stay with pi[k],
# otherwise advance to k+1; regime K is absorbing).
appst = matrix(0, nrow=nT,ncol=K)
diag(MatrixP) = piMCMC
for(k in 1:(K-1))
{
MatrixP[k,k+1] = 1-MatrixP[k,k]
}
#browser()
MatrixP[K,K] = 1
appst[1,1] = 1
if(K>1)
{
if(K>2)
{
# At time t < K at most t regimes can have been visited, so later
# states are zeroed out before normalizing.
for(t in 2:(K-1))
{
appst[t,] = log((appst[t-1,]%*%MatrixP ) )+dnorm(data[t], muMCMC+mu2MCMC*dataT[t], sigma2MCMC^0.5, log=T)
appst[t,] = exp(appst[t,])
appst[t,(t+1):K] = 0
appst[t,] = appst[t,]/sum(appst[t,])
}
}
for(t in (K):nT)
{
appst[t,] = log((appst[t-1,]%*%MatrixP ) )+dnorm(data[t], muMCMC+mu2MCMC*dataT[t], sigma2MCMC^0.5, log=T)
appst[t,] = exp(appst[t,])
appst[t,] = appst[t,]/sum(appst[t,])
}
}else{
appst[,1] = 1
}
# The last observation is forced into the final regime.
stMCMC[nT] = K
#stMCMC[nT] = max(which(appst[nT,]>0)) # presumably finds the last occupied regime -- left disabled
# backward sampling (reverse order through time)
for(t in (nT-1):1)
{
# browser()
k = stMCMC[t+1]
if(k>1)
{
stMCMC[t] = sample(c(k-1,k),1, prob = c( (1-piMCMC[k-1])*appst[t,k-1], piMCMC[k]*appst[t,k] ))
}else{
stMCMC[t] = 1 # if st at time t+1 is 1, st at time t must also be 1
}
}
#browser()
### nvec contains the number of observations in the regimes
TT = table(stMCMC)
nvec = rep(0,K )
nvec[as.numeric(names(TT))] = TT
#### pi
# Conjugate beta update of the self-transition probabilities.
for(k in 1:(K-1))
{
#piMCMC[k] = rbeta(1,1+nvec[k], betaMCMC+1)
#piMCMC[k] = rbeta(1,prior$beta[1]+nvec[k], prior$beta[2]+1)
piMCMC[k] = rbeta(1,prior$beta[1]+(nvec[k]-1), prior$beta[2]+1)
}
}
}
# After the first saved draw, only 'thin' inner iterations are run between saves.
indexMCMC = MCMCpar[3]
###### SAVE PARAMETERS
muOUT[iMCMC,1:K] = muMCMC[1:K]
mu2OUT[iMCMC,1:K] = mu2MCMC[1:K]
sigma2OUT[iMCMC,1:K] = sigma2MCMC[1:K]
piOUT[iMCMC,1:K] = piMCMC[1:K]
#betaOUT[iMCMC,1] = betaMCMC
stOUT[iMCMC,] = stMCMC
nvecOUT[iMCMC,] =nvec
#### Bayes factor
# Re-run the forward filter at the current draw to accumulate the
# marginal likelihood contributions SS[t] for the BIC/AIC criteria.
appst = matrix(0, nrow=nT,ncol=K)
diag(MatrixP) = piMCMC
for(k in 1:(K-1))
{
MatrixP[k,k+1] = 1-MatrixP[k,k]
}
MatrixP[K,K] = 1
appst[1,1] = 1
SS = c(1)
if(K>1)
{
#browser()
if(K>2){
for(t in 2:(K-1))
{
appst[t,] = log((appst[t-1,]%*%MatrixP ) )+dnorm(data[t], muMCMC+mu2MCMC*dataT[t], sigma2MCMC^0.5, log=T)
appst[t,] = exp(appst[t,])
appst[t,(t+1):K] = 0
SS[t] = sum(appst[t,])
appst[t,]= appst[t,]/sum(appst[t,])
}
}
for(t in (K):nT)
{
appst[t,] = log((appst[t-1,]%*%MatrixP ) )+dnorm(data[t], muMCMC+mu2MCMC*dataT[t], sigma2MCMC^0.5, log=T)
appst[t,] = exp(appst[t,])
SS[t] = sum(appst[t,])
appst[t,]= appst[t,]/sum(appst[t,])
}
}else{
for(t in 1:nT)
{
#browser()
#SS[t] = exp(dpois(data[t], lambdaMCMC[1], log=T))
SS[t] = exp(dnorm(data[t], muMCMC+mu2MCMC*dataT[t], sigma2MCMC^0.5, log=T))
}
}
BIC[iMCMC] = 0
BIC[iMCMC] = BIC[iMCMC]+sum(log(SS))
AIC[iMCMC] = -2*BIC[iMCMC]+2*K+(K-1)
BIC[iMCMC] = -2*BIC[iMCMC]+(2*K+(K-1))*log(nT)
}
#####
# Return all saved draws; BIC is summarized by its maximum over draws.
out = list(muOUT=muOUT,mu2OUT=mu2OUT,sigma2OUT=sigma2OUT,
betaOUT=betaOUT,
stOUT=stOUT,
BIC=max(BIC),
piOUT=piOUT,nvec=nvec,nvecOUT=nvecOUT)
return(out)
}
|
/cp_chib_reg_sh_general.R
|
no_license
|
sh0406/ff
|
R
| false
| false
| 8,608
|
r
|
# CPmodelChibReg: Metropolis-within-Gibbs sampler for a Bayesian multiple
# change-point linear regression (Chib-style forward filtering / backward
# sampling over an ordered, left-to-right regime sequence).
#
# Within regime k the model is: y_t = mu_k + mu2_k * x_t + eps_t,
# eps_t ~ N(0, sigma2_k). The hidden state st starts in regime 1, ends in
# regime K, and at each step can only stay (probability pi_k) or advance to
# regime k+1, so there are exactly K-1 change points.
#
# Arguments:
#   data    - data.frame with columns $x (covariate) and $y (response)
#   K       - number of regimes (K-1 change points)
#   start   - starting values: mu, mu2, sigma2 (length-K vectors),
#             st (length-nT state sequence), beta, pi
#   prior   - hyperparameters: mu/mu2 = c(mean, variance) of normal priors;
#             sigma2 = c(shape, scale-related) for the inverse-gamma prior;
#             beta = c(mean, variance) used in the MH step for beta
#   MCMC_par- list with iter (total iterations), burnin, thin
#   sd_prop - sd of the log-scale random-walk proposal for beta
#
# Returns a list with posterior draws (muOUT, mu2OUT, sigma2OUT, piOUT,
# stOUT, nvecOUT), the last regime counts (nvec), and BIC = max over draws.
# NOTE(review): the default argument values (ySIM, stSIM, K, rdirichlet)
# reference objects from the calling environment -- callers should always
# pass data/K/start explicitly.
CPmodelChibReg = function(
data = ySIM,
K = K,
start = list(
mu = rnorm(K,0,1),
mu2 = rnorm(K,0,1),
sigma2 = abs(rnorm(K,0,1)),
#sigma2 = c(sigma2SIM,rep(20,Kmax)),
#st = rep(1:2,each=1000)[1:1500],
st = stSIM,
beta = 0.01,
pi = rdirichlet(1,rep(1,K))
),
prior = list(
mu = c(0,1000),
mu2 = c(0,1000),
sigma2 = c(1,1),
beta = c(0,1000)
),
MCMC_par = list(
iter = 15000,
burnin = 11000,
thin = 10
),
sd_prop = 0.5
)
{
# Local multivariate-normal sampler (Cholesky based): draws n vectors with
# the given mean and covariance matrix. Avoids a package dependency.
rmnorm=function(n = 1, mean = rep(0, d), varcov) # rmnorm with a given mean and covariance
{
d <- if (is.matrix(varcov))
ncol(varcov)
else 1
z <- matrix(rnorm(n * d), n, d) %*% chol(varcov) # transform iid N(0,1) draws via the Cholesky factor
y <- t(mean + t(z)) # shift by the mean (mean is d x 1)
return(y)
}
##### DATA
nT = nrow(data)
#################
# MCMCpar = c(total iterations, burnin, thinning interval, number of saved draws)
MCMCpar = c(
MCMC_par$iter,
MCMC_par$burnin,
MCMC_par$thin,
round((MCMC_par$iter-MCMC_par$burnin)/MCMC_par$thin)
)
sdprop = sd_prop
# indexMCMC = inner iterations before the next saved draw: burnin before the
# first save, then reset to the thinning interval (see end of outer loop).
indexMCMC = MCMCpar[2]
stMCMC = start$st
# nvec[k] = number of observations currently assigned to regime k.
TT = table(stMCMC)
nvec = rep(0,K )
nvec[as.numeric(names(TT))] = TT
# Current parameter state, initialized from `start`.
muMCMC = rep(0,K)
muMCMC[1:K] = start$mu[1:K]
mu2MCMC = rep(0,K)
mu2MCMC[1:K] = start$mu2[1:K]
sigma2MCMC = rep(1,K)
sigma2MCMC[1:K] = start$sigma2[1:K]
piMCMC = rep(1,K)
piMCMC[1:K] = start$pi[1:K]
piMCMC[K] = 1
betaMCMC = start$beta
# Output storage: one row per saved draw.
muOUT = matrix(NA, nrow=MCMCpar[4], ncol=K)
mu2OUT = matrix(NA, nrow=MCMCpar[4], ncol=K)
sigma2OUT = matrix(NA, nrow=MCMCpar[4], ncol=K)
betaOUT = matrix(NA, nrow=MCMCpar[4], ncol=1)
stOUT = matrix(NA, nrow=MCMCpar[4], ncol=nT)
piOUT = matrix(NA, nrow=MCMCpar[4], ncol=K)
nvecOUT = matrix(NA, nrow=MCMCpar[4], ncol=K)
# appst[t,k] holds the (normalized) filtered probability of regime k at time t.
appst = matrix(0, nrow=nT,ncol=K)
MatrixP = matrix(0,nrow=K,ncol=K)
MatrixB = matrix(0,nrow=K,ncol=K)
iterations = 0
BIC = c()
AIC = c()
DIC = c()
#dataT = c(1:nT)
# From here on `dataT` is the covariate and `data` is overwritten with the response.
dataT=data$x
data=data$y
for(iMCMC in 1:MCMCpar[4])
{
#browser()
# Inner loop: run indexMCMC full Gibbs sweeps between saved draws.
for(jMCMC in 1:indexMCMC)
{
iterations = iterations+1
##### beta MCMC
# Random-walk Metropolis-Hastings update of beta on the log scale;
# MHratio accumulates the log acceptance ratio (prior + Jacobian + likelihood terms).
betaProp = exp(rnorm(1,log(betaMCMC), sdprop))
MHratio = -0.5*(betaProp-prior$beta[1])^2/prior$beta[2]
MHratio = MHratio-(-0.5*(betaMCMC-prior$beta[1])^2/prior$beta[2])
MHratio = MHratio+log(betaProp)-log(betaMCMC)
MHratio = MHratio+(K-1)*log(betaProp)-(K-1)*log(betaMCMC)
for(k in 1:K)
{
MHratio = MHratio+(lgamma(betaProp+1)-lgamma(nvec[k]+betaProp+1-(k==K)) )-(lgamma(betaMCMC+1)-lgamma(nvec[k]+betaMCMC+1-(k==K)))
}
if(runif(1,0,1)<exp(MHratio))
{
#browser()
betaMCMC = betaProp
}
#print(iterations)
## 1. parameter<THETA: mu, sigma>
##### mu
# Conjugate bivariate-normal update of the regression coefficients
# (mu_k = intercept, mu2_k = slope) for each regime, given sigma2_k.
for(k in 1:K)
{
# browser()
w = stMCMC==k
# V = matrix(ncol=2, nrow=2)
# V[1,1] = sum(w) # number of observations in state k
# V[1,2] = sum(dataT[w]) # dataT = 1:nT (data count) was the time index t of the series
# V[2,1] = sum(dataT[w])
# V[2,2] = sum(dataT[w]^2)
bigX = cbind(rep(1,sum(w)),dataT[w])
V = t(bigX)%*%bigX
# Posterior covariance: (X'X/sigma2 + prior precision)^{-1}
V = solve(V/sigma2MCMC[k]+diag(c(1/prior$mu[2],1/prior$mu2[2])))
M = matrix(ncol=1, nrow=2)
# M[1,1] = sum(data[w])
# M[2,1] = sum(dataT[w]*data[w])
M = t(bigX)%*%data[w]
# Posterior mean: V (X'y/sigma2 + prior precision * prior mean)
M = V%*%(M/sigma2MCMC[k]+ c(prior$mu[1]/prior$mu[2],prior$mu2[1]/prior$mu2[2]) )
# meanMU = (prior$mu[2]*sum(data[xiMCMC==k])+sigma2MCMC[k]*prior$mu[1])/( nvec[k]*prior$mu[2]+sigma2MCMC[k])
#
# varMU = sigma2MCMC[k]*prior$mu[2]/( nvec[k]*prior$mu[2]+sigma2MCMC[k])
# muMCMC[k] = rnorm(1, meanMU,varMU^0.5)
simm = rmnorm(1,M,V)
muMCMC[k] = simm[1]
mu2MCMC[k] = simm[2]
}
####### sigma2
# Conjugate inverse-gamma update of each regime variance given the
# regression coefficients (residual sum of squares enters the rate).
for(k in 1:K)
{
Asigma2 = prior$sigma2[1]+nvec[k]/2
w = stMCMC==k
Bsigma2 = 1/prior$sigma2[2]+ sum( (data[w]-muMCMC[k]-mu2MCMC[k]*dataT[w])^2 )/2
sigma2MCMC[k] = 1.0/rgamma(1,shape=Asigma2,rate=Bsigma2)
}
######### ######### ######### #########
######### sampling st
######### ######### ######### ######### k
if(K>1)
{
#browser()
# Build the left-to-right transition matrix: stay with prob pi_k,
# advance one regime with prob 1 - pi_k; regime K is absorbing.
appst = matrix(0, nrow=nT,ncol=K)
diag(MatrixP) = piMCMC
for(k in 1:(K-1))
{
MatrixP[k,k+1] = 1-MatrixP[k,k]
}
#browser()
MatrixP[K,K] = 1
appst[1,1] = 1
if(K>1)
{
# Forward filtering: appst[t,] is the normalized filtered regime
# distribution at time t. For t < K the chain cannot have reached
# regime t+1 yet, so those entries are zeroed.
if(K>2)
{
for(t in 2:(K-1))
{
appst[t,] = log((appst[t-1,]%*%MatrixP ) )+dnorm(data[t], muMCMC+mu2MCMC*dataT[t], sigma2MCMC^0.5, log=T)
appst[t,] = exp(appst[t,])
appst[t,(t+1):K] = 0
appst[t,] = appst[t,]/sum(appst[t,])
}
}
for(t in (K):nT)
{
appst[t,] = log((appst[t-1,]%*%MatrixP ) )+dnorm(data[t], muMCMC+mu2MCMC*dataT[t], sigma2MCMC^0.5, log=T)
appst[t,] = exp(appst[t,])
appst[t,] = appst[t,]/sum(appst[t,])
}
}else{
appst[,1] = 1
}
# Backward sampling: fix the last state at regime K, then draw each
# earlier state conditional on the next one (only k-1 or k possible).
stMCMC[nT] = K
#stMCMC[nT] = max(which(appst[nT,]>0)) # apparently meant to locate regime K
# backward pass (reverse order)
for(t in (nT-1):1)
{
# browser()
k = stMCMC[t+1]
if(k>1)
{
stMCMC[t] = sample(c(k-1,k),1, prob = c( (1-piMCMC[k-1])*appst[t,k-1], piMCMC[k]*appst[t,k] ))
}else{
stMCMC[t] = 1 # if st at time t+1 is 1, then st at time t must also be 1
}
}
#browser()
### nvec contains the number of observations in the regimes
TT = table(stMCMC)
nvec = rep(0,K )
nvec[as.numeric(names(TT))] = TT
#### pi
# Conjugate beta update of the stay probabilities (regime K stays at 1).
for(k in 1:(K-1))
{
#piMCMC[k] = rbeta(1,1+nvec[k], betaMCMC+1)
#piMCMC[k] = rbeta(1,prior$beta[1]+nvec[k], prior$beta[2]+1)
piMCMC[k] = rbeta(1,prior$beta[1]+(nvec[k]-1), prior$beta[2]+1)
}
}
}
# After the first (burnin-length) sweep, only `thin` sweeps run per saved draw.
indexMCMC = MCMCpar[3]
###### save the current draw (Italian: SALVO PARAMETRI)
muOUT[iMCMC,1:K] = muMCMC[1:K]
mu2OUT[iMCMC,1:K] = mu2MCMC[1:K]
sigma2OUT[iMCMC,1:K] = sigma2MCMC[1:K]
piOUT[iMCMC,1:K] = piMCMC[1:K]
#betaOUT[iMCMC,1] = betaMCMC
stOUT[iMCMC,] = stMCMC
nvecOUT[iMCMC,] =nvec
#### Bayes factor
# Re-run the forward filter to collect SS[t], the one-step-ahead predictive
# contribution at time t; sum(log(SS)) is the log-likelihood of this draw.
appst = matrix(0, nrow=nT,ncol=K)
diag(MatrixP) = piMCMC
for(k in 1:(K-1))
{
MatrixP[k,k+1] = 1-MatrixP[k,k]
}
MatrixP[K,K] = 1
appst[1,1] = 1
SS = c(1)
if(K>1)
{
#browser()
if(K>2){
for(t in 2:(K-1))
{
appst[t,] = log((appst[t-1,]%*%MatrixP ) )+dnorm(data[t], muMCMC+mu2MCMC*dataT[t], sigma2MCMC^0.5, log=T)
appst[t,] = exp(appst[t,])
appst[t,(t+1):K] = 0
SS[t] = sum(appst[t,])
appst[t,]= appst[t,]/sum(appst[t,])
}
}
for(t in (K):nT)
{
appst[t,] = log((appst[t-1,]%*%MatrixP ) )+dnorm(data[t], muMCMC+mu2MCMC*dataT[t], sigma2MCMC^0.5, log=T)
appst[t,] = exp(appst[t,])
SS[t] = sum(appst[t,])
appst[t,]= appst[t,]/sum(appst[t,])
}
}else{
# Single-regime case: plain normal likelihood, no filtering needed.
for(t in 1:nT)
{
#browser()
#SS[t] = exp(dpois(data[t], lambdaMCMC[1], log=T))
SS[t] = exp(dnorm(data[t], muMCMC+mu2MCMC*dataT[t], sigma2MCMC^0.5, log=T))
}
}
# BIC[iMCMC] temporarily holds the log-likelihood, then is converted:
# AIC = -2*logL + 2*(number of parameters), BIC = -2*logL + npar*log(nT),
# with npar = 2K regression coefficients + (K-1) stay probabilities.
BIC[iMCMC] = 0
BIC[iMCMC] = BIC[iMCMC]+sum(log(SS))
AIC[iMCMC] = -2*BIC[iMCMC]+2*K+(K-1)
BIC[iMCMC] = -2*BIC[iMCMC]+(2*K+(K-1))*log(nT)
}
#####
out = list(muOUT=muOUT,mu2OUT=mu2OUT,sigma2OUT=sigma2OUT,
betaOUT=betaOUT,
stOUT=stOUT,
BIC=max(BIC),
piOUT=piOUT,nvec=nvec,nvecOUT=nvecOUT)
return(out)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/LookmlModel.r
\name{LookmlModel}
\alias{LookmlModel}
\title{LookmlModel Class}
\description{
LookmlModel Class
LookmlModel Class
}
\section{Public fields}{
\if{html}{\out{<div class="r6-fields">}}
\describe{
\item{\code{name}}{}
\item{\code{project_name}}{}
\item{\code{allowed_db_connection_names}}{}
\item{\code{unlimited_db_connections}}{}
\item{\code{has_content}}{}
\item{\code{label}}{}
\item{\code{explores}}{}
\item{\code{can}}{}
}
\if{html}{\out{</div>}}
}
\section{Methods}{
\subsection{Public methods}{
\itemize{
\item \href{#method-new}{\code{LookmlModel$new()}}
\item \href{#method-toJSON}{\code{LookmlModel$toJSON()}}
\item \href{#method-fromJSON}{\code{LookmlModel$fromJSON()}}
\item \href{#method-toJSONString}{\code{LookmlModel$toJSONString()}}
\item \href{#method-fromJSONString}{\code{LookmlModel$fromJSONString()}}
\item \href{#method-clone}{\code{LookmlModel$clone()}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-new"></a>}}
\if{latex}{\out{\hypertarget{method-new}{}}}
\subsection{Method \code{new()}}{
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{LookmlModel$new(
name,
project_name,
allowed_db_connection_names,
unlimited_db_connections,
has_content,
label,
explores,
can
)}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-toJSON"></a>}}
\if{latex}{\out{\hypertarget{method-toJSON}{}}}
\subsection{Method \code{toJSON()}}{
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{LookmlModel$toJSON()}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-fromJSON"></a>}}
\if{latex}{\out{\hypertarget{method-fromJSON}{}}}
\subsection{Method \code{fromJSON()}}{
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{LookmlModel$fromJSON(LookmlModelJson)}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-toJSONString"></a>}}
\if{latex}{\out{\hypertarget{method-toJSONString}{}}}
\subsection{Method \code{toJSONString()}}{
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{LookmlModel$toJSONString()}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-fromJSONString"></a>}}
\if{latex}{\out{\hypertarget{method-fromJSONString}{}}}
\subsection{Method \code{fromJSONString()}}{
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{LookmlModel$fromJSONString(LookmlModelJson)}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-clone"></a>}}
\if{latex}{\out{\hypertarget{method-clone}{}}}
\subsection{Method \code{clone()}}{
The objects of this class are cloneable with this method.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{LookmlModel$clone(deep = FALSE)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{deep}}{Whether to make a deep clone.}
}
\if{html}{\out{</div>}}
}
}
}
|
/man/LookmlModel.Rd
|
permissive
|
grepinsight/lookr
|
R
| false
| true
| 3,012
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/LookmlModel.r
\name{LookmlModel}
\alias{LookmlModel}
\title{LookmlModel Class}
\description{
LookmlModel Class
LookmlModel Class
}
\section{Public fields}{
\if{html}{\out{<div class="r6-fields">}}
\describe{
\item{\code{name}}{}
\item{\code{project_name}}{}
\item{\code{allowed_db_connection_names}}{}
\item{\code{unlimited_db_connections}}{}
\item{\code{has_content}}{}
\item{\code{label}}{}
\item{\code{explores}}{}
\item{\code{can}}{}
}
\if{html}{\out{</div>}}
}
\section{Methods}{
\subsection{Public methods}{
\itemize{
\item \href{#method-new}{\code{LookmlModel$new()}}
\item \href{#method-toJSON}{\code{LookmlModel$toJSON()}}
\item \href{#method-fromJSON}{\code{LookmlModel$fromJSON()}}
\item \href{#method-toJSONString}{\code{LookmlModel$toJSONString()}}
\item \href{#method-fromJSONString}{\code{LookmlModel$fromJSONString()}}
\item \href{#method-clone}{\code{LookmlModel$clone()}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-new"></a>}}
\if{latex}{\out{\hypertarget{method-new}{}}}
\subsection{Method \code{new()}}{
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{LookmlModel$new(
name,
project_name,
allowed_db_connection_names,
unlimited_db_connections,
has_content,
label,
explores,
can
)}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-toJSON"></a>}}
\if{latex}{\out{\hypertarget{method-toJSON}{}}}
\subsection{Method \code{toJSON()}}{
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{LookmlModel$toJSON()}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-fromJSON"></a>}}
\if{latex}{\out{\hypertarget{method-fromJSON}{}}}
\subsection{Method \code{fromJSON()}}{
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{LookmlModel$fromJSON(LookmlModelJson)}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-toJSONString"></a>}}
\if{latex}{\out{\hypertarget{method-toJSONString}{}}}
\subsection{Method \code{toJSONString()}}{
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{LookmlModel$toJSONString()}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-fromJSONString"></a>}}
\if{latex}{\out{\hypertarget{method-fromJSONString}{}}}
\subsection{Method \code{fromJSONString()}}{
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{LookmlModel$fromJSONString(LookmlModelJson)}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-clone"></a>}}
\if{latex}{\out{\hypertarget{method-clone}{}}}
\subsection{Method \code{clone()}}{
The objects of this class are cloneable with this method.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{LookmlModel$clone(deep = FALSE)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{deep}}{Whether to make a deep clone.}
}
\if{html}{\out{</div>}}
}
}
}
|
# Case Study 12
# Packages required by the assignment.
library(dplyr)
library(ggplot2)
library(ggmap)
library(htmlwidgets)
library(widgetframe)
# Additional packages used in the steps below.
library(tidyverse)
library(rnoaa)
library(xts)
library(dygraphs)

# Download daily maximum temperature (TMAX) for GHCND station USW00014733
# (Buffalo, NY) from 2016-01-01 onward, keeping the quality-control flags.
buffalo_weather <- meteo_tidy_ghcnd(
  "USW00014733",
  date_min = "2016-01-01",
  var = c("TMAX"),
  keep_flags = TRUE
)

# Parse the date column and divide tmax by 10 to convert to degrees
# (GHCND stores temperatures in tenths of a degree).
buffalo_weather <- buffalo_weather %>%
  mutate(
    date = as.Date(date),
    tmax = as.numeric(tmax) / 10
  )

# Build an xts time series and render it as an interactive dygraph with a
# range selector initialized to Jan-Oct 2020.
maximum_temperature <- xts(buffalo_weather$tmax, order.by = buffalo_weather$date)
dygraph(maximum_temperature, main = "Daily Maximum Temperature in Buffalo, NY") %>%
  dyRangeSelector(dateWindow = c("2020-01-01", "2020-10-31"))
|
/week_12/case_study_12.R
|
no_license
|
geo511-2020/geo511-2020-tasks-hsare
|
R
| false
| false
| 791
|
r
|
#Case Study12
# load required packages from the assignment
library(dplyr)
library(ggplot2)
library(ggmap)
library(htmlwidgets)
library(widgetframe)
#Detailed Steps
#I downloaded these packages from the assignment
library(tidyverse)
library(rnoaa)
library(xts)
library(dygraphs)
# Download daily maximum temperature (TMAX) for GHCND station USW00014733
# (Buffalo, NY) from 2016-01-01 onward; keep_flags=T retains the QC flag columns.
d=meteo_tidy_ghcnd("USW00014733",
date_min = "2016-01-01",
var = c("TMAX"),
keep_flags=T) %>%
mutate(date=as.Date(date),
tmax=as.numeric(tmax)/10) #Divide the tmax data by 10 to convert to degrees (GHCND stores tenths).
#This is what I created for the assignment
# Build an xts time series and plot an interactive dygraph whose range
# selector is initialized to Jan-Oct 2020.
maximum_temperature <- xts(d$tmax, order.by = d$date)
dygraph(maximum_temperature, main="Daily Maximum Temperature in Buffalo, NY")%>%
dyRangeSelector(dateWindow = c("2020-01-01", "2020-10-31"))
|
# Fuzzer-generated reproduction case (libFuzzer harness): calls
# grattan::IncomeTax with empty rates/thresholds schedules and an income
# vector whose first entry is a subnormal double, then prints the result
# structure. Kept as-is to reproduce the original crash/valgrind finding.
testlist <- list(rates = numeric(0), thresholds = numeric(0), x = c(5.18571301874972e-320, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
result <- do.call(grattan::IncomeTax,testlist)
str(result)
|
/grattan/inst/testfiles/IncomeTax/libFuzzer_IncomeTax/IncomeTax_valgrind_files/1610382265-test.R
|
no_license
|
akhikolla/updated-only-Issues
|
R
| false
| false
| 282
|
r
|
# Fuzzer-generated reproduction case (libFuzzer harness): calls
# grattan::IncomeTax with empty rates/thresholds schedules and an income
# vector whose first entry is a subnormal double, then prints the result
# structure. Kept as-is to reproduce the original crash/valgrind finding.
testlist <- list(rates = numeric(0), thresholds = numeric(0), x = c(5.18571301874972e-320, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
result <- do.call(grattan::IncomeTax,testlist)
str(result)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/f_highermomentsIV.R
\name{higherMomentsIV}
\alias{higherMomentsIV}
\title{Fitting Linear Models with Endogenous Regressors using Lewbel's Higher Moments Approach}
\usage{
higherMomentsIV(formula, data, verbose = TRUE)
}
\arguments{
\item{formula}{A symbolic description of the model to be fitted. See the "Details" section for the exact notation.}
\item{data}{A data.frame containing the data of all parts specified in the formula parameter.}
\item{verbose}{Show details about the running of the function.}
}
\value{
Returns an object of classes \code{rendo.ivreg} and \code{ivreg}. It extends the object returned from
function \code{ivreg} of package \code{AER} and slightly modifies it by adapting the \code{call}
and \code{formula} components. The \code{summary} function prints additional diagnostic information as
described in documentation for \code{\link[AER]{summary.ivreg}}.
All generic accessor functions for \code{ivreg} such as \code{anova}, \code{hatvalues}, or \code{vcov} are available.
}
\description{
Fits linear models with one endogenous regressor using internal instruments built using the approach described in
Lewbel A. (1997). This is a statistical technique to address the endogeneity problem where no external instrumental
variables are needed. The implementation allows the incorporation of external instruments if available.
An important assumption for identification is that the endogenous variable has a skewed distribution.
}
\details{
\subsection{Method}{
Consider the model:
\ifelse{html}{\out{<br><div style="text-align:center">Y<sub>t</sub>=β<sub>0</sub> + β<sub>1</sub>X<sub>t</sub>+αP<sub>t</sub>+ε<sub>t</sub></div>}}{\deqn{ Y_{t} = \beta_{0}+ \beta_{1}X_{t} + \alpha P_{t}+\epsilon_{t} \hspace{0.3cm} (1) }}
\ifelse{html}{\out{<div style="text-align:center">P<sub>t</sub>=γZ<sub>t</sub>+ν<sub>t</sub></div>}}{\deqn{ P_{t} = \gamma Z_{t}+\nu_{t} \hspace{2.5 cm} (2)}}
The observed data consist of \ifelse{html}{\out{Y<sub>t</sub>}}{\eqn{Y_{t}}}, \ifelse{html}{\out{X<sub>t</sub>}}{\eqn{X_{t}}} and \ifelse{html}{\out{P<sub>t</sub>}}{\eqn{P_{t}}},
while \ifelse{html}{\out{Z<sub>t</sub>}}{\eqn{Z_{t}}}, \ifelse{html}{\out{ε<sub>t</sub>}}{\eqn{\epsilon_{t}}},
and \ifelse{html}{\out{ν<sub>t</sub>}}{\eqn{\nu_{t}}} are unobserved. The endogeneity problem arises from
the correlation of \ifelse{html}{\out{P<sub>t</sub>}}{\eqn{P_{t}}} with the structural error
\ifelse{html}{\out{ε<sub>t</sub>}}{\eqn{\epsilon_{t}}},
since \ifelse{html}{\out{E(εν)≠0}}{\eqn{E(\epsilon \nu)\neq 0}}.
The requirement for the structural and measurement error is to have mean zero, but no restriction is imposed on their distribution.
Let \ifelse{html}{\out{S̅}}{\eqn{\bar{S}}} be the sample mean of a variable \ifelse{html}{\out{S<sub>t</sub>}}{\eqn{S_{t}}}
and \ifelse{html}{\out{G<sub>t</sub>=G(X<sub>t</sub>)}}{\eqn{G_{t} = G(X_{t})}} for any given function \eqn{G} that
has finite third own and cross moments. Lewbel(1997) proves that the following instruments can be constructed and used with two-stage least squares to obtain consistent estimates:
\ifelse{html}{\out{<div style="text-align:center">q<sub>1t</sub>=(G<sub>t</sub>-G̅)<span style="float:right;">(3a)</span></div>}}{\deqn{ q_{1t}=(G_{t} - \bar{G}) \hspace{1.6 cm}(3a)}}
\ifelse{html}{\out{<div style="text-align:center">q<sub>2t</sub>=(G<sub>t</sub>-G̅)(P<sub>t</sub>-P̅)<span style="float:right;">(3b)</span></div>}}{\deqn{ q_{2t}=(G_{t} - \bar{G})(P_{t}-\bar{P}) \hspace{0.3cm} (3b)}}
\ifelse{html}{\out{<div style="text-align:center">q<sub>3t</sub>=(G<sub>t</sub>-G̅)(Y<sub>t</sub>-Y̅)<span style="float:right;">(3c)</span></div>}}{\deqn{ q_{3t}=(G_{t} - \bar{G})(Y_{t}-\bar{Y}) \hspace{0.3cm} (3c)}}
\ifelse{html}{\out{<div style="text-align:center">q<sub>4t</sub>=(Y<sub>t</sub>-Y̅)(P<sub>t</sub>-P̅)<span style="float:right;">(3d)</span></div>}}{\deqn{ q_{4t}=(Y_{t} - \bar{Y})(P_{t}-\bar{P}) \hspace{0.3cm} (3d)}}
\ifelse{html}{\out{<div style="text-align:center">q<sub>5t</sub>=(P<sub>t</sub>-P̅)<sup>2</sup><span style="float:right;">(3e)</span></div>}}{\deqn{ q_{5t}=(P_{t}-\bar{P})^{2} \hspace{1.5 cm} (3e)}}
\ifelse{html}{\out{<div style="text-align:center">q<sub>6t</sub>=(Y<sub>t</sub>-Y̅)<sup>2</sup><span style="float:right;">(3f)</span></div>}}{\deqn{ q_{6t}=(Y_{t}-\bar{Y})^{2}\hspace{1.5 cm} (3f)}}
Instruments in equations \eqn{3e} and \eqn{3f} can be used only when the measurement and the structural errors are symmetrically distributed.
Otherwise, the use of the instruments does not require any distributional assumptions for the errors. Given that the regressors \eqn{G(X) = X}
are included as instruments, \eqn{G(X)} should not be linear in \eqn{X} in equation \eqn{3a}.
Let small letter denote deviation from the sample mean: \ifelse{html}{\out{s<sub>i</sub> = S<sub>i</sub>-S̅}}{\eqn{s_{i} = S_{i}-\bar{S}}}.
Then, using as instruments the variables presented in equations \eqn{3} together with \eqn{1} and
\ifelse{html}{\out{X<sub>t</sub>}}{\eqn{X_{t}}}, the two-stage-least-squares estimation will provide consistent estimates for the parameters
in equation \eqn{1} under the assumptions exposed in Lewbel(1997).
}
\subsection{Formula parameter}{
The \code{formula} argument follows a four part notation:
A two-sided formula describing the model (e.g. \code{y ~ X1 + X2 + P}), a single endogenous regressor
(e.g. \code{P}), and the exogenous variables from which the internal instrumental variables should
be build (e.g. \code{IIV(iiv=y2)}), each part separated by a single vertical bar (\code{|}).
The instrumental variables that should be built are specified as (multiple) functions, one for each
instrument. This function is \code{IIV} and uses the following arguments:
\describe{
\item{\code{iiv}}{Which internal instrument to build. One of \code{g, gp, gy, yp, p2, y2} can be chosen.}
\item{\code{g}}{Which function \code{g} represents in \code{iiv}.
One of \code{x2, x3, lnx, 1/x} can be chosen.
Only required if the type of internal instrument demands it.}
\item{\code{...}}{
The exogenous regressors to build the internal instrument. If more than one is given,
separate instruments are built for each. Only required if the type of internal instrument demands it.}
}
Note that no argument to \code{IIV} is to be supplied as character but as symbols without quotation marks.
Optionally, additional external instrumental variables to also include in the instrumental variable
regression can be specified. These external instruments have to be already present in the data
and are provided as the fourth right-hand side part of the formula, again separated by a vertical bar.
See the example section for illustrations on how to specify the \code{formula} parameter.
}
}
\examples{
data("dataHigherMoments")
# P is the endogenous regressor in all examples
# 2 IVs with g*p, g=x^2, separately for each regressor X1 and X2.
hm <- higherMomentsIV(y~X1+X2+P|P|IIV(iiv=gp, g=x2, X1, X2),
data = dataHigherMoments)
# same as above
hm <- higherMomentsIV(y~X1+X2+P|P|IIV(iiv=gp, g=x2, X1) +
IIV(iiv=gp, g=x2, X2),
data = dataHigherMoments)
# 3 different IVs
hm <- higherMomentsIV(y~X1+X2+P|P|IIV(iiv=y2) + IIV(iiv=yp) +
IIV(iiv=g,g=x3,X1),
data = dataHigherMoments)
# use X2 as external IV
hm <- higherMomentsIV(y~X1+P|P|IIV(iiv=y2)+IIV(iiv=g,g=lnx,X1)| X2,
data = dataHigherMoments)
summary(hm)
}
\references{
Lewbel A (1997). “Constructing Instruments for Regressions with Measurement Error When No Additional Data are Available, With an Application to Patents and R&D.” Econometrica, 65(5), 1201–1213.
}
\seealso{
\code{\link[AER]{ivreg}}
}
|
/man/higherMomentsIV.Rd
|
no_license
|
mmeierer/REndo
|
R
| false
| true
| 7,928
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/f_highermomentsIV.R
\name{higherMomentsIV}
\alias{higherMomentsIV}
\title{Fitting Linear Models with Endogenous Regressors using Lewbel's Higher Moments Approach}
\usage{
higherMomentsIV(formula, data, verbose = TRUE)
}
\arguments{
\item{formula}{A symbolic description of the model to be fitted. See the "Details" section for the exact notation.}
\item{data}{A data.frame containing the data of all parts specified in the formula parameter.}
\item{verbose}{Show details about the running of the function.}
}
\value{
Returns an object of classes \code{rendo.ivreg} and \code{ivreg}. It extends the object returned from
function \code{ivreg} of package \code{AER} and slightly modifies it by adapting the \code{call}
and \code{formula} components. The \code{summary} function prints additional diagnostic information as
described in documentation for \code{\link[AER]{summary.ivreg}}.
All generic accessor functions for \code{ivreg} such as \code{anova}, \code{hatvalues}, or \code{vcov} are available.
}
\description{
Fits linear models with one endogenous regressor using internal instruments built using the approach described in
Lewbel A. (1997). This is a statistical technique to address the endogeneity problem where no external instrumental
variables are needed. The implementation allows the incorporation of external instruments if available.
An important assumption for identification is that the endogenous variable has a skewed distribution.
}
\details{
\subsection{Method}{
Consider the model:
\ifelse{html}{\out{<br><div style="text-align:center">Y<sub>t</sub>=β<sub>0</sub> + β<sub>1</sub>X<sub>t</sub>+αP<sub>t</sub>+ε<sub>t</sub></div>}}{\deqn{ Y_{t} = \beta_{0}+ \beta_{1}X_{t} + \alpha P_{t}+\epsilon_{t} \hspace{0.3cm} (1) }}
\ifelse{html}{\out{<div style="text-align:center">P<sub>t</sub>=γZ<sub>t</sub>+ν<sub>t</sub></div>}}{\deqn{ P_{t} = \gamma Z_{t}+\nu_{t} \hspace{2.5 cm} (2)}}
The observed data consist of \ifelse{html}{\out{Y<sub>t</sub>}}{\eqn{Y_{t}}}, \ifelse{html}{\out{X<sub>t</sub>}}{\eqn{X_{t}}} and \ifelse{html}{\out{P<sub>t</sub>}}{\eqn{P_{t}}},
while \ifelse{html}{\out{Z<sub>t</sub>}}{\eqn{Z_{t}}}, \ifelse{html}{\out{ε<sub>t</sub>}}{\eqn{\epsilon_{t}}},
and \ifelse{html}{\out{ν<sub>t</sub>}}{\eqn{\nu_{t}}} are unobserved. The endogeneity problem arises from
the correlation of \ifelse{html}{\out{P<sub>t</sub>}}{\eqn{P_{t}}} with the structural error
\ifelse{html}{\out{ε<sub>t</sub>}}{\eqn{\epsilon_{t}}},
since \ifelse{html}{\out{E(εν)≠0}}{\eqn{E(\epsilon \nu)\neq 0}}.
The requirement for the structural and measurement error is to have mean zero, but no restriction is imposed on their distribution.
Let \ifelse{html}{\out{S̅}}{\eqn{\bar{S}}} be the sample mean of a variable \ifelse{html}{\out{S<sub>t</sub>}}{\eqn{S_{t}}}
and \ifelse{html}{\out{G<sub>t</sub>=G(X<sub>t</sub>)}}{\eqn{G_{t} = G(X_{t})}} for any given function \eqn{G} that
has finite third own and cross moments. Lewbel(1997) proves that the following instruments can be constructed and used with two-stage least squares to obtain consistent estimates:
\ifelse{html}{\out{<div style="text-align:center">q<sub>1t</sub>=(G<sub>t</sub>-G̅)<span style="float:right;">(3a)</span></div>}}{\deqn{ q_{1t}=(G_{t} - \bar{G}) \hspace{1.6 cm}(3a)}}
\ifelse{html}{\out{<div style="text-align:center">q<sub>2t</sub>=(G<sub>t</sub>-G̅)(P<sub>t</sub>-P̅)<span style="float:right;">(3b)</span></div>}}{\deqn{ q_{2t}=(G_{t} - \bar{G})(P_{t}-\bar{P}) \hspace{0.3cm} (3b)}}
\ifelse{html}{\out{<div style="text-align:center">q<sub>3t</sub>=(G<sub>t</sub>-G̅)(Y<sub>t</sub>-Y̅)<span style="float:right;">(3c)</span></div>}}{\deqn{ q_{3t}=(G_{t} - \bar{G})(Y_{t}-\bar{Y}) \hspace{0.3cm} (3c)}}
\ifelse{html}{\out{<div style="text-align:center">q<sub>4t</sub>=(Y<sub>t</sub>-Y̅)(P<sub>t</sub>-P̅)<span style="float:right;">(3d)</span></div>}}{\deqn{ q_{4t}=(Y_{t} - \bar{Y})(P_{t}-\bar{P}) \hspace{0.3cm} (3d)}}
\ifelse{html}{\out{<div style="text-align:center">q<sub>5t</sub>=(P<sub>t</sub>-P̅)<sup>2</sup><span style="float:right;">(3e)</span></div>}}{\deqn{ q_{5t}=(P_{t}-\bar{P})^{2} \hspace{1.5 cm} (3e)}}
\ifelse{html}{\out{<div style="text-align:center">q<sub>6t</sub>=(Y<sub>t</sub>-Y̅)<sup>2</sup><span style="float:right;">(3f)</span></div>}}{\deqn{ q_{6t}=(Y_{t}-\bar{Y})^{2}\hspace{1.5 cm} (3f)}}
Instruments in equations \eqn{3e} and \eqn{3f} can be used only when the measurement and the structural errors are symmetrically distributed.
Otherwise, the use of the instruments does not require any distributional assumptions for the errors. Given that the regressors \eqn{G(X) = X}
are included as instruments, \eqn{G(X)} should not be linear in \eqn{X} in equation \eqn{3a}.
Let small letter denote deviation from the sample mean: \ifelse{html}{\out{s<sub>i</sub> = S<sub>i</sub>-S̅}}{\eqn{s_{i} = S_{i}-\bar{S}}}.
Then, using as instruments the variables presented in equations \eqn{3} together with \eqn{1} and
\ifelse{html}{\out{X<sub>t</sub>}}{\eqn{X_{t}}}, the two-stage-least-squares estimation will provide consistent estimates for the parameters
in equation \eqn{1} under the assumptions exposed in Lewbel(1997).
}
\subsection{Formula parameter}{
The \code{formula} argument follows a four part notation:
A two-sided formula describing the model (e.g. \code{y ~ X1 + X2 + P}), a single endogenous regressor
(e.g. \code{P}), and the exogenous variables from which the internal instrumental variables should
be build (e.g. \code{IIV(iiv=y2)}), each part separated by a single vertical bar (\code{|}).
The instrumental variables that should be built are specified as (multiple) functions, one for each
instrument. This function is \code{IIV} and uses the following arguments:
\describe{
\item{\code{iiv}}{Which internal instrument to build. One of \code{g, gp, gy, yp, p2, y2} can be chosen.}
\item{\code{g}}{Which function \code{g} represents in \code{iiv}.
One of \code{x2, x3, lnx, 1/x} can be chosen.
Only required if the type of internal instrument demands it.}
\item{\code{...}}{
The exogenous regressors to build the internal instrument. If more than one is given,
separate instruments are built for each. Only required if the type of internal instrument demands it.}
}
Note that no argument to \code{IIV} is to be supplied as character but as symbols without quotation marks.
Optionally, additional external instrumental variables to also include in the instrumental variable
regression can be specified. These external instruments have to be already present in the data
and are provided as the fourth right-hand side part of the formula, again separated by a vertical bar.
See the example section for illustrations on how to specify the \code{formula} parameter.
}
}
\examples{
data("dataHigherMoments")
# P is the endogenous regressor in all examples
# 2 IVs with g*p, g=x^2, separately for each regressor X1 and X2.
hm <- higherMomentsIV(y~X1+X2+P|P|IIV(iiv=gp, g=x2, X1, X2),
data = dataHigherMoments)
# same as above
hm <- higherMomentsIV(y~X1+X2+P|P|IIV(iiv=gp, g=x2, X1) +
IIV(iiv=gp, g=x2, X2),
data = dataHigherMoments)
# 3 different IVs
hm <- higherMomentsIV(y~X1+X2+P|P|IIV(iiv=y2) + IIV(iiv=yp) +
IIV(iiv=g,g=x3,X1),
data = dataHigherMoments)
# use X2 as external IV
hm <- higherMomentsIV(y~X1+P|P|IIV(iiv=y2)+IIV(iiv=g,g=lnx,X1)| X2,
data = dataHigherMoments)
summary(hm)
}
\references{
Lewbel A (1997). “Constructing Instruments for Regressions with Measurement Error When No Additional Data are Available, With an Application to Patents and R&D.” Econometrica, 65(5), 1201–1213.
}
\seealso{
\code{\link[AER]{ivreg}}
}
|
### Data Science Capstone : Course Project
### ui.R file for the Shiny app
### Github repo : https://github.com/kapilkaushik2/capstone
### UI definition: a two-tab navbar page. Tab 1 collects a partial sentence in
### input "inputString"; the server side is expected to render outputs
### "prediction", "text1" and "text2". Tab 2 renders the about.md file.
suppressWarnings(library(shiny))
suppressWarnings(library(markdown))
shinyUI(navbarPage("Coursera Data Science Capstone: Course Project",
# Tab 1: the interactive next-word predictor
tabPanel("Predict the Next Word",
HTML("<strong>Author: Kapil Kumar Kaushik</strong>"),
br(),
HTML("<strong>Date: 21 February 2017</strong>"),
br(),
# Image is served from the app's www/ directory
img(src = "./headers.png"),
# Sidebar
sidebarLayout(
sidebarPanel(
helpText("Enter a partially complete sentence to begin the next word prediction"),
# Text input read by the server as input$inputString
textInput("inputString", "Enter a partial sentence here",value = ""),
br(),
br(),
br(),
br()
),
mainPanel(
h2("Predicted Next Word"),
# Filled by server.R via output$prediction
verbatimTextOutput("prediction"),
strong("Sentence Input:"),
# Inline CSS targets the #text1 output element below
tags$style(type='text/css', '#text1 {background-color: rgba(255,255,0,0.40); color: blue;}'),
textOutput('text1'),
br(),
strong("Note:"),
tags$style(type='text/css', '#text2 {background-color: rgba(255,255,0,0.40); color: black;}'),
textOutput('text2')
)
)
),
# Tab 2: static documentation rendered from markdown
tabPanel("About",
mainPanel(
img(src = "./headers.png"),
includeMarkdown("about.md")
)
)
)
)
|
/Capstone/ui.R
|
no_license
|
kapilkaushik2/capstone
|
R
| false
| false
| 2,190
|
r
|
### Data Science Capstone : Course Project
### ui.R file for the Shiny app
### Github repo : https://github.com/kapilkaushik2/capstone
### UI definition: a two-tab navbar page. Tab 1 collects a partial sentence in
### input "inputString"; the server side is expected to render outputs
### "prediction", "text1" and "text2". Tab 2 renders the about.md file.
suppressWarnings(library(shiny))
suppressWarnings(library(markdown))
shinyUI(navbarPage("Coursera Data Science Capstone: Course Project",
# Tab 1: the interactive next-word predictor
tabPanel("Predict the Next Word",
HTML("<strong>Author: Kapil Kumar Kaushik</strong>"),
br(),
HTML("<strong>Date: 21 February 2017</strong>"),
br(),
# Image is served from the app's www/ directory
img(src = "./headers.png"),
# Sidebar
sidebarLayout(
sidebarPanel(
helpText("Enter a partially complete sentence to begin the next word prediction"),
# Text input read by the server as input$inputString
textInput("inputString", "Enter a partial sentence here",value = ""),
br(),
br(),
br(),
br()
),
mainPanel(
h2("Predicted Next Word"),
# Filled by server.R via output$prediction
verbatimTextOutput("prediction"),
strong("Sentence Input:"),
# Inline CSS targets the #text1 output element below
tags$style(type='text/css', '#text1 {background-color: rgba(255,255,0,0.40); color: blue;}'),
textOutput('text1'),
br(),
strong("Note:"),
tags$style(type='text/css', '#text2 {background-color: rgba(255,255,0,0.40); color: black;}'),
textOutput('text2')
)
)
),
# Tab 2: static documentation rendered from markdown
tabPanel("About",
mainPanel(
img(src = "./headers.png"),
includeMarkdown("about.md")
)
)
)
)
|
## example of gfile
## we use a stack widget with the first card to upload the file, the second
## to show some simple summary.
w <- gwindow("gfile example")
sb <- gstatusbar("Powered by gWidgetsWWW2.rapache and rapache", cont=w)
sw <- gstackwidget(cont=w)
## page 1
page1 <- gvbox(cont=sw)
ghtml("Upload a csv file to do something", cont=page1)
## gfile has a handler to process the uploaded file. The filename is returned
## through the "[" accessor; the path to the uploaded file is through svalue.
## update_page2 is defined further down; R only looks the name up when the
## handler actually fires, so the forward reference is fine.
f <- gfile(text="Choose a csv file...", cont=page1, handler=function(h,...) {
nm <- h$obj[];
path <- svalue(h$obj)
update_page2(nm, path)
})
page2 <- gvbox(cont=sw)
glabel("<h3>Simple summary of uploaded file</h3>", cont=page2)
nm <- glabel("file name", cont=page2)
var_names <- glabel("", cont=page2)
# Fill the labels on page 2 with the uploaded file's name and its CSV column
# names, then flip the stack widget to show page 2.
update_page2 <- function(name, path) {
svalue(nm) <- sprintf("Name of file is %s", name)
x <- read.csv(path)
nms <- paste(names(x), collapse="; ")
svalue(var_names) <- sprintf("Variable names are: %s", nms)
svalue(sw) <- 2
}
## set to first page
svalue(sw) <- 1
|
/inst/examples/ex-gfile.R
|
no_license
|
jverzani/gWidgetsWWW2.rapache
|
R
| false
| false
| 1,067
|
r
|
## example of gfile
## we use a stack widget with the first card to upload the file, the second
## to show some simple summary.
w <- gwindow("gfile example")
sb <- gstatusbar("Powered by gWidgetsWWW2.rapache and rapache", cont=w)
sw <- gstackwidget(cont=w)
## page 1
page1 <- gvbox(cont=sw)
ghtml("Upload a csv file to do something", cont=page1)
## gfile has a handler to process the uploaded file. The filename is returned
## through the "[" accessor; the path to the uploaded file is through svalue.
## update_page2 is defined further down; R only looks the name up when the
## handler actually fires, so the forward reference is fine.
f <- gfile(text="Choose a csv file...", cont=page1, handler=function(h,...) {
nm <- h$obj[];
path <- svalue(h$obj)
update_page2(nm, path)
})
page2 <- gvbox(cont=sw)
glabel("<h3>Simple summary of uploaded file</h3>", cont=page2)
nm <- glabel("file name", cont=page2)
var_names <- glabel("", cont=page2)
# Fill the labels on page 2 with the uploaded file's name and its CSV column
# names, then flip the stack widget to show page 2.
update_page2 <- function(name, path) {
svalue(nm) <- sprintf("Name of file is %s", name)
x <- read.csv(path)
nms <- paste(names(x), collapse="; ")
svalue(var_names) <- sprintf("Variable names are: %s", nms)
svalue(sw) <- 2
}
## set to first page
svalue(sw) <- 1
|
library(openxlsx)
library(reshape)
library(plyr)
#library(dplyr)
########################## Set Admin variables ##########################
#What all to do right now
#Stage flags (1 = run that stage, 0 = skip)
Make=1
Check=0
Save=1
FinalCB <- data.frame()
#List of counterbalances with lists assigned to conditions in the order given in CondNames
#For each counterbalance CB1-CB6, the numeric vectors name the stimulus-list IDs
#used in the TR and TI blocks; within each vector the order follows CondNames,
#i.e. Old, Similar, New.
ListRot <- list("CondNames"=list("TR"=list("TR_Old", "TR_Similar", "TR_New"),
"TI"=list("TI_Old", "TI_Similar", "TI_New")),
"CB1"=list("TR"=c(1, 2, 3),
"TI"=c(4, 5, 6)),
"CB2"=list("TR"=c(2, 3, 4),
"TI"=c(5, 6, 1)),
"CB3"=list("TR"=c(3, 4, 5),
"TI"=c(6, 1, 2)),
"CB4"=list("TR"=c(4, 5, 6),
"TI"=c(1, 2, 3)),
"CB5"=list("TR"=c(5, 6, 1),
"TI"=c(2, 3, 4)),
"CB6"=list("TR"=c(6, 1, 2),
"TI"=c(3, 4, 5)))
#Absolute path to the experiment folder; input spreadsheets are read from and
#output CSVs written under this directory.
BasePath <- "/Users/mrinmayi/GoogleDrive/Mrinmayi/Research/TemporalExpectation/Experiment/"
################################ Sanity check functions ################################
#Make sure that conditions and categories are evenly spread out across sets. To make sure that not all
#4 objects in a set are eventually in the Similar condition, or that not all of the objects are Tools
#etc.
#Frequency table (plyr::count) of the values in column Col of df.
#Used as a ddply() helper to count category/condition occurrences per group.
CountRows <- function(df, Col){
count(df[, Col])
}
#Run distributional sanity checks on a candidate counterbalance.
#Build=1 checks a freshly built encoding list (categories/conditions spread over
#ISIs, sets and thirds); Build=2 checks only that conditions are spread over the
#first-presentation ISIs of the TI block.
#Returns list(flag, details): flag is 1 if any criterion is violated (so the
#caller resamples), 0 otherwise; details holds the frequency tables inspected.
CheckCB <- function(df=NULL, Build=1) {
if(Build==1){ #This is initially building the CB
Check <- list()
Checked <- 0
#Condition per ISI is also controlled in the excel sheet
Check$CatPerISI <- ddply(df, c("ISIType"), CountRows, "Category")
Check$CatPerSet <- ddply(df, c("Set"), CountRows, "Category")
Check$CondPerSet <- ddply(df, c("Set"), CountRows, "Condition")
Check$OldPerThirds <- ddply(df[df$Condition=="Old",], c("Thirds"), CountRows, "Condition")
Check$SimPerThirds <- ddply(df[df$ListType=="Similar",], c("Thirds"), CountRows, "Condition")
#NOTE(review): Check$CondPerThirds is never computed above, so the
#any(Check$CondPerThirds$freq>=8) term below is any(logical(0)) == FALSE and
#that criterion is silently inactive. Confirm intent before enabling it:
#with 48 Old trials over 3 thirds the frequency is always >= 8, so computing
#it as written would make the caller's resampling loop never terminate.
Checked <- ifelse(any(any(Check$CatPerISI$freq>5),
any(Check$CatPerSet$freq>=4),
any(Check$CondPerSet$freq>=4),
any(Check$CondPerThirds$freq>=8),
any(Check$OldPerThirds$freq>=18),
any(Check$SimPerThirds$freq>=10)), 1, 0)
return(list(Checked, Check))
}else if(Build==2){ #Only need to make sure here that the ISI are equally split across conditions in the first ISIs
#Everything else is taken care of above
CheckTIISI <- list()
CheckedTIISI <- 0
#Only the first presentation of each set matters for this check
df <- df[df$NumPres==1,]
CheckTIISI$CondPerISI <- ddply(df, c("ISI"), count, "Condition")
CheckedTIISI <- ifelse(any(any(CheckTIISI$CondPerISI$freq>=13)), 1, 0)
return(list(CheckedTIISI, CheckTIISI))
}
}
#NOTE(review): with the default df=NULL this call reaches ddply(NULL, ...) and
#will likely error rather than "initialise" anything -- confirm it is needed.
CheckCB() #initialise
#Make sure that after the copying and sorting the trials, the same 4 objects are presented one after another
#Verify that within one 8-row set the repeated presentation (rows 5:8) lists the
#items in exactly the same order as the first presentation (rows 1:4).
#Returns 1 (collected by ddply() as a per-set success flag); stops otherwise.
CheckRepetitions <- function(df){
  #Guard clause replaces the original redundant if / else-if pair, which
  #evaluated the same comparison twice and left the return value `Good`
  #undefined if neither branch fired.
  if(!all(df[1:4, "Items"] == df[5:8, "Items"])){
    stop("Order of items in consecutive presentations is not the same. Investigate!!!!!")
  }
  1
}
#Randomise ISIs in the TI block either similar to Thavabalasingham et al., 2017 or like Debbie's suggestion
#Operates on one 8-row set (rows 1:4 = first presentation, 5:8 = repeat).
#TIType="Rand": draw jittered ISIs from normals centred on ISICombo; "Shuffle":
#swap short/long ISI assignments between the two presentations.
#Reads the globals ISICombo and TIJitters; returns df with ISIType/ISI filled in.
RandomiseTI <- function(df, TIType){
#Some jugaad required here. We want to make sure the subsequent presentations of the TI objects are paired with
#opposite ISIs. Since we're now changing around the order of the ISIs (See ISICombo variable), we no longer know
#which are the shortest and longest ISIs
#order() gives the positions of the two shortest / two longest ISI slots
ShortISI <- order(ISICombo)[1:2]
LongISI <- order(ISICombo)[3:4]
#First randominse the set of trials
df[1:4, "ISIType"] <- sample(4)
#df[5:8, "ISIType"] <- sample(4)
##### Use this if you want to randomise the TI block exactly like in Thavabalasingham et al., 2017 like
if(TIType=="Rand"){
MeanISI <- ISICombo #jittered around whatever is the ISIcombo for this participant
#SD of the jitter for each ISI slot, looked up by the slot's mean value
SDISI <- c(TIJitters[[as.character(MeanISI[1])]],
TIJitters[[as.character(MeanISI[2])]],
TIJitters[[as.character(MeanISI[3])]],
TIJitters[[as.character(MeanISI[4])]])
RandISI <- c(0, 0, 0, 0)
#Set ISIs. Just make sure a negative ISI isn't returned
#Resample all four draws until every ISI is above 17 (the floor)
while(any(RandISI<=17)){
for(i in 1:4){
RandISI[i] <- round(rnorm(1, mean=MeanISI[df[i, "ISIType"]], sd=SDISI[df[i, "ISIType"]]), digits=0)
}
}
df[1:4, "ISI"] <- RandISI
df[5:8, "ISI"] <- RandISI[sample(4)]
}
##### For the time being, using the Debbie version where the trials in a set that were short ISIs in
##### the first presentation, should become the long ISIs in the second presentation
else if(TIType=="Shuffle"){
#NOTE(review): row.names(df)==5:8 is an elementwise comparison with a recycled
#vector, and the second operand of & has length 4 (also recycled). This only
#selects the intended rows if the ddply() group arrives with rownames "1".."8"
#in order -- confirm, as rbind/order upstream may produce other rownames.
df[which(row.names(df)==5:8 & df[1:4, "ISIType"] %in% ShortISI), "ISIType"] <- LongISI[sample(1:2)]
df[which(row.names(df)==5:8 & df[1:4, "ISIType"] %in% LongISI), "ISIType"] <- ShortISI[sample(1:2)]
df[df$ISIType==1, "ISI"] <- ISICombo[1]
df[df$ISIType==2, "ISI"] <- ISICombo[2]
df[df$ISIType==3, "ISI"] <- ISICombo[3]
df[df$ISIType==4, "ISI"] <- ISICombo[4]
}
return(df)
}
################################ Read in files ################################
#Read in the master sheet that has all of the original details for how each participant should be organised
MasterList <- read.xlsx(paste(BasePath, "Experiment1/Counterbalancing/Counterbalancing_MasterSheet.xlsx", sep=""), sheet="ListAssignment",
cols=1:3, colNames=TRUE)
#Intended column order for the output sheets (declared but not obviously used below)
ColOrd <- c("Orig.Order", "Order_Assign.Conds", "Order_by.Run", "Condition", "List.ID", "SceneType", "Scene.ID", "RAND.Scene",
"Object", "ENC.Run", "AssociatePosition", "TestRun")
#Per-participant rotation of ISI slots across sets
ISIRotation <- read.xlsx(paste(BasePath, "Experiment1/Counterbalancing/Counterbalancing_MasterSheet.xlsx", sep=""), sheet="RotateISIAcrossParts",
cols=1:8, rows=1:289, colNames=TRUE)
#Lookup of the four delay values each participant should receive
ISIComboDict <- read.xlsx(paste(BasePath, "Experiment1/Counterbalancing/Counterbalancing_MasterSheet.xlsx", sep=""), sheet="ISIRotation",
cols=1:5, rows=28:52, colNames=TRUE)
#Change this to 1, 2, 3 and so on and so forth for different participants
Part=8
#This will alternate between a and b to yoke participants. So there will be
#a 1a, 1b, 2a, 2b and so on
Ver="a"
#Figure out how ITIs will be randomised based on which experiment we're on
Experiment=4
if(Experiment==4){
TIMethod = "Rand"
#Jitter SDs keyed by the mean ISI they apply to (looked up in RandomiseTI)
TIJitters <- list("100" = 40,
"500" = 80,
"1000" = 80,
"2000" = 80)
#ISI Combination: Got from CounterbalancingMasterSheet (Sheet: ISIRotation). This is to make sure that not all participants
#in the regular condition have the same ISI combination
print("***********DID YOU CHANGE THE ISI COMBO?!?!?!?!***********")
#ISICombo <- c(100, 1000, 2000, 500)
ISICombo <- unlist(list(ISIComboDict[ISIComboDict$Participant==Part, c("1stDelay", "2ndDelay", "3rdDelay", "4thDelay")]))
} else if(Experiment %in% 1:3) {
TIMethod = "Shuffle"
#ISI Combination: Got from CounterbalancingMasterSheet (Sheet: ISIRotation). This is to make sure that not all participants
#in the regular condition have the same ISI combination
print("***********DID YOU CHANGE THE ISI COMBO?!?!?!?!***********")
ISICombo <- c(500, 2500, 50, 1000)
}
#Just get the conditions for each set for that particular participant
CB=unique(ISIRotation[which(ISIRotation$Participant==Part), "CB"])
if(length(CB)>1){
stop("Something is wrong in the ISIRotation CB number. INVESTIGATE!!!!")
}
#The participants will be yoked such that the objects that were tested on the High Interefernce condition for 1a
#will be tested on the Low interference condition for 1b
SimType <- list("a" = c("Similar_HI", "Similar_LI"),
"b" = c("Similar_LI", "Similar_HI"))
#From ISIRotation, get the order of conditions for each set
UseRot <- ISIRotation[which(ISIRotation$CB==CB & ISIRotation$Participant==Part), names(ISIRotation) %in% c("ISI_1", "ISI_2", "ISI_3", "ISI_4")]
#Shuffle the set order for this participant
UseRot <- UseRot[sample(nrow(UseRot)),]
#The conditions will be assigned in this order
VecRot <- as.vector(t(UseRot))
#Decide whether the TI condition will come first or TR
UseCondOrd <- ISIRotation[which(ISIRotation$CB==CB & ISIRotation$Participant==Part), "CondOrd"][1:2]
#Sanity Check
if(length(VecRot) > 96){
stop("VecRot has more than 96 trials. Investigate!!!!")
}
#Empty 96-trial frame: MasterList's 3 columns plus ListType and Condition
FinalList <- data.frame(matrix(NA, nrow=96, ncol=5))
names(FinalList) <- c(names(MasterList), "ListType", "Condition")
################################ Build the actual counterbalancing ################################
#Main loop: for each block type in UseCondOrd (TR / TI) build the encoding list
#and the matching test list, accumulating into FinalCB_Encode / FinalCB_Test.
#NOTE(review): trial counts (48 per list, 96 per block, 24 per quarter) are
#hard-coded throughout this loop -- confirm against the master sheet.
#Make blank workbook
#OutSheet <- createWorkbook()
Count<-1
FinalCB_Encode <- c()
FinalCB_Test <- c()
#Make encoding blocks first
for(Cond in UseCondOrd){
#Subset MasterList to only include the lists that you need for this condition, for encoding
#(list numbers got from the ListRotation list)
UseList <- MasterList[MasterList$ListAssignment %in% ListRot[[paste("CB", CB, sep="")]][[Cond]][1:2], ]
#Sanity check
if(nrow(UseList) > 96){
stop("UseList has more than 96 trials. Investigate!!!!")
}
#Assign conditions based on the ListRotation list. From ListRot, get me the current CB number that I'm working with
#For the current condition. List rotation is setup such that the first number is always for the Old condition followed
#by similar and then new
UseList[UseList$ListAssignment == ListRot[[paste("CB", CB, sep="")]][[Cond]][1], "ListType"] <- "Old"
UseList[UseList$ListAssignment == ListRot[[paste("CB", CB, sep="")]][[Cond]][2], "ListType"] <- "Similar"
UseList[UseList$ListAssignment == ListRot[[paste("CB", CB, sep="")]][[Cond]][1], "Condition"] <- "Old"
#Make high and low interference assignments based on whether we're doing version a or b of a counterbalance
#rep_len alternates HI/LI over the 48 Similar rows, order set by Ver ("a" or "b")
UseList[UseList$ListAssignment == ListRot[[paste("CB", CB, sep="")]][[Cond]][2], "Condition"] <- rep_len(SimType[[Ver]], 48)
DoAgain <- 1
Count<-1
#Keep doing the sampling until you all conditions are satisfied
#Rejection sampling: resample trial order until CheckCB() reports no violation
while(DoAgain>0){
Count <- Count+1
#From ISIRotation, get the order of conditions for each set
UseRot <- ISIRotation[which(ISIRotation$CB==CB & ISIRotation$Participant==Part), names(ISIRotation) %in% c("ISI_1", "ISI_2", "ISI_3", "ISI_4")]
UseRot <- UseRot[sample(nrow(UseRot)),]
#The conditions will be assigned in this order
VecRot <- as.vector(t(UseRot))
#Choose the indices of the old (or similar) trials from the vector made from the appropriate sets.
#To those rows, assign a shuffled list of objects from UseList that are supposed to be old (or similar)
FinalList[VecRot=="Old", ] <- UseList[sample(1:48, 48), ]
FinalList[VecRot=="Similar", ] <- UseList[sample(49:96, 48), ]
#Add the necessary columns
FinalList$ISIType <- rep_len(1:4, 96)
FinalList$Set <- sort(rep_len(1:(96/4), 96))
#This column will be helpful to look at if performance improves through the thirds of the encoding block,
#as participants learn the rhythm of presentation.
#It's easier to add this column through the counterbalance, rather than add it to the data, and then make errors
#because of how the data is ordered
FinalList$Thirds <- sort(rep_len(1:3, nrow(FinalList)))
CheckList <- CheckCB(FinalList, 1)[[2]]
DoAgain <- CheckCB(FinalList, 1)[[1]]
}
#Repeat the same thing 2 times so that each set is presented twice
FinalListRpt <- rbind(FinalList, FinalList)
FinalEncode <- FinalListRpt[order(FinalListRpt$Set), ]
#Rows alternate: 4 first presentations then their 4 repeats, per set
FinalEncode$NumPres <- rep_len(c(1, 1, 1, 1, 2, 2, 2, 2), nrow(FinalEncode))
#Side-effect check only: CheckRepetitions() stops on failure, return value unused
ddply(FinalEncode, c("Set"), CheckRepetitions)
#rm(FinalList)
#FinalList <- FinalListRptSort
#FinalEncode$EncodeOrTest <- 1 #1 = encoding, 2 = Test-- Don't need this anymore because of the way the experiment is setup
#Yay for eliminating IF statements!!
FinalEncode$Block <- Cond
#FinalEncode$FirstLastTrial <- 0-- Don't need this anymore because of the way the experiment is setup
#FinalEncode[1, "FirstLastTrial"] <- 1
#FinalEncode[nrow(FinalEncode), "FirstLastTrial"] <- 2
#Set regular ISIs depending on whether you're in encoding for TR or TI
if(Cond=="TR"){
FinalEncode[FinalEncode$ISIType==1, "ISI"] <- ISICombo[1]
FinalEncode[FinalEncode$ISIType==2, "ISI"] <- ISICombo[2]
FinalEncode[FinalEncode$ISIType==3, "ISI"] <- ISICombo[3]
FinalEncode[FinalEncode$ISIType==4, "ISI"] <- ISICombo[4]
}
else if (Cond=="TI"){
#For the irregular block, keep re-randomising per set until the
#first-presentation ISIs are evenly spread over conditions (Build=2 check)
DoAgainTI <- 1
while(DoAgainTI>0){
FinalEncode <- ddply(FinalEncode, c("Set"), RandomiseTI, TIMethod)
CheckListTI <- CheckCB(FinalEncode, 2)[[2]]
DoAgainTI <- CheckCB(FinalEncode, 2)[[1]]
}
}
#Encoding always shows version "_1" of each item image
FinalEncode[, "Picture"] <- paste(FinalEncode[, "Items"], "_1", sep="")
#Prepare some stuff for the corresponding testing session
Similar_HI <- FinalEncode[FinalEncode$Condition=="Similar_HI", "Items"]
Similar_LI <- FinalEncode[FinalEncode$Condition=="Similar_LI", "Items"]
#Because the Test should be setup such that the items encoded in the first quarter of the experiment are
#tested first, get a list of objects split up by quarters
#split divides the data in the vector x into the groups defined by f
#unique is the function rapply is applying because each object is repreated twice in Final encode
QuartItems <- rapply(split(FinalEncode[, "Items"], ceiling(seq_along(1:96)/24)), unique, how="list")
################################ Done with Encoding ################################
################################ Now do test ################################
#Just get old and new from the list for now so that they can be randomised based on quarters from
#encoding
UseList <- MasterList[MasterList$ListAssignment %in% ListRot[[paste("CB", CB, sep="")]][[Cond]][1:2], ]
#Build Uselist from the quarters made above
UseList <- rbind(UseList[sample(which(UseList$Items %in% QuartItems$`1`)),],
UseList[sample(which(UseList$Items %in% QuartItems$`2`)),],
UseList[sample(which(UseList$Items %in% QuartItems$`3`)),],
UseList[sample(which(UseList$Items %in% QuartItems$`4`)),])
#Now add the new to this mess
NewTest <- MasterList[MasterList$ListAssignment %in% ListRot[[paste("CB", CB, sep="")]][[Cond]][3], ]
#Get positions of new objects
#Splice the New items into random positions by manipulating rowname vectors
NewPos <- sample(1:nrow(UseList), nrow(NewTest))
NewTest_RowName <- as.integer(rownames(NewTest))
UseList_RowName <- as.integer(rownames(UseList))
for(Pos in 1:length(NewPos)){
UseList_RowName <- append(UseList_RowName, NewTest_RowName[Pos], after=NewPos[Pos])
}
FinalTest <- rbind(UseList, NewTest)
FinalTest <- FinalTest[as.character(UseList_RowName),]
#Assign conditions based on the ListRotation list
FinalTest[FinalTest$ListAssignment == ListRot[[paste("CB", CB, sep="")]][[Cond]][1], "ListType"] <- "Old"
FinalTest[FinalTest$ListAssignment == ListRot[[paste("CB", CB, sep="")]][[Cond]][2], "ListType"] <- "Similar"
FinalTest[FinalTest$ListAssignment == ListRot[[paste("CB", CB, sep="")]][[Cond]][3], "ListType"] <- "New"
FinalTest[FinalTest$ListAssignment == ListRot[[paste("CB", CB, sep="")]][[Cond]][1], "Condition"] <- "Old"
#Assign HI and LI based on items that were assigned HI and LI in study
FinalTest[FinalTest$Items %in% Similar_HI, "Condition"] <- "Similar_HI"
FinalTest[FinalTest$Items %in% Similar_LI, "Condition"] <- "Similar_LI"
#Rename New
FinalTest[FinalTest$ListAssignment == ListRot[[paste("CB", CB, sep="")]][[Cond]][3], "Condition"] <- "New"
#FinalTest$EncodeOrTest <- 2
FinalTest$Block <- Cond
#FinalTest$FirstLastTrial <- 0
#FinalTest[1, "FirstLastTrial"] <- 1
#FinalTest[nrow(FinalTest), "FirstLastTrial"] <- 2
#FinalTest <- merge(FinalTest, FinalEncode[,c("Category", "Items", "ISIType", "Set", "ISI")],
# by=c("Category", "Items"), all.x=TRUE)
#FinalTest[FinalTest$Condition=="New", c("ISIType", "Set", "ISI")] <- 0
#FinalTest[, c("ISIType", "Set", "ISI")] <- 0
#Image version encodes the test condition: _1 = studied/new exemplar,
#_2 = high-interference lure, _3 = low-interference lure
FinalTest[FinalTest$Condition=="Old", "Picture"] <- paste(FinalTest[FinalTest$Condition=="Old", "Items"], "_1", sep="")
FinalTest[FinalTest$Condition=="New", "Picture"] <- paste(FinalTest[FinalTest$Condition=="New", "Items"], "_1", sep="")
FinalTest[FinalTest$Condition=="Similar_HI", "Picture"] <- paste(FinalTest[FinalTest$Condition=="Similar_HI", "Items"], "_2", sep="")
FinalTest[FinalTest$Condition=="Similar_LI", "Picture"] <- paste(FinalTest[FinalTest$Condition=="Similar_LI", "Items"], "_3", sep="")
FinalTest$Trial <- 1:nrow(FinalTest)
FinalCB_Encode <- rbind(FinalCB_Encode, FinalEncode)
FinalCB_Test <- rbind(FinalCB_Test, FinalTest)
#assign(paste(Cond, "_Encode", sep=""), FinalEncode)
}
#Need to do this explicitly because for some reason openxlsx isn't doing it
#Add some sheets to the workbook
#addWorksheet(OutSheet, paste(Cond, "_Encode", sep=""))
#Write the data to the sheets
#writeData(OutSheet, sheet = paste(Cond, "_Encode", sep=""), x=FinalList)
# Export the file
#saveWorkbook(OutSheet, paste(BasePath, "CB", CB, "_", Ver, ".xlsx", sep=""))
#Renumber trials: assumes exactly two 192-trial encoding blocks (TR + TI)
FinalCB_Encode$Trial <- c(1:192, 1:192)
#NOTE(review): the filenames below hard-code the "a" suffix instead of using
#Ver -- confirm this script is only ever run with Ver="a" (the "b" files are
#produced from the yoked copies further down).
if(Save==1){
write.csv(FinalCB_Encode, file = paste(BasePath, "CB_Encode_", Part, "a.csv", sep=""), row.names=FALSE)
write.csv(FinalCB_Test, file = paste(BasePath, "CB_Test_", Part, "a.csv", sep=""), row.names=FALSE)
}
#Make the CB for the yoked participant such that the object in the Similar_HI condition is now in the Similar_LI condition, but everything else
#(e.g. order of stim presentation is kept same)
#Swapping is done by relabelling the Condition factor levels HI <-> LI
FinalCB_Encode_Yoked <- FinalCB_Encode
FinalCB_Encode_Yoked$Condition <- factor(FinalCB_Encode_Yoked$Condition,
levels=c("Old", "Similar_HI", "Similar_LI"),
labels=c("Old", "Similar_LI", "Similar_HI"))
FinalCB_Test_Yoked <- FinalCB_Test
FinalCB_Test_Yoked$Condition <- factor(FinalCB_Test_Yoked$Condition,
levels=c("Old", "Similar_HI", "Similar_LI", "New"),
labels=c("Old", "Similar_LI", "Similar_HI", "New"))
#Rebuild Picture names so the image version matches the swapped conditions
FinalCB_Test_Yoked[FinalCB_Test_Yoked$Condition=="Old", "Picture"] <- paste(FinalCB_Test_Yoked[FinalCB_Test_Yoked$Condition=="Old", "Items"], "_1", sep="")
FinalCB_Test_Yoked[FinalCB_Test_Yoked$Condition=="New", "Picture"] <- paste(FinalCB_Test_Yoked[FinalCB_Test_Yoked$Condition=="New", "Items"], "_1", sep="")
FinalCB_Test_Yoked[FinalCB_Test_Yoked$Condition=="Similar_HI", "Picture"] <- paste(FinalCB_Test_Yoked[FinalCB_Test_Yoked$Condition=="Similar_HI", "Items"], "_2", sep="")
FinalCB_Test_Yoked[FinalCB_Test_Yoked$Condition=="Similar_LI", "Picture"] <- paste(FinalCB_Test_Yoked[FinalCB_Test_Yoked$Condition=="Similar_LI", "Items"], "_3", sep="")
if(Save==1){
write.csv(FinalCB_Encode_Yoked, file = paste(BasePath, "CB_Encode_", Part, "b.csv", sep=""), row.names=FALSE)
write.csv(FinalCB_Test_Yoked, file = paste(BasePath, "CB_Test_", Part, "b.csv", sep=""), row.names=FALSE)
}
#Left-over timing experiment, kept commented out
#ptm <- proc.time()
#Blah <-sort(sample(540, 90))#, prob=rep_len(1, 540)))
#while(any(diff(Blah)==1)){
# Blah <-sort(sample(540, 90))#, prob=rep_len(1, 540)))
# #print(Blah)
#}
#proc.time() - ptm
|
/MakeCounterbalance/TE_MakeCounterbalance.R
|
no_license
|
mrinmayik/TemporalExpectation
|
R
| false
| false
| 19,478
|
r
|
library(openxlsx)
library(reshape)
library(plyr)
#library(dplyr)
########################## Set Admin variables ##########################
#What all to do right now
#Stage flags (1 = run that stage, 0 = skip)
Make=1
Check=0
Save=1
FinalCB <- data.frame()
#List of counterbalances with lists assigned to conditions in the order given in CondNames
#For each counterbalance CB1-CB6, the numeric vectors name the stimulus-list IDs
#used in the TR and TI blocks, ordered Old, Similar, New (see CondNames).
ListRot <- list("CondNames"=list("TR"=list("TR_Old", "TR_Similar", "TR_New"),
"TI"=list("TI_Old", "TI_Similar", "TI_New")),
"CB1"=list("TR"=c(1, 2, 3),
"TI"=c(4, 5, 6)),
"CB2"=list("TR"=c(2, 3, 4),
"TI"=c(5, 6, 1)),
"CB3"=list("TR"=c(3, 4, 5),
"TI"=c(6, 1, 2)),
"CB4"=list("TR"=c(4, 5, 6),
"TI"=c(1, 2, 3)),
"CB5"=list("TR"=c(5, 6, 1),
"TI"=c(2, 3, 4)),
"CB6"=list("TR"=c(6, 1, 2),
"TI"=c(3, 4, 5)))
#Root folder for input spreadsheets and output CSVs
BasePath <- "/Users/mrinmayi/GoogleDrive/Mrinmayi/Research/TemporalExpectation/Experiment/"
################################ Sanity check functions ################################
#Make sure that conditions and categories are evenly spread out across sets. To make sure that not all
#4 objects in a set are eventually in the Similar condition, or that not all of the objects are Tools
#etc.
#Frequency table (plyr::count) of column Col of df; ddply() helper.
CountRows <- function(df, Col){
count(df[, Col])
}
#Distributional sanity checks on a candidate counterbalance; Build=1 checks a
#freshly built encoding list, Build=2 only the TI first-presentation ISIs.
#Returns list(flag, details): flag 1 = violation found (caller resamples), 0 = OK.
CheckCB <- function(df=NULL, Build=1) {
if(Build==1){ #This is initially building the CB
Check <- list()
Checked <- 0
#Condition per ISI is also controlled in the excel sheet
Check$CatPerISI <- ddply(df, c("ISIType"), CountRows, "Category")
Check$CatPerSet <- ddply(df, c("Set"), CountRows, "Category")
Check$CondPerSet <- ddply(df, c("Set"), CountRows, "Condition")
Check$OldPerThirds <- ddply(df[df$Condition=="Old",], c("Thirds"), CountRows, "Condition")
Check$SimPerThirds <- ddply(df[df$ListType=="Similar",], c("Thirds"), CountRows, "Condition")
#NOTE(review): Check$CondPerThirds is never computed, so the corresponding
#any(...) term below is any(logical(0)) == FALSE and that criterion is
#silently inactive -- confirm intent before enabling it.
Checked <- ifelse(any(any(Check$CatPerISI$freq>5),
any(Check$CatPerSet$freq>=4),
any(Check$CondPerSet$freq>=4),
any(Check$CondPerThirds$freq>=8),
any(Check$OldPerThirds$freq>=18),
any(Check$SimPerThirds$freq>=10)), 1, 0)
return(list(Checked, Check))
}else if(Build==2){ #Only need to make sure here that the ISI are equally split across conditions in the first ISIs
#Everything else is taken care of above
CheckTIISI <- list()
CheckedTIISI <- 0
#Only first presentations matter for this check
df <- df[df$NumPres==1,]
CheckTIISI$CondPerISI <- ddply(df, c("ISI"), count, "Condition")
CheckedTIISI <- ifelse(any(any(CheckTIISI$CondPerISI$freq>=13)), 1, 0)
return(list(CheckedTIISI, CheckTIISI))
}
}
#NOTE(review): with df=NULL this reaches ddply(NULL, ...) and will likely error
#rather than "initialise" anything -- confirm this call is needed.
CheckCB() #initialise
#Make sure that after the copying and sorting the trials, the same 4 objects are presented one after another
#Verify that within one 8-row Set the same 4 items appear, in the same order,
#in both the first presentation (rows 1-4) and the second (rows 5-8).
#Returns 1 when the orders match; stops with an error otherwise.
#(The original tested the same condition twice — an if followed by an else-if
#on its negation — which is collapsed into a single guard here; the return
#value and the error message are unchanged.)
CheckRepetitions <- function(df){
if(!all(df[1:4, "Items"] == df[5:8, "Items"])){
stop("Order of items in consecutive presentations is not the same. Investigate!!!!!")
}
return(1)
}
#Randomise ISIs in the TI block either similar to Thavabalasingham et al., 2017 or like Debbie's suggestion
#Assign ISIs to one 8-row set (two presentations of 4 trials) in the TI
#(temporally irregular) block. `TIType` selects the scheme: "Rand" draws
#jittered ISIs from normal distributions (Thavabalasingham et al., 2017
#style); "Shuffle" makes trials that had short ISIs at first presentation
#take long ISIs at second presentation, and vice versa.
#NOTE(review): reads the globals ISICombo and TIJitters set by the calling
#script; must be called per-Set (e.g. via ddply) with exactly 8 rows.
RandomiseTI <- function(df, TIType){
#Some jugaad required here. We want to make sure the subsequent presentations of the TI objects are paired with
#opposite ISIs. Since we're now changing around the order of the ISIs (See ISICombo variable), we no longer know
#which are the shortest and longest ISIs
ShortISI <- order(ISICombo)[1:2]
LongISI <- order(ISICombo)[3:4]
#First randominse the set of trials
df[1:4, "ISIType"] <- sample(4)
#df[5:8, "ISIType"] <- sample(4)
##### Use this if you want to randomise the TI block exactly like in Thavabalasingham et al., 2017 like
if(TIType=="Rand"){
MeanISI <- ISICombo #jittered around whatever is the ISIcombo for this participant
SDISI <- c(TIJitters[[as.character(MeanISI[1])]],
TIJitters[[as.character(MeanISI[2])]],
TIJitters[[as.character(MeanISI[3])]],
TIJitters[[as.character(MeanISI[4])]])
RandISI <- c(0, 0, 0, 0)
#Set ISIs. Just make sure a negative ISI isn't returned
#NOTE(review): draws are re-sampled until every ISI exceeds 17 — presumably
#17 is the minimum displayable ISI in frames/ms; confirm the meaning of
#this magic number.
while(any(RandISI<=17)){
for(i in 1:4){
RandISI[i] <- round(rnorm(1, mean=MeanISI[df[i, "ISIType"]], sd=SDISI[df[i, "ISIType"]]), digits=0)
}
}
df[1:4, "ISI"] <- RandISI
df[5:8, "ISI"] <- RandISI[sample(4)]
}
##### For the time being, using the Debbie version where the trials in a set that were short ISIs in
##### the first presentation, should become the long ISIs in the second presentation
else if(TIType=="Shuffle"){
#NOTE(review): row.names(df)==5:8 compares the group's character row names
#element-wise against the recycled vector 5:8; this only selects the
#second-presentation rows if each group arrives with row names exactly
#"1".."8" in order. Confirm against how ddply passes group subsets.
df[which(row.names(df)==5:8 & df[1:4, "ISIType"] %in% ShortISI), "ISIType"] <- LongISI[sample(1:2)]
df[which(row.names(df)==5:8 & df[1:4, "ISIType"] %in% LongISI), "ISIType"] <- ShortISI[sample(1:2)]
df[df$ISIType==1, "ISI"] <- ISICombo[1]
df[df$ISIType==2, "ISI"] <- ISICombo[2]
df[df$ISIType==3, "ISI"] <- ISICombo[3]
df[df$ISIType==4, "ISI"] <- ISICombo[4]
}
return(df)
}
################################ Read in files ################################
#NOTE(review): read.xlsx is openxlsx's; no library() calls are visible in this
#portion of the file, so the packages (openxlsx, plyr) are presumably attached
#elsewhere — confirm before running standalone.
#Read in the master sheet that has all of the original details for how each participant should be organised
MasterList <- read.xlsx(paste(BasePath, "Experiment1/Counterbalancing/Counterbalancing_MasterSheet.xlsx", sep=""), sheet="ListAssignment",
cols=1:3, colNames=TRUE)
#NOTE(review): ColOrd is defined here but not used in the visible portion of
#the file — presumably a column ordering for output sheets; verify.
ColOrd <- c("Orig.Order", "Order_Assign.Conds", "Order_by.Run", "Condition", "List.ID", "SceneType", "Scene.ID", "RAND.Scene",
"Object", "ENC.Run", "AssociatePosition", "TestRun")
ISIRotation <- read.xlsx(paste(BasePath, "Experiment1/Counterbalancing/Counterbalancing_MasterSheet.xlsx", sep=""), sheet="RotateISIAcrossParts",
cols=1:8, rows=1:289, colNames=TRUE)
ISIComboDict <- read.xlsx(paste(BasePath, "Experiment1/Counterbalancing/Counterbalancing_MasterSheet.xlsx", sep=""), sheet="ISIRotation",
cols=1:5, rows=28:52, colNames=TRUE)
#--- Per-run knobs: edit these before generating a new participant's files ---
#Change this to 1, 2, 3 and so on and so forth for different participants
Part=8
#This will alternate between a and b to yoke participants. So there will be
#a 1a, 1b, 2a, 2b and so on
Ver="a"
#Figure out how ITIs will be randomised based on which experiment we're on
Experiment=4
if(Experiment==4){
TIMethod = "Rand"
#Jitter SDs keyed by the mean ISI they apply to (see RandomiseTI "Rand" path)
TIJitters <- list("100" = 40,
"500" = 80,
"1000" = 80,
"2000" = 80)
#ISI Combination: Got from CounterbalancingMasterSheet (Sheet: ISIRotation). This is to make sure that not all participants
#in the regular condition have the same ISI combination
print("***********DID YOU CHANGE THE ISI COMBO?!?!?!?!***********")
#ISICombo <- c(100, 1000, 2000, 500)
ISICombo <- unlist(list(ISIComboDict[ISIComboDict$Participant==Part, c("1stDelay", "2ndDelay", "3rdDelay", "4thDelay")]))
} else if(Experiment %in% 1:3) {
TIMethod = "Shuffle"
#ISI Combination: Got from CounterbalancingMasterSheet (Sheet: ISIRotation). This is to make sure that not all participants
#in the regular condition have the same ISI combination
print("***********DID YOU CHANGE THE ISI COMBO?!?!?!?!***********")
ISICombo <- c(500, 2500, 50, 1000)
}
#Just get the conditions for each set for that particular participant
CB=unique(ISIRotation[which(ISIRotation$Participant==Part), "CB"])
if(length(CB)>1){
stop("Something is wrong in the ISIRotation CB number. INVESTIGATE!!!!")
}
#The participants will be yoked such that the objects that were tested on the High Interefernce condition for 1a
#will be tested on the Low interference condition for 1b
SimType <- list("a" = c("Similar_HI", "Similar_LI"),
"b" = c("Similar_LI", "Similar_HI"))
#From ISIRotation, get the order of conditions for each set
UseRot <- ISIRotation[which(ISIRotation$CB==CB & ISIRotation$Participant==Part), names(ISIRotation) %in% c("ISI_1", "ISI_2", "ISI_3", "ISI_4")]
UseRot <- UseRot[sample(nrow(UseRot)),]
#The conditions will be assigned in this order
VecRot <- as.vector(t(UseRot))
#Decide whether the TI condition will come first or TR
UseCondOrd <- ISIRotation[which(ISIRotation$CB==CB & ISIRotation$Participant==Part), "CondOrd"][1:2]
#Sanity Check
if(length(VecRot) > 96){
stop("VecRot has more than 96 trials. Investigate!!!!")
}
#Pre-allocate the 96-trial encoding frame; columns filled in the loop below
FinalList <- data.frame(matrix(NA, nrow=96, ncol=5))
names(FinalList) <- c(names(MasterList), "ListType", "Condition")
################################ Build the actual counterbalancing ################################
#Make blank workbook
#OutSheet <- createWorkbook()
Count<-1
#Accumulators for the per-condition encoding/test frames built in the loop
FinalCB_Encode <- c()
FinalCB_Test <- c()
#Make encoding blocks first
#Main loop: for each block condition (TR = temporally regular, TI = irregular)
#build the encoding list (re-sampling until CheckCB's constraints pass), then
#the corresponding test list, and append both to the accumulators.
for(Cond in UseCondOrd){
#Subset MasterList to only include the lists that you need for this condition, for encoding
#(list numbers got from the ListRotation list)
UseList <- MasterList[MasterList$ListAssignment %in% ListRot[[paste("CB", CB, sep="")]][[Cond]][1:2], ]
#Sanity check
if(nrow(UseList) > 96){
stop("UseList has more than 96 trials. Investigate!!!!")
}
#Assign conditions based on the ListRotation list. From ListRot, get me the current CB number that I'm working with
#For the current condition. List rotation is setup such that the first number is always for the Old condition followed
#by similar and then new
UseList[UseList$ListAssignment == ListRot[[paste("CB", CB, sep="")]][[Cond]][1], "ListType"] <- "Old"
UseList[UseList$ListAssignment == ListRot[[paste("CB", CB, sep="")]][[Cond]][2], "ListType"] <- "Similar"
UseList[UseList$ListAssignment == ListRot[[paste("CB", CB, sep="")]][[Cond]][1], "Condition"] <- "Old"
#Make high and low interference assignments based on whether we're doing version a or b of a counterbalance
UseList[UseList$ListAssignment == ListRot[[paste("CB", CB, sep="")]][[Cond]][2], "Condition"] <- rep_len(SimType[[Ver]], 48)
DoAgain <- 1
Count<-1
#Keep doing the sampling until you all conditions are satisfied
#NOTE(review): rejection sampling with no iteration cap — if the CheckCB
#constraints are (near-)unsatisfiable this loop runs indefinitely.
while(DoAgain>0){
Count <- Count+1
#From ISIRotation, get the order of conditions for each set
UseRot <- ISIRotation[which(ISIRotation$CB==CB & ISIRotation$Participant==Part), names(ISIRotation) %in% c("ISI_1", "ISI_2", "ISI_3", "ISI_4")]
UseRot <- UseRot[sample(nrow(UseRot)),]
#The conditions will be assigned in this order
VecRot <- as.vector(t(UseRot))
#Choose the indices of the old (or similar) trials from the vector made from the appropriate sets.
#To those rows, assign a shuffled list of objects from UseList that are supposed to be old (or similar)
#NOTE(review): rows 1:48 / 49:96 assume UseList is ordered with all Old-list
#rows before all Similar-list rows, i.e. it inherits MasterList's ordering —
#confirm against the master sheet.
FinalList[VecRot=="Old", ] <- UseList[sample(1:48, 48), ]
FinalList[VecRot=="Similar", ] <- UseList[sample(49:96, 48), ]
#Add the necessary columns
FinalList$ISIType <- rep_len(1:4, 96)
FinalList$Set <- sort(rep_len(1:(96/4), 96))
#This column will be helpful to look at if performance improves through the thirds of the encoding block,
#as participants learn the rhythm of presentation.
#It's easier to add this column through the counterbalance, rather than add it to the data, and then make errors
#because of how the data is ordered
FinalList$Thirds <- sort(rep_len(1:3, nrow(FinalList)))
CheckList <- CheckCB(FinalList, 1)[[2]]
DoAgain <- CheckCB(FinalList, 1)[[1]]
}
#Repeat the same thing 2 times so that each set is presented twice
FinalListRpt <- rbind(FinalList, FinalList)
FinalEncode <- FinalListRpt[order(FinalListRpt$Set), ]
FinalEncode$NumPres <- rep_len(c(1, 1, 1, 1, 2, 2, 2, 2), nrow(FinalEncode))
#Asserts (via stop()) that both presentations of each set keep item order
ddply(FinalEncode, c("Set"), CheckRepetitions)
#rm(FinalList)
#FinalList <- FinalListRptSort
#FinalEncode$EncodeOrTest <- 1 #1 = encoding, 2 = Test-- Don't need this anymore because of the way the experiment is setup
#Yay for eliminating IF statements!!
FinalEncode$Block <- Cond
#FinalEncode$FirstLastTrial <- 0-- Don't need this anymore because of the way the experiment is setup
#FinalEncode[1, "FirstLastTrial"] <- 1
#FinalEncode[nrow(FinalEncode), "FirstLastTrial"] <- 2
#Set regular ISIs depending on whether you're in encoding for TR or TI
if(Cond=="TR"){
FinalEncode[FinalEncode$ISIType==1, "ISI"] <- ISICombo[1]
FinalEncode[FinalEncode$ISIType==2, "ISI"] <- ISICombo[2]
FinalEncode[FinalEncode$ISIType==3, "ISI"] <- ISICombo[3]
FinalEncode[FinalEncode$ISIType==4, "ISI"] <- ISICombo[4]
}
else if (Cond=="TI"){
DoAgainTI <- 1
while(DoAgainTI>0){
FinalEncode <- ddply(FinalEncode, c("Set"), RandomiseTI, TIMethod)
CheckListTI <- CheckCB(FinalEncode, 2)[[2]]
DoAgainTI <- CheckCB(FinalEncode, 2)[[1]]
}
}
FinalEncode[, "Picture"] <- paste(FinalEncode[, "Items"], "_1", sep="")
#Prepare some stuff for the corresponding testing session
Similar_HI <- FinalEncode[FinalEncode$Condition=="Similar_HI", "Items"]
Similar_LI <- FinalEncode[FinalEncode$Condition=="Similar_LI", "Items"]
#Because the Test should be setup such that the items encoded in the first quarter of the experiment are
#tested first, get a list of objects split up by quarters
#split divides the data in the vector x into the groups defined by f
#unique is the function rapply is applying because each object is repreated twice in Final encode
QuartItems <- rapply(split(FinalEncode[, "Items"], ceiling(seq_along(1:96)/24)), unique, how="list")
################################ Done with Encoding ################################
################################ Now do test ################################
#Just get old and new from the list for now so that they can be randomised based on quarters from
#encoding
UseList <- MasterList[MasterList$ListAssignment %in% ListRot[[paste("CB", CB, sep="")]][[Cond]][1:2], ]
#Build Uselist from the quarters made above
UseList <- rbind(UseList[sample(which(UseList$Items %in% QuartItems$`1`)),],
UseList[sample(which(UseList$Items %in% QuartItems$`2`)),],
UseList[sample(which(UseList$Items %in% QuartItems$`3`)),],
UseList[sample(which(UseList$Items %in% QuartItems$`4`)),])
#Now add the new to this mess
NewTest <- MasterList[MasterList$ListAssignment %in% ListRot[[paste("CB", CB, sep="")]][[Cond]][3], ]
#Get positions of new objects
NewPos <- sample(1:nrow(UseList), nrow(NewTest))
#NOTE(review): the New trials are interleaved by splicing MasterList row
#names into the ordered old/similar row-name vector, then re-indexing the
#rbind-ed frame by those names — this assumes row names are unique across
#UseList and NewTest (true when both come from the same MasterList).
NewTest_RowName <- as.integer(rownames(NewTest))
UseList_RowName <- as.integer(rownames(UseList))
for(Pos in 1:length(NewPos)){
UseList_RowName <- append(UseList_RowName, NewTest_RowName[Pos], after=NewPos[Pos])
}
FinalTest <- rbind(UseList, NewTest)
FinalTest <- FinalTest[as.character(UseList_RowName),]
#Assign conditions based on the ListRotation list
FinalTest[FinalTest$ListAssignment == ListRot[[paste("CB", CB, sep="")]][[Cond]][1], "ListType"] <- "Old"
FinalTest[FinalTest$ListAssignment == ListRot[[paste("CB", CB, sep="")]][[Cond]][2], "ListType"] <- "Similar"
FinalTest[FinalTest$ListAssignment == ListRot[[paste("CB", CB, sep="")]][[Cond]][3], "ListType"] <- "New"
FinalTest[FinalTest$ListAssignment == ListRot[[paste("CB", CB, sep="")]][[Cond]][1], "Condition"] <- "Old"
#Assign HI and LI based on items that were assigned HI and LI in study
FinalTest[FinalTest$Items %in% Similar_HI, "Condition"] <- "Similar_HI"
FinalTest[FinalTest$Items %in% Similar_LI, "Condition"] <- "Similar_LI"
#Rename New
FinalTest[FinalTest$ListAssignment == ListRot[[paste("CB", CB, sep="")]][[Cond]][3], "Condition"] <- "New"
#FinalTest$EncodeOrTest <- 2
FinalTest$Block <- Cond
#FinalTest$FirstLastTrial <- 0
#FinalTest[1, "FirstLastTrial"] <- 1
#FinalTest[nrow(FinalTest), "FirstLastTrial"] <- 2
#FinalTest <- merge(FinalTest, FinalEncode[,c("Category", "Items", "ISIType", "Set", "ISI")],
#                   by=c("Category", "Items"), all.x=TRUE)
#FinalTest[FinalTest$Condition=="New", c("ISIType", "Set", "ISI")] <- 0
#FinalTest[, c("ISIType", "Set", "ISI")] <- 0
#Picture suffix encodes the lure version: _1 = studied/new image,
#_2 = high-interference lure, _3 = low-interference lure
FinalTest[FinalTest$Condition=="Old", "Picture"] <- paste(FinalTest[FinalTest$Condition=="Old", "Items"], "_1", sep="")
FinalTest[FinalTest$Condition=="New", "Picture"] <- paste(FinalTest[FinalTest$Condition=="New", "Items"], "_1", sep="")
FinalTest[FinalTest$Condition=="Similar_HI", "Picture"] <- paste(FinalTest[FinalTest$Condition=="Similar_HI", "Items"], "_2", sep="")
FinalTest[FinalTest$Condition=="Similar_LI", "Picture"] <- paste(FinalTest[FinalTest$Condition=="Similar_LI", "Items"], "_3", sep="")
FinalTest$Trial <- 1:nrow(FinalTest)
FinalCB_Encode <- rbind(FinalCB_Encode, FinalEncode)
FinalCB_Test <- rbind(FinalCB_Test, FinalTest)
#assign(paste(Cond, "_Encode", sep=""), FinalEncode)
}
#Need to do this explicitly because for some reason openxlsx isn't doing it
#Add some sheets to the workbook
#addWorksheet(OutSheet, paste(Cond, "_Encode", sep=""))
#Write the data to the sheets
#writeData(OutSheet, sheet = paste(Cond, "_Encode", sep=""), x=FinalList)
# Export the file
#saveWorkbook(OutSheet, paste(BasePath, "CB", CB, "_", Ver, ".xlsx", sep=""))
#Renumber trials within each of the two 192-trial encoding blocks
FinalCB_Encode$Trial <- c(1:192, 1:192)
#NOTE(review): `Save` is never defined in the visible portion of this file —
#this if() will error with "object 'Save' not found" unless Save is set in
#the calling environment (e.g. interactively) before sourcing. Confirm.
if(Save==1){
write.csv(FinalCB_Encode, file = paste(BasePath, "CB_Encode_", Part, "a.csv", sep=""), row.names=FALSE)
write.csv(FinalCB_Test, file = paste(BasePath, "CB_Test_", Part, "a.csv", sep=""), row.names=FALSE)
}
#Make the CB for the yoked participant such that the object in the Similar_HI condition is now in the Similar_LI condition, but everything else
#(e.g. order of stim presentation is kept same)
#The swap is done by relabelling the factor levels: HI<->LI
FinalCB_Encode_Yoked <- FinalCB_Encode
FinalCB_Encode_Yoked$Condition <- factor(FinalCB_Encode_Yoked$Condition,
levels=c("Old", "Similar_HI", "Similar_LI"),
labels=c("Old", "Similar_LI", "Similar_HI"))
FinalCB_Test_Yoked <- FinalCB_Test
FinalCB_Test_Yoked$Condition <- factor(FinalCB_Test_Yoked$Condition,
levels=c("Old", "Similar_HI", "Similar_LI", "New"),
labels=c("Old", "Similar_LI", "Similar_HI", "New"))
#Re-derive the lure picture suffixes for the swapped conditions
FinalCB_Test_Yoked[FinalCB_Test_Yoked$Condition=="Old", "Picture"] <- paste(FinalCB_Test_Yoked[FinalCB_Test_Yoked$Condition=="Old", "Items"], "_1", sep="")
FinalCB_Test_Yoked[FinalCB_Test_Yoked$Condition=="New", "Picture"] <- paste(FinalCB_Test_Yoked[FinalCB_Test_Yoked$Condition=="New", "Items"], "_1", sep="")
FinalCB_Test_Yoked[FinalCB_Test_Yoked$Condition=="Similar_HI", "Picture"] <- paste(FinalCB_Test_Yoked[FinalCB_Test_Yoked$Condition=="Similar_HI", "Items"], "_2", sep="")
FinalCB_Test_Yoked[FinalCB_Test_Yoked$Condition=="Similar_LI", "Picture"] <- paste(FinalCB_Test_Yoked[FinalCB_Test_Yoked$Condition=="Similar_LI", "Items"], "_3", sep="")
if(Save==1){
write.csv(FinalCB_Encode_Yoked, file = paste(BasePath, "CB_Encode_", Part, "b.csv", sep=""), row.names=FALSE)
write.csv(FinalCB_Test_Yoked, file = paste(BasePath, "CB_Test_", Part, "b.csv", sep=""), row.names=FALSE)
}
#ptm <- proc.time()
#Blah <-sort(sample(540, 90))#, prob=rep_len(1, 540)))
#while(any(diff(Blah)==1)){
#  Blah <-sort(sample(540, 90))#, prob=rep_len(1, 540)))
#  #print(Blah)
#}
#proc.time() - ptm
|
## SNMM_Stan_Start_Job_By_ID(filename_job_specifications, job_ID)
## Run one skew-normal mixture model (SNMM) Stan fit described by row `job_ID`
## of the ';'-separated job-specification CSV `filename_job_specifications`.
## The spec row supplies: filename, data_thinning_factor, num_states,
## num_total_samples, sample_thinning_factor, num_chains, and then four
## num_states-long groups of initial locations / scales / shapes / pis
## packed side by side starting at column 7.
## Side effects: plots a histogram of the (thinned) data, writes the posterior
## samples to "<casename>_snmm_samples.csv" and calls
## SNMM_Stan_Evaluate_Samples. Returns the stanfit object.
SNMM_Stan_Start_Job_By_ID <- function(filename_job_specifications,job_ID)
{
require("rstan")
job_specs <- read.csv(file=filename_job_specifications,header=TRUE,sep=";")
print(job_specs)
fn_datafile <- toString(job_specs$filename[job_ID])
data_snmm_thinned <- SNMM_Load_Data(fn_datafile,as.numeric(job_specs$data_thinning_factor[job_ID]))
## plot a histogram of the data (just for temporary test purposes:)
hist(data_snmm_thinned,breaks=100,main="Histogram of deflection data",xlab="deflection [nm]",ylab="frequency")
nstates <- as.numeric(job_specs$num_states[job_ID])
num_samples <- as.numeric(job_specs$num_total_samples[job_ID])
sample_thinning_factor <- as.numeric(job_specs$sample_thinning_factor[job_ID])
num_chains <- as.numeric(job_specs$num_chains[job_ID])
## Initial parameter vectors, packed side by side starting at column 7
locations0 <- as.numeric(job_specs[job_ID,7:(7+nstates-1)])
scales0 <- as.numeric(job_specs[job_ID,(7+nstates):(7+2*nstates-1)])
shapes0 <- as.numeric(job_specs[job_ID,(7+2*nstates):(7+3*nstates-1)])
pis0 <- as.numeric(job_specs[job_ID,(7+3*nstates):(7+4*nstates-1)])
alphas <- array(1,dim=c(nstates)) ## flat Dirichlet weights passed to the model
print(locations0)
print(scales0)
print(shapes0)
print(pis0)
num_points <- length(data_snmm_thinned)
data_stan_snmm <- list(N=num_points,M=nstates,y=data_snmm_thinned,alphas=alphas,mus0=locations0,std0=2)
init_stan_snmm <- list(list(phis=pis0,locations=locations0,scales=scales0,shapes=shapes0))
## BUGFIX: the chain-count argument was spelled `chain=` and only worked via
## R's partial argument matching; spell out rstan's `chains=` explicitly.
stan_fit_snmm <- stan(file="snmm_general.stan",data=data_stan_snmm,init=init_stan_snmm,iter=num_samples,chains=num_chains,thin=sample_thinning_factor)
mat_stan_snmm_samples <- as.matrix(stan_fit_snmm)
## obtain the "case-string": everything before the first '.' in the file name.
## NOTE(review): this truncates at the FIRST dot, so a name such as
## "run.1/data.txt" would yield "run" — confirm the input naming convention.
splitted <- strsplit(x=fn_datafile,split=".",fixed=TRUE)
casename <- splitted[[1]][1]
filename_snmm_samples <- paste0(casename,"_snmm_samples.csv")
write.table(x=mat_stan_snmm_samples,file=filename_snmm_samples,sep=";")
SNMM_Stan_Evaluate_Samples(stan_fit_snmm,data_snmm_thinned,num_states=nstates,casename=casename)
return(stan_fit_snmm)
}
## SNMM_Stan_Evaluate_Samples(stan_fit_snmm, data, num_states=2, casename="test_case")
## Summarise a fitted SNMM: writes "<casename>_snmm_plot.pdf" (data histogram
## overlaid with the mean-posterior mixture model) and
## "<casename>_snmm_post_mean.csv" (posterior-mean phis/locations/scales/shapes).
## The sample matrix of `stan_fit_snmm` is assumed to be laid out as
## num_states columns each of phis, locations, scales, shapes, in that order.
SNMM_Stan_Evaluate_Samples <- function(stan_fit_snmm,data,num_states=2,casename="test_case")
{
filename_snmm_plot <- paste0(casename,"_snmm_plot.pdf")
pdf(file=filename_snmm_plot,width=14,height=11,useDingbats=FALSE)
## Ensure the PDF device is closed even if plotting fails part-way
on.exit(dev.off(), add=TRUE)
SNMM_Stan_Plot_Skewed_Normals_From_Mean_Posterior(data,stan_fit_snmm,num_states,case_string=casename)
mat <- as.matrix(stan_fit_snmm)
## Posterior means, vectorised with colMeans in place of the original
## per-state loop; unname() so data.frame() does not pick up row names.
idx <- seq_len(num_states)
phi_means <- unname(colMeans(mat[, idx, drop=FALSE]))
location_means <- unname(colMeans(mat[, idx + num_states, drop=FALSE]))
scale_means <- unname(colMeans(mat[, idx + 2*num_states, drop=FALSE]))
shape_means <- unname(colMeans(mat[, idx + 3*num_states, drop=FALSE]))
df_posterior_means <- data.frame(phis=phi_means,locs=location_means,scales=scale_means,shapes=shape_means)
filename_posterior_mean <- paste0(casename,"_snmm_post_mean.csv")
write.table(x=df_posterior_means,file=filename_posterior_mean,sep=";")
}
## SNMM_Stan_Plot_Skewed_Normals_From_Mean_Posterior(data_orig, fit_stan_snmm,
##     num_states=2, lab_x="deflection [nm]", case_string)
## Compute per-state posterior means from the stanfit sample matrix of
## `fit_stan_snmm` (columns packed as [phis | locations | scales | shapes],
## num_states each) and plot the resulting skew-normal mixture over a
## histogram of `data_orig`.
SNMM_Stan_Plot_Skewed_Normals_From_Mean_Posterior <- function(data_orig,fit_stan_snmm,num_states=2,lab_x="deflection [nm]",case_string)
{
mat <- as.matrix(fit_stan_snmm)
## Posterior means via colMeans instead of the original per-state loop
idx <- seq_len(num_states)
phis_means <- unname(colMeans(mat[, idx, drop=FALSE]))
location_means <- unname(colMeans(mat[, idx + num_states, drop=FALSE]))
scale_means <- unname(colMeans(mat[, idx + 2*num_states, drop=FALSE]))
shape_means <- unname(colMeans(mat[, idx + 3*num_states, drop=FALSE]))
SNMM_Stan_Plot_Skewed_Normals_Mixture_Model(data_orig,phis_means,location_means,scale_means,shape_means,lab_x,case_string)
}
## SNMM_Stan_Plot_Skewed_Normals_Mixture_Model(data_orig, phis, locations,
##     scales, shapes, lab_x="force [pN]", case_string)
## Plot a density-scaled histogram of `data_orig` with each weighted
## skew-normal component and their sum overlaid. `phis` are the state
## probabilities; `locations`/`scales`/`shapes` map to sn::dsn's
## xi/omega/alpha, one value per state.
## NOTE(review): `case_string` is currently unused here; kept for interface
## compatibility with the callers.
SNMM_Stan_Plot_Skewed_Normals_Mixture_Model <- function(data_orig,phis,locations,scales,shapes,lab_x="force [pN]",case_string)
{
require("sn")
num_states <- length(phis)
minval <- min(data_orig)
maxval <- max(data_orig)
## Evaluate the densities on a grid extended 10 steps (5%) beyond the data
delta <- (maxval - minval)/200
minval <- minval-10*delta
maxval <- maxval+10*delta
xs <- seq(from=minval,to=maxval,by=delta)
ys <- numeric(length(xs))
hist(data_orig,breaks=200,freq=FALSE,xlab=lab_x,xlim=c(minval,maxval))
## NOTE(review): only 5 colours are provided; num_states > 5 would index NA
color_table <- c("blue","red","green","yellow","cyan")
for(j in seq_len(num_states))
{
## dsn() is vectorised over x, so the original element-wise inner loop
## is replaced by a single call per component (identical values).
ys_temp <- phis[j]*dsn(xs,xi=locations[j],omega=scales[j],alpha=shapes[j])
points(xs,ys_temp,type="l",col=color_table[j],lwd=2)
ys <- ys + ys_temp
}
points(xs,ys,type="l",col="black",lwd=2)
}
## SNMM_Load_Data(filename, thinning_factor)
## Read a whitespace-delimited table (with header) from `filename` and return
## every `thinning_factor`-th value of its first column as a plain vector.
SNMM_Load_Data <- function(filename,thinning_factor)
{
data_snmm <- read.table(file=filename,header=TRUE)
## nrow() instead of length(data_snmm[,1]) — same value, clearer intent
keep_rows <- seq(from=1,to=nrow(data_snmm),by=thinning_factor)
return(data_snmm[keep_rows,1])
}
|
/SNMM_functions.R
|
no_license
|
cwachauf/BSNMM_rstan
|
R
| false
| false
| 6,308
|
r
|
## SNMM_Stan_Start_Job_By_ID(filename_job_specifications, job_ID)
## Run one skew-normal mixture model (SNMM) Stan fit described by row `job_ID`
## of the ';'-separated job-specification CSV `filename_job_specifications`.
## The spec row supplies: filename, data_thinning_factor, num_states,
## num_total_samples, sample_thinning_factor, num_chains, and then four
## num_states-long groups of initial locations / scales / shapes / pis
## packed side by side starting at column 7.
## Side effects: plots a histogram of the (thinned) data, writes the posterior
## samples to "<casename>_snmm_samples.csv" and calls
## SNMM_Stan_Evaluate_Samples. Returns the stanfit object.
SNMM_Stan_Start_Job_By_ID <- function(filename_job_specifications,job_ID)
{
require("rstan")
job_specs <- read.csv(file=filename_job_specifications,header=TRUE,sep=";")
print(job_specs)
fn_datafile <- toString(job_specs$filename[job_ID])
data_snmm_thinned <- SNMM_Load_Data(fn_datafile,as.numeric(job_specs$data_thinning_factor[job_ID]))
## plot a histogram of the data (just for temporary test purposes:)
hist(data_snmm_thinned,breaks=100,main="Histogram of deflection data",xlab="deflection [nm]",ylab="frequency")
nstates <- as.numeric(job_specs$num_states[job_ID])
num_samples <- as.numeric(job_specs$num_total_samples[job_ID])
sample_thinning_factor <- as.numeric(job_specs$sample_thinning_factor[job_ID])
num_chains <- as.numeric(job_specs$num_chains[job_ID])
## Initial parameter vectors, packed side by side starting at column 7
locations0 <- as.numeric(job_specs[job_ID,7:(7+nstates-1)])
scales0 <- as.numeric(job_specs[job_ID,(7+nstates):(7+2*nstates-1)])
shapes0 <- as.numeric(job_specs[job_ID,(7+2*nstates):(7+3*nstates-1)])
pis0 <- as.numeric(job_specs[job_ID,(7+3*nstates):(7+4*nstates-1)])
alphas <- array(1,dim=c(nstates)) ## flat Dirichlet weights passed to the model
print(locations0)
print(scales0)
print(shapes0)
print(pis0)
num_points <- length(data_snmm_thinned)
data_stan_snmm <- list(N=num_points,M=nstates,y=data_snmm_thinned,alphas=alphas,mus0=locations0,std0=2)
init_stan_snmm <- list(list(phis=pis0,locations=locations0,scales=scales0,shapes=shapes0))
## BUGFIX: the chain-count argument was spelled `chain=` and only worked via
## R's partial argument matching; spell out rstan's `chains=` explicitly.
stan_fit_snmm <- stan(file="snmm_general.stan",data=data_stan_snmm,init=init_stan_snmm,iter=num_samples,chains=num_chains,thin=sample_thinning_factor)
mat_stan_snmm_samples <- as.matrix(stan_fit_snmm)
## obtain the "case-string": everything before the first '.' in the file name.
## NOTE(review): this truncates at the FIRST dot, so a name such as
## "run.1/data.txt" would yield "run" — confirm the input naming convention.
splitted <- strsplit(x=fn_datafile,split=".",fixed=TRUE)
casename <- splitted[[1]][1]
filename_snmm_samples <- paste0(casename,"_snmm_samples.csv")
write.table(x=mat_stan_snmm_samples,file=filename_snmm_samples,sep=";")
SNMM_Stan_Evaluate_Samples(stan_fit_snmm,data_snmm_thinned,num_states=nstates,casename=casename)
return(stan_fit_snmm)
}
## SNMM_Stan_Evaluate_Samples(stan_fit_snmm, data, num_states=2, casename="test_case")
## Summarise a fitted SNMM: writes "<casename>_snmm_plot.pdf" (data histogram
## overlaid with the mean-posterior mixture model) and
## "<casename>_snmm_post_mean.csv" (posterior-mean phis/locations/scales/shapes).
## The sample matrix of `stan_fit_snmm` is assumed to be laid out as
## num_states columns each of phis, locations, scales, shapes, in that order.
SNMM_Stan_Evaluate_Samples <- function(stan_fit_snmm,data,num_states=2,casename="test_case")
{
filename_snmm_plot <- paste0(casename,"_snmm_plot.pdf")
pdf(file=filename_snmm_plot,width=14,height=11,useDingbats=FALSE)
## Ensure the PDF device is closed even if plotting fails part-way
on.exit(dev.off(), add=TRUE)
SNMM_Stan_Plot_Skewed_Normals_From_Mean_Posterior(data,stan_fit_snmm,num_states,case_string=casename)
mat <- as.matrix(stan_fit_snmm)
## Posterior means, vectorised with colMeans in place of the original
## per-state loop; unname() so data.frame() does not pick up row names.
idx <- seq_len(num_states)
phi_means <- unname(colMeans(mat[, idx, drop=FALSE]))
location_means <- unname(colMeans(mat[, idx + num_states, drop=FALSE]))
scale_means <- unname(colMeans(mat[, idx + 2*num_states, drop=FALSE]))
shape_means <- unname(colMeans(mat[, idx + 3*num_states, drop=FALSE]))
df_posterior_means <- data.frame(phis=phi_means,locs=location_means,scales=scale_means,shapes=shape_means)
filename_posterior_mean <- paste0(casename,"_snmm_post_mean.csv")
write.table(x=df_posterior_means,file=filename_posterior_mean,sep=";")
}
## SNMM_Stan_Plot_Skewed_Normals_From_Mean_Posterior(data_orig, fit_stan_snmm,
##     num_states=2, lab_x="deflection [nm]", case_string)
## Compute per-state posterior means from the stanfit sample matrix of
## `fit_stan_snmm` (columns packed as [phis | locations | scales | shapes],
## num_states each) and plot the resulting skew-normal mixture over a
## histogram of `data_orig`.
SNMM_Stan_Plot_Skewed_Normals_From_Mean_Posterior <- function(data_orig,fit_stan_snmm,num_states=2,lab_x="deflection [nm]",case_string)
{
mat <- as.matrix(fit_stan_snmm)
## Posterior means via colMeans instead of the original per-state loop
idx <- seq_len(num_states)
phis_means <- unname(colMeans(mat[, idx, drop=FALSE]))
location_means <- unname(colMeans(mat[, idx + num_states, drop=FALSE]))
scale_means <- unname(colMeans(mat[, idx + 2*num_states, drop=FALSE]))
shape_means <- unname(colMeans(mat[, idx + 3*num_states, drop=FALSE]))
SNMM_Stan_Plot_Skewed_Normals_Mixture_Model(data_orig,phis_means,location_means,scale_means,shape_means,lab_x,case_string)
}
## SNMM_Stan_Plot_Skewed_Normals_Mixture_Model(data_orig, phis, locations,
##     scales, shapes, lab_x="force [pN]", case_string)
## Plot a density-scaled histogram of `data_orig` with each weighted
## skew-normal component and their sum overlaid. `phis` are the state
## probabilities; `locations`/`scales`/`shapes` map to sn::dsn's
## xi/omega/alpha, one value per state.
## NOTE(review): `case_string` is currently unused here; kept for interface
## compatibility with the callers.
SNMM_Stan_Plot_Skewed_Normals_Mixture_Model <- function(data_orig,phis,locations,scales,shapes,lab_x="force [pN]",case_string)
{
require("sn")
num_states <- length(phis)
minval <- min(data_orig)
maxval <- max(data_orig)
## Evaluate the densities on a grid extended 10 steps (5%) beyond the data
delta <- (maxval - minval)/200
minval <- minval-10*delta
maxval <- maxval+10*delta
xs <- seq(from=minval,to=maxval,by=delta)
ys <- numeric(length(xs))
hist(data_orig,breaks=200,freq=FALSE,xlab=lab_x,xlim=c(minval,maxval))
## NOTE(review): only 5 colours are provided; num_states > 5 would index NA
color_table <- c("blue","red","green","yellow","cyan")
for(j in seq_len(num_states))
{
## dsn() is vectorised over x, so the original element-wise inner loop
## is replaced by a single call per component (identical values).
ys_temp <- phis[j]*dsn(xs,xi=locations[j],omega=scales[j],alpha=shapes[j])
points(xs,ys_temp,type="l",col=color_table[j],lwd=2)
ys <- ys + ys_temp
}
points(xs,ys,type="l",col="black",lwd=2)
}
## SNMM_Load_Data(filename, thinning_factor)
## Read a whitespace-delimited table (with header) from `filename` and return
## every `thinning_factor`-th value of its first column as a plain vector.
SNMM_Load_Data <- function(filename,thinning_factor)
{
data_snmm <- read.table(file=filename,header=TRUE)
## nrow() instead of length(data_snmm[,1]) — same value, clearer intent
keep_rows <- seq(from=1,to=nrow(data_snmm),by=thinning_factor)
return(data_snmm[keep_rows,1])
}
|
# Yige Wu @ WashU 2017 Jan
# plot 3D/linear distance and co-phosphorylation correlation FDRs and coefficients
# directory and library ---------------------------------------------------
# for working on Kuan's mac
# NOTE(review): "\ " is not a valid R string escape and will cause a parse
# error ("unrecognized escape") when this file is sourced — presumably the
# backslash should be removed (a plain space works in a quoted path). Confirm.
baseD = "/Users/khuang/Box\ Sync/PhD/proteogenomics/CPTAC_pan3Cancer/"
# # for working on Yige's mac
# baseD = "/Users/yigewu/Box\ Sync/"
library(stringr)
library(ggplot2)
library(readr)
setwd(paste(baseD,"pan3can_analysis/phospho_network",sep=""))
source("../pan3can_aes.R") # aes for general purposes; it should be one directory out of the working directory
# choose cohort and significance level ------------------------------------
# NOTE(review): `sig` is set here but never used in the visible portion of
# this script; it may be consumed by the sourced pan3can_aes.R. Verify.
sig <- 0.05
# input within protein pairwise processed file ----------------------------
pairwise_brca <- read_delim(paste(baseD,"pan3can_shared_data/analysis_results/hotspot3d/table/BRCA_phosphosite_within_protein_distance_and_correlation.txt", sep=""),"\t", escape_double = FALSE, trim_ws = TRUE)
pairwise_ov <- read_delim(paste(baseD,"pan3can_shared_data/analysis_results/hotspot3d/table/OV_phosphosite_within_protein_distance_and_correlation.txt", sep=""),"\t", escape_double = FALSE, trim_ws = TRUE)
pairwise_brca$cancer <- "BRCA"
pairwise_ov$cancer <- "OV"
pairwise <- rbind(pairwise_brca, pairwise_ov)
# # plot correlation between coef_corr and distances ------------------------
# p = ggplot(data = pairwise, aes(x = dis_3d , y = coef_corr, color = fdr_corr < 0.05))
# p = p + facet_grid(.~cancer,scales = "free_y")#, drop=T, space = "free_y",scales = "free_y")#, space = "free", scales = "free")
# p = p + geom_smooth(method = "glm", se=FALSE, color="black", formula = y ~ x)
# p = p + geom_point(alpha=0.3, stroke=0) #+ scale_color_gradientn(name= "FDR", na.value=NA)
# p = p + geom_text(aes(label= ifelse((dis_3d < 4 & coef_corr <0.4) | (dis_3d > 9 & coef_corr > 0.95), pair, NA ), vjust = -1, hjust = 1),size=2,alpha=0.5)
# p = p + theme_bw()
# p = p + labs(x="3D distance (ångström)", y = "Correlation coefficient")
# #p = p + expand_limits(x = 0)
# p
# fn = paste(baseD,'pan3can_shared_data/analysis_results/hotspot3d/within_protein_2can_distance_3d_and_coef_corr_correlation.pdf',sep ="")
# ggsave(file=fn, height=5, width=10)
# plot correlation between coef_corr and distances ------------------------
p = ggplot(data = pairwise, aes(x = dis_3d , y = coef_corr, color = cancer))
#p = p + facet_grid(.~cancer,scales = "free_y")#, drop=T, space = "free_y",scales = "free_y")#, space = "free", scales = "free")
p = p + geom_smooth(method = "glm", color="black", formula = y ~ x)
p = p + geom_point(alpha=0.3, stroke=0) #+ scale_color_gradientn(name= "FDR", na.value=NA)
p = p + theme_bw()
p = p + labs(x="3D distance (ångström)", y = "Correlation coefficient")
#p = p + expand_limits(x = 0)
p
fn = paste(baseD,'pan3can_shared_data/analysis_results/hotspot3d/within_protein_2can_distance_3d_and_coef_corr_correlation.pdf',sep ="")
ggsave(file=fn, height=5, width=6,useDingbats=F)
# Cap the linear-distance axis for the second plot
limx <- 100
# NOTE(review): `cohort` is never defined in this script — this line will
# error with "object 'cohort' not found" unless cohort is set in the calling
# environment. The surrounding code handles both cancers at once via the
# `cancer` column, so this per-cohort branch may be a leftover; confirm.
if ( cohort == "OV" ) {
limx <- 75
}
p = ggplot(data = pairwise[pairwise$dis_lin<limx,], aes(x = dis_lin , y = coef_corr, color = fdr_corr < 0.05))
p = p + facet_grid(.~cancer,scales = "free_y")#, drop=T, space = "free_y",scales = "free_y")#, space = "free", scales = "free")
p = p + geom_smooth(method = "glm", se=FALSE, color="black", formula = y ~ x)
p = p + geom_point(alpha=0.3, stroke = 0)
p = p + geom_text(aes(label= ifelse((dis_lin < 5 & coef_corr < 0.1) | (dis_lin > 0.5*limx & coef_corr > 0.90), pair, NA ), vjust = 1, hjust = -0.2 ),size=2,alpha=0.5)
p = p + theme_bw()
p = p + labs(x="linear distance", y = "Correlation coefficient")
p
fn = paste(baseD,'pan3can_shared_data/analysis_results/hotspot3d/within_protein_2can_distance_linear_and_coef_corr_correlation_max',limx,'.pdf',sep ="")
ggsave(file=fn, height=5, width=10)
|
/phospho_network/hotspot3d/plot_2can_phospho_corr_and_distance_single.R
|
no_license
|
ding-lab/phosphoproteomics
|
R
| false
| false
| 3,813
|
r
|
# Yige Wu @ WashU 2017 Jan
# plot 3D/linear distance and co-phosphorylation correlation FDRs and coefficients
# directory and library ---------------------------------------------------
# for working on Kuan's mac
baseD = "/Users/khuang/Box\ Sync/PhD/proteogenomics/CPTAC_pan3Cancer/"
# # for working on Yige's mac
# baseD = "/Users/yigewu/Box\ Sync/"
library(stringr)
library(ggplot2)
library(readr)
setwd(paste(baseD,"pan3can_analysis/phospho_network",sep=""))
source("../pan3can_aes.R") # aes for general purposes; it should be one directory out of the working directory
# choose cohort and significance level ------------------------------------
sig <- 0.05
# input within protein pairwise processed file ----------------------------
pairwise_brca <- read_delim(paste(baseD,"pan3can_shared_data/analysis_results/hotspot3d/table/BRCA_phosphosite_within_protein_distance_and_correlation.txt", sep=""),"\t", escape_double = FALSE, trim_ws = TRUE)
pairwise_ov <- read_delim(paste(baseD,"pan3can_shared_data/analysis_results/hotspot3d/table/OV_phosphosite_within_protein_distance_and_correlation.txt", sep=""),"\t", escape_double = FALSE, trim_ws = TRUE)
pairwise_brca$cancer <- "BRCA"
pairwise_ov$cancer <- "OV"
pairwise <- rbind(pairwise_brca, pairwise_ov)
# # plot correlation between coef_corr and distances ------------------------
# p = ggplot(data = pairwise, aes(x = dis_3d , y = coef_corr, color = fdr_corr < 0.05))
# p = p + facet_grid(.~cancer,scales = "free_y")#, drop=T, space = "free_y",scales = "free_y")#, space = "free", scales = "free")
# p = p + geom_smooth(method = "glm", se=FALSE, color="black", formula = y ~ x)
# p = p + geom_point(alpha=0.3, stroke=0) #+ scale_color_gradientn(name= "FDR", na.value=NA)
# p = p + geom_text(aes(label= ifelse((dis_3d < 4 & coef_corr <0.4) | (dis_3d > 9 & coef_corr > 0.95), pair, NA ), vjust = -1, hjust = 1),size=2,alpha=0.5)
# p = p + theme_bw()
# p = p + labs(x="3D distance (ångström)", y = "Correlation coefficient")
# #p = p + expand_limits(x = 0)
# p
# fn = paste(baseD,'pan3can_shared_data/analysis_results/hotspot3d/within_protein_2can_distance_3d_and_coef_corr_correlation.pdf',sep ="")
# ggsave(file=fn, height=5, width=10)
# plot correlation between coef_corr and distances ------------------------
p = ggplot(data = pairwise, aes(x = dis_3d , y = coef_corr, color = cancer))
#p = p + facet_grid(.~cancer,scales = "free_y")#, drop=T, space = "free_y",scales = "free_y")#, space = "free", scales = "free")
p = p + geom_smooth(method = "glm", color="black", formula = y ~ x)
p = p + geom_point(alpha=0.3, stroke=0) #+ scale_color_gradientn(name= "FDR", na.value=NA)
p = p + theme_bw()
p = p + labs(x="3D distance (ångström)", y = "Correlation coefficient")
#p = p + expand_limits(x = 0)
p
fn = paste(baseD,'pan3can_shared_data/analysis_results/hotspot3d/within_protein_2can_distance_3d_and_coef_corr_correlation.pdf',sep ="")
ggsave(file=fn, height=5, width=6,useDingbats=F)
limx <- 100
if ( cohort == "OV" ) {
limx <- 75
}
p = ggplot(data = pairwise[pairwise$dis_lin<limx,], aes(x = dis_lin , y = coef_corr, color = fdr_corr < 0.05))
p = p + facet_grid(.~cancer,scales = "free_y")#, drop=T, space = "free_y",scales = "free_y")#, space = "free", scales = "free")
p = p + geom_smooth(method = "glm", se=FALSE, color="black", formula = y ~ x)
p = p + geom_point(alpha=0.3, stroke = 0)
p = p + geom_text(aes(label= ifelse((dis_lin < 5 & coef_corr < 0.1) | (dis_lin > 0.5*limx & coef_corr > 0.90), pair, NA ), vjust = 1, hjust = -0.2 ),size=2,alpha=0.5)
p = p + theme_bw()
p = p + labs(x="linear distance", y = "Correlation coefficient")
p
fn = paste(baseD,'pan3can_shared_data/analysis_results/hotspot3d/within_protein_2can_distance_linear_and_coef_corr_correlation_max',limx,'.pdf',sep ="")
ggsave(file=fn, height=5, width=10)
|
library(readr)
library(dplyr)
library(stringr)
library(tools)
library(TraceQC)
library(fastqcr)
library(readr)
sra <- read_csv("./data/000_SraRunTable.txt") %>%
select(Run,`Library Name`)
qc_dir <- "./fastqc/"
fastq_dir <- "./data/020_fastq_by_identifier"
for (dir in list.dirs(fastq_dir)){
fastqc(dir,qc.dir=qc_dir)
}
identifiers <- read_csv("./data/000_ref/hgRNA_identifiers.csv")
all_length <- c(21,25,30,35)
for (l in all_length) {
ref <- read_lines(sprintf("./data/000_ref/L%s.txt",l))
refseq <- ref[1]
regions_str <- strsplit(ref[2:length(ref)],split=" ")
regions <- do.call(rbind,regions_str) %>%
as.data.frame() %>%
setNames(c("region","start","end")) %>%
mutate(start=strtoi(.data$start),
end=strtoi(.data$end)) %>%
mutate(region=as.character(.data$region))
# tmp <- list(refseq=refseq,regions=regions)
# plot_construct(tmp)
L_identifiers <- filter(identifiers,Length>=(l-1)&Length<=(l+1))
for (i in 1:nrow(L_identifiers)) {
identifier <- as.character(L_identifiers[i,"Identifier (ID)"])
spacer <- as.character(L_identifiers[i,"Spacer regions (TSS to PAM)"])
spacer_start <- regions[regions$region=="spacer","start"]
spacer_end <- regions[regions$region=="spacer","end"]
ref_id_seq <- paste(substr(refseq,start=1,stop=spacer_start-1),spacer,
substr(refseq,start=spacer_end+1,stop=nchar(refseq)),sep="")
# tmp <- list(refseq=ref_id_seq,regions=regions)
# plot_construct(tmp)
out_file <- sprintf("./data/000_ref/L%s_%s.txt",l,identifier)
write(paste(c(ref_id_seq,ref[2:3]),sep="\n"),
out_file)}}
for (f in list.files(fastq_dir,recursive=TRUE)) {
tmp <- strsplit(f,"/")[[1]][2]
tmp <- strsplit(file_path_sans_ext(tmp),split="_")[[1]]
sra <- tmp[1]
identifier <- tmp[2]
input_file <- sprintf("%s/%s",fastq_dir,f)
ref_file <- list.files("./data/000_ref/",pattern=identifier)
ref_file <- sprintf("./data/000_ref/%s",ref_file)
output_file <- sprintf("./data/030.1_alignment/%s_%s.txt",sra,identifier)
sequence_alignment(input_file=input_file,ref_file=ref_file,
output_file=output_file)}
for (input_file in list.files("./data/030.1_alignment/")) {
print(input_file)
aligned_reads <- read_tsv(sprintf("./data/030.1_alignment/%s",input_file))
if (nrow(aligned_reads)>0) {
traceQC_input <- list(aligned_reads=aligned_reads)
mutation_event <- seq_to_character(traceQC_input,ncores=1,
use_CPM=FALSE,alignment_score_cutoff=-Inf,
abundance_cutoff=0)
write_tsv(mutation_event,sprintf("./data/030.2_traceQC_obj/%s",input_file))}
}
for (ref_file in list.files("./data/000_ref/",pattern="L[0-9][0-9]_")) {
alignment_threshold <- sequence_permutation(ref_file=sprintf("./data/000_ref/%s",ref_file))
write_tsv(alignment_threshold,sprintf("./data/030.3_alignment_threshold/alignment_threshold_%s",ref_file))}
|
/hgRNA-invivo/030_run_traceQC.R
|
no_license
|
LiuzLab/TraceQC-manuscript
|
R
| false
| false
| 2,958
|
r
|
library(readr)
library(dplyr)
library(stringr)
library(tools)
library(TraceQC)
library(fastqcr)
library(readr)
sra <- read_csv("./data/000_SraRunTable.txt") %>%
select(Run,`Library Name`)
qc_dir <- "./fastqc/"
fastq_dir <- "./data/020_fastq_by_identifier"
for (dir in list.dirs(fastq_dir)){
fastqc(dir,qc.dir=qc_dir)
}
identifiers <- read_csv("./data/000_ref/hgRNA_identifiers.csv")
all_length <- c(21,25,30,35)
for (l in all_length) {
ref <- read_lines(sprintf("./data/000_ref/L%s.txt",l))
refseq <- ref[1]
regions_str <- strsplit(ref[2:length(ref)],split=" ")
regions <- do.call(rbind,regions_str) %>%
as.data.frame() %>%
setNames(c("region","start","end")) %>%
mutate(start=strtoi(.data$start),
end=strtoi(.data$end)) %>%
mutate(region=as.character(.data$region))
# tmp <- list(refseq=refseq,regions=regions)
# plot_construct(tmp)
L_identifiers <- filter(identifiers,Length>=(l-1)&Length<=(l+1))
for (i in 1:nrow(L_identifiers)) {
identifier <- as.character(L_identifiers[i,"Identifier (ID)"])
spacer <- as.character(L_identifiers[i,"Spacer regions (TSS to PAM)"])
spacer_start <- regions[regions$region=="spacer","start"]
spacer_end <- regions[regions$region=="spacer","end"]
ref_id_seq <- paste(substr(refseq,start=1,stop=spacer_start-1),spacer,
substr(refseq,start=spacer_end+1,stop=nchar(refseq)),sep="")
# tmp <- list(refseq=ref_id_seq,regions=regions)
# plot_construct(tmp)
out_file <- sprintf("./data/000_ref/L%s_%s.txt",l,identifier)
write(paste(c(ref_id_seq,ref[2:3]),sep="\n"),
out_file)}}
for (f in list.files(fastq_dir,recursive=TRUE)) {
tmp <- strsplit(f,"/")[[1]][2]
tmp <- strsplit(file_path_sans_ext(tmp),split="_")[[1]]
sra <- tmp[1]
identifier <- tmp[2]
input_file <- sprintf("%s/%s",fastq_dir,f)
ref_file <- list.files("./data/000_ref/",pattern=identifier)
ref_file <- sprintf("./data/000_ref/%s",ref_file)
output_file <- sprintf("./data/030.1_alignment/%s_%s.txt",sra,identifier)
sequence_alignment(input_file=input_file,ref_file=ref_file,
output_file=output_file)}
for (input_file in list.files("./data/030.1_alignment/")) {
print(input_file)
aligned_reads <- read_tsv(sprintf("./data/030.1_alignment/%s",input_file))
if (nrow(aligned_reads)>0) {
traceQC_input <- list(aligned_reads=aligned_reads)
mutation_event <- seq_to_character(traceQC_input,ncores=1,
use_CPM=FALSE,alignment_score_cutoff=-Inf,
abundance_cutoff=0)
write_tsv(mutation_event,sprintf("./data/030.2_traceQC_obj/%s",input_file))}
}
for (ref_file in list.files("./data/000_ref/",pattern="L[0-9][0-9]_")) {
alignment_threshold <- sequence_permutation(ref_file=sprintf("./data/000_ref/%s",ref_file))
write_tsv(alignment_threshold,sprintf("./data/030.3_alignment_threshold/alignment_threshold_%s",ref_file))}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/general_use_functions.R
\name{.ls.objects}
\alias{.ls.objects}
\title{List objects + their sizes:}
\usage{
.ls.objects(pos = 1, pattern, order.by, decreasing = FALSE,
head = FALSE, n = 5)
}
\description{
https://stackoverflow.com/questions/1358003/
}
|
/epimapAUX/man/dot-ls.objects.Rd
|
no_license
|
cboix/EPIMAP_ANALYSIS
|
R
| false
| true
| 331
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/general_use_functions.R
\name{.ls.objects}
\alias{.ls.objects}
\title{List objects + their sizes:}
\usage{
.ls.objects(pos = 1, pattern, order.by, decreasing = FALSE,
head = FALSE, n = 5)
}
\description{
https://stackoverflow.com/questions/1358003/
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/equilibrium_catch.R
\name{equilibrium_catch}
\alias{equilibrium_catch}
\title{Extract equilibrium catch}
\usage{
equilibrium_catch(SS_Dir, Fishery)
}
\description{
\code{equilibrium_catch} This function extracts the expected equilibrium catch
}
|
/man/equilibrium_catch.Rd
|
no_license
|
HaikunXu/IATTCassessment
|
R
| false
| true
| 323
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/equilibrium_catch.R
\name{equilibrium_catch}
\alias{equilibrium_catch}
\title{Extract equilibrium catch}
\usage{
equilibrium_catch(SS_Dir, Fishery)
}
\description{
\code{equilibrium_catch} This function extracts the expected equilibrium catch
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bgx.model.dt.tree.R
\name{bgx.model.dt.tree}
\alias{bgx.model.dt.tree}
\title{Parse a boosted tree model text dump}
\usage{
bgx.model.dt.tree(feature_names = NULL, model = NULL, text = NULL,
trees = NULL, use_int_id = FALSE, ...)
}
\arguments{
\item{feature_names}{character vector of feature names. If the model already
contains feature names, those would be used when \code{feature_names=NULL} (default value).
Non-null \code{feature_names} could be provided to override those in the model.}
\item{model}{object of class \code{bgx.Booster}}
\item{text}{\code{character} vector previously generated by the \code{bgx.dump}
function (where parameter \code{with_stats = TRUE} should have been set).
\code{text} takes precedence over \code{model}.}
\item{trees}{an integer vector of tree indices that should be parsed.
If set to \code{NULL}, all trees of the model are parsed.
It could be useful, e.g., in multiclass classification to get only
the trees of one certain class. IMPORTANT: the tree index in tsoobgx models
is zero-based (e.g., use \code{trees = 0:4} for first 5 trees).}
\item{use_int_id}{a logical flag indicating whether nodes in columns "Yes", "No", "Missing" should be
represented as integers (when FALSE) or as "Tree-Node" character strings (when FALSE).}
\item{...}{currently not used.}
}
\value{
A \code{data.table} with detailed information about model trees' nodes.
The columns of the \code{data.table} are:
\itemize{
\item \code{Tree}: integer ID of a tree in a model (zero-based index)
\item \code{Node}: integer ID of a node in a tree (zero-based index)
\item \code{ID}: character identifier of a node in a model (only when \code{use_int_id=FALSE})
\item \code{Feature}: for a branch node, it's a feature id or name (when available);
for a leaf note, it simply labels it as \code{'Leaf'}
\item \code{Split}: location of the split for a branch node (split condition is always "less than")
\item \code{Yes}: ID of the next node when the split condition is met
\item \code{No}: ID of the next node when the split condition is not met
\item \code{Missing}: ID of the next node when branch value is missing
\item \code{Quality}: either the split gain (change in loss) or the leaf value
\item \code{Cover}: metric related to the number of observation either seen by a split
or collected by a leaf during training.
}
When \code{use_int_id=FALSE}, columns "Yes", "No", and "Missing" point to model-wide node identifiers
in the "ID" column. When \code{use_int_id=TRUE}, those columns point to node identifiers from
the corresponding trees in the "Node" column.
}
\description{
Parse a boosted tree model text dump into a \code{data.table} structure.
}
\examples{
# Basic use:
data(agaricus.train, package='tsoobgx')
bst <- tsoobgx(data = agaricus.train$data, label = agaricus.train$label, max_depth = 2,
eta = 1, nthread = 2, nrounds = 2,objective = "binary:logistic")
(dt <- bgx.model.dt.tree(colnames(agaricus.train$data), bst))
# This bst model already has feature_names stored with it, so those would be used when
# feature_names is not set:
(dt <- bgx.model.dt.tree(model = bst))
# How to match feature names of splits that are following a current 'Yes' branch:
merge(dt, dt[, .(ID, Y.Feature=Feature)], by.x='Yes', by.y='ID', all.x=TRUE)[order(Tree,Node)]
}
|
/man/bgx.model.dt.tree.Rd
|
permissive
|
nalzok/tsoobgx
|
R
| false
| true
| 3,439
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bgx.model.dt.tree.R
\name{bgx.model.dt.tree}
\alias{bgx.model.dt.tree}
\title{Parse a boosted tree model text dump}
\usage{
bgx.model.dt.tree(feature_names = NULL, model = NULL, text = NULL,
trees = NULL, use_int_id = FALSE, ...)
}
\arguments{
\item{feature_names}{character vector of feature names. If the model already
contains feature names, those would be used when \code{feature_names=NULL} (default value).
Non-null \code{feature_names} could be provided to override those in the model.}
\item{model}{object of class \code{bgx.Booster}}
\item{text}{\code{character} vector previously generated by the \code{bgx.dump}
function (where parameter \code{with_stats = TRUE} should have been set).
\code{text} takes precedence over \code{model}.}
\item{trees}{an integer vector of tree indices that should be parsed.
If set to \code{NULL}, all trees of the model are parsed.
It could be useful, e.g., in multiclass classification to get only
the trees of one certain class. IMPORTANT: the tree index in tsoobgx models
is zero-based (e.g., use \code{trees = 0:4} for first 5 trees).}
\item{use_int_id}{a logical flag indicating whether nodes in columns "Yes", "No", "Missing" should be
represented as integers (when FALSE) or as "Tree-Node" character strings (when FALSE).}
\item{...}{currently not used.}
}
\value{
A \code{data.table} with detailed information about model trees' nodes.
The columns of the \code{data.table} are:
\itemize{
\item \code{Tree}: integer ID of a tree in a model (zero-based index)
\item \code{Node}: integer ID of a node in a tree (zero-based index)
\item \code{ID}: character identifier of a node in a model (only when \code{use_int_id=FALSE})
\item \code{Feature}: for a branch node, it's a feature id or name (when available);
for a leaf note, it simply labels it as \code{'Leaf'}
\item \code{Split}: location of the split for a branch node (split condition is always "less than")
\item \code{Yes}: ID of the next node when the split condition is met
\item \code{No}: ID of the next node when the split condition is not met
\item \code{Missing}: ID of the next node when branch value is missing
\item \code{Quality}: either the split gain (change in loss) or the leaf value
\item \code{Cover}: metric related to the number of observation either seen by a split
or collected by a leaf during training.
}
When \code{use_int_id=FALSE}, columns "Yes", "No", and "Missing" point to model-wide node identifiers
in the "ID" column. When \code{use_int_id=TRUE}, those columns point to node identifiers from
the corresponding trees in the "Node" column.
}
\description{
Parse a boosted tree model text dump into a \code{data.table} structure.
}
\examples{
# Basic use:
data(agaricus.train, package='tsoobgx')
bst <- tsoobgx(data = agaricus.train$data, label = agaricus.train$label, max_depth = 2,
eta = 1, nthread = 2, nrounds = 2,objective = "binary:logistic")
(dt <- bgx.model.dt.tree(colnames(agaricus.train$data), bst))
# This bst model already has feature_names stored with it, so those would be used when
# feature_names is not set:
(dt <- bgx.model.dt.tree(model = bst))
# How to match feature names of splits that are following a current 'Yes' branch:
merge(dt, dt[, .(ID, Y.Feature=Feature)], by.x='Yes', by.y='ID', all.x=TRUE)[order(Tree,Node)]
}
|
setup(options(lifecycle_verbosity = "quiet"))
teardown(options(lifecycle_verbosity = NULL))
test_that("combine handles NULL (#1596, #3365)", {
expect_equal(combine(list(NULL, 1, 2)), c(1, 2))
expect_equal(combine(list(1, NULL, 2)), c(1, 2))
expect_equal(combine(list(1, 2, NULL)), c(1, 2))
expect_equal(combine(), logical())
expect_equal(combine(list(NULL)), logical())
expect_equal(combine(list(NULL, NULL), list(NULL)), list(NULL, NULL, NULL))
expect_equal(combine(NULL, list(NULL, NULL)), list(NULL, NULL))
})
test_that("combine works with input that used to fail (#1780)", {
no <- list(alpha = letters[1:3], omega = letters[24:26])
expect_equal(combine(no), unlist(no, use.names = FALSE))
})
test_that("combine works with NA and logical (#2203)", {
# NA first
expected_result <- c(NA, TRUE, FALSE, NA, TRUE)
works1 <- combine(list(NA, TRUE, FALSE, NA, TRUE))
expect_equal(works1, expected_result)
# NA length == 1
expected_result <- c(TRUE, FALSE, NA, TRUE)
works1 <- combine(list(TRUE, FALSE, NA, TRUE))
expect_equal(works1, expected_result)
# NA length > 1
expected_result <- c(TRUE, FALSE, NA, NA, TRUE)
works3 <- combine(list(TRUE, FALSE, c(NA, NA), TRUE))
expect_equal(works3, expected_result)
})
test_that("combine works with NA and integers (#2203)", {
works <- combine(list(1L, 2L, NA, 4L))
expect_equal(works, c(1L, 2L, NA, 4L))
works <- combine(list(1L, 2L, c(NA, NA), 4L))
expect_equal(works, c(1L, 2L, NA, NA, 4L))
})
test_that("combine works with NA and factors (#2203)", {
# NA first
fac <- factor(c("a", "c", NA, "b"), levels = letters[1:3])
expected_result <- fac[c(3, 1, 3, 2)]
works1 <- combine(list(NA, fac[1], NA, fac[2]))
expect_equal(works1, expected_result)
# NA length == 1
expected_result <- fac
works1 <- combine(list(fac[1], fac[2], fac[3], fac[4]))
expect_equal(works1, expected_result)
works2 <- combine(list(fac[1], fac[2], NA, fac[4]))
expect_equal(works2, expected_result)
# NA length > 1
expected_result <- fac[c(1, 2, 3, 3, 4)]
works3 <- combine(list(fac[1], fac[2], fac[c(3, 3)], fac[4]))
expect_equal(works3, expected_result)
works4 <- combine(list(fac[1], fac[2], c(NA, NA), fac[4]))
expect_equal(works4, expected_result)
})
test_that("combine works with NA and double (#2203)", {
# NA first
works <- combine(list(NA, 1.5, 2.5, NA, 4.5))
expect_equal(works, c(NA, 1.5, 2.5, NA, 4.5))
# NA length 1
works <- combine(list(1.5, 2.5, NA, 4.5))
expect_equal(works, c(1.5, 2.5, NA, 4.5))
# NA length > 1
works <- combine(list(1.5, 2.5, c(NA, NA), 4.5))
expect_equal(works, c(1.5, 2.5, NA, NA, 4.5))
})
test_that("combine works with NA and characters (#2203)", {
# NA first
works <- combine(list(NA, "a", "b", "c", NA, "e"))
expect_equal(works, c(NA, "a", "b", "c", NA, "e"))
# NA length 1
works <- combine(list("a", "b", "c", NA, "e"))
expect_equal(works, c("a", "b", "c", NA, "e"))
# NA length > 1
works <- combine(list("a", "b", "c", c(NA, NA), "e"))
expect_equal(works, c("a", "b", "c", NA, NA, "e"))
})
test_that("combine works with NA and POSIXct (#2203)", {
# NA first
works <- combine(list(
NA, as.POSIXct("2010-01-01"), as.POSIXct("2010-01-02"),
NA, as.POSIXct("2010-01-04")
))
expect_equal(works, c(as.POSIXct(c(
NA, "2010-01-01", "2010-01-02",
NA, "2010-01-04"
))))
# NA length 1
works <- combine(list(
as.POSIXct("2010-01-01"), as.POSIXct("2010-01-02"),
NA, as.POSIXct("2010-01-04")
))
expect_equal(works, c(as.POSIXct(c(
"2010-01-01", "2010-01-02",
NA, "2010-01-04"
))))
# NA length > 1
works <- combine(list(
as.POSIXct("2010-01-01"), as.POSIXct("2010-01-02"),
c(NA, NA), as.POSIXct("2010-01-04")
))
expect_equal(works, c(as.POSIXct(c(
"2010-01-01", "2010-01-02",
NA, NA, "2010-01-04"
))))
})
test_that("combine works with NA and Date (#2203)", {
# NA first
expected_result <- as.Date("2010-01-01") + c(NA, 1, 2, NA, 4)
expect_equal(combine(as.list(expected_result)), expected_result)
# NA length == 1
expected_result <- c(as.Date(c("2010-01-01", "2010-01-02", NA, "2010-01-04")))
works1 <- combine(list(
as.Date("2010-01-01"), as.Date("2010-01-02"),
as.Date(NA), as.Date("2010-01-04")
))
expect_equal(works1, expected_result)
works2 <- combine(list(
as.Date("2010-01-01"), as.Date("2010-01-02"),
NA, as.Date("2010-01-04")
))
expect_equal(works2, expected_result)
# NA length > 1
expected_result <- as.Date("2010-01-01") + c(0, 1, NA, NA, 3)
works1 <- combine(split(expected_result, c(1, 2, 3, 3, 4)))
expect_equal(works1, expected_result)
works2 <- combine(list(
as.Date("2010-01-01"), as.Date("2010-01-02"),
c(NA, NA),
as.Date("2010-01-04")
))
expect_equal(works2, expected_result)
})
test_that("combine works with NA and complex (#2203)", {
# NA first
expected_result <- c(NA, 1 + 2i)
works1 <- combine(list(NA, 1 + 2i))
expect_equal(works1, expected_result)
# NA length == 1
expected_result <- c(1, 2, NA, 4) + 1i
expect_equal(combine(as.list(expected_result)), expected_result)
works2 <- combine(list(1 + 1i, 2 + 1i, NA, 4 + 1i))
expect_equal(works2, expected_result)
# NA length > 1
expected_result <- c(1, 2, NA, NA, 4) + 1i
expect_equal(
combine(split(expected_result, c(1, 2, 3, 3, 4))),
expected_result
)
works3 <- combine(list(1 + 1i, 2 + 1i, c(NA, NA), 4 + 1i))
expect_equal(works3, expected_result)
})
test_that("combine works with difftime", {
expect_equal(
combine(as.difftime(1, units = "mins"), as.difftime(1, units = "hours")),
as.difftime(c(60, 3600), units = "secs")
)
expect_equal(
combine(as.difftime(1, units = "secs"), as.difftime(1, units = "secs")),
as.difftime(c(1, 1), units = "secs")
)
expect_equal(
combine(as.difftime(1, units = "days"), as.difftime(1, units = "secs")),
as.difftime(c(24 * 60 * 60, 1), units = "secs")
)
expect_equal(
combine(as.difftime(2, units = "weeks"), as.difftime(1, units = "secs")),
as.difftime(c(2 * 7 * 24 * 60 * 60, 1), units = "secs")
)
expect_equal(
combine(as.difftime(2, units = "weeks"), as.difftime(3, units = "weeks")),
as.difftime(c(2, 3), units = "weeks")
)
})
test_that("combine uses tidy dots (#3407)", {
chunks <- list(1,2,3)
expect_equal(combine(!!!chunks), c(1,2,3))
})
# Errors ------------------------------------------------------------------
test_that("combine() gives meaningful error messages", {
verify_output(test_path("test-deprec-combine-errors.txt"), {
combine("a", 1)
combine(factor("a"), 1L)
})
})
|
/tests/testthat/test-deprec-combine.R
|
permissive
|
earowang/dplyr
|
R
| false
| false
| 6,642
|
r
|
setup(options(lifecycle_verbosity = "quiet"))
teardown(options(lifecycle_verbosity = NULL))
test_that("combine handles NULL (#1596, #3365)", {
expect_equal(combine(list(NULL, 1, 2)), c(1, 2))
expect_equal(combine(list(1, NULL, 2)), c(1, 2))
expect_equal(combine(list(1, 2, NULL)), c(1, 2))
expect_equal(combine(), logical())
expect_equal(combine(list(NULL)), logical())
expect_equal(combine(list(NULL, NULL), list(NULL)), list(NULL, NULL, NULL))
expect_equal(combine(NULL, list(NULL, NULL)), list(NULL, NULL))
})
test_that("combine works with input that used to fail (#1780)", {
no <- list(alpha = letters[1:3], omega = letters[24:26])
expect_equal(combine(no), unlist(no, use.names = FALSE))
})
test_that("combine works with NA and logical (#2203)", {
# NA first
expected_result <- c(NA, TRUE, FALSE, NA, TRUE)
works1 <- combine(list(NA, TRUE, FALSE, NA, TRUE))
expect_equal(works1, expected_result)
# NA length == 1
expected_result <- c(TRUE, FALSE, NA, TRUE)
works1 <- combine(list(TRUE, FALSE, NA, TRUE))
expect_equal(works1, expected_result)
# NA length > 1
expected_result <- c(TRUE, FALSE, NA, NA, TRUE)
works3 <- combine(list(TRUE, FALSE, c(NA, NA), TRUE))
expect_equal(works3, expected_result)
})
test_that("combine works with NA and integers (#2203)", {
works <- combine(list(1L, 2L, NA, 4L))
expect_equal(works, c(1L, 2L, NA, 4L))
works <- combine(list(1L, 2L, c(NA, NA), 4L))
expect_equal(works, c(1L, 2L, NA, NA, 4L))
})
test_that("combine works with NA and factors (#2203)", {
# NA first
fac <- factor(c("a", "c", NA, "b"), levels = letters[1:3])
expected_result <- fac[c(3, 1, 3, 2)]
works1 <- combine(list(NA, fac[1], NA, fac[2]))
expect_equal(works1, expected_result)
# NA length == 1
expected_result <- fac
works1 <- combine(list(fac[1], fac[2], fac[3], fac[4]))
expect_equal(works1, expected_result)
works2 <- combine(list(fac[1], fac[2], NA, fac[4]))
expect_equal(works2, expected_result)
# NA length > 1
expected_result <- fac[c(1, 2, 3, 3, 4)]
works3 <- combine(list(fac[1], fac[2], fac[c(3, 3)], fac[4]))
expect_equal(works3, expected_result)
works4 <- combine(list(fac[1], fac[2], c(NA, NA), fac[4]))
expect_equal(works4, expected_result)
})
test_that("combine works with NA and double (#2203)", {
# NA first
works <- combine(list(NA, 1.5, 2.5, NA, 4.5))
expect_equal(works, c(NA, 1.5, 2.5, NA, 4.5))
# NA length 1
works <- combine(list(1.5, 2.5, NA, 4.5))
expect_equal(works, c(1.5, 2.5, NA, 4.5))
# NA length > 1
works <- combine(list(1.5, 2.5, c(NA, NA), 4.5))
expect_equal(works, c(1.5, 2.5, NA, NA, 4.5))
})
test_that("combine works with NA and characters (#2203)", {
# NA first
works <- combine(list(NA, "a", "b", "c", NA, "e"))
expect_equal(works, c(NA, "a", "b", "c", NA, "e"))
# NA length 1
works <- combine(list("a", "b", "c", NA, "e"))
expect_equal(works, c("a", "b", "c", NA, "e"))
# NA length > 1
works <- combine(list("a", "b", "c", c(NA, NA), "e"))
expect_equal(works, c("a", "b", "c", NA, NA, "e"))
})
test_that("combine works with NA and POSIXct (#2203)", {
# NA first
works <- combine(list(
NA, as.POSIXct("2010-01-01"), as.POSIXct("2010-01-02"),
NA, as.POSIXct("2010-01-04")
))
expect_equal(works, c(as.POSIXct(c(
NA, "2010-01-01", "2010-01-02",
NA, "2010-01-04"
))))
# NA length 1
works <- combine(list(
as.POSIXct("2010-01-01"), as.POSIXct("2010-01-02"),
NA, as.POSIXct("2010-01-04")
))
expect_equal(works, c(as.POSIXct(c(
"2010-01-01", "2010-01-02",
NA, "2010-01-04"
))))
# NA length > 1
works <- combine(list(
as.POSIXct("2010-01-01"), as.POSIXct("2010-01-02"),
c(NA, NA), as.POSIXct("2010-01-04")
))
expect_equal(works, c(as.POSIXct(c(
"2010-01-01", "2010-01-02",
NA, NA, "2010-01-04"
))))
})
test_that("combine works with NA and Date (#2203)", {
# NA first
expected_result <- as.Date("2010-01-01") + c(NA, 1, 2, NA, 4)
expect_equal(combine(as.list(expected_result)), expected_result)
# NA length == 1
expected_result <- c(as.Date(c("2010-01-01", "2010-01-02", NA, "2010-01-04")))
works1 <- combine(list(
as.Date("2010-01-01"), as.Date("2010-01-02"),
as.Date(NA), as.Date("2010-01-04")
))
expect_equal(works1, expected_result)
works2 <- combine(list(
as.Date("2010-01-01"), as.Date("2010-01-02"),
NA, as.Date("2010-01-04")
))
expect_equal(works2, expected_result)
# NA length > 1
expected_result <- as.Date("2010-01-01") + c(0, 1, NA, NA, 3)
works1 <- combine(split(expected_result, c(1, 2, 3, 3, 4)))
expect_equal(works1, expected_result)
works2 <- combine(list(
as.Date("2010-01-01"), as.Date("2010-01-02"),
c(NA, NA),
as.Date("2010-01-04")
))
expect_equal(works2, expected_result)
})
test_that("combine works with NA and complex (#2203)", {
# NA first
expected_result <- c(NA, 1 + 2i)
works1 <- combine(list(NA, 1 + 2i))
expect_equal(works1, expected_result)
# NA length == 1
expected_result <- c(1, 2, NA, 4) + 1i
expect_equal(combine(as.list(expected_result)), expected_result)
works2 <- combine(list(1 + 1i, 2 + 1i, NA, 4 + 1i))
expect_equal(works2, expected_result)
# NA length > 1
expected_result <- c(1, 2, NA, NA, 4) + 1i
expect_equal(
combine(split(expected_result, c(1, 2, 3, 3, 4))),
expected_result
)
works3 <- combine(list(1 + 1i, 2 + 1i, c(NA, NA), 4 + 1i))
expect_equal(works3, expected_result)
})
test_that("combine works with difftime", {
expect_equal(
combine(as.difftime(1, units = "mins"), as.difftime(1, units = "hours")),
as.difftime(c(60, 3600), units = "secs")
)
expect_equal(
combine(as.difftime(1, units = "secs"), as.difftime(1, units = "secs")),
as.difftime(c(1, 1), units = "secs")
)
expect_equal(
combine(as.difftime(1, units = "days"), as.difftime(1, units = "secs")),
as.difftime(c(24 * 60 * 60, 1), units = "secs")
)
expect_equal(
combine(as.difftime(2, units = "weeks"), as.difftime(1, units = "secs")),
as.difftime(c(2 * 7 * 24 * 60 * 60, 1), units = "secs")
)
expect_equal(
combine(as.difftime(2, units = "weeks"), as.difftime(3, units = "weeks")),
as.difftime(c(2, 3), units = "weeks")
)
})
test_that("combine uses tidy dots (#3407)", {
chunks <- list(1,2,3)
expect_equal(combine(!!!chunks), c(1,2,3))
})
# Errors ------------------------------------------------------------------
test_that("combine() gives meaningful error messages", {
verify_output(test_path("test-deprec-combine-errors.txt"), {
combine("a", 1)
combine(factor("a"), 1L)
})
})
|
#' Pipe sequence data
#'
#' The function sequence of all of the packages using magrittr pipes.
#' @format a list per source file of each magrittr chain found, with character
#' vector elements of the function names in the chain.
"pipes"
|
/R/pipes.R
|
no_license
|
jimhester/predpipe
|
R
| false
| false
| 237
|
r
|
#' Pipe sequence data
#'
#' The function sequence of all of the packages using magrittr pipes.
#' @format a list per source file of each magrittr chain found, with character
#' vector elements of the function names in the chain.
"pipes"
|
\name{ifreq}
\alias{ifreq}
\title{Instantaneous frequency}
\description{
This function returns the instantaneous frequency (and/or phase) of a time wave
through the computation of the analytic signal (Hilbert transform).
}
\usage{
ifreq(wave, f, phase = FALSE, threshold = NULL,
plot = TRUE, xlab = "Time (s)", ylab = NULL,
ylim = NULL, type = "l", ...)
}
\arguments{
\item{wave}{an R object.}
\item{f}{sampling frequency of \code{wave} (in Hz). Does not need to be specified if embedded in \code{wave}.}
\item{phase}{if \code{TRUE} and \code{plot} is also \code{TRUE}
plots the instantaneous phase instead of the instantaneous frequency.}
\item{threshold}{amplitude threshold for signal detection (in \% ).}
\item{plot}{logical, if \code{TRUE} plots the instantaneous frequency or phase
against time (by default \code{TRUE}).}
\item{xlab}{title of the x axis.}
\item{ylab}{title of the y axis.}
\item{ylim}{the range of y values.}
\item{type}{if \code{plot} is \code{TRUE}, type of plot that should be drawn.
See \code{\link{plot}} for details (by default "l" for lines).}
\item{\dots}{other \code{\link{plot}} graphical parameters.}
}
\details{
The instantaneous phase is the argument of the
analytic signal obtained through the Hilbert transform.\cr
The instantaneous phase is then unwrapped and derived against time to
get the instantaneous frequency.\cr
There may be some edge effects at both start and end of the time wave.
}
\value{If \code{plot} is \code{FALSE}, \code{ifreq}
returns a list of two components:
\item{f}{a two-column matrix, the first column corresponding to time in seconds
(\emph{x}-axis) and the second column corresponding to instantaneous
frequency in kHz (\emph{y}-axis).}
\item{p}{a two-column matrix, the first column corresponding to time in seconds
(\emph{x}-axis) and the second column corresponding to wrapped
instantaneous phase in radians (\emph{y}-axis).}
}
\references{Mbu Nyamsi, R. G., Aubin, T. & Bremond, J. C. 1994
On the extraction of some time dependent parameters of an acoustic signal
by means of the analytic signal concept.
Its application to animal sound study. \emph{Bioacoustics}, 5: 187-203.}
\author{Jerome Sueur \email{sueur@mnhn.fr}}
\note{This function is based on the analytic signal obtained with the
Hilbert transform (see \code{\link{hilbert}}).\cr
The function requires the package \pkg{signal}.\cr
The matrix describing the instantaneous phase has one more row than the
one describing the instantaneous frequency.
}
\seealso{\code{\link{hilbert}}, \code{\link{zc}}}
\examples{
# generate a sound with sine and linear frequency modulations
a<-synth(d=1, f=8000, cf=1500, fm=c(200,10,1000,0,0))
# plot on a single graphical device the instantaneous frequency and phase
op<-par(mfrow=c(2,1))
ifreq(a,f=8000,main="Instantaneous frequency")
ifreq(a,f=8000,phase=TRUE,main="Instantaneous phase")
par(op)
}
\keyword{ts}
\keyword{dplot}
|
/man/ifreq.Rd
|
no_license
|
dbs700/seewave
|
R
| false
| false
| 3,000
|
rd
|
\name{ifreq}
\alias{ifreq}
\title{Instantaneous frequency}
\description{
This function returns the instantaneous frequency (and/or phase) of a time wave
through the computation of the analytic signal (Hilbert transform).
}
\usage{
ifreq(wave, f, phase = FALSE, threshold = NULL,
plot = TRUE, xlab = "Time (s)", ylab = NULL,
ylim = NULL, type = "l", ...)
}
\arguments{
\item{wave}{an R object.}
\item{f}{sampling frequency of \code{wave} (in Hz). Does not need to be specified if embedded in \code{wave}.}
\item{phase}{if \code{TRUE} and \code{plot} is also \code{TRUE}
plots the instantaneous phase instead of the instantaneous frequency.}
\item{threshold}{amplitude threshold for signal detection (in \% ).}
\item{plot}{logical, if \code{TRUE} plots the instantaneous frequency or phase
against time (by default \code{TRUE}).}
\item{xlab}{title of the x axis.}
\item{ylab}{title of the y axis.}
\item{ylim}{the range of y values.}
\item{type}{if \code{plot} is \code{TRUE}, type of plot that should be drawn.
See \code{\link{plot}} for details (by default "l" for lines).}
\item{\dots}{other \code{\link{plot}} graphical parameters.}
}
\details{
The instantaneous phase is the argument of the
analytic signal obtained through the Hilbert transform.\cr
The instantaneous phase is then unwrapped and derived against time to
get the instantaneous frequency.\cr
There may be some edge effects at both start and end of the time wave.
}
\value{If \code{plot} is \code{FALSE}, \code{ifreq}
returns a list of two components:
\item{f}{a two-column matrix, the first column corresponding to time in seconds
(\emph{x}-axis) and the second column corresponding to instantaneous
frequency in kHz (\emph{y}-axis).}
\item{p}{a two-column matrix, the first column corresponding to time in seconds
(\emph{x}-axis) and the second column corresponding to wrapped
instantaneous phase in radians (\emph{y}-axis).}
}
\references{Mbu Nyamsi, R. G., Aubin, T. & Bremond, J. C. 1994
On the extraction of some time dependent parameters of an acoustic signal
by means of the analytic signal concept.
Its application to animal sound study. \emph{Bioacoustics}, 5: 187-203.}
\author{Jerome Sueur \email{sueur@mnhn.fr}}
\note{This function is based on the analytic signal obtained with the
Hilbert transform (see \code{\link{hilbert}}).\cr
The function requires the package \pkg{signal}.\cr
The matrix describing the instantaneous phase has one more row than the
one describing the instantaneous frequency.
}
\seealso{\code{\link{hilbert}}, \code{\link{zc}}}
\examples{
# generate a sound with sine and linear frequency modulations
a<-synth(d=1, f=8000, cf=1500, fm=c(200,10,1000,0,0))
# plot on a single graphical device the instantaneous frequency and phase
op<-par(mfrow=c(2,1))
ifreq(a,f=8000,main="Instantaneous frequency")
ifreq(a,f=8000,phase=TRUE,main="Instantaneous phase")
par(op)
}
\keyword{ts}
\keyword{dplot}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_segments.R
\name{get_pw_segments}
\alias{get_pw_segments}
\title{Utility function to get segments (as character strings) from vector with cutpoints}
\usage{
get_pw_segments(x = NULL, cuts, right = FALSE, ordered_results = TRUE)
}
\arguments{
\item{x}{NULL value}
\item{cuts}{Vector with cut points}
\item{right}{Logical}
\item{ordered_results}{Logical}
}
\value{
\code{factor} of segments
}
\description{
Utility function to get segments (as character strings) from vector with cutpoints
}
|
/Rpackages/gemtcPlus/man/get_pw_segments.Rd
|
permissive
|
Diarmuid78/Global-HTA-Evidence-Open
|
R
| false
| true
| 575
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_segments.R
\name{get_pw_segments}
\alias{get_pw_segments}
\title{Utility function to get segments (as character strings) from vector with cutpoints}
\usage{
get_pw_segments(x = NULL, cuts, right = FALSE, ordered_results = TRUE)
}
\arguments{
\item{x}{NULL value}
\item{cuts}{Vector with cut points}
\item{right}{Logical}
\item{ordered_results}{Logical}
}
\value{
\code{factor} of segments
}
\description{
Utility function to get segments (as character strings) from vector with cutpoints
}
|
#' Prediction of Quantiles for Parametric Lifetime Distributions
#'
#' @description
#' This function predicts the quantiles of a parametric lifetime distribution
#' using the (log-)location-scale parameterization.
#'
#' @details
#' For a given set of parameters and specified probabilities the quantiles
#' of the chosen model are determined.
#'
#' @param p A numeric vector of probabilities.
#' @param dist_params A vector of parameters. An overview of the
#' distribution-specific parameters can be found in section 'Distributions'.
#' @param distribution Supposed distribution of the random variable.
#'
#' @return A vector with predicted quantiles.
#'
#' @template dist-params
#'
#' @examples
#' # Example 1 - Predicted quantiles for a two-parameter weibull distribution:
#' quants_weib2 <- predict_quantile(
#' p = c(0.01, 0.1, 0.5),
#' dist_params = c(5, 0.5),
#' distribution = "weibull"
#' )
#'
#' # Example 2 - Predicted quantiles for a three-parameter weibull distribution:
#' quants_weib3 <- predict_quantile(
#' p = c(0.01, 0.1, 0.5),
#' dist_params = c(5, 0.5, 10),
#' distribution = "weibull3"
#' )
#'
#' @md
#'
#' @export
predict_quantile <- function(p,
                             dist_params,
                             distribution = c(
                               "weibull", "lognormal", "loglogistic",
                               "sev", "normal", "logistic",
                               "weibull3", "lognormal3", "loglogistic3",
                               "exponential", "exponential2"
                             )
) {
  distribution <- match.arg(distribution)
  check_dist_params(dist_params, distribution)

  # Resolve the standard (two-parameter, threshold-free) family once.
  std_dist <- std_parametric(distribution)

  # Quantile in the (log-)location-scale parameterization:
  # location + scale * standard-quantile (exponential: scale only).
  q_std <- switch(
    std_dist,
    "weibull" = ,
    "sev" = dist_params[[1]] + dist_params[[2]] * qsev(p),
    "lognormal" = ,
    "normal" = dist_params[[1]] + dist_params[[2]] * stats::qnorm(p),
    "loglogistic" = ,
    "logistic" = dist_params[[1]] + dist_params[[2]] * stats::qlogis(p),
    "exponential" = dist_params[[1]] * stats::qexp(p)
  )

  # Log-location-scale families operate on the log scale; map back.
  q_p <- if (std_dist %in% c("weibull", "lognormal", "loglogistic")) {
    exp(q_std)
  } else {
    q_std
  }

  # Threshold models shift every quantile by the last (threshold) parameter.
  if (has_thres(distribution)) {
    q_p <- q_p + dist_params[[length(dist_params)]]
  }

  q_p
}
#' Prediction of Failure Probabilities for Parametric Lifetime Distributions
#'
#' @description
#' This function predicts the (failure) probabilities of a parametric lifetime
#' distribution using the (log-)location-scale parameterization.
#'
#' @details
#' For a given set of parameters and specified quantiles the probabilities
#' of the chosen model are determined.
#'
#' @inheritParams predict_quantile
#' @param q A numeric vector of quantiles.
#'
#' @return A vector with predicted (failure) probabilities.
#'
#' @template dist-params
#'
#' @examples
#' # Example 1 - Predicted probabilities for a two-parameter weibull distribution:
#' probs_weib2 <- predict_prob(
#' q = c(15, 48, 124),
#' dist_params = c(5, 0.5),
#' distribution = "weibull"
#' )
#'
#' # Example 2 - Predicted quantiles for a three-parameter weibull distribution:
#' probs_weib3 <- predict_prob(
#' q = c(25, 58, 134),
#' dist_params = c(5, 0.5, 10),
#' distribution = "weibull3"
#' )
#'
#' @md
#'
#' @export
predict_prob <- function(q,
                         dist_params,
                         distribution = c(
                           "weibull", "lognormal", "loglogistic",
                           "sev", "normal", "logistic",
                           "weibull3", "lognormal3", "loglogistic3",
                           "exponential", "exponential2"
                         )
) {
  distribution <- match.arg(distribution)
  check_dist_params(dist_params, distribution)

  # Map the quantiles onto the standardized (location-scale) scale, then
  # evaluate the standard CDF of the matching parametric family.
  z <- standardize(
    x = q, dist_params = dist_params, distribution = distribution
  )
  p_std(z, std_parametric(distribution))
}
|
/R/predict.R
|
no_license
|
Tim-TU/weibulltools
|
R
| false
| false
| 4,102
|
r
|
#' Prediction of Quantiles for Parametric Lifetime Distributions
#'
#' @description
#' This function predicts the quantiles of a parametric lifetime distribution
#' using the (log-)location-scale parameterization.
#'
#' @details
#' For a given set of parameters and specified probabilities the quantiles
#' of the chosen model are determined.
#'
#' @param p A numeric vector of probabilities.
#' @param dist_params A vector of parameters. An overview of the
#' distribution-specific parameters can be found in section 'Distributions'.
#' @param distribution Supposed distribution of the random variable.
#'
#' @return A vector with predicted quantiles.
#'
#' @template dist-params
#'
#' @examples
#' # Example 1 - Predicted quantiles for a two-parameter weibull distribution:
#' quants_weib2 <- predict_quantile(
#' p = c(0.01, 0.1, 0.5),
#' dist_params = c(5, 0.5),
#' distribution = "weibull"
#' )
#'
#' # Example 2 - Predicted quantiles for a three-parameter weibull distribution:
#' quants_weib3 <- predict_quantile(
#' p = c(0.01, 0.1, 0.5),
#' dist_params = c(5, 0.5, 10),
#' distribution = "weibull3"
#' )
#'
#' @md
#'
#' @export
predict_quantile <- function(p,
                             dist_params,
                             distribution = c(
                               "weibull", "lognormal", "loglogistic",
                               "sev", "normal", "logistic",
                               "weibull3", "lognormal3", "loglogistic3",
                               "exponential", "exponential2"
                             )
) {
  distribution <- match.arg(distribution)
  check_dist_params(dist_params, distribution)

  # Resolve the standard (two-parameter, threshold-free) family once.
  std_dist <- std_parametric(distribution)

  # Quantile in the (log-)location-scale parameterization:
  # location + scale * standard-quantile (exponential: scale only).
  q_std <- switch(
    std_dist,
    "weibull" = ,
    "sev" = dist_params[[1]] + dist_params[[2]] * qsev(p),
    "lognormal" = ,
    "normal" = dist_params[[1]] + dist_params[[2]] * stats::qnorm(p),
    "loglogistic" = ,
    "logistic" = dist_params[[1]] + dist_params[[2]] * stats::qlogis(p),
    "exponential" = dist_params[[1]] * stats::qexp(p)
  )

  # Log-location-scale families operate on the log scale; map back.
  q_p <- if (std_dist %in% c("weibull", "lognormal", "loglogistic")) {
    exp(q_std)
  } else {
    q_std
  }

  # Threshold models shift every quantile by the last (threshold) parameter.
  if (has_thres(distribution)) {
    q_p <- q_p + dist_params[[length(dist_params)]]
  }

  q_p
}
#' Prediction of Failure Probabilities for Parametric Lifetime Distributions
#'
#' @description
#' This function predicts the (failure) probabilities of a parametric lifetime
#' distribution using the (log-)location-scale parameterization.
#'
#' @details
#' For a given set of parameters and specified quantiles the probabilities
#' of the chosen model are determined.
#'
#' @inheritParams predict_quantile
#' @param q A numeric vector of quantiles.
#'
#' @return A vector with predicted (failure) probabilities.
#'
#' @template dist-params
#'
#' @examples
#' # Example 1 - Predicted probabilities for a two-parameter weibull distribution:
#' probs_weib2 <- predict_prob(
#' q = c(15, 48, 124),
#' dist_params = c(5, 0.5),
#' distribution = "weibull"
#' )
#'
#' # Example 2 - Predicted quantiles for a three-parameter weibull distribution:
#' probs_weib3 <- predict_prob(
#' q = c(25, 58, 134),
#' dist_params = c(5, 0.5, 10),
#' distribution = "weibull3"
#' )
#'
#' @md
#'
#' @export
predict_prob <- function(q,
                         dist_params,
                         distribution = c(
                           "weibull", "lognormal", "loglogistic",
                           "sev", "normal", "logistic",
                           "weibull3","lognormal3", "loglogistic3",
                           "exponential", "exponential2"
                         )
) {
  distribution <- match.arg(distribution)
  check_dist_params(dist_params, distribution)

  # Map the quantiles onto the standardized (location-scale) scale, then
  # evaluate the standard CDF of the matching parametric family.
  z <- standardize(
    x = q, dist_params = dist_params, distribution = distribution
  )
  p_std(z, std_parametric(distribution))
}
|
# Fuzz/regression fixture: a 1x7 matrix of extreme doubles (A) and a 1x1 zero
# matrix (B). Judging by the surrounding path ("valgrind_files"), this appears
# to be an auto-generated crash-regression input — TODO confirm.
testlist <- list(A = structure(c(2.46058189628247e+77, 9.53818252170339e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0), .Dim = c(1L, 7L)), B = structure(0, .Dim = c(1L, 1L)))
# Invoke the internal routine with the fixture and print the result structure.
result <- do.call(multivariance:::match_rows,testlist)
str(result)
|
/multivariance/inst/testfiles/match_rows/AFL_match_rows/match_rows_valgrind_files/1613112158-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false
| false
| 257
|
r
|
# Fuzz/regression fixture: a 1x7 matrix of extreme doubles (A) and a 1x1 zero
# matrix (B). Judging by the surrounding path ("valgrind_files"), this appears
# to be an auto-generated crash-regression input — TODO confirm.
testlist <- list(A = structure(c(2.46058189628247e+77, 9.53818252170339e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0), .Dim = c(1L, 7L)), B = structure(0, .Dim = c(1L, 1L)))
# Invoke the internal routine with the fixture and print the result structure.
result <- do.call(multivariance:::match_rows,testlist)
str(result)
|
ABOUT CYNTHIA
|
/ABOUT CYNTHIA.R
|
no_license
|
cynthiacho/intro_git_live
|
R
| false
| false
| 13
|
r
|
ABOUT CYNTHIA
|
# Lab 1 exercise
install.packages("gcookbook")
install.packages("ggplot2")
plot(mtcars$wt, mtcars$mpg)
library(ggplot2)
qplot(mtcars$wt, mtcars$mpg)
qplot(wt, mpg, data=mtcars)
ggplot(mtcars, aes(x = wt, y= mpg)) + geom_point()
plot(pressure$temperature, pressure$pressure, type = "l")
points(pressure$temperature, pressure$pressure)
lines(pressure$temperature, pressure$pressure/2, col = "red")
points(pressure$temperature, pressure$pressure/2, col = "blue")
library(ggplot2)
qplot(pressure$temperature, pressure$pressure, geom="line")
qplot(temperature, pressure, data = pressure, geom = "line")
ggplot(pressure, aes(x=temperature, y=pressure)) + geom_line() + geom_point()
ggplot(pressure, aes(x=temperature, y=pressure)) + geom_line() + geom_point()
# Creating Bar graphs
barplot(BOD$demand, names.arg = BOD$Time)
table(mtcars$cyl)
barplot(table(mtcars$cyl))
qplot(mtcars$cyl)
qplot(factor(mtcars$cyl))
#Bar graph of counts
qplot(factor(cyl), data=mtcars)
ggplot(mtcars, aes(x=factor(cyl))) + geom_bar()
#Creating Histograms using ggplot
hist(mtcars$mpg)
hist(mtcars$mpg, breaks = 10)
hist(mtcars$mpg, breaks = 5)
hist(mtcars$mpg, breaks = 12)
qplot(mpg, data=mtcars, binwidth=4)
ggplot(mtcars, aes(x=mpg)) + geom_histogram(binwidth = 4)
ggplot(mtcars, aes(x=mpg)) + geom_histogram(binwidth = 5)
# Creating Box-plot
plot(ToothGrowth$supp, ToothGrowth$len)
#formula syntax
boxplot(len ~ supp, data = ToothGrowth)
boxplot(len ~ supp + dose, data = ToothGrowth)
# Two vectors
library(ggplot2)
qplot(ToothGrowth$supp, ToothGrowth$len, geom = "boxplot")
qplot(supp, len, data = ToothGrowth, geom = "boxplot")
ggplot(ToothGrowth, aes(x=supp, y=len)) + geom_boxplot()
#use three separate vectors
qplot(interaction(ToothGrowth$supp, ToothGrowth$dose), ToothGrowth$len, geom = "boxplot")
qplot(interaction(supp, dose), len, data = ToothGrowth, geom = "boxplot")
ggplot(ToothGrowth, aes(x=interaction(supp, dose), y=len)) + geom_boxplot()
# Chapter 3 R Graphics
library(gcookbook)
library(ggplot2)
BOD
str(BOD)
ggplot(BOD, aes(x=Time, y=demand)) + geom_bar(stat="identity")
# Convert Time to a discrete (categorical) variable with factor() function.
ggplot(BOD, aes(x=factor(Time), y=demand)) + geom_bar(stat = "identity")
ggplot(BOD, aes(x=factor(Time), y=demand)) + geom_bar(stat = "identity", fill="orange", colour="red")
cabbage_exp
ggplot(cabbage_exp, aes(x=Date, y=Weight, fill=Cultivar)) + geom_bar(stat="identity")
ggplot(diamonds, aes(x=cut)) + geom_bar()
data("diamonds")
diamonds
ggplot(diamonds,aes(x=carat)) + geom_bar()
ggplot(diamonds, aes(x=carat)) + geom_histogram()
# Taking Top 10 States
ups <- subset(uspopchange, rank(Change)>40)
ups
ggplot(ups, aes(x=Abb, y= Change, fill=Region)) + geom_bar(stat = "identity")
ggplot(ups, aes(x=Abb, y=Change, fill=Region)) +geom_bin2d()
ggplot(ups, aes(x=Abb, y=Change, fill=Region)) + geom_col()
# Different coloring scheme to make the bars more appealing
ggplot(ups, aes(x=reorder(Abb,Change), y=Change, fill=Region)) + geom_bar(stat = "identity", colour= "red") +
scale_fill_manual(values=c("#669933", "#FFCC66")) + xlab("US-States")
ggplot(ups, aes(x=reorder(Abb,Change), y=Change, fill=Region)) + geom_bar(stat = "identity", color = "purple") +
scale_fill_manual(values=c("#224455","#DDCC33"))
# Climate: positive/negative temperature anomalies since 1900 (Berkeley source).
library(gcookbook)
# BUG FIX: the original used `source="Berkeley"`, which R parses as a *named
# argument* `source = ("Berkeley" & Year >= 1900)`. subset() has no `source`
# argument, so the whole condition fell into `...` unevaluated and NO row
# filtering happened. `==` makes it the subset condition as intended.
csub <- subset(climate, source == "Berkeley" & Year >= 1900)
csub
# Flag whether each decade-averaged anomaly is non-negative (used for fill).
csub$pos <- csub$Anomaly10y >= 0
csub
ggplot(csub, aes(x=Year, y=Anomaly10y, fill= pos)) + geom_bar(stat = "identity", position = "identity")
ggplot(csub, aes(x=Year, y=Anomaly10y, fill=pos)) + geom_bar(stat="identity", colour="black", size=0.25) +
  scale_fill_manual(values=c("#CCEEFF", "#FFDDDD"), guide=FALSE)
ggplot(pg_mean, aes(x=group, y=weight)) +geom_bar(stat="identity")
# Narrow Bars
ggplot(pg_mean, aes(x=group, y=weight)) +geom_bar(stat="identity", width = 0.5)
# Wider bars, maximum width = 1
ggplot(pg_mean, aes(x=group, y=weight)) +geom_bar(stat = "identity", width = 0.95)
# Different bar widths
ggplot(cabbage_exp, aes(x=Date, y= Weight, fill=Cultivar)) + geom_bar(stat = "identity", width = 0.5, position = "dodge")
ggplot(cabbage_exp, aes(x=Date, y=Weight, fill=Cultivar)) + geom_bar(stat = "identity", width = 0.5, position = position_dodge(0.7))
ggplot(cabbage_exp, aes(x=Date, y=Weight, fill=Cultivar)) + geom_bar(stat = "identity")
cabbage_exp
ggplot(cabbage_exp, aes(x= Date, y= Weight, fill=Cultivar)) + geom_bar(stat = "identity") + guides(fill=guide_legend(reverse = TRUE))
# Adding Lables to your Graphs
ggplot(cabbage_exp, aes(x=interaction(Date,Cultivar), y=Weight)) +geom_bar(stat = "identity") + geom_text(aes(label=Weight),vjust=1.5,colour="white")
# Adjust y limits to be a little higher
ggplot(cabbage_exp, aes(x=interaction(Date, Cultivar), y=Weight)) +
geom_bar(stat="identity") +
geom_text(aes(label=Weight), vjust=-0.2) +
ylim(0, max(cabbage_exp$Weight) * 1.05)
# Map y positions slightly above bar top - y range of plot will auto-adjust
ggplot(cabbage_exp, aes(x=interaction(Date, Cultivar), y=Weight)) +
geom_bar(stat="identity") +
geom_text(aes(y=Weight+0.1, label=Weight))
ggplot(cabbage_exp, aes(x=Date, y=Weight, fill=Cultivar)) +
geom_bar(stat="identity", position="dodge") +
geom_text(aes(label=Weight), vjust=1.5, colour="white", position=position_dodge(.9), size=3)
# make a Cleveland dot plot
#The simplest way to create a dot plot is to use geom_point() function
tophit <- tophitters2001[1:25,] # take top 25 top hitters
tophit
ggplot(tophit, aes(x=avg, y=name)) + geom_point()
tophit[,c("name","lg","avg")]
ggplot(tophit, aes(x=avg, y= reorder(name,avg))) + geom_point(size=3, colour="red") +
theme_bw() +
theme(
panel.grid.major.x = element_blank(),
panel.grid.minor.x = element_blank(),
panel.grid.major.y = element_line(colour ="grey60",linetype="dashed")
)
ggplot(tophit, aes(x=avg, y=reorder(name,avg))) + geom_point(size=2.5, colour="blue") +
theme_classic() +
theme(
panel.grid.major.x = element_blank(),
panel.grid.minor.x = element_blank(),
panel.grid.major.y = element_line(colour = "grey60", linetype = "twodash")
)
# Get the names sorted by lg, then by avg
nameorder <- tophit$name[order(tophit$lg, tophit$avg)]
tophit$name <- factor(tophit$name, levels = nameorder)
ggplot(tophit, aes(x=avg, y=name)) +
geom_segment(aes(yend=name), xend=0, colour="grey70")+
geom_point(size=3, aes(colour=lg)) +
scale_color_brewer(palette="Set1", limits=c("NL","AL")) +
theme_bw() +
theme(
panel.grid.major.y = element_blank(),
legend.position = c(1,0.55),
legend.justification = c(1,0.5)
)
#Generate two seperate graphs distinguished by the limit NL and AL
ggplot(tophit, aes(x=avg, y=name)) +
geom_segment(aes(yend=name), xend=0, colour="grey40") +
geom_point(size=3, aes(colour=lg)) +
scale_color_brewer(palette="Set1", limits=c("NL","AL"), guide=FALSE) +
theme_bw() +
theme(panel.grid.major.y = element_blank()) +
facet_grid(lg ~ ., scales = "free_y", space="free_y")
|
/Labs/Lab 1/lab1_part_VIZ_E.R
|
no_license
|
Dtrain27/DataAnalytics2021_Dominic_Schroeder
|
R
| false
| false
| 7,056
|
r
|
# Lab 1 exercise
install.packages("gcookbook")
install.packages("ggplot2")
plot(mtcars$wt, mtcars$mpg)
library(ggplot2)
qplot(mtcars$wt, mtcars$mpg)
qplot(wt, mpg, data=mtcars)
ggplot(mtcars, aes(x = wt, y= mpg)) + geom_point()
plot(pressure$temperature, pressure$pressure, type = "l")
points(pressure$temperature, pressure$pressure)
lines(pressure$temperature, pressure$pressure/2, col = "red")
points(pressure$temperature, pressure$pressure/2, col = "blue")
library(ggplot2)
qplot(pressure$temperature, pressure$pressure, geom="line")
qplot(temperature, pressure, data = pressure, geom = "line")
ggplot(pressure, aes(x=temperature, y=pressure)) + geom_line() + geom_point()
ggplot(pressure, aes(x=temperature, y=pressure)) + geom_line() + geom_point()
# Creating Bar graphs
barplot(BOD$demand, names.arg = BOD$Time)
table(mtcars$cyl)
barplot(table(mtcars$cyl))
qplot(mtcars$cyl)
qplot(factor(mtcars$cyl))
#Bar graph of counts
qplot(factor(cyl), data=mtcars)
ggplot(mtcars, aes(x=factor(cyl))) + geom_bar()
#Creating Histograms using ggplot
hist(mtcars$mpg)
hist(mtcars$mpg, breaks = 10)
hist(mtcars$mpg, breaks = 5)
hist(mtcars$mpg, breaks = 12)
qplot(mpg, data=mtcars, binwidth=4)
ggplot(mtcars, aes(x=mpg)) + geom_histogram(binwidth = 4)
ggplot(mtcars, aes(x=mpg)) + geom_histogram(binwidth = 5)
# Creating Box-plot
plot(ToothGrowth$supp, ToothGrowth$len)
#formula syntax
boxplot(len ~ supp, data = ToothGrowth)
boxplot(len ~ supp + dose, data = ToothGrowth)
# Two vectors
library(ggplot2)
qplot(ToothGrowth$supp, ToothGrowth$len, geom = "boxplot")
qplot(supp, len, data = ToothGrowth, geom = "boxplot")
ggplot(ToothGrowth, aes(x=supp, y=len)) + geom_boxplot()
#use three separate vectors
qplot(interaction(ToothGrowth$supp, ToothGrowth$dose), ToothGrowth$len, geom = "boxplot")
qplot(interaction(supp, dose), len, data = ToothGrowth, geom = "boxplot")
ggplot(ToothGrowth, aes(x=interaction(supp, dose), y=len)) + geom_boxplot()
# Chapter 3 R Graphics
library(gcookbook)
library(ggplot2)
BOD
str(BOD)
ggplot(BOD, aes(x=Time, y=demand)) + geom_bar(stat="identity")
# Convert Time to a discrete (categorical) variable with factor() function.
ggplot(BOD, aes(x=factor(Time), y=demand)) + geom_bar(stat = "identity")
ggplot(BOD, aes(x=factor(Time), y=demand)) + geom_bar(stat = "identity", fill="orange", colour="red")
cabbage_exp
ggplot(cabbage_exp, aes(x=Date, y=Weight, fill=Cultivar)) + geom_bar(stat="identity")
ggplot(diamonds, aes(x=cut)) + geom_bar()
data("diamonds")
diamonds
ggplot(diamonds,aes(x=carat)) + geom_bar()
ggplot(diamonds, aes(x=carat)) + geom_histogram()
# Taking Top 10 States
ups <- subset(uspopchange, rank(Change)>40)
ups
ggplot(ups, aes(x=Abb, y= Change, fill=Region)) + geom_bar(stat = "identity")
ggplot(ups, aes(x=Abb, y=Change, fill=Region)) +geom_bin2d()
ggplot(ups, aes(x=Abb, y=Change, fill=Region)) + geom_col()
# Different coloring scheme to make the bars more appealing
ggplot(ups, aes(x=reorder(Abb,Change), y=Change, fill=Region)) + geom_bar(stat = "identity", colour= "red") +
scale_fill_manual(values=c("#669933", "#FFCC66")) + xlab("US-States")
ggplot(ups, aes(x=reorder(Abb,Change), y=Change, fill=Region)) + geom_bar(stat = "identity", color = "purple") +
scale_fill_manual(values=c("#224455","#DDCC33"))
# Climate: positive/negative temperature anomalies since 1900 (Berkeley source).
library(gcookbook)
# BUG FIX: the original used `source="Berkeley"`, which R parses as a *named
# argument* `source = ("Berkeley" & Year >= 1900)`. subset() has no `source`
# argument, so the whole condition fell into `...` unevaluated and NO row
# filtering happened. `==` makes it the subset condition as intended.
csub <- subset(climate, source == "Berkeley" & Year >= 1900)
csub
# Flag whether each decade-averaged anomaly is non-negative (used for fill).
csub$pos <- csub$Anomaly10y >= 0
csub
ggplot(csub, aes(x=Year, y=Anomaly10y, fill= pos)) + geom_bar(stat = "identity", position = "identity")
ggplot(csub, aes(x=Year, y=Anomaly10y, fill=pos)) + geom_bar(stat="identity", colour="black", size=0.25) +
  scale_fill_manual(values=c("#CCEEFF", "#FFDDDD"), guide=FALSE)
ggplot(pg_mean, aes(x=group, y=weight)) +geom_bar(stat="identity")
# Narrow Bars
ggplot(pg_mean, aes(x=group, y=weight)) +geom_bar(stat="identity", width = 0.5)
# Wider bars, maximum width = 1
ggplot(pg_mean, aes(x=group, y=weight)) +geom_bar(stat = "identity", width = 0.95)
# Different bar widths
ggplot(cabbage_exp, aes(x=Date, y= Weight, fill=Cultivar)) + geom_bar(stat = "identity", width = 0.5, position = "dodge")
ggplot(cabbage_exp, aes(x=Date, y=Weight, fill=Cultivar)) + geom_bar(stat = "identity", width = 0.5, position = position_dodge(0.7))
ggplot(cabbage_exp, aes(x=Date, y=Weight, fill=Cultivar)) + geom_bar(stat = "identity")
cabbage_exp
ggplot(cabbage_exp, aes(x= Date, y= Weight, fill=Cultivar)) + geom_bar(stat = "identity") + guides(fill=guide_legend(reverse = TRUE))
# Adding Lables to your Graphs
ggplot(cabbage_exp, aes(x=interaction(Date,Cultivar), y=Weight)) +geom_bar(stat = "identity") + geom_text(aes(label=Weight),vjust=1.5,colour="white")
# Adjust y limits to be a little higher
ggplot(cabbage_exp, aes(x=interaction(Date, Cultivar), y=Weight)) +
geom_bar(stat="identity") +
geom_text(aes(label=Weight), vjust=-0.2) +
ylim(0, max(cabbage_exp$Weight) * 1.05)
# Map y positions slightly above bar top - y range of plot will auto-adjust
ggplot(cabbage_exp, aes(x=interaction(Date, Cultivar), y=Weight)) +
geom_bar(stat="identity") +
geom_text(aes(y=Weight+0.1, label=Weight))
ggplot(cabbage_exp, aes(x=Date, y=Weight, fill=Cultivar)) +
geom_bar(stat="identity", position="dodge") +
geom_text(aes(label=Weight), vjust=1.5, colour="white", position=position_dodge(.9), size=3)
# make a Cleveland dot plot
#The simplest way to create a dot plot is to use geom_point() function
tophit <- tophitters2001[1:25,] # take top 25 top hitters
tophit
ggplot(tophit, aes(x=avg, y=name)) + geom_point()
tophit[,c("name","lg","avg")]
ggplot(tophit, aes(x=avg, y= reorder(name,avg))) + geom_point(size=3, colour="red") +
theme_bw() +
theme(
panel.grid.major.x = element_blank(),
panel.grid.minor.x = element_blank(),
panel.grid.major.y = element_line(colour ="grey60",linetype="dashed")
)
ggplot(tophit, aes(x=avg, y=reorder(name,avg))) + geom_point(size=2.5, colour="blue") +
theme_classic() +
theme(
panel.grid.major.x = element_blank(),
panel.grid.minor.x = element_blank(),
panel.grid.major.y = element_line(colour = "grey60", linetype = "twodash")
)
# Get the names sorted by lg, then by avg
nameorder <- tophit$name[order(tophit$lg, tophit$avg)]
tophit$name <- factor(tophit$name, levels = nameorder)
ggplot(tophit, aes(x=avg, y=name)) +
geom_segment(aes(yend=name), xend=0, colour="grey70")+
geom_point(size=3, aes(colour=lg)) +
scale_color_brewer(palette="Set1", limits=c("NL","AL")) +
theme_bw() +
theme(
panel.grid.major.y = element_blank(),
legend.position = c(1,0.55),
legend.justification = c(1,0.5)
)
#Generate two seperate graphs distinguished by the limit NL and AL
ggplot(tophit, aes(x=avg, y=name)) +
geom_segment(aes(yend=name), xend=0, colour="grey40") +
geom_point(size=3, aes(colour=lg)) +
scale_color_brewer(palette="Set1", limits=c("NL","AL"), guide=FALSE) +
theme_bw() +
theme(panel.grid.major.y = element_blank()) +
facet_grid(lg ~ ., scales = "free_y", space="free_y")
|
cdfGenerator <- function(data, accuracy)
{
  # Cumulative counts of `data` evaluated on a regular grid from 0 to max(data).
  #
  # data:     numeric vector; NAs are dropped before processing.
  # accuracy: grid resolution exponent — the grid step is 10^accuracy
  #           (e.g. accuracy = -1 gives a step of 0.1).
  # Returns the non-zero cumulative counts divided by the grid length.
  data <- data[!is.na(data)]
  step <- 10^accuracy
  grid <- seq(0, max(data), step)
  cdf <- numeric(length(grid))
  # BUG FIX: the original did `for (i in grid) cdf[i] <- ...`, indexing `cdf`
  # by the grid *value*. For any step != 1 fractional indices are truncated
  # (index 0 is a silent no-op), so counts were dropped or misplaced.
  # Iterate by position instead.
  for (k in seq_along(grid)) {
    cdf[k] <- sum(grid[k] > data)
  }
  # Drop zero counts (preserved from the original behaviour).
  cdf <- cdf[cdf != 0]
  # NOTE(review): dividing by length(grid) is kept from the original; a true
  # empirical CDF would divide by length(data) — confirm intended scaling.
  cdf <- cdf / length(grid)
  return(cdf)
}
|
/Functions/CDFGeneratorFunction.R
|
no_license
|
rmcdonnell/Daphnia-Project
|
R
| false
| false
| 285
|
r
|
cdfGenerator <- function(data, accuracy)
{
  # Cumulative counts of `data` evaluated on a regular grid from 0 to max(data).
  #
  # data:     numeric vector; NAs are dropped before processing.
  # accuracy: grid resolution exponent — the grid step is 10^accuracy
  #           (e.g. accuracy = -1 gives a step of 0.1).
  # Returns the non-zero cumulative counts divided by the grid length.
  data <- data[!is.na(data)]
  step <- 10^accuracy
  grid <- seq(0, max(data), step)
  cdf <- numeric(length(grid))
  # BUG FIX: the original did `for (i in grid) cdf[i] <- ...`, indexing `cdf`
  # by the grid *value*. For any step != 1 fractional indices are truncated
  # (index 0 is a silent no-op), so counts were dropped or misplaced.
  # Iterate by position instead.
  for (k in seq_along(grid)) {
    cdf[k] <- sum(grid[k] > data)
  }
  # Drop zero counts (preserved from the original behaviour).
  cdf <- cdf[cdf != 0]
  # NOTE(review): dividing by length(grid) is kept from the original; a true
  # empirical CDF would divide by length(data) — confirm intended scaling.
  cdf <- cdf / length(grid)
  return(cdf)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/column_functions.R
\name{make_column_classes}
\alias{make_column_classes}
\title{A function to provide default names for columns and ensure that
every column has a name}
\usage{
make_column_classes(n_cols, col_classes = NULL, partial_classes = TRUE)
}
\arguments{
\item{n_cols}{\code{numeric}. Number of columns in the data}
\item{col_classes}{\code{character} vector with classes to be assumed for the columns.
Currently, the following classes are allowed: \dQuote{character}, \dQuote{numeric},
\dQuote{integer}, \dQuote{logical}, \dQuote{factor}. Additionally, one may
use \dQuote{asis}, if the class of a column should not be changed. If the class
for a column is not specified explicitly, \dQuote{asis} will be assumed.}
\item{partial_classes}{\code{logical}. If \code{TRUE}, the first seven columns will receive
default classes:
+ subject - \code{factor}
+ md5_hash, controller, type, item - \code{character}
(notice that in Ibex items can be identified with character
strings, that's why item is not \code{numeric} by default),
+ presentation_order, element_number - \code{numeric},
So \code{col_names} will be taken to specify classes for columns
starting from 8. If \code{FALSE}, \code{col_classes} will be taken
as specifying classes for all columns. If there are less
than 8 columns, \code{partial classes} will always be taken
to be \code{FALSE}.}
}
\value{
vector with column classes
}
\description{
A function to provide default names for columns and ensure that
every column has a name
}
|
/man/make_column_classes.Rd
|
no_license
|
antonmalko/ibextor
|
R
| false
| true
| 1,817
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/column_functions.R
\name{make_column_classes}
\alias{make_column_classes}
\title{A function to provide default names for columns and ensure that
every column has a name}
\usage{
make_column_classes(n_cols, col_classes = NULL, partial_classes = TRUE)
}
\arguments{
\item{n_cols}{\code{numeric}. Number of columns in the data}
\item{col_classes}{\code{character} vector with classes to be assumed for the columns.
Currently, the following classes are allowed: \dQuote{character}, \dQuote{numeric},
\dQuote{integer}, \dQuote{logical}, \dQuote{factor}. Additionally, one may
use \dQuote{asis}, if the class of a column should not be changed. If the class
for a column is not specified explicitly, \dQuote{asis} will be assumed.}
\item{partial_classes}{\code{logical}. If \code{TRUE}, the first seven columns will receive
default classes:
+ subject - \code{factor}
+ md5_hash, controller, type, item - \code{character}
(notice that in Ibex items can be identified with character
strings, that's why item is not \code{numeric} by default),
+ presentation_order, element_number - \code{numeric},
So \code{col_classes} will be taken to specify classes for columns
starting from 8. If \code{FALSE}, \code{col_classes} will be taken
as specifying classes for all columns. If there are less
than 8 columns, \code{partial_classes} will always be taken
to be \code{FALSE}.}
}
\value{
vector with column classes
}
\description{
A function to provide default classes for columns and ensure that
every column has a class
}
|
# Titanic survival prediction with a conditional inference tree (party::ctree).
source("prep_data.R")
library(party)
library(rattle)
library(rpart.plot)

# Raw Kaggle data plus the shared preparation routine from prep_data.R.
train_data_Raw <- read.csv("C:/Users/shikhagarg.CORP/Downloads/ML with BD/kaggle/titanic/train.csv")
test_data_Raw <- read.csv("C:/Users/shikhagarg.CORP/Downloads/ML with BD/kaggle/titanic/test.csv")
train_data_prep <- prepare_Data(train_data_Raw)
test_data_prep <- prepare_Data(test_data_Raw)

# Fit the tree. The formula's variables are resolved in `data`, so the
# original attach(train_data_prep) was unnecessary and has been dropped.
t <- ctree(Survived ~ class + Sex + title + Sibsp + parch + agecat + ticket + fare + Cabin + Embarked,
           data = train_data_prep,
           controls = ctree_control(teststat = "quad",
                                    mincriterion = 0.95,
                                    minsplit = 10,
                                    minbucket = 5,
                                    maxdepth = 0))

# Training-set confusion matrix and accuracy.
# Bug fixed: the original looped over an undefined object `t1` and left a
# stray bare `f` expression (both runtime errors); the apparent intent --
# summing correct predictions -- is expressed via the confusion-matrix
# diagonal instead.
conf_mat <- table(predict(t), train_data_prep$Survived)
print(conf_mat)
accuracy <- sum(diag(conf_mat)) / nrow(train_data_prep)
print(accuracy)

plot(t, type = "simple")
test_p <- predict(t, newdata = test_data_prep)
|
/titanic1.R
|
no_license
|
shikhagarg0192/Kaggle_Practice
|
R
| false
| false
| 1,028
|
r
|
# Titanic survival prediction with a conditional inference tree (party::ctree).
source("prep_data.R")
library(party)
library(rattle)
library(rpart.plot)

# Raw Kaggle data plus the shared preparation routine from prep_data.R.
train_data_Raw <- read.csv("C:/Users/shikhagarg.CORP/Downloads/ML with BD/kaggle/titanic/train.csv")
test_data_Raw <- read.csv("C:/Users/shikhagarg.CORP/Downloads/ML with BD/kaggle/titanic/test.csv")
train_data_prep <- prepare_Data(train_data_Raw)
test_data_prep <- prepare_Data(test_data_Raw)

# Fit the tree. The formula's variables are resolved in `data`, so the
# original attach(train_data_prep) was unnecessary and has been dropped.
t <- ctree(Survived ~ class + Sex + title + Sibsp + parch + agecat + ticket + fare + Cabin + Embarked,
           data = train_data_prep,
           controls = ctree_control(teststat = "quad",
                                    mincriterion = 0.95,
                                    minsplit = 10,
                                    minbucket = 5,
                                    maxdepth = 0))

# Training-set confusion matrix and accuracy.
# Bug fixed: the original looped over an undefined object `t1` and left a
# stray bare `f` expression (both runtime errors); the apparent intent --
# summing correct predictions -- is expressed via the confusion-matrix
# diagonal instead.
conf_mat <- table(predict(t), train_data_prep$Survived)
print(conf_mat)
accuracy <- sum(diag(conf_mat)) / nrow(train_data_prep)
print(accuracy)

plot(t, type = "simple")
test_p <- predict(t, newdata = test_data_prep)
|
# Configure a PostgreSQL database for Portuguese full-text search:
# installs the unaccent and pg_trgm extensions, creates a "pt" text
# search configuration copied from the built-in Portuguese one, routes
# words through unaccent + the Portuguese stemmer, and finally registers
# an ispell dictionary for pt_BR with Portuguese stopwords.
#
# con: an open DBI connection to the target database; stops with an
#   error (Portuguese: "you must supply a valid connection") when NULL.
# Value: the row count returned by the final DBI::dbExecute() call;
#   the function is called for its database side effects.
#
# NOTE(review): CREATE EXTENSION errors if the extension already exists,
# so this appears intended as one-time setup per database -- confirm
# whether "IF NOT EXISTS" is wanted.
psql_pt_config <- function(con = NULL){
if (is.null(con)){
stop("Você deve fornecer uma conexão válida")
}
# Accent-insensitive matching support.
q1 <- glue::glue_sql("CREATE EXTENSION unaccent",.con= con)
DBI::dbExecute(con,q1)
# Trigram support (fuzzy matching / similarity indexes).
q2 <- glue::glue_sql("CREATE EXTENSION pg_trgm",.con = con)
DBI::dbExecute(con,q2)
# New text search configuration "pt", cloned from the stock Portuguese one.
q3 <- glue::glue_sql("CREATE TEXT SEARCH CONFIGURATION pt (COPY = pg_catalog.portuguese)",.con=con)
DBI::dbExecute(con,q3)
# Route (h)words through unaccent before the Portuguese stemmer.
q4 <- glue::glue_sql("ALTER TEXT SEARCH CONFIGURATION pt
ALTER MAPPING FOR hword, hword_part, word with unaccent, portuguese_stem",.con = con)
DBI::dbExecute(con,q4)
# ispell dictionary for Brazilian Portuguese with Portuguese stopwords.
q5 <- glue::glue_sql("CREATE TEXT SEARCH DICTIONARY public.portuguese_dic (
TEMPLATE = ispell,
DictFile = pt_br,
AffFile = pt_br,
stopwords = portuguese)",.con = con)
DBI::dbExecute(con,q5)
# Make the new dictionary the word mapping for configuration "pt".
q6 <- glue::glue_sql("ALTER TEXT SEARCH CONFIGURATION pt
ALTER MAPPING FOR hword, hword_part, word
WITH public.portuguese_dic",.con = con)
DBI::dbExecute(con,q6)
}
|
/R/psql_pt_config.R
|
no_license
|
jjesusfilho/FullTextSearch
|
R
| false
| false
| 976
|
r
|
# Configure a PostgreSQL database for Portuguese full-text search:
# installs the unaccent and pg_trgm extensions, creates a "pt" text
# search configuration copied from the built-in Portuguese one, routes
# words through unaccent + the Portuguese stemmer, and finally registers
# an ispell dictionary for pt_BR with Portuguese stopwords.
#
# con: an open DBI connection to the target database; stops with an
#   error (Portuguese: "you must supply a valid connection") when NULL.
# Value: the row count returned by the final DBI::dbExecute() call;
#   the function is called for its database side effects.
#
# NOTE(review): CREATE EXTENSION errors if the extension already exists,
# so this appears intended as one-time setup per database -- confirm
# whether "IF NOT EXISTS" is wanted.
psql_pt_config <- function(con = NULL){
if (is.null(con)){
stop("Você deve fornecer uma conexão válida")
}
# Accent-insensitive matching support.
q1 <- glue::glue_sql("CREATE EXTENSION unaccent",.con= con)
DBI::dbExecute(con,q1)
# Trigram support (fuzzy matching / similarity indexes).
q2 <- glue::glue_sql("CREATE EXTENSION pg_trgm",.con = con)
DBI::dbExecute(con,q2)
# New text search configuration "pt", cloned from the stock Portuguese one.
q3 <- glue::glue_sql("CREATE TEXT SEARCH CONFIGURATION pt (COPY = pg_catalog.portuguese)",.con=con)
DBI::dbExecute(con,q3)
# Route (h)words through unaccent before the Portuguese stemmer.
q4 <- glue::glue_sql("ALTER TEXT SEARCH CONFIGURATION pt
ALTER MAPPING FOR hword, hword_part, word with unaccent, portuguese_stem",.con = con)
DBI::dbExecute(con,q4)
# ispell dictionary for Brazilian Portuguese with Portuguese stopwords.
q5 <- glue::glue_sql("CREATE TEXT SEARCH DICTIONARY public.portuguese_dic (
TEMPLATE = ispell,
DictFile = pt_br,
AffFile = pt_br,
stopwords = portuguese)",.con = con)
DBI::dbExecute(con,q5)
# Make the new dictionary the word mapping for configuration "pt".
q6 <- glue::glue_sql("ALTER TEXT SEARCH CONFIGURATION pt
ALTER MAPPING FOR hword, hword_part, word
WITH public.portuguese_dic",.con = con)
DBI::dbExecute(con,q6)
}
|
pacman::p_load(rstan, dplyr, data.table, broom)
source("functions/JPLP_functions.R")

N_sim = 1000

# Smoke test: simulate one multi-shift JPLP dataset and fit it once.
dt = sim_mul_jplp(kappa = 0.8, beta = 1.2, theta = 2, n_shift = 10)
fit = stan("stan/jplp_simple.stan",
           chains = 1, iter = 3000, refresh = 0,
           data = dt$stan_dt, seed = 123)
f_result = pull_use("beta|theta|kappa", fit)

# ----------- Start of simulation -----------------
# Run `n_sim` replications for a given number of shifts and write the
# pulled estimates to fit/JPLP_sim_simple/sim<n_shift>.csv.
#
# Bug fixed: the original copy-pasted loops swallowed stan() errors via
# tryCatch() and then called pull_use(fit0) anyway, silently recording
# the PREVIOUS iteration's (stale) fit. A failed fit now yields NULL and
# that replication is skipped.
run_jplp_sim <- function(n_shift, iter, n_sim) {
  set.seed(123)                      # same per-scenario seed as before
  results = vector("list", n_sim)    # preallocate instead of growing
  for (i in seq_len(n_sim)) {
    print(paste0("N shift = ", n_shift, ": ", i, " (out of ", n_sim, ")"))
    z = sim_mul_jplp(beta = 1.2, n_shift = n_shift)
    fit0 = tryCatch(
      stan("stan/jplp_simple.stan",
           chains = 1, iter = iter, refresh = 0,
           data = z$stan_dt, seed = 123),
      error = function(e) NULL)
    if (!is.null(fit0)) {
      results[[i]] = pull_use("beta|theta|kappa", fit0)
    }
  }
  data.table::fwrite(data.table::rbindlist(results),
                     paste0("fit/JPLP_sim_simple/sim", n_shift, ".csv"))
}

# The five scenarios previously written as five near-identical loops.
run_jplp_sim(n_shift = 10,  iter = 3000, n_sim = N_sim)
run_jplp_sim(n_shift = 25,  iter = 3000, n_sim = N_sim)
run_jplp_sim(n_shift = 50,  iter = 4000, n_sim = N_sim)
run_jplp_sim(n_shift = 75,  iter = 4000, n_sim = N_sim)
run_jplp_sim(n_shift = 100, iter = 4000, n_sim = N_sim)

# ------------------ Validate estimates ------------
# Collect every scenario's CSV, tag rows with n_shift (parsed from the
# file name), and summarise estimates by parameter and scenario.
csv_files = list.files('fit/JPLP_sim_simple/')
dt_list = list()
for (i in seq_along(csv_files)) {
  dt_list[[i]] = fread(paste0('fit/JPLP_sim_simple/', csv_files[i]))
  dt_list[[i]]$n_shift = as.integer(gsub('sim|\\.csv', '', csv_files[i]))
}
dt = rbindlist(dt_list)
dt %>%
  .[, .(N = .N,
        estimate = mean(estimate),
        estimate_sd = sd(estimate),
        mean_sd = mean(std.error)),
    .(term, n_shift)] %>%
  .[order(term, n_shift)]
|
/scale_up_sim_code/JPLP.R
|
no_license
|
caimiao0714/Reliability_sim
|
R
| false
| false
| 3,496
|
r
|
pacman::p_load(rstan, dplyr, data.table, broom)
source("functions/JPLP_functions.R")

N_sim = 1000

# Smoke test: simulate one multi-shift JPLP dataset and fit it once.
dt = sim_mul_jplp(kappa = 0.8, beta = 1.2, theta = 2, n_shift = 10)
fit = stan("stan/jplp_simple.stan",
           chains = 1, iter = 3000, refresh = 0,
           data = dt$stan_dt, seed = 123)
f_result = pull_use("beta|theta|kappa", fit)

# ----------- Start of simulation -----------------
# Run `n_sim` replications for a given number of shifts and write the
# pulled estimates to fit/JPLP_sim_simple/sim<n_shift>.csv.
#
# Bug fixed: the original copy-pasted loops swallowed stan() errors via
# tryCatch() and then called pull_use(fit0) anyway, silently recording
# the PREVIOUS iteration's (stale) fit. A failed fit now yields NULL and
# that replication is skipped.
run_jplp_sim <- function(n_shift, iter, n_sim) {
  set.seed(123)                      # same per-scenario seed as before
  results = vector("list", n_sim)    # preallocate instead of growing
  for (i in seq_len(n_sim)) {
    print(paste0("N shift = ", n_shift, ": ", i, " (out of ", n_sim, ")"))
    z = sim_mul_jplp(beta = 1.2, n_shift = n_shift)
    fit0 = tryCatch(
      stan("stan/jplp_simple.stan",
           chains = 1, iter = iter, refresh = 0,
           data = z$stan_dt, seed = 123),
      error = function(e) NULL)
    if (!is.null(fit0)) {
      results[[i]] = pull_use("beta|theta|kappa", fit0)
    }
  }
  data.table::fwrite(data.table::rbindlist(results),
                     paste0("fit/JPLP_sim_simple/sim", n_shift, ".csv"))
}

# The five scenarios previously written as five near-identical loops.
run_jplp_sim(n_shift = 10,  iter = 3000, n_sim = N_sim)
run_jplp_sim(n_shift = 25,  iter = 3000, n_sim = N_sim)
run_jplp_sim(n_shift = 50,  iter = 4000, n_sim = N_sim)
run_jplp_sim(n_shift = 75,  iter = 4000, n_sim = N_sim)
run_jplp_sim(n_shift = 100, iter = 4000, n_sim = N_sim)

# ------------------ Validate estimates ------------
# Collect every scenario's CSV, tag rows with n_shift (parsed from the
# file name), and summarise estimates by parameter and scenario.
csv_files = list.files('fit/JPLP_sim_simple/')
dt_list = list()
for (i in seq_along(csv_files)) {
  dt_list[[i]] = fread(paste0('fit/JPLP_sim_simple/', csv_files[i]))
  dt_list[[i]]$n_shift = as.integer(gsub('sim|\\.csv', '', csv_files[i]))
}
dt = rbindlist(dt_list)
dt %>%
  .[, .(N = .N,
        estimate = mean(estimate),
        estimate_sd = sd(estimate),
        mean_sd = mean(std.error)),
    .(term, n_shift)] %>%
  .[order(term, n_shift)]
|
# Set the working directory.
setwd("/Users/abhishek/Downloads/fashion")

# Import training data and test data.
fashion_data_train <- read.csv("fashion_train.csv")
fashion_data_test <- read.csv("fashion_test.csv")

# Name every column: first column is the class label, the remaining 784
# columns are pixel intensities ("pixel01" ... "pixel784").
col_name <- c("label", sprintf("pixel%02d", seq(1, 784)))
colnames(fashion_data_train) <- col_name
colnames(fashion_data_test) <- col_name
head(fashion_data_train)

# Group training rows by class label (labels 0-9 -> list slots 1-10) and
# tag each group with binary_label = 1: these are the "positive" rows for
# the one-vs-rest models built later.
list_by_label <- list()
for (i in c(0:9)) {
  temp <- subset(fashion_data_train, fashion_data_train$label == i)
  temp["binary_label"] <- 1
  list_by_label[[i + 1]] <- temp
}
# Build the negative ("0") sample for a one-vs-rest model of `label`:
# draw `size_for_ith_sample` rows from every OTHER label's group and tag
# them with binary_label = 0.
#
# list_by_label        : list of 10 data frames, one per class label 0-9
# label                : the positive class to exclude
# size_for_ith_sample  : rows to draw from each of the 9 other groups
#
# Bug fixed: the original rbind() sat OUTSIDE the `if`, so when
# i == label the previous iteration's sample (or, at i == 0, a stray
# global `temp`) was appended again, polluting the negative set with
# duplicated rows. rbind() now runs only for non-target labels.
create.sample.for.each.label <- function(list_by_label, label, size_for_ith_sample) {
  negetive_data <- data.frame()
  for (i in c(0:9)) {
    if (i != label) {
      group_df <- as.data.frame(list_by_label[i + 1])
      sampled <- group_df[sample(nrow(group_df), size_for_ith_sample), ]
      sampled["binary_label"] <- 0   # negative-class tag
      negetive_data <- rbind(negetive_data, sampled)
    }
  }
  return(negetive_data)
}
# Build one 20000-row training sample per label: the label's own rows
# (binary_label = 1) plus negatives drawn evenly from the other 9 labels.
list_of_train_data = list()
for (i in c(0:9)) {
  # Negatives per other label so the combined sample totals 20000 rows.
  size_for_ith_sample <- round((20000 - nrow(list_by_label[[i + 1]])) / 9)
  negetive_data_itr <- create.sample.for.each.label(list_by_label, i, size_for_ith_sample)
  temp <- as.data.frame(list_by_label[1 + i])
  train_data_frame <- rbind(temp, negetive_data_itr)
  list_of_train_data[[i + 1]] <- train_data_frame[sample(1:20000), ]  # shuffle rows
}

# Column roles.
# Bug fixed: the original read names(train_list[[1]]) but no object
# `train_list` exists; the list actually built above is list_of_train_data.
col_name_train <- names(list_of_train_data[[1]])
x_var <- col_name_train[2:785]   # predictors (pixel columns)
y_var <- col_name_train[786]     # dependent variable (binary_label)
rm(temp)

# Fit one logistic-regression (one-vs-rest) model per label.
list_glm_model_fashion <- list()
for (i in c(0:9)) {
  temp <- as.data.frame(list_of_train_data[i + 1])
  temp <- cbind(temp[y_var], temp[x_var])      # response first, then pixels
  temp <- temp[sample(nrow(temp), ), ]         # reshuffle before fitting
  list_glm_model_fashion[[i + 1]] <- list(glm(formula = binary_label ~ .,
                                              family = binomial(link = "logit"),
                                              data = temp))
}
# Score every test row with each of the 10 one-vs-rest models, keeping
# the 10 per-class probabilities for each image.
suppressWarnings(rm(predict_obs))        # clear any stale object
df <- fashion_data_test[x_var]           # test-set predictors
actual_obs <- fashion_data_test[1]       # true labels
prob_vec <- rep(NA, 10)                  # 10 probabilities per image
predict_obs <- data.frame()
predict_obs <- rbind(predict_obs, c(prob_vec))  # seed row, dropped below
for (i in c(1:nrow(fashion_data_test))) {
  for (j in c(0:9)) {
    # Bug fixed: predict() has no method for a bare list; extract the
    # fitted glm from its wrapping list before predicting.
    glm_model <- list_glm_model_fashion[[j + 1]][[1]]
    prob_vec[j + 1] <- unlist(predict(glm_model, df[i, ], type = 'response'))
    print(j)
  }
  predict_obs[nrow(predict_obs) + 1, ] = c(prob_vec)
  print(i)
}
colnames(predict_obs) <- c(0:9)
predict_obs <- predict_obs[-c(1), ]      # drop the all-NA seed row

# Predicted label = column with the highest probability.
# Bug fixed: which.is.max() lives in the never-loaded nnet package; base
# which.max() is the equivalent (it takes the first maximum on ties).
Predicted <- vector()
for (i in c(1:nrow(fashion_data_test))) {
  Predicted[i] <- which.max(as.numeric(predict_obs[i, ])) - 1
}
actual <- actual_obs[c(1:nrow(fashion_data_test)), ]
table_test <- table(actual, Predicted)   # confusion matrix
print(table_test)
accuracy <- sum(diag(table_test))        # count of correct predictions
print(accuracy / nrow(fashion_data_test))
# Second approach: train each one-vs-rest model on the same 20000-row
# slice, relabelling binary_label per target class.
New_train_sample <- fashion_data_train[1:20000, ]
New_train_sample["binary_label"] <- 0
new_list_of_train_data = list()
for (i in c(0:9)) {
  # 1 for the target class, 0 otherwise.
  New_train_sample["binary_label"] <- as.numeric(ifelse(New_train_sample$label == i, 1, 0))
  new_list_of_train_data[[i + 1]] <- New_train_sample
}

# Fit the 10 logistic-regression models.
new_list_glm_model_fashion <- list()
for (i in c(0:9)) {
  temp <- as.data.frame(new_list_of_train_data[[i + 1]])
  temp <- cbind(temp[y_var], temp[x_var])
  new_list_glm_model_fashion[[i + 1]] <- list(glm(formula = binary_label ~ .,
                                                  family = binomial(link = "logit"),
                                                  data = temp))
  print(i)
}

# Score the first 2000 test rows with each model.
suppressWarnings(rm(new_predict_obs))
df <- fashion_data_test[x_var]
actual_obs <- fashion_data_test[1]
new_prob_vec <- rep(NA, 10)
new_predict_obs <- data.frame()
new_predict_obs <- rbind(new_predict_obs, c(new_prob_vec))
for (i in c(1:2000)) {
  for (j in c(0:9)) {
    # Bug fixed: extract the fitted glm from its wrapping list; predict()
    # has no method for a bare list.
    glm_model <- new_list_glm_model_fashion[[j + 1]][[1]]
    new_prob_vec[j + 1] <- unlist(predict(glm_model, df[i, ], type = 'response'))
    print(j)
  }
  new_predict_obs[nrow(new_predict_obs) + 1, ] = c(new_prob_vec)
  print(i)
}
colnames(new_predict_obs) <- c(0:9)
new_predict_obs <- new_predict_obs[-c(1), ]

# Base which.max() replaces nnet::which.is.max (nnet is never loaded).
new_Predicted <- vector()
for (i in c(1:2000)) {
  new_Predicted[i] <- which.max(as.numeric(new_predict_obs[i, ])) - 1
}
actual <- actual_obs[c(1:2000), ]
new_table_test <- table(actual, new_Predicted)
# Bug fixed: the original printed the FIRST model's confusion matrix and
# accuracy (table_test, accuracy) and divided by the full test-set size;
# this run predicted only 2000 rows.
print(new_table_test)
new_accuracy <- sum(diag(new_table_test))
print(new_accuracy / 2000)
|
/Code.R
|
no_license
|
abhishek-bose-cs/Image_Recogization_Using_Logistic-Regression_in_R
|
R
| false
| false
| 9,530
|
r
|
# Set the working directory.
setwd("/Users/abhishek/Downloads/fashion")

# Import training data and test data.
fashion_data_train <- read.csv("fashion_train.csv")
fashion_data_test <- read.csv("fashion_test.csv")

# Name every column: first column is the class label, the remaining 784
# columns are pixel intensities ("pixel01" ... "pixel784").
col_name <- c("label", sprintf("pixel%02d", seq(1, 784)))
colnames(fashion_data_train) <- col_name
colnames(fashion_data_test) <- col_name
head(fashion_data_train)

# Group training rows by class label (labels 0-9 -> list slots 1-10) and
# tag each group with binary_label = 1: these are the "positive" rows for
# the one-vs-rest models built later.
list_by_label <- list()
for (i in c(0:9)) {
  temp <- subset(fashion_data_train, fashion_data_train$label == i)
  temp["binary_label"] <- 1
  list_by_label[[i + 1]] <- temp
}
# Build the negative ("0") sample for a one-vs-rest model of `label`:
# draw `size_for_ith_sample` rows from every OTHER label's group and tag
# them with binary_label = 0.
#
# list_by_label        : list of 10 data frames, one per class label 0-9
# label                : the positive class to exclude
# size_for_ith_sample  : rows to draw from each of the 9 other groups
#
# Bug fixed: the original rbind() sat OUTSIDE the `if`, so when
# i == label the previous iteration's sample (or, at i == 0, a stray
# global `temp`) was appended again, polluting the negative set with
# duplicated rows. rbind() now runs only for non-target labels.
create.sample.for.each.label <- function(list_by_label, label, size_for_ith_sample) {
  negetive_data <- data.frame()
  for (i in c(0:9)) {
    if (i != label) {
      group_df <- as.data.frame(list_by_label[i + 1])
      sampled <- group_df[sample(nrow(group_df), size_for_ith_sample), ]
      sampled["binary_label"] <- 0   # negative-class tag
      negetive_data <- rbind(negetive_data, sampled)
    }
  }
  return(negetive_data)
}
# Build one 20000-row training sample per label: the label's own rows
# (binary_label = 1) plus negatives drawn evenly from the other 9 labels.
list_of_train_data = list()
for (i in c(0:9)) {
  # Negatives per other label so the combined sample totals 20000 rows.
  size_for_ith_sample <- round((20000 - nrow(list_by_label[[i + 1]])) / 9)
  negetive_data_itr <- create.sample.for.each.label(list_by_label, i, size_for_ith_sample)
  temp <- as.data.frame(list_by_label[1 + i])
  train_data_frame <- rbind(temp, negetive_data_itr)
  list_of_train_data[[i + 1]] <- train_data_frame[sample(1:20000), ]  # shuffle rows
}

# Column roles.
# Bug fixed: the original read names(train_list[[1]]) but no object
# `train_list` exists; the list actually built above is list_of_train_data.
col_name_train <- names(list_of_train_data[[1]])
x_var <- col_name_train[2:785]   # predictors (pixel columns)
y_var <- col_name_train[786]     # dependent variable (binary_label)
rm(temp)

# Fit one logistic-regression (one-vs-rest) model per label.
list_glm_model_fashion <- list()
for (i in c(0:9)) {
  temp <- as.data.frame(list_of_train_data[i + 1])
  temp <- cbind(temp[y_var], temp[x_var])      # response first, then pixels
  temp <- temp[sample(nrow(temp), ), ]         # reshuffle before fitting
  list_glm_model_fashion[[i + 1]] <- list(glm(formula = binary_label ~ .,
                                              family = binomial(link = "logit"),
                                              data = temp))
}
# Score every test row with each of the 10 one-vs-rest models, keeping
# the 10 per-class probabilities for each image.
suppressWarnings(rm(predict_obs))        # clear any stale object
df <- fashion_data_test[x_var]           # test-set predictors
actual_obs <- fashion_data_test[1]       # true labels
prob_vec <- rep(NA, 10)                  # 10 probabilities per image
predict_obs <- data.frame()
predict_obs <- rbind(predict_obs, c(prob_vec))  # seed row, dropped below
for (i in c(1:nrow(fashion_data_test))) {
  for (j in c(0:9)) {
    # Bug fixed: predict() has no method for a bare list; extract the
    # fitted glm from its wrapping list before predicting.
    glm_model <- list_glm_model_fashion[[j + 1]][[1]]
    prob_vec[j + 1] <- unlist(predict(glm_model, df[i, ], type = 'response'))
    print(j)
  }
  predict_obs[nrow(predict_obs) + 1, ] = c(prob_vec)
  print(i)
}
colnames(predict_obs) <- c(0:9)
predict_obs <- predict_obs[-c(1), ]      # drop the all-NA seed row

# Predicted label = column with the highest probability.
# Bug fixed: which.is.max() lives in the never-loaded nnet package; base
# which.max() is the equivalent (it takes the first maximum on ties).
Predicted <- vector()
for (i in c(1:nrow(fashion_data_test))) {
  Predicted[i] <- which.max(as.numeric(predict_obs[i, ])) - 1
}
actual <- actual_obs[c(1:nrow(fashion_data_test)), ]
table_test <- table(actual, Predicted)   # confusion matrix
print(table_test)
accuracy <- sum(diag(table_test))        # count of correct predictions
print(accuracy / nrow(fashion_data_test))
# Second approach: train each one-vs-rest model on the same 20000-row
# slice, relabelling binary_label per target class.
New_train_sample <- fashion_data_train[1:20000, ]
New_train_sample["binary_label"] <- 0
new_list_of_train_data = list()
for (i in c(0:9)) {
  # 1 for the target class, 0 otherwise.
  New_train_sample["binary_label"] <- as.numeric(ifelse(New_train_sample$label == i, 1, 0))
  new_list_of_train_data[[i + 1]] <- New_train_sample
}

# Fit the 10 logistic-regression models.
new_list_glm_model_fashion <- list()
for (i in c(0:9)) {
  temp <- as.data.frame(new_list_of_train_data[[i + 1]])
  temp <- cbind(temp[y_var], temp[x_var])
  new_list_glm_model_fashion[[i + 1]] <- list(glm(formula = binary_label ~ .,
                                                  family = binomial(link = "logit"),
                                                  data = temp))
  print(i)
}

# Score the first 2000 test rows with each model.
suppressWarnings(rm(new_predict_obs))
df <- fashion_data_test[x_var]
actual_obs <- fashion_data_test[1]
new_prob_vec <- rep(NA, 10)
new_predict_obs <- data.frame()
new_predict_obs <- rbind(new_predict_obs, c(new_prob_vec))
for (i in c(1:2000)) {
  for (j in c(0:9)) {
    # Bug fixed: extract the fitted glm from its wrapping list; predict()
    # has no method for a bare list.
    glm_model <- new_list_glm_model_fashion[[j + 1]][[1]]
    new_prob_vec[j + 1] <- unlist(predict(glm_model, df[i, ], type = 'response'))
    print(j)
  }
  new_predict_obs[nrow(new_predict_obs) + 1, ] = c(new_prob_vec)
  print(i)
}
colnames(new_predict_obs) <- c(0:9)
new_predict_obs <- new_predict_obs[-c(1), ]

# Base which.max() replaces nnet::which.is.max (nnet is never loaded).
new_Predicted <- vector()
for (i in c(1:2000)) {
  new_Predicted[i] <- which.max(as.numeric(new_predict_obs[i, ])) - 1
}
actual <- actual_obs[c(1:2000), ]
new_table_test <- table(actual, new_Predicted)
# Bug fixed: the original printed the FIRST model's confusion matrix and
# accuracy (table_test, accuracy) and divided by the full test-set size;
# this run predicted only 2000 rows.
print(new_table_test)
new_accuracy <- sum(diag(new_table_test))
print(new_accuracy / 2000)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/test.R
\name{test-links}
\alias{test-links}
\title{Test case: links}
\description{
\if{html}{\out{<div class="r">}}\preformatted{magrittr::subtract(10, 1)
}\if{html}{\out{</div>}}\preformatted{## [1] 9
}
}
\examples{
magrittr::subtract(10, 1)
library(magrittr, warn.conflicts = FALSE)
subtract(10, 1)
}
\seealso{
Other tests:
\code{\link{index}},
\code{\link{test-crayon}},
\code{\link{test-dont}},
\code{\link{test-figures}},
\code{\link{test-lists}},
\code{\link{test-output-styles}},
\code{\link{test-params}},
\code{\link{test-sexpr-title}},
\code{\link{test-verbatim}}
}
\concept{tests}
\keyword{internal}
|
/man/test-links.Rd
|
permissive
|
fangzhou-xie/pkgdown
|
R
| false
| true
| 691
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/test.R
\name{test-links}
\alias{test-links}
\title{Test case: links}
\description{
\if{html}{\out{<div class="r">}}\preformatted{magrittr::subtract(10, 1)
}\if{html}{\out{</div>}}\preformatted{## [1] 9
}
}
\examples{
magrittr::subtract(10, 1)
library(magrittr, warn.conflicts = FALSE)
subtract(10, 1)
}
\seealso{
Other tests:
\code{\link{index}},
\code{\link{test-crayon}},
\code{\link{test-dont}},
\code{\link{test-figures}},
\code{\link{test-lists}},
\code{\link{test-output-styles}},
\code{\link{test-params}},
\code{\link{test-sexpr-title}},
\code{\link{test-verbatim}}
}
\concept{tests}
\keyword{internal}
|
# READ AND FORMAT DATA ------------------------------------------------------------------------------------------------
## yooo heres the path: my.peptide.data <- read_maxquant("~/Box/CellBio-GoldfarbLab/Users/Ria Jasuja/modificationSpecificPeptides.txt", "TMT10-K", "TMT10-Nterm", c("Acetyl (Protein N-term)"), "Phospho (STY)")
# INPUT: path to MaxQuant's evidence.txt file and modification names
#
# OUTPUT: internal peptide-level formatted data
#
# Reads MaxQuant's "Evidence.txt" file and converts it into our internal QC format.
#
# Returns a tibble with columns for the number of possible and observed TMT labels.
#
# If TMT.N.mod and TMT.K.mod are NA, then assume that this was not searched with variable TMT mods.
# The values of the internal QC columns for those labels should also be NA.
#
# If only one of the TMT.N and TMT.K columns are NA, then throw a warning.
# The values of the internal QC column for the NA label should also be NA.
#
# Removes Reverses and Potential contaminants
#
# Read MaxQuant's modificationSpecificPeptides.txt and convert it into
# the internal QC format.
#
# path                 : path to the modificationSpecificPeptides.txt file
# TMT_K_mod/TMT_N_mod  : MaxQuant column names holding TMT counts on K / N-term
# N_term_blocking_mods : columns counting mods that block the N-terminus
# K_blocking_mods      : (currently unused) columns blocking lysines
# phospho_mod          : MaxQuant column with phospho counts
#
# Returns a tibble with the selected peptide columns plus expected/detected
# TMT tag counts and a labeling_efficiency category per peptide.
read_maxquant_mod_specific_peptides <- function(path,
                                                TMT_K_mod = "TMT10 (K)",
                                                TMT_N_mod = "TMT10 (N-term)",
                                                N_term_blocking_mods = c("Acetyl (Protein N-term)"),
                                                K_blocking_mods = c(),
                                                phospho_mod = "Phospho (STY)")
{
  data <- read_tsv(path)
  # Per-experiment intensity columns are named "Experiment <name>".
  experiments <- grep("Experiment ", colnames(data), value = TRUE)
  # Keep only the columns needed downstream; all_of() resolves the
  # character arguments explicitly (modern tidyselect -- the original's
  # bare env-variables relied on deprecated fallback behavior).
  filtered.data <- select(data, "Sequence", "Modifications", "Missed cleavages",
                          all_of(TMT_N_mod), all_of(TMT_K_mod),
                          all_of(N_term_blocking_mods), all_of(phospho_mod),
                          all_of(experiments))
  # Total count of N-terminus-blocking modifications per peptide.
  filtered.data$Total_Nterm_Mods <- rowSums(select(data, all_of(N_term_blocking_mods)))
  # Normalise column names to the internal QC vocabulary.
  filtered.data <- filtered.data %>% rename("TMT10-K" = all_of(TMT_K_mod),
                                            "TMT10-Nterm" = all_of(TMT_N_mod),
                                            "N-term Modifications" = "Total_Nterm_Mods",
                                            "Phospho (STY)" = all_of(phospho_mod))
  # Expected tags: one per lysine, plus the N-terminus when no blocking
  # modification is present.
  # NOTE(review): str_count(..., "0") counts literal '0' characters in the
  # blocking-mod total, assuming a free N-terminus renders as "0" -- confirm.
  filtered.data$expected_lysine <- str_count(filtered.data$Sequence, "K")
  filtered.data$detected_lysine <- filtered.data$"TMT10-K"
  filtered.data$expected_nterm <- str_count(filtered.data$"N-term Modifications", "0")
  filtered.data$detected_nterm <- filtered.data$"TMT10-Nterm"
  # Totals (equivalent to the original's recomputation via str_count).
  filtered.data$expected_tags <- filtered.data$expected_lysine + filtered.data$expected_nterm
  filtered.data$detected_tags <- filtered.data$detected_lysine + filtered.data$detected_nterm
  # Classify labeling efficiency; later assignments deliberately override
  # earlier ones for overlapping conditions (e.g. Unlabeled within diff > 0).
  diff_tags <- filtered.data$expected_tags - filtered.data$detected_tags
  filtered.data$labeling_efficiency <- NA_character_
  filtered.data$labeling_efficiency[diff_tags == 0 & filtered.data$expected_tags > 0] <- "Fully Labeled"
  filtered.data$labeling_efficiency[diff_tags > 0] <- "Partially Labeled"
  filtered.data$labeling_efficiency[filtered.data$expected_tags > 0 & filtered.data$detected_tags == 0] <- "Unlabeled"
  filtered.data$labeling_efficiency[filtered.data$expected_tags == 0] <- "No Sites Available"
  filtered.data$labeling_efficiency[diff_tags < 0] <- "Overlabeled"
  # Bug fixed + perf: the original warned inside a per-row for-loop where
  # if() on a 1x1 tibble cell is not interpretable as logical; a single
  # vectorised check replaces it.
  n_overlabeled <- sum(diff_tags < 0, na.rm = TRUE)
  if (n_overlabeled > 0) {
    warning(n_overlabeled, " row(s) have more detected tags than expected tags (overlabeled)")
  }
  return(filtered.data)
}
# COUNT HOW MANY PHOSPHO SITES THERE ARE PER PEPTIDE
# data <- count_maxquant_phospho_sites("~/Box/CellBio-GoldfarbLab/Users/Ria Jasuja/modificationSpecificPeptides.txt", "Phospho (STY)")
#
# Returns the sequence, modification summary, and phospho-count columns of
# the MaxQuant modification-specific peptide table at `path`.
count_maxquant_phospho_sites <- function(path,
                                         phospho_mod = "Phospho (STY)")
{
  data <- read_tsv(path)
  # Bug fixed: the original hard-coded "Phospho (STY)" and ignored the
  # `phospho_mod` argument; all_of() uses the parameter (default unchanged).
  filtered.data <- select(data, "Sequence", "Modifications", all_of(phospho_mod))
  # Bug fixed: the last expression was an assignment, whose value is only
  # returned invisibly; return the result explicitly.
  return(filtered.data)
}
#COUNT HOW MANY STYS THERE ARE OVER 75% CONFIDENCE
# my.phospho.data <- read_maxquant_phospho_sites("~/Box/CellBio-GoldfarbLab/Users/Ria Jasuja/Phospho (STY)Sites.txt", "Localization prob", "Amino acid")
# Read MaxQuant's "Phospho (STY)Sites.txt" and keep protein IDs, the overall
# localization probability, the modified amino acid, and the per-fraction
# localization probability columns.
#
# path:              path to MaxQuant's Phospho (STY)Sites.txt file
# localization_prob: name of the overall localization probability column
# amino_acid:        name of the modified amino acid column
#
# Returns a tibble with standardized columns "Localization prob" and
# "Amino acid".
read_maxquant_phospho_sites <- function(path,
                                        localization_prob = "Localization prob",
                                        amino_acid = "Amino acid")
{
  data <- read_tsv(path)
  # per-fraction columns, e.g. "Localization prob F1" (note trailing space in pattern)
  fractions <- grep("Localization prob ", colnames(data), value = TRUE)
  filtered.data <- select(data, "Proteins", all_of(localization_prob),
                          all_of(amino_acid), all_of(fractions))
  # standardize names via base R: the original rename(`localization_prob` = ...)
  # looked for a column literally named "localization_prob" and would error
  names(filtered.data)[names(filtered.data) == localization_prob] <- "Localization prob"
  names(filtered.data)[names(filtered.data) == amino_acid] <- "Amino acid"
  return(filtered.data)
}
#
# Placeholder: read Spectrum Mill results from `path` and convert them to the
# internal QC format using the supplied modification column names.
# Not yet implemented; currently returns NULL invisibly.
format_spectrum_mill <- function(path,
TMT_N_mod,
TMT_K_mod,
N_term_blocking_mods,
K_blocking_mods,
phospho_mod)
{
}
#
# Placeholder: read an mzTab file from `path` and convert it to the internal
# QC format using the supplied modification column names.
# Not yet implemented; currently returns NULL invisibly.
format_mz_tab <- function(path,
TMT_N_mod,
TMT_K_mod,
N_term_blocking_mods,
K_blocking_mods,
phospho_mod)
{
}
#
# Placeholder: read a previously saved internal QC table from `path`.
# Not yet implemented; currently returns NULL invisibly.
read_qc <- function(path)
{
}
|
/R/FormatData.R
|
no_license
|
GoldfarbLab/CPTACQC
|
R
| false
| false
| 5,894
|
r
|
# READ AND FORMAT DATA ------------------------------------------------------------------------------------------------
## yooo heres the path: my.peptide.data <- read_maxquant("~/Box/CellBio-GoldfarbLab/Users/Ria Jasuja/modificationSpecificPeptides.txt", "TMT10-K", "TMT10-Nterm", c("Acetyl (Protein N-term)"), "Phospho (STY)")
# INPUT: path to MaxQuant's modificationSpecificPeptides.txt file and modification names
#
# OUTPUT: internal peptide-level formatted data
#
# Reads MaxQuant's "modificationSpecificPeptides.txt" file and converts it into our internal QC format.
#
# Returns a tibble with columns for the number of possible and observed TMT labels.
#
# If TMT.N.mod and TMT.K.mod are NA, then assume that this was not searched with variable TMT mods.
# The values of the internal QC columns for those labels should also be NA.
#
# If only one of the TMT.N and TMT.K columns are NA, then throw a warning.
# The values of the internal QC column for the NA label should also be NA.
#
# Removes Reverses and Potential contaminants
#
# Read MaxQuant's modificationSpecificPeptides.txt and convert it into the
# internal QC format, adding expected/detected TMT tag counts and a
# per-peptide labeling-efficiency classification.
#
# path:                 path to MaxQuant's modificationSpecificPeptides.txt
# TMT_K_mod:            column name of the variable TMT modification on lysine
# TMT_N_mod:            column name of the variable TMT modification on the N-terminus
# N_term_blocking_mods: column names of modifications that block N-terminal labeling
# K_blocking_mods:      column names of modifications that block lysine labeling
#                       (currently unused -- TODO confirm intended behavior)
# phospho_mod:          column name of the phosphorylation count column
#
# Returns a tibble with the selected input columns plus expected/detected
# tag counts and a "labeling_efficiency" classification column.
read_maxquant_mod_specific_peptides <- function(path,
                                                TMT_K_mod = "TMT10 (K)",
                                                TMT_N_mod = "TMT10 (N-term)",
                                                N_term_blocking_mods = c("Acetyl (Protein N-term)"),
                                                K_blocking_mods = c(),
                                                phospho_mod = "Phospho (STY)")
{
  data <- read_tsv(path)
  # select the columns whose names contain "Experiment " (experiment + space)
  experiments <- grep("Experiment ", colnames(data), value = TRUE)
  # organize table a bit to make it easier to work with; all_of() errors
  # clearly if a requested modification column is missing
  filtered.data <- select(data, "Sequence", "Modifications", "Missed cleavages",
                          all_of(TMT_N_mod), all_of(TMT_K_mod),
                          all_of(N_term_blocking_mods), all_of(phospho_mod),
                          all_of(experiments))
  # total number of N-terminal blocking modifications per peptide
  filtered.data$Total_Nterm_Mods <- rowSums(select(data, all_of(N_term_blocking_mods)))
  # standardize to fixed internal column names via base R: the original
  # rename("TMT10-K" = `TMT_K_mod`, ...) looked for columns literally named
  # "TMT_K_mod" etc. and would error
  old_names <- c(TMT_K_mod, TMT_N_mod, "Total_Nterm_Mods", phospho_mod)
  new_names <- c("TMT10-K", "TMT10-Nterm", "N-term Modifications", "Phospho (STY)")
  names(filtered.data)[match(old_names, names(filtered.data))] <- new_names
  # expected vs detected TMT tags per peptide
  filtered.data$expected_lysine <- str_count(filtered.data$Sequence, "K")
  filtered.data$detected_lysine <- filtered.data$"TMT10-K"
  # the N-terminus is available for labeling only when no blocking mod is
  # present; the original str_count(x, "0") miscounted values such as 10
  filtered.data$expected_nterm <- as.integer(filtered.data$"N-term Modifications" == 0)
  filtered.data$detected_nterm <- filtered.data$"TMT10-Nterm"
  # totals are the sums of the per-site columns computed above
  filtered.data$expected_tags <- filtered.data$expected_lysine + filtered.data$expected_nterm
  filtered.data$detected_tags <- filtered.data$detected_lysine + filtered.data$detected_nterm
  # classify labeling efficiency:
  #   expected == detected (> 0)        -> Fully Labeled
  #   expected > detected (> 0)         -> Partially Labeled
  #   expected > 0 and detected == 0    -> Unlabeled
  #   expected == 0, nothing detected   -> No Sites Available
  #   detected > expected               -> Overlabeled (highest precedence,
  #                                        mirroring the original assignment order)
  diff_tags <- filtered.data$expected_tags - filtered.data$detected_tags
  filtered.data$labeling_efficiency <- ifelse(
    diff_tags < 0, "Overlabeled",
    ifelse(filtered.data$expected_tags == 0, "No Sites Available",
           ifelse(filtered.data$detected_tags == 0, "Unlabeled",
                  ifelse(diff_tags == 0, "Fully Labeled", "Partially Labeled"))))
  # warn once (not once per row) if any peptide carries more labels than
  # available sites; also safe for zero-row input unlike the 1:nrow() loop
  n_overlabeled <- sum(diff_tags < 0, na.rm = TRUE)
  if (n_overlabeled > 0) {
    warning(n_overlabeled,
            " row(s) have more detected tags than expected tags (overlabeled)")
  }
  return(filtered.data)
}
#COUNT HOW MANY PHOSPHO SITES THERE ARE PER PEPTIDE
#data <- count_maxquant_phospho_sites("~/Box/CellBio-GoldfarbLab/Users/Ria Jasuja/modificationSpecificPeptides.txt", "Phospho (STY)")
# Count phospho sites per peptide: keep the per-peptide phospho count column
# from MaxQuant's modificationSpecificPeptides.txt.
#
# path:        path to MaxQuant's modificationSpecificPeptides.txt file
# phospho_mod: name of the phosphorylation count column to keep
#
# Returns a tibble with Sequence, Modifications, and the phospho count column.
count_maxquant_phospho_sites <- function(path,
                                         phospho_mod = "Phospho (STY)")
{
  data <- read_tsv(path)
  # use the phospho_mod argument instead of the hard-coded "Phospho (STY)"
  # so a non-default modification name is honored; all_of() errors clearly
  # if the column is missing
  filtered.data <- select(data, "Sequence", "Modifications", all_of(phospho_mod))
  # return visibly (the bare assignment previously returned invisibly)
  return(filtered.data)
}
#COUNT HOW MANY STYS THERE ARE OVER 75% CONFIDENCE
# my.phospho.data <- read_maxquant_phospho_sites("~/Box/CellBio-GoldfarbLab/Users/Ria Jasuja/Phospho (STY)Sites.txt", "Localization prob", "Amino acid")
# Read MaxQuant's "Phospho (STY)Sites.txt" and keep protein IDs, the overall
# localization probability, the modified amino acid, and the per-fraction
# localization probability columns.
#
# path:              path to MaxQuant's Phospho (STY)Sites.txt file
# localization_prob: name of the overall localization probability column
# amino_acid:        name of the modified amino acid column
#
# Returns a tibble with standardized columns "Localization prob" and
# "Amino acid".
read_maxquant_phospho_sites <- function(path,
                                        localization_prob = "Localization prob",
                                        amino_acid = "Amino acid")
{
  data <- read_tsv(path)
  # per-fraction columns, e.g. "Localization prob F1" (note trailing space in pattern)
  fractions <- grep("Localization prob ", colnames(data), value = TRUE)
  filtered.data <- select(data, "Proteins", all_of(localization_prob),
                          all_of(amino_acid), all_of(fractions))
  # standardize names via base R: the original rename(`localization_prob` = ...)
  # looked for a column literally named "localization_prob" and would error
  names(filtered.data)[names(filtered.data) == localization_prob] <- "Localization prob"
  names(filtered.data)[names(filtered.data) == amino_acid] <- "Amino acid"
  return(filtered.data)
}
#
# Placeholder: read Spectrum Mill results from `path` and convert them to the
# internal QC format using the supplied modification column names.
# Not yet implemented; currently returns NULL invisibly.
format_spectrum_mill <- function(path,
TMT_N_mod,
TMT_K_mod,
N_term_blocking_mods,
K_blocking_mods,
phospho_mod)
{
}
#
# Placeholder: read an mzTab file from `path` and convert it to the internal
# QC format using the supplied modification column names.
# Not yet implemented; currently returns NULL invisibly.
format_mz_tab <- function(path,
TMT_N_mod,
TMT_K_mod,
N_term_blocking_mods,
K_blocking_mods,
phospho_mod)
{
}
#
# Placeholder: read a previously saved internal QC table from `path`.
# Not yet implemented; currently returns NULL invisibly.
read_qc <- function(path)
{
}
|
# Proportion of variance captured by each principal component.
#
# input:  - pca: object of class PCA, created by the PCA function
#         - numb_components: number of leading components to use;
#           defaults to all components
#         - barplot: boolean, if TRUE a barplot of the proportions is drawn
#         - main_plot: main title of the barplot
# output: numeric vector of proportional variances (one per component;
#         note: a vector, not a list)
PCA_PropVar <- function(pca, numb_components = NULL, barplot = FALSE, main_plot = NULL) {
  # inherits() is the idiomatic S3 class test (avoids methods::is)
  if (!inherits(pca, 'PCA')) {
    stop('The input `pca` must be an object which is an output from the PCA function.')
  }
  if (is.null(numb_components)) {
    numb_components <- length(pca$eigenval)
  }
  # each eigenvalue's share of the retained variance
  eig <- pca$eigenval[seq_len(numb_components)]
  var_proportion <- eig / sum(eig)
  if (isTRUE(barplot)) {
    if (is.null(main_plot)) {
      main_plot <- 'Proportion of the Variance that is captured by Z_m'
    }
    barplot(var_proportion, main = main_plot, xlab = 'Z_m', ylab = 'Proportion',
            ylim = c(0, max(var_proportion * 1.2, 1)), col = 'steelblue')
  }
  return(var_proportion)
}
|
/R/PCA_PropVar.R
|
no_license
|
manuhuth/PCR-Parameter-Variance-Analysis
|
R
| false
| false
| 1,169
|
r
|
# Proportion of variance captured by each principal component.
#
# input:  - pca: object of class PCA, created by the PCA function
#         - numb_components: number of leading components to use;
#           defaults to all components
#         - barplot: boolean, if TRUE a barplot of the proportions is drawn
#         - main_plot: main title of the barplot
# output: numeric vector of proportional variances (one per component;
#         note: a vector, not a list)
PCA_PropVar <- function(pca, numb_components = NULL, barplot = FALSE, main_plot = NULL) {
  # inherits() is the idiomatic S3 class test (avoids methods::is)
  if (!inherits(pca, 'PCA')) {
    stop('The input `pca` must be an object which is an output from the PCA function.')
  }
  if (is.null(numb_components)) {
    numb_components <- length(pca$eigenval)
  }
  # each eigenvalue's share of the retained variance
  eig <- pca$eigenval[seq_len(numb_components)]
  var_proportion <- eig / sum(eig)
  if (isTRUE(barplot)) {
    if (is.null(main_plot)) {
      main_plot <- 'Proportion of the Variance that is captured by Z_m'
    }
    barplot(var_proportion, main = main_plot, xlab = 'Z_m', ylab = 'Proportion',
            ylim = c(0, max(var_proportion * 1.2, 1)), col = 'steelblue')
  }
  return(var_proportion)
}
|
# Penalized (glmnet) regression on the ISLR2 Hitters data with {tidymodels}:
# split the data, preprocess, tune the penalty by 10-fold cross-validation,
# then fit the final model on all data and save it to disk.
# Packages -----------------------------------------------------------------
library(ggplot2)
library(tidymodels)
library(ISLR2)
# Data ---------------------------------------------------------------------
data("Hitters")
#Hitters <- na.omit(Hitters)
# train and test sets ------------------------------------------------------
set.seed(123)
hitters_initial_split <- Hitters %>% initial_split(3/4)
hitters_train <- training(hitters_initial_split)
hitters_test <- testing(hitters_initial_split)
# Dataprep -----------------------------------------------------------------
# drop rows with NAs, remove nominal columns, normalize numeric predictors
hitters_recipe <- recipe(Salary ~ ., data = hitters_train) %>%
step_naomit(everything(), skip = TRUE) %>%
step_rm(all_nominal()) %>%
step_normalize(all_numeric_predictors())
# take a look at the result of the recipe.
hitters_recipe %>%
prep() %>%
bake(new_data = hitters_train) %>%
glimpse()
# model definition ---------------------------------------------------------
# NOTE: "tune()" marks the hyperparameters for which we want to find
# the best value.
hitters_model <- linear_reg(
penalty = tune()
) %>%
set_engine("glmnet") %>%
set_mode("regression")
# Creating the workflow ----------------------------------------------------
hitters_wflow <- workflow() %>%
add_recipe(hitters_recipe) %>%
add_model(hitters_model)
# hyperparameter tuning ----------------------------------------------------
# resampling with cross-validation -----------------------------------------
hitters_resamples <- vfold_cv(hitters_train, v = 10)
# penalty grid on a log10 scale, from 10^-1 to 10^2
hitters_grid <- grid_regular(
penalty(c(-1, 2)),
levels = 10
)
hitters_tune_grid <- tune_grid(
hitters_wflow,
resamples = hitters_resamples,
grid = hitters_grid,
metrics = metric_set(rmse, rsq),
control = control_grid(verbose = TRUE, allow_par = FALSE)
)
# tuning inspection --------------------------------------------------------
autoplot(hitters_tune_grid)
collect_metrics(hitters_tune_grid)
show_best(hitters_tune_grid)
# select the best set of hyperparameters (by RMSE)
hitters_best_hiperparams <- select_best(hitters_tune_grid, "rmse")
hitters_wflow <- hitters_wflow %>% finalize_workflow(hitters_best_hiperparams)
# final model performance --------------------------------------------------
hitters_last_fit <- hitters_wflow %>% last_fit(split = hitters_initial_split)
collect_metrics(hitters_last_fit)
# predicted vs observed salary on the test split
collect_predictions(hitters_last_fit) %>%
ggplot(aes(.pred, Salary)) +
geom_point()
# final model --------------------------------------------------------------
hitters_final_model <- hitters_wflow %>% fit(data = Hitters)
# predictions --------------------------------------------------------------
hitters_com_previsao <- Hitters %>%
mutate(
salary_pred = predict(hitters_final_model, new_data = .)$.pred
)
predict(hitters_final_model, new_data = Hitters)
# save the model for later use ---------------------------------------------
saveRDS(hitters_final_model, file = "hitters_final_model.rds")
|
/exemplos/03-cross-validation.R
|
no_license
|
curso-r/202108-intro-ml
|
R
| false
| false
| 2,994
|
r
|
# Penalized (glmnet) regression on the ISLR2 Hitters data with {tidymodels}:
# split the data, preprocess, tune the penalty by 10-fold cross-validation,
# then fit the final model on all data and save it to disk.
# Packages -----------------------------------------------------------------
library(ggplot2)
library(tidymodels)
library(ISLR2)
# Data ---------------------------------------------------------------------
data("Hitters")
#Hitters <- na.omit(Hitters)
# train and test sets ------------------------------------------------------
set.seed(123)
hitters_initial_split <- Hitters %>% initial_split(3/4)
hitters_train <- training(hitters_initial_split)
hitters_test <- testing(hitters_initial_split)
# Dataprep -----------------------------------------------------------------
# drop rows with NAs, remove nominal columns, normalize numeric predictors
hitters_recipe <- recipe(Salary ~ ., data = hitters_train) %>%
step_naomit(everything(), skip = TRUE) %>%
step_rm(all_nominal()) %>%
step_normalize(all_numeric_predictors())
# take a look at the result of the recipe.
hitters_recipe %>%
prep() %>%
bake(new_data = hitters_train) %>%
glimpse()
# model definition ---------------------------------------------------------
# NOTE: "tune()" marks the hyperparameters for which we want to find
# the best value.
hitters_model <- linear_reg(
penalty = tune()
) %>%
set_engine("glmnet") %>%
set_mode("regression")
# Creating the workflow ----------------------------------------------------
hitters_wflow <- workflow() %>%
add_recipe(hitters_recipe) %>%
add_model(hitters_model)
# hyperparameter tuning ----------------------------------------------------
# resampling with cross-validation -----------------------------------------
hitters_resamples <- vfold_cv(hitters_train, v = 10)
# penalty grid on a log10 scale, from 10^-1 to 10^2
hitters_grid <- grid_regular(
penalty(c(-1, 2)),
levels = 10
)
hitters_tune_grid <- tune_grid(
hitters_wflow,
resamples = hitters_resamples,
grid = hitters_grid,
metrics = metric_set(rmse, rsq),
control = control_grid(verbose = TRUE, allow_par = FALSE)
)
# tuning inspection --------------------------------------------------------
autoplot(hitters_tune_grid)
collect_metrics(hitters_tune_grid)
show_best(hitters_tune_grid)
# select the best set of hyperparameters (by RMSE)
hitters_best_hiperparams <- select_best(hitters_tune_grid, "rmse")
hitters_wflow <- hitters_wflow %>% finalize_workflow(hitters_best_hiperparams)
# final model performance --------------------------------------------------
hitters_last_fit <- hitters_wflow %>% last_fit(split = hitters_initial_split)
collect_metrics(hitters_last_fit)
# predicted vs observed salary on the test split
collect_predictions(hitters_last_fit) %>%
ggplot(aes(.pred, Salary)) +
geom_point()
# final model --------------------------------------------------------------
hitters_final_model <- hitters_wflow %>% fit(data = Hitters)
# predictions --------------------------------------------------------------
hitters_com_previsao <- Hitters %>%
mutate(
salary_pred = predict(hitters_final_model, new_data = .)$.pred
)
predict(hitters_final_model, new_data = Hitters)
# save the model for later use ---------------------------------------------
saveRDS(hitters_final_model, file = "hitters_final_model.rds")
|
## SPATIAL
library(sp)
library(rgeos)
library(raster)
library(rgdal)
library(maptools)
## DATA MANAGEMENT
library(tidyverse)
library(skimr)
library(patchwork)
library(readxl)
# library(zoo)
library(pryr)
## PLOTTING
library(scales)
library(units)
library(viridis)
library(extrafont)
library(gtable)
library(grid)
library(rasterVis)
library(RColorBrewer)
library(ComplexHeatmap)
#----------------------------------------------------------------------------
########################################
## FUNCTIONS
########################################
##-------------
## read in exceedance/redcution rasters
##-------------
# Create an empty raster for when one is not processed via Python
# s832_ba <- raster("rasters/s832.tif")
# empty_raster <- raster(vals = NA,
# nrows = nrow(s832_ba),
# ncols = ncol(s832_ba),
# ext = extent(s832_ba),
# crs = proj4string(s832_ba))
# saveRDS(empty_raster, "rasters/empty_raster.RDS")
# function to check if file exists and read in file; else is empty raster
# Read a raster from FILE if it exists; otherwise fall back to a pre-saved
# empty raster so downstream stacking code still receives a layer with the
# expected grid.
#
# FILE: path to a .tif raster produced by the Python pipeline
#
# Returns a RasterLayer.
read_raster <- function(FILE) {
  # file.exists() already returns a logical; no "== TRUE" comparison needed
  if (file.exists(FILE)) {
    raster(FILE)
  } else {
    # all-NA placeholder saved in "rasters/empty_raster.RDS"
    readRDS("rasters/empty_raster.RDS")
  }
}
##-------------
## plotting function for reduction/exceedance rasters
##-------------
# Draw a single map panel: a binned raster with state boundaries overlaid
# and a small panel title.
#
# RASTER: RasterLayer of values to display
# TITLE:  panel title text
# COLS:   one fill color per bin
# BREAKS: bin boundaries; length must be length(COLS) + 1
#
# Relies on the global `states_sh` -- assumed loaded before any call.
red_exc_plot <- function(RASTER, TITLE, COLS, BREAKS) {
# forest ownership background intentionally disabled (kept for reference)
# plot(forown,
#
# # total pixels to plot
# # maxpixels = 1e5,
#
# # turn off plot features
# axes = FALSE,
# box = FALSE,
# legend = FALSE,
#
# # colors
# col = c("grey70", "transparent"))
#
# plot reduction/exceedance raster
plot(RASTER,
# total pixels to plot
# maxpixels = ncell(RASTER),
# turn off plot features
axes = FALSE,
box = FALSE,
legend = FALSE,
# colors and bins
col = COLS,
breaks = BREAKS)
# plot states
plot(states_sh,
add = TRUE)
# add panel title (negative line pulls it inside the plot region)
title(TITLE, line = -1.2, cex = 0.8)
}
#----------------------------------------------------------------------------
########################################
## load gis and raster data
########################################
# forest-ownership binary raster (background layer); also defines the CRS
# used for everything else
forown <- raster("gis/forown_binary_crop.tif")
new_crs <- proj4string(forown)
# base states map, reprojected to the raster CRS
states_sh <- readOGR("gis/states")
states_sh <- spTransform(states_sh, new_crs)
# species codes and data; column 4 holds the species code -- TODO confirm
# against data/spp_codes.csv header
sp_dat <- read_csv("data/spp_codes.csv")
colnames(sp_dat)[4] <- "spp_code"
# prefix codes with "s" to match raster file names (e.g. "s832")
sp_dat <- sp_dat %>%
mutate(spp_code = paste("s", spp_code, sep = ""))
#----------------------------------------------------------------------------
########################################
## PLOTTING
########################################
##-------------
## basal area and proportional basal area
##-------------
# Two-panel PDF for one species: basal area (left) and proportional basal
# area (right), drawn over the grey forest-ownership background with state
# boundaries and ComplexHeatmap legends.
#
# SP: species code, e.g. "s832"; must match sp_dat$spp_code and the raster
#     file names under rasters/.
#
# Side effect: writes figures_md/ba/<SP>_ba_propba.pdf.
# Relies on globals forown, states_sh, sp_dat -- assumed loaded beforehand.
plot_ba_propba <- function(SP) {
  # input rasters: absolute and proportional basal area
  sp_ba <- raster(paste("rasters/", SP, ".tif", sep = ""))
  sp_prop <- raster(paste("rasters/", SP, "_proportion.tif", sep = ""))
  # color palettes for the two panels
  ba_cols <- rev(viridis(256))
  prop_ba_cols <- c("khaki2", rev(inferno(6))[2:6])
  # species names for the figure title
  sp_latin <- with(sp_dat, paste(GENUS[spp_code == SP],
                                 SPECIES[spp_code == SP],
                                 sep = " "))
  sp_common <- with(sp_dat, COMMON_NAME[spp_code == SP])
  # bins (fractions) and labels (percent) for proportional basal area
  prop_breaks <- c(0, 0.05, 0.10, 0.20, 0.40, 0.60, 1)
  prop_labels <- c("<5", "5-10", "10-20", "20-40", "40-60", ">60")
  # open the two-panel PDF device
  pdf(file = paste("figures_md/ba/", SP, "_ba_propba.pdf", sep = ""),
      height = 5,
      width = 10)
  par(mar = c(0, 0, 0, 4),
      mfrow = c(1, 2),
      oma = c(0, 0, 2, 0),
      cex = 0.8)
  # ---- left panel: basal area ----
  # grey forested / transparent non-forested background
  plot(forown,
       axes = FALSE,
       box = FALSE,
       legend = FALSE,
       col = c("grey70", "transparent"))
  plot(sp_ba,
       axes = FALSE,
       box = FALSE,
       legend = FALSE,
       col = ba_cols,
       add = TRUE)
  plot(states_sh,
       add = TRUE)
  # separate call draws only the continuous basal-area legend
  plot(sp_ba,
       col = ba_cols,
       legend.only = TRUE,
       horizontal = FALSE,
       smallplot = c(0.85, 0.89, 0.18, 0.76),
       legend.args = list(text = expression(m^2),
                          line = 0.3,
                          side = 3,
                          cex = 1.1,
                          las = 1),
       axis.args = list(cex.axis = 1.1,
                        mgp = c(2.5, 0.5, 0),
                        tck = -0.25),
       add = TRUE)
  title("Basal Area", line = -5, cex = 1)
  # ---- right panel: proportional basal area ----
  plot(forown,
       axes = FALSE,
       box = FALSE,
       legend = FALSE,
       col = c("grey70", "transparent"))
  plot(sp_prop,
       axes = FALSE,
       box = FALSE,
       legend = FALSE,
       col = prop_ba_cols,
       breaks = prop_breaks,
       add = TRUE)
  plot(states_sh,
       add = TRUE)
  # categorical legend via ComplexHeatmap::Legend
  draw(Legend(labels = rev(prop_labels),
              title = "%",
              title_position = "topleft",
              legend_gp = gpar(fill = rev(prop_ba_cols)),
              labels_gp = gpar(fontsize = 12),
              title_gp = gpar(fontsize = 12),
              grid_height = unit(12, "mm"),
              grid_width = unit(5, "mm"),
              ncol = 1),
       x = unit(9.59, "in"),
       y = unit(2.275, "in"))
  title("Proportional Basal Area", line = -5, cex = 1)
  # overall figure title: "Latin name (common name)"
  mtext(bquote(italic(.(sp_latin))~"("*.(sp_common)*")"),
        side = 3, line = -0.5, cex = 2, font = 3, outer = TRUE)
  # legend for the forested/non-forested background; the original call
  # passed an accidental empty argument `title = ,` here -- omit it so the
  # Legend default is used explicitly rather than via a missing argument
  draw(Legend(labels = c("Forested", "Non-forested"),
              title_position = "topleft",
              legend_gp = gpar(fill = c("grey70", "White")),
              border = "grey15",
              ncol = 2,
              labels_gp = gpar(fontsize = 14),
              title_gp = gpar(fontsize = 12),
              grid_height = unit(8, "mm"),
              grid_width = unit(8, "mm")),
       x = unit(5, "in"),
       y = unit(0.5, "in"))
  dev.off()
  # free the large raster objects before returning
  rm(sp_ba)
  rm(sp_prop)
}
# candidate species codes used while testing the plotting functions
test_sp <- c("s832", "s93", "s132", "s73", "s901", "s833", "s711", "s263", "s129")
# species of interest for the current figure set
bt_sp <- c("s108", "s202", "s746", "s93")
# write a basal-area / proportional-basal-area PDF for every species
lapply(bt_sp, plot_ba_propba)
plot_ba_propba("s93")
##-------------
## exceedance----proportion basal area
##-------------
# Four-panel map for one species: percent of basal area in exceedance of the
# critical load for growth/survival under N and S deposition.
#
# SP: species code matching the raster file names (e.g. "s832")
#
# Side effect: writes figures_md/exc/<SP>_exc.pdf.
# Relies on read_raster(), red_exc_plot(), and (via red_exc_plot) the global
# states_sh. Assumes the four exceedance rasters share the grid of the
# basal-area raster used as a mask -- TODO confirm upstream guarantees this.
exceedance_plot <- function(SP) {
# rasters (read_raster returns a saved empty raster when a file is missing)
sp_n_growth_exc <- read_raster(paste("rasters/", SP, "_proportion_exc_n_growth.tif", sep = ""))
sp_n_survival_exc <- read_raster(paste("rasters/", SP, "_proportion_exc_n_survival.tif", sep = ""))
sp_s_growth_exc <- read_raster(paste("rasters/", SP, "_proportion_exc_s_growth.tif", sep = ""))
sp_s_survival_exc <- read_raster(paste("rasters/", SP, "_proportion_exc_s_survival.tif", sep = ""))
# basal area for mask
sp_ba <- raster(paste("rasters/", SP, ".tif", sep = ""))
# create a raster stack (one layer per vital rate x pollutant combination)
exc_stack <- stack(sp_n_growth_exc,
sp_n_survival_exc,
sp_s_growth_exc,
sp_s_survival_exc)
names(exc_stack) <- c("Growth_N",
"Survival_N",
"Growth_S",
"Survival_S")
# remove input files to limit memory use
rm(sp_n_growth_exc,
sp_n_survival_exc,
sp_s_growth_exc,
sp_s_survival_exc)
# mask the raster stack with the basal area raster and then remove original
exc_stack_mask <- mask(exc_stack, sp_ba)
rm(exc_stack)
# color palette and breaks for creating categorical variable;
# first bin isolates exact zeros, last break (3.5) is an upper catch-all
exc_cols <- c("steelblue3", brewer_pal(palette = "YlOrRd")(6)[2:6])
exc_breaks <- c(0, 0.000001, 0.01, 0.05, 0.1, 0.2, 3.5)
exc_labels <- c("0", "0-1", "1-5", "5-10", "10-20", ">20")
# create pdf file
pdf(file = paste("figures_md/exc/", SP, "_exc.pdf", sep = ""),
height = 5,
width = 8)
# set up multipanel par
par(mfrow=c(2,2),mar=c(0,0,0,0),oma=c(0,0,2,4.5), xpd = NA)
# plot the individual rasters
red_exc_plot(exc_stack_mask[[1]], "Growth Rate - N Deposition", exc_cols, exc_breaks )
red_exc_plot(exc_stack_mask[[2]], "Survival - N Deposition", exc_cols, exc_breaks)
red_exc_plot(exc_stack_mask[[3]], "Growth Rate - S Deposition", exc_cols, exc_breaks)
red_exc_plot(exc_stack_mask[[4]], "Survival - S Deposition", exc_cols, exc_breaks)
# add in the title
mtext("Percent of Basal Area in Exceedance of Critical Load for:", side = 3, line = 0, cex = 1.2, font = 2, outer = TRUE)
# add shared legend for all panels using ComplexHeatmap::Legend
draw(Legend(labels = rev(exc_labels),
title = "%",
title_position = "topleft",
legend_gp = gpar(fill = rev(exc_cols)),
# gap = unit(5, "mm"),
grid_height = unit(8, "mm"),
grid_width = unit(8, "mm"),
ncol = 1),
x = unit(7.45, "in"),
y = unit(2.6, "in"))
dev.off()
}
# render an exceedance figure for each focal species
lapply(bt_sp, exceedance_plot)
##-------------
## reduction
##-------------
# Four-panel map for one species: percent reduction in growth/survival under
# N and S deposition, for areas in exceedance of the critical load.
#
# SP: species code matching the raster file names (e.g. "s832")
#
# Side effect: writes figures_md/red/<SP>_red.pdf.
# Relies on read_raster(), red_exc_plot(), and (via red_exc_plot) the global
# states_sh.
reduction_plot <- function(SP) {
# rasters (read_raster returns a saved empty raster when a file is missing)
sp_n_growth_red <- read_raster(paste("rasters/", SP, "_proportion_exc_n_growth_n_growth_reduction.tif", sep = ""))
sp_n_survival_red <- read_raster(paste("rasters/", SP, "_proportion_exc_n_survival_n_survival_reduction.tif", sep = ""))
sp_s_growth_red <- read_raster(paste("rasters/", SP, "_proportion_exc_s_growth_s_growth_reduction.tif", sep = ""))
sp_s_survival_red <- read_raster(paste("rasters/", SP, "_proportion_exc_s_survival_s_survival_reduction.tif", sep = ""))
# basal area raster for mask
sp_ba <- raster(paste("rasters/", SP, ".tif", sep = ""))
# stack and name rasters (one layer per vital rate x pollutant combination)
red_stack <- stack(sp_n_growth_red,
sp_n_survival_red,
sp_s_growth_red,
sp_s_survival_red)
names(red_stack) <- c("Growth_N",
"Survival_N",
"Growth_S",
"Survival_S")
# remove input files to limit memory use
rm(sp_n_growth_red,
sp_n_survival_red,
sp_s_growth_red,
sp_s_survival_red)
# mask the raster stack with the basal area raster and then remove original
red_stack_mask <- mask(red_stack, sp_ba)
rm(red_stack)
# color palette and breaks for creating categorical variable;
# first bin isolates exact zeros, last break (3.5) is an upper catch-all
red_cols <- c("steelblue3", brewer_pal(palette = "YlOrRd")(6)[2:6])
red_breaks <- c(0, 0.000001, 0.01, 0.05, 0.1, 0.2, 3.5)
red_labels <- c("0", "0-1", "1-5", "5-10", "10-20", ">20")
# multipanel reduction plots
pdf(file = paste("figures_md/red/", SP, "_red.pdf", sep = ""),
height = 5,
width = 8)
# set up multipanel par
par(mfrow=c(2,2),mar=c(0,0,0,0),oma=c(0,0,2,4.5), xpd = NA)
# plot the individual rasters
red_exc_plot(red_stack_mask[[1]], "Growth Rate - N Deposition", red_cols, red_breaks)
red_exc_plot(red_stack_mask[[2]], "Survival - N Deposition", red_cols, red_breaks)
red_exc_plot(red_stack_mask[[3]], "Growth Rate - S Deposition", red_cols, red_breaks)
red_exc_plot(red_stack_mask[[4]], "Survival - S Deposition", red_cols, red_breaks)
# add in the title
mtext("Percent Reduction in:", side = 3, line = 0, cex = 1.2, font = 2, outer = TRUE)
# add shared legend for all panels
draw(Legend(labels = rev(red_labels),
title = "%",
title_position = "topleft",
legend_gp = gpar(fill = rev(red_cols)),
# gap = unit(5, "mm"),
grid_height = unit(8, "mm"),
grid_width = unit(8, "mm"),
ncol = 1),
x = unit(7.45, "in"),
y = unit(2.6, "in"))
dev.off()
}
# render a reduction figure for each focal species
lapply(bt_sp, reduction_plot)
#----------------------------------------------------------------------------
#----------------------------------------------------------------------------
#----------------------------------------------------------------------------
#############################################################################
## testing
#############################################################################
|
/code/natl_spp_plots_raster.R
|
no_license
|
jeremyash/tree_CL
|
R
| false
| false
| 13,391
|
r
|
## SPATIAL
library(sp)
library(rgeos)
library(raster)
library(rgdal)
library(maptools)
## DATA MANAGEMENT
library(tidyverse)
library(skimr)
library(patchwork)
library(readxl)
# library(zoo)
library(pryr)
## PLOTTING
library(scales)
library(units)
library(viridis)
library(extrafont)
library(gtable)
library(grid)
library(rasterVis)
library(RColorBrewer)
library(ComplexHeatmap)
#----------------------------------------------------------------------------
########################################
## FUNCTIONS
########################################
##-------------
## read in exceedance/redcution rasters
##-------------
# Create an empty raster for when one is not processed via Python
# s832_ba <- raster("rasters/s832.tif")
# empty_raster <- raster(vals = NA,
# nrows = nrow(s832_ba),
# ncols = ncol(s832_ba),
# ext = extent(s832_ba),
# crs = proj4string(s832_ba))
# saveRDS(empty_raster, "rasters/empty_raster.RDS")
# function to check if file exists and read in file; else is empty raster
# Read a raster from FILE if it exists; otherwise fall back to a pre-saved
# empty raster so downstream stacking code still receives a layer with the
# expected grid.
#
# FILE: path to a .tif raster produced by the Python pipeline
#
# Returns a RasterLayer.
read_raster <- function(FILE) {
  # file.exists() already returns a logical; no "== TRUE" comparison needed
  if (file.exists(FILE)) {
    raster(FILE)
  } else {
    # all-NA placeholder saved in "rasters/empty_raster.RDS"
    readRDS("rasters/empty_raster.RDS")
  }
}
##-------------
## plotting function for reduction/exceedance rasters
##-------------
# Draw a single map panel: a binned raster with state boundaries overlaid
# and a small panel title.
#
# RASTER: RasterLayer of values to display
# TITLE:  panel title text
# COLS:   one fill color per bin
# BREAKS: bin boundaries; length must be length(COLS) + 1
#
# Relies on the global `states_sh` -- assumed loaded before any call.
red_exc_plot <- function(RASTER, TITLE, COLS, BREAKS) {
# forest ownership background intentionally disabled (kept for reference)
# plot(forown,
#
# # total pixels to plot
# # maxpixels = 1e5,
#
# # turn off plot features
# axes = FALSE,
# box = FALSE,
# legend = FALSE,
#
# # colors
# col = c("grey70", "transparent"))
#
# plot reduction/exceedance raster
plot(RASTER,
# total pixels to plot
# maxpixels = ncell(RASTER),
# turn off plot features
axes = FALSE,
box = FALSE,
legend = FALSE,
# colors and bins
col = COLS,
breaks = BREAKS)
# plot states
plot(states_sh,
add = TRUE)
# add panel title (negative line pulls it inside the plot region)
title(TITLE, line = -1.2, cex = 0.8)
}
#----------------------------------------------------------------------------
########################################
## load gis and raster data
########################################
# forest-ownership binary raster (background layer); also defines the CRS
# used for everything else
forown <- raster("gis/forown_binary_crop.tif")
new_crs <- proj4string(forown)
# base states map, reprojected to the raster CRS
states_sh <- readOGR("gis/states")
states_sh <- spTransform(states_sh, new_crs)
# species codes and data; column 4 holds the species code -- TODO confirm
# against data/spp_codes.csv header
sp_dat <- read_csv("data/spp_codes.csv")
colnames(sp_dat)[4] <- "spp_code"
# prefix codes with "s" to match raster file names (e.g. "s832")
sp_dat <- sp_dat %>%
mutate(spp_code = paste("s", spp_code, sep = ""))
#----------------------------------------------------------------------------
########################################
## PLOTTING
########################################
##-------------
## basal area and proportional basal area
##-------------
# Two-panel PDF for one species: basal area (left) and proportional basal
# area (right), drawn over the grey forest-ownership background with state
# boundaries and ComplexHeatmap legends.
#
# SP: species code, e.g. "s832"; must match sp_dat$spp_code and the raster
#     file names under rasters/.
#
# Side effect: writes figures_md/ba/<SP>_ba_propba.pdf.
# Relies on globals forown, states_sh, sp_dat -- assumed loaded beforehand.
plot_ba_propba <- function(SP) {
  # input rasters: absolute and proportional basal area
  sp_ba <- raster(paste("rasters/", SP, ".tif", sep = ""))
  sp_prop <- raster(paste("rasters/", SP, "_proportion.tif", sep = ""))
  # color palettes for the two panels
  ba_cols <- rev(viridis(256))
  prop_ba_cols <- c("khaki2", rev(inferno(6))[2:6])
  # species names for the figure title
  sp_latin <- with(sp_dat, paste(GENUS[spp_code == SP],
                                 SPECIES[spp_code == SP],
                                 sep = " "))
  sp_common <- with(sp_dat, COMMON_NAME[spp_code == SP])
  # bins (fractions) and labels (percent) for proportional basal area
  prop_breaks <- c(0, 0.05, 0.10, 0.20, 0.40, 0.60, 1)
  prop_labels <- c("<5", "5-10", "10-20", "20-40", "40-60", ">60")
  # open the two-panel PDF device
  pdf(file = paste("figures_md/ba/", SP, "_ba_propba.pdf", sep = ""),
      height = 5,
      width = 10)
  par(mar = c(0, 0, 0, 4),
      mfrow = c(1, 2),
      oma = c(0, 0, 2, 0),
      cex = 0.8)
  # ---- left panel: basal area ----
  # grey forested / transparent non-forested background
  plot(forown,
       axes = FALSE,
       box = FALSE,
       legend = FALSE,
       col = c("grey70", "transparent"))
  plot(sp_ba,
       axes = FALSE,
       box = FALSE,
       legend = FALSE,
       col = ba_cols,
       add = TRUE)
  plot(states_sh,
       add = TRUE)
  # separate call draws only the continuous basal-area legend
  plot(sp_ba,
       col = ba_cols,
       legend.only = TRUE,
       horizontal = FALSE,
       smallplot = c(0.85, 0.89, 0.18, 0.76),
       legend.args = list(text = expression(m^2),
                          line = 0.3,
                          side = 3,
                          cex = 1.1,
                          las = 1),
       axis.args = list(cex.axis = 1.1,
                        mgp = c(2.5, 0.5, 0),
                        tck = -0.25),
       add = TRUE)
  title("Basal Area", line = -5, cex = 1)
  # ---- right panel: proportional basal area ----
  plot(forown,
       axes = FALSE,
       box = FALSE,
       legend = FALSE,
       col = c("grey70", "transparent"))
  plot(sp_prop,
       axes = FALSE,
       box = FALSE,
       legend = FALSE,
       col = prop_ba_cols,
       breaks = prop_breaks,
       add = TRUE)
  plot(states_sh,
       add = TRUE)
  # categorical legend via ComplexHeatmap::Legend
  draw(Legend(labels = rev(prop_labels),
              title = "%",
              title_position = "topleft",
              legend_gp = gpar(fill = rev(prop_ba_cols)),
              labels_gp = gpar(fontsize = 12),
              title_gp = gpar(fontsize = 12),
              grid_height = unit(12, "mm"),
              grid_width = unit(5, "mm"),
              ncol = 1),
       x = unit(9.59, "in"),
       y = unit(2.275, "in"))
  title("Proportional Basal Area", line = -5, cex = 1)
  # overall figure title: "Latin name (common name)"
  mtext(bquote(italic(.(sp_latin))~"("*.(sp_common)*")"),
        side = 3, line = -0.5, cex = 2, font = 3, outer = TRUE)
  # legend for the forested/non-forested background; the original call
  # passed an accidental empty argument `title = ,` here -- omit it so the
  # Legend default is used explicitly rather than via a missing argument
  draw(Legend(labels = c("Forested", "Non-forested"),
              title_position = "topleft",
              legend_gp = gpar(fill = c("grey70", "White")),
              border = "grey15",
              ncol = 2,
              labels_gp = gpar(fontsize = 14),
              title_gp = gpar(fontsize = 12),
              grid_height = unit(8, "mm"),
              grid_width = unit(8, "mm")),
       x = unit(5, "in"),
       y = unit(0.5, "in"))
  dev.off()
  # free the large raster objects before returning
  rm(sp_ba)
  rm(sp_prop)
}
# candidate species codes used while testing the plotting functions
test_sp <- c("s832", "s93", "s132", "s73", "s901", "s833", "s711", "s263", "s129")
# species of interest for the current figure set
bt_sp <- c("s108", "s202", "s746", "s93")
# write a basal-area / proportional-basal-area PDF for every species
lapply(bt_sp, plot_ba_propba)
plot_ba_propba("s93")
##-------------
## exceedance----proportion basal area
##-------------
# Four-panel map of the percent of a species' basal area in exceedance of its
# critical load (growth/survival x N/S deposition).
#
# Args:
#   SP: species code (e.g. "s93") used to locate the input rasters under rasters/.
# Side effects: writes figures_md/exc/<SP>_exc.pdf.
# Relies on helpers defined elsewhere in this file/project: read_raster(),
# red_exc_plot(); and on the raster, scales, and ComplexHeatmap packages.
exceedance_plot <- function(SP) {
  # exceedance rasters: growth/survival crossed with N/S deposition
  sp_n_growth_exc <- read_raster(paste0("rasters/", SP, "_proportion_exc_n_growth.tif"))
  sp_n_survival_exc <- read_raster(paste0("rasters/", SP, "_proportion_exc_n_survival.tif"))
  sp_s_growth_exc <- read_raster(paste0("rasters/", SP, "_proportion_exc_s_growth.tif"))
  sp_s_survival_exc <- read_raster(paste0("rasters/", SP, "_proportion_exc_s_survival.tif"))
  # basal area raster, used only as the species-range mask
  sp_ba <- raster(paste0("rasters/", SP, ".tif"))
  # stack the four layers and name them
  exc_stack <- stack(sp_n_growth_exc,
                     sp_n_survival_exc,
                     sp_s_growth_exc,
                     sp_s_survival_exc)
  names(exc_stack) <- c("Growth_N",
                        "Survival_N",
                        "Growth_S",
                        "Survival_S")
  # free the per-layer objects now that they live in the stack
  rm(sp_n_growth_exc,
     sp_n_survival_exc,
     sp_s_growth_exc,
     sp_s_survival_exc)
  # restrict to the species range, then drop the unmasked stack
  exc_stack_mask <- mask(exc_stack, sp_ba)
  rm(exc_stack)
  # palette, class breaks, and legend labels for the categorical display
  exc_cols <- c("steelblue3", brewer_pal(palette = "YlOrRd")(6)[2:6])
  exc_breaks <- c(0, 0.000001, 0.01, 0.05, 0.1, 0.2, 3.5)
  exc_labels <- c("0", "0-1", "1-5", "5-10", "10-20", ">20")
  # open the PDF device; on.exit guarantees it is closed even if a plotting
  # call below errors (the original leaked the device on error)
  pdf(file = paste0("figures_md/exc/", SP, "_exc.pdf"),
      height = 5,
      width = 8)
  on.exit(dev.off(), add = TRUE)
  # 2x2 panel layout with outer margin on the right for the shared legend
  par(mfrow = c(2, 2), mar = c(0, 0, 0, 0), oma = c(0, 0, 2, 4.5), xpd = NA)
  # the four panels
  red_exc_plot(exc_stack_mask[[1]], "Growth Rate - N Deposition", exc_cols, exc_breaks)
  red_exc_plot(exc_stack_mask[[2]], "Survival - N Deposition", exc_cols, exc_breaks)
  red_exc_plot(exc_stack_mask[[3]], "Growth Rate - S Deposition", exc_cols, exc_breaks)
  red_exc_plot(exc_stack_mask[[4]], "Survival - S Deposition", exc_cols, exc_breaks)
  # shared figure title
  mtext("Percent of Basal Area in Exceedance of Critical Load for:",
        side = 3, line = 0, cex = 1.2, font = 2, outer = TRUE)
  # shared legend (ComplexHeatmap::Legend), highest class on top
  draw(Legend(labels = rev(exc_labels),
              title = "%",
              title_position = "topleft",
              legend_gp = gpar(fill = rev(exc_cols)),
              grid_height = unit(8, "mm"),
              grid_width = unit(8, "mm"),
              ncol = 1),
       x = unit(7.45, "in"),
       y = unit(2.6, "in"))
}
lapply(bt_sp, function(x) exceedance_plot(x))
##-------------
## reduction
##-------------
# Four-panel map of the percent reduction (growth/survival x N/S deposition)
# for one species. Mirrors exceedance_plot() but reads the *_reduction rasters.
#
# Args:
#   SP: species code (e.g. "s93") used to locate the input rasters under rasters/.
# Side effects: writes figures_md/red/<SP>_red.pdf.
# Relies on helpers defined elsewhere in this file/project: read_raster(),
# red_exc_plot(); and on the raster, scales, and ComplexHeatmap packages.
reduction_plot <- function(SP) {
  # reduction rasters: growth/survival crossed with N/S deposition
  sp_n_growth_red <- read_raster(paste0("rasters/", SP, "_proportion_exc_n_growth_n_growth_reduction.tif"))
  sp_n_survival_red <- read_raster(paste0("rasters/", SP, "_proportion_exc_n_survival_n_survival_reduction.tif"))
  sp_s_growth_red <- read_raster(paste0("rasters/", SP, "_proportion_exc_s_growth_s_growth_reduction.tif"))
  sp_s_survival_red <- read_raster(paste0("rasters/", SP, "_proportion_exc_s_survival_s_survival_reduction.tif"))
  # basal area raster, used only as the species-range mask
  sp_ba <- raster(paste0("rasters/", SP, ".tif"))
  # stack the four layers and name them
  red_stack <- stack(sp_n_growth_red,
                     sp_n_survival_red,
                     sp_s_growth_red,
                     sp_s_survival_red)
  names(red_stack) <- c("Growth_N",
                        "Survival_N",
                        "Growth_S",
                        "Survival_S")
  # free the per-layer objects now that they live in the stack
  rm(sp_n_growth_red,
     sp_n_survival_red,
     sp_s_growth_red,
     sp_s_survival_red)
  # restrict to the species range, then drop the unmasked stack
  red_stack_mask <- mask(red_stack, sp_ba)
  rm(red_stack)
  # palette, class breaks, and legend labels for the categorical display
  red_cols <- c("steelblue3", brewer_pal(palette = "YlOrRd")(6)[2:6])
  red_breaks <- c(0, 0.000001, 0.01, 0.05, 0.1, 0.2, 3.5)
  red_labels <- c("0", "0-1", "1-5", "5-10", "10-20", ">20")
  # open the PDF device; on.exit guarantees it is closed even if a plotting
  # call below errors (the original leaked the device on error)
  pdf(file = paste0("figures_md/red/", SP, "_red.pdf"),
      height = 5,
      width = 8)
  on.exit(dev.off(), add = TRUE)
  # 2x2 panel layout with outer margin on the right for the shared legend
  par(mfrow = c(2, 2), mar = c(0, 0, 0, 0), oma = c(0, 0, 2, 4.5), xpd = NA)
  # the four panels
  red_exc_plot(red_stack_mask[[1]], "Growth Rate - N Deposition", red_cols, red_breaks)
  red_exc_plot(red_stack_mask[[2]], "Survival - N Deposition", red_cols, red_breaks)
  red_exc_plot(red_stack_mask[[3]], "Growth Rate - S Deposition", red_cols, red_breaks)
  red_exc_plot(red_stack_mask[[4]], "Survival - S Deposition", red_cols, red_breaks)
  # shared figure title
  mtext("Percent Reduction in:", side = 3, line = 0, cex = 1.2, font = 2, outer = TRUE)
  # shared legend (ComplexHeatmap::Legend), highest class on top
  draw(Legend(labels = rev(red_labels),
              title = "%",
              title_position = "topleft",
              legend_gp = gpar(fill = rev(red_cols)),
              grid_height = unit(8, "mm"),
              grid_width = unit(8, "mm"),
              ncol = 1),
       x = unit(7.45, "in"),
       y = unit(2.6, "in"))
}
lapply(bt_sp, function(x) reduction_plot(x))
#----------------------------------------------------------------------------
#----------------------------------------------------------------------------
#----------------------------------------------------------------------------
#############################################################################
## testing
#############################################################################
|
# Step 9: render the BRAT (bow-tie risk analysis) diagrams and print all values.
#
# Produces two PDFs ("BRAT_ThreatsAndBarriers.pdf" and
# "BRAT_HazardMitigationConsequence.pdf") roughly matching Figure 2 of the
# manuscript, then echoes every value via message() (Step 9b).
#
# No arguments / no return value. Reads many globals computed earlier in the
# workflow (Threat*_InitialFreq, Threat*_barriers, Threat_LambdaEffect,
# Mitigation_LambdaEffect, lambdaQuartile, topEvent, postMitigateS, ...).
# Requires the 'diagram' package.
#
# Fixes relative to the original:
#   - pdf() is opened BEFORE openplotmat() so the empty plot lands in the file
#     (the original drew it on whatever device was current);
#   - the package is only installed when missing, instead of unconditionally;
#   - the barrier-placeholder loop now tests membership in my_barriers
#     (the original tested `i %in% 1:length(my_barriers)`, i.e. positions 1-16);
#   - Threat 3 / barrier 3 now shows Threat3_barriers[3] (was [2]);
#   - several message() labels corrected (Threat3/Threat4 lambda lists,
#     Threat4 barriers 4 and 5).
PLOT <- function() {
  # Installing inside a function is a side effect on the user's library;
  # only do it when 'diagram' is actually missing.
  if (!requireNamespace("diagram", quietly = TRUE)) {
    install.packages("diagram")
  }
  library(diagram)

  ## ---- Figure 1: Threats and barriers ----
  # Open the PDF device first so every drawing call below lands in the file.
  pdf("BRAT_ThreatsAndBarriers.pdf")
  openplotmat()
  # Boxes arranged in an 8, 2, 4, 6 formation: one row per threat
  # (threat box + its barriers); pos is an N x 2 matrix of box centres.
  pos <- coordinates(c(8, 2, 4, 6))
  plot(pos, type = "n", main = "BRAT diagram Threats and barriers",
       xlim = c(0, 1), ylim = c(0, 1), ylab = "", xlab = "")
  # Arrows/segments between positional numbers, drawn before the boxes
  segmentarrow(from = pos[1, ], to = pos[8, ], dd = 0.45)   # Threat 1
  segmentarrow(from = pos[9, ], to = pos[10, ], dd = 0.45)  # Threat 2
  segmentarrow(from = pos[11, ], to = pos[14, ], dd = 0.45) # Threat 3
  segmentarrow(from = pos[15, ], to = pos[20, ], dd = 0.45) # Threat 4

  # Position numbers occupied by barrier boxes (all other numbers are threats)
  my_barriers <- c(2, 3, 4, 5, 6, 7, 8, 10, 12, 13, 14, 16, 17, 18, 19, 20)
  my_text_size <- 0.9
  my_edge_length <- 0.05

  # Placeholder box at every barrier position (overdrawn with values below)
  for (i in my_barriers) {
    textrect(mid = pos[i, ], radx = my_edge_length, rady = my_edge_length,
             lab = "barrier", cex = my_text_size, box.col = "white")
  }

  # Draw one threat box plus its three value lines at hard-coded coordinates.
  draw_threat <- function(i, lab, x, ys, init_freq, top_event) {
    textrect(mid = pos[i, ], radx = my_edge_length, rady = my_edge_length,
             lab = lab, cex = my_text_size, box.col = "#0072B2")
    text(x = x, y = ys[1], init_freq, cex = my_text_size)
    text(x = x, y = ys[2], "Current frequency", cex = my_text_size)
    text(x = x, y = ys[3], top_event, cex = my_text_size)
  }
  draw_threat(1, "Threat 1 \n Initial Frequency", 0.0275, c(0.71, 0.69, 0.67),
              Threat1_InitialFreq, Threat1_topEvent)
  draw_threat(9, "Threat 2 \n Initial Frequency", 0.230, c(0.51, 0.49, 0.47),
              Threat2_InitialFreq, Threat2_topevent)
  draw_threat(11, "Threat 3 \n Initial Frequency", 0.095, c(0.32, 0.30, 0.28),
              Threat3_InitialFreq, Threat3_topevent)
  draw_threat(15, "Threat 4 \n Initial Frequency", 0.050, c(0.13, 0.11, 0.09),
              Threat4_InitialFreq, Threat4_topevent)

  # Draw one barrier box plus its frequency/lambda lines; `ref` is the
  # position row whose y coordinate anchors the text under the box.
  draw_barrier <- function(i, n, freq, lambda_val, ref) {
    textrect(mid = pos[i, ], radx = my_edge_length, rady = my_edge_length,
             lab = paste("barrier", n, "\n Frequency"), cex = my_text_size,
             box.col = "white")
    y0 <- pos[, 2][ref]
    text(x = pos[i], y = y0 - 0.03, freq, cex = my_text_size)
    text(x = pos[i], y = y0 - 0.05, "lambda", cex = my_text_size)
    text(x = pos[i], y = y0 - 0.07, lambda_val, cex = my_text_size)
  }
  # Threat 1: positions 2-8 hold barriers 1-7 (barrier 1 has its own scalar)
  threat1_freqs <- c(Threat1_barrier_1, Threat1_barriers[2:7])
  for (k in 1:7) {
    draw_barrier(k + 1, k, threat1_freqs[k], Threat_LambdaEffect[[1]][k], ref = 1)
  }
  # Threat 2: position 10 holds its single barrier
  draw_barrier(10, 1, Threat2_barrier_1, Threat_LambdaEffect[[2]][1], ref = 9)
  # Threat 3: positions 12-14 hold barriers 1-3
  # (original mistakenly reused Threat3_barriers[2] for barrier 3)
  for (k in 1:3) {
    draw_barrier(k + 11, k, Threat3_barriers[k], Threat_LambdaEffect[[3]][k], ref = 11)
  }
  # Threat 4: positions 16-20 hold barriers 1-5
  for (k in 1:5) {
    draw_barrier(k + 15, k, Threat4_barriers[k], Threat_LambdaEffect[[4]][k], ref = 15)
  }
  # Zoom on the plot to expand the figure enough to see all the numbers clearly
  dev.off()

  ## ---- Figure 2: Hazard, mitigation, and consequence ----
  pdf("BRAT_HazardMitigationConsequence.pdf")
  openplotmat()
  # 5 boxes (1 hazard, 3 mitigations, 1 consequence) on a single line
  pos <- coordinates(c(5))
  plot(pos, type = "n",
       main = "BRAT diagram hazzard, mitigation, and consequence",
       xlim = c(0, 1), ylim = c(0.1, 0.8), ylab = "", xlab = "")
  # Main line
  segmentarrow(from = pos[1, ], to = pos[5, ], dd = 0.45)

  # All value text hangs below the (shared) y coordinate of row 1
  y0 <- pos[, 2][1]

  # Hazard box (policy objective)
  textrect(mid = pos[1, ], radx = my_edge_length, rady = my_edge_length,
           lab = "HAZZARD \n Target Frequency", cex = my_text_size, box.col = "red")
  text(x = pos[1], y = y0 - 0.03, (1 + (1 - lambdaQuartile)), cex = my_text_size)
  text(x = pos[1], y = y0 - 0.05, "Target lambda", cex = my_text_size)
  text(x = pos[1], y = y0 - 0.07, lambdaQuartile, cex = my_text_size)
  text(x = pos[1], y = y0 - 0.09, "Current Total top event frequency: ", cex = my_text_size)
  text(x = pos[1], y = y0 - 0.11, topEvent, cex = my_text_size)
  text(x = pos[1], y = y0 - 0.13, "Current Total top event lambda", cex = my_text_size)
  text(x = pos[1], y = y0 - 0.15, (1 + (1 - topEvent)), cex = my_text_size)

  # Mitigation boxes at positions 2-4; frequencies are the hard-coded
  # effectiveness values used throughout this script
  mitigation_freqs <- c("0.95", "0.814", "0.95")
  for (k in 1:3) {
    i <- k + 1
    textrect(mid = pos[i, ], radx = my_edge_length, rady = my_edge_length,
             lab = paste("Mitigation", k, "\n Frequency"), cex = my_text_size,
             box.col = "white")
    text(x = pos[i], y = y0 - 0.03, mitigation_freqs[k], cex = my_text_size)
    text(x = pos[i], y = y0 - 0.05, "Mitigation lambda", cex = my_text_size)
    text(x = pos[i], y = y0 - 0.07, Mitigation_LambdaEffect[[k]], cex = my_text_size)
  }

  # Consequence box
  textrect(mid = pos[5, ], radx = my_edge_length, rady = my_edge_length,
           lab = "CONSEQUENCE \n target frequency", cex = my_text_size, box.col = "green")
  text(x = pos[5], y = y0 - 0.03, (1 + (1 - lambdaQuartile)), cex = my_text_size)
  text(x = pos[5], y = y0 - 0.05, "Consequence target lambda", cex = my_text_size)
  text(x = pos[5], y = y0 - 0.07, lambdaQuartile, cex = my_text_size)
  text(x = pos[5], y = y0 - 0.09, "Current consequence frequency: ", cex = my_text_size)
  text(x = pos[5], y = y0 - 0.11, postMitigateS, cex = my_text_size)
  text(x = pos[5], y = y0 - 0.13, "current consequence top event lambda", cex = my_text_size)
  text(x = pos[5], y = y0 - 0.15, (1 + (1 - postMitigateS)), cex = my_text_size)
  # Zoom on the plot to expand the figure enough to see all the numbers clearly
  dev.off()

  ## ---- Step 9b: print each value on its own ----
  # Threat 1
  print("This is Threat 1")
  message("Threat 1, Initial Frequency: ", Threat1_InitialFreq)
  message("Threat 1, Current top event Frequency: ", Threat1_topEvent)
  message("Threat1, barrier1, frequency: ", Threat1_barrier_1)
  message("Threat1, barrier2, frequency: ", Threat1_barriers[2])
  message("Threat1, barrier3, frequency: ", Threat1_barriers[3])
  message("Threat1, barrier4, frequency: ", Threat1_barriers[4])
  message("Threat1, barrier5, frequency: ", Threat1_barriers[5])
  message("Threat1, barrier6, frequency: ", Threat1_barriers[6])
  message("Threat1, barrier7, frequency: ", Threat1_barriers[7])
  message("Threat1, barriers, lambda list 1 through 7: ", Threat_LambdaEffect[c(1)]) # this goes in the comment box
  message("Threat 1, barrier1, comments, additive predation: ", wolvesOnAdults) # this goes in the comment box
  message("Threat 1, barrier1, comments, compensatory predation: ", otherOnAdults) # this goes in the comment box
  message("Threat 1, barrier1, comments, effectiveness on adults: ", effectivenessAdults) # this goes in the comment box
  # Threat 2
  print("This is Threat 2")
  message("Threat 2, Initial Frequency: ", Threat2_InitialFreq)
  message("Threat 2, Current top event Frequency: ", Threat2_topevent)
  message("Threat2, barrier2, frequency: ", Threat2_barrier_1)
  message("Threat2, barriers, lambda: ", Threat_LambdaEffect[c(2)]) # this goes in the comment box
  message("Threat 2, barrier2, comments, additive predation: ", wolvesOnJuvs) # this goes in the comment box
  message("Threat 2, barrier2, comments, compensatory predation: ", otherOnJuvs) # this goes in the comment box
  message("Threat 2, barrier2, comments, effectiveness on juveniles: ", effectivenessJuvs) # this goes in the comment box
  # Threat 3
  print("this is Threat 3")
  message("Threat 3, Initial Frequency: ", Threat3_InitialFreq)
  message("Threat 3, Current top event Frequency: ", Threat3_topevent)
  message("Threat3, barrier1, frequency: ", Threat3_barriers[1])
  message("Threat3, barrier2, frequency: ", Threat3_barriers[2])
  message("Threat3, barrier3, frequency: ", Threat3_barriers[3])
  message("Threat3, barriers, lambda list 1 through 3: ", Threat_LambdaEffect[c(3)]) # these go in the comment boxes
  # Threat 4
  print("this is Threat 4")
  message("Threat 4, Initial Frequency: ", Threat4_InitialFreq)
  message("Threat 4, Current top event Frequency: ", Threat4_topevent)
  message("Threat4, barrier1, frequency: ", Threat4_barriers[1])
  message("Threat4, barrier2, frequency: ", Threat4_barriers[2])
  message("Threat4, barrier3, frequency: ", Threat4_barriers[3])
  message("Threat4, barrier4, frequency: ", Threat4_barriers[4])
  message("Threat4, barrier5, frequency: ", Threat4_barriers[5])
  message("Threat4, barriers, lambda list 1 through 5: ", Threat_LambdaEffect[c(4)]) # these go in the comment boxes
  ## Hazard (policy objective)
  message("This is the target frequency: ", (1 + (1 - lambdaQuartile)))
  message("This is the target lambda: ", lambdaQuartile) # put this in the risk event red circle
  message("This is the Current Total top event frequency: ", topEvent)
  message("This is the current Total top event lambda: ", (1 + (1 - topEvent))) # put this in comments
  ### Mitigation boxes
  message("This is the 'responsive restoration of linear features to reduce access': ", 0.95)
  message("This is the 'responsive restoriation of linear features to reduce access': ", Mitigation_LambdaEffect[1]) # put this in the comment box
  message("This is the 'wolf cull': ", 0.814)
  message("This is the 'wolf cull': ", Mitigation_LambdaEffect[2]) # put this in the comment box
  message("This is the 'Intensive in situ conservation': ", 0.95)
  message("This is the 'Intensive in situ conservation': ", Mitigation_LambdaEffect[3]) # put this in the comment box
  #### Consequence box
  message("This is the acceptable consequency frequency: ", (1 + (1 - lambdaQuartile)))
  message("This is the current consequence frequency: ", postMitigateS)
  message("This is the current consequency lambda: ", (1 + (1 - postMitigateS))) # put this in a comment box
}
|
/modules/BRATframework/R/PLOT.R
|
no_license
|
StewartResearch/BRAT_CaribouCalculations
|
R
| false
| false
| 19,293
|
r
|
PLOT<-function() {
#################################################################################################################################
# Step 9: Print the BRAT table, with values ----
# this should be the same as Figure 2 in the manuscipt
# run the below code (up to Step 9b) to make a crude output figure similar to the BRAT diagram
# currently just for Threats and barriers. Future versions will involve the Hazzard, mitigation, and consequence values
install.packages("diagram")
library(diagram)
# creates an empty plot
openplotmat()
pdf("BRAT_ThreatsAndBarriers.pdf")
# create the coordinates
# I want the boxes arranged in a 8, 2, 4, 6 formation (for Threat names/initial/top values, and one for each barrier)
pos <- coordinates(c(8,2,4,6))
pos # gives the position of these boxes
class(pos)
plot(pos, type = 'n', main = "BRAT diagram Threats and barriers", xlim = c(0, 1), ylim = c(0, 1), ylab = "", xlab = "")
#text(pos)
# add arrows and segments between positional numbers first
# Threat1
segmentarrow(from = pos[1,], to = pos[8,], dd = 0.45)
# Threat2
segmentarrow(from = pos[9, ], to = pos[10, ], dd = 0.45)
#Threat3
segmentarrow(from = pos[11, ], to = pos[14, ], dd = 0.45)
#Threat3
segmentarrow(from = pos[15, ], to = pos[20, ], dd = 0.45)
# now draw boxes on top of the arrows
my_labels<-c(1:20)
my_threats<-c(1, 9, 11, 15)
my_names_barriers<-c(1, 2, 3, 4, 5, 6, 7, 1, 1, 2, 3, 1, 2, 3, 4, 5)
my_barriers<-c(2, 3, 4, 5, 6, 7, 8, 10, 12, 13, 14, 16, 17, 18, 19, 20)
my_text_size = 0.9
my_edge_length <- 0.05
# identify the barrier boxes
for (i in 1:length(my_labels)) {
if (i %in% 1:length(my_barriers)) {
textrect(mid = pos[i,], radx = my_edge_length, rady = my_edge_length,lab = ("barrier"), cex = my_text_size, box.col = "white")
}
}
# identify the threat boxes, and add their values
for(i in 1:length(my_labels)) {
if (i %in% my_labels[1]){
textrect(mid = pos[i,], radx = my_edge_length, rady = my_edge_length, lab = "Threat 1 \n Initial Frequency", cex = my_text_size, box.col = "#0072B2")
text(x = 0.0275, y = 0.71, Threat1_InitialFreq, cex = my_text_size)
text(x = 0.0275, y = 0.69, "Current frequency", cex = my_text_size)
text(x = 0.0275, y = 0.67, Threat1_topEvent, cex = my_text_size)
} else if (i %in% my_labels[9]) {
textrect(mid = pos[i,], radx = my_edge_length, rady = my_edge_length,lab = "Threat 2 \n Initial Frequency", cex = my_text_size, box.col = "#0072B2")
text(x = 0.230, y = 0.51, Threat2_InitialFreq, cex = my_text_size)
text(x = 0.230, y = 0.49, "Current frequency", cex = my_text_size)
text(x = 0.230, y = 0.47, Threat2_topevent, cex = my_text_size)
} else if (i %in% my_labels[11]) {
textrect(mid = pos[i,], radx = my_edge_length, rady = my_edge_length,lab = "Threat 3 \n Initial Frequency", cex = my_text_size, box.col = "#0072B2")
text(x = 0.095, y = 0.32, Threat3_InitialFreq, cex = my_text_size)
text(x = 0.095, y = 0.30, "Current frequency", cex = my_text_size)
text(x = 0.095, y = 0.28, Threat3_topevent, cex = my_text_size)
} else if (i %in% my_labels[15]) {
textrect(mid = pos[i,], radx = my_edge_length, rady = my_edge_length,lab = "Threat 4 \n Initial Frequency", cex = my_text_size, box.col = "#0072B2")
text(x = 0.050, y = 0.13, Threat4_InitialFreq, cex = my_text_size)
text(x = 0.050, y = 0.11, "Current frequency", cex = my_text_size)
text(x = 0.050, y = 0.09, Threat4_topevent, cex = my_text_size)
}
}
# identify the barrier boxes, and add their values
# remind myself of which position numbers represent barriers:
# my_barriers<-c(2, 3, 4, 5, 6, 7, 8, 10, 12, 13, 14, 16, 17, 18, 19, 20)
for(i in 1:length(my_labels)) {
#For threat 1
if (i %in% my_labels[2]){
textrect(mid = pos[i,], radx = my_edge_length, rady = my_edge_length, lab = "barrier 1 \n Frequency", cex = my_text_size, box.col = "white")
text(x = pos[i], y = pos[,2][1]-0.03, Threat1_barrier_1, cex = my_text_size)
text(x = pos[i], y = pos[,2][1]-0.05, "lambda", cex = my_text_size)
text(x = pos[i], y = pos[,2][1]-0.07, Threat_LambdaEffect[[1]][1], cex = my_text_size)
} else if (i %in% my_labels[3]) {
textrect(mid = pos[i,], radx = my_edge_length, rady = my_edge_length,lab = "barrier 2 \n Frequency", cex = my_text_size, box.col = "white")
text(x = pos[i], y = pos[,2][1]-0.03, Threat1_barriers[2], cex = my_text_size)
text(x = pos[i], y = pos[,2][1]-0.05, "lambda", cex = my_text_size)
text(x = pos[i], y = pos[,2][1]-0.07, Threat_LambdaEffect[[1]][2], cex = my_text_size)
} else if (i %in% my_labels[4]) {
textrect(mid = pos[i,], radx = my_edge_length, rady = my_edge_length,lab = "barrier 3 \n Frequency", cex = my_text_size, box.col = "white")
text(x = pos[i], y = pos[,2][1]-0.03, Threat1_barriers[3], cex = my_text_size)
text(x = pos[i], y = pos[,2][1]-0.05, "lambda", cex = my_text_size)
text(x = pos[i], y = pos[,2][1]-0.07, Threat_LambdaEffect[[1]][3], cex = my_text_size)
} else if (i %in% my_labels[5]) {
textrect(mid = pos[i,], radx = my_edge_length, rady = my_edge_length,lab = "barrier 4 \n Frequency", cex = my_text_size, box.col = "white")
text(x = pos[i], y = pos[,2][1]-0.03, Threat1_barriers[4], cex = my_text_size)
text(x = pos[i], y = pos[,2][1]-0.05, "lambda", cex = my_text_size)
text(x = pos[i], y = pos[,2][1]-0.07, Threat_LambdaEffect[[1]][4], cex = my_text_size)
} else if (i %in% my_labels[6]) {
textrect(mid = pos[i,], radx = my_edge_length, rady = my_edge_length,lab = "barrier 5 \n Frequency", cex = my_text_size, box.col = "white")
text(x = pos[i], y = pos[,2][1]-0.03, Threat1_barriers[5], cex = my_text_size)
text(x = pos[i], y = pos[,2][1]-0.05, "lambda", cex = my_text_size)
text(x = pos[i], y = pos[,2][1]-0.07, Threat_LambdaEffect[[1]][5], cex = my_text_size)
} else if (i %in% my_labels[7]) {
textrect(mid = pos[i,], radx = my_edge_length, rady = my_edge_length,lab = "barrier 6 \n Frequency", cex = my_text_size, box.col = "white")
text(x = pos[i], y = pos[,2][1]-0.03, Threat1_barriers[6], cex = my_text_size)
text(x = pos[i], y = pos[,2][1]-0.05, "lambda", cex = my_text_size)
text(x = pos[i], y = pos[,2][1]-0.07, Threat_LambdaEffect[[1]][6], cex = my_text_size)
} else if (i %in% my_labels[8]) {
textrect(mid = pos[i,], radx = my_edge_length, rady = my_edge_length,lab = "barrier 7 \n Frequency", cex = my_text_size, box.col = "white")
text(x = pos[i], y = pos[,2][1]-0.03, Threat1_barriers[7], cex = my_text_size)
text(x = pos[i], y = pos[,2][1]-0.05, "lambda", cex = my_text_size)
text(x = pos[i], y = pos[,2][1]-0.07, Threat_LambdaEffect[[1]][7], cex = my_text_size)
}
# For threat 2
else if (i %in% my_labels[10]) {
textrect(mid = pos[i,], radx = my_edge_length, rady = my_edge_length,lab = "barrier 1 \n Frequency", cex = my_text_size, box.col = "white")
text(x = pos[i], y = pos[,2][9]-0.03, Threat2_barrier_1, cex = my_text_size)
text(x = pos[i], y = pos[,2][9]-0.05, "lambda", cex = my_text_size)
text(x = pos[i], y = pos[,2][9]-0.07, Threat_LambdaEffect[[2]][1], cex = my_text_size)
}
# For threat 3
else if (i %in% my_labels[12]) {
textrect(mid = pos[i,], radx = my_edge_length, rady = my_edge_length,lab = "barrier 1 \n Frequency", cex = my_text_size, box.col = "white")
text(x = pos[i], y = pos[,2][11]-0.03, Threat3_barriers[1], cex = my_text_size)
text(x = pos[i], y = pos[,2][11]-0.05, "lambda", cex = my_text_size)
text(x = pos[i], y = pos[,2][11]-0.07, Threat_LambdaEffect[[3]][1], cex = my_text_size)
} else if (i %in% my_labels[13]) {
textrect(mid = pos[i,], radx = my_edge_length, rady = my_edge_length,lab = "barrier 2 \n Frequency", cex = my_text_size, box.col = "white")
text(x = pos[i], y = pos[,2][11]-0.03, Threat3_barriers[2], cex = my_text_size)
text(x = pos[i], y = pos[,2][11]-0.05, "lambda", cex = my_text_size)
text(x = pos[i], y = pos[,2][11]-0.07, Threat_LambdaEffect[[3]][2], cex = my_text_size)
} else if (i %in% my_labels[14]) {
textrect(mid = pos[i,], radx = my_edge_length, rady = my_edge_length,lab = "barrier 3 \n Frequency", cex = my_text_size, box.col = "white")
text(x = pos[i], y = pos[,2][11]-0.03, Threat3_barriers[2], cex = my_text_size)
text(x = pos[i], y = pos[,2][11]-0.05, "lambda", cex = my_text_size)
text(x = pos[i], y = pos[,2][11]-0.07, Threat_LambdaEffect[[3]][3], cex = my_text_size)
}
# For threat 4
else if (i %in% my_labels[16]) {
textrect(mid = pos[i,], radx = my_edge_length, rady = my_edge_length,lab = "barrier 1 \n Frequency", cex = my_text_size, box.col = "white")
text(x = pos[i], y = pos[,2][15]-0.03, Threat4_barriers[1], cex = my_text_size)
text(x = pos[i], y = pos[,2][15]-0.05, "lambda", cex = my_text_size)
text(x = pos[i], y = pos[,2][15]-0.07, Threat_LambdaEffect[[4]][1], cex = my_text_size)
} else if (i %in% my_labels[17]) {
textrect(mid = pos[i,], radx = my_edge_length, rady = my_edge_length,lab = "barrier 2 \n Frequency", cex = my_text_size, box.col = "white")
text(x = pos[i], y = pos[,2][15]-0.03, Threat4_barriers[2], cex = my_text_size)
text(x = pos[i], y = pos[,2][15]-0.05, "lambda", cex = my_text_size)
text(x = pos[i], y = pos[,2][15]-0.07, Threat_LambdaEffect[[4]][2], cex = my_text_size)
} else if (i %in% my_labels[18]) {
textrect(mid = pos[i,], radx = my_edge_length, rady = my_edge_length,lab = "barrier 3 \n Frequency", cex = my_text_size, box.col = "white")
text(x = pos[i], y = pos[,2][15]-0.03, Threat4_barriers[3], cex = my_text_size)
text(x = pos[i], y = pos[,2][15]-0.05, "lambda", cex = my_text_size)
text(x = pos[i], y = pos[,2][15]-0.07, Threat_LambdaEffect[[4]][3], cex = my_text_size)
} else if (i %in% my_labels[19]) {
textrect(mid = pos[i,], radx = my_edge_length, rady = my_edge_length,lab = "barrier 4 \n Frequency", cex = my_text_size, box.col = "white")
text(x = pos[i], y = pos[,2][15]-0.03, Threat4_barriers[4], cex = my_text_size)
text(x = pos[i], y = pos[,2][15]-0.05, "lambda", cex = my_text_size)
text(x = pos[i], y = pos[,2][15]-0.07, Threat_LambdaEffect[[4]][4], cex = my_text_size)
} else if (i %in% my_labels[20]) {
textrect(mid = pos[i,], radx = my_edge_length, rady = my_edge_length,lab = "barrier 5 \n Frequency", cex = my_text_size, box.col = "white")
text(x = pos[i], y = pos[,2][15]-0.03, Threat4_barriers[5], cex = my_text_size)
text(x = pos[i], y = pos[,2][15]-0.05, "lambda", cex = my_text_size)
text(x = pos[i], y = pos[,2][15]-0.07, Threat_LambdaEffect[[4]][5], cex = my_text_size)
}
}
# make sure to Zoom on the plot to expand the figure enough to see all the numbers clearly
dev.off()
############################################################################################
# Hazard, mitigation, and consequence portion of the BRAT diagram
pdf("BRAT_HazardMitigationConsequence.pdf")
# creates an empty plot
openplotmat()
# create the coordinates
# I want 5 boxes (1 hazzard, 3 mitigations, 1 consequence) all on the same line
pos <- coordinates(c(5))
pos # gives the position of these boxes
class(pos)
plot(pos, type = 'n', main = "BRAT diagram hazzard, mitigation, and consequence", xlim = c(0, 1), ylim = c(0.1, 0.8), ylab = "", xlab = "")
#text(pos)
# add arrows and segments between positional numbers first
# Main line
segmentarrow(from = pos[1,], to = pos[5,], dd = 0.45)
# now draw boxes on top of the arrows
my_labels<-c(1:5)
my_hazzard<-c(1)
my_mitigation<-c(2, 3, 4)
my_consequence<-c(5)
my_text_size = 0.9
my_edge_length <- 0.05
# identify the Hazzard box
for(i in 1:length(my_labels)) {
if (i %in% my_labels[1]){
textrect(mid = pos[i,], radx = my_edge_length, rady = my_edge_length, lab = "HAZZARD \n Target Frequency", cex = my_text_size, box.col = "red")
text(x = pos[i], y = pos[,2][1]-0.03, (1+(1-lambdaQuartile)), cex = my_text_size)
text(x = pos[i], y = pos[,2][1]-0.05, "Target lambda", cex = my_text_size)
text(x = pos[i], y = pos[,2][1]-0.07, lambdaQuartile, cex = my_text_size)
text(x = pos[i], y = pos[,2][1]-0.09, "Current Total top event frequency: ", cex = my_text_size)
text(x = pos[i], y = pos[,2][1]-0.11, topEvent, cex = my_text_size)
text(x = pos[i], y = pos[,2][1]-0.13, "Current Total top event lambda", cex = my_text_size)
text(x = pos[i], y = pos[,2][1]-0.15, (1+(1-topEvent)), cex = my_text_size)
}
}
# identify the mitigation boxes
for(i in 1:length(my_labels)) {
if (i %in% my_labels[2]){
textrect(mid = pos[i,], radx = my_edge_length, rady = my_edge_length, lab = "Mitigation 1 \n Frequency", cex = my_text_size, box.col = "white")
text(x = pos[i], y = pos[,2][1]-0.03, "0.95", cex = my_text_size)
text(x = pos[i], y = pos[,2][1]-0.05, "Mitigation lambda", cex = my_text_size)
text(x = pos[i], y = pos[,2][1]-0.07, Mitigation_LambdaEffect[[1]], cex = my_text_size)
} else if (i %in% my_labels[3]) {
textrect(mid = pos[i,], radx = my_edge_length, rady = my_edge_length,lab = "Mitigation 2 \n Frequency", cex = my_text_size, box.col = "white")
text(x = pos[i], y = pos[,2][1]-0.03, "0.814", cex = my_text_size)
text(x = pos[i], y = pos[,2][1]-0.05, "Mitigation lambda", cex = my_text_size)
text(x = pos[i], y = pos[,2][1]-0.07, Mitigation_LambdaEffect[[2]], cex = my_text_size)
} else if (i %in% my_labels[4]) {
textrect(mid = pos[i,], radx = my_edge_length, rady = my_edge_length,lab = "Mitigation 3 \n Frequency", cex = my_text_size, box.col = "white")
text(x = pos[i], y = pos[,2][1]-0.03, "0.95", cex = my_text_size)
text(x = pos[i], y = pos[,2][1]-0.05, "Mitigation lambda", cex = my_text_size)
text(x = pos[i], y = pos[,2][1]-0.07, Mitigation_LambdaEffect[[3]], cex = my_text_size)
}
}
# identify the consequence boxes
for(i in 1:length(my_labels)) {
if (i %in% my_labels[5]){
textrect(mid = pos[i,], radx = my_edge_length, rady = my_edge_length, lab = "CONSEQUENCE \n target frequency", cex = my_text_size, box.col = "green")
text(x = pos[i], y = pos[,2][1]-0.03, (1+(1-lambdaQuartile)), cex = my_text_size)
text(x = pos[i], y = pos[,2][1]-0.05, "Consequence target lambda", cex = my_text_size)
text(x = pos[i], y = pos[,2][1]-0.07, lambdaQuartile, cex = my_text_size)
text(x = pos[i], y = pos[,2][1]-0.09, "Current consequence frequency: ", cex = my_text_size)
text(x = pos[i], y = pos[,2][1]-0.11, postMitigateS, cex = my_text_size)
text(x = pos[i], y = pos[,2][1]-0.13, "current consequence top event lambda", cex = my_text_size)
text(x = pos[i], y = pos[,2][1]-0.15, (1+(1-postMitigateS)), cex = my_text_size)
}
}
# make sure to Zoom on the plot to expand the figure enough to see all the numbers clearly
dev.off()
#############################################################################################################################
# Step 9b: Or you can just print out each value on its own
# Threat 1
print("This is Threat 1")
message("Threat 1, Initial Frequency: ", Threat1_InitialFreq)
message ("Threat 1, Current top event Frequency: ", Threat1_topEvent)
message ("Threat1, barrier1, frequency: ", Threat1_barrier_1)
message ("Threat1, barrier2, frequency: ", Threat1_barriers[2])
message ("Threat1, barrier3, frequency: ", Threat1_barriers[3])
message ("Threat1, barrier4, frequency: ", Threat1_barriers[4])
message ("Threat1, barrier5, frequency: ", Threat1_barriers[5])
message ("Threat1, barrier6, frequency: ", Threat1_barriers[6])
message ("Threat1, barrier7, frequency: ", Threat1_barriers[7])
message ("Threat1, barriers, lambda list 1 through 7: ", Threat_LambdaEffect[c(1)]) # this goes in the comment box
message ("Threat 1, barrier1, comments, additive predation: ", wolvesOnAdults) # this goes in the comment box
message ("Threat 1, barrier1, comments, compensatory predation: ", otherOnAdults) # this goes in the comment box
message ("Threat 1, barrier1, comments, effectiveness on adults: ", effectivenessAdults) # this goes in the comment box
# Threat 2
print("This is Threat 2")
message("Threat 2, Initial Frequency: ", Threat2_InitialFreq)
message ("Threat 2, Current top event Frequency: ", Threat2_topevent)
message ("Threat2, barrier2, frequency: ", Threat2_barrier_1)
message ("Threat2, barriers, lambda: ", Threat_LambdaEffect[c(2)]) # this goes in the comment box
message ("Threat 2, barrier2, comments, additive predation: ", wolvesOnJuvs) # this goes in the comment box
message ("Threat 2, barrier2, comments, compensatory predation: ", otherOnJuvs) # this goes in the comment box
message ("Threat 2, barrier2, comments, effectiveness on juveniles: ", effectivenessJuvs) # this goes in the comment box
#Threat3
print ("this is Threat 3")
message("Threat 3, Initial Frequency: ", Threat3_InitialFreq)
message ("Threat 3, Current top event Frequency: ", Threat3_topevent)
message ("Threat3, barrier1, frequency: ", Threat3_barriers[1])
message ("Threat3, barrier2, frequency: ", Threat3_barriers[2])
message ("Threat3, barrier3, frequency: ", Threat3_barriers[3])
message ("Threat1, barriers, lambda list 1 through 3: ", Threat_LambdaEffect[c(3)]) # these go in the comment boxes
#Threat4
print ("this is Threat 4")
message("Threat 4, Initial Frequency: ", Threat4_InitialFreq)
message ("Threat 4, Current top event Frequency: ", Threat4_topevent)
message ("Threat4, barrier1, frequency: ", Threat4_barriers[1])
message ("Threat4, barrier2, frequency: ", Threat4_barriers[2])
message ("Threat4, barrier3, frequency: ", Threat4_barriers[3])
message ("Threat4, barrier3, frequency: ", Threat4_barriers[4])
message ("Threat4, barrier3, frequency: ", Threat4_barriers[5])
message ("Threat1, barriers, lambda list 1 through 5: ", Threat_LambdaEffect[c(4)]) # these go in the comment boxes
## Hazzard (Policy objective)
message ("This is the target frequency: ", (1+(1-lambdaQuartile)))
message ("This is the target lambda: ", lambdaQuartile) # put this in the risk event red circle
message ("This is the Current Total top event frequency: ", topEvent)
message ("This is the current Total top event lambda: ", (1+(1-topEvent))) # put this in comments
### Mitigation Boxes
message ("This is the 'responsive restoration of linear features to reduce access': ", 0.95)
message ("This is the 'responsive restoriation of linear features to reduce access': ", Mitigation_LambdaEffect[1]) # put this in the comment box
message ("This is the 'wolf cull': ", 0.814)
message ("This is the 'wolf cull': ", Mitigation_LambdaEffect[2]) # put this in the comment box
message ("This is the 'Intensive in situ conservation': ", 0.95)
message ("This is the 'Intensive in situ conservation': ", Mitigation_LambdaEffect[3]) # put this in the comment box
#### Consequency Box
message ("This is the acceptable consequency frequency: ", (1+(1-lambdaQuartile)))
message ("This is the current consequence frequency: ", postMitigateS)
message ("This is the current consequency lambda: ", (1+(1-postMitigateS))) # put this in a comment box
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/summarize.R
\name{summarize_ce}
\alias{summarize_ce}
\title{Summarize costs and effectiveness}
\usage{
summarize_ce(costs, qalys, by_grp = FALSE)
}
\arguments{
\item{costs}{Simulated costs by category (objects of class \code{\link{costs}}).}
\item{qalys}{Simulated QALYs (objects of class \code{\link{qalys}}).}
\item{by_grp}{If \code{TRUE}, then costs and QALYs are computed by subgroup. If
\code{FALSE}, then costs and QALYs are aggregated across all patients (and subgroups).}
}
\value{
An object of class \code{\link{ce}}.
}
\description{
Summarize costs and quality-adjusted life-years (QALYs) given output simulated
from an economic model. The summary output is used to perform
cost-effectiveness analysis with \code{\link[=cea]{cea()}} and \code{\link[=cea_pw]{cea_pw()}}.
}
\details{
If mean costs and/or QALYs have already been computed
(i.e., an average within a population), then there
must be one observation for each discount rate (\code{dr}),
PSA sample (\code{sample}), treatment strategy (\code{strategy_id}),
and health state (\code{state_id}). Alternatively, there can be a column
denoting a patient (\code{patient_id}), in which case outcomes will first be
averaged across patients. A \code{grp_id} column can also be used so that
outcomes are computed for each subgroup (if \code{by_grp = TRUE}); otherwise it is assumed that
there is only one subgroup.
}
\keyword{internal}
|
/man/summarize_ce.Rd
|
no_license
|
jeff-m-sullivan/hesim
|
R
| false
| true
| 1,475
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/summarize.R
\name{summarize_ce}
\alias{summarize_ce}
\title{Summarize costs and effectiveness}
\usage{
summarize_ce(costs, qalys, by_grp = FALSE)
}
\arguments{
\item{costs}{Simulated costs by category (objects of class \code{\link{costs}}).}
\item{qalys}{Simulated QALYs (objects of class \code{\link{qalys}}).}
\item{by_grp}{If \code{TRUE}, then costs and QALYs are computed by subgroup. If
\code{FALSE}, then costs and QALYs are aggregated across all patients (and subgroups).}
}
\value{
An object of class \code{\link{ce}}.
}
\description{
Summarize costs and quality-adjusted life-years (QALYs) given output simulated
from an economic model. The summary output is used to perform
cost-effectiveness analysis with \code{\link[=cea]{cea()}} and \code{\link[=cea_pw]{cea_pw()}}.
}
\details{
If mean costs and/or QALYs have already been computed
(i.e., an average within a population), then there
must be one observation for each discount rate (\code{dr}),
PSA sample (\code{sample}), treatment strategy (\code{strategy_id}),
and health state (\code{state_id}). Alternatively, there can be a column
denoting a patient (\code{patient_id}), in which case outcomes will first be
averaged across patients. A \code{grp_id} column can also be used so that
outcomes are computed for each subgroup (if \code{by_grp = TRUE}); otherwise it is assumed that
there is only one subgroup.
}
\keyword{internal}
|
#' Prior Definitions for \pkg{brms} Models
#'
#' Define priors for specific parameters or classes of parameters
#'
#' @param prior A character string defining a distribution in \pkg{Stan} language
#' @param class The parameter class. Defaults to \code{"b"} (fixed effects).
#' See 'Details' for other valid parameter classes.
#' @param coef Name of the (population- or group-level) parameter
#' @param group Grouping factor of group-level parameters.
#' @param nlpar Name of a non-linear / auxiliary parameter.
#' Only used in non-linear / distributional models.
#' @param resp Name of the response variable / category.
#' Only used in multivariate and categorical models.
#' Is internally handled as an alias of \code{nlpar}.
#' @param lb Lower bound for parameter restriction. Currently only allowed
#' for classes \code{"b"}, \code{"ar"}, \code{"ma"}, and \code{"arr"}.
#' Defaults to \code{NULL}, that is no restriction.
#' @param ub Upper bound for parameter restriction. Currently only allowed
#' for classes \code{"b"}, \code{"ar"}, \code{"ma"}, and \code{"arr"}.
#' Defaults to \code{NULL}, that is no restriction.
#'
#' @return An object of class \code{brmsprior} to be used in the \code{prior}
#' argument of \code{\link[brms:brm]{brm}}.
#'
#' @details
#' \code{set_prior} is used to define prior distributions for parameters
#' in \pkg{brms} models. Below, we explain its usage and list some common
#' prior distributions for parameters.
#' A complete overview on possible prior distributions is given
#' in the Stan Reference Manual available at \url{http://mc-stan.org/}.
#'
#' To combine multiple priors, use \code{c(...)},
#' e.g., \code{c(set_prior(...), set_prior(...))}.
#' \pkg{brms} does not check if the priors are written in correct \pkg{Stan} language.
#' Instead, \pkg{Stan} will check their syntactical correctness when the model
#' is parsed to \code{C++} and returns an error if they are not.
#' This, however, does not imply that priors are always meaningful if they are
#' accepted by \pkg{Stan}. Although \pkg{brms} tries to find common problems
#' (e.g., setting bounded priors on unbounded parameters), there is no guarantee
#' that the defined priors are reasonable for the model.
#' Currently, there are seven types of parameters in \pkg{brms} models,
#' for which the user can specify prior distributions. \cr
#'
#' 1. Population-level ('fixed') effects
#'
#' Every Population-level effect has its own regression parameter
#' These parameters are internally named as \code{b_<fixed>}, where \code{<fixed>}
#' represents the name of the corresponding population-level effect.
#' Suppose, for instance, that \code{y} is predicted by \code{x1} and \code{x2}
#' (i.e. \code{y ~ x1+x2} in formula syntax).
#' Then, \code{x1} and \code{x2} have regression parameters
#' \code{b_x1} and \code{b_x2} respectively.
#' The default prior for population-level effects (including monotonic and
#' category specific effects) is an improper flat prior over the reals.
#' Other common options are normal priors or student-t priors.
#' If we want to have a normal prior with mean 0 and
#' standard deviation 5 for \code{x1}, and a unit student-t prior with 10
#' degrees of freedom for \code{x2}, we can specify this via
#' \code{set_prior("normal(0,5)", class = "b", coef = "x1")} and \cr
#' \code{set_prior("student_t(10,0,1)", class = "b", coef = "x2")}.
#' To put the same prior on all fixed effects at once,
#' we may write as a shortcut \code{set_prior("<prior>", class = "b")}.
#' This also leads to faster sampling, because priors can be vectorized in this case.
#' Both ways of defining priors can be combined using for instance
#' \code{set_prior("normal(0,2)", class = "b")} and \cr
#' \code{set_prior("normal(0,10)", class = "b", coef = "x1")}
#' at the same time. This will set a \code{normal(0,10)} prior on
#' the fixed effect of \code{x1} and a \code{normal(0,2)} prior
#' on all other fixed effects. However, this will break vectorization and
#' may slow down the sampling procedure a bit.
#'
#' In case of the default intercept parameterization
#' (discussed in the 'Details' section of \code{\link[brms:brm]{brm}}),
#' the fixed effects intercept has its own parameter class
#' named \code{"Intercept"} and priors can thus be
#' specified via \code{set_prior("<prior>", class = "Intercept")}.
#' Setting a prior on the intercept will not break vectorization
#' of the other population-level effects.
#'
#' A special shrinkage prior to be applied on population-level effects
#' is the horseshoe prior.
#' It is symmetric around zero with fat tails and an infinitely large spike
#' at zero. This makes it ideal for sparse models that have
#' many regression coefficients, although only a minority of them is non-zero.
#' For more details see Carvalho et al. (2009).
#' The horseshoe prior can be applied on all population-level effects at once
#' (excluding the intercept) by using \code{set_prior("horseshoe(1)")}.
#' The \code{1} implies that the student-t prior of the local shrinkage
#' parameters has 1 degree of freedom. This may, however, lead to an
#' increased number of divergent transitions in \pkg{Stan}.
#' Accordingly, increasing the degrees of freedom to slightly higher values
#' (e.g., \code{3}) may often be a better option, although the prior
#' no longer resembles a horseshoe in this case.
#' Generally, models with horseshoe priors are more likely than other models
#' to have divergent transitions so that increasing \code{adapt_delta}
#' from \code{0.8} to values closer to \code{1} will often be necessary.
#' See the documentation of \code{\link[brms:brm]{brm}} for instructions
#' on how to increase \code{adapt_delta}. \cr
#'
#' In non-linear models, population-level effects are defined separately
#' for each non-linear parameter. Accordingly, it is necessary to specify
#' the non-linear parameter in \code{set_prior} so that priors
#' can be assigned correctly.
#' If, for instance, \code{alpha} is the parameter and \code{x} the predictor
#' for which we want to define the prior, we can write
#' \code{set_prior("<prior>", coef = "x", nlpar = "alpha")}.
#' As a shortcut we can use \code{set_prior("<prior>", nlpar = "alpha")}
#' to set the same prior on all population-level effects of \code{alpha} at once.
#'
#' If desired, population-level effects can be restricted to fall only
#' within a certain interval using the \code{lb} and \code{ub} arguments
#' of \code{set_prior}. This is often required when defining priors
#' that are not defined everywhere on the real line, such as uniform
#' or gamma priors. When defining a \code{uniform(2,4)} prior,
#' you should write \code{set_prior("uniform(2,4)", lb = 2, ub = 4)}.
#' When using a prior that is defined on the positive reals only
#' (such as a gamma prior) set \code{lb = 0}.
#' In most situations, it is not useful to restrict population-level
#' parameters through bounded priors
#' (non-linear models are an important exception),
#' but if you really want to this is the way to go.
#'
#' 2. Standard deviations of group-level ('random') effects
#'
#' Each group-level effect of each grouping factor has a standard deviation named
#' \code{sd_<group>_<random>}. Consider, for instance, the formula
#' \code{y ~ x1+x2+(1+x1|g)}.
#' We see that the intercept as well as \code{x1} are group-level effects
#' nested in the grouping factor \code{g}.
#' The corresponding standard deviation parameters are named as
#' \code{sd_g_Intercept} and \code{sd_g_x1} respectively.
#' These parameters are restricted to be non-negative and, by default,
#' have a half student-t prior with 3 degrees of freedom and a
#' scale parameter that depends on the standard deviation of the response
#' after applying the link function. Minimally, the scale parameter is 10.
#' This prior is used (a) to be only very weakly informative in order to influence
#' results as few as possible, while (b) providing at least some regularization
#' to considerably improve convergence and sampling efficiency.
#' To define a prior distribution only for standard deviations
#' of a specific grouping factor,
#' use \cr \code{set_prior("<prior>", class = "sd", group = "<group>")}.
#' To define a prior distribution only for a specific standard deviation
#' of a specific grouping factor, you may write \cr
#' \code{set_prior("<prior>", class = "sd", group = "<group>", coef = "<coef>")}.
#' Recommendations on useful prior distributions for
#' standard deviations are given in Gelman (2006), but note that he
#' no longer recommends uniform priors. \cr
#'
#' When defining priors on group-level effects parameters in non-linear models,
#' please make sure to specify the corresponding non-linear parameter
#' through the \code{nlpar} argument in the same way as
#' for population-level effects.
#'
#' 3. Correlations of group-level ('random') effects
#'
#' If there is more than one group-level effect per grouping factor,
#' the correlations between those effects have to be estimated.
#' The prior \code{"lkj_corr_cholesky(eta)"} or in short
#' \code{"lkj(eta)"} with \code{eta > 0}
#' is essentially the only prior for (Cholesky factors of) correlation matrices.
#' If \code{eta = 1} (the default) all correlation matrices
#' are equally likely a priori. If \code{eta > 1}, extreme correlations
#' become less likely, whereas \code{0 < eta < 1} results in
#' higher probabilities for extreme correlations.
#' Correlation matrix parameters in \code{brms} models are named as
#' \code{cor_(group)}, (e.g., \code{cor_g} if \code{g} is the grouping factor).
#' To set the same prior on every correlation matrix,
#' use for instance \code{set_prior("lkj(2)", class = "cor")}.
#'
#' 4. Standard deviations of smoothing terms
#'
#' GAMMs are implemented in \pkg{brms} using the 'random effects'
#' formulation of smoothing terms (for details see
#' \code{\link[mgcv:gamm]{gamm}}). Thus, each smoothing term
#' has its corresponding standard deviation modeling
#' the variability within this term. In \pkg{brms}, this
#' parameter class is called \code{sds} and priors can
#' be specified via \code{set_prior("<prior>", class = "sds",
#' coef = "<term label>")}. The default prior is the same as
#' for standard deviations of group-level effects.
#'
#' 5. Autocorrelation parameters
#'
#' The autocorrelation parameters currently implemented are named
#' \code{ar} (autoregression), \code{ma} (moving average),
#' and \code{arr} (autoregression of the response).
#'
#' Priors can be defined by \code{set_prior("<prior>", class = "ar")}
#' for \code{ar} and similar for \code{ma} and \code{arr} effects.
#' By default, \code{ar} and \code{ma} are bounded between \code{-1}
#' and \code{1} and \code{arr} is unbounded (you may change this
#' by using the arguments \code{lb} and \code{ub}). The default
#' prior is flat over the definition area.
#'
#' 6. Distance parameters of monotonic effects
#'
#' As explained in the details section of \code{\link[brms:brm]{brm}},
#' monotonic effects make use of a special parameter vector to
#' estimate the 'normalized distances' between consecutive predictor
#' categories. This is realized in \pkg{Stan} using the \code{simplex}
#' parameter type and thus this class is also named \code{"simplex"} in
#' \pkg{brms}. The only valid prior for simplex parameters is the
#' dirichlet prior, which accepts a vector of length \code{K - 1}
#' (K = number of predictor categories) as input defining the
#' 'concentration' of the distribution. Explaining the dirichlet prior
#' is beyond the scope of this documentation, but we want to describe
#' how to define this prior syntactically correct.
#' If a predictor \code{x} with \code{K} categories is modeled as monotonic,
#' we can define a prior on its corresponding simplex via \cr
#' \code{set_prior("dirichlet(<vector>)", class = "simplex", coef = "x")}.
#' For \code{<vector>}, we can put in any \code{R} expression
#' defining a vector of length \code{K - 1}. The default is a uniform
#' prior (i.e. \code{<vector> = rep(1, K-1)}) over all simplexes
#' of the respective dimension.
#'
#' 7. Parameters for specific families
#'
#' Some families need additional parameters to be estimated.
#' Families \code{gaussian}, \code{student}, and \code{cauchy}
#' need the parameter \code{sigma}
#' to account for the residual standard deviation.
#' By default, \code{sigma} has a half student-t prior that scales
#' in the same way as the random effects standard deviations.
#' Furthermore, family \code{student} needs the parameter
#' \code{nu} representing the degrees of freedom of students t distribution.
#' By default, \code{nu} has prior \code{"gamma(2,0.1)"}
#' and a fixed lower bound of \code{0}.
#' Families \code{gamma}, \code{weibull}, \code{inverse.gaussian}, and
#' \code{negbinomial} need a \code{shape} parameter that has a
#' \code{"gamma(0.01,0.01)"} prior by default.
#' For families \code{cumulative}, \code{cratio}, \code{sratio},
#' and \code{acat}, and only if \code{threshold = "equidistant"},
#' the parameter \code{delta} is used to model the distance between
#' two adjacent thresholds.
#' By default, \code{delta} has an improper flat prior over the reals.
#' The \code{von_mises} family needs the parameter \code{kappa}, representing
#' the concentration parameter. By default, \code{kappa} has prior
#' \code{"gamma(2, 0.01)"}. \cr
#' Every family specific parameter has its own prior class, so that
#' \code{set_prior("<prior>", class = "<parameter>")} is the right way to go.
#' All of these priors are chosen to be weakly informative,
#' having only minimal influence on the estimations,
#' while improving convergence and sampling efficiency.
#'
#' Often, it may not be immediately clear,
#' which parameters are present in the model.
#' To get a full list of parameters and parameter classes for which
#' priors can be specified (depending on the model)
#' use function \code{\link[brms:get_prior]{get_prior}}.
#'
#' @seealso \code{\link[brms:get_prior]{get_prior}}
#'
#' @references
#' Gelman A (2006). Prior distributions for variance parameters in hierarchical models.
#' Bayesian analysis, 1(3), 515 -- 534.
#'
#' Carvalho, C. M., Polson, N. G., & Scott, J. G. (2009).
#' Handling sparsity via the horseshoe.
#' In International Conference on Artificial Intelligence and Statistics (pp. 73-80).
#'
#' @examples
#' ## check which parameters can have priors
#' get_prior(rating ~ treat + period + carry + (1|subject),
#' data = inhaler, family = sratio(),
#' threshold = "equidistant")
#'
#' ## define some priors
#' prior <- c(set_prior("normal(0,10)", class = "b"),
#' set_prior("normal(1,2)", class = "b", coef = "treat"),
#' set_prior("cauchy(0,2)", class = "sd",
#' group = "subject", coef = "Intercept"),
#' set_prior("uniform(-5,5)", class = "delta"))
#'
#' ## verify that the priors indeed found their way into Stan's model code
#' make_stancode(rating ~ period + carry + cse(treat) + (1|subject),
#' data = inhaler, family = sratio(),
#' threshold = "equidistant",
#' prior = prior)
#'
#' ## use horseshoe priors to model sparsity in population-level effects parameters
#' make_stancode(count ~ log_Age_c + log_Base4_c * Trt_c,
#' data = epilepsy, family = poisson(),
#' prior = set_prior("horseshoe(3)"))
#'
#' @export
set_prior <- function(prior, class = "b", coef = "", group = "",
                      nlpar = "", resp = NULL, lb = NULL, ub = NULL) {
  # Coerce all arguments to scalars; 'resp' is treated as an alias of 'nlpar'
  prior <- as.character(prior)
  class <- as.character(class)
  group <- as.character(group)
  coef <- as.character(coef)
  nlpar <- as.character(use_alias(nlpar, resp, warn = FALSE))
  # as.numeric(NULL) yields numeric(0), so an unset bound has length 0
  lb <- as.numeric(lb)
  ub <- as.numeric(ub)
  if (length(prior) != 1 || length(class) != 1 || length(coef) != 1 ||
      length(group) != 1 || length(nlpar) != 1 || length(lb) > 1 ||
      length(ub) > 1) {
    stop("All arguments of set_prior must be of length 1.", call. = FALSE)
  }
  valid_classes <- c("Intercept", "b", "sd", "sds", "simplex", "cor", "L",
                     "ar", "ma", "arr", "sigma", "sigmaLL", "rescor",
                     "Lrescor", "nu", "shape", "delta", "phi", "kappa")
  if (!class %in% valid_classes) {
    stop(paste(class, "is not a valid parameter class"), call. = FALSE)
  }
  # 'group' only applies to group-level ('random') effect classes
  if (nchar(group) > 0 && !class %in% c("sd", "cor", "L")) {
    stop(paste("argument 'group' not meaningful for class", class),
         call. = FALSE)
  }
  coef_classes <- c("Intercept", "b", "sd", "sds", "sigma", "simplex")
  if (nchar(coef) > 0 && !class %in% coef_classes) {
    stop(paste("argument 'coef' not meaningful for class", class),
         call. = FALSE)
  }
  # 'nlpar' is only meaningful for the first five (non-linear capable) classes
  if (nchar(nlpar) > 0 && !class %in% valid_classes[1:5]) {
    stop(paste("argument 'nlpar' not meaningful for class", class),
         call. = FALSE)
  }
  is_arma <- class %in% c("ar", "ma")
  if (length(lb) > 0 || length(ub) > 0 || is_arma) {
    if (!(class %in% c("b", "arr") || is_arma)) {
      stop("Currently boundaries are only allowed for ",
           "population-level and ARMA effects.", call. = FALSE)
    }
    if (nchar(coef) > 0) {
      stop("'coef' may not be specified when using boundaries")
    }
    if (is_arma) {
      # default ARMA bounds of [-1, 1]; plain if/else instead of a scalar
      # ifelse() call, which is slower and can strip attributes
      if (length(lb) == 0) lb <- -1
      if (length(ub) == 0) ub <- 1
      if (is.na(lb) || is.na(ub) || abs(lb) > 1 || abs(ub) > 1) {
        warning("Setting boundaries of ARMA parameters outside of ",
                "[-1,1] may not be appropriate.", call. = FALSE)
      }
    }
    # don't put spaces in boundary declarations; a failed condition
    # yields NULL so the bound is simply omitted below
    lb <- if (length(lb) > 0 && !is.na(lb)) paste0("lower=", lb)
    ub <- if (length(ub) > 0 && !is.na(ub)) paste0("upper=", ub)
    if (!is.null(lb) || !is.null(ub)) {
      bound <- paste0("<", paste(c(lb, ub), collapse = ","), ">")
    } else {
      bound <- ""
    }
  } else {
    bound <- ""
  }
  if (grepl("^increment_log_prob\\(", prior)) {
    # increment_log_prob can be used to directly add a term to the
    # log posterior; it is not tied to any particular parameter
    class <- coef <- group <- nlpar <- ""
  }
  out <- nlist(prior, class, coef, group, nlpar, bound)
  class(out) <- c("brmsprior", "list")
  out
}
#' Overview on Priors for \pkg{brms} Models
#'
#' Get information on all parameters (and parameter classes) for which priors
#' may be specified including default priors.
#'
#' @inheritParams brm
#' @param internal A flag indicating if the names of additional internal parameters should be displayed.
#' Setting priors on these parameters is not recommended
#'
#' @return A data.frame with columns \code{prior}, \code{class}, \code{coef}, and \code{group}
#' and several rows, each providing information on a parameter (or parameter class) on which
#' priors can be specified. The prior column is empty except for internal default priors.
#'
#' @seealso \code{\link[brms:set_prior]{set_prior}}
#'
#' @examples
#' ## get all parameters and parameters classes to define priors on
#' (prior <- get_prior(count ~ log_Age_c + log_Base4_c * Trt_c
#' + (1|patient) + (1|visit),
#' data = epilepsy, family = poisson()))
#'
#' ## define a prior on all population-level effects at once
#' prior$prior[1] <- "normal(0,10)"
#'
#' ## define a specific prior on the population-level effect of Trt_c
#' prior$prior[5] <- "student_t(10, 0, 5)"
#'
#' ## verify that the priors indeed found their way into Stan's model code
#' make_stancode(count ~ log_Age_c + log_Base4_c * Trt_c
#' + (1|patient) + (1|visit),
#' data = epilepsy, family = poisson(),
#' prior = prior)
#'
#' @export
get_prior <- function(formula, data = NULL, family = gaussian(),
                      autocor = NULL, nonlinear = NULL, partial = NULL,
                      threshold = c("flexible", "equidistant"),
                      internal = FALSE) {
  # Compute all parameters (and parameter classes) on which priors may
  # be specified, together with their default priors.
  # Note that default priors are stored in this function.
  # Returns:
  #   a prior_frame sorted by nlpar, class, group, and coef
  family <- check_family(family)
  link <- family$link
  formula <- update_formula(formula, data = data, family = family,
                            partial = partial, nonlinear = nonlinear)
  threshold <- match.arg(threshold)
  autocor <- check_autocor(autocor)
  ee <- extract_effects(formula, family = family)
  data <- update_data(data, family = family, effects = ee)
  # ensure that RE and residual SDs only have a weakly informative prior by default
  Y <- unname(model.response(data))
  prior_scale <- 10  # minimal scale of the default SD prior
  if (is.lognormal(family)) link <- "log"
  if (link %in% c("identity", "log", "inverse", "sqrt", "1/mu^2")) {
    if (link %in% c("log", "inverse", "1/mu^2")) {
      Y <- ifelse(Y == 0, Y + 0.1, Y) # avoid Inf in link(Y)
    }
    # adapt the prior scale to the SD of the response on the link scale;
    # warnings (e.g., NaN from an undefined transform) are suppressed
    suggested_scale <- SW(round(link(sd(Y), link = link)))
    if (!is.nan(suggested_scale)) {
      prior_scale <- max(prior_scale, suggested_scale, na.rm = TRUE)
    }
  }
  def_scale_prior <- paste0("student_t(3, 0, ", prior_scale, ")")
  # initialize output
  prior <- empty_prior_frame()
  # priors for primary regression effects
  if (length(ee$nonlinear)) {
    # non-linear models: one set of effects per non-linear parameter
    nlpars <- names(ee$nonlinear)
    for (i in seq_along(nlpars)) {
      prior_eff <- get_prior_effects(ee$nonlinear[[i]], data = data,
                                     autocor = autocor, nlpar = nlpars[i],
                                     spec_intercept = FALSE,
                                     def_scale_prior = def_scale_prior,
                                     internal = internal)
      prior <- rbind(prior, prior_eff)
    }
  } else {
    if (length(ee$response) > 1L) {
      # priors for effects in multivariate models
      for (r in c("", ee$response)) {
        # r = "" adds global priors affecting parameters of all responses
        prior_eff <- get_prior_effects(ee, data = data, autocor = autocor,
                                       def_scale_prior = def_scale_prior,
                                       internal = internal, nlpar = r)
        prior <- rbind(prior, prior_eff)
      }
    } else {
      # priors for effects in univariate models
      prior_eff <- get_prior_effects(ee, data = data, autocor = autocor,
                                     def_scale_prior = def_scale_prior,
                                     internal = internal)
      prior <- rbind(prior, prior_eff)
    }
  }
  # priors for auxiliary parameters
  def_auxprior <- c(sigma = def_scale_prior, shape = "gamma(0.01, 0.01)",
                    nu = "gamma(2, 0.1)", phi = "gamma(0.01, 0.01)",
                    kappa = "gamma(2, 0.01)", zi = "beta(1, 1)",
                    hu = "beta(1, 1)")
  valid_auxpars <- valid_auxpars(family, effects = ee, autocor = autocor)
  for (ap in valid_auxpars) {
    if (!is.null(ee[[ap]])) {
      # the auxiliary parameter has its own effects formula,
      # so it gets regression-effect priors instead of a scalar prior
      auxprior <- get_prior_effects(ee[[ap]], data = data,
                                    autocor = autocor, nlpar = ap,
                                    spec_intercept = FALSE,
                                    def_scale_prior = def_scale_prior,
                                    internal = internal)
    } else {
      auxprior <- prior_frame(class = ap, prior = def_auxprior[ap])
    }
    prior <- rbind(prior, auxprior)
  }
  # priors of group-level parameters
  ranef <- tidy_ranef(ee, data)
  prior_ranef <- get_prior_ranef(ranef, def_scale_prior = def_scale_prior,
                                 global_sd = length(ee$response) > 1L,
                                 internal = internal)
  prior <- rbind(prior, prior_ranef)
  # prior for the delta parameter for equidistant thresholds
  if (is.ordinal(family) && threshold == "equidistant") {
    prior <- rbind(prior, prior_frame(class = "delta"))
  }
  # priors for auxiliary parameters of multivariate models
  if (is.linear(family) && length(ee$response) > 1L) {
    sigma_coef <- c("", ee$response)
    sigma_prior <- c(def_scale_prior, rep("", length(ee$response)))
    sigma_prior <- prior_frame(class = "sigma", coef = sigma_coef,
                               prior = sigma_prior)
    prior <- rbind(prior, sigma_prior)
    if (internal) {
      prior <- rbind(prior, prior_frame(class = "Lrescor",
                                        prior = "lkj_corr_cholesky(1)"))
    } else {
      prior <- rbind(prior, prior_frame(class = "rescor", prior = "lkj(1)"))
    }
  }
  # priors for autocor parameters
  cbound <- "<lower=-1,upper=1>"
  if (get_ar(autocor)) {
    prior <- rbind(prior, prior_frame(class = "ar", bound = cbound))
  }
  if (get_ma(autocor)) {
    prior <- rbind(prior, prior_frame(class = "ma", bound = cbound))
  }
  if (get_arr(autocor)) {
    prior <- rbind(prior, prior_frame(class = "arr"))
  }
  if (is(autocor, "cor_bsts")) {
    prior <- rbind(prior, prior_frame(class = "sigmaLL",
                                      prior = def_scale_prior))
  }
  # do not remove unique(.): the rbind calls above may create duplicates
  prior <- unique(prior[with(prior, order(nlpar, class, group, coef)), ])
  rownames(prior) <- 1:nrow(prior)
  prior
}
get_prior_effects <- function(effects, data, autocor = cor_arma(),
                              nlpar = "", spec_intercept = TRUE,
                              def_scale_prior = "", internal = FALSE) {
  # Gather priors for all primary effect types of one linear predictor:
  # population-level, monotonic, spline, and category specific effects.
  # Group-level priors are prepared separately.
  # The family argument is deliberately not used here to avoid
  # removal of the intercept for ordinal models.
  # Args:
  #   spec_intercept: use the special parameter class for the FE Intercept?
  fe_names <- colnames(data_fixef(effects, data, autocor = autocor)$X)
  spec_intercept <- has_intercept(effects$fixed) && spec_intercept
  fe_prior <- get_prior_fixef(fe_names, spec_intercept = spec_intercept,
                              nlpar = nlpar, internal = internal)
  mo_names <- colnames(get_model_matrix(effects$mono, data))
  mo_prior <- get_prior_monef(mo_names, fixef = fe_names, nlpar = nlpar)
  spline_names <- get_spline_labels(effects)
  spline_prior <- get_prior_splines(spline_names, def_scale_prior,
                                    nlpar = nlpar)
  cs_names <- colnames(get_model_matrix(effects$cse, data = data))
  cs_prior <- get_prior_csef(cs_names, fixef = fe_names)
  rbind(fe_prior, mo_prior, spline_prior, cs_prior)
}
get_prior_fixef <- function(fixef, spec_intercept = TRUE,
                            nlpar = "", internal = FALSE) {
  # Priors for population-level ('fixed') effects.
  # Args:
  #   fixef: names of the fixed effects
  #   spec_intercept: use the special 'Intercept' parameter class?
  #   nlpar: optional name of a non-linear parameter
  #   internal: see get_prior; additionally exposes the internal
  #     'temp_Intercept' class if TRUE
  # Returns:
  #   an object of class prior_frame
  out <- empty_prior_frame()
  if (length(fixef)) {
    # one global row (coef = "") plus one row per coefficient
    out <- rbind(out, prior_frame(class = "b", coef = c("", fixef),
                                  nlpar = nlpar))
  }
  if (spec_intercept) {
    out <- rbind(out, prior_frame(class = "Intercept", coef = "",
                                  nlpar = nlpar))
    if (internal) {
      out <- rbind(out, prior_frame(class = "temp_Intercept",
                                    coef = "", nlpar = nlpar))
    }
  }
  out
}
get_prior_monef <- function(monef, fixef = NULL, nlpar = "") {
  # Priors for monotonic effects.
  # Args:
  #   monef: names of the monotonic effects
  #   fixef: names of the fixed effects (used to detect clashes)
  #   nlpar: optional name of a non-linear parameter
  # Returns:
  #   an object of class prior_frame
  if (!length(monef)) {
    return(empty_prior_frame())
  }
  # a variable may not be both a fixed and a monotonic effect
  clashing <- intersect(fixef, monef)
  if (length(clashing)) {
    stop(paste("Variables cannot be modeled as fixed and",
               "monotonic effects at the same time.",
               "\nError occured for variables:",
               paste(clashing, collapse = ", ")), call. = FALSE)
  }
  # each monotonic effect gets rows in both the 'b' and 'simplex' classes
  rbind(prior_frame(class = "b", coef = c("", monef), nlpar = nlpar),
        prior_frame(class = "simplex", coef = monef, nlpar = nlpar))
}
get_prior_csef <- function(csef, fixef = NULL) {
  # Priors for category specific effects.
  # Args:
  #   csef: names of the category specific effects
  #   fixef: names of the fixed effects (used to detect clashes)
  # Returns:
  #   an object of class prior_frame
  if (!length(csef)) {
    return(empty_prior_frame())
  }
  # a variable may not be both a fixed and a category specific effect
  clashing <- intersect(fixef, csef)
  if (length(clashing)) {
    stop(paste("Variables cannot be modeled as fixed and",
               "category specific effects at the same time.",
               "\nError occured for variables:",
               paste(clashing, collapse = ", ")), call. = FALSE)
  }
  prior_frame(class = "b", coef = c("", csef))
}
get_prior_ranef <- function(ranef, def_scale_prior,
                            global_sd = FALSE, internal = FALSE) {
  # priors for group-level ('random') effects parameters
  # Args:
  #   ranef: a data.frame-like object returned by tidy_ranef
  #   def_scale_prior: a character string defining the default
  #     prior for group-level SDs
  #   global_sd: allow to set a global SD prior
  #     affecting all non-linear parameters?
  #   internal: see get_prior; if TRUE, the internal Cholesky factor
  #     class "L" is used in place of "cor"
  # Returns:
  #   an object of class prior_frame
  prior <- empty_prior_frame()
  if (nrow(ranef)) {
    # global sd class (not tied to a specific grouping factor)
    nlpars <- unique(ranef$nlpar)
    if (global_sd) {
      # the default scale prior is attached only to the global row
      global_sd_prior <- rep("", length(setdiff(nlpars, "")))
      global_sd_prior <- c(def_scale_prior, global_sd_prior)
      global_sd_prior <- prior_frame(class = "sd",
                                     prior = global_sd_prior,
                                     nlpar = union("", nlpars))
    } else {
      global_sd_prior <- prior_frame(class = "sd",
                                     prior = def_scale_prior,
                                     nlpar = nlpars)
    }
    prior <- rbind(prior, global_sd_prior)
    for (id in unique(ranef$id)) {
      r <- ranef[ranef$id == id, ]
      group <- r$group[1]
      # include group-level standard deviations
      prior <- rbind(prior,
                     prior_frame(class = "sd", group = group,
                                 nlpar = unique(r$nlpar)),
                     prior_frame(class = "sd", coef = r$coef,
                                 group = group, nlpar = r$nlpar))
      # detect duplicated group-level effects (same class/coef/group/nlpar
      # appearing twice in the rows accumulated so far)
      J <- with(prior, class == "sd" & nchar(coef))
      dupli <- duplicated(prior[J, ])
      if (any(dupli)) {
        stop("Duplicated group-level effects detected for group ",
             group, call. = FALSE)
      }
      # include correlation parameters when more than one effect
      # is modeled as correlated within this grouping term
      if (isTRUE(r$cor[1]) && nrow(r) > 1L) {
        if (internal) {
          prior <- rbind(prior,
                         prior_frame(class = "L", group = c("", group),
                                     prior = c("lkj_corr_cholesky(1)", "")))
        } else {
          prior <- rbind(prior,
                         prior_frame(class = "cor", group = c("", group),
                                     prior = c("lkj(1)", "")))
        }
      }
    }
  }
  prior
}
get_prior_splines <- function(splines, def_scale_prior, nlpar = "") {
  # Priors for standard deviations of smoothing (spline) terms.
  # Args:
  #   splines: names of the spline terms
  #   def_scale_prior: a character string defining the default
  #     prior for spline SDs (attached to the global row only)
  #   nlpar: optional name of a non-linear parameter
  # Returns:
  #   an object of class prior_frame
  if (!length(splines)) {
    return(empty_prior_frame())
  }
  prior_frame(class = "sds", coef = c("", splines),
              prior = c(def_scale_prior, rep("", length(splines))),
              nlpar = nlpar)
}
check_prior <- function(prior, formula, data = NULL, family = gaussian(),
                        sample_prior = FALSE, autocor = NULL,
                        threshold = "flexible", check_rows = NULL,
                        warn = FALSE) {
  # check prior input and amend it if needed
  # Args:
  #   prior: a prior_frame or brmsprior object (or NULL)
  #   formula, data, family, autocor, threshold, sample_prior:
  #     same as the respective parameters in brm
  #     (nonlinear is expected to be an attribute of formula)
  #   check_rows: if not NULL, check only the rows given in check_rows
  #   warn: passed to check_prior_content
  # Returns:
  #   a data.frame of prior specifications to be used in stan_prior
  #   (see stan.R), carrying the attributes "checked" and "prior_only"
  if (isTRUE(attr(prior, "checked"))) {
    return(prior) # prior has already been checked; no need to do it twice
  }
  stopifnot(is(formula, "brmsformula"))
  ee <- extract_effects(formula, family = family)
  # all_priors also defines the set of valid parameter combinations
  all_priors <- get_prior(formula = formula, data = data,
                          family = family, autocor = autocor,
                          threshold = threshold, internal = TRUE)
  if (is.null(prior)) {
    prior <- all_priors
  } else {
    prior <- as.prior_frame(prior)
  }
  # exclude priors using increment_log_prob to readd them at the end
  has_incr_lp <- grepl("^increment_log_prob\\(", prior$prior)
  prior_incr_lp <- prior[has_incr_lp, ]
  prior <- prior[!has_incr_lp, ]
  # check for duplicated priors; correlations are internally
  # parameterized via Cholesky factors ("L" / "Lrescor")
  prior$class <- rename(prior$class, symbols = c("^cor$", "^rescor$"),
                        subs = c("L", "Lrescor"), fixed = FALSE)
  duplicated_input <- duplicated(prior[, 2:5])
  if (any(duplicated_input)) {
    stop("Duplicated prior specifications are not allowed.", call. = FALSE)
  }
  # handle special priors that are not explictly coded as functions in Stan
  has_specef <- is.formula(ee[c("mono", "cse")])
  temp <- handle_special_priors(prior, has_specef = has_specef)
  prior <- temp$prior
  attrib <- temp$attrib
  # check if parameters in prior are valid: a user prior is valid if its
  # (class, coef, group, nlpar) combination also occurs in all_priors
  if (nrow(prior)) {
    valid <- which(duplicated(rbind(all_priors[, 2:5], prior[, 2:5])))
    invalid <- which(!1:nrow(prior) %in% (valid - nrow(all_priors)))
    if (length(invalid)) {
      msg_priors <- lapply(as.brmsprior(prior[invalid, ]), .print_prior)
      message(paste("The following priors don't correspond to any",
                    "model parameter \nand will thus not affect the results:",
                    collapse(" \n", msg_priors)), "\n")
      prior <- prior[-invalid, ]
    }
  }
  check_prior_content(prior, family = family, warn = warn)
  # merge prior with all_priors; user priors come first so they
  # take precedence over the defaults when deduplicating
  prior <- rbind(prior, all_priors)
  prior <- prior[!duplicated(prior[, 2:5]), ]
  rows2remove <- NULL
  # special treatment of fixed effects Intercepts: transfer user priors
  # on 'Intercept' (or matching 'b' coefficients) to 'temp_Intercept'
  int_index <- which(prior$class == "Intercept")
  if (length(int_index)) {
    int_prior <- prior[int_index, ]
    if (length(int_index) > 1L) {
      intercepts <- prior$coef[int_index]
      intercepts <- intercepts[nchar(intercepts) > 0]
    } else intercepts <- "Intercept"
    bint_index <- which(prior$class == "b" & prior$coef %in% intercepts)
    bint_prior <- prior[bint_index, ]
    for (t in which(prior$class %in% "temp_Intercept")) {
      ti <- int_prior$coef == prior$coef[t]
      tb <- bint_prior$coef %in% c(prior$coef[t], "Intercept")
      if (sum(ti) && nchar(int_prior$prior[ti]) > 0) {
        # take 'Intercept' priors first if specified
        prior$prior[t] <- int_prior$prior[ti]
      } else if (sum(tb) && nchar(bint_prior$prior[tb]) > 0) {
        # fall back to 'b' (fixed effects) priors
        prior$prior[t] <- bint_prior$prior[tb]
      }
    }
    rows2remove <- c(rows2remove, int_index, bint_index)
  }
  # prepare priors of monotonic effects: user input such as
  # "dirichlet(1,1,1)" is validated by evaluating ".dirichlet(1,1,1)"
  mono_forms <- get_effect(ee, "mono")
  for (k in seq_along(mono_forms)) {
    monef <- colnames(get_model_matrix(mono_forms[[k]], data = data))
    for (i in seq_along(monef)) {
      take <- with(prior, class == "simplex" & coef == monef[i] &
                   nlpar == names(mono_forms)[k])
      simplex_prior <- paste0(".", prior$prior[take])
      if (nchar(simplex_prior) > 1L) {
        simplex_prior <- paste(eval(parse(text = simplex_prior)),
                               collapse = ",")
        prior$prior[take] <- paste0("dirichlet(c(", simplex_prior, "))")
      }
    }
  }
  # check if priors for non-linear parameters are defined;
  # flat priors on 'b' are not allowed in non-linear models
  if (length(ee$nonlinear)) {
    nlpars <- names(ee$nonlinear)
    for (nlp in nlpars) {
      nlp_prior <- prior$prior[with(prior, nlpar == nlp & class == "b")]
      if (!any(as.logical(nchar(nlp_prior)))) {
        stop(paste0("Priors on fixed effects are required in non-linear ",
                    "models, but none were found for parameter '", nlp,
                    "'. \nSee help(set_prior) for more details."),
             call. = FALSE)
      }
    }
  }
  if (length(rows2remove)) {
    prior <- prior[-rows2remove, ]
  }
  prior <- prior[with(prior, order(nlpar, class, group, coef)), ]
  prior <- rbind(prior, prior_incr_lp)
  rownames(prior) <- 1:nrow(prior)
  # add attributes to prior generated in handle_special_priors
  for (i in seq_along(attrib)) {
    attr(prior, names(attrib)[i]) <- attrib[[i]]
  }
  attr(prior, "prior_only") <- identical(sample_prior, "only")
  attr(prior, "checked") <- TRUE
  prior
}
check_prior_content <- function(prior, family = gaussian(), warn = TRUE) {
  # try to check if prior distributions are reasonable
  # Args:
  #   prior: A prior_frame
  #   family: the model family
  #   warn: logical; print boundary warnings?
  # Returns:
  #   invisible(NULL); called for its warnings and errors only
  stopifnot(is(prior, "prior_frame"))
  stopifnot(is(family, "family"))
  family <- family$family
  if (nrow(prior)) {
    # distributions whose support is bounded below
    lb_priors <- c("lognormal", "chi_square", "inv_chi_square",
                   "scaled_inv_chi_square", "exponential", "gamma",
                   "inv_gamma", "weibull", "frechet", "rayleigh",
                   "pareto", "pareto_type_2")
    lb_priors_reg <- paste0("^(", paste0(lb_priors, collapse = "|"), ")")
    # distributions whose support is bounded below and above
    ulb_priors <- c("beta", "uniform", "von_mises")
    ulb_priors_reg <- paste0("^(", paste0(ulb_priors, collapse = "|"), ")")
    # parameter classes without natural bounds
    nb_pars <- c("b", "Intercept", if (!family %in% "cumulative") "delta")
    # parameter classes with a natural lower bound
    lb_pars <- c("sd", "sigma", "nu", "shape", "phi", "kappa",
                 if (family %in% "cumulative") "delta")
    cor_pars <- c("cor", "L", "rescor", "Lrescor")
    autocor_pars <- c("ar", "ma")
    lb_warning <- ub_warning <- ""
    autocor_warning <- FALSE
    for (i in 1:nrow(prior)) {
      msg_prior <- .print_prior(as.brmsprior(prior[i, , drop = FALSE])[[1]])
      has_lb_prior <- grepl(lb_priors_reg, prior$prior[i])
      has_ulb_prior <- grepl(ulb_priors_reg, prior$prior[i])
      # priors with nchar(coef) inherit their boundaries
      # from the corresponding global (coef = "") row
      j <- with(prior, which(class == class[i] & group == group[i] &
                             nlpar == nlpar[i] & !nchar(coef)))
      bound <- if (length(j)) prior$bound[j] else ""
      has_lb <- grepl("lower", bound)
      has_ub <- grepl("upper", bound)
      if (prior$class[i] %in% nb_pars) {
        # bounded prior on an unbounded parameter: collect a warning
        if ((has_lb_prior || has_ulb_prior) && !has_lb) {
          lb_warning <- paste0(lb_warning, msg_prior, "\n")
        }
        if (has_ulb_prior && !has_ub) {
          ub_warning <- paste0(ub_warning, msg_prior, "\n")
        }
      } else if (prior$class[i] %in% lb_pars) {
        if (has_ulb_prior && !has_ub) {
          ub_warning <- paste0(ub_warning, msg_prior, "\n")
        }
      } else if (prior$class[i] %in% cor_pars) {
        if (nchar(prior$prior[i]) && !grepl("^lkj", prior$prior[i])) {
          stop(paste("Currently 'lkj' is the only valid prior",
                     "for group-level correlations. See help(set_prior)",
                     "for more details."), call. = FALSE)
        }
      } else if (prior$class[i] %in% autocor_pars) {
        if (prior$bound[i] != "<lower=-1,upper=1>") {
          autocor_warning <- TRUE
        }
      } else if (prior$class[i] == "simplex") {
        if (nchar(prior$prior[i]) && !grepl("^dirichlet\\(", prior$prior[i])) {
          stop(paste("Currently 'dirichlet' is the only valid prior",
                     "for simplex parameters. See help(set_prior)",
                     "for more details."), call. = FALSE)
        }
      }
    } # end for
    if (nchar(lb_warning) && warn) {
      warning(paste0("It appears that you have specified a lower bounded ",
                     "prior on a parameter that has no natural lower bound.",
                     "\nIf this is really what you want, please specify ",
                     "argument 'lb' of 'set_prior' appropriately.",
                     "\nWarning occurred for prior \n", lb_warning),
              call. = FALSE)
    }
    if (nchar(ub_warning) && warn) {
      warning(paste0("It appears that you have specified an upper bounded ",
                     "prior on a parameter that has no natural upper bound.",
                     "\nIf this is really what you want, please specify ",
                     "argument 'ub' of 'set_prior' appropriately.",
                     "\nWarning occurred for prior \n", ub_warning),
              call. = FALSE)
    }
    if (autocor_warning && warn) {
      warning(paste("Changing the boundaries of autocorrelation",
                    "parameters is not recommended."), call. = FALSE)
    }
  }
  invisible(NULL)
}
handle_special_priors <- function(prior, has_specef = FALSE) {
  # look for special priors such as horseshoe and process them appropriately
  # Args:
  #   prior: an object of class prior_frame
  #   has_specef: are monotonic or category specific effects present?
  # Returns:
  #   a named list of two objects:
  #     prior: an updated version of prior
  #     attrib: a named list containing future attributes of prior
  #       (e.g., hs_df for the horseshoe degrees of freedom)
  attrib <- list()
  b_index <- which(prior$class == "b" & !nchar(prior$coef))
  # NOTE(review): b_index may have length > 1 (e.g., several nlpars);
  # the grepl()/if below assume a single match -- confirm that upstream
  # duplication checks guarantee this
  if (length(b_index) && grepl("^horseshoe\\(.+\\)$", prior$prior[b_index])) {
    # horseshoe prior for fixed effects parameters
    if (any(nchar(prior$nlpar))) {
      stop("Horseshoe priors are not yet allowed in non-linear models.",
           call. = FALSE)
    }
    if (has_specef) {
      stop(paste("Horseshoe priors are not yet allowed in models with",
                 "monotonic or category specific effects."),
           call. = FALSE)
    }
    # extract the degrees of freedom from "horseshoe(<df>)"
    hs_df <- gsub("^horseshoe\\(|\\)$", "", prior$prior[b_index])
    hs_df <- suppressWarnings(as.numeric(hs_df))
    if (!is.na(hs_df) && hs_df > 0) {
      # coefficient-specific 'b' priors conflict with the horseshoe
      b_coef_indices <- which(prior$class == "b" & nchar(prior$coef)
                              & prior$coef != "Intercept")
      if (any(nchar(prior$prior[b_coef_indices]))) {
        stop(paste("Defining priors for single fixed effects parameters",
                   "is not allowed when using horseshoe priors",
                   "(except for the Intercept)"), call. = FALSE)
      }
      attrib$hs_df <- hs_df
      # hs_local and hs_global are defined in the generated Stan code
      prior$prior[b_index] <- "normal(0, hs_local * hs_global)"
    } else {
      stop("degrees of freedom of horseshoe prior must be a positive number",
           call. = FALSE)
    }
  }
  # expand lkj correlation prior to full name
  prior$prior <- sub("^(lkj\\(|lkj_corr\\()", "lkj_corr_cholesky(", prior$prior)
  list(prior = prior, attrib = attrib)
}
get_bound <- function(prior, class = "b", coef = "",
                      group = "", nlpar = "") {
  # Extract the boundary specification of a single parameter.
  # Args:
  #   prior: an object of class prior_frame
  #   class, coef, group, nlpar: strings of length 1 identifying
  #     the parameter
  # Returns:
  #   the matching element of prior$bound (character(0) if no match)
  stopifnot(length(class) == 1L)
  # empty selectors default to the empty string
  if (!length(coef)) coef <- ""
  if (!length(group)) group <- ""
  if (!length(nlpar)) nlpar <- ""
  hits <- prior$class == class & prior$coef == coef &
    prior$group == group & prior$nlpar == nlpar
  if (sum(hits) > 1L) {
    stop("extracted more than one boundary at once")
  }
  prior$bound[hits]
}
prior_frame <- function(prior = "", class = "", coef = "", group = "",
                        nlpar = "", bound = "") {
  # Construct a data.frame holding prior specifications.
  # All arguments are recycled to a common length by data.frame().
  # Returns:
  #   a data.frame with class c("prior_frame", "data.frame")
  out <- data.frame(
    prior = prior, class = class, coef = coef, group = group,
    nlpar = nlpar, bound = bound, stringsAsFactors = FALSE
  )
  structure(out, class = c("prior_frame", "data.frame"))
}
empty_prior_frame <- function() {
  # A prior_frame containing zero rows.
  nothing <- character(0)
  prior_frame(prior = nothing, class = nothing, coef = nothing,
              group = nothing, nlpar = nothing, bound = nothing)
}
#' @export
print.brmsprior <- function(x, ...) {
  # print method for brmsprior objects
  # Args:
  #   x: a brmsprior object
  #   ...: currently ignored
  # Returns:
  #   x invisibly (so the method pipes/chains cleanly)
  cat(.print_prior(x))
  invisible(x)
}
.print_prior <- function(x) {
  # prepare text for print.brmsprior
  # Args:
  #   x: a brmsprior object (or any list/row providing elements
  #      prior, class, coef, group, nlpar, and bound)
  # Returns:
  #   a character string of the form "<bound> <class>_<nlpar>_<group>_<coef> ~ <prior>"
  # usc() presumably prefixes non-empty strings with an underscore
  # (defined elsewhere in the package) -- TODO confirm
  group <- usc(x$group, "prefix")
  coef <- usc(x$coef, "prefix")
  nlpar <- usc(x$nlpar, "prefix")
  # show the bound and the tilde only when there is something to show
  bound <- ifelse(nchar(x$bound), paste0(x$bound, " "), "")
  tilde <- ifelse(nchar(x$class) + nchar(group) + nchar(coef), " ~ ", "")
  prior <- ifelse(nchar(x$prior), x$prior, "(no prior)")
  paste0(bound, x$class, nlpar, group, coef, tilde, prior)
}
#' @export
c.brmsprior <- function(x, ...) {
  # Combine multiple brmsprior objects into a single prior_frame.
  # Args:
  #   x: a brmsprior object (ensured by S3 dispatch)
  #   ...: further brmsprior objects
  # Returns:
  #   a prior_frame (data.frame) with one row per brmsprior object
  dots <- list(...)
  # vapply + inherits instead of sapply + is(): type-stable on empty
  # input and does not depend on the 'methods' package
  if (!all(vapply(dots, inherits, logical(1L), what = "brmsprior"))) {
    stop("All arguments must be of class brmsprior", call. = FALSE)
  }
  # each brmsprior holds exactly 6 elements; fill the matrix row-wise
  prior <- data.frame(matrix(unlist(c(list(x), dots)), ncol = 6, byrow = TRUE),
                      stringsAsFactors = FALSE)
  names(prior) <- c("prior", "class", "coef", "group", "nlpar", "bound")
  class(prior) <- c("prior_frame", "data.frame")
  prior
}
as.brmsprior <- function(prior) {
  # Convert a prior_frame into a list of brmsprior objects.
  # Args:
  #   prior: an object of class 'prior_frame' or 'brmsprior'
  # Returns:
  #   an unnamed list of brmsprior objects; brmsprior input is
  #   returned unchanged
  # inherits() is the idiomatic S3 class check (no 'methods' dependency)
  stopifnot(inherits(prior, "prior_frame") || inherits(prior, "brmsprior"))
  if (inherits(prior, "prior_frame")) {
    .convert <- function(x) {
      # apply() passes each row as a named character vector
      structure(as.list(x), class = c("brmsprior", "list"))
    }
    prior <- unname(apply(prior, MARGIN = 1, FUN = .convert))
  }
  prior
}
as.prior_frame <- function(prior) {
  # Coerce 'prior' to a prior_frame object.
  # Args:
  #   prior: NULL, or an object of class 'prior_frame' or 'brmsprior'
  # Returns:
  #   a prior_frame; NULL is replaced by the default prior_frame(),
  #   prior_frame input is returned unchanged
  if (is.null(prior)) {
    prior <- prior_frame()
  } else if (inherits(prior, "brmsprior")) {
    # c() dispatches to c.brmsprior, which builds a prior_frame
    prior <- c(prior)
  } else if (!inherits(prior, "prior_frame")) {
    stop(paste("Invalid 'prior' argument. See help(set_prior)",
               "for further information."), call. = FALSE)
  }
  prior
}
.dirichlet <- function(...) {
  # Helper evaluated for user-specified dirichlet priors of simplex
  # parameters; collects and validates the concentration parameters.
  # Returns:
  #   the concentration parameters as a numeric vector
  pars <- as.numeric(c(...))
  if (any(pars <= 0)) {
    stop("The dirichlet prior expects positive values.", call. = FALSE)
  }
  pars
}
|
/R/priors.R
|
no_license
|
hoardboard/brms
|
R
| false
| false
| 46,738
|
r
|
#' Prior Definitions for \pkg{brms} Models
#'
#' Define priors for specific parameters or classes of parameters
#'
#' @param prior A character string defining a distribution in \pkg{Stan} language
#' @param class The parameter class. Defaults to \code{"b"} (fixed effects).
#' See 'Details' for other valid parameter classes.
#' @param coef Name of the (population- or group-level) parameter
#' @param group Grouping factor of group-level parameters.
#' @param nlpar Name of a non-linear / auxiliary parameter.
#' Only used in non-linear / distributional models.
#' @param resp Name of the response variable / category.
#' Only used in multivariate and categorical models.
#' Is internally handled as an alias of \code{nlpar}.
#' @param lb Lower bound for parameter restriction. Currently only allowed
#' for classes \code{"b"}, \code{"ar"}, \code{"ma"}, and \code{"arr"}.
#' Defaults to \code{NULL}, that is no restriction.
#' @param ub Upper bound for parameter restriction. Currently only allowed
#' for classes \code{"b"}, \code{"ar"}, \code{"ma"}, and \code{"arr"}.
#' Defaults to \code{NULL}, that is no restriction.
#'
#' @return An object of class \code{brmsprior} to be used in the \code{prior}
#' argument of \code{\link[brms:brm]{brm}}.
#'
#' @details
#' \code{set_prior} is used to define prior distributions for parameters
#' in \pkg{brms} models. Below, we explain its usage and list some common
#' prior distributions for parameters.
#' A complete overview on possible prior distributions is given
#' in the Stan Reference Manual available at \url{http://mc-stan.org/}.
#'
#' To combine multiple priors, use \code{c(...)},
#' e.g., \code{c(set_prior(...), set_prior(...))}.
#' \pkg{brms} does not check if the priors are written in correct \pkg{Stan} language.
#' Instead, \pkg{Stan} will check their syntactical correctness when the model
#' is parsed to \code{C++} and returns an error if they are not.
#' This, however, does not imply that priors are always meaningful if they are
#' accepted by \pkg{Stan}. Although \pkg{brms} tries to find common problems
#' (e.g., setting bounded priors on unbounded parameters), there is no guarantee
#' that the defined priors are reasonable for the model.
#' Currently, there are seven types of parameters in \pkg{brms} models,
#' for which the user can specify prior distributions. \cr
#'
#' 1. Population-level ('fixed') effects
#'
#' Every Population-level effect has its own regression parameter
#' These parameters are internally named as \code{b_<fixed>}, where \code{<fixed>}
#' represents the name of the corresponding population-level effect.
#' Suppose, for instance, that \code{y} is predicted by \code{x1} and \code{x2}
#' (i.e. \code{y ~ x1+x2} in formula syntax).
#' Then, \code{x1} and \code{x2} have regression parameters
#' \code{b_x1} and \code{b_x2} respectively.
#' The default prior for population-level effects (including monotonic and
#' category specific effects) is an improper flat prior over the reals.
#' Other common options are normal priors or student-t priors.
#' If we want to have a normal prior with mean 0 and
#' standard deviation 5 for \code{x1}, and a unit student-t prior with 10
#' degrees of freedom for \code{x2}, we can specify this via
#' \code{set_prior("normal(0,5)", class = "b", coef = "x1")} and \cr
#' \code{set_prior("student_t(10,0,1)", class = "b", coef = "x2")}.
#' To put the same prior on all fixed effects at once,
#' we may write as a shortcut \code{set_prior("<prior>", class = "b")}.
#' This also leads to faster sampling, because priors can be vectorized in this case.
#' Both ways of defining priors can be combined using for instance
#' \code{set_prior("normal(0,2)", class = "b")} and \cr
#' \code{set_prior("normal(0,10)", class = "b", coef = "x1")}
#' at the same time. This will set a \code{normal(0,10)} prior on
#' the fixed effect of \code{x1} and a \code{normal(0,2)} prior
#' on all other fixed effects. However, this will break vectorization and
#' may slow down the sampling procedure a bit.
#'
#' In case of the default intercept parameterization
#' (discussed in the 'Details' section of \code{\link[brms:brm]{brm}}),
#' the fixed effects intercept has its own parameter class
#' named \code{"Intercept"} and priors can thus be
#' specified via \code{set_prior("<prior>", class = "Intercept")}.
#' Setting a prior on the intercept will not break vectorization
#' of the other population-level effects.
#'
#' A special shrinkage prior to be applied on population-level effects
#' is the horseshoe prior.
#' It is symmetric around zero with fat tails and an infinitely large spike
#' at zero. This makes it ideal for sparse models that have
#' many regression coefficients, although only a minority of them is non-zero.
#' For more details see Carvalho et al. (2009).
#' The horseshoe prior can be applied on all population-level effects at once
#' (excluding the intercept) by using \code{set_prior("horseshoe(1)")}.
#' The \code{1} implies that the student-t prior of the local shrinkage
#' parameters has 1 degree of freedom. This may, however, lead to an
#' increased number of divergent transition in \pkg{Stan}.
#' Accordingly, increasing the degrees of freedom to slightly higher values
#' (e.g., \code{3}) may often be a better option, although the prior
#' no longer resembles a horseshoe in this case.
#' Generally, models with horseshoe priors are more likely than other models
#' to have divergent transitions so that increasing \code{adapt_delta}
#' from \code{0.8} to values closer to \code{1} will often be necessary.
#' See the documentation of \code{\link[brms:brm]{brm}} for instructions
#' on how to increase \code{adapt_delta}. \cr
#'
#' In non-linear models, population-level effects are defined separately
#' for each non-linear parameter. Accordingly, it is necessary to specify
#' the non-linear parameter in \code{set_prior} so that priors
#' can be assigned correctly.
#' If, for instance, \code{alpha} is the parameter and \code{x} the predictor
#' for which we want to define the prior, we can write
#' \code{set_prior("<prior>", coef = "x", nlpar = "alpha")}.
#' As a shortcut we can use \code{set_prior("<prior>", nlpar = "alpha")}
#' to set the same prior on all population-level effects of \code{alpha} at once.
#'
#' If desired, population-level effects can be restricted to fall only
#' within a certain interval using the \code{lb} and \code{ub} arguments
#' of \code{set_prior}. This is often required when defining priors
#' that are not defined everywhere on the real line, such as uniform
#' or gamma priors. When defining a \code{uniform(2,4)} prior,
#' you should write \code{set_prior("uniform(2,4)", lb = 2, ub = 4)}.
#' When using a prior that is defined on the positive reals only
#' (such as a gamma prior) set \code{lb = 0}.
#' In most situations, it is not useful to restrict population-level
#' parameters through bounded priors
#' (non-linear models are an important exception),
#' but if you really want to this is the way to go.
#'
#' 2. Standard deviations of group-level ('random') effects
#'
#' Each group-level effect of each grouping factor has a standard deviation named
#' \code{sd_<group>_<random>}. Consider, for instance, the formula
#' \code{y ~ x1+x2+(1+x1|g)}.
#' We see that the intercept as well as \code{x1} are group-level effects
#' nested in the grouping factor \code{g}.
#' The corresponding standard deviation parameters are named as
#' \code{sd_g_Intercept} and \code{sd_g_x1} respectively.
#' These parameters are restricted to be non-negative and, by default,
#' have a half student-t prior with 3 degrees of freedom and a
#' scale parameter that depends on the standard deviation of the response
#' after applying the link function. Minimally, the scale parameter is 10.
#' This prior is used (a) to be only very weakly informative in order to influence
#' results as few as possible, while (b) providing at least some regularization
#' to considerably improve convergence and sampling efficiency.
#' To define a prior distribution only for standard deviations
#' of a specific grouping factor,
#' use \cr \code{set_prior("<prior>", class = "sd", group = "<group>")}.
#' To define a prior distribution only for a specific standard deviation
#' of a specific grouping factor, you may write \cr
#' \code{set_prior("<prior>", class = "sd", group = "<group>", coef = "<coef>")}.
#' Recommendations on useful prior distributions for
#' standard deviations are given in Gelman (2006), but note that he
#' no longer recommends uniform priors. \cr
#'
#' When defining priors on group-level effects parameters in non-linear models,
#' please make sure to specify the corresponding non-linear parameter
#' through the \code{nlpar} argument in the same way as
#' for population-level effects.
#'
#' 3. Correlations of group-level ('random') effects
#'
#' If there is more than one group-level effect per grouping factor,
#' the correlations between those effects have to be estimated.
#' The prior \code{"lkj_corr_cholesky(eta)"} or in short
#' \code{"lkj(eta)"} with \code{eta > 0}
#' is essentially the only prior for (Cholesky factors of) correlation matrices.
#' If \code{eta = 1} (the default) all correlation matrices
#' are equally likely a priori. If \code{eta > 1}, extreme correlations
#' become less likely, whereas \code{0 < eta < 1} results in
#' higher probabilities for extreme correlations.
#' Correlation matrix parameters in \code{brms} models are named as
#' \code{cor_(group)}, (e.g., \code{cor_g} if \code{g} is the grouping factor).
#' To set the same prior on every correlation matrix,
#' use for instance \code{set_prior("lkj(2)", class = "cor")}.
#'
#' 4. Standard deviations of smoothing terms
#'
#' GAMMs are implemented in \pkg{brms} using the 'random effects'
#' formulation of smoothing terms (for details see
#' \code{\link[mgcv:gamm]{gamm}}). Thus, each smoothing term
#' has its corresponding standard deviation modeling
#' the variability within this term. In \pkg{brms}, this
#' parameter class is called \code{sds} and priors can
#' be specified via \code{set_prior("<prior>", class = "sds",
#' coef = "<term label>")}. The default prior is the same as
#' for standard deviations of group-level effects.
#'
#' 5. Autocorrelation parameters
#'
#' The autocorrelation parameters currently implemented are named
#' \code{ar} (autoregression), \code{ma} (moving average),
#' and \code{arr} (autoregression of the response).
#'
#' Priors can be defined by \code{set_prior("<prior>", class = "ar")}
#' for \code{ar} and similar for \code{ma} and \code{arr} effects.
#' By default, \code{ar} and \code{ma} are bounded between \code{-1}
#' and \code{1} and \code{arr} is unbounded (you may change this
#' by using the arguments \code{lb} and \code{ub}). The default
#' prior is flat over the definition area.
#'
#' 6. Distance parameters of monotonic effects
#'
#' As explained in the details section of \code{\link[brms:brm]{brm}},
#' monotonic effects make use of a special parameter vector to
#' estimate the 'normalized distances' between consecutive predictor
#' categories. This is realized in \pkg{Stan} using the \code{simplex}
#' parameter type and thus this class is also named \code{"simplex"} in
#' \pkg{brms}. The only valid prior for simplex parameters is the
#' dirichlet prior, which accepts a vector of length \code{K - 1}
#' (K = number of predictor categories) as input defining the
#' 'concentration' of the distribution. Explaining the dirichlet prior
#' is beyond the scope of this documentation, but we want to describe
#' how to define this prior syntactically correct.
#' If a predictor \code{x} with \code{K} categories is modeled as monotonic,
#' we can define a prior on its corresponding simplex via \cr
#' \code{set_prior("dirichlet(<vector>)", class = "simplex", coef = "x")}.
#' For \code{<vector>}, we can put in any \code{R} expression
#' defining a vector of length \code{K - 1}. The default is a uniform
#' prior (i.e. \code{<vector> = rep(1, K-1)}) over all simplexes
#' of the respective dimension.
#'
#' 7. Parameters for specific families
#'
#' Some families need additional parameters to be estimated.
#' Families \code{gaussian}, \code{student}, and \code{cauchy}
#' need the parameter \code{sigma}
#' to account for the residual standard deviation.
#' By default, \code{sigma} has a half student-t prior that scales
#' in the same way as the random effects standard deviations.
#' Furthermore, family \code{student} needs the parameter
#' \code{nu} representing the degrees of freedom of students t distribution.
#' By default, \code{nu} has prior \code{"gamma(2,0.1)"}
#' and a fixed lower bound of \code{0}.
#' Families \code{gamma}, \code{weibull}, \code{inverse.gaussian}, and
#' \code{negbinomial} need a \code{shape} parameter that has a
#' \code{"gamma(0.01,0.01)"} prior by default.
#' For families \code{cumulative}, \code{cratio}, \code{sratio},
#' and \code{acat}, and only if \code{threshold = "equidistant"},
#' the parameter \code{delta} is used to model the distance between
#' two adjacent thresholds.
#' By default, \code{delta} has an improper flat prior over the reals.
#' The \code{von_mises} family needs the parameter \code{kappa}, representing
#' the concentration parameter. By default, \code{kappa} has prior
#' \code{"gamma(2, 0.01)"}. \cr
#' Every family specific parameter has its own prior class, so that
#' \code{set_prior("<prior>", class = "<parameter>")} is the right way to go.
#' All of these priors are chosen to be weakly informative,
#' having only minimal influence on the estimations,
#' while improving convergence and sampling efficiency.
#'
#' Often, it may not be immediately clear,
#' which parameters are present in the model.
#' To get a full list of parameters and parameter classes for which
#' priors can be specified (depending on the model)
#' use function \code{\link[brms:get_prior]{get_prior}}.
#'
#' @seealso \code{\link[brms:get_prior]{get_prior}}
#'
#' @references
#' Gelman A (2006). Prior distributions for variance parameters in hierarchical models.
#' Bayesian analysis, 1(3), 515 -- 534.
#'
#' Carvalho, C. M., Polson, N. G., & Scott, J. G. (2009).
#' Handling sparsity via the horseshoe.
#' In International Conference on Artificial Intelligence and Statistics (pp. 73-80).
#'
#' @examples
#' ## check which parameters can have priors
#' get_prior(rating ~ treat + period + carry + (1|subject),
#' data = inhaler, family = sratio(),
#' threshold = "equidistant")
#'
#' ## define some priors
#' prior <- c(set_prior("normal(0,10)", class = "b"),
#' set_prior("normal(1,2)", class = "b", coef = "treat"),
#' set_prior("cauchy(0,2)", class = "sd",
#' group = "subject", coef = "Intercept"),
#' set_prior("uniform(-5,5)", class = "delta"))
#'
#' ## verify that the priors indeed found their way into Stan's model code
#' make_stancode(rating ~ period + carry + cse(treat) + (1|subject),
#' data = inhaler, family = sratio(),
#' threshold = "equidistant",
#' prior = prior)
#'
#' ## use horseshoe priors to model sparsity in population-level effects parameters
#' make_stancode(count ~ log_Age_c + log_Base4_c * Trt_c,
#' data = epilepsy, family = poisson(),
#' prior = set_prior("horseshoe(3)"))
#'
#' @export
set_prior <- function(prior, class = "b", coef = "", group = "",
                      nlpar = "", resp = NULL, lb = NULL, ub = NULL) {
  # Define a prior on a single parameter (class) of a brms model.
  # Args:
  #   prior: a character string defining the prior distribution in Stan syntax
  #   class: the parameter class (see 'valid_classes' below)
  #   coef: name of a single coefficient within 'class'
  #   group: grouping factor for group-level ('random') parameters
  #   nlpar: name of a non-linear parameter; 'resp' is its deprecated alias
  #   lb, ub: optional lower/upper boundaries (population-level and
  #     ARMA effects only)
  # Returns:
  #   an object of class 'brmsprior': a named list with elements
  #   prior, class, coef, group, nlpar, and bound (a Stan boundary string)
  prior <- as.character(prior)
  class <- as.character(class)
  group <- as.character(group)
  coef <- as.character(coef)
  nlpar <- as.character(use_alias(nlpar, resp, warn = FALSE))
  lb <- as.numeric(lb)
  ub <- as.numeric(ub)
  # each specification targets exactly one parameter (class)
  if (length(prior) != 1 || length(class) != 1 || length(coef) != 1 ||
      length(group) != 1 || length(nlpar) != 1 || length(lb) > 1 ||
      length(ub) > 1)
    stop("All arguments of set_prior must be of length 1.", call. = FALSE)
  valid_classes <- c("Intercept", "b", "sd", "sds", "simplex", "cor", "L",
                     "ar", "ma", "arr", "sigma", "sigmaLL", "rescor",
                     "Lrescor", "nu", "shape", "delta", "phi", "kappa")
  if (!class %in% valid_classes) {
    stop(paste(class, "is not a valid parameter class"), call. = FALSE)
  }
  # 'group' only applies to group-level SD / correlation classes
  if (nchar(group) && !class %in% c("sd", "cor", "L")) {
    stop(paste("argument 'group' not meaningful for class", class),
         call. = FALSE)
  }
  coef_classes <- c("Intercept", "b", "sd", "sds", "sigma", "simplex")
  if (nchar(coef) && !class %in% coef_classes) {
    stop(paste("argument 'coef' not meaningful for class", class),
         call. = FALSE)
  }
  # non-linear parameters only exist for the first five classes
  if (nchar(nlpar) && !class %in% valid_classes[1:5]) {
    stop(paste("argument 'nlpar' not meaningful for class", class),
         call. = FALSE)
  }
  is_arma <- class %in% c("ar", "ma")
  if (length(lb) || length(ub) || is_arma) {
    if (!(class %in% c("b", "arr") || is_arma))
      stop("Currently boundaries are only allowed for ",
           "population-level and ARMA effects.", call. = FALSE)
    if (nchar(coef)) {
      stop("'coef' may not be specified when using boundaries")
    }
    if (is_arma) {
      # ARMA parameters default to the stationarity region [-1, 1];
      # scalar if/else instead of ifelse() on length-1 values
      lb <- if (length(lb)) lb else -1
      ub <- if (length(ub)) ub else 1
      if (is.na(lb) || is.na(ub) || abs(lb) > 1 || abs(ub) > 1) {
        warning("Setting boundaries of ARMA parameters outside of ",
                "[-1,1] may not be appropriate.", call. = FALSE)
      }
    }
    # don't put spaces in boundary declarations
    lb <- if (length(lb) && !is.na(lb)) paste0("lower=", lb)
    ub <- if (length(ub) && !is.na(ub)) paste0("upper=", ub)
    if (!is.null(lb) || !is.null(ub)) {
      bound <- paste0("<", paste(c(lb, ub), collapse = ","), ">")
    } else {
      bound <- ""
    }
  } else {
    bound <- ""
  }
  if (grepl("^increment_log_prob\\(", prior)) {
    # increment_log_prob can be used to directly add a term
    # to the log posterior; it applies to no particular parameter
    class <- coef <- group <- nlpar <- ""
  }
  out <- nlist(prior, class, coef, group, nlpar, bound)
  class(out) <- c("brmsprior", "list")
  out
}
#' Overview on Priors for \pkg{brms} Models
#'
#' Get information on all parameters (and parameter classes) for which priors
#' may be specified including default priors.
#'
#' @inheritParams brm
#' @param internal A flag indicating if the names of additional internal parameters should be displayed.
#' Setting priors on these parameters is not recommended
#'
#' @return A data.frame with columns \code{prior}, \code{class}, \code{coef}, and \code{group}
#' and several rows, each providing information on a parameter (or parameter class) on which
#' priors can be specified. The prior column is empty except for internal default priors.
#'
#' @seealso \code{\link[brms:set_prior]{set_prior}}
#'
#' @examples
#' ## get all parameters and parameters classes to define priors on
#' (prior <- get_prior(count ~ log_Age_c + log_Base4_c * Trt_c
#' + (1|patient) + (1|visit),
#' data = epilepsy, family = poisson()))
#'
#' ## define a prior on all population-level effects at once
#' prior$prior[1] <- "normal(0,10)"
#'
#' ## define a specific prior on the population-level effect of Trt_c
#' prior$prior[5] <- "student_t(10, 0, 5)"
#'
#' ## verify that the priors indeed found their way into Stan's model code
#' make_stancode(count ~ log_Age_c + log_Base4_c * Trt_c
#' + (1|patient) + (1|visit),
#' data = epilepsy, family = poisson(),
#' prior = prior)
#'
#' @export
get_prior <- function(formula, data = NULL, family = gaussian(),
                      autocor = NULL, nonlinear = NULL, partial = NULL,
                      threshold = c("flexible", "equidistant"),
                      internal = FALSE) {
  # Build a prior_frame listing every parameter (class) of the model
  # for which a prior may be specified, together with its default prior.
  # note that default priors are stored in this function
  family <- check_family(family)
  link <- family$link
  formula <- update_formula(formula, data = data, family = family,
                            partial = partial, nonlinear = nonlinear)
  threshold <- match.arg(threshold)
  autocor <- check_autocor(autocor)
  ee <- extract_effects(formula, family = family)
  data <- update_data(data, family = family, effects = ee)
  # ensure that RE and residual SDs only have a weakly informative prior by default
  Y <- unname(model.response(data))
  # 10 is the minimal scale of the default half student-t prior
  prior_scale <- 10
  if (is.lognormal(family)) link <- "log"
  if (link %in% c("identity", "log", "inverse", "sqrt", "1/mu^2")) {
    if (link %in% c("log", "inverse", "1/mu^2")) {
      Y <- ifelse(Y == 0, Y + 0.1, Y) # avoid Inf in link(Y)
    }
    # widen the default scale based on the SD of the
    # (link-transformed) response
    suggested_scale <- SW(round(link(sd(Y), link = link)))
    if (!is.nan(suggested_scale)) {
      prior_scale <- max(prior_scale, suggested_scale, na.rm = TRUE)
    }
  }
  def_scale_prior <- paste0("student_t(3, 0, ", prior_scale, ")")
  # initialize output
  prior <- empty_prior_frame()
  # priors for primary regression effects
  if (length(ee$nonlinear)) {
    # non-linear models: one set of effect priors per non-linear parameter
    nlpars <- names(ee$nonlinear)
    for (i in seq_along(nlpars)) {
      prior_eff <- get_prior_effects(ee$nonlinear[[i]], data = data,
                                     autocor = autocor, nlpar = nlpars[i],
                                     spec_intercept = FALSE,
                                     def_scale_prior = def_scale_prior,
                                     internal = internal)
      prior <- rbind(prior, prior_eff)
    }
  } else {
    if (length(ee$response) > 1L) {
      # priors for effects in multivariate models
      for (r in c("", ee$response)) {
        # r = "" adds global priors affecting parameters of all responses
        prior_eff <- get_prior_effects(ee, data = data, autocor = autocor,
                                       def_scale_prior = def_scale_prior,
                                       internal = internal, nlpar = r)
        prior <- rbind(prior, prior_eff)
      }
    } else {
      # priors for effects in univariate models
      prior_eff <- get_prior_effects(ee, data = data, autocor = autocor,
                                     def_scale_prior = def_scale_prior,
                                     internal = internal)
      prior <- rbind(prior, prior_eff)
    }
  }
  # priors for auxiliary parameters; default priors by parameter name
  def_auxprior <- c(sigma = def_scale_prior, shape = "gamma(0.01, 0.01)",
                    nu = "gamma(2, 0.1)", phi = "gamma(0.01, 0.01)",
                    kappa = "gamma(2, 0.01)", zi = "beta(1, 1)",
                    hu = "beta(1, 1)")
  valid_auxpars <- valid_auxpars(family, effects = ee, autocor = autocor)
  for (ap in valid_auxpars) {
    if (!is.null(ee[[ap]])) {
      # the auxiliary parameter is itself predicted by a formula,
      # so it receives effect priors rather than a single default
      auxprior <- get_prior_effects(ee[[ap]], data = data,
                                    autocor = autocor, nlpar = ap,
                                    spec_intercept = FALSE,
                                    def_scale_prior = def_scale_prior,
                                    internal = internal)
    } else {
      auxprior <- prior_frame(class = ap, prior = def_auxprior[ap])
    }
    prior <- rbind(prior, auxprior)
  }
  # priors of group-level parameters
  ranef <- tidy_ranef(ee, data)
  prior_ranef <- get_prior_ranef(ranef, def_scale_prior = def_scale_prior,
                                 global_sd = length(ee$response) > 1L,
                                 internal = internal)
  prior <- rbind(prior, prior_ranef)
  # prior for the delta parameter for equidistant thresholds
  if (is.ordinal(family) && threshold == "equidistant") {
    prior <- rbind(prior, prior_frame(class = "delta"))
  }
  # priors for auxiliary parameters of multivariate models
  if (is.linear(family) && length(ee$response) > 1L) {
    sigma_coef <- c("", ee$response)
    sigma_prior <- c(def_scale_prior, rep("", length(ee$response)))
    sigma_prior <- prior_frame(class = "sigma", coef = sigma_coef,
                               prior = sigma_prior)
    prior <- rbind(prior, sigma_prior)
    if (internal) {
      # internally, residual correlations are parameterized
      # via their Cholesky factor
      prior <- rbind(prior, prior_frame(class = "Lrescor",
                                        prior = "lkj_corr_cholesky(1)"))
    } else {
      prior <- rbind(prior, prior_frame(class = "rescor", prior = "lkj(1)"))
    }
  }
  # priors for autocor parameters; default bounds keep AR/MA stationary
  cbound <- "<lower=-1,upper=1>"
  if (get_ar(autocor)) {
    prior <- rbind(prior, prior_frame(class = "ar", bound = cbound))
  }
  if (get_ma(autocor)) {
    prior <- rbind(prior, prior_frame(class = "ma", bound = cbound))
  }
  if (get_arr(autocor)) {
    prior <- rbind(prior, prior_frame(class = "arr"))
  }
  if (is(autocor, "cor_bsts")) {
    prior <- rbind(prior, prior_frame(class = "sigmaLL",
                                      prior = def_scale_prior))
  }
  # do not remove unique(.)
  prior <- unique(prior[with(prior, order(nlpar, class, group, coef)), ])
  rownames(prior) <- 1:nrow(prior)
  prior
}
get_prior_effects <- function(effects, data, autocor = cor_arma(),
                              nlpar = "", spec_intercept = TRUE,
                              def_scale_prior = "", internal = FALSE) {
  # Collect priors for all 'primary' regression effects of one model
  # part: fixed, monotonic, spline, and category specific effects.
  # The family is deliberately not passed on here to avoid
  # removal of the intercept for ordinal models.
  # Args:
  #   spec_intercept: use a special parameter class for the FE Intercept?
  # Returns:
  #   an object of class prior_frame
  fe_names <- colnames(data_fixef(effects, data, autocor = autocor)$X)
  use_spec_int <- has_intercept(effects$fixed) && spec_intercept
  fe_priors <- get_prior_fixef(fe_names, spec_intercept = use_spec_int,
                               nlpar = nlpar, internal = internal)
  mo_names <- colnames(get_model_matrix(effects$mono, data))
  mo_priors <- get_prior_monef(mo_names, fixef = fe_names, nlpar = nlpar)
  # group-level priors are prepared separately
  sp_labels <- get_spline_labels(effects)
  sp_priors <- get_prior_splines(sp_labels, def_scale_prior, nlpar = nlpar)
  cs_names <- colnames(get_model_matrix(effects$cse, data = data))
  cs_priors <- get_prior_csef(cs_names, fixef = fe_names)
  rbind(fe_priors, mo_priors, sp_priors, cs_priors)
}
get_prior_fixef <- function(fixef, spec_intercept = TRUE,
                            nlpar = "", internal = FALSE) {
  # Default (empty) priors for population-level ('fixed') effects.
  # Args:
  #   fixef: names of the fixed effects
  #   spec_intercept: use a special 'Intercept' parameter class?
  #   internal: see get_prior; if TRUE also expose 'temp_Intercept'
  # Returns:
  #   an object of class prior_frame
  pieces <- list(empty_prior_frame())
  if (length(fixef)) {
    # one global 'b' row plus one row per coefficient
    pieces <- c(pieces, list(prior_frame(class = "b", coef = c("", fixef),
                                         nlpar = nlpar)))
  }
  if (spec_intercept) {
    pieces <- c(pieces, list(prior_frame(class = "Intercept", coef = "",
                                         nlpar = nlpar)))
    if (internal) {
      pieces <- c(pieces, list(prior_frame(class = "temp_Intercept",
                                           coef = "", nlpar = nlpar)))
    }
  }
  do.call(rbind, pieces)
}
get_prior_monef <- function(monef, fixef = NULL, nlpar = "") {
  # Default priors for monotonic effects parameters.
  # Args:
  #   monef: names of the monotonic effects
  #   fixef: names of the fixed effects
  #   nlpar: optional name of a non-linear parameter
  # Returns:
  #   an object of class prior_frame
  prior <- empty_prior_frame()
  if (length(monef)) {
    # a variable must not be both a fixed and a monotonic effect
    invalid <- intersect(fixef, monef)
    if (length(invalid)) {
      stop(paste("Variables cannot be modeled as fixed and",
                 "monotonic effects at the same time.",
                 "\nError occurred for variables:",
                 paste(invalid, collapse = ", ")), call. = FALSE)
    }
    # 'b' rows for the scaling parameters and 'simplex' rows for the
    # normalized distances between predictor categories
    prior <- rbind(prior_frame(class = "b", coef = c("", monef),
                               nlpar = nlpar),
                   prior_frame(class = "simplex", coef = monef,
                               nlpar = nlpar))
  }
  prior
}
get_prior_csef <- function(csef, fixef = NULL) {
  # Default priors for category specific effects parameters.
  # Args:
  #   csef: names of the category specific effects
  #   fixef: names of the fixed effects
  # Returns:
  #   an object of class prior_frame
  prior <- empty_prior_frame()
  if (length(csef)) {
    # a variable must not be both a fixed and a category specific effect
    invalid <- intersect(fixef, csef)
    if (length(invalid)) {
      stop(paste("Variables cannot be modeled as fixed and",
                 "category specific effects at the same time.",
                 "\nError occurred for variables:",
                 paste(invalid, collapse = ", ")), call. = FALSE)
    }
    prior <- prior_frame(class = "b", coef = c("", csef))
  }
  prior
}
get_prior_ranef <- function(ranef, def_scale_prior,
                            global_sd = FALSE, internal = FALSE) {
  # priors for random effects parameters
  # Args:
  #   ranef: a data.frame-like object returned by tidy_ranef, with (at
  #     least) columns id, group, coef, nlpar, and cor
  #   def_scale_prior: a character string defining the default
  #     prior for random effects SDs
  #   global_sd: allow to set a global SD prior
  #     affecting all non-linear parameters?
  #   internal: see get_prior; if TRUE, expose the internal Cholesky
  #     factor class 'L' instead of 'cor'
  # Returns:
  #   an object of class prior_frame
  prior <- empty_prior_frame()
  if (nrow(ranef)) {
    # global sd class
    nlpars <- unique(ranef$nlpar)
    if (global_sd) {
      # one unrestricted global row ("") carrying the default prior
      # plus one empty row per non-linear parameter
      global_sd_prior <- rep("", length(setdiff(nlpars, "")))
      global_sd_prior <- c(def_scale_prior, global_sd_prior)
      global_sd_prior <- prior_frame(class = "sd",
                                     prior = global_sd_prior,
                                     nlpar = union("", nlpars))
    } else {
      global_sd_prior <- prior_frame(class = "sd",
                                     prior = def_scale_prior,
                                     nlpar = nlpars)
    }
    prior <- rbind(prior, global_sd_prior)
    # one pass per grouping-term id; all rows of an id share a group
    for (id in unique(ranef$id)) {
      r <- ranef[ranef$id == id, ]
      group <- r$group[1]
      # include group-level standard deviations
      prior <- rbind(prior,
                     prior_frame(class = "sd", group = group,
                                 nlpar = unique(r$nlpar)),
                     prior_frame(class = "sd", coef = r$coef,
                                 group = group, nlpar = r$nlpar))
      # detect duplicated group-level effects: any repeated
      # (coef, group, nlpar) combination among coefficient-level rows
      J <- with(prior, class == "sd" & nchar(coef))
      dupli <- duplicated(prior[J, ])
      if (any(dupli)) {
        stop("Duplicated group-level effects detected for group ",
             group, call. = FALSE)
      }
      # include correlation parameters, but only if correlations are
      # actually modeled and there is more than one effect to correlate
      if (isTRUE(r$cor[1]) && nrow(r) > 1L) {
        if (internal) {
          prior <- rbind(prior,
                         prior_frame(class = "L", group = c("", group),
                                     prior = c("lkj_corr_cholesky(1)", "")))
        } else {
          prior <- rbind(prior,
                         prior_frame(class = "cor", group = c("", group),
                                     prior = c("lkj(1)", "")))
        }
      }
    }
  }
  prior
}
get_prior_splines <- function(splines, def_scale_prior, nlpar = "") {
  # Default priors for standard deviations of smoothing (GAMM) terms.
  # Args:
  #   splines: names of the spline terms
  #   def_scale_prior: a character string defining the default
  #     prior for spline SDs
  #   nlpar: optional name of a non-linear parameter
  # Returns:
  #   an object of class prior_frame
  if (!length(splines)) {
    return(empty_prior_frame())
  }
  # a global default row ("") plus one empty row per spline term
  prior_frame(class = "sds", coef = c("", splines),
              prior = c(def_scale_prior, rep("", length(splines))),
              nlpar = nlpar)
}
check_prior <- function(prior, formula, data = NULL, family = gaussian(),
                        sample_prior = FALSE, autocor = NULL,
                        threshold = "flexible", check_rows = NULL,
                        warn = FALSE) {
  # check prior input and amend it if needed
  # Args:
  #   same as the respective parameters in brm
  #   (nonlinear is expected to be an attribute of formula)
  #   check_rows: if not NULL, check only the rows given in check_rows
  #   warn: passed to check_prior_content
  # Returns:
  #   a data.frame of prior specifications to be used in stan_prior (see stan.R)
  if (isTRUE(attr(prior, "checked"))) {
    return(prior) # prior has already been checked; no need to do it twice
  }
  stopifnot(is(formula, "brmsformula"))
  ee <- extract_effects(formula, family = family)
  # all parameters (with defaults) the model actually contains
  all_priors <- get_prior(formula = formula, data = data,
                          family = family, autocor = autocor,
                          threshold = threshold, internal = TRUE)
  if (is.null(prior)) {
    prior <- all_priors
  } else {
    prior <- as.prior_frame(prior)
  }
  # exclude priors using increment_log_prob to re-add them at the end
  has_incr_lp <- grepl("^increment_log_prob\\(", prior$prior)
  prior_incr_lp <- prior[has_incr_lp, ]
  prior <- prior[!has_incr_lp, ]
  # check for duplicated priors; user-facing classes 'cor'/'rescor'
  # are renamed to the internal 'L'/'Lrescor' first
  prior$class <- rename(prior$class, symbols = c("^cor$", "^rescor$"),
                        subs = c("L", "Lrescor"), fixed = FALSE)
  # columns 2:5 are class, coef, group, nlpar (the parameter identity)
  duplicated_input <- duplicated(prior[, 2:5])
  if (any(duplicated_input)) {
    stop("Duplicated prior specifications are not allowed.", call. = FALSE)
  }
  # handle special priors that are not explicitly coded as functions in Stan
  has_specef <- is.formula(ee[c("mono", "cse")])
  temp <- handle_special_priors(prior, has_specef = has_specef)
  prior <- temp$prior
  attrib <- temp$attrib
  # check if parameters in prior are valid: a user row is valid exactly
  # when its (class, coef, group, nlpar) also occurs in all_priors
  if (nrow(prior)) {
    valid <- which(duplicated(rbind(all_priors[, 2:5], prior[, 2:5])))
    invalid <- which(!1:nrow(prior) %in% (valid - nrow(all_priors)))
    if (length(invalid)) {
      # inform the user (non-fatally) and drop the unused priors
      msg_priors <- lapply(as.brmsprior(prior[invalid, ]), .print_prior)
      message(paste("The following priors don't correspond to any",
                    "model parameter \nand will thus not affect the results:",
                    collapse(" \n", msg_priors)), "\n")
      prior <- prior[-invalid, ]
    }
  }
  check_prior_content(prior, family = family, warn = warn)
  # merge prior with all_priors; user rows come first, so defaults
  # for the same parameter are discarded by the duplicated() filter
  prior <- rbind(prior, all_priors)
  prior <- prior[!duplicated(prior[, 2:5]), ]
  rows2remove <- NULL
  # special treatment of fixed effects Intercepts: copy user priors from
  # the 'Intercept'/'b' rows onto the internal 'temp_Intercept' rows
  int_index <- which(prior$class == "Intercept")
  if (length(int_index)) {
    int_prior <- prior[int_index, ]
    if (length(int_index) > 1L) {
      intercepts <- prior$coef[int_index]
      intercepts <- intercepts[nchar(intercepts) > 0]
    } else intercepts <- "Intercept"
    bint_index <- which(prior$class == "b" & prior$coef %in% intercepts)
    bint_prior <- prior[bint_index, ]
    for (t in which(prior$class %in% "temp_Intercept")) {
      ti <- int_prior$coef == prior$coef[t]
      tb <- bint_prior$coef %in% c(prior$coef[t], "Intercept")
      if (sum(ti) && nchar(int_prior$prior[ti]) > 0) {
        # take 'Intercept' priors first if specified
        prior$prior[t] <- int_prior$prior[ti]
      } else if (sum(tb) && nchar(bint_prior$prior[tb]) > 0) {
        # fall back to 'b' (fixed effects) priors
        prior$prior[t] <- bint_prior$prior[tb]
      }
    }
    rows2remove <- c(rows2remove, int_index, bint_index)
  }
  # prepare priors of monotonic effects: evaluate the user's R
  # expression inside dirichlet(<vector>) into a literal Stan call;
  # the leading "." dispatches to the .dirichlet helper below
  mono_forms <- get_effect(ee, "mono")
  for (k in seq_along(mono_forms)) {
    monef <- colnames(get_model_matrix(mono_forms[[k]], data = data))
    for (i in seq_along(monef)) {
      take <- with(prior, class == "simplex" & coef == monef[i] &
                   nlpar == names(mono_forms)[k])
      simplex_prior <- paste0(".", prior$prior[take])
      if (nchar(simplex_prior) > 1L) {
        simplex_prior <- paste(eval(parse(text = simplex_prior)),
                               collapse = ",")
        prior$prior[take] <- paste0("dirichlet(c(", simplex_prior, "))")
      }
    }
  }
  # check if priors for non-linear parameters are defined; they are
  # mandatory because non-linear models have no sensible flat default
  if (length(ee$nonlinear)) {
    nlpars <- names(ee$nonlinear)
    for (nlp in nlpars) {
      nlp_prior <- prior$prior[with(prior, nlpar == nlp & class == "b")]
      if (!any(as.logical(nchar(nlp_prior)))) {
        stop(paste0("Priors on fixed effects are required in non-linear ",
                    "models, but none were found for parameter '", nlp,
                    "'. \nSee help(set_prior) for more details."),
             call. = FALSE)
      }
    }
  }
  if (length(rows2remove)) {
    prior <- prior[-rows2remove, ]
  }
  prior <- prior[with(prior, order(nlpar, class, group, coef)), ]
  prior <- rbind(prior, prior_incr_lp)
  rownames(prior) <- 1:nrow(prior)
  # add attributes to prior generated in handle_special_priors
  for (i in seq_along(attrib)) {
    attr(prior, names(attrib)[i]) <- attrib[[i]]
  }
  attr(prior, "prior_only") <- identical(sample_prior, "only")
  attr(prior, "checked") <- TRUE
  prior
}
check_prior_content <- function(prior, family = gaussian(), warn = TRUE) {
  # try to check if prior distributions are reasonable
  # Args:
  #   prior: A prior_frame
  #   family: the model family
  #   warn: logical; print boundary warnings?
  # Returns:
  #   invisible(NULL); called for its warnings/errors only
  stopifnot(is(prior, "prior_frame"))
  stopifnot(is(family, "family"))
  family <- family$family
  if (nrow(prior)) {
    # distributions with support on the positive reals only
    lb_priors <- c("lognormal", "chi_square", "inv_chi_square",
                   "scaled_inv_chi_square", "exponential", "gamma",
                   "inv_gamma", "weibull", "frechet", "rayleigh",
                   "pareto", "pareto_type_2")
    lb_priors_reg <- paste0("^(", paste0(lb_priors, collapse = "|"), ")")
    # distributions with bounded support on both sides
    ulb_priors <- c("beta", "uniform", "von_mises")
    ulb_priors_reg <- paste0("^(", paste0(ulb_priors, collapse = "|"), ")")
    # parameter classes with no natural bounds
    nb_pars <- c("b", "Intercept", if (!family %in% "cumulative") "delta")
    # parameter classes with a natural lower bound
    lb_pars <- c("sd", "sigma", "nu", "shape", "phi", "kappa",
                 if (family %in% "cumulative") "delta")
    cor_pars <- c("cor", "L", "rescor", "Lrescor")
    autocor_pars <- c("ar", "ma")
    lb_warning <- ub_warning <- ""
    autocor_warning <- FALSE
    for (i in 1:nrow(prior)) {
      msg_prior <- .print_prior(as.brmsprior(prior[i, , drop = FALSE])[[1]])
      has_lb_prior <- grepl(lb_priors_reg, prior$prior[i])
      has_ulb_prior <- grepl(ulb_priors_reg, prior$prior[i])
      # priors with nchar(coef) inherit their boundaries from the
      # matching coefficient-free row of the same class/group/nlpar
      j <- with(prior, which(class == class[i] & group == group[i] &
                             nlpar == nlpar[i] & !nchar(coef)))
      bound <- if (length(j)) prior$bound[j] else ""
      has_lb <- grepl("lower", bound)
      has_ub <- grepl("upper", bound)
      if (prior$class[i] %in% nb_pars) {
        # bounded prior on an unbounded parameter: collect warnings
        if ((has_lb_prior || has_ulb_prior) && !has_lb) {
          lb_warning <- paste0(lb_warning, msg_prior, "\n")
        }
        if (has_ulb_prior && !has_ub) {
          ub_warning <- paste0(ub_warning, msg_prior, "\n")
        }
      } else if (prior$class[i] %in% lb_pars) {
        # these parameters already have a lower bound; only an
        # upper-bounded prior needs flagging
        if (has_ulb_prior && !has_ub) {
          ub_warning <- paste0(ub_warning, msg_prior, "\n")
        }
      } else if (prior$class[i] %in% cor_pars) {
        if (nchar(prior$prior[i]) && !grepl("^lkj", prior$prior[i])) {
          stop(paste("Currently 'lkj' is the only valid prior",
                     "for group-level correlations. See help(set_prior)",
                     "for more details."), call. = FALSE)
        }
      } else if (prior$class[i] %in% autocor_pars) {
        # any deviation from the default stationarity bounds is flagged
        if (prior$bound[i] != "<lower=-1,upper=1>") {
          autocor_warning <- TRUE
        }
      } else if (prior$class[i] == "simplex") {
        if (nchar(prior$prior[i]) && !grepl("^dirichlet\\(", prior$prior[i])) {
          stop(paste("Currently 'dirichlet' is the only valid prior",
                     "for simplex parameters. See help(set_prior)",
                     "for more details."), call. = FALSE)
        }
      }
    } # end for
    if (nchar(lb_warning) && warn) {
      warning(paste0("It appears that you have specified a lower bounded ",
                     "prior on a parameter that has no natural lower bound.",
                     "\nIf this is really what you want, please specify ",
                     "argument 'lb' of 'set_prior' appropriately.",
                     "\nWarning occurred for prior \n", lb_warning),
              call. = FALSE)
    }
    if (nchar(ub_warning) && warn) {
      warning(paste0("It appears that you have specified an upper bounded ",
                     "prior on a parameter that has no natural upper bound.",
                     "\nIf this is really what you want, please specify ",
                     "argument 'ub' of 'set_prior' appropriately.",
                     "\nWarning occurred for prior \n", ub_warning),
              call. = FALSE)
    }
    if (autocor_warning && warn) {
      warning(paste("Changing the boundaries of autocorrelation",
                    "parameters is not recommended."), call. = FALSE)
    }
  }
  invisible(NULL)
}
handle_special_priors <- function(prior, has_specef = FALSE) {
  # Detect and process special priors that are not plain Stan
  # distribution calls, currently only the horseshoe prior.
  # Args:
  #   prior: an object of class prior_frame
  #   has_specef: are monotonic or category specific effects present?
  # Returns:
  #   a named list of two objects:
  #     prior: an updated version of prior
  #     attrib: a named list containing future attributes of prior
  attrib <- list()
  global_b <- which(prior$class == "b" & !nchar(prior$coef))
  if (length(global_b) &&
      grepl("^horseshoe\\(.+\\)$", prior$prior[global_b])) {
    # horseshoe prior requested for the fixed effects as a whole
    if (any(nchar(prior$nlpar))) {
      stop("Horseshoe priors are not yet allowed in non-linear models.",
           call. = FALSE)
    }
    if (has_specef) {
      stop(paste("Horseshoe priors are not yet allowed in models with",
                 "monotonic or category specific effects."),
           call. = FALSE)
    }
    # extract the degrees of freedom from 'horseshoe(<df>)'
    df_string <- gsub("^horseshoe\\(|\\)$", "", prior$prior[global_b])
    hs_df <- suppressWarnings(as.numeric(df_string))
    if (is.na(hs_df) || hs_df <= 0) {
      stop("degrees of freedom of horseshoe prior must be a positive number",
           call. = FALSE)
    }
    coef_b <- which(prior$class == "b" & nchar(prior$coef)
                    & prior$coef != "Intercept")
    if (any(nchar(prior$prior[coef_b]))) {
      stop(paste("Defining priors for single fixed effects parameters",
                 "is not allowed when using horseshoe priors",
                 "(except for the Intercept)"), call. = FALSE)
    }
    attrib$hs_df <- hs_df
    # the actual horseshoe structure is added later via hs_local/hs_global
    prior$prior[global_b] <- "normal(0, hs_local * hs_global)"
  }
  # expand lkj correlation prior to full name
  prior$prior <- sub("^(lkj\\(|lkj_corr\\()", "lkj_corr_cholesky(", prior$prior)
  list(prior = prior, attrib = attrib)
}
get_bound <- function(prior, class = "b", coef = "",
                      group = "", nlpar = "") {
  # Look up the boundary string of a single parameter in a prior_frame.
  # Args:
  #   prior: object of class prior_frame
  #   class, coef, group, nlpar: strings of length 1 identifying
  #     the parameter
  # Returns:
  #   the matching element of prior$bound (character(0) if no match)
  stopifnot(length(class) == 1L)
  # treat zero-length selectors as the empty string
  if (!length(coef)) coef <- ""
  if (!length(group)) group <- ""
  if (!length(nlpar)) nlpar <- ""
  hits <- prior$class == class & prior$coef == coef &
    prior$group == group & prior$nlpar == nlpar
  if (sum(hits) > 1L) {
    stop("extracted more than one boundary at once")
  }
  prior$bound[hits]
}
prior_frame <- function(prior = "", class = "", coef = "", group = "",
                        nlpar = "", bound = "") {
  # Construct a data.frame of prior specifications, one row per
  # combination of the (recycled) input vectors, carrying the
  # 'prior_frame' class on top of 'data.frame'.
  out <- data.frame(prior = prior, class = class, coef = coef,
                    group = group, nlpar = nlpar, bound = bound,
                    stringsAsFactors = FALSE)
  structure(out, class = c("prior_frame", "data.frame"))
}
empty_prior_frame <- function() {
  # A prior_frame with all six columns present but zero rows,
  # used as the neutral starting point for rbind() accumulation.
  chr0 <- character(0)
  prior_frame(prior = chr0, class = chr0, coef = chr0,
              group = chr0, nlpar = chr0, bound = chr0)
}
#' @export
print.brmsprior <- function(x, ...) {
  # Print a single prior specification and return it invisibly
  # so the object still pipes/assigns cleanly.
  formatted <- .print_prior(x)
  cat(formatted)
  invisible(x)
}
.print_prior <- function(x) {
  # Build the display string for a single brmsprior, roughly
  # "<bound> <class>_<nlpar>_<group>_<coef> ~ <prior>".
  grp <- usc(x$group, "prefix")
  cf <- usc(x$coef, "prefix")
  nlp <- usc(x$nlpar, "prefix")
  # only render the bound / tilde parts when there is something to show
  bnd <- ifelse(nchar(x$bound), paste0(x$bound, " "), "")
  sep <- ifelse(nchar(x$class) + nchar(grp) + nchar(cf), " ~ ", "")
  rhs <- ifelse(nchar(x$prior), x$prior, "(no prior)")
  paste0(bnd, x$class, nlp, grp, cf, sep, rhs)
}
#' @export
c.brmsprior <- function(x, ...) {
  # Combine one or more brmsprior objects into a single prior_frame.
  # Each brmsprior is a list of 6 strings (prior, class, coef, group,
  # nlpar, bound) that becomes one row of the resulting data.frame.
  # vapply (instead of sapply) guarantees a logical result even for
  # zero additional arguments
  if (!all(vapply(list(...), is, logical(1L), class2 = "brmsprior"))) {
    stop("All arguments must be of class brmsprior", call. = FALSE)
  }
  prior <- data.frame(matrix(unlist(list(x, ...)), ncol = 6, byrow = TRUE),
                      stringsAsFactors = FALSE)
  names(prior) <- c("prior", "class", "coef", "group", "nlpar", "bound")
  class(prior) <- c("prior_frame", "data.frame")
  prior
}
as.brmsprior <- function(prior) {
  # Convert a prior_frame into a list of brmsprior objects;
  # a brmsprior input is returned unchanged.
  # Args:
  #   prior: an object of class 'prior_frame' or 'brmsprior'
  stopifnot(is(prior, "prior_frame") || is(prior, "brmsprior"))
  if (is(prior, "prior_frame")) {
    # each row (a named character vector) becomes one brmsprior
    row_to_brmsprior <- function(row) {
      structure(as.list(row), class = c("brmsprior", "list"))
    }
    prior <- unname(apply(prior, MARGIN = 1, FUN = row_to_brmsprior))
  }
  prior
}
as.prior_frame <- function(prior) {
  # Coerce 'prior' to a prior_frame: NULL becomes an empty
  # prior_frame, brmsprior objects are converted via c.brmsprior,
  # and existing prior_frames pass through unchanged.
  # Args:
  #   prior: an object of class 'prior_frame' or 'brmsprior', or NULL
  if (is.null(prior)) {
    return(prior_frame())
  }
  if (is(prior, "brmsprior")) {
    return(c(prior))
  }
  if (!is(prior, "prior_frame")) {
    stop(paste("Invalid 'prior' argument. See help(set_prior)",
               "for further information."), call. = FALSE)
  }
  prior
}
.dirichlet <- function(...) {
  # Validate the concentration parameters of a dirichlet prior for
  # simplex parameters; all values must be strictly positive.
  # Returns the flattened numeric vector of concentrations.
  conc <- as.numeric(c(...))
  if (!all(conc > 0)) {
    stop("The dirichlet prior expects positive values.", call. = FALSE)
  }
  conc
}
|
# Practice Exercise 2 is about group_by(), summarise(), and ungroup().
# There is a data set called "data_gutenberg100_clean.csv" that contains
# the cleaned data (i.e., what the data should look like at the end of
# Exercise 1). Read that data into R.
#
# Use group_by() and summarise() to create a new tibble called
# "gutenberg_by_author" that contains one row for each author in the
# data set. For each author it should contain their name (i.e., the
# "author" column) and two new columns that summarise their contribution to
# the top 100:
#
# - the "n_books" variable should count the number of books that author
# has in the top 100.
#
# - the "n_downloads" variable should calculate the sum() of all downloads
# of books by that author, calculated across all books they have in the
# top 100
#
# For the sake of neatness, remember to ungroup() at the end.
|
/static/dancing/practice_wrangling_02.R
|
no_license
|
djnavarro/robust-tools
|
R
| false
| false
| 878
|
r
|
# Practice Exercise 2 is about group_by(), summarise(), and ungroup().
# There is a data set called "data_gutenberg100_clean.csv" that contains
# the cleaned data (i.e., what the data should look like at the end of
# Exercise 1). Read that data into R.
#
# Use group_by() and summarise() to create a new tibble called
# "gutenberg_by_author" that contains one row for each author in the
# data set. For each author it should contain their name (i.e., the
# "author" column) and two new columns that summarise their contribution to
# the top 100:
#
# - the "n_books" variable should count the number of books that author
# has in the top 100.
#
# - the "n_downloads" variable should calculate the sum() of all downloads
# of books by that author, calculated across all books they have in the
# top 100
#
# For the sake of neatness, remember to ungroup() at the end.
|
#' Populate a shareable PLP shiny viewer app
#'
#' Copies the PLPViewer app skeleton into \code{outputDirectory} and fills its
#' \code{data} folder with the prediction results found in
#' \code{resultDirectory}, censoring small cell counts so the results are safe
#' to share.
#'
#' @param outputDirectory folder in which the shiny app is created
#' @param shinyDirectory folder holding the app skeleton; defaults to the
#'   PLPViewer app shipped with the SimpleABCceftriaxone package
#' @param resultDirectory folder containing the analysis results (required)
#' @param minCellCount cell counts below this value are removed before sharing
#' @param databaseName sharable name describing the development data
#' @return the path of the populated data folder
#' @export
populateShinyApp <- function(outputDirectory = './ShinyApp',
                             shinyDirectory,
                             resultDirectory,
                             minCellCount = 10,
                             databaseName = 'sharable name of development data'){
  #check inputs
  if(missing(shinyDirectory)){
    shinyDirectory <- system.file("shiny", "PLPViewer", package = "SimpleABCceftriaxone")
  }
  if(missing(resultDirectory)){
    stop('Need to enter the resultDirectory')
  }
  if(!dir.exists(resultDirectory)){
    stop('resultDirectory does not exist')
  }
  # create the shiny data folder
  if(!dir.exists(outputDirectory)){
    dir.create(outputDirectory, recursive = T)
  }
  # copy shiny folder to outputDirectory
  R.utils::copyDirectory(from = shinyDirectory,
                         to= outputDirectory,
                         recursive=TRUE)
  # from here on outputDirectory points at the app's data subfolder
  outputDirectory <- file.path(outputDirectory,'data')
  if(!dir.exists(outputDirectory)){
    dir.create(outputDirectory, recursive = T)
  }
  #outputDirectory <- file.path(shinyDirectory,'data')
  # copy the settings csv
  file <- utils::read.csv(file.path(resultDirectory,'settings.csv'))
  utils::write.csv(file, file.path(outputDirectory,'settings.csv'), row.names = F)
  # copy each analysis as a rds file and copy the log
  files <- dir(resultDirectory, full.names = F)
  files <- files[grep('Analysis', files)]
  for(file in files){
    if(!dir.exists(file.path(outputDirectory,file))){
      dir.create(file.path(outputDirectory,file))
    }
    if(dir.exists(file.path(resultDirectory,file, 'plpResult'))){
      res <- PatientLevelPrediction::loadPlpResult(file.path(resultDirectory,file, 'plpResult'))
      # transportPlp censors counts below minCellCount before sharing
      res <- PatientLevelPrediction::transportPlp(res, n= minCellCount,
                                                  save = F, dataName = databaseName)
      saveRDS(res, file.path(outputDirectory,file, 'plpResult.rds'))
    }
    if(file.exists(file.path(resultDirectory,file, 'plpLog.txt'))){
      file.copy(from = file.path(resultDirectory,file, 'plpLog.txt'),
                to = file.path(outputDirectory,file, 'plpLog.txt'))
    }
  }
  # copy any validation results
  if(dir.exists(file.path(resultDirectory,'Validation'))){
    valFolders <- dir(file.path(resultDirectory,'Validation'), full.names = F)
    if(length(valFolders)>0){
      # move each of the validation rds
      for(valFolder in valFolders){
        # get the analysisIds
        valSubfolders <- dir(file.path(resultDirectory,'Validation',valFolder), full.names = F)
        if(length(valSubfolders)!=0){
          for(valSubfolder in valSubfolders ){
            valOut <- file.path(valFolder,valSubfolder)
            if(!dir.exists(file.path(outputDirectory,'Validation',valOut))){
              dir.create(file.path(outputDirectory,'Validation',valOut), recursive = T)
            }
            if(file.exists(file.path(resultDirectory,'Validation',valOut, 'validationResult.rds'))){
              res <- readRDS(file.path(resultDirectory,'Validation',valOut, 'validationResult.rds'))
              # apply the same minimum-cell-count censoring to validation results
              res <- PatientLevelPrediction::transportPlp(res, n= minCellCount,
                                                          save = F, dataName = databaseName)
              saveRDS(res, file.path(outputDirectory,'Validation',valOut, 'validationResult.rds'))
            }
          }
        }
      }
    }
  }
  ParallelLogger::logInfo(paste0('Shiny App created at: ', outputDirectory))
  ParallelLogger::logInfo(paste0('Upload the folder ', outputDirectory, ' to the shinyDeploy OHDSI github to share the results with others.'))
  return(outputDirectory)
}
|
/SimpleAbxBetterChoice_IP/SimpleABCceftriaxone/R/populateShinyApp.R
|
no_license
|
ABMI/AbxBetterChoice
|
R
| false
| false
| 3,769
|
r
|
#' Populate a shareable PLP shiny viewer app
#'
#' Copies the PLPViewer app skeleton into \code{outputDirectory} and fills its
#' \code{data} folder with the prediction results found in
#' \code{resultDirectory}, censoring small cell counts so the results are safe
#' to share.
#'
#' @param outputDirectory folder in which the shiny app is created
#' @param shinyDirectory folder holding the app skeleton; defaults to the
#'   PLPViewer app shipped with the SimpleABCceftriaxone package
#' @param resultDirectory folder containing the analysis results (required)
#' @param minCellCount cell counts below this value are removed before sharing
#' @param databaseName sharable name describing the development data
#' @return the path of the populated data folder
#' @export
populateShinyApp <- function(outputDirectory = './ShinyApp',
                             shinyDirectory,
                             resultDirectory,
                             minCellCount = 10,
                             databaseName = 'sharable name of development data'){
  #check inputs
  if(missing(shinyDirectory)){
    shinyDirectory <- system.file("shiny", "PLPViewer", package = "SimpleABCceftriaxone")
  }
  if(missing(resultDirectory)){
    stop('Need to enter the resultDirectory')
  }
  if(!dir.exists(resultDirectory)){
    stop('resultDirectory does not exist')
  }
  # create the shiny data folder
  if(!dir.exists(outputDirectory)){
    dir.create(outputDirectory, recursive = T)
  }
  # copy shiny folder to outputDirectory
  R.utils::copyDirectory(from = shinyDirectory,
                         to= outputDirectory,
                         recursive=TRUE)
  # from here on outputDirectory points at the app's data subfolder
  outputDirectory <- file.path(outputDirectory,'data')
  if(!dir.exists(outputDirectory)){
    dir.create(outputDirectory, recursive = T)
  }
  #outputDirectory <- file.path(shinyDirectory,'data')
  # copy the settings csv
  file <- utils::read.csv(file.path(resultDirectory,'settings.csv'))
  utils::write.csv(file, file.path(outputDirectory,'settings.csv'), row.names = F)
  # copy each analysis as a rds file and copy the log
  files <- dir(resultDirectory, full.names = F)
  files <- files[grep('Analysis', files)]
  for(file in files){
    if(!dir.exists(file.path(outputDirectory,file))){
      dir.create(file.path(outputDirectory,file))
    }
    if(dir.exists(file.path(resultDirectory,file, 'plpResult'))){
      res <- PatientLevelPrediction::loadPlpResult(file.path(resultDirectory,file, 'plpResult'))
      # transportPlp censors counts below minCellCount before sharing
      res <- PatientLevelPrediction::transportPlp(res, n= minCellCount,
                                                  save = F, dataName = databaseName)
      saveRDS(res, file.path(outputDirectory,file, 'plpResult.rds'))
    }
    if(file.exists(file.path(resultDirectory,file, 'plpLog.txt'))){
      file.copy(from = file.path(resultDirectory,file, 'plpLog.txt'),
                to = file.path(outputDirectory,file, 'plpLog.txt'))
    }
  }
  # copy any validation results
  if(dir.exists(file.path(resultDirectory,'Validation'))){
    valFolders <- dir(file.path(resultDirectory,'Validation'), full.names = F)
    if(length(valFolders)>0){
      # move each of the validation rds
      for(valFolder in valFolders){
        # get the analysisIds
        valSubfolders <- dir(file.path(resultDirectory,'Validation',valFolder), full.names = F)
        if(length(valSubfolders)!=0){
          for(valSubfolder in valSubfolders ){
            valOut <- file.path(valFolder,valSubfolder)
            if(!dir.exists(file.path(outputDirectory,'Validation',valOut))){
              dir.create(file.path(outputDirectory,'Validation',valOut), recursive = T)
            }
            if(file.exists(file.path(resultDirectory,'Validation',valOut, 'validationResult.rds'))){
              res <- readRDS(file.path(resultDirectory,'Validation',valOut, 'validationResult.rds'))
              # apply the same minimum-cell-count censoring to validation results
              res <- PatientLevelPrediction::transportPlp(res, n= minCellCount,
                                                          save = F, dataName = databaseName)
              saveRDS(res, file.path(outputDirectory,'Validation',valOut, 'validationResult.rds'))
            }
          }
        }
      }
    }
  }
  ParallelLogger::logInfo(paste0('Shiny App created at: ', outputDirectory))
  ParallelLogger::logInfo(paste0('Upload the folder ', outputDirectory, ' to the shinyDeploy OHDSI github to share the results with others.'))
  return(outputDirectory)
}
|
# global.R: data shared by the shiny ui/server, prepared once at startup.
library(shiny)
library(data.table)
library(dplyr)
library(tidyr)
library(rsconnect)
library(readxl)
# Fund listing exported from Vanguard; the table occupies cells B2:Z221.
funds <- read_xlsx('vanguard.xlsx', range = 'B2:Z221')
# Trailing returns (1/5/10 year) plus beta for domestic stock funds.
returns <- funds %>%
  filter(fund_type == 'Domestic Stock') %>%
  select(fund_name, fund_type, fund_category, one_year,
         five_year, ten_year, beta)
# Keep only funds with a positive beta.
returns <- returns %>% filter(beta > 0)
# Earnings growth and P/E ratio for domestic stock funds.
growth <- funds %>%
  filter(fund_type == 'Domestic Stock') %>%
  select(fund_name, fund_type, fund_category,
         earnings_growth_rate, `price/earnings_ratio`)
# Since-inception return and inception date for domestic stock funds.
inception <- funds %>%
  filter(fund_type == 'Domestic Stock') %>%
  select(fund_name, fund_type, fund_category,
         `since inception`, inception_date)
# Map each index in `num` to one of the first four values:
# 1 -> value1, 2 -> value2, 3 -> value3, anything else -> value4.
# `value5` is accepted for symmetry with lower() but is not used here.
# Returns a list indexed by the values of `num`.
upper <- function(num, value1, value2, value3, value4, value5){
  p <- list()
  for (idx in num) {
    chosen <- switch(as.character(idx),
                     "1" = value1,
                     "2" = value2,
                     "3" = value3,
                     value4)
    p[idx] <- chosen
  }
  return(p)
}
# Map each index in `num` to one of the last four values:
# 1 -> value2, 2 -> value3, 3 -> value4, anything else -> value5.
# `value1` is accepted for symmetry with upper() but is not used here.
# Returns a list indexed by the values of `num`.
lower <- function(num, value1, value2, value3, value4, value5){
  q <- list()
  for (idx in num) {
    chosen <- switch(as.character(idx),
                     "1" = value2,
                     "2" = value3,
                     "3" = value4,
                     value5)
    q[idx] <- chosen
  }
  return(q)
}
|
/global.R
|
no_license
|
Jello95/Project_Vanguard
|
R
| false
| false
| 1,240
|
r
|
# global.R: data shared by the shiny ui/server, prepared once at startup.
library(shiny)
library(data.table)
library(dplyr)
library(tidyr)
library(rsconnect)
library(readxl)
# Fund listing exported from Vanguard; the table occupies cells B2:Z221.
funds <- read_xlsx('vanguard.xlsx', range = 'B2:Z221')
# Trailing returns (1/5/10 year) plus beta for domestic stock funds.
returns <- funds %>%
  filter(fund_type == 'Domestic Stock') %>%
  select(fund_name, fund_type, fund_category, one_year,
         five_year, ten_year, beta)
# Keep only funds with a positive beta.
returns <- returns %>% filter(beta > 0)
# Earnings growth and P/E ratio for domestic stock funds.
growth <- funds %>%
  filter(fund_type == 'Domestic Stock') %>%
  select(fund_name, fund_type, fund_category,
         earnings_growth_rate, `price/earnings_ratio`)
# Since-inception return and inception date for domestic stock funds.
inception <- funds %>%
  filter(fund_type == 'Domestic Stock') %>%
  select(fund_name, fund_type, fund_category,
         `since inception`, inception_date)
# Map each index in `num` to one of the first four values:
# 1 -> value1, 2 -> value2, 3 -> value3, anything else -> value4.
# `value5` is accepted for symmetry with lower() but is not used here.
# Returns a list indexed by the values of `num`.
upper <- function(num, value1, value2, value3, value4, value5){
  p <- list()
  for (idx in num) {
    chosen <- switch(as.character(idx),
                     "1" = value1,
                     "2" = value2,
                     "3" = value3,
                     value4)
    p[idx] <- chosen
  }
  return(p)
}
# Map each index in `num` to one of the last four values:
# 1 -> value2, 2 -> value3, 3 -> value4, anything else -> value5.
# `value1` is accepted for symmetry with upper() but is not used here.
# Returns a list indexed by the values of `num`.
lower <- function(num, value1, value2, value3, value4, value5){
  q <- list()
  for (idx in num) {
    chosen <- switch(as.character(idx),
                     "1" = value2,
                     "2" = value3,
                     "3" = value4,
                     value5)
    q[idx] <- chosen
  }
  return(q)
}
|
# The City of Lights
#
# Graphing marine marauding using night lights
# at the edge of the world
# load required packages
library(tidyverse) # plotting and data wrangling
library(raster) # to load raster data exported from GEE
library(sf) # vector processing + plotting
library(rnaturalearth) # fetch geographic data
library(ggtext)
# load data (will take a while)
# The loader script presumably defines land, minor_islands, ice, rivers,
# lakes, roads, land_buffer, country_boundary and r_df used below; the
# exists() guard skips the slow reload when re-sourcing interactively.
if(!exists("land")){
  source("analysis/load_data_peru.R")
}
# set colour theme (dark basemap palette)
col_land <- "#333333"
col_water <- "#222222"
col_boundary <- "#585152"
col_ice <- "#5E625D"
col_road <- "#454545"
# Dark map theme: light thin text on a near-black background, subtle dotted
# graticule, legend anchored top-right. Extra theme() arguments can be passed
# through `...` to override or extend the defaults.
theme_map <- function(...) {
  theme_minimal() +
    theme(
      text = element_text(family = "Ubuntu Thin", color = "#C3C3C3"),
      axis.line = element_blank(),
      axis.text.x = element_text(color = "#333333"),
      axis.text.y = element_text(color = "#333333"),
      axis.ticks = element_line(color = "#333333"),
      axis.title.x = element_blank(),
      axis.title.y = element_blank(),
      panel.grid.major = element_line(color = "#444444", size = 0.2, linetype = 3),
      #panel.grid.minor = element_blank(),
      plot.background = element_rect(fill = "#222222", color = NA),
      panel.background = element_rect(fill = "#222222", color = "#222222"),
      #legend.position = "bottom",
      legend.direction = "horizontal",
      legend.position = c(.95, .95),
      legend.justification = c("right", "top"),
      #legend.background = element_rect(fill = "#f5f5f2", color = NA),
      #panel.border = element_rect(fill = col_water, colour = NA),
      ...
    )
}
# Compose the map: night-light raster underneath, then vector layers
# (land, islands, ice, water, roads) and the country boundary on top.
p <- ggplot() +
  geom_tile(data = r_df, aes(x=x,y=y,fill= val)) +
  scale_fill_viridis_c(
    option = "B",
    name = "light intensity",
    labels = c("low","high"),
    breaks = c(2, 7)) +
  geom_sf(data = land, fill = col_land, color = NA) +
  guides(fill = guide_colourbar(title.position="top", title.hjust = 0.5)) +
  geom_sf(data = minor_islands, fill = col_land, color = NA) +
  geom_sf(data = ice, fill = col_ice, color = NA) +
  geom_sf(data = rivers, fill = NA, color = col_water) +
  geom_sf(data = lakes, fill = col_water, color = NA) +
  geom_sf(data = roads, fill = NA, color = col_road) +
  geom_sf(data = land_buffer, fill = NA, color = col_road, lty = 3) +
  # boundary drawn twice: a wide dark casing with a lighter line on top
  geom_sf(data = country_boundary, fill = NA, color = col_road, lwd = 1.2) +
  geom_sf(data = country_boundary, fill = NA, color = col_boundary) +
  coord_sf(ylim = c(-20,-5),xlim = c(-90,-67)) +
  labs(
    title = "Vessel Lights - Peru",
    subtitle = "Illuminating fishing activity with onboard flood lights",
    caption = "graphics & analysis by @koen_hufkens") +
  theme_map()
# Pass the plot explicitly: ggsave()'s default last_plot() is only registered
# when a plot is printed, so in a sourced script the assigned-but-unprinted
# `p` would not be picked up.
ggsave(filename = "city_of_lights_peru.png", plot = p, height = 9)
|
/analysis/city_of_lights_peru.R
|
no_license
|
khufkens/city_of_lights
|
R
| false
| false
| 2,669
|
r
|
# The City of Lights
#
# Graphing marine marauding using night lights
# at the edge of the world
# load required packages
library(tidyverse) # plotting and data wrangling
library(raster) # to load raster data exported from GEE
library(sf) # vector processing + plotting
library(rnaturalearth) # fetch geographic data
library(ggtext)
# load data (will take a while)
# The loader script presumably defines land, minor_islands, ice, rivers,
# lakes, roads, land_buffer, country_boundary and r_df used below; the
# exists() guard skips the slow reload when re-sourcing interactively.
if(!exists("land")){
  source("analysis/load_data_peru.R")
}
# set colour theme (dark basemap palette)
col_land <- "#333333"
col_water <- "#222222"
col_boundary <- "#585152"
col_ice <- "#5E625D"
col_road <- "#454545"
# Dark map theme: light thin text on a near-black background, subtle dotted
# graticule, legend anchored top-right. Extra theme() arguments can be passed
# through `...` to override or extend the defaults.
theme_map <- function(...) {
  theme_minimal() +
    theme(
      text = element_text(family = "Ubuntu Thin", color = "#C3C3C3"),
      axis.line = element_blank(),
      axis.text.x = element_text(color = "#333333"),
      axis.text.y = element_text(color = "#333333"),
      axis.ticks = element_line(color = "#333333"),
      axis.title.x = element_blank(),
      axis.title.y = element_blank(),
      panel.grid.major = element_line(color = "#444444", size = 0.2, linetype = 3),
      #panel.grid.minor = element_blank(),
      plot.background = element_rect(fill = "#222222", color = NA),
      panel.background = element_rect(fill = "#222222", color = "#222222"),
      #legend.position = "bottom",
      legend.direction = "horizontal",
      legend.position = c(.95, .95),
      legend.justification = c("right", "top"),
      #legend.background = element_rect(fill = "#f5f5f2", color = NA),
      #panel.border = element_rect(fill = col_water, colour = NA),
      ...
    )
}
# Compose the map: night-light raster underneath, then vector layers
# (land, islands, ice, water, roads) and the country boundary on top.
p <- ggplot() +
  geom_tile(data = r_df, aes(x=x,y=y,fill= val)) +
  scale_fill_viridis_c(
    option = "B",
    name = "light intensity",
    labels = c("low","high"),
    breaks = c(2, 7)) +
  geom_sf(data = land, fill = col_land, color = NA) +
  guides(fill = guide_colourbar(title.position="top", title.hjust = 0.5)) +
  geom_sf(data = minor_islands, fill = col_land, color = NA) +
  geom_sf(data = ice, fill = col_ice, color = NA) +
  geom_sf(data = rivers, fill = NA, color = col_water) +
  geom_sf(data = lakes, fill = col_water, color = NA) +
  geom_sf(data = roads, fill = NA, color = col_road) +
  geom_sf(data = land_buffer, fill = NA, color = col_road, lty = 3) +
  # boundary drawn twice: a wide dark casing with a lighter line on top
  geom_sf(data = country_boundary, fill = NA, color = col_road, lwd = 1.2) +
  geom_sf(data = country_boundary, fill = NA, color = col_boundary) +
  coord_sf(ylim = c(-20,-5),xlim = c(-90,-67)) +
  labs(
    title = "Vessel Lights - Peru",
    subtitle = "Illuminating fishing activity with onboard flood lights",
    caption = "graphics & analysis by @koen_hufkens") +
  theme_map()
# Pass the plot explicitly: ggsave()'s default last_plot() is only registered
# when a plot is printed, so in a sourced script the assigned-but-unprinted
# `p` would not be picked up.
ggsave(filename = "city_of_lights_peru.png", plot = p, height = 9)
|
# Read the phylogenetic tree in 11624_0.txt, unroot it, and write it back out
# (presumably for codeml, given the file layout - it requires unrooted trees;
# TODO confirm).
library(ape)
testtree <- read.tree("11624_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="11624_0_unrooted.txt")
|
/codeml_files/newick_trees_processed_and_cleaned/11624_0/rinput.R
|
no_license
|
DaniBoo/cyanobacteria_project
|
R
| false
| false
| 137
|
r
|
# Read the phylogenetic tree in 11624_0.txt, unroot it, and write it back out
# (presumably for codeml, given the file layout - it requires unrooted trees;
# TODO confirm).
library(ape)
testtree <- read.tree("11624_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="11624_0_unrooted.txt")
|
#' create_fbtrait_config
#'
#' Builds the trait-configuration table for the Android Field Book app: one
#' row per measurement column of the fieldbook, with format, min/max, units
#' and category levels looked up from the data dictionary.
#'
#' @param fieldbook a fieldbook; trait columns are assumed to be everything
#'   after the "TRT1" column
#' @param dictionary a dictionary with one row per trait id; expected to hold
#'   format, units, min, max and (from column 11 onward) category levels
#' @importFrom magrittr '%>%'
#'
#' @return a dataframe with one row per fieldbook trait column
#' @export
create_fbtrait_config <- function(fieldbook, dictionary){
  #print(str(fieldbook))
  #print(str(dictionary))
  # trait columns are everything after the TRT1 column
  fbn = names(fieldbook)
  fbn = fbn[(which(fbn == "TRT1") + 1):length(fbn)]
  n = length(fbn)
  dd = dictionary
  # one config row per trait, pre-filled with app defaults
  DF = data.frame(
    trait = fbn,
    format = rep("numeric", n),
    defaultValue = rep("", n),
    minimum = rep(NA, n),
    maximum = rep(NA, n),
    details = rep("", n),
    categories = rep("", n),
    isVisible = rep(TRUE, n),
    realPosition = c(1:n)
  )
  # coerce columns to the types the app expects
  DF[, 1] = as.character(DF[, 1])
  DF[, 2] = as.character(DF[, 2])
  DF[, 3] = as.character(DF[, 3])
  DF[, 4] = as.numeric(DF[, 4])
  DF[, 5] = as.numeric(DF[, 5])
  DF[, 6] = as.character(DF[, 6])
  DF[, 7] = as.character(DF[, 7])
  # collapse each categorical trait's level columns (11 .. ncol-2, by
  # position) into a single "a/b/c" string stored in the new "cats" column
  dd = cbind(dd, cats = rep("", nrow(dd)))
  dd[, "cats"] <- as.character(dd[, "cats"])
  dd_n = nrow(dd)
  dd_m = ncol(dd)
  for(i in 1:dd_n) {
    if(stringr::str_detect(dd$format[i], "categorical")){
      cs = paste(dd[i, 11:(dd_m - 2)])
      #cs = stringr::str_split(cs, "=") %>% unlist
      #cs = cs[seq(2,length(cs), 2)]
      cs = cs[cs!="NA"]
      cs = stringr::str_trim(cs)
      dd[i, "cats"] = paste(cs, collapse = "/")
    }
  }
  # create helper columm for subsampled variables
  dd = cbind(dd, traits = rep("", nrow(dd)))
  dd[, "traits"] <- as.character(dd[, "traits"])
  # strip _TPxx / _SSxx suffixes so timepoint/subsample columns map back to
  # their base dictionary id (stored in the temporary DF$traits column)
  for(i in 1:nrow(DF)){
    DF$traits[i] = DF$trait[i]
    if(stringr::str_detect(DF[i, "traits"], "_TP")){
      DF$traits[i] = stringr::str_split(DF[i, "traits"], "_TP")[[1]][1]
    }
    if(stringr::str_detect(DF[i, "traits"], "_SS")){
      DF$traits[i] = stringr::str_split(DF[i, "traits"], "_SS")[[1]][1]
    }
  }
  # look up format/units/min/max/categories from the dictionary by trait id
  for(i in 1:n){
    #print(dd[DF$traits[i] == dd$id, "format"])
    DF[i, "format"] = dd[DF$traits[i] == dd$id, "format"]
    DF[i, "details"] = dd[DF$traits[i] == dd$id, "units"]
    DF[i, "minimum"] = dd[DF$traits[i] == dd$id, "min"]
    DF[i, "maximum"] = dd[DF$traits[i] == dd$id, "max"]
    DF[i, "categories"] = dd[DF$traits[i] == dd$id, "cats"]
  }
  # drop the temporary "traits" helper column (last column) before returning
  DF = DF[, -c(ncol(DF))]
  DF
}
|
/R/create_fbtrait_config.R
|
no_license
|
c5sire/fbdesign
|
R
| false
| false
| 2,224
|
r
|
#' create_fbtrait_config
#'
#' Builds the trait-configuration table for the Android Field Book app: one
#' row per measurement column of the fieldbook, with format, min/max, units
#' and category levels looked up from the data dictionary.
#'
#' @param fieldbook a fieldbook; trait columns are assumed to be everything
#'   after the "TRT1" column
#' @param dictionary a dictionary with one row per trait id; expected to hold
#'   format, units, min, max and (from column 11 onward) category levels
#' @importFrom magrittr '%>%'
#'
#' @return a dataframe with one row per fieldbook trait column
#' @export
create_fbtrait_config <- function(fieldbook, dictionary){
  #print(str(fieldbook))
  #print(str(dictionary))
  # trait columns are everything after the TRT1 column
  fbn = names(fieldbook)
  fbn = fbn[(which(fbn == "TRT1") + 1):length(fbn)]
  n = length(fbn)
  dd = dictionary
  # one config row per trait, pre-filled with app defaults
  DF = data.frame(
    trait = fbn,
    format = rep("numeric", n),
    defaultValue = rep("", n),
    minimum = rep(NA, n),
    maximum = rep(NA, n),
    details = rep("", n),
    categories = rep("", n),
    isVisible = rep(TRUE, n),
    realPosition = c(1:n)
  )
  # coerce columns to the types the app expects
  DF[, 1] = as.character(DF[, 1])
  DF[, 2] = as.character(DF[, 2])
  DF[, 3] = as.character(DF[, 3])
  DF[, 4] = as.numeric(DF[, 4])
  DF[, 5] = as.numeric(DF[, 5])
  DF[, 6] = as.character(DF[, 6])
  DF[, 7] = as.character(DF[, 7])
  # collapse each categorical trait's level columns (11 .. ncol-2, by
  # position) into a single "a/b/c" string stored in the new "cats" column
  dd = cbind(dd, cats = rep("", nrow(dd)))
  dd[, "cats"] <- as.character(dd[, "cats"])
  dd_n = nrow(dd)
  dd_m = ncol(dd)
  for(i in 1:dd_n) {
    if(stringr::str_detect(dd$format[i], "categorical")){
      cs = paste(dd[i, 11:(dd_m - 2)])
      #cs = stringr::str_split(cs, "=") %>% unlist
      #cs = cs[seq(2,length(cs), 2)]
      cs = cs[cs!="NA"]
      cs = stringr::str_trim(cs)
      dd[i, "cats"] = paste(cs, collapse = "/")
    }
  }
  # create helper columm for subsampled variables
  dd = cbind(dd, traits = rep("", nrow(dd)))
  dd[, "traits"] <- as.character(dd[, "traits"])
  # strip _TPxx / _SSxx suffixes so timepoint/subsample columns map back to
  # their base dictionary id (stored in the temporary DF$traits column)
  for(i in 1:nrow(DF)){
    DF$traits[i] = DF$trait[i]
    if(stringr::str_detect(DF[i, "traits"], "_TP")){
      DF$traits[i] = stringr::str_split(DF[i, "traits"], "_TP")[[1]][1]
    }
    if(stringr::str_detect(DF[i, "traits"], "_SS")){
      DF$traits[i] = stringr::str_split(DF[i, "traits"], "_SS")[[1]][1]
    }
  }
  # look up format/units/min/max/categories from the dictionary by trait id
  for(i in 1:n){
    #print(dd[DF$traits[i] == dd$id, "format"])
    DF[i, "format"] = dd[DF$traits[i] == dd$id, "format"]
    DF[i, "details"] = dd[DF$traits[i] == dd$id, "units"]
    DF[i, "minimum"] = dd[DF$traits[i] == dd$id, "min"]
    DF[i, "maximum"] = dd[DF$traits[i] == dd$id, "max"]
    DF[i, "categories"] = dd[DF$traits[i] == dd$id, "cats"]
  }
  # drop the temporary "traits" helper column (last column) before returning
  DF = DF[, -c(ncol(DF))]
  DF
}
|
#examining data (need to have run data set-up script beforehand)
structure(femweights)
length(femweights$Bodyweight)
#extracting specific entries
femweights[12,2]
femweights$Bodyweight[11]
#performing a function on extracted entries
mean(femweights$Bodyweight[13:24])
#working with sample(), but choosing a specific value using set.seed() for grading purposes
?sample
?set.seed
# The seed must be set BEFORE sampling; setting it afterwards (as the original
# code did) leaves the draw irreproducible, defeating the stated purpose.
set.seed(1)
sample(femweights$Bodyweight[13:24], 1)
|
/Statistics and Data Visualization/Code/Assessment 1.R
|
no_license
|
rghansen/R-course
|
R
| false
| false
| 433
|
r
|
#examining data (need to have run data set-up script beforehand)
structure(femweights)
length(femweights$Bodyweight)
#extracting specific entries
femweights[12,2]
femweights$Bodyweight[11]
#performing a function on extracted entries
mean(femweights$Bodyweight[13:24])
#working with sample(), but choosing a specific value using set.seed() for grading purposes
?sample
?set.seed
# The seed must be set BEFORE sampling; setting it afterwards (as the original
# code did) leaves the draw irreproducible, defeating the stated purpose.
set.seed(1)
sample(femweights$Bodyweight[13:24], 1)
|
library(shiny)
library(googleVis)
library(d3heatmap)
# helpers.R presumably defines getURL, retrieveEvent, formatEvent, toDB,
# fromDB, dayFreq, eventByDay plus the events/days/event.day/dbPath/tbName
# globals used below - verify against that file.
source("helpers.R")
# NOTE(review): the API key is hard-coded and committed to the repo; it should
# be read from an environment variable or config file and the key rotated.
api_key <- "api-key=4afa5e239fc8c4847a7f7fc0b537d285:2:72422982"
base_url <- "http://api.nytimes.com/svc/events/v2/listings.json?"
# Reference coordinate (NYT building); not referenced in the server below.
coordinate <- list(nyt = "40.756146,-73.99021")
shinyServer(function(input, output, session) {
  # Events come either live from the NYT Event API (webData checked) or from
  # the locally cached `events` table, filtered by the selected category.
  retrieved <- reactive({
    if(input$webData) {
      # construct NYT EVENT API searching url
      url <- getURL(base_url, api_key,
                    category=input$category, borough=input$borough,
                    ll="40.7127,-74.0059", radius=input$radius, limit=input$limit)
      print(url)
      # retrive events from NYT event api
      return(retrieveEvent(url))
    } else {
      if(input$category == "Everything") {
        return(events)
      } else {
        return(events[events$category == input$category,])
      }
    }
  })
  output$gvis <- renderGvis({
    # plot events on google map, capped at the requested limit
    eventToPlot <- formatEvent(retrieved())
    print(paste0("Plotting events: ", min(input$limit, nrow(eventToPlot))))
    gvisMap(eventToPlot[1:min(input$limit, nrow(eventToPlot)),], locationvar = "LatLong" , tipvar = "info",
            options=list(width=200,
                         height=500,
                         showTip=TRUE,
                         showLine=TRUE,
                         enableScrollWheel=TRUE,
                         mapType="styledMap",
                         showLine=TRUE,
                         useMapTypeControl=TRUE,
                         # custom pink markers (normal / selected)
                         icons=paste0("{",
                                      "'default': {'normal': 'http://icons.iconarchive.com/",
                                      "icons/icons-land/vista-map-markers/48/",
                                      "Map-Marker-Ball-Pink-icon.png',\n",
                                      "'selected': 'http://icons.iconarchive.com/",
                                      "icons/icons-land/vista-map-markers/48/",
                                      "Map-Marker-Ball-Right-Pink-icon.png'",
                                      "}}"),
                         # custom basemap styling (green-tinted landscape)
                         maps=paste0("{",
                                     "'styledMap': {",
                                     "'name': 'Styled Map',\n",
                                     "'styles': [",
                                     "{'featureType': 'landscape',",
                                     "'stylers': [{'hue': '#259b24'}, {'saturation': 10}, {'lightness': -22}]",
                                     "}",
                                     "]}}")
            ))
  })
  # persist the currently retrieved events to the local database on demand
  writeToDB <- eventReactive(input$writeToDB, {
    if(input$webData) {
      toDB(dbPath, tbName, retrieved())
    }
  })
  # NOTE(review): refreshes events/days/event.day via <<-, i.e. global state
  # shared across sessions - confirm this is intended.
  readFromDB <- eventReactive(input$readFromDB, {
    events <<- fromDB(dbPath, tbName)
    days <<- dayFreq(events)
    event.day <<- eventByDay(events, days)
  })
  headMapAxis <- eventReactive(input$heatmapAxis, {
    c(input$yaxis,input$xaxis)
  })
  observe(writeToDB())
  observe(readFromDB())
  # area chart: event counts per weekday, split by the checked subsets
  output$byDay <- renderGvis({
    eventByDay <- data.frame("Day" = c("SUN", "MON", "TUE", "WED", "THU", "FRI", "SAT"),
                             "Total" = sapply(days, sum),
                             "Times Pick" = sapply(days, function(x) sum(events$times_pick==1 & x)),
                             "Kid Friendly" = sapply(days, function(x) sum(events$kid_friendly==1 & x)),
                             "Free" = sapply(days, function(x) sum(events$free==1 & x))
    )
    checkTotal <- input$checkTotal
    # always show at least the Total series when no subset is selected
    if(!(input$checkPick || input$checkKid || input$checkFree)) {
      checkTotal <- TRUE
    }
    print(nrow(eventByDay))
    gvisAreaChart(eventByDay[, c(TRUE, checkTotal,
                                 input$checkPick,
                                 input$checkKid,
                                 input$checkFree
    )],
    options=list(width="900px", height="600px"))
  })
  # 2x2 grid of pie charts: category breakdown overall and per subset
  output$byCategory <- renderGvis({
    df2 <- events[,c("category", "times_pick", "kid_friendly", "free")]
    df2.category <- df2 %>% group_by(category) %>% count(category)
    df2.pick <- df2 %>% filter(times_pick==1) %>% group_by(category) %>% count(category)
    df2.kid <- df2 %>% filter(kid_friendly==1) %>% group_by(category) %>% count(category)
    df2.free <- df2 %>% filter(free==1) %>% group_by(category) %>% count(category)
    pie.category <- gvisPieChart(df2.category,
                                 options=list(width="400px", height="400px",
                                              title="Total", legend='none'))
    pie.pick <- gvisPieChart(df2.pick,
                             options=list(width="400px", height="400px",
                                          title="Times Pick", legend='none'))
    pie.kid <- gvisPieChart(df2.kid,
                            options=list(width="400px", height="400px",
                                         title="Kid Friendly", legend='none'))
    pie.free <- gvisPieChart(df2.free,
                             options=list(width="400px", height="400px",
                                          title="Free", legend='none'))
    gvisMerge(
      gvisMerge(pie.free, pie.category,horizontal=TRUE),
      gvisMerge(pie.pick, pie.kid, horizontal=TRUE),
      horizontal=FALSE)
  })
  # heatmap over the two selected axes; skip when both axes are the same
  output$heatmap <- renderD3heatmap({
    axis <- headMapAxis()
    if(axis[1] == axis[2]) {
      return()
    }
    print(axis)
    d3heatmap(table(event.day[,axis]), colors="Greens", scale = "column", Colv = FALSE)
  })
  # sortable table of events, names rendered as links to the detail pages
  output$eventTable <- renderGvis({
    df3 <- events
    df3$name <- apply(df3, 1,
                      function(x) paste0("<a href=\"",
                                         x["event_detail_url"], "\" target=\"_blank\">",
                                         x["event_name"], "</a>"))
    if(input$checkPick) {
      df3 <- df3[df3$times_pick==1,]
    }
    if(input$checkKid) {
      df3 <- df3[df3$kid_friendly==1,]
    }
    if(input$checkFree) {
      df3 <- df3[df3$free==1,]
    }
    gvisTable(data.frame("Name"=df3$name, "Venue"=df3$venue_name,
                         "Category"=df3$category, "Neighborhood"=df3$neighborhood),
              options=list(page='enable', pageSize=20))
  })
})
|
/server.R
|
no_license
|
SYAN83/NYT_Event_Finder
|
R
| false
| false
| 6,920
|
r
|
library(shiny)
library(googleVis)
library(d3heatmap)
# helpers.R presumably defines getURL, retrieveEvent, formatEvent, toDB,
# fromDB, dayFreq, eventByDay plus the events/days/event.day/dbPath/tbName
# globals used below - verify against that file.
source("helpers.R")
# NOTE(review): the API key is hard-coded and committed to the repo; it should
# be read from an environment variable or config file and the key rotated.
api_key <- "api-key=4afa5e239fc8c4847a7f7fc0b537d285:2:72422982"
base_url <- "http://api.nytimes.com/svc/events/v2/listings.json?"
# Reference coordinate (NYT building); not referenced in the server below.
coordinate <- list(nyt = "40.756146,-73.99021")
shinyServer(function(input, output, session) {
  # Events come either live from the NYT Event API (webData checked) or from
  # the locally cached `events` table, filtered by the selected category.
  retrieved <- reactive({
    if(input$webData) {
      # construct NYT EVENT API searching url
      url <- getURL(base_url, api_key,
                    category=input$category, borough=input$borough,
                    ll="40.7127,-74.0059", radius=input$radius, limit=input$limit)
      print(url)
      # retrive events from NYT event api
      return(retrieveEvent(url))
    } else {
      if(input$category == "Everything") {
        return(events)
      } else {
        return(events[events$category == input$category,])
      }
    }
  })
  output$gvis <- renderGvis({
    # plot events on google map, capped at the requested limit
    eventToPlot <- formatEvent(retrieved())
    print(paste0("Plotting events: ", min(input$limit, nrow(eventToPlot))))
    gvisMap(eventToPlot[1:min(input$limit, nrow(eventToPlot)),], locationvar = "LatLong" , tipvar = "info",
            options=list(width=200,
                         height=500,
                         showTip=TRUE,
                         showLine=TRUE,
                         enableScrollWheel=TRUE,
                         mapType="styledMap",
                         showLine=TRUE,
                         useMapTypeControl=TRUE,
                         # custom pink markers (normal / selected)
                         icons=paste0("{",
                                      "'default': {'normal': 'http://icons.iconarchive.com/",
                                      "icons/icons-land/vista-map-markers/48/",
                                      "Map-Marker-Ball-Pink-icon.png',\n",
                                      "'selected': 'http://icons.iconarchive.com/",
                                      "icons/icons-land/vista-map-markers/48/",
                                      "Map-Marker-Ball-Right-Pink-icon.png'",
                                      "}}"),
                         # custom basemap styling (green-tinted landscape)
                         maps=paste0("{",
                                     "'styledMap': {",
                                     "'name': 'Styled Map',\n",
                                     "'styles': [",
                                     "{'featureType': 'landscape',",
                                     "'stylers': [{'hue': '#259b24'}, {'saturation': 10}, {'lightness': -22}]",
                                     "}",
                                     "]}}")
            ))
  })
  # persist the currently retrieved events to the local database on demand
  writeToDB <- eventReactive(input$writeToDB, {
    if(input$webData) {
      toDB(dbPath, tbName, retrieved())
    }
  })
  # NOTE(review): refreshes events/days/event.day via <<-, i.e. global state
  # shared across sessions - confirm this is intended.
  readFromDB <- eventReactive(input$readFromDB, {
    events <<- fromDB(dbPath, tbName)
    days <<- dayFreq(events)
    event.day <<- eventByDay(events, days)
  })
  headMapAxis <- eventReactive(input$heatmapAxis, {
    c(input$yaxis,input$xaxis)
  })
  observe(writeToDB())
  observe(readFromDB())
  # area chart: event counts per weekday, split by the checked subsets
  output$byDay <- renderGvis({
    eventByDay <- data.frame("Day" = c("SUN", "MON", "TUE", "WED", "THU", "FRI", "SAT"),
                             "Total" = sapply(days, sum),
                             "Times Pick" = sapply(days, function(x) sum(events$times_pick==1 & x)),
                             "Kid Friendly" = sapply(days, function(x) sum(events$kid_friendly==1 & x)),
                             "Free" = sapply(days, function(x) sum(events$free==1 & x))
    )
    checkTotal <- input$checkTotal
    # always show at least the Total series when no subset is selected
    if(!(input$checkPick || input$checkKid || input$checkFree)) {
      checkTotal <- TRUE
    }
    print(nrow(eventByDay))
    gvisAreaChart(eventByDay[, c(TRUE, checkTotal,
                                 input$checkPick,
                                 input$checkKid,
                                 input$checkFree
    )],
    options=list(width="900px", height="600px"))
  })
  # 2x2 grid of pie charts: category breakdown overall and per subset
  output$byCategory <- renderGvis({
    df2 <- events[,c("category", "times_pick", "kid_friendly", "free")]
    df2.category <- df2 %>% group_by(category) %>% count(category)
    df2.pick <- df2 %>% filter(times_pick==1) %>% group_by(category) %>% count(category)
    df2.kid <- df2 %>% filter(kid_friendly==1) %>% group_by(category) %>% count(category)
    df2.free <- df2 %>% filter(free==1) %>% group_by(category) %>% count(category)
    pie.category <- gvisPieChart(df2.category,
                                 options=list(width="400px", height="400px",
                                              title="Total", legend='none'))
    pie.pick <- gvisPieChart(df2.pick,
                             options=list(width="400px", height="400px",
                                          title="Times Pick", legend='none'))
    pie.kid <- gvisPieChart(df2.kid,
                            options=list(width="400px", height="400px",
                                         title="Kid Friendly", legend='none'))
    pie.free <- gvisPieChart(df2.free,
                             options=list(width="400px", height="400px",
                                          title="Free", legend='none'))
    gvisMerge(
      gvisMerge(pie.free, pie.category,horizontal=TRUE),
      gvisMerge(pie.pick, pie.kid, horizontal=TRUE),
      horizontal=FALSE)
  })
  # heatmap over the two selected axes; skip when both axes are the same
  output$heatmap <- renderD3heatmap({
    axis <- headMapAxis()
    if(axis[1] == axis[2]) {
      return()
    }
    print(axis)
    d3heatmap(table(event.day[,axis]), colors="Greens", scale = "column", Colv = FALSE)
  })
  # sortable table of events, names rendered as links to the detail pages
  output$eventTable <- renderGvis({
    df3 <- events
    df3$name <- apply(df3, 1,
                      function(x) paste0("<a href=\"",
                                         x["event_detail_url"], "\" target=\"_blank\">",
                                         x["event_name"], "</a>"))
    if(input$checkPick) {
      df3 <- df3[df3$times_pick==1,]
    }
    if(input$checkKid) {
      df3 <- df3[df3$kid_friendly==1,]
    }
    if(input$checkFree) {
      df3 <- df3[df3$free==1,]
    }
    gvisTable(data.frame("Name"=df3$name, "Venue"=df3$venue_name,
                         "Category"=df3$category, "Neighborhood"=df3$neighborhood),
              options=list(page='enable', pageSize=20))
  })
})
|
#' Files for each hemisphere sea ice concentration (25km)
#'
#' NSIDC passive microwave sea ice concentration since 1978.
#'
#' Time series has been expanded to be daily, by infilling a date for any missing,
#' with this indicated on the `miss` column.
#'
#' @param ... ignored for now
#' @param .local_root allows local use, to shortcut and use the local data library (under expert guidance)
#' @return data frame of `date` and `url` and `miss` which is `TRUE` if infilled
#' @export
#' @aliases nsidc_north_files
#' @importFrom tibble tibble
#' @examples
#' nsidc_south_files()
#' nsidc_north_files()
nsidc_south_files <- function(..., .local_root = NULL) {
  # nsidc_south_sources: package-internal table of (date, url) for known files
  files <- nsidc_south_sources
  if (!is.null(.local_root)) {
    ## keep the slash
    files$url <- gsub("^ftp:/", .local_root, files$url)
  }
  # expand to a daily sequence; each gap day reuses the latest earlier file
  fulldates <- seq(min(files$date), max(files$date), by = "1 day")
  idx <- findInterval(as.integer(fulldates), as.integer(files$date))
  # miss is TRUE for infilled days (index repeated = no file of its own)
  tibble::tibble(date = fulldates, url = files$url[idx], miss = duplicated(idx))
}
#' @name nsidc_south_files
#' @export
nsidc_north_files <- function(..., .local_root = NULL) {
  # nsidc_north_sources: package-internal table of (date, url) for known files
  files <- nsidc_north_sources
  if (!is.null(.local_root)) {
    ## keep the slash
    files$url <- gsub("^ftp:/", .local_root, files$url)
  }
  # expand to a daily sequence; each gap day reuses the latest earlier file
  fulldates <- seq(min(files$date), max(files$date), by = "1 day")
  idx <- findInterval(as.integer(fulldates), as.integer(files$date))
  # miss is TRUE for infilled days (index repeated = no file of its own)
  tibble::tibble(date = fulldates, url = files$url[idx], miss = duplicated(idx))
}
#' Generate NSIDC source links from a date
#'
#' NSIDC
#'
#' Details
#' @param date date-time, date, or convertible character string
#'
#' @inheritDotParams nsidc_south_files
#' @inheritParams nsidc_south_files
#' @return FTP url of NSIDC binary file
#' @export
#' @aliases nsidc_north_ftp
#' @examples
#' nsidc_south_ftp("2010-01-01")
#' nsidc_north_ftp("2010-01-01")
nsidc_south_ftp <- function(date, ..., .local_root = NULL) {
  # .si_default_date / .si_timedate are internal helpers defined elsewhere
  if (missing(date)) date <- .si_default_date()
  date <- .si_timedate(date)
  files <- nsidc_south_files(.local_root = .local_root)
  # pick the url of the file interval containing `date`
  files$url[findInterval(date, files$date)]
}
#' @export
#' @name nsidc_south_ftp
nsidc_north_ftp <- function(date, ..., .local_root = NULL) {
  # .si_default_date / .si_timedate are internal helpers defined elsewhere
  if (missing(date)) date <- .si_default_date()
  date <- .si_timedate(date)
  files <- nsidc_north_files(.local_root = .local_root)
  # pick the url of the file interval containing `date`
  files$url[findInterval(date, files$date)]
}
#' Generate NSIDC Virtual Raster text from a date
#'
#' NSIDC
#'
#' Details
#' @param date date-time, date, or convertible character string
#'
#' @inheritDotParams nsidc_south_files
#' @inheritParams nsidc_south_files
#' @return VRT text, used by GDAL
#' @export
#' @aliases nsidc_north_text
#' @importFrom glue glue
#' @examples
#' nsidc_south_vrt_text("2010-01-01")
#' nsidc_north_vrt_text("2010-01-01")
nsidc_south_vrt_text <- function(date, ..., .local_root = NULL) {
  if (missing(date)) date <- .si_default_date()
  date <- .si_timedate(date)
  ## probably here we need a vsi function, to pivot on the local_root part
  # remote files are read through GDAL's /vsicurl/ virtual filesystem;
  # local files need no prefix
  vsi_prefix <- if (is.null(.local_root)) "/vsicurl/" else ""
  FTP <- glue::glue("{vsi_prefix}{nsidc_south_ftp(date, .local_root = .local_root)}")
  # VRT wrapping the flat-binary NSIDC file: 316x332 byte grid with a
  # 300-byte header (ImageOffset), EPSG:3976 projection, 25 km pixels
  glue::glue(
    '<VRTDataset rasterXSize="316" rasterYSize="332">
  <VRTRasterBand dataType="Byte" band="1" subClass="VRTRawRasterBand">
    <SourceFilename relativetoVRT="1">{FTP}</SourceFilename>
    <ImageOffset>300</ImageOffset>
    <PixelOffset>1</PixelOffset>
    <LineOffset>316</LineOffset>
  </VRTRasterBand>
  <SRS>PROJCRS[\"WGS 84 / NSIDC Sea Ice Polar Stereographic South\",\n    BASEGEOGCRS[\"WGS 84\",\n        DATUM[\"World Geodetic System 1984\",\n            ELLIPSOID[\"WGS 84\",6378137,298.257223563,\n                LENGTHUNIT[\"metre\",1]]],\n        PRIMEM[\"Greenwich\",0,\n            ANGLEUNIT[\"degree\",0.0174532925199433]],\n        ID[\"EPSG\",4326]],\n    CONVERSION[\"US NSIDC Sea Ice polar stereographic south\",\n        METHOD[\"Polar Stereographic (variant B)\",\n            ID[\"EPSG\",9829]],\n        PARAMETER[\"Latitude of standard parallel\",-70,\n            ANGLEUNIT[\"degree\",0.0174532925199433],\n            ID[\"EPSG\",8832]],\n        PARAMETER[\"Longitude of origin\",0,\n            ANGLEUNIT[\"degree\",0.0174532925199433],\n            ID[\"EPSG\",8833]],\n        PARAMETER[\"False easting\",0,\n            LENGTHUNIT[\"metre\",1],\n            ID[\"EPSG\",8806]],\n        PARAMETER[\"False northing\",0,\n            LENGTHUNIT[\"metre\",1],\n            ID[\"EPSG\",8807]]],\n    CS[Cartesian,2],\n        AXIS[\"easting (X)\",north,\n            MERIDIAN[90,\n                ANGLEUNIT[\"degree\",0.0174532925199433]],\n            ORDER[1],\n            LENGTHUNIT[\"metre\",1]],\n        AXIS[\"northing (Y)\",north,\n            MERIDIAN[0,\n                ANGLEUNIT[\"degree\",0.0174532925199433]],\n            ORDER[2],\n            LENGTHUNIT[\"metre\",1]],\n    USAGE[\n        SCOPE[\"Polar research.\"],\n        AREA[\"Southern hemisphere - south of 60S onshore and offshore - Antarctica.\"],\n        BBOX[-90,-180,-60,180]],\n    ID[\"EPSG\",3976]]</SRS>
  <GeoTransform> -3.9500000000000000e+06,  2.5000000000000000e+04,  0.0000000000000000e+00,  4.3500000000000000e+06,  0.0000000000000000e+00, -2.5000000000000000e+04</GeoTransform>
</VRTDataset>'
  )
}
#' @export
#' @name nsidc_south_vrt_text
nsidc_north_vrt_text <- function(date, ..., .local_root = NULL) {
## Build GDAL VRT text describing the northern hemisphere NSIDC 25km sea ice
## concentration binary file for `date`.
if (missing(date)) date <- .si_default_date()
date <- .si_timedate(date)
## Remote files are opened through GDAL's /vsicurl/ virtual file system;
## when a local data library is used (.local_root set), no prefix is needed.
vsi_prefix <- if (is.null(.local_root)) "/vsicurl/" else ""
FTP <- glue::glue("{vsi_prefix}{nsidc_north_ftp(date, .local_root = .local_root)}")
## VRTRawRasterBand wrapper: 304 x 448 one-byte pixels following a 300-byte
## header (ImageOffset), on the 25 km EPSG:3413 north polar stereographic grid.
glue::glue('<VRTDataset rasterXSize="304" rasterYSize="448">
<VRTRasterBand dataType="Byte" band="1" subClass="VRTRawRasterBand">
<SourceFilename relativetoVRT="1">{FTP}</SourceFilename>
<ImageOffset>300</ImageOffset>
<PixelOffset>1</PixelOffset>
<LineOffset>304</LineOffset>
</VRTRasterBand>
<SRS>
PROJCRS[\"WGS 84 / NSIDC Sea Ice Polar Stereographic North\",\n    BASEGEOGCRS[\"WGS 84\",\n        DATUM[\"World Geodetic System 1984\",\n            ELLIPSOID[\"WGS 84\",6378137,298.257223563,\n                LENGTHUNIT[\"metre\",1]]],\n        PRIMEM[\"Greenwich\",0,\n            ANGLEUNIT[\"degree\",0.0174532925199433]],\n        ID[\"EPSG\",4326]],\n    CONVERSION[\"US NSIDC Sea Ice polar stereographic north\",\n        METHOD[\"Polar Stereographic (variant B)\",\n            ID[\"EPSG\",9829]],\n        PARAMETER[\"Latitude of standard parallel\",70,\n            ANGLEUNIT[\"degree\",0.0174532925199433],\n            ID[\"EPSG\",8832]],\n        PARAMETER[\"Longitude of origin\",-45,\n            ANGLEUNIT[\"degree\",0.0174532925199433],\n            ID[\"EPSG\",8833]],\n        PARAMETER[\"False easting\",0,\n            LENGTHUNIT[\"metre\",1],\n            ID[\"EPSG\",8806]],\n        PARAMETER[\"False northing\",0,\n            LENGTHUNIT[\"metre\",1],\n            ID[\"EPSG\",8807]]],\n    CS[Cartesian,2],\n        AXIS[\"easting (X)\",south,\n            MERIDIAN[45,\n                ANGLEUNIT[\"degree\",0.0174532925199433]],\n            ORDER[1],\n            LENGTHUNIT[\"metre\",1]],\n        AXIS[\"northing (Y)\",south,\n            MERIDIAN[135,\n                ANGLEUNIT[\"degree\",0.0174532925199433]],\n            ORDER[2],\n            LENGTHUNIT[\"metre\",1]],\n    USAGE[\n        SCOPE[\"Polar research.\"],\n        AREA[\"Northern hemisphere - north of 60N onshore and offshore, including Arctic.\"],\n        BBOX[60,-180,90,180]],\n    ID[\"EPSG\",3413]]
</SRS>
<GeoTransform> -3.8375000000000000e+06, 2.5000000000000000e+04, 0.0000000000000000e+00, 5.8375000000000000e+06, 0.0000000000000000e+00, -2.5000000000000000e+04</GeoTransform>
</VRTDataset>'
)
}
#' Generate NSIDC filename
#'
#' Temp file contains text of Virtual Raster
#'
#' Details
#' @param date date-time, date, or convertible character string
#'
#' @inheritDotParams nsidc_south_files
#' @inheritParams nsidc_south_files
#'
#' @return VRT tempfile, to be used by GDAL
#' @export
#' @aliases nsidc_north_vrt
#' @examples
#' nsidc_south_vrt("2010-01-01")
#' nsidc_north_vrt("2010-01-01")
nsidc_south_vrt <- function(date, ..., .local_root = NULL) {
  ## Write the southern hemisphere VRT text for `date` to a temporary
  ## .vrt file and return the file path for use by GDAL.
  if (missing(date)) date <- .si_default_date()
  date <- .si_timedate(date)
  vrt_path <- tempfile(fileext = ".vrt")
  vrt_text <- nsidc_south_vrt_text(date, .local_root = .local_root)
  writeLines(vrt_text, vrt_path)
  vrt_path
}
#' @export
#' @name nsidc_south_vrt
nsidc_north_vrt <- function(date, ..., .local_root = NULL) {
  ## Write the northern hemisphere VRT text for `date` to a temporary
  ## .vrt file and return the file path for use by GDAL.
  if (missing(date)) date <- .si_default_date()
  date <- .si_timedate(date)
  vrt_path <- tempfile(fileext = ".vrt")
  vrt_text <- nsidc_north_vrt_text(date, .local_root = .local_root)
  writeLines(vrt_text, vrt_path)
  vrt_path
}
|
/R/nsidc.R
|
permissive
|
AustralianAntarcticDivision/seaice
|
R
| false
| false
| 8,721
|
r
|
#' Files for each hemisphere sea ice concentration (25km)
#'
#' NSIDC passive microwave sea ice concentration since 1978.
#'
#' Time series has been expanded to be daily, by infilling a date for any missing,
#' with this indicated on the `miss` column.
#'
#' @param ... ignored for now
#' @param .local_root allows local use, to shortcut and use the local data library (under expert guidance)
#' @return data frame of `date` and `url` and `miss` which is `TRUE` if infilled
#' @export
#' @aliases nsidc_north_files
#' @importFrom tibble tibble
#' @examples
#' nsidc_south_files()
#' nsidc_north_files()
nsidc_south_files <- function(..., .local_root = NULL) {
  ## Daily catalogue of southern hemisphere NSIDC source files, expanded to
  ## one row per day; infilled (missing) days are flagged in `miss`.
  src <- nsidc_south_sources
  if (!is.null(.local_root)) {
    ## Replace only the scheme part of "ftp://" so one slash is kept,
    ## pivoting the URL onto the local data library.
    src$url <- gsub("^ftp:/", .local_root, src$url)
  }
  all_days <- seq(min(src$date), max(src$date), by = "1 day")
  ## findInterval() maps each day to the most recent available file;
  ## repeated indices mark days that were infilled.
  pos <- findInterval(as.integer(all_days), as.integer(src$date))
  tibble::tibble(
    date = all_days,
    url = src$url[pos],
    miss = duplicated(pos)
  )
}
#' @name nsidc_south_files
#' @export
nsidc_north_files <- function(..., .local_root = NULL) {
  ## Daily catalogue of northern hemisphere NSIDC source files, expanded to
  ## one row per day; infilled (missing) days are flagged in `miss`.
  src <- nsidc_north_sources
  if (!is.null(.local_root)) {
    ## Replace only the scheme part of "ftp://" so one slash is kept,
    ## pivoting the URL onto the local data library.
    src$url <- gsub("^ftp:/", .local_root, src$url)
  }
  all_days <- seq(min(src$date), max(src$date), by = "1 day")
  ## findInterval() maps each day to the most recent available file;
  ## repeated indices mark days that were infilled.
  pos <- findInterval(as.integer(all_days), as.integer(src$date))
  tibble::tibble(
    date = all_days,
    url = src$url[pos],
    miss = duplicated(pos)
  )
}
#' Generate NSIDC source links from a date
#'
#' NSIDC
#'
#' Details
#' @param date date-time, date, or convertible character string
#'
#' @inheritDotParams nsidc_south_files
#' @inheritParams nsidc_south_files
#' @return FTP url of NSIDC binary file
#' @export
#' @aliases nsidc_north_ftp
#' @examples
#' nsidc_south_ftp("2010-01-01")
#' nsidc_north_ftp("2010-01-01")
nsidc_south_ftp <- function(date, ..., .local_root = NULL) {
  ## Resolve the source URL of the southern hemisphere file covering `date`.
  if (missing(date)) date <- .si_default_date()
  date <- .si_timedate(date)
  catalogue <- nsidc_south_files(.local_root = .local_root)
  ## findInterval() selects the catalogue entry at or before `date`.
  catalogue$url[findInterval(date, catalogue$date)]
}
#' @export
#' @name nsidc_south_ftp
nsidc_north_ftp <- function(date, ..., .local_root = NULL) {
  ## Resolve the source URL of the northern hemisphere file covering `date`.
  if (missing(date)) date <- .si_default_date()
  date <- .si_timedate(date)
  catalogue <- nsidc_north_files(.local_root = .local_root)
  ## findInterval() selects the catalogue entry at or before `date`.
  catalogue$url[findInterval(date, catalogue$date)]
}
#' Generate NSIDC Virtual Raster text from a date
#'
#' NSIDC
#'
#' Details
#' @param date date-time, date, or convertible character string
#'
#' @inheritDotParams nsidc_south_files
#' @inheritParams nsidc_south_files
#' @return VRT text, used by GDAL
#' @export
#' @aliases nsidc_north_text
#' @importFrom glue glue
#' @examples
#' nsidc_south_vrt_text("2010-01-01")
#' nsidc_north_vrt_text("2010-01-01")
nsidc_south_vrt_text <- function(date, ..., .local_root = NULL) {
## Build GDAL VRT text describing the southern hemisphere NSIDC 25km sea ice
## concentration binary file for `date`.
if (missing(date)) date <- .si_default_date()
date <- .si_timedate(date)
## probably here we need a vsi function, to pivot on the local_root part
## Remote files are opened through GDAL's /vsicurl/ virtual file system;
## when a local data library is used (.local_root set), no prefix is needed.
vsi_prefix <- if (is.null(.local_root)) "/vsicurl/" else ""
FTP <- glue::glue("{vsi_prefix}{nsidc_south_ftp(date, .local_root = .local_root)}")
## VRTRawRasterBand wrapper: 316 x 332 one-byte pixels following a 300-byte
## header (ImageOffset), on the 25 km EPSG:3976 south polar stereographic grid.
glue::glue(
'<VRTDataset rasterXSize="316" rasterYSize="332">
<VRTRasterBand dataType="Byte" band="1" subClass="VRTRawRasterBand">
<SourceFilename relativetoVRT="1">{FTP}</SourceFilename>
<ImageOffset>300</ImageOffset>
<PixelOffset>1</PixelOffset>
<LineOffset>316</LineOffset>
</VRTRasterBand>
<SRS>PROJCRS[\"WGS 84 / NSIDC Sea Ice Polar Stereographic South\",\n    BASEGEOGCRS[\"WGS 84\",\n        DATUM[\"World Geodetic System 1984\",\n            ELLIPSOID[\"WGS 84\",6378137,298.257223563,\n                LENGTHUNIT[\"metre\",1]]],\n        PRIMEM[\"Greenwich\",0,\n            ANGLEUNIT[\"degree\",0.0174532925199433]],\n        ID[\"EPSG\",4326]],\n    CONVERSION[\"US NSIDC Sea Ice polar stereographic south\",\n        METHOD[\"Polar Stereographic (variant B)\",\n            ID[\"EPSG\",9829]],\n        PARAMETER[\"Latitude of standard parallel\",-70,\n            ANGLEUNIT[\"degree\",0.0174532925199433],\n            ID[\"EPSG\",8832]],\n        PARAMETER[\"Longitude of origin\",0,\n            ANGLEUNIT[\"degree\",0.0174532925199433],\n            ID[\"EPSG\",8833]],\n        PARAMETER[\"False easting\",0,\n            LENGTHUNIT[\"metre\",1],\n            ID[\"EPSG\",8806]],\n        PARAMETER[\"False northing\",0,\n            LENGTHUNIT[\"metre\",1],\n            ID[\"EPSG\",8807]]],\n    CS[Cartesian,2],\n        AXIS[\"easting (X)\",north,\n            MERIDIAN[90,\n                ANGLEUNIT[\"degree\",0.0174532925199433]],\n            ORDER[1],\n            LENGTHUNIT[\"metre\",1]],\n        AXIS[\"northing (Y)\",north,\n            MERIDIAN[0,\n                ANGLEUNIT[\"degree\",0.0174532925199433]],\n            ORDER[2],\n            LENGTHUNIT[\"metre\",1]],\n    USAGE[\n        SCOPE[\"Polar research.\"],\n        AREA[\"Southern hemisphere - south of 60S onshore and offshore - Antarctica.\"],\n        BBOX[-90,-180,-60,180]],\n    ID[\"EPSG\",3976]]</SRS>
<GeoTransform> -3.9500000000000000e+06, 2.5000000000000000e+04, 0.0000000000000000e+00, 4.3500000000000000e+06, 0.0000000000000000e+00, -2.5000000000000000e+04</GeoTransform>
</VRTDataset>'
)
}
#' @export
#' @name nsidc_south_vrt_text
nsidc_north_vrt_text <- function(date, ..., .local_root = NULL) {
## Build GDAL VRT text describing the northern hemisphere NSIDC 25km sea ice
## concentration binary file for `date`.
if (missing(date)) date <- .si_default_date()
date <- .si_timedate(date)
## Remote files are opened through GDAL's /vsicurl/ virtual file system;
## when a local data library is used (.local_root set), no prefix is needed.
vsi_prefix <- if (is.null(.local_root)) "/vsicurl/" else ""
FTP <- glue::glue("{vsi_prefix}{nsidc_north_ftp(date, .local_root = .local_root)}")
## VRTRawRasterBand wrapper: 304 x 448 one-byte pixels following a 300-byte
## header (ImageOffset), on the 25 km EPSG:3413 north polar stereographic grid.
glue::glue('<VRTDataset rasterXSize="304" rasterYSize="448">
<VRTRasterBand dataType="Byte" band="1" subClass="VRTRawRasterBand">
<SourceFilename relativetoVRT="1">{FTP}</SourceFilename>
<ImageOffset>300</ImageOffset>
<PixelOffset>1</PixelOffset>
<LineOffset>304</LineOffset>
</VRTRasterBand>
<SRS>
PROJCRS[\"WGS 84 / NSIDC Sea Ice Polar Stereographic North\",\n    BASEGEOGCRS[\"WGS 84\",\n        DATUM[\"World Geodetic System 1984\",\n            ELLIPSOID[\"WGS 84\",6378137,298.257223563,\n                LENGTHUNIT[\"metre\",1]]],\n        PRIMEM[\"Greenwich\",0,\n            ANGLEUNIT[\"degree\",0.0174532925199433]],\n        ID[\"EPSG\",4326]],\n    CONVERSION[\"US NSIDC Sea Ice polar stereographic north\",\n        METHOD[\"Polar Stereographic (variant B)\",\n            ID[\"EPSG\",9829]],\n        PARAMETER[\"Latitude of standard parallel\",70,\n            ANGLEUNIT[\"degree\",0.0174532925199433],\n            ID[\"EPSG\",8832]],\n        PARAMETER[\"Longitude of origin\",-45,\n            ANGLEUNIT[\"degree\",0.0174532925199433],\n            ID[\"EPSG\",8833]],\n        PARAMETER[\"False easting\",0,\n            LENGTHUNIT[\"metre\",1],\n            ID[\"EPSG\",8806]],\n        PARAMETER[\"False northing\",0,\n            LENGTHUNIT[\"metre\",1],\n            ID[\"EPSG\",8807]]],\n    CS[Cartesian,2],\n        AXIS[\"easting (X)\",south,\n            MERIDIAN[45,\n                ANGLEUNIT[\"degree\",0.0174532925199433]],\n            ORDER[1],\n            LENGTHUNIT[\"metre\",1]],\n        AXIS[\"northing (Y)\",south,\n            MERIDIAN[135,\n                ANGLEUNIT[\"degree\",0.0174532925199433]],\n            ORDER[2],\n            LENGTHUNIT[\"metre\",1]],\n    USAGE[\n        SCOPE[\"Polar research.\"],\n        AREA[\"Northern hemisphere - north of 60N onshore and offshore, including Arctic.\"],\n        BBOX[60,-180,90,180]],\n    ID[\"EPSG\",3413]]
</SRS>
<GeoTransform> -3.8375000000000000e+06, 2.5000000000000000e+04, 0.0000000000000000e+00, 5.8375000000000000e+06, 0.0000000000000000e+00, -2.5000000000000000e+04</GeoTransform>
</VRTDataset>'
)
}
#' Generate NSIDC filename
#'
#' Temp file contains text of Virtual Raster
#'
#' Details
#' @param date date-time, date, or convertible character string
#'
#' @inheritDotParams nsidc_south_files
#' @inheritParams nsidc_south_files
#'
#' @return VRT tempfile, to be used by GDAL
#' @export
#' @aliases nsidc_north_vrt
#' @examples
#' nsidc_south_vrt("2010-01-01")
#' nsidc_north_vrt("2010-01-01")
nsidc_south_vrt <- function(date, ..., .local_root = NULL) {
  ## Write the southern hemisphere VRT text for `date` to a temporary
  ## .vrt file and return the file path for use by GDAL.
  if (missing(date)) date <- .si_default_date()
  date <- .si_timedate(date)
  vrt_path <- tempfile(fileext = ".vrt")
  vrt_text <- nsidc_south_vrt_text(date, .local_root = .local_root)
  writeLines(vrt_text, vrt_path)
  vrt_path
}
#' @export
#' @name nsidc_south_vrt
nsidc_north_vrt <- function(date, ..., .local_root = NULL) {
  ## Write the northern hemisphere VRT text for `date` to a temporary
  ## .vrt file and return the file path for use by GDAL.
  if (missing(date)) date <- .si_default_date()
  date <- .si_timedate(date)
  vrt_path <- tempfile(fileext = ".vrt")
  vrt_text <- nsidc_north_vrt_text(date, .local_root = .local_root)
  writeLines(vrt_text, vrt_path)
  vrt_path
}
|
#############################################
# Third-party dependencies: Matrix (sparse matrices), geigen (generalized
# eigenproblems), rARPACK (fast partial eigendecomposition), plus mapping
# and colour utilities (maps, WDI, RColorBrewer, maptools).
library("Matrix")
library("geigen")
library("rARPACK")
library(maps)
library(WDI)
library(RColorBrewer)
library("maptools")
# Project helpers sourced from the working directory -- presumably
# Preprocess.R supplies outlierDetector()/prinComp(),
# SpectralClustering.R supplies similarity()/produceU()/kmeansU(), and
# Postprocess.R supplies sumSquares(); verify against those files.
source("Preprocess.R")
source("SpectralClustering.R")
source("Postprocess.R")
######################################################
# speCluster()
speCluster <- function(data, conMatrix, cluster.number,
                       iter.max = 400, repetition = 400) {
  # Perform Spectral Clustering on a data matrix.
  #
  # Args:
  #   data: A numeric data frame or matrix.
  #   conMatrix: Contiguity matrix.
  #   cluster.number: The number of clusters.
  #   iter.max: The maximum number of iterations allowed for the
  #             kmeans step.
  #   repetition: How many random sets should be chosen as the
  #               initial centers in the kmeans step.
  #
  # Returns:
  #   A list with two parts:
  #     clusters: A vector of integers (from 1:cluster.number)
  #               indicating the cluster to which each point is
  #               allocated.
  #     SS: A list with two values, SSW for Sum of Squares Within and
  #         SSB for Sum of Squares Between.
  ############################################
  # Preprocess: detect outliers, then reduce with principal components.
  outId <- outlierDetector(data)
  dataAfterPC <- prinComp(data = data, outId = outId)
  rm(data)  # free the (potentially large) raw input
  ############################################
  # Spectral clustering algorithm.
  S <- similarity(data = dataAfterPC, neighbors = conMatrix)
  rm(outId, conMatrix)
  U <- produceU(similarity = S, ncol = cluster.number)
  rm(S)
  # BUG FIX: forward iter.max and repetition to kmeansU(). The original
  # hard-coded iter.max = 500 and silently dropped repetition, so the
  # advertised defaults (400/400) had no effect; this now matches stepTwo().
  clusters <- kmeansU(data = U, cluster.number = cluster.number,
                      iter.max = iter.max, repetition = repetition)
  ############################################
  # Postprocess: within/between cluster sums of squares.
  SS <- sumSquares(data = dataAfterPC, clusters = clusters)
  ############################################
  out <- list(clusters = clusters, SS = SS)
  return(out)
}
stepOne <- function(data, conMatrix, ncol) {
  # Compute the principal-component data and the spectral embedding U.
  #
  # Args:
  #   data: A numeric data frame or matrix.
  #   conMatrix: Contiguity matrix.
  #   ncol: Number of columns of the output matrix U.
  #
  # Returns:
  #   A list with two parts:
  #     dataAfterPC: data after principal component reduction.
  #     U: n-by-ncol numeric matrix holding the top ncol eigenvectors
  #        of the Laplacian matrix as columns.
  ############################################
  # Preprocess: outlier detection followed by principal components.
  outliers <- outlierDetector(data)
  pc_data <- prinComp(data = data, outId = outliers)
  rm(data)  # free the raw input
  ############################################
  # Spectral step: similarity graph, then Laplacian eigenvectors.
  sim <- similarity(data = pc_data, neighbors = conMatrix)
  rm(outliers, conMatrix)
  embedding <- produceU(similarity = sim, ncol = ncol)
  out <- list(dataAfterPC = pc_data, U = embedding)
  return(out)
}
stepTwo <- function(data, U, cluster.number,
                    iter.max = 400, repetition = 400) {
  # Perform the kmeans step of Spectral Clustering on the U matrix.
  #
  # BUG FIX: the original default `cluster.number = cluster.number` was
  # self-referential, so omitting the argument raised a promise-evaluation
  # error; cluster.number is now simply a required argument (callers that
  # supplied it are unaffected).
  #
  # Args:
  #   data: A numeric data frame or matrix (used for the sums of squares).
  #   U: A numeric matrix (spectral embedding from stepOne()).
  #   cluster.number: The number of clusters.
  #   iter.max: The maximum number of iterations allowed for the
  #             kmeans step.
  #   repetition: How many random sets should be chosen as the
  #               initial centers in the kmeans step.
  #
  # Returns:
  #   A list with two parts:
  #     clusters: A vector of integers (from 1:cluster.number)
  #               indicating the cluster to which each point is
  #               allocated.
  #     SS: A list with two values, SSW for Sum of Squares Within and
  #         SSB for Sum of Squares Between.
  ############################################
  clusters <- kmeansU(data = U, cluster.number = cluster.number,
                      iter.max = iter.max, repetition = repetition)
  SS <- sumSquares(data = data, clusters = clusters)
  ############################################
  out <- list(clusters = clusters, SS = SS)
  return(out)
}
|
/main.R
|
no_license
|
cont-limno/SpectralClustering4Regions
|
R
| false
| false
| 4,053
|
r
|
#############################################
# Third-party dependencies: Matrix (sparse matrices), geigen (generalized
# eigenproblems), rARPACK (fast partial eigendecomposition), plus mapping
# and colour utilities (maps, WDI, RColorBrewer, maptools).
library("Matrix")
library("geigen")
library("rARPACK")
library(maps)
library(WDI)
library(RColorBrewer)
library("maptools")
# Project helpers sourced from the working directory -- presumably
# Preprocess.R supplies outlierDetector()/prinComp(),
# SpectralClustering.R supplies similarity()/produceU()/kmeansU(), and
# Postprocess.R supplies sumSquares(); verify against those files.
source("Preprocess.R")
source("SpectralClustering.R")
source("Postprocess.R")
######################################################
# speCluster()
speCluster <- function(data, conMatrix, cluster.number,
                       iter.max = 400, repetition = 400) {
  # Perform Spectral Clustering on a data matrix.
  #
  # Args:
  #   data: A numeric data frame or matrix.
  #   conMatrix: Contiguity matrix.
  #   cluster.number: The number of clusters.
  #   iter.max: The maximum number of iterations allowed for the
  #             kmeans step.
  #   repetition: How many random sets should be chosen as the
  #               initial centers in the kmeans step.
  #
  # Returns:
  #   A list with two parts:
  #     clusters: A vector of integers (from 1:cluster.number)
  #               indicating the cluster to which each point is
  #               allocated.
  #     SS: A list with two values, SSW for Sum of Squares Within and
  #         SSB for Sum of Squares Between.
  ############################################
  # Preprocess: detect outliers, then reduce with principal components.
  outId <- outlierDetector(data)
  dataAfterPC <- prinComp(data = data, outId = outId)
  rm(data)  # free the (potentially large) raw input
  ############################################
  # Spectral clustering algorithm.
  S <- similarity(data = dataAfterPC, neighbors = conMatrix)
  rm(outId, conMatrix)
  U <- produceU(similarity = S, ncol = cluster.number)
  rm(S)
  # BUG FIX: forward iter.max and repetition to kmeansU(). The original
  # hard-coded iter.max = 500 and silently dropped repetition, so the
  # advertised defaults (400/400) had no effect; this now matches stepTwo().
  clusters <- kmeansU(data = U, cluster.number = cluster.number,
                      iter.max = iter.max, repetition = repetition)
  ############################################
  # Postprocess: within/between cluster sums of squares.
  SS <- sumSquares(data = dataAfterPC, clusters = clusters)
  ############################################
  out <- list(clusters = clusters, SS = SS)
  return(out)
}
stepOne <- function(data, conMatrix, ncol) {
  # Compute the principal-component data and the spectral embedding U.
  #
  # Args:
  #   data: A numeric data frame or matrix.
  #   conMatrix: Contiguity matrix.
  #   ncol: Number of columns of the output matrix U.
  #
  # Returns:
  #   A list with two parts:
  #     dataAfterPC: data after principal component reduction.
  #     U: n-by-ncol numeric matrix holding the top ncol eigenvectors
  #        of the Laplacian matrix as columns.
  ############################################
  # Preprocess: outlier detection followed by principal components.
  outliers <- outlierDetector(data)
  pc_data <- prinComp(data = data, outId = outliers)
  rm(data)  # free the raw input
  ############################################
  # Spectral step: similarity graph, then Laplacian eigenvectors.
  sim <- similarity(data = pc_data, neighbors = conMatrix)
  rm(outliers, conMatrix)
  embedding <- produceU(similarity = sim, ncol = ncol)
  out <- list(dataAfterPC = pc_data, U = embedding)
  return(out)
}
stepTwo <- function(data, U, cluster.number,
                    iter.max = 400, repetition = 400) {
  # Perform the kmeans step of Spectral Clustering on the U matrix.
  #
  # BUG FIX: the original default `cluster.number = cluster.number` was
  # self-referential, so omitting the argument raised a promise-evaluation
  # error; cluster.number is now simply a required argument (callers that
  # supplied it are unaffected).
  #
  # Args:
  #   data: A numeric data frame or matrix (used for the sums of squares).
  #   U: A numeric matrix (spectral embedding from stepOne()).
  #   cluster.number: The number of clusters.
  #   iter.max: The maximum number of iterations allowed for the
  #             kmeans step.
  #   repetition: How many random sets should be chosen as the
  #               initial centers in the kmeans step.
  #
  # Returns:
  #   A list with two parts:
  #     clusters: A vector of integers (from 1:cluster.number)
  #               indicating the cluster to which each point is
  #               allocated.
  #     SS: A list with two values, SSW for Sum of Squares Within and
  #         SSB for Sum of Squares Between.
  ############################################
  clusters <- kmeansU(data = U, cluster.number = cluster.number,
                      iter.max = iter.max, repetition = repetition)
  SS <- sumSquares(data = data, clusters = clusters)
  ############################################
  out <- list(clusters = clusters, SS = SS)
  return(out)
}
|
#' Genomic coordinate to chromosome arm
#'
#' Returns chromosome arms for given chromosome and genomic position.
#'
#' @param chromosome Character or numeric vector, with chromosome of genomic coordinate
#' @param position Numeric vector, with genomic position within chromosome
#' @param assembly a string specifying which genome assembly version should be applied
#'   to determine chromosome arms. Allowed options are "hg38", "hg19", "hg18", "hg17"
#'   and "hg16" (corresponding to the five latest human genome annotations in the
#'   UCSC genome browser).
#' @param full logical; if TRUE return the full cytoband name, otherwise only
#'   the arm letter (first character, "p" or "q").
#' @return Character vector, with chromosome arm of given genomic coordinates
coord_to_arm <- function(chromosome, position, assembly = "hg19", full = FALSE){
  if(length(chromosome) != length(position)){
    stop("chromosome and position must have equal length")
  }
  if (!(assembly %in% c("hg38", "hg19", "hg18", "hg17", "hg16"))) {
    stop("Invalid assembly, allowed options are hg38, hg19, hg18, hg17 and hg16")
  }
  # BUG FIX: prefix "chr" per element. The original prefixed EVERY element
  # whenever ANY single element lacked the prefix, double-prefixing mixed
  # input such as c("chr1", "2") -> "chrchr1".
  needs_prefix <- stringr::str_sub(chromosome, 1, 3) != "chr"
  chromosome <- ifelse(needs_prefix,
                       stringr::str_c("chr", chromosome),
                       as.character(chromosome))
  # BUG FIX: anchored validation. The old pattern "chr[X-Y]|[0-9]+" accepted
  # any string containing a digit (e.g. "foo123").
  if(any(!grepl("^chr([0-9]+|[XY])$", chromosome))){
    stop("Invalid chromosome, must be 1-22, X or Y (or chr1-chr22, chrX or chrY)")
  }
  # Load the cytoband lookup table into this function's environment rather
  # than the caller's global workspace.
  data(cytoband_map, envir = environment())
  arms <- rep(" ", length(chromosome))
  for(i in unique(chromosome)){
    map <- cytoband_map[[assembly]][V1 == i]  # data.table subset for chromosome i
    # findInterval() locates the cytoband whose end coordinate (V3) bounds
    # each position; +1 converts to the row holding that band's name (V4).
    arm <- map[(findInterval(position[chromosome == i], map$V3)+1)]$V4
    if(!full){
      arm <- stringr::str_sub(arm, 1,1)
    }
    arms[chromosome == i] <- arm
  }
  return(arms)
}
|
/R/cytobands.R
|
permissive
|
arnijohnsen/arjtools
|
R
| false
| false
| 1,605
|
r
|
#' Genomic coordinate to chromosome arm
#'
#' Returns chromosome arms for given chromosome and genomic position.
#'
#' @param chromosome Character or numeric vector, with chromosome of genomic coordinate
#' @param position Numeric vector, with genomic position within chromosome
#' @param assembly a string specifying which genome assembly version should be applied
#'   to determine chromosome arms. Allowed options are "hg38", "hg19", "hg18", "hg17"
#'   and "hg16" (corresponding to the five latest human genome annotations in the
#'   UCSC genome browser).
#' @param full logical; if TRUE return the full cytoband name, otherwise only
#'   the arm letter (first character, "p" or "q").
#' @return Character vector, with chromosome arm of given genomic coordinates
coord_to_arm <- function(chromosome, position, assembly = "hg19", full = FALSE){
  if(length(chromosome) != length(position)){
    stop("chromosome and position must have equal length")
  }
  if (!(assembly %in% c("hg38", "hg19", "hg18", "hg17", "hg16"))) {
    stop("Invalid assembly, allowed options are hg38, hg19, hg18, hg17 and hg16")
  }
  # BUG FIX: prefix "chr" per element. The original prefixed EVERY element
  # whenever ANY single element lacked the prefix, double-prefixing mixed
  # input such as c("chr1", "2") -> "chrchr1".
  needs_prefix <- stringr::str_sub(chromosome, 1, 3) != "chr"
  chromosome <- ifelse(needs_prefix,
                       stringr::str_c("chr", chromosome),
                       as.character(chromosome))
  # BUG FIX: anchored validation. The old pattern "chr[X-Y]|[0-9]+" accepted
  # any string containing a digit (e.g. "foo123").
  if(any(!grepl("^chr([0-9]+|[XY])$", chromosome))){
    stop("Invalid chromosome, must be 1-22, X or Y (or chr1-chr22, chrX or chrY)")
  }
  # Load the cytoband lookup table into this function's environment rather
  # than the caller's global workspace.
  data(cytoband_map, envir = environment())
  arms <- rep(" ", length(chromosome))
  for(i in unique(chromosome)){
    map <- cytoband_map[[assembly]][V1 == i]  # data.table subset for chromosome i
    # findInterval() locates the cytoband whose end coordinate (V3) bounds
    # each position; +1 converts to the row holding that band's name (V4).
    arm <- map[(findInterval(position[chromosome == i], map$V3)+1)]$V4
    if(!full){
      arm <- stringr::str_sub(arm, 1,1)
    }
    arms[chromosome == i] <- arm
  }
  return(arms)
}
|
#:# libraries
library(digest)
library(mlr)
library(OpenML)
library(farff)
#:# config
# Fix the RNG seed so the cross-validation folds are reproducible.
set.seed(1)
#:# data
# Download the steel-plates-fault dataset from OpenML.
dataset <- getOMLDataSet(data.name = "steel-plates-fault")
head(dataset$data)
#:# preprocessing
# No preprocessing is applied; the raw OpenML data is used as-is.
head(dataset$data)
#:# model
# Idiom fix: use `<-` for assignment instead of `=`.
task <- makeClassifTask(id = "task", data = dataset$data, target = "Class")
lrn <- makeLearner("classif.rotationForest", par.vals = list(K = 2, L = 18), predict.type = "prob")
#:# hash
#:# ab0c0a8e1e531620c33e4723cbd080fe
# The digest of the task + learner pair identifies this model configuration.
hash <- digest(list(task, lrn))
hash
#:# audit
# 5-fold cross-validation with a battery of classification measures.
cv <- makeResampleDesc("CV", iters = 5)
r <- mlr::resample(lrn, task, cv, measures = list(acc, auc, tnr, tpr, ppv, f1))
ACC <- r$aggr
ACC
#:# session info
# Idiom fix: the paste0() wrapper around a constant string was redundant.
sink("sessionInfo.txt")
sessionInfo()
sink()
|
/models/openml_steel-plates-fault/classification_Class/ab0c0a8e1e531620c33e4723cbd080fe/code.R
|
no_license
|
pysiakk/CaseStudies2019S
|
R
| false
| false
| 713
|
r
|
#:# libraries
library(digest)
library(mlr)
library(OpenML)
library(farff)
#:# config
# Fix the RNG seed so the cross-validation folds are reproducible.
set.seed(1)
#:# data
# Download the steel-plates-fault dataset from OpenML.
dataset <- getOMLDataSet(data.name = "steel-plates-fault")
head(dataset$data)
#:# preprocessing
# No preprocessing is applied; the raw OpenML data is used as-is.
head(dataset$data)
#:# model
# Idiom fix: use `<-` for assignment instead of `=`.
task <- makeClassifTask(id = "task", data = dataset$data, target = "Class")
lrn <- makeLearner("classif.rotationForest", par.vals = list(K = 2, L = 18), predict.type = "prob")
#:# hash
#:# ab0c0a8e1e531620c33e4723cbd080fe
# The digest of the task + learner pair identifies this model configuration.
hash <- digest(list(task, lrn))
hash
#:# audit
# 5-fold cross-validation with a battery of classification measures.
cv <- makeResampleDesc("CV", iters = 5)
r <- mlr::resample(lrn, task, cv, measures = list(acc, auc, tnr, tpr, ppv, f1))
ACC <- r$aggr
ACC
#:# session info
# Idiom fix: the paste0() wrapper around a constant string was redundant.
sink("sessionInfo.txt")
sessionInfo()
sink()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read.R
\name{read}
\alias{read}
\title{Read NetCDF data from global inventories}
\source{
Read about EDGAR at http://edgar.jrc.ec.europa.eu and MACCITY at
http://accent.aero.jussieu.fr/MACC_metadata.php
}
\usage{
read(
file = file.choose(),
version = NA,
coef = rep(1, length(file)),
spec = NULL,
year = 1,
month = 1,
hour = 1,
categories,
reproject = TRUE,
as_raster = TRUE,
skip_missing = FALSE,
verbose = TRUE
)
}
\arguments{
\item{file}{file name or names (variables are summed)}
\item{version}{Character; One of the following:
\tabular{lllll}{
\strong{argument}\tab \strong{tested}\tab \strong{region}\tab \strong{resolution}\tab \strong{projection}\cr
EDGAR\tab 4.32 and 5.0 \tab Global \tab 0.1 x 0.1 ° \tab longlat\cr
EDGAR_HTAPv2\tab 2.2 \tab Global \tab 0.1 x 0.1 ° \tab longlat\cr
GAINS\tab v5a \tab Global \tab 0.5 x 0.5 ° \tab longlat\cr
RCP\tab RCP3PD Glb \tab Global \tab 0.5 x 0.5 ° \tab longlat\cr
MACCITY\tab 2010 \tab Global \tab 0.5 x 0.5 ° \tab longlat\cr
FFDAS\tab 2.2 \tab Global \tab 0.1 x 0.1 ° \tab longlat\cr
ODIAC\tab 2020 \tab Global \tab 1 x 1 ° \tab longlat\cr
VULCAN-y\tab 3.0 \tab US \tab 1 x 1 Km \tab lcc\cr
VULCAN-h\tab 3.0 \tab US \tab 1 x 1 Km \tab lcc\cr
ACES\tab 2020 \tab NE US \tab 1 x 1 km \tab lcc\cr
}}
\item{coef}{coefficients to merge different sources (file) into one emission}
\item{spec}{numeric speciation vector to split emission into different species}
\item{year}{scenario index (only for GAINS and VULCAN-y)}
\item{month}{the desired month of the inventory (MACCITY and ODIAC)}
\item{hour}{hour of the emission (only for ACES and VULCAN-h)}
\item{categories}{considered categories (for MACCITY/GAINS variable names), empty for use all}
\item{reproject}{to project the output to "+proj=longlat" needed for emission function (only for VULCAN and ACES)}
\item{as_raster}{return a raster (default) or matrix (with units)}
\item{skip_missing}{return a zero emission and a warning for missing files/variables}
\item{verbose}{display additional information}
}
\value{
Matrix or raster
}
\description{
Read data from global inventories. Several files can be read to produce one
emission output and/or can be split into several species
}
\note{
for EDGAR (all versions), GAINS, RCP and MACCITY, please use flux (kg m-2 s-1) NetCDF file.
}
\examples{
\donttest{
dir.create(file.path(tempdir(), "EDGARv432"))
folder <- setwd(file.path(tempdir(), "EDGARv432"))
url <- "http://jeodpp.jrc.ec.europa.eu/ftp/jrc-opendata/EDGAR/datasets/v432_AP/NOx"
file <- 'v432_NOx_2012.0.1x0.1.zip'
download.file(paste0(url,'/TOTALS/',file), file)
unzip('v432_NOx_2012.0.1x0.1.zip')
nox <- read(file = dir(pattern = '.nc'),
version = 'EDGAR',
spec = c(E_NO = 0.9 , # 90\% of NOx is NO
E_NO2 = 0.1 )) # 10\% of NOx is NO2
setwd(folder)
# creating a color scale
cor <- colorRampPalette(colors = c(c("#031057", "#0522FC",
"#7E0AFA", "#EF0AFF",
"#FFA530", "#FFF957")))
raster::plot(nox$E_NO,xlab="Lat", ylab="Lon",
col = cor(12),zlim = c(-6.5e-7,1.4e-5),
main="NO emissions from EDGAR (in g / m2 s)")
d1 <- gridInfo(paste(system.file("extdata", package = "EmissV"),"/wrfinput_d01",sep=""))
NO <- emission(grid = d1, inventory = nox$E_NO, pol = "NO", mm = 30.01, plot = TRUE)
}
}
\references{
Janssens-Maenhout, G., Dentener, F., Van Aardenne, J., Monni, S., Pagliari, V., Orlandini,
L., ... & Wankmüller, R. (2012). EDGAR-HTAP: a harmonized gridded air pollution emission dataset
based on national inventories. European Commission Joint Research Centre Institute for
Environment and Sustainability. JRC 68434 UR 25229 EUR 25229, ISBN 978-92-79-23123-0.
Lamarque, J.-F., Bond, T. C., Eyring, V., Granier, C., Heil, A., Klimont, Z., Lee, D., Liousse,
C., Mieville, A., Owen, B., Schultz, M. G., Shindell, D., Smith, S. J., Stehfest, E.,
Van Aardenne, J., Cooper, O. R., Kainuma, M., Mahowald, N., McConnell, J. R., Naik, V.,
Riahi, K., and van Vuuren, D. P.: Historical (1850-2000) gridded anthropogenic and biomass
burning emissions of reactive gases and aerosols: methodology and application,
Atmos. Chem. Phys., 10, 7017-7039, doi:10.5194/acp-10-7017-2010, 2010.
Z Klimont, S. J. Smith and J Cofala The last decade of global anthropogenic sulfur dioxide:
2000–2011 emissions Environmental Research Letters 8, 014003, 2013
Gurney, Kevin R., Jianming Liang, Risa Patarasuk, Yang Song, Jianhua Huang, and
Geoffrey Roest (2019) The Vulcan Version 3.0 High-Resolution Fossil Fuel CO2 Emissions
for the United States. Nature Scientific Data.
}
\seealso{
\code{\link{rasterSource}} and \code{\link{gridInfo}}
\code{\link{species}}
}
|
/man/read.Rd
|
no_license
|
cran/EmissV
|
R
| false
| true
| 5,015
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read.R
\name{read}
\alias{read}
\title{Read NetCDF data from global inventories}
\source{
Read about EDGAR at http://edgar.jrc.ec.europa.eu and MACCITY at
http://accent.aero.jussieu.fr/MACC_metadata.php
}
\usage{
read(
file = file.choose(),
version = NA,
coef = rep(1, length(file)),
spec = NULL,
year = 1,
month = 1,
hour = 1,
categories,
reproject = TRUE,
as_raster = TRUE,
skip_missing = FALSE,
verbose = TRUE
)
}
\arguments{
\item{file}{file name or names (variables are summed)}
\item{version}{Character; One of the following:
\tabular{lllll}{
\strong{argument}\tab \strong{tested}\tab \strong{region}\tab \strong{resolution}\tab \strong{projection}\cr
EDGAR\tab 4.32 and 5.0 \tab Global \tab 0.1 x 0.1 ° \tab longlat\cr
EDGAR_HTAPv2\tab 2.2 \tab Global \tab 0.1 x 0.1 ° \tab longlat\cr
GAINS\tab v5a \tab Global \tab 0.5 x 0.5 ° \tab longlat\cr
RCP\tab RCP3PD Glb \tab Global \tab 0.5 x 0.5 ° \tab longlat\cr
MACCITY\tab 2010 \tab Global \tab 0.5 x 0.5 ° \tab longlat\cr
FFDAS\tab 2.2 \tab Global \tab 0.1 x 0.1 ° \tab longlat\cr
ODIAC\tab 2020 \tab Global \tab 1 x 1 ° \tab longlat\cr
VULCAN-y\tab 3.0 \tab US \tab 1 x 1 Km \tab lcc\cr
VULCAN-h\tab 3.0 \tab US \tab 1 x 1 Km \tab lcc\cr
ACES\tab 2020 \tab NE US \tab 1 x 1 km \tab lcc\cr
}}
\item{coef}{coefficients to merge different sources (file) into one emission}
\item{spec}{numeric speciation vector to split emission into different species}
\item{year}{scenario index (only for GAINS and VULCAN-y)}
\item{month}{the desired month of the inventory (MACCITY and ODIAC)}
\item{hour}{hour of the emission (only for ACES and VULCAN-h)}
\item{categories}{considered categories (for MACCITY/GAINS variable names), empty for use all}
\item{reproject}{to project the output to "+proj=longlat" needed for emission function (only for VULCAN and ACES)}
\item{as_raster}{return a raster (default) or matrix (with units)}
\item{skip_missing}{return a zero emission and a warning for missing files/variables}
\item{verbose}{display additional information}
}
\value{
Matrix or raster
}
\description{
Read data from global inventories. Several files can be read to produce one
emission output and/or can be split into several species
}
\note{
for EDGAR (all versions), GAINS, RCP and MACCITY, please use flux (kg m-2 s-1) NetCDF file.
}
\examples{
\donttest{
dir.create(file.path(tempdir(), "EDGARv432"))
folder <- setwd(file.path(tempdir(), "EDGARv432"))
url <- "http://jeodpp.jrc.ec.europa.eu/ftp/jrc-opendata/EDGAR/datasets/v432_AP/NOx"
file <- 'v432_NOx_2012.0.1x0.1.zip'
download.file(paste0(url,'/TOTALS/',file), file)
unzip('v432_NOx_2012.0.1x0.1.zip')
nox <- read(file = dir(pattern = '.nc'),
version = 'EDGAR',
spec = c(E_NO = 0.9 , # 90\% of NOx is NO
E_NO2 = 0.1 )) # 10\% of NOx is NO2
setwd(folder)
# creating a color scale
cor <- colorRampPalette(colors = c(c("#031057", "#0522FC",
"#7E0AFA", "#EF0AFF",
"#FFA530", "#FFF957")))
raster::plot(nox$E_NO,xlab="Lat", ylab="Lon",
col = cor(12),zlim = c(-6.5e-7,1.4e-5),
main="NO emissions from EDGAR (in g / m2 s)")
d1 <- gridInfo(paste(system.file("extdata", package = "EmissV"),"/wrfinput_d01",sep=""))
NO <- emission(grid = d1, inventory = nox$E_NO, pol = "NO", mm = 30.01, plot = TRUE)
}
}
\references{
Janssens-Maenhout, G., Dentener, F., Van Aardenne, J., Monni, S., Pagliari, V., Orlandini,
L., ... & Wankmüller, R. (2012). EDGAR-HTAP: a harmonized gridded air pollution emission dataset
based on national inventories. European Commission Joint Research Centre Institute for
Environment and Sustainability. JRC 68434 UR 25229 EUR 25229, ISBN 978-92-79-23123-0.
Lamarque, J.-F., Bond, T. C., Eyring, V., Granier, C., Heil, A., Klimont, Z., Lee, D., Liousse,
C., Mieville, A., Owen, B., Schultz, M. G., Shindell, D., Smith, S. J., Stehfest, E.,
Van Aardenne, J., Cooper, O. R., Kainuma, M., Mahowald, N., McConnell, J. R., Naik, V.,
Riahi, K., and van Vuuren, D. P.: Historical (1850-2000) gridded anthropogenic and biomass
burning emissions of reactive gases and aerosols: methodology and application,
Atmos. Chem. Phys., 10, 7017-7039, doi:10.5194/acp-10-7017-2010, 2010.
Z Klimont, S. J. Smith and J Cofala The last decade of global anthropogenic sulfur dioxide:
2000–2011 emissions Environmental Research Letters 8, 014003, 2013
Gurney, Kevin R., Jianming Liang, Risa Patarasuk, Yang Song, Jianhua Huang, and
Geoffrey Roest (2019) The Vulcan Version 3.0 High-Resolution Fossil Fuel CO2 Emissions
for the United States. Nature Scientific Data.
}
\seealso{
\code{\link{rasterSource}} and \code{\link{gridInfo}}
\code{\link{species}}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/supervised.R
\name{mbpls}
\alias{mbpls}
\title{Multiblock Partial Least Squares - MB-PLS}
\usage{
mbpls(X, Y, ncomp = 1, scale = FALSE, ...)
}
\arguments{
\item{X}{\code{list} of input blocks.}
\item{Y}{\code{matrix} of responses.}
\item{ncomp}{\code{integer} number of PLS components.}
\item{scale}{\code{logical} for autoscaling inputs (default = FALSE).}
\item{...}{additional arguments to pls::plsr.}
}
\value{
\code{multiblock, mvr} object with super-scores, super-loadings, block-scores and block-loadings, and the underlying
\code{mvr} (PLS) object for the super model, with all its result and plot possibilities. Relevant plotting functions: \code{\link{multiblock_plots}}
and result functions: \code{\link{multiblock_results}}.
}
\description{
A function computing MB-PLS scores, loadings, etc. on the super-level and
block-level.
}
\details{
MB-PLS is the prototypical component based supervised multiblock method.
It was originally formulated as a two-level method with a block-level and a super-level,
but it was later discovered that it could be expressed as an ordinary PLS on concatenated
weighted X blocks followed by a simple loop for calculating block-level loading weights,
loadings and scores. This implementation uses the \code{\link[pls]{plsr}} function on the
scaled input blocks (1/sqrt(ncol)) enabling all summaries and plots from the \code{pls}
package.
}
\examples{
data(potato)
mb <- mbpls(potato[c('Chemical','Compression')], potato[['Sensory']], ncomp = 5)
print(mb)
scoreplot(mb, labels="names") # Exploiting mvr object structure from pls package
}
\references{
\itemize{
\item Wangen, L.E. and Kowalski, B.R. (1988). A multiblock partial least squares algorithm for investigating complex chemical systems. Journal of Chemometrics, 3, 3–20.
\item Westerhuis, J.A., Kourti, T., and MacGregor,J.F. (1998). Analysis of multiblock and hierarchical PCA and PLS models. Journal of Chemometrics, 12, 301–321.
}
}
\seealso{
Overviews of available methods, \code{\link{multiblock}}, and methods organised by main structure: \code{\link{basic}}, \code{\link{unsupervised}}, \code{\link{asca}}, \code{\link{supervised}} and \code{\link{complex}}.
}
|
/man/mbpls.Rd
|
no_license
|
minghao2016/multiblock
|
R
| false
| true
| 2,254
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/supervised.R
\name{mbpls}
\alias{mbpls}
\title{Multiblock Partial Least Squares - MB-PLS}
\usage{
mbpls(X, Y, ncomp = 1, scale = FALSE, ...)
}
\arguments{
\item{X}{\code{list} of input blocks.}
\item{Y}{\code{matrix} of responses.}
\item{ncomp}{\code{integer} number of PLS components.}
\item{scale}{\code{logical} for autoscaling inputs (default = FALSE).}
\item{...}{additional arguments to pls::plsr.}
}
\value{
\code{multiblock, mvr} object with super-scores, super-loadings, block-scores and block-loadings, and the underlying
\code{mvr} (PLS) object for the super model, with all its result and plot possibilities. Relevant plotting functions: \code{\link{multiblock_plots}}
and result functions: \code{\link{multiblock_results}}.
}
\description{
A function computing MB-PLS scores, loadings, etc. on the super-level and
block-level.
}
\details{
MB-PLS is the prototypical component based supervised multiblock method.
It was originally formulated as a two-level method with a block-level and a super-level,
but it was later discovered that it could be expressed as an ordinary PLS on concatenated
weighted X blocks followed by a simple loop for calculating block-level loading weights,
loadings and scores. This implementation uses the \code{\link[pls]{plsr}} function on the
scaled input blocks (1/sqrt(ncol)) enabling all summaries and plots from the \code{pls}
package.
}
\examples{
data(potato)
mb <- mbpls(potato[c('Chemical','Compression')], potato[['Sensory']], ncomp = 5)
print(mb)
scoreplot(mb, labels="names") # Exploiting mvr object structure from pls package
}
\references{
\itemize{
\item Wangen, L.E. and Kowalski, B.R. (1988). A multiblock partial least squares algorithm for investigating complex chemical systems. Journal of Chemometrics, 3, 3–20.
\item Westerhuis, J.A., Kourti, T., and MacGregor,J.F. (1998). Analysis of multiblock and hierarchical PCA and PLS models. Journal of Chemometrics, 12, 301–321.
}
}
\seealso{
Overviews of available methods, \code{\link{multiblock}}, and methods organised by main structure: \code{\link{basic}}, \code{\link{unsupervised}}, \code{\link{asca}}, \code{\link{supervised}} and \code{\link{complex}}.
}
|
#:# libraries
library(digest)
library(mlr)
library(OpenML)
library(farff)

#:# config
# Fixed seed so the CV folds (and therefore all reported metrics) are reproducible.
set.seed(1)

#:# data
# Pull the heart-statlog benchmark dataset from OpenML.
dataset <- getOMLDataSet(data.name = "heart-statlog")
head(dataset$data)

#:# preprocessing
# No preprocessing is applied; the raw OpenML data is used as-is.
head(dataset$data)

#:# model
# Binary classification task; gamboost with linear base-learners ("bols"),
# logit link, and risk monitoring disabled, predicting class probabilities.
task <- makeClassifTask(id = "task", data = dataset$data, target = "class")
lrn <- makeLearner("classif.gamboost",
                   par.vals = list(baselearner = "bols",
                                   Binomial.link = "logit",
                                   risk = "none"),
                   predict.type = "prob")

#:# hash
#:# 1619fdc149d64b6174a19c3a0af8cb26
# Fingerprint of the (task, learner) pair; recorded in the comment above.
hash <- digest(list(task, lrn))
hash

#:# audit
# 5-fold cross-validation over a panel of classification measures.
cv <- makeResampleDesc("CV", iters = 5)
r <- mlr::resample(lrn, task, cv, measures = list(acc, auc, tnr, tpr, ppv, f1))
ACC <- r$aggr
ACC

#:# session info
# Record package versions alongside the results for reproducibility.
sink("sessionInfo.txt")
sessionInfo()
sink()
|
/models/openml_heart-statlog/classification_class/1619fdc149d64b6174a19c3a0af8cb26/code.R
|
no_license
|
pysiakk/CaseStudies2019S
|
R
| false
| false
| 749
|
r
|
#:# libraries
library(digest)
library(mlr)
library(OpenML)
library(farff)

#:# config
# Fixed seed so the CV folds (and therefore all reported metrics) are reproducible.
set.seed(1)

#:# data
# Pull the heart-statlog benchmark dataset from OpenML.
dataset <- getOMLDataSet(data.name = "heart-statlog")
head(dataset$data)

#:# preprocessing
# No preprocessing is applied; the raw OpenML data is used as-is.
head(dataset$data)

#:# model
# Binary classification task; gamboost with linear base-learners ("bols"),
# logit link, and risk monitoring disabled, predicting class probabilities.
task <- makeClassifTask(id = "task", data = dataset$data, target = "class")
lrn <- makeLearner("classif.gamboost",
                   par.vals = list(baselearner = "bols",
                                   Binomial.link = "logit",
                                   risk = "none"),
                   predict.type = "prob")

#:# hash
#:# 1619fdc149d64b6174a19c3a0af8cb26
# Fingerprint of the (task, learner) pair; recorded in the comment above.
hash <- digest(list(task, lrn))
hash

#:# audit
# 5-fold cross-validation over a panel of classification measures.
cv <- makeResampleDesc("CV", iters = 5)
r <- mlr::resample(lrn, task, cv, measures = list(acc, auc, tnr, tpr, ppv, f1))
ACC <- r$aggr
ACC

#:# session info
# Record package versions alongside the results for reproducibility.
sink("sessionInfo.txt")
sessionInfo()
sink()
|
## The following code is part of the example scripts included
## in the "Soil Organic Carbon Mapping Cookbook"

## @knitr optional-Merging

# Read the soil-profile derived table and the topsoil table.
profiles <- read.csv("data/dataproc_profiles.csv")
topsoils <- read.csv("data/dataproc.csv")

# Column names may differ between the two tables, but the units and the
# column order have to be the same, because we are going to append the rows
# of one table to the other.
topsoils <- topsoils[, c("ID", "X", "Y", "SOC", "BLD",
                         "OCSKGM", "meaERROR")]
profiles <- profiles[, c("id", "X", "Y", "SOC", "BLD",
                         "OCSKGM", "meaERROR")]

# Align the column names before stacking the two tables.
names(profiles) <- names(topsoils)
dat <- rbind(topsoils, profiles)

write.csv(dat, "data/dataproc_all.csv", row.names = FALSE)
|
/code/optional-Merging.R
|
no_license
|
anhnguyendepocen/SOC-Mapping-Cookbook
|
R
| false
| false
| 720
|
r
|
## The following code is part of the example scripts included
## in the "Soil Organic Carbon Mapping Cookbook"

## @knitr optional-Merging

# Read the soil-profile derived table and the topsoil table.
profiles <- read.csv("data/dataproc_profiles.csv")
topsoils <- read.csv("data/dataproc.csv")

# Column names may differ between the two tables, but the units and the
# column order have to be the same, because we are going to append the rows
# of one table to the other.
topsoils <- topsoils[, c("ID", "X", "Y", "SOC", "BLD",
                         "OCSKGM", "meaERROR")]
profiles <- profiles[, c("id", "X", "Y", "SOC", "BLD",
                         "OCSKGM", "meaERROR")]

# Align the column names before stacking the two tables.
names(profiles) <- names(topsoils)
dat <- rbind(topsoils, profiles)

write.csv(dat, "data/dataproc_all.csv", row.names = FALSE)
|
library(splines)
library(Matrix)

#' Spline function with 1 continuous dimension and 1 discrete dimension defined on finite Time
#'
#' Creates a functional representation for a 1 dimensional spline indexed by a discrete variable
#' and time. Time is discrete and assumed to end at period T. This functional is useful for
#' situations where the form of the structural function V is known in T, i.e. does not have to
#' be approximated.
#'
#' @param xsupp a vector of support points for the continuous dimension
#' @param ivals a vector of discrete values for the support of the discrete dimension
#' @param degree integer of desired spline degree
#' @param nbasis.funs integer for desired number of basis functions
#' @return frep an object of class frep that can be used to evaluate functions and get levels and Jacobians
#' @export
#' @seealso \code{\link{F_SplineInt1D.r}}
#' @family frep
#' @example examples/example-MultiSplineFitting.r
F_SplineTime1D <- function(xsupp, ivals, degree, nbasis.funs) {

  # Get the spline knots; attr 'num.basis' carries the parameter length per bin.
  xknots <- knot.select2(degree = degree, x = xsupp, num.basis = nbasis.funs, stretch = 0.01)
  Nx <- length(xsupp)              # number of data points in each discrete bin
  ng <- attr(xknots, 'num.basis')  # number of spline coefficients in each discrete bin

  # NOTE(review): the closure below reads `maxtime` (the final period index) from
  # its enclosing/global environment; it is not defined in this file -- confirm
  # the caller sets it before evaluating the representation.
  ff <- function(ain, zin, gin, deriv = 0) {

    # spline parameter gin
    # --------------------------
    # compute value of approximation to V(ain,gin) and return in F
    # compute partial derivative of approximation to V(ain,gin) w.r.t gin and return in M
    if (inherits(gin, "FDiff")) {
      M <- Matrix(0, nrow = length(zin), ncol = length(ivals) * ng, sparse = TRUE) # M not necessarily square
      F <- array(0, length(zin))

      # here it would be good to be more flexible
      # with timing, there is always a last period
      # e.g. i don't want to know the euler equation in the last period, it's not defined.
      # key <- data.table(expand.grid(ia=1:Na,iz=1:2,it=1:5),key="it")
      # key[,index := 1:nrow(key)]
      for (i in unique(zin)) {
        I <- which(zin == i)                          # row indices of M for discrete bin i
        J <- ((i - 1) * ng + 1):((i - 1) * ng + ng)   # col indices

        # check for final period
        if (i == maxtime) {
          F[I] <- log(ain[I]) # suppose final function is log
          # M[I,J] = diag(length(I)) there is zero impact of the coefficients in the last period, so don't change.
        } else {
          # check for coloring
          if (is.fdiff(ain) && gin@coloring) {
            D <- array(1, c(length(I), length(J)))
          } else {
            # value of the basis functions evaluated at ain
            D <- splineDesign(xknots, ain[I], derivs = rep(deriv, length(ain[I])),
                              outer.ok = TRUE, ord = (degree + 1), sparse = TRUE)
          }
          M[I, J] <- D
          F[I] <- F[I] + as.numeric(D %*% gin@F[J])
        }
      }

      # NOTE(review): `vars` is built but `gin@vars` is what gets stored below --
      # confirm whether `vars` was meant to be passed to new("FDiff", ...).
      vars <- list(v1 = length(ivals) * ng)
      names(vars) <- names(gin@vars[[1]])
      # under coloring we only keep the sparsity pattern, not the values
      if (gin@coloring) M <- (M != 0) * 1
      R <- new("FDiff", F = c(F), J = Matrix(M, sparse = TRUE), vars = gin@vars, coloring = gin@coloring)
    } else {
      stop('function representation is not a parameter, this seems odd')
    }

    # endogenous choice variable ain
    # ------------------------------
    # check if we have an exogenous or endogenous variable
    if (inherits(ain, "FDiff")) {
      M <- Matrix(0, nrow = length(zin), ncol = length(ain@F), sparse = TRUE) # M is square
      for (i in unique(zin)) {
        I <- which(zin == i)
        # NOTE(review): J is 1:ng for every bin here, unlike the per-bin offset
        # used above -- confirm this is intentional.
        J <- 1:ng
        # check for final period
        if (i == maxtime) {
          M[I, I] <- Diagonal(length(I), 1 / ain@F[I]) # derivative of the final-period log
        } else {
          D <- splineDesign(xknots, ain[I], derivs = rep(1 + deriv, length(ain[I])),
                            outer.ok = TRUE, ord = degree + 1, sparse = TRUE)
          if (ain@coloring) {
            M[I, I] <- Diagonal(length(I))
          } else {
            M[I, I] <- Diagonal(length(I), as.numeric(D %*% gin[J]))
          }
        }
      }
      R <- appendJac(R, Matrix(M, sparse = TRUE), ain@vars)
    }

    return(R)
  }

  class(ff) <- 'frep'
  attr(ff, 'ng') <- length(ivals) * ng # total number of spline coefficients across all bins
  return(ff)
}
|
/R/F_SplineTime1D.r
|
no_license
|
SunRonghe/mpeccable
|
R
| false
| false
| 4,001
|
r
|
library(splines)
library(Matrix)

#' Spline function with 1 continuous dimension and 1 discrete dimension defined on finite Time
#'
#' Creates a functional representation for a 1 dimensional spline indexed by a discrete variable
#' and time. Time is discrete and assumed to end at period T. This functional is useful for
#' situations where the form of the structural function V is known in T, i.e. does not have to
#' be approximated.
#'
#' @param xsupp a vector of support points for the continuous dimension
#' @param ivals a vector of discrete values for the support of the discrete dimension
#' @param degree integer of desired spline degree
#' @param nbasis.funs integer for desired number of basis functions
#' @return frep an object of class frep that can be used to evaluate functions and get levels and Jacobians
#' @export
#' @seealso \code{\link{F_SplineInt1D.r}}
#' @family frep
#' @example examples/example-MultiSplineFitting.r
F_SplineTime1D <- function(xsupp, ivals, degree, nbasis.funs) {

  # Get the spline knots; attr 'num.basis' carries the parameter length per bin.
  xknots <- knot.select2(degree = degree, x = xsupp, num.basis = nbasis.funs, stretch = 0.01)
  Nx <- length(xsupp)              # number of data points in each discrete bin
  ng <- attr(xknots, 'num.basis')  # number of spline coefficients in each discrete bin

  # NOTE(review): the closure below reads `maxtime` (the final period index) from
  # its enclosing/global environment; it is not defined in this file -- confirm
  # the caller sets it before evaluating the representation.
  ff <- function(ain, zin, gin, deriv = 0) {

    # spline parameter gin
    # --------------------------
    # compute value of approximation to V(ain,gin) and return in F
    # compute partial derivative of approximation to V(ain,gin) w.r.t gin and return in M
    if (inherits(gin, "FDiff")) {
      M <- Matrix(0, nrow = length(zin), ncol = length(ivals) * ng, sparse = TRUE) # M not necessarily square
      F <- array(0, length(zin))

      # here it would be good to be more flexible
      # with timing, there is always a last period
      # e.g. i don't want to know the euler equation in the last period, it's not defined.
      # key <- data.table(expand.grid(ia=1:Na,iz=1:2,it=1:5),key="it")
      # key[,index := 1:nrow(key)]
      for (i in unique(zin)) {
        I <- which(zin == i)                          # row indices of M for discrete bin i
        J <- ((i - 1) * ng + 1):((i - 1) * ng + ng)   # col indices

        # check for final period
        if (i == maxtime) {
          F[I] <- log(ain[I]) # suppose final function is log
          # M[I,J] = diag(length(I)) there is zero impact of the coefficients in the last period, so don't change.
        } else {
          # check for coloring
          if (is.fdiff(ain) && gin@coloring) {
            D <- array(1, c(length(I), length(J)))
          } else {
            # value of the basis functions evaluated at ain
            D <- splineDesign(xknots, ain[I], derivs = rep(deriv, length(ain[I])),
                              outer.ok = TRUE, ord = (degree + 1), sparse = TRUE)
          }
          M[I, J] <- D
          F[I] <- F[I] + as.numeric(D %*% gin@F[J])
        }
      }

      # NOTE(review): `vars` is built but `gin@vars` is what gets stored below --
      # confirm whether `vars` was meant to be passed to new("FDiff", ...).
      vars <- list(v1 = length(ivals) * ng)
      names(vars) <- names(gin@vars[[1]])
      # under coloring we only keep the sparsity pattern, not the values
      if (gin@coloring) M <- (M != 0) * 1
      R <- new("FDiff", F = c(F), J = Matrix(M, sparse = TRUE), vars = gin@vars, coloring = gin@coloring)
    } else {
      stop('function representation is not a parameter, this seems odd')
    }

    # endogenous choice variable ain
    # ------------------------------
    # check if we have an exogenous or endogenous variable
    if (inherits(ain, "FDiff")) {
      M <- Matrix(0, nrow = length(zin), ncol = length(ain@F), sparse = TRUE) # M is square
      for (i in unique(zin)) {
        I <- which(zin == i)
        # NOTE(review): J is 1:ng for every bin here, unlike the per-bin offset
        # used above -- confirm this is intentional.
        J <- 1:ng
        # check for final period
        if (i == maxtime) {
          M[I, I] <- Diagonal(length(I), 1 / ain@F[I]) # derivative of the final-period log
        } else {
          D <- splineDesign(xknots, ain[I], derivs = rep(1 + deriv, length(ain[I])),
                            outer.ok = TRUE, ord = degree + 1, sparse = TRUE)
          if (ain@coloring) {
            M[I, I] <- Diagonal(length(I))
          } else {
            M[I, I] <- Diagonal(length(I), as.numeric(D %*% gin[J]))
          }
        }
      }
      R <- appendJac(R, Matrix(M, sparse = TRUE), ain@vars)
    }

    return(R)
  }

  class(ff) <- 'frep'
  attr(ff, 'ng') <- length(ivals) * ng # total number of spline coefficients across all bins
  return(ff)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.