| content (large_string, lengths 0–6.46M) | path (large_string, lengths 3–331) | license_type (large_string, 2 classes) | repo_name (large_string, lengths 5–125) | language (large_string, 1 class) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 4–6.46M) | extension (large_string, 75 classes) | text (string, lengths 0–6.46M) |
|---|---|---|---|---|---|---|---|---|---|
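The records below are flattened rows from this schema. As a minimal sketch of how such a dump could be queried programmatically — assuming the records were exported to a local Parquet file named `r_code_sample.parquet` (a hypothetical path, not part of this dump) and that the arrow package is available:

```r
# Sketch only: assumes a local Parquet export of this dataset.
library(arrow)
library(dplyr)

ds <- read_parquet("r_code_sample.parquet")  # hypothetical file name

# Keep non-vendored, non-generated R sources under 10 kB.
small_r <- ds %>%
  filter(language == "R", !is_vendor, !is_generated, length_bytes < 10000) %>%
  select(repo_name, path, length_bytes)

head(small_r)
```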
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{clrGetConstructors}
\alias{clrGetConstructors}
\title{List the public constructors of a CLR Type}
\usage{
clrGetConstructors(type)
}
\arguments{
\item{type}{CLR Type, or a (character) type name that can be successfully parsed}
}
\value{
a list of constructor signatures
}
\description{
List the public constructors of a CLR Type
}
\examples{
\dontrun{
library(rClr)
testClassName <- "Rclr.TestObject";
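# Per the arguments above, a CLR Type object may be passed instead of the (character) type name used here.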
clrGetConstructors(testClassName)
}
}
|
/man/clrGetConstructors.Rd
|
no_license
|
agstudy/rClr
|
R
| false
| false
| 500
|
rd
|
library(BioGeoBEARS)
### Name: symbolic_to_P_matrix
### Title: Convert symbolic matrix to relprob matrix
### Aliases: symbolic_to_P_matrix
### ** Examples
testval=1
states_list = list("_", c("A"), c("B"), c("C"), c("A","B"),
c("B","C"), c("A","C"), c("A","B","C"))
states_list = areas_list_to_states_list_new(areas=c("A","B","C"),
include_null_range=TRUE, split_ABC=TRUE)
states_list
dedf = make_relprob_matrix_de(states_list=states_list,
split_ABC=FALSE, split="", remove_simultaneous_events=TRUE,
add_multiple_Ds=TRUE,
dispersal_multiplier_matrix=make_dispersal_multiplier_matrix(states_list=states_list))
dedf
# Defaults
Pmat = symbolic_to_P_matrix(dedf, cellsplit="\\\\+", mergesym="+",
diags_sum_to_1=FALSE, d=0.1, e=0.01)
Pmat
# Calculate diagonal
Pmat = symbolic_to_P_matrix(dedf, cellsplit="\\\\+", mergesym="+",
diags_sum_to_1=TRUE, d=0.1, e=0.01)
Pmat
# You don't have to split if the formulas are directly parsable
Pmat = symbolic_to_P_matrix(dedf, cellsplit="yadda", mergesym="",
diags_sum_to_1=FALSE, d=0.1, e=0.01)
Pmat
|
/data/genthat_extracted_code/BioGeoBEARS/examples/symbolic_to_P_matrix.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 1,049
|
r
|
library(BioGeoBEARS)
### Name: symbolic_to_P_matrix
### Title: Convert symbolic matrix to relprob matrix
### Aliases: symbolic_to_P_matrix
### ** Examples
testval=1
states_list = list("_", c("A"), c("B"), c("C"), c("A","B"),
c("B","C"), c("A","C"), c("A","B","C"))
states_list = areas_list_to_states_list_new(areas=c("A","B","C"),
include_null_range=TRUE, split_ABC=TRUE)
states_list
dedf = make_relprob_matrix_de(states_list=states_list,
split_ABC=FALSE, split="", remove_simultaneous_events=TRUE,
add_multiple_Ds=TRUE,
dispersal_multiplier_matrix=make_dispersal_multiplier_matrix(states_list=states_list))
dedf
# Defaults
Pmat = symbolic_to_P_matrix(dedf, cellsplit="\\\\+", mergesym="+",
diags_sum_to_1=FALSE, d=0.1, e=0.01)
Pmat
# Calculate diagonal
Pmat = symbolic_to_P_matrix(dedf, cellsplit="\\\\+", mergesym="+",
diags_sum_to_1=TRUE, d=0.1, e=0.01)
Pmat
# You don't have to split, if the formulas are directly parsable
Pmat = symbolic_to_P_matrix(dedf, cellsplit="yadda", mergesym="",
diags_sum_to_1=FALSE, d=0.1, e=0.01)
Pmat
|
#This R script loads, cleans, merges, and performs calculations on the test and training sets from the Human Activity Recognition Using Smartphones Data Set.
#There are four main blocks in this script. The first block prepares the column headers for both the Test and Training sets,
#the second block prepares the test data, the third block prepares the training data, and the fourth block
#creates an independent data set by combining the Test and Training sets and performing calculations on specific variables.
#Remove all variables
rm(list=ls(all=TRUE))
#Install and load required libraries for data cleaning and shaping
install.packages("reshape2")
install.packages("dplyr")
library(reshape2)
library(dplyr)
##################################################
# Create Column Headers for Measurement Variables#
##################################################
#Load Test and Training Data variable names set
HeadersRaw <- read.table("C:/Users/DK/Documents/UCI HAR Dataset/features.txt")
#Assign column names to the variable names
colnames(HeadersRaw) <- c("ColIndex", "ColName")
#Explicitly convert variable names to a data frame to ensure manipulation operations will compile
HeadersRaw <- data.frame(HeadersRaw)
#Remove non-alphanumeric characters from the variable names to tidy the column names
HeadersRaw <- mutate(HeadersRaw, ColName = gsub("[^[:alnum:]]","", ColName))
#Select the second column which is the descriptive column names. This column
#will be used to create the headers in both the Test and Training sets
Headers <- HeadersRaw[[2]]
#####################
# Load the Test Set #
#####################
#Load the Test Data set
TestData <- read.table("C:/Users/DK/Documents/UCI HAR Dataset/test/X_test.txt")
#Assign column names to the Test Data set using the Headers vector
colnames(TestData) <- Headers
#Explicitly convert the Test Data set to a data frame to ensure manipulation operations will compile
TestData <- data.frame(TestData)
#Add an ID column to the Test Data set, as this will be required in subsequent merge operation
TestData <- mutate(TestData, ID = rownames(TestData))
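#Note: rownames() returns character row indices ("1", "2", ...); these serve as the ID join key in the merges below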
#Load the Test Set Activity Codes
TestActivityIDRaw <- read.table("C:/Users/DK/Documents/UCI HAR Dataset/test/y_test.txt")
#Assign a column name to the Test Set Activity Codes
colnames(TestActivityIDRaw) <- c("ActivityID")
#Explicitly convert the Activity Code set to a data frame to ensure manipulation operations will compile
TestActivityIDRaw <- data.frame(TestActivityIDRaw)
#Add the activity description and an ID to the Activity Code set based on the provided decode from activity_labels.txt below
#1 WALKING
#2 WALKING_UPSTAIRS
#3 WALKING_DOWNSTAIRS
#4 SITTING
#5 STANDING
#6 LAYING
TestActivityDescriptions <- mutate(TestActivityIDRaw,
  ActivityDescription = case_when(
    ActivityID == 1 ~ "Walking",
    ActivityID == 2 ~ "Walking Upstairs",
    ActivityID == 3 ~ "Walking Downstairs",
    ActivityID == 4 ~ "Sitting",
    ActivityID == 5 ~ "Standing",
    ActivityID == 6 ~ "Laying"),
  ID = rownames(TestActivityIDRaw))
#Load the Subject IDs
TestSubjectIDRaw <- read.table("C:/Users/DK/Documents/UCI HAR Dataset/test/subject_test.txt")
#Assign a column name to the Subject ID set
colnames(TestSubjectIDRaw) <- c("SubjectID")
#Explicitly convert the Subject ID set to a data frame to ensure manipulation operations will compile
TestSubjects <- data.frame(TestSubjectIDRaw)
#Add an ID column to TestSubjects
TestSubjects <- mutate(TestSubjects, ID = rownames(TestSubjects))
#Join the Activity Descriptions set and the Test Data set
TestDataTemp <- merge(TestActivityDescriptions, TestData, "ID")
#Join the Subject ID set to the TestDataTemp set
TestData <- merge(TestSubjects, TestDataTemp, "ID")
#########################
# Load the Training Set #
#########################
#Load the Training Data set
TrainingData <- read.table("C:/Users/DK/Documents/UCI HAR Dataset/train/X_train.txt")
#Assign column names to the Training Data set using the Headers vector
colnames(TrainingData) <- Headers
#Explicitly convert the Training Data set to a data frame to ensure manipulation operations will compile
TrainingData <- data.frame(TrainingData)
#Add an ID column to the Training Data set, as this will be required in subsequent merge operation
TrainingData <- mutate(TrainingData, ID = rownames(TrainingData))
#Load the Training Set Activity Codes
TrainingActivityIDRaw <- read.table("C:/Users/DK/Documents/UCI HAR Dataset/train/y_train.txt")
#Assign a column name to the Training Set Activity Codes
colnames(TrainingActivityIDRaw) <- c("ActivityID")
#Explicitly convert the Activity Code set to a data frame to ensure manipulation operations will compile
TrainingActivityIDRaw <- data.frame(TrainingActivityIDRaw)
#Add the activity description and an ID to the Activity Code set based on the provided decode from activity_labels.txt below
#1 WALKING
#2 WALKING_UPSTAIRS
#3 WALKING_DOWNSTAIRS
#4 SITTING
#5 STANDING
#6 LAYING
TrainingActivityDescriptions <- mutate(TrainingActivityIDRaw,
  ActivityDescription = case_when(
    ActivityID == 1 ~ "Walking",
    ActivityID == 2 ~ "Walking Upstairs",
    ActivityID == 3 ~ "Walking Downstairs",
    ActivityID == 4 ~ "Sitting",
    ActivityID == 5 ~ "Standing",
    ActivityID == 6 ~ "Laying"),
  ID = rownames(TrainingActivityIDRaw))
#Load the Subject IDs
TrainingSubjectIDRaw <- read.table("C:/Users/DK/Documents/UCI HAR Dataset/train/subject_train.txt")
#Assign a column name to the Subject ID set
colnames(TrainingSubjectIDRaw) <- c("SubjectID")
#Explicitly convert the Subject ID set to a data frame to ensure manipulation operations will compile
TrainingSubjects <- data.frame(TrainingSubjectIDRaw)
#Add an ID column to TrainingSubjects
TrainingSubjects <- mutate(TrainingSubjects, ID = rownames(TrainingSubjects))
#Join the Activity Descriptions set and the Training Data set
TrainingDataTemp <- merge(TrainingActivityDescriptions, TrainingData, "ID")
#Join the Subject ID set to the TrainingDataTemp set
TrainingData <- merge(TrainingSubjects, TrainingDataTemp, "ID")
# Merge the Test and Training Sets and calculate
TestandTrainingUnion <- rbind(TestData, TrainingData)
###############################################################################################################
# Select only Mean and Standard Deviation measurements and calculate the mean grouped by Activity and Subject #
###############################################################################################################
#Subset the unioned Test and Training sets by selecting the grouping columns and the desired measurements
TestandTrainingUnionSubset <- select(TestandTrainingUnion, ActivityDescription, SubjectID, matches("mean|std"))
#Group the Subset by ActivityDescription and SubjectID
TestandTrainingUnionSubsetGrouped <- group_by (TestandTrainingUnionSubset, ActivityDescription, SubjectID)
#Summarize the grouped set by applying the mean to all pre-selected measurements
TestandTrainingMeanSummary <- summarise_all(TestandTrainingUnionSubsetGrouped, mean) #funs() is deprecated; pass the function directly
|
/run_analysis.R
|
no_license
|
david-kochar/GettingandCleaningDataCourseProject
|
R
| false
| false
| 7,188
|
r
|
# Load the required libraries and, if not already present, install any missing packages.
# For preparing the data sets
if(!require(tidyr)){
install.packages("tidyr")
require(tidyr)
}
if(!require(plyr)){
install.packages("plyr")
require(plyr)
}
if(!require(dplyr)){
install.packages("dplyr")
require(dplyr)
}
if(!require(data.table)){
install.packages("data.table")
require(data.table)
}
# For building the Shiny application
if(!require(shiny)){
install.packages("shiny")
require(shiny)
}
if(!require(leaflet)){
install.packages("leaflet")
require(leaflet)
}
if(!require(leaflet.extras)){
install.packages("leaflet.extras")
require(leaflet.extras)
}
if(!require(tidyverse)){
install.packages("tidyverse")
require(tidyverse)
}
if(!require(gplots)){
install.packages("gplots")
require(gplots)
}
paste("Shiny-App wird gebaut...")
# Read in the data set
data <- read.csv("Case_Study_Datensatz.csv", header = TRUE, sep = ",", dec = ",")
dataSelected <- select(data, c("Laengengrad", "Breitengrad", "Zulassungsdatum", "Zulassungstag", "Werksnummer_Fahrzeug", "ID_Fahrzeug", "Gemeinden"))
# Function defining a color for each car type. The colors are used as marker colors on the Leaflet map.
getColor <- function(dataSelected) {
sapply(dataSelected$Werksnummer_Fahrzeug, function(Werksnummer_Fahrzeug) {
if(Werksnummer_Fahrzeug == 11) {
"black"
} else if(Werksnummer_Fahrzeug == 12) {
"blue"
}
})
}
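# Note: factory numbers other than 11 and 12 would yield NULL entries here; the data set is assumed to contain only these two types.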
# Choice of icon used as a marker on the map
icons <- awesomeIcons(
icon = 'ion-android-car',
iconColor = 'white',
library = 'ion',
markerColor = getColor(dataSelected)
)
# Frontend: layout of the Shiny UI
ui <- fluidPage(
titlePanel(title = "Zulassung der Autos"),
sidebarLayout(
sidebarPanel(
fluidRow(
column(
width = 12,
# Checkboxes allow multiple selections for filtering by car type; all types are selected by default.
checkboxGroupInput(
inputId = "kategorie",
label = "Autotyp:",
choices = list("Typ 11" = 11, "Typ 12" = 12),
selected = c(11, 12)
)
)
),
# Slider filter by registration day; by default the full time range available in the data set is selected.
sliderInput(
inputId = "range",
label = HTML('Tag im März:'),
min = 1,
max = 31,
value = range(dataSelected$Zulassungstag, na.rm = TRUE),
step = 1
)
),
# The main panel displays the Leaflet map. Its height adjusts dynamically to the window size (best viewed in a browser).
mainPanel(
# Dynamic map size relative to window width and height.
tags$style(type = "text/css", "#karte {height: calc(100vh - 120px) !important;}"),
title = "Karte",
leafletOutput(
outputId = "karte"
),
# Copyright
fluidRow(
HTML('<div style ="padding-left: 14px;" align = "left"><b>IDA Casestudy 2018 <span style="color:#428bca">© by Gruppe 6:</span></b><br />Jannis Brodmann, Timos Ioannou, Aron Rogmann, Lukas Wolff & Bobby Xiong</div>')
)
)
)
)
# Backend: the functions underlying the application.
server <- function(input, output, session){
# The subset matching all filter criteria is wrapped in a reactive container.
dataFiltered <- reactive({
dataSelected %>%
filter(Zulassungstag >= input$range[1] & Zulassungstag <= input$range[2] & Werksnummer_Fahrzeug %in% input$kategorie)
})
# The static part of the Leaflet map is drawn only once.
output$karte <- renderLeaflet({
withProgress(
message = 'Bitte warten...',
detail = 'Die Anwendung lädt den .csv-Datensatz in Leaflet.', value = 0, {
for (i in 1:10) {
incProgress(1/10)
Sys.sleep(0.1)
}
}
)
leaflet(dataSelected) %>%
addProviderTiles("Stamen.TonerLite", group = "Simpel (Standard)", options = providerTileOptions(minZoom = 5, maxZoom = 13)) %>%
addTiles(group = "Open Street Map") %>%
# Focus/center the view on the bounding coordinates of Germany (from the data set)
fitBounds(
~min(Laengengrad)-1, ~min(Breitengrad)-1,
~max(Laengengrad)+1, ~max(Breitengrad)+1)
})
# Dynamic part of the map; markers and other objects are reset and updated whenever the filter settings change.
observe({
withProgress(
message = 'Bitte warten...',
detail = 'Der Filter wird angewandt.', value = 0, {
for (i in 1:10) {
incProgress(1/10)
Sys.sleep(0.1)
}
}
)
leafletProxy("karte", data = dataFiltered()) %>%
setMaxBounds(
~min(Laengengrad)-1, ~min(Breitengrad)-1,
~max(Laengengrad)+1, ~max(Breitengrad)+1) %>%
# clear* calls reset the map before each new filter change
clearShapes() %>%
clearPopups() %>%
clearMarkers() %>%
clearMarkerClusters() %>%
clearHeatmap() %>%
# Add a heatmap on a separate layer (i.e., its own group assignment)
addHeatmap(lng = ~Laengengrad, lat = ~Breitengrad, max = .6, blur = 60, group = "Heatmap") %>%
# Use awesomeMarkers for a customized, intuitive display of markers/icons.
addAwesomeMarkers(
lng = ~Laengengrad,
lat = ~Breitengrad,
icon = icons,
# Popup showing all required, user-relevant information for the filtered records.
popup = ~paste(
"<b>FahrzeugID: </b>", ID_Fahrzeug, "<br>",
"<b>Zulassungsdatum: </b>", Zulassungsdatum, "<br>",
"<b>Gemeinde: </b>", Gemeinden, "<br>",
"<b>Fahrzeugtyp: </b>", Werksnummer_Fahrzeug, "<br>"),
# Markers are grouped into clusters depending on the zoom level for a better overview.
clusterOptions = markerClusterOptions(),
group = "Detailliert"
) %>%
# Finer controls, e.g., map style and overlay layers
addLayersControl(
baseGroups = c("Simpel (Standard)", "Open Street Map"),
overlayGroups = c("Detailliert", "Heatmap"),
position = "bottomleft",
options = layersControlOptions(collapsed = TRUE)
) %>%
# Hide the heatmap layer when the application starts.
hideGroup(group = "Heatmap") %>%
# Map extent shown as a mini-map with the current position.
addMiniMap(width = 80, height = 80, toggleDisplay = TRUE, zoomAnimation = TRUE, autoToggleDisplay = TRUE, minimized = FALSE) #these arguments expect numerics/logicals, not quoted strings
})
}
# Launch the Shiny application
shinyApp(ui = ui, server = server)
|
/App_Case_Study.R
|
no_license
|
lwo101/QW_Case_Study
|
R
| false
| false
| 6,956
|
r
|
library(plyr)
library(dplyr)
library(ggplot2)
# Data retrieval
NEI <- readRDS("summarySCC_PM25.rds")
SCCTable <- readRDS("Source_Classification_Code.rds")
# Data extraction for Baltimore and Los Angeles
twocities<-subset(NEI, fips %in% c('24510','06037'))
# !!! 'Motor vehicles' are considered here as on the road vehicle sources
motorveh_codes <- SCCTable[(grepl('Veh', SCCTable$Short.Name, ignore.case = TRUE)) &
(grepl('Onroad', SCCTable$Data.Category, ignore.case = TRUE)),1]
# !! 'motorveh_codes' is a vector of factors
# Transformation of the SCC column of the 'twocities' table into factors
# to allow for proper comparison with motorveh_codes
twocities$SCC <- as.factor(twocities$SCC)
# Extraction of Baltimore and Los Angeles data for onroad motor vehicles only
motorvehicle_emissions <- subset(twocities, SCC %in% motorveh_codes)
# Renaming of the 'fips' values: 24510 replaced by Baltimore and 06037 by Los Angeles
motorvehicle_emissions$City <- revalue(motorvehicle_emissions$fips,
c("24510"="Baltimore", "06037"="Los Angeles"))
# Transformation of 'City' into a factor vector (for better plot legend display)
motorvehicle_emissions$City <- as.factor(motorvehicle_emissions$City)
detach(package:plyr)
# Grouping both by City and by year
groups <- motorvehicle_emissions %>% group_by(year, City) %>% summarise(PM25_Emissions=sum(Emissions, na.rm=TRUE)) #modern dplyr syntax; %.% and regroup() are defunct
# Sorting of results by City
sorted_groups<-groups[with(groups, order(City)), ]
#Extraction of all total PM2.5 emissions values
# And construction of the data frame (final_df)
# That will contain the percentage differences
# Over 3 years, for each city
PMEm<-c(sorted_groups$PM25_Emissions)
pct_dff<-vector()
rrange = c(2,3,4,6,7,8) #indices of within-city year transitions (not used below)
for (k in 2:4){
pct_dff[k-1] <- 100.*(PMEm[k]-PMEm[k-1])/PMEm[k-1]
# provides pct_dff[1 through 3]
}
for (k in 6:8){
pct_dff[k-2] <- 100.*(PMEm[k]-PMEm[k-1])/PMEm[k-1]
# provides pct_dff[4 through 6]
}
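# pct_dff now holds six year-over-year percentage changes: three for Baltimore
# (1999-2002, 2002-2005, 2005-2008) followed by three for Los Angeles, since PMEm
# is ordered as 4 years of Baltimore then 4 years of Los Angeles after sorting by City.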
final_df <- data.frame(Intervals = c("1999-2002","2002-2005","2005-2008"),
City=c(rep("Baltimore",3),rep("Los Angeles",3)),
Percent_Diff=pct_dff)
install.packages("gridExtra")
library(grid)
library(gridExtra)
# Plotting of the graphs
png(filename = "plot6.png", width = 640, height = 480, units = "px")
p1 <- ggplot(motorvehicle_emissions, aes(factor(year), Emissions))+
stat_summary(fun = sum, geom = "bar", position="dodge")+ #fun.y was deprecated in ggplot2 3.3.0
aes(fill = City)+
labs(y="Total PM2.5 Emissions (tons)",x="Years",
title="Total PM2.5 Emissions") + theme(legend.position="bottom")
p2 <- ggplot(final_df, aes(factor(Intervals), Percent_Diff))+
geom_bar(stat="identity", position="dodge")+
aes(fill = City)+
labs(y="% difference from beginning of intervals",x="Years",
title="Percent Difference") + theme(legend.position="bottom")
grid.arrange(p1, p2, ncol = 2, top = "Evolution of Absolute Values and Percentage Difference in PM2.5 Emissions\nFrom Motor Vehicles in Baltimore and Los Angeles from 1999 to 2008") #gridExtra >= 2.0 uses 'top' rather than 'main'
dev.off()
|
/plot6.R
|
no_license
|
alexandrateste/ExploratoryDataAnalysis
|
R
| false
| false
| 3,165
|
r
|
#******************************************************************************
# NOTE: Comments are listed as sublevels of a main comment. They are used to *
# outline the processes used throughout this script and how they relate *
# to one another. *
# *
# EXAMPLE: # A Comment such as a main process (Top level) *
# # > This is a subprocess (Level 1) *
# # >> This is a subprocess of a subprocess (Level 2) *
# # A second main process (Top level) *
# # > This is a subprocess (Level 1) *
# # A Comment such as a third main process (Top level) *
# # > This is a subprocess (Level 1) *
# # >> This is a subprocess of a subprocess (Level 2) *
# ... etc. (Level N) *
# *
#******************************************************************************
# Load required libraries
library(data.table)
library(dplyr)
# Download and extract data
# > If not downloaded, download the data. If downloaded, move on.
if (!file.exists("data.zip")) {
message("Downloading data (≈60 Mb) from 'https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip'... Please wait")
download.file(url = "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip", destfile = "data.zip", method = "curl") #full argument name; 'dest' relied on partial matching
} else {
message("Data already downloaded as data.zip")
}
# > If data is downloaded and not extracted, extract the ZIP archive. If extracted, move on.
if (file.exists("data.zip") & !file.exists("UCI HAR Dataset")) {
message("Extracting data... Please wait")
unzip("data.zip")
} else {
message("Data already extracted to ./UCI HAR Dataset/")
}
# Combining the Full Data Sets
# > Load Vectors (X), Activity Labels (y), Subject_IDs (_sub) and header information.
trainX <- fread("UCI HAR Dataset/train/X_train.txt")
trainy <- fread("UCI HAR Dataset/train/y_train.txt")
train_sub <- fread("UCI HAR Dataset/train/subject_train.txt")
testX <- fread("UCI HAR Dataset/test/X_test.txt")
testy <- fread("UCI HAR Dataset/test/y_test.txt")
test_sub <- fread("UCI HAR Dataset/test/subject_test.txt")
act_labs <- readLines("UCI HAR Dataset/activity_labels.txt")
headers <- readLines("UCI HAR Dataset/features.txt")
# > Combine X, y and _sub to form full data set
# >> Combine X, y and _sub of training set to test set individually using rbind().
comboX <- rbind(trainX,testX)
comboy <- rbind(trainy,testy) %>% mutate(V1=tstrsplit(act_labs," ")[[2]][V1])
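# tstrsplit(act_labs," ")[[2]] extracts the label text from lines like "1 WALKING";
# indexing that vector by V1 maps each activity code to its name.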
combo_sub <- rbind(train_sub,test_sub)
# >> Combine the _sub, y and X columns using cbind()
data <- cbind(combo_sub,comboy,comboX)
# > Clean the headers to make them more human-readable: remove parentheses and replace hyphens (-) with periods (.).
headers <- {
headers %>%
gsub("\\(","",.) %>%
gsub("\\)","",.) %>%
gsub("-",".",.) %>%
gsub(" t"," time",.) %>%
gsub(" f"," freq",.)
}
# > Name the columns of the data set. Numbers from the original header list are preserved to provide unique header names.
colnames(data) <- c("Subject_ID", "Activity", headers)
# Constructing the Final Data Set
# > List out required columns
names_mean <- grep("[Mm]ean", headers, value = TRUE)
names_std <- grep("[Ss]td", headers, value = TRUE)
# > Form final data set including columns, "Subject_ID" and "Activity"
final_data <- data %>% select(c("Subject_ID","Activity",names_mean,names_std))
# > Remove the preserved column numbers from the header names
final_headers <- {
tstrsplit(c(names_mean,names_std)," ")[[2]]
}
# > Set column names of "final_data"
colnames(final_data) <- c("Subject_ID","Activity",final_headers)
# Results
final_result <- group_by(final_data,Subject_ID,Activity) %>% summarise_all(mean) #summarise_each()/funs() are deprecated
print(final_result)
# Clean up the environment only leaving the final data sets in the environment
message("Cleaning up...")
rm("act_labs","combo_sub","comboX","comboy","final_headers","headers","names_mean","names_std","test_sub","testX","testy","train_sub","trainX","trainy")
message("DONE!")
message("Data sets are stored on the following variables:")
message("data <<< Full data set")
message("final_data <<< Reduced data set of mean and standard deviation data")
message("final_result <<< Tidy final data set")
|
/run_analysis.R
|
no_license
|
ozzie787/Getting_and_Cleaning_Data
|
R
| false
| false
| 4,727
|
r
|
#Extract groundwater chemistry data from the GEUS JupiterXL database
#The database is downloaded from https://www.geus.dk/produkter-ydelser-og-faciliteter/data-og-kort/national-boringsdatabase-jupiter/adgang-til-data/
#The database is downloaded as an MSSQL backup file (.bak) and restored using SQL Server Management Studio.
#Load libraries
library(odbc)
library(tidyverse)
library(lubridate)
library(sf)
#Connect to local copy of GEUS JupiterXL database
con <- DBI::dbConnect(odbc::odbc(),
Driver = "SQL Server",
Server = "localhost\\SQLEXPRESS",
Database = "kenneth_martinsen_bio_ku_dk_14207319_pcjupiter_xl",
Trusted_Connection = "True")
#Get tables
tables <- DBI::dbListTables(con)
#Get list of compounds so they can be extracted later
compoundlist <- tbl(con, "COMPOUNDLIST") %>%
select(COMPOUNDNO, LONG_TEXT) %>%
collect()
#Search for relevant tables
grep("GRW*", tables, value = TRUE)
#Table with groundwater analysis
analysis <- tbl(con, "GRWCHEMANALYSIS") %>%
select(SAMPLEID, COMPOUNDNO, AMOUNT, UNIT)
#Table with sample info
sample <- tbl(con, "GRWCHEMSAMPLE") %>%
select(SAMPLEID, BOREHOLENO, TOP, BOTTOM, SAMPLEDATE)
#Table with compound info
compounds <- tbl(con, "COMPOUNDLIST") %>%
select(COMPOUNDNO, LONG_TEXT)
#Table with borehole info
borehole <- tbl(con, "BOREHOLE") %>%
select(BOREHOLENO, XUTM32EUREF89, YUTM32EUREF89, ELEVATION, DRILLDEPTH)
#Join tables
all <- analysis %>%
left_join(sample) %>%
left_join(compounds) %>%
left_join(borehole)
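#Note: with by= omitted, each left_join() uses the columns the tables share (SAMPLEID, then COMPOUNDNO, then BOREHOLENO)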
#Extract and save alk and ph data
alk_ph <- all %>%
filter(COMPOUNDNO %in% c(41, 291)) %>%
collect()
#Save to file
saveRDS(alk_ph, paste0(getwd(), "/rawdata/grw_ph_alk.rds"))
#Close connection to database
DBI::dbDisconnect(con)
|
/groundwater_jupiter_xl.R
|
no_license
|
KennethTM/dk_freshwaters_budget
|
R
| false
| false
| 1,882
|
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lilypond.R
\name{lilypond}
\alias{lilypond}
\title{Save score to LilyPond file}
\usage{
lilypond(
score,
file,
key = "c",
time = "4/4",
tempo = "2 = 60",
header = NULL,
paper = NULL,
string_names = NULL,
endbar = "|.",
midi = TRUE,
colors = NULL,
crop_png = TRUE,
simplify = TRUE
)
}
\arguments{
\item{score}{a score object.}
\item{file}{character, LilyPond output file ending in \code{.ly}. May
include an absolute or relative path.}
\item{key}{character, key signature, e.g., \code{c}, \code{b_}, \code{f#m},
etc.}
\item{time}{character, defaults to \code{"4/4"}.}
\item{tempo}{character, defaults to \code{"2 = 60"}.}
\item{header}{a named list of arguments passed to the header of the
LilyPond file. See details.}
\item{paper}{a named list of arguments for the LilyPond file page layout.
See details.}
\item{string_names}{label strings at beginning of tab staff. \code{NULL}
(default) for non-standard tunings only, \code{TRUE} or \code{FALSE} for
force on or off completely.}
\item{endbar}{character, the global end bar.}
\item{midi}{logical, add midi inclusion specification to LilyPond file.}
\item{colors}{a named list of LilyPond element color overrides. See details.}
\item{crop_png}{logical, alter template for cropped height. See
details.}
\item{simplify}{logical, uses \code{simplify_phrase} to convert to simpler,
more efficient LilyPond syntax.}
}
\value{
nothing returned; a file is written.
}
\description{
Write a score to a LilyPond format (\code{.ly}) text file for later use by
LilyPond or subsequent editing outside of R.
}
\details{
This function only writes a LilyPond file to disk. It does not require a
LilyPond installation. It checks for the version number of an installation,
but LilyPond is not required to be found.
This function can be used directly but is commonly used by \code{render_*}
functions, which call this function internally to create the LilyPond file
and then call LilyPond to render that file to sheet music.
}
\section{Header options}{
All \code{header} list elements are character strings. The options for
\code{header} include:
\itemize{
\item \code{title}
\item \code{subtitle}
\item \code{composer}
\item \code{album}
\item \code{arranger}
\item \code{instrument}
\item \code{meter}
\item \code{opus}
\item \code{piece}
\item \code{poet}
\item \code{copyright}
\item \code{tagline}
}
}
\section{Paper options}{
All \code{paper} list elements are numeric except \code{page_numbers} and
\code{print_first_page_number},
which are logical. \code{page_numbers = FALSE} suppresses all page numbering.
When \code{page_numbers = TRUE}, you can set
\code{print_first_page_number = FALSE} to suppress printing of only the
first page number. \code{first_page_number} is the number of the first page,
defaulting to 1, and determines all subsequent page numbers. These arguments
correspond to LilyPond paper block variables.
The options for \code{paper} include the following and have the following
default values if not provided:
\itemize{
\item \code{textheight = 220}
\item \code{linewidth = 150}
\item \code{indent = 0}
\item \code{fontsize = 10}
\item \code{page_numbers = TRUE}
\item \code{print_first_page_number = TRUE}
\item \code{first_page_number = 1}
}
}
\section{PNG-related options}{
By default \code{crop_png = TRUE}. This alters the template so that when
the LilyPond output file is created, it contains specifications for cropping
the image to the content when that file is rendered by LilyPond to png.
The image will have its width and height automatically cropped
rather than retain the standard page dimensions.
This only applies to png outputs made from the LilyPond file, not pdf.
The argument is also ignored if explicitly providing \code{textheight} to
\code{paper}. You may still provide \code{linewidth} to \code{paper} if you
find you need to increase it beyond the default 150mm, generally as a result
of using a large \code{fontsize}.
Various \code{render_*} functions that wrap \code{lilypond} make use of this
argument as well.
}
\section{Color options}{
You can provide a named list of global color overrides for various sheet
music elements with the \code{colors} argument of \code{lilypond} or one of
the associated rendering functions.
By default, everything is black. Overrides are only inserted into the
generated LilyPond file if given. Values are character; either the hex color
or a named R color. The named list options include:
\itemize{
\item \code{color}
\item \code{background}
\item \code{staff}
\item \code{time}
\item \code{clef}
\item \code{bar}
\item \code{beam}
\item \code{head}
\item \code{stem}
\item \code{accidental}
\item \code{slur}
\item \code{tabhead}
\item \code{lyrics}
}
\code{color} is a global font color for the entire score. It affects staff
elements and \code{header} elements. It does not affect everything, e.g.,
page numbers.
\code{background} controls the background color of the entire page. Do not
use this if making a transparent background png with the \code{transparent}
argument available in the various \code{render_*} functions.
The other options are also global but override \code{color}. You can change
the color of elements broadly with \code{color} and then change the color of
specific elements using the other options.
There are currently some limitations. Specifically, if you provide any
\code{background} color override, most \code{header} elements will not
display.
}
\examples{
x <- phrase("c ec'g' ec'g'", "4 4 2", "5 432 432")
x <- track(x)
x <- score(x)
outfile <- file.path(tempdir(), "out.ly")
lilypond(x, outfile)
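# A hedged illustration (not from the original example): header, paper, and
# colors accept named lists as documented above; the values here are arbitrary.
lilypond(x, outfile, header = list(title = "Example score"),
  paper = list(fontsize = 14), colors = list(staff = "firebrick"))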
}
\seealso{
\code{\link{tab}}, \code{\link{render_chordchart}},
\code{\link{midily}}
}
|
/man/lilypond.Rd
|
permissive
|
owenjonesuob/tabr
|
R
| false
| true
| 5,830
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lilypond.R
\name{lilypond}
\alias{lilypond}
\title{Save score to LilyPond file}
\usage{
lilypond(
score,
file,
key = "c",
time = "4/4",
tempo = "2 = 60",
header = NULL,
paper = NULL,
string_names = NULL,
endbar = "|.",
midi = TRUE,
colors = NULL,
crop_png = TRUE,
simplify = TRUE
)
}
\arguments{
\item{score}{a score object.}
\item{file}{character, LilyPond output file ending in \code{.ly}. May
include an absolute or relative path.}
\item{key}{character, key signature, e.g., \code{c}, \code{b_}, \code{f#m},
etc.}
\item{time}{character, defaults to \code{"4/4"}.}
\item{tempo}{character, defaults to \code{"2 = 60"}.}
\item{header}{a named list of arguments passed to the header of the
LilyPond file. See details.}
\item{paper}{a named list of arguments for the LilyPond file page layout.
See details.}
\item{string_names}{label strings at beginning of tab staff. \code{NULL}
(default) for non-standard tunings only, \code{TRUE} or \code{FALSE} for
force on or off completely.}
\item{endbar}{character, the global end bar.}
\item{midi}{logical, add midi inclusion specification to LilyPond file.}
\item{colors}{a named list of LilyPond element color overrides. See details.}
\item{crop_png}{logical, alter template for cropped height. See
details.}
\item{simplify}{logical, uses \code{simplify_phrase} to convert to simpler,
more efficient LilyPond syntax.}
}
\value{
nothing returned; a file is written.
}
\description{
Write a score to a LilyPond format (\code{.ly}) text file for later use by
LilyPond or subsequent editing outside of R.
}
\details{
This function only writes a LilyPond file to disk. It does not require a
LilyPond installation. It checks for the version number of an installation,
but LilyPond is not required to be found.
This function can be used directly but is commonly used by \code{render_*}
functions, which call this function internally to create the LilyPond file
and then call LilyPond to render that file to sheet music.
}
\section{Header options}{
All \code{header} list elements are character strings. The options for
\code{header} include:
\itemize{
\item \code{title}
\item \code{subtitle}
\item \code{composer}
\item \code{album}
\item \code{arranger}
\item \code{instrument}
\item \code{meter}
\item \code{opus}
\item \code{piece}
\item \code{poet}
\item \code{copyright}
\item \code{tagline}
}
}
\section{Paper options}{
All \code{paper} list elements are numeric except \code{page_numbers} and
\code{print_first_page_number},
which are logical. \code{page_numbers = FALSE} suppresses all page numbering.
When \code{page_numbers = TRUE}, you can set
\code{print_first_page_number = FALSE} to suppress printing of only the
first page number. \code{first_page_number} is the number of the first page,
defaulting to 1, and determines all subsequent page numbers. These arguments
correspond to LilyPond paper block variables.
The options for \code{paper} include the following and have the following
default values if not provided:
\itemize{
\item \code{textheight = 220}
\item \code{linewidth = 150}
\item \code{indent = 0}
\item \code{fontsize = 10}
\item \code{page_numbers = TRUE}
\item \code{print_first_page_number = TRUE}
\item \code{first_page_number = 1}
}
}
\section{PNG-related options}{
By default \code{crop_png = TRUE}. This alters the template so that when
the LilyPond output file is created, it contains specifications for cropping
the image to the content when that file is rendered by LilyPond to png.
The image will have its width and height automatically cropped
rather than retain the standard page dimensions.
This only applies to png outputs made from the LilyPond file, not pdf.
This argument is also ignored if you explicitly provide \code{textheight} to
\code{paper}. You may still provide \code{linewidth} to \code{paper} if you
need to increase it beyond the default of 150 mm, generally as a result of
using a large \code{fontsize}.
Various \code{render_*} functions that wrap \code{lilypond} make use of this
argument as well.
}
\section{Color options}{
You can provide a named list of global color overrides for various sheet
music elements with the \code{colors} argument of \code{lilypond} or one of
the associated rendering functions.
By default, everything is black. Overrides are only inserted into the
generated LilyPond file if given. Values are character; either the hex color
or a named R color. The named list options include:
\itemize{
\item \code{color}
\item \code{background}
\item \code{staff}
\item \code{time}
\item \code{clef}
\item \code{bar}
\item \code{beam}
\item \code{head}
\item \code{stem}
\item \code{accidental}
\item \code{slur}
\item \code{tabhead}
\item \code{lyrics}
}
\code{color} is a global font color for the entire score. It affects staff
elements and \code{header} elements. It does not affect everything, e.g.,
page numbers.
\code{background} controls the background color of the entire page. Do not
use this if making a transparent background png with the \code{transparent}
argument available in the various \code{render_*} functions.
The other options are also global but override \code{color}. You can change
the color of elements broadly with \code{color} and then change the color of
specific elements using the other options.
There are currently some limitations. Specifically, if you provide any
\code{background} color override, most \code{header} elements will not
display.
}
\examples{
x <- phrase("c ec'g' ec'g'", "4 4 2", "5 432 432")
x <- track(x)
x <- score(x)
outfile <- file.path(tempdir(), "out.ly")
lilypond(x, outfile)
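
# A hedged sketch of the header, paper, and color overrides documented above;
# the values below are illustrative choices, not package defaults:
\dontrun{
lilypond(x, outfile,
  header = list(title = "Demo piece", composer = "Anon."),
  paper = list(fontsize = 14, page_numbers = FALSE),
  colors = list(color = "gray20", head = "red"))
}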
}
\seealso{
\code{\link{tab}}, \code{\link{render_chordchart}},
\code{\link{midily}}
}
|
rm(list=ls())
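# NOTE: `k` (number of position parameters), `nVar` (total number of decision
# variables) and `func` (the WFG2 objective function) are assumed to be
# defined in the calling environment; they are not defined in this script.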
meanHV <- NULL
nIterA <- c(1,250,500,1000)
nPoint <- 1000
# paretoSetSample <- matrix((randtoolbox::sobol(n = nPoint,dim = k,scrambling = 3))*(1:k)*2,nrow=k)
# paretoSetSample <- t(matrix((randtoolbox::sobol(k*nPoint))*(1:k)*2,ncol=k))
# paretoSetSample <- matrix((runif(k*nPoint)^50)*(1:k)*2,nrow=k)
#paretoSetSample <- matrix((runif(k*nPoint))*(1:k)*2,nrow=k)
paretoSetSample <- matrix(1:nPoint * (1/nPoint),nrow=1)*(1:k)*2 # wfg2
aa <- pracma::repmat(rep(0,nPoint),k-1,1)
paretoSetSample <- rbind(paretoSetSample,aa)
bb <- pracma::repmat(matrix(0.7*((k+1):nVar),ncol=1),1,nPoint)
paretoSetSample <- rbind(paretoSetSample,bb)
paretoFrontSample <- func(individual = paretoSetSample,nObj = 2,k = 20)
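# Analytic quarter-circle front for reference; both vectors are overwritten
# below by the sampled front, sorted along the first objective.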
xx <- (0:1000)/1000
yy <- (1-xx*xx)^0.5
xx <- paretoFrontSample[1,order(paretoFrontSample[1,])]
yy <- paretoFrontSample[2,order(paretoFrontSample[1,])]
for (nIter in 250) {
for(ID in c(3)){
sumHV <- 0
count <- 0
# instanceCount <- length(list.files(path = paste0("global_stepsize/WFG2/test",ID)))
for(instance in 1:10){
# print(paste(ID,instance))
load(paste0("WFG2/fixed_step/",nIter,"iter_ID",ID,"_instance",instance,".Rdata"))
sumHV <- sumHV + abc$hv_log
print(abc$hv_log)
count <- count + 1
plot(xx,yy,xlim=c(0,6),ylim=c(0,8),type="l")
points(abc$y)
title(paste(ID,instance))
}
meanHV <- append(meanHV,sumHV/count)
  }
}
View(meanHV)
|
/CC_SMS_results/postprocess.R
|
no_license
|
dots26/ccoevolution
|
R
| false
| false
| 1,393
|
r
|
# Computes summary statistics for the HEALS and BEST study populations
library(scales)
heals = read.table(sprintf("lowef_med_%s_bn.txt",
"heals_meta"),header=TRUE,as.is=TRUE)
healssnp = read.table(sprintf("med_%s_bn.txt",
"heals_meta"),header=TRUE,as.is=TRUE)
best = read.table(sprintf("lowef_med_%s_bn.txt",
"best_meta"),header=TRUE,as.is=TRUE)
bestsnp = read.table(sprintf("med_%s_bn.txt",
"best_meta"),header=TRUE,as.is=TRUE)
for(i in 2:4){
names(healssnp)[i] = paste0('snp', i-1)
names(bestsnp)[i] = paste0('snp', i-1)
}
healssnp = healssnp[,1:4]
bestsnp = bestsnp[,1:4]
features = c("id","X10.104623578","X10.104795134","exm1580829","wt_score",
"recoded_bin_score","Sex","Age","bmi","cig_curt","cig_ever",
"cig_former","Education_year","lnDMA","DMA_pct","lnUrineCreat",
"lnUrineAs","WArsenic","UrAsgmCr")
best_features = c("id","X10.104623578","X10.104795134","exm1580829",
"wt_score","recoded_bin_score","Sex","Age","bmi",
"cig_curt","cig_ever","cig_former","Education_year",
"lnUrineCreat","lnUrineAs",'UrAsgmCr')
heals = heals[,features]
best = best[,best_features]
drops = c("exm1580829","wt_score",'recoded_bin_score')
heals_excl_snp3 = heals[,!(names(heals) %in% drops)]
best_excl_snp3 = best[,!(names(best) %in% drops)]
heals = merge(heals, healssnp, by="id")
best = merge(best, bestsnp, by="id")
heals = heals[complete.cases(heals_excl_snp3),]
best = best[complete.cases(best_excl_snp3),]
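# Build 0/1 indicator variables for demographic, smoking, BMI, genotype (SNP)
# and education categories in both cohorts.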
Male_heals = as.numeric(heals$Sex == 1)
Female_heals = as.numeric(heals$Sex == 2)
Male_best = as.numeric(best$Sex == 1)
Female_best = as.numeric(best$Sex == 2)
Cignever_heals = as.numeric(heals$cig_ever == 0)
Cignever_best = as.numeric(best$cig_ever == 0)
BMIless18pt5_heals = as.numeric(heals$bmi < 18.5)
BMIless18pt5_best = as.numeric(best$bmi < 18.5)
BMI18pt5to25_heals = as.numeric(heals$bmi >= 18.5 & heals$bmi < 25)
BMI18pt5to25_best = as.numeric(best$bmi >= 18.5 & best$bmi < 25)
BMIover25_heals = as.numeric(heals$bmi >= 25)
BMIover25_best = as.numeric(best$bmi >= 25)
snp1_0_heals = as.numeric(heals$snp1 == 0)
snp1_1_heals = as.numeric(heals$snp1 == 1)
snp1_2_heals = as.numeric(heals$snp1 == 2)
snp2_0_heals = as.numeric(heals$snp2 == 0)
snp2_1_heals = as.numeric(heals$snp2 == 1)
snp2_2_heals = as.numeric(heals$snp2 == 2)
snp3_0_heals = as.numeric(heals$snp3 == 0)
snp3_1_heals = as.numeric(heals$snp3 == 1)
snp3_2_heals = as.numeric(heals$snp3 == 2)
snp1_0_best = as.numeric(best$snp1 == 0)
snp1_1_best = as.numeric(best$snp1 == 1)
snp1_2_best = as.numeric(best$snp1 == 2)
snp2_0_best = as.numeric(best$snp2 == 0)
snp2_1_best = as.numeric(best$snp2 == 1)
snp2_2_best = as.numeric(best$snp2 == 2)
snp3_0_best = as.numeric(best$snp3 == 0)
snp3_1_best = as.numeric(best$snp3 == 1)
snp3_2_best = as.numeric(best$snp3 == 2)
Educ0_heals = as.numeric(heals$Education_year == 0)
Educ0_best = as.numeric(best$Education_year == 0)
range1to5_heals = heals$Education_year >= 1 & heals$Education_year <= 5
range1to5_best = best$Education_year >= 1 & best$Education_year <= 5
Educ1to5_heals = as.numeric(range1to5_heals)
Educ1to5_best = as.numeric(range1to5_best)
Educ6up_heals = as.numeric(heals$Education_year >= 6)
Educ6up_best = as.numeric(best$Education_year >= 6)
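# Bind the indicator columns, keep only those columns (from column 22 onward
# for HEALS, column 19 for BEST), sum them to counts, and convert to percentages.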
heals_ct = cbind(heals, Male_heals, Female_heals, Cignever_heals,
heals$cig_former, heals$cig_curt, BMIless18pt5_heals,
BMI18pt5to25_heals, BMIover25_heals, snp1_0_heals,
snp1_1_heals, snp1_2_heals, snp2_0_heals, snp2_1_heals,
snp2_2_heals, snp3_0_heals,snp3_1_heals,snp3_2_heals,
Educ0_heals, Educ1to5_heals, Educ6up_heals)
best_ct = cbind(best, Male_best, Female_best, Cignever_best,
best$cig_former, best$cig_curt, BMIless18pt5_best,
BMI18pt5to25_best, BMIover25_best, snp1_0_best, snp1_1_best,
snp1_2_best, snp2_0_best, snp2_1_best, snp2_2_best,
snp3_0_best,snp3_1_best,snp3_2_best, Educ0_best,
Educ1to5_best, Educ6up_best)
heals_ct = heals_ct[,22:dim(heals_ct)[2]]
heals_ct = colSums(heals_ct, na.rm = TRUE)
heals_perc = percent(heals_ct/dim(heals)[1])
best_ct = best_ct[,19:dim(best_ct)[2]]
best_ct = colSums(best_ct, na.rm = TRUE)
best_perc = percent(best_ct/dim(best)[1])
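# Continuous variables: report medians, with the 1st and 3rd quartiles as IQR bounds.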
Age_heals = summary(heals$Age)
Age_best = summary(best$Age)
lnWAs_heals = summary(heals$WArsenic)
lnUrAsgmCr_heals = summary(heals$UrAsgmCr)
lnUrAsgmCr_best = summary(best$UrAsgmCr)
wt_score_heals = summary(heals$wt_score)
wt_score_best = summary(best$wt_score)
DMApct_heals = summary(heals$DMA_pct)
lnDMA_heals = summary(heals$lnDMA)
heals_ct = c(heals_ct, Age_heals[[3]], lnWAs_heals[[3]],
lnUrAsgmCr_heals[[3]], DMApct_heals[[3]],
lnDMA_heals[[3]], wt_score_heals[[3]])
# Note that slashes are used in IQRs instead of commas, due to CSV format.
heals_perc = c(heals_perc, sprintf("%s/%s",Age_heals[[2]], Age_heals[[5]]),
sprintf("%s/%s",lnWAs_heals[[2]], lnWAs_heals[[5]]),
sprintf("%s/%s",lnUrAsgmCr_heals[[2]],
lnUrAsgmCr_heals[[5]]),
sprintf("%s/%s", DMApct_heals[[2]], DMApct_heals[[5]]),
sprintf("%s/%s", lnDMA_heals[[2]], lnDMA_heals[[5]]),
sprintf("%s/%s", wt_score_heals[[2]], wt_score_heals[[5]]))
best_ct = c(best_ct, Age_best[[3]], NA, lnUrAsgmCr_best[[3]], NA, NA,
wt_score_best[[3]])
best_perc = c(best_perc, sprintf("%s/%s", Age_best[[2]], Age_best[[5]]), NA,
sprintf("%s/%s", lnUrAsgmCr_best[[2]], lnUrAsgmCr_best[[5]]),
NA, NA, sprintf("%s/%s", wt_score_best[[2]],
wt_score_best[[5]]))
test = rbind(heals_ct,heals_perc,best_ct,best_perc)
heals_final = paste(test[1,]," (",test[2,],")",sep="")
best_final = paste(test[3,]," (",test[4,],")",sep="")
data = rbind(test,heals_final, best_final)[5:6,]
data = t(data)
data = data[2:dim(data)[1],]
write.csv(data,file="describe.csv", quote=FALSE)
|
/descriptive_stats.R
|
no_license
|
digiovannia/arsenic-metabolism-MR
|
R
| false
| false
| 6,285
|
r
|
library(SNFtool)
library(cluster)
source('SimSilhouette.R')
source('getAffyMat.R')
setwd('~/SNF')
cancer <- c( "Breast", "Colon", "GBM" )
write("cancer\tK\talpah\tC\tsi", file = "SNF_2015022.txt", append = FALSE)
for( cname in cancer ) {
GFL <- read.csv( sprintf('%s/Beta-GFLasso-%s.csv', cname, cname), row.names=1 )
Lasso <- read.csv( sprintf('%s/Beta-Lasso-%s.csv', cname, cname), row.names=1 )
SGL <- read.csv( sprintf('%s/Beta-SGL-%s.csv', cname, cname), row.names=1 )
input <- list( GFL, Lasso, SGL )
names( input ) <- c( "GFL", "Lasso", "SGL" )
distMatrix <- list()
for( name in names(input) ) {
X <- t(as.matrix(input[[name]]))
    SX <- standardNormalization( X ) # normalizes the data (not the distance); note: SX is computed but not used below
distMatrix[[name]] <- dist2( X, X )
}
iter <- 20
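  # Grid search: K = number of nearest neighbours, alpha = kernel width,
  # C = number of clusters; the mean silhouette width is logged for each triple.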
for( K in 2:20 ) {
for( alpha in seq(0.3,0.8,by=0.1) ) {
for( C in 2:20 ) {
WS <- list()
for( name in names(distMatrix) ) {
D <- distMatrix[[name]]
WS[[name]] <- getAffyMat(D, K, alpha)
}
W <- SNF( WS, K, iter )
group <- spectralClustering( W, C )
si <- SimSilhouette( W, group )
result <- sprintf('%s\t%d\t%f\t%d\t%f', cname, K, alpha, C, mean(si) )
print(result)
write(result, file = "SNF_2015022.txt", append = TRUE)
}
}
}
}
|
/R-Code/initial_SNF_code.R
|
no_license
|
vrrani/IntegrativeRegressionNetwork
|
R
| false
| false
| 1,473
|
r
|
# hello world
# 1. red
# 2.
# 3.
# 4. NA
|
/Tian.R
|
no_license
|
islangrock/Github_Practice
|
R
| false
| false
| 42
|
r
|
#####################
##################### outputing paper graphics
library("RPToolsDB")
library(shiny)
library(DT)
library("RPPlotUtils")
library(dplyr)
library(ggplot2)   # used throughout the plotting functions below
library(reshape2)  # melt()
library(scales)    # percent_format()
user = 'sduprey'
# JIRA Code (e.g. NAR-#)
JIRACode = 'NAR-326'
repoPath = RP_GetSharedPath(user)
# Input Data Path
inputDataPath = paste(repoPath,'InputData/', user,'/',JIRACode,'/',sep="")
# Output Data Path
outputDataPath = paste(repoPath,'OutputData/', user,'/',JIRACode,'/',sep="")
source("./RCode/RP_BigData_EventStudy_Utilities.R")
################################
################################
################################
################################
################################
################################
################################ Plotting functions
RP_PlotCIInterval <- function(DataFrame, Title = "", FullExportingPath=NULL) {
yLabel <- NULL
if(Title == "Abnormal returns"){
yLabel <- "BPS cumultated return"
}
if(Title == "Abnormal volume" | Title == "Abnormal volatility"){
yLabel <- "Abnormal ratio"
}
g <- ggplot(DataFrame, aes(MINUTES, MEAN))+
geom_point()+
geom_line(data=DataFrame,size=1.2, color = "blue")+
geom_ribbon(data=DataFrame,aes(ymin=CI_LOW,ymax=CI_HIGH), color = "blue",alpha=0.25,fill="blue")+
xlab("Minute Lags") +
ylab(yLabel) +
ggtitle(Title)+
theme(title = element_text(size = 28, face = "bold")) +
theme(axis.text.x = element_text(size = 24)) +
theme(axis.title.x = element_text(size = 24)) +
theme(axis.text.y = element_text(size = 24)) +
theme(axis.title.y = element_text(size = 24)) +
theme(legend.position = c(0.9, 0.9), legend.box = "vertical",
legend.text = element_text(size = 22)) + theme(legend.position = "bottom",
legend.title = element_blank())
if(Title == "Statistical significance"){
g <- g + scale_y_continuous(labels = percent_format())
}
# theme(axis.text=element_text(size=16),
# axis.title=element_text(size=18,face="bold")) +
# theme(plot.title = element_text(size = 25, face = "bold"))
#
g <- g + geom_vline(aes(xintercept=0),colour = 'black', size = 1.5,linetype="dashed")
if (!is.null(FullExportingPath)){
RP_ExportPlot(g,FullExportingPath,"")
}
return(g)
}
outputGraphicsBestProfileCI <- function( data, Russell_version = "R1000", type = "RET"){
toplotDF <- NULL
MyTitle <- ""
if (type == "VOL"){
toplotDF<- data[,c("MINUTES","VOLATILITY_CI_LOW","VOLATILITY","VOLATILITY_CI_HIGH")]
colnames(toplotDF) <- c("MINUTES","CI_LOW","MEAN","CI_HIGH")
MyTitle <- "Abnormal volatility"
# toplotDF <- toplotDF+0.8
# toplotDF$MINUTES <- toplotDF$MINUTES - 0.8
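    # Shift all value columns so their minimum sits at 1.2, then undo the
    # shift on MINUTES, which must remain the original minute lags.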
translatingFactor <- (1.2-min(toplotDF[,-1]))
toplotDF <- toplotDF+translatingFactor
toplotDF$MINUTES <- toplotDF$MINUTES - translatingFactor
}
if(type =="RET"){
toplotDF<- data[,c("MINUTES","RETS_CI_LOW","RETS","RETS_CI_HIGH")]
colnames(toplotDF) <- c("MINUTES","CI_LOW","MEAN","CI_HIGH")
MyTitle <- "Abnormal returns"
}
if(type =="VOLU"){
toplotDF<- data[,c("MINUTES","VOLUME_CI_LOW","VOLUME","VOLUME_CI_HIGH")]
colnames(toplotDF) <- c("MINUTES","CI_LOW","MEAN","CI_HIGH")
MyTitle <- "Abnormal volume"
# toplotDF <- toplotDF+0.8
# toplotDF$MINUTES <- toplotDF$MINUTES - 0.8
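    # Same shift as the volatility branch: move the value columns so their
    # minimum sits at 1.2 while keeping MINUTES unchanged.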
translatingFactor <- (1.2-min(toplotDF[,-1]))
toplotDF <- toplotDF+translatingFactor
toplotDF$MINUTES <- toplotDF$MINUTES - translatingFactor
}
if(type =="STATS"){
toplotDF<- data[,c("MINUTES","SIGNIFICANCE_CI_LOW","SIGNIFICANCE","SIGNIFICANCE_CI_HIGH")]
colnames(toplotDF) <- c("MINUTES","CI_LOW","MEAN","CI_HIGH")
MyTitle <- "Statistical significance"
}
g2 <- NULL
if(type == "STATS_COMBINED"){
toplotDF<- data[,c("MINUTES","SIGNIFICANCE(RANK)_CI_LOW","SIGNIFICANCE(RANK)","SIGNIFICANCE(RANK)_CI_HIGH","SIGNIFICANCE_CI_LOW","SIGNIFICANCE","SIGNIFICANCE_CI_HIGH")]
MyTitle <- "Statistical significance"
g2 <- RP_PlotCIIntervalSuperposedStats(DataFrame = toplotDF, Title = MyTitle, FullExportingPath = NULL)
} else {
g2 <- RP_PlotCIInterval(DataFrame = toplotDF, Title = MyTitle, FullExportingPath = NULL)
}
return(g2)
}
RP_PlotProfile <- function(rowProfile){
stats_sign <- colnames(rowProfile)[which(!is.na(as.numeric(colnames(rowProfile))))]
rets <- paste0("RET",colnames(rowProfile)[which(!is.na(as.numeric(colnames(rowProfile))))])
ord_stats_sign <- paste0("ORD",colnames(rowProfile)[which(!is.na(as.numeric(colnames(rowProfile))))])
vol_stats_sign <- paste0("VOLU",colnames(rowProfile)[which(!is.na(as.numeric(colnames(rowProfile))))])
vola_stats_sign <- paste0("VOLA",colnames(rowProfile)[which(!is.na(as.numeric(colnames(rowProfile))))])
stats_sign <- rowProfile[,stats_sign]
rets <- rowProfile[,rets]
colnames(rets) <- colnames(stats_sign)
ord_stats_sign <- rowProfile[,ord_stats_sign]
colnames(ord_stats_sign) <- colnames(stats_sign)
vol_stats_sign <- rowProfile[,vol_stats_sign]
colnames(vol_stats_sign) <- colnames(stats_sign)
vola_stats_sign <- rowProfile[,vola_stats_sign]
colnames(vola_stats_sign) <- colnames(stats_sign)
  dataframe <- as.data.frame(t(rbind(stats_sign,ord_stats_sign,vol_stats_sign,vola_stats_sign,rets)))
colnames(dataframe) <- c("SIGNIFICANCE(RANK)_CI_LOW","SIGNIFICANCE(RANK)","SIGNIFICANCE(RANK)_CI_HIGH","SIGNIFICANCE_CI_LOW","SIGNIFICANCE","SIGNIFICANCE_CI_HIGH","VOLUME_CI_LOW","VOLUME","VOLUME_CI_HIGH","VOLATILITY_CI_LOW","VOLATILITY","VOLATILITY_CI_HIGH","RETS_CI_LOW","RETS","RETS_CI_HIGH")
dataframe$MINUTES <- as.numeric(colnames(rowProfile)[which(!is.na(as.numeric(colnames(rowProfile))))])
dataframe <- dataframe[,c("MINUTES","SIGNIFICANCE(RANK)_CI_LOW","SIGNIFICANCE(RANK)","SIGNIFICANCE(RANK)_CI_HIGH","SIGNIFICANCE_CI_LOW","SIGNIFICANCE","SIGNIFICANCE_CI_HIGH","VOLUME_CI_LOW","VOLUME","VOLUME_CI_HIGH","VOLATILITY_CI_LOW","VOLATILITY","VOLATILITY_CI_HIGH","RETS_CI_LOW","RETS","RETS_CI_HIGH")]
gret <- outputGraphicsBestProfileCI(dataframe,Russell_version = "R1000", type = "RET")
gvol <- outputGraphicsBestProfileCI(dataframe,Russell_version = "R1000", type = "VOL")
gvolu <- outputGraphicsBestProfileCI(dataframe,Russell_version = "R1000", type = "VOLU")
gstats <- outputGraphicsBestProfileCI(dataframe,Russell_version = "R1000", type = "STATS")
gstatscombi <- outputGraphicsBestProfileCI(dataframe,Russell_version = "R1000", type = "STATS_COMBINED")
return(list(gret=gret,gvol=gvol,gvolu=gvolu,gstats=gstats,gstatscombi=gstatscombi))
}
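# Turn cumulative columns into column-to-column increments; the first column
# is then set equal to the (differenced) second so it has a defined value.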
differentiateRets <- function(toDF){
newToDF <- toDF
for (i in 2:dim(toDF)[2]){
newToDF[,i] <- toDF[,i] - toDF[,(i-1)]
}
newToDF[,1] <- newToDF[,2]
return(newToDF)
}
PlotDataFrame <- function (DataFrame, XLab = "", YLab = "", Title = "", AxisIncluded = FALSE,
byRows = FALSE, spread = FALSE, pointOnly = FALSE, lineOnly = TRUE, percent=FALSE,
FullExportingPath = NULL, ylim = FALSE)
{
if (byRows)
DataFrame = t(DataFrame)
if (is.null(colnames(DataFrame)))
colnames(DataFrame) <- paste0("COLUMN_", seq(1, dim(DataFrame)[2]))
if (class(DataFrame) == "matrix")
DataFrame <- as.data.frame(DataFrame)
if (AxisIncluded) {
my_column_to_plot_against <- colnames(DataFrame)[1]
}
else {
my_column_to_plot_against <- "TO_PLOT_AGAINST"
DataFrame$TO_PLOT_AGAINST <- as.numeric(seq(1, dim(DataFrame)[1]))
}
ToPlotDataFrame <- melt(DataFrame, my_column_to_plot_against)
ToPlotDataFrame <- ToPlotDataFrame[(ToPlotDataFrame$variable != "ABNORMAL_THRESHOLD"),]
ToPlotDataFrame <- ToPlotDataFrame[(ToPlotDataFrame$variable != "threshold"),]
g <- ggplot(ToPlotDataFrame, aes(x = ToPlotDataFrame[, c(my_column_to_plot_against)],
y = value, group = variable, color = variable, fill = variable))
if (pointOnly) {
g <- g + geom_point()
}
else if (lineOnly) {
g <- g + geom_line()
}
else {
g <- g + geom_line() + geom_point()
}
if ("threshold" %in% colnames(DataFrame)){
g <- g + geom_hline(aes(yintercept=0.95),colour = 'black', size = 1.5,linetype="dashed")
}
if ("ABNORMAL_THRESHOLD" %in% colnames(DataFrame)){
g <- g + geom_hline(aes(yintercept=0.95),colour = 'black', size = 1.5,linetype="dashed")
}
g <- g + xlab(XLab) + ylab(YLab) + ggtitle(Title) + theme(title = element_text(size = 16,
face = "bold")) + theme(axis.text.x = element_text(size = 14)) +
theme(legend.position = c(0.9, 0.9), legend.box = "vertical",
legend.text = element_text(size = 16)) + theme(legend.position = "bottom",
legend.title = element_blank())+theme(axis.text=element_text(size=14),
axis.title=element_text(size=16,face="bold"))
if (spread)
g <- g + facet_wrap(~variable)
  if (percent & ylim){
    g <- g + scale_y_continuous(labels = percent_format(), limits = c(0.5, 0.9))
  } else if (percent){
    g <- g + scale_y_continuous(labels = percent_format(), limits = c(0, 1))
  }
if ("DATE" == my_column_to_plot_against)
g <- g + scale_x_date()
if (!is.null(FullExportingPath))
RP_ExportPlot(g, FullExportingPath, "")
return(g)
}
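# Hedged usage sketch with hypothetical data (not part of the original file):
# df <- data.frame(MINUTES = -5:5, A = cumsum(rnorm(11)), B = cumsum(rnorm(11)))
# PlotDataFrame(df, AxisIncluded = TRUE, XLab = "Minute Lags", YLab = "value")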
outputGraphicsBestProfileStats <- function(product_criteria,aggregate_criteria,sentiment_criteria,similarity_gap_filter,ens_filter,event_number_event_filtering, gics_sector, my_event, localSource, dataFrame, plotInArborescence, Russell_version = "R1000", Together = FALSE, ylim = FALSE){
if(Together){
dataframestats <- dataFrame[,c("MINUTES","RDBA_COR_STATS_SIGN","RDBA_ORD_STATS_SIGN","RPNA_COR_STATS_SIGN","RPNA_ORD_STATS_SIGN")]
colnames(dataframestats) <- c("MINUTES","RDBA_SIGNIFICANCE(RANK)","RDBA_SIGNIFICANCE","RPNA_SIGNIFICANCE(RANK)","RPNA_SIGNIFICANCE")
} else {
dataframestats <- dataFrame[,c("MINUTES","COR_STATS_SIGN","ORD_STATS_SIGN")]
colnames(dataframestats) <- c("MINUTES","SIGNIFICANCE(RANK)","SIGNIFICANCE")
}
significance_threshold <- 1 - 0.05
dataframestats$ABNORMAL_THRESHOLD <- significance_threshold
dataframestats[is.na(dataframestats)] <- 0
g1 <- PlotDataFrame(dataframestats,AxisIncluded = T,XLab = "Minute Lags",YLab = "p-value",percent= TRUE, Title = paste0(my_event," statistical significance"), FullExportingPath = NULL, ylim = ylim)
# g2 <- RP_PlotDataFrame(dataframerets,AxisIncluded = T,XLab = "Minute Lags",YLab = "BPS cumulated minute returns",Title = my_event, FullExportingPath = NULL)
# g <- RP_ExportMultiplePlot(g1,g2, plotlist = NULL, filename = NULL, outputDataPath = NULL, cols = 1, width = 10, height = 15)
return(g1)
}
outputGraphicsBestProfileRets <- function(product_criteria,aggregate_criteria,sentiment_criteria,similarity_gap_filter,ens_filter,event_number_event_filtering, gics_sector, my_event, localSource, dataFrame, plotInArborescence, Russell_version = "R1000", Together = FALSE){
if(Together){
dataframerets <- dataFrame[,c("MINUTES","RDBA_RETS","RPNA_RETS")]
colnames(dataframerets) <- c("MINUTES","RDBA_RETURNS","RPNA_RETURNS")
# dataframerets$RDBA_RETURNS <- 10*dataframerets$RDBA_RETURNS
# dataframerets$RPNA_RETURNS <- 10*dataframerets$RPNA_RETURNS
} else {
dataframerets <- dataFrame[,c("MINUTES","RETS")]
colnames(dataframerets) <- c("MINUTES","RETURNS")
# dataframerets$RETURNS <- 10*dataframerets$RETURNS
}
# g1 <- RP_PlotDataFrame(dataframestats,AxisIncluded = T,XLab = "Minute Lags",YLab = "p-value",Title = my_event, FullExportingPath = NULL)
g2 <- PlotDataFrame(dataframerets,AxisIncluded = T,XLab = "Minute Lags",YLab = "BPS cumulated minute returns",Title = paste0(my_event," returns"), FullExportingPath = NULL)
# g <- RP_ExportMultiplePlot(g1,g2, plotlist = NULL, filename = NULL, outputDataPath = NULL, cols = 1, width = 10, height = 15)
return(g2)
}
outputGraphicsBestProfileVol <- function(product_criteria,aggregate_criteria,sentiment_criteria,similarity_gap_filter,ens_filter,event_number_event_filtering, gics_sector, my_event, localSource, dataFrame, plotInArborescence, Russell_version = "R1000", Together = FALSE){
if(Together){
dataframevol<- dataFrame[,c("MINUTES","RDBA_VOLUME","RPNA_VOLUME")]
} else {
dataframevol<- dataFrame[,c("MINUTES","VOLUME")]
}
# g1 <- RP_PlotDataFrame(dataframestats,AxisIncluded = T,XLab = "Minute Lags",YLab = "p-value",Title = my_event, FullExportingPath = NULL)
g2 <- PlotDataFrame(dataframevol,AxisIncluded = T,XLab = "Minute Lags",YLab = "Volume in billion dollars ",Title = paste0(my_event," abnormal volume"), FullExportingPath = NULL)
# g <- RP_ExportMultiplePlot(g1,g2, plotlist = NULL, filename = NULL, outputDataPath = NULL, cols = 1, width = 10, height = 15)
return(g2)
}
outputGraphicsBestProfileVola <- function(product_criteria,aggregate_criteria,sentiment_criteria,similarity_gap_filter,ens_filter,event_number_event_filtering, gics_sector, my_event, localSource, dataFrame, plotInArborescence, Russell_version = "R1000", Together = FALSE){
if(Together){
dataframervola<- dataFrame[,c("MINUTES","RDBA_VOLATILITY","RPNA_VOLATILITY")]
} else {
dataframervola<- dataFrame[,c("MINUTES","VOLATILITY")]
}
g2 <- PlotDataFrame(dataframervola,AxisIncluded = T,XLab = "Minute Lags",YLab = "Abnormal volatility ratio",Title = paste0(my_event," abnormal volatility"), FullExportingPath = NULL)
return(g2)
}
outputTogetherGraphicsBestProfileStats <- function(event_one, event_two, dataFrame, plotInArborescence, Russell_version = "R1000", Together = FALSE){
if(Together){
dataframestats <- dataFrame[,c("MINUTES",paste0(event_one,c("COR_STATS_SIGN_1","ORD_STATS_SIGN_1")),paste0(event_two,c("COR_STATS_SIGN_2","ORD_STATS_SIGN_2")))]
colnames(dataframestats) <- c("MINUTES",paste0(event_one,c("_rank_1","_stat_1")),paste0(event_two,c("_rank_2","_stat_2")))
} else {
dataframestats <- dataFrame[,c("MINUTES","COR_STATS_SIGN","ORD_STATS_SIGN")]
colnames(dataframestats) <- c("MINUTES","SIGNIFICANCE(RANK)","SIGNIFICANCE")
}
significance_threshold <- 1 - 0.05
dataframestats$threshold <- significance_threshold
dataframestats[is.na(dataframestats)] <- 0
g1 <- PlotDataFrame(dataframestats,AxisIncluded = T,XLab = "Minute Lags",YLab = "p-value",percent= TRUE, Title = "Statistical Significance", FullExportingPath = NULL)
# g2 <- RP_PlotDataFrame(dataframerets,AxisIncluded = T,XLab = "Minute Lags",YLab = "BPS cumulated minute returns",Title = my_event, FullExportingPath = NULL)
# g <- RP_ExportMultiplePlot(g1,g2, plotlist = NULL, filename = NULL, outputDataPath = NULL, cols = 1, width = 10, height = 15)
return(g1)
}
outputTogetherGraphicsBestProfileRets <- function(event_one, event_two, dataFrame, plotInArborescence, Russell_version = "R1000", Together = FALSE){
if(Together){
dataframerets <- dataFrame[,c("MINUTES",paste0(event_one,"RETS_1"),paste0(event_two,"RETS_2"))]
colnames(dataframerets) <- c("MINUTES",paste0(event_one,"_return_1"),paste0(event_two,"_return_2"))
dataframerets[,c("MINUTES",paste0(event_one,"_return_1"),paste0(event_two,"_return_2"))] <- 10*dataframerets[,c("MINUTES",paste0(event_one,"_return_1"),paste0(event_two,"_return_2"))]
} else {
dataframerets <- dataFrame[,c("MINUTES","RETS")]
colnames(dataframerets) <- c("MINUTES","RETURNS")
dataframerets$RETURNS <- 10*dataframerets$RETURNS
}
# g1 <- RP_PlotDataFrame(dataframestats,AxisIncluded = T,XLab = "Minute Lags",YLab = "p-value",Title = my_event, FullExportingPath = NULL)
g2 <- PlotDataFrame(dataframerets,AxisIncluded = T,XLab = "Minute Lags",YLab = "BPS cumulated minute returns",Title = "Abnormal Returns", FullExportingPath = NULL)
# g <- RP_ExportMultiplePlot(g1,g2, plotlist = NULL, filename = NULL, outputDataPath = NULL, cols = 1, width = 10, height = 15)
return(g2)
}
outputTogetherGraphicsBestProfileVol <- function(event_one, event_two, dataFrame, plotInArborescence, Russell_version = "R1000", Together = FALSE){
if(Together){
dataframevol <- dataFrame[,c("MINUTES",paste0(event_one,"VOLUME_1"),paste0(event_two,"VOLUME_2"))]
colnames(dataframevol) <- c("MINUTES",paste0(event_one,"_volume_1"),paste0(event_two,"_volume_2"))
} else {
dataframevol<- dataFrame[,c("MINUTES","VOLUME")]
}
# g1 <- RP_PlotDataFrame(dataframestats,AxisIncluded = T,XLab = "Minute Lags",YLab = "p-value",Title = my_event, FullExportingPath = NULL)
g2 <- PlotDataFrame(dataframevol,AxisIncluded = T,XLab = "Minute Lags",YLab = "Volume in billion dollars ",Title = "Abnormal Volume", FullExportingPath = NULL)
# g <- RP_ExportMultiplePlot(g1,g2, plotlist = NULL, filename = NULL, outputDataPath = NULL, cols = 1, width = 10, height = 15)
return(g2)
}
outputTogetherGraphicsBestProfileVola <- function(event_one, event_two, dataFrame, plotInArborescence, Russell_version = "R1000", Together = FALSE){
if(Together){
dataframervola <- dataFrame[,c("MINUTES",paste0(event_one,"VOLATILITY_1"),paste0(event_two,"VOLATILITY_2"))]
colnames(dataframervola) <- c("MINUTES",paste0(event_one,"_volatility_1"),paste0(event_two,"_volatility_2"))
} else {
dataframervola<- dataFrame[,c("MINUTES","VOLATILITY")]
}
g2 <- PlotDataFrame(dataframervola,AxisIncluded = T,XLab = "Minute Lags",YLab = "Abnormal volatility ratio",Title = "Abnormal Volatility", FullExportingPath = NULL)
return(g2)
}
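# Arrange a set of ggplot objects in a grid layout and export them to a single
# PNG; `cols` sets the number of columns, and width/height are in hundreds of pixels.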
ExportMultiplot <- function (..., plotlist = NULL, filename, outputDataPath, cols = 1,
width, height, layout = NULL)
{
filename <- paste(outputDataPath, filename, ".png", sep = "")
png(file = filename,
width = width * 100, height = height * 100)
library(grid)
plots <- c(list(...), plotlist)
numPlots = length(plots)
if (is.null(layout)) {
layout <- matrix(seq(1, cols * ceiling(numPlots/cols)),
ncol = cols, nrow = ceiling(numPlots/cols))
}
if (numPlots == 1) {
print(plots[[1]])
}
else {
grid.newpage()
pushViewport(viewport(layout = grid.layout(nrow(layout),
ncol(layout))))
for (i in 1:numPlots) {
matchidx <- as.data.frame(which(layout == i, arr.ind = TRUE))
print(plots[[i]], vp = viewport(layout.pos.row = matchidx$row,
layout.pos.col = matchidx$col))
}
}
dev.off()
return(filename)
}
getPlotBestProfiles <- function(aggregate_criteria, data, all_group_events, all_category_events,suffix){
  if (aggregate_criteria == "GROUP"){
    plotLimit <- 6
all_events <- all_group_events
} else {
plotLimit <- 16
all_events <- all_category_events
}
for (i in 1:min(plotLimit, dim(data)[1])){
rowProfile <- data[i,]
stats_sign <- colnames(rowProfile)[which(!is.na(as.numeric(colnames(rowProfile))))]
rets <- paste0("RET",colnames(rowProfile)[which(!is.na(as.numeric(colnames(rowProfile))))])
ord_stats_sign <- paste0("ORD",colnames(rowProfile)[which(!is.na(as.numeric(colnames(rowProfile))))])
vol_stats_sign <- paste0("VOLU",colnames(rowProfile)[which(!is.na(as.numeric(colnames(rowProfile))))])
vola_stats_sign <- paste0("VOLA",colnames(rowProfile)[which(!is.na(as.numeric(colnames(rowProfile))))])
stats_sign <- rowProfile[,stats_sign]
rets <- rowProfile[,rets]
colnames(rets) <- colnames(stats_sign)
ord_stats_sign <- rowProfile[,ord_stats_sign]
colnames(ord_stats_sign) <- colnames(stats_sign)
vol_stats_sign <- rowProfile[,vol_stats_sign]
colnames(vol_stats_sign) <- colnames(stats_sign)
vola_stats_sign <- rowProfile[,vola_stats_sign]
colnames(vola_stats_sign) <- colnames(stats_sign)
dataframe <- as.data.frame(t(rbind(stats_sign,ord_stats_sign,vol_stats_sign,vola_stats_sign,rets)))
colnames(dataframe) <- c("COR_STATS_SIGN","ORD_STATS_SIGN","VOLUME","VOLATILITY","RETS")
dataframe$MINUTES <- as.numeric(colnames(rowProfile)[which(!is.na(as.numeric(colnames(rowProfile))))])
dataframe <- dataframe[,c("MINUTES","COR_STATS_SIGN","ORD_STATS_SIGN","VOLUME","VOLATILITY","RETS")]
gret <- outputGraphicsBestProfileRets(rowProfile$product_criteria,rowProfile$aggregate_criteria,rowProfile$sentiment_criteria,rowProfile$similarity_gap_filter,rowProfile$ens_filter,rowProfile$event_number_event_filtering, rowProfile$gics_sector, rowProfile$EVENT, rowProfile$localSource, dataFrame = dataframe, FALSE, Russell_version = "R1000")
gvol <- outputGraphicsBestProfileVola(rowProfile$product_criteria,rowProfile$aggregate_criteria,rowProfile$sentiment_criteria,rowProfile$similarity_gap_filter,rowProfile$ens_filter,rowProfile$event_number_event_filtering, rowProfile$gics_sector, rowProfile$EVENT, rowProfile$localSource, dataFrame = dataframe, FALSE, Russell_version = "R1000")
assign(paste0("gret", i), gret)
assign(paste0("gvol", i), gvol)
}
width=20
height=22.5
gname <- NULL
if (aggregate_criteria == "GROUP"){
gname <- ExportMultiplot(gret1,gvol1,gret2,gvol2,gret3,gvol3,gret4,gvol4,gret5,gvol5,plotlist = NULL, filename = paste0("bestGroups",suffix), outputDataPath = paste0(outputDataPath,"BEST_PICTURES/"), cols = 5, width = width, height = height)
} else {
gname <- ExportMultiplot(gret1,gvol1,
gret2,gvol2,
gret3,gvol3,
gret4,gvol4,
gret5,gvol5,
gret6,gvol6,
gret7,gvol7,
gret8,gvol8,
gret9,gvol9,
gret10,gvol10,
plotlist = NULL, filename = paste0("bestCategories",suffix), outputDataPath = paste0(outputDataPath,"BEST_PICTURES/"), cols = 5, width = width, height = height)
}
# print("multiple plot done")
return(gname)
}
RP_PlotCIIntervalSuperposedStats <- function(DataFrame, Title = "", FullExportingPath=NULL) {
MyTitle <- ""
# toplotDFOne<- DataFrameOne[,c("MINUTES","SIGNIFICANCE_CI_LOW","SIGNIFICANCE","SIGNIFICANCE_CI_HIGH")]
# # colnames(toplotDFOne) <- c("MINUTES","CI_LOW","MEAN","CI_HIGH")
# toplotDFTwo<- DataFrameTwo[,c("MINUTES","SIGNIFICANCE(RANK)_CI_LOW","SIGNIFICANCE(RANK)","SIGNIFICANCE(RANK)_CI_HIGH")]
# # colnames(toplotDFTwo) <- c("MINUTES","CI_LOW","MEAN","CI_HIGH")
#
MyTitle <- "Statistical significance"
MyYLabel <- "Certainty"
#
# g <- ggplot(DataFrame)+
# geom_line(data=DataFrame, aes(x=MINUTES, y=FREQ_MEAN), size=1.2,show_guide = T)+#, colour="blue")+
# geom_ribbon(data=DataFrame,aes(x=MINUTES,ymin=FREQ_CI_LOW,ymax=FREQ_CI_HIGH),show_guide = F,alpha=0.25,colour="blue",fill="blue")+#, fill="steelblue1", color="steelblue1")+
# geom_line(data=DataFrame, aes(x=MINUTES, y=RET_MEAN), size=1.2,show_guide = T)+#, colour="red")+
# geom_ribbon(data=DataFrame,aes(x=MINUTES,ymin=RET_CI_LOW,ymax=RET_CI_HIGH),show_guide = F,alpha=0.25,colour="green",fill="green")+#, fill="steelblue2", color="steelblue3")+
# geom_line(data=DataFrame, aes(x=MINUTES, y=VOL_MEAN), size=1.2,show_guide = T)+#, colour="red")+
# geom_ribbon(data=DataFrame,aes(x=MINUTES,ymin=VOL_CI_LOW,ymax=VOL_CI_HIGH),show_guide = F,alpha=0.25,colour="red",fill="red")+#, fill="steelblue3", color="steelblue3")+
# xlab("Minute Lags") +
# ylab(MyYLabel) +
# ggtitle(MyTitle)+
# theme(title = element_text(size = 28, face = "bold")) +
# theme(axis.text.x = element_text(size = 24)) +
# theme(axis.title.x = element_text(size = 24)) +
# theme(axis.text.y = element_text(size = 24)) +
# theme(axis.title.y = element_text(size = 24)) +
# theme(legend.position = c(0.9, 0.9), legend.box = "vertical",
# legend.text = element_text(size = 22)) + theme(legend.position = "bottom",
# legend.title = element_blank())
# # theme(legend.position = "bottom")+
# # theme(axis.text=element_text(size=16),
# # axis.title=element_text(size=18,face="bold")) +
# # theme(plot.title = element_text(size = 25, face = "bold"))
# g <- g + geom_vline(aes(xintercept=0),colour = 'black', size = 1.5,linetype="dashed")
# g <- g + scale_colour_brewer(palette = "Greens")
# if (!is.null(FullExportingPath)){
# RP_ExportPlot(g,FullExportingPath,"")
# }
#
#
DataFrame <- melt(DataFrame,"MINUTES")
DataFrame$variable <- as.factor(DataFrame$variable)
g <- ggplot(DataFrame,aes(x=MINUTES, y=value, fill=variable, group=variable, linetype = variable,color = variable))+
# geom_line(aes(linetype=variable, color=variable), size=1.2,)+
geom_line(size=1.2,alpha=0.75)+
scale_linetype_manual(values=c("dashed","solid","dashed","dashed","solid","dashed"))+
scale_color_manual(values=c("blue","blue","blue","steelblue","steelblue","steelblue"))+
labs(color = "Metrics",linetype= "Metrics")+
labs(fill="",color = "Metrics",linetype= "Metrics")+
# scale_size(guide = "none")+
# scale_fill_discrete(breaks=c("FREQ_MEAN","RET_MEAN","VOL_MEAN"))+
# scale_fill_discrete(breaks=c("FREQ_MEAN","RET_MEAN","VOL_MEAN"), values=c("blue","blue","blue","green","green","green","red","red","red"))+
# scale_fill_manual(breaks=c("FREQ_MEAN","RET_MEAN","VOL_MEAN"), values=c("blue","green","red"))+
# scale_fill_manual(breaks=c("FREQ_MEAN","RET_MEAN","VOL_MEAN"),
# +
#, colour="blue")+
# geom_ribbon(data=DataFrame,aes(x=MINUTES,ymin=FREQ_CI_LOW,ymax=FREQ_CI_HIGH),show_guide = F,alpha=0.25,colour="blue",fill="blue")+#, fill="steelblue1", color="steelblue1")+
# geom_line(data=DataFrame, aes(x=MINUTES, y=RET_MEAN), size=1.2,show_guide = T)+#, colour="red")+
# geom_ribbon(data=DataFrame,aes(x=MINUTES,ymin=RET_CI_LOW,ymax=RET_CI_HIGH),show_guide = F,alpha=0.25,colour="green",fill="green")+#, fill="steelblue2", color="steelblue3")+
# geom_line(data=DataFrame, aes(x=MINUTES, y=VOL_MEAN), size=1.2,show_guide = T)+#, colour="red")+
# geom_ribbon(data=DataFrame,aes(x=MINUTES,ymin=VOL_CI_LOW,ymax=VOL_CI_HIGH),show_guide = F,alpha=0.25,colour="red",fill="red")+#, fill="steelblue3", color="steelblue3")+
xlab("Minute Lags") +
ylab(MyYLabel) +
# ggtitle(MyTitle)+
# theme(title = element_text(size = 28, face = "bold")) +
scale_x_continuous(breaks=c(-180,-120,-60,0,60,120,180))+
theme(axis.text.x = element_text(size = 30)) +
theme(axis.title.x = element_text(size = 30)) +
theme(axis.text.y = element_text(size = 30)) +
theme(axis.title.y = element_text(size = 30)) +
theme(legend.position = c(0.9, 0.9), legend.box = "vertical",
legend.text = element_text(size = 22)) + theme(legend.position = "bottom",
legend.title = element_blank())
# theme(legend.position = "bottom")+
# scale_colour_manual(name="Legend", values = c("a" = "black", "b" = "red", "c" = "blue","a" = "black", "b" = "red", "c" = "blue","a" = "black", "b" = "red", "c" = "blue")) +
# scale_linetype_manual(name="Legend", values = c("a" = "dashed", "b" = "dotted", "c" = "dotted")) +
# scale_colour_manual(name="Legend", values = c("FREQ_MEAN" = "blue", "RET_MEAN" = "green", "VOL_MEAN" = "red")) +
# scale_linetype_manual(name="Legend", values = c("FREQ_MEAN" = "dashed", "RET_MEAN" = "dotted", "VOL_MEAN" = "dotted")) +
# adjust the colours to those you wanted
# scale_colour_manual(values = c("black","red", "blue"))+
# stick the legend on the bottom
# guides(color=guide_legend("my title"))+
# theme( legend.position = "bottom")+
# theme(axis.text=element_text(size=16),
# axis.title=element_text(size=18,face="bold")) +
# theme(plot.title = element_text(size = 25, face = "bold"))
g <- g + geom_vline(aes(xintercept=0),colour = 'black', size = 1.5,linetype="dashed")
if (!is.null(FullExportingPath)){
RP_ExportPlot(g,FullExportingPath,"")
}
return(g)
}
RP_PlotProfileTogether <- function(rowProfilePos,rowProfileNeg){
stats_sign <- colnames(rowProfilePos)[which(!is.na(as.numeric(colnames(rowProfilePos))))]
rets <- paste0("RET",colnames(rowProfilePos)[which(!is.na(as.numeric(colnames(rowProfilePos))))])
ord_stats_sign <- paste0("ORD",colnames(rowProfilePos)[which(!is.na(as.numeric(colnames(rowProfilePos))))])
vol_stats_sign <- paste0("VOLU",colnames(rowProfilePos)[which(!is.na(as.numeric(colnames(rowProfilePos))))])
vola_stats_sign <- paste0("VOLA",colnames(rowProfilePos)[which(!is.na(as.numeric(colnames(rowProfilePos))))])
stats_sign <- rowProfilePos[,stats_sign]
rets <- rowProfilePos[,rets]
colnames(rets) <- colnames(stats_sign)
ord_stats_sign <- rowProfilePos[,ord_stats_sign]
colnames(ord_stats_sign) <- colnames(stats_sign)
vol_stats_sign <- rowProfilePos[,vol_stats_sign]
colnames(vol_stats_sign) <- colnames(stats_sign)
vola_stats_sign <- rowProfilePos[,vola_stats_sign]
colnames(vola_stats_sign) <- colnames(stats_sign)
  dataframePos <- as.data.frame(t(rbind(vol_stats_sign,vola_stats_sign,rets)))
colnames(dataframePos) <- c("VOLUME_CI_LOW","VOLUME","VOLUME_CI_HIGH","VOLATILITY_CI_LOW","VOLATILITY","VOLATILITY_CI_HIGH","RETS_CI_LOW","RETS","RETS_CI_HIGH")
dataframePos$MINUTES <- as.numeric(colnames(rowProfilePos)[which(!is.na(as.numeric(colnames(rowProfilePos))))])
dataframePos <- dataframePos[,c("MINUTES","VOLUME_CI_LOW","VOLUME","VOLUME_CI_HIGH","VOLATILITY_CI_LOW","VOLATILITY","VOLATILITY_CI_HIGH","RETS_CI_LOW","RETS","RETS_CI_HIGH")]
stats_sign <- colnames(rowProfileNeg)[which(!is.na(as.numeric(colnames(rowProfileNeg))))]
rets <- paste0("RET",colnames(rowProfileNeg)[which(!is.na(as.numeric(colnames(rowProfileNeg))))])
ord_stats_sign <- paste0("ORD",colnames(rowProfileNeg)[which(!is.na(as.numeric(colnames(rowProfileNeg))))])
vol_stats_sign <- paste0("VOLU",colnames(rowProfileNeg)[which(!is.na(as.numeric(colnames(rowProfileNeg))))])
vola_stats_sign <- paste0("VOLA",colnames(rowProfileNeg)[which(!is.na(as.numeric(colnames(rowProfileNeg))))])
stats_sign <- rowProfileNeg[,stats_sign]
rets <- rowProfileNeg[,rets]
colnames(rets) <- colnames(stats_sign)
ord_stats_sign <- rowProfileNeg[,ord_stats_sign]
colnames(ord_stats_sign) <- colnames(stats_sign)
vol_stats_sign <- rowProfileNeg[,vol_stats_sign]
colnames(vol_stats_sign) <- colnames(stats_sign)
vola_stats_sign <- rowProfileNeg[,vola_stats_sign]
colnames(vola_stats_sign) <- colnames(stats_sign)
  dataframeNeg <- as.data.frame(t(rbind(vol_stats_sign,vola_stats_sign,rets)))
colnames(dataframeNeg) <- c("VOLUME_CI_LOW","VOLUME","VOLUME_CI_HIGH","VOLATILITY_CI_LOW","VOLATILITY","VOLATILITY_CI_HIGH","RETS_CI_LOW","RETS","RETS_CI_HIGH")
dataframeNeg$MINUTES <- as.numeric(colnames(rowProfileNeg)[which(!is.na(as.numeric(colnames(rowProfileNeg))))])
dataframeNeg <- dataframeNeg[,c("MINUTES","VOLUME_CI_LOW","VOLUME","VOLUME_CI_HIGH","VOLATILITY_CI_LOW","VOLATILITY","VOLATILITY_CI_HIGH","RETS_CI_LOW","RETS","RETS_CI_HIGH")]
gret <- outputGraphicsBestProfileCITogether(dataframeNeg,dataframePos,Russell_version = "R1000", type = "RET")
# gvol <- outputGraphicsBestProfileCITogether(dataframeNeg,dataframePos,Russell_version = "R1000", type = "VOL")
# gvolu <- outputGraphicsBestProfileCITogether(dataframeNeg,dataframePos,Russell_version = "R1000", type = "VOLU")
return(list(gret=gret))
}
outputGraphicsBestProfileCITogether <- function( dataNeg,dataPos,Russell_version = "R1000", type = "RET"){
toplotDF <- NULL
MyTitle <- ""
if (type == "VOL"){
toplotDFPos<- dataPos[,c("MINUTES","VOLATILITY_CI_LOW","VOLATILITY","VOLATILITY_CI_HIGH")]
colnames(toplotDFPos) <- c("MINUTES","CI_LOW","MEAN","CI_HIGH")
toplotDFNeg<- dataNeg[,c("MINUTES","VOLATILITY_CI_LOW","VOLATILITY","VOLATILITY_CI_HIGH")]
colnames(toplotDFNeg) <- c("MINUTES","CI_LOW","MEAN","CI_HIGH")
MyTitle <- "Abnormal volatility"
}
if(type =="RET"){
toplotDFPos<- dataPos[,c("MINUTES","RETS_CI_LOW","RETS","RETS_CI_HIGH")]
colnames(toplotDFPos) <- c("MINUTES","CI_LOW","MEAN","CI_HIGH")
toplotDFNeg<- dataNeg[,c("MINUTES","RETS_CI_LOW","RETS","RETS_CI_HIGH")]
colnames(toplotDFNeg) <- c("MINUTES","CI_LOW","MEAN","CI_HIGH")
MyTitle <- "Abnormal returns"
}
if(type =="VOLU"){
toplotDFPos<- dataPos[,c("MINUTES","VOLUME_CI_LOW","VOLUME","VOLUME_CI_HIGH")]
colnames(toplotDFPos) <- c("MINUTES","CI_LOW","MEAN","CI_HIGH")
toplotDFNeg<- dataNeg[,c("MINUTES","VOLUME_CI_LOW","VOLUME","VOLUME_CI_HIGH")]
colnames(toplotDFNeg) <- c("MINUTES","CI_LOW","MEAN","CI_HIGH")
MyTitle <- "Abnormal volume"
}
g2 <- RP_PlotCIIntervalSuperposedSoft(DataFramePos = toplotDFPos,DataFrameNeg = toplotDFNeg, Title = MyTitle, FullExportingPath = NULL)
return(g2)
}
RP_PlotCIIntervalSuperposedSoft <- function(DataFramePos, DataFrameNeg, Title = "", FullExportingPath=NULL) {
print("superposing")
colnames(DataFramePos) <- paste0("POS_",colnames(DataFramePos))
colnames(DataFramePos)[1] <- "MINUTES"
colnames(DataFrameNeg) <- paste0("NEG_",colnames(DataFrameNeg))
colnames(DataFrameNeg)[1] <- "MINUTES"
DataFrame <- merge(DataFramePos, DataFrameNeg, by="MINUTES")
if (short == "short") {
DataFrame <- DataFrame[abs(DataFrame$MINUTES) <= 90,]
DataFrame <- DataFrame[DataFrame$MINUTES >= -50,]
}
print("hard soft coloring")
g <- ggplot(DataFrame)+
    geom_line(data=DataFrame, aes(x=MINUTES, y=POS_MEAN), size=1.5,alpha=1,show.legend = TRUE, colour="#00BA38")+
    geom_ribbon(data=DataFrame,aes(x=MINUTES,ymin=POS_CI_LOW,ymax=POS_CI_HIGH),show.legend = FALSE,alpha=0.25,colour="#00BA38",fill="#00BA38")+
    geom_line(data=DataFrame, aes(x=MINUTES, y=NEG_MEAN), size=1.5,alpha=1,show.legend = TRUE, colour="#619CFF")+
    geom_ribbon(data=DataFrame,aes(x=MINUTES,ymin=NEG_CI_LOW,ymax=NEG_CI_HIGH),show.legend = FALSE,alpha=0.25,colour="#619CFF",fill="#619CFF")+
xlab("Minute Lags") +
ylab("BPS cumulated minute return") +
# ggtitle(Title)+
scale_x_continuous(breaks=c(-180,-120,-60,0,60,120,180))+
# theme(title = element_text(size = 28, face = "bold")) +
theme(axis.text.x = element_text(size = 30)) +
theme(axis.title.x = element_text(size = 30)) +
theme(axis.text.y = element_text(size = 30)) +
theme(axis.title.y = element_text(size = 30))
# theme(legend.position = c(0.9, 0.9), legend.box = "vertical",
# legend.text = element_text(size = 22)) + theme(legend.position = "bottom",
# legend.title = element_blank())
g <- g + geom_vline(aes(xintercept=0),colour = 'black', size = 1.5,linetype="dashed")
if (!is.null(FullExportingPath)){
RP_ExportPlot(g,FullExportingPath,"")
}
return(g)
}
RP_PlotCIIntervalSuperposed <- function(DataFramePos, DataFrameNeg, Title = "", FullExportingPath=NULL) {
print("superposing")
colnames(DataFramePos) <- paste0("POS_",colnames(DataFramePos))
colnames(DataFramePos)[1] <- "MINUTES"
colnames(DataFrameNeg) <- paste0("NEG_",colnames(DataFrameNeg))
colnames(DataFrameNeg)[1] <- "MINUTES"
DataFrame <- merge(DataFramePos, DataFrameNeg, by="MINUTES")
mav <- function(x){stats::filter(x,rep(1/3,3), sides=2)}
DataFrame$POS_CI_LOW <- 3/4*( DataFrame$POS_CI_LOW) + 1/4*(mav(DataFrame$POS_CI_LOW))
DataFrame$POS_CI_HIGH <- 3/4*(DataFrame$POS_CI_HIGH) + 1/4*mav(DataFrame$POS_CI_HIGH)
DataFrame$POS_MEAN <- 3/4*(DataFrame$POS_MEAN) + 1/4*mav(DataFrame$POS_MEAN)
DataFrame$NEG_MEAN <- 3/4*(DataFrame$NEG_MEAN) + 1/4*mav(DataFrame$NEG_MEAN)
DataFrame$NEG_CI_LOW <- 3/4*(DataFrame$NEG_CI_LOW)+ 1/4*mav(DataFrame$NEG_CI_LOW)
DataFrame$NEG_CI_HIGH <- 3/4*(DataFrame$NEG_CI_HIGH)+ 1/4*mav(DataFrame$NEG_CI_HIGH)
#
DataFrame <- DataFrame[complete.cases(DataFrame),]
# DataFrame <- DataFrame[abs(DataFrame$MINUTES) <= 90,]
# DataFrame <- DataFrame[DataFrame$MINUTES >= -50,]
#
# DataFrame <- DataFrame[abs(DataFrame$MINUTES) <= 100,]
#
# DataFrameDown <- DataFrame[DataFrame$MINUTES <= 0,]
# DataFrameUp <- DataFrame[DataFrame$MINUTES > 0,]
# DataFrameUp$MINUTES <- 2* DataFrameUp$MINUTES
#
# DataFrame <- DataFrame[abs(DataFrame$MINUTES) <= 180,]
#
# DataFrame <- rbind(DataFrameDown,DataFrameUp)
g <- ggplot(DataFrame)+
geom_line(data=DataFrame, aes(x=MINUTES, y=POS_MEAN), size=1.5,alpha=1,show_guide = T, colour="#619CFF")+
geom_ribbon(data=DataFrame,aes(x=MINUTES,ymin=POS_CI_LOW,ymax=POS_CI_HIGH),show_guide = F,alpha=0.25,colour="#619CFF",fill="#619CFF")+
geom_line(data=DataFrame, aes(x=MINUTES, y=NEG_MEAN), size=1.5,alpha=1,show_guide = T, colour="#F8766D")+
geom_ribbon(data=DataFrame,aes(x=MINUTES,ymin=NEG_CI_LOW,ymax=NEG_CI_HIGH),show_guide = F,alpha=0.25,colour="#F8766D",fill="#F8766D")+
xlab("Minute Lags") +
ylab("BPS cumulated minute returns") +
# ggtitle(Title)+
# theme(title = element_text(size = 28, face = "bold")) +
scale_x_continuous(breaks=c(-180,-120,-60,0,60,120,180))+
theme(axis.text.x = element_text(size = 30)) +
theme(axis.title.x = element_text(size = 30)) +
theme(axis.text.y = element_text(size = 30)) +
theme(axis.title.y = element_text(size = 30))
# theme(legend.position = c(0.9, 0.9), legend.box = "vertical",
# legend.text = element_text(size = 22)) + theme(legend.position = "bottom",
# legend.title = element_blank())
g <- g + geom_vline(aes(xintercept=0),colour = 'black', size = 1.5,linetype="dashed")
if (!is.null(FullExportingPath)){
RP_ExportPlot(g,FullExportingPath,"")
}
return(g)
}
trimBest <- function(dfrow,metrics_to_use){
  # keep the single best row for this event; which.max takes the first maximum
  # and ignores NAs (the original >= max() comparison could propagate NA rows)
  toReturn <- dfrow[which.max(dfrow[, metrics_to_use]), , drop = FALSE]
stats_prepost <- colnames(dfrow)[which(as.numeric(colnames(dfrow))>= -180)]
ret_prepost <- paste0("RET",stats_prepost)
if(toReturn$sentiment == "NEGATIVE"){
toReturn[,ret_prepost] <- -toReturn[,ret_prepost]
}
return(toReturn)
}
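if (FALSE) { # toy illustration of trimBest; the column names here are hypothetical
  toy <- data.frame(check.names = FALSE,
                    sentiment = c("POSITIVE", "NEGATIVE"),
                    post_ranked_return = c(0.2, 0.7),
                    `-1` = c(1, 2), `0` = c(3, 4),
                    `RET-1` = c(10, 20), `RET0` = c(30, 40))
  # keeps the NEGATIVE row (highest metric) and flips the sign of its RET columns
  trimBest(toy, "post_ranked_return")
}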
computeCIbound <- function(my_return_vector){
print("entering")
my_return_vector <- my_return_vector[!is.na(my_return_vector)]
  if (length(my_return_vector) > 0 && any(my_return_vector != 0)) {
theta.boot.mean <- boot(my_return_vector, bootThetaMean, R=2000)
mean_ci <- boot.ci(theta.boot.mean, conf=0.9)
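    # boot.ci()$normal lays out c(conf, lower, upper), so [2]/[3] below are
    # the 90% normal-approximation bounds around the sample mean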
return(c(mean_ci$normal[2],mean(my_return_vector),mean_ci$normal[3]))
} else {
return(c(0,0,0))
}
}
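# 'bootThetaMean' is defined in RP_BigData_EventStudy_Utilities.R (sourced at
# the top of this script). A minimal sketch consistent with how boot() calls a
# statistic (data first, resample indices second) would be:
#   bootThetaMean <- function(x, idx) mean(x[idx])
if (FALSE) { # usage sketch, not executed when the script is sourced
  library("boot")
  set.seed(42)
  computeCIbound(rnorm(500, mean = 2)) # returns c(lower, ~2, upper)
}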
################################
################################
################################
################################
################################
################################
################################ End of Plotting functions
my_metrics <-
c(
# "card_post_return",
"card_post_ranked_return",
# "post_return",
"post_ranked_return"
# "post_volatility",
# "post_ranked_volatility"
# "card_pre_return",
# "card_pre_ranked_return",
# "pre_return",
# "pre_ranked_return",
# "pre_volatility",
# "pre_ranked_volatility",
# "volatility_correction",
# "ranked_volatility_correction",
# "return_correction",
# "ranked_return_correction",
# "card_return_correction",
# "card_ranked_return_correction"
)
print("Reading the taxonomy mapping SOFT versus unSOFT file")
MappingGroupCategory_RPData <- readRDS(file = paste0(outputDataPath, "bigdata_group_category.rds"))
colnames(MappingGroupCategory_RPData) <- toupper(colnames(MappingGroupCategory_RPData))
flagged_group <- read.csv(file = paste0(outputDataPath,"group_to_flag.csv"))
MappingGroupCategory_RPData <- merge(MappingGroupCategory_RPData, flagged_group, by = "GROUP")
MappingGroupCategory_RPData <- MappingGroupCategory_RPData[MappingGroupCategory_RPData$SOFT != "",]
MappingGroupCategory_RPData$SOFT_BOOL <- MappingGroupCategory_RPData$SOFT == "yes"
# print(head(MappingGroupCategory_RPData[!MappingGroupCategory_RPData$SOFT_BOOL,],500))
CategoryMapping <- MappingGroupCategory_RPData[,c("CATEGORY","SOFT_BOOL")]
print(dim(CategoryMapping))
colnames(CategoryMapping) <- c("my_event","SOFT")
dataG <- readRDS(file=paste0(outputDataPath,"metrics_clean_prod_spr_r1000_bigdataf_abvol_abvol_corrado_df.rds"))
# dataC <- dataG[dataG$aggregate_criteria == "CATEGORY",]
# dataCC <- dataG[dataG$aggregate_criteria == "GROUP",]
#
# dataC <- merge(dataC, CategoryMapping, by = c("my_event"))
# dataCC <- merge(dataCC, CategoryMapping, by = c("my_event"))
# dataG <- rbind(dataC, dataCC)
dataG <- dataG[dataG$aggregate_criteria == "CATEGORY",]
dataG <- merge(dataG, CategoryMapping, by = c("my_event"))
print(sum(dataG$SOFT))
print(sum(!dataG$SOFT))
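# Plotting knobs used below:
# - short: set to "short" to zoom the superposed CI plots to [-50, +90] minutes
#   (read as a global by RP_PlotCIIntervalSuperposedSoft)
# - trash_counter: number of top-ranked categories kept per metric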
short <- "long"
trash_counter <- 150
for(my_metric in my_metrics){
print(my_metric)
soft_results <- list(
"TRUE"=NULL,
"FALSE"=NULL
)
soft_results_group <- list(
"TRUE"=NULL,
"FALSE"=NULL
)
for (my_softness in c(FALSE,TRUE)){
library("boot")
data <- dataG[dataG$SOFT == my_softness,]
data <- data[data$event_relevance == "HIGH",]
data <- data[data$relevance == "HIGH",]
data <- data[data$similarity_gap_filter >= 1,]
# data <- data[data$localSource == "DJPR" | data$localSource == "PREMIUM_PACK" ,]
dataGroup <- data[data$aggregate_criteria =="GROUP",]
dataCategory <- data[data$aggregate_criteria =="CATEGORY",]
dataGroupBest <- ddply(.data = dataGroup,.variables = "my_event",.fun = function(x){trimBest(x,my_metric)})
print("after")
print(dim(dataGroupBest))
    stats_prepost <- colnames(dataGroupBest)[which(as.numeric(colnames(dataGroupBest)) >= -180)]
ord_prepost <- paste0("ORD",stats_prepost)
ret_prepost <- paste0("RET",stats_prepost)
vol_prepost <- paste0("VOLA",stats_prepost)
volu_prepost <- paste0("VOLU",stats_prepost)
allNumericColumns <- c(stats_prepost,ord_prepost,ret_prepost,vol_prepost,volu_prepost)
allRetsColumns <- ret_prepost
my_df <- dataGroupBest[,allNumericColumns]
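    # differentiateRets (defined earlier in this file) turns cumulated returns
    # into minute-by-minute increments; their CIs drive the band widening below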
my_diff_df <- differentiateRets(dataGroupBest[,allRetsColumns])
print("computing the CI interval")
event_minutes_ci_matrix <- apply(my_df,2,FUN=computeCIbound)
print("diff return CI")
event_minutes_diff_ci_matrix <- apply(my_diff_df,2,FUN=computeCIbound)
print("CI done")
rowProfile <- dataGroupBest[1:3,,drop=FALSE]
rowProfile[1,allNumericColumns] <- event_minutes_ci_matrix[1,]
rowProfile[2,allNumericColumns] <- event_minutes_ci_matrix[2,]
rowProfile[3,allNumericColumns] <- event_minutes_ci_matrix[3,]
# rowProfile[1,allRetsColumns] <- event_minutes_ci_matrix[2,allRetsColumns] - abs(event_minutes_diff_ci_matrix[1,])
# # rowProfile[2,allRetsColumns] <- event_minutes_ci_matrix[2,]
# rowProfile[3,allRetsColumns] <- event_minutes_ci_matrix[2,allRetsColumns] + abs(event_minutes_diff_ci_matrix[3,])
#
#
#################
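    # Asymmetric CI band: bootstrap the CI of the per-minute return increments,
    # accumulate (cumsum) the half-widths outward from the event minute, and
    # damp them (0.5 post-event, 0.3 pre-event) so the band widens with horizon
    # rather than using the raw pointwise CI of cumulated returns.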
    event_index <- (length(event_minutes_diff_ci_matrix[1,]) - 1)/2
lowerbound <- abs(event_minutes_diff_ci_matrix[1,])
upperbound <- abs(event_minutes_diff_ci_matrix[3,])
    upperbound[(event_index+1):length(upperbound)] <- 0.5*cumsum(upperbound[(event_index+1):length(upperbound)])
    lowerbound[(event_index+1):length(lowerbound)] <- 0.5*cumsum(lowerbound[(event_index+1):length(lowerbound)])
upperbound[event_index:1] <- 0.3*cumsum(upperbound[event_index:1])
lowerbound[event_index:1] <- 0.3*cumsum(lowerbound[event_index:1])
rowProfile[1,allRetsColumns] <- event_minutes_ci_matrix[2,allRetsColumns] - lowerbound
# rowProfile[2,allRetsColumns] <- event_minutes_ci_matrix[2,]
rowProfile[3,allRetsColumns] <- event_minutes_ci_matrix[2,allRetsColumns] + upperbound
    ###########
soft_results_group[[as.character(my_softness)]] <- rowProfile
    ####### computing the average statistical significance profile
print("Plotting the average statistical profile")
ord_prepost <- paste0("ORD",stats_prepost)
best_avg_stats_ORD_profile <- colMeans(dataGroupBest[,ord_prepost],na.rm=FALSE)
best_avg_stats_profile <- colMeans(dataGroupBest[,stats_prepost],na.rm=FALSE)
rowProfileSTATS <- dataGroupBest[1,,drop=FALSE]
rowProfileSTATS[1,stats_prepost] <- best_avg_stats_profile
rowProfileSTATS[1,ord_prepost] <- best_avg_stats_ORD_profile
##########################
##########################
##########################
stats_sign <- colnames(rowProfileSTATS)[which(!is.na(as.numeric(colnames(rowProfileSTATS))))]
rets <- paste0("RET",colnames(rowProfileSTATS)[which(!is.na(as.numeric(colnames(rowProfileSTATS))))])
ord_stats_sign <- paste0("ORD",colnames(rowProfileSTATS)[which(!is.na(as.numeric(colnames(rowProfileSTATS))))])
vol_stats_sign <- paste0("VOLU",colnames(rowProfileSTATS)[which(!is.na(as.numeric(colnames(rowProfileSTATS))))])
vola_stats_sign <- paste0("VOLA",colnames(rowProfileSTATS)[which(!is.na(as.numeric(colnames(rowProfileSTATS))))])
stats_sign <- rowProfileSTATS[,stats_sign]
rets <- rowProfileSTATS[,rets]
colnames(rets) <- colnames(stats_sign)
ord_stats_sign <- rowProfileSTATS[,ord_stats_sign]
colnames(ord_stats_sign) <- colnames(stats_sign)
vol_stats_sign <- rowProfileSTATS[,vol_stats_sign]
colnames(vol_stats_sign) <- colnames(stats_sign)
vola_stats_sign <- rowProfileSTATS[,vola_stats_sign]
colnames(vola_stats_sign) <- colnames(stats_sign)
dataframe <- as.data.frame(t(rbind(stats_sign,ord_stats_sign,vol_stats_sign,vola_stats_sign,rets)))
colnames(dataframe) <- c("COR_STATS_SIGN","ORD_STATS_SIGN","VOLUME","VOLATILITY","RETS")
dataframe$MINUTES <- as.numeric(colnames(rowProfileSTATS)[which(!is.na(as.numeric(colnames(rowProfileSTATS))))])
dataframe <- dataframe[,c("MINUTES","COR_STATS_SIGN","ORD_STATS_SIGN","VOLUME","VOLATILITY","RETS")]
print("outputing an average graphic for our metric group")
results <- RP_PlotProfile(rowProfile)
g <- outputGraphicsBestProfileStats(rowProfileSTATS$product_criteria,rowProfileSTATS$aggregate_criteria,rowProfileSTATS$sentiment_criteria,rowProfileSTATS$similarity_gap_filter,rowProfileSTATS$ens_filter,rowProfileSTATS$event_number_event_filtering, rowProfileSTATS$gics_sector, rowProfileSTATS$EVENT, rowProfileSTATS$localSource, dataFrame = dataframe, FALSE, Russell_version = "R1000",ylim = TRUE)
print(g)
# Render your graph
RP_ExportPlot(gplot = g,outputDataPath = outputDataPath,filename = paste0("PAPER_PICTURES/split_bigdata_group_stat_",my_metric))
print("statistical plotting")
#
# print("Together witgh")
# rowProfile[4,] <- rowProfileSTATS[2,]
dataCategoryBest <- ddply(.data = dataCategory,.variables = "my_event",.fun = function(x){trimBest(x,my_metric)})
dataCategoryBest$RANKING <- dataCategoryBest[,my_metric]
dataCategoryBest <- dataCategoryBest[order(dataCategoryBest$RANKING,decreasing = TRUE),]
    dataCategoryBest <- dataCategoryBest[seq_len(min(trash_counter, nrow(dataCategoryBest))),]
# trash_counter <- 100
#################### bootstrapping the mean
print("bootstrapping the confidence interval")
############# problem in the code before
# dataCategoryBest[,allRetsColumns] <- dataCategoryBest[,allRetsColumns]*10
my_df <- dataCategoryBest[,allNumericColumns]
my_diff_df <- differentiateRets(dataCategoryBest[,allRetsColumns])
print("computing the CI interval")
event_minutes_ci_matrix <- apply(my_df,2,FUN=computeCIbound)
print("diff return CI")
event_minutes_diff_ci_matrix <- apply(my_diff_df,2,FUN=computeCIbound)
print("CI done")
rowProfile <- dataCategoryBest[1:3,,drop=FALSE]
rowProfile[1,allNumericColumns] <- event_minutes_ci_matrix[1,]
rowProfile[2,allNumericColumns] <- event_minutes_ci_matrix[2,]
rowProfile[3,allNumericColumns] <- event_minutes_ci_matrix[3,]
#
# rowProfile[1,allRetsColumns] <- event_minutes_ci_matrix[2,allRetsColumns] - abs(event_minutes_diff_ci_matrix[1,])
# # rowProfile[2,allRetsColumns] <- event_minutes_ci_matrix[2,]
# rowProfile[3,allRetsColumns] <- event_minutes_ci_matrix[2,allRetsColumns] + abs(event_minutes_diff_ci_matrix[3,])
#
#################
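    # Same asymmetric CI-band construction as for the groups above.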
    event_index <- (length(event_minutes_diff_ci_matrix[1,]) - 1)/2
lowerbound <- abs(event_minutes_diff_ci_matrix[1,])
upperbound <- abs(event_minutes_diff_ci_matrix[3,])
    upperbound[(event_index+1):length(upperbound)] <- 0.5*cumsum(upperbound[(event_index+1):length(upperbound)])
    lowerbound[(event_index+1):length(lowerbound)] <- 0.5*cumsum(lowerbound[(event_index+1):length(lowerbound)])
upperbound[event_index:1] <- 0.3*cumsum(upperbound[event_index:1])
lowerbound[event_index:1] <- 0.3*cumsum(lowerbound[event_index:1])
rowProfile[1,allRetsColumns] <- event_minutes_ci_matrix[2,allRetsColumns] - lowerbound
# rowProfile[2,allRetsColumns] <- event_minutes_ci_matrix[2,]
rowProfile[3,allRetsColumns] <- event_minutes_ci_matrix[2,allRetsColumns] + upperbound
soft_results[[as.character(my_softness)]] <- rowProfile
    ###########
##########################
##########################
##########################
########################## plotting the average profile
}
###########
  ########### Plot the pooled category profiles (the group version is commented out below)
# save(sent_results,sent_results_group, file = paste0(outputDataPath, "dualGroups.RData"))
softness<-"TRUE"
rowProfilePos <- soft_results[softness][1]$`TRUE`
softness <- "FALSE"
rowProfileNeg <- soft_results[softness][1]$`FALSE`
rowProfilePosg <- rowProfilePos
rowProfileNegg <- rowProfileNeg
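  # Display-only rescaling of the post-event segment; the factors below appear
  # hand-tuned per metric for the paper figures.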
if(my_metric == "post_ranked_return"){
factor1 <- 0.65
factor2 <- 0.65
stats_post_sign <- paste0("RET",colnames(rowProfilePos)[which(as.numeric(colnames(rowProfilePos)) >= 0)])
rowProfilePosg[,stats_post_sign] <- factor1*rowProfilePos[,stats_post_sign]
rowProfileNegg[,stats_post_sign] <- factor2*rowProfileNeg[,stats_post_sign]
}
if(my_metric == "card_post_ranked_return"){
factor1 <- 1.5
factor2 <- 1.5
stats_post_sign <- paste0("RET",colnames(rowProfilePos)[which(as.numeric(colnames(rowProfilePos)) >= 0)])
rowProfilePosg[,stats_post_sign] <- factor1*rowProfilePos[,stats_post_sign]
rowProfileNegg[,stats_post_sign] <- factor2*rowProfileNeg[,stats_post_sign]
}
# factor <- 4.5/abs(rowProfilePos[2,"RET80"])
#
# rowProfilePos[sapply(rowProfilePos,is.numeric)] <- factor*rowProfilePos[sapply(rowProfilePos,is.numeric)]
# rowProfileNeg[sapply(rowProfileNeg,is.numeric)] <- factor*rowProfileNeg[sapply(rowProfileNeg,is.numeric)]
# results <- RP_PlotProfileTogether(rowProfilePos,rowProfileNeg)
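  # Note: the Neg profile is passed in the Pos slot (and vice versa), which
  # swaps the two ribbon colours relative to the commented call above.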
results <- RP_PlotProfileTogether(rowProfileNegg,rowProfilePosg)
RP_ExportPlot(results$gret,outputDataPath = outputDataPath,filename = paste0("PAPER_PICTURES/SOFT_SPLIT/",my_metric,short,trash_counter,"short_clean_split_bigdata_returns_category_average_profile_ci"))
print(results$gret)
# softness<-"TRUE"
# rowProfilePos <- soft_results_group[softness][1]$`TRUE`
# softness <- "FALSE"
# rowProfileNeg <- soft_results_group[softness][1]$`FALSE`
# results <- RP_PlotProfileTogether(rowProfilePos,rowProfileNeg)
# RP_ExportPlot(results$gret,outputDataPath = outputDataPath,filename = paste0("PAPER_PICTURES/SOFT_SPLIT/",my_metric,"split_bigdata_returns_group_average_profile_ci"))
# print(results$gret)
#
}
#####################
##################### outputing paper graphics
library("RPToolsDB")
library(shiny)
library(DT)
library("RPPlotUtils")
library(dplyr)
user = 'sduprey'
# JIRA Code (e.g. NAR-#)
JIRACode = 'NAR-326'
repoPath = RP_GetSharedPath(user)
# Input Data Path
inputDataPath = paste(repoPath,'InputData/', user,'/',JIRACode,'/',sep="")
# Output Data Path
outputDataPath = paste(repoPath,'OutputData/', user,'/',JIRACode,'/',sep="")
source("./RCode/RP_BigData_EventStudy_Utilities.R")
################################
################################
################################
################################
################################
################################
################################ Plotting functions
RP_PlotCIInterval <- function(DataFrame, Title = "", FullExportingPath=NULL) {
yLabel <- NULL
if(Title == "Abnormal returns"){
yLabel <- "BPS cumultated return"
}
if(Title == "Abnormal volume" | Title == "Abnormal volatility"){
yLabel <- "Abnormal ratio"
}
g <- ggplot(DataFrame, aes(MINUTES, MEAN))+
geom_point()+
geom_line(data=DataFrame,size=1.2, color = "blue")+
geom_ribbon(data=DataFrame,aes(ymin=CI_LOW,ymax=CI_HIGH), color = "blue",alpha=0.25,fill="blue")+
xlab("Minute Lags") +
ylab(yLabel) +
ggtitle(Title)+
theme(title = element_text(size = 28, face = "bold")) +
theme(axis.text.x = element_text(size = 24)) +
theme(axis.title.x = element_text(size = 24)) +
theme(axis.text.y = element_text(size = 24)) +
theme(axis.title.y = element_text(size = 24)) +
theme(legend.position = c(0.9, 0.9), legend.box = "vertical",
legend.text = element_text(size = 22)) + theme(legend.position = "bottom",
legend.title = element_blank())
if(Title == "Statistical significance"){
g <- g + scale_y_continuous(labels = percent_format())
}
# theme(axis.text=element_text(size=16),
# axis.title=element_text(size=18,face="bold")) +
# theme(plot.title = element_text(size = 25, face = "bold"))
#
g <- g + geom_vline(aes(xintercept=0),colour = 'black', size = 1.5,linetype="dashed")
if (!is.null(FullExportingPath)){
RP_ExportPlot(g,FullExportingPath,"")
}
return(g)
}
outputGraphicsBestProfileCI <- function( data, Russell_version = "R1000", type = "RET"){
toplotDF <- NULL
MyTitle <- ""
if (type == "VOL"){
toplotDF<- data[,c("MINUTES","VOLATILITY_CI_LOW","VOLATILITY","VOLATILITY_CI_HIGH")]
colnames(toplotDF) <- c("MINUTES","CI_LOW","MEAN","CI_HIGH")
MyTitle <- "Abnormal volatility"
# toplotDF <- toplotDF+0.8
# toplotDF$MINUTES <- toplotDF$MINUTES - 0.8
translatingFactor <- (1.2-min(toplotDF[,-1]))
toplotDF <- toplotDF+translatingFactor
toplotDF$MINUTES <- toplotDF$MINUTES - translatingFactor
}
if(type =="RET"){
toplotDF<- data[,c("MINUTES","RETS_CI_LOW","RETS","RETS_CI_HIGH")]
colnames(toplotDF) <- c("MINUTES","CI_LOW","MEAN","CI_HIGH")
MyTitle <- "Abnormal returns"
}
if(type =="VOLU"){
toplotDF<- data[,c("MINUTES","VOLUME_CI_LOW","VOLUME","VOLUME_CI_HIGH")]
colnames(toplotDF) <- c("MINUTES","CI_LOW","MEAN","CI_HIGH")
MyTitle <- "Abnormal volume"
# toplotDF <- toplotDF+0.8
# toplotDF$MINUTES <- toplotDF$MINUTES - 0.8
translatingFactor <- (1.2-min(toplotDF[,-1]))
toplotDF <- toplotDF+translatingFactor
toplotDF$MINUTES <- toplotDF$MINUTES - translatingFactor
}
if(type =="STATS"){
toplotDF<- data[,c("MINUTES","SIGNIFICANCE_CI_LOW","SIGNIFICANCE","SIGNIFICANCE_CI_HIGH")]
colnames(toplotDF) <- c("MINUTES","CI_LOW","MEAN","CI_HIGH")
MyTitle <- "Statistical significance"
}
g2 <- NULL
if(type == "STATS_COMBINED"){
toplotDF<- data[,c("MINUTES","SIGNIFICANCE(RANK)_CI_LOW","SIGNIFICANCE(RANK)","SIGNIFICANCE(RANK)_CI_HIGH","SIGNIFICANCE_CI_LOW","SIGNIFICANCE","SIGNIFICANCE_CI_HIGH")]
MyTitle <- "Statistical significance"
g2 <- RP_PlotCIIntervalSuperposedStats(DataFrame = toplotDF, Title = MyTitle, FullExportingPath = NULL)
} else {
g2 <- RP_PlotCIInterval(DataFrame = toplotDF, Title = MyTitle, FullExportingPath = NULL)
}
return(g2)
}
RP_PlotProfile <- function(rowProfile){
stats_sign <- colnames(rowProfile)[which(!is.na(as.numeric(colnames(rowProfile))))]
rets <- paste0("RET",colnames(rowProfile)[which(!is.na(as.numeric(colnames(rowProfile))))])
ord_stats_sign <- paste0("ORD",colnames(rowProfile)[which(!is.na(as.numeric(colnames(rowProfile))))])
vol_stats_sign <- paste0("VOLU",colnames(rowProfile)[which(!is.na(as.numeric(colnames(rowProfile))))])
vola_stats_sign <- paste0("VOLA",colnames(rowProfile)[which(!is.na(as.numeric(colnames(rowProfile))))])
stats_sign <- rowProfile[,stats_sign]
rets <- rowProfile[,rets]
colnames(rets) <- colnames(stats_sign)
ord_stats_sign <- rowProfile[,ord_stats_sign]
colnames(ord_stats_sign) <- colnames(stats_sign)
vol_stats_sign <- rowProfile[,vol_stats_sign]
colnames(vol_stats_sign) <- colnames(stats_sign)
vola_stats_sign <- rowProfile[,vola_stats_sign]
colnames(vola_stats_sign) <- colnames(stats_sign)
# dataframe <- as.data.frame(t(rbind(stats_sign,ord_stats_sign,vol_stats_sign,vola_stats_sign,rets)))
dataframe <- as.data.frame(t(rbind(stats_sign,ord_stats_sign,vol_stats_sign,vola_stats_sign,rets)))
colnames(dataframe) <- c("SIGNIFICANCE(RANK)_CI_LOW","SIGNIFICANCE(RANK)","SIGNIFICANCE(RANK)_CI_HIGH","SIGNIFICANCE_CI_LOW","SIGNIFICANCE","SIGNIFICANCE_CI_HIGH","VOLUME_CI_LOW","VOLUME","VOLUME_CI_HIGH","VOLATILITY_CI_LOW","VOLATILITY","VOLATILITY_CI_HIGH","RETS_CI_LOW","RETS","RETS_CI_HIGH")
dataframe$MINUTES <- as.numeric(colnames(rowProfile)[which(!is.na(as.numeric(colnames(rowProfile))))])
dataframe <- dataframe[,c("MINUTES","SIGNIFICANCE(RANK)_CI_LOW","SIGNIFICANCE(RANK)","SIGNIFICANCE(RANK)_CI_HIGH","SIGNIFICANCE_CI_LOW","SIGNIFICANCE","SIGNIFICANCE_CI_HIGH","VOLUME_CI_LOW","VOLUME","VOLUME_CI_HIGH","VOLATILITY_CI_LOW","VOLATILITY","VOLATILITY_CI_HIGH","RETS_CI_LOW","RETS","RETS_CI_HIGH")]
gret <- outputGraphicsBestProfileCI(dataframe,Russell_version = "R1000", type = "RET")
gvol <- outputGraphicsBestProfileCI(dataframe,Russell_version = "R1000", type = "VOL")
gvolu <- outputGraphicsBestProfileCI(dataframe,Russell_version = "R1000", type = "VOLU")
gstats <- outputGraphicsBestProfileCI(dataframe,Russell_version = "R1000", type = "STATS")
gstatscombi <- outputGraphicsBestProfileCI(dataframe,Russell_version = "R1000", type = "STATS_COMBINED")
return(list(gret=gret,gvol=gvol,gvolu=gvolu,gstats=gstats,gstatscombi=gstatscombi))
}
differentiateRets <- function(toDF){
newToDF <- toDF
for (i in 2:dim(toDF)[2]){
newToDF[,i] <- toDF[,i] - toDF[,(i-1)]
}
newToDF[,1] <- newToDF[,2]
return(newToDF)
}
PlotDataFrame <- function (DataFrame, XLab = "", YLab = "", Title = "", AxisIncluded = FALSE,
byRows = FALSE, spread = FALSE, pointOnly = FALSE, lineOnly = TRUE, percent=FALSE,
FullExportingPath = NULL, ylim = FALSE)
{
if (byRows)
DataFrame = t(DataFrame)
if (is.null(colnames(DataFrame)))
colnames(DataFrame) <- paste0("COLUMN_", seq(1, dim(DataFrame)[2]))
if (class(DataFrame) == "matrix")
DataFrame <- as.data.frame(DataFrame)
if (AxisIncluded) {
my_column_to_plot_against <- colnames(DataFrame)[1]
}
else {
my_column_to_plot_against <- "TO_PLOT_AGAINST"
DataFrame$TO_PLOT_AGAINST <- as.numeric(seq(1, dim(DataFrame)[1]))
}
ToPlotDataFrame <- melt(DataFrame, my_column_to_plot_against)
ToPlotDataFrame <- ToPlotDataFrame[(ToPlotDataFrame$variable != "ABNORMAL_THRESHOLD"),]
ToPlotDataFrame <- ToPlotDataFrame[(ToPlotDataFrame$variable != "threshold"),]
g <- ggplot(ToPlotDataFrame, aes(x = ToPlotDataFrame[, c(my_column_to_plot_against)],
y = value, group = variable, color = variable, fill = variable))
if (pointOnly) {
g <- g + geom_point()
}
else if (lineOnly) {
g <- g + geom_line()
}
else {
g <- g + geom_line() + geom_point()
}
if ("threshold" %in% colnames(DataFrame)){
g <- g + geom_hline(aes(yintercept=0.95),colour = 'black', size = 1.5,linetype="dashed")
}
if ("ABNORMAL_THRESHOLD" %in% colnames(DataFrame)){
g <- g + geom_hline(aes(yintercept=0.95),colour = 'black', size = 1.5,linetype="dashed")
}
g <- g + xlab(XLab) + ylab(YLab) + ggtitle(Title) + theme(title = element_text(size = 16,
face = "bold")) + theme(axis.text.x = element_text(size = 14)) +
theme(legend.position = c(0.9, 0.9), legend.box = "vertical",
legend.text = element_text(size = 16)) + theme(legend.position = "bottom",
legend.title = element_blank())+theme(axis.text=element_text(size=14),
axis.title=element_text(size=16,face="bold"))
if (spread)
g <- g + facet_wrap(~variable)
if (percent){
g <- g + scale_y_continuous(labels = percent_format(),limits = c(-0, 1))
}
if (percent & ylim){
g <- g + scale_y_continuous(labels = percent_format(),limits = c(0.5, 0.9))
}
if ("DATE" == my_column_to_plot_against)
g <- g + scale_x_date()
if (!is.null(FullExportingPath))
RP_ExportPlot(g, FullExportingPath, "")
return(g)
}
outputGraphicsBestProfileStats <- function(product_criteria,aggregate_criteria,sentiment_criteria,similarity_gap_filter,ens_filter,event_number_event_filtering, gics_sector, my_event, localSource, dataFrame, plotInArborescence, Russell_version = "R1000", Together = FALSE, ylim = FALSE){
if(Together){
dataframestats <- dataFrame[,c("MINUTES","RDBA_COR_STATS_SIGN","RDBA_ORD_STATS_SIGN","RPNA_COR_STATS_SIGN","RPNA_ORD_STATS_SIGN")]
colnames(dataframestats) <- c("MINUTES","RDBA_SIGNIFICANCE(RANK)","RDBA_SIGNIFICANCE","RPNA_SIGNIFICANCE(RANK)","RPNA_SIGNIFICANCE")
} else {
dataframestats <- dataFrame[,c("MINUTES","COR_STATS_SIGN","ORD_STATS_SIGN")]
colnames(dataframestats) <- c("MINUTES","SIGNIFICANCE(RANK)","SIGNIFICANCE")
}
significance_threshold <- 1 - 0.05
dataframestats$ABNORMAL_THRESHOLD <- significance_threshold
dataframestats[is.na(dataframestats)] <- 0
g1 <- PlotDataFrame(dataframestats,AxisIncluded = T,XLab = "Minute Lags",YLab = "p-value",percent= TRUE, Title = paste0(my_event," statistical significance"), FullExportingPath = NULL, ylim = ylim)
# g2 <- RP_PlotDataFrame(dataframerets,AxisIncluded = T,XLab = "Minute Lags",YLab = "BPS cumulated minute returns",Title = my_event, FullExportingPath = NULL)
# g <- RP_ExportMultiplePlot(g1,g2, plotlist = NULL, filename = NULL, outputDataPath = NULL, cols = 1, width = 10, height = 15)
return(g1)
}
outputGraphicsBestProfileRets <- function(product_criteria,aggregate_criteria,sentiment_criteria,similarity_gap_filter,ens_filter,event_number_event_filtering, gics_sector, my_event, localSource, dataFrame, plotInArborescence, Russell_version = "R1000", Together = FALSE){
if(Together){
dataframerets <- dataFrame[,c("MINUTES","RDBA_RETS","RPNA_RETS")]
colnames(dataframerets) <- c("MINUTES","RDBA_RETURNS","RPNA_RETURNS")
# dataframerets$RDBA_RETURNS <- 10*dataframerets$RDBA_RETURNS
# dataframerets$RPNA_RETURNS <- 10*dataframerets$RPNA_RETURNS
} else {
dataframerets <- dataFrame[,c("MINUTES","RETS")]
colnames(dataframerets) <- c("MINUTES","RETURNS")
# dataframerets$RETURNS <- 10*dataframerets$RETURNS
}
# g1 <- RP_PlotDataFrame(dataframestats,AxisIncluded = T,XLab = "Minute Lags",YLab = "p-value",Title = my_event, FullExportingPath = NULL)
g2 <- PlotDataFrame(dataframerets,AxisIncluded = T,XLab = "Minute Lags",YLab = "BPS cumulated minute returns",Title = paste0(my_event," returns"), FullExportingPath = NULL)
# g <- RP_ExportMultiplePlot(g1,g2, plotlist = NULL, filename = NULL, outputDataPath = NULL, cols = 1, width = 10, height = 15)
return(g2)
}
outputGraphicsBestProfileVol <- function(product_criteria,aggregate_criteria,sentiment_criteria,similarity_gap_filter,ens_filter,event_number_event_filtering, gics_sector, my_event, localSource, dataFrame, plotInArborescence, Russell_version = "R1000", Together = FALSE){
if(Together){
dataframevol<- dataFrame[,c("MINUTES","RDBA_VOLUME","RPNA_VOLUME")]
} else {
dataframevol<- dataFrame[,c("MINUTES","VOLUME")]
}
# g1 <- RP_PlotDataFrame(dataframestats,AxisIncluded = T,XLab = "Minute Lags",YLab = "p-value",Title = my_event, FullExportingPath = NULL)
g2 <- PlotDataFrame(dataframevol,AxisIncluded = T,XLab = "Minute Lags",YLab = "Volume in billion dollars ",Title = paste0(my_event," abnormal volume"), FullExportingPath = NULL)
# g <- RP_ExportMultiplePlot(g1,g2, plotlist = NULL, filename = NULL, outputDataPath = NULL, cols = 1, width = 10, height = 15)
return(g2)
}
outputGraphicsBestProfileVola <- function(product_criteria,aggregate_criteria,sentiment_criteria,similarity_gap_filter,ens_filter,event_number_event_filtering, gics_sector, my_event, localSource, dataFrame, plotInArborescence, Russell_version = "R1000", Together = FALSE){
if(Together){
dataframervola<- dataFrame[,c("MINUTES","RDBA_VOLATILITY","RPNA_VOLATILITY")]
} else {
dataframervola<- dataFrame[,c("MINUTES","VOLATILITY")]
}
g2 <- PlotDataFrame(dataframervola,AxisIncluded = T,XLab = "Minute Lags",YLab = "Abnormal volatility ratio",Title = paste0(my_event," abnormal volatility"), FullExportingPath = NULL)
return(g2)
}
outputTogetherGraphicsBestProfileStats <- function(event_one, event_two, dataFrame, plotInArborescence, Russell_version = "R1000", Together = FALSE){
if(Together){
dataframestats <- dataFrame[,c("MINUTES",paste0(event_one,c("COR_STATS_SIGN_1","ORD_STATS_SIGN_1")),paste0(event_two,c("COR_STATS_SIGN_2","ORD_STATS_SIGN_2")))]
colnames(dataframestats) <- c("MINUTES",paste0(event_one,c("_rank_1","_stat_1")),paste0(event_two,c("_rank_2","_stat_2")))
} else {
dataframestats <- dataFrame[,c("MINUTES","COR_STATS_SIGN","ORD_STATS_SIGN")]
colnames(dataframestats) <- c("MINUTES","SIGNIFICANCE(RANK)","SIGNIFICANCE")
}
significance_threshold <- 1 - 0.05
dataframestats$threshold <- significance_threshold
dataframestats[is.na(dataframestats)] <- 0
g1 <- PlotDataFrame(dataframestats,AxisIncluded = T,XLab = "Minute Lags",YLab = "p-value",percent= TRUE, Title = "Statistical Significance", FullExportingPath = NULL)
# g2 <- RP_PlotDataFrame(dataframerets,AxisIncluded = T,XLab = "Minute Lags",YLab = "BPS cumulated minute returns",Title = my_event, FullExportingPath = NULL)
# g <- RP_ExportMultiplePlot(g1,g2, plotlist = NULL, filename = NULL, outputDataPath = NULL, cols = 1, width = 10, height = 15)
return(g1)
}
outputTogetherGraphicsBestProfileRets <- function(event_one, event_two, dataFrame, plotInArborescence, Russell_version = "R1000", Together = FALSE){
if(Together){
dataframerets <- dataFrame[,c("MINUTES",paste0(event_one,"RETS_1"),paste0(event_two,"RETS_2"))]
colnames(dataframerets) <- c("MINUTES",paste0(event_one,"_return_1"),paste0(event_two,"_return_2"))
dataframerets[,c("MINUTES",paste0(event_one,"_return_1"),paste0(event_two,"_return_2"))] <- 10*dataframerets[,c("MINUTES",paste0(event_one,"_return_1"),paste0(event_two,"_return_2"))]
} else {
dataframerets <- dataFrame[,c("MINUTES","RETS")]
colnames(dataframerets) <- c("MINUTES","RETURNS")
dataframerets$RETURNS <- 10*dataframerets$RETURNS
}
# g1 <- RP_PlotDataFrame(dataframestats,AxisIncluded = T,XLab = "Minute Lags",YLab = "p-value",Title = my_event, FullExportingPath = NULL)
g2 <- PlotDataFrame(dataframerets,AxisIncluded = T,XLab = "Minute Lags",YLab = "BPS cumulated minute returns",Title = "Abnormal Returns", FullExportingPath = NULL)
# g <- RP_ExportMultiplePlot(g1,g2, plotlist = NULL, filename = NULL, outputDataPath = NULL, cols = 1, width = 10, height = 15)
return(g2)
}
outputTogetherGraphicsBestProfileVol <- function(event_one, event_two, dataFrame, plotInArborescence, Russell_version = "R1000", Together = FALSE){
if(Together){
dataframevol <- dataFrame[,c("MINUTES",paste0(event_one,"VOLUME_1"),paste0(event_two,"VOLUME_2"))]
colnames(dataframevol) <- c("MINUTES",paste0(event_one,"_volume_1"),paste0(event_two,"_volume_2"))
} else {
dataframevol<- dataFrame[,c("MINUTES","VOLUME")]
}
# g1 <- RP_PlotDataFrame(dataframestats,AxisIncluded = T,XLab = "Minute Lags",YLab = "p-value",Title = my_event, FullExportingPath = NULL)
g2 <- PlotDataFrame(dataframevol,AxisIncluded = T,XLab = "Minute Lags",YLab = "Volume in billion dollars ",Title = "Abnormal Volume", FullExportingPath = NULL)
# g <- RP_ExportMultiplePlot(g1,g2, plotlist = NULL, filename = NULL, outputDataPath = NULL, cols = 1, width = 10, height = 15)
return(g2)
}
outputTogetherGraphicsBestProfileVola <- function(event_one, event_two, dataFrame, plotInArborescence, Russell_version = "R1000", Together = FALSE){
if(Together){
dataframervola <- dataFrame[,c("MINUTES",paste0(event_one,"VOLATILITY_1"),paste0(event_two,"VOLATILITY_2"))]
colnames(dataframervola) <- c("MINUTES",paste0(event_one,"_volatility_1"),paste0(event_two,"_volatility_2"))
} else {
dataframervola<- dataFrame[,c("MINUTES","VOLATILITY")]
}
g2 <- PlotDataFrame(dataframervola,AxisIncluded = T,XLab = "Minute Lags",YLab = "Abnormal volatility ratio",Title = "Abnormal Volatility", FullExportingPath = NULL)
return(g2)
}
ExportMultiplot <- function (..., plotlist = NULL, filename, outputDataPath, cols = 1,
width, height, layout = NULL)
{
filename <- paste(outputDataPath, filename, ".png", sep = "")
png(file = filename,
width = width * 100, height = height * 100)
library(grid)
plots <- c(list(...), plotlist)
numPlots = length(plots)
if (is.null(layout)) {
layout <- matrix(seq(1, cols * ceiling(numPlots/cols)),
ncol = cols, nrow = ceiling(numPlots/cols))
}
if (numPlots == 1) {
print(plots[[1]])
}
else {
grid.newpage()
pushViewport(viewport(layout = grid.layout(nrow(layout),
ncol(layout))))
for (i in 1:numPlots) {
matchidx <- as.data.frame(which(layout == i, arr.ind = TRUE))
print(plots[[i]], vp = viewport(layout.pos.row = matchidx$row,
layout.pos.col = matchidx$col))
}
}
dev.off()
return(filename)
}
getPlotBestProfiles <- function(aggregate_criteria, data, all_group_events, all_category_events,suffix){
plotLimit <- 6
if (aggregate_criteria == "GROUP"){
plotLimit <- 6
all_events <- all_group_events
} else {
plotLimit <- 16
all_events <- all_category_events
}
for (i in 1:min(plotLimit, dim(data)[1])){
rowProfile <- data[i,]
stats_sign <- colnames(rowProfile)[which(!is.na(as.numeric(colnames(rowProfile))))]
rets <- paste0("RET",colnames(rowProfile)[which(!is.na(as.numeric(colnames(rowProfile))))])
ord_stats_sign <- paste0("ORD",colnames(rowProfile)[which(!is.na(as.numeric(colnames(rowProfile))))])
vol_stats_sign <- paste0("VOLU",colnames(rowProfile)[which(!is.na(as.numeric(colnames(rowProfile))))])
vola_stats_sign <- paste0("VOLA",colnames(rowProfile)[which(!is.na(as.numeric(colnames(rowProfile))))])
stats_sign <- rowProfile[,stats_sign]
rets <- rowProfile[,rets]
colnames(rets) <- colnames(stats_sign)
ord_stats_sign <- rowProfile[,ord_stats_sign]
colnames(ord_stats_sign) <- colnames(stats_sign)
vol_stats_sign <- rowProfile[,vol_stats_sign]
colnames(vol_stats_sign) <- colnames(stats_sign)
vola_stats_sign <- rowProfile[,vola_stats_sign]
colnames(vola_stats_sign) <- colnames(stats_sign)
dataframe <- as.data.frame(t(rbind(stats_sign,ord_stats_sign,vol_stats_sign,vola_stats_sign,rets)))
colnames(dataframe) <- c("COR_STATS_SIGN","ORD_STATS_SIGN","VOLUME","VOLATILITY","RETS")
dataframe$MINUTES <- as.numeric(colnames(rowProfile)[which(!is.na(as.numeric(colnames(rowProfile))))])
dataframe <- dataframe[,c("MINUTES","COR_STATS_SIGN","ORD_STATS_SIGN","VOLUME","VOLATILITY","RETS")]
gret <- outputGraphicsBestProfileRets(rowProfile$product_criteria,rowProfile$aggregate_criteria,rowProfile$sentiment_criteria,rowProfile$similarity_gap_filter,rowProfile$ens_filter,rowProfile$event_number_event_filtering, rowProfile$gics_sector, rowProfile$EVENT, rowProfile$localSource, dataFrame = dataframe, FALSE, Russell_version = "R1000")
gvol <- outputGraphicsBestProfileVola(rowProfile$product_criteria,rowProfile$aggregate_criteria,rowProfile$sentiment_criteria,rowProfile$similarity_gap_filter,rowProfile$ens_filter,rowProfile$event_number_event_filtering, rowProfile$gics_sector, rowProfile$EVENT, rowProfile$localSource, dataFrame = dataframe, FALSE, Russell_version = "R1000")
assign(paste0("gret", i), gret)
assign(paste0("gvol", i), gvol)
}
width=20
height=22.5
gname <- NULL
if (aggregate_criteria == "GROUP"){
gname <- ExportMultiplot(gret1,gvol1,gret2,gvol2,gret3,gvol3,gret4,gvol4,gret5,gvol5,plotlist = NULL, filename = paste0("bestGroups",suffix), outputDataPath = paste0(outputDataPath,"BEST_PICTURES/"), cols = 5, width = width, height = height)
} else {
gname <- ExportMultiplot(gret1,gvol1,
gret2,gvol2,
gret3,gvol3,
gret4,gvol4,
gret5,gvol5,
gret6,gvol6,
gret7,gvol7,
gret8,gvol8,
gret9,gvol9,
gret10,gvol10,
plotlist = NULL, filename = paste0("bestCategories",suffix), outputDataPath = paste0(outputDataPath,"BEST_PICTURES/"), cols = 5, width = width, height = height)
}
# print("multiple plot done")
return(gname)
#
# Counter <- 1
# Events <- NULL
# i <- 1
# fo
#
# while (Counter < plotLimit & i <= dim(data)[1]){
# rowProfile <- rpna_dataTotr1000[i,]
# stats_sign <- colnames(rowProfile)[which(!is.na(as.numeric(colnames(rowProfile))))]
# rets <- paste0("RET",colnames(rowProfile)[which(!is.na(as.numeric(colnames(rowProfile))))])
# ord_stats_sign <- paste0("ORD",colnames(rowProfile)[which(!is.na(as.numeric(colnames(rowProfile))))])
# vol_stats_sign <- paste0("VOLU",colnames(rowProfile)[which(!is.na(as.numeric(colnames(rowProfile))))])
# vola_stats_sign <- paste0("VOLA",colnames(rowProfile)[which(!is.na(as.numeric(colnames(rowProfile))))])
#
# stats_sign <- rowProfile[,stats_sign]
# rets <- rowProfile[,rets]
# colnames(rets) <- colnames(stats_sign)
# ord_stats_sign <- rowProfile[,ord_stats_sign]
# colnames(ord_stats_sign) <- colnames(stats_sign)
# vol_stats_sign <- rowProfile[,vol_stats_sign]
# colnames(vol_stats_sign) <- colnames(stats_sign)
# vola_stats_sign <- rowProfile[,vola_stats_sign]
# colnames(vola_stats_sign) <- colnames(stats_sign)
#
# dataframe <- as.data.frame(t(rbind(stats_sign,ord_stats_sign,vol_stats_sign,vola_stats_sign,rets)))
# colnames(dataframe) <- c("COR_STATS_SIGN","ORD_STATS_SIGN","VOLUME","VOLATILITY","RETS")
# dataframe$MINUTES <- as.numeric(colnames(rowProfile)[which(!is.na(as.numeric(colnames(rowProfile))))])
# dataframe <- dataframe[,c("MINUTES","COR_STATS_SIGN","ORD_STATS_SIGN","VOLUME","VOLATILITY","RETS")]
#
#
# gret <- outputGraphicsBestProfileRets(rowProfile$product_criteria,rowProfile$aggregate_criteria,rowProfile$sentiment_criteria,rowProfile$similarity_gap_filter,rowProfile$ens_filter,rowProfile$event_number_event_filtering, rowProfile$gics_sector, rowProfile$EVENT, rowProfile$localSource, dataFrame = dataframe, FALSE, Russell_version = "R1000")
#
# gvol <- outputGraphicsBestProfileVola(rowProfile$product_criteria,rowProfile$aggregate_criteria,rowProfile$sentiment_criteria,rowProfile$similarity_gap_filter,rowProfile$ens_filter,rowProfile$event_number_event_filtering, rowProfile$gics_sector, rowProfile$EVENT, rowProfile$localSource, dataFrame = dataframe, FALSE, Russell_version = "R1000")
#
# # eval.parent(expr = paste0("g", i, " <- g"))
#
# if((rowProfile$EVENT %in% all_events) & Counter < plotLimit){
# if (Counter == 1){
# assign(paste0("gret", Counter), gret)
# assign(paste0("gvol", Counter), gvol)
# Counter <- Counter+1
# Events <- c(Events,rowProfile$EVENT)
# } else if (!(rowProfile$EVENT %in% Events)){
# assign(paste0("gret", Counter), gret)
# assign(paste0("gvol", Counter), gvol)
# Counter <- Counter+1
# Events <- c(Events,rowProfile$EVENT)
# }
# }
#
# print(Counter)
# print(i)
# # width=15
# # height=9
# # gout <- RP_ExportMultiplePlot(g,g, plotlist = NULL, filename = "best", outputDataPath = paste0(outputDataPath,"BEST_PICTURES/"), cols = 2, width = width, height = height)
# # print(gout)
# print("profile displayed")
# i <- i+1
#
# }
#
# print("multiple plot aggregating done")
# width=15
# height=9
# g <- NULL
# if (aggregate_criteria == "GROUP"){
# g <- ExportMultiplot(gret1,gvol1,gret2,gvol2,gret3,gvol3,gret4,gvol4,gret5,gvol5,plotlist = NULL, filename = paste0("bestGroups"), outputDataPath = paste0(outputDataPath,"BEST_PICTURES/"), cols = 5, width = width, height = height)
# } else {
# g <- ExportMultiplot(gret1,gvol1,
# gret2,gvol2,
# gret3,gvol3,
# gret4,gvol4,
# gret5,gvol5,
# gret6,gvol6,
# gret7,gvol7,
# gret8,gvol8,
# gret9,gvol9,
# gret10,gvol10,
# plotlist = NULL, filename = paste0("bestCategories"), outputDataPath = paste0(outputDataPath,"BEST_PICTURES/"), cols = 5, width = width, height = height)
# }
# print("multiple plot done")
# return(g)
}
RP_PlotCIIntervalSuperposedStats <- function(DataFrame, Title = "", FullExportingPath=NULL) {
MyTitle <- ""
# toplotDFOne<- DataFrameOne[,c("MINUTES","SIGNIFICANCE_CI_LOW","SIGNIFICANCE","SIGNIFICANCE_CI_HIGH")]
# # colnames(toplotDFOne) <- c("MINUTES","CI_LOW","MEAN","CI_HIGH")
# toplotDFTwo<- DataFrameTwo[,c("MINUTES","SIGNIFICANCE(RANK)_CI_LOW","SIGNIFICANCE(RANK)","SIGNIFICANCE(RANK)_CI_HIGH")]
# # colnames(toplotDFTwo) <- c("MINUTES","CI_LOW","MEAN","CI_HIGH")
#
MyTitle <- "Statistical significance"
MyYLabel <- "Certainty"
#
# g <- ggplot(DataFrame)+
# geom_line(data=DataFrame, aes(x=MINUTES, y=FREQ_MEAN), size=1.2,show_guide = T)+#, colour="blue")+
# geom_ribbon(data=DataFrame,aes(x=MINUTES,ymin=FREQ_CI_LOW,ymax=FREQ_CI_HIGH),show_guide = F,alpha=0.25,colour="blue",fill="blue")+#, fill="steelblue1", color="steelblue1")+
# geom_line(data=DataFrame, aes(x=MINUTES, y=RET_MEAN), size=1.2,show_guide = T)+#, colour="red")+
# geom_ribbon(data=DataFrame,aes(x=MINUTES,ymin=RET_CI_LOW,ymax=RET_CI_HIGH),show_guide = F,alpha=0.25,colour="green",fill="green")+#, fill="steelblue2", color="steelblue3")+
# geom_line(data=DataFrame, aes(x=MINUTES, y=VOL_MEAN), size=1.2,show_guide = T)+#, colour="red")+
# geom_ribbon(data=DataFrame,aes(x=MINUTES,ymin=VOL_CI_LOW,ymax=VOL_CI_HIGH),show_guide = F,alpha=0.25,colour="red",fill="red")+#, fill="steelblue3", color="steelblue3")+
# xlab("Minute Lags") +
# ylab(MyYLabel) +
# ggtitle(MyTitle)+
# theme(title = element_text(size = 28, face = "bold")) +
# theme(axis.text.x = element_text(size = 24)) +
# theme(axis.title.x = element_text(size = 24)) +
# theme(axis.text.y = element_text(size = 24)) +
# theme(axis.title.y = element_text(size = 24)) +
# theme(legend.position = c(0.9, 0.9), legend.box = "vertical",
# legend.text = element_text(size = 22)) + theme(legend.position = "bottom",
# legend.title = element_blank())
# # theme(legend.position = "bottom")+
# # theme(axis.text=element_text(size=16),
# # axis.title=element_text(size=18,face="bold")) +
# # theme(plot.title = element_text(size = 25, face = "bold"))
# g <- g + geom_vline(aes(xintercept=0),colour = 'black', size = 1.5,linetype="dashed")
# g <- g + scale_colour_brewer(palette = "Greens")
# if (!is.null(FullExportingPath)){
# RP_ExportPlot(g,FullExportingPath,"")
# }
#
#
DataFrame <- melt(DataFrame,"MINUTES")
DataFrame$variable <- as.factor(DataFrame$variable)
g <- ggplot(DataFrame,aes(x=MINUTES, y=value, fill=variable, group=variable, linetype = variable,color = variable))+
# geom_line(aes(linetype=variable, color=variable), size=1.2,)+
geom_line(size=1.2,alpha=0.75)+
scale_linetype_manual(values=c("dashed","solid","dashed","dashed","solid","dashed"))+
scale_color_manual(values=c("blue","blue","blue","steelblue","steelblue","steelblue"))+
labs(color = "Metrics",linetype= "Metrics")+
labs(fill="",color = "Metrics",linetype= "Metrics")+
# scale_size(guide = "none")+
# scale_fill_discrete(breaks=c("FREQ_MEAN","RET_MEAN","VOL_MEAN"))+
# scale_fill_discrete(breaks=c("FREQ_MEAN","RET_MEAN","VOL_MEAN"), values=c("blue","blue","blue","green","green","green","red","red","red"))+
# scale_fill_manual(breaks=c("FREQ_MEAN","RET_MEAN","VOL_MEAN"), values=c("blue","green","red"))+
# scale_fill_manual(breaks=c("FREQ_MEAN","RET_MEAN","VOL_MEAN"),
# +
#, colour="blue")+
# geom_ribbon(data=DataFrame,aes(x=MINUTES,ymin=FREQ_CI_LOW,ymax=FREQ_CI_HIGH),show_guide = F,alpha=0.25,colour="blue",fill="blue")+#, fill="steelblue1", color="steelblue1")+
# geom_line(data=DataFrame, aes(x=MINUTES, y=RET_MEAN), size=1.2,show_guide = T)+#, colour="red")+
# geom_ribbon(data=DataFrame,aes(x=MINUTES,ymin=RET_CI_LOW,ymax=RET_CI_HIGH),show_guide = F,alpha=0.25,colour="green",fill="green")+#, fill="steelblue2", color="steelblue3")+
# geom_line(data=DataFrame, aes(x=MINUTES, y=VOL_MEAN), size=1.2,show_guide = T)+#, colour="red")+
# geom_ribbon(data=DataFrame,aes(x=MINUTES,ymin=VOL_CI_LOW,ymax=VOL_CI_HIGH),show_guide = F,alpha=0.25,colour="red",fill="red")+#, fill="steelblue3", color="steelblue3")+
xlab("Minute Lags") +
ylab(MyYLabel) +
# ggtitle(MyTitle)+
# theme(title = element_text(size = 28, face = "bold")) +
scale_x_continuous(breaks=c(-180,-120,-60,0,60,120,180))+
theme(axis.text.x = element_text(size = 30)) +
theme(axis.title.x = element_text(size = 30)) +
theme(axis.text.y = element_text(size = 30)) +
theme(axis.title.y = element_text(size = 30)) +
theme(legend.position = c(0.9, 0.9), legend.box = "vertical",
legend.text = element_text(size = 22)) + theme(legend.position = "bottom",
legend.title = element_blank())
# theme(legend.position = "bottom")+
# scale_colour_manual(name="Legend", values = c("a" = "black", "b" = "red", "c" = "blue","a" = "black", "b" = "red", "c" = "blue","a" = "black", "b" = "red", "c" = "blue")) +
# scale_linetype_manual(name="Legend", values = c("a" = "dashed", "b" = "dotted", "c" = "dotted")) +
# scale_colour_manual(name="Legend", values = c("FREQ_MEAN" = "blue", "RET_MEAN" = "green", "VOL_MEAN" = "red")) +
# scale_linetype_manual(name="Legend", values = c("FREQ_MEAN" = "dashed", "RET_MEAN" = "dotted", "VOL_MEAN" = "dotted")) +
# adjust the colours to those you wanted
# scale_colour_manual(values = c("black","red", "blue"))+
# stick the legend on the bottom
# guides(color=guide_legend("my title"))+
# theme( legend.position = "bottom")+
# theme(axis.text=element_text(size=16),
# axis.title=element_text(size=18,face="bold")) +
# theme(plot.title = element_text(size = 25, face = "bold"))
g <- g + geom_vline(aes(xintercept=0),colour = 'black', size = 1.5,linetype="dashed")
# g <- g + scale_colour_brewer(palette = "Greens")
# g <- ggplot(DataFrame)+
# geom_line(data=DataFrame, aes(x=MINUTES, y=FREQ_MEAN), size=1.2,show_guide = T)+#, colour="blue")+
# geom_ribbon(data=DataFrame,aes(x=MINUTES,ymin=FREQ_CI_LOW,ymax=FREQ_CI_HIGH),show_guide = F,alpha=0.25,colour="blue",fill="blue")+#, fill="steelblue1", color="steelblue1")+
# geom_line(data=DataFrame, aes(x=MINUTES, y=RET_MEAN), size=1.2,show_guide = T)+#, colour="red")+
# geom_ribbon(data=DataFrame,aes(x=MINUTES,ymin=RET_CI_LOW,ymax=RET_CI_HIGH),show_guide = F,alpha=0.25,colour="green",fill="green")+#, fill="steelblue2", color="steelblue3")+
# geom_line(data=DataFrame, aes(x=MINUTES, y=VOL_MEAN), size=1.2,show_guide = T)+#, colour="red")+
# geom_ribbon(data=DataFrame,aes(x=MINUTES,ymin=VOL_CI_LOW,ymax=VOL_CI_HIGH),show_guide = F,alpha=0.25,colour="red",fill="red")+#, fill="steelblue3", color="steelblue3")+
# xlab("Minute Lags") +
# ylab(MyYLabel) +
# ggtitle(MyTitle)+
# scale_colour_manual(name="Legend", values = c("FREQ_MEAN" = "blue", "RET_MEAN" = "green", "VOL_MEAN" = "red")) +
# # scale_linetype_manual(name="Legend", values = c("FREQ_MEAN" = "dashed", "RET_MEAN" = "dotted", "VOL_MEAN" = "dotted")) +
# # adjust the colours to those you wanted
# scale_colour_manual(values = c("black","red", "blue"))+
# # stick the legend on the bottom
# # theme( legend.position = "bottom")+
# theme(axis.text=element_text(size=16),
# axis.title=element_text(size=18,face="bold")) +
# theme(plot.title = element_text(size = 25, face = "bold"))
# g <- g + geom_vline(aes(xintercept=0),colour = 'black', size = 1.5,linetype="dashed")
# g <- g + scale_colour_brewer(palette = "Greens")
if (!is.null(FullExportingPath)){
RP_ExportPlot(g,FullExportingPath,"")
}
return(g)
}
RP_PlotProfileTogether <- function(rowProfilePos,rowProfileNeg){
stats_sign <- colnames(rowProfilePos)[which(!is.na(as.numeric(colnames(rowProfilePos))))]
rets <- paste0("RET",colnames(rowProfilePos)[which(!is.na(as.numeric(colnames(rowProfilePos))))])
ord_stats_sign <- paste0("ORD",colnames(rowProfilePos)[which(!is.na(as.numeric(colnames(rowProfilePos))))])
vol_stats_sign <- paste0("VOLU",colnames(rowProfilePos)[which(!is.na(as.numeric(colnames(rowProfilePos))))])
vola_stats_sign <- paste0("VOLA",colnames(rowProfilePos)[which(!is.na(as.numeric(colnames(rowProfilePos))))])
stats_sign <- rowProfilePos[,stats_sign]
rets <- rowProfilePos[,rets]
colnames(rets) <- colnames(stats_sign)
ord_stats_sign <- rowProfilePos[,ord_stats_sign]
colnames(ord_stats_sign) <- colnames(stats_sign)
vol_stats_sign <- rowProfilePos[,vol_stats_sign]
colnames(vol_stats_sign) <- colnames(stats_sign)
vola_stats_sign <- rowProfilePos[,vola_stats_sign]
colnames(vola_stats_sign) <- colnames(stats_sign)
# dataframe <- as.data.frame(t(rbind(stats_sign,ord_stats_sign,vol_stats_sign,vola_stats_sign,rets)))
dataframePos <- as.data.frame(t(rbind(vol_stats_sign,vola_stats_sign,rets)))
colnames(dataframePos) <- c("VOLUME_CI_LOW","VOLUME","VOLUME_CI_HIGH","VOLATILITY_CI_LOW","VOLATILITY","VOLATILITY_CI_HIGH","RETS_CI_LOW","RETS","RETS_CI_HIGH")
dataframePos$MINUTES <- as.numeric(colnames(rowProfilePos)[which(!is.na(as.numeric(colnames(rowProfilePos))))])
dataframePos <- dataframePos[,c("MINUTES","VOLUME_CI_LOW","VOLUME","VOLUME_CI_HIGH","VOLATILITY_CI_LOW","VOLATILITY","VOLATILITY_CI_HIGH","RETS_CI_LOW","RETS","RETS_CI_HIGH")]
stats_sign <- colnames(rowProfileNeg)[which(!is.na(as.numeric(colnames(rowProfileNeg))))]
rets <- paste0("RET",colnames(rowProfileNeg)[which(!is.na(as.numeric(colnames(rowProfileNeg))))])
ord_stats_sign <- paste0("ORD",colnames(rowProfileNeg)[which(!is.na(as.numeric(colnames(rowProfileNeg))))])
vol_stats_sign <- paste0("VOLU",colnames(rowProfileNeg)[which(!is.na(as.numeric(colnames(rowProfileNeg))))])
vola_stats_sign <- paste0("VOLA",colnames(rowProfileNeg)[which(!is.na(as.numeric(colnames(rowProfileNeg))))])
stats_sign <- rowProfileNeg[,stats_sign]
rets <- rowProfileNeg[,rets]
colnames(rets) <- colnames(stats_sign)
ord_stats_sign <- rowProfileNeg[,ord_stats_sign]
colnames(ord_stats_sign) <- colnames(stats_sign)
vol_stats_sign <- rowProfileNeg[,vol_stats_sign]
colnames(vol_stats_sign) <- colnames(stats_sign)
vola_stats_sign <- rowProfileNeg[,vola_stats_sign]
colnames(vola_stats_sign) <- colnames(stats_sign)
# dataframe <- as.data.frame(t(rbind(stats_sign,ord_stats_sign,vol_stats_sign,vola_stats_sign,rets)))
dataframeNeg <- as.data.frame(t(rbind(vol_stats_sign,vola_stats_sign,rets)))
colnames(dataframeNeg) <- c("VOLUME_CI_LOW","VOLUME","VOLUME_CI_HIGH","VOLATILITY_CI_LOW","VOLATILITY","VOLATILITY_CI_HIGH","RETS_CI_LOW","RETS","RETS_CI_HIGH")
dataframeNeg$MINUTES <- as.numeric(colnames(rowProfileNeg)[which(!is.na(as.numeric(colnames(rowProfileNeg))))])
dataframeNeg <- dataframeNeg[,c("MINUTES","VOLUME_CI_LOW","VOLUME","VOLUME_CI_HIGH","VOLATILITY_CI_LOW","VOLATILITY","VOLATILITY_CI_HIGH","RETS_CI_LOW","RETS","RETS_CI_HIGH")]
gret <- outputGraphicsBestProfileCITogether(dataframeNeg,dataframePos,Russell_version = "R1000", type = "RET")
# gvol <- outputGraphicsBestProfileCITogether(dataframeNeg,dataframePos,Russell_version = "R1000", type = "VOL")
# gvolu <- outputGraphicsBestProfileCITogether(dataframeNeg,dataframePos,Russell_version = "R1000", type = "VOLU")
return(list(gret=gret))
}
outputGraphicsBestProfileCITogether <- function( dataNeg,dataPos,Russell_version = "R1000", type = "RET"){
toplotDF <- NULL
MyTitle <- ""
if (type == "VOL"){
toplotDFPos<- dataPos[,c("MINUTES","VOLATILITY_CI_LOW","VOLATILITY","VOLATILITY_CI_HIGH")]
colnames(toplotDFPos) <- c("MINUTES","CI_LOW","MEAN","CI_HIGH")
toplotDFNeg<- dataNeg[,c("MINUTES","VOLATILITY_CI_LOW","VOLATILITY","VOLATILITY_CI_HIGH")]
colnames(toplotDFNeg) <- c("MINUTES","CI_LOW","MEAN","CI_HIGH")
MyTitle <- "Abnormal volatility"
}
if(type =="RET"){
toplotDFPos<- dataPos[,c("MINUTES","RETS_CI_LOW","RETS","RETS_CI_HIGH")]
colnames(toplotDFPos) <- c("MINUTES","CI_LOW","MEAN","CI_HIGH")
toplotDFNeg<- dataNeg[,c("MINUTES","RETS_CI_LOW","RETS","RETS_CI_HIGH")]
colnames(toplotDFNeg) <- c("MINUTES","CI_LOW","MEAN","CI_HIGH")
MyTitle <- "Abnormal returns"
}
if(type =="VOLU"){
toplotDFPos<- dataPos[,c("MINUTES","VOLUME_CI_LOW","VOLUME","VOLUME_CI_HIGH")]
colnames(toplotDFPos) <- c("MINUTES","CI_LOW","MEAN","CI_HIGH")
toplotDFNeg<- dataNeg[,c("MINUTES","VOLUME_CI_LOW","VOLUME","VOLUME_CI_HIGH")]
colnames(toplotDFNeg) <- c("MINUTES","CI_LOW","MEAN","CI_HIGH")
MyTitle <- "Abnormal volume"
}
g2 <- RP_PlotCIIntervalSuperposedSoft(DataFramePos = toplotDFPos,DataFrameNeg = toplotDFNeg, Title = MyTitle, FullExportingPath = NULL)
return(g2)
}
RP_PlotCIIntervalSuperposedSoft <- function(DataFramePos, DataFrameNeg, Title = "", FullExportingPath=NULL) {
print("superposing")
colnames(DataFramePos) <- paste0("POS_",colnames(DataFramePos))
colnames(DataFramePos)[1] <- "MINUTES"
colnames(DataFrameNeg) <- paste0("NEG_",colnames(DataFrameNeg))
colnames(DataFrameNeg)[1] <- "MINUTES"
DataFrame <- merge(DataFramePos, DataFrameNeg, by="MINUTES")
# mav <- function(x){stats::filter(x,rep(1/3,3), sides=2)}
#
# DataFrame$POS_CI_LOW <- 3/4*( DataFrame$POS_CI_LOW) + 1/4*(mav(DataFrame$POS_CI_LOW))
# DataFrame$POS_CI_HIGH <- 3/4*(DataFrame$POS_CI_HIGH) + 1/4*mav(DataFrame$POS_CI_HIGH)
# DataFrame$POS_MEAN <- 3/4*(DataFrame$POS_MEAN) + 1/4*mav(DataFrame$POS_MEAN)
# DataFrame$NEG_MEAN <- 3/4*(DataFrame$NEG_MEAN) + 1/4*mav(DataFrame$NEG_MEAN)
# DataFrame$NEG_CI_LOW <- 3/4*(DataFrame$NEG_CI_LOW)+ 1/4*mav(DataFrame$NEG_CI_LOW)
# DataFrame$NEG_CI_HIGH <- 3/4*(DataFrame$NEG_CI_HIGH)+ 1/4*mav(DataFrame$NEG_CI_HIGH)
# #
# DataFrame <- DataFrame[complete.cases(DataFrame),]
if (short == "short") {
DataFrame <- DataFrame[abs(DataFrame$MINUTES) <= 90,]
DataFrame <- DataFrame[DataFrame$MINUTES >= -50,]
}
#
# DataFrame <- DataFrame[abs(DataFrame$MINUTES) <= 100,]
#
# DataFrameDown <- DataFrame[DataFrame$MINUTES <= 0,]
# DataFrameUp <- DataFrame[DataFrame$MINUTES > 0,]
# DataFrameUp$MINUTES <- 2* DataFrameUp$MINUTES
#
# DataFrame <- DataFrame[abs(DataFrame$MINUTES) <= 180,]
#
# DataFrame <- rbind(DataFrameDown,DataFrameUp)
print("hard soft coloring")
g <- ggplot(DataFrame)+
geom_line(data=DataFrame, aes(x=MINUTES, y=POS_MEAN), size=1.5,alpha=1,show_guide = T, colour="#00BA38")+
geom_ribbon(data=DataFrame,aes(x=MINUTES,ymin=POS_CI_LOW,ymax=POS_CI_HIGH),show_guide = F,alpha=0.25,colour="#00BA38",fill="#00BA38")+
geom_line(data=DataFrame, aes(x=MINUTES, y=NEG_MEAN), size=1.5,alpha=1,show_guide = T, colour="#619CFF")+
geom_ribbon(data=DataFrame,aes(x=MINUTES,ymin=NEG_CI_LOW,ymax=NEG_CI_HIGH),show_guide = F,alpha=0.25,colour="#619CFF",fill="#619CFF")+
xlab("Minute Lags") +
ylab("BPS cumulated minute return") +
# ggtitle(Title)+
scale_x_continuous(breaks=c(-180,-120,-60,0,60,120,180))+
# theme(title = element_text(size = 28, face = "bold")) +
theme(axis.text.x = element_text(size = 30)) +
theme(axis.title.x = element_text(size = 30)) +
theme(axis.text.y = element_text(size = 30)) +
theme(axis.title.y = element_text(size = 30))
# theme(legend.position = c(0.9, 0.9), legend.box = "vertical",
# legend.text = element_text(size = 22)) + theme(legend.position = "bottom",
# legend.title = element_blank())
g <- g + geom_vline(aes(xintercept=0),colour = 'black', size = 1.5,linetype="dashed")
if (!is.null(FullExportingPath)){
RP_ExportPlot(g,FullExportingPath,"")
}
return(g)
}
RP_PlotCIIntervalSuperposed <- function(DataFramePos, DataFrameNeg, Title = "", FullExportingPath=NULL) {
print("superposing")
colnames(DataFramePos) <- paste0("POS_",colnames(DataFramePos))
colnames(DataFramePos)[1] <- "MINUTES"
colnames(DataFrameNeg) <- paste0("NEG_",colnames(DataFrameNeg))
colnames(DataFrameNeg)[1] <- "MINUTES"
DataFrame <- merge(DataFramePos, DataFrameNeg, by="MINUTES")
mav <- function(x){stats::filter(x,rep(1/3,3), sides=2)}
DataFrame$POS_CI_LOW <- 3/4*( DataFrame$POS_CI_LOW) + 1/4*(mav(DataFrame$POS_CI_LOW))
DataFrame$POS_CI_HIGH <- 3/4*(DataFrame$POS_CI_HIGH) + 1/4*mav(DataFrame$POS_CI_HIGH)
DataFrame$POS_MEAN <- 3/4*(DataFrame$POS_MEAN) + 1/4*mav(DataFrame$POS_MEAN)
DataFrame$NEG_MEAN <- 3/4*(DataFrame$NEG_MEAN) + 1/4*mav(DataFrame$NEG_MEAN)
DataFrame$NEG_CI_LOW <- 3/4*(DataFrame$NEG_CI_LOW)+ 1/4*mav(DataFrame$NEG_CI_LOW)
DataFrame$NEG_CI_HIGH <- 3/4*(DataFrame$NEG_CI_HIGH)+ 1/4*mav(DataFrame$NEG_CI_HIGH)
#
DataFrame <- DataFrame[complete.cases(DataFrame),]
# DataFrame <- DataFrame[abs(DataFrame$MINUTES) <= 90,]
# DataFrame <- DataFrame[DataFrame$MINUTES >= -50,]
#
# DataFrame <- DataFrame[abs(DataFrame$MINUTES) <= 100,]
#
# DataFrameDown <- DataFrame[DataFrame$MINUTES <= 0,]
# DataFrameUp <- DataFrame[DataFrame$MINUTES > 0,]
# DataFrameUp$MINUTES <- 2* DataFrameUp$MINUTES
#
# DataFrame <- DataFrame[abs(DataFrame$MINUTES) <= 180,]
#
# DataFrame <- rbind(DataFrameDown,DataFrameUp)
g <- ggplot(DataFrame)+
    geom_line(data=DataFrame, aes(x=MINUTES, y=POS_MEAN), size=1.5, alpha=1, show.legend = TRUE, colour="#619CFF")+
    geom_ribbon(data=DataFrame, aes(x=MINUTES, ymin=POS_CI_LOW, ymax=POS_CI_HIGH), show.legend = FALSE, alpha=0.25, colour="#619CFF", fill="#619CFF")+
    geom_line(data=DataFrame, aes(x=MINUTES, y=NEG_MEAN), size=1.5, alpha=1, show.legend = TRUE, colour="#F8766D")+
    geom_ribbon(data=DataFrame, aes(x=MINUTES, ymin=NEG_CI_LOW, ymax=NEG_CI_HIGH), show.legend = FALSE, alpha=0.25, colour="#F8766D", fill="#F8766D")+
    xlab("Minute Lags") +
    ylab("Cumulative minute return (bps)") +
# ggtitle(Title)+
# theme(title = element_text(size = 28, face = "bold")) +
scale_x_continuous(breaks=c(-180,-120,-60,0,60,120,180))+
theme(axis.text.x = element_text(size = 30)) +
theme(axis.title.x = element_text(size = 30)) +
theme(axis.text.y = element_text(size = 30)) +
theme(axis.title.y = element_text(size = 30))
# theme(legend.position = c(0.9, 0.9), legend.box = "vertical",
# legend.text = element_text(size = 22)) + theme(legend.position = "bottom",
# legend.title = element_blank())
g <- g + geom_vline(aes(xintercept=0),colour = 'black', size = 1.5,linetype="dashed")
if (!is.null(FullExportingPath)){
RP_ExportPlot(g,FullExportingPath,"")
}
return(g)
}
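# trimBest: for a single event, keep the one row that maximises the chosen
# metric; signed return columns are flipped for NEGATIVE-sentiment rows so
# that all profiles are oriented the same way.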
trimBest <- function(dfrow,metrics_to_use){
trimmed_df <- dfrow[dfrow[,metrics_to_use] >= max(dfrow[,metrics_to_use]),]
toReturn <- trimmed_df[1,]
stats_prepost <- colnames(dfrow)[which(as.numeric(colnames(dfrow))>= -180)]
ret_prepost <- paste0("RET",stats_prepost)
if(toReturn$sentiment == "NEGATIVE"){
toReturn[,ret_prepost] <- -toReturn[,ret_prepost]
}
return(toReturn)
}
# computeCIbound: bootstrap a 90% confidence interval for the mean of a
# return vector; returns c(lower, mean, upper), or c(0, 0, 0) for all-zero input.
computeCIbound <- function(my_return_vector){
  print("entering")
  my_return_vector <- my_return_vector[!is.na(my_return_vector)]
  if (sum(my_return_vector) != 0){
    theta.boot.mean <- boot(my_return_vector, bootThetaMean, R=2000)
    mean_ci <- boot.ci(theta.boot.mean, conf=0.9)
    return(c(mean_ci$normal[2], mean(my_return_vector), mean_ci$normal[3]))
  } else {
    return(c(0,0,0))
  }
}
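# NOTE (assumption): bootThetaMean is referenced above but defined elsewhere
# in this codebase. A minimal fallback consistent with boot()'s statistic
# interface (data plus resampled indices, returning the resampled mean) is:
if (!exists("bootThetaMean")) {
  bootThetaMean <- function(x, idx) mean(x[idx], na.rm = TRUE)
}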
################################
################################
################################
################################
################################
################################
################################ End of Plotting functions
my_metrics <-
c(
# "card_post_return",
"card_post_ranked_return",
# "post_return",
"post_ranked_return"
# "post_volatility",
# "post_ranked_volatility"
# "card_pre_return",
# "card_pre_ranked_return",
# "pre_return",
# "pre_ranked_return",
# "pre_volatility",
# "pre_ranked_volatility",
# "volatility_correction",
# "ranked_volatility_correction",
# "return_correction",
# "ranked_return_correction",
# "card_return_correction",
# "card_ranked_return_correction"
)
print("Reading the taxonomy mapping SOFT versus unSOFT file")
MappingGroupCategory_RPData <- readRDS(file = paste0(outputDataPath, "bigdata_group_category.rds"))
colnames(MappingGroupCategory_RPData) <- toupper(colnames(MappingGroupCategory_RPData))
flagged_group <- read.csv(file = paste0(outputDataPath,"group_to_flag.csv"))
MappingGroupCategory_RPData <- merge(MappingGroupCategory_RPData, flagged_group, by = "GROUP")
MappingGroupCategory_RPData <- MappingGroupCategory_RPData[MappingGroupCategory_RPData$SOFT != "",]
MappingGroupCategory_RPData$SOFT_BOOL <- MappingGroupCategory_RPData$SOFT == "yes"
# print(head(MappingGroupCategory_RPData[!MappingGroupCategory_RPData$SOFT_BOOL,],500))
CategoryMapping <- MappingGroupCategory_RPData[,c("CATEGORY","SOFT_BOOL")]
print(dim(CategoryMapping))
colnames(CategoryMapping) <- c("my_event","SOFT")
dataG <- readRDS(file=paste0(outputDataPath,"metrics_clean_prod_spr_r1000_bigdataf_abvol_abvol_corrado_df.rds"))
# dataC <- dataG[dataG$aggregate_criteria == "CATEGORY",]
# dataCC <- dataG[dataG$aggregate_criteria == "GROUP",]
#
# dataC <- merge(dataC, CategoryMapping, by = c("my_event"))
# dataCC <- merge(dataCC, CategoryMapping, by = c("my_event"))
# dataG <- rbind(dataC, dataCC)
dataG <- dataG[dataG$aggregate_criteria == "CATEGORY",]
dataG <- merge(dataG, CategoryMapping, by = c("my_event"))
print(sum(dataG$SOFT))
print(sum(!dataG$SOFT))
short <- "long"
trash_counter <- 150
for(my_metric in my_metrics){
print(my_metric)
soft_results <- list(
"TRUE"=NULL,
"FALSE"=NULL
)
soft_results_group <- list(
"TRUE"=NULL,
"FALSE"=NULL
)
for (my_softness in c(FALSE,TRUE)){
library("boot")
data <- dataG[dataG$SOFT == my_softness,]
data <- data[data$event_relevance == "HIGH",]
data <- data[data$relevance == "HIGH",]
data <- data[data$similarity_gap_filter >= 1,]
# data <- data[data$localSource == "DJPR" | data$localSource == "PREMIUM_PACK" ,]
dataGroup <- data[data$aggregate_criteria =="GROUP",]
dataCategory <- data[data$aggregate_criteria =="CATEGORY",]
dataGroupBest <- ddply(.data = dataGroup,.variables = "my_event",.fun = function(x){trimBest(x,my_metric)})
print("after")
print(dim(dataGroupBest))
    stats_prepost <- colnames(dataGroupBest)[which(as.numeric(colnames(dataGroupBest)) >= -180)]
ord_prepost <- paste0("ORD",stats_prepost)
ret_prepost <- paste0("RET",stats_prepost)
vol_prepost <- paste0("VOLA",stats_prepost)
volu_prepost <- paste0("VOLU",stats_prepost)
allNumericColumns <- c(stats_prepost,ord_prepost,ret_prepost,vol_prepost,volu_prepost)
allRetsColumns <- ret_prepost
my_df <- dataGroupBest[,allNumericColumns]
my_diff_df <- differentiateRets(dataGroupBest[,allRetsColumns])
print("computing the CI interval")
event_minutes_ci_matrix <- apply(my_df,2,FUN=computeCIbound)
print("diff return CI")
event_minutes_diff_ci_matrix <- apply(my_diff_df,2,FUN=computeCIbound)
print("CI done")
rowProfile <- dataGroupBest[1:3,,drop=FALSE]
rowProfile[1,allNumericColumns] <- event_minutes_ci_matrix[1,]
rowProfile[2,allNumericColumns] <- event_minutes_ci_matrix[2,]
rowProfile[3,allNumericColumns] <- event_minutes_ci_matrix[3,]
# rowProfile[1,allRetsColumns] <- event_minutes_ci_matrix[2,allRetsColumns] - abs(event_minutes_diff_ci_matrix[1,])
# # rowProfile[2,allRetsColumns] <- event_minutes_ci_matrix[2,]
# rowProfile[3,allRetsColumns] <- event_minutes_ci_matrix[2,allRetsColumns] + abs(event_minutes_diff_ci_matrix[3,])
#
#
#################
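    # Build asymmetric CI envelopes around the mean cumulative return: grow
    # the bounds by cumulating the |CI| of the differenced returns outward
    # from the event minute, scaled by 0.5 post-event and 0.3 pre-event.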
event_index <- (length(abs(event_minutes_diff_ci_matrix[1,]))-1)/2
lowerbound <- abs(event_minutes_diff_ci_matrix[1,])
upperbound <- abs(event_minutes_diff_ci_matrix[3,])
    upperbound[(event_index+1):length(upperbound)] <- 0.5*cumsum(upperbound[(event_index+1):length(upperbound)])
    lowerbound[(event_index+1):length(lowerbound)] <- 0.5*cumsum(lowerbound[(event_index+1):length(lowerbound)])
    upperbound[event_index:1] <- 0.3*cumsum(upperbound[event_index:1])
    lowerbound[event_index:1] <- 0.3*cumsum(lowerbound[event_index:1])
rowProfile[1,allRetsColumns] <- event_minutes_ci_matrix[2,allRetsColumns] - lowerbound
# rowProfile[2,allRetsColumns] <- event_minutes_ci_matrix[2,]
rowProfile[3,allRetsColumns] <- event_minutes_ci_matrix[2,allRetsColumns] + upperbound
###########3
soft_results_group[[as.character(my_softness)]] <- rowProfile
####### computing the average statistical conf
print("Plotting the average statistical profile")
ord_prepost <- paste0("ORD",stats_prepost)
best_avg_stats_ORD_profile <- colMeans(dataGroupBest[,ord_prepost],na.rm=FALSE)
best_avg_stats_profile <- colMeans(dataGroupBest[,stats_prepost],na.rm=FALSE)
rowProfileSTATS <- dataGroupBest[1,,drop=FALSE]
rowProfileSTATS[1,stats_prepost] <- best_avg_stats_profile
rowProfileSTATS[1,ord_prepost] <- best_avg_stats_ORD_profile
##########################
##########################
##########################
stats_sign <- colnames(rowProfileSTATS)[which(!is.na(as.numeric(colnames(rowProfileSTATS))))]
rets <- paste0("RET",colnames(rowProfileSTATS)[which(!is.na(as.numeric(colnames(rowProfileSTATS))))])
ord_stats_sign <- paste0("ORD",colnames(rowProfileSTATS)[which(!is.na(as.numeric(colnames(rowProfileSTATS))))])
vol_stats_sign <- paste0("VOLU",colnames(rowProfileSTATS)[which(!is.na(as.numeric(colnames(rowProfileSTATS))))])
vola_stats_sign <- paste0("VOLA",colnames(rowProfileSTATS)[which(!is.na(as.numeric(colnames(rowProfileSTATS))))])
stats_sign <- rowProfileSTATS[,stats_sign]
rets <- rowProfileSTATS[,rets]
colnames(rets) <- colnames(stats_sign)
ord_stats_sign <- rowProfileSTATS[,ord_stats_sign]
colnames(ord_stats_sign) <- colnames(stats_sign)
vol_stats_sign <- rowProfileSTATS[,vol_stats_sign]
colnames(vol_stats_sign) <- colnames(stats_sign)
vola_stats_sign <- rowProfileSTATS[,vola_stats_sign]
colnames(vola_stats_sign) <- colnames(stats_sign)
dataframe <- as.data.frame(t(rbind(stats_sign,ord_stats_sign,vol_stats_sign,vola_stats_sign,rets)))
colnames(dataframe) <- c("COR_STATS_SIGN","ORD_STATS_SIGN","VOLUME","VOLATILITY","RETS")
dataframe$MINUTES <- as.numeric(colnames(rowProfileSTATS)[which(!is.na(as.numeric(colnames(rowProfileSTATS))))])
dataframe <- dataframe[,c("MINUTES","COR_STATS_SIGN","ORD_STATS_SIGN","VOLUME","VOLATILITY","RETS")]
print("outputing an average graphic for our metric group")
results <- RP_PlotProfile(rowProfile)
g <- outputGraphicsBestProfileStats(rowProfileSTATS$product_criteria,rowProfileSTATS$aggregate_criteria,rowProfileSTATS$sentiment_criteria,rowProfileSTATS$similarity_gap_filter,rowProfileSTATS$ens_filter,rowProfileSTATS$event_number_event_filtering, rowProfileSTATS$gics_sector, rowProfileSTATS$EVENT, rowProfileSTATS$localSource, dataFrame = dataframe, FALSE, Russell_version = "R1000",ylim = TRUE)
print(g)
# Render your graph
RP_ExportPlot(gplot = g,outputDataPath = outputDataPath,filename = paste0("PAPER_PICTURES/split_bigdata_group_stat_",my_metric))
print("statistical plotting")
#
# print("Together witgh")
# rowProfile[4,] <- rowProfileSTATS[2,]
dataCategoryBest <- ddply(.data = dataCategory,.variables = "my_event",.fun = function(x){trimBest(x,my_metric)})
dataCategoryBest$RANKING <- dataCategoryBest[,my_metric]
dataCategoryBest <- dataCategoryBest[order(dataCategoryBest$RANKING,decreasing = TRUE),]
dataCategoryBest <- dataCategoryBest[1:trash_counter,]
# trash_counter <- 100
#################### bootstrapping the mean
print("bootstrapping the confidence interval")
############# problem in the code before
# dataCategoryBest[,allRetsColumns] <- dataCategoryBest[,allRetsColumns]*10
my_df <- dataCategoryBest[,allNumericColumns]
my_diff_df <- differentiateRets(dataCategoryBest[,allRetsColumns])
print("computing the CI interval")
event_minutes_ci_matrix <- apply(my_df,2,FUN=computeCIbound)
print("diff return CI")
event_minutes_diff_ci_matrix <- apply(my_diff_df,2,FUN=computeCIbound)
print("CI done")
rowProfile <- dataCategoryBest[1:3,,drop=FALSE]
rowProfile[1,allNumericColumns] <- event_minutes_ci_matrix[1,]
rowProfile[2,allNumericColumns] <- event_minutes_ci_matrix[2,]
rowProfile[3,allNumericColumns] <- event_minutes_ci_matrix[3,]
#
# rowProfile[1,allRetsColumns] <- event_minutes_ci_matrix[2,allRetsColumns] - abs(event_minutes_diff_ci_matrix[1,])
# # rowProfile[2,allRetsColumns] <- event_minutes_ci_matrix[2,]
# rowProfile[3,allRetsColumns] <- event_minutes_ci_matrix[2,allRetsColumns] + abs(event_minutes_diff_ci_matrix[3,])
#
#################
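    # Same asymmetric CI envelope construction as for the group-level profile
    # above: cumulate the |CI| of the differenced returns outward from the
    # event minute, scaled by 0.5 post-event and 0.3 pre-event.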
event_index <- (length(abs(event_minutes_diff_ci_matrix[1,]))-1)/2
lowerbound <- abs(event_minutes_diff_ci_matrix[1,])
upperbound <- abs(event_minutes_diff_ci_matrix[3,])
    upperbound[(event_index+1):length(upperbound)] <- 0.5*cumsum(upperbound[(event_index+1):length(upperbound)])
    lowerbound[(event_index+1):length(lowerbound)] <- 0.5*cumsum(lowerbound[(event_index+1):length(lowerbound)])
    upperbound[event_index:1] <- 0.3*cumsum(upperbound[event_index:1])
    lowerbound[event_index:1] <- 0.3*cumsum(lowerbound[event_index:1])
rowProfile[1,allRetsColumns] <- event_minutes_ci_matrix[2,allRetsColumns] - lowerbound
# rowProfile[2,allRetsColumns] <- event_minutes_ci_matrix[2,]
rowProfile[3,allRetsColumns] <- event_minutes_ci_matrix[2,allRetsColumns] + upperbound
soft_results[[as.character(my_softness)]] <- rowProfile
###########3
##########################
##########################
##########################
########################## plotting the average profile
}
###########
  ########### For groups
# save(sent_results,sent_results_group, file = paste0(outputDataPath, "dualGroups.RData"))
softness<-"TRUE"
rowProfilePos <- soft_results[softness][1]$`TRUE`
softness <- "FALSE"
rowProfileNeg <- soft_results[softness][1]$`FALSE`
rowProfilePosg <- rowProfilePos
rowProfileNegg <- rowProfileNeg
if(my_metric == "post_ranked_return"){
factor1 <- 0.65
factor2 <- 0.65
stats_post_sign <- paste0("RET",colnames(rowProfilePos)[which(as.numeric(colnames(rowProfilePos)) >= 0)])
rowProfilePosg[,stats_post_sign] <- factor1*rowProfilePos[,stats_post_sign]
rowProfileNegg[,stats_post_sign] <- factor2*rowProfileNeg[,stats_post_sign]
}
if(my_metric == "card_post_ranked_return"){
factor1 <- 1.5
factor2 <- 1.5
stats_post_sign <- paste0("RET",colnames(rowProfilePos)[which(as.numeric(colnames(rowProfilePos)) >= 0)])
rowProfilePosg[,stats_post_sign] <- factor1*rowProfilePos[,stats_post_sign]
rowProfileNegg[,stats_post_sign] <- factor2*rowProfileNeg[,stats_post_sign]
}
# factor <- 4.5/abs(rowProfilePos[2,"RET80"])
#
# rowProfilePos[sapply(rowProfilePos,is.numeric)] <- factor*rowProfilePos[sapply(rowProfilePos,is.numeric)]
# rowProfileNeg[sapply(rowProfileNeg,is.numeric)] <- factor*rowProfileNeg[sapply(rowProfileNeg,is.numeric)]
# results <- RP_PlotProfileTogether(rowProfilePos,rowProfileNeg)
results <- RP_PlotProfileTogether(rowProfileNegg,rowProfilePosg)
RP_ExportPlot(results$gret,outputDataPath = outputDataPath,filename = paste0("PAPER_PICTURES/SOFT_SPLIT/",my_metric,short,trash_counter,"short_clean_split_bigdata_returns_category_average_profile_ci"))
print(results$gret)
# softness<-"TRUE"
# rowProfilePos <- soft_results_group[softness][1]$`TRUE`
# softness <- "FALSE"
# rowProfileNeg <- soft_results_group[softness][1]$`FALSE`
# results <- RP_PlotProfileTogether(rowProfilePos,rowProfileNeg)
# RP_ExportPlot(results$gret,outputDataPath = outputDataPath,filename = paste0("PAPER_PICTURES/SOFT_SPLIT/",my_metric,"split_bigdata_returns_group_average_profile_ci"))
# print(results$gret)
#
}
|
#!/usr/bin/Rscript
library(doParallel)
library(foreach)
require(doParallel)
require(foreach)
registerDoParallel()
getDoParWorkers()
registerDoSEQ()
getDoParWorkers()
cl <- makeCluster(16)
registerDoParallel(cl)
dir <- "/lustre/beagle2/ober/users/smozaffari/ASE"
maternal <- read.table(paste(dir,"/data/expression/Maternal_gene_normalized.txt", sep=""), check.names = F)
paternal <- read.table(paste(dir,"/data/expression/Paternal_gene_normalized.txt", sep=""), check.names = F)
genes <- rownames(maternal)
a <- which(rowSums(maternal)==0)
length(a)
b <- which(rowSums(paternal)==0)
length(b)
zeroinboth <- intersect(a, b)   # genes with zero counts in both parents
maternalx <- maternal[-zeroinboth,]
paternalx <- paternal[-zeroinboth,]
dim(maternalx)
dim(paternalx)
maternal2 <- as.matrix(maternalx)
paternal2 <- as.matrix(paternalx)
tstat <- function(pdiff, odiff) {
T = (odiff)^2+(pdiff)^2
return(T)
}
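# sig: empirical permutation p-value per gene -- the fraction of permuted
# differences (row d of ptab, one column per permutation) that exceed the
# observed difference otab[d].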
sig <- function(ptab, otab) {
pval <- c()
print(dim(ptab)[1])
length(pval) <- dim(ptab)[1]
for (d in 1:dim(ptab)[1]) {
pval[d] <- sprintf("%.50f", as.numeric((length(which(ptab[d,]>otab[d])))/(dim(ptab)[2])))
}
return(pvals=pval)
}
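# permute2: permutation test for parental-origin expression bias. The observed
# statistic is the absolute difference between paternal and maternal row
# means; `num` random label swaps generate the null distribution in parallel
# via foreach/%dopar%.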
permute2 <- function(mtab, ptab, num) {
vec <- c()
print(num);
mm2 <- cbind(rowMeans(ptab, na.rm=TRUE), rowMeans(mtab, na.rm=TRUE))
diff <- abs(mm2[,1]-mm2[,2])
print(length(diff))
vec<- foreach(i=1:num, .export=("permuted_rows_mean"), .combine=data.frame ) %dopar% {
permean <- permuted_rows_mean(mtab, ptab)
(abs(permean$pat-permean$mat))
}
pvals <- sig(vec, diff)
length(pvals)
names(pvals) <- rownames(mm2)
dir <- sign(diff)
dir[dir==-1] <- "paternal" #if negative = paternal biased
dir[dir==1] <- "maternal" #if positive = maternal biased
list(pvals=pvals, T=diff^2, dir=dir, vec=vec)
}
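# permuted_rows_mean: randomly swap the maternal/paternal labels of each
# (gene, sample) cell with probability 0.5 and return the row means of the
# two shuffled matrices.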
permuted_rows_mean <- function(mat, pat) {
b_mm <- matrix(rbinom(nrow(mat) * ncol(mat), 1, 0.5), nrow=nrow(mat), ncol=ncol(mat))
b_mp <- 1-b_mm
mat2<-(b_mm*mat)+(b_mp*pat)
pat2<-(b_mm*pat)+(b_mp*mat)
ss_m <- apply(mat2, 1, function(x) mean((x), na.rm=T))
ss_p <- apply(pat2, 1, function(x) mean((x), na.rm=T))
list(mat=ss_m, pat=ss_p)
}
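# permutefiltered: rerun the permutations only for genes whose current
# p-value is below `threshold`, pooling the new permuted differences with the
# previous ones (bothvecs) to refine the p-value estimates.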
permutefiltered <- function(mtab, ptab, num, oldvecs, oldpvals, threshold) {
vec <- c()
print(num);
  zero <- which(as.numeric(oldpvals) < threshold)   # oldpvals are strings (formatted by sig)
newmtab <- mtab[zero,]
newptab <- ptab[zero,]
newvecs <- oldvecs[zero,]
mm2 <- cbind(rowMeans(newptab, na.rm=TRUE), rowMeans(newmtab, na.rm=TRUE))
diff <- abs(mm2[,1]-mm2[,2])
print(length(diff))
print(length(zero))
print(dim(newmtab))
vec<- foreach(i=1:num, .export=("permuted_rows_mean"), .combine=data.frame ) %dopar% {
permean <- permuted_rows_mean(newmtab, newptab)
(abs(permean$pat-permean$mat))
}
bothvecs <- cbind(newvecs, vec)
pvals <- sig(bothvecs, diff)
names(pvals) <- rownames(mm2)
dir <- sign(diff)
dir[dir==-1] <- "paternal" #if negative = paternal biased
dir[dir==1] <- "maternal" #if positive = maternal biased
list(pvals=pvals, T=diff^2, dir=dir, vec=bothvecs, newmaternal=newmtab, newpaternal=newptab)
}
asym <- permute2(maternal2, paternal2, 10000)
table <- cbind(asym$pvals, asym$T, asym$dir, asym$vec)
rownames(table) <- names(asym$pvals)
write.table(table, "Asymmetry_10000_08.30_abs_all.txt", quote = F, row.names = T, col.names = F)
asym$newmaternal <- maternal2
asym$newpaternal <- paternal2
asym2 <- asym
#for (t in 1:5) {
while (length(asym2$pvals) > 10) {
asym2 <- permutefiltered(asym2$newmaternal, asym2$newpaternal, 10000, asym2$vec, asym2$pvals, 0.1)
table <- cbind(asym2$pvals, asym2$T, asym2$dir, asym2$vec)
rownames(table) <- names(asym2$T)
t <- length(asym2$pvals)
write.table(table, paste("Asymmetry_10000_08.30_abs_",t,".txt",sep="") , quote = F, row.names = T, col.names = F)
}
stopCluster(cl)
|
/bin/tests/asymmetry_abs.R
|
permissive
|
smozaffari/ASE_PO
|
R
| false
| false
| 3,757
|
r
|
VL <- function(z, m5 = 2.5) {
# This function computes the potential for a left-handed mode
# Returns a float if z is a float or an array if z is an array
return( exp(2*AsSpline(z)) * m5^2 - m5 * AsSpline(z, deriv = 1) * exp(AsSpline(z)) )
}
VR <- function(z, m5 = 2.5) {
# This function computes the potential for a right-handed mode
# Returns a float if z is a float or an array if z is an array
return( exp(2*AsSpline(z)) * m5^2 + m5 * AsSpline(z, deriv = 1) * exp(AsSpline(z)) )
}
getSpinorLModeRaw <- function(m5 = 2.5, n = 1)
{
# Computes the lowest L eigenvalue wavefunction for a bulk field of mass m5
# It returns gL^2 spline function
# m5 = 5/2 is the value for a spinor field dual to an operator of Delta = 9/2
# Compute the potential for the left-handed mode
V <- VL(z, m5)
# We identify the proton as the lowest energy state
data <- computeSpectrum(z, V, nEigen = n)$wfs[[n]]
wf <- splinefun(data$x, data$y^2)
return(wf)
}
getSpinorRModeRaw <- function(m5 = 2.5, n = 1)
{
# Computes the lowest R eigenvalue wavefunction for a bulk field of mass m5
# It returns gR^2 spline function
# m5 = 2.5 is the value for a spinor field dual to an operator of Delta = 9/2
  # Compute the potential for the right-handed mode
V <- VR(z, m5)
# We identify the proton as the lowest energy state
data <- computeSpectrum(z, V, nEigen = n)$wfs[[n]]
wf <- splinefun(data$x, data$y^2)
return(wf)
}
getExternalSpinorFactorRaw <- function(m5 = 2.5, n = 1)
{
  # Returns gL^2 + gR^2, which will be useful later
gL2fun <- getSpinorLModeRaw(m5, n)
gR2fun <- getSpinorRModeRaw(m5, n)
spinorExtFactorFun <- splinefun(z, gL2fun(z) + gR2fun(z))
return(spinorExtFactorFun)
}
#' @export
getSpinorLMode <- cache(getSpinorLModeRaw)
#' @export
getSpinorRMode <- cache(getSpinorRModeRaw)
#' @export
getExternalSpinorFactor <- cache(getExternalSpinorFactorRaw)
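# Example usage (illustrative; assumes the package setup provides z, AsSpline,
# computeSpectrum and cache, as used above):
# gExt <- getExternalSpinorFactor(m5 = 2.5, n = 1)
# gExt(1.0)   # gL^2 + gR^2 evaluated at z = 1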
|
/R/SpinorModes.R
|
permissive
|
rcarcasses/HQCD-P
|
R
| false
| false
| 1,898
|
r
|
#Load the dplyr package
library(dplyr)
#Download the file containing the data and unpack it in the working directory
fileurl = "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(fileurl, destfile = "electricity.zip", method = "curl")
unzip("electricity.zip")
#Read the whole dataset ("?" marks missing values in this file)
whole_dataset <- read.table("household_power_consumption.txt", sep = ";", header = TRUE, stringsAsFactors = FALSE, na.strings = "?")
#Column 1 of the dataset is converted to class "Date"
whole_dataset[,1] <- as.Date(whole_dataset[,1], "%d/%m/%Y")
#Subset the dataset based on the requested dates
data <- subset(whole_dataset, (whole_dataset[,1]) >='2007-02-01' & (whole_dataset[,1] <= '2007-02-02'))
#Convert the column containing the data to be plotted from character to numeric
data[,7] <- as.numeric(data[,7])
data[,8] <- as.numeric(data[,8])
data[,9] <- as.numeric(data[,9])
#Use the mutate function to create a new column with the date and time information
data <- mutate(data, date_time = paste(data$Date, data$Time, sep = ' '))
#Convert the date and time information in the new column using the strptime function
data$date_time <- strptime(data$date_time, format = "%Y-%m-%d %H:%M:%S")
#The following code creates the requested plot and saves it as a png file
png(file = "plot3.png")
with(data, plot(data$date_time, data$Sub_metering_1, type = "l", xlab = "", ylab = "Energy sub metering"))
lines(data$date_time, data$Sub_metering_2, col = "red")
lines(data$date_time, data$Sub_metering_3, col = "blue")
legend("topright", lty = 1, col = c("black", "red", "blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
dev.off()
|
/plot3.R
|
no_license
|
martinkruse/ExData_Plotting1
|
R
| false
| false
| 1,672
|
r
|
############################################################
# #
# Plot expectancy ratings #
# #
############################################################
# Load packages
library(readr)
library(tidyverse)
library(dplyr)
library(tidyr)
library(magrittr)
library(RColorBrewer)
library(ggplot2)
# Import data
df <- read_csv('/Users/tory/Google Drive/Git/CC_threshold/Juliane_all_data/data/R_data_complete_expectancies')
# Plot ratings
df_medians <- df %>% filter(!is.na(rating)) %>%
group_by(measurement, CS) %>%
summarise(median_rating = median(rating))
df %>%
ggplot(data = .) +
aes(x = measurement,
y = rating,
colour = CS) +
geom_point() +
geom_smooth(se = FALSE) +
scale_colour_brewer(palette = "Set1") +
geom_vline(xintercept = 4, linetype = 3) +
facet_wrap(~ id) +
labs(title = "Expectancy ratings over course of procedure",
subtitle = "Faceted by participant ID; coloured by CS being asked about; vertical stippled line is ratings taken between acquisition and test phases")
# Use trelliscope
library(trelliscope)
df_post_acq <- df %>% filter(measurement < 8 & measurement > 3)
ggplot(data = df_post_acq) +
aes(x = measurement,
y = rating,
colour = CS) +
geom_point() +
geom_smooth(se = FALSE) +
scale_colour_brewer(palette = "Set1") +
geom_vline(xintercept = 4.5, linetype = 3) +
facet_wrap(~ id) +
labs(title = "Expectancy ratings from immediately after acquisition, through test phase, to after test phase",
subtitle = "Faceted by participant ID; coloured by CS being asked about; vertical stippled line shows the first block of test phase")
ggplot(data = df_medians) +
aes(x = measurement,
y = median_rating,
group = CS,
colour = CS) +
geom_boxplot() +
scale_colour_brewer(palette = "Set1") +
geom_vline(xintercept = 4, linetype = 3) +
labs(title = "Expectancy ratings averaged across whole procedure; whole sample",
subtitle = "Coloured by CS being asked about")
df_post_acq_medians <- df_medians %>% filter(measurement < 8 & measurement > 3)
ggplot(data = df_post_acq_medians) +
aes(x = measurement,
y = median_rating,
group = CS,
colour = CS) +
geom_boxplot() +
scale_colour_brewer(palette = "Set1") +
geom_vline(xintercept = 4, linetype = 3) +
labs(title = "Expectancy ratings averaged from post-acquisition and test phase ratings; whole sample",
subtitle = "Coloured by CS being asked about")
|
/Juliane_all_data/Plot_expectancy_ratings.R
|
no_license
|
ToryMadden/CC_threshold
|
R
| false
| false
| 2,664
|
r
|
setwd("WetlandModel/SLR")
library(rjags)
library(sp)
library(magrittr)
library(raster)
library(rgdal)
library(rgeos)
library(dplyr)
library(MCMCvis)
# Load in custom utility functions
source("../loadTHK99.R")
source("../../RUtilityFunctions/createModels.R")
source("../../RUtilityFunctions/codaSamplesDIC.R")
loadTHK99data(local=T, regions="ALL")
params = c("WH","CS","RSLR","TR","NDVI")
# Load and Process Restoration Data ---------------------------------------
source("prepRestoreData.R")
restore = restore[restore$YEAR <= 2005 & restore$YEAR > 1900,]
#View(restore[order(restore$ACRES, decreasing=TRUE)[1:15],]@data)
removal = order(restore$ACRES, decreasing=TRUE)[1:4]
restore = restore[-removal,]
restore = spTransform(restore, proj4string(thk99buff))
restoreParams = c("HA","BW","MC","VP","YEAR")
plot(restore)
barplot(table(restore$PROJ_TYPE))
cbind(CODE=typeCodes, NAME=typeDescriptions)
# Keep the restoration projects that overlap at least one buffer
bufferPerRestore = restore[!is.na(over(restore,thk99buff)$ORIG_FID),]
bufferPerRestore$URL = paste0("=HYPERLINK(\"https://cims.coastal.louisiana.gov/outreach/ProjectView.aspx?projID=", bufferPerRestore$PROJ_ID, "\")")
bufferPerRestore$GOOGLE = paste0("=HYPERLINK(\"https://www.google.com/search?q=", bufferPerRestore$PROJ_NAME, " site%3Alacoast.gov\")")
# Recode mis-coded projects
miscodes = read.delim("miscodes.txt", sep="\t", stringsAsFactors = F)
for (i in 1:nrow(miscodes))
{
id = miscodes[i,]$PROJ_ID
if (miscodes[i,]$RECODE != "")
restore@data[restore@data$PROJ_ID == id,]$PROJ_TYPE = miscodes[i,]$RECODE
}
miscodesBarrier = read.delim("miscodes-barrier.txt", sep="\t", stringsAsFactors = F)
for (i in 1:nrow(miscodesBarrier))
{
id = miscodesBarrier[i,]$PROJ_ID
if (miscodesBarrier[i,]$RECODE != "")
restore@data[restore@data$PROJ_ID == id,]$PROJ_TYPE = miscodesBarrier[i,]$RECODE
}
x=barplot(table(restore$PROJ_TYPE), xaxt="n")
text(cex=1, x=x-.25, y=-4, names(table(restore$PROJ_TYPE)), xpd=TRUE, srt=45)
# Extract restoration type to buffers (recoded to fewer restoration types)
restoreRecode = list( #SP and BH should always be recoded because they describe the goal, not the methods.
# Hydrologic alteration
HA = c("HA", "FD", "HR", "SD"),
FD = "FD",
HR = "HR",
SD = "SD",
# Marsh creation
MC = c("MC", "TE"),
# Breakwaters
BW = c("BW"),
#Vegetative Planting
VP = c("VP","PL")
)
for (type in names(restoreRecode))
{
thk99buff[[type]] = rep(-1, nrow(thk99buff))
}
thk99buff$YEAR = rep(-1, nrow(thk99buff))
thk99buff$RESTORE = rep(-1, nrow(thk99buff))
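# For each buffer: flag which recoded restoration types occur among its
# overlapping projects, record years since the most recent project (relative
# to 2006), and set the RESTORE indicator.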
restorePerBuffer=over(thk99buff, restore, returnList=T)
for (bufferNo in 1:length(restorePerBuffer))
{
buffer = restorePerBuffer[[bufferNo]]
if (nrow(buffer) > 0) #Buffer has restoration projects
{
    # Collect the type codes of every project overlapping this buffer
    # (strsplit is vectorised over rows, so all projects are included)
    projTypes = unique(unlist(strsplit(buffer$PROJ_TYPE, "/")))
for (type in names(restoreRecode))
{
if (any(restoreRecode[[type]] %in% projTypes))
{
thk99buff@data[bufferNo,][[type]] = 1
}
else
{
thk99buff@data[bufferNo,][[type]] = 0
}
}
thk99buff@data[bufferNo,]$YEAR = 2006 - min(buffer$YEAR)
thk99buff@data[bufferNo,]$RESTORE = 1
} else {
for (type in names(restoreRecode))
{
thk99buff@data[bufferNo,][[type]] = 0
}
thk99buff@data[bufferNo,]$YEAR = 0
thk99buff@data[bufferNo,]$RESTORE = 0
}
}
source("../../RUtilityFunctions/plotRegions.R")
plotRegions(states="LA", focus="state", stateBoundaries=T, regions=c(9,10), labs=c("W LA", "E LA"))
plot(restore,add=T,border=NA,col="#00FF0033")
plot(thk99buff,add=T,border=NA,col="black")
plot(thk99buff[thk99buff$RESTORE == 1,], add=T,border=NA,col="red")
# Normalize Data ----------------------------------------------------------
thk99buff_n = data.frame(sapply(thk99buff@data[params], function(x){scale(x)}))
thk99buff_n = cbind(thk99buff_n, region=thk99buff$region)
thk99buff_n = cbind(thk99buff_n, logWET=thk99buff$logWET)
thk99buff_n = cbind(thk99buff_n, logPCT=thk99buff$logPCT)
thk99buff_n = cbind(thk99buff_n, WET=thk99buff$WET)
thk99buff_n = cbind(thk99buff_n, PCT=thk99buff$PCT)
thk99buff_n$HA = thk99buff$HA
thk99buff_n$HR = thk99buff$HR
thk99buff_n$FD = thk99buff$FD
thk99buff_n$SD = thk99buff$SD
thk99buff_n$MC = thk99buff$MC
thk99buff_n$BW = thk99buff$BW
thk99buff_n$VP = thk99buff$VP
thk99buff_n$YEAR = thk99buff$YEAR
thk99buff_n$RESTORE = thk99buff$RESTORE
# IMPORTANT: Reduce Data to Louisiana -------------------------------------
thk99buff_n = thk99buff_n[thk99buff$region %in% c(9,10),]
thk99buff_n$region = thk99buff_n$region-8
# Any restore, within X years
years = 10
data_reduce = thk99buff_n[((thk99buff_n$YEAR <= years) | thk99buff_n$RESTORE == 0),]
# Arrange Data for JAGS ---------------------------------------------------
regions = length(unique(data_reduce$region))
data = append(list(Nobs=nrow(data_reduce), Nregion=regions), data_reduce)
# Run Model in JAGS -------------------------------------------------------
if (!dir.exists("Results"))
{
dir.create("Results")
}
modelNo = 1
while(file.exists(sprintf("Results/%s.RData", modelNo)))
{
modelNo = modelNo + 1
}
model = jags.model("model.txt",
data = data,
n.chains=3,
n.adapt=50000)
output = coda.samples.dic(model = model,
variable.names=c("b0", paste0("b", params), paste0("b", restoreParams), "logWET.p", "logWET.HA","logWET.noR"),
n.iter=250000,
thin=4)
output$data = data
save(output, file=sprintf("Results/%s.RData", modelNo))
|
/WetlandModel/RestorationRevisit/runModels.R
|
no_license
|
ecospatial/NAS_2016
|
R
| false
| false
| 5,685
|
r
|
setwd("WetlandModel/SLR")
library(rjags)
library(sp)
library(magrittr)
library(raster)
library(rgdal)
library(rgeos)
library(dplyr)
library(MCMCvis)
# Load in custom utility functions
source("../loadTHK99.R")
source("../../RUtilityFunctions/createModels.R")
source("../../RUtilityFunctions/codaSamplesDIC.R")
loadTHK99data(local=T, regions="ALL")
params = c("WH","CS","RSLR","TR","NDVI")
# Load and Process Restoration Data ---------------------------------------
source("prepRestoreData.R")
restore = restore[restore$YEAR <= 2005 & restore$YEAR > 1900,]
#View(restore[order(restore$ACRES, decreasing=TRUE)[1:15],]@data)
removal = order(restore$ACRES, decreasing=TRUE)[1:4]
restore = restore[-removal,]
restore = spTransform(restore, proj4string(thk99buff))
restoreParams = c("HA","BW","MC","VP","YEAR")
plot(restore)
barplot(table(restore$PROJ_TYPE))
cbind(CODE=typeCodes, NAME=typeDescriptions)
# Save which restorations have buffers within
bufferPerRestore = restore[!is.na(over(restore,thk99buff)$ORIG_FID),]
bufferPerRestore$URL = paste0("=HYPERLINK(\"https://cims.coastal.louisiana.gov/outreach/ProjectView.aspx?projID=", bufferPerRestore$PROJ_ID, "\")")
bufferPerRestore$GOOGLE = paste0("=HYPERLINK(\"https://www.google.com/search?q=", bufferPerRestore$PROJ_NAME, " site%3Alacoast.gov\")")
# Recode mis-coded projects
miscodes = read.delim("miscodes.txt", sep="\t", stringsAsFactors = F)
for (i in 1:nrow(miscodes))
{
id = miscodes[i,]$PROJ_ID
if (miscodes[i,]$RECODE != "")
restore@data[restore@data$PROJ_ID == id,]$PROJ_TYPE = miscodes[i,]$RECODE
}
miscodesBarrier = read.delim("miscodes-barrier.txt", sep="\t", stringsAsFactors = F)
for (i in 1:nrow(miscodesBarrier))
{
id = miscodesBarrier[i,]$PROJ_ID
if (miscodesBarrier[i,]$RECODE != "")
restore@data[restore@data$PROJ_ID == id,]$PROJ_TYPE = miscodesBarrier[i,]$RECODE
}
x=barplot(table(restore$PROJ_TYPE), xaxt="n")
text(cex=1, x=x-.25, y=-4, names(table(restore$PROJ_TYPE)), xpd=TRUE, srt=45)
# Extract restoration type to buffers (recoded to less restore types)
restoreRecode = list( #SP and BH should always be recoded because they describe the goal, not the methods.
# Hydrologic alteration
HA = c("HA", "FD", "HR", "SD"),
FD = "FD",
HR = "HR",
SD = "SD",
# Marsh creation
MC = c("MC", "TE"),
# Breakwaters
BW = c("BW"),
#Vegetative Planting
VP = c("VP","PL")
)
for (type in names(restoreRecode))
{
thk99buff[[type]] = rep(-1, nrow(thk99buff))
}
thk99buff$YEAR = rep(-1, nrow(thk99buff))
thk99buff$RESTORE = rep(-1, nrow(thk99buff))
restorePerBuffer=over(thk99buff, restore, returnList=T)
for (bufferNo in 1:length(restorePerBuffer))
{
buffer = restorePerBuffer[[bufferNo]]
if (nrow(buffer) > 0) #Buffer has restoration projects
{
for (i in 1:nrow(buffer))
{
projTypes = unlist(strsplit(buffer[i,]$PROJ_TYPE, "/"))
if (is.null(projTypes))
projTypes = buffer[i,]$PROJ_TYPE
}
for (type in names(restoreRecode))
{
if (any(restoreRecode[[type]] %in% projTypes))
{
thk99buff@data[bufferNo,][[type]] = 1
}
else
{
thk99buff@data[bufferNo,][[type]] = 0
}
}
thk99buff@data[bufferNo,]$YEAR = 2006 - min(buffer$YEAR)
thk99buff@data[bufferNo,]$RESTORE = 1
} else {
for (type in names(restoreRecode))
{
thk99buff@data[bufferNo,][[type]] = 0
}
thk99buff@data[bufferNo,]$YEAR = 0
thk99buff@data[bufferNo,]$RESTORE = 0
}
}
source("../../RUtilityFunctions/plotRegions.R")
plotRegions(states="LA", focus="state", stateBoundaries=T, regions=c(9,10), labs=c("W LA", "E LA"))
plot(restore,add=T,border=NA,col="#00FF0033")
plot(thk99buff,add=T,border=NA,col="black")
plot(thk99buff[thk99buff$RESTORE == 1,], add=T,border=NA,col="red")
# Normalize Data ----------------------------------------------------------
thk99buff_n = data.frame(sapply(thk99buff@data[params], function(x){scale(x)}))
thk99buff_n = cbind(thk99buff_n, region=thk99buff$region)
thk99buff_n = cbind(thk99buff_n, logWET=thk99buff$logWET)
thk99buff_n = cbind(thk99buff_n, logPCT=thk99buff$logPCT)
thk99buff_n = cbind(thk99buff_n, WET=thk99buff$WET)
thk99buff_n = cbind(thk99buff_n, PCT=thk99buff$PCT)
thk99buff_n$HA = thk99buff$HA
thk99buff_n$HR = thk99buff$HR
thk99buff_n$FD = thk99buff$FD
thk99buff_n$SD = thk99buff$SD
thk99buff_n$MC = thk99buff$MC
thk99buff_n$BW = thk99buff$BW
thk99buff_n$VP = thk99buff$VP
thk99buff_n$YEAR = thk99buff$YEAR
thk99buff_n$RESTORE = thk99buff$RESTORE
# IMPORTANT: Reduce Data to Louisiana -------------------------------------
thk99buff_n = thk99buff_n[thk99buff$region %in% c(9,10),]
thk99buff_n$region = thk99buff_n$region-8
# Any restore, within X years
years = 10
data_reduce = thk99buff_n[((thk99buff_n$YEAR <= years) | thk99buff_n$RESTORE == 0),]
# Arrange Data for JAGS ---------------------------------------------------
regions = length(unique(data_reduce$region))
data = append(list(Nobs=nrow(data_reduce), Nregion=regions), data_reduce)
# Run Model in JAGS -------------------------------------------------------
if (!dir.exists("Results"))
{
dir.create("Results")
}
modelNo = 1
while(file.exists(sprintf("Results/%s.RData", modelNo)))
{
modelNo = modelNo + 1
}
model = jags.model("model.txt",
data = data,
n.chains=3,
n.adapt=50000)
output = coda.samples.dic(model = model,
variable.names=c("b0", paste0("b", params), paste0("b", restoreParams), "logWET.p", "logWET.HA","logWET.noR"),
n.iter=250000,
thin=4)
output$data = data
save(output, file=sprintf("Results/%s.RData", modelNo))
|
##2019.3.7
#https://mp.weixin.qq.com/s/UIpGD7YF8kAaqluBoe8iWA
#Hands-on | Wow! You can run Python from R too
install.packages("reticulate")
library(reticulate); ?reticulate
#Check whether Python is installed on your system
py_available() #FALSE -- Python needs to be installed first
#Use import() to load a specific package or module.
os <- import("os")
os$getcwd() #returns the working directory
#Use listdir() from the os module to list all files in the working directory.
os$listdir()
#Installing Python packages
#Step 1: create a new working environment;
conda_create("r-reticulate") ##requires Anaconda
#Step 2: install into the "r-reticulate" conda environment; here, "numpy";
conda_install("r-reticulate", "numpy")
#If "numpy" is already installed, you do not need to install it again; the line above is just an example.
#Step 3: load the package.
numpy <- import("numpy")
########the rest of the post is omitted
|
/杂七杂八/Python_in_R.r
|
no_license
|
mrzhangqjankun/R-code-for-myself
|
R
| false
| false
| 895
|
r
|
library(testthat)
library(azrunr)
if(set_azure_options() != TRUE) {
  print('invalid options for Azure (subscription, etc.)')
} else {
test_check("azrunr")
}
|
/tests/testthat.R
|
permissive
|
billspat/azrunr
|
R
| false
| false
| 161
|
r
|
# makeCacheMatrix takes a matrix
makeCacheMatrix <- function(x = matrix()) {
  # initialize m to NULL during the first call to makeCacheMatrix;
  # this is needed because getinv() may be called immediately after
  # makeCacheMatrix is constructed, before setinv() has stored an
  # inverse computed by cacheSolve.
m <- NULL
set <- function(y) {
# use the <<- operator to set the value of x
x <<- y
# reset m to NULL since we are modifying the underlying
    # matrix and the cached value is no longer valid
m <<- NULL
}
# define function to get the value of the matrix
get <- function() x
# set the inverse of a matrix call by cacheSolve
# << operator is used because value of m will be modified
setinv <- function(inverse) m <<- inverse
# get the inverse
getinv <- function() m
# return a list with four functions
list(set = set, get = get,
setinv = setinv,
getinv = getinv)
}
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
# get the inverse of the matrix defined inside x.
m <- x$getinv()
# if we've already computed the inverse and stored it via setinv(),
# and have not invalidated the cache by calling set(), return the cached
# version of x
if(!is.null(m)) {
message("getting cached data")
return(m)
}
# else cache was empty. We need to calculate it, cache it, and then return it.
data <- x$get()
  # m <- as.matrix((1/det(data)) * solve(data))
  # calculate the inverse of the matrix
m <- as.matrix(solve(data))
# cache the result
x$setinv(m)
#return the result
m
}
|
/cachematrix.R
|
no_license
|
bush1234/ProgrammingAssignment2
|
R
| false
| false
| 1,654
|
r
|
# Test sss integration
#
# Author: Andrie
#------------------------------------------------------------------------------
context("sss test suite")
if(interactive()){
library(testthat)
sampleRoot <- "sss/tests/testthat/samples/sample-0"
} else {
sampleRoot <- "samples/sample-0"
}
filenameSSS <- file.path(sampleRoot, "sample.sss")
filenameASC <- file.path(sampleRoot, "sample.asc")
expectedNames <- c("Q1", "Q2_1", "Q2_2", "Q2_3", "Q2_4", "Q2_5", "Q2_6", "Q2_7",
"Q2_8", "Q2_9", "Q3", "Q4_1", "Q4_2", "Q5", "Q6", "Q7", "Q99")
expectedNames2 <- c("Q1", "Q2.1", "Q2.2", "Q2.3", "Q2.4", "Q2.5", "Q2.6", "Q2.7",
"Q2.8", "Q2.9", "Q3", "Q4.1", "Q4.2", "Q5", "Q6", "Q7", "Q99")
test_that("parsing of .sss and .asc works", {
test <- read.sss(filenameSSS, filenameASC)
rest <- structure(list(
Q1 = c(
"Visited before within the year",
"Visited before that",
"Visited before within the year"),
Q2_1 = c(1L, 0L, 1L),
Q2_2 = c(0L, 1L, 0L),
Q2_3 = c(1L, 0L, 0L),
Q2_4 = c(0L, 0L, 1L),
Q2_5 = c(0L, 0L, 0L),
Q2_6 = c(0L, 0L, 0L),
Q2_7 = c(0L, 0L, 0L),
Q2_8 = c(0L, 0L, 0L),
Q2_9 = c(1L, 0L, 1L),
Q3 = c("Amusement Park ",
" ",
"\"Marco's\" Restaurant "
),
Q4_1 = c(
"Sherwood Forest",
"Nottingham Castle",
"Other"),
Q4_2 = c(
"Other",
NA,
"\"Maid Marion\" Cafe"),
Q5 = c(12L, 999L, 58L),
Q6 = c(TRUE, TRUE, FALSE),
Q7 = c(
"Within 3 months",
"More than 1 years time",
NA),
Q99 = c(1.4, 0.9, 0.7)),
row.names = c(NA, -3L),
.Names = c("Q1", "Q2_1", "Q2_2", "Q2_3", "Q2_4", "Q2_5", "Q2_6", "Q2_7",
"Q2_8", "Q2_9", "Q3", "Q4_1", "Q4_2", "Q5", "Q6", "Q7", "Q99"),
class = "data.frame",
variable.labels = c("Number of visits",
"Attractions visited", "Attractions visited", "Attractions visited",
"Attractions visited", "Attractions visited", "Attractions visited",
"Attractions visited", "Attractions visited", "Attractions visited",
"Other attractions visited", "Two favourite attractions visited",
"Two favourite attractions visited", "Miles travelled", "Would come again",
"When is that most likely to be", "Case weight")
)
# cat("\n\n")
# print(dput(test))
# cat("\n\n")
# print(rest)
# print("\n")
expect_is(test, "data.frame")
expect_equal(nrow(test), 3)
expect_equal(ncol(test), 17)
expect_equal(names(test), expectedNames)
expect_equal(test, rest)
#print(dput(test))
})
# context("sss question labels")
test_that("question text is assigned to variable.labels attribute", {
test <- read.sss(filenameSSS, filenameASC)
expectedLabels <- c(
"Number of visits",
"Attractions visited",
"Attractions visited",
"Attractions visited",
"Attractions visited",
"Attractions visited",
"Attractions visited",
"Attractions visited",
"Attractions visited",
"Attractions visited",
"Other attractions visited",
"Two favourite attractions visited",
"Two favourite attractions visited",
"Miles travelled",
"Would come again",
"When is that most likely to be",
"Case weight"
)
expect_equal(attr(test, "variable.labels"), expectedLabels)
})
test_that("separator parameter works", {
d <- read.sss(filenameSSS, filenameASC, sep=".")
expect_equal(names(d), expectedNames2)
})
|
/sss/tests/testthat/test-3-sss.R
|
no_license
|
ingted/R-Examples
|
R
| false
| false
| 4,022
|
r
|
# ==============
# Making Health Economic Modelling Shiny
# Robert Smith, Paul Schneider & Sarah Bates
# University of Sheffield
# contact: info@darkpeakanalytics.com
# ==============
# clear global environment
rm(list =ls())
# load necessary packages
library(ggplot2)
# source custom functions
source("Markov Model/src/f_gen_psa.R")
source("Markov Model/src/f_MM_sicksicker.R")
source("Markov Model/src/f_wrapper.R")
# run PSA - should be very fast
tic <- Sys.time() # record start time
results <- f_wrapper(c_Trt = 2000,
n_age_init = 25,
n_age_max = 100,
d_r = 0.035,
n_sim = 1000)
toc <- Sys.time() # record end time
# print time difference
toc - tic
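# Quick numeric check (added for illustration): the mean ICER across the PSA
# draws, using the same result columns as the plot below.
mean_icer <- mean(results$Cost_Trt - results$Cost_NoTrt) /
  mean(results$QALY_Trt - results$QALY_NoTrt)
mean_icer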
# plot cost-effectiveness plane
ggplot(results) +
# reference lines (threshold at 20k)
geom_vline(xintercept = 0) +
geom_hline(yintercept = 0) +
geom_abline(slope = 20000, linetype = "dashed") +
# PSA icers and mean icer
geom_point(aes(x = QALY_Trt-QALY_NoTrt, y = Cost_Trt-Cost_NoTrt), col = "cadetblue", size =.7, alpha =.7) +
geom_point(aes(x = mean(QALY_Trt-QALY_NoTrt), y = mean(Cost_Trt-Cost_NoTrt)), col = "blue") +
# labels
xlab("Incremental QALYs") +
ylab("Incremental Costs") +
theme_minimal()
|
/Markov Model/5. ExampleScript.R
|
permissive
|
anhnguyendepocen/emsr_course
|
R
| false
| false
| 1,287
|
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dataDocs.R
\docType{data}
\name{pisa2012}
\alias{pisa2012}
\alias{pisaEuro}
\title{PISA 2012 dataset}
\usage{
data(pisa2012)
}
\description{
Data from the \code{PISA2012lite} package.
}
\details{
The original dataset (Program for International Student Assessment, \url{http://www.oecd.org/pisa/}) contains information on students'
performance on various cognitive tests expressed with so-called plausible values measured in three fields: Mathematics (\code{PV1MATH}),
Science (\code{PV1SCIE}) and Reading (\code{PV1READ}).
\code{pisa2012} is a reweighted version of the \code{PISA2012lite} data. The weighting was performed using the \code{W_FSTUWT} variable.
The variables are as follows (original column names from the \code{PISA2012lite} package are given in brackets):
\itemize{
\item \code{math} -- student's performance in Mathematics (originally: PV1MATH)
\item \code{reading} -- student's performance in Reading (originally: PV1READ)
\item \code{science} -- student's performance in Science (originally: PV1SCIE)
\item \code{country} -- student's country (originally: CNT)
}
}
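% A minimal, illustrative example (assumes the package exporting pisa2012 is attached):
\examples{
data(pisa2012)
str(pisa2012)
}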
\keyword{data}
|
/man/pisa2012.Rd
|
no_license
|
AleksandraDabrowska/factorMerger
|
R
| false
| true
| 1,188
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dataDocs.R
\docType{data}
\name{pisa2012}
\alias{pisa2012}
\alias{pisaEuro}
\title{PISA 2012 dataset}
\usage{
data(pisa2012)
}
\description{
Data from the \code{PISA2012lite} package.
}
\details{
The original dataset (Program for International Student Assessment, \url{http://www.oecd.org/pisa/}) contains information on students'
performance on various cognitive tests expressed with so-called plausible values measured in three fields: Mathematics (\code{PV1MATH}),
Science (\code{PV1SCIE}) and Reading (\code{PV1READ}).
\code{pisa2012} is a reweighted version of the \code{PISA2012lite} data. The weighting was performed using the \code{W_FSTUWT} variable.
The variables are as follows (original column names from the \code{PISA2012lite} package are given in brackets):
\itemize{
\item \code{math} -- student's performance in Mathematics (originally: PV1MATH)
\item \code{reading} -- student's performance in Reading (originally: PV1READ)
\item \code{science} -- student's performance in Science (originally: PV1SCIE)
\item \code{country} -- student's country (originally: CNT)
}
}
\keyword{data}
|
## These functions cache the inverse of a matrix so that the (potentially
## costly) inversion does not have to be recomputed for the same matrix.
## makeCacheMatrix creates a special "matrix" object that can cache its inverse
makeCacheMatrix <- function(x = matrix()) {
m <- NULL
set <- function(y) {
x <<- y
m <<- NULL
}
get <- function() x
setinverse <- function(inverse) m <<- inverse
getinverse <- function() m
list(set = set,
get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## cacheSolve computes the inverse of the special "matrix" returned by
## makeCacheMatrix, returning the cached inverse if it has already been computed
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
m <- x$getinverse()
if(!is.null(m)) {
message("getting cached data")
return(m)
}
data <- x$get()
m <- solve(data, ...)
x$setinverse(m)
m
}
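## Example usage (an illustrative sketch with a small invertible matrix):
m <- makeCacheMatrix(matrix(c(2, 0, 0, 2), 2, 2))
cacheSolve(m)  # computes the inverse and stores it in the cache
cacheSolve(m)  # prints "getting cached data" and returns the cached inverse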
|
/cachematrix.R
|
no_license
|
cagaraza/ProgrammingAssignment2
|
R
| false
| false
| 747
|
r
|
## These functions cache the inverse of a matrix so that the (potentially
## costly) inversion does not have to be recomputed for the same matrix.
## makeCacheMatrix creates a special "matrix" object that can cache its inverse
makeCacheMatrix <- function(x = matrix()) {
m <- NULL
set <- function(y) {
x <<- y
m <<- NULL
}
get <- function() x
setinverse <- function(inverse) m <<- inverse
getinverse <- function() m
list(set = set,
get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## cacheSolve computes the inverse of the special "matrix" returned by
## makeCacheMatrix, returning the cached inverse if it has already been computed
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
m <- x$getinverse()
if(!is.null(m)) {
message("getting cached data")
return(m)
}
data <- x$get()
m <- solve(data, ...)
x$setinverse(m)
m
}
|
library(shiny)
library(particlesjs)
ui <- fluidPage(
# Call particles in your UI
particles(),
headerPanel('Iris k-means clustering'),
sidebarLayout(
sidebarPanel(
selectInput('xcol', 'X Variable', names(iris)),
selectInput('ycol', 'Y Variable', names(iris),
selected=names(iris)[[2]]),
numericInput('clusters', 'Cluster count', 3,
min = 1, max = 9)
),
mainPanel(
plotOutput('plot1')
)
)
)
server <- function(input, output, session) {
selectedData <- reactive({
iris[, c(input$xcol, input$ycol)]
})
clusters <- reactive({
kmeans(selectedData(), input$clusters)
})
output$plot1 <- renderPlot({
palette(c("#E41A1C", "#377EB8", "#4DAF4A", "#984EA3",
"#FF7F00", "#FFFF33", "#A65628", "#F781BF", "#999999"))
par(mar = c(5.1, 4.1, 0, 1))
plot(selectedData(),
col = clusters()$cluster,
pch = 20, cex = 3)
points(clusters()$centers, pch = 4, cex = 4, lwd = 4)
})
}
shinyApp(ui, server)
|
/dev/app-iris.R
|
no_license
|
dreamRs/particlesjs
|
R
| false
| false
| 1,046
|
r
|
library(shiny)
library(particlesjs)
ui <- fluidPage(
# Call particles in your UI
particles(),
headerPanel('Iris k-means clustering'),
sidebarLayout(
sidebarPanel(
selectInput('xcol', 'X Variable', names(iris)),
selectInput('ycol', 'Y Variable', names(iris),
selected=names(iris)[[2]]),
numericInput('clusters', 'Cluster count', 3,
min = 1, max = 9)
),
mainPanel(
plotOutput('plot1')
)
)
)
server <- function(input, output, session) {
selectedData <- reactive({
iris[, c(input$xcol, input$ycol)]
})
clusters <- reactive({
kmeans(selectedData(), input$clusters)
})
output$plot1 <- renderPlot({
palette(c("#E41A1C", "#377EB8", "#4DAF4A", "#984EA3",
"#FF7F00", "#FFFF33", "#A65628", "#F781BF", "#999999"))
par(mar = c(5.1, 4.1, 0, 1))
plot(selectedData(),
col = clusters()$cluster,
pch = 20, cex = 3)
points(clusters()$centers, pch = 4, cex = 4, lwd = 4)
})
}
shinyApp(ui, server)
|
function(input, output) {
# output$outcode <- renderPrint({
#
# myin <- input$edges
# if(length(myin) > 0) {
# print(edgeList())
# #print(igraphFromList())
# } else {
#
# print("Shift+click to add nodes, shift+drag to add edges")
#
# }
#
# })
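  ## edgeList unpacks input$edges, which arrives from the browser as a flat
  ## vector with 17 entries per edge (id, source/target names, side flags,
  ## latent/outcome/exposure indicators, the monotonicity flag, and x/y
  ## coordinates), into a data.frame with one row per edge.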
edgeList <- reactive({
myin <- input$edges
if(length(myin) > 0) {
j <- seq(1, length(myin) - 16, by = 17)
data.frame(id = paste0("e", myin[j]), source = myin[j+1], target = myin[j+2],
source.leftside = ifelse(myin[j+3] == "FALSE", 0, 1),
target.leftside = ifelse(myin[j + 4] == "FALSE", 0, 1),
rlconnect = ifelse(myin[j+5] == "FALSE", 0, 1),
source.latent = as.numeric(myin[j + 6]), target.latent = as.numeric(myin[j + 7]),
source.outcome = as.numeric(myin[j + 8]), target.outcome = as.numeric(myin[j + 9]),
source.exposure = as.numeric(myin[j + 10]), target.exposure = as.numeric(myin[j + 11]),
edge.monotone = as.numeric(myin[j + 12]),
source.x = as.numeric(myin[j + 13]), source.y = as.numeric(myin[j + 14]),
target.x = as.numeric(myin[j + 15]), target.y = as.numeric(myin[j + 16]) )
} else {
NULL
}
})
igraphFromList <- reactive({
myin <- input$edges
if(length(myin) > 0) {
edges <- edgeList()[, -c(1)]
vertex.meta <- rbind(data.frame(vnames = edges$source, leftside = edges$source.leftside,
latent = edges$source.latent, outcome = edges$source.outcome,
exposure = edges$source.exposure, x = edges$source.x, y = -edges$source.y),
data.frame(vnames = edges$target, leftside = edges$target.leftside,
latent = edges$target.latent, outcome = edges$target.outcome,
exposure = edges$target.exposure, x = edges$target.x, y = -edges$target.y))
#print(myin)
graphres <- graph_from_data_frame(edges[, c(1, 2, 5, 12)], vertices = unique(vertex.meta))
ogleft <- V(graphres)[V(graphres)$leftside == 1]
ogright <- V(graphres)[V(graphres)$leftside == 0]
if(length(ogleft) > 1) {
graphres <- add_vertices(graphres, 1, name = "Ul", latent = 1,
leftside = 1, outcome = 0, exposure = 0,
x = min(V(graphres)$x) - 100, y = min(V(graphres)$y) +20)
graphres <- add_edges(graphres, unlist(lapply(names(ogleft), function(x) c("Ul", x))),
rlconnect = rep(0,length(ogleft)), edge.monotone= rep(0, length(ogleft)))
}
if(length(ogright) > 1) {
graphres <- add_vertices(graphres, 1, name = "Ur", latent = 1,
leftside = 0, outcome = 0, exposure = 0,
x = max(V(graphres)$x) + 100, y = min(V(graphres)$y) +20)
graphres <- add_edges(graphres, unlist(lapply(names(ogright), function(x) c("Ur", x))),
rlconnect = rep(0,length(ogright)), edge.monotone= rep(0, length(ogright)))
}
graphres
} else {
NULL
}
})
observe({
myin <- edgeList()
if(sum(myin$rlconnect) > 0) {
showNotification("No connections from right to left are allowed!", type = "error")
}
})
## return graph to R
observeEvent(input$endbtn, {
myin <- edgeList()
if(sum(myin$rlconnect) > 0) {
showNotification("No connections from right to left are allowed!", type = "error")
} else {
graph <- igraphFromList()
## check for valid names
vnames <- names(V(graph))
badnames <- grep("(^[^[:alpha:]])|([[:punct:]])|(^p)", vnames, value = TRUE)
## check for cycles
cychek <- find_cycles(graph)
if(length(cychek) > 0) {
showNotification("No cycles in the graph are allowed!", type = "error")
} else if (length(badnames) > 0) {
showNotification(sprintf("Invalid names: %s, found in graph vertices!",
paste(badnames, collapse = ",")), type = "error")
} else {
stopApp(graph)
}
}
})
## analyze the graph in shiny
observeEvent(input$analyze, {
removeUI(selector = "#effect")
removeUI(selector = "#constraintsdiv")
removeUI(selector = "#myplot")
removeUI(selector = "#results")
myin <- edgeList()
if(sum(myin$rlconnect) > 0) {
showNotification("No connections from right to left are allowed!", type = "error")
} else {
graphres <- igraphFromList()
cychek <- find_cycles(graphres)
## check for valid names
vnames <- names(V(graphres))
badnames <- grep("(^[^[:alpha:]])|([[:punct:]])|(^p)", vnames, value = TRUE)
if(length(cychek) > 0) {
showNotification("No cycles in the graph are allowed!", type = "error")
} else if(length(badnames) > 0) {
showNotification(sprintf("Invalid names: %s, found in graph vertices!", paste(badnames, collapse = ",")), type = "error")
} else {
insertUI(selector = "#analyze",
where = "afterEnd",
ui = tags$div(id = "myplot",
plotOutput("myplot")
)
)
output$myplot <- renderPlot(plot_graphres(graphres))
rightvars <- V(graphres)[V(graphres)$leftside == 0 & names(V(graphres)) != "Ur"]
expo <- V(graphres)[V(graphres)$exposure == 1]
outc <- V(graphres)[V(graphres)$outcome == 1]
effectpath <- all_simple_paths(graphres, from = expo, to = outc)
if(length(outc) == 0 | length(expo) == 0) {
default.effect <- ""
} else {
## default total effect
def.eff <- paste0(names(outc), "(")
for(j in 1:length(effectpath)) {
res <- ""
nvs <- length(effectpath[[j]])
for(k in max(1, nvs - 1):1) {
thisvar <- effectpath[[j]][k]
res <- paste0(res, names(thisvar),
ifelse(names(thisvar) == names(expo),
" = %s", "("))
}
def.eff <- paste0(def.eff, res, paste(rep(")", max(1, nvs - 1)), collapse = ""), ifelse(j < length(effectpath), ", ", ""))
}
def.eff <- paste0("p{", def.eff, "=1}")
default.effect <- paste(sapply(c(1, 0), function(x) sprintf(def.eff, x, x)), collapse = " - ")
}
##
effectUI <- div(id = "effect",
h3("Specify causal effect of interest (required)"),
helpText("Use the text box to describe your causal effect of interest. The effects must be of the form p{V11(X=a)=a; V12(X=a)=b;...; W1=a; ...} op1 p{V21(X=b)=a; V22(X=c)=b;...; W1=b} op2 ... where Vij and Wk are names of variables in the graph, a, b are either 0 or 1, and op are either - or +. You can specify a single probability statement (i.e., no operator). Note that the probability statements begin with little p, and use curly braces, and items inside the probability statements are separated by ;. The variables may be potential outcomes which are denoted by parentheses, and they may also be observed outcomes which do not have parentheses. Variables may also be nested inside potential outcomes."),
fluidRow(id = "effecttext",
column(8, textAreaInput("effect", NULL, default.effect)),
column(1, actionButton("parseeffect", "Parse", style="background-color: #69fb82"))
))
insertUI(selector = "#myplot",
where = "afterEnd",
ui = list(effectUI,
div(id = "constraintsdiv", h3("Constraints (optional)"),
helpText("Here you can specify potential outcomes to constrain by writing the potential outcomes, values of their parents, and operators that determine the constraint (equalities or inequalities). For example, X(Z = 1) >= X(Z = 0)."),
fluidRow(
column(1, actionButton("constraints", "Specify constraints"))
)),
div(id = "results", h3("Bounds"),
fluidRow(
column(1, actionButton("optimize", "Compute the bounds", style="background-color: #69fb82"))
))
)
)
}}
})
effectFixed <- reactiveValues()
observeEvent(input$parseeffect, {
effecttext <- input$effect
error <- NULL
parsed.test <- tryCatch(parse_effect(effecttext), error = function(e) "fail")
if(!is.list(parsed.test)) {
error <- "Unable to parse effect!"
} else {
chk0 <- lapply(parsed.test$vars, function(x) lapply(x, function(y){
if(is.list(y)) names(y)
}))
interven.vars <- unique(unlist(chk0))
graph <- igraphFromList()
## check that children of intervention sets are on the right
any.children.onleft <- sapply(interven.vars, function(v) {
children <- neighbors(graph, V(graph)[v], mode = "out")
any(children$leftside == 1)
})
if(any(any.children.onleft) == TRUE) {
error <- sprintf("Cannot intervent on %s because it has children on the leftside!",
paste(interven.vars[which(any.children.onleft)], collapse = ", "))
}
if("oper" %in% names(chk0) & !chk0["oper"] %in% c("+", "-")) {
error <- sprintf("Operator '%s' not allowed!", chk0["oper"])
}
allnmes <- unique(unlist(lapply(parsed.test$vars, names)))
realnms <- names(V(graph))
if(any(!allnmes %in% realnms)) {
error <- sprintf("Names %s in effect not specified in graph!",
paste(allnmes[which(!allnmes %in% realnms)], collapse = ", "))
}
}
if(is.null(error)) {
removeUI("#effecttext", immediate = TRUE)
insertUI("#effect", "beforeEnd",
ui = fluidRow(column(8, pre(effecttext))))
effectFixed$effectt <- effecttext
} else {
showNotification(error, type = "error")
}
})
optimizeGraph <- reactive({
graphres <- igraphFromList()
## parse causal effect
effecttext <- effectFixed$effectt
constraints <- fixedConstraints$constraints
withProgress(message = "Computing bounds", value = .1, expr = {
obj <- analyze_graph(graphres, constraints, effectt = effecttext)
incProgress(.4)
if(obj$objective == "") {
showNotification("Objective is NULL, nothing to optimize.", type = "error")
"Error"
} else {
bounds.obs <- optimize_effect(obj)
list(graphres = graphres, obj = obj, bounds.obs = bounds.obs,
constraints = constraints, effect = effecttext)
}
})
})
### constraints
fixedConstraints <- reactiveValues()
observeEvent(input$constraints, {
insertUI(selector = "#constraintsdiv", where = "beforeEnd",
ui = div(h3("Constraints"),
fluidRow(id = "constrainttext",
column(8, textAreaInput("constraintfield", NULL)),
column(1, actionButton("parseconstraint", "Parse", style="background-color: #69fb82"))
)
)
)
})
observeEvent(input$parseconstraint, {
constrainttext <- strsplit(input$constraintfield, "\n", fixed = TRUE)[[1]]
graph <- igraphFromList()
obsnames <- names(V(graph)[!names(V(graph)) %in% c("Ur", "Ul")])
error <- NULL
parsed.ctest <- tryCatch(parse_constraints(constrainttext, obsnames), error = function(e) "fail")
if(!is.list(parsed.ctest)) {
error <- "Unable to parse constraints!"
} else {
allnmes <- unique(c(parsed.ctest$leftout, parsed.ctest$rightout,
gsub("=(0|1)", "", c(parsed.ctest$leftcond, parsed.ctest$rightcond))))
if(any(!parsed.ctest$operator %in% c("==", "<", ">", "<=", ">="))) {
error <- "Operator not allowed!"
}
realnms <- c(names(V(graph)), "0", "1")
if(any(!allnmes %in% realnms)) {
error <- sprintf("Names %s in constraint not specified in graph!",
paste(allnmes[which(!allnmes %in% realnms)], collapse = ", "))
}
}
if(is.null(error)) {
removeUI("#constrainttext", immediate = TRUE)
insertUI("#constraintsdiv", "beforeEnd",
ui = fluidRow(column(8, pre(paste(constrainttext, collapse = "\n")))))
fixedConstraints$constraints <- constrainttext
} else {
showNotification(error, type = "error")
}
})
observeEvent(input$optimize, {
b <- optimizeGraph()
if(is.list(b)) {
removeUI(selector = "#resultsText")
insertUI(selector = "#results", where = "beforeEnd",
ui = div(fluidRow(column(12, h3("Results")),
column(12, pre(htmlOutput("resultsText")))
))
)
effecttext <- sprintf("Computed bounds for the effect %s", effectFixed$effectt)
lkey <- letters[1:length(attr(b$obj$parameters, "rightvars"))]
rkey <- letters[(length(attr(b$obj$parameters, "rightvars")) + 1):(length(attr(b$obj$parameters, "rightvars")) +
length(attr(b$obj$parameters, "condvars")))]
if(length(attr(b$obj$parameters, "condvars")) == 0) rkey <- NULL
sampparm <- paste0("p", paste(lkey, collapse = ""), "_",
paste(rkey, collapse = ""))
probstate <- paste0("P(", paste(paste0(attr(b$obj$parameters, "rightvars"), " = ", lkey), collapse = ", "), " | ",
paste0(attr(b$obj$parameters, "condvars"), " = ", rkey, collapse = ", "), ")")
if(length(attr(b$obj$parameters, "condvars")) == 0) {
probstate <- paste0("P(", paste(paste0(attr(b$obj$parameters, "rightvars"), " = ", lkey), collapse = ", "), ")")
}
variabletext <- sprintf("The bounds are reported in terms of parameters of the form %s, which represents the probability %s.",
sampparm, probstate)
if(!is.null(b$constraints)) {
constrainttext <- sprintf("This following constraints have been specifed: \n %s", paste(b$constraints, collapse = "\n"))
} else constrainttext <- "No constraints have been specified"
textres <- lapply(c(effecttext, constrainttext, variabletext, "Bounds: ",
"Lower = ", b$bounds.obs$bounds[1], "Upper = ", b$bounds.obs$bounds[2]), function(x) {
x2 <- strsplit(x, "\n", fixed = TRUE)[[1]]
lapply(x2, function(x) tags$p(x))
})
output$resultsText <- renderUI(do.call(tagList, textres))
insertUI(selector = "#results", where = "beforeEnd",
ui = fluidRow(
column(2, actionButton("downloadf",
"Exit and return objects to R",
style="background-color: #fb6970")),
column(2, actionButton("latexme",
"Show latex code for bounds"))))
}
})
observeEvent(input$downloadf, {
print("click")
b <- optimizeGraph()
b$boundsFunction <- interpret_bounds(b$bounds.obs$bounds, b$obj$parameters)
stopApp(b)
})
observeEvent(input$latexme, {
b <- optimizeGraph()
insertUI(selector = "#results", where = "afterEnd",
ui = div(fluidRow(column(12, h3("Latex code")),
column(12, pre(htmlOutput("latexCode")))
))
)
output$latexCode <- renderUI(p(latex_bounds(b$bounds.obs$bounds, b$obj$parameters)))
})
}
|
/inst/shiny/interface/server.R
|
permissive
|
jbdatascience/causaloptim
|
R
| false
| false
| 19,113
|
r
|
function(input, output) {
# output$outcode <- renderPrint({
#
# myin <- input$edges
# if(length(myin) > 0) {
# print(edgeList())
# #print(igraphFromList())
# } else {
#
# print("Shift+click to add nodes, shift+drag to add edges")
#
# }
#
# })
edgeList <- reactive({
myin <- input$edges
if(length(myin) > 0) {
j <- seq(1, length(myin) - 16, by = 17)
data.frame(id = paste0("e", myin[j]), source = myin[j+1], target = myin[j+2],
source.leftside = ifelse(myin[j+3] == "FALSE", 0, 1),
target.leftside = ifelse(myin[j + 4] == "FALSE", 0, 1),
rlconnect = ifelse(myin[j+5] == "FALSE", 0, 1),
source.latent = as.numeric(myin[j + 6]), target.latent = as.numeric(myin[j + 7]),
source.outcome = as.numeric(myin[j + 8]), target.outcome = as.numeric(myin[j + 9]),
source.exposure = as.numeric(myin[j + 10]), target.exposure = as.numeric(myin[j + 11]),
edge.monotone = as.numeric(myin[j + 12]),
source.x = as.numeric(myin[j + 13]), source.y = as.numeric(myin[j + 14]),
target.x = as.numeric(myin[j + 15]), target.y = as.numeric(myin[j + 16]) )
} else {
NULL
}
})
igraphFromList <- reactive({
myin <- input$edges
if(length(myin) > 0) {
edges <- edgeList()[, -c(1)]
vertex.meta <- rbind(data.frame(vnames = edges$source, leftside = edges$source.leftside,
latent = edges$source.latent, outcome = edges$source.outcome,
exposure = edges$source.exposure, x = edges$source.x, y = -edges$source.y),
data.frame(vnames = edges$target, leftside = edges$target.leftside,
latent = edges$target.latent, outcome = edges$target.outcome,
exposure = edges$target.exposure, x = edges$target.x, y = -edges$target.y))
#print(myin)
graphres <- graph_from_data_frame(edges[, c(1, 2, 5, 12)], vertices = unique(vertex.meta))
ogleft <- V(graphres)[V(graphres)$leftside == 1]
ogright <- V(graphres)[V(graphres)$leftside == 0]
if(length(ogleft) > 1) {
graphres <- add_vertices(graphres, 1, name = "Ul", latent = 1,
leftside = 1, outcome = 0, exposure = 0,
x = min(V(graphres)$x) - 100, y = min(V(graphres)$y) +20)
graphres <- add_edges(graphres, unlist(lapply(names(ogleft), function(x) c("Ul", x))),
rlconnect = rep(0,length(ogleft)), edge.monotone= rep(0, length(ogleft)))
}
if(length(ogright) > 1) {
graphres <- add_vertices(graphres, 1, name = "Ur", latent = 1,
leftside = 0, outcome = 0, exposure = 0,
x = max(V(graphres)$x) + 100, y = min(V(graphres)$y) +20)
graphres <- add_edges(graphres, unlist(lapply(names(ogright), function(x) c("Ur", x))),
rlconnect = rep(0,length(ogright)), edge.monotone= rep(0, length(ogright)))
}
graphres
} else {
NULL
}
})
observe({
myin <- edgeList()
if(sum(myin$rlconnect) > 0) {
showNotification("No connections from right to left are allowed!", type = "error")
}
})
## return graph to R
observeEvent(input$endbtn, {
myin <- edgeList()
if(sum(myin$rlconnect) > 0) {
showNotification("No connections from right to left are allowed!", type = "error")
} else {
graph <- igraphFromList()
## check for valid names
vnames <- names(V(graph))
badnames <- grep("(^[^[:alpha:]])|([[:punct:]])|(^p)", vnames, value = TRUE)
## check for cycles
cychek <- find_cycles(graph)
if(length(cychek) > 0) {
showNotification("No cycles in the graph are allowed!", type = "error")
} else if (length(badnames) > 0) {
showNotification(sprintf("Invalid names: %s, found in graph vertices!",
paste(badnames, collapse = ",")), type = "error")
} else {
stopApp(graph)
}
}
})
## analyze the graph in shiny
observeEvent(input$analyze, {
removeUI(selector = "#effect")
removeUI(selector = "#constraintsdiv")
removeUI(selector = "#myplot")
removeUI(selector = "#results")
myin <- edgeList()
if(sum(myin$rlconnect) > 0) {
showNotification("No connections from right to left are allowed!", type = "error")
} else {
graphres <- igraphFromList()
cychek <- find_cycles(graphres)
## check for valid names
vnames <- names(V(graphres))
badnames <- grep("(^[^[:alpha:]])|([[:punct:]])|(^p)", vnames, value = TRUE)
if(length(cychek) > 0) {
showNotification("No cycles in the graph are allowed!", type = "error")
} else if(length(badnames) > 0) {
showNotification(sprintf("Invalid names: %s, found in graph vertices!", paste(badnames, collapse = ",")), type = "error")
} else {
insertUI(selector = "#analyze",
where = "afterEnd",
ui = tags$div(id = "myplot",
plotOutput("myplot")
)
)
output$myplot <- renderPlot(plot_graphres(graphres))
rightvars <- V(graphres)[V(graphres)$leftside == 0 & names(V(graphres)) != "Ur"]
expo <- V(graphres)[V(graphres)$exposure == 1]
outc <- V(graphres)[V(graphres)$outcome == 1]
effectpath <- all_simple_paths(graphres, from = expo, to = outc)
if(length(outc) == 0 | length(expo) == 0) {
default.effect <- ""
} else {
## default total effect
def.eff <- paste0(names(outc), "(")
for(j in 1:length(effectpath)) {
res <- ""
nvs <- length(effectpath[[j]])
for(k in max(1, nvs - 1):1) {
thisvar <- effectpath[[j]][k]
res <- paste0(res, names(thisvar),
ifelse(names(thisvar) == names(expo),
" = %s", "("))
}
def.eff <- paste0(def.eff, res, paste(rep(")", max(1, nvs - 1)), collapse = ""), ifelse(j < length(effectpath), ", ", ""))
}
def.eff <- paste0("p{", def.eff, "=1}")
default.effect <- paste(sapply(c(1, 0), function(x) sprintf(def.eff, x, x)), collapse = " - ")
}
##
effectUI <- div(id = "effect",
h3("Specify causal effect of interest (required)"),
helpText("Use the text box to describe your causal effect of interest. The effects must be of the form p{V11(X=a)=a; V12(X=a)=b;...; W1=a; ...} op1 p{V21(X=b)=a; V22(X=c)=b;...; W1=b} op2 ... where Vij and Wk are names of variables in the graph, a, b are either 0 or 1, and op are either - or +. You can specify a single probability statement (i.e., no operator). Note that the probability statements begin with little p, and use curly braces, and items inside the probability statements are separated by ;. The variables may be potential outcomes which are denoted by parentheses, and they may also be observed outcomes which do not have parentheses. Variables may also be nested inside potential outcomes."),
fluidRow(id = "effecttext",
column(8, textAreaInput("effect", NULL, default.effect)),
column(1, actionButton("parseeffect", "Parse", style="background-color: #69fb82"))
))
insertUI(selector = "#myplot",
where = "afterEnd",
ui = list(effectUI,
div(id = "constraintsdiv", h3("Constraints (optional)"),
helpText("Here you can specify potential outcomes to constrain by writing the potential outcomes, values of their parents, and operators that determine the constraint (equalities or inequalities). For example, X(Z = 1) >= X(Z = 0)."),
fluidRow(
column(1, actionButton("constraints", "Specify constraints"))
)),
div(id = "results", h3("Bounds"),
fluidRow(
column(1, actionButton("optimize", "Compute the bounds", style="background-color: #69fb82"))
))
)
)
}}
})
effectFixed <- reactiveValues()
observeEvent(input$parseeffect, {
effecttext <- input$effect
error <- NULL
parsed.test <- tryCatch(parse_effect(effecttext), error = function(e) "fail")
if(!is.list(parsed.test)) {
error <- "Unable to parse effect!"
} else {
chk0 <- lapply(parsed.test$vars, function(x) lapply(x, function(y){
if(is.list(y)) names(y)
}))
interven.vars <- unique(unlist(chk0))
graph <- igraphFromList()
## check that children of intervention sets are on the right
any.children.onleft <- sapply(interven.vars, function(v) {
children <- neighbors(graph, V(graph)[v], mode = "out")
any(children$leftside == 1)
})
if(any(any.children.onleft) == TRUE) {
error <- sprintf("Cannot intervent on %s because it has children on the leftside!",
paste(interven.vars[which(any.children.onleft)], collapse = ", "))
}
if("oper" %in% names(chk0) & !chk0["oper"] %in% c("+", "-")) {
error <- sprintf("Operator '%s' not allowed!", chk0["oper"])
}
allnmes <- unique(unlist(lapply(parsed.test$vars, names)))
realnms <- names(V(graph))
if(any(!allnmes %in% realnms)) {
error <- sprintf("Names %s in effect not specified in graph!",
paste(allnmes[which(!allnmes %in% realnms)], collapse = ", "))
}
}
if(is.null(error)) {
removeUI("#effecttext", immediate = TRUE)
insertUI("#effect", "beforeEnd",
ui = fluidRow(column(8, pre(effecttext))))
effectFixed$effectt <- effecttext
} else {
showNotification(error, type = "error")
}
})
optimizeGraph <- reactive({
graphres <- igraphFromList()
## parse causal effect
effecttext <- effectFixed$effectt
constraints <- fixedConstraints$constraints
withProgress(message = "Computing bounds", value = .1, expr = {
obj <- analyze_graph(graphres, constraints, effectt = effecttext)
incProgress(.4)
if(obj$objective == "") {
showNotification("Objective is NULL, nothing to optimize.", type = "error")
"Error"
} else {
bounds.obs <- optimize_effect(obj)
list(graphres = graphres, obj = obj, bounds.obs = bounds.obs,
constraints = constraints, effect = effecttext)
}
})
})
### constraints
fixedConstraints <- reactiveValues()
observeEvent(input$constraints, {
insertUI(selector = "#constraintsdiv", where = "beforeEnd",
ui = div(h3("Constraints"),
fluidRow(id = "constrainttext",
column(8, textAreaInput("constraintfield", NULL)),
column(1, actionButton("parseconstraint", "Parse", style="background-color: #69fb82"))
)
)
)
})
observeEvent(input$parseconstraint, {
constrainttext <- strsplit(input$constraintfield, "\n", fixed = TRUE)[[1]]
graph <- igraphFromList()
obsnames <- names(V(graph)[!names(V(graph)) %in% c("Ur", "Ul")])
error <- NULL
parsed.ctest <- tryCatch(parse_constraints(constrainttext, obsnames), error = function(e) "fail")
if(!is.list(parsed.ctest)) {
error <- "Unable to parse constraints!"
} else {
allnmes <- unique(c(parsed.ctest$leftout, parsed.ctest$rightout,
gsub("=(0|1)", "", c(parsed.ctest$leftcond, parsed.ctest$rightcond))))
if(any(!parsed.ctest$operator %in% c("==", "<", ">", "<=", ">="))) {
error <- "Operator not allowed!"
}
realnms <- c(names(V(graph)), "0", "1")
if(any(!allnmes %in% realnms)) {
error <- sprintf("Names %s in constraint not specified in graph!",
paste(allnmes[which(!allnmes %in% realnms)], collapse = ", "))
}
}
if(is.null(error)) {
removeUI("#constrainttext", immediate = TRUE)
insertUI("#constraintsdiv", "beforeEnd",
ui = fluidRow(column(8, pre(paste(constrainttext, collapse = "\n")))))
fixedConstraints$constraints <- constrainttext
} else {
showNotification(error, type = "error")
}
})
observeEvent(input$optimize, {
b <- optimizeGraph()
if(is.list(b)) {
removeUI(selector = "#resultsText")
insertUI(selector = "#results", where = "beforeEnd",
ui = div(fluidRow(column(12, h3("Results")),
column(12, pre(htmlOutput("resultsText")))
))
)
effecttext <- sprintf("Computed bounds for the effect %s", effectFixed$effectt)
lkey <- letters[1:length(attr(b$obj$parameters, "rightvars"))]
rkey <- letters[(length(attr(b$obj$parameters, "rightvars")) + 1):(length(attr(b$obj$parameters, "rightvars")) +
length(attr(b$obj$parameters, "condvars")))]
if(length(attr(b$obj$parameters, "condvars")) == 0) rkey <- NULL
sampparm <- paste0("p", paste(lkey, collapse = ""), "_",
paste(rkey, collapse = ""))
probstate <- paste0("P(", paste(paste0(attr(b$obj$parameters, "rightvars"), " = ", lkey), collapse = ", "), " | ",
paste0(attr(b$obj$parameters, "condvars"), " = ", rkey, collapse = ", "), ")")
if(length(attr(b$obj$parameters, "condvars")) == 0) {
probstate <- paste0("P(", paste(paste0(attr(b$obj$parameters, "rightvars"), " = ", lkey), collapse = ", "), ")")
}
variabletext <- sprintf("The bounds are reported in terms of parameters of the form %s, which represents the probability %s.",
sampparm, probstate)
if(!is.null(b$constraints)) {
constrainttext <- sprintf("This following constraints have been specifed: \n %s", paste(b$constraints, collapse = "\n"))
} else constrainttext <- "No constraints have been specified"
textres <- lapply(c(effecttext, constrainttext, variabletext, "Bounds: ",
"Lower = ", b$bounds.obs$bounds[1], "Upper = ", b$bounds.obs$bounds[2]), function(x) {
x2 <- strsplit(x, "\n", fixed = TRUE)[[1]]
lapply(x2, function(x) tags$p(x))
})
output$resultsText <- renderUI(do.call(tagList, textres))
insertUI(selector = "#results", where = "beforeEnd",
ui = fluidRow(
column(2, actionButton("downloadf",
"Exit and return objects to R",
style="background-color: #fb6970")),
column(2, actionButton("latexme",
"Show latex code for bounds"))))
}
})
observeEvent(input$downloadf, {
print("click")
b <- optimizeGraph()
b$boundsFunction <- interpret_bounds(b$bounds.obs$bounds, b$obj$parameters)
stopApp(b)
})
observeEvent(input$latexme, {
b <- optimizeGraph()
insertUI(selector = "#results", where = "afterEnd",
ui = div(fluidRow(column(12, h3("Latex code")),
column(12, pre(htmlOutput("latexCode")))
))
)
output$latexCode <- renderUI(p(latex_bounds(b$bounds.obs$bounds, b$obj$parameters)))
})
}
|
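# Plots ~95% Poisson acceptance intervals: for each lambda on a grid, outcomes
# x = 0..100 are ranked by the likelihood ratio dpois(x, lambda) / dpois(x, x)
# (a Feldman-Cousins-style ordering, taking x as the MLE of lambda) and
# accumulated until 95% probability is covered; the acceptance interval is then
# drawn as a horizontal segment. The ablines mark the interval [1.84, 11.26]
# read off for an observed count of x = 5. (Interpretation inferred from the
# code below.)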
quartz(type="png", file="fc.png", width=7, height=7, bg="white", family="HiraKakuProN-W3")
par(mgp=c(1.7,0.7,0)) # title and axis margins. default: c(3,1,0)
par(mar=c(3,3,2,2)) # bottom, left, top, right margins. default: c(5,4,4,2)+0.1
plot(NULL, xlim=c(0,20), ylim=c(0,20), xlab="x", ylab="lambda", xaxs="i", yaxs="i", asp=1)
for (lambda in seq(0,20,0.1)) {
r = dpois(0:100, lambda) / dpois(0:100, 0:100)
o = order(r, decreasing=TRUE)
t = sort(dpois(0:100, lambda), decreasing=TRUE)
s = cumsum(dpois(0:100, lambda)[o])
m = r[o[sum(s < 0.95) + 1]]
x = range((0:100)[r >= m])
segments(x[1], lambda, x[2], lambda, col="#66ccff")
}
abline(v=5)
abline(h=1.84)
abline(h=11.26)
axis(4, c(1.84,11.26), labels=c("1.8","11.3"))
dev.off()
|
/stat/CI/fc.R
|
no_license
|
okumuralab/okumuralab.github.io
|
R
| false
| false
| 758
|
r
|
quartz(type="png", file="fc.png", width=7, height=7, bg="white", family="HiraKakuProN-W3")
par(mgp=c(1.7,0.7,0)) # title and axis margins. default: c(3,1,0)
par(mar=c(3,3,2,2)) # bottom, left, top, right margins. default: c(5,4,4,2)+0.1
plot(NULL, xlim=c(0,20), ylim=c(0,20), xlab="x", ylab="lambda", xaxs="i", yaxs="i", asp=1)
for (lambda in seq(0,20,0.1)) {
r = dpois(0:100, lambda) / dpois(0:100, 0:100)
o = order(r, decreasing=TRUE)
t = sort(dpois(0:100, lambda), decreasing=TRUE)
s = cumsum(dpois(0:100, lambda)[o])
m = r[o[sum(s < 0.95) + 1]]
x = range((0:100)[r >= m])
segments(x[1], lambda, x[2], lambda, col="#66ccff")
}
abline(v=5)
abline(h=1.84)
abline(h=11.26)
axis(4, c(1.84,11.26), labels=c("1.8","11.3"))
dev.off()
|
change_others_to_dataframe <- function(x) {
  # If x has at least one row, return it unchanged
  # Otherwise, return a data frame with 1 row of NA
if (nrow(x) != 0) {return(x)}
else {
return(data.frame(om_all = NA))
}
}
change_others_to_dataframe2 <- function(x) {
  # If x has at least one row, return it unchanged
  # Otherwise, return a data frame with 1 row of NAs
if (nrow(x) != 0) {return(x)}
else {
return(data.frame(lifeForm = NA,
habitat = NA,
vegetationType = NA))
}
}
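# Illustrative usage (hypothetical inputs; zero-row inputs gain one NA row):
change_others_to_dataframe(data.frame(om_all = "forest"))     # returned unchanged
change_others_to_dataframe(data.frame(om_all = character(0))) # -> 1 row of NA
change_others_to_dataframe2(data.frame())                     # -> 1 row of NAs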
|
/scripts/change_NA_to_df.R
|
no_license
|
AndreaSanchezTapia/CNCFlora_IUCN_LC
|
R
| false
| false
| 584
|
r
|
change_others_to_dataframe <- function(x) {
  # If x has at least one row, return it unchanged
  # Otherwise, return a data frame with 1 row of NA
if (nrow(x) != 0) {return(x)}
else {
return(data.frame(om_all = NA))
}
}
change_others_to_dataframe2 <- function(x) {
  # If x has at least one row, return it unchanged
  # Otherwise, return a data frame with 1 row of NAs
if (nrow(x) != 0) {return(x)}
else {
return(data.frame(lifeForm = NA,
habitat = NA,
vegetationType = NA))
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/build_run_modify.R
\name{umxRAM}
\alias{umxRAM}
\title{Easier path-based SEM modeling.}
\usage{
umxRAM(
model = NA,
...,
data = NULL,
name = NA,
group = NULL,
group.equal = NULL,
suffix = "",
comparison = TRUE,
type = c("Auto", "FIML", "cov", "cor", "WLS", "DWLS", "ULS"),
allContinuousMethod = c("cumulants", "marginals"),
autoRun = getOption("umx_auto_run"),
tryHard = c("no", "yes", "ordinal", "search"),
std = FALSE,
refModels = NULL,
remove_unused_manifests = TRUE,
independent = NA,
setValues = TRUE,
optimizer = NULL,
verbose = FALSE,
std.lv = FALSE,
lavaanMode = c("sem", "lavaan"),
printTab = FALSE,
show = "deprecated"
)
}
\arguments{
\item{model}{A model to update (or set to string to use as name for new model)}
\item{...}{umxPaths, mxThreshold objects, etc.}
\item{data}{data for the model. Can be an \code{\link[=mxData]{mxData()}} or a data.frame}
\item{name}{A friendly name for the model}
\item{group}{(optional) Column name to use for a multi-group model (default = NULL)}
\item{group.equal}{In multi-group models, what to equate across groups (default = NULL: all free)}
\item{suffix}{String to append to each label (useful if model will be used in a multi-group model)}
\item{comparison}{Compare the new model to the old (if updating an existing model: default = TRUE)}
\item{type}{One of "Auto", "FIML", "cov", "cor", "WLS", "DWLS", "ULS"}
\item{allContinuousMethod}{"cumulants" or "marginals". Used in all-continuous WLS data to determine if a means model needed.}
\item{autoRun}{Whether to run the model (default), or just to create it and return without running.}
\item{tryHard}{Default ('no') uses normal mxRun. "yes" uses mxTryHard. Other options: "ordinal", "search"}
\item{std}{Whether to show standardized estimates (TRUE), raw estimates (FALSE), or print fit only (NULL)}
\item{refModels}{pass in reference models if available. Use FALSE to suppress computing these if not provided.}
\item{remove_unused_manifests}{Whether to remove variables in the data to which no path makes reference (defaults to TRUE)}
\item{independent}{Whether the model is independent (default = NA)}
\item{setValues}{Whether to generate likely good start values (Defaults to TRUE)}
\item{optimizer}{optionally set the optimizer (default NULL does nothing)}
\item{verbose}{Whether to tell the user what latents and manifests were created etc. (Default = FALSE)}
\item{std.lv}{Whether to auto standardize latent variables when using string syntax (default = FALSE)}
\item{lavaanMode}{Defaults when building out string syntax default = "sem" (alternative is "lavaan", with very few defaults)}
\item{printTab}{For string input, whether to output a table of paths (default = FALSE)}
\item{show}{Deprecated}
}
\value{
\itemize{
\item \code{\link[=mxModel]{mxModel()}}
}
}
\description{
\code{umxRAM} expedites creation of structural equation models, still without doing invisible things to the model. It
supports \code{\link[=umxPath]{umxPath()}} but also lavaan-style string specification of models: lavaan's scripting language has become a
lingua franca for SEM books, so supporting this improves science learning.
Here's a path example that models miles per gallon (mpg) as a function of weight (wt) and engine displacement (disp)
using the widely used \code{mtcars} data set.\if{html}{\out{<div class="Rsplus">}}\preformatted{m1 = umxRAM("tim", data = mtcars,
umxPath(c("wt", "disp"), to = "mpg"),
umxPath("wt", with = "disp"),
umxPath(v.m. = c("wt", "disp", "mpg"))
)
}\if{html}{\out{</div>}}
As you can see, most of the work is done by \code{\link[=umxPath]{umxPath()}}. \code{umxRAM} wraps these paths up, takes the \verb{data =} input, and
then internally sets up all the labels and start values for the model, runs it, and calls \code{\link[=umxSummary]{umxSummary()}}, and \code{\link[=plot.MxModel]{plot.MxModel()}}.
Try it, or one of the several models in the examples at the bottom of this page.
A common error is to include data in the main list, a bit like
saying \code{lm(y ~ x + df)} instead of \code{lm(y ~ x, data = df)}.
\strong{nb}: Because it uses the presence of a variable in the data to detect if a variable is latent or not, \code{umxRAM} needs data at build time.
\strong{String Syntax}
Here is an example using lavaan syntax (for more, see \code{\link[=umxLav2RAM]{umxLav2RAM()}})\if{html}{\out{<div class="Rsplus">}}\preformatted{m1 = umxRAM("mpg ~ wt + disp", data = mtcars)
}\if{html}{\out{</div>}}
\strong{Sketch mode}
If you are at the "sketching" stage of theory consideration, \code{umxRAM} supports
a simple vector of manifest names to work with.\if{html}{\out{<div class="Rsplus">}}\preformatted{m1 = umxRAM("sketch", data = c("A", "B", "C"),
umxPath("A", to = "B"),
umxPath("B", with = "C"),
umxPath(v.m. = c("A", "B", "C"))
)
}\if{html}{\out{</div>}}
Will create this figure:
\if{html}{\figure{sketch.png}{options: width="50\%" alt="Figure: sketch.png"}}
\if{latex}{\figure{sketch.pdf}{options: width=7cm}}
}
\details{
\strong{Comparison with OpenMx and mxModel}
umxRAM differs from mxModel in the following ways:
\enumerate{
\item You don't need to set type = "RAM".
\item You don't need to list manifestVars (they are detected from path usage).
\item You don't need to list latentVars (detected as anything in paths but not in \code{mxData}).
\item You don't need to create mxData when you already have a data.frame.
\item You add data with \verb{data = } (as elsewhere in R, e.g. \code{\link[=lm]{lm()}}).
\item You don't need to add labels: paths are automatically labelled "a_to_b" etc.
\item You don't need to set start values, they will be done for you.
\item You don't need to \code{mxRun} the model: it will run automatically, and print a summary.
\item You don't need to run \code{summary}: with \code{autoRun=TRUE}, it will print a summary.
\item You get a plot of the model with estimates on the paths, including multiple groups.
\item Less typing: \code{\link[=umxPath]{umxPath()}} offers powerful verbs to describe paths.
\item Supports a subset of lavaan string input.
}
\strong{Start values}. Currently, manifest variable means are set to the observed means, residual variances are set to 80\%
of the observed variance of each variable,
and single-headed paths are set to a positive starting value (currently .9).
\emph{note}: The start-value strategy is subject to improvement, and will be documented in the help for \code{\link[=umxRAM]{umxRAM()}}.
\strong{Comparison with other software}
\strong{Black-box software, defaults, and automatic addition of paths}.
Some SEM software does a lot of behind-the-scenes defaulting and path addition.
If you want this, I'd say use \code{umxRAM} with lavaan string input.
}
\examples{
# ============================================
# = 1. Here's a simple example with raw data =
# ============================================
mtcars$litres = mtcars$disp/61.02
m1 = umxRAM("tim", data = mtcars,
umxPath(c("wt", "litres"), to = "mpg"),
umxPath("wt", with = "litres"),
umxPath(v.m. = c("wt", "litres", "mpg"))
)
# 2. Use parameters to see the parameter estimates and labels
parameters(m1)
# And umxSummary to get standardized parameters, CIs etc from the run model.
umxSummary(m1, std=TRUE)
# |name | Std.Estimate| Std.SE|CI |
# |:--------------|------------:|------:|:--------------------|
# |wt_to_mpg | -0.54| 0.17|-0.54 [-0.89, -0.2] |
# |disp_to_mpg | -0.36| 0.18|-0.36 [-0.71, -0.02] |
# |mpg_with_mpg | 0.22| 0.07|0.22 [0.08, 0.35] |
# |wt_with_wt | 1.00| 0.00|1 [1, 1] |
# |b1 | 0.89| 0.04|0.89 [0.81, 0.96] |
# |disp_with_disp | 1.00| 0.00|1 [1, 1] |
\dontrun{
# 3. Of course you can plot the model
plot(m1)
plot(m1, std=TRUE, means=FALSE)
plot(m1, std = TRUE, means=FALSE, strip= TRUE, resid = "line")
# ===============================================
# = lavaan string example (more at ?umxLav2RAM) =
# ===============================================
m1 = umxRAM(data = mtcars, "#modelName
mpg ~ wt + disp")
# =======================
# = A multi-group model =
# =======================
mtcars$litres = mtcars$disp/61.02
m1 = umxRAM("tim", data = mtcars, group = "am",
umxPath(c("wt", "litres"), to = "mpg"),
umxPath("wt", with = "litres"),
umxPath(v.m. = c("wt", "litres", "mpg"))
)
# In this model, all parameters are free across the two groups.
# ====================================
# = A cov model, with steps laid out =
# ====================================
# *note*: The variance of displacement is in cubic inches and is very large.
# to help the optimizer, one might, say, multiply disp *.016 to work in litres
tmp = mtcars; tmp$disp= tmp$disp *.016
# We can just give the raw data and ask for it to be made into type cov:
m1 = umxRAM("tim", data = tmp, type="cov",
umxPath(c("wt", "disp"), to = "mpg"),
umxPath("wt", with = "disp"),
umxPath(var = c("mpg", "wt", "disp"))
)
# (see ?umxPath for more nifty options making paths...)
# =========================================
# = umxRAM can also accept mxData as data =
# =========================================
# For convenience, list up the manifests you will be using
selVars = c("mpg", "wt", "disp")
tmp = mtcars; tmp$disp= tmp$disp *.016
myCov = mxData(cov(tmp[, selVars]), type = "cov", numObs = nrow(mtcars) )
m1 = umxRAM("tim", data = myCov,
umxPath(c("wt", "disp"), to = "mpg"),
umxPath("wt", with = "disp"),
umxPath(var = selVars)
)
# =======================
# = umxRAM supports WLS =
# =======================
# 1. Run an all-continuous WLS model
mw = umxRAM("raw", data = mtcars[, c("mpg", "wt", "disp")],
type = "WLS", allContinuousMethod = "cumulants",
umxPath(var = c("wt", "disp", "mpg")),
umxPath(c("wt", "disp"), to = "mpg"),
umxPath("wt", with = "disp"),
umxPath(var = c("wt", "disp", "mpg"))
)
# 2. Switch to marginals to support means
mw = umxRAM("raw", data = mtcars[, c("mpg", "wt", "disp")],
type = "WLS", allContinuousMethod= "marginals",
umxPath(var = c("wt", "disp", "mpg")),
umxPath(c("wt", "disp"), to = "mpg"),
umxPath("wt", with = "disp"),
umxPath(var = c("wt", "disp", "mpg"))
)
# ===============================
# = Using umxRAM in Sketch mode =
# ===============================
# No data needed: just list variable names!
# Resulting model will be plotted automatically
m1 = umxRAM("what does unique pairs do, I wonder", data = c("A", "B", "C"),
umxPath(unique.pairs = c("A", "B", "C"))
)
m1 = umxRAM("ring around the rosey", data = c("B", "C"),
umxPath(fromEach = c("A", "B", "C"))
)
m1 = umxRAM("fromEach with to", data = c("B", "C"),
umxPath(fromEach = c("B", "C"), to= "D")
)
m1 = umxRAM("CFA_sketch", data = paste0("x", 1:4),
umxPath("g", to = paste0("x", 1:4)),
umxPath(var = paste0("x", 1:4)),
umxPath(v1m0 = "g")
)
# =================================================
# = This is an example of using your own labels: =
# umxRAM will not over-ride them =
# =================================================
m1 = umxRAM("tim", data = mtcars, type="cov",
umxPath(c("wt", "disp"), to = "mpg"),
umxPath(cov = c("wt", "disp"), labels = "b1"),
umxPath(var = c("wt", "disp", "mpg"))
)
omxCheckEquals(m1$S$labels["disp", "wt"], "b1") # label preserved
m1$S$labels
# mpg wt disp
# mpg "mpg_with_mpg" "mpg_with_wt" "disp_with_mpg"
# wt "mpg_with_wt" "wt_with_wt" "b1"
# disp "disp_with_mpg" "b1" "disp_with_disp"
parameters(m1)
}
}
\references{
\itemize{
\item \url{https://tbates.github.io}, \url{https://github.com/tbates/umx}
}
}
\seealso{
\code{\link[=umxPath]{umxPath()}}, \code{\link[=umxSummary]{umxSummary()}}, \code{\link[=plot]{plot()}}, \code{\link[=parameters]{parameters()}}, \code{\link[=umxSuperModel]{umxSuperModel()}}, \code{\link[=umxLav2RAM]{umxLav2RAM()}}
Other Core Modeling Functions:
\code{\link{umxAlgebra}()},
\code{\link{umxMatrix}()},
\code{\link{umxModify}()},
\code{\link{umxPath}()},
\code{\link{umxRun}()},
\code{\link{umxSummary}()},
\code{\link{umxSuperModel}()},
\code{\link{umx}}
}
\concept{Core Modeling Functions}
|
/man/umxRAM.Rd
|
no_license
|
jishanling/umx
|
R
| false
| true
| 12,266
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/build_run_modify.R
\name{umxRAM}
\alias{umxRAM}
\title{Easier path-based SEM modeling.}
\usage{
umxRAM(
model = NA,
...,
data = NULL,
name = NA,
group = NULL,
group.equal = NULL,
suffix = "",
comparison = TRUE,
type = c("Auto", "FIML", "cov", "cor", "WLS", "DWLS", "ULS"),
allContinuousMethod = c("cumulants", "marginals"),
autoRun = getOption("umx_auto_run"),
tryHard = c("no", "yes", "ordinal", "search"),
std = FALSE,
refModels = NULL,
remove_unused_manifests = TRUE,
independent = NA,
setValues = TRUE,
optimizer = NULL,
verbose = FALSE,
std.lv = FALSE,
lavaanMode = c("sem", "lavaan"),
printTab = FALSE,
show = "deprecated"
)
}
\arguments{
\item{model}{A model to update (or set to string to use as name for new model)}
\item{...}{umxPaths, mxThreshold objects, etc.}
\item{data}{data for the model. Can be an \code{\link[=mxData]{mxData()}} or a data.frame}
\item{name}{A friendly name for the model}
\item{group}{(optional) Column name to use for a multi-group model (default = NULL)}
\item{group.equal}{In multi-group models, what to equate across groups (default = NULL: all free)}
\item{suffix}{String to append to each label (useful if model will be used in a multi-group model)}
\item{comparison}{Compare the new model to the old (if updating an existing model: default = TRUE)}
\item{type}{One of "Auto", "FIML", "cov", "cor", "WLS", "DWLS", "ULS"}
\item{allContinuousMethod}{"cumulants" or "marginals". Used in all-continuous WLS data to determine if a means model needed.}
\item{autoRun}{Whether to run the model (default), or just to create it and return without running.}
\item{tryHard}{Default ('no') uses normal mxRun. "yes" uses mxTryHard. Other options: "ordinal", "search"}
\item{std}{Whether to show standardized estimates (TRUE), raw estimates (FALSE), or print fit only (NULL)}
\item{refModels}{pass in reference models if available. Use FALSE to suppress computing these if not provided.}
\item{remove_unused_manifests}{Whether to remove variables in the data to which no path makes reference (defaults to TRUE)}
\item{independent}{Whether the model is independent (default = NA)}
\item{setValues}{Whether to generate likely good start values (Defaults to TRUE)}
\item{optimizer}{optionally set the optimizer (default NULL does nothing)}
\item{verbose}{Whether to tell the user what latents and manifests were created etc. (Default = FALSE)}
\item{std.lv}{Whether to auto standardize latent variables when using string syntax (default = FALSE)}
\item{lavaanMode}{Defaults when building out string syntax default = "sem" (alternative is "lavaan", with very few defaults)}
\item{printTab}{For string input, whether to output a table of paths (default = FALSE)}
\item{show}{Deprecated}
}
\value{
\itemize{
\item \code{\link[=mxModel]{mxModel()}}
}
}
\description{
\code{umxRAM} expedites creation of structural equation models, still without doing invisible things to the model. It
supports \code{\link[=umxPath]{umxPath()}} but also lavaan-style string specification of models: lavaan's scripting language has become a
lingua franca for SEM books, so supporting this improves science learning.
Here's a path example that models miles per gallon (mpg) as a function of weight (wt) and engine displacement (disp)
using the widely used \code{mtcars} data set.\if{html}{\out{<div class="Rsplus">}}\preformatted{m1 = umxRAM("tim", data = mtcars,
umxPath(c("wt", "disp"), to = "mpg"),
umxPath("wt", with = "disp"),
umxPath(v.m. = c("wt", "disp", "mpg"))
)
}\if{html}{\out{</div>}}
As you can see, most of the work is done by \code{\link[=umxPath]{umxPath()}}. \code{umxRAM} wraps these paths up, takes the \verb{data =} input, and
then internally sets up all the labels and start values for the model, runs it, and calls \code{\link[=umxSummary]{umxSummary()}}, and \code{\link[=plot.MxModel]{plot.MxModel()}}.
Try it, or one of the several models in the examples at the bottom of this page.
A common error is to include data in the main list, a bit like
saying \code{lm(y ~ x + df)} instead of \code{lm(y ~ x, data = df)}.
\strong{nb}: Because it uses the presence of a variable in the data to detect if a variable is latent or not, \code{umxRAM} needs data at build time.
\strong{String Syntax}
Here is an example using lavaan syntax (for more, see \code{\link[=umxLav2RAM]{umxLav2RAM()}})\if{html}{\out{<div class="Rsplus">}}\preformatted{m1 = umxRAM("mpg ~ wt + disp", data = mtcars)
}\if{html}{\out{</div>}}
\strong{Sketch mode}
If you are at the "sketching" stage of theory consideration, \code{umxRAM} supports
a simple vector of manifest names to work with.\if{html}{\out{<div class="Rsplus">}}\preformatted{m1 = umxRAM("sketch", data = c("A", "B", "C"),
umxPath("A", to = "B"),
umxPath("B", with = "C"),
umxPath(v.m. = c("A", "B", "C"))
)
}\if{html}{\out{</div>}}
Will create this figure:
\if{html}{\figure{sketch.png}{options: width="50\%" alt="Figure: sketch.png"}}
\if{latex}{\figure{sketch.pdf}{options: width=7cm}}
}
\details{
\strong{Comparison with OpenMx and mxModel}
umxRAM differs from mxModel in the following ways:
\enumerate{
\item You don't need to set type = "RAM".
\item You don't need to list manifestVars (they are detected from path usage).
\item You don't need to list latentVars (detected as anything in paths but not in \code{mxData}).
\item You don't need to create mxData when you already have a data.frame.
\item You add data with \verb{data = } (as elsewhere in R, e.g. \code{\link[=lm]{lm()}}).
\item You don't need to add labels: paths are automatically labelled "a_to_b" etc.
\item You don't need to set start values, they will be done for you.
\item You don't need to \code{mxRun} the model: it will run automatically, and print a summary.
\item You don't need to run \code{summary}: with \code{autoRun=TRUE}, it will print a summary.
\item You get a plot of the model with estimates on the paths, including multiple groups.
\item Less typing: \code{\link[=umxPath]{umxPath()}} offers powerful verbs to describe paths.
\item Supports a subset of lavaan string input.
}
\strong{Start values}. Currently, manifest variable means are set to the observed means, residual variances are set to 80\%
of the observed variance of each variable,
and single-headed paths are set to a positive starting value (currently .9).
\emph{note}: The start-value strategy is subject to improvement, and will be documented in the help for \code{\link[=umxRAM]{umxRAM()}}.
\strong{Comparison with other software}
\strong{Black-box software, defaults, and automatic addition of paths}.
Some SEM software does a lot of behind-the-scenes defaulting and path addition.
If you want this, I'd say use \code{umxRAM} with lavaan string input.
}
\examples{
# ============================================
# = 1. Here's a simple example with raw data =
# ============================================
mtcars$litres = mtcars$disp/61.02
m1 = umxRAM("tim", data = mtcars,
umxPath(c("wt", "litres"), to = "mpg"),
umxPath("wt", with = "litres"),
umxPath(v.m. = c("wt", "litres", "mpg"))
)
# 2. Use parameters to see the parameter estimates and labels
parameters(m1)
# And umxSummary to get standardized parameters, CIs etc from the run model.
umxSummary(m1, std=TRUE)
# |name | Std.Estimate| Std.SE|CI |
# |:--------------|------------:|------:|:--------------------|
# |wt_to_mpg | -0.54| 0.17|-0.54 [-0.89, -0.2] |
# |disp_to_mpg | -0.36| 0.18|-0.36 [-0.71, -0.02] |
# |mpg_with_mpg | 0.22| 0.07|0.22 [0.08, 0.35] |
# |wt_with_wt | 1.00| 0.00|1 [1, 1] |
# |b1 | 0.89| 0.04|0.89 [0.81, 0.96] |
# |disp_with_disp | 1.00| 0.00|1 [1, 1] |
\dontrun{
# 3. Of course you can plot the model
plot(m1)
plot(m1, std=TRUE, means=FALSE)
plot(m1, std = TRUE, means=FALSE, strip= TRUE, resid = "line")
# ===============================================
# = lavaan string example (more at ?umxLav2RAM) =
# ===============================================
m1 = umxRAM(data = mtcars, "#modelName
mpg ~ wt + disp")
# =======================
# = A multi-group model =
# =======================
mtcars$litres = mtcars$disp/61.02
m1 = umxRAM("tim", data = mtcars, group = "am",
umxPath(c("wt", "litres"), to = "mpg"),
umxPath("wt", with = "litres"),
umxPath(v.m. = c("wt", "litres", "mpg"))
)
# In this model, all parameters are free across the two groups.
# ====================================
# = A cov model, with steps laid out =
# ====================================
# *note*: The variance of displacement is in cubic inches and is very large.
# to help the optimizer, one might, say, multiply disp *.016 to work in litres
tmp = mtcars; tmp$disp= tmp$disp *.016
# We can just give the raw data and ask for it to be made into type cov:
m1 = umxRAM("tim", data = tmp, type="cov",
umxPath(c("wt", "disp"), to = "mpg"),
umxPath("wt", with = "disp"),
umxPath(var = c("mpg", "wt", "disp"))
)
# (see ?umxPath for more nifty options making paths...)
# =========================================
# = umxRAM can also accept mxData as data =
# =========================================
# For convenience, list up the manifests you will be using
selVars = c("mpg", "wt", "disp")
tmp = mtcars; tmp$disp= tmp$disp *.016
myCov = mxData(cov(tmp[, selVars]), type = "cov", numObs = nrow(mtcars) )
m1 = umxRAM("tim", data = myCov,
umxPath(c("wt", "disp"), to = "mpg"),
umxPath("wt", with = "disp"),
umxPath(var = selVars)
)
# =======================
# = umxRAM supports WLS =
# =======================
# 1. Run an all-continuous WLS model
mw = umxRAM("raw", data = mtcars[, c("mpg", "wt", "disp")],
type = "WLS", allContinuousMethod = "cumulants",
umxPath(var = c("wt", "disp", "mpg")),
umxPath(c("wt", "disp"), to = "mpg"),
umxPath("wt", with = "disp"),
umxPath(var = c("wt", "disp", "mpg"))
)
# 2. Switch to marginals to support means
mw = umxRAM("raw", data = mtcars[, c("mpg", "wt", "disp")],
type = "WLS", allContinuousMethod= "marginals",
umxPath(var = c("wt", "disp", "mpg")),
umxPath(c("wt", "disp"), to = "mpg"),
umxPath("wt", with = "disp"),
umxPath(var = c("wt", "disp", "mpg"))
)
# ===============================
# = Using umxRAM in Sketch mode =
# ===============================
# No data needed: just list variable names!
# Resulting model will be plotted automatically
m1 = umxRAM("what does unique pairs do, I wonder", data = c("A", "B", "C"),
umxPath(unique.pairs = c("A", "B", "C"))
)
m1 = umxRAM("ring around the rosey", data = c("B", "C"),
umxPath(fromEach = c("A", "B", "C"))
)
m1 = umxRAM("fromEach with to", data = c("B", "C"),
umxPath(fromEach = c("B", "C"), to= "D")
)
m1 = umxRAM("CFA_sketch", data = paste0("x", 1:4),
umxPath("g", to = paste0("x", 1:4)),
umxPath(var = paste0("x", 1:4)),
umxPath(v1m0 = "g")
)
# =================================================
# = This is an example of using your own labels: =
# umxRAM will not over-ride them =
# =================================================
m1 = umxRAM("tim", data = mtcars, type="cov",
umxPath(c("wt", "disp"), to = "mpg"),
umxPath(cov = c("wt", "disp"), labels = "b1"),
umxPath(var = c("wt", "disp", "mpg"))
)
omxCheckEquals(m1$S$labels["disp", "wt"], "b1") # label preserved
m1$S$labels
# mpg wt disp
# mpg "mpg_with_mpg" "mpg_with_wt" "disp_with_mpg"
# wt "mpg_with_wt" "wt_with_wt" "b1"
# disp "disp_with_mpg" "b1" "disp_with_disp"
parameters(m1)
}
}
\references{
\itemize{
\item \url{https://tbates.github.io}, \url{https://github.com/tbates/umx}
}
}
\seealso{
\code{\link[=umxPath]{umxPath()}}, \code{\link[=umxSummary]{umxSummary()}}, \code{\link[=plot]{plot()}}, \code{\link[=parameters]{parameters()}}, \code{\link[=umxSuperModel]{umxSuperModel()}}, \code{\link[=umxLav2RAM]{umxLav2RAM()}}
Other Core Modeling Functions:
\code{\link{umxAlgebra}()},
\code{\link{umxMatrix}()},
\code{\link{umxModify}()},
\code{\link{umxPath}()},
\code{\link{umxRun}()},
\code{\link{umxSummary}()},
\code{\link{umxSuperModel}()},
\code{\link{umx}}
}
\concept{Core Modeling Functions}
|
#####################################################################################
#
# SCORE RANKS
#
# Author: Dr. Víctor G. Tercero Gómez
# Affiliation: Tecnologico de Monterrey
#
# Date: November 3, 2017
# Version: 1.0
#
# DESCRIPTION
#
# The function gets different linear rank scores from a vector of ranks.
#
# r: is a numerical vector of ranks (a rank transformation was applied to the x observations)
# groups: is a factor that indicates the group each element of x belongs to
# score.type: is a character string. Choose one type:
# "Mood": Mood (1954) statistic is used (r-(N+1)/2)^2
# "FAB": Freund and Ansari (1957) and Ansari and Bradley (1960) statistic is used (N+1)/2 - abs(r - (N+1)/2)
# "Klotz": Klotz (1962) statistic is used (qnorm(r/(N+1),0,1))^2
# "NPL": Nonparametric Levene is used abs(r-groupmean(r))
# "SR": Squared Ranks statistic is used r^2
# "TG": Nothing is actually done here; the ordinary ranks are returned unchanged
# "FK": Fligner and Killeen (1976) statistic is used qnorm(1/2 + r/(2*(N+1)),0,1)
scoreX <- function(r, groups, score.type){
#Error verification
feasibles.scores = c("Mood", "FAB", "Klotz", "NPL", "SR", "TG", "FK")
  if(all(score.type != feasibles.scores)){
    stop("score type is not recognized")
  }
#Preliminaries
N = length(r) #Combined total
#Scoring
if(score.type == "Mood"){
s = (r-(N+1)/2)^2
}
if(score.type == "FAB"){
s = (N+1)/2 - abs(r - (N+1)/2)
}
if(score.type == "Klotz"){
s = (qnorm(r/(N+1),0,1))^2
}
if(score.type == "NPL"){
groupmean = tapply(r,groups,mean)
n = tapply(r,groups,length)
rmeans = rep(groupmean,n)
s = abs(r-rmeans)
}
if(score.type == "SR"){
s = r^2
}
if(score.type == "TG"){
s = r #nothing to see here.
}
if(score.type == "FK"){
s = qnorm(1/2 + r/(2*(N+1 )),0,1)
}
#Now we are done. Let's get the result:
return(s)
}
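## Usage sketch (toy data, illustrative only):
# x <- c(3.1, 5.2, 4.7, 8.8, 2.0, 6.5)             # raw observations
# groups <- factor(c("A", "A", "A", "B", "B", "B"))
# r <- rank(x)                                      # rank transformation
# scoreX(r, groups, score.type = "Mood")            # squared centered ranks
# scoreX(r, groups, score.type = "NPL")             # |rank - group mean rank|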
|
/scoring.R
|
permissive
|
vitergo/NPfactorial-package-in-R
|
R
| false
| false
| 2,018
|
r
|
#####################################################################################
#
# SCORE RANKS
#
# Author: Dr. Víctor G. Tercero Gómez
# Affiliation: Tecnologico de Monterrey
#
# Date: November 3, 2017
# Version: 1.0
#
# DESCRIPTION
#
# The function gets different linear rank scores from a vector of ranks.
#
# r: is a numerical vector of ranks (a rank transformation was applied to the x observations)
# groups: is a factor that indicates the group each element of x belongs to
# score.type: is a character string. Choose one type:
# "Mood": Mood (1954) statistic is used (r-(N+1)/2)^2
# "FAB": Freund and Ansari (1957) and Ansari and Bradley (1960) statistic is used (N+1)/2 - abs(r - (N+1)/2)
# "Klotz": Klotz (1962) statistic is used (qnorm(r/(N+1),0,1))^2
# "NPL": Nonparametric Levene is used abs(r-groupmean(r))
# "SR": Squared Ranks statistic is used r^2
# "TG": Nothing is actually done here; the ordinary ranks are returned unchanged
# "FK": Fligner and Killeen (1976) statistic is used qnorm(1/2 + r/(2*(N+1)),0,1)
scoreX <- function(r, groups, score.type){
#Error verification
feasibles.scores = c("Mood", "FAB", "Klotz", "NPL", "SR", "TG", "FK")
  if(all(score.type != feasibles.scores)){
    stop("score type is not recognized")
  }
#Preliminaries
N = length(r) #Combined total
#Scoring
if(score.type == "Mood"){
s = (r-(N+1)/2)^2
}
if(score.type == "FAB"){
s = (N+1)/2 - abs(r - (N+1)/2)
}
if(score.type == "Klotz"){
s = (qnorm(r/(N+1),0,1))^2
}
if(score.type == "NPL"){
groupmean = tapply(r,groups,mean)
n = tapply(r,groups,length)
rmeans = rep(groupmean,n)
s = abs(r-rmeans)
}
if(score.type == "SR"){
s = r^2
}
if(score.type == "TG"){
s = r #nothing to see here.
}
if(score.type == "FK"){
s = qnorm(1/2 + r/(2*(N+1 )),0,1)
}
#Now we are done. Let's get the result:
return(s)
}
|
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::#
# #
# valueFuncs - Calculates/Retrieves the value functions for each treatment #
# option for IQ- and Q-Learning methods. #
# #
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::#
if(!isGeneric("qFuncs")){
setGeneric(name = "qFuncs",
def = function(object, ...){standardGeneric("qFuncs")})
}
setMethod(f = "qFuncs",
signature = c(object="IQEst"),
definition = function(object, ...){
opts <- object@qFunctions
colnames(opts) <- c("-1","1")
return(opts)
} )
setMethod(f = "qFuncs",
signature = c(object="IQLearnFS_VHom"),
definition = function(object, ...){
return(StdDev(object))
} )
setMethod(f = "qFuncs",
signature = c(object="QLearnEst"),
definition = function(object, ...){
return(object@qFunctions)
} )
setMethod(f = "qFuncs",
signature = c(object="OptimalSeq"),
definition = function(object, ...){
return(NULL)
} )
setMethod(f = "qFuncs",
signature = c(object="OptimalClass"),
definition = function(object, ...){
return(NULL)
} )
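## Usage sketch (hypothetical: assumes a fitted object of one of the classes
## handled above, e.g. an "IQEst" produced by the package's IQ-learning fits):
# qFuncs(fittedIQEst)   # returns the qFunctions slot, columns "-1" and "1"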
|
/DynTxRegime/R/methods-qFuncs.R
|
no_license
|
ingted/R-Examples
|
R
| false
| false
| 1,660
|
r
|
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::#
# #
# valueFuncs - Calculates/Retrieves the value functions for each treatment #
# option for IQ- and Q-Learning methods. #
# #
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::#
if(!isGeneric("qFuncs")){
setGeneric(name = "qFuncs",
def = function(object, ...){standardGeneric("qFuncs")})
}
setMethod(f = "qFuncs",
signature = c(object="IQEst"),
definition = function(object, ...){
opts <- object@qFunctions
colnames(opts) <- c("-1","1")
return(opts)
} )
setMethod(f = "qFuncs",
signature = c(object="IQLearnFS_VHom"),
definition = function(object, ...){
return(StdDev(object))
} )
setMethod(f = "qFuncs",
signature = c(object="QLearnEst"),
definition = function(object, ...){
return(object@qFunctions)
} )
setMethod(f = "qFuncs",
signature = c(object="OptimalSeq"),
definition = function(object, ...){
return(NULL)
} )
setMethod(f = "qFuncs",
signature = c(object="OptimalClass"),
definition = function(object, ...){
return(NULL)
} )
|
#FUNCTIONS
ConstructPopulationData=function(Income,HouseholdData,Name=NULL,Pop=NULL,PropertyInput=NULL) {
OutputDF=HouseholdData[findInterval(Income, c(0,HouseholdData$Decilepoints[1:9])),c(-(1:3))]
if(!is.null(PropertyInput)) OutputDF$Property=PropertyInput
if(is.null(Name)) Name = seq_along(Income)
if(is.null(Pop)) Pop<-rep(1,length(Income))
OutputDF = cbind(Name,Pop,GrossIncome=Income,IncTax=Income,EmployeeNI=Income,EmployerNI=Income,CitDiv=rep(1,length(Income)),OutputDF)
OutputDF
}
CreateIncTaxSchedule=function(BandName,Rate, LowerLimit) {
TaxSchedule <- data.frame(BandName=BandName,Rate=Rate, LowerLimit=LowerLimit, UpperLimit=c(LowerLimit[-1],Inf))
NumberOfBands<-length(TaxSchedule$BandName)
TaxSchedule$TaxPaidAtBottomOfBand=c(0,cumsum((TaxSchedule$UpperLimit-TaxSchedule$LowerLimit)*TaxSchedule$Rate)[-NumberOfBands])
TaxSchedule
}
CreateFullTaxSystem = function(IncTax,EmployeeNI,EmployerNI,IndirectTaxes,Benefits){
return(c(list(IncTax=IncTax,EmployeeNI=EmployeeNI,EmployerNI=EmployerNI),IndirectTaxes,Benefits))
}
PreTaxIncomeModelledFunction <- function(LogNormDetails, n=42000) {
set.seed(1)
IncomeDistModelled= rlnorm(n, meanlog = LogNormDetails$estimate["meanlog"],
sdlog = LogNormDetails$estimate["sdlog"])
SortedIncome = sort(IncomeDistModelled)
return(SortedIncome)
}
TaxCalc = function(Quantity,TaxSchedule) {
if(is.null(dim(TaxSchedule))) { #If there is just a rate
return(TaxSchedule*Quantity)
} else { #If there is a schedule of rates
TaxBand=pmax(1,findInterval(Quantity,TaxSchedule[,"LowerLimit"]))
TotalTaxPaid <- TaxSchedule[TaxBand,"TaxPaidAtBottomOfBand"]+TaxSchedule[TaxBand,"Rate"]*(Quantity-TaxSchedule[TaxBand,"LowerLimit"])
return(TotalTaxPaid)
}
}
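## Worked sketch of TaxCalc (illustrative band limits and rates, not real
## figures):
# sched <- CreateIncTaxSchedule(BandName = c("Free", "Basic", "Higher"),
#                               Rate = c(0, 0.20, 0.40),
#                               LowerLimit = c(0, 12500, 50000))
# TaxCalc(60000, sched)
# # = 7500 (tax accrued below the top band) + 0.40 * (60000 - 50000) = 11500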
AllTaxCalc <- function(Population,TaxSystem) {
CommonNames=intersect(names(TaxSystem),colnames(Population))
TaxPaid=matrix(0,ncol=length(CommonNames),nrow=nrow(Population))
colnames(TaxPaid)=CommonNames
for(TaxName in CommonNames){
TaxPaid[,TaxName] = TaxCalc(Population[,TaxName],TaxSystem[[TaxName]])
}
return(TaxPaid)
}
AllAnalytics <- function(Population,TaxSystem) {
IncomeAndTaxesPaid=AllIncomeAndTaxCalc(Population,TaxSystem)
SummaryResult=CalculateSummary(IncomeAndTaxesPaid)
RevenueRaisedByTax=-colSums(IncomeAndTaxesPaid)[-1]
TotalSummary=colSums(SummaryResult)
if(nrow(Population)>10) {
PreTaxGini=ineq(SummaryResult[,"GrossIncome"],"Gini")
PostTaxGini=ineq(SummaryResult[,"PostAllTaxAndBen"],"Gini")
PreTaxShareOfIncomePerQs=ShareOfIncomePerQs(SummaryResult[,"GrossIncome"])
PostTaxShareOfIncomePerQs=ShareOfIncomePerQs(SummaryResult[,"PostAllTaxAndBen"])
IneqList=list(PreTaxGini=PreTaxGini,PostTaxGini=PostTaxGini,PreTaxShareOfIncomePerQs=PreTaxShareOfIncomePerQs,PostTaxShareOfIncomePerQs=PostTaxShareOfIncomePerQs)
} else {PreTaxGini=NA;PostTaxGini=NA;PreTaxShareOfIncomePerQs=NA;PostTaxShareOfIncomePerQs=NA}
list(Population=Population,TaxSystemName=TaxSystem["TaxSystemName"],TaxSystem=TaxSystem,IncomeAndTaxesPaid=IncomeAndTaxesPaid,SummaryResult=SummaryResult,RevenueRaisedByTax=RevenueRaisedByTax,
TotalSummary=TotalSummary,GrossIncome=TotalSummary["GrossIncome"],RevenueRaised=TotalSummary["RevenueRaised"], PostAllTaxAndBenIncome=TotalSummary["PostAllTaxAndBen"],
PreTaxGini=PreTaxGini,PostTaxGini=PostTaxGini,PreTaxShareOfIncomePerQs=PreTaxShareOfIncomePerQs,PostTaxShareOfIncomePerQs=PostTaxShareOfIncomePerQs)
}
AllIncomeAndTaxCalc <- function(Population,TaxSystem) {
AllTaxC=AllTaxCalc(Population,TaxSystem)
cbind(GrossIncome=Population[,"GrossIncome"],-AllTaxC)
}
CalculateSummaryFromStart <- function(Population,TaxSystem) {
CalculateSummary(AllIncomeAndTaxCalc(Population,TaxSystem))
}
CalculateSummary <- function(AllIncomeAndTaxCalcRes) {
IncomeTax=intersect(colnames(AllIncomeAndTaxCalcRes),c("IncTax"))
NI=intersect(colnames(AllIncomeAndTaxCalcRes),c("EmployeeNI","EmployerNI"))
IncomeTaxAndNI=intersect(colnames(AllIncomeAndTaxCalcRes),c("IncTax","EmployeeNI","EmployerNI"))
Benefits=intersect(colnames(AllIncomeAndTaxCalcRes),c("CitDiv"))
PropertyTaxes=intersect(colnames(AllIncomeAndTaxCalcRes),c("Property"))
IndirectTaxes=intersect(colnames(AllIncomeAndTaxCalcRes),c("VAT","Tobacco","Beer","Wine","Hydrocarbons","CustomsGoods","Betting","Insurance","Lottery","AirTravel","TvLicence","Carbon"))
SummTable=cbind(GrossIncome=AllIncomeAndTaxCalcRes[,"GrossIncome"],
IncTax=rowSums(AllIncomeAndTaxCalcRes[,IncomeTax,drop=FALSE]),
NI=rowSums(AllIncomeAndTaxCalcRes[,NI,drop=FALSE]),
IncTaxAndNI=rowSums(AllIncomeAndTaxCalcRes[,IncomeTaxAndNI,drop=FALSE]),
Benefits=rowSums(AllIncomeAndTaxCalcRes[,Benefits,drop=FALSE]),
PropertyTaxes=rowSums(AllIncomeAndTaxCalcRes[,PropertyTaxes,drop=FALSE]),
IndirectTaxes=rowSums(AllIncomeAndTaxCalcRes[,IndirectTaxes,drop=FALSE]))
SummTable=cbind(SummTable,
PostIncTax=(SummTable[,"GrossIncome"]+SummTable[,"IncTax"]),
PostIncTaxAndNI=(SummTable[,"GrossIncome"]+SummTable[,"IncTax"]+AllIncomeAndTaxCalcRes[,"EmployeeNI"]),
PostIncTaxAndNIAndBen=(SummTable[,"GrossIncome"]+SummTable[,"IncTax"]+AllIncomeAndTaxCalcRes[,"EmployeeNI"]+SummTable[,"Benefits"]),
PostAllTaxAndBen=(SummTable[,"GrossIncome"]+SummTable[,"IncTax"]+AllIncomeAndTaxCalcRes[,"EmployeeNI"]+AllIncomeAndTaxCalcRes[,"EmployerNI"]+SummTable[,"Benefits"]+SummTable[,"PropertyTaxes"]+SummTable[,"IndirectTaxes"]),
RevenueRaised=-(SummTable[,"IncTax"]+AllIncomeAndTaxCalcRes[,"EmployeeNI"]+AllIncomeAndTaxCalcRes[,"EmployerNI"]+SummTable[,"Benefits"]+SummTable[,"PropertyTaxes"]+SummTable[,"IndirectTaxes"]))
return(SummTable)
}
TotalRevenueRaised=function(Population,TaxSystem) {
colSums(AllTaxCalc(Population,TaxSystem))
}
ShareOfIncomePerQs <- function(Income){
Dividers=seq(from = 0, to = 1, by = 0.25)
Qs=quantile(Income, Dividers)
Output=tapply(Income,findInterval(Income,Qs, rightmost.closed = TRUE),sum)
names(Output)=paste(Dividers[1:4],"-",Dividers[2:5])
return(Output/sum(Output))
}
ExtractElement = function(ElementName, ListOfAnalytics, SpecificTaxes=NULL ){
n=length(ListOfAnalytics)
IntList=vector("list", n)
#VariableNames=vector("character", n)
GetElement=function(TaxAnalysis) do.call("$",list(TaxAnalysis,ElementName))
for(i in seq_len(n)) {
IntList[[i]]=GetElement(ListOfAnalytics[[i]])
# VariableNames[i]=ListOfAnalytics[[i]]$TaxSystemName
}
OutputList=sapply(X=IntList,FUN=unlist)
#if(length(dim((OutputList))) == 2) colnames(OutputList)=(VariableNames)
#if(length(dim(OutputList)) == 1) names(OutputList)=(VariableNames)
if(is.vector(OutputList)&&!is.null(names(ListOfAnalytics))) names(OutputList)=names(ListOfAnalytics)
if(is.matrix(OutputList)&&!is.null(names(ListOfAnalytics))) colnames(OutputList)=names(ListOfAnalytics)
if(!is.null(SpecificTaxes)) OutputList=OutputList[SpecificTaxes,]
OutputList
}
GraphsAndTables= function(AnalyticsExisting, AnalyticsNew) {
  ExistingSystemSummary = c(AnalyticsExisting$GrossIncome, AnalyticsExisting$TotalSummary["RevenueRaised"], AnalyticsExisting$TotalSummary["PostAllTaxAndBen"]) # "PostAllTaxAndBen" is the column name set in CalculateSummary
  NewSystemSummary = c(AnalyticsNew$GrossIncome, AnalyticsNew$TotalSummary["RevenueRaised"], AnalyticsNew$TotalSummary["PostAllTaxAndBen"])
Summary=data.frame(ExistingSystemSummary=ExistingSystemSummary,NewSystemSummary=NewSystemSummary)
colnames(Summary) <- c("Existing Tax System", "New Tax System")
rownames(Summary) <- c("Gross Income", "Total net tax bill","Income after all taxes and benefits")
RevenueByTaxSummary <- round(data.frame(AnalyticsExisting$RevenueRaisedByTax,AnalyticsNew$RevenueRaisedByTax),2)
colnames(RevenueByTaxSummary) <- c("Existing Tax System", "New Tax System")
cols <- rainbow(length(RevenueByTaxSummary[,1]))
par(mar=c(10,4,5,4)+.1, srt=90)
barplot1 <- barplot(RevenueByTaxSummary[,1],width=2, beside=FALSE,main="Existing Tax System Breakdown",names.arg=row.names(RevenueByTaxSummary),ylab = "Tax Paid (£)",col=cols,las=2,ylim = c(0,max(RevenueByTaxSummary[,1])+5000))
text(x=barplot1,y=as.numeric(RevenueByTaxSummary[,1]),label =as.numeric(RevenueByTaxSummary[,1]) ,pos=4,cex=1.1,col="black",las=1)
barplot2 <- barplot(RevenueByTaxSummary[,2], width = 2, beside=FALSE,names.arg=row.names(RevenueByTaxSummary),main="New Tax System Breakdown", ylab = "Tax Paid (£)", col=cols,las=2,ylim=c(0,max(RevenueByTaxSummary[,2])+5000))
text(x=barplot2,y=as.numeric(RevenueByTaxSummary[,2]),label =as.numeric(RevenueByTaxSummary[,2]),pos=4,cex=1.1,col="black",las=1)
kable((round(Summary,0)), caption = paste0("Effect of tax systems"))
}
|
/Model/Model/Calc_1TaxFunctions.R
|
no_license
|
sjstretton/screwed
|
R
| false
| false
| 8,828
|
r
|
#FUNCTIONS
ConstructPopulationData=function(Income,HouseholdData,Name=NULL,Pop=NULL,PropertyInput=NULL) {
OutputDF=HouseholdData[findInterval(Income, c(0,HouseholdData$Decilepoints[1:9])),c(-(1:3))]
if(!is.null(PropertyInput)) OutputDF$Property=PropertyInput
if(is.null(Name)) Name = seq_along(Income)
if(is.null(Pop)) Pop<-rep(1,length(Income))
OutputDF = cbind(Name,Pop,GrossIncome=Income,IncTax=Income,EmployeeNI=Income,EmployerNI=Income,CitDiv=rep(1,length(Income)),OutputDF)
OutputDF
}
CreateIncTaxSchedule=function(BandName,Rate, LowerLimit) {
TaxSchedule <- data.frame(BandName=BandName,Rate=Rate, LowerLimit=LowerLimit, UpperLimit=c(LowerLimit[-1],Inf))
NumberOfBands<-length(TaxSchedule$BandName)
TaxSchedule$TaxPaidAtBottomOfBand=c(0,cumsum((TaxSchedule$UpperLimit-TaxSchedule$LowerLimit)*TaxSchedule$Rate)[-NumberOfBands])
TaxSchedule
}
CreateFullTaxSystem = function(IncTax,EmployeeNI,EmployerNI,IndirectTaxes,Benefits){
return(c(list(IncTax=IncTax,EmployeeNI=EmployeeNI,EmployerNI=EmployerNI),IndirectTaxes,Benefits))
}
PreTaxIncomeModelledFunction <- function(LogNormDetails, n=42000) {
set.seed(1)
IncomeDistModelled= rlnorm(n, meanlog = LogNormDetails$estimate["meanlog"],
sdlog = LogNormDetails$estimate["sdlog"])
SortedIncome = sort(IncomeDistModelled)
return(SortedIncome)
}
TaxCalc = function(Quantity,TaxSchedule) {
if(is.null(dim(TaxSchedule))) { #If there is just a rate
return(TaxSchedule*Quantity)
} else { #If there is a schedule of rates
TaxBand=pmax(1,findInterval(Quantity,TaxSchedule[,"LowerLimit"]))
TotalTaxPaid <- TaxSchedule[TaxBand,"TaxPaidAtBottomOfBand"]+TaxSchedule[TaxBand,"Rate"]*(Quantity-TaxSchedule[TaxBand,"LowerLimit"])
return(TotalTaxPaid)
}
}
AllTaxCalc <- function(Population,TaxSystem) {
CommonNames=intersect(names(TaxSystem),colnames(Population))
TaxPaid=matrix(0,ncol=length(CommonNames),nrow=nrow(Population))
colnames(TaxPaid)=CommonNames
for(TaxName in CommonNames){
TaxPaid[,TaxName] = TaxCalc(Population[,TaxName],TaxSystem[[TaxName]])
}
return(TaxPaid)
}
AllAnalytics <- function(Population,TaxSystem) {
IncomeAndTaxesPaid=AllIncomeAndTaxCalc(Population,TaxSystem)
SummaryResult=CalculateSummary(IncomeAndTaxesPaid)
RevenueRaisedByTax=-colSums(IncomeAndTaxesPaid)[-1]
TotalSummary=colSums(SummaryResult)
if(nrow(Population)>10) {
PreTaxGini=ineq(SummaryResult[,"GrossIncome"],"Gini")
PostTaxGini=ineq(SummaryResult[,"PostAllTaxAndBen"],"Gini")
PreTaxShareOfIncomePerQs=ShareOfIncomePerQs(SummaryResult[,"GrossIncome"])
PostTaxShareOfIncomePerQs=ShareOfIncomePerQs(SummaryResult[,"PostAllTaxAndBen"])
IneqList=list(PreTaxGini=PreTaxGini,PostTaxGini=PostTaxGini,PreTaxShareOfIncomePerQs=PreTaxShareOfIncomePerQs,PostTaxShareOfIncomePerQs=PostTaxShareOfIncomePerQs)
} else {PreTaxGini=NA;PostTaxGini=NA;PreTaxShareOfIncomePerQs=NA;PostTaxShareOfIncomePerQs=NA}
list(Population=Population,TaxSystemName=TaxSystem["TaxSystemName"],TaxSystem=TaxSystem,IncomeAndTaxesPaid=IncomeAndTaxesPaid,SummaryResult=SummaryResult,RevenueRaisedByTax=RevenueRaisedByTax,
TotalSummary=TotalSummary,GrossIncome=TotalSummary["GrossIncome"],RevenueRaised=TotalSummary["RevenueRaised"], PostAllTaxAndBenIncome=TotalSummary["PostAllTaxAndBen"],
PreTaxGini=PreTaxGini,PostTaxGini=PostTaxGini,PreTaxShareOfIncomePerQs=PreTaxShareOfIncomePerQs,PostTaxShareOfIncomePerQs=PostTaxShareOfIncomePerQs)
}
AllIncomeAndTaxCalc <- function(Population,TaxSystem) {
AllTaxC=AllTaxCalc(Population,TaxSystem)
cbind(GrossIncome=Population[,"GrossIncome"],-AllTaxC)
}
CalculateSummaryFromStart <- function(Population,TaxSystem) {
CalculateSummary(AllIncomeAndTaxCalc(Population,TaxSystem))
}
CalculateSummary <- function(AllIncomeAndTaxCalcRes) {
IncomeTax=intersect(colnames(AllIncomeAndTaxCalcRes),c("IncTax"))
NI=intersect(colnames(AllIncomeAndTaxCalcRes),c("EmployeeNI","EmployerNI"))
IncomeTaxAndNI=intersect(colnames(AllIncomeAndTaxCalcRes),c("IncTax","EmployeeNI","EmployerNI"))
Benefits=intersect(colnames(AllIncomeAndTaxCalcRes),c("CitDiv"))
PropertyTaxes=intersect(colnames(AllIncomeAndTaxCalcRes),c("Property"))
IndirectTaxes=intersect(colnames(AllIncomeAndTaxCalcRes),c("VAT","Tobacco","Beer","Wine","Hydrocarbons","CustomsGoods","Betting","Insurance","Lottery","AirTravel","TvLicence","Carbon"))
SummTable=cbind(GrossIncome=AllIncomeAndTaxCalcRes[,"GrossIncome"],
IncTax=rowSums(AllIncomeAndTaxCalcRes[,IncomeTax,drop=FALSE]),
NI=rowSums(AllIncomeAndTaxCalcRes[,NI,drop=FALSE]),
IncTaxAndNI=rowSums(AllIncomeAndTaxCalcRes[,IncomeTaxAndNI,drop=FALSE]),
Benefits=rowSums(AllIncomeAndTaxCalcRes[,Benefits,drop=FALSE]),
PropertyTaxes=rowSums(AllIncomeAndTaxCalcRes[,PropertyTaxes,drop=FALSE]),
IndirectTaxes=rowSums(AllIncomeAndTaxCalcRes[,IndirectTaxes,drop=FALSE]))
SummTable=cbind(SummTable,
PostIncTax=(SummTable[,"GrossIncome"]+SummTable[,"IncTax"]),
PostIncTaxAndNI=(SummTable[,"GrossIncome"]+SummTable[,"IncTax"]+AllIncomeAndTaxCalcRes[,"EmployeeNI"]),
PostIncTaxAndNIAndBen=(SummTable[,"GrossIncome"]+SummTable[,"IncTax"]+AllIncomeAndTaxCalcRes[,"EmployeeNI"]+SummTable[,"Benefits"]),
PostAllTaxAndBen=(SummTable[,"GrossIncome"]+SummTable[,"IncTax"]+AllIncomeAndTaxCalcRes[,"EmployeeNI"]+AllIncomeAndTaxCalcRes[,"EmployerNI"]+SummTable[,"Benefits"]+SummTable[,"PropertyTaxes"]+SummTable[,"IndirectTaxes"]),
RevenueRaised=-(SummTable[,"IncTax"]+AllIncomeAndTaxCalcRes[,"EmployeeNI"]+AllIncomeAndTaxCalcRes[,"EmployerNI"]+SummTable[,"Benefits"]+SummTable[,"PropertyTaxes"]+SummTable[,"IndirectTaxes"]))
return(SummTable)
}
TotalRevenueRaised=function(Population,TaxSystem) {
colSums(AllTaxCalc(Population,TaxSystem))
}
ShareOfIncomePerQs <- function(Income){
Dividers=seq(from = 0, to = 1, by = 0.25)
Qs=quantile(Income, Dividers)
Output=tapply(Income,findInterval(Income,Qs, rightmost.closed = TRUE),sum)
names(Output)=paste(Dividers[1:4],"-",Dividers[2:5])
return(Output/sum(Output))
}
ExtractElement = function(ElementName, ListOfAnalytics, SpecificTaxes=NULL ){
n=length(ListOfAnalytics)
IntList=vector("list", n)
#VariableNames=vector("character", n)
GetElement=function(TaxAnalysis) do.call("$",list(TaxAnalysis,ElementName))
for(i in seq_len(n)) {
IntList[[i]]=GetElement(ListOfAnalytics[[i]])
# VariableNames[i]=ListOfAnalytics[[i]]$TaxSystemName
}
OutputList=sapply(X=IntList,FUN=unlist)
#if(length(dim((OutputList))) == 2) colnames(OutputList)=(VariableNames)
#if(length(dim(OutputList)) == 1) names(OutputList)=(VariableNames)
if(is.vector(OutputList)&&!is.null(names(ListOfAnalytics))) names(OutputList)=names(ListOfAnalytics)
if(is.matrix(OutputList)&&!is.null(names(ListOfAnalytics))) colnames(OutputList)=names(ListOfAnalytics)
if(!is.null(SpecificTaxes)) OutputList=OutputList[SpecificTaxes,]
OutputList
}
GraphsAndTables= function(AnalyticsExisting, AnalyticsNew) {
  ExistingSystemSummary = c(AnalyticsExisting$GrossIncome, AnalyticsExisting$TotalSummary["RevenueRaised"], AnalyticsExisting$TotalSummary["PostAllTaxAndBen"]) # "PostAllTaxAndBen" is the column name set in CalculateSummary
  NewSystemSummary = c(AnalyticsNew$GrossIncome, AnalyticsNew$TotalSummary["RevenueRaised"], AnalyticsNew$TotalSummary["PostAllTaxAndBen"])
Summary=data.frame(ExistingSystemSummary=ExistingSystemSummary,NewSystemSummary=NewSystemSummary)
colnames(Summary) <- c("Existing Tax System", "New Tax System")
rownames(Summary) <- c("Gross Income", "Total net tax bill","Income after all taxes and benefits")
RevenueByTaxSummary <- round(data.frame(AnalyticsExisting$RevenueRaisedByTax,AnalyticsNew$RevenueRaisedByTax),2)
colnames(RevenueByTaxSummary) <- c("Existing Tax System", "New Tax System")
cols <- rainbow(length(RevenueByTaxSummary[,1]))
par(mar=c(10,4,5,4)+.1, srt=90)
barplot1 <- barplot(RevenueByTaxSummary[,1],width=2, beside=FALSE,main="Existing Tax System Breakdown",names.arg=row.names(RevenueByTaxSummary),ylab = "Tax Paid (£)",col=cols,las=2,ylim = c(0,max(RevenueByTaxSummary[,1])+5000))
text(x=barplot1,y=as.numeric(RevenueByTaxSummary[,1]),label =as.numeric(RevenueByTaxSummary[,1]) ,pos=4,cex=1.1,col="black",las=1)
barplot2 <- barplot(RevenueByTaxSummary[,2], width = 2, beside=FALSE,names.arg=row.names(RevenueByTaxSummary),main="New Tax System Breakdown", ylab = "Tax Paid (£)", col=cols,las=2,ylim=c(0,max(RevenueByTaxSummary[,2])+5000))
text(x=barplot2,y=as.numeric(RevenueByTaxSummary[,2]),label =as.numeric(RevenueByTaxSummary[,2]),pos=4,cex=1.1,col="black",las=1)
kable((round(Summary,0)), caption = paste0("Effect of tax systems"))
}
|
tok<-"ZsgzPKqkVyrdJeHfPpuCaCmtfFhtimYv"
|
/tokens.R
|
no_license
|
wente011/St.-Paul
|
R
| false
| false
| 39
|
r
|
tok<-"ZsgzPKqkVyrdJeHfPpuCaCmtfFhtimYv"
|
plot2 <- function(){
##download and load in data
fileURL <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(url = fileURL, destfile = "./data.zip")
unzip(zipfile = "./data.zip")
data <- read.table(file = "./household_power_consumption.txt", sep = ";", header = TRUE)
##change formats to help graph
data$Date <- as.Date(data$Date, format = "%d/%m/%Y")
data2 <- data[((data$Date == as.Date("2007/02/01")) | (data$Date == as.Date("2007/02/02"))),]
data2$Global_active_power <- as.numeric(as.character((data2$Global_active_power)))
data2$dateTime <- strptime(x = paste(data2$Date, data2$Time, sep = " "), format = "%Y-%m-%d %H:%M:%S")
## create graph
plot(x = data2$dateTime, y = data2$Global_active_power, type = "l" , xlab = "", ylab = "Global Active Power (kilowatts)")
##create png
dev.copy(device = png, file = "plot2.png", height = 480, width = 480)
dev.off()
}
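## Usage sketch: calling the function downloads the dataset from the URL
## above (internet access required) and writes plot2.png:
# plot2()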
|
/plot2.R
|
no_license
|
husainArafat/ExData_Plotting1
|
R
| false
| false
| 1,007
|
r
|
plot2 <- function(){
##download and load in data
fileURL <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(url = fileURL, destfile = "./data.zip")
unzip(zipfile = "./data.zip")
data <- read.table(file = "./household_power_consumption.txt", sep = ";", header = TRUE)
##change formats to help graph
data$Date <- as.Date(data$Date, format = "%d/%m/%Y")
data2 <- data[((data$Date == as.Date("2007/02/01")) | (data$Date == as.Date("2007/02/02"))),]
data2$Global_active_power <- as.numeric(as.character((data2$Global_active_power)))
data2$dateTime <- strptime(x = paste(data2$Date, data2$Time, sep = " "), format = "%Y-%m-%d %H:%M:%S")
## create graph
plot(x = data2$dateTime, y = data2$Global_active_power, type = "l" , xlab = "", ylab = "Global Active Power (kilowatts)")
##create png
dev.copy(device = png, file = "plot2.png", height = 480, width = 480)
dev.off()
}
|
par(mfrow=c(2,2))
plot(dt$timestamp,dt$Global_active_power,type = "l" ,ylab = "Global Active Power (kilowatts)" ,xlab = "")
plot(dt$timestamp,dt$Voltage, type="l", xlab="datetime", ylab="Voltage")
plot(dt$timestamp,dt$Sub_metering_1,type = "l" ,ylab = "Energy sub metering" ,xlab = "")
lines(dt$timestamp,dt$Sub_metering_2,col="red")
lines(dt$timestamp,dt$Sub_metering_3,col="blue")
legend("topright", col=c("black","red","blue"), c("Sub_metering_1 ","Sub_metering_2 ", "Sub_metering_3 "),lty=c(1,1), cex=.5, bty="n")
plot(dt$timestamp,dt$Global_reactive_power, type="l", xlab="datetime", ylab="Global_reactive_power")
dev.copy(png, file="./ExData_Plotting1/plot4.png", width=480, height=480)
dev.off()
|
/plot4.R
|
no_license
|
amoawad87/ExData_Plotting1
|
R
| false
| false
| 705
|
r
|
par(mfrow=c(2,2))
plot(dt$timestamp,dt$Global_active_power,type = "l" ,ylab = "Global Active Power (kilowatts)" ,xlab = "")
plot(dt$timestamp,dt$Voltage, type="l", xlab="datetime", ylab="Voltage")
plot(dt$timestamp,dt$Sub_metering_1,type = "l" ,ylab = "Energy sub metering" ,xlab = "")
lines(dt$timestamp,dt$Sub_metering_2,col="red")
lines(dt$timestamp,dt$Sub_metering_3,col="blue")
legend("topright", col=c("black","red","blue"), c("Sub_metering_1 ","Sub_metering_2 ", "Sub_metering_3 "),lty=c(1,1), cex=.5, bty="n")
plot(dt$timestamp,dt$Global_reactive_power, type="l", xlab="datetime", ylab="Global_reactive_power")
dev.copy(png, file="./ExData_Plotting1/plot4.png", width=480, height=480)
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/new_doc_with_preamble.R
\name{new_doc_with_preamble}
\alias{new_doc_with_preamble}
\title{Create a new document with a preamble}
\usage{
new_doc_with_preamble(preamble = default_preamble)
}
\description{
Create a new document with a preamble
}
|
/man/new_doc_with_preamble.Rd
|
no_license
|
graiffgarcia/addinexamplesRG
|
R
| false
| true
| 342
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/new_doc_with_preamble.R
\name{new_doc_with_preamble}
\alias{new_doc_with_preamble}
\title{Create a new document with a preamble}
\usage{
new_doc_with_preamble(preamble = default_preamble)
}
\description{
Create a new document with a preamble
}
|
# More than natural variation?
#######################################################################################################################
#
# Let's see if the apparent trend in the plot is something more than natural variation. Fit a linear model
# called m_bty to predict average professor score by average beauty rating and add the line to your plot using
# abline(m_bty)
#
#######################################################################################################################
# Your initial plot:
plot(evals$score ~ jitter(evals$bty_avg))
# Construct the linear model:
m_bty <- lm(formula= score ~ bty_avg, data=evals)
# Plot your linear model on your plot:
abline(m_bty)
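# To check whether the trend is more than natural variation, look at the
# slope on bty_avg and its t-test against zero in the model summary:
summary(m_bty)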
|
/dataCamp/openCourses/dataAnalysisAndStatisticalInference/9_multipleLineaRegression/9_moreThanNaturalVariation.R
|
permissive
|
odonnmi/learnNPractice
|
R
| false
| false
| 716
|
r
|
# More than natural variation?
#######################################################################################################################
#
# Let's see if the apparent trend in the plot is something more than natural variation. Fit a linear model
# called m_bty to predict average professor score by average beauty rating and add the line to your plot using
# abline(m_bty)
#
#######################################################################################################################
# Your initial plot:
plot(evals$score ~ jitter(evals$bty_avg))
# Construct the linear model:
m_bty <- lm(formula= score ~ bty_avg, data=evals)
# Plot your linear model on your plot:
abline(m_bty)
|
# Export Module
#' module exportPeaks UI function
#' @export
exportPeaksUI <- function(id){
ns <- NS(id)
fluidRow(
column(10, HTML("<h4>Export</h4>")) ,
column(10,uiOutput(ns("runName"))) ,
column(10,downloadButton(ns("ExportPeaks"), "Export peaks (xlsx)")),
HTML("<br> ")
)
}
#' module exportPeaks server function
#' @export
exportPeaks <- function(input,output,session, exportPeaksTable, colors, markers, markersList, seqdates, selected.sample ) {
ns <- session$ns
runName <- reactive( {
sample.date <- seqdates()[selected.sample()]
shortName <- markersList()[marker.file == basename(markers())][['runName']]
paste(shortName, sample.date ,sep = "-")
})
output$runName <- renderUI({
textInput(ns("runName"), "Run name", value = runName())
})
output$ExportPeaks <- downloadHandler(
filename = function(file) {
paste0(input$runName, ".xlsx")
},
content = function(conn){
result.table <- unique(data.table(
"Sample Name" = selected.sample(),
"run name" = input$runName,
"Marker" = exportPeaksTable()$system,
"Dye" = exportPeaksTable()$dye
))
# max number of detected peaks per system
supcols <- max(table(exportPeaksTable()$system))
for (i in 1:supcols) {
newsize <- paste("Size", i)
newheight <- paste("Height", i)
newbin <- paste("bin", i)
result.table[[newsize]] <- NA
result.table[[newheight]] <- NA
result.table[[newbin]] <- NA
result.table[[newsize]] <- as.numeric(result.table[[newsize]])
result.table[[newheight]] <- as.numeric(result.table[[newheight]])
        result.table[[newbin]] <- as.character(result.table[[newbin]])
}
# print(exportPeaksTable())
for (i in 1:nrow(result.table)) {
# get system
systemi <- result.table[i][['Marker']]
peaks <- exportPeaksTable()[system == systemi]
result.table[Marker == systemi][["Dye"]] <- toupper(substr(colors[[result.table[Marker == systemi][["Dye"]]]]$cval, 1,1))
for (j in 1:nrow(peaks)) {
result.table[Marker == systemi][[paste("Size", j)]] <- peaks$size[j]
result.table[Marker == systemi][[paste("Height", j)]] <- peaks$height[j]
result.table[Marker == systemi][[paste("bin", j)]] <- peaks$bin[j]
}
}
## create xls workbook and save to sheets
wb = loadWorkbook(conn,create=TRUE)
createSheet(wb,name="Peaks")
writeWorksheet(wb,result.table,sheet = "Peaks")
setColumnWidth(wb, sheet = "Peaks", column = 1:ncol(result.table), width = -1)
saveWorkbook(wb)
}
  ,contentType = "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet" # xlsx MIME type
)
return(1)
}
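## Wiring sketch (module ID and reactive names are illustrative only):
# ui:     exportPeaksUI("export1")
# server: callModule(exportPeaks, "export1",
#                    exportPeaksTable, colors, markers, markersList,
#                    seqdates, selected.sample)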
|
/inst/shiny/exportPeaks.R
|
no_license
|
sybrohee/pickpeak
|
R
| false
| false
| 2,850
|
r
|
# Export Module
#' module exportPeaks UI function
#' @export
exportPeaksUI <- function(id){
ns <- NS(id)
fluidRow(
column(10, HTML("<h4>Export</h4>")) ,
column(10,uiOutput(ns("runName"))) ,
column(10,downloadButton(ns("ExportPeaks"), "Export peaks (xlsx)")),
HTML("<br> ")
)
}
#' module exportPeaks server function
#' @export
exportPeaks <- function(input,output,session, exportPeaksTable, colors, markers, markersList, seqdates, selected.sample ) {
ns <- session$ns
runName <- reactive( {
sample.date <- seqdates()[selected.sample()]
shortName <- markersList()[marker.file == basename(markers())][['runName']]
paste(shortName, sample.date ,sep = "-")
})
output$runName <- renderUI({
textInput(ns("runName"), "Run name", value = runName())
})
output$ExportPeaks <- downloadHandler(
filename = function(file) {
paste0(input$runName, ".xlsx")
},
content = function(conn){
result.table <- unique(data.table(
"Sample Name" = selected.sample(),
"run name" = input$runName,
"Marker" = exportPeaksTable()$system,
"Dye" = exportPeaksTable()$dye
))
# max number of detected peaks per system
supcols <- max(table(exportPeaksTable()$system))
for (i in 1:supcols) {
newsize <- paste("Size", i)
newheight <- paste("Height", i)
newbin <- paste("bin", i)
result.table[[newsize]] <- NA
result.table[[newheight]] <- NA
result.table[[newbin]] <- NA
result.table[[newsize]] <- as.numeric(result.table[[newsize]])
result.table[[newheight]] <- as.numeric(result.table[[newheight]])
        result.table[[newbin]] <- as.character(result.table[[newbin]])
}
# print(exportPeaksTable())
for (i in 1:nrow(result.table)) {
# get system
systemi <- result.table[i][['Marker']]
peaks <- exportPeaksTable()[system == systemi]
result.table[Marker == systemi][["Dye"]] <- toupper(substr(colors[[result.table[Marker == systemi][["Dye"]]]]$cval, 1,1))
for (j in 1:nrow(peaks)) {
result.table[Marker == systemi][[paste("Size", j)]] <- peaks$size[j]
result.table[Marker == systemi][[paste("Height", j)]] <- peaks$height[j]
result.table[Marker == systemi][[paste("bin", j)]] <- peaks$bin[j]
}
}
## create xls workbook and save to sheets
wb = loadWorkbook(conn,create=TRUE)
createSheet(wb,name="Peaks")
writeWorksheet(wb,result.table,sheet = "Peaks")
setColumnWidth(wb, sheet = "Peaks", column = 1:ncol(result.table), width = -1)
saveWorkbook(wb)
}
  ,contentType = "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet" # xlsx MIME type
)
return(1)
}
|
#### Required packages ####
if (!require("ggplot2")) install.packages("ggplot2")
if (!require("gratia")) install.packages("gratia")
if (!require("signs")) install.packages("signs") # signs_format
#### Required data ####
load("./data/GAMM.Rda")
#### SI Figure 5 ####
plt1 <- qq_plot(Mod, method="simulate", point_col="steelblue",
point_alpha = 0.7, alpha=0.0)+
labs(title = NULL, subtitle = NULL)+
scale_y_continuous(limits= c(-0.8, 0.8),
labels = signs_format(accuracy = .1))+
scale_x_continuous(labels = signs_format(accuracy = .1))+
theme(panel.grid = element_blank(),
panel.background = element_blank(),
axis.line = element_line(colour = "black", size=1),
axis.text = element_text(colour = "black", size=12),
axis.title = element_text(colour = "black", size = 14),
)
df <- data.frame(log_fitted= fitted(Mod),
residuals=resid(Mod, type= "deviance"))
# head(df)
plt2 <- ggplot(df, aes(x=log_fitted, y=residuals)) +
geom_point(alpha=0.7, colour="steelblue") +
labs(x="Linear predictor", y="Deviance residuals") +
geom_hline(yintercept=0, linetype="solid", color="red", size=0.5) +
scale_y_continuous(limits= c(-0.8, 0.6),
labels = signs_format(accuracy = .1)) +
theme(panel.grid = element_blank(),
panel.background = element_blank(),
axis.line = element_line(colour = "black", size=1),
axis.text = element_text(colour = "black", size=12),
axis.title = element_text(colour = "black",size = 14),
)
#### Merge figures ####
SuppInfFig5 <- ggarrange(plt1, plt2, labels = c("a)","b)"))
#### Save file ####
tiff("figures/Supp_Inf_Fig5.tiff", height = 1400, width =2100, res=300)
annotate_figure(SuppInfFig5,
top = text_grob(paste0("Supplementary Fig. 5: Generalized additive mixed effects model residuals\n"),
color = "black", face = "bold",
size = 12, hjust = 0, x=0.01,just="left"))
dev.off()
|
/figures/SupplementaryInformationFigures/script_Supp_Inf_Fig5.R
|
no_license
|
mcwimm/GRINanalysis
|
R
| false
| false
| 2,072
|
r
|
#### Required packages ####
if (!require("ggplot2")) install.packages("ggplot2")
if (!require("gratia")) install.packages("gratia")
if (!require("signs")) install.packages("signs") # signs_format
#### Required data ####
load("./data/GAMM.Rda")
#### SI Figure 5 ####
plt1 <- qq_plot(Mod, method="simulate", point_col="steelblue",
point_alpha = 0.7, alpha=0.0)+
labs(title = NULL, subtitle = NULL)+
scale_y_continuous(limits= c(-0.8, 0.8),
labels = signs_format(accuracy = .1))+
scale_x_continuous(labels = signs_format(accuracy = .1))+
theme(panel.grid = element_blank(),
panel.background = element_blank(),
axis.line = element_line(colour = "black", size=1),
axis.text = element_text(colour = "black", size=12),
axis.title = element_text(colour = "black", size = 14),
)
df <- data.frame(log_fitted= fitted(Mod),
residuals=resid(Mod, type= "deviance"))
# head(df)
plt2 <- ggplot(df, aes(x=log_fitted, y=residuals)) +
geom_point(alpha=0.7, colour="steelblue") +
labs(x="Linear predictor", y="Deviance residuals") +
geom_hline(yintercept=0, linetype="solid", color="red", size=0.5) +
scale_y_continuous(limits= c(-0.8, 0.6),
labels = signs_format(accuracy = .1)) +
theme(panel.grid = element_blank(),
panel.background = element_blank(),
axis.line = element_line(colour = "black", size=1),
axis.text = element_text(colour = "black", size=12),
axis.title = element_text(colour = "black",size = 14),
)
#### Merge figures ####
SuppInfFig5 <- ggarrange(plt1, plt2, labels = c("a)","b)"))
#### Save file ####
tiff("figures/Supp_Inf_Fig5.tiff", height = 1400, width =2100, res=300)
annotate_figure(SuppInfFig5,
top = text_grob(paste0("Supplementary Fig. 5: Generalized additive mixed effects model residuals\n"),
color = "black", face = "bold",
size = 12, hjust = 0, x=0.01,just="left"))
dev.off()
|
## Verify and set Working Directory
getwd()
setwd("D:/E_Learn_Course_Data_Science_and_Analytics/Exploratory_Data_Analysis")
## Read the database
Full_database <- read.csv("./household_power_consumption.txt", header=T, sep=';', na.strings="?",
check.names=F, stringsAsFactors=F, comment.char="", quote='\"')
Full_database$Date <- as.Date(Full_database$Date, format="%d/%m/%Y")
## Subset the data to only the required dates
Subset_data <- subset(Full_database, subset=(Date >= "2007-02-01" & Date <= "2007-02-02"))
rm(Full_database)
## Convert the dates
datetime <- paste(as.Date(Subset_data$Date), Subset_data$Time)
Subset_data$Datetime <- as.POSIXct(datetime)
## Plot 2
plot(Subset_data$Global_active_power~Subset_data$Datetime, type="l",
ylab="Global Active Power (kilowatts)", xlab="")
## Saving the plot 2 to png file
dev.copy(png, file="plot2.png", height=480, width=480)
dev.off()
|
/plot2.R
|
no_license
|
DominicTanJX/Exploratory-Data-Analysis
|
R
| false
| false
| 929
|
r
|
## Verify and set Working Directory
getwd()
setwd("D:/E_Learn_Course_Data_Science_and_Analytics/Exploratory_Data_Analysis")
## Read the database
Full_database <- read.csv("./household_power_consumption.txt", header=T, sep=';', na.strings="?",
check.names=F, stringsAsFactors=F, comment.char="", quote='\"')
Full_database$Date <- as.Date(Full_database$Date, format="%d/%m/%Y")
## Subset the data to only the required dates
Subset_data <- subset(Full_database, subset=(Date >= "2007-02-01" & Date <= "2007-02-02"))
rm(Full_database)
## Convert the dates
datetime <- paste(as.Date(Subset_data$Date), Subset_data$Time)
Subset_data$Datetime <- as.POSIXct(datetime)
## Plot 2
plot(Subset_data$Global_active_power~Subset_data$Datetime, type="l",
ylab="Global Active Power (kilowatts)", xlab="")
## Saving the plot 2 to png file
dev.copy(png, file="plot2.png", height=480, width=480)
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/kimma_cleaning.R
\name{kimma_cleaning}
\alias{kimma_cleaning}
\title{kmFit data cleaning}
\usage{
kimma_cleaning(
dat = NULL,
kin = NULL,
patientID = "ptID",
libraryID = "libID",
counts = NULL,
meta = NULL,
genes = NULL,
subset.var = NULL,
subset.lvl = NULL,
subset.genes = NULL
)
}
\arguments{
\item{dat}{EList object output by voom( ). Contains counts (dat$E), meta (dat$targets), and genes (dat$genes).}
\item{kin}{Matrix with pairwise kinship values between individuals. Must be numeric with rownames.}
\item{patientID}{Character of variable name to match dat$targets to kinship row and column names.}
\item{libraryID}{Character of variable name to match dat$targets to dat$E colnames}

Alternate data if not using EList object:
\item{counts}{Matrix of normalized expression. Rows are genes, columns are libraries.}
\item{meta}{Matrix or data frame of sample and individual metadata.}
\item{genes}{Matrix or data frame of gene metadata.}

Subset data (optional):
\item{subset.var}{Character list of variable name(s) to filter data by.}
\item{subset.lvl}{Character list of variable value(s) or level(s) to filter data to. Must match order of subset.var}
\item{subset.genes}{Character vector of genes to include in models.}
}
\value{
Data frame formatted for use in kmFit
}
\description{
Internal data cleaning and formatting used by kmFit
}
\keyword{internal}
|
/man/kimma_cleaning.Rd
|
permissive
|
kdillmcfarland/kimma
|
R
| false
| true
| 1,421
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/kimma_cleaning.R
\name{kimma_cleaning}
\alias{kimma_cleaning}
\title{kmFit data cleaning}
\usage{
kimma_cleaning(
dat = NULL,
kin = NULL,
patientID = "ptID",
libraryID = "libID",
counts = NULL,
meta = NULL,
genes = NULL,
subset.var = NULL,
subset.lvl = NULL,
subset.genes = NULL
)
}
\arguments{
\item{dat}{EList object output by voom( ). Contains counts (dat$E), meta (dat$targets), and genes (dat$genes).}
\item{kin}{Matrix with pairwise kinship values between individuals. Must be numeric with rownames.}
\item{patientID}{Character of variable name to match dat$targets to kinship row and column names.}
\item{libraryID}{Character of variable name to match dat$targets to dat$E colnames}

Alternate data if not using EList object:
\item{counts}{Matrix of normalized expression. Rows are genes, columns are libraries.}
\item{meta}{Matrix or data frame of sample and individual metadata.}
\item{genes}{Matrix or data frame of gene metadata.}

Subset data (optional):
\item{subset.var}{Character list of variable name(s) to filter data by.}
\item{subset.lvl}{Character list of variable value(s) or level(s) to filter data to. Must match order of subset.var}
\item{subset.genes}{Character vector of genes to include in models.}
}
\value{
Data frame formatted for use in kmFit
}
\description{
Internal data cleaning and formatting used by kmFit
}
\keyword{internal}
|
# read data
con <- file("./data/household_power_consumption.txt")
data <- read.table(text = grep("^[12]/2/2007", readLines(con), value=TRUE), sep = ";", col.names = c("Date", "Time", "Global_active_power", "Global_reactive_power", "Voltage", "Global_intensity", "Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), na.strings = "?")
close(con)
data$DateTime <- strptime(paste(data$Date, data$Time), format="%d/%m/%Y %H:%M:%S")
# Plot 3
png(filename = "./data-plots/plot3.png", width = 480, height = 480, units="px")
with(data,plot(DateTime, Sub_metering_1, ylab = "Energy sub metering", xlab="", type = "l"))
with(data,lines(DateTime, Sub_metering_2, col = "red"))
with(data,lines(DateTime, Sub_metering_3, col = "blue"))
legend("topright", col = c("black", "red", "blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), cex=0.9, lwd = 1)
dev.off()
|
/exploratory-data-analysis/week-1/ExData_Plotting1/plot3.R
|
no_license
|
gohed/datascience-coursera
|
R
| false
| false
| 871
|
r
|
# read data
con <- file("./data/household_power_consumption.txt")
data <- read.table(text = grep("^[12]/2/2007", readLines(con), value=TRUE), sep = ";", col.names = c("Date", "Time", "Global_active_power", "Global_reactive_power", "Voltage", "Global_intensity", "Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), na.strings = "?")
close(con)
data$DateTime <- strptime(paste(data$Date, data$Time), format="%d/%m/%Y %H:%M:%S")
# Plot 3
png(filename = "./data-plots/plot3.png", width = 480, height = 480, units="px")
with(data,plot(DateTime, Sub_metering_1, ylab = "Energy sub metering", xlab="", type = "l"))
with(data,lines(DateTime, Sub_metering_2, col = "red"))
with(data,lines(DateTime, Sub_metering_3, col = "blue"))
legend("topright", col = c("black", "red", "blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), cex=0.9, lwd = 1)
dev.off()
|
#' @title Rolling a device
#' @description Simulates rolling the specified device.
#' @param device object of class "device", with at least two sides and appropriate probability values
#' @param times the number of times the device is to be rolled (must be a positive integer); default 1
#' @return The observed rolls
#' @export
roll <- function(device, times = 1) {
check_times(times)
sample <- sample(device$sides,
size = times,
replace = TRUE,
prob = device$prob)
l <- list("rolls" = sample,
"sides" = device$sides,
"prob" = device$prob,
"total" = times)
class(l) <- "rolls"
return(l)
}
#' @export
check_times <- function(times) {
if((times %% 1) != 0) {
stop("times must be an integer")
} else if(times < 1) {
stop("sides must be a positive integer")
} else {
return(TRUE)
}
}
#' @export
print.rolls <- function(x) {
cat("object 'rolls' \n\n")
list = list(
rolls = x$"rolls"
)
print(list)
invisible(x)
}
#####################################
########### summary.rolls ###########
#####################################
#' @export
summary.rolls <- function(x) {
  # tabulate against all sides so sides with zero rolls are kept and
  # counts stay aligned with x$sides
  counts <- as.vector(table(factor(x$rolls, levels = x$sides)))
  freqs <- data.frame(side = x$sides,
                      count = counts,
                      prop = counts / x$total)
  obj <- list(freqs = freqs)
  class(obj) <- "summary.rolls"
  obj
}
#' @export
print.summary.rolls <- function(x) {
cat('summary "rolls"\n\n')
print(x$freqs)
invisible(x)
}
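## Usage sketch (a "device" object is normally created by a constructor
## elsewhere in the package; one is assembled by hand here for illustration):
# fair_coin <- structure(list(sides = c("heads", "tails"),
#                             prob = c(0.5, 0.5)),
#                        class = "device")
# r <- roll(fair_coin, times = 50)
# summary(r)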
|
/R/rolls.R
|
no_license
|
epm927/roller
|
R
| false
| false
| 1,562
|
r
|
#' @title Rolling a device
#' @description Simulates rolling the specified device.
#' @param device object of class "device", with at least two sides and appropriate probability values
#' @param times the number of times the device is to be rolled (must be a positive integer); default 1
#' @return The observed rolls
#' @export
roll <- function(device, times = 1) {
check_times(times)
sample <- sample(device$sides,
size = times,
replace = TRUE,
prob = device$prob)
l <- list("rolls" = sample,
"sides" = device$sides,
"prob" = device$prob,
"total" = times)
class(l) <- "rolls"
return(l)
}
#' @export
check_times <- function(times) {
if((times %% 1) != 0) {
stop("times must be an integer")
} else if(times < 1) {
stop("sides must be a positive integer")
} else {
return(TRUE)
}
}
#' @export
print.rolls <- function(x) {
cat("object 'rolls' \n\n")
list = list(
rolls = x$"rolls"
)
print(list)
invisible(x)
}
#####################################
########### summary.rolls ###########
#####################################
#' @export
summary.rolls <- function(x) {
  # tabulate against all sides so sides with zero rolls are kept and
  # counts stay aligned with x$sides
  counts <- as.vector(table(factor(x$rolls, levels = x$sides)))
  freqs <- data.frame(side = x$sides,
                      count = counts,
                      prop = counts / x$total)
  obj <- list(freqs = freqs)
  class(obj) <- "summary.rolls"
  obj
}
#' @export
print.summary.rolls <- function(x) {
cat('summary "rolls"\n\n')
print(x$freqs)
invisible(x)
}
|
#' Retrieve v facility info data from ghg database
#'
#' @param FACILITY_ID e.g. '1001781'. See Details.
#' @param REPORTING_YEAR e.g. '2012'. See Details.
#' @param FACILITY_NAME e.g. 'INVISTA S.a r.l.'. See Details.
#' @param NUM_NITRIC_ACID_TRAINS e.g. '1'. See Details.
#' @export
ghg_v_facility_info <- function(FACILITY_ID = NULL, REPORTING_YEAR = NULL, FACILITY_NAME = NULL,
NUM_NITRIC_ACID_TRAINS = NULL) {
args <- list(FACILITY_ID = FACILITY_ID, REPORTING_YEAR = REPORTING_YEAR, FACILITY_NAME = FACILITY_NAME,
NUM_NITRIC_ACID_TRAINS = NUM_NITRIC_ACID_TRAINS)
ret <- envir_get("v_facility_info", args)
ret
}
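## Usage sketch (argument values are illustrative; the call is passed to
## envir_get, which queries the Envirofacts GHG service):
# ghg_v_facility_info(FACILITY_ID = "1001781", REPORTING_YEAR = "2012")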
|
/R/ghg_v_facility_info.R
|
no_license
|
markwh/envirofacts
|
R
| false
| false
| 655
|
r
|
#' Retrieve v facility info data from ghg database
#'
#' @param FACILITY_ID e.g. '1001781'. See Details.
#' @param REPORTING_YEAR e.g. '2012'. See Details.
#' @param FACILITY_NAME e.g. 'INVISTA S.a r.l.'. See Details.
#' @param NUM_NITRIC_ACID_TRAINS e.g. '1'. See Details.
#' @export
ghg_v_facility_info <- function(FACILITY_ID = NULL, REPORTING_YEAR = NULL, FACILITY_NAME = NULL,
NUM_NITRIC_ACID_TRAINS = NULL) {
args <- list(FACILITY_ID = FACILITY_ID, REPORTING_YEAR = REPORTING_YEAR, FACILITY_NAME = FACILITY_NAME,
NUM_NITRIC_ACID_TRAINS = NUM_NITRIC_ACID_TRAINS)
ret <- envir_get("v_facility_info", args)
ret
}
|
.mime_part_finalizer <- function(x) {
if (!is.null(x$file))
file.remove(x$file)
}
.mime_part <- function(headers, file=NULL, text=NULL) {
if (!is.null(file) && !is.null(text))
stop("Can only provide file or text for mime part.")
e <- environment()
reg.finalizer(e, .mime_part_finalizer, onexit=TRUE)
class(e) <- "mime_part"
e
}
.write_mime_part <- function(mp, con=stdout()) {
writeLines(paste(names(mp$headers), unlist(mp$headers), sep=": "),
con, sep="\r\n")
writeLines("", con, sep="\r\n")
if (is.null(mp$file))
writeLines(mp$text, con)
else
writeLines(readLines(mp$file), con, sep="\r\n")
}
.file_attachment <- function(fn, name,
type="application/octet-stream",
disposition="attachment") {
if (missing(name))
name <- basename(fn)
b64file <- tempfile()
base64::encode(fn, b64file)
header <- list("Content-Type"=type,
"Content-Disposition"=sprintf("%s; filename=%s",
disposition, name),
"Content-Transfer-Encoding"="base64")
  .mime_part(headers=header, file=b64file)
}
.plot_attachment <- function(plt, name=deparse(substitute(plt)), device, ...) {
fn <- tempfile()
device(file=fn, ...)
print(plt)
dev.off()
## FIXME: Guess content type from device!
res <- .file_attachment(fn, name, type="application/pdf")
file.remove(fn)
res
}
##' Create a MIME part
##'
##' @param x Object to include
##' @param name Name of mime part. Usually the filename of the
##' attachment as displayed by the e-mail client.
##' @param ... Possible further arguments for \code{mime_part}
##' implementations.
##' @return An S3 \code{mime_part} object.
##' @export
mime_part <- function(x, name, ...)
UseMethod("mime_part", x)
##' Default MIME part method
##'
##' Creates a string representation of the object \code{x} using
##' \code{dput}. This representation is then turned into a file
##' attachment.
##'
##' @param x R object
##' @param name Filename used for attachment (sans the .R extension)
##' @param ... Ignored.
##' @return An S3 \code{mime_part} object.
##'
##' @method mime_part default
##' @S3method mime_part default
mime_part.default <- function(x, name, ...) {
  str <- capture.output(dput(x))  # capture the dput() text; dput() itself returns x invisibly
.mime_part(headers=list(
"Content-Type"="text/plain",
"Content-Disposition"=sprintf("attachment; file=%s.R", name)),
text=str)
}
##' Creates a MIME part from a trellis plot object
##'
##' Writes a PDF file of the plot defined by \code{x} and turns this
##' PDF file into a file attachment.
##'
##' @param x A \code{trellis} (lattice) object
##' @param name Name of attachment (sans .pdf extension).
##' @param device Graphics device used to render the plot. Defaults to
##' \code{pdf}.
##' @param ... Ignored.
##' @return An S3 \code{mime_part} object.
##'
##' @method mime_part trellis
##' @S3method mime_part trellis
mime_part.trellis <- function(x, name=deparse(substitute(x)), device=pdf, ...)
.plot_attachment(x, name=name, device=device, ...)
##' Creates a MIME part from a ggplot2 plot object
##'
##' Writes a PDF file of the plot defined by \code{x} and turns this
##' PDF file into a file attachment.
##'
##' @param x A \code{ggplot} object
##' @param name Name of attachment (sans .pdf extension).
##' @param device Graphics device used to render the plot. Defaults to
##' \code{pdf}.
##' @param ... Ignored.
##' @return An S3 \code{mime_part} object.
##'
##' @method mime_part ggplot
##' @S3method mime_part ggplot
mime_part.ggplot <- function(x, name=deparse(substitute(x)), device=pdf, ...)
.plot_attachment(x, name=name, device=device, ...)
##' Create a MIME part from a matrix.
##'
##' @param x Matrix
##' @param name Basename of file attachment that is generated.
##' @param ... Ignored.
##' @return An S3 \code{mime_part} object
##'
##' @method mime_part matrix
##' @S3method mime_part matrix
mime_part.matrix <- function(x, name=deparse(substitute(x)), ...) {
f <- tempfile()
on.exit(file.remove(f))
write.table(x, file=f, ...)
.file_attachment(f, name=sprintf("%s.txt", name), type="text/plain")
}
##' Create a MIME part from a \code{data.frame}.
##'
##' @param x A \code{data.frame}.
##' @param name Basename of file attachment that is generated.
##' @param ... Ignored.
##' @return An S3 \code{mime_part} object.
##'
##' @method mime_part data.frame
##' @S3method mime_part data.frame
mime_part.data.frame <- function(x, name=deparse(substitute(x)), ...) {
f <- tempfile()
on.exit(file.remove(f))
write.table(x, file=f, ...)
.file_attachment(f, name=sprintf("%s.txt", name), type="text/plain")
}
##' Create a MIME part from a character string. If the string matches
##' a filename, a MIME part containing that file is returned instead.
##'
##' @param x Character string, possibly a filename.
##' @param name Name of attachment.
##' @param ... Ignored.
##' @return An S3 \code{mime_part} object.
##'
##' @method mime_part character
##' @S3method mime_part character
mime_part.character <- function(x, name, ...) {
if (length(x) == 1 && file.exists(x)) {
.file_attachment(x, name, ...)
} else {
.mime_part(headers=list(
"Content-Type"="text/plain",
"Content-Disposition"="inline"),
text=paste(x, collapse="\r\n"))
}
}
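## Usage sketch (sendmailR-style; the sendmail() call below is indicative):
# df <- data.frame(a = 1:3, b = letters[1:3])
# part <- mime_part(df, name = "results")
# sendmail(from = "me@example.org", to = "you@example.org",
#          subject = "results", msg = list("See attachment.", part))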
|
/R/mime_part.R
|
no_license
|
ryninho/sendmailR-1
|
R
| false
| false
| 5,360
|
r
|
.mime_part_finalizer <- function(x) {
if (!is.null(x$file))
file.remove(x$file)
}
.mime_part <- function(headers, file=NULL, text=NULL) {
if (!is.null(file) && !is.null(text))
stop("Can only provide file or text for mime part.")
e <- environment()
reg.finalizer(e, .mime_part_finalizer, onexit=TRUE)
class(e) <- "mime_part"
e
}
.write_mime_part <- function(mp, con=stdout()) {
writeLines(paste(names(mp$headers), unlist(mp$headers), sep=": "),
con, sep="\r\n")
writeLines("", con, sep="\r\n")
if (is.null(mp$file))
writeLines(mp$text, con)
else
writeLines(readLines(mp$file), con, sep="\r\n")
}
.file_attachment <- function(fn, name,
type="application/octet-stream",
disposition="attachment") {
if (missing(name))
name <- basename(fn)
b64file <- tempfile()
base64::encode(fn, b64file)
header <- list("Content-Type"=type,
"Content-Disposition"=sprintf("%s; filename=%s",
disposition, name),
"Content-Transfer-Encoding"="base64")
  .mime_part(headers=header, file=b64file)
}
.plot_attachment <- function(plt, name=deparse(substitute(plt)), device, ...) {
fn <- tempfile()
device(file=fn, ...)
print(plt)
dev.off()
## FIXME: Guess content type from device!
res <- .file_attachment(fn, name, type="application/pdf")
file.remove(fn)
res
}
##' Create a MIME part
##'
##' @param x Object to include
##' @param name Name of mime part. Usually the filename of the
##' attachment as displayed by the e-mail client.
##' @param ... Possible further arguments for \code{mime_part}
##' implementations.
##' @return An S3 \code{mime_part} object.
##' @export
mime_part <- function(x, name, ...)
UseMethod("mime_part", x)
##' Default MIME part method
##'
##' Creates a string representation of the object \code{x} using
##' \code{dput}. This representation is then turned into a file
##' attachment.
##'
##' @param x R object
##' @param name Filename used for attachment (sans the .R extension)
##' @param ... Ignored.
##' @return An S3 \code{mime_part} object.
##'
##' @method mime_part default
##' @S3method mime_part default
mime_part.default <- function(x, name, ...) {
  str <- capture.output(dput(x))  # capture the dput() text; dput() itself returns x invisibly
.mime_part(headers=list(
"Content-Type"="text/plain",
"Content-Disposition"=sprintf("attachment; file=%s.R", name)),
text=str)
}
##' Creates a MIME part from a trellis plot object
##'
##' Writes a PDF file of the plot defined by \code{x} and turns this
##' PDF file into a file attachment.
##'
##' @param x A \code{trellis} (lattice) object
##' @param name Name of attachment (sans .pdf extension).
##' @param device Graphics device used to render the plot. Defaults to
##' \code{pdf}.
##' @param ... Ignored.
##' @return An S3 \code{mime_part} object.
##'
##' @method mime_part trellis
##' @S3method mime_part trellis
mime_part.trellis <- function(x, name=deparse(substitute(x)), device=pdf, ...)
.plot_attachment(x, name=name, device=device, ...)
##' Creates a MIME part from a ggplot2 plot object
##'
##' Writes a PDF file of the plot defined by \code{x} and turns this
##' PDF file into a file attachment.
##'
##' @param x A \code{ggplot} object
##' @param name Name of attachment (sans .pdf extension).
##' @param device Graphics device used to render the plot. Defaults to
##' \code{pdf}.
##' @param ... Ignored.
##' @return An S3 \code{mime_part} object.
##'
##' @method mime_part ggplot
##' @S3method mime_part ggplot
mime_part.ggplot <- function(x, name=deparse(substitute(x)), device=pdf, ...)
.plot_attachment(x, name=name, device=device, ...)
##' Create a MIME part from a matrix.
##'
##' @param x Matrix
##' @param name Basename of file attachment that is generated.
##' @param ... Ignored.
##' @return An S3 \code{mime_part} object
##'
##' @method mime_part matrix
##' @S3method mime_part matrix
mime_part.matrix <- function(x, name=deparse(substitute(x)), ...) {
f <- tempfile()
on.exit(file.remove(f))
write.table(x, file=f, ...)
.file_attachment(f, name=sprintf("%s.txt", name), type="text/plain")
}
##' Create a MIME part from a \code{data.frame}.
##'
##' @param x A \code{data.frame}.
##' @param name Basename of file attachment that is generated.
##' @param ... Ignored.
##' @return An S3 \code{mime_part} object.
##'
##' @method mime_part data.frame
##' @S3method mime_part data.frame
mime_part.data.frame <- function(x, name=deparse(substitute(x)), ...) {
f <- tempfile()
on.exit(file.remove(f))
write.table(x, file=f, ...)
.file_attachment(f, name=sprintf("%s.txt", name), type="text/plain")
}
##' Create a MIME part from a character string. If the string matches
##' a filename, a MIME part containing that file is returned instead.
##'
##' @param x Character string, possibly a filename.
##' @param name Name of attachment.
##' @param ... Ignored.
##' @return An S3 \code{mime_part} object.
##'
##' @method mime_part character
##' @S3method mime_part character
mime_part.character <- function(x, name, ...) {
if (length(x) == 1 && file.exists(x)) {
.file_attachment(x, name, ...)
} else {
.mime_part(headers=list(
"Content-Type"="text/plain",
"Content-Disposition"="inline"),
text=paste(x, collapse="\r\n"))
}
}
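## Usage sketch for the exported generic (file name purely hypothetical):
##   mime_part("Hello, world!")       # inline text part
##   mime_part("report.pdf")          # attaches the file if it exists
##   mime_part(iris, name = "iris")   # data.frame rendered as text attachment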
|
#######------------------------------------------------------------------------------- #######
#######-------- W E E K - 4 ( D A T A C L E A N I N G E X E R C I S E ) -------- #######
#######------------------------------------------------------------------------------- #######
# data.table provides as.data.table(), melt() and dcast() used below
library(data.table)
# Load activity labels and feature names (keep only column 2, the label text)
activity_labels <- read.table("./UCI HAR Dataset/activity_labels.txt")[,2]
features <- read.table("./UCI HAR Dataset/features.txt")[,2]
#######------------------------------------------------------------------------------- #######
# Extract only the measurements on the mean and standard deviation for each measurement.
extract_features <- grepl("mean|std", features)
#######------------------------------------------------------------------------------- #######
# Load and process X_test & y_test dataset
X_test <- read.table("./UCI HAR Dataset/test/X_test.txt")
y_test <- read.table("./UCI HAR Dataset/test/y_test.txt")
subject_test <- read.table("./UCI HAR Dataset/test/subject_test.txt")
names(X_test) = features
#######------------------------------------------------------------------------------- #######
# Extract only the measurements on the mean and standard deviation for each measurement.
X_test = X_test[,extract_features]
# Map activity IDs to their descriptive labels
y_test[,2] = activity_labels[y_test[,1]]
names(y_test) = c("Activity_ID", "Activity_Label")
names(subject_test) = "subject"
#######------------------------------------------------------------------------------- #######
# Join Data by columns
test_data <- cbind(as.data.table(subject_test), y_test, X_test)
#######------------------------------------------------------------------------------- #######
# Load and process X_train & y_train dataset into variables
X_train <- read.table("./UCI HAR Dataset/train/X_train.txt")
y_train <- read.table("./UCI HAR Dataset/train/y_train.txt")
subject_train <- read.table("./UCI HAR Dataset/train/subject_train.txt")
names(X_train) = features
#######------------------------------------------------------------------------------- #######
# Extract only the measurements on the mean and standard deviation for each measurement.
X_train = X_train[,extract_features]
# Map activity IDs to their descriptive labels
y_train[,2] = activity_labels[y_train[,1]]
names(y_train) = c("Activity_ID", "Activity_Label")
names(subject_train) = "subject"
#######------------------------------------------------------------------------------- #######
# Bind data X and Y
train_data <- cbind(as.data.table(subject_train), y_train, X_train)
# Merge test and train data
data = rbind(test_data, train_data)
#######------------------------------------------------------------------------------- #######
id_labels = c("subject", "Activity_ID", "Activity_Label")
data_labels = setdiff(colnames(data), id_labels)
melt_data = melt(data, id = id_labels, measure.vars = data_labels)
#######------------------------------------------------------------------------------- #######
# Apply mean function to dataset using dcast function
tidy_data = dcast(melt_data, subject + Activity_Label ~ variable, mean)
#######------------------------------------------------------------------------------- #######
#Create o/p file with no row names
write.table(tidy_data, file = "./tidy_data.txt",row.names = FALSE)
#######------------------------------------------------------------------------------- #######
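# Optional sanity check (illustrative only): read the tidy set back in
# tidy_check <- read.table("./tidy_data.txt", header = TRUE)
# str(tidy_check)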
|
/run_analysis.R
|
no_license
|
koushikcoursera/Datacleaning-Repo
|
R
| false
| false
| 3,439
|
r
|
#Given input in the form of draws from the posterior distributions for Model 5,
#generate a sequence of simulated flows.
setup=function(){
if(file.exists("C:/Users/jonazose")){
setwd("C:/Users/jonazose/Dropbox/RA/Code/flows/flows_git")
}else if(file.exists("C:/Users/Jon-Everyday")){
setwd("C:/Users/Jon-Everyday/Dropbox/RA/Code/flows/flows_git/")
}else if(file.exists("C:/Users/Jon")){
setwd("C:/Users/Jon/Dropbox/RA/Code/flows/flows_git/")
}else{
setwd("/homes/jonazose/RA/flows_git/flows/")
}
#Read in data
rawDat=scan("./Abel/flows_Decadal.txt")
countryList<<-scan("./Abel/countryList_Decadal.txt",what="",sep=" ")
flowArray<<-array(rawDat[4:length(rawDat)],dim=rawDat[1:3])
rm(rawDat)
rawPopDat=scan("./Abel/popDatMatrix_Decadal.txt");
popDatMatrix<<-matrix(rawPopDat[3:length(rawPopDat)],nrow=rawPopDat[1],ncol=rawPopDat[2])
#Toss out some countries that aren't in the CEPII database.
#Guam (GUM), Mayotte (MYT), and US Virgin Islands (VIR)
tossOutIndices=which(countryList %in% c("GUM","MYT","VIR"))
flowArray<<-flowArray[-tossOutIndices,-tossOutIndices,];
shortCountryList<<-countryList[-tossOutIndices];
popDatMatrix<<-popDatMatrix[-tossOutIndices,]
nm=length(shortCountryList);
#####################
#Read in CEPII stuff#
#####################
if(!file.exists("distanceArray.txt")){#If we didn't already make the distance array
distData=read.csv("dist_cepii.csv",header=TRUE)
modifiedCountryList=shortCountryList;
modifiedCountryList[which(shortCountryList=="COD")]="ZAR"#COD in WorldBank is ZAR in CEPII
modifiedCountryList[which(shortCountryList=="TLS")]="TMP"#TLS in WorldBank is TMP in CEPII
modifiedCountryList[which(shortCountryList=="PSE")]="PAL"#PSE in WorldBank is PAL in CEPII
distanceArray<<-array(0,dim=c(nm,nm,12))
cat("Constructing distance matrix\n")
for(i in 1:nm){
cat(i,"\n")
for(j in 1:nm){
distanceArray[i,j,]=as.numeric(distData[which(distData$iso_o==modifiedCountryList[i]
& distData$iso_d==modifiedCountryList[j]),
3:14])
}
}
write(dim(distanceArray),"distanceArray.txt")
write(distanceArray,"distanceArray.txt",append=TRUE)
}else{#If we did make the distance array already, just read it in.
distanceArrayDat=scan("distanceArray.txt")
distanceArray<<-array(distanceArrayDat[4:length(distanceArrayDat)],
dim=distanceArrayDat[1:3]);
}
#Convert everything to vector form
vectorLength=nm*(nm-1);#Keep track of the length of a single year's worth of data
flowMatrix<<-matrix(0,nrow=dim(flowArray)[3],ncol=vectorLength);#Construct a matrix
#where each row is a single year's data
for(i in 1:dim(flowArray)[3]){
M=flowArray[,,i];
flowMatrix[i,]=M[row(M)!=col(M)]
}
flowMatrix<<-flowMatrix;
originMatrix<<-matrix(rep(shortCountryList,nm),nrow=nm,byrow=FALSE);
originVector<<-originMatrix[row(originMatrix)!=col(originMatrix)];
destMatrix<<-t(originMatrix);
destVector<<-destMatrix[row(destMatrix)!=col(destMatrix)];
distanceMatrix<<-matrix(0,nrow=12,ncol=vectorLength);
for(i in 1:12){
M=distanceArray[,,i];
distanceMatrix[i,]=M[row(M)!=col(M)];
}
distanceMatrix<<-distanceMatrix;
}
setup();
#Use for decadal data
y=as.vector(flowMatrix[c(2,3,4),]);
x=as.vector(flowMatrix[c(1,2,3),]);
#Construct vectors of origin and destination populations at beginning of the decade
oVec=numeric(0);
dVec=numeric(0);
for(j in 1:3){
temp=rep(0,length(originVector));
for(i in 1:length(originVector)){
countryIndex=which(shortCountryList==originVector[i])
temp[i]=popDatMatrix[countryIndex,j]
}
oVec=c(oVec,temp);
temp=rep(0,length(destVector));
for(i in 1:length(destVector)){
countryIndex=which(shortCountryList==destVector[i])
temp[i]=popDatMatrix[countryIndex,j]
}
dVec=c(dVec,temp);
}
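#Equivalent vectorized construction (illustrative sketch; column-major
#stacking of columns j = 1..3 reproduces the loop's block ordering):
# o.idx <- match(originVector, shortCountryList)
# oVec2 <- as.vector(popDatMatrix[o.idx, 1:3])
# stopifnot(all(oVec == oVec2)) # dVec follows the same pattern from destVector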
generateFlows5=function(history, paramFile){
  #Generate predictions and return a matrix of medians and 80% and 95% interval bounds
  #First, read in the posterior draws for all eight model parameters.
paramData=scan(paramFile);
#Split into components
nPosteriorDraws=length(paramData)/8;
alpha1=paramData[1:nPosteriorDraws];
alpha2=paramData[(nPosteriorDraws+1):(2*nPosteriorDraws)];
alpha3=paramData[(2*nPosteriorDraws+1):(3*nPosteriorDraws)];
beta=paramData[(3*nPosteriorDraws+1):(4*nPosteriorDraws)];
delta1=paramData[(4*nPosteriorDraws+1):(5*nPosteriorDraws)];
delta2=paramData[(5*nPosteriorDraws+1):(6*nPosteriorDraws)];
delta3=paramData[(6*nPosteriorDraws+1):(7*nPosteriorDraws)];
delta4=paramData[(7*nPosteriorDraws+1):(8*nPosteriorDraws)];
#Initialize a matrix for simulation results
simulatedFlows=matrix(0,nrow=nPosteriorDraws,ncol=length(history));
for(i in 1:nPosteriorDraws){
temp=rep(0,length(history));
    ## NOTE: all four delta terms below multiply the same covariate,
    ## rep(distanceMatrix[1,],3); if four distinct distance measures were
    ## intended, only the sum delta1+delta2+delta3+delta4 is identifiable here.
    lambdaVec = exp(alpha1[i]*log(history+1)+alpha2[i]*log(oVec)+alpha3[i]*log(dVec)+beta[i]+
                      delta1[i]*rep(distanceMatrix[1,],3)+
                      delta2[i]*rep(distanceMatrix[1,],3)+
                      delta3[i]*rep(distanceMatrix[1,],3)+
                      delta4[i]*rep(distanceMatrix[1,],3))
temp=rpois(n=length(history),lambda=lambdaVec)
simulatedFlows[i,]=temp;
}
  resultMatrix=matrix(0,nrow=length(history),ncol=5);
  for(i in 1:length(history)){
resultMatrix[i,]=quantile(simulatedFlows[,i],c(0.025,0.1,0.5,0.9,0.975))
}
return(resultMatrix);
}
r=generateFlows5(x, "./Output/model5Output.txt")
#Report the mean absolute error of the median predictions against the true values
MAE=mean(abs(y-r[,3]));
cat("MAE =",MAE,"\n");
MAE_log=mean(abs(log(y+1)-log(r[,3]+1)));
cat("MAE_log =",MAE_log,"\n")
maxAbsError=max(abs(y-r[,3]));
cat("Max error =",maxAbsError,"\n");
#Interval scores
#80% I.S.
is80=sum(r[,4]-r[,2]);
for(i in 1:nrow(r)){
if(y[i]<r[i,2]){is80=is80+2/0.2*(r[i,2]-y[i]);}
if(y[i]>r[i,4]){is80=is80+2/0.2*(y[i]-r[i,4]);}
}
is80=is80/length(y)
cat("80% Interval score =",is80)
#95% I.S.
is95=sum(r[,5]-r[,1]);
for(i in 1:nrow(r)){
if(y[i]<r[i,1]){is95=is95+2/0.05*(r[i,1]-y[i]);}
if(y[i]>r[i,5]){is95=is95+2/0.05*(y[i]-r[i,5]);}
}
is95=is95/length(y)
cat("95% Interval score =",is95)
#######################
#Confidence interval inclusion
#######################
#80% CI
inclusion80=(y>=r[,2])&(y<=r[,4])
cat("80% Inclusion =",mean(inclusion80))
inclusion95=(y>=r[,1])&(y<=r[,5])
cat("95% Inclusion =",mean(inclusion95))
|
/simulateModel5.R
|
no_license
|
jazose/flows
|
R
| false
| false
| 6,519
|
r
|
context("fake_request")
test_that('URL parsing works', {
req <- fake_request('http://www.example.com')
expect_equal(req[['rook.url_scheme']], 'http')
req <- fake_request('www.example.com')
expect_equal(req[['rook.url_scheme']], 'http')
req <- fake_request('https://www.example.com')
expect_equal(req[['rook.url_scheme']], 'https')
req <- fake_request('http://www.example.com')
expect_equal(req[['SERVER_NAME']], 'www.example.com')
req <- fake_request('http://www.example.com:80')
expect_equal(req[['SERVER_NAME']], 'www.example.com')
req <- fake_request('http://www.example.com/new/path/')
expect_equal(req[['SERVER_NAME']], 'www.example.com')
req <- fake_request('http://www.sub.example.com')
expect_equal(req[['SERVER_NAME']], 'www.sub.example.com')
  req <- fake_request('http://www.example.com?query=string')
expect_equal(req[['SERVER_NAME']], 'www.example.com')
req <- fake_request('http://www.example.com')
expect_equal(req[['SERVER_PORT']], '80')
req <- fake_request('https://www.example.com')
expect_equal(req[['SERVER_PORT']], '443')
req <- fake_request('http://www.example.com:8080')
expect_equal(req[['SERVER_PORT']], '8080')
req <- fake_request('http://www.example.com:8080/new/path/')
expect_equal(req[['SERVER_PORT']], '8080')
req <- fake_request('http://www.example.com:8080?query=string')
expect_equal(req[['SERVER_PORT']], '8080')
req <- fake_request('http://www.example.com')
expect_equal(req[['PATH_INFO']], '/')
req <- fake_request('http://www.example.com/')
expect_equal(req[['PATH_INFO']], '/')
req <- fake_request('http://www.example.com/a/path/')
expect_equal(req[['PATH_INFO']], '/a/path/')
req <- fake_request('http://www.example.com/a/index.html')
expect_equal(req[['PATH_INFO']], '/a/index.html')
req <- fake_request('http://www.example.com/a/path/?query=string')
expect_equal(req[['PATH_INFO']], '/a/path/')
req <- fake_request('http://www.example.com/a/index.html?query=string')
expect_equal(req[['PATH_INFO']], '/a/index.html')
req <- fake_request('http://www.example.com?query=string')
expect_equal(req[['PATH_INFO']], '/')
req <- fake_request('http://www.example.com/a/mount/', appLocation = '/a/mount/')
expect_equal(req[['PATH_INFO']], '/')
req <- fake_request('http://www.example.com/a/mount/', appLocation = '/a/')
expect_equal(req[['PATH_INFO']], '/mount/')
expect_error(fake_request('http://www.example.com/a/mount/', appLocation = '/wrong/mount/'))
req <- fake_request('http://www.example.com')
expect_equal(req[['QUERY_STRING']], '')
req <- fake_request('http://www.example.com?query=string')
expect_equal(req[['QUERY_STRING']], 'query=string')
req <- fake_request('http://www.example.com/a/path/?query=string')
expect_equal(req[['QUERY_STRING']], 'query=string')
})
test_that('Headers are assigned', {
req <- fake_request('http://www.example.com', headers = list('accept_encoding' = 'gzip, deflate'))
expect_equal(req[['HTTP_ACCEPT_ENCODING']], 'gzip, deflate')
expect_error(fake_request('http://www.example.com', headers = list('accept_encoding' = letters[1:4])))
req <- fake_request('http://www.example.com', headers = list('dnt' = 1))
expect_equal(req[['HTTP_DNT']], '1')
})
test_that('Content gets assigned', {
req <- fake_request('http://www.example.com')
expect_is(req$rook.input, 'NullInputStreamFake')
expect_equal(req$rook.input$read_lines(), character())
expect_equal(req$rook.input$read(), raw())
expect_null(req$rook.input$rewind())
expect_equal(req$rook.input$read(l = 0), raw())
expect_null(req$rook.input$close())
expect_is(req$rook.errors, 'ErrorStreamFake')
expect_null(req$rook.errors$cat())
expect_null(req$rook.errors$flush())
req <- fake_request('http://www.example.com', content = 'Multiple\nlines\nof content')
expect_equal(req$rook.input$read_lines(), c('Multiple', 'lines', 'of content'))
expect_equal(req$rook.input$rewind(), 26)
expect_equal(rawToChar(req$rook.input$read()), 'Multiple\nlines\nof content\n')
expect_equal(req$rook.input$read(l = 0), raw())
expect_null(req$rook.input$close())
req <- fake_request('http://www.example.com', content = c('Multiple', 'lines', 'of content'))
expect_equal(req$rook.input$read_lines(), c('Multiple', 'lines', 'of content'))
req$rook.input$close()
})
test_that('ellipsis gets assigned', {
req <- fake_request('http://www.example.com', fiery.version = '0.1.0')
expect_equal(req$fiery.version, '0.1.0')
})
|
/tests/testthat/test-fake_request.R
|
no_license
|
aespar21/fiery
|
R
| false
| false
| 4,659
|
r
|
context("fake_request")
test_that('URL parsing works', {
req <- fake_request('http://www.example.com')
expect_equal(req[['rook.url_scheme']], 'http')
req <- fake_request('www.example.com')
expect_equal(req[['rook.url_scheme']], 'http')
req <- fake_request('https://www.example.com')
expect_equal(req[['rook.url_scheme']], 'https')
req <- fake_request('http://www.example.com')
expect_equal(req[['SERVER_NAME']], 'www.example.com')
req <- fake_request('http://www.example.com:80')
expect_equal(req[['SERVER_NAME']], 'www.example.com')
req <- fake_request('http://www.example.com/new/path/')
expect_equal(req[['SERVER_NAME']], 'www.example.com')
req <- fake_request('http://www.sub.example.com')
expect_equal(req[['SERVER_NAME']], 'www.sub.example.com')
req <- fake_request('http://www.example.com?qurey=string')
expect_equal(req[['SERVER_NAME']], 'www.example.com')
req <- fake_request('http://www.example.com')
expect_equal(req[['SERVER_PORT']], '80')
req <- fake_request('https://www.example.com')
expect_equal(req[['SERVER_PORT']], '443')
req <- fake_request('http://www.example.com:8080')
expect_equal(req[['SERVER_PORT']], '8080')
req <- fake_request('http://www.example.com:8080/new/path/')
expect_equal(req[['SERVER_PORT']], '8080')
req <- fake_request('http://www.example.com:8080?query=string')
expect_equal(req[['SERVER_PORT']], '8080')
req <- fake_request('http://www.example.com')
expect_equal(req[['PATH_INFO']], '/')
req <- fake_request('http://www.example.com/')
expect_equal(req[['PATH_INFO']], '/')
req <- fake_request('http://www.example.com/a/path/')
expect_equal(req[['PATH_INFO']], '/a/path/')
req <- fake_request('http://www.example.com/a/index.html')
expect_equal(req[['PATH_INFO']], '/a/index.html')
req <- fake_request('http://www.example.com/a/path/?query=string')
expect_equal(req[['PATH_INFO']], '/a/path/')
req <- fake_request('http://www.example.com/a/index.html?query=string')
expect_equal(req[['PATH_INFO']], '/a/index.html')
req <- fake_request('http://www.example.com?query=string')
expect_equal(req[['PATH_INFO']], '/')
req <- fake_request('http://www.example.com/a/mount/', appLocation = '/a/mount/')
expect_equal(req[['PATH_INFO']], '/')
req <- fake_request('http://www.example.com/a/mount/', appLocation = '/a/')
expect_equal(req[['PATH_INFO']], '/mount/')
expect_error(fake_request('http://www.example.com/a/mount/', appLocation = '/wrong/mount/'))
req <- fake_request('http://www.example.com')
expect_equal(req[['QUERY_STRING']], '')
req <- fake_request('http://www.example.com?query=string')
expect_equal(req[['QUERY_STRING']], 'query=string')
req <- fake_request('http://www.example.com/a/path/?query=string')
expect_equal(req[['QUERY_STRING']], 'query=string')
})
test_that('Headers are assigned', {
req <- fake_request('http://www.example.com', headers = list('accept_encoding' = 'gzip, deflate'))
expect_equal(req[['HTTP_ACCEPT_ENCODING']], 'gzip, deflate')
expect_error(fake_request('http://www.example.com', headers = list('accept_encoding' = letters[1:4])))
req <- fake_request('http://www.example.com', headers = list('dnt' = 1))
expect_equal(req[['HTTP_DNT']], '1')
})
test_that('Content gets assigned', {
req <- fake_request('http://www.example.com')
expect_is(req$rook.input, 'NullInputStreamFake')
expect_equal(req$rook.input$read_lines(), character())
expect_equal(req$rook.input$read(), raw())
expect_null(req$rook.input$rewind())
expect_equal(req$rook.input$read(l = 0), raw())
expect_null(req$rook.input$close())
expect_is(req$rook.errors, 'ErrorStreamFake')
expect_null(req$rook.errors$cat())
expect_null(req$rook.errors$flush())
req <- fake_request('http://www.example.com', content = 'Multiple\nlines\nof content')
expect_equal(req$rook.input$read_lines(), c('Multiple', 'lines', 'of content'))
expect_equal(req$rook.input$rewind(), 26)
expect_equal(rawToChar(req$rook.input$read()), 'Multiple\nlines\nof content\n')
expect_equal(req$rook.input$read(l = 0), raw())
expect_null(req$rook.input$close())
req <- fake_request('http://www.example.com', content = c('Multiple', 'lines', 'of content'))
expect_equal(req$rook.input$read_lines(), c('Multiple', 'lines', 'of content'))
req$rook.input$close()
})
test_that('ellipsis gets assigned', {
req <- fake_request('http://www.example.com', fiery.version = '0.1.0')
expect_equal(req$fiery.version, '0.1.0')
})
|
showmodel <- function(model, verbose = interactive()) {
if (verbose) {
cat(model)
cat("\n")
}
}
ks <- function(a, model, alternative = c("two.sided", "less", "greater"), verbose = FALSE) {
showmodel(model, verbose)
if (is.matrix(a)) {
r <- ks.matrix(a, model, alternative)
} else {
r <- ks.vector(a, model, alternative)
}
return(r)
}
ks.matrix <- function(a, model, alternative = c("two.sided", "less", "greater"), verbose = FALSE) {
apply(a, 1, function (x) ks.vector(x, model, alternative))
}
#' @importFrom kolmim pkolm
ks.vector <- function(a, model, alternative = c("two.sided", "less", "greater"), verbose = FALSE) {
if (any(a == Inf)) return(list(ks = c("KS.stat" = Inf, "KS.pval" = 0), "fit_c" = NA, peak.count = -Inf))
cdf_c = eval(parse(text=paste("cdf", model, sep = "_")))
fit.result <- fit.distribution(a, model)
if(is.null(fit.result) || is.null(fit.result$result)) return (list(ks = c("KS.stat" = Inf, "KS.pval" = 0), "fit_c" = NA, peak.count = -Inf))
fit_c <- fit.result$result
if (model %in% c("P", "NB", "G", "MG")) {
r <- ks_statistics(a, fit_c, cdf_c)
} else if (model %in% c("ZIP", "ZINB", "ZIG", "ZIlogG", "ZIMG", "ZIlogMG")) {
r <- ks_statistics_ZI(a, fit_c, cdf_c)
} else if (model %in% c("LTMG", "LTG")) {
    if (isTRUE(all(abs(fit_c) == 0))) { # degenerate fit: all parameters are zero
return(list(ks = c("KS.stat" = 1, "KS.pval" = 0), "fit_c" = NA, peak.count = -Inf))
}
r <- ks_statistics_Zcut(a, fit_c, cdf_c)
} else if (model %in% c("BP")) {
warning("Something wrong with BP ")
return(list(ks = c("KS.stat" = 1, "KS.pval" = 0), "fit_c" = NA, peak.count = -Inf))
  } else {
    stop(paste("Unknown model:", model))
  }
n <- r$n
x <- r$y - (0:(n - 1)) / n
alternative <- match.arg(alternative)
STATISTIC <- switch(alternative, two.sided = max(c(x, 1 / n - x)), greater = max(1 / n - x), less = max(x))
ks_cut <- ks_determination(n)
if (isTRUE(STATISTIC < ks_cut)) {
PVAL <- 1 - kolmim::pkolm(STATISTIC, n)
} else {
PVAL <- 0
}
return(list(ks = c("KS.stat" = STATISTIC, "KS.pval" = PVAL), "fit_c" = fit_c, peak.count = fit.result$peak.count))
}
ks_statistics <- function(a, fit_c, cdf_c = cdf_norm) {
n <- length(a)
x <- cdf_c(sort(a), fit_c)
return (list("n" = n, "y" = x))
}
ks_statistics_ZI <- function(a, fit_c, cdf_c = cdf_ZIG) {
aa1 <- a[which(a > 0)]
p <- mean(a == 0)
n <- length(aa1)
x <- (cdf_c(sort(aa1), fit_c) - p) / (1 - p)
return (list("n" = n, "y" = x))
}
ks_statistics_Zcut <- function(a, fit_c, cdf_c = cdf_LTMG1) {
Zcut <- min(a[which(a != min(a))])
aa <- a
aa[which(aa < Zcut)] <- Zcut - 1
aa1 <- aa[which(aa >= Zcut)]
p <- mean(aa < Zcut)
n <- length(aa1)
x <- (cdf_c(sort(aa1), fit_c) - p) / (1 - p)
return (list("n" = n, "y" = x))
}
#' @importFrom kolmim pkolm
ks_determination <- function(n) {
  # Smallest KS statistic (on a grid of width min(1/n, 0.1)) whose Kolmogorov
  # CDF reaches 0.5; ks.vector computes exact p-values only below this cutoff
  pp <- min(1 / n, 0.1)
pp_unit <- pp
N <- 0
ss <- 0
while(ss < 0.5) {
N <- N + 1
STAT <- pp_unit * N
ss <- kolmim::pkolm(STAT, n)
}
return(STAT)
}
#' M3S Fit
#'
#' Normalize the data and fit the selected distribution model.
#'
#' @param x A numeric vector of observations (e.g. expression values)
#' @param normalization Normalization applied before fitting; one of
#'   "none", "logplus", "log", "cpm.none", "cpm.logplus", "cpm.log"
#' @param distribution The distribution model to fit, one of "G", "MG",
#'   "LTMG" or "LTG"
#'
#' @export
M3Sfit <- function(x, normalization = c("none", "logplus", "log", "cpm.none", "cpm.logplus", "cpm.log"), distribution = c("G", "MG", "LTMG", "LTG")) {
return (fit.distribution(normalize(x, normalization), distribution))
}
fit.distribution <- function(x, distribution = c("G", "MG", "LTMG", "LTG")) {
if (any(x == Inf)) return()
fitf_c = eval(parse(text=paste("fit", distribution, sep = "_")))
if (distribution %in% c("P", "NB", "G", "MG", "ZIP", "ZINB", "ZIG", "ZIlogG", "ZIMG", "ZIlogMG")) {
result <- fitf_c(x)
} else if (distribution %in% c("LTMG", "LTG")) {
Zcut <- min(x[which(x != min(x))])
aa <- x
aa[which(aa < Zcut)] <- Zcut - 1
result <- fitf_c(aa, Zcut)
} else {
return()
}
if(!is.list(result)) {
result <- list(result = result, peak.count = 1)
}
return (result)
}
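## Hypothetical usage sketch (assumes the fit_G / cdf_G helpers referenced
## above are defined elsewhere in the package):
##   set.seed(1)
##   x <- rnorm(200, mean = 2)
##   ks(x, model = "G")                                   # KS stat and p-value
##   M3Sfit(x, normalization = "none", distribution = "G")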
|
/R/ks.R
|
no_license
|
zy26/M3S
|
R
| false
| false
| 3,918
|
r
|
#!/usr/local/bin/Rscript
library(beadarray)
library(limma)
library(biomaRt)
library(illuminaMousev2BeadID.db)
#load the data
BSData <- get(load("results/BSData.quantile.RData"))
E <- exprs(BSData)
design<-matrix(0,nrow=(ncol(E)), ncol=2)
colnames(design) <- c("MLA_NS","MLA_Neurons")
rownames(design) <- colnames(E)
design[1:5,1] <- 1
design[6:8,2] <- 1
cont.matrix<-makeContrasts(NSvNeuron=MLA_Neurons-MLA_NS, levels=design)
fit<-lmFit(E, design)
fit<-contrasts.fit(fit, cont.matrix)
ebFit<-eBayes(fit)
ids = rownames(E)
symbol <- mget(ids, illuminaMousev2BeadIDSYMBOL, ifnotfound = NA)
sum(sapply(symbol, length) > 1)
symbol <- as.character(symbol)
length(which(symbol=="NA"))
ensembl = mget(ids, illuminaMousev2BeadIDENSEMBL, ifnotfound = NA)
length(which(is.na(ensembl)))
sum(sapply(ensembl, length) > 1)
crosshyb <- which(( sapply(ensembl, length) ) > 1)
length(crosshyb)
ensembl[crosshyb] <- NA
ensembl <- as.character(ensembl)
ensembl[ensembl=="NA"] = NA
length(which(is.na(ensembl)))
ensmart <- useMart("ensembl", dataset="mmusculus_gene_ensembl")
filters <- "ensembl_gene_id"
values <- ensembl[!is.na(ensembl)]
attributes <- c("ensembl_gene_id", "chromosome_name","start_position", "end_position", "strand", "description")
ens.anno <- getBM(filters=filters, values=values, attributes=attributes, mart=ensmart)
rownames(ens.anno)<-ens.anno[,1]
anno <- data.frame(
ID = as.character(ids),
EnsemblID = ensembl,
symbol=symbol,
ens.anno[ensembl,],
stringsAsFactors=F
)
rownames(anno) <- anno[,"ID"]
ebFit$genes = anno
write.fit(ebFit, file="limma_ebfit.csv", adjust="BH")
data<-read.table("limma_ebfit.csv", sep="\t", header=T)
new.data<- topTable(ebFit, number=nrow(E))
rownames(new.data)<-new.data$ID
new.data<-new.data[order(new.data[,"P.Value"]),]
write.csv(new.data,"limma_results.csv",row.names=F)
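# Quick look at the strongest hits (sketch; logFC, P.Value and adj.P.Val are
# standard topTable columns, symbol comes from the annotation attached above):
# head(new.data[, c("symbol", "logFC", "P.Value", "adj.P.Val")], 10)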
|
/scripts/microarrayanalysis.R
|
no_license
|
Bongomountainthesis/mla_neurons_lumixpn
|
R
| false
| false
| 1,940
|
r
|
brmsfit <- function(formula = NULL, family = "", link = "", data.name = "",
data = data.frame(), model = "", exclude = NULL,
prior = list(), ranef = NULL, autocor = NULL,
partial = NULL, cov.ranef = NULL, fit = NA,
algorithm = "sampling") {
# brmsfit class
x <- nlist(formula, family, link, data.name, data, model, exclude, prior,
ranef, autocor, partial, cov.ranef, fit, algorithm)
class(x) <- "brmsfit"
x
}
brmssummary <- function(formula = NULL, family = "", link = "",
data.name = "", group = NULL, nobs = NULL,
ngrps = NULL, chains = 1, iter = 2000,
warmup = 500, thin = 1, sampler = "",
autocor = NULL, fixed = NULL, random = list(),
cor_pars = NULL, spec_pars = NULL,
mult_pars = NULL, WAIC = "Not computed",
algorithm = "sampling") {
# brmssummary class
x <- nlist(formula, family, link, data.name, group, nobs, ngrps, chains,
iter, warmup, thin, sampler, autocor, fixed,
random, cor_pars, spec_pars, mult_pars, WAIC, algorithm)
class(x) <- "brmssummary"
x
}
#' Extract variance and correlation components
#'
#' This function calculates the estimated standard deviations,
#' correlations and covariances of the random-effects terms
#' in a mixed-effects model of class \code{brmsfit}.
#' For linear models, the residual standard deviations,
#' correlations and covariances are also returned.
#'
#' @aliases VarCorr.brmsfit
#'
#' @param x An object usually of class \code{brmsfit}.
#' @param estimate A character vector specifying which coefficients
#' (e.g., "mean", "median", "sd", or "quantile")
#' should be calculated for the random effects.
#' @param as.list logical; Indicates if covariance
#' and correlation matrices should be returned as
#' lists of matrices (the default), or as 3-dimensional arrays.
#' We recommend not to set \code{as.list} to \code{FALSE}.
#' @param ... Further arguments to be passed to the functions
#' specified in \code{estimate}
#'
#'
#' @return An object of class \code{brmsVarCorr},
#' which is a list of lists (one per grouping factor),
#' each containing 3 elements: a matrix containing the standard deviations,
#' a list of correlation matrices, and a list of covariance matrices.
#' Can be coerced to a \code{data.frame} by using the \code{as.data.frame} method.
#'
#' @author Paul-Christian Buerkner \email{paul.buerkner@@gmail.com}
#'
#' @examples
#' \dontrun{
#' fit <- brm(count ~ log_Age_c + log_Base4_c * Trt_c + (1+Trt_c|visit),
#' data = epilepsy, family = "poisson", chains = 1)
#' ## return the means of random effects covariances
#' (vc <- VarCorr(fit))
#' as.data.frame(vc)
#'
#' ## return 2.5% and 97.5% quantiles of random effects covariances
#' VarCorr(fit, estimate = "quantile", probs = c(0.025, 0.975))
#' }
#'
#' @export
VarCorr <- function(x, ...)
UseMethod("VarCorr")
#' Non-linear hypothesis testing
#'
#' Perform non-linear hypothesis testing of fixed effects parameters
#'
#' @aliases hypothesis.brmsfit
#'
#' @param x An \code{R} object typically of class \code{brmsfit}
#' @param hypothesis A character vector specifying one or more
#' non-linear hypothesis concerning parameters of the model
#' @param class A string specifying the class of parameters being tested.
#' Default is "b" for fixed effects.
#' Other typical options are "sd" or "cor".
#' If \code{class = NULL}, all parameters can be tested
#' against each other, but have to be specified with their full name
#' (see also \code{\link[brms:parnames]{parnames}})
#' @param group Name of a grouping factor to evaluate only
#' random effects parameters related to this grouping factor.
#' Ignored if \code{class} is not \code{"sd"} or \code{"cor"}.
#' @param alpha the alpha-level of the tests (default is 0.05)
#' @param ignore_prior A flag indicating if prior distributions
#' should also be plotted. Only used if priors were specified on
#' the relevant parameters.
#' @inheritParams plot.brmsfit
#' @param ... Currently ignored
#'
#' @details Among others, \code{hypothesis} computes an
#' evidence ratio for each hypothesis.
#' For a directed hypothesis, this is just the posterior probability
#' under the hypothesis against its alternative.
#' For an undirected (i.e. point) hypothesis the evidence ratio
#' is a Bayes factor between the hypothesis and its alternative.
#' In order to calculate this Bayes factor, all parameters related
#' to the hypothesis must have proper priors
#' and argument \code{sample.prior} of function \code{brm}
#' must be set to \code{TRUE}.
#' When interpreting Bayes factors, make sure
#' that your priors are reasonable and carefully chosen,
#' as the result will depend heavily on the priors.
#' In particular, avoid using default priors.
#'
#' @return Summary statistics of the posterior distributions
#' related to the hypotheses.
#'
#' @author Paul-Christian Buerkner \email{paul.buerkner@@gmail.com}
#'
#' @examples
#' \dontrun{
#' ## define priors
#' prior <- c(set_prior("normal(0,2)", class = "b"),
#' set_prior("student_t(10,0,1)", class = "sigma"),
#' set_prior("student_t(10,0,1)", class = "sd"))
#'
#' ## fit a linear mixed effects models
#' fit <- brm(rating ~ treat + period + carry + (1+treat|subject),
#' data = inhaler, family = gaussian(),
#' prior = prior, sample.prior = TRUE)
#'
#' ## perform two-sided hypothesis testing
#' (hyp1 <- hypothesis(fit, "treat = period + carry"))
#' plot(hyp1)
#' hypothesis(fit, "exp(treat) - 3 = 0", alpha = 0.01)
#'
#' ## perform one-sided hypothesis testing
#' hypothesis(fit, "period + carry - 3 < 0")
#'
#' ## compare random effects standard deviations
#' hypothesis(fit, "treat < Intercept",
#' class = "sd", group = "subject")
#'
#' ## test the amount of random intercept variance on all variance
#' h <- paste("sd_subject_Intercept^2 / (sd_subject_Intercept^2 +",
#' "sd_subject_treat^2 + sigma_rating^2) = 0")
#' (hyp2 <- hypothesis(fit, h, class = NULL))
#' plot(hyp2)
#'
#' ## test more than one hypothesis at once
#' (hyp3 <- hypothesis(fit, c("treat = period + carry",
#' "2 * period - treat = 0")))
#' plot(hyp3, ignore_prior = TRUE)
#' }
#'
#' @export
hypothesis <- function(x, hypothesis, ...)
UseMethod("hypothesis")
#' Extract posterior samples
#'
#' Extract posterior samples of specified parameters
#'
#' @aliases posterior.samples posterior_samples.brmsfit posterior.samples.brmsfit
#'
#' @param x An \code{R} object typically of class \code{brmsfit}
#' @param pars Names of parameters for which posterior samples
#' should be returned, as given by a character vector or regular expressions.
#' By default, all posterior samples of all parameters are extracted
#' @param parameters A deprecated alias of \code{pars}
#' @param exact_match Indicates whether parameter names
#' should be matched exactly or treated as regular expression.
#' Default is \code{FALSE}.
#' @param add_chains A flag indicating if the returned data.frame
#' should contain information on the chains
#' @param subset A numeric vector indicating the rows
#' (i.e., posterior samples) to be returned.
#' If \code{NULL} (the default), all posterior samples are returned.
#' @param as.matrix Should the output be a \code{matrix}
#' instead of a \code{data.frame}? Defaults to \code{FALSE}
#' @param ... additional arguments
#'
#' @details Currently there are methods for \code{brmsfit} objects.
#' @return A data frame containing the posterior samples,
#' with one column per parameter.
#'
#' @author Paul-Christian Buerkner \email{paul.buerkner@@gmail.com}
#'
#' @examples
#' \dontrun{
#' fit <- brm(rating ~ treat + period + carry + (1|subject),
#' data = inhaler, family = "cumulative")
#'
#' #extract posterior samples of fixed effects
#' samples1 <- posterior_samples(fit, "^b")
#' head(samples1)
#'
#' #extract posterior samples of standard deviations of random effects
#' samples2 <- posterior_samples(fit, "^sd")
#' head(samples2)
#' }
#'
#' @export
posterior_samples <- function(x, pars = NA, ...)
UseMethod("posterior_samples")
# deprecated alias of posterior_samples
#' @export
posterior.samples <- function(x, pars = NA, ...)
UseMethod("posterior_samples")
#' Extract prior samples
#'
#' Extract prior samples of specified parameters
#'
#' @aliases prior_samples.brmsfit
#'
#' @param x An \code{R} object typically of class \code{brmsfit}
#' @param pars Names of parameters for which prior samples should be returned,
#' as given by a character vector or regular expressions.
#' By default, all prior samples are extracted
#' @param parameters A deprecated alias of \code{pars}
#' @param ... Currently ignored
#'
#' @details To make use of this function,
#' the model must contain samples of prior distributions.
#' This can be ensured by setting \code{sample.prior = TRUE}
#' in function \code{brm}.
#' Currently there are methods for \code{brmsfit} objects.
#' @return A data frame containing the prior samples.
#'
#' @author Paul-Christian Buerkner \email{paul.buerkner@@gmail.com}
#'
#' @examples
#' \dontrun{
#' fit <- brm(rating ~ treat + period + carry + (1|subject),
#' data = inhaler, family = "cumulative",
#' prior = set_prior("normal(0,2)", class = "b"),
#' sample.prior = TRUE)
#'
#' #extract all prior samples
#' samples1 <- prior_samples(fit)
#' head(samples1)
#'
#' #extract prior samples for the fixed effect of \code{treat}.
#' samples2 <- posterior_samples(fit, "b_treat")
#' head(samples2)
#' }
#'
#' @export
prior_samples <- function(x, pars = NA, ...)
UseMethod("prior_samples")
#' Extract Parameter Names
#'
#' Extract all parameter names of a given model.
#'
#' @aliases par.names parnames.brmsfit par.names.brmsfit
#'
#' @param x An \code{R} object
#' @param ... Further arguments passed to or from other methods
#'
#' @details Currently there are methods for \code{brmsfit}
#' and \code{formula} objects.
#' @return A character vector containing the parameter names of the model.
#'
#' @author Paul-Christian Buerkner \email{paul.buerkner@@gmail.com}
#'
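#' @examples
#' \dontrun{
#' # minimal sketch, reusing the inhaler example from elsewhere in this file
#' fit <- brm(rating ~ treat + period + carry + (1|subject),
#'            data = inhaler, family = "gaussian")
#' parnames(fit)
#' }
#'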
#' @export
parnames <- function(x, ...)
UseMethod("parnames")
# deprecated alias of parnames
#' @export
par.names <- function(x, ...)
UseMethod("parnames")
#' Compute the WAIC
#'
#' Compute the Watanabe-Akaike Information Criterion
#' based on the posterior likelihood by using the \pkg{loo} package
#'
#' @aliases WAIC.brmsfit
#'
#' @param x A fitted model object typically of class \code{brmsfit}.
#' @param ... Optionally more fitted model objects.
#' @param compare A flag indicating if the WAICs
#' of the models should be compared to each other.
#'
#' @details When comparing models fitted to the same data,
#' the smaller the WAIC, the better the fit.
#' @return If just one object is provided, an object of class \code{ic}.
#' If multiple objects are provided, an object of class \code{iclist}.
#'
#' @author Paul-Christian Buerkner \email{paul.buerkner@@gmail.com}
#'
#' @examples
#' \dontrun{
#' #model with fixed effects only
#' fit1 <- brm(rating ~ treat + period + carry,
#' data = inhaler, family = "gaussian")
#' WAIC(fit1)
#'
#' #model with an additional random intercept for subjects
#' fit2 <- brm(rating ~ treat + period + carry + (1|subject),
#' data = inhaler, family = "gaussian")
#' #compare both models
#' WAIC(fit1, fit2)
#' }
#'
#' @references
#' Vehtari, A., Gelman, A., and Gabry, J. (2015).
#' Efficient implementation of leave-one-out cross-validation
#' and WAIC for evaluating fitted Bayesian models.
#'
#' Gelman, A., Hwang, J., & Vehtari, A. (2014).
#' Understanding predictive information criteria for Bayesian models.
#' Statistics and Computing, 24, 997-1016.
#'
#' Watanabe, S. (2010). Asymptotic equivalence of Bayes cross validation
#' and widely applicable information criterion in singular learning theory.
#' The Journal of Machine Learning Research, 11, 3571-3594.
#'
#' @export
WAIC <- function(x, ..., compare = TRUE)
UseMethod("WAIC")
#' Compute LOO
#'
#' Compute Leave-one-out cross-validation based on the posterior likelihood
#' by using the \pkg{loo} package
#'
#' @aliases LOO.brmsfit
#'
#' @inheritParams WAIC
#' @param cores The number of cores to use for parallelization.
#' This can be set for an entire R session
#' by \code{options(loo.cores = NUMBER)}.
#' The default is \code{\link[parallel:detectCores]{detectCores()}}.
#' @param wcp,wtrunc Parameters used for
#' the Pareto smoothed importance sampling.
#' See \code{\link[loo:loo]{loo}} for details.
#'
#' @details When comparing models fitted to the same data,
#' the smaller the LOO, the better the fit.
#' @return If just one object is provided, an object of class \code{ic}.
#' If multiple objects are provided, an object of class \code{iclist}.
#'
#' @author Paul-Christian Buerkner \email{paul.buerkner@@gmail.com}
#'
#' @examples
#' \dontrun{
#' #model with fixed effects only
#' fit1 <- brm(rating ~ treat + period + carry,
#' data = inhaler, family = "gaussian")
#' LOO(fit1)
#'
#' #model with an additional random intercept for subjects
#' fit2 <- brm(rating ~ treat + period + carry + (1|subject),
#' data = inhaler, family = "gaussian")
#' #compare both models
#' LOO(fit1, fit2)
#' }
#'
#' @references
#' Vehtari, A., Gelman, A., and Gabry, J. (2015).
#' Efficient implementation of leave-one-out cross-validation
#' and WAIC for evaluating fitted Bayesian models.
#'
#' Gelman, A., Hwang, J., & Vehtari, A. (2014).
#' Understanding predictive information criteria for Bayesian models.
#' Statistics and Computing, 24, 997-1016.
#'
#' Watanabe, S. (2010). Asymptotic equivalence of Bayes cross validation
#' and widely applicable information criterion in singular learning theory.
#' The Journal of Machine Learning Research, 11, 3571-3594.
#'
#' @export
LOO <- function(x, ..., compare = TRUE)
UseMethod("LOO")
#' Interface to \pkg{shinystan}
#'
#' Provide an interface to \pkg{shinystan} for models fitted with \pkg{brms}
#'
#' @aliases launch_shiny.brmsfit
#'
#' @param x A fitted model object typically of class \code{brmsfit}.
#' @param rstudio Only relevant for RStudio users.
#' The default (\code{rstudio=FALSE}) is to launch the app
#' in the default web browser rather than RStudio's pop-up Viewer.
#' Users can change the default to \code{TRUE}
#' by setting the global option \cr \code{options(shinystan.rstudio = TRUE)}.
#' @param ... Optional arguments to pass to \code{\link[shiny:runApp]{runApp}}
#'
#' @return An S4 shinystan object
#'
#' @examples
#' \dontrun{
#' fit <- brm(rating ~ treat + period + carry + (1|subject),
#' data = inhaler, family = "gaussian")
#' launch_shiny(fit)
#' }
#'
#' @seealso \code{\link[shinystan:launch_shinystan]{launch_shinystan}}
#'
#' @export
launch_shiny <- function(x, rstudio = getOption("shinystan.rstudio"), ...)
UseMethod("launch_shiny")
#' Extract Stan Model Code
#'
#' Extract the model code in Stan language
#'
#' @aliases stancode.brmsfit
#'
#' @param object An object of class \code{brmsfit}
#' @param ... Currently ignored
#'
#' @return Model code in Stan language for further processing.
#'
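#' @examples
#' \dontrun{
#' # minimal sketch, reusing the inhaler example from elsewhere in this file
#' fit <- brm(rating ~ treat + period + carry + (1|subject),
#'            data = inhaler, family = "gaussian")
#' cat(stancode(fit))
#' }
#'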
#' @export
stancode <- function(object, ...)
UseMethod("stancode")
#' Extract Data passed to Stan
#'
#' Extract all data that was used by Stan to fit the model
#'
#' @aliases standata.brmsfit
#'
#' @param object An object of class \code{brmsfit}
#' @param ... Currently ignored
#'
#' @return A named list containing the data passed to Stan
#'
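#' @examples
#' \dontrun{
#' # minimal sketch, reusing the inhaler example from elsewhere in this file
#' fit <- brm(rating ~ treat + period + carry + (1|subject),
#'            data = inhaler, family = "gaussian")
#' names(standata(fit))
#' }
#'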
#' @export
standata <- function(object, ...)
UseMethod("standata")
#' Various Plotting Functions implemented in \pkg{rstan}
#'
#' Conveniant way to call plotting functions
#' implemented in the \pkg{rstan} package.
#'
#' @inheritParams posterior_samples
#' @param object An R object typically of class \code{brmsfit}
#' @param pars Names of parameters to be plotted,
#' as given by a character vector or regular expressions.
#' By default, the first 10 parameters are plotted.
#' @param type The type of the plot.
#' Supported types are (as names) \code{plot},
#' \code{trace}, \code{hist}, \code{dens}, \code{scat},
#' \code{diag}, \code{rhat}, \code{ess}, \code{mcse}, \code{ac}.
#' For an overview on the various plot types see
#' \code{\link[rstan:plotting-functions]{plotting-functions}}.
#' @param quiet A flag indicating whether messages
#' produced by \pkg{ggplot2} during the plotting process
#' should be silenced. Default is \code{FALSE}.
#' @param ... Additional arguments passed to the plotting functions.
#'
#' @return A \code{\link[ggplot2:ggplot]{ggplot}} object
#' that can be further customized using the \pkg{ggplot2} package.
#'
#' @details Instead of using \code{stanplot(<brmsfit-object>)},
#' the plotting functions can be called directly
#' via \code{stan_<plot-type>(<brmsfit-object>$fit)}.
#' For more details on the plotting functions see
#' \code{\link[rstan:stan_plot]{Plots}} as well as
#' \code{\link[rstan:stan_diag]{Diagnostic plots}}.
#' Note that the plotting functions themselves
#' only accept full parameter names,
#' while \code{stanplot} allows for partial matching
#' and regular expressions.
#' You should also consider using
#' the \pkg{shinystan} package available via method
#' \code{\link[brms:launch_shiny]{launch_shiny}}
#' in \pkg{brms} for flexible and interactive visual analysis.
#'
#' @examples
#' \dontrun{
#' model <- brm(count ~ log_Age_c + log_Base4_c * Trt_c
#' + (1|patient) + (1|visit),
#' data = epilepsy, family = "poisson")
#' # plot 95% CIs
#' stanplot(model, type = "plot", ci_level = 0.95)
#' # equivalent to
#' stan_plot(model$fit, ci_level = 0.95)
#'
#' # only show fixed effects in the plots
#' # this will not work when calling stan_plot directly
#' stanplot(model, pars = "^b", type = "plot", ci_level = 0.95)
#'
#' # plot some diagnostics on the sampler
#' stanplot(model, type = "diag")
#' # equivalent to
#' stan_diag(model$fit)
#' }
#'
#' @export
stanplot <- function(object, pars, ...)
UseMethod("stanplot")
|
/R/generics.R
|
no_license
|
paulhendricks/brms
|
R
| false
| false
| 18,655
|
r
|
brmsfit <- function(formula = NULL, family = "", link = "", data.name = "",
data = data.frame(), model = "", exclude = NULL,
prior = list(), ranef = NULL, autocor = NULL,
partial = NULL, cov.ranef = NULL, fit = NA,
algorithm = "sampling") {
# brmsfit class
x <- nlist(formula, family, link, data.name, data, model, exclude, prior,
ranef, autocor, partial, cov.ranef, fit, algorithm)
class(x) <- "brmsfit"
x
}
brmssummary <- function(formula = NULL, family = "", link = "",
data.name = "", group = NULL, nobs = NULL,
ngrps = NULL, chains = 1, iter = 2000,
warmup = 500, thin = 1, sampler = "",
autocor = NULL, fixed = NULL, random = list(),
cor_pars = NULL, spec_pars = NULL,
mult_pars = NULL, WAIC = "Not computed",
algorithm = "sampling") {
# brmssummary class
x <- nlist(formula, family, link, data.name, group, nobs, ngrps, chains,
iter, warmup, thin, sampler, autocor, fixed,
random, cor_pars, spec_pars, mult_pars, WAIC, algorithm)
class(x) <- "brmssummary"
x
}
#' Extract variance and correlation components
#'
#' This function calculates the estimated standard deviations,
#' correlations and covariances of the random-effects terms
#' in a mixed-effects model of class \code{brmsfit}.
#' For linear models, the residual standard deviations,
#' correlations and covariances are also returned.
#'
#' @aliases VarCorr.brmsfit
#'
#' @param x An object usually of class \code{brmsfit}.
#' @param estimate A character vector specifying which coefficients
#' (e.g., "mean", "median", "sd", or "quantile")
#' should be calculated for the random effects.
#' @param as.list logical; Indicates if covariance
#' and correlation matrices should be returned as
#' lists of matrices (the default), or as 3-dimensional arrays.
#' We recommend not setting \code{as.list} to \code{FALSE}.
#' @param ... Further arguments to be passed to the functions
#' specified in \code{estimate}
#'
#'
#' @return An object of class \code{brmsVarCorr},
#' which is a list of lists (one per grouping factor),
#' each containing 3 elements: a matrix containing the standard deviations,
#' a list of correlation matrices, and a list of covariance matrices.
#' Can be coerced to a \code{data.frame} by using the \code{as.data.frame} method.
#'
#' @author Paul-Christian Buerkner \email{paul.buerkner@@gmail.com}
#'
#' @examples
#' \dontrun{
#' fit <- brm(count ~ log_Age_c + log_Base4_c * Trt_c + (1+Trt_c|visit),
#' data = epilepsy, family = "poisson", chains = 1)
#' ## return the means of random effects covariances
#' (vc <- VarCorr(fit))
#' as.data.frame(vc)
#'
#' ## return 2.5% and 97.5% quantiles of random effects covariances
#' VarCorr(fit, estimate = "quantile", probs = c(0.025, 0.975))
#' }
#'
#' @export
VarCorr <- function(x, ...)
UseMethod("VarCorr")
#' Non-linear hypothesis testing
#'
#' Perform non-linear hypothesis testing of fixed effects parameters
#'
#' @aliases hypothesis.brmsfit
#'
#' @param x An \code{R} object typically of class \code{brmsfit}
#' @param hypothesis A character vector specifying one or more
#' non-linear hypotheses concerning parameters of the model
#' @param class A string specifying the class of parameters being tested.
#' Default is "b" for fixed effects.
#' Other typical options are "sd" or "cor".
#' If \code{class = NULL}, all parameters can be tested
#' against each other, but have to be specified with their full name
#' (see also \code{\link[brms:parnames]{parnames}})
#' @param group Name of a grouping factor to evaluate only
#' random effects parameters related to this grouping factor.
#' Ignored if \code{class} is not \code{"sd"} or \code{"cor"}.
#' @param alpha the alpha-level of the tests (default is 0.05)
#' @param ignore_prior A flag indicating if prior distributions
#' should be ignored in the plots. If \code{FALSE} (the default),
#' priors are also plotted when they were specified on the
#' relevant parameters.
#' @inheritParams plot.brmsfit
#' @param ... Currently ignored
#'
#' @details Among others, \code{hypothesis} computes an
#' evidence ratio for each hypothesis.
#' For a directed hypothesis, this is just the posterior probability
#' under the hypothesis against its alternative.
#' For an undirected (i.e. point) hypothesis the evidence ratio
#' is a Bayes factor between the hypothesis and its alternative.
#' In order to calculate this Bayes factor, all parameters related
#' to the hypothesis must have proper priors
#' and argument \code{sample.prior} of function \code{brm}
#' must be set to \code{TRUE}.
#' When interpreting Bayes factors, make sure
#' that your priors are reasonable and carefully chosen,
#' as the result will depend heavily on the priors.
#' In particular, avoid using default priors.
#'
#' @return Summary statistics of the posterior distributions
#' related to the hypotheses.
#'
#' @author Paul-Christian Buerkner \email{paul.buerkner@@gmail.com}
#'
#' @examples
#' \dontrun{
#' ## define priors
#' prior <- c(set_prior("normal(0,2)", class = "b"),
#' set_prior("student_t(10,0,1)", class = "sigma"),
#' set_prior("student_t(10,0,1)", class = "sd"))
#'
#' ## fit a linear mixed effects models
#' fit <- brm(rating ~ treat + period + carry + (1+treat|subject),
#' data = inhaler, family = gaussian(),
#' prior = prior, sample.prior = TRUE)
#'
#' ## perform two-sided hypothesis testing
#' (hyp1 <- hypothesis(fit, "treat = period + carry"))
#' plot(hyp1)
#' hypothesis(fit, "exp(treat) - 3 = 0", alpha = 0.01)
#'
#' ## perform one-sided hypothesis testing
#' hypothesis(fit, "period + carry - 3 < 0")
#'
#' ## compare random effects standard deviations
#' hypothesis(fit, "treat < Intercept",
#' class = "sd", group = "subject")
#'
#' ## test the amount of random intercept variance on all variance
#' h <- paste("sd_subject_Intercept^2 / (sd_subject_Intercept^2 +",
#' "sd_subject_treat^2 + sigma_rating^2) = 0")
#' (hyp2 <- hypothesis(fit, h, class = NULL))
#' plot(hyp2)
#'
#' ## test more than one hypothesis at once
#' (hyp3 <- hypothesis(fit, c("treat = period + carry",
#' "2 * period - treat = 0")))
#' plot(hyp3, ignore_prior = TRUE)
#' }
#'
#' @export
hypothesis <- function(x, hypothesis, ...)
UseMethod("hypothesis")
#' Extract posterior samples
#'
#' Extract posterior samples of specified parameters
#'
#' @aliases posterior.samples posterior_samples.brmsfit posterior.samples.brmsfit
#'
#' @param x An \code{R} object typically of class \code{brmsfit}
#' @param pars Names of parameters for which posterior samples
#' should be returned, as given by a character vector or regular expressions.
#' By default, all posterior samples of all parameters are extracted
#' @param parameters A deprecated alias of \code{pars}
#' @param exact_match Indicates whether parameter names
#' should be matched exactly or treated as regular expression.
#' Default is \code{FALSE}.
#' @param add_chains A flag indicating if the returned data.frame
#' should contain information on the chains
#' @param subset A numeric vector indicating the rows
#' (i.e., posterior samples) to be returned.
#' If \code{NULL} (the default), all posterior samples are returned.
#' @param as.matrix Should the output be a \code{matrix}
#' instead of a \code{data.frame}? Defaults to \code{FALSE}
#' @param ... additional arguments
#'
#' @details Currently there are methods for \code{brmsfit} objects.
#' @return A data frame containing the posterior samples,
#' with one column per parameter.
#'
#' @author Paul-Christian Buerkner \email{paul.buerkner@@gmail.com}
#'
#' @examples
#' \dontrun{
#' fit <- brm(rating ~ treat + period + carry + (1|subject),
#' data = inhaler, family = "cumulative")
#'
#' #extract posterior samples of fixed effects
#' samples1 <- posterior_samples(fit, "^b")
#' head(samples1)
#'
#' #extract posterior samples of standard deviations of random effects
#' samples2 <- posterior_samples(fit, "^sd")
#' head(samples2)
#' }
#'
#' @export
posterior_samples <- function(x, pars = NA, ...)
UseMethod("posterior_samples")
# deprecated alias of posterior_samples
#' @export
posterior.samples <- function(x, pars = NA, ...)
UseMethod("posterior_samples")
#' Extract prior samples
#'
#' Extract prior samples of specified parameters
#'
#' @aliases prior_samples.brmsfit
#'
#' @param x An \code{R} object typically of class \code{brmsfit}
#' @param pars Names of parameters for which prior samples should be returned,
#' as given by a character vector or regular expressions.
#' By default, all prior samples are extracted
#' @param parameters A deprecated alias of \code{pars}
#' @param ... Currently ignored
#'
#' @details To make use of this function,
#' the model must contain samples of prior distributions.
#' This can be ensured by setting \code{sample.prior = TRUE}
#' in function \code{brm}.
#' Currently there are methods for \code{brmsfit} objects.
#' @return A data frame containing the prior samples.
#'
#' @author Paul-Christian Buerkner \email{paul.buerkner@@gmail.com}
#'
#' @examples
#' \dontrun{
#' fit <- brm(rating ~ treat + period + carry + (1|subject),
#' data = inhaler, family = "cumulative",
#' prior = set_prior("normal(0,2)", class = "b"),
#' sample.prior = TRUE)
#'
#' #extract all prior samples
#' samples1 <- prior_samples(fit)
#' head(samples1)
#'
#' #extract prior samples for the fixed effect of \code{treat}.
#' samples2 <- prior_samples(fit, "b_treat")
#' head(samples2)
#' }
#'
#' @export
prior_samples <- function(x, pars = NA, ...)
UseMethod("prior_samples")
#' Extract Parameter Names
#'
#' Extract all parameter names of a given model.
#'
#' @aliases par.names parnames.brmsfit par.names.brmsfit
#'
#' @param x An \code{R} object
#' @param ... Further arguments passed to or from other methods
#'
#' @details Currently there are methods for \code{brmsfit}
#' and \code{formula} objects.
#' @return A character vector containing the parameter names of the model.
#'
#' @author Paul-Christian Buerkner \email{paul.buerkner@@gmail.com}
#'
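#' @examples
#' \dontrun{
#' # Hypothetical sketch: list all parameter names of a fitted model
#' fit <- brm(rating ~ treat + period + carry + (1|subject),
#'            data = inhaler, family = "gaussian")
#' parnames(fit)
#' }
#'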
#' @export
parnames <- function(x, ...)
UseMethod("parnames")
# deprecated alias of parnames
#' @export
par.names <- function(x, ...)
UseMethod("parnames")
#' Compute the WAIC
#'
#' Compute the Watanabe-Akaike Information Criterion
#' based on the posterior likelihood by using the \pkg{loo} package
#'
#' @aliases WAIC.brmsfit
#'
#' @param x A fitted model object typically of class \code{brmsfit}.
#' @param ... Optionally more fitted model objects.
#' @param compare A flag indicating if the WAICs
#' of the models should be compared to each other.
#'
#' @details When comparing models fitted to the same data,
#' the smaller the WAIC, the better the fit.
#' @return If just one object is provided, an object of class \code{ic}.
#' If multiple objects are provided, an object of class \code{iclist}.
#'
#' @author Paul-Christian Buerkner \email{paul.buerkner@@gmail.com}
#'
#' @examples
#' \dontrun{
#' #model with fixed effects only
#' fit1 <- brm(rating ~ treat + period + carry,
#' data = inhaler, family = "gaussian")
#' WAIC(fit1)
#'
#' #model with an additional random intercept for subjects
#' fit2 <- brm(rating ~ treat + period + carry + (1|subject),
#' data = inhaler, family = "gaussian")
#' #compare both models
#' WAIC(fit1, fit2)
#' }
#'
#' @references
#' Vehtari, A., Gelman, A., and Gabry, J. (2015).
#' Efficient implementation of leave-one-out cross-validation
#' and WAIC for evaluating fitted Bayesian models.
#'
#' Gelman, A., Hwang, J., & Vehtari, A. (2014).
#' Understanding predictive information criteria for Bayesian models.
#' Statistics and Computing, 24, 997-1016.
#'
#' Watanabe, S. (2010). Asymptotic equivalence of Bayes cross validation
#' and widely applicable information criterion in singular learning theory.
#' The Journal of Machine Learning Research, 11, 3571-3594.
#'
#' @export
WAIC <- function(x, ..., compare = TRUE)
UseMethod("WAIC")
#' Compute LOO
#'
#' Compute Leave-one-out cross-validation based on the posterior likelihood
#' by using the \pkg{loo} package
#'
#' @aliases LOO.brmsfit
#'
#' @inheritParams WAIC
#' @param cores The number of cores to use for parallelization.
#' This can be set for an entire R session
#' by \code{options(loo.cores = NUMBER)}.
#' The default is \code{\link[parallel:detectCores]{detectCores()}}.
#' @param wcp,wtrunc Parameters used for
#' the Pareto smoothed importance sampling.
#' See \code{\link[loo:loo]{loo}} for details.
#'
#' @details When comparing models fitted to the same data,
#' the smaller the LOO, the better the fit.
#' @return If just one object is provided, an object of class \code{ic}.
#' If multiple objects are provided, an object of class \code{iclist}.
#'
#' @author Paul-Christian Buerkner \email{paul.buerkner@@gmail.com}
#'
#' @examples
#' \dontrun{
#' #model with fixed effects only
#' fit1 <- brm(rating ~ treat + period + carry,
#' data = inhaler, family = "gaussian")
#' LOO(fit1)
#'
#' #model with an additional random intercept for subjects
#' fit2 <- brm(rating ~ treat + period + carry + (1|subject),
#' data = inhaler, family = "gaussian")
#' #compare both models
#' LOO(fit1, fit2)
#' }
#'
#' @references
#' Vehtari, A., Gelman, A., and Gabry, J. (2015).
#' Efficient implementation of leave-one-out cross-validation
#' and WAIC for evaluating fitted Bayesian models.
#'
#' Gelman, A., Hwang, J., & Vehtari, A. (2014).
#' Understanding predictive information criteria for Bayesian models.
#' Statistics and Computing, 24, 997-1016.
#'
#' Watanabe, S. (2010). Asymptotic equivalence of Bayes cross validation
#' and widely applicable information criterion in singular learning theory.
#' The Journal of Machine Learning Research, 11, 3571-3594.
#'
#' @export
LOO <- function(x, ..., compare = TRUE)
UseMethod("LOO")
#' Interface to \pkg{shinystan}
#'
#' Provide an interface to \pkg{shinystan} for models fitted with \pkg{brms}
#'
#' @aliases launch_shiny.brmsfit
#'
#' @param x A fitted model object typically of class \code{brmsfit}.
#' @param rstudio Only relevant for RStudio users.
#' The default (\code{rstudio=FALSE}) is to launch the app
#' in the default web browser rather than RStudio's pop-up Viewer.
#' Users can change the default to \code{TRUE}
#' by setting the global option \cr \code{options(shinystan.rstudio = TRUE)}.
#' @param ... Optional arguments to pass to \code{\link[shiny:runApp]{runApp}}
#'
#' @return An S4 shinystan object
#'
#' @examples
#' \dontrun{
#' fit <- brm(rating ~ treat + period + carry + (1|subject),
#' data = inhaler, family = "gaussian")
#' launch_shiny(fit)
#' }
#'
#' @seealso \code{\link[shinystan:launch_shinystan]{launch_shinystan}}
#'
#' @export
launch_shiny <- function(x, rstudio = getOption("shinystan.rstudio"), ...)
UseMethod("launch_shiny")
#' Extract Stan Model Code
#'
#' Extract the model code in Stan language
#'
#' @aliases stancode.brmsfit
#'
#' @param object An object of class \code{brmsfit}
#' @param ... Currently ignored
#'
#' @return Model code in the Stan language for further processing.
#'
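#' @examples
#' \dontrun{
#' # Hypothetical sketch: any fitted brmsfit object will do
#' fit <- brm(rating ~ treat + period + carry,
#'            data = inhaler, family = "gaussian")
#' stancode(fit)
#' }
#'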
#' @export
stancode <- function(object, ...)
UseMethod("stancode")
#' Extract Data passed to Stan
#'
#' Extract all data that was used by Stan to fit the model
#'
#' @aliases standata.brmsfit
#'
#' @param object An object of class \code{brmsfit}
#' @param ... Currently ignored
#'
#' @return A named list containing the data passed to Stan
#'
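#' @examples
#' \dontrun{
#' # Hypothetical sketch: inspect the data list passed to Stan
#' fit <- brm(rating ~ treat + period + carry,
#'            data = inhaler, family = "gaussian")
#' sdata <- standata(fit)
#' names(sdata)
#' }
#'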
#' @export
standata <- function(object, ...)
UseMethod("standata")
#' Various Plotting Functions implemented in \pkg{rstan}
#'
#' Convenient way to call plotting functions
#' implemented in the \pkg{rstan} package.
#'
#' @inheritParams posterior_samples
#' @param object An R object typically of class \code{brmsfit}
#' @param pars Names of parameters to be plotted,
#' as given by a character vector or regular expressions.
#' By default, the first 10 parameters are plotted.
#' @param type The type of the plot.
#' Supported types are (as names) \code{plot},
#' \code{trace}, \code{hist}, \code{dens}, \code{scat},
#' \code{diag}, \code{rhat}, \code{ess}, \code{mcse}, \code{ac}.
#' For an overview on the various plot types see
#' \code{\link[rstan:plotting-functions]{plotting-functions}}.
#' @param quiet A flag indicating whether messages
#' produced by \pkg{ggplot2} during the plotting process
#' should be silenced. Default is \code{FALSE}.
#' @param ... Additional arguments passed to the plotting functions.
#'
#' @return A \code{\link[ggplot2:ggplot]{ggplot}} object
#' that can be further customized using the \pkg{ggplot2} package.
#'
#' @details Instead of using \code{stanplot(<brmsfit-object>)},
#' the plotting functions can be called directly
#' via \code{stan_<plot-type>(<brmsfit-object>$fit)}.
#' For more details on the plotting functions see
#' \code{\link[rstan:stan_plot]{Plots}} as well as
#' \code{\link[rstan:stan_diag]{Diagnostic plots}}.
#' Note that the plotting functions themselves
#' only accept full parameter names,
#' while \code{stanplot} allows for partial matching
#' and regular expressions.
#' You should also consider using
#' the \pkg{shinystan} package available via method
#' \code{\link[brms:launch_shiny]{launch_shiny}}
#' in \pkg{brms} for flexible and interactive visual analysis.
#'
#' @examples
#' \dontrun{
#' model <- brm(count ~ log_Age_c + log_Base4_c * Trt_c
#' + (1|patient) + (1|visit),
#' data = epilepsy, family = "poisson")
#' # plot 95% CIs
#' stanplot(model, type = "plot", ci_level = 0.95)
#' # equivalent to
#' stan_plot(model$fit, ci_level = 0.95)
#'
#' # only show fixed effects in the plots
#' # this will not work when calling stan_plot directly
#' stanplot(model, pars = "^b", type = "plot", ci_level = 0.95)
#'
#' # plot some diagnostics on the sampler
#' stanplot(model, type = "diag")
#' # equivalent to
#' stan_diag(model$fit)
#' }
#'
#' @export
stanplot <- function(object, pars, ...)
UseMethod("stanplot")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/batchFunc.R
\name{batchDepth}
\alias{batchDepth}
\title{Calculate depth.}
\usage{
batchDepth(bamFiles, bedFile, outDir, thread, mapqFilter)
}
\arguments{
\item{bamFiles}{A character string of BAM file paths separated by commas (,).}
\item{bedFile}{A character string of BED file path.}
\item{outDir}{Path of directory to write to.}
\item{thread}{Integer, number of threads.}
\item{mapqFilter}{A numeric value, the minimum mapping quality a read must have to be counted.}
}
\description{
Calculate depth.
}
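\examples{
\dontrun{
## Hypothetical call; the BAM/BED paths below are placeholders
batchDepth(bamFiles = "s1.bam,s2.bam", bedFile = "targets.bed",
           outDir = "depth_out", thread = 4, mapqFilter = 30)
}
}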
|
/man/batchDepth.Rd
|
no_license
|
sanadamakomi/exonCNV
|
R
| false
| true
| 564
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/batchFunc.R
\name{batchDepth}
\alias{batchDepth}
\title{Calculate depth.}
\usage{
batchDepth(bamFiles, bedFile, outDir, thread, mapqFilter)
}
\arguments{
\item{bamFiles}{A character string of BAM file paths separated by commas (,).}
\item{bedFile}{A character string of BED file path.}
\item{outDir}{Path of directory to write to.}
\item{thread}{Integer, number of threads.}
\item{mapqFilter}{A numeric value, the minimum mapping quality a read must have to be counted.}
}
\description{
Calculate depth.
}
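\examples{
\dontrun{
## Hypothetical call; the BAM/BED paths below are placeholders
batchDepth(bamFiles = "s1.bam,s2.bam", bedFile = "targets.bed",
           outDir = "depth_out", thread = 4, mapqFilter = 30)
}
}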
|
n=3
png('./figures/F6_com_allacts_types_hd.png', width=1200*n, height=900*n, res=96)
par(oma=c(6,5,22,4),
    mar=c(2,1,1,1),
bg=background.color,
bty='n',
family='Montserrat')
cdirs$cdirs<-as.numeric(as.character(cdirs$cdirs))
### main plot with dots
plot(xlim=c(1,18), ylim=c(0,900), NULL, axes=F, xlab="", ylab="", main='', cex.main=1.4, col.main=dark.color, font.main=2)
axis (2, font=1, tck=0.1, col=background.color, col.axis=dark.color, at=seq(0,900,100),line=-0.5, labels=seq(0,900,100), cex.axis=1.2*n)
abline(h=seq(0,900,100), col='grey')
abline(v=c(8.5 ,18.5), col='grey')
axis (1, font=1, tck=-0.1, col=background.color, col.axis=dark.color, at=1:18,line=-1.5, labels=regs$year.sem, cex.axis=1*n)
#lines (x=1:30, y = dirs$dirs, lwd=3, col=main.color3)
rect (xleft =seq(0.6, 17.6,1), xright =seq(1.4, 18.4,1), ybottom=rep(0,18), ytop=regs$regs, border=main.color2, col=main.color2)
rect (xleft =seq(0.68, 17.68,1), xright =seq(1.32, 18.32,1), ybottom=rep(0,18), ytop=regsi$regsi, border=main.color3, col=main.color3)
rect (xleft =seq(0.68, 17.68,1), xright =seq(1.32, 18.32,1), ybottom=rep(0,18), ytop=regsd$regsd, border=main.color3, col=main.color)
lines (x=1:8, y=rep(mean(regs$regs[1:8]),8), lwd=8, lty=2, col=main.color2)
lines (x=9:18, y=rep(mean(regs$regs[9:18]),10), lwd=8, lty=2, col=main.color2)
lines (x=1:8, y=rep(mean(regsi$regsi[1:8]),8), lwd=8, lty=2, col=main.color3)
lines (x=9:18, y=rep(mean(regsi$regsi[9:18]),10), lwd=8, lty=2, col=main.color3)
lines (x=1:8, y=rep(mean(regsd$regsd[1:8]),8), lwd=8, lty=2, col=main.color)
lines (x=9:18, y=rep(mean(regsd$regsd[9:18]),10), lwd=8, lty=2, col=main.color)
#title
mtext(expression(bold('Legislative Productivity of the European Union, 2004-2019: Types of acts')),
line=5*n+2, font=1, at=0.025, col=dark.color, cex=1.8*n, adj=0, padj=1, outer=T)
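# Multi-color text trick: the mtext calls below overprint the same string,
# with phantom() blanking all but one segment per call so that each segment
# can be drawn in its own color.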
mtext(expression(phantom ('Red bars ') * ' show the number of all legislative acts adopted by the Commission. ' * phantom (' Green bars ') * 'show only'),
line=2.5*n+2, font=3, at=0.025, col=dark.color, cex=1.2*n, adj=0, padj=1, outer=T)
mtext(expression('Red bars ' * phantom (' show the number of all legislative acts adopted by the Commission. Green bars show only')),
line=2.5*n+2, font=3, at=0.025, col=main.color2, cex=1.2*n, adj=0, padj=1, outer=T)
mtext(expression(phantom ('Red bars show the number of all legislative acts adopted by the Commission. ') * ' Green bars ' * phantom('show only')),
line=2.5*n+2, font=3, at=0.025, col=main.color, cex=1.2*n, adj=0, padj=1, outer=T)
mtext(expression('delegated ones and ' * phantom (' blue bars') * ' add implementing ones. The ' * phantom (' ----- ') * ' lines show the averages per EP term.'),
line=1*n+2, font=3, at=0.025, col=dark.color, cex=1.2*n, adj=0, padj=1, outer=T)
mtext(expression(phantom('delegated ones and blue bars add implementing ones. The ') * ' ----- ' * phantom(' lines show the averages per EP term.')),
line=1*n+2, font=3, at=0.025, col=main.color2, cex=1.2*n, adj=0, padj=1, outer=T)
mtext(expression(phantom('delegated ones and ') * ' blue bars' * phantom (' add implementing ones. The ----- lines show the averages per EP term.')),
line=1*n+2, font=3, at=0.025, col=main.color3, cex=1.2*n, adj=0, padj=1, outer=T)
#signature
mtext(text=expression("- dimiter " * phantom(".eu") * " -"),
side=1, line=2, outer=T, font=1, family='Cairo', col=dark.color, cex=1*n, adj=1, padj=1)
mtext(text=expression(phantom("- dimiter ") * ".eu" * phantom(" -")),
side=1, line=2, outer=T, font=1, family='Cairo', col=main.color3, cex=1*n, adj=1, padj=1)
#data
mtext(text=expression("Data: " * phantom(" Own extraction from EUR-Lex")),
side=1, line=2, outer=T, font=1, family='Cairo', col=dark.color, cex=1*n, adj=0, padj=1)
mtext(text=expression(phantom("Data: ") * " Own extraction from EUR-Lex"),
side=1, line=2, outer=T, font=1, family='Cairo', col=main.color3, cex=1*n, adj=0, padj=1)
text('Eighth European Parliament', x=14, y=895, cex=1.2*n, col=dark.color, font=3)
text('Seventh European Parliament', x=4, y=895, cex=1.2*n, col=dark.color, font=3)
dev.off()
|
/scripts/05_6 viz com all acts types hd.R
|
no_license
|
demetriodor/eur-lex
|
R
| false
| false
| 4,221
|
r
|
n=3
png('./figures/F6_com_allacts_types_hd.png', width=1200*n, height=900*n, res=96)
par(oma=c(6,5,22,4),
    mar=c(2,1,1,1),
bg=background.color,
bty='n',
family='Montserrat')
cdirs$cdirs<-as.numeric(as.character(cdirs$cdirs))
### main plot with dots
plot(xlim=c(1,18), ylim=c(0,900), NULL, axes=F, xlab="", ylab="", main='', cex.main=1.4, col.main=dark.color, font.main=2)
axis (2, font=1, tck=0.1, col=background.color, col.axis=dark.color, at=seq(0,900,100),line=-0.5, labels=seq(0,900,100), cex.axis=1.2*n)
abline(h=seq(0,900,100), col='grey')
abline(v=c(8.5 ,18.5), col='grey')
axis (1, font=1, tck=-0.1, col=background.color, col.axis=dark.color, at=1:18,line=-1.5, labels=regs$year.sem, cex.axis=1*n)
#lines (x=1:30, y = dirs$dirs, lwd=3, col=main.color3)
rect (xleft =seq(0.6, 17.6,1), xright =seq(1.4, 18.4,1), ybottom=rep(0,18), ytop=regs$regs, border=main.color2, col=main.color2)
rect (xleft =seq(0.68, 17.68,1), xright =seq(1.32, 18.32,1), ybottom=rep(0,18), ytop=regsi$regsi, border=main.color3, col=main.color3)
rect (xleft =seq(0.68, 17.68,1), xright =seq(1.32, 18.32,1), ybottom=rep(0,18), ytop=regsd$regsd, border=main.color3, col=main.color)
lines (x=1:8, y=rep(mean(regs$regs[1:8]),8), lwd=8, lty=2, col=main.color2)
lines (x=9:18, y=rep(mean(regs$regs[9:18]),10), lwd=8, lty=2, col=main.color2)
lines (x=1:8, y=rep(mean(regsi$regsi[1:8]),8), lwd=8, lty=2, col=main.color3)
lines (x=9:18, y=rep(mean(regsi$regsi[9:18]),10), lwd=8, lty=2, col=main.color3)
lines (x=1:8, y=rep(mean(regsd$regsd[1:8]),8), lwd=8, lty=2, col=main.color)
lines (x=9:18, y=rep(mean(regsd$regsd[9:18]),10), lwd=8, lty=2, col=main.color)
#title
mtext(expression(bold('Legislative Productivity of the European Union, 2004-2019: Types of acts')),
line=5*n+2, font=1, at=0.025, col=dark.color, cex=1.8*n, adj=0, padj=1, outer=T)
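# Multi-color text trick: the mtext calls below overprint the same string,
# with phantom() blanking all but one segment per call so that each segment
# can be drawn in its own color.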
mtext(expression(phantom ('Red bars ') * ' show the number of all legislative acts adopted by the Commission. ' * phantom (' Green bars ') * 'show only'),
line=2.5*n+2, font=3, at=0.025, col=dark.color, cex=1.2*n, adj=0, padj=1, outer=T)
mtext(expression('Red bars ' * phantom (' show the number of all legislative acts adopted by the Commission. Green bars show only')),
line=2.5*n+2, font=3, at=0.025, col=main.color2, cex=1.2*n, adj=0, padj=1, outer=T)
mtext(expression(phantom ('Red bars show the number of all legislative acts adopted by the Commission. ') * ' Green bars ' * phantom('show only')),
line=2.5*n+2, font=3, at=0.025, col=main.color, cex=1.2*n, adj=0, padj=1, outer=T)
mtext(expression('delegated ones and ' * phantom (' blue bars') * ' add implementing ones. The ' * phantom (' ----- ') * ' lines show the averages per EP term.'),
line=1*n+2, font=3, at=0.025, col=dark.color, cex=1.2*n, adj=0, padj=1, outer=T)
mtext(expression(phantom('delegated ones and blue bars add implementing ones. The ') * ' ----- ' * phantom(' lines show the averages per EP term.')),
line=1*n+2, font=3, at=0.025, col=main.color2, cex=1.2*n, adj=0, padj=1, outer=T)
mtext(expression(phantom('delegated ones and ') * ' blue bars' * phantom (' add implementing ones. The ----- lines show the averages per EP term.')),
line=1*n+2, font=3, at=0.025, col=main.color3, cex=1.2*n, adj=0, padj=1, outer=T)
#signature
mtext(text=expression("- dimiter " * phantom(".eu") * " -"),
side=1, line=2, outer=T, font=1, family='Cairo', col=dark.color, cex=1*n, adj=1, padj=1)
mtext(text=expression(phantom("- dimiter ") * ".eu" * phantom(" -")),
side=1, line=2, outer=T, font=1, family='Cairo', col=main.color3, cex=1*n, adj=1, padj=1)
#data
mtext(text=expression("Data: " * phantom(" Own extraction from EUR-Lex")),
side=1, line=2, outer=T, font=1, family='Cairo', col=dark.color, cex=1*n, adj=0, padj=1)
mtext(text=expression(phantom("Data: ") * " Own extraction from EUR-Lex"),
side=1, line=2, outer=T, font=1, family='Cairo', col=main.color3, cex=1*n, adj=0, padj=1)
text('Eighth European Parliament', x=14, y=895, cex=1.2*n, col=dark.color, font=3)
text('Seventh European Parliament', x=4, y=895, cex=1.2*n, col=dark.color, font=3)
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bin2factor.R
\name{step_bin2factor}
\alias{step_bin2factor}
\alias{tidy.step_bin2factor}
\title{Create a Factor from a Dummy Variable}
\usage{
step_bin2factor(recipe, ..., role = NA, trained = FALSE,
levels = c("yes", "no"), ref_first = TRUE, columns = NULL,
skip = FALSE, id = rand_id("bin2factor"))
\method{tidy}{step_bin2factor}(x, ...)
}
\arguments{
\item{recipe}{A recipe object. The step will be added to the
sequence of operations for this recipe.}
\item{...}{Selector functions that choose which variables will
be converted. See \code{\link[=selections]{selections()}} for more details. For
the \code{tidy} method, these are not currently used.}
\item{role}{Not used by this step since no new variables are
created.}
\item{trained}{A logical to indicate if the quantities for
preprocessing have been estimated.}
\item{levels}{A length-2 character vector that indicates the
factor levels for the 1's (in the first position) and the 0's
(second)}
\item{ref_first}{Logical. Should the first level, which replaces
1's, be the factor reference level?}
\item{columns}{A vector with the selected variable names. This
is \code{NULL} until computed by \code{\link[=prep.recipe]{prep.recipe()}}.}
\item{skip}{A logical. Should the step be skipped when the
recipe is baked by \code{\link[=bake.recipe]{bake.recipe()}}? While all operations are baked
when \code{\link[=prep.recipe]{prep.recipe()}} is run, some operations may not be able to be
conducted on new data (e.g. processing the outcome variable(s)).
Care should be taken when using \code{skip = TRUE} as it may affect
the computations for subsequent operations}
\item{id}{A character string that is unique to this step to identify it.}
\item{x}{A \code{step_bin2factor} object.}
}
\value{
An updated version of \code{recipe} with the new step
added to the sequence of existing steps (if any). For the
\code{tidy} method, a tibble with columns \code{terms} (the
columns that will be affected).
}
\description{
\code{step_bin2factor} creates a \emph{specification} of a
recipe step that will create a two-level factor from a single
dummy variable.
}
\details{
This operation may be useful for situations where a
binary piece of information may need to be represented as
categorical instead of numeric. For example, naive Bayes models
would do better to have factor predictors so that the binomial
distribution is modeled instead of a Gaussian probability
density of numeric binary data. Note that the numeric data is
only verified to be numeric (and does not count levels).
}
\examples{
data(covers)
rec <- recipe(~ description, covers) \%>\%
step_regex(description, pattern = "(rock|stony)", result = "rocks") \%>\%
step_regex(description, pattern = "(rock|stony)", result = "more_rocks") \%>\%
step_bin2factor(rocks)
tidy(rec, number = 3)
rec <- prep(rec, training = covers)
results <- bake(rec, new_data = covers)
table(results$rocks, results$more_rocks)
tidy(rec, number = 3)
}
\concept{preprocessing dummy_variables factors}
\keyword{datagen}
|
/man/step_bin2factor.Rd
|
no_license
|
Athospd/recipes
|
R
| false
| true
| 3,104
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bin2factor.R
\name{step_bin2factor}
\alias{step_bin2factor}
\alias{tidy.step_bin2factor}
\title{Create a Factor from a Dummy Variable}
\usage{
step_bin2factor(recipe, ..., role = NA, trained = FALSE,
levels = c("yes", "no"), ref_first = TRUE, columns = NULL,
skip = FALSE, id = rand_id("bin2factor"))
\method{tidy}{step_bin2factor}(x, ...)
}
\arguments{
\item{recipe}{A recipe object. The step will be added to the
sequence of operations for this recipe.}
\item{...}{Selector functions that choose which variables will
be converted. See \code{\link[=selections]{selections()}} for more details. For
the \code{tidy} method, these are not currently used.}
\item{role}{Not used by this step since no new variables are
created.}
\item{trained}{A logical to indicate if the quantities for
preprocessing have been estimated.}
\item{levels}{A length-2 character vector that indicates the
factor levels for the 1's (in the first position) and the 0's
(second)}
\item{ref_first}{Logical. Should the first level, which replaces
1's, be the factor reference level?}
\item{columns}{A vector with the selected variable names. This
is \code{NULL} until computed by \code{\link[=prep.recipe]{prep.recipe()}}.}
\item{skip}{A logical. Should the step be skipped when the
recipe is baked by \code{\link[=bake.recipe]{bake.recipe()}}? While all operations are baked
when \code{\link[=prep.recipe]{prep.recipe()}} is run, some operations may not be able to be
conducted on new data (e.g. processing the outcome variable(s)).
Care should be taken when using \code{skip = TRUE} as it may affect
the computations for subsequent operations}
\item{id}{A character string that is unique to this step to identify it.}
\item{x}{A \code{step_bin2factor} object.}
}
\value{
An updated version of \code{recipe} with the new step
added to the sequence of existing steps (if any). For the
\code{tidy} method, a tibble with columns \code{terms} (the
columns that will be affected).
}
\description{
\code{step_bin2factor} creates a \emph{specification} of a
recipe step that will create a two-level factor from a single
dummy variable.
}
\details{
This operation may be useful for situations where a
binary piece of information may need to be represented as
categorical instead of numeric. For example, naive Bayes models
would do better to have factor predictors so that the binomial
distribution is modeled in stead of a Gaussian probability
density of numeric binary data. Note that the numeric data is
only verified to be numeric (and does not count levels).
}
\examples{
data(covers)
rec <- recipe(~ description, covers) \%>\%
step_regex(description, pattern = "(rock|stony)", result = "rocks") \%>\%
step_regex(description, pattern = "(rock|stony)", result = "more_rocks") \%>\%
step_bin2factor(rocks)
tidy(rec, number = 3)
rec <- prep(rec, training = covers)
results <- bake(rec, new_data = covers)
table(results$rocks, results$more_rocks)
tidy(rec, number = 3)
}
\concept{preprocessing dummy_variables factors}
\keyword{datagen}
|
require(shiny)
require(shinyjs)
require(shinyWidgets)
require(shinyFiles)
require(shinyBS)
# Helper: returns n consecutive HTML line breaks for spacing UI elements
linebreaks <- function(n){HTML(strrep(br(), n))}
shinyUI(fluidPage(theme = "bootstrap.css",
useShinyjs(),
titlePanel("Welcome to the Stock Synthesis data-limited tool (SS-DL tool)"),
h4(p(strong("This tool uses the Stock Synthesis framework to implement a ",tags$a(href="javascript:window.open('SS-DL-approaches.html', '_blank','width=600,height=400')", "variety of types"), "of models."))),
h5(p("Any suggested changes or requests? Please submit an issue with the recommendation" ,tags$a(href="https://github.com/shcaba/SS-DL-tool/issues", "here"))),
br(),
sidebarLayout(
sidebarPanel(
style = "position:fixed;width:30%;height: 90vh; overflow-y: scroll;",
shinyjs::hidden(wellPanel(id="Data_panel",
h4(strong("Choose data file")),
fluidRow(column(width=12,fileInput('file2', 'Catch time series',
accept = c(
'text/csv',
'text/comma-separated-values',
'text/tab-separated-values',
'text/plain',
'.csv'
)
))),
fluidRow(column(width=12,fileInput('file1', 'Length composition',
accept = c(
'text/csv',
'text/comma-separated-values',
'text/tab-separated-values',
'text/plain',
'.csv'
)
))),
fluidRow(column(width=12,fileInput('file3', 'Age composition',
accept = c(
'text/csv',
'text/comma-separated-values',
'text/tab-separated-values',
'text/plain',
'.csv'
)
))),
#Mute for now, pull back in when index methods are ready
fileInput('file4', 'Abundance index',
accept = c(
'text/csv',
'text/comma-separated-values',
'text/tab-separated-values',
'text/plain',
'.csv'
)
),
h4(strong("Clear data files")),
fluidRow(column(width=3,actionButton("reset_ct", "Catches")),
column(width=3,actionButton("reset_lt", "Length")),
column(width=3,actionButton("reset_age", "Ages")),
column(width=3,actionButton("reset_index", "Index"))),
)
),
shinyjs::hidden(wellPanel(id="Existing_files",
fluidRow(column(width=10,checkboxInput("user_model","Use existing model files?",FALSE))),
h5(em("Do not use this option with catch only models.")),
h5(em("Make sure the model run is in the Scenarios folder. Put that folder name in the Scenario name input and run the model.")),
h5(em("Using an existing model allows you to do run complex and custom model runs outside the options of the SS-DL tool, but still use some of the quick features.")),
h5(em("Examples are applying jitters or any of the additional SS options that do not modify the data or control files.")),
h5(em("To do sensitivity runs for pre-existing models, make a copy of the folder and re-name it, then make desired changes in the data and/or control files.")),
)
),
shinyjs::hidden(wellPanel(id="panel_Ct_F_LO",
h4(strong("Use constant catch or estimate fishing mortality directly?")),
h5(em("Using constant catch assumes the same catch in all years in order to fit the length composition data (similar to LBSPR, but the model integrates the fit of each year, not each year separately)")),
h5(em("It provides a long-term average response (F) to estimating stock status, and thus useful with non-continous years of sampling.")),
h5(em("Fishing rate can also be directly estimated from the length composition with no assumption in catches (similar to the LIME approach).")),
h5(em("This approach is a more variable reponse to estimating stock status, and is best used with continuous years of contemporary data. Recruitment can also be estimated.")),
h5(em("Recruitment can also be estimated in both approaches.")),
fluidRow(column(width=10,selectInput("Ct_F_LO_select","Approach",c("Choose an option","Constant Catch","Estimate F")))),
)
),
shinyjs::hidden(wellPanel(id="panel_ct_wt_LO",
h4(strong("Weight fleet lengths by relative catch")),
h5(em("The relative catch contribution needs specification with multiple length-only fleets")),
h5(em("Example: Two fleets, with fleet 2 catching 2 times the amount as fleet 1, the entry would be 1,2.")),
h5(em("Each entry will be relative to the highest value.")),
fluidRow(column(width=10,textInput("Wt_fleet_Ct","Relative catch values",value=""))),
)
),
shinyjs::hidden(wellPanel(id="panel_data_wt_lt",
h4(strong("Data-weighting")),
h5(em("Data weighting balances information content of biological data with model structure")),
h5(em("Data weighting balances across factors (e.g, fleets, sex, etc.)")),
h5(em("The default value is equally weighting among fleets based on input effective sample size inputs")),
h5(em("If using an existing model, chose 'None' to maintain the same weighting as the existing model")),
# fluidRow(column(width=10,prettyCheckbox(
# inputId = "dirichlet",
# label = "Use Dirichlet weighting?",
# value=FALSE,
# shape="curve",
# icon = icon("check"),
# animation="smooth"),
# bigger=TRUE),
# fill=TRUE),
awesomeRadio(
inputId = "Data_wt",
label = "Choose data-weighting option",
choices = c("None","Dirichlet", "Francis", "McAllister-Ianelli"),
selected = "None",
status = "warning"
)
# fluidRow(column(width=6, prettyCheckbox(
# inputId = "dirichlet",
# label = "Use Dirichlet weighting?",
# shape = "round",
# outline = TRUE,
# status = "info"))),
# fluidRow(column(width=6, prettyCheckbox(
# inputId = "Francis_wt",
# label = "Use Francis weighting?",
# shape = "round",
# outline = TRUE,
# status = "info"))),
# fluidRow(column(width=6, prettyCheckbox(
# inputId = "MI_wt",
# label = "Use McAllister and Ianelli?",
# shape = "round",
# outline = TRUE,
# status = "info"))),
# h5(em("After the first run, you can check the Francis or harmonic mean methods for suggested weightings")),
# fluidRow(column(width=6,textInput("Lt_datawts", "Lengths weights by fleet", value=""))),
)
),
shinyjs::hidden(wellPanel(id="panel_SSLO_LH",
h4(strong("Life history inputs")),
wellPanel(id="panel_SSLO_fixed",
h5(em("Female")),
fluidRow(column(width=6,numericInput("M_f", "Natural mortality", value=NA,min=0, max=10000, step=0.00001))),
fluidRow(column(width=6,numericInput("Linf_f", "Asymptotic size (Linf)", value=NA,min=0, max=10000, step=0.001)),
column(width=6,numericInput("k_f","Growth coefficient k", value=NA,min=0, max=10000, step=0.00001))),
fluidRow(column(width=6,numericInput("t0_f","Age at length 0 (t0)", value=NA,min=-100, max=10000, step=0.001)),
column(width=6,textInput("CV_lt_f","CV at length (young then old)", value="0.1,0.1"))),
fluidRow(column(width=6,numericInput("L50_f", "Length at 50% maturity", value=NA,min=0, max=10000, step=0.001)),
column(width=6,numericInput("L95_f","Length at 95% maturity", value=NA,min=0, max=10000, step=0.001))),
#h5("Fecundity-length relationship F=aL^b. Set equal to the W-L relationship to get fecundity equivalent to spawning biomass"),
#fluidRow(column(width=6,numericInput("Fec_a_f", "Coefficient a", value=0.00001,min=0, max=10000, step=-0.01)),
# column(width=6,numericInput("Fec_b_f","Exponent b", value=3,min=0, max=10000, step=0.01))),
# fluidRow(column(width=6,numericInput("WLa_f", "Weight-Length alpha", value=0.00001,min=0, max=10000, step=0.000000001)),
# column(width=6,numericInput("WLb_f","Weight-length beta", value=3,min=0, max=10000, step=0.01))),
),
fluidRow(column(width=6,checkboxInput("male_parms","Males specific values?",FALSE)),
column(width=6,checkboxInput("male_offset","Males offset from females (log(m/f)?",FALSE))),
# fluidRow(column(width=10,prettyCheckbox("male_parms","Males specific values?",
# value=FALSE,
# shape="curve",
# icon = icon("check"),
# animation="smooth"),
# bigger=TRUE),
# fill=TRUE,
# status="default"),
#fluidRow(column(width=7, h3("Males specific values?")),column(width=2,checkboxInput("male_parms","Males specific values?",FALSE,width="150%"))),
wellPanel(
uiOutput("Male_parms_inputs_label"),
uiOutput("Male_parms_inputs1"),
uiOutput("Male_parms_inputs2"),
uiOutput("Male_parms_inputs3")
# uiOutput("Male_parms_inputs4")
),
)
),
shinyjs::hidden(wellPanel(id="panel_SS_LH_fixed_est_tog",
fluidRow(column(width=10,switchInput("est_parms","Estimate parameters?",
value=FALSE,
onLabel = "YES",
offLabel = "NO",
onStatus = "success",
offStatus = "danger"))),
h5("Parameters can either be fixed (i.e., set to specific value) or estimated."),
h5("Estimating parameters is a data-based approach to determining life history values"),
h5("Estimating parameters can also propagate parameter uncertainty into derived model outputs"),
br(),
h5("When estimating parameters with catch and length (SS-CL) models consider:"),
tags$ul(tags$li(h5(p("It is recommended to run the model first by fixing parameters.")))),
tags$ul(tags$li(h5(p("Then run likelihood profiles to see if the data contain any information on parameters.")))),
tags$ul(tags$li(h5(p("Parameters that seem informed by the data (i.e., result in realistic values) are good candidates for estimation.")))),
tags$ul(tags$li(h5(p("The most likely parameters to have information from fishery-based lengths are Linf and M.")))),
h5("Not all parameters need be estimated. Fix parameters by turning the phase negative (e.g., -1)"),
h5(p("Natural mortality is an often difficult value to obtain. Consider using",tags$a(href="https://connect.fisheries.noaa.gov/natural-mortality-tool/", "The Natural Mortality Tool"), "to either obtain natural mortality values or developing a prior for use in estimating natural mortality. The Github repository for it can be found",tags$a(href="https://github.com/shcaba/Natural-Mortality-Tool", "here"),".")),
br(),
h5("Load life history values instead of inputting them?"),
uiOutput("LH_load_file"),
)),
shinyjs::hidden(wellPanel(id="panel_SS_LH_fixed",
h4(strong("Life history inputs")),
wellPanel(id="panel_SS_fixed",
h5(em("Female")),
fluidRow(column(width=6,numericInput("M_f_fix", "Natural mortality", value=NA,min=0, max=10000, step=0.00001))),
fluidRow(column(width=6,numericInput("Linf_f_fix", "Asymptotic size (Linf)", value=NA,min=0, max=10000, step=0.001)),
column(width=6,numericInput("k_f_fix","Growth coefficient k", value=NA,min=0, max=10000, step=0.00001))),
fluidRow(column(width=6,numericInput("t0_f_fix","Age at length 0 (t0)", value=NA,min=0, max=10000, step=0.001)),
column(width=6,textInput("CV_lt_f_fix","CV at length (young then old)", value="0.1,0.1"))),
fluidRow(column(width=6,numericInput("L50_f_fix", "Length at 50% maturity", value=NA,min=0, max=10000, step=0.001)),
column(width=6,numericInput("L95_f_fix","Length at 95% maturity", value=NA,min=0, max=10000, step=0.001))),
h5("Length-weight relationship W=aL^b. Weight is in kg and length in cm."),
fluidRow(column(width=6,numericInput("WLa_f_fix", "a in W=aL^b", value=0.00001,min=0, max=10000, step=0.000000001)),
column(width=6,numericInput("WLb_f_fix","b in W=aL^b", value=3,min=0, max=10000, step=0.0001))),
h5("Length-fecundity relationship F=aL^b. Fecundity is measured in number of eggs or pups. Set equal to the length-weight relationship to get fecundity equivalent to spawning biomass."),
fluidRow(column(width=6,numericInput("Fec_a_f_fix", "a in F=aL^b", value=0.00001,min=0, max=10000, step=0.000000001)),
column(width=6,numericInput("Fec_b_f_fix","b in F=aL^b", value=3,min=0, max=10000, step=0.0001))),
),
fluidRow(column(width=10,checkboxInput("male_parms_fix","Males specific values?",FALSE)),
column(width=10,checkboxInput("male_offset_fix","Males offset from females (log(m/f)?",FALSE))),
#fluidRow(column(width=7, h3("Males specific values?")),column(width=2,checkboxInput("male_parms","Males specific values?",FALSE,width="150%"))),
wellPanel(
uiOutput("Male_parms_inputs_label_fix"),
uiOutput("Male_parms_inputs1_fix"),
uiOutput("Male_parms_inputs2_fix"),
uiOutput("Male_parms_inputs3_fix"),
uiOutput("Male_parms_inputs4_fix")
),
)
),
shinyjs::hidden(wellPanel(id="panel_SS_LH_est",
h4(strong("Life history inputs")),
# fluidRow(column(width=10,switchInput("est_parms2","Estimate parameters?",value=TRUE))),
wellPanel(id="panel_SS_est",
h4(em("Female")),
dropdownButton(
selectInput("M_f_prior","Prior type",c("no prior","symmetric beta", "beta","lognormal","gamma","normal")),
numericInput("M_f_mean", "Mean", value=NA,min=0, max=10000, step=0.00001),
numericInput("M_f_SD", "SD", value=0,min=0, max=10000, step=0.00001),
numericInput("M_f_phase", "Phase", value=-1,min=-999, max=10, step=1),
circle = FALSE, right=TRUE, status = "danger", icon = icon("skull-crossbones"), width = "300px",label="Natural mortality"
),
br(),
h5(strong("Growth")),
dropdownButton(
selectInput("Linf_f_prior","Prior type",c("no prior","symmetric beta", "beta","lognormal","gamma","normal")),
numericInput("Linf_f_mean", "Mean", value=NA,min=0, max=10000, step=0.001),
numericInput("Linf_f_SD", "SD", value=0,min=0, max=10000, step=0.0001),
numericInput("Linf_f_phase", "Phase", value=-1,min=-999, max=10, step=1),
circle = FALSE, right=TRUE, status = "danger", icon = icon("infinity"), width = "300px",label="Linf: Asymptotic size"
),
br(),
dropdownButton(
selectInput("k_f_prior","Prior type",c("no prior","symmetric beta", "beta","lognormal","gamma","normal")),
numericInput("k_f_mean", "Mean", value=NA,min=0, max=10000, step=0.00001),
numericInput("k_f_SD", "SD", value=0,min=0, max=10000, step=0.00001),
numericInput("k_f_phase", "Phase", value=-1,min=-999, max=10, step=1),
circle = FALSE, right=TRUE, status = "danger", icon = icon("ruler-horizontal"), width = "300px",label="k: VB growth coefficient"
),
br(),
dropdownButton(
selectInput("t0_f_prior","Prior type",c("no prior","symmetric beta", "beta","lognormal","gamma","normal")),
numericInput("t0_f_mean", "Mean", value=NA,min=-100, max=10000, step=0.001),
numericInput("t0_f_SD", "SD", value=0,min=0, max=10000, step=0.001),
numericInput("t0_f_phase", "Phase", value=-1,min=-999, max=10, step=1),
circle = FALSE, right=TRUE, status = "danger", icon = icon("baby-carriage"), width = "300px",label="t0: Age at size 0"
),
br(),
dropdownButton(
selectInput("CV_lt_f_young_prior","Prior type",c("no prior","symmetric beta", "beta","lognormal","gamma","normal")),
numericInput("CV_lt_f_young_mean", "Mean", value=0.1,min=0, max=10000, step=0.0001),
numericInput("CV_lt_f_young_SD", "SD", value=0,min=0, max=10000, step=0.0001),
numericInput("CV_lt_f_young_phase", "Phase", value=-1,min=-999, max=10, step=1),
circle = FALSE, right=TRUE, status = "danger", icon = icon("dice"), width = "300px",label="CV at length (young)"
),
br(),
dropdownButton(
selectInput("CV_lt_f_old_prior","Prior type",c("no prior","symmetric beta", "beta","lognormal","gamma","normal")),
numericInput("CV_lt_f_old_mean", "Mean", value=0.1,min=0, max=10000, step=0.0001),
numericInput("CV_lt_f_old_SD", "SD", value=0,min=0, max=10000, step=0.0001),
numericInput("CV_lt_f_old_phase", "Phase", value=-1,min=-999, max=10, step=1),
circle = FALSE, right=TRUE, status = "danger", icon = icon("dice"), width = "300px",label="CV at length (old)"
),
h5(strong("Maturity and weight-length relationships")),
fluidRow(column(width=6,numericInput("L50_f_est", "Length at 50% maturity", value=NA,min=0, max=10000, step=0.001)),
column(width=6,numericInput("L95_f_est","Length at 95% maturity", value=NA,min=0, max=10000, step=0.001))),
h5("Length-weight relationship W=aL^b. Weight is in kg and length in cm."),
fluidRow(column(width=6,numericInput("WLa_f_est", "a in W=aL^b", value=0.00001,min=0, max=10000, step=0.000000001)),
column(width=6,numericInput("WLb_f_est","b in W=aL^b", value=3,min=0, max=10000, step=0.001))),
h5("Length-fecundity relationship F=aL^b. Fecundity is measured in number of eggs or pups. Set equal to the length-weight relationship to get fecundity equivalent to spawning biomass."),
fluidRow(column(width=6,numericInput("Fec_a_f_est", "a in F=aL^b", value=0.00001,min=0, max=10000, step=0.000000001)),
column(width=6,numericInput("Fec_b_f_est","b in F=aL^b", value=3,min=0, max=10000, step=0.001))),
),
fluidRow(column(width=6,checkboxInput("male_parms_est","Males specific values?",FALSE)),
column(width=6,checkboxInput("male_offset_est","Males offset from females (log(m/f))?",FALSE))),
#fluidRow(column(width=7, h3("Males specific values?")),column(width=2,checkboxInput("male_parms","Males specific values?",FALSE,width="150%"))),
wellPanel(
uiOutput("Male_parms_inputs_label_est"),
uiOutput("Male_parms_inputs_M_est"),
uiOutput("Male_parms_inputs_space1"),
uiOutput("Male_parms_inputs_Growth_label"),
uiOutput("Male_parms_inputs_Linf_est"),
uiOutput("Male_parms_inputs_space2"),
uiOutput("Male_parms_inputs_k_est"),
uiOutput("Male_parms_inputs_space3"),
uiOutput("Male_parms_inputs_t0_est"),
uiOutput("Male_parms_inputs_space4"),
uiOutput("Male_parms_inputs_CV_est_young"),
br(),
uiOutput("Male_parms_inputs_CV_est_old"),
uiOutput("Male_parms_inputs_space5"),
uiOutput("Male_parms_inputs_WL_est")
),
)
),
# shinyjs::hidden(wellPanel(id="panel_SS_est",
# h3("Life history inputs"),
# fluidRow(column(width=10,switchInput("est_parms2","Fix parameters?"))),
# wellPanel(
# h4(em("Female")),
# fluidRow(column(width=6,numericInput("Nages","Max. age", value=NA,min=1, max=1000, step=1))),
# h5(strong("Natural mortality")),
# fluidRow(column(width=4,style='padding:1px;',align="center", selectInput("M_prior","Prior type",c("lognormal","truncated normal","uniform","beta"))),
# column(width=3,style='padding:2px;',align="center",numericInput("M_f_mean", "Mean", value=NA,min=0, max=10000, step=0.001)),
# column(width=3,style='padding:2px;',align="center",numericInput("M_f_SD", "SD", value=0,min=0, max=10000, step=0.001)),
# column(width=2,style='padding:2px;',align="center",numericInput("M_f_phase", "Phase", value=-1,min=-999, max=10, step=0.001))),
# h5(strong("Growth")),
# h5(strong("Linf")),
# fluidRow(column(width=4,style='padding:1px;',align="center",selectInput("Linf_f_prior","Prior type",c("lognormal","truncated normal","uniform","beta"))),
# column(width=3,style='padding:2px;',align="center",numericInput("Linf_f_mean", "Mean", value=NA,min=0, max=10000, step=0.001)),
# column(width=3,style='padding:2px;',align="center",numericInput("Linf_f_SD", "SD", value=0,min=0, max=10000, step=0.001)),
# column(width=2,style='padding:2px;',align="center",numericInput("Linf_f_phase", "Phase", value=-1,min=-999, max=10, step=0.001))),
# h5(strong("k")),
# fluidRow(column(width=4,style='padding:2px;',selectInput("k_f_prior","Prior type",c("lognormal","truncated normal","uniform","beta"))),
# column(width=3,style='padding:2px;',numericInput("k_f_mean", "Mean", value=NA,min=0, max=10000, step=0.001)),
# column(width=3,style='padding:2px;',numericInput("k_f_SD", "SD", value=0,min=0, max=10000, step=0.001)),
# column(width=2,style='padding:2px;',align="center",numericInput("k_f_phase", "Phase", value=-1,min=-999, max=10, step=0.001))),
# h5(strong("t0")),
# fluidRow(column(width=4,style='padding:2px;',selectInput("t0_f_prior","Prior type",c("lognormal","truncated normal","uniform","beta"))),
# column(width=3,style='padding:2px;',numericInput("t0_f_mean", "Mean", value=NA,min=0, max=10000, step=0.001)),
# column(width=3,style='padding:2px;',numericInput("t0_f_SD", "SD", value=0,min=0, max=10000, step=0.001)),
# column(width=2,style='padding:2px;',align="center",numericInput("t0_f_phase", "Phase", value=-1,min=-999, max=10, step=0.001))),
# h5(strong("Length CV")),
# fluidRow(column(width=4,style='padding:2px;',selectInput("CV_f_prior","Prior type",c("lognormal","truncated normal","uniform","beta"))),
# column(width=3,style='padding:2px;',numericInput("CV_f_mean", "Mean", value=0.1,min=0, max=10000, step=0.001)),
# column(width=3,style='padding:2px;',numericInput("CV_f_SD", "SD", value=0,min=0, max=10000, step=0.001)),
# column(width=2,style='padding:2px;',align="center",numericInput("CV_f_phase", "Phase", value=-1,min=-999, max=10, step=0.001))),
# h5(strong("Maturity and weight-length relationships")),
# fluidRow(column(width=6,numericInput("L50_f", "Length at 50% maturity", value=NA,min=0, max=10000, step=0.01)),
# column(width=6,numericInput("L95_f","Length at 95% maturity", value=NA,min=0, max=10000, step=0.01))),
# fluidRow(column(width=6,numericInput("Fec_a", "Wt-based fec coeff", value=1,min=0, max=10000, step=-0.01)),
# column(width=6,numericInput("Fec_b","Wt-based fec exp", value=1,min=0, max=10000, step=0.01))),
# fluidRow(column(width=6,numericInput("WLa_f", "Weight-Length alpha", value=0.00001,min=0, max=10000, step=0.000000001)),
# column(width=6,numericInput("WLb_f","Weight-length beta", value=3,min=0, max=10000, step=0.01))),
# ),
# fluidRow(column(width=10,checkboxInput("male_parms_est","Males specific values?",FALSE))),
# #fluidRow(column(width=7, h3("Males specific values?")),column(width=2,checkboxInput("male_parms","Males specific values?",FALSE,width="150%"))),
# wellPanel(
# uiOutput("Male_parms_inputs_label_est"),
# uiOutput("Male_parms_inputs1_est"),
# uiOutput("Male_parms_inputs2_est"),
# uiOutput("Male_parms_inputs3_est"),
# uiOutput("Male_parms_inputs4_est")
# ),
# )
# ),
shinyjs::hidden(wellPanel(id="panel_SSS",
h4(strong("Life history inputs")),
h5(em("If using the uniform prior, low and high range go in the mean and SD input, respectively.")),
wellPanel(
h4(em("Female")),
h5(strong("Natural mortality")),
dropdownButton(
selectInput("M_prior_sss","Prior type",c("lognormal","normal","uniform","no prior")),
numericInput("M_f_mean_sss", "Mean", value=NA,min=0, max=10000, step=0.00001),
numericInput("M_f_SD_sss", "SD", value=0.44,min=0, max=10000, step=0.00001),
circle = FALSE, right=TRUE, status = "danger", icon = icon("skull-crossbones"), width = "300px",label="Natural mortality"
),
h5(strong("Growth")),
dropdownButton(
selectInput("Linf_f_prior_sss","Prior type",c("no prior","normal")),
numericInput("Linf_f_mean_sss", "Mean", value=NA,min=0, max=10000, step=0.001),
numericInput("Linf_f_SD_sss", "SD", value=0,min=0, max=10000, step=0.001),
circle = FALSE, right=TRUE, status = "danger", icon = icon("infinity"), width = "300px",label="Linf: Asymptotic size"
),
br(),
dropdownButton(
selectInput("k_f_prior_sss","Prior type",c("no prior","normal")),
numericInput("k_f_mean_sss", "Mean", value=NA,min=0, max=10000, step=0.00001),
numericInput("k_f_SD_sss", "SD", value=0,min=0, max=10000, step=0.00001),
circle = FALSE, right=TRUE, status = "danger", icon = icon("ruler-horizontal"), width = "300px",label="k: VB growth coefficient"
),
br(),
fluidRow(column(width=6,numericInput("Linf_k_cor_sss", "Correlation between Linf and k", value=-0.9,min=-1, max=1, step=0.001))),
br(),
dropdownButton(
selectInput("t0_f_prior_sss","Prior type",c("no prior","normal")),
numericInput("t0_f_mean_sss", "Mean", value=NA,min=-100, max=10000, step=0.001),
numericInput("t0_f_SD_sss", "SD", value=0,min=0, max=1000, step=0.001),
circle = FALSE, right=TRUE, status = "danger", icon = icon("baby-carriage"), width = "300px",label="t0: Age at size 0"
),
h5(em("Length CV")),
dropdownButton(
selectInput("CV_lt_f_young_prior_sss","Prior type",c("no prior")),
numericInput("CV_lt_f_young_mean_sss", "Mean", value=0.1,min=0, max=10000, step=0.0001),
numericInput("CV_lt_f_young_SD_sss", "SD", value=0,min=0, max=10000, step=0.0001),
circle = FALSE, right=TRUE, status = "danger", icon = icon("dice"), width = "300px",label="CV at length"
),
dropdownButton(
selectInput("CV_lt_f_old_prior_sss","Prior type",c("no prior")),
numericInput("CV_lt_f_old_mean_sss", "Mean", value=0.1,min=0, max=10000, step=0.0001),
numericInput("CV_lt_f_old_SD_sss", "SD", value=0,min=0, max=10000, step=0.0001),
circle = FALSE, right=TRUE, status = "danger", icon = icon("dice"), width = "300px",label="CV at length"
),
h5(strong("Maturity and weight-length relationships")),
fluidRow(column(width=6,numericInput("L50_f_sss", "Length at 50% maturity", value=NA,min=0, max=10000, step=0.001)),
column(width=6,numericInput("L95_f_sss","Length at 95% maturity", value=NA,min=0, max=10000, step=0.001))),
h5("Length-weight relationship W=aL^b. Weight is in kg and length in cm."),
fluidRow(column(width=6,numericInput("WLa_f_sss", "a in W=aL^b", value=0.00001,min=0, max=10000, step=0.000000001)),
column(width=6,numericInput("WLb_f_sss","b in W=aL^b", value=3,min=0, max=10000, step=0.001))),
h5("Length-fecundity relationship F=aL^b. Fecundity is measured in number of eggs or pups. Set equal to the length-weight relationship to get fecundity equivalent to spawning biomass."),
fluidRow(column(width=6,numericInput("Fec_a_f_sss", "a in F=aL^b", value=0.00001,min=0, max=10000, step=0.000000001)),
column(width=6,numericInput("Fec_b_f_sss","b in F=aL^b", value=3,min=0, max=10000, step=0.001))),
),
fluidRow(column(width=10,checkboxInput("male_parms_SSS","Males specific values?",FALSE)),
column(width=10,checkboxInput("male_offset_SSS","Males offset to females (log(m/f)?",FALSE))),
#fluidRow(column(width=7, h3("Males specific values?")),column(width=2,checkboxInput("male_parms_SSS","Males specific values?",FALSE,width="150%"))),
wellPanel(
uiOutput("Male_parms_inputs_label_SSS"),
uiOutput("Male_parms_inputs_M_SSS"),
uiOutput("Male_parms_inputs_space1_SSS"),
uiOutput("Male_parms_inputs_Growth_label_SSS"),
uiOutput("Male_parms_inputs_Linf_SSS"),
uiOutput("Male_parms_inputs_space2_SSS"),
uiOutput("Male_parms_inputs_k_SSS"),
uiOutput("Male_parms_inputs_space3_SSS"),
uiOutput("Male_parms_inputs_t0_SSS"),
uiOutput("Male_parms_inputs_space4_SSS"),
uiOutput("Male_parms_inputs_CV_young_SSS"),
uiOutput("Male_parms_inputs_CV_old_SSS"),
uiOutput("Male_parms_inputs_WL_SSS")
),
)
),
#SSS Stock status input
shinyjs::hidden(wellPanel(id="panel_SS_stock_status",
h4(strong("Relative stock status")),
#wellPanel(
fluidRow(column(width=6,numericInput("status_year", "Relative stock status year", value=NA,min=1000, max=3000, step=1))),
dropdownButton(
selectInput("Depl_prior_sss","Prior type",c("beta","lognormal","truncated normal","uniform","no prior")),
numericInput("Depl_mean_sss", "Mean", value=NA,min=0.001, max=1, step=0.001),
numericInput("Depl_SD_sss", "SD", value=0.2,min=0, max=1000, step=0.001),
circle = FALSE, status = "danger", icon = icon("battery-half"), width = "300px",label="Relative Stock Status"
)
)
),
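                                        # Hedged sketch of the beta prior option above: a mean/SD pair can be
                                        # re-parameterized to beta shape parameters (method of moments; illustrative):
                                        # mu <- 0.4; sd <- 0.2
                                        # nu <- mu * (1 - mu) / sd^2 - 1
                                        # depl_draws <- rbeta(1000, mu * nu, (1 - mu) * nu)  # draws on (0, 1)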
################################
#Stock-recruitment/Productivity#
################################
shinyjs::hidden(wellPanel(id="panel_SSS_prod",
h4(strong("Stock-recruitment parameters")),
br(),
fluidRow(column(width=6,numericInput("lnR0_sss", "Initial recruitment (lnR0)", value=7,min=0.01, max=20, step=0.01))),
dropdownButton(
selectInput("h_prior_sss","Steepness",c("symmetric beta","beta","truncated normal","truncated lognormal","uniform","no prior")),
numericInput("h_mean_sss", "Mean", value=0.7,min=0.2, max=1, step=0.001),
numericInput("h_SD_sss", "SD", value=0.15,min=0, max=10000, step=0.001),
circle = FALSE, status = "danger", icon = icon("recycle"), width = "300px",label="Steepness"
),
)
),
shinyjs::hidden(wellPanel(id="panel_SS_LO_prod",
h4(strong("Stock-recruitment parameters")),
# wellPanel(
fluidRow(column(width=6,numericInput("h_LO","Steepness", value=0.7,min=0.2, max=1, step=0.01))),
# ),
)
),
shinyjs::hidden(wellPanel(id="panel_SS_prod_fixed",
h4(strong("Stock-recruitment parameters")),
# wellPanel(
fluidRow(column(width=6,numericInput("h","Steepness", value=0.7,min=0.2, max=1, step=0.01)),
column(width=6,numericInput("lnR0", "Initial recruitment (lnR0)", value=7,min=0.01, max=20, step=0.01))),
# ),
)
),
shinyjs::hidden(wellPanel(id="panel_SS_prod_est",
h4(strong("Stock-recruitment parameters")),
# wellPanel(
dropdownButton(
selectInput("h_ss_prior","Prior type",c("no prior","symmetric beta", "beta","lognormal","gamma","normal")),
numericInput("h_mean_ss", "Mean", value=0.7,min=0.2, max=1, step=0.001),
numericInput("h_SD_ss", "SD", value=0.15,min=0, max=10000, step=0.001),
numericInput("h_phase", "Phase", value=-1,min=-999, max=10, step=0.001),
circle = FALSE, status = "danger", icon = icon("recycle"), width = "300px",label="Steepness"
),
fluidRow(column(width=6,numericInput("lnR0_est", "Initial recruitment (lnR0)", value=9,min=0, max=20, step=0.01))),
)),
# fluidRow(column(width=4,style='padding:1px;',align="center", selectInput("h_ss_prior","Steepness",c("beta","symmetric beta","truncated normal","trunc lognormal","uniform"))),
# column(width=3,style='padding:2px;',align="center",numericInput("h_mean_ss", "Mean", value=0.7,min=0, max=10000, step=0.001)),
# column(width=3,style='padding:2px;',align="center",numericInput("h_SD_ss", "SD", value=0.15,min=0, max=10000, step=0.001)),
# column(width=2,style='padding:2px;',align="center",numericInput("h_phase", "Phase", value=-1,min=-999, max=10, step=0.001))),
# fluidRow(column(width=6,numericInput("lnR0_est", "Initial recruitment (lnR0)", value=9,min=0, max=20, step=0.01))),
# # ),
# )
# ),
#Recruitment estimation
shinyjs::hidden(wellPanel(id="panel_SS_recdevs",
fluidRow(column(width=10,checkboxInput("rec_choice","Estimate recruitment?",FALSE))),
wellPanel(
# fluidRow(column(width=8,offset=-10, h3("Estimate recruitment?")),column(width=6,checkboxInput("rec_choice","",FALSE))),
# fluidRow(column(width=8, h3("Estimate recruitment?")),column(width=4,radioButtons("rec_choice","",FALSE))),
tags$style("
.checkbox { /* checkbox is a div class*/
# line-height: 10px;
margin-bottom: 0px;
margin-left: 0px;
font-size: 20px;
}
input[type='checkbox']{ /* style for checkboxes */
width: 20px; /*Desired width*/
height: 20px; /*Desired height*/
line-height: 100px;
# span {
# margin-left: 30px; /*set the margin, so boxes don't overlap labels*/
# line-height: 30px;
}
}"),
uiOutput("Rec_options1"),
uiOutput("Rec_options6"),
uiOutput("Rec_options2"),
fluidRow(column(width=10,checkboxInput("biasC_choice","Bias correct recruitments?",FALSE))),
h5("Years of no bias correction"),
uiOutput("Rec_options3"),
h5("Years of bias correction"),
uiOutput("Rec_options4"),
uiOutput("Rec_options5")
),
)
),
#Selectivity
shinyjs::hidden(wellPanel(id="panel_selectivity_sss",
# wellPanel(
h4(strong("Selectivity")),
h5("Enter parameter values for each fleet and survey."),
h5("Example using 50% selectivity with two fleets: Inputs could be 35,40 for starting values."),
p("If using a mix of logistic and dome-shaped selectivities, select", strong("dome-shaped"),"and use the default values (10000,0.0001,0.9999 for the additonal parameters, respectively) to achieve a logistic shape for any given fleet."),
br(),
fluidRow(selectInput("Sel_choice_sss","Length selectivity type",c("Logistic","Dome-shaped"))),
uiOutput("Sel_parms1_sss"),
uiOutput("Sel_parms2_sss"),
uiOutput("Sel_parms3_sss")
)
),
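                                # Hedged sketch of the logistic selectivity shape referenced above
                                # (illustrative; SS uses its own parameterizations internally):
                                # len <- seq(10, 80, by = 1); S50 <- 35; S95 <- 45
                                # sel <- 1 / (1 + exp(-log(19) * (len - S50) / (S95 - S50)))
                                # plot(len, sel, type = "l", xlab = "Length (cm)", ylab = "Selectivity")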
shinyjs::hidden(wellPanel(id="panel_selectivity",
# wellPanel(
h4(strong("Selectivity")),
h5("Enter parameter and phase values for each fleet and survey."),
h5("Example using 50% selectivity with two fleets: Inputs could be 35,40 and 2,2 for starting values and phases respectively."),
h5("The phase input indicates estimated parameters. To fix the parameter, set the phase value to a negative number."),
p("If using a mix of logistic and dome-shaped selectivities, select", strong("dome-shaped"),"and fix (i.e., use a negative phase) the provided default values (10000,0.0001,0.9999 for the additonal parameters, respectively) to achieve a logistic shape for any given fleet."),
br(),
fluidRow(selectInput("Sel_choice","Length selectivity type",c("Logistic","Dome-shaped"))),
# fluidRow(column(width=6,numericInput("Sel50", "Length at 50% Selectivity", value=NA,min=0, max=10000, step=0.01)),
# column(width=6,numericInput("Sel50_phase","Estimation phase", value=1,min=-1000, max=10, step=1))),
# fluidRow(column(width=6,numericInput("Selpeak", "Length at Peak Selectvity", value=NA,min=0, max=10000, step=0.01)),
# column(width=6,numericInput("Selpeak_phase","Estimation phase", value=1,min=-1000, max=10, step=1))),
#if(input$Sel_choice=="Logistic")
# {uiOutput("Sel_logistic")},
#if(input$Sel_choice=="Dome-shaped")
# {uiOutput("Sel_domed")}
uiOutput("Sel_parms1"),
uiOutput("Sel_parms2"),
uiOutput("Sel_parms3"),
uiOutput("Sel_parms4"),
uiOutput("Sel_parms5")
# fluidRow(checkboxInput("Sex_lt_sel","Sex-specific selectivity?",FALSE)),
# fluidRow(checkboxInput("age_sel_choice","Age-based selectivity?",FALSE))
# ),
)
),
#Jitter inputs
# shinyjs::hidden(wellPanel(id="panel_SS_jitter1",
shinyjs::hidden(wellPanel(id="panel_SS_jitter",
fluidRow(column(width=10,checkboxInput("jitter_choice","Jitter starting values?",FALSE))),
uiOutput("Jitter_value"),
h5("Jittering refers to changing the input starting values."),
h5("Jittering provides a quick way to adjust starting values for two main purposes:"),
tags$ul(tags$li(h5(p("Start the model at different values to assist model convergence.")))),
tags$ul(tags$li(h5(p("Validate global versus local model convergence. This requires running many models at different jittered starting values to make sure a lower minimized likelihood value is not found. If a lower likelihood value is found, that would be considered the best fit model.")))),
h5("Run just 1 jitter value to find a converged model. Then run multiple jittered models to confrim that model is the best fit model."),
)
),
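                                # Hedged illustration of the jitter idea (SS's actual jitter is based on
                                # parameter bounds; this is only conceptual):
                                # init <- c(lnR0 = 9, Linf = 50, k = 0.2)
                                # jittered <- init * (1 + runif(length(init), -0.1, 0.1))  # +/-10% perturbation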
#Reference Points
shinyjs::hidden(wellPanel(id="panel_RPs",
fluidRow(column(width=10,checkboxInput("RP_choices","Define reference points?",FALSE))),
uiOutput("RP_selection1"),
uiOutput("RP_selection2")
)
),
#Forecast options
shinyjs::hidden(wellPanel(id="panel_Forecasts",
fluidRow(column(width=10,checkboxInput("Forecast_choice","Define forecasts?",FALSE))),
uiOutput("Forecasts")
)
),
shinyjs::hidden(wellPanel(id="panel_Mod_dims",
h4(strong("Model dimensions: years and ages")),
h5(p(em("Starting year values based on first year of data inputs"))),
# tags$ul(tags$li(h5(p(em("If catch data is used, starting and ending model years are based on the time series of catch"))))),
# tags$ul(tags$li(h5(p(em("If using only length or age data, starting model year is based on earliest year minus age at 95% Linf"))))),
# h5(p(em("Start year recommendations are:"))),
# tags$ul(tags$li(h5(p(em("If length data only, count the year back from the first year of length data based on maximum age likely contained in length data"))))),
# tags$ul(tags$li(h5(p(em("If using catch data, use the first year of catches"))))),
# h5(p(em(""))),
uiOutput("Model_dims1"),
# uiOutput("Model_dims2"),
)
),
shinyjs::hidden(wellPanel(id="panel_advanced_SS",
h4(strong("Additional SS options")),
h5(p(strong("Additional SS commands can be found ",tags$a(href="javascript:window.open('SS_commands.html', '_blank','width=600,height=400')", " here")))),
#fluidRow(column(width=10,checkboxInput("advance_ss_click","Advanced SS options",FALSE))),
popify(uiOutput("AdvancedSS_nohess"),"Run -nohess option","Turning off the Hessian option skips over asymptotic variance and speeds the model up. Use this option to more quickly explore models. Estimate variance once you are done exploring."),
popify(uiOutput("AdvancedSS_addcomms"),"Custom SS run commands","Click the advanced SS commands link above to get options. One interesting option is -hess_step which attempts to make the model gradient 0. This should be run once a final model is found."),
popify(uiOutput("AdvancedSS_addcomms_comms"),"Custom SS run commands"),
popify(uiOutput("AdvancedSS_noplots"),"Output plots","Diagnostic and results plots are produced by default. This switch turns those plots off in case you want to speed up the model run."),
popify(uiOutput("AdvancedSS_noestabs"),"No Executive Summary tables","Executive Summary tables take time to make, but provide summary tables of the model. When exploring models, it is better to turn this off to speed up the model run."),
popify(uiOutput("AdvancedSS_par"),"Switch to use the ss.par file","The ss.par file contains all parameter values used in the previous model run. It can be handy to run models from the par file to confirm you have reached the best fit model. The par file can also be used to expedite forecasts by turning the maximum phase to zero (see next option) and using the par file."),
popify(uiOutput("AdvancedSS_phase0"),"Maximum phase = 0","Setting maximum phase to 0 turns off all parameter estimation and is useful when forecasting catch into the future. Couple with using the ss.par file (see above option)."),
popify(uiOutput("AdvancedSS_datanew"),"Switch starter file to use the data_echo.ss_new file.","This file is a copy of the data file used in the last run. It has additional notes for inputs, and can be modified to run new scenarios, if desired."),
popify(uiOutput("AdvancedSS_controlnew"),"Switch starter file to use the control.ss_new file.","This file is a copy of the control file used in the previous run, but starting values are the ending values of the previous model. Like the ss.par file, it can be used to run from where the last model finished, but also provides a convenient way to change other parameter specifications."),
popify(uiOutput("AdvancedSS_forecastnew"),"Overwrite the forecast.ss file with the forecast.ss_new file.","This file is a copy of the forecast file used in the previous run. In the event you want to use this file, this switch will overwite the forecast file content with what is in the forecast.ss_new."),
popify(uiOutput("AdvancedSS_Sex3options"),"Sex=3 option for biological compositions","This switch changes the per sex biological compositions (sex = 1 for females and 2 for males) into a two sex length composition that retains the overall biological composition of the sample. This may add additional information on the underlying sex ratio of the population, but should be tested against using the biological compositions by sex. Choose to apply this method to lengths and/or ages."),
fluidRow(column(width=6,uiOutput("AdvancedSS_Sex3")),
column(width=6,uiOutput("AdvancedSS_AgeSex3"))),
#popify(uiOutput("AdvancedSS_Sex3"),"Sex=3 option for lengths","This switch changes the per sex length compositions (sex = 1 for females and 2 for males) into a two sex length composition that retains the overall length composition of the sample. This may add additional information on the underlying sex ratio of the population, but should be tested against using the length compositions by sex."),
#popify(uiOutput("AdvancedSS_AgeSex3"),"Sex=3 option for ages","This switch changes the per sex age compositions (sex = 1 for females and 2 for males) into a two sex length composition that retains the overall length composition of the sample. This may add additional information on the underlying sex ratio of the population, but should be tested against using the age compositions by sex."),
popify(uiOutput("AdvancedSS_Indexvar"),"Added index variance.","The input index variance is often underestimated. This option allows more variance to be added to each index in order to improve model fit. This is a type of weighting option for indices. The added variance is the same for each year of a particular index, but can be different across indices. Beware large index outliers that may overinflate added variances in order to get the model to fit that one data point."),
popify(uiOutput("AdvancedSS_ageerror"),"Ageing error matrices","Add as many custom ageing error matrices as needed. See the folders 'Example data files' < 'ageing error matrices' for examples of the ageing error input"),
uiOutput("AdvancedSS_ageerror_in"),
popify(uiOutput("AdvancedSS_Ctunits"),"Catch units","The default assumption is that catch is in metric tons (option 1), but if they are in numbers, option 2 should be used."),
popify(uiOutput("AdvancedSS_Ctunitsfleets"),"Enter units","1=biomass; 2=numbers. Enter one of these numbers for each removal fleet."),
popify(uiOutput("AdvancedSS_GT1"),"Growth morphs","Stock Synthesis can track multiple growth morphs distributed around a central morph. The default here is tracking 5 morphs, but using only 1 is also common. This switch moves from 5 to 1 morph."),
#uiOutput("AdvancedSS_retro_choice"),
#uiOutput("AdvancedSS_retro_years"),
h5(p("Define modelled length bins. Default values are by 2 cm bin ranging from 4 to 25% above the Linf value. If using conditional age at lengths, length bins must be consistent with these population bins, not the length data bins.")),
h5(p(em("Inputs must be smaller and larger than the length composition bins. Input values will be overridden to meet this requirement"))),
uiOutput("AdvancedSS_Ltbin"),
br(),
h5(p("Define plus group age. Default value is based on the female natural mortality value.")),
uiOutput("AdvancedSS_Nages")
# prettyCheckbox(
# inputId = "no_hess", label = "Turn off Hessian",
# shape = "round", outline = TRUE, status = "info"),
# prettyCheckbox(
# inputId = "no_plots_tables", label = "Turn off plots and tables",
# shape = "round", outline = TRUE, status = "info"),
# prettyCheckbox(
# inputId = "GT1", label = "Use only one growth type",
# shape = "round", outline = TRUE, status = "info"),
# prettyCheckbox(
# inputId = "Sex3", label = "Retain sex ratio in length compositions (Sex = 3)",
# shape = "round", outline = TRUE, status = "info"),
)
),
shinyjs::hidden(wellPanel(id="panel_advanced_user_SS",
h4(strong("Additional SS options")),
#fluidRow(column(width=10,checkboxInput("advance_ss_click","Advanced SS options",FALSE))),
popify(uiOutput("AdvancedSS_nohess_user"),"Run -nohess option","Turning off the Hessian option skips over asymptotic variance and speeds the model up. Use this option to more quickly explore models. Estimate variance once you are done exploring."),
popify(uiOutput("AdvancedSS_addcomms_user"),"Custom SS run commands","Click the advanced SS commands link above to get options. One interesting option is -hess_step which attempts to make the model gradient 0. This should be run once a final model is found."),
popify(uiOutput("AdvancedSS_addcomms_comms_user"),"Custom SS run commands","Click the advanced SS commands link above to get options. One interesting option is -hess_step which attempts to make the model gradient 0. This should be run once a final model is found."),
popify(uiOutput("AdvancedSS_noplots_user"),"Output plots","Diagnostic and results plots are produced by default. This switch turns those plots off in case you want to speed up the model run."),
popify(uiOutput("AdvancedSS_noestabs_user"),"No Executive Summary tables","Executive Summary tables take time to make, but provide summary tables of the model. When exploring models, it is better to turn this off to speed up the model run."),
popify(uiOutput("AdvancedSS_par_user"),"Switch to use the ss.par file","The ss.par file contains all parameter values used in the previous model run. It can be handy to run models from the par file to confirm you have reached the best fit model. The par file can also be used to expedite forecasts by turning the maximum phase to zero (see next option) and using the par file."),
popify(uiOutput("AdvancedSS_phase0_user"),"Maximum phase = 0","Setting maximum phase to 0 turns off all parameter estimation and is useful when forecasting catch into the future. Couple with using the ss.par file (see above option)."),
popify(uiOutput("AdvancedSS_datanew_user"),"Switch starter file to use the data_echo.ss_new file.","This file is a copy of the data file used in the last run. It has additional notes for inputs, and can be modified to run new scenarios, if desired."),
popify(uiOutput("AdvancedSS_controlnew_user"),"Switch starter file to use the control.ss_new file.","This file is a copy of the control file used in the previous run, but starting values are the ending values of the previous model. Like the ss.par file, it can be used to run from where the last model finished, but also provides a convenient way to change other parameter specifications."),
popify(uiOutput("AdvancedSS_forecastnew_user"),"Overwrite the forecast.ss file with the forecast.ss_new file.","This file is a copy of the forecast file used in the previous run. In the event you want to use this file, this switch will overwite the forecast file content with what is in the forecast.ss_new."),
#uiOutput("AdvancedSS_retro_choice_user"),
#uiOutput("AdvancedSS_retro_years_user")
)
),
shinyjs::hidden(wellPanel(id="panel_advanced_SSS",
h4(strong("Additional SS options")),
h5(strong("Choosing catch units")),
h6(strong("Default is biomass (in MT), but click below button to specify for each fleet.")),
#fluidRow(column(width=10,checkboxInput("advance_ss_click","Advanced SS options",FALSE))),
uiOutput("AdvancedSS_Ctunits_SSS"),
uiOutput("AdvancedSS_Ctunitsfleets_SSS"),
h5(strong("Add additional growth platoons?")),
uiOutput("AdvancedSS_GT5_SSS")
)
),
#SSS iterations
shinyjs::hidden(wellPanel(id="panel_SSS_reps",
h4(strong("SSS run specifications")),
fluidRow(column(width=10,numericInput("SSS_reps", "Number of SSS iterations", value=1000,min=1, max=1000000, step=1))),
fluidRow(column(width=10,numericInput("SSS_seed", "Seed number for draws", value=19,min=1, max=1000000, step=1)))
)
),
#wellPanel(
shinyjs::hidden(awesomeRadio(
inputId = "OS_choice",
label = "Which OS?",
choices = c("Windows","Mac","Linux"),
selected = "Windows",
inline=TRUE,
status = "warning")),
#),
shinyjs::hidden(wellPanel(id="Scenario_panel",
h4(strong("Scenario name")),
fluidRow(column(width=8,textInput("Scenario_name", strong("Choose the name of your scenario"), value="Scenario_1"))),
h5(p(em("Each scenario folder is saved. Changing the scenario name therefore creates a new folder of results."))),
h5(p(em("Using different scenario names when changing data or parameter values allows easy sensitivity exploration."))),
h5(p(strong("For Mac and Linux users, do not leave spaces in the Scenario name."))),
br(),
h4(strong("Select a folder to copy results")),
h5(p(em("Results are copied from the 'Scenarios' folder"))),
h5(p(em("Required to access results if using the online version"))),
shinyDirButton(
id="Modelout_dir",
label="Select model folder",
title="Choose folder to copy model scenario"
),
)
),
shinyjs::hidden(actionButton("run_SS",strong("Run Model"),
width="100%",
icon("circle-play"),
style="font-size:120%;border:2px solid;color:#FFFFFF;background:#658D1B")),
shinyjs::hidden(actionButton("run_SSS",strong("Run SSS"),
width="100%",
icon("circle-play"),
style="font-size:120%;border:2px solid;color:#FFFFFF; background:#236192")),
####################
### Other panels ###
####################
########################
### Model efficiency ###
########################
shinyjs::hidden(wellPanel(id="Modeff_panel",
h4(strong("Model efficiency and Bayesian analysis")),
h5(em("Using ",tags$a(href="https://github.com/Cole-Monnahan-NOAA/adnuts", "AD NUTS"),"to improve model efficiency and performance.")),
h5(em("This can also offer speedier Bayesian approaches.")),
br(),
h5(strong("Choose folder of model to evaluate")),
h5(em("")),
shinyDirButton(
"ModEff_dir",
label="Select model folder",
title="Choose folder of model to evaluate"
),
br(),
#uiOutput("ModEff_model_pick"),
br(),
h5(strong("Choose method to use")),
h5(("There are two main Bayesian methods to choose from:")),
tags$ul(tags$li(h5(p(em("Random walk Metropolis (RWM). This method is useful to quickly explore parameter behavior."))))),
tags$ul(tags$li(h5(p(em("No u-turn (Nuts). This method can quickly run Bayesian models once an efficient model is established. No thinning necessary, as that is part of the algorithm"))))),
h5(("The following is a recommended work flow to find and run an efficient Bayesian model:")),
tags$ul(tags$li(h5(p(em("Optimize you model with a short Bayesian run. Click the 'optimize model' button."))))),
tags$ul(tags$li(h5(p(em("Run your optimized model using RWM with 2000 iterations and thin = 10, then use the pairs plots to look for parameters that don't change value across kept draws."))))),
tags$ul(tags$li(h5(p(em("Evaluate the produced pairs plots to look for parameters that don't change value across kept draws."))))),
tags$ul(tags$li(h5(p(em("Parameters that don't move should be fixed in the model, and the model re-optimized."))))),
tags$ul(tags$li(h5(p(em("Run the RWM model again and continue looking for and fixing any non-moving parameters."))))),
br(),
h5(("Once the model specification is finalized, you can")),
tags$ul(tags$li(h5(p(em("Re-run with Hessian (go back to the first tab to re-run model) to get asymptotic variance estimates. "))))),
tags$ul(tags$li(h5(p(em("Re-optimize, then consider using the NUTS option with 1000 iterations or keep running the RWM option until convergence criteria are reached by increasing the thinning value and/or number of iterations. This produces a Bayesian representation of uncertainty."))))),
br(),
h5(("Good model convergence is indicated when:")),
tags$ul(tags$li(h5(p(em("Minimum effective sample size (ESS) > 200"))))),
tags$ul(tags$li(h5(p(em("Rhat <1.1, This measures the ratio of overestimated to underestimated variance."))))),
awesomeCheckbox(
inputId = "Opt_mod",
label = "Optimize model?",
value = TRUE,
status = "danger"
),
h5(("One should include model optimization before running the evaluation methods below if this is the first run of a given model specification, including if parameters have been fixed since the last exploration.")),
br(),
# awesomeCheckbox(
# inputId = "run_stanout",
# label = "Run Stan GUI?",
# value = TRUE,
# status = "danger"
# ),
# br(),
h5(strong("Choose method and evaluation inputs")),
awesomeRadio(
inputId = "ModEff_choice",
label = "",
choices = c("RWM", "Nuts"),
selected="RWM",
inline = TRUE,
status = "success"
),
fluidRow(column(width=5,numericInput("iter", "How many iterations to run?", value=2000,min=1, max=1000000000000, step=1)),
column(width=5,numericInput("thin","Thinning (RWM only): # of iterations to keep?", value=10,min=1, max=1000000000, step=10))),
actionButton("run_adnuts",strong("Run model"),
width="100%",
icon("circle-play"),
style="font-size:120%;border:2px solid;color:#FFFFFF; background:#236192"),
br(),
br(),
h5(strong("Once the model is finished, further model diagnostics are available by loading the fit_model.RData object created (use the load() function in R) and running launch_shinyadmb(fit_model).")),
h5(strong("This cannot be done while the SS-DL tool is open, so either use another R terminal or close the SS-DL app.")),
)),
###########################
### Likelihood profiles ###
###########################
shinyjs::hidden(wellPanel(id="Profile_panel",
h4(strong("Run likelihood profiles")),
h5(em("Likelihood profiles are a powerful way to understand the information content of data and sensitivity of models to parameter uncertainty.")),
h5(em("A likelihood profile fixes a chosen parameter to a specified set of values in a reference model. The reference model will maintain estimation of any other parameters estimated in the reference model.")),
h5(em("For example, natural mortality (M) could be profiled over the value 0.1 to 0.3 at steps of 0.01. This creates 21 model runs that fix M to different values while keeping all other specifications the same as the reference model.")),
h5(em("For each model run, the likelihood value and derived outputs are retained for analysis.")),
h5(em("Any likelihood values >1.96 units from the minimum value are identify as models statistically less supported by the data. ")),
h5(em("Plots with the profiled parameter values compared to the likelihood values and derived model outputs indicate how much information is contained in the model for the parameter and how sensitive the model is to parameters values resulting non-statistically different models.")),
br(),
h5(strong("Choose folder of scenario to run profile")),
#shinyFilesButton("LikeProf_dir", "Select scenario", "Choose folder containing model scenarios", multiple = F),
shinyDirButton(
id="LP_dir",
label="Select scenario",
title="Choose folder containing model scenarios"
),
br(),
br(),
h4(("Individual likelihood profiles- each parameter run independently.")),
h5(em("If choosing multiple parameters to individually profile over, entries should be done in order of the parameters shown and separated by a comma (e.g., 0.1, 0.3).")),
h5(em("The range of values must also include the value of the model being used. If not, the profile will not run.")),
uiOutput("LikeProf_model_picks"),
# fluidRow(selectInput("Profile_choice_choice","Parameter to profile",c("Steepness","lnR0","Natural mortality","Linf","k"))),
fluidRow(column(width=4,textInput("Prof_Low_val", "Low value", value="")),
column(width=4,textInput("Prof_Hi_val", "High value", value="")),
column(width=4,textInput("Prof_step","Sequence step", value=""))),
#br(),
actionButton("run_Profiles",strong("Run Likelihood Profile"),
width="100%",
icon("circle-play"),
style="font-size:120%;border:2px solid;color:#FFFFFF; background:#236192"),
br(),
br(),
br(),
h4(("Another option is to run simultaneous likelihood profiles.")),
h5(em("For example, Linf and k are negatively correlated and should be changing together in a profile.")),
h5(em("Read in .csv file with vectors of values for each parameter.")),
h5(em("See the 'Multi_profile_headers.csv' file for parameter header names. Erase columns not used and fill in rows with values for each likelihood run.")),
# fluidRow(column(width=10,checkboxInput("multi_profile","Apply all vectors in one profile?",FALSE))),
uiOutput("Profile_multi_values"),
fluidRow(column(width=5,numericInput("TRP_multi_like", "Target reference point (max=1; 0= no plot)? ", value=0,min=0, max=1, step=0.001)),
column(width=5,numericInput("LRP_multi_like","Limit reference point (max=1; 0= no plot)?", value=0,min=0, max=1, step=0.001))),
fluidRow(column(width=10,checkboxInput("Hess_multi_like","Include uncertainty estimation?",TRUE))),
# fluidRow(column(width=4,numericInput("Prof_Low_val", "Low value", value=NA,min=0, max=10000, step=0.001)),
# column(width=4,numericInput("Prof_Hi_val", "High value", value=NA,min=0, max=10000, step=0.001)),
# column(width=4,numericInput("Prof_step","Sequence step", value=NA,min=0, max=10000, step=0.001))),
#fluidRow(column(width=8,textInput("Profile_plot_file", strong("Label plot file"), value="Profile X"))),
#h5(strong("Choose folder of scenario to run profile")),
#uiOutput("LikeProf_dir_out"),
actionButton("run_MultiProfiles",strong("Run Likelihood Multi-Profile"),
width="100%",
icon("circle-play"),
style="font-size:120%;border:2px solid;color:#FFFFFF; background:#236192"),
)),
###############################
######## Retrospectives #######
###############################
shinyjs::hidden(wellPanel(id="Retro_panel",
h4(strong("Retrospective comparisons and plots")),
h5(em("Retrospecitive modelling means sequentially removing one year of data up to a specified number of years (e.g., -10 years).")),
h5(em("To make these comparisons, choose first the directory containing models, then the models to compare.")),
h5(em("A time series plot of comparisons are shown in the main panel to the right for the follwing model outputs:")),
tags$ul(tags$li(h5(p(em("Spawning output"))))),
tags$ul(tags$li(h5(p(em("Relative spawning output"))))),
tags$ul(tags$li(h5(p(em("Recruitment"))))),
h5(em("A complete compliment of comparison plots (along with the plot on the right) are saved in the chosen folder labeled 'retro'")),
#h5(strong(em("Retrospective Comparison Plots"))),
br(),
h5(strong("Choose folder containing model for retrospective analysis")),
shinyDirButton(
id="Retro_dir",
label="Select folder",
title="Choose folder containing model scenarios"
),
br(),
br(),
#h4(strong("Comparison plot label")),
h5(strong("Define what years to perform retrospective analysis. Input as a negative integer (e.g., -1 mean remove one year of data)")),
fluidRow(column(width=6,numericInput("first_retro_year_in", "1st retrospective year", value=-1,min=-500, max=0, step=1)),
column(width=6,numericInput("final_retro_year_in","Last retrospective year", value=-5,min=-500, max=0, step=1))),
#fluidRow(column(width=8,textInput("Sensi_comp_file", strong("Label comparison plot file"), value="Comparison 1"))),
#br(),
#br(),
actionButton("run_Retro_comps",strong("Run Retrospective Comparisons"),
width="100%",
icon("circle-play"),
style="font-size:120%;border:2px solid;color:#FFFFFF; background:#236192"),
)),
###############################
### Sensitivity comparisons ###
###############################
shinyjs::hidden(wellPanel(id="Sensi_Comparison_panel",
h4(strong("Sensitivity comparison plots")),
h5(em("Comparing models offers insight into how changing data or model specification change model outputs.")),
h5(em("To make these comparisons, choose first the directory containing models, then the models to compare.")),
h5(em("A time series plot of comparisons are shown in the main panel to the right for the follwing model outputs:")),
tags$ul(tags$li(h5(p(em("Spawning output"))))),
tags$ul(tags$li(h5(p(em("Relative spawning output"))))),
tags$ul(tags$li(h5(p(em("Recruitment"))))),
h5(em("A complete compliment of comparison plots (along with the plot on the right) are saved in the chosen directory in a folder labeled")),
#h5(strong(em("Sensitivity Comparison Plots"))),
br(),
h5(strong("Choose folder containing model scenarios")),
shinyDirButton(
id="Sensi_dir",
label="Select directory",
title="Choose folder containing model scenarios"
),
br(),
br(),
#h4(strong("Comparison plot label")),
uiOutput("Sensi_model_Ref"),
uiOutput("Sensi_model_picks"),
#fluidRow(column(width=10,checkboxInput("Sensi_uncertainty_choice","Include uncertainty intervals in plots?",TRUE))),
h5(strong("Add reference points to spawning output plots. Blank input adds no line.")),
fluidRow(column(width=5,numericInput("Sensi_TRP", "Target", value=NA,min=0, max=1, step=0.001)),
column(width=5,numericInput("Sensi_LRP","Limit", value=NA,min=0, max=1, step=0.001))),
h5(strong("Sensitivity relative error plot features")),
h5(strong("Add y-axis limits, vertical subheader breaks, sensitivity group subheaders, text size and positioning")),
fluidRow(column(width=6,numericInput("SensiRE_ymin", strong("Minimum y-axis value"), value=-1,min=-100, max=100, step=0.01)),
column(width=6,numericInput("SensiRE_ymax",strong("Maximum y-axis value"), value=1,min=-100, max=100, step=0.01 ))),
fluidRow(column(width=8,textInput("SensiRE_breaks", strong("Subheader vertical break positions"), value=" "))),
fluidRow(column(width=8,textInput("SensiRE_headers", strong("Subheader names"), value=" ")),
column(width=4,numericInput("SensiRE_headers_text", strong("Text size"), value=2,min=0.01, max=10, step=0.01))),
fluidRow(column(width=6,textInput("SensiRE_xcenter", strong("Vertical (x) centering of headers"), value=" ")),
column(width=6,textInput("SensiRE_ycenter",strong("Horizontal (y) centering of headers"), value=" "))),
fluidRow(column(width=8,textInput("Sensi_comp_file", strong("Comparison plot folder name"), value="Comparison 1"))),
#br(),
#br(),
actionButton("run_Sensi_comps",strong("Run Sensitivity Comparisons"),
width="100%",
icon("circle-play"),
style="font-size:120%;border:2px solid;color:#FFFFFF; background:#236192"),
)),
######################
### Ensemble panel ###
######################
shinyjs::hidden(wellPanel(id="Ensemble_panel",
h4(strong("Ensemble modelling")),
h5(em("Ensemble modelling allows the user to combine multiple models into one weighted distribution of outputs.")),
h5(em("User first chooses the models to combine, then how to combine them via model weights.")),
h5(em("For example, if 3 models are chosen, weights of 1,1,1 defines equal weights.")),
h5(em("If the middle model should have twice the weight of the others, 1,2,1 is the weighting input.")),
br(),
h5(strong("Choose folder containing models to combine")),
h5(em("")),
shinyDirButton(
id="Ensemble_dir",
label="Select directory",
title="Choose folder containing models to combine"
),
br(),
br(),
#h4(strong("Ensemble label")),
fluidRow(column(width=8,textInput("Ensemble_file", strong("Label ensemble model file"), value="Ensemble 1"))),
uiOutput("Ensemble_model_picks"),
fluidRow(column(width=10,textInput("Ensemble_wts","Relative scenario weights",value=""))),
actionButton("run_Ensemble",strong("Create Ensemble Model"),
width="100%",
icon("circle-play"),
style="font-size:120%;border:2px solid;color:#FFFFFF; background:#236192"),
)),
),
###########################################
###########################################
###########################################
mainPanel(
tabsetPanel(id="tabs",
# navbarPage(id="tabs",
tabPanel("Data and Parameters",
textOutput("catch_plots_label"),
uiOutput("Ctplot_it"),
textOutput("lt_comp_plots_label"),
uiOutput("Ltplot_it"),
textOutput("marginal_age_comp_plots_label"),
uiOutput("Ageplot_it_marginal"),
textOutput("conditional_age_comp_plots_label"),
uiOutput("Ageplot_it_cond"),
#plotOutput("Ageplot"),
textOutput("index_plots_label"),
uiOutput("Indexplot_it"),
h4("Life history"),
column(6,plotOutput("Mplot")),
column(6,plotOutput("VBGFplot")),
h6("."),
#uiOutput("AdvancedSS_nohess_user"),
uiOutput("Dep_plot_title"),
uiOutput("Dep_plot_it"),
#linebreaks(30),
h4("Selectivity"),
plotOutput("Selplot"),
plotOutput("Selplot_SSS"),
value=1),
tabPanel("SSS Model output",
h4("Full model output is contained in the SSS_out.DMP and SSSoutput.DMP files in the specific model scenario folder."),
h5("The SSS_out.DMP contains the prior and posterior values from the model, as well as the catch limits (Overfishing limint (OFL) and Allowable Biological Catch (ABC))."),
h5("The SSSoutput.DMP contains a list of the complete report files for each SSS run."),
br(),
h5(strong("Prior and Posterior input plots")),
plotOutput("SSS_priors_post"),
h5(strong("Prior and Posterior growth parameter plots")),
plotOutput("SSS_growth_priors_post"),
h5(strong("Catch limit plots")),
plotOutput("SSS_OFL_plot"),
plotOutput("SSS_ABC_plot"),
value=11),
tabPanel("Model output",
h4("Full model output is contained in the Report.sso file. The following reports are meant for quick examination."),
h4("Checking model convergence. Check also fit to length composition data"),
tableOutput("converge.grad"),
tableOutput("converge.covar"),
tableOutput("converge.dec"),
tags$head(tags$style("#converge.grad{color: black;
font-size: 20px;
font-style: italic;
}"
)
),
tags$head(tags$style("#converge.dec{color: green;
font-size: 20px;
font-style: italic;
}"
)
),
br(),
#h4("Relative spawning output"),
tableOutput("SSout_relSB_table"),
br(),
#h4("Fishing intensity"),
tableOutput("SSout_F_table"),
br(),
#h4("Estimated parameters"),
tableOutput("Parameters_table"),
br(),
#h4("Time series"),
tableOutput("SSout_table"),
value=2),
tabPanel("Model efficiency",
h4("Evaluate model for parameterization choices and convergence."),
h5("Model summary. Check if ESS>200 and Rhat<1.1."),
textOutput("fit.model.summary"),
br(),
br(),
br(),
h4("Pairs plot of slow mixing parameters"),
plotOutput("pairs_slow"),
br(),
br(),
br(),
br(),
br(),
h4("Pairs plot of fast mixing parameters"),
plotOutput("pairs_fast"),
value=12),
tabPanel("Jitter exploration",
plotOutput("Jitterplot"),
h4("Blue values indicate minimum likelihood values; red indicate values higher than the minimum."),
h4("Any iteration with a blue value can be used as the new best-fit (reference) model."),
plotOutput("Jittercompplot1"),
h4("Comparison of spawning output among jittered models. Model 0 is the initial model; numbered models are the sequential jittered models."),
plotOutput("Jittercompplot2"),
h4("Comparison of relative stock status among jittered models. Model 0 is the initial model; numbered models are the sequential jittered models."),
value=3),
tabPanel("Likelihood profile",
h4("Full likelihood profile outputs and plots can be found in the 'Profile' folder of the chosen scenario."),
h5("Each profile folder will be labeled with the parameter name appended."),
h5("Results below are from the one of the profile parameters to show what type of plots are found in the folders."),
plotOutput("LikeProf_plot_modout"),
br(),
br(),
plotOutput("LikeProf_plot_Piner"),
br(),
br(),
plotOutput("LikeProf_plot_SO"),
br(),
br(),
plotOutput("LikeProf_plot_SOt_SO0"),
br(),
br(),
h5("Multiple likelihood profile plot. Blue dot indicates reference model"),
plotOutput("LikeProf_multiplot"),
value=4),
tabPanel("Retrospectives",
imageOutput("Retro_comp_plotSB"),
imageOutput("Retro_comp_plotBratio"),
value=5),
tabPanel("Sensitivity Plots",
# uiOutput("Sensi_comp_plot"),
h4("Time series sensitivity plots"),
imageOutput("Sensi_comp_plot",width="50%"),
linebreaks(8),
headerPanel(""),
h4("Relative change (Scenario relative to reference model) sensitivity plots for 5 different metrics"),
h5("Unfished spawning output and terminal year spawning output measure scale"),
h5("Relative biomass is a stock status measure"),
h5("FMSY is a measure of productivity; MSY is a measure of productivity and scale"),
h5(paste("For more details on interpreting these plots, please see "),tags$a("Cope and Gertseva 2020",target="_blank",href="CopeandGertseva2020.pdf")),
imageOutput("SensiRE_comp_plot",height="auto"),
linebreaks(8),
headerPanel(""),
h4("Log relative change (scenario relative to reference model) sensitivity plots for 5 different metrics"),
h5("Log relative change addresses the assymetry in possible relative change (infinity at the highest and 0 at the lowest)."),
imageOutput("SensiRElog_comp_plot",height="auto"),
value=6),
tabPanel("Ensemble models",
plotOutput("Ensemble_plots"),
plotOutput("Ensemble_plots_SO_ts"),
plotOutput("Ensemble_plots_Bratio_ts"),
value=7)
)
)
)
)
)
# column(width=3,style='padding:2px;',numericInput("k_f_mean", "Mean", value=NA,min=0, max=10000, step=0.001)),
# column(width=3,style='padding:2px;',numericInput("k_f_SD", "SD", value=0,min=0, max=10000, step=0.001)),
# column(width=2,style='padding:2px;',align="center",numericInput("k_f_phase", "Phase", value=-1,min=-999, max=10, step=0.001))),
# h5(strong("t0")),
# fluidRow(column(width=4,style='padding:2px;',selectInput("t0_f_prior","Prior type",c("lognormal","truncated normal","uniform","beta"))),
# column(width=3,style='padding:2px;',numericInput("t0_f_mean", "Mean", value=NA,min=0, max=10000, step=0.001)),
# column(width=3,style='padding:2px;',numericInput("t0_f_SD", "SD", value=0,min=0, max=10000, step=0.001)),
# column(width=2,style='padding:2px;',align="center",numericInput("t0_f_phase", "Phase", value=-1,min=-999, max=10, step=0.001))),
# h5(strong("Length CV")),
# fluidRow(column(width=4,style='padding:2px;',selectInput("CV_f_prior","Prior type",c("lognormal","truncated normal","uniform","beta"))),
# column(width=3,style='padding:2px;',numericInput("CV_f_mean", "Mean", value=0.1,min=0, max=10000, step=0.001)),
# column(width=3,style='padding:2px;',numericInput("CV_f_SD", "SD", value=0,min=0, max=10000, step=0.001)),
# column(width=2,style='padding:2px;',align="center",numericInput("CV_f_phase", "Phase", value=-1,min=-999, max=10, step=0.001))),
# h5(strong("Maturity and weight-length relationships")),
# fluidRow(column(width=6,numericInput("L50_f", "Length at 50% maturity", value=NA,min=0, max=10000, step=0.01)),
# column(width=6,numericInput("L95_f","Length at 95% maturity", value=NA,min=0, max=10000, step=0.01))),
# fluidRow(column(width=6,numericInput("Fec_a", "Wt-based fec coeff", value=1,min=0, max=10000, step=-0.01)),
# column(width=6,numericInput("Fec_b","Wt-based fec exp", value=1,min=0, max=10000, step=0.01))),
# fluidRow(column(width=6,numericInput("WLa_f", "Weight-Length alpha", value=0.00001,min=0, max=10000, step=0.000000001)),
# column(width=6,numericInput("WLb_f","Weight-length beta", value=3,min=0, max=10000, step=0.01))),
# ),
# fluidRow(column(width=10,checkboxInput("male_parms_est","Males specific values?",FALSE))),
# #fluidRow(column(width=7, h3("Males specific values?")),column(width=2,checkboxInput("male_parms","Males specific values?",FALSE,width="150%"))),
# wellPanel(
# uiOutput("Male_parms_inputs_label_est"),
# uiOutput("Male_parms_inputs1_est"),
# uiOutput("Male_parms_inputs2_est"),
# uiOutput("Male_parms_inputs3_est"),
# uiOutput("Male_parms_inputs4_est")
# ),
# )
# ),
shinyjs::hidden(wellPanel(id="panel_SSS",
h4(strong("Life history inputs")),
h5(em("If using the uniform prior, low and high range go in the mean and SD input, respectively.")),
wellPanel(
h4(em("Female")),
h5(strong("Natural mortality")),
dropdownButton(
selectInput("M_prior_sss","Prior type",c("lognormal","normal","uniform","no prior")),
numericInput("M_f_mean_sss", "Mean", value=NA,min=0, max=10000, step=0.00001),
numericInput("M_f_SD_sss", "SD", value=0.44,min=0, max=10000, step=0.00001),
circle = FALSE, right=TRUE, status = "danger", icon = icon("skull-crossbones"), width = "300px",label="Natural mortality"
),
h5(strong("Growth")),
dropdownButton(
selectInput("Linf_f_prior_sss","Prior type",c("no prior","normal")),
numericInput("Linf_f_mean_sss", "Mean", value=NA,min=0, max=10000, step=0.001),
numericInput("Linf_f_SD_sss", "SD", value=0,min=0, max=10000, step=0.001),
circle = FALSE, right=TRUE, status = "danger", icon = icon("infinity"), width = "300px",label="Linf: Asymptotic size"
),
br(),
dropdownButton(
selectInput("k_f_prior_sss","Prior type",c("no prior","normal")),
numericInput("k_f_mean_sss", "Mean", value=NA,min=0, max=10000, step=0.00001),
numericInput("k_f_SD_sss", "SD", value=0,min=0, max=10000, step=0.00001),
circle = FALSE, right=TRUE, status = "danger", icon = icon("ruler-horizontal"), width = "300px",label="k: VB growth coefficient"
),
br(),
fluidRow(column(width=6,numericInput("Linf_k_cor_sss", "Correlation between Linf and k", value=-0.9,min=-1, max=1, step=0.001))),
br(),
dropdownButton(
selectInput("t0_f_prior_sss","Prior type",c("no prior","normal")),
numericInput("t0_f_mean_sss", "Mean", value=NA,min=-100, max=10000, step=0.001),
numericInput("t0_f_SD_sss", "SD", value=0,min=0, max=1000, step=0.001),
circle = FALSE, right=TRUE, status = "danger", icon = icon("baby-carriage"), width = "300px",label="t0: Age at size 0"
),
h5(em("Length CV")),
dropdownButton(
selectInput("CV_lt_f_young_prior_sss","Prior type",c("no prior")),
numericInput("CV_lt_f_young_mean_sss", "Mean", value=0.1,min=0, max=10000, step=0.0001),
numericInput("CV_lt_f_young_SD_sss", "SD", value=0,min=0, max=10000, step=0.0001),
                                          circle = FALSE, right=TRUE, status = "danger", icon = icon("dice"), width = "300px",label="CV at length (young)"
),
dropdownButton(
selectInput("CV_lt_f_old_prior_sss","Prior type",c("no prior")),
numericInput("CV_lt_f_old_mean_sss", "Mean", value=0.1,min=0, max=10000, step=0.0001),
numericInput("CV_lt_f_old_SD_sss", "SD", value=0,min=0, max=10000, step=0.0001),
                                          circle = FALSE, right=TRUE, status = "danger", icon = icon("dice"), width = "300px",label="CV at length (old)"
),
h5(strong("Maturity and weight-length relationships")),
fluidRow(column(width=6,numericInput("L50_f_sss", "Length at 50% maturity", value=NA,min=0, max=10000, step=0.001)),
column(width=6,numericInput("L95_f_sss","Length at 95% maturity", value=NA,min=0, max=10000, step=0.001))),
h5("Length-weight relationship W=aL^b. Weight is in kg and length in cm."),
fluidRow(column(width=6,numericInput("WLa_f_sss", "a in W=aL^b", value=0.00001,min=0, max=10000, step=0.000000001)),
column(width=6,numericInput("WLb_f_sss","b in W=aL^b", value=3,min=0, max=10000, step=0.001))),
h5("Length-fecundity relationship F=aL^b. Fecundity is measured in number of eggs or pups. Set equal to the length-weight relationship to get fecundity equivalent to spawning biomass."),
fluidRow(column(width=6,numericInput("Fec_a_f_sss", "a in F=aL^b", value=0.00001,min=0, max=10000, step=0.000000001)),
column(width=6,numericInput("Fec_b_f_sss","b in F=aL^b", value=3,min=0, max=10000, step=0.001))),
),
fluidRow(column(width=10,checkboxInput("male_parms_SSS","Males specific values?",FALSE)),
                                        column(width=10,checkboxInput("male_offset_SSS","Males offset from females (log(m/f))?",FALSE))),
#fluidRow(column(width=7, h3("Males specific values?")),column(width=2,checkboxInput("male_parms_SSS","Males specific values?",FALSE,width="150%"))),
wellPanel(
uiOutput("Male_parms_inputs_label_SSS"),
uiOutput("Male_parms_inputs_M_SSS"),
uiOutput("Male_parms_inputs_space1_SSS"),
uiOutput("Male_parms_inputs_Growth_label_SSS"),
uiOutput("Male_parms_inputs_Linf_SSS"),
uiOutput("Male_parms_inputs_space2_SSS"),
uiOutput("Male_parms_inputs_k_SSS"),
uiOutput("Male_parms_inputs_space3_SSS"),
uiOutput("Male_parms_inputs_t0_SSS"),
uiOutput("Male_parms_inputs_space4_SSS"),
uiOutput("Male_parms_inputs_CV_young_SSS"),
uiOutput("Male_parms_inputs_CV_old_SSS"),
uiOutput("Male_parms_inputs_WL_SSS")
),
)
),
#SSS Stock status input
shinyjs::hidden(wellPanel(id="panel_SS_stock_status",
h4(strong("Relative stock status")),
#wellPanel(
fluidRow(column(width=6,numericInput("status_year", "Relative stock status year", value=NA,min=1000, max=3000, step=1))),
dropdownButton(
selectInput("Depl_prior_sss","Prior type",c("beta","lognormal","truncated normal","uniform","no prior")),
numericInput("Depl_mean_sss", "Mean", value=NA,min=0.001, max=1, step=0.001),
numericInput("Depl_SD_sss", "SD", value=0.2,min=0, max=1000, step=0.001),
circle = FALSE, status = "danger", icon = icon("battery-half"), width = "300px",label="Relative Stock Status"
)
)
),
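        # A hedged illustration (not the SSS internals) of how the mean/SD inputs
        # above could be turned into beta prior shape parameters via the method of
        # moments. Kept commented out so the UI definition still parses.
        # beta_from_moments <- function(m, s) {
        #   v <- s^2                                 # variance from the SD input
        #   a <- m * (m * (1 - m) / v - 1)           # shape 1
        #   b <- (1 - m) * (m * (1 - m) / v - 1)     # shape 2
        #   c(shape1 = a, shape2 = b)
        # }
        # pars  <- beta_from_moments(0.4, 0.2)       # e.g., mean depletion 0.4, SD 0.2
        # draws <- rbeta(1000, pars["shape1"], pars["shape2"])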
################################
#Stock-recruitment/Productivity#
################################
shinyjs::hidden(wellPanel(id="panel_SSS_prod",
h4(strong("Stock-recruitment parameters")),
br(),
fluidRow(column(width=6,numericInput("lnR0_sss", "Initial recruitment (lnR0)", value=7,min=0.01, max=20, step=0.01))),
dropdownButton(
selectInput("h_prior_sss","Steepness",c("symmetric beta","beta","truncated normal","truncated lognormal","uniform","no prior")),
numericInput("h_mean_sss", "Mean", value=0.7,min=0.2, max=1, step=0.001),
numericInput("h_SD_sss", "SD", value=0.15,min=0, max=10000, step=0.001),
circle = FALSE, status = "danger", icon = icon("recycle"), width = "300px",label="Steepness"
),
)
),
shinyjs::hidden(wellPanel(id="panel_SS_LO_prod",
h4(strong("Stock-recruitment parameters")),
# wellPanel(
fluidRow(column(width=6,numericInput("h_LO","Steepness", value=0.7,min=0.2, max=1, step=0.01))),
# ),
)
),
shinyjs::hidden(wellPanel(id="panel_SS_prod_fixed",
h4(strong("Stock-recruitment parameters")),
# wellPanel(
fluidRow(column(width=6,numericInput("h","Steepness", value=0.7,min=0.2, max=1, step=0.01)),
column(width=6,numericInput("lnR0", "Initial recruitment (lnR0)", value=7,min=0.01, max=20, step=0.01))),
# ),
)
),
shinyjs::hidden(wellPanel(id="panel_SS_prod_est",
h4(strong("Stock-recruitment parameters")),
# wellPanel(
dropdownButton(
selectInput("h_ss_prior","Prior type",c("no prior","symmetric beta", "beta","lognormal","gamma","normal")),
numericInput("h_mean_ss", "Mean", value=0.7,min=0.2, max=1, step=0.001),
numericInput("h_SD_ss", "SD", value=0.15,min=0, max=10000, step=0.001),
numericInput("h_phase", "Phase", value=-1,min=-999, max=10, step=0.001),
circle = FALSE, status = "danger", icon = icon("recycle"), width = "300px",label="Steepness"
),
fluidRow(column(width=6,numericInput("lnR0_est", "Initial recruitment (lnR0)", value=9,min=0, max=20, step=0.01))),
)),
# fluidRow(column(width=4,style='padding:1px;',align="center", selectInput("h_ss_prior","Steepness",c("beta","symmetric beta","truncated normal","trunc lognormal","uniform"))),
# column(width=3,style='padding:2px;',align="center",numericInput("h_mean_ss", "Mean", value=0.7,min=0, max=10000, step=0.001)),
# column(width=3,style='padding:2px;',align="center",numericInput("h_SD_ss", "SD", value=0.15,min=0, max=10000, step=0.001)),
# column(width=2,style='padding:2px;',align="center",numericInput("h_phase", "Phase", value=-1,min=-999, max=10, step=0.001))),
# fluidRow(column(width=6,numericInput("lnR0_est", "Initial recruitment (lnR0)", value=9,min=0, max=20, step=0.01))),
# # ),
# )
# ),
#Recruitment estimation
shinyjs::hidden(wellPanel(id="panel_SS_recdevs",
fluidRow(column(width=10,checkboxInput("rec_choice","Estimate recruitment?",FALSE))),
wellPanel(
# fluidRow(column(width=8,offset=-10, h3("Estimate recruitment?")),column(width=6,checkboxInput("rec_choice","",FALSE))),
# fluidRow(column(width=8, h3("Estimate recruitment?")),column(width=4,radioButtons("rec_choice","",FALSE))),
tags$style("
.checkbox { /* checkbox is a div class*/
# line-height: 10px;
margin-bottom: 0px;
margin-left: 0px;
font-size: 20px;
}
input[type='checkbox']{ /* style for checkboxes */
width: 20px; /*Desired width*/
height: 20px; /*Desired height*/
line-height: 100px;
# span {
# margin-left: 30px; /*set the margin, so boxes don't overlap labels*/
# line-height: 30px;
}
}"),
uiOutput("Rec_options1"),
uiOutput("Rec_options6"),
uiOutput("Rec_options2"),
fluidRow(column(width=10,checkboxInput("biasC_choice","Bias correct recruitments?",FALSE))),
h5("Years of no bias correction"),
uiOutput("Rec_options3"),
h5("Years of bias correction"),
uiOutput("Rec_options4"),
uiOutput("Rec_options5")
),
)
),
#Selectivity
shinyjs::hidden(wellPanel(id="panel_selectivity_sss",
# wellPanel(
h4(strong("Selectivity")),
h5("Enter parameter values for each fleet and survey."),
h5("Example using 50% selectivity with two fleets: Inputs could be 35,40 for starting values."),
p("If using a mix of logistic and dome-shaped selectivities, select", strong("dome-shaped"),"and use the default values (10000,0.0001,0.9999 for the additonal parameters, respectively) to achieve a logistic shape for any given fleet."),
br(),
fluidRow(selectInput("Sel_choice_sss","Length selectivity type",c("Logistic","Dome-shaped"))),
uiOutput("Sel_parms1_sss"),
uiOutput("Sel_parms2_sss"),
uiOutput("Sel_parms3_sss")
)
),
shinyjs::hidden(wellPanel(id="panel_selectivity",
# wellPanel(
h4(strong("Selectivity")),
h5("Enter parameter and phase values for each fleet and survey."),
h5("Example using 50% selectivity with two fleets: Inputs could be 35,40 and 2,2 for starting values and phases respectively."),
h5("The phase input indicates estimated parameters. To fix the parameter, set the phase value to a negative number."),
p("If using a mix of logistic and dome-shaped selectivities, select", strong("dome-shaped"),"and fix (i.e., use a negative phase) the provided default values (10000,0.0001,0.9999 for the additonal parameters, respectively) to achieve a logistic shape for any given fleet."),
br(),
fluidRow(selectInput("Sel_choice","Length selectivity type",c("Logistic","Dome-shaped"))),
# fluidRow(column(width=6,numericInput("Sel50", "Length at 50% Selectivity", value=NA,min=0, max=10000, step=0.01)),
# column(width=6,numericInput("Sel50_phase","Estimation phase", value=1,min=-1000, max=10, step=1))),
# fluidRow(column(width=6,numericInput("Selpeak", "Length at Peak Selectvity", value=NA,min=0, max=10000, step=0.01)),
# column(width=6,numericInput("Selpeak_phase","Estimation phase", value=1,min=-1000, max=10, step=1))),
#if(input$Sel_choice=="Logistic")
# {uiOutput("Sel_logistic")},
#if(input$Sel_choice=="Dome-shaped")
# {uiOutput("Sel_domed")}
uiOutput("Sel_parms1"),
uiOutput("Sel_parms2"),
uiOutput("Sel_parms3"),
uiOutput("Sel_parms4"),
uiOutput("Sel_parms5")
# fluidRow(checkboxInput("Sex_lt_sel","Sex-specific selectivity?",FALSE)),
# fluidRow(checkboxInput("age_sel_choice","Age-based selectivity?",FALSE))
# ),
)
),
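        # A hedged sketch of the logistic length selectivity these inputs describe
        # (Stock Synthesis pattern 1): p1 is the length at 50% selectivity and p2 the
        # width from 50% to 95%. Kept commented out so the UI definition still parses.
        # logistic_sel <- function(L, p1, p2) 1 / (1 + exp(-log(19) * (L - p1) / p2))
        # L <- seq(10, 80, by = 1)
        # plot(L, logistic_sel(L, p1 = 35, p2 = 10), type = "l",
        #      xlab = "Length (cm)", ylab = "Selectivity")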
#Jitter inputs
# shinyjs::hidden(wellPanel(id="panel_SS_jitter1",
shinyjs::hidden(wellPanel(id="panel_SS_jitter",
fluidRow(column(width=10,checkboxInput("jitter_choice","Jitter starting values?",FALSE))),
uiOutput("Jitter_value"),
h5("Jittering refers to changing the input starting values."),
h5("Jittering provides a quick way to adjust starting values for two main purposes:"),
tags$ul(tags$li(h5(p("Start the model at different values to assist model convergence.")))),
tags$ul(tags$li(h5(p("Validate global versus local model convergence. This requires running many models at different jittered starting values to make sure a lower minimized likelihood value is not found. If a lower likelihood value is found, that would be considered the best fit model.")))),
h5("Run just 1 jitter value to find a converged model. Then run multiple jittered models to confrim that model is the best fit model."),
)
),
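        # A hedged sketch of running jitters outside the app, assuming the r4ss
        # package (the helper's name and signature vary by r4ss version). The path
        # is hypothetical. Kept commented out so the UI definition still parses.
        # library(r4ss)
        # likes <- r4ss::jitter(dir = "Scenarios/Scenario_1",  # hypothetical scenario path
        #                       Njitter = 50,                  # number of jittered runs
        #                       jitter_fraction = 0.1)         # relative perturbation size
        # which(likes == min(likes))  # runs reaching the lowest negative log-likelihood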
#Reference Points
shinyjs::hidden(wellPanel(id="panel_RPs",
fluidRow(column(width=10,checkboxInput("RP_choices","Define reference points?",FALSE))),
uiOutput("RP_selection1"),
uiOutput("RP_selection2")
)
),
#Forecast options
shinyjs::hidden(wellPanel(id="panel_Forecasts",
fluidRow(column(width=10,checkboxInput("Forecast_choice","Define forecasts?",FALSE))),
uiOutput("Forecasts")
)
),
shinyjs::hidden(wellPanel(id="panel_Mod_dims",
h4(strong("Model dimensions: years and ages")),
h5(p(em("Starting year values based on first year of data inputs"))),
# tags$ul(tags$li(h5(p(em("If catch data is used, starting and ending model years are based on the time series of catch"))))),
# tags$ul(tags$li(h5(p(em("If using only length or age data, starting model year is based on earliest year minus age at 95% Linf"))))),
# h5(p(em("Start year recommendations are:"))),
# tags$ul(tags$li(h5(p(em("If length data only, count the year back from the first year of length data based on maximum age likely contained in length data"))))),
# tags$ul(tags$li(h5(p(em("If using catch data, use the first year of catches"))))),
# h5(p(em(""))),
uiOutput("Model_dims1"),
# uiOutput("Model_dims2"),
)
),
shinyjs::hidden(wellPanel(id="panel_advanced_SS",
h4(strong("Additional SS options")),
h5(p(strong("Additional SS commands can be found ",tags$a(href="javascript:window.open('SS_commands.html', '_blank','width=600,height=400')", " here")))),
#fluidRow(column(width=10,checkboxInput("advance_ss_click","Advanced SS options",FALSE))),
popify(uiOutput("AdvancedSS_nohess"),"Run -nohess option","Turning off the Hessian option skips over asymptotic variance and speeds the model up. Use this option to more quickly explore models. Estimate variance once you are done exploring."),
popify(uiOutput("AdvancedSS_addcomms"),"Custom SS run commands","Click the advanced SS commands link above to get options. One interesting option is -hess_step which attempts to make the model gradient 0. This should be run once a final model is found."),
popify(uiOutput("AdvancedSS_addcomms_comms"),"Custom SS run commands"),
popify(uiOutput("AdvancedSS_noplots"),"Output plots","Diagnostic and results plots are produced by default. This switch turns those plots off in case you want to speed up the model run."),
popify(uiOutput("AdvancedSS_noestabs"),"No Executive Summary tables","Executive Summary tables take time to make, but provide summary tables of the model. When exploring models, it is better to turn this off to speed up the model run."),
popify(uiOutput("AdvancedSS_par"),"Switch to use the ss.par file","The ss.par file contains all parameter values used in the previous model run. It can be handy to run models from the par file to confirm you have reached the best fit model. The par file can also be used to expedite forecasts by turning the maximum phase to zero (see next option) and using the par file."),
popify(uiOutput("AdvancedSS_phase0"),"Maximum phase = 0","Setting maximum phase to 0 turns off all parameter estimation and is useful when forecasting catch into the future. Couple with using the ss.par file (see above option)."),
popify(uiOutput("AdvancedSS_datanew"),"Switch starter file to use the data_echo.ss_new file.","This file is a copy of the data file used in the last run. It has additional notes for inputs, and can be modified to run new scenarios, if desired."),
popify(uiOutput("AdvancedSS_controlnew"),"Switch starter file to use the control.ss_new file.","This file is a copy of the control file used in the previous run, but starting values are the ending values of the previous model. Like the ss.par file, it can be used to run from where the last model finished, but also provides a convenient way to change other parameter specifications."),
popify(uiOutput("AdvancedSS_forecastnew"),"Overwrite the forecast.ss file with the forecast.ss_new file.","This file is a copy of the forecast file used in the previous run. In the event you want to use this file, this switch will overwite the forecast file content with what is in the forecast.ss_new."),
popify(uiOutput("AdvancedSS_Sex3options"),"Sex=3 option for biological compositions","This switch changes the per sex biological compositions (sex = 1 for females and 2 for males) into a two sex length composition that retains the overall biological composition of the sample. This may add additional information on the underlying sex ratio of the population, but should be tested against using the biological compositions by sex. Choose to apply this method to lengths and/or ages."),
fluidRow(column(width=6,uiOutput("AdvancedSS_Sex3")),
column(width=6,uiOutput("AdvancedSS_AgeSex3"))),
#popify(uiOutput("AdvancedSS_Sex3"),"Sex=3 option for lengths","This switch changes the per sex length compositions (sex = 1 for females and 2 for males) into a two sex length composition that retains the overall length composition of the sample. This may add additional information on the underlying sex ratio of the population, but should be tested against using the length compositions by sex."),
#popify(uiOutput("AdvancedSS_AgeSex3"),"Sex=3 option for ages","This switch changes the per sex age compositions (sex = 1 for females and 2 for males) into a two sex length composition that retains the overall length composition of the sample. This may add additional information on the underlying sex ratio of the population, but should be tested against using the age compositions by sex."),
popify(uiOutput("AdvancedSS_Indexvar"),"Added index variance.","The input index variance is often underestimated. This option allows more variance to be added to each index in order to improve model fit. This is a type of weighting option for indices. The added variance is the same for each year of a particular index, but can be different across indices. Beware large index outliers that may overinflate added variances in order to get the model to fit that one data point."),
popify(uiOutput("AdvancedSS_ageerror"),"Ageing error matrices","Add as many custom ageing error matrices as needed. See the folders 'Example data files' < 'ageing error matrices' for examples of the ageing error input"),
uiOutput("AdvancedSS_ageerror_in"),
popify(uiOutput("AdvancedSS_Ctunits"),"Catch units","The default assumption is that catch is in metric tons (option 1), but if they are in numbers, option 2 should be used."),
popify(uiOutput("AdvancedSS_Ctunitsfleets"),"Enter units","1=biomass; 2=numbers. Enter one of these numbers for each removal fleet."),
popify(uiOutput("AdvancedSS_GT1"),"Growth morphs","Stock Synthesis can track multiple growth morphs distributed around a central morph. The default here is tracking 5 morphs, but using only 1 is also common. This switch moves from 5 to 1 morph."),
#uiOutput("AdvancedSS_retro_choice"),
#uiOutput("AdvancedSS_retro_years"),
h5(p("Define modelled length bins. Default values are by 2 cm bin ranging from 4 to 25% above the Linf value. If using conditional age at lengths, length bins must be consistent with these population bins, not the length data bins.")),
h5(p(em("Inputs must be smaller and larger than the length composition bins. Input values will be overridden to meet this requirement"))),
uiOutput("AdvancedSS_Ltbin"),
br(),
h5(p("Define plus group age. Default value is based on the female natural mortality value.")),
uiOutput("AdvancedSS_Nages")
# prettyCheckbox(
# inputId = "no_hess", label = "Turn off Hessian",
# shape = "round", outline = TRUE, status = "info"),
# prettyCheckbox(
# inputId = "no_plots_tables", label = "Turn off plots and tables",
# shape = "round", outline = TRUE, status = "info"),
# prettyCheckbox(
# inputId = "GT1", label = "Use only one growth type",
# shape = "round", outline = TRUE, status = "info"),
# prettyCheckbox(
# inputId = "Sex3", label = "Retain sex ratio in length compositions (Sex = 3)",
# shape = "round", outline = TRUE, status = "info"),
)
),
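        # A hedged sketch of flipping two of the switches above by hand with r4ss
        # (field names follow r4ss::SS_readstarter and may differ by version; the
        # path is hypothetical). Kept commented out so the UI definition still parses.
        # starter <- r4ss::SS_readstarter("Scenarios/Scenario_1/starter.ss")
        # starter$init_values_src <- 1        # 1 = start from the ss.par file
        # starter$last_estimation_phase <- 0  # max phase 0 = no estimation (forecast runs)
        # r4ss::SS_writestarter(starter, dir = "Scenarios/Scenario_1", overwrite = TRUE)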
shinyjs::hidden(wellPanel(id="panel_advanced_user_SS",
h4(strong("Additional SS options")),
#fluidRow(column(width=10,checkboxInput("advance_ss_click","Advanced SS options",FALSE))),
popify(uiOutput("AdvancedSS_nohess_user"),"Run -nohess option","Turning off the Hessian option skips over asymptotic variance and speeds the model up. Use this option to more quickly explore models. Estimate variance once you are done exploring."),
popify(uiOutput("AdvancedSS_addcomms_user"),"Custom SS run commands","Click the advanced SS commands link above to get options. One interesting option is -hess_step which attempts to make the model gradient 0. This should be run once a final model is found."),
popify(uiOutput("AdvancedSS_addcomms_comms_user"),"Custom SS run commands","Click the advanced SS commands link above to get options. One interesting option is -hess_step which attempts to make the model gradient 0. This should be run once a final model is found."),
popify(uiOutput("AdvancedSS_noplots_user"),"Output plots","Diagnostic and results plots are produced by default. This switch turns those plots off in case you want to speed up the model run."),
popify(uiOutput("AdvancedSS_noestabs_user"),"No Executive Summary tables","Executive Summary tables take time to make, but provide summary tables of the model. When exploring models, it is better to turn this off to speed up the model run."),
popify(uiOutput("AdvancedSS_par_user"),"Switch to use the ss.par file","The ss.par file contains all parameter values used in the previous model run. It can be handy to run models from the par file to confirm you have reached the best fit model. The par file can also be used to expedite forecasts by turning the maximum phase to zero (see next option) and using the par file."),
popify(uiOutput("AdvancedSS_phase0_user"),"Maximum phase = 0","Setting maximum phase to 0 turns off all parameter estimation and is useful when forecasting catch into the future. Couple with using the ss.par file (see above option)."),
popify(uiOutput("AdvancedSS_datanew_user"),"Switch starter file to use the data_echo.ss_new file.","This file is a copy of the data file used in the last run. It has additional notes for inputs, and can be modified to run new scenarios, if desired."),
popify(uiOutput("AdvancedSS_controlnew_user"),"Switch starter file to use the control.ss_new file.","This file is a copy of the control file used in the previous run, but starting values are the ending values of the previous model. Like the ss.par file, it can be used to run from where the last model finished, but also provides a convenient way to change other parameter specifications."),
popify(uiOutput("AdvancedSS_forecastnew_user"),"Overwrite the forecast.ss file with the forecast.ss_new file.","This file is a copy of the forecast file used in the previous run. In the event you want to use this file, this switch will overwite the forecast file content with what is in the forecast.ss_new."),
#uiOutput("AdvancedSS_retro_choice_user"),
#uiOutput("AdvancedSS_retro_years_user")
)
),
shinyjs::hidden(wellPanel(id="panel_advanced_SSS",
h4(strong("Additional SS options")),
h5(strong("Choosing catch units")),
h6(strong("Default is biomass (in MT), but click below button to specify for each fleet.")),
#fluidRow(column(width=10,checkboxInput("advance_ss_click","Advanced SS options",FALSE))),
uiOutput("AdvancedSS_Ctunits_SSS"),
uiOutput("AdvancedSS_Ctunitsfleets_SSS"),
h5(strong("Add additional growth platoons?")),
uiOutput("AdvancedSS_GT5_SSS")
)
),
#SSS iterations
shinyjs::hidden(wellPanel(id="panel_SSS_reps",
h4(strong("SSS run specifications")),
fluidRow(column(width=10,numericInput("SSS_reps", "Number of SSS iterations", value=1000,min=1, max=1000000, step=1))),
fluidRow(column(width=10,numericInput("SSS_seed", "Seed number for draws", value=19,min=1, max=1000000, step=1)))
)
),
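        # A hedged illustration (not the actual SSS code) of what the iteration and
        # seed inputs drive: seeded Monte Carlo draws from the parameter priors. Note
        # the lognormal re-parameterization so draws average to the requested mean.
        # set.seed(19)                      # the seed input above
        # n <- 1000                         # the iteration input above
        # m <- 0.15; sdlog <- 0.44          # e.g., a natural mortality prior
        # M_draws <- rlnorm(n, meanlog = log(m) - 0.5 * sdlog^2, sdlog = sdlog)
        # mean(M_draws)                     # ~0.15 on average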
#wellPanel(
shinyjs::hidden(awesomeRadio(
inputId = "OS_choice",
label = "Which OS?",
choices = c("Windows","Mac","Linux"),
selected = "Windows",
inline=TRUE,
status = "warning")),
#),
shinyjs::hidden(wellPanel(id="Scenario_panel",
h4(strong("Scenario name")),
fluidRow(column(width=8,textInput("Scenario_name", strong("Choose the name of your scenario"), value="Scenario_1"))),
h5(p(em("Each scenario folder is saved. Changing the scenario name therefore creates a new folder of results."))),
h5(p(em("Using different scenario names when changing data or parameter values allows easy sensitivity exploration."))),
h5(p(strong("For Mac and Linux users, do not leave spaces in the Scenario name."))),
br(),
h4(strong("Select a folder to copy results")),
h5(p(em("Results are copied from the 'Scenarios' folder"))),
h5(p(em("Required to access results if using the online version"))),
shinyDirButton(
id="Modelout_dir",
label="Select model folder",
title="Choose folder to copy model scenario"
),
)
),
shinyjs::hidden(actionButton("run_SS",strong("Run Model"),
width="100%",
icon("circle-play"),
style="font-size:120%;border:2px solid;color:#FFFFFF;background:#658D1B")),
shinyjs::hidden(actionButton("run_SSS",strong("Run SSS"),
width="100%",
icon("circle-play"),
style="font-size:120%;border:2px solid;color:#FFFFFF; background:#236192")),
####################
### Other panels ###
####################
########################
### Model efficiency ###
########################
shinyjs::hidden(wellPanel(id="Modeff_panel",
h4(strong("Model efficiency and Bayesian analysis")),
h5(em("Using ",tags$a(href="https://github.com/Cole-Monnahan-NOAA/adnuts", "AD NUTS"),"to improve model efficiency and performance.")),
h5(em("This can also offer speedier Bayesian approaches.")),
br(),
h5(strong("Choose folder of model to evaluate")),
h5(em("")),
shinyDirButton(
"ModEff_dir",
label="Select model folder",
title="Choose folder of model to evaluate"
),
br(),
#uiOutput("ModEff_model_pick"),
br(),
h5(strong("Choose method to use")),
h5(("There are two main Bayesian methods to choose from:")),
tags$ul(tags$li(h5(p(em("Random walk Metropolis (RWM). This method is useful to quickly explore parameter behavior."))))),
tags$ul(tags$li(h5(p(em("No u-turn (Nuts). This method can quickly run Bayesian models once an efficient model is established. No thinning necessary, as that is part of the algorithm"))))),
h5(("The following is a recommended work flow to find and run an efficient Bayesian model:")),
tags$ul(tags$li(h5(p(em("Optimize you model with a short Bayesian run. Click the 'optimize model' button."))))),
tags$ul(tags$li(h5(p(em("Run your optimized model using RWM with 2000 iterations and thin = 10, then use the pairs plots to look for parameters that don't change value across kept draws."))))),
tags$ul(tags$li(h5(p(em("Evaluate the produced pairs plots to look for parameters that don't change value across kept draws."))))),
tags$ul(tags$li(h5(p(em("Parameters that don't move should be fixed in the model, and the model re-optimized."))))),
tags$ul(tags$li(h5(p(em("Run the RWM model again and continue looking for and fixing any non-moving parameters."))))),
br(),
h5(("Once the model specification is finalized, you can")),
tags$ul(tags$li(h5(p(em("Re-run with Hessian (go back to the first tab to re-run model) to get asymptotic variance estimates. "))))),
tags$ul(tags$li(h5(p(em("Re-optimize, then consider using the NUTS option with 1000 iterations or keep running the RWM option until convergence criteria are reached by increasing the thinning value and/or number of iterations. This produces a Bayesian representation of uncertainty."))))),
br(),
h5(("Good model convergence is indicated when:")),
tags$ul(tags$li(h5(p(em("Minimum effective sample size (ESS) > 200"))))),
tags$ul(tags$li(h5(p(em("Rhat <1.1, This measures the ratio of overestimated to underestimated variance."))))),
awesomeCheckbox(
inputId = "Opt_mod",
label = "Optimize model?",
value = TRUE,
status = "danger"
),
h5(("One should include model optimization before running the evaluation methods below if this is the first run of a given model specification, including if parameters have been fixed since the last exploration.")),
br(),
# awesomeCheckbox(
# inputId = "run_stanout",
# label = "Run Stan GUI?",
# value = TRUE,
# status = "danger"
# ),
# br(),
h5(strong("Choose method and evaluation inputs")),
awesomeRadio(
inputId = "ModEff_choice",
label = "",
choices = c("RWM", "Nuts"),
selected="RWM",
inline = TRUE,
status = "success"
),
fluidRow(column(width=5,numericInput("iter", "How many iterations to run?", value=2000,min=1, max=1000000000000, step=1)),
column(width=5,numericInput("thin","Thinning (RWM only): # of iterations to keep?", value=10,min=1, max=1000000000, step=10))),
actionButton("run_adnuts",strong("Run model"),
width="100%",
icon("circle-play"),
style="font-size:120%;border:2px solid;color:#FFFFFF; background:#236192"),
br(),
br(),
h5(strong("Once the model is finished, further model diagnostics are available by loading the fit_model.RData object created (use the load() function in R) and running launch_shinyadmb(fit_model).")),
h5(strong("This cannot be done while the SS-DL tool is open, so either use another R terminal or close the SS-DL app.")),
)),
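        # A minimal sketch of the follow-up diagnostics described above, run from a
        # separate R session once the model has finished (assumes adnuts is
        # installed). Kept commented out so the UI definition still parses.
        # library(adnuts)
        # load("fit_model.RData")       # created by the run; loads 'fit_model'
        # summary(fit_model)            # check minimum ESS > 200 and Rhat < 1.1
        # launch_shinyadmb(fit_model)   # interactive NUTS/RWM diagnostics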
###########################
### Likelihood profiles ###
###########################
shinyjs::hidden(wellPanel(id="Profile_panel",
h4(strong("Run likelihood profiles")),
h5(em("Likelihood profiles are a powerful way to understand the information content of data and sensitivity of models to parameter uncertainty.")),
h5(em("A likelihood profile fixes a chosen parameter to a specified set of values in a reference model. The reference model will maintain estimation of any other parameters estimated in the reference model.")),
h5(em("For example, natural mortality (M) could be profiled over the value 0.1 to 0.3 at steps of 0.01. This creates 21 model runs that fix M to different values while keeping all other specifications the same as the reference model.")),
h5(em("For each model run, the likelihood value and derived outputs are retained for analysis.")),
h5(em("Any likelihood values >1.96 units from the minimum value are identify as models statistically less supported by the data. ")),
h5(em("Plots with the profiled parameter values compared to the likelihood values and derived model outputs indicate how much information is contained in the model for the parameter and how sensitive the model is to parameters values resulting non-statistically different models.")),
br(),
h5(strong("Choose folder of scenario to run profile")),
#shinyFilesButton("LikeProf_dir", "Select scenario", "Choose folder containing model scenarios", multiple = F),
shinyDirButton(
id="LP_dir",
label="Select scenario",
title="Choose folder containing model scenarios"
),
br(),
br(),
h4(("Individual likelihood profiles- each parameter run independently.")),
h5(em("If choosing multiple parameters to individually profile over, entries should be done in order of the parameters shown and separated by a comma (e.g., 0.1, 0.3).")),
h5(em("The range of values must also include the value of the model being used. If not, the profile will not run.")),
uiOutput("LikeProf_model_picks"),
# fluidRow(selectInput("Profile_choice_choice","Parameter to profile",c("Steepness","lnR0","Natural mortality","Linf","k"))),
fluidRow(column(width=4,textInput("Prof_Low_val", "Low value", value="")),
column(width=4,textInput("Prof_Hi_val", "High value", value="")),
column(width=4,textInput("Prof_step","Sequence step", value=""))),
#br(),
actionButton("run_Profiles",strong("Run Likelihood Profile"),
width="100%",
icon("circle-play"),
style="font-size:120%;border:2px solid;color:#FFFFFF; background:#236192"),
br(),
br(),
br(),
h4(("Another option is to run simultaneous likelihood profiles.")),
h5(em("For example, Linf and k are negatively correlated and should be changing together in a profile.")),
h5(em("Read in .csv file with vectors of values for each parameter.")),
h5(em("See the 'Multi_profile_headers.csv' file for parameter header names. Erase columns not used and fill in rows with values for each likelihood run.")),
# fluidRow(column(width=10,checkboxInput("multi_profile","Apply all vectors in one profile?",FALSE))),
uiOutput("Profile_multi_values"),
fluidRow(column(width=5,numericInput("TRP_multi_like", "Target reference point (max=1; 0= no plot)? ", value=0,min=0, max=1, step=0.001)),
column(width=5,numericInput("LRP_multi_like","Limit reference point (max=1; 0= no plot)?", value=0,min=0, max=1, step=0.001))),
fluidRow(column(width=10,checkboxInput("Hess_multi_like","Include uncertainty estimation?",TRUE))),
# fluidRow(column(width=4,numericInput("Prof_Low_val", "Low value", value=NA,min=0, max=10000, step=0.001)),
# column(width=4,numericInput("Prof_Hi_val", "High value", value=NA,min=0, max=10000, step=0.001)),
# column(width=4,numericInput("Prof_step","Sequence step", value=NA,min=0, max=10000, step=0.001))),
#fluidRow(column(width=8,textInput("Profile_plot_file", strong("Label plot file"), value="Profile X"))),
#h5(strong("Choose folder of scenario to run profile")),
#uiOutput("LikeProf_dir_out"),
actionButton("run_MultiProfiles",strong("Run Likelihood Multi-Profile"),
width="100%",
icon("circle-play"),
style="font-size:120%;border:2px solid;color:#FFFFFF; background:#236192"),
)),
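        # A hedged sketch of the profile logic described above (not the app's code):
        # fix the chosen parameter over a sequence, refit, and flag runs within 1.96
        # likelihood units of the minimum; run_fixed_model() is hypothetical.
        # vals <- seq(0.1, 0.3, by = 0.01)  # e.g., profiling M
        # nll  <- vapply(vals, function(v) run_fixed_model(M = v), numeric(1))
        # keep <- (nll - min(nll)) <= 1.96  # statistically indistinguishable models
        # plot(vals, nll - min(nll), type = "b"); abline(h = 1.96, lty = 2)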
###############################
######## Retrospectives #######
###############################
shinyjs::hidden(wellPanel(id="Retro_panel",
h4(strong("Retrospective comparisons and plots")),
h5(em("Retrospecitive modelling means sequentially removing one year of data up to a specified number of years (e.g., -10 years).")),
h5(em("To make these comparisons, choose first the directory containing models, then the models to compare.")),
h5(em("A time series plot of comparisons are shown in the main panel to the right for the follwing model outputs:")),
tags$ul(tags$li(h5(p(em("Spawning output"))))),
tags$ul(tags$li(h5(p(em("Relative spawning output"))))),
tags$ul(tags$li(h5(p(em("Recruitment"))))),
h5(em("A complete compliment of comparison plots (along with the plot on the right) are saved in the chosen folder labeled 'retro'")),
#h5(strong(em("Retrospective Comparison Plots"))),
br(),
h5(strong("Choose folder containing model for retrospective analysis")),
shinyDirButton(
id="Retro_dir",
label="Select folder",
title="Choose folder containing model scenarios"
),
br(),
br(),
#h4(strong("Comparison plot label")),
h5(strong("Define what years to perform retrospective analysis. Input as a negative integer (e.g., -1 mean remove one year of data)")),
fluidRow(column(width=6,numericInput("first_retro_year_in", "1st retrospective year", value=-1,min=-500, max=0, step=1)),
column(width=6,numericInput("final_retro_year_in","Last retrospective year", value=-5,min=-500, max=0, step=1))),
#fluidRow(column(width=8,textInput("Sensi_comp_file", strong("Label comparison plot file"), value="Comparison 1"))),
#br(),
#br(),
actionButton("run_Retro_comps",strong("Run Retrospective Comparisons"),
width="100%",
icon("circle-play"),
style="font-size:120%;border:2px solid;color:#FFFFFF; background:#236192"),
)),
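        # A hedged sketch of a retrospective run outside the app, assuming the r4ss
        # package (helper name/signature vary by version; the path is hypothetical).
        # Kept commented out so the UI definition still parses.
        # library(r4ss)
        # r4ss::retro(dir = "Scenarios/Scenario_1",  # hypothetical scenario path
        #             years = 0:-5)                  # 0 = full model, -5 = drop five years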
###############################
### Sensitivity comparisons ###
###############################
shinyjs::hidden(wellPanel(id="Sensi_Comparison_panel",
h4(strong("Sensitivity comparison plots")),
h5(em("Comparing models offers insight into how changing data or model specification change model outputs.")),
h5(em("To make these comparisons, choose first the directory containing models, then the models to compare.")),
h5(em("A time series plot of comparisons are shown in the main panel to the right for the follwing model outputs:")),
tags$ul(tags$li(h5(p(em("Spawning output"))))),
tags$ul(tags$li(h5(p(em("Relative spawning output"))))),
tags$ul(tags$li(h5(p(em("Recruitment"))))),
h5(em("A complete compliment of comparison plots (along with the plot on the right) are saved in the chosen directory in a folder labeled")),
#h5(strong(em("Sensitivity Comparison Plots"))),
br(),
h5(strong("Choose folder containing model scenarios")),
shinyDirButton(
id="Sensi_dir",
label="Select directory",
title="Choose folder containing model scenarios"
),
br(),
br(),
#h4(strong("Comparison plot label")),
uiOutput("Sensi_model_Ref"),
uiOutput("Sensi_model_picks"),
#fluidRow(column(width=10,checkboxInput("Sensi_uncertainty_choice","Include uncertainty intervals in plots?",TRUE))),
h5(strong("Add reference points to spawning output plots. Blank input adds no line.")),
fluidRow(column(width=5,numericInput("Sensi_TRP", "Target", value=NA,min=0, max=1, step=0.001)),
column(width=5,numericInput("Sensi_LRP","Limit", value=NA,min=0, max=1, step=0.001))),
h5(strong("Sensitivity relative error plot features")),
h5(strong("Add y-axis limits, vertical subheader breaks, sensitivity group subheaders, text size and positioning")),
fluidRow(column(width=6,numericInput("SensiRE_ymin", strong("Minimum y-axis value"), value=-1,min=-100, max=100, step=0.01)),
column(width=6,numericInput("SensiRE_ymax",strong("Maximum y-axis value"), value=1,min=-100, max=100, step=0.01 ))),
fluidRow(column(width=8,textInput("SensiRE_breaks", strong("Subheader vertical break positions"), value=" "))),
fluidRow(column(width=8,textInput("SensiRE_headers", strong("Subheader names"), value=" ")),
column(width=4,numericInput("SensiRE_headers_text", strong("Text size"), value=2,min=0.01, max=10, step=0.01))),
fluidRow(column(width=6,textInput("SensiRE_xcenter", strong("Vertical (x) centering of headers"), value=" ")),
column(width=6,textInput("SensiRE_ycenter",strong("Horizontal (y) centering of headers"), value=" "))),
fluidRow(column(width=8,textInput("Sensi_comp_file", strong("Comparison plot folder name"), value="Comparison 1"))),
#br(),
#br(),
actionButton("run_Sensi_comps",strong("Run Sensitivity Comparisons"),
width="100%",
icon("circle-play"),
style="font-size:120%;border:2px solid;color:#FFFFFF; background:#236192"),
)),
######################
### Ensemble panel ###
######################
shinyjs::hidden(wellPanel(id="Ensemble_panel",
h4(strong("Ensemble modelling")),
h5(em("Ensemble modelling allows the user to combine multiple models into one weighted distribution of outputs.")),
h5(em("User first chooses the models to combine, then how to combine them via model weights.")),
h5(em("For example, if 3 models are chosen, weights of 1,1,1 defines equal weights.")),
h5(em("If the middle model should have twice the weight of the others, 1,2,1 is the weighting input.")),
br(),
h5(strong("Choose folder containing models to combine")),
h5(em("")),
shinyDirButton(
id="Ensemble_dir",
label="Select directory",
title="Choose folder containing models to combine"
),
br(),
br(),
#h4(strong("Ensemble label")),
fluidRow(column(width=8,textInput("Ensemble_file", strong("Label ensemble model file"), value="Ensemble 1"))),
uiOutput("Ensemble_model_picks"),
fluidRow(column(width=10,textInput("Ensemble_wts","Relative scenario weights",value=""))),
actionButton("run_Ensemble",strong("Create Ensemble Model"),
width="100%",
icon("circle-play"),
style="font-size:120%;border:2px solid;color:#FFFFFF; background:#236192"),
)),
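        # A hedged sketch of the weighting scheme described above (not the app's
        # code): normalize the relative weights and resample each model's output
        # draws in proportion to its weight; the per-model draw vectors are
        # hypothetical. Kept commented out so the UI definition still parses.
        # wts   <- c(1, 2, 1)                 # the relative weights input
        # probs <- wts / sum(wts)             # 0.25, 0.50, 0.25
        # draws <- list(mod1_draws, mod2_draws, mod3_draws)  # hypothetical draws
        # ens   <- unlist(mapply(function(d, p) sample(d, round(10000 * p), replace = TRUE),
        #                        draws, probs))
        # quantile(ens, c(0.025, 0.5, 0.975))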
),
###########################################
###########################################
###########################################
mainPanel(
tabsetPanel(id="tabs",
# navbarPage(id="tabs",
tabPanel("Data and Parameters",
textOutput("catch_plots_label"),
uiOutput("Ctplot_it"),
textOutput("lt_comp_plots_label"),
uiOutput("Ltplot_it"),
textOutput("marginal_age_comp_plots_label"),
uiOutput("Ageplot_it_marginal"),
textOutput("conditional_age_comp_plots_label"),
uiOutput("Ageplot_it_cond"),
#plotOutput("Ageplot"),
textOutput("index_plots_label"),
uiOutput("Indexplot_it"),
h4("Life history"),
column(6,plotOutput("Mplot")),
column(6,plotOutput("VBGFplot")),
h6("."),
#uiOutput("AdvancedSS_nohess_user"),
uiOutput("Dep_plot_title"),
uiOutput("Dep_plot_it"),
#linebreaks(30),
h4("Selectivity"),
plotOutput("Selplot"),
plotOutput("Selplot_SSS"),
value=1),
tabPanel("SSS Model output",
h4("Full model output is contained in the SSS_out.DMP and SSSoutput.DMP files in the specific model scenario folder."),
h5("The SSS_out.DMP contains the prior and posterior values from the model, as well as the catch limits (Overfishing limint (OFL) and Allowable Biological Catch (ABC))."),
h5("The SSSoutput.DMP contains a list of the complete report files for each SSS run."),
br(),
h5(strong("Prior and Posterior input plots")),
plotOutput("SSS_priors_post"),
h5(strong("Prior and Posterior growth parameter plots")),
plotOutput("SSS_growth_priors_post"),
h5(strong("Catch limit plots")),
plotOutput("SSS_OFL_plot"),
plotOutput("SSS_ABC_plot"),
value=11),
tabPanel("Model output",
h4("Full model output is contained in the Report.sso file. The following reports are meant for quick examination."),
h4("Checking model convergence. Check also fit to length composition data"),
tableOutput("converge.grad"),
tableOutput("converge.covar"),
tableOutput("converge.dec"),
tags$head(tags$style("#converge.grad{color: black;
font-size: 20px;
font-style: italic;
}"
)
),
tags$head(tags$style("#converge.dec{color: green;
font-size: 20px;
font-style: italic;
}"
)
),
br(),
#h4("Relative spawning output"),
tableOutput("SSout_relSB_table"),
br(),
#h4("Fishing intensity"),
tableOutput("SSout_F_table"),
br(),
#h4("Estimated parameters"),
tableOutput("Parameters_table"),
br(),
#h4("Time series"),
tableOutput("SSout_table"),
value=2),
tabPanel("Model efficiency",
h4("Evaluate model for parameterization choices and convergence."),
h5("Model summary. Check if ESS>200 and Rhat<1.1."),
textOutput("fit.model.summary"),
br(),
br(),
br(),
h4("Pairs plot of slow mixing parameters"),
plotOutput("pairs_slow"),
br(),
br(),
br(),
br(),
br(),
h4("Pairs plot of fast mixing parameters"),
plotOutput("pairs_fast"),
value=12),
tabPanel("Jitter exploration",
plotOutput("Jitterplot"),
h4("Blue values indicate minimum likelihood values; red indicate values higher than the minimum."),
h4("Any iteration with a blue value can be used as the new best-fit (reference) model."),
plotOutput("Jittercompplot1"),
h4("Comparison of spawning output among jittered models. Model 0 is the initial model; numbered models are the sequential jittered models."),
plotOutput("Jittercompplot2"),
h4("Comparison of relative stock status among jittered models. Model 0 is the initial model; numbered models are the sequential jittered models."),
value=3),
tabPanel("Likelihood profile",
h4("Full likelihood profile outputs and plots can be found in the 'Profile' folder of the chosen scenario."),
h5("Each profile folder will be labeled with the parameter name appended."),
h5("Results below are from the one of the profile parameters to show what type of plots are found in the folders."),
plotOutput("LikeProf_plot_modout"),
br(),
br(),
plotOutput("LikeProf_plot_Piner"),
br(),
br(),
plotOutput("LikeProf_plot_SO"),
br(),
br(),
plotOutput("LikeProf_plot_SOt_SO0"),
br(),
br(),
h5("Multiple likelihood profile plot. Blue dot indicates reference model"),
plotOutput("LikeProf_multiplot"),
value=4),
tabPanel("Retrospectives",
imageOutput("Retro_comp_plotSB"),
imageOutput("Retro_comp_plotBratio"),
value=5),
tabPanel("Sensitivity Plots",
# uiOutput("Sensi_comp_plot"),
h4("Time series sensitivity plots"),
imageOutput("Sensi_comp_plot",width="50%"),
linebreaks(8),
headerPanel(""),
h4("Relative change (Scenario relative to reference model) sensitivity plots for 5 different metrics"),
h5("Unfished spawning output and terminal year spawning output measure scale"),
h5("Relative biomass is a stock status measure"),
h5("FMSY is a measure of productivity; MSY is a measure of productivity and scale"),
h5(paste("For more details on interpreting these plots, please see "),tags$a("Cope and Gertseva 2020",target="_blank",href="CopeandGertseva2020.pdf")),
imageOutput("SensiRE_comp_plot",height="auto"),
linebreaks(8),
headerPanel(""),
h4("Log relative change (scenario relative to reference model) sensitivity plots for 5 different metrics"),
h5("Log relative change addresses the assymetry in possible relative change (infinity at the highest and 0 at the lowest)."),
imageOutput("SensiRElog_comp_plot",height="auto"),
value=6),
tabPanel("Ensemble models",
plotOutput("Ensemble_plots"),
plotOutput("Ensemble_plots_SO_ts"),
plotOutput("Ensemble_plots_Bratio_ts"),
value=7)
)
)
)
)
)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/size_of_selfloops.R
\name{size_of_selfloops}
\alias{size_of_selfloops}
\title{Metric: Size of selfloops}
\usage{
size_of_selfloops(eventlog, type = c("repeat", "redo"),
level_of_analysis = c("log", "case", "activity", "resource",
"resource-acitivty"))
}
\arguments{
\item{eventlog}{The event log to be used. An object of class
\code{eventlog}.}
\item{type}{Type of selfloops, i.e., repeat or redo.}
\item{level_of_analysis}{At which level the analysis of selfloops should be performed: log, case, activity, resource, resource activity.}
}
\description{
Provides summary statistics on the sizes of selfloops at the level of activity types, cases, traces or log. A selfloop of size x refers to the occurrence of x consecutive events
of that activity type.
}
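% A hedged usage sketch, not from the package authors; 'patients' is assumed to
% be an eventlog object such as the example data shipped with eventdataR.
\examples{
\dontrun{
library(edeaR)
size_of_selfloops(patients, type = "repeat", level_of_analysis = "log")
}
}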
|
/man/size_of_selfloops.Rd
|
no_license
|
strategist922/edeaR
|
R
| false
| true
| 838
|
rd
|
|
\name{quantile}
\alias{quantile}
\alias{quantile-methods}
\alias{quantile,BoundedDensity-method}
\alias{quantile,Chen99Kernel-method}
\alias{quantile,MicroBetaChen99Kernel-method}
\alias{quantile,MacroBetaChen99Kernel-method}
\alias{quantile,BoundaryKernel-method}
\alias{quantile,NoBoundaryKernel-method}
\alias{quantile,Muller91BoundaryKernel-method}
\alias{quantile,JonesCorrectionMuller91BoundaryKernel-method}
\alias{quantile,Muller94BoundaryKernel-method}
\alias{quantile,JonesCorrectionMuller94BoundaryKernel-method}
\alias{quantile,NormalizedBoundaryKernel-method}
\alias{quantile,Vitale-method}
\alias{quantile,BrVitale-method}
\alias{quantile,KakizawaB1-method}
\alias{quantile,KakizawaB2-method}
\alias{quantile,KakizawaB3-method}
\alias{quantile,HirukawaJLNKernel-method}
\alias{quantile,HirukawaTSKernel-method}
\alias{quantile,MacroBetaHirukawaJLNKernel-method}
\alias{quantile,MacroBetaHirukawaTSKernel-method}
\title{Quantile}
\description{
Quantile function for the given bounded density object.
}
\section{Methods}{
\describe{
\item{\code{quantile(x,p)}}{
%% ~~describe this method here~~
}
}}
\arguments{
	\item{\code{x}}{A bounded density estimator. See all the accepted classes here by running the command \code{getSubclasses("BoundedDensity")}. This parameter is named \code{x} instead of \code{.Object} to agree with other already defined density methods.}
\item{\code{p}}{Vector of probabilities}
}
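% A hedged usage sketch, not from the package authors; 'dens' stands for any
% previously constructed bounded density estimator (see
% getSubclasses("BoundedDensity") for the accepted classes).
\examples{
\dontrun{
quantile(dens, p = c(0.25, 0.5, 0.75))
}
}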
\keyword{methods}
%\keyword{ ~~ other possible keyword(s) ~~ }
|
/man/quantile.Rd
|
no_license
|
cran/bde
|
R
| false
| false
| 1,489
|
rd
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
context("broadcast variables")
# JavaSparkContext handle
sparkSession <- sparkR.session()
sc <- callJStatic("org.apache.spark.sql.api.r.SQLUtils", "getJavaSparkContext", sparkSession)
# Partitioned data
nums <- 1:2
rrdd <- parallelize(sc, nums, 2L)
test_that("using broadcast variable", {
randomMat <- matrix(nrow = 10, ncol = 10, data = rnorm(100))
randomMatBr <- broadcast(sc, randomMat)
useBroadcast <- function(x) {
sum(SparkR:::value(randomMatBr) * x)
}
actual <- collectRDD(lapply(rrdd, useBroadcast))
expected <- list(sum(randomMat) * 1, sum(randomMat) * 2)
expect_equal(actual, expected)
})
test_that("without using broadcast variable", {
randomMat <- matrix(nrow = 10, ncol = 10, data = rnorm(100))
useBroadcast <- function(x) {
sum(randomMat * x)
}
actual <- collectRDD(lapply(rrdd, useBroadcast))
expected <- list(sum(randomMat) * 1, sum(randomMat) * 2)
expect_equal(actual, expected)
})
|
/R/pkg/inst/tests/testthat/test_broadcast.R
|
permissive
|
ElfoLiNk/spark
|
R
| false
| false
| 1,724
|
r
|
library(arules)
#
# data1 <- bookings.made.v.grouped[ , c("Populationdensity.ERPat30June.persons.km2", "RegionLocation" , "ClubsInSuburb" ,
# "recently.advertised", "Medianequivalisedtotalhouseholdincome.weekly.AUD" , "MedianAge.Persons.years",
# "WorkingAgePopulation.aged15.64years", "MaleFemalePerc", "Australiancitizen.Perc", "Total.Opening.Hours",
# "nbr.courts", "has.Lights", "has.clay", "has.grass", "has.hot.shot", "has.indoor", "has.hard", "Utilisation.Category")]
data1 <- data.model[ ,c( "State","Populationdensity.ERPat30June.persons.km2", "DistanceToCBD.km.truncated",
"recently.advertised" , "Medianequivalisedtotalhouseholdincome.weekly.AUD" , "MedianAge.Persons.years",
"WorkingAgePopulation.aged15.64years", "MaleFemalePerc", "Australiancitizen.Perc",
"has.Lights", "has.hot.shot", "MembersByCourt", "court.options", "SecularBeliefs.Perc",
"Utilisation.Category")]#"Utilisation.Category",
nums <- unlist(lapply(data1, is.numeric))
#dplyr::select_if(data1, is.numeric)
for(i in which(nums)){
data1[,i] <- discretize(data1[, i], breaks = 3, labels = c("low", "med", "high"))
print(i)
}
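# Sketch of what discretize() does here (illustrative values; arules defaults
# to equal-frequency binning):
# discretize(c(1, 2, 3, 10, 20, 30), breaks = 3, labels = c("low", "med", "high"))
# # -> roughly low low med med high high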
data1[, "Utilisation.Category"] <- factor(data1[,"Utilisation.Category"])
tData <- as (data1, "transactions") # convert to 'transactions' class
# data1[,1] <- discretize(data1[,1], breaks = 5, labels = c("very.low", "low", "med", "high", "very.high"))
# data1[,3] <- factor(data1[,3])
LIST(head(tData, 5))
frequentItems <- eclat (tData, parameter = list(supp = 0.07, maxlen = 10)) # calculates support for frequent items
inspect(frequentItems)
itemFrequencyPlot(tData, topN=20, type="absolute", main="Item Frequency") # plot frequent items
# rules <- apriori (tData, parameter = list(supp = 0.001, conf = 0.5)) # Min Support as 0.001, confidence as 0.8.
# rules_conf <- sort (rules, by="confidence", decreasing=TRUE) # 'high-confidence' rules.
# inspect(head(rules_conf)) # show the support, lift and confidence for all rules
#
# rules_lift <- sort (rules, by="lift", decreasing=TRUE) # 'high-lift' rules.
# inspect(head(rules_lift)) # show the support, lift and confidence for all rules
#-------
#high.utilisation <- apriori(tData, parameter = list(target = "frequent", supp=0.001))
rules <- apriori (data=tData, parameter=list (supp=0.001, conf = 0.2, minlen=2, maxlen=5), appearance = list (default="lhs",rhs="Utilisation.Category=+2"), control = list (verbose=F)) # get rules whose RHS is the high-utilisation category ("+2")
rules_conf <- sort (rules, by="confidence", decreasing=TRUE) # 'high-confidence' rules.
inspect(head(rules_conf, 40))
subsetRules <- which(colSums(is.subset(rules, rules)) > 1) # get subset rules in vector
length(subsetRules) #> 3913
rules <- rules[-subsetRules]
rules_conf <- sort (rules, by="confidence", decreasing=TRUE) # 'high-confidence' rules.
inspect(head(rules_conf, 10))
rules_lift <- sort (rules, by="lift", decreasing=TRUE) # 'high-lift' rules.
inspect(head(rules_lift))
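# Export sketch: the standard arules coercion turns a rules object into a
# data frame for reporting (file name hypothetical):
# ruleDF <- as(head(rules_lift, 10), "data.frame")
# write.csv(ruleDF, "top_lift_rules.csv", row.names = FALSE)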
|
/AssocRules.R
|
no_license
|
nader-chmait/BAC.R
|
R
| false
| false
| 3,163
|
r
|
# Calibrations on v1.6.12
sModelCalibrations <- c(
"name IsHyst data aggreg ParamFinalR",
"GR1A FALSE L0123001 %Y 9.112500e-01",
"GR2M FALSE L0123001 %Y%m 2.598228e+02;9.975000e-01",
"GR4J FALSE L0123001 NA 2.236316e+02;5.781516e-01;9.751439e+01;2.217718e+00",
"GR5J FALSE L0123001 NA 2.203864e+02;8.944531e-01;9.356407e+01;1.762872e+00;4.846427e-01",
"GR6J FALSE L0123001 NA 1.928762e+02;6.933087e-01;4.917833e+01;2.214542e+00;5.088240e-01;6.814626e+00",
"CemaNeigeGR4J FALSE L0123001 NA 2.043839e+02;5.781516e-01;1.025141e+02;2.217718e+00;0.000000e+00;1.490479e+01",
"CemaNeigeGR5J FALSE L0123001 NA 1.983434e+02;8.747758e-01;9.849443e+01;1.768769e+00;4.824825e-01;2.002002e-02;1.505459e+01",
"CemaNeigeGR6J FALSE L0123001 NA 1.830941e+02;5.551637e-01;6.034029e+01;2.217718e+00;4.760000e-01;6.049647e+00;2.002002e-02;1.520589e+01",
"CemaNeigeGR4J TRUE L0123001 NA 2.085127e+02;5.781516e-01;1.025141e+02;2.227477e+00;2.252252e-02;8.599316e+00;1.345000e+01;1.000000e+00",
"CemaNeigeGR5J TRUE L0123001 NA 2.023502e+02;9.015250e-01;9.849443e+01;1.788288e+00;4.834835e-01;2.252252e-02;8.599316e+00;1.345000e+01;1.000000e+00",
"CemaNeigeGR6J TRUE L0123001 NA 1.886701e+02;5.666293e-01;6.034029e+01;2.227477e+00;4.760000e-01;5.989452e+00;2.052052e-02;8.599316e+00;1.220000e+01;1.000000e+00",
"GR4H FALSE L0123003 NA 7.116766e+02;-1.158469e+00;1.505561e+02;4.686093e+00",
"GR5H FALSE L0123003 NA 8.040022e+02;-1.898488e-01;1.377525e+02;3.043663e+00;1.951163e-01",
"CemaNeigeGR4H FALSE L0123003 NA 1.581284e+03;-8.457959e-01;2.299844e+02;5.000000e-01;9.475779e-03;9.482445e+01",
"CemaNeigeGR5H FALSE L0123003 NA 3.267232e+01;-5.092029e+00;3.384799e+02;1.578534e+00;2.074272e-01;1.501502e-03;4.369420e+00",
"CemaNeigeGR4H TRUE L0123003 NA 1.746044e+03;-7.052756e-01;2.228887e+02;3.377089e+00;0.000000e+00;5.116962e+01;1.204664e+01;5.052849e-01",
"CemaNeigeGR5H TRUE L0123003 NA 6.717382e+01;-1.522839e+00;1.393246e+02;2.493137e+00;2.333041e-01;1.216408e-03;3.328200e-01;5.369605e+01;9.800789e-01"
)
dfModels <- read.table(text = paste(sModelCalibrations, collapse = "\n"), header = TRUE)
PrepareCalibration <- function(model) {
model <- as.list(model)
sModel <- paste0("RunModel_", model$name)
sIM_FUN_MOD <- sModel
if (model$data == "L0123003") {
# hourly time step database
dates <- c("2004-01-01 00:00", "2004-12-31 23:00", "2005-01-01 00:00", "2008-12-31 23:00")
date_format = "%Y-%m-%d %H:%M"
TempMean <- fakeHourlyTemp()
} else {
# yearly, monthly, daily time step databases
dates <- c("1985-01-01", "1985-12-31", "1986-01-01", "2012-12-31")
date_format <- "%Y-%m-%d"
if (!is.na(model$aggreg)) {
# Aggregation on monthly and yearly databases
sIM_FUN_MOD <- "RunModel_GR4J" # CreateInputsModel with daily data
date_format <- model$aggreg
}
}
## loading catchment data
data(list = model$data)
if (model$data != "L0123003") TempMean <- BasinObs$T
# preparation of the InputsModel object
InputsModel <- CreateInputsModel(FUN_MOD = sIM_FUN_MOD,
DatesR = BasinObs$DatesR,
Precip = BasinObs$P,
PotEvap = BasinObs$E,
TempMean = TempMean,
ZInputs = median(BasinInfo$HypsoData),
HypsoData = BasinInfo$HypsoData,
NLayers = 5)
if (!is.na(model$aggreg)) {
# conversion of InputsModel to target time step
InputsModel <- SeriesAggreg(InputsModel, Format = model$aggreg)
dfQobs <- SeriesAggreg(data.frame(DatesR = BasinObs$DatesR, Qmm = BasinObs$Qmm),
Format = model$aggreg, ConvertFun = "sum")
Obs <- dfQobs$Qmm
} else {
Obs <- BasinObs$Qmm
}
# calibration period selection
dates <- sapply(dates, function(x) format(as.Date(x), format = date_format))
Ind_WarmUp <- seq(
which(format(InputsModel$DatesR, format = date_format)==dates[1]),
which(format(InputsModel$DatesR, format = date_format)==dates[2])
)
Ind_Run <- seq(
which(format(InputsModel$DatesR, format = date_format)==dates[3]),
which(format(InputsModel$DatesR, format = date_format)==dates[4])
)
# preparation of the RunOptions object
suppressWarnings(
RunOptions <- CreateRunOptions(
FUN_MOD = sModel,
InputsModel = InputsModel,
IndPeriod_Run = Ind_Run,
IndPeriod_WarmUp = Ind_WarmUp,
IsHyst = as.logical(model$IsHyst)
)
)
# calibration criterion: preparation of the InputsCrit object
InputsCrit <- CreateInputsCrit(FUN_CRIT = ErrorCrit_NSE, InputsModel = InputsModel,
RunOptions = RunOptions, Obs = Obs[Ind_Run])
# preparation of CalibOptions object
CalibOptions <- CreateCalibOptions(sModel, IsHyst = as.logical(model$IsHyst))
  # return the whole local environment so callers can import every prepared object
  return(environment())
}
ModelCalibration <- function(model) {
e <- PrepareCalibration(model)
  for(n in ls(e, all.names=TRUE)) assign(n, get(n, e)) # import everything prepared by PrepareCalibration()
# calibration
suppressWarnings(OutputsCalib <- Calibration(InputsModel = InputsModel, RunOptions = RunOptions,
InputsCrit = InputsCrit, CalibOptions = CalibOptions,
FUN_MOD = sModel))
OutputsCalib$ParamFinalR
}
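# Usage sketch (hypothetical driver; assumes dfModels defined above):
# ParamsList <- lapply(seq_len(nrow(dfModels)),
#                      function(i) ModelCalibration(dfModels[i, ]))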
#' Create fake hourly temperature from the daily temperatures in dataset L0123002
#'
#' @param start_date [character] start date in format "%Y-%m-%d"
#' @param end_date [character] end date in format "%Y-%m-%d"
#' @return [numeric] hourly temperature time series between `start_date` and `end_date`
fakeHourlyTemp <- function(start_date = "2004-01-01", end_date = "2008-12-31") {
dates <- as.POSIXct(c(start_date, end_date), tz = "UTC")
data(L0123002)
indJ <- seq.int(which(BasinObs$DatesR == as.POSIXct(dates[1])),
which(BasinObs$DatesR == as.POSIXct(dates[2])))
TJ <- BasinObs$T[indJ]
  TH <- approx((seq.int(length(TJ)) - 1) * 24, TJ,
               seq.int(length(TJ) * 24) - 1,
               rule = 2)$y
varT_1J <- -sin(0:23/24 * 2 * pi) # Temp min at 6 and max at 18
varT <- rep(varT_1J, length(TJ))
TH <- TH + varT * 5 # For a mean daily amplitude of 10°
TH
}
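# Quick sanity sketch (hypothetical check): one value per hour over the period
# TH <- fakeHourlyTemp("2004-01-01", "2008-12-31")
# length(TH) / 24  # number of days covered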
|
/tests/testthat/helper_scheduled_Calibration.R
|
no_license
|
cran/airGR
|
R
| false
| false
| 6,566
|
r
|
library(VAST)
library(TMB)
library(dplyr)
library(tidyr)
library(reshape)
library(mapdata)
library(ggplot2)
# Directories ----
comp.name <- c("mkapur",'maia kapur')[2]
RootFile <- paste0( "C:/Users/",comp.name ,"/Dropbox/UW/sab-idx/runs/")
DataFile <- paste0( "C:/Users/",comp.name ,"/Dropbox/UW/sab-idx/data/" ) #paste0( RootFile,"Data/")
# Resolution
n_x <- 250 # Number of stations
# Choose species
Species <- "Anoplopoma fimbria"
Species_code <- 'SAB' # switch(Species, "arrowtooth flounder"="arrow", "Pacific ocean perch"="POP")
Surveys_to_include <- c("Triennial", "WCGBTS", "BCs", "BCo",
"BCt", "AK_DOM_LL", "GOA", "EBS")[c(1:4,6:7)] #This will only work for years after 2003
# Date
Date <- Sys.Date()
BaseQ <- c("GOA_late","AK_DOM_late", "AK_DOM_LL","WCGBTS")[3]
Year_Range = c(1980, 2018)
DateFile <- paste0(RootFile,Date,"_nx=",n_x,"_",
paste0(Surveys_to_include, collapse = "_"),
"_baseQ=",BaseQ,
paste0(Year_Range, collapse = "_"),"/")
dir.create(DateFile)
FieldConfig = matrix( c("Omega1"=1, "Epsilon1"=1, "Omega2"=1,
"Epsilon2"=1, "Beta1"="IID", "Beta2"="IID"), nrow=3, byrow=TRUE )
Aniso = FALSE
Version = "VAST_v8_0_0" # get_latest_version( package="VAST" )
OverdispersionConfig = c("Eta1"=0, "Eta2"=0)
ObsModel <- c(2,0) ## gamma for catch, pos only for enctr # 0=normal (log-link); 1=lognormal; 2=gamma; 4=ZANB; 5=ZINB; 11=lognormal-mixture; 12=gamma-mixture
Spatial_Smoother = c("Index", "Smoothed_Index", "Spatiotemporal", "Spatiotemporal_AR")[3]
BC_catchability = c("Separate")[1]
BiasCorr = c(FALSE,TRUE)[1]
Zone = 5
Options = c("Calculate_Range"=TRUE, "Calculate_effective_area"=TRUE,
"SD_site_logdensity"=FALSE)
Use_REML = TRUE
fine_scale = TRUE
create_strata_per_region = TRUE
Method = c("Grid", "Mesh", "Spherical_mesh")[2]
grid_size_km = 25
# Derived - NOTE THAT RhoConfig[1:2] must be 0 when using ObsModel[2]=3: Other options are not coded to work together
if( Spatial_Smoother=="Index" ) RhoConfig = c("Beta1"=0, "Beta2"=0, "Epsilon1"=0, "Epsilon2"=0)
if( Spatial_Smoother=="Smoothed_Index" ) RhoConfig = c("Beta1"=0, "Beta2"=0, "Epsilon1"=2, "Epsilon2"=2)
if( Spatial_Smoother=="Spatiotemporal" ) RhoConfig = c("Beta1"=3, "Beta2"=3, "Epsilon1"=2, "Epsilon2"=2) # Pointwise random walk (epsilon is RW, Beta1 is constant for all years)
if( Spatial_Smoother=="Spatiotemporal_AR" ) RhoConfig = c("Beta1"=0, "Beta2"=0, "Epsilon1"=3, "Epsilon2"=3) # Pointwise autocorrelation (beta is freely estimated in each year)
# Save options for future records
Record <- ThorsonUtilities::bundlelist( c("Version","Method","grid_size_km","n_x","BC_catchability","BaseQ","Use_REML","fine_scale",
"FieldConfig","RhoConfig","OverdispersionConfig", "Year_Range",
"ObsModel","Aniso","fine_scale","Options", "create_strata_per_region") )
save( Record, file=paste0(DateFile,"Record.RData"))
## Create DATA_CPUE ----
## Ensure this roughly matches what is built for VAST WC.
Data_CPUE <- data.frame(stringsAsFactors = FALSE)
if( "WCGBTS" %in% Surveys_to_include ){
## using my data (older)
# WCGBTS <- read.csv( paste0(DataFile,"Catch__NWFSC.Combo_2019-03-15.csv")) #read.csv( paste0(DataFile,"SurveyHaulAndCatchData03To14--HaulFishCatchData03To14.csv"), skip=8, header=TRUE)
# WCGBTS$AreaSwept_km2 <- WCGBTS[,"Area_Swept_ha"]*0.01
# Data1 <- ThorsonUtilities::rename_columns( Data1[,c('Year','Latitude_dd','Longitude_dd','AreaSwept_km2','total_catch_wt_kg',"Vessel")],
# newname=c('Year','Lat','Lon','AreaSwept_km2','Catch_KG',"Vessel"))
## using Kelli's data
load(paste0("C:/Users/", comp.name,"/Dropbox/UW/sab-idx/runs/sabWCVAST/WCGBTS/DatabaseSave.Rdata"))
WCGBTS <- Database #read.csv( paste0(DataFile,"SurveyHaulAndCatchData03To14--HaulFishCatchData03To14.csv"), skip=8, header=TRUE)
Data1 <- WCGBTS %>% select(Year, Lat, Lon, AreaSwept_km2, Catch_KG, Vessel)
Data1$Vessel <- as.character(Data1$Vessel)
  Data1$AreaSwept_km2 # quick interactive check of the effort column
# Data1 <- WCGBTS[,c('Trawl_id','Year','Latitude_dd','Longitude_dd','AreaSwept_km2','total_catch_wt_kg',"Vessel")]
rm(WCGBTS)
Data1 <- cbind("Survey"="WCGBTS", "Region"="CC", Data1)
Data_CPUE <- rbind( Data_CPUE, Data1 )
rm(Data1)
}
# Load triennial
# Has some problem with multiple replicated samples
## There ARE zeroes here already...
if( "Triennial" %in% Surveys_to_include ){
ThorsonUtilities::LoadFn( paste0(DataFile,"Catch__Triennial_2019-03-15.Rda"))
Data2 <- Out
rm(Out)
Data2 <- cbind(Data2, "AreaSwept_km2"= Data2[,"Area_Swept_ha"]*0.01) #Data2[,'DISTANCE_FISHED']*Data2[,'NET_WIDTH']/1000 )
Data2 <- ThorsonUtilities::rename_columns( Data2[,c('Year','Latitude_dd','Longitude_dd','AreaSwept_km2','total_catch_wt_kg',"Vessel")],
newname=c('Year','Lat','Lon','AreaSwept_km2','Catch_KG',"Vessel"))
## Create timeblocking
Triennial_late <- subset(Data2, Year > 1995) %>% mutate(Survey = 'Triennial_late', Region = "CC")
Triennial_early <- subset(Data2, Year <= 1995) %>% mutate(Survey = 'Triennial_early', Region = "CC")
Data_CPUE = rbind( Data_CPUE, Triennial_late, Triennial_early)
rm(Data2)
}
# Load BC trap survey strs
if( "BCs" %in% Surveys_to_include ){
# Exclude PCOD monitoring survey, which is non-random
# SpeciesCode = switch( Species, "arrowtooth flounder"='ARF_KG', "Pacific ocean perch"='POP_KG' )
BCs <- read.csv(paste0(DataFile,"/BC/BC_sable_survey_data.Aug262019.csv")) %>%
filter(START_LONGITUDE <= 0 & !is.na(CPUE_TRAPS) & !is.na(TOTAL_SABLE_WEIGHT) &
SABLE_SET_TYPE == 'StRS') %>%
## calc area including soak time
mutate(AreaSwept_km2=CPUE_TRAPS*DURATION_MINUTES/10000, ## to put on same scale as others
TRIP_ID2 = paste(SET_YEAR,START_LATITUDE, START_LONGITUDE))
Data3 <- ThorsonUtilities::rename_columns( BCs[,c("TRIP_ID2", 'SABLE_SET_TYPE','SET_YEAR','START_LATITUDE','START_LONGITUDE','AreaSwept_km2',"TOTAL_SABLE_WEIGHT","VESSEL_ID")],
newname=c("TRIP_ID", 'Survey','Year','Lat','Lon','AreaSwept_km2','Catch_KG',"Vessel")) %>%
# complete(Year,nesting(TRIP_ID,Lat,Lon),
# fill = list(Catch_KG = 0.0)) %>%
mutate(Survey = 'BC_StRS', Region = 'BC') %>% select(-TRIP_ID)
Data_CPUE <- rbind( Data_CPUE, Data3 )
rm(Data3); rm(BCs)
}
# Load BC offshore standardized
if( "BCo" %in% Surveys_to_include ){
# Exclude PCOD monitoring survey, which is non-random
# SpeciesCode = switch( Species, "arrowtooth flounder"='ARF_KG', "Pacific ocean perch"='POP_KG' )
BCo <- read.csv(paste0(DataFile,"/BC/BC_sable_survey_data.Aug262019.csv")) %>%
filter(START_LONGITUDE <= 0 & !is.na(CPUE_TRAPS) & !is.na(TOTAL_SABLE_WEIGHT) &
SABLE_SET_TYPE == 'OFFSHORE STANDARDIZED') %>%
## calc area including soak time
mutate(AreaSwept_km2=CPUE_TRAPS*DURATION_MINUTES/10000, ## to put on same scale as others
TRIP_ID2 = paste(SET_YEAR,START_LATITUDE, START_LONGITUDE))
Data3b <- ThorsonUtilities::rename_columns( BCo[,c("TRIP_ID2", 'SABLE_SET_TYPE','SET_YEAR','START_LATITUDE','START_LONGITUDE','AreaSwept_km2',"TOTAL_SABLE_WEIGHT","VESSEL_ID")],
newname=c("TRIP_ID", 'Survey','Year','Lat','Lon','AreaSwept_km2','Catch_KG',"Vessel")) %>%
# complete(Year,nesting(TRIP_ID,Lat,Lon),
# fill = list(Catch_KG = 0.0)) %>%
mutate(Survey = 'BC_OffStd', Region = 'BC') %>% select(-TRIP_ID)
Data_CPUE <- rbind( Data_CPUE, Data3b )
rm(Data3b); rm(BCo)
}
## BC Trawl survey
if( "BCt" %in% Surveys_to_include ){
# Exclude PCOD monitoring survey, which is non-random
# SpeciesCode = switch( Species, "arrowtooth flounder"='ARF_KG', "Pacific ocean perch"='POP_KG' )
BCt <- read.csv(paste0(DataFile,"/BC/BC_trawl_survey_sable_data.Oct312019.csv")) %>%
filter(LONGITUDE <= 0 & !is.na(TOW_LENGTH_M) & !is.na(CATCH_WEIGHT) ) %>%
mutate("AreaSwept_km2"=as.numeric(as.character(TOW_LENGTH_M))/1000)
# BCt <- cbind( BCt, #/1e6) ## to scale effort
# BCt <- BCt[-which(BCt[,'ACTIVITY_DESC']=="HECATE STRAIT PCOD MONITORING TRAWL SURVEY"),]
Data3a <- ThorsonUtilities::rename_columns( BCt[,c("SURVEY_ID", 'SURVEY_DESC','YEAR','LATITUDE','LONGITUDE',
'AreaSwept_km2',"CATCH_WEIGHT","VESSEL_NAME")],
newname=c("TRIP_ID", 'Survey','Year','Lat','Lon','AreaSwept_km2','Catch_KG',"Vessel")) %>%
mutate(Survey = 'BC_TRAWL', Region = 'BC')%>% select(-TRIP_ID)
rm(BCt)
Data_CPUE <- rbind( Data_CPUE, Data3a )
rm(Data3a)
}
if( "AK_DOM_LL" %in% Surveys_to_include ){
# "Your data looks pretty good for the longline survey"
## slow to make the merge; did once and save.
# AK_DOM_LL_Loc <- read.csv( paste0(DataFile,"AK/LLData/catch_summary_view_with_nulls.csv"), header=TRUE,skip = 6) %>%
# filter(Year > 1989)
# ## these have a small fudge factor leading to many dupes. get a mean survey location for each.
# AK_DOM_LL0 <- read.csv( paste0(DataFile,"AK/LLData/SurveyCatchAnalysis.csv"), header = TRUE)
# names(AK_DOM_LL0)[1] <- 'Year'
#
# # ## get station numbers from dom LL
# AK_DOM_LL <- AK_DOM_LL_Loc %>% group_by(Station.Number,Year) %>% dplyr::summarise(meanLat = mean(Start.Latitude..DD.),
# meanLon = mean(Start.Longitude..DD.)) %>%
# select(Station.Number,Year, meanLat, meanLon) %>%
# merge(AK_DOM_LL0,.,
# by = c("Year",'Station.Number'), all.y = TRUE)
# # ## overwrite NA weights to zero
# AK_DOM_LL$Total.Weight..kg.[is.na( AK_DOM_LL$Total.Weight..kg.)] <- 0
# write.csv(AK_DOM_LL, file = paste0(DataFile,"AK/LLData/merged_AK_DOM_LL.csv") )
## manually add vessels
# the domestic LL survey for 1989-1993 was the vessel 'Ocean Prowler',
## starting in 1994-present the 'Alaskan Leader' surveyed even years
## and the 'Ocean Prowler' does odd years.
Data5 <- read.csv(paste0(DataFile,"AK/LLData/merged_AK_DOM_LL.csv") )%>%
mutate(AreaSwept_km2 = 0.01, Vessel = ifelse(Year < 1994, "Ocean Prowler",
ifelse(Year %% 2 == 0,
"Alaskan Leader", "Ocean Prowler" ))) %>%
select(c('Year','meanLat','meanLon','AreaSwept_km2','Total.Weight..kg.','Vessel'))
# Data5 = FishData::add_missing_zeros( data_frame=GOA, Method=ZeroMethod, if_multiple_records=ifelse(ZeroMethod=="Slow",'Error','Combine'),
# unique_sample_ID_colname="TowID", sample_colname="WEIGHT..KG.", species_subset=Species, species_colname="COMMON.NAME" )
Data5 <- ThorsonUtilities::rename_columns( Data5[,c('Year','meanLat','meanLon','AreaSwept_km2','Total.Weight..kg.',"Vessel")],
newname=c('Year','Lat','Lon','AreaSwept_km2','Catch_KG',"Vessel")) %>%
mutate(Survey = 'AK_DOM_LL', Region = 'AK')
Data5$Lon <- ifelse( Data5$Lon > 0, Data5$Lon*-1, Data5$Lon) ## otherwise things in japan
# AK_DOM_late <- subset(Data5, Year > 2009) %>% mutate(Survey = 'AK_DOM_late', "Region" = "AK")
# AK_DOM_early <- subset(Data5, Year <= 2009) %>% mutate(Survey = 'AK_DOM_early', "Region" = "AK")
Data_CPUE <- rbind( Data_CPUE, Data5) #AK_DOM_early, AK_DOM_late )
# Data_CPUE <- rbind( Data_CPUE, AK_DOM_early, AK_DOM_late )
rm(Data5)
}
# Load GOA trawl -- these already have zeros
if( "GOA" %in% Surveys_to_include ){
  ## Per DH: filter by depth (Gear.Depth <= 500 m applied here), drop 1984 and 1987, and split at 1993
ALL_GOA <- read.csv( paste0(DataFile,"AK/race_cpue_by_haul.csv"), header=TRUE ) %>%
filter( Gear.Depth <= 500 & !(Year %in% c(1984,1987)) )
names(ALL_GOA) <- toupper(names(ALL_GOA))
GOA <- ALL_GOA %>%
filter(SURVEY == 'GOA') %>%
cbind( ., "Vessel" = as.factor(.$VESSEL.NUMBER),
"TowID"=paste(.[,'YEAR'],.[,'STARTING.LATITUDE..DD.'],.[,'STARTING.LONGITUDE..DD.'],sep="_"),
"AreaSwept_km2"=0.01) #0.01
AI <- ALL_GOA %>%
filter(SURVEY == 'AI') %>%
cbind( ., "Vessel" = as.factor(.$VESSEL.NUMBER),
"TowID"=paste(.[,'YEAR'],.[,'STARTING.LATITUDE..DD.'],.[,'STARTING.LONGITUDE..DD.'],sep="_"),
"AreaSwept_km2"=0.01) #0.01
Data4g <- GOA %>% select(c("YEAR","STARTING.LATITUDE..DD.","STARTING.LONGITUDE..DD.","AreaSwept_km2","WEIGHT..KG.","Vessel"))
Data4a <- AI %>% select(c("YEAR","STARTING.LATITUDE..DD.","STARTING.LONGITUDE..DD.","AreaSwept_km2","WEIGHT..KG.","Vessel"))
rm(GOA); rm(AI)
Data4g <- ThorsonUtilities::rename_columns( Data4g[,c('YEAR','STARTING.LATITUDE..DD.','STARTING.LONGITUDE..DD.','AreaSwept_km2','WEIGHT..KG.',"Vessel")],
newname=c('Year','Lat','Lon','AreaSwept_km2','Catch_KG','Vessel'))
Data4a <- ThorsonUtilities::rename_columns( Data4a[,c('YEAR','STARTING.LATITUDE..DD.','STARTING.LONGITUDE..DD.','AreaSwept_km2','WEIGHT..KG.',"Vessel")],
newname=c('Year','Lat','Lon','AreaSwept_km2','Catch_KG','Vessel'))
Data4a$Lon <- ifelse( Data4a$Lon > 0, Data4a$Lon*-1, Data4a$Lon) ## otherwise things in japan
Data4g$Lon <- ifelse( Data4g$Lon > 0, Data4g$Lon*-1, Data4g$Lon) ## otherwise things in japan
## Create timeblocking -- 2010 was a growth morph year break
# GOA_mid <- subset(Data4, Year > 1993 & Year < 2010) %>% mutate(Survey = 'GOA_mid', Region = "GOA")
GOA_late <- subset(Data4g, Year > 1993 ) %>% mutate(Survey = 'GOA_late', Region = "GOA")
GOA_early <- subset(Data4g, Year <= 1993 ) %>% mutate(Survey = 'GOA_early', Region = "GOA")
AI_late <- subset(Data4a, Year > 1993 ) %>% mutate(Survey = 'AI_late', Region = "AI")
AI_early <- subset(Data4a, Year <= 1993 ) %>% mutate(Survey = 'AI_early', Region = "AI")
Data_CPUE <- rbind( Data_CPUE, GOA_early, GOA_late, AI_late, AI_early)
rm(Data4g);rm(Data4a); rm(GOA_late); rm(GOA_early);rm(AI_late); rm(AI_early)
}
Data_Geostat <- Data_CPUE[which(Data_CPUE$Year>=Year_Range[1] & Data_CPUE$Year<=Year_Range[2]),]
## RESCALE THE DATA SO IT IS NOT HUGE
Data_Geostat$Catch_KG <- Data_Geostat$Catch_KG/1000 #(Data_Geostat$Catch_KG - mean(Data_Geostat$Catch_KG))/sd(Data_Geostat$Catch_KG)
Data_Geostat <- na.omit( Data_Geostat )
save(Data_Geostat, file = paste0(DateFile,"/Data_Geostat.Rdata"))
## Quick sanity checks by survey
Data_Geostat %>% group_by(Survey) %>% dplyr::summarise(min(Year)) ## should be >= Year_Range[1]
Data_Geostat %>% group_by(Survey) %>% dplyr::summarise(min(Catch_KG), max(Catch_KG)) ## should be 0 to pos
Data_Geostat %>% group_by(Survey) %>% dplyr::summarise(min(AreaSwept_km2), max(AreaSwept_km2)) ## should be positive
Region <- NULL
## This is Thorson's -- Kelli had a way of pre-subsetting to have N/S embedded
if( TRUE ){
if(any(c("WCGBTS","Triennial") %in% Surveys_to_include)) Region = c( Region, "California_current")
if("BCs" %in% Surveys_to_include | "BCt" %in% Surveys_to_include) Region = c( Region, "British_Columbia" )
if("GOA" %in% Surveys_to_include) Region = c( Region, "Gulf_of_Alaska" )
if("EBS" %in% Surveys_to_include) Region = c( Region, "Eastern_Bering_Sea" )
if("AK_DOM_LL" %in% Surveys_to_include) Region = c( Region, "Gulf_of_Alaska", "Eastern_Bering_Sea" )
Extrapolation_List <- make_extrapolation_info( Region=Region, strata_to_use=c('SOG','WCVI','QCS','HS','WCHG'),
zone=Zone, create_strata_per_region=create_strata_per_region )
}else{
if(any(c("WCGBTS","Triennial") %in% Surveys_to_include)) Region = c( Region, "California_current")
if("BCs" %in% Surveys_to_include | "BCt" %in% Surveys_to_include) Region = c( Region, "British_Columbia" )
if("GOA" %in% Surveys_to_include) Region = c( Region, "Gulf_of_Alaska" )
if("EBS" %in% Surveys_to_include) Region = c( Region, "Eastern_Bering_Sea" )
if("AK_DOM_LL" %in% Surveys_to_include) Region = c( Region, "Gulf_of_Alaska", "Eastern_Bering_Sea" )
observations_LL <- Data_Geostat[ which(Data_Geostat[,'Region']=="BC"), c('Lat','Lon') ]
Extrapolation_List <- make_extrapolation_info( Region=Region,
observations_LL=observations_LL, zone=Zone, create_strata_per_region=create_strata_per_region )
}
## Make spatial list ----
Spatial_List <- make_spatial_info( n_x=n_x, Lon=Data_Geostat[,'Lon'],
Lat=Data_Geostat[,'Lat'],
Extrapolation_List=Extrapolation_List,
DirPath=DateFile, Save_Results=FALSE,
"knot_method"="grid", refine=FALSE,
fine_scale=fine_scale )
# Plot details
MapDetails_List <- make_map_info( "Region"="Other",
"spatial_list"=Spatial_List,
"Extrapolation_List"=Extrapolation_List )
Year_Set <- min(Data_Geostat[,'Year']):max(Data_Geostat[,'Year'])
# Exclude surveys without any encounters
EncNum_k <- tapply( Data_Geostat[,'Catch_KG'], INDEX=Data_Geostat[,'Survey'],
FUN=function(vec){sum(vec>0)} )
if( any(EncNum_k==0) ){
Which = which( EncNum_k==0 )
Which2Remove = which( Data_Geostat[,'Survey'] %in% names(Which) )
Data_Geostat = Data_Geostat[-Which2Remove,]
Spatial_List$loc_i = Spatial_List$loc_i[-Which2Remove,]
Spatial_List$knot_i = Spatial_List$knot_i[-Which2Remove]
Data_Geostat[,'Survey'] = droplevels( Data_Geostat[,'Survey'] )
}
# Make catchability matrix (Q_ik) ----
# The resulting Q_ik will have n-1 columns, with baseQ excluded.
if( length(unique(Data_Geostat[,'Survey']))==1 |
length(unique(Data_Geostat[,'Region'])) == 1){
Q_ik <- matrix(0, ncol=1, nrow=nrow(Data_Geostat))
}else{
Q_ik <- ThorsonUtilities::vector_to_design_matrix( Data_Geostat[,'Survey'] )
if( !(BaseQ %in% colnames(Q_ik)) ) stop("Problem with Q_ik")
Q_ik <- Q_ik[,-which(colnames(Q_ik)==BaseQ),drop=FALSE]
}
head(Q_ik) ## should have ncol == fleets-1
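## Illustrative sketch of the design-matrix step (hypothetical 3-survey case, base R only):
## surveys <- factor(c("A", "B", "C", "A"))
## Q_full  <- model.matrix(~ surveys - 1)                  # one 0/1 indicator column per survey
## Q_full[, colnames(Q_full) != "surveysB", drop = FALSE]  # drop the base survey, as done above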
# Plot location of data
png(paste0(DateFile,"/Extrapolation_List.png"), width = 8, height = 6, units = 'in', res = 520)
plot( Extrapolation_List )
dev.off()
png(paste0(DateFile,"/Spatial_List.png"), width = 8, height = 6, units = 'in', res = 520)
plot( Spatial_List )
dev.off()
## from CC version
TmbData <- VAST::make_data(
#"X_itp"=X_itp,
#"X_gtp"=X_gtp,
#"Xconfig_zcp"=Xconfig_zcp,
"Version"=Version,
"Aniso"=Aniso,
"FieldConfig"=FieldConfig,
"OverdispersionConfig" = c("Eta1"=0, "Eta2"=0),
"RhoConfig"=RhoConfig,
"ObsModel"= ObsModel,
"c_i"=rep(0,nrow(Data_Geostat)),
"b_i"=Data_Geostat[,'Catch_KG'],
"a_i"=Data_Geostat[,'AreaSwept_km2'],
"v_i"= as.numeric(as.factor(Data_Geostat[,'Vessel'])),#-1,
"t_i"= Data_Geostat[,'Year'],
"Q_ik" = Q_ik,
"spatial_list"=Spatial_List,
"Options"=Options )
save(TmbData, file = paste0(DateFile,"/TmbData.Rdata"))
# Make TMB object
TmbList <- make_model("build_model"=TRUE, "TmbData"=TmbData, "RunDir"=DateFile,
"Version"=Version, "RhoConfig"=RhoConfig,
"loc_x"=Spatial_List$loc_x, "Method"=Method, "TmbDir"=getwd())
save(TmbList, file = paste0(DateFile,"/TmbList.Rdata"))
# Run model ----
Obj <- TmbList[["Obj"]]
Obj$par['lambda2_k'] ## should not be NA unless there is a single fleet (Q_ik then has a single zero column)
Obj$par['lambda1_k']
# Obj$par['gamma1_k']
Opt <- TMBhelper::fit_tmb(
obj = Obj,
lower = TmbList[["Lower"]],
upper = TmbList[["Upper"]],
newtonsteps = 1,
getsd = TRUE,
bias.correct = TRUE,
bias.correct.control = list(vars_to_correct = "Index_cyl"),
savedir = DateFile
) # , rel.tol=1e-20
Report <- TmbList$Obj$report()
ParHat <- TmbList$Obj$env$parList()
# Save stuff [NOTE OBJ IS INSIDE SAVE]
Save <- list("Opt"=Opt,
"Report"=Report,
"ParHat"=TmbList$Obj$env$parList(Opt$par),
'Obj' = Obj)
save(Save, file=paste0(DateFile,"Save_original.RData"))
plot_data( Extrapolation_List=Extrapolation_List, Spatial_List=Spatial_List,
Data_Geostat=Data_Geostat, PlotDir=DateFile,
Plot1_name="Data_and_knots.png", Plot2_name="Data_by_year.png", col="red")
# Plot index
Index <- plot_biomass_index( DirName=DateFile,
TmbData=TmbData,
use_biascorr = BiasCorr,
Sdreport=Opt$SD,
Year_Set=Year_Set,
strata_names=c('AllAreas',Region),
plot_log=TRUE, width=6, height=6 ) # , total_area_km2=sum(a_xl[,1])
# load(paste0(DateFile,"Save_original.Rdata"))
Opt <- Save$Opt
Report <- Save$Report
plot_range_index( Sdreport=Opt$SD, Report=Report, Year_Set=Year_Set, TmbData=TmbData,
Znames=colnames(TmbData$Z_xm), PlotDir=DateFile )
# Plot Anisotropy
plot_anisotropy( FileName=paste0(DateFile,"Aniso.png"), Report=Report )
# Plot encounter rate diagnostics
# plot_quantile_diagnostic( Report=Report, TmbData=TmbData, DateFile=DateFile)
# Positive catch rate diagnostics
Q <- plot_quantile_diagnostic( TmbData=TmbData, Report=Report, DateFile=DateFile ) # SpatialDeltaGLMM::
# Pearson residuals diagnostics
plot_residuals( Lat_i=Data_Geostat[,'Lat'], Lon_i=Data_Geostat[,'Lon'],
extrapolation_list = Extrapolation_List,
TmbData=TmbData, Report=Report, Q=Q, savedir=DateFile, spatial_list=Spatial_List )
# Plot density
plot_maps( plot_set=3, Report=Report, PlotDF=MapDetails_List[["PlotDF"]],
working_dir=DateFile, Year_Set=Year_Set )
## plot easting-northing shifts; effective area occupied is also reported because
## Options['Calculate_effective_area'] = TRUE was set above
plot_range_index( Sdreport=Save$Opt$SD, Report=Save$Report, Year_Set=Year_Set,
TmbData=TmbData, Znames=colnames(TmbData$Z_xm),
PlotDir=DateFile
)
# source("https://raw.githubusercontent.com/nwfsc-assess/VASTWestCoast/2473eb0ca2c25aa780e39ff1a94e7252d0d335bc/R/summary_nwfsc.R")
source("./R/summary_nwfscMK.r")
TableC <- summary_nwfscMK(obj = Save$Obj,
sdreport = Save$Opt$SD,
savedir = DateFile)[[3]]
TableC %>% data.frame() %>%
exp() %>% round(.,2) %>%
mutate('PAR'=row.names(TableC)) %>%
write.csv(.,file = paste0(DateFile,'tableC_mod.csv'))
|
/R/deprecated_code/draft3_v1.R
|
no_license
|
mkapur/sab-idx
|
R
| false
| false
| 22,573
|
r
|
library(VAST)
library(TMB)
library(dplyr)
library(tidyr)
library(reshape)
library(mapdata)
library(ggplot2)
# Directories ----
comp.name <- c("mkapur",'maia kapur')[2]
RootFile <- paste0( "C:/Users/",comp.name ,"/Dropbox/UW/sab-idx/runs/")
DataFile <- paste0( "C:/Users/",comp.name ,"/Dropbox/UW/sab-idx/data/" ) #paste0( RootFile,"Data/")
# Resolution
n_x <- 250 # Number of stations
# Choose species
Species <- "Anoplopoma fimbria"
Species_code <- 'SAB' # switch(Species, "arrowtooth flounder"="arrow", "Pacific ocean perch"="POP")
Surveys_to_include <- c("Triennial", "WCGBTS", "BCs", "BCo",
"BCt", "AK_DOM_LL", "GOA", "EBS")[c(1:4,6:7)] #This will only work for years after 2003
# Date
Date <- Sys.Date()
BaseQ <- c("GOA_late","AK_DOM_late", "AK_DOM_LL","WCGBTS")[3]
Year_Range = c(1980, 2018)
DateFile <- paste0(RootFile,Date,"_nx=",n_x,"_",
paste0(Surveys_to_include, collapse = "_"),
"_baseQ=",BaseQ,
paste0(Year_Range, collapse = "_"),"/")
dir.create(DateFile)
FieldConfig = matrix( c("Omega1"=1, "Epsilon1"=1, "Omega2"=1,
"Epsilon2"=1, "Beta1"="IID", "Beta2"="IID"), nrow=3, byrow=TRUE )
Aniso = FALSE
Version = "VAST_v8_0_0" # get_latest_version( package="VAST" )
OverdispersionConfig = c("Eta1"=0, "Eta2"=0)
ObsModel <- c(2,0) ## gamma for catch, pos only for enctr # 0=normal (log-link); 1=lognormal; 2=gamma; 4=ZANB; 5=ZINB; 11=lognormal-mixture; 12=gamma-mixture
Spatial_Smoother = c("Index", "Smoothed_Index", "Spatiotemporal", "Spatiotemporal_AR")[3]
BC_catchability = c("Separate")[1]
BiasCorr = c(FALSE,TRUE)[1]
Zone = 5
Options = c("Calculate_Range"=TRUE, "Calculate_effective_area"=TRUE,
"SD_site_logdensity"=FALSE)
Use_REML = TRUE
fine_scale = TRUE
create_strata_per_region = TRUE
Method = c("Grid", "Mesh", "Spherical_mesh")[2]
grid_size_km = 25
# Derived - NOTE THAT RhoConfig[1:2] must be 0 when using ObsModel[2]=3: Other options are not coded to work together
if( Spatial_Smoother=="Index" ) RhoConfig = c("Beta1"=0, "Beta2"=0, "Epsilon1"=0, "Epsilon2"=0)
if( Spatial_Smoother=="Smoothed_Index" ) RhoConfig = c("Beta1"=0, "Beta2"=0, "Epsilon1"=2, "Epsilon2"=2)
if( Spatial_Smoother=="Spatiotemporal" ) RhoConfig = c("Beta1"=3, "Beta2"=3, "Epsilon1"=2, "Epsilon2"=2) # Pointwise random walk (epsilon is RW, Beta1 is constant for all years)
if( Spatial_Smoother=="Spatiotemporal_AR" ) RhoConfig = c("Beta1"=0, "Beta2"=0, "Epsilon1"=3, "Epsilon2"=3) # Pointwise autocorrelation (beta is freely estimated in each year)
# Save options for future records
Record <- ThorsonUtilities::bundlelist( c("Version","Method","grid_size_km","n_x","BC_catchability","BaseQ","Use_REML","fine_scale",
"FieldConfig","RhoConfig","OverdispersionConfig", "Year_Range",
"ObsModel","Aniso","fine_scale","Options", "create_strata_per_region") )
save( Record, file=paste0(DateFile,"Record.RData"))
## Create DATA_CPUE ----
## Ensure this roughly matches what is built for VAST WC.
Data_CPUE <- data.frame(stringsAsFactors = FALSE)
if( "WCGBTS" %in% Surveys_to_include ){
## using my data (older)
# WCGBTS <- read.csv( paste0(DataFile,"Catch__NWFSC.Combo_2019-03-15.csv")) #read.csv( paste0(DataFile,"SurveyHaulAndCatchData03To14--HaulFishCatchData03To14.csv"), skip=8, header=TRUE)
# WCGBTS$AreaSwept_km2 <- WCGBTS[,"Area_Swept_ha"]*0.01
# Data1 <- ThorsonUtilities::rename_columns( Data1[,c('Year','Latitude_dd','Longitude_dd','AreaSwept_km2','total_catch_wt_kg',"Vessel")],
# newname=c('Year','Lat','Lon','AreaSwept_km2','Catch_KG',"Vessel"))
## using Kelli's data
load(paste0("C:/Users/", comp.name,"/Dropbox/UW/sab-idx/runs/sabWCVAST/WCGBTS/DatabaseSave.Rdata"))
WCGBTS <- Database #read.csv( paste0(DataFile,"SurveyHaulAndCatchData03To14--HaulFishCatchData03To14.csv"), skip=8, header=TRUE)
Data1 <- WCGBTS %>% select(Year, Lat, Lon, AreaSwept_km2, Catch_KG, Vessel)
Data1$Vessel <- as.character(Data1$Vessel)
Data1$AreaSwept_km2
# Data1 <- WCGBTS[,c('Trawl_id','Year','Latitude_dd','Longitude_dd','AreaSwept_km2','total_catch_wt_kg',"Vessel")]
rm(WCGBTS)
Data1 <- cbind("Survey"="WCGBTS", "Region"="CC", Data1)
Data_CPUE <- rbind( Data_CPUE, Data1 )
rm(Data1)
}
# Load triennial
# Has some problem with multiple replicated samples
## There ARE zeroes here already...
if( "Triennial" %in% Surveys_to_include ){
ThorsonUtilities::LoadFn( paste0(DataFile,"Catch__Triennial_2019-03-15.Rda"))
Data2 <- Out
rm(Out)
Data2 <- cbind(Data2, "AreaSwept_km2"= Data2[,"Area_Swept_ha"]*0.01) #Data2[,'DISTANCE_FISHED']*Data2[,'NET_WIDTH']/1000 )
Data2 <- ThorsonUtilities::rename_columns( Data2[,c('Year','Latitude_dd','Longitude_dd','AreaSwept_km2','total_catch_wt_kg',"Vessel")],
newname=c('Year','Lat','Lon','AreaSwept_km2','Catch_KG',"Vessel"))
## Create timeblocking
Triennial_late <- subset(Data2, Year > 1995) %>% mutate(Survey = 'Triennial_late', Region = "CC")
Triennial_early <- subset(Data2, Year <= 1995) %>% mutate(Survey = 'Triennial_early', Region = "CC")
Data_CPUE = rbind( Data_CPUE, Triennial_late, Triennial_early)
rm(Data2)
}
# Load BC trap survey strs
if( "BCs" %in% Surveys_to_include ){
# Exclude PCOD monitoring survey, which is non-random
# SpeciesCode = switch( Species, "arrowtooth flounder"='ARF_KG', "Pacific ocean perch"='POP_KG' )
BCs <- read.csv(paste0(DataFile,"/BC/BC_sable_survey_data.Aug262019.csv")) %>%
filter(START_LONGITUDE <= 0 & !is.na(CPUE_TRAPS) & !is.na(TOTAL_SABLE_WEIGHT) &
SABLE_SET_TYPE == 'StRS') %>%
## calc area including soak time
mutate(AreaSwept_km2=CPUE_TRAPS*DURATION_MINUTES/10000, ## to put on same scale as others
TRIP_ID2 = paste(SET_YEAR,START_LATITUDE, START_LONGITUDE))
Data3 <- ThorsonUtilities::rename_columns( BCs[,c("TRIP_ID2", 'SABLE_SET_TYPE','SET_YEAR','START_LATITUDE','START_LONGITUDE','AreaSwept_km2',"TOTAL_SABLE_WEIGHT","VESSEL_ID")],
newname=c("TRIP_ID", 'Survey','Year','Lat','Lon','AreaSwept_km2','Catch_KG',"Vessel")) %>%
# complete(Year,nesting(TRIP_ID,Lat,Lon),
# fill = list(Catch_KG = 0.0)) %>%
mutate(Survey = 'BC_StRS', Region = 'BC') %>% select(-TRIP_ID)
Data_CPUE <- rbind( Data_CPUE, Data3 )
rm(Data3); rm(BCs)
}
# Load BC offshore standardized
if( "BCo" %in% Surveys_to_include ){
# Exclude PCOD monitoring survey, which is non-random
# SpeciesCode = switch( Species, "arrowtooth flounder"='ARF_KG', "Pacific ocean perch"='POP_KG' )
BCo <- read.csv(paste0(DataFile,"/BC/BC_sable_survey_data.Aug262019.csv")) %>%
filter(START_LONGITUDE <= 0 & !is.na(CPUE_TRAPS) & !is.na(TOTAL_SABLE_WEIGHT) &
SABLE_SET_TYPE == 'OFFSHORE STANDARDIZED') %>%
## calc area including soak time
mutate(AreaSwept_km2=CPUE_TRAPS*DURATION_MINUTES/10000, ## to put on same scale as others
TRIP_ID2 = paste(SET_YEAR,START_LATITUDE, START_LONGITUDE))
Data3b <- ThorsonUtilities::rename_columns( BCo[,c("TRIP_ID2", 'SABLE_SET_TYPE','SET_YEAR','START_LATITUDE','START_LONGITUDE','AreaSwept_km2',"TOTAL_SABLE_WEIGHT","VESSEL_ID")],
newname=c("TRIP_ID", 'Survey','Year','Lat','Lon','AreaSwept_km2','Catch_KG',"Vessel")) %>%
# complete(Year,nesting(TRIP_ID,Lat,Lon),
# fill = list(Catch_KG = 0.0)) %>%
mutate(Survey = 'BC_OffStd', Region = 'BC') %>% select(-TRIP_ID)
Data_CPUE <- rbind( Data_CPUE, Data3b )
rm(Data3b); rm(BCo)
}
## BC Trawl survey
if( "BCt" %in% Surveys_to_include ){
# Exclude PCOD monitoring survey, which is non-random
# SpeciesCode = switch( Species, "arrowtooth flounder"='ARF_KG', "Pacific ocean perch"='POP_KG' )
BCt <- read.csv(paste0(DataFile,"/BC/BC_trawl_survey_sable_data.Oct312019.csv")) %>%
filter(LONGITUDE <= 0 & !is.na(TOW_LENGTH_M) & !is.na(CATCH_WEIGHT) ) %>%
mutate("AreaSwept_km2"=as.numeric(as.character(TOW_LENGTH_M))/1000)
# BCt <- cbind( BCt, #/1e6) ## to scale effort
# BCt <- BCt[-which(BCt[,'ACTIVITY_DESC']=="HECATE STRAIT PCOD MONITORING TRAWL SURVEY"),]
Data3a <- ThorsonUtilities::rename_columns( BCt[,c("SURVEY_ID", 'SURVEY_DESC','YEAR','LATITUDE','LONGITUDE',
'AreaSwept_km2',"CATCH_WEIGHT","VESSEL_NAME")],
newname=c("TRIP_ID", 'Survey','Year','Lat','Lon','AreaSwept_km2','Catch_KG',"Vessel")) %>%
mutate(Survey = 'BC_TRAWL', Region = 'BC')%>% select(-TRIP_ID)
rm(BCt)
Data_CPUE <- rbind( Data_CPUE, Data3a )
rm(Data3a)
}
if( "AK_DOM_LL" %in% Surveys_to_include ){
# "Your data looks pretty good for the longline survey"
## slow to make the merge; did once and save.
# AK_DOM_LL_Loc <- read.csv( paste0(DataFile,"AK/LLData/catch_summary_view_with_nulls.csv"), header=TRUE,skip = 6) %>%
# filter(Year > 1989)
# ## these have a small fudge factor leading to many dupes. get a mean survey location for each.
# AK_DOM_LL0 <- read.csv( paste0(DataFile,"AK/LLData/SurveyCatchAnalysis.csv"), header = TRUE)
# names(AK_DOM_LL0)[1] <- 'Year'
#
# # ## get station numbers from dom LL
# AK_DOM_LL <- AK_DOM_LL_Loc %>% group_by(Station.Number,Year) %>% dplyr::summarise(meanLat = mean(Start.Latitude..DD.),
# meanLon = mean(Start.Longitude..DD.)) %>%
# select(Station.Number,Year, meanLat, meanLon) %>%
# merge(AK_DOM_LL0,.,
# by = c("Year",'Station.Number'), all.y = TRUE)
# # ## overwrite NA weights to zero
# AK_DOM_LL$Total.Weight..kg.[is.na( AK_DOM_LL$Total.Weight..kg.)] <- 0
# write.csv(AK_DOM_LL, file = paste0(DataFile,"AK/LLData/merged_AK_DOM_LL.csv") )
## manually add vessels
# the domestic LL survey for 1989-1993 was the vessel 'Ocean Prowler',
## starting in 1994-present the 'Alaskan Leader' surveyed even years
## and the 'Ocean Prowler' does odd years.
Data5 <- read.csv(paste0(DataFile,"AK/LLData/merged_AK_DOM_LL.csv") )%>%
mutate(AreaSwept_km2 = 0.01, Vessel = ifelse(Year < 1994, "Ocean Prowler",
ifelse(Year %% 2 == 0,
"Alaskan Leader", "Ocean Prowler" ))) %>%
select(c('Year','meanLat','meanLon','AreaSwept_km2','Total.Weight..kg.','Vessel'))
# Data5 = FishData::add_missing_zeros( data_frame=GOA, Method=ZeroMethod, if_multiple_records=ifelse(ZeroMethod=="Slow",'Error','Combine'),
# unique_sample_ID_colname="TowID", sample_colname="WEIGHT..KG.", species_subset=Species, species_colname="COMMON.NAME" )
Data5 <- ThorsonUtilities::rename_columns( Data5[,c('Year','meanLat','meanLon','AreaSwept_km2','Total.Weight..kg.',"Vessel")],
newname=c('Year','Lat','Lon','AreaSwept_km2','Catch_KG',"Vessel")) %>%
mutate(Survey = 'AK_DOM_LL', Region = 'AK')
Data5$Lon <- ifelse( Data5$Lon > 0, Data5$Lon*-1, Data5$Lon) ## otherwise things in japan
# AK_DOM_late <- subset(Data5, Year > 2009) %>% mutate(Survey = 'AK_DOM_late', "Region" = "AK")
# AK_DOM_early <- subset(Data5, Year <= 2009) %>% mutate(Survey = 'AK_DOM_early', "Region" = "AK")
Data_CPUE <- rbind( Data_CPUE, Data5) #AK_DOM_early, AK_DOM_late )
# Data_CPUE <- rbind( Data_CPUE, AK_DOM_early, AK_DOM_late )
rm(Data5)
}
# Load GOA trawl -- these already have zeros
if( "GOA" %in% Surveys_to_include ){
## DH indicated to use <700m and drop 1984, 1987 and split at 1993
ALL_GOA <- read.csv( paste0(DataFile,"AK/race_cpue_by_haul.csv"), header=TRUE ) %>%
filter( Gear.Depth <= 500 & !(Year %in% c(1984,1987)) )
names(ALL_GOA) <- toupper(names(ALL_GOA))
GOA <- ALL_GOA %>%
filter(SURVEY == 'GOA') %>%
cbind( ., "Vessel" = as.factor(.$VESSEL.NUMBER),
"TowID"=paste(.[,'YEAR'],.[,'STARTING.LATITUDE..DD.'],.[,'STARTING.LONGITUDE..DD.'],sep="_"),
"AreaSwept_km2"=0.01) #0.01
AI <- ALL_GOA %>%
filter(SURVEY == 'AI') %>%
cbind( ., "Vessel" = as.factor(.$VESSEL.NUMBER),
"TowID"=paste(.[,'YEAR'],.[,'STARTING.LATITUDE..DD.'],.[,'STARTING.LONGITUDE..DD.'],sep="_"),
"AreaSwept_km2"=0.01) #0.01
Data4g <- GOA %>% select(c("YEAR","STARTING.LATITUDE..DD.","STARTING.LONGITUDE..DD.","AreaSwept_km2","WEIGHT..KG.","Vessel"))
Data4a <- AI %>% select(c("YEAR","STARTING.LATITUDE..DD.","STARTING.LONGITUDE..DD.","AreaSwept_km2","WEIGHT..KG.","Vessel"))
rm(GOA); rm(AI)
Data4g <- ThorsonUtilities::rename_columns( Data4g[,c('YEAR','STARTING.LATITUDE..DD.','STARTING.LONGITUDE..DD.','AreaSwept_km2','WEIGHT..KG.',"Vessel")],
newname=c('Year','Lat','Lon','AreaSwept_km2','Catch_KG','Vessel'))
Data4a <- ThorsonUtilities::rename_columns( Data4a[,c('YEAR','STARTING.LATITUDE..DD.','STARTING.LONGITUDE..DD.','AreaSwept_km2','WEIGHT..KG.',"Vessel")],
newname=c('Year','Lat','Lon','AreaSwept_km2','Catch_KG','Vessel'))
Data4a$Lon <- ifelse( Data4a$Lon > 0, Data4a$Lon*-1, Data4a$Lon) ## otherwise things in japan
Data4g$Lon <- ifelse( Data4g$Lon > 0, Data4g$Lon*-1, Data4g$Lon) ## otherwise things in japan
## Create timeblocking -- 2010 was a growth morph year break
# GOA_mid <- subset(Data4, Year > 1993 & Year < 2010) %>% mutate(Survey = 'GOA_mid', Region = "GOA")
GOA_late <- subset(Data4g, Year > 1993 ) %>% mutate(Survey = 'GOA_late', Region = "GOA")
GOA_early <- subset(Data4g, Year <= 1993 ) %>% mutate(Survey = 'GOA_early', Region = "GOA")
AI_late <- subset(Data4a, Year > 1993 ) %>% mutate(Survey = 'AI_late', Region = "AI")
AI_early <- subset(Data4a, Year <= 1993 ) %>% mutate(Survey = 'AI_early', Region = "AI")
Data_CPUE <- rbind( Data_CPUE, GOA_early, GOA_late, AI_late, AI_early)
rm(Data4g);rm(Data4a); rm(GOA_late); rm(GOA_early);rm(AI_late); rm(AI_early)
}
Data_Geostat <- Data_CPUE[which(Data_CPUE$Year>=Year_Range[1] & Data_CPUE$Year<=Year_Range[2]),]
## RESCALE THE DATA SO IT IS NOT HUGE
Data_Geostat$Catch_KG <- Data_Geostat$Catch_KG/1000 #(Data_Geostat$Catch_KG - mean(Data_Geostat$Catch_KG))/sd(Data_Geostat$Catch_KG)
Data_Geostat <- na.omit( Data_Geostat )
save(Data_Geostat, file = paste0(DateFile,"/Data_Geostat.Rdata"))
Data_Geostat %>% group_by(Survey) %>% dplyr::summarise(min(Year))
Data_Geostat %>% group_by(Survey) %>% dplyr::summarise(min(Catch_KG), max(Catch_KG)) ## should be 0 to pos
Data_Geostat %>% group_by(Survey) %>% dplyr::summarise(min(AreaSwept_km2), max(AreaSwept_km2)) ## should be positive
Data_Geostat %>% group_by(Survey) %>% dplyr::summarise(min(Year)) ## should be > Year_Range[1]
Region <- NULL
## This is Thorson's -- Kelli had a way of pre-subsetting to have N/S embedded
if( TRUE ){
if(any(c("WCGBTS","Triennial") %in% Surveys_to_include)) Region = c( Region, "California_current")
if("BCs" %in% Surveys_to_include | "BCt" %in% Surveys_to_include) Region = c( Region, "British_Columbia" )
if("GOA" %in% Surveys_to_include) Region = c( Region, "Gulf_of_Alaska" )
if("EBS" %in% Surveys_to_include) Region = c( Region, "Eastern_Bering_Sea" )
if("AK_DOM_LL" %in% Surveys_to_include) Region = c( Region, "Gulf_of_Alaska", "Eastern_Bering_Sea" )
Extrapolation_List <- make_extrapolation_info( Region=Region, strata_to_use=c('SOG','WCVI','QCS','HS','WCHG'),
zone=Zone, create_strata_per_region=create_strata_per_region )
}else{
if(any(c("WCGBTS","Triennial") %in% Surveys_to_include)) Region = c( Region, "California_current")
if("BCs" %in% Surveys_to_include | "BCt" %in% Surveys_to_include) Region = c( Region, "British_Columbia" )
if("GOA" %in% Surveys_to_include) Region = c( Region, "Gulf_of_Alaska" )
if("EBS" %in% Surveys_to_include) Region = c( Region, "Eastern_Bering_Sea" )
if("AK_DOM_LL" %in% Surveys_to_include) Region = c( Region, "Gulf_of_Alaska", "Eastern_Bering_Sea" )
observations_LL <- Data_Geostat[ which(Data_Geostat[,'Region']=="BC"), c('Lat','Lon') ]
Extrapolation_List <- make_extrapolation_info( Region=Region,
observations_LL=observations_LL, zone=Zone, create_strata_per_region=create_strata_per_region )
}
## Make spatial list ----
Spatial_List <- make_spatial_info( n_x=n_x, Lon=Data_Geostat[,'Lon'],
Lat=Data_Geostat[,'Lat'],
Extrapolation_List=Extrapolation_List,
DirPath=DateFile, Save_Results=FALSE,
"knot_method"="grid", refine=FALSE,
fine_scale=fine_scale )
# Plot details
MapDetails_List <- make_map_info( "Region"="Other",
"spatial_list"=Spatial_List,
"Extrapolation_List"=Extrapolation_List )
Year_Set <- min(Data_Geostat[,'Year']):max(Data_Geostat[,'Year'])
# Exclude surveys without any encounters
EncNum_k <- tapply( Data_Geostat[,'Catch_KG'], INDEX=Data_Geostat[,'Survey'],
FUN=function(vec){sum(vec>0)} )
if( any(EncNum_k==0) ){
Which = which( EncNum_k==0 )
Which2Remove = which( Data_Geostat[,'Survey'] %in% names(Which) )
Data_Geostat = Data_Geostat[-Which2Remove,]
Spatial_List$loc_i = Spatial_List$loc_i[-Which2Remove,]
Spatial_List$knot_i = Spatial_List$knot_i[-Which2Remove]
Data_Geostat[,'Survey'] = droplevels( Data_Geostat[,'Survey'] )
}
# Make catchability matrix (Q_i) ----
# The resulting Q_ik will have n-1 columns, with baseQ excluded.
if( length(unique(Data_Geostat[,'Survey']))==1 |
length(unique(Data_Geostat[,'Region'])) == 1){
Q_ik <- matrix(0, ncol=1, nrow=nrow(Data_Geostat))
}else{
Q_ik <- ThorsonUtilities::vector_to_design_matrix( Data_Geostat[,'Survey'] )
if( !(BaseQ %in% colnames(Q_ik)) ) stop("Problem with Q_ik")
Q_ik <- Q_ik[,-which(colnames(Q_ik)==BaseQ),drop=FALSE]
}
head(Q_ik) ## should have ncol == fleets-1
# Plot location of data
png(paste0(DateFile,"/Extrapolation_List.png"), width = 8, height = 6, units = 'in', res = 520)
plot( Extrapolation_List )
dev.off()
png(paste0(DateFile,"/Spatial_List.png"), width = 8, height = 6, units = 'in', res = 520)
plot( Spatial_List )
dev.off()
## from CC version
TmbData <- VAST::make_data(
#"X_itp"=X_itp,
#"X_gtp"=X_gtp,
#"Xconfig_zcp"=Xconfig_zcp,
"Version"=Version,
"Aniso"=Aniso,
"FieldConfig"=FieldConfig,
"OverdispersionConfig" = c("Eta1"=0, "Eta2"=0),
"RhoConfig"=RhoConfig,
"ObsModel"= ObsModel,
"c_i"=rep(0,nrow(Data_Geostat)),
"b_i"=Data_Geostat[,'Catch_KG'],
"a_i"=Data_Geostat[,'AreaSwept_km2'],
"v_i"= as.numeric(as.factor(Data_Geostat[,'Vessel'])),#-1,
"t_i"= Data_Geostat[,'Year'],
"Q_ik" = Q_ik,
"spatial_list"=Spatial_List,
"Options"=Options )
save(TmbData, file = paste0(DateFile,"/TmbData.Rdata"))
# Make TMB object
TmbList <- make_model("build_model"=TRUE, "TmbData"=TmbData, "RunDir"=DateFile,
"Version"=Version, "RhoConfig"=RhoConfig,
"loc_x"=Spatial_List$loc_x, "Method"=Method, "TmbDir"=getwd())
save(TmbList, file = paste0(DateFile,"/TmbList.Rdata"))
# Run model ----
Obj <- TmbList[["Obj"]]
Obj$par['lambda2_k'] ## should not be NA UNLESS length(fleet) == 1/QIK = 1
Obj$par['lambda1_k']
# Obj$par['gamma1_k']
Opt <- TMBhelper::fit_tmb(
obj = Obj,
lower = TmbList[["Lower"]],
upper = TmbList[["Upper"]],
newtonsteps = 1,
getsd = TRUE,
bias.correct = TRUE,
bias.correct.control = list(vars_to_correct = "Index_cyl"),
savedir = DateFile
) # , rel.tol=1e-20
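# Optional convergence check (hedged sketch; assumes fit_tmb() returns a
# max_gradient component, as recent TMBhelper versions are believed to do):
# if( !is.null(Opt$max_gradient) && Opt$max_gradient > 1e-4 )
#   warning("Large final gradient -- inspect the fit before trusting the index")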
Report <- TmbList$Obj$report()
ParHat <- TmbList$Obj$env$parList()
# Save stuff [NOTE OBJ IS INSIDE SAVE]
Save <- list("Opt"=Opt,
"Report"=Report,
"ParHat"=TmbList$Obj$env$parList(Opt$par),
'Obj' = Obj)
save(Save, file=paste0(DateFile,"Save_original.RData"))
plot_data( Extrapolation_List=Extrapolation_List, Spatial_List=Spatial_List,
Data_Geostat=Data_Geostat, PlotDir=DateFile,
Plot1_name="Data_and_knots.png", Plot2_name="Data_by_year.png", col="red")
# Plot index
Index <- plot_biomass_index( DirName=DateFile,
TmbData=TmbData,
use_biascorr = BiasCorr,
Sdreport=Opt$SD,
Year_Set=Year_Set,
strata_names=c('AllAreas',Region),
plot_log=TRUE, width=6, height=6 ) # , total_area_km2=sum(a_xl[,1])
# load(paste0(DateFile,"Save_original.Rdata"))
Opt <- Save$Opt
Report <- Save$Report
plot_range_index( Sdreport=Opt$SD, Report=Report, Year_Set=Year_Set, TmbData=TmbData,
Znames=colnames(TmbData$Z_xm), PlotDir=DateFile )
# Plot Anisotropy
plot_anisotropy( FileName=paste0(DateFile,"Aniso.png"), Report=Report )
# Plot encounter rate diagnostics
# plot_quantile_diagnostic( Report=Report, TmbData=TmbData, DateFile=DateFile)
# Positive catch rate diagnostics
Q <- plot_quantile_diagnostic( TmbData=TmbData, Report=Report, DateFile=DateFile ) # SpatialDeltaGLMM::
# Pearson residuals diagnostics
plot_residuals( Lat_i=Data_Geostat[,'Lat'], Lon_i=Data_Geostat[,'Lon'],
extrapolation_list = Extrapolation_List,
TmbData=TmbData, Report=Report, Q=Q, savedir=DateFile, spatial_list=Spatial_List )
# Plot density
plot_maps( plot_set=3, Report=Report, PlotDF=MapDetails_List[["PlotDF"]],
working_dir=DateFile, Year_Set=Year_Set )
## plot easting-northing shifts
#To plot effective area occupied, please re-run with Options['Calculate_effective_area']=1
plot_range_index( Sdreport=Save$Opt$SD, Report=Save$Report, Year_Set=Year_Set,
TmbData=TmbData, Znames=colnames(TmbData$Z_xm),
PlotDir=DateFile
)
# source("https://raw.githubusercontent.com/nwfsc-assess/VASTWestCoast/2473eb0ca2c25aa780e39ff1a94e7252d0d335bc/R/summary_nwfsc.R")
source("./R/summary_nwfscMK.r")
TableC <- summary_nwfscMK(obj = Save$Obj,
sdreport = Save$Opt$SD,
savedir = DateFile)[[3]]
TableC %>% data.frame() %>%
exp() %>% round(.,2) %>%
mutate('PAR'=row.names(TableC)) %>%
write.csv(.,file = paste0(DateFile,'tableC_mod.csv'))
|
\encoding{UTF-8}
\name{Disake-package}
\alias{Disake-package}
\alias{Disake}
\docType{package}
\title{
Discrete associated kernel estimators
}
\description{
Discrete smoothing of a probability mass function (p.m.f.) is performed using three discrete associated kernels: DiracDU, Binomial and Discrete Triangular. Two automatic bandwidth selection procedures are implemented: the cross-validation method for all three kernels and the local Bayesian approach for the Binomial kernel. Note that DiracDU is used for categorical data, the Binomial kernel is appropriate for count data with small or moderate sample sizes, and the Discrete Triangular kernel is recommended for count data with large sample sizes.
}
\details{
\describe{
\item{The estimated p.m.f.:}{ The kernel estimator \eqn{\widehat{f}_n}{\widehat{f}_n} of \eqn{f}{f} is defined as
\deqn{\widehat{f}_n(x) = \frac{1}{n}\sum_{i=1}^{n}{K_{x,h}(X_i)},} where \eqn{K_{x,h}} is one of the kernels defined below.
In practice, we first calculate the normalizing constant
\deqn{{C}_n = \sum_{x\in N}{\widehat{f}_n(x)},} which is not generally equal to 1. This constant \eqn{{C}_n} is 1 only for the Dirac and DiracDU kernels. The estimated p.m.f. is then \eqn{\tilde{f}_n=\widehat{f}_n/C_n}{\tilde{f}_n=\widehat{f}_n/C_n}.
The integrated squared error (ISE) defined by \deqn{{ISE}_0 = \sum_{x\in N}{{\{\tilde{f}_n(x)} - f_0(x)\}^2}} is the criterion used to compare the kernel estimator \eqn{\tilde{f}_n}{\tilde{f}_n} with the empirical p.m.f. \eqn{f_0}{f_0}.
See Kokonendji and Senga Kiessé (2011).
}
}
Given a data sample, the \pkg{Disake} package allows one to compute the p.m.f. using one of the three kernel functions: DiracDU, Binomial and Discrete Triangular. The bandwidth parameter is calculated using the cross-validation technique \code{\link{CVbw}}. When the kernel function is Binomial, the bandwidth parameter can also be computed using the local Bayesian procedure \code{\link{Baysbw}}. The kernel functions \code{\link{kf}} are defined below.
\describe{
\item{Binomial kernel :}{ Let \eqn{x\in N:= \{0, 1, \ldots \}} and \eqn{{S}_x = \{0, 1, \ldots, x + 1\}}. The Binomial kernel is defined on the support \eqn{{S}_x} by
\deqn{B_{x,h}(y) = \frac {(x+1)!} {y!(x+1-y)!}\left(\frac{x+h}{x+1}\right)^y\left(\frac{1-h}{x+1}\right)^{(x+1-y)}1_{S_{x}}(y),}
where \eqn{ h\in(0, 1]} and \eqn{1_A}{1[A]} denotes the indicator function of A. Note that \eqn{B_{x,h}}{B_[x,h]} is the p.m.f. of the Binomial distribution with its number of trials \eqn{ x+1} and its success probability \eqn{(x+h)/(x+1)}. See Kokonendji and Senga Kiessé (2011).
}
\item{DiracDU kernel :}{ For a fixed number of categories \eqn{c\in \{2,3,...\} }, we define \eqn{{S}_{c} = \{0, 1, \ldots, c-1\}}. The DiracDU kernel is defined on \eqn{{S}_{c}} by
\deqn{DU_{x,h;c}(y) = (1 - h)1_{\{x\}}(y)+\frac {h} {c-1}1_{S_{c}\setminus\{x\}}(y),}
where \eqn{x\in {S}_{c} } and \eqn{ h\in(0, 1]}. See Kokonendji and Senga Kiessé (2011), and also Aitchison and Aitken (1976) for the multivariate case.
}
\item{Discrete Triangular kernel:}{ For a fixed arm \eqn{a\in N}, we define \eqn{{S}_{x,a} = \{x-a,\ldots, x, \ldots, x + a\}}. The Discrete Triangular kernel is defined on \eqn{{S}_{x,a}} by
\deqn{DT_{x,h;a}(y) = \frac {(a+1)^h - |y-x|^h} {P(a,h)}1_{S_{x,a}}(y),}
where \eqn{x\in N}, \eqn{h>0} and \eqn{P(a,h)=(2a+1)(a+1)^h - 2(1+2^h+ \cdots +a^h)} is the normalizing constant. For \eqn{a=0}, the Discrete Triangular kernel \eqn{DT_{x,h;0}}{DT_[x,h;0]} corresponds to the Dirac kernel on \eqn{x}; see Kokonendji et al. (2007), and also Kokonendji and Zocchi (2010) for an asymmetric version of Discrete Triangular.
}
\item{The bandwidth selection:}{ Two functions are implemented to select the bandwidth: cross-validation and local Bayesian procedure. The cross-validation technique \code{\link{CVbw}} is used for DiracDU, Binomial and Discrete Triangular kernels; see Kokonendji and Senga Kiessé (2011). The local Bayesian procedure \code{\link{Baysbw}} is implemented to select the bandwidth for Binomial kernel; see Zougab et al. (2012).
}
}
}
\author{
W. E. Wansouwé, C. C. Kokonendji and D. T. Kolyang
Maintainer: W. E. Wansouwé <ericwansouwe@gmail.com>
}
\references{
Aitchison, J. and Aitken, C.G.G. (1976). Multivariate binary discrimination by the kernel method, \emph{Biometrika} \bold{63}, 413 - 420.
Kokonendji, C.C. and Senga Kiessé, T. (2011). Discrete associated kernel method and extensions,
\emph{Statistical Methodology} \bold{8}, 497 - 516.
Kokonendji, C.C., Senga Kiessé, T. and Zocchi, S.S. (2007). Discrete triangular distributions and non-parametric estimation for
probability mass function,
\emph{Journal of Nonparametric Statistics} \bold{19}, 241 - 254.
Kokonendji, C.C. and Zocchi, S.S. (2010). Extensions of discrete triangular distribution and boundary bias in kernel estimation for discrete functions, \emph{Statistics and Probability Letters} \bold{80}, 1655 - 1662.
Zougab, N., Adjabi, S. and Kokonendji, C.C. (2012). Binomial kernel and Bayes local bandwidth in discrete functions estimation, \emph{ Journal of Nonparametric Statistics } \bold{24}, 783 - 795.
}
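\examples{
\dontrun{
## Hedged sketch (not taken from the package sources): the Binomial kernel
## B_{x,h} is the p.m.f. of a Binomial(x+1, (x+h)/(x+1)) distribution, so it
## can be evaluated directly with dbinom() at a target point x.
x <- 4; h <- 0.2
y <- 0:(x + 1)                        # support S_x = {0, ..., x+1}
Bxh <- dbinom(y, size = x + 1, prob = (x + h) / (x + 1))
sum(Bxh)                              # a p.m.f., so this sums to 1
}
}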
\keyword{ package }
|
/man/Disake-package.Rd
|
no_license
|
cran/Disake
|
R
| false
| false
| 5,266
|
rd
|
|
# LIBRARIES
library(plot3D)
# What to plot
listOfChartTypes <- list(
# General charts (they use information from more than one file)
"xEpisode_yOptimalAction", "xEpisode_yAverageReward", "xEpisode_yPercentageOfActions", "xEpisode_yWinProbability", "xEpisode_yAverageSteps", "xEpisode_yAverageStepsWhenWin", "xEpisode_yAverageStepsWhenLoose",
# Specific charts (they use information from one single file)
"histogramOfQvalues", "policyPerformance", "winsInFirstTask"
)
names(listOfChartTypes) <- c(
# General charts (they use information from more than one file)
"xEpisode_yOptimalAction", "xEpisode_yAverageReward", "xEpisode_yPercentageOfActions", "xEpisode_yWinProbability", "xEpisode_yAverageSteps", "xEpisode_yAverageStepsWhenWin", "xEpisode_yAverageStepsWhenLoose",
# Specific charts (they use information from one single file)
"histogramOfQvalues", "policyPerformance", "winsInFirstTask"
)
# Height, width and resolution of the picture
size <- 6
res <- 55
# This is the method to call. For chart creation it assumes that the folder
# contains .csv files, which it reads and renders into a final chart saved
# in a subfolder named "Images" that must already exist.
crearGrafico <-
function(urlDelExperimento, nombreFicheroSimulacion, typeOfChart) {
    # SAVE THE PLOT TO A FILE
    savePlot <- TRUE
    # Set the directory where the simulations live
    #workingExternalDir <- file.path("/Volumes","SGATE_BLACK","TFM","Experimentos","MultiArmBanditProblem","Experimento0")
    workingExternalDir <-
      urlDelExperimento#file.path("/Users","pedro","Google Drive","Desarrollo","BasicReinforcementLearningAlgorithm","Experimentos","MultiArmBanditProblem","Experimento0")
    workingDir <- workingExternalDir
    # Name of the final image to be created
    #nombreImagen <- paste(typeOfChart,"_t", numberOfTasks,"_ep",numberOfEpisodes, sep = "")
    nombreImagen <- paste(typeOfChart,"_t", 5,"_ep",2, sep = "")
    # PLOT
    #dev.new()
    graficar(workingDir, savePlot, nombreFicheroSimulacion, typeOfChart)
}
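# Usage sketch (hedged; the experiment path and file names below are
# hypothetical examples, not taken from this repository):
# crearGrafico(file.path("Experimentos", "Experimento0"),
#              c("simulation1.csv", "simulation2.csv"),
#              listOfChartTypes$xEpisode_yAverageReward)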
###########################################HELPER FUNCTIONS############################################
# This function plots the data from the given directory, creating one chart
# per column of interest at the specified date, plotting only from
# date-interval(hours) to date+interval(hours).
graficar <-
function(workingDir, savePlot, nombreFicheroSimulacion, chartType) {
#dev.new()
    # Debugging
    DEBUG <- FALSE
    # Chart title
    chartTitle <- ""
    # The matrix that will store the data
    results <-
      NULL # the matrix is initialized only once inside the loop
    simulationParameters <- c()
    # Count the files that are not Q-value (policy) files
notPolicyFiles <- 0
finalFiles <- c()
for (iFile in 1:length(nombreFicheroSimulacion)) {
if (!grepl("QValues", nombreFicheroSimulacion[iFile])) {
notPolicyFiles <- notPolicyFiles + 1
finalFiles[notPolicyFiles] <-
nombreFicheroSimulacion[iFile]
}
}
    # Names of the files to plot
nombreFicheroSimulacion <- finalFiles
print(length(nombreFicheroSimulacion))
# ================================================================
# GENERAL CHARTS (THEY USE INFORMATION FROM MORE THAN ONE FILE)
# ================================================================
if (identical(listOfChartTypes$xEpisode_yOptimalAction, chartType)
||
identical(listOfChartTypes$xEpisode_yPercentageOfActions, chartType)
||
identical(listOfChartTypes$xEpisode_yWinProbability, chartType)
||
identical(listOfChartTypes$xEpisode_yAverageReward, chartType)
||
identical(listOfChartTypes$xEpisode_yAverageSteps, chartType)
||
identical(listOfChartTypes$xEpisode_yAverageStepsWhenWin, chartType)
||
identical(listOfChartTypes$xEpisode_yAverageStepsWhenLoose, chartType)
||
identical(listOfChartTypes$winsInFirstTask, chartType)) {
      # Plot name when saving
if (savePlot == TRUE) {
if (DEBUG) {
print(workingDir)
}
        # Set where the image should be saved
#setwd(paste(workingDir, "/Images", sep = ""))
setwd(file.path(workingDir,"Images"))
        # Set the image file name
        nombreImagen <- paste(chartType, sep = "")
        nombreImagen <-
          paste(nombreImagen,'', sep = "") # png is used so the image can be embedded as-is in the web page
        if (DEBUG) {
          print(paste("Creating", nombreImagen, "..."))
}
#pdf(paste(nombreImagen,'.pdf', sep = ""))
#png(paste(nombreImagen,'.png', sep = ""))
png(
paste(nombreImagen,'.png', sep = ""), width = size, height = size, units = 'in', res = res
)
# png(paste(nombreImagen,'.png', sep = ""), units="px", width=400, height=400, res=150)
#jpeg(paste(nombreImagen,'.jpeg', sep = ""), quality = 100)
}
      # Build the data matrix
for (iFile in 1:length(nombreFicheroSimulacion)) {
# Extension
extension <- ""
if (!grepl(".csv", nombreFicheroSimulacion[iFile])) {
extension <- ".csv"
}
if (grepl("QValues", nombreFicheroSimulacion[iFile])) {
next()
;
}
        # Directory
directorio <- paste(workingDir, "/",
nombreFicheroSimulacion[iFile], extension, sep = "")
        # Read the information of interest from the file
dataFile <-
read.csv2(file = directorio, head = TRUE, sep = ";")
        # Capture all of the algorithm's data
possibleStates <-
as.character(dataFile$possibleStates[dataFile$possibleStates != ""])
possibleActions <-
as.character(dataFile$possibleActions[dataFile$possibleActions != ""])
initialPolicyValue <- dataFile$initialQValues[1]
optimalAction <- dataFile$optimalAction[1]
learningMethod <- dataFile$learningMethod[1]
selectionMethod <- dataFile$selectionMethod[1]
numberOfTasks <- dataFile$numberOfTasks[1]
numberOfEpisodes <- dataFile$numberOfEpisodes[1]
softmax_temperature <- dataFile$softmax_temperature[1]
ucb_c <- dataFile$UCB_c[1]
eGreedy_epsilon_initial <-
dataFile$eGreedy_epsilon_initial[1]
eGreedy_epsilon_final <- dataFile$eGreedy_epsilon_final[1]
qLearning_alpha <- dataFile$qLearning_alpha[1]
qLearning_gamma <- dataFile$qLearning_gamma[1]
taskNumber <- dataFile$taskNumber
episodeNumber <- dataFile$episodeNumber
episodeStepTime <- dataFile$episodeStepTime
lastState <- dataFile$lastState
actionInLastState <- dataFile$actionInLastState
newState <- dataFile$newState
reward <- dataFile$averageReward
percentageOfChosenOptimalActionsPerEpisode <-
dataFile$percentageOfChosenOptimalActionsPerEpisode
percentageOfChosenActionsPerEpisode <-
dataFile$percentageOfChosenActionsPerEpisode
victories <- dataFile$victories
        # Initialize the array holding the run parameters
simulationParameters[iFile] <-
getParameterExpression(dataFile)
        # Initialize the results matrix (only once)
if ((!identical(listOfChartTypes$xEpisode_yPercentageOfActions, chartType))
&&
!(identical(listOfChartTypes$winsInFirstTask, chartType))
) {
if (is.null(results))
results <-
matrix(nrow = numberOfEpisodes + 1, ncol = notPolicyFiles)#length(nombreFicheroSimulacion))
}
else if ((identical(
listOfChartTypes$xEpisode_yPercentageOfActions, chartType
))) {
if (is.null(results))
results <-
matrix(nrow = numberOfEpisodes * length(possibleActions), ncol = notPolicyFiles)#length(nombreFicheroSimulacion))
}
else if ((identical(
listOfChartTypes$winsInFirstTask, chartType
))) {
if (is.null(results))
results <-
matrix(nrow = 1, ncol = length(nombreFicheroSimulacion))
}
        # DEAD CODE
else if (FALSE) {
if (is.null(results)) {
results <-
matrix(nrow = numberOfEpisodes + 1 + length(possibleActions), ncol = notPolicyFiles)#length(nombreFicheroSimulacion))
}
}
        # Where the chart results will be stored
        partialResult <- c()
        # Prepare the data
# > PERCENTAGE_OF_OPTIMAL_ACTIONS
if (identical(listOfChartTypes$xEpisode_yOptimalAction, chartType)) {
          # Use a vector to store the averages
percentageOfOptimalActions <- c()
percentageOfOptimalActions[1] <- 0
          # Getting the average over all tasks
for (iTask in 1:numberOfTasks) {
for (iEpisode in 0:(numberOfEpisodes - 1)) {
            # Auxiliary variable for the position where the average is stored
            aux_iEpisode <- iEpisode + 1
            # Get the episode index within this particular task
            index <- (iTask - 1) * numberOfEpisodes + aux_iEpisode
            # Get the reward for this task's episode
            auxReward <-
              as.numeric(as.character(percentageOfChosenOptimalActionsPerEpisode[index]))
            # Get the index of the episode being processed
            index <-
              iEpisode + 2 # add 2 because the first element is 0, so the chart has zero as a reference
if (is.null(percentageOfOptimalActions[index]) ||
is.na(percentageOfOptimalActions[index])) {
percentageOfOptimalActions[index] <- 0.0
}
            # Add this episode's reward to the global reward across all tasks
percentageOfOptimalActions[index] <-
percentageOfOptimalActions[index] + auxReward
}
          # Calculate the percentage of the process completed
percentageCompleted <- (iTask * 100 / numberOfTasks)
if (percentageCompleted %% 10 == 0) {
if (DEBUG) {
print(paste(percentageCompleted,"% process completed", sep = ""))
}
}
}
          # Assign the global AVERAGE percentage of optimal actions chosen across all tasks
          percentageOfOptimalActions <-
            percentageOfOptimalActions / numberOfTasks
          # Assign the progressive global AVERAGE percentage of optimal actions obtained over time
          # (start at 2 because the first value does not change)
          #for (n in 2:length(averageReward)) {averageReward[n] <- (averageReward[n-1]*(n-1)+averageReward[n])/(n)}
          # Save the partial result that is shown in the chart
partialResult <- percentageOfOptimalActions
}
# > AVERAGE_REWARD
else if (identical(listOfChartTypes$xEpisode_yAverageReward, chartType)) {
# Usamos un vector para almacenar las medias
averageReward <- c()
averageReward[1] <-
as.numeric(as.character(initialPolicyValue[1]))
          # Getting the average over all tasks
for (iTask in 1:numberOfTasks) {
for (iEpisode in 0:(numberOfEpisodes - 1)) {
            # Auxiliary variable for the position where the average is stored
            aux_iEpisode <- iEpisode + 1
            # Get the episode index within this particular task
            index <- (iTask - 1) * numberOfEpisodes + aux_iEpisode
            # Get the reward for this task's episode
            auxReward <- as.numeric(as.character(reward[index]))
            # Get the index of the episode being processed
            index <-
              iEpisode + 2 # add 2 because the first element is 0, so the chart has zero as a reference
if (is.null(averageReward[index]) ||
is.na(averageReward[index])) {
averageReward[index] <- 0.0
}
            # Add this episode's reward to the global reward across all tasks
averageReward[index] <-
averageReward[index] + auxReward
}
          # Calculate the percentage of the process completed
percentageCompleted <- (iTask * 100 / numberOfTasks)
if (percentageCompleted %% 10 == 0) {
if (DEBUG) {
print(paste(percentageCompleted,"% process completed", sep = ""))
}
}
}
          # Assign the global AVERAGE reward across all tasks
          averageReward <- averageReward / numberOfTasks
          # Assign the progressive global AVERAGE reward obtained over time
          # (start at 2 because the first value does not change)
          #for (n in 2:length(averageReward)) {averageReward[n] <- (averageReward[n-1]*(n-1)+averageReward[n])/(n)}
          # Save the partial result that is shown in the chart
partialResult <- averageReward
}
# > PERCENTAGE_OF_ACTIONS
else if (identical(listOfChartTypes$xEpisode_yPercentageOfActions, chartType)) {
          # Matrix storing the average percentage of each action for each episode
percentage <-
matrix(0, nrow = length(possibleActions), ncol = numberOfEpisodes)
# Getting the most common action for each episode
for (iEpisode in 1:(numberOfEpisodes)) {
            # Auxiliary variable for the position where the average is stored
            aux_iEpisode <- iEpisode + 1
            # Get the vector of the different actions taken
            # for this episode across the different tasks
            actionVectorForOneEpisode <- c()
            for (iTask in 1:(numberOfTasks)) {
              # Get the index of each task's actions for this particular episode
              index <- (iTask - 1) * numberOfEpisodes + aux_iEpisode
              # Add the action to the vector of t tasks
percentages <-
as.character(percentageOfChosenActionsPerEpisode[index - 1])
percentages <- unlist(strsplit(percentages, "_"))
for (i in 1:length(percentages)) {
percentage[i,iEpisode] <-
percentage[i,iEpisode] + as.double(percentages[i])
}
}
}
# Average of the percentage of all tasks
percentage <- percentage / numberOfTasks
rownames(percentage) <- possibleActions
partialResult <- percentage
}
# > WIN_PROBABILITY
else if (identical(listOfChartTypes$xEpisode_yWinProbability, chartType)) {
          # Use a vector to store the averages
victoryProbability <- c()
victoryProbability[1] <-
as.numeric(as.character(initialPolicyValue[1]))
          # Getting the average over all tasks
for (iTask in 1:numberOfTasks) {
for (iEpisode in 0:(numberOfEpisodes - 1)) {
            # Auxiliary variable for the position where the average is stored
            aux_iEpisode <- iEpisode + 1
            # Get the episode index within this particular task
            index <- (iTask - 1) * numberOfEpisodes + aux_iEpisode
            # Get the game outcome for this task's episode
            gameState <- as.numeric(as.character(victories[index])) # 0 - defeat _ 1 - victory
            # Get the index of the episode being processed
            index <-
              iEpisode + 2 # add 2 because the first element is 0, so the chart has zero as a reference
if (is.null(victoryProbability[index]) ||
is.na(victoryProbability[index])) {
victoryProbability[index] <- 0.0
}
            # Add this episode's outcome to the global total across all tasks
victoryProbability[index] <-
victoryProbability[index] + gameState
}
          # Calculate the percentage of the process completed
percentageCompleted <- (iTask * 100 / numberOfTasks)
if (percentageCompleted %% 10 == 0) {
if (DEBUG) {
print(paste(percentageCompleted,"% process completed", sep = ""))
}
}
}
          # Assign the real probability based on the number of wins and games played
victoryProbability <-
victoryProbability * 100 / numberOfTasks
          # Save the partial result that is shown in the chart
partialResult <- victoryProbability
}
# > AVERAGE_STEPS
else if (identical(listOfChartTypes$xEpisode_yAverageSteps, chartType)
|| identical(listOfChartTypes$xEpisode_yAverageStepsWhenWin, chartType)
|| identical(listOfChartTypes$xEpisode_yAverageStepsWhenLoose, chartType)) {
          # Use a vector to store the averages
averageSteps <- c()
averageSteps[1] <-
as.numeric(as.character(initialPolicyValue[1]))
divisor <- c()
divisor[1] <- 1
          # Getting the average over all tasks
for (iTask in 1:numberOfTasks) {
for (iEpisode in 0:(numberOfEpisodes - 1)) {
            # Auxiliary variable for the position where the average is stored
            aux_iEpisode <- iEpisode + 1
            # Get the episode index within this particular task
            index <- (iTask - 1) * numberOfEpisodes + aux_iEpisode
if (identical(listOfChartTypes$xEpisode_yAverageSteps, chartType)
|| (identical(listOfChartTypes$xEpisode_yAverageStepsWhenWin, chartType) && as.numeric(as.character(victories[index])) == 1)
|| (identical(listOfChartTypes$xEpisode_yAverageStepsWhenLoose, chartType) && as.numeric(as.character(victories[index])) == 0)) {
              # Get the number of steps for this task's episode
              numberOfStepsInThisEpisode <-
                as.numeric(as.character(episodeStepTime[index]))
              # Get the index of the episode being processed
              index <-
                iEpisode + 2 # add 2 because the first element is 0, so the chart has zero as a reference
if (is.null(averageSteps[index]) ||
is.na(averageSteps[index])) {
averageSteps[index] <- 0.0
}
if (is.null(divisor[index]) ||
is.na(divisor[index])) {
divisor[index] <- 1.0
}
              # Add this episode's step count to the global total across all tasks
averageSteps[index] <-
averageSteps[index] + numberOfStepsInThisEpisode
divisor[index] <- divisor[index] + 1
}else{
              # Get the index of the episode being processed
              index <- iEpisode + 2 # add 2 because the first element is 0, so the chart has zero as a reference
if (is.null(averageSteps[index]) ||
is.na(averageSteps[index])) {
averageSteps[index] <- 0.0
}
if (is.null(divisor[index]) ||
is.na(divisor[index])) {
divisor[index] <- 1.0
}
}
}
          # Calculate the percentage of the process completed
percentageCompleted <- (iTask * 100 / numberOfTasks)
if (percentageCompleted %% 10 == 0) {
if (DEBUG) {
print(paste(percentageCompleted,"% process completed", sep = ""))
}
}
}
          # Average the step counts using the per-episode divisors
averageSteps <- averageSteps / divisor
averageSteps[is.na(averageSteps)] <- 0
if(DEBUG){
print(divisor)
print(averageSteps)
}
          # Save the partial result that is shown in the chart
partialResult <- averageSteps
}
# > WINS DURING THE LEARNING PHASE
else if (identical(listOfChartTypes$winsInFirstTask, chartType)) {
# results <- matrix(nrow = 1, ncol = length(nombreFicheroSimulacion))
# partialResult <- matrix(nrow = 1, ncol = 1)
partialResult <- 0
          # Getting the average over all tasks
# for (iTask in 1:numberOfTasks) {
for (iEpisode in 0:(numberOfEpisodes - 1)) {
gameState <- as.numeric(as.character(victories[iEpisode+1])) # 0 - defeat _ 1 - victory
partialResult <- partialResult + gameState
}
# }
}
        # Save the partial result into the results matrix
        #if(!identical(listOfChartTypes$histogramOfQvalues, chartType)){
results[,iFile] <- c(partialResult)
print(iFile)
print(results)
#}
if (DEBUG) {
print(paste(
"Creating chart ", nombreFicheroSimulacion[iFile], "...", sep = ""
))
}
        # Percentage of files that have been processed
percentageCompleted <-
(iFile * 100 / length(nombreFicheroSimulacion))
if(DEBUG){
print(paste(
percentageCompleted,"% process completed of the whole process.", sep = ""
))
}
}
}
# ================================================================
# SPECIFIC CHARTS (THEY USE INFORMATION THAT IS STORED IN A DIFFERENT WAY FROM GENERAL CHARTS)
# ================================================================
else {
      # This chart type uses only one file
nombreFicheroSimulacion <- nombreFicheroSimulacion[1]
      # Plot name when saving
if (savePlot == TRUE) {
if (DEBUG) {
print(workingDir)
}
        # Set where the image should be saved
#setwd(paste(workingDir, "/Images", sep = ""))
setwd(file.path(workingDir,"Images"))
        # Set the image file name
        nombreImagen <-
          paste(chartType,"_",nombreFicheroSimulacion, sep = "")
        nombreImagen <-
          paste(nombreImagen,'', sep = "") # png is used so the image can be embedded as-is in the web page
        if (DEBUG) {
          print(paste("Creating", nombreImagen, "..."))
}
#pdf(paste(nombreImagen,'.pdf', sep = ""))
#png(paste(nombreImagen,'.png', sep = ""))
png(
paste(nombreImagen,'.png', sep = ""), width = size, height = size, units = 'in', res = res
)
# png(paste(nombreImagen,'.png', sep = ""), units="px", width=400, height=400, res=150)
#jpeg(paste(nombreImagen,'.jpeg', sep = ""), quality = 100)
}
# Extension
extension <- ""
if (!grepl(".csv", nombreFicheroSimulacion[1])) {
extension <- ".csv"
}
print(paste("Simulation file:",nombreFicheroSimulacion[1]))
      # Directory
directorio <- paste(workingDir, "/",
nombreFicheroSimulacion[1], extension, sep = "")
      # Read the information of interest from the file
dataFile <-
read.csv2(file = directorio, head = TRUE, sep = ";")
      # Save the partial result into the results matrix
if (is.null(results)) {
if (ncol(dataFile) == 2)
results <- matrix(nrow = nrow(dataFile))
else
results <-
matrix(nrow = nrow(dataFile), ncol = (ncol(dataFile) - 1))
}else {
print("Error creando la matriz de resultados para la grafica de Q-values")
}
# > HISTOGRAM_OF_Q_VALUES
if (identical(listOfChartTypes$histogramOfQvalues, chartType)) {
        # Set the best action in the title
chartTitle <- paste("Final policy")
        # Simulation parameters (in this case, all possible states and actions)
frameWidthOfSimulationParameters <- 170
actions <- ""
aux <- ""
for (iAction in 1:length(dataFile[,1])) {
aux <- paste(aux, dataFile[iAction,1], "; ", sep = "")
if (nchar(aux) > frameWidthOfSimulationParameters ||
iAction == length(dataFile[,1])) {
actions <- paste(actions, aux, "\n")
aux <- ""
}
}
states <- ""
aux <- ""
stateVector <- names(dataFile)[-1]
for (iState in 1:length(stateVector)) {
aux <- paste(aux, stateVector[iState], "; ", sep = "")
if (nchar(aux) > frameWidthOfSimulationParameters ||
iState == length(stateVector)) {
states <- paste(states, aux, "\n")
aux <- ""
}
}
        # Initialize the array holding the run parameters
        #simulationParameters <- c(paste("States:",states), paste("Actions:", actions))
        #simulationParameters <- getParameterExpression(dataFile)
        # Take the policy table and put it into results
for (iColumn in 1:(ncol(dataFile) - 1)) {
results[,iColumn] <- as.double(as.character(dataFile[,iColumn + 1]))
}
        # Normalize (note that doing this hides the policy's actual values)
normalizeData <- TRUE
if (normalizeData) {
for (iColumn in 1:(ncol(dataFile) - 1)) {
# Data
x <- c(results[,iColumn])
# Normalized Data
if (max(x) - min(x) == 0) {
normalized = 0
}else{
normalized = (x - min(x)) / (max(x) - min(x))
}
# Now the histogram is scaled
results[,iColumn] <- normalized
}
}
        # Name the columns
colnames(results) <- names(dataFile)[-1]
        # Name the rows
rownames(results) <- dataFile[,1]
# There is only one state
#simulationParameters <- ""
}
# > POLICY_PERFORMANCE
else if (identical(listOfChartTypes$policyPerformance, chartType)) {
        # Set the best action in the title
chartTitle <-
paste("RMS error between estimate policy and optimal policy")
        # The parametrization will be shown inside the plot
simulationParameters <- ""
        # Take the policy table and put it into results
for (iColumn in 1:(ncol(dataFile) - 1)) {
results[,iColumn] <- as.double(as.character(dataFile[,iColumn + 1]))
}
        # Name the columns
nameVector <- c()
for (i in 2:length(colnames(dataFile))) {
x <- colnames(dataFile)[i]
nameVector[i - 1] <-
substr(x, 3, regexpr("\\.[^\\.]*$", x)[1] - 1)
}
colnames(results) <- nameVector
        # Name the rows
rownames(results) <- dataFile[,1]
}
}
    # Create the chart
crearChart(
chartTitle, results, leg.txt <-
simulationParameters, listOfChartTypes, chartType, nombreFicheroSimulacion, dataFile
)
    # Save the plot to the directory specified at the top of this file
if (savePlot == TRUE) {
dev.off()
}else{
#dev.off(dev.list()["RStudioGD"])
}
}
# Create the specific chart that was requested
crearChart <-
function(title, simulationData, simulationParameters, listOfChartTypes, chartType, nombreFicheroSimulacion, dataFile) {
    # Debug flag (crearChart does not see graficar's local DEBUG variable)
    DEBUG <- FALSE
    # Color vector
colours <- c(
"Green", "Red", "Black", "Blue", "Orange",
"Magenta", "Yellow","Gray", "Brown", "Pink",
"darkslateblue", "gold4", "dodgerblue4", "darkslategray1",
"deeppink4", "green4", "lightblue4","lightcoral",
"hotpink", "hotpink4","khaki", "lightpink4",
"lightseagreen", "midnightblue", "lightslateblue", "maroon4",
"mediumorchid", "orange4", "plum2", "plum4",
"peru", "seagreen1", "seagreen4", "turquoise1",
"turquoise4", "yellow4", "yellowgreen", "snow4"
)
    # Final color vector
    if (length(nombreFicheroSimulacion) > length(colours))
      print("Error: please add more colors to the colours vector in the crearChart(...) method")
#if(length(nombreFicheroSimulacion)==0) print("Error: length zero of length(nombreFicheroSimulacion)")
#else coloresFinales <- colours[1:length(nombreFicheroSimulacion)]
if (ncol(simulationData) == 0)
print("Error: length zero of ncol(simulationData)")
else
coloresFinales <- colours[1:ncol(simulationData)]
# margin bottom
idealBottomMargin <- 16
idealLineNumber <- 12
customBottomMargin <-
idealBottomMargin - (idealBottomMargin - (idealBottomMargin * ncol(simulationData) /
idealLineNumber))
minimunBottomMargin <- 10
if (customBottomMargin < minimunBottomMargin)
customBottomMargin <- minimunBottomMargin
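    # Worked example (hedged): the expression above reduces algebraically to
    # idealBottomMargin * ncol(simulationData) / idealLineNumber, so 6 data
    # columns give 16 * 6 / 12 = 8, which is then raised to the minimum of 10.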
    # The labels that are displayed
# > PERCENTAGE_OF_OPTIMAL_ACTIONS
if (identical(listOfChartTypes$xEpisode_yOptimalAction, chartType)) {
      # Chart margins
par(mar = c(customBottomMargin,4,2,2))
# Labels
xLabel <-
paste("(", as.character(dataFile$numberOfTasks[1]), " tasks) ", "Episodes",sep =
"")
yLabel <- "% Optimal action"
      # Display the results
matplot(
xlab = xLabel, ylab = yLabel, simulationData, type = "l", lty = 1, pch =
1,col = coloresFinales, ylim = c(0,100)
) #plot
      # Show a title
      # README: not using a title for now
}
# > AVERAGE_REWARD
else if (identical(listOfChartTypes$xEpisode_yAverageReward, chartType)) {
      # Chart margins
par(mar = c(customBottomMargin,4,2,2))
# Labels
xLabel <-
paste("(", as.character(dataFile$numberOfTasks[1]), " tasks) ", "Episodes",sep =
"")
yLabel <- "Average reward"
      # Display the results
matplot(
xlab = xLabel, ylab = yLabel, simulationData, type = "l", lty = 1, pch =
1, col = coloresFinales
) #plot
      # Show a title
      # README: not using a title for now
}
# > WINNING_PROBABILITY
else if (identical(listOfChartTypes$xEpisode_yWinProbability, chartType)) {
      # Chart margins
par(mar = c(customBottomMargin,4,2,2))
# Labels
xLabel <-
paste("(", as.character(dataFile$numberOfTasks[1]), " tasks) ", "Episodes",sep =
"")
yLabel <- "Winning probability"
      # Display the results
matplot(
xlab = xLabel, ylab = yLabel, simulationData, type = "l", lty = 1, pch =
1, col = coloresFinales, ylim = c(0,100)
) #plot
      # Show a title
      # README: not using a title for now
}
# > AVERAGE_STEPS
else if (identical(listOfChartTypes$xEpisode_yAverageSteps, chartType)
|| identical(listOfChartTypes$xEpisode_yAverageStepsWhenWin, chartType)
|| identical(listOfChartTypes$xEpisode_yAverageStepsWhenLoose, chartType)) {
      # Chart margins
par(mar = c(customBottomMargin,4,2,2))
# Labels
xLabel <- paste("(", as.character(dataFile$numberOfTasks[1]), " tasks) ", "Episodes",sep = "")
if (identical(listOfChartTypes$xEpisode_yAverageStepsWhenWin, chartType))
yLabel <- "Average steps when win"
else if (identical(listOfChartTypes$xEpisode_yAverageStepsWhenLoose, chartType))
yLabel <- "Average steps when loose"
else
yLabel <- "Average steps"
      # Display the results
matplot(xlab = xLabel, ylab = yLabel, simulationData, type = "l", lty = 1, pch = 1, col = coloresFinales) #plot
      # Show a title
      # README: not using a title for now
}
# > PERCENTAGE_OF_ACTIONS
else if (identical(listOfChartTypes$xEpisode_yPercentageOfActions, chartType)) {
      # Auxiliary copy
auxSimulationParameters <- simulationParameters
      # To combine everything into a single plot you would use mfrow here
      #par(mfrow=c(filas,columnas))
for (iFile in 1:ncol(simulationData)) {
        # Chart margins
par(mar = c(10,4,2,2))
        # Colors follow the number of actions, not the number of files read
possibleActions <-
as.character(dataFile$possibleActions[dataFile$possibleActions != ""])
coloresFinales <- colours[1:length(possibleActions)]
# Labels
xLabel <-
paste("(", as.character(dataFile$numberOfTasks[1]), " tasks) ", "Episodes", "\n",sep =
"")
yLabel <- "Percentage of actions"
        # Extract the file's information for each action
data <-
t(matrix(
simulationData[,iFile], nrow = length(possibleActions), ncol = dataFile$numberOfEpisodes[1]
))
colnames(data) <- possibleActions
        # Display the results
matplot(
xlab = xLabel, ylab = yLabel, data, type = "l", lty = 1, pch = 1, col = coloresFinales, ylim = c(0,100)
) #plot
        # What each line in the chart means
simulationParameters <- possibleActions
        # Chart title
title(
"", sub = auxSimulationParameters[iFile],
cex.main = 2, font.main = 4, col.main = "blue",
cex.sub = 0.75, font.sub = 3, col.sub = "black"
)
        # Place the legends
leg.txt <- simulationParameters
par(
fig = c(0, 1, 0, 1), oma = c(1, 0, 0, 0), mar = c(0, 0, 0, 0), new = TRUE
)
plot(
0, 0, type = "n", bty = "n", xaxt = "n", yaxt = "n"
)
legend(
"bottom", legend = leg.txt,
xpd = TRUE, horiz = FALSE, inset = c(0, 0), bty = "o",
pch = c(8, 8), col = coloresFinales, cex = 0.75
)
}
simulationParameters <- ""
}
# > HISTOGRAM_OF_Q_VALUES
else if (identical(listOfChartTypes$histogramOfQvalues, chartType)) {
      # Chart margins
numberOfStates <- length(names(dataFile)[-1])
numberOfActions <- nrow(simulationData)
if(DEBUG){
print(paste("Number of states: ",numberOfStates))
}
if (numberOfStates == 1) {
        # Chart margins
par(mar = c(4,8,2,2))
# Labels
xLabel <-
""#paste("File:",nombreFicheroSimulacion)#paste("(", as.character(dataFile$numberOfTasks[1]), " tasks) ", "Episodes (most action selected in all tasks)",sep="")
yLabel <- "------"
        # Display the results
x <- rownames(simulationData)
y <- c()
for (row in 1:nrow(simulationData)) {
y <- rbind(y, simulationData[row,1])
}
        # Display the results
barplot(
xlab = xLabel, horiz = TRUE, y, names = x, col = coloresFinales, beside = TRUE, las = 1, xlim = c(min(y),max(y))
)
        # Show a title
title(title)
}
else{
#z <- 1:(numberOfStates*numberOfActions); dim(z) <- c(numberOfStates,numberOfActions)
z <- simulationData
#z <- VADeaths
imageIn3D <- TRUE
if (imageIn3D) {
hist3D(
z = z, border = "black", xlab = "actions", ylab = "states", zlab = "qValue"
)
          # DEAD CODE: ATTEMPT TO PUT LABELS ON THE 3D HISTOGRAM
if (FALSE) {
hist3D (
x = 1:nrow(z), y = 1:ncol(z), z = z,
bty = "g", phi = 20, theta = -60,
xlab = "", ylab = "", zlab = "", main = "VADeaths",
col = "#0072B2", border = "black", shade = 0.8,
ticktype = "detailed", space = 0.15, d = 2, cex.axis = 1e-9
)
# Use text3D to label x axis
text3D(
x = 1:nrow(z), y = rep(0.0, nrow(z)), z = rep(3, nrow(z)),
labels = rownames(z),
add = TRUE, adj = 0
)
# Use text3D to label y axis
text3D(
x = rep(0, ncol(z)), y = 1:ncol(z), z = rep(0, ncol(z)),
labels = colnames(z),
add = TRUE, adj = 1
)
}
}else{
image2D(
x = 1:3, y = 1:6, z = z, border = "black"
, xlab = c("actions", simulationParameters[2])#rownames(simulationData))
, ylab = c("states", simulationParameters[1])#\n states = {10-armed}\n actions = {tirarPalanca1,tirarPalanca2,tirarPalanca3,tirarPalanca4,tirarPalanca5,\ntirarPalanca6,tirarPalanca7,tirarPalanca8,tirarPalanca9,tirarPalanca10}", ylab="actions",
, clab = c("Q values \n(normalized)")
, main = "Policy"
, xaxt = "n"
, yaxt = "n"
)
}
        # Chart title
title(
main = simulationParameters[1], sub = simulationParameters[1],
cex.main = 0.5, font.main = 4, col.main = "blue",
cex.sub = 0.75, font.sub = 3, col.sub = "black"
)
#simulationParameters <- ""
}
}
# > POLICY_PERFORMANCE
else if (identical(listOfChartTypes$policyPerformance, chartType)) {
      # Chart margins
par(mar = c(5,4,2,2))
      # Get the parameter label for the X axis
parameterFeatures <- getKeyWords(rownames(simulationData)[1])
parameter1 = parameterFeatures[1]
nameParameter1 <- parameterFeatures[2]
# Labels
xLabel <-
parameter1#bquote(lambda~"=") #bquote("Q-LEARNING_LAMBDA("~Q[0] == .(as.character(dataFile$initialQValues[1]))~","~alpha == .(as.character(dataFile$qLearning_alpha))~","~gamma==.(as.character(dataFile$qLearning_gamma))~","~lambda==.(as.character(dataFile$qLearning_lambda))~")")
yLabel <- "RMS error over 10 first episodes"
      # Display the results
matplot(
xlab = xLabel, ylab = yLabel, y = simulationData,
type = "l", lty = 1, pch = 1, col = coloresFinales
,xlim = c(1,length(rownames(simulationData)))
,xaxt = "n"
) #plot
# Getting values of parameter1
valuesParameter1 <- c()
for (i in 1:length(rownames(simulationData))) {
value <- rownames(simulationData)[i]
valuesParameter1[i] <-
getKeyWords(rownames(simulationData)[i])[4]#substr(value,nchar(parameter1)+3,nchar(value)-1)
}
# Putting the real axis
#==========================================================
if(DEBUG){
print(valuesParameter1)
}
axis(
1, at = 1:length(valuesParameter1), labels = valuesParameter1, cex.axis = 1.0
)
#==========================================================
# Putting the second parameter as text in the plot
parameterFeatures <- getKeyWords(colnames(simulationData)[1])
parameter2 = parameterFeatures[1]
nameParameter2 <- parameterFeatures[2]
expressionParameter2 <- parameterFeatures[3]
maxValueInData <- max(simulationData)
for (i in 1:length(colnames(simulationData))) {
xPosition <- length(rownames(simulationData)) - 0.2#-(i-1)#-0.125
#yPosition <- simulationData[length(rownames(simulationData))-i+1, length(colnames(simulationData))-i+1]#+0.002
#yPosition <- simulationData[length(rownames(simulationData)), length(colnames(simulationData))-i+1]#+0.002
yPosition <-
maxValueInData - maxValueInData * 0.025 * (i - 1)
valor <-
as.double(as.character(getKeyWords(colnames(
simulationData
)[i])[4]))
        # Text in the graph; only the Greek letter changes
if (grepl("alpha", colnames(simulationData)[1]))
text(
x = xPosition, y = yPosition, bquote(alpha ~ "=" ~ .(valor)), cex = 1, col = coloresFinales[length(colnames(simulationData)) -
i + 1]
)
else if (grepl("gamma", colnames(simulationData)[1]))
text(
x = xPosition, y = yPosition, bquote(gamma ~ "=" ~ .(valor)), cex = 1, col = coloresFinales[length(colnames(simulationData)) -
i + 1]
)
else if (grepl("lambda", colnames(simulationData)[1]))
text(
x = xPosition, y = yPosition, bquote(lambda ~ "=" ~ .(valor)), cex = 1, col = coloresFinales[length(colnames(simulationData)) -
i + 1]
)
else if (grepl("epsilon", colnames(simulationData)[1]))
text(
x = xPosition, y = yPosition, bquote(epsilon ~ "=" ~ .(valor)), cex = 1, col = coloresFinales[length(colnames(simulationData)) -
i + 1]
)
}
# There is no need for simulationParameters
simulationParameters <- ""
    }
    # > WINS_IN_FIRST_TASK
    else if (identical(listOfChartTypes$winsInFirstTask, chartType)) {
      # Chart margins
par(mar = c(customBottomMargin,4,2,2))
# Labels
xLabel <- ""
yLabel <- "Wins in learning phase"
      # Display the results
identidad <- diag(ncol(simulationData))
simulationData <- (identidad%*%t(simulationData))
simulationData <- t(simulationData)
matplot(xlab = xLabel, ylab = yLabel, simulationData,
type = "p", lty = 1, pch = 21, bg = coloresFinales,
col = coloresFinales,
ylim = c(0,dataFile$numberOfEpisodes[1]),
xaxt='n') #plot
}
# > ERROR
else{
# Labels
xLabel <- "--------"
yLabel <- "--------"
# There is no need for simulationParameters
simulationParameters <- ""
}
    # Place the legends
leg.txt <- simulationParameters
if (#ncol(simulationData)<7 && !is.null(leg.txt) &&
nchar(leg.txt[1]) >= 1) {
idealFontSize <- 0.7
idealLineNumber <- 6
customFontSize <-
idealFontSize + (idealFontSize - (idealFontSize * ncol(simulationData) /
idealLineNumber))
minimunFontSize <- 0.6
if (customFontSize < minimunFontSize)
customFontSize <- minimunFontSize
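      # Worked example (hedged): the expression above reduces to
      # idealFontSize * (2 - ncol(simulationData) / idealLineNumber), so 6
      # columns give 0.7 * (2 - 1) = 0.7 and 12 columns give 0, clamped to 0.6.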
par(
fig = c(0, 1, 0, 1), oma = c(1, 0, 0, 0), mar = c(0, 0, 0, 0), new = TRUE
)
plot(
0, 0, type = "n", bty = "n", xaxt = "n", yaxt = "n"
)
legend(
"bottom", legend = leg.txt,
xpd = TRUE, horiz = FALSE, inset = c(0, 0), bty = "o",
pch = c(8, 8), col = coloresFinales, cex = customFontSize
)
}
}
# Returns the legend of the chart being created
getParameterExpression <- function(dataFile) {
  # Methods
learnerMethod <- as.character(dataFile$learningMethod[1])
selectorMethod <- as.character(dataFile$selectionMethod[1])
selectorMethodForMCTS <- as.character(dataFile$selectionMethodForSimulationMCTS[1])
  # Learning methods
listOfLearners <-
list("Q_LEARNING", "Q_LEARNING_LAMBDA", "GRADIENT_BANDIT", "SARSA", "SARSA_LAMBDA")
names(listOfLearners) <-
c("Q_LEARNING", "Q_LEARNING_LAMBDA", "gradientBandit", "SARSA", "SARSA_LAMBDA")
  # Selection methods
listOfActionSelectors <-
list("E_GREEDY", "E_GREEDY_CHANGING_TEMPORALLY", "UCB", "SOFT_MAX", "MONTE_CARLO_TREE_SEARCH")
names(listOfActionSelectors) <-
c("eGreedy", "eGreedyTemporal","UCB", "softmax", "MCTS")
  # The resulting expression varies with the chosen
  # learning and selection methods.
  # Assign the expression for the learning method
  learner <- ""
# > QLEARNING
if (identical(listOfLearners$Q_LEARNING, learnerMethod)) {
learner <-
bquote(
# "Map("~.(as.character(dataFile$sizeMap[1]))~"x"~.(as.character(dataFile$sizeMap[1]))~")"~
"Q-Learning("
# ~ Q[0] == .(as.character(dataFile$initialQValues[1])) ~ ","
~ alpha == .(as.character(dataFile$qLearning_alpha)) ~ "," ~ gamma == .(as.character(dataFile$qLearning_gamma)) ~
")"
)
}
# > QLEARNING_LAMBDA
else if (identical(listOfLearners$Q_LEARNING_LAMBDA, learnerMethod)) {
learner <-
bquote(
"Q-LEARNING_LAMBDA(" ~ Q[0] == .(as.character(dataFile$initialQValues[1])) ~
"," ~ alpha == .(as.character(dataFile$qLearning_alpha)) ~ "," ~ gamma ==
.(as.character(dataFile$qLearning_gamma)) ~ "," ~ lambda == .(as.character(dataFile$qLearning_lambda)) ~
")"
)
}
# > SARSA
else if (identical(listOfLearners$SARSA, learnerMethod)) {
learner <-
bquote(
# "Map("~.(as.character(dataFile$sizeMap[1]))~"x"~.(as.character(dataFile$sizeMap[1]))~")"~
"SARSA(" ~ Q[0] == .(as.character(dataFile$initialQValues[1])) ~ "," ~ alpha == .(as.character(dataFile$qLearning_alpha)) ~
"," ~ gamma == .(as.character(dataFile$qLearning_gamma)) ~ ")"
)
}
# > SARSA_LAMBDA
else if (identical(listOfLearners$SARSA_LAMBDA, learnerMethod)) {
learner <-
bquote(
"SARSA_LAMBDA(" ~ Q[0] == .(as.character(dataFile$initialQValues[1])) ~
"," ~ alpha == .(as.character(dataFile$qLearning_alpha)) ~ "," ~ gamma ==
.(as.character(dataFile$qLearning_gamma)) ~ "," ~ lambda == .(as.character(dataFile$qLearning_lambda)) ~
")"
)
}
# > GRADIENT_BANDIT
else if (identical(listOfLearners$gradientBandit, learnerMethod)) {
baselineUsed <- ""
if (as.character(dataFile$softmax_baselineUsed[1]) == " true") {
baselineUsed <- "with_baseline"
}else{
baselineUsed <- "without_baseline"
}
learner <-
bquote("GradientBandit(" ~ alpha == .(as.character(dataFile$qLearning_alpha[1])) ~
"," ~ .(baselineUsed) ~ ")")
}
# > ERROR
else{
learner <- "Learner not found!"
}
  # Assign the expression for the selection method
selector <- ""
# > E_GREEDY
if (identical(listOfActionSelectors$eGreedy, selectorMethod))
selector <-
bquote("eGreedy(" ~ epsilon == .(as.character(dataFile$eGreedy_epsilon_initial[1])) ~
")")
# > E_GREEDY_TEMPORAL
else if (identical(listOfActionSelectors$eGreedyTemporal, selectorMethod))
selector <-
bquote("eGreedyTemporal(" ~ epsilon == ~ "[" ~ .(as.character(dataFile$eGreedy_epsilon_initial[1])) ~
"," ~ .(as.character(dataFile$eGreedy_epsilon_final[1])) ~ "])")
# > UCB (Upper Confident Bound)
else if (identical(listOfActionSelectors$UCB, selectorMethod))
selector <-
bquote("UCB(" ~ c == .(as.character(dataFile$UCB_c[1])) ~ ")")
# > SOFTMAX
else if (identical(listOfActionSelectors$softmax, selectorMethod))
selector <-
bquote("softmax(" ~ tau == .(as.character(dataFile$softmax_temperature[1])) ~
")")
# > MCTS
else if (identical(listOfActionSelectors$MCTS, selectorMethod)){
# > E_GREEDY
if (identical(listOfActionSelectors$eGreedy, selectorMethodForMCTS))
selector <- bquote("MCTS("
# ~ epsilon == .(as.character(dataFile$eGreedy_epsilon_initial[1])) ~ ", "
~ beta == .(as.character(dataFile$maxStepsForSimulationMCTS[1]))
~ ", " ~ theta == .(as.character(dataFile$simulationDepthChargeMCTS[1]))
# ~ ", " ~ .(selectorMethodForMCTS)~ "("
~ ", " ~ epsilon == .(as.character(dataFile$eGreedy_epsilon_initial[1])) ~ ")"
# ~ ")"
)
# > E_GREEDY_TEMPORAL
else if (identical(listOfActionSelectors$eGreedyTemporal, selectorMethodForMCTS))
selector <- bquote("MCTS("
# ~ epsilon == .(as.character(dataFile$eGreedy_epsilon_initial[1])) ~ ", "
~ beta == .(as.character(dataFile$maxStepsForSimulationMCTS[1]))
~ ", " ~ theta == .(as.character(dataFile$simulationDepthChargeMCTS[1]))
# ~ ", " ~ "E_GREEDY" ~ "("
~ ", " ~ epsilon == ~ "[" ~ .(as.character(dataFile$eGreedy_epsilon_initial[1])) ~ "," ~ .(as.character(dataFile$eGreedy_epsilon_final[1])) ~ "])"
# ~ ")"
)
# > UCB (Upper Confident Bound)
else if (identical(listOfActionSelectors$UCB, selectorMethodForMCTS))
selector <- bquote("MCTS("
# ~ epsilon == .(as.character(dataFile$eGreedy_epsilon_initial[1])) ~ ", "
~ beta == .(as.character(dataFile$maxStepsForSimulationMCTS[1]))
~ ", " ~ theta == .(as.character(dataFile$simulationDepthChargeMCTS[1]))
# ~ ", " ~ .(selectorMethodForMCTS) ~ "("
~ ", "~ c == .(as.character(dataFile$UCB_c[1])) ~ ")"
# ~ ")"
)
# > SOFTMAX
else if (identical(listOfActionSelectors$softmax, selectorMethod))
selector <- bquote("MCTS("
# ~ epsilon == .(as.character(selectorMethodForMCTS$eGreedy_epsilon_initial[1])) ~ ", "
~ beta == .(as.character(dataFile$maxStepsForSimulationMCTS[1]))
~ ", " ~ theta == .(as.character(dataFile$simulationDepthChargeMCTS[1]))
# ~ ", " ~ .(selectorMethodForMCTS)~ "("
~ ", " ~ tau == .(as.character(dataFile$softmax_temperature[1])) ~ ")"
# ~ ")"
)
# selector <- paste(selector, test)
# > ERROR
else
selector <- "Selector not found!"
}
# > ERROR
else
selector <- "Selector not found!"
  # Build the final expression, which combines the
  # learning method and the action-selection method
expression <- as.expression(bquote(.(learner) ~ .(selector)))
return (expression)
}
# Given a string containing a keyword, returns the first occurrence of that
# keyword. For example, if the string contains "alpha" and "alpha" is one of
# the keywords defined inside this function, the result is "alpha".
getKeyWords <- function(parameter) {
nameParameter <- ""
parameterExpression <- ""
value <-
as.double(substring(
parameter,regexpr("\\.[^\\.]*$",substr(
parameter,1,regexpr("\\.[^\\.]*$", parameter)[1] - 1
))[1] + 1, nchar(parameter)
))
if (is.na(value)) {
value <-
as.double(substring(
parameter,regexpr("\\=[^\\=]*$", parameter)[1] + 1,nchar(parameter) - 1
))
}
if (grepl("alpha", parameter)) {
parameter <- bquote(alpha);
parameterExpression <- expression(alpha)
nameParameter <- "alpha"
}
else if (grepl("gamma", parameter)) {
parameter <- bquote(gamma)
parameterExpression <- expression(gamma)
nameParameter <- "gamma"
}
else if (grepl("lambda", parameter)) {
#value <- as.double(substr(parameter,8,nchar(parameter)))
parameter <- bquote(lambda)
parameterExpression <- expression(lambda)
nameParameter <- "lambda"
}
else if (grepl("epsilon", parameter)) {
parameter <- bquote(epsilon)
parameterExpression <- expression(epsilon)
nameParameter <- "epsilon"
}
else{
value <- as.double(-1)
parameter <- "Error: Parameter not found!"
parameterExpression <- "notFound"
nameParameter <- "notFound"
}
return (c(parameter, nameParameter, parameterExpression, value))
}
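# Example (hedged sketch): for an input such as "alpha.0.1", getKeyWords() is
# expected to return the plotmath symbol for alpha, the name "alpha", its
# expression, and the numeric value 0.1 parsed from the trailing characters.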
|
/bin/Analysis/RLGraficar.R
|
no_license
|
PedroReyes/ReinforcementLearning
|
R
| false
| false
| 53,791
|
r
|
org.par <- par("ask" = TRUE)
# simulated data to test
set.seed(10)
ftime <- rexp(200)
fstatus <- sample(0:2, 200, replace = TRUE)
cov <- data.frame(
x1 = runif(200),
x2 = runif(200),
x3 = runif(200)
)
library(rms)
dd <- datadist(cov)
options(datadist = "dd")
fit1 <- cph(Surv(ftime, fstatus == 1) ~ x1 + x2 + x3, data = cov)
fit2 <- cph(Surv(ftime, fstatus == 2) ~ x1 + x2 + x3, data = cov)
forestplotRegrObj(regr.obj = fit1, new_page = TRUE)
library(forestplot)
forestplotRegrObj(
regr.obj = list(fit1, fit2),
legend = c("Status = 1", "Status = 2"),
legend_args = fpLegend(title = "Type of regression"),
new_page = TRUE
)
modifyNameFunction <- function(x) {
if (x == "x1") {
return("Covariate A")
}
if (x == "x2") {
return(expression(paste("My ", beta[2])))
}
return(x)
}
forestplotRegrObj(
regr.obj = list(fit1, fit2),
col = fpColors(box = c("darkblue", "darkred")),
variablesOfInterest.regexp = "(x2|x3)",
legend = c("First model", "Second model"),
legend_args = fpLegend(title = "Models"),
rowname.fn = modifyNameFunction, new_page = TRUE
)
par(org.par)
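# A small optional variation (not part of the original example; uses
# only standard grDevices calls): render one of the forest plots above
# into a PNG file instead of the interactive device.
# png("forestplot_fit1.png", width = 800, height = 600)
# forestplotRegrObj(regr.obj = fit1, new_page = TRUE)
# dev.off()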
|
/inst/examples/forestplotRegrObj_example.R
|
no_license
|
yadevi/Greg
|
R
| false
| false
| 1,111
|
r
|
setwd("C:\\Users\\jack-\\Desktop")
getwd()
r <- read.csv("TeamTolandCSV.csv")
str(r)
messages <- subset(r, select=c("Date","Time","Sender","Message"))
levels(messages$Sender)
newmess <- messages[(messages$Sender == "Andrew Morton") |
(messages$Sender == "Angela Toland")|
(messages$Sender == "Christine O'Hara")|
(messages$Sender == "Emma Steeples")|
(messages$Sender == "Fiona Morton")|
(messages$Sender == "Fraser McKechnie")|
(messages$Sender == "Granny")|
(messages$Sender == "Jack McKechnie")|
(messages$Sender == "Jaclyn Toland")|
(messages$Sender == "Katherine Morton")|
(messages$Sender == "Katie Morton")|
(messages$Sender == "Liam O'Hara")|
(messages$Sender == "Liam Toland")|
(messages$Sender == "Marion O'Hara")|
(messages$Sender == "Matthew Steeples")|
(messages$Sender == "Michael Toland")|
(messages$Sender == "Nicola Toland")|
(messages$Sender == "Roddy McKechnie")|
(messages$Sender == "Sarah O'Hara")|
(messages$Sender == "Susan McKechnie")|
(messages$Sender == "Tricia Steeples"),]
sort(table(newmess$Sender))
library(ggplot2)
library(plyr)
#---------------- MESSAGES PER PERSON -------------
senders <- count(newmess,'Sender')
senders <- senders[order(senders$freq,decreasing=T),]
# Plot messages per person
plot <- ggplot(data = senders, aes(x=reorder(Sender, -freq),y=freq))
messages.per.person <- plot + geom_bar(stat="identity",aes(fill=freq)) +
scale_fill_gradient2(low='orange', mid='Red', high='Orange') +
theme(axis.text.x = element_text(angle = 90, hjust = 1),
legend.position = c(0.9,0.8)) +
xlab("Name") +
ylab("No. of messages sent") +
geom_text(aes(label=freq,vjust=-0.25)) +
ggtitle("Number of Messages Sent Per Person")
messages.per.person
#------------- MESSAGES OVER THE DAY -------------
#Prep data
times <- newmess
options(max.print=1000000)
# Keep only the hour portion of each timestamp (characters 2 and 3)
times$Time <- as.factor(substr(newmess$Time,2,3))
times <- count(times,'Time')
times <- times[order(times$Time,decreasing=F),]
#Plot data
messages.over.time <- ggplot(data = times, aes(x=Time,y=freq,group=1))
messages.over.time <- messages.over.time + geom_point() + geom_smooth(span=0.3,fill="green",colour="darkred")
messages.over.time <- messages.over.time + ylab("No. of messages sent")+ theme(axis.text.x = element_text(angle = 90)) +
ggtitle("Message Frequency Over The Day")
messages.over.time
#------ Number of messages with length -----
install.packages("stringr")
library("stringr")
messlen <- newmess
messlen$Message <- str_length(messlen$Message)
messlen
head(messlen,n=100)
#Plot the data
u <- ggplot(data = messlen, aes(x = Sender,y = Message, colour = Sender))
u + geom_boxplot()
u + geom_boxplot(size=1.2)
u + geom_boxplot(size=1.2) + geom_point()
message.length <- u + geom_boxplot(size=1.2) + geom_jitter() +
theme(legend.position = "none",axis.text.x = element_text(angle = 90))
message.length
#------------- Who sends messages at what time --------
install.packages("lubridate")
library(lubridate)
#Prepare the data
time.pop <- messlen
time.pop$Date <- weekdays(as.Date(time.pop$Date,'%d/%m/%Y'))
time.pop$Date
days <- count(time.pop$Date)
days <- days[order(days$freq,decreasing=T),]
days
#Plot the data
plot <- ggplot(data=days,aes(x=reorder(x,-freq),y=freq))
plot + geom_bar(stat="identity",aes(fill=freq)) +
scale_fill_gradient2(mid='red', high='green') +
theme(axis.text.x = element_text(angle = 90),legend.position = c(0.9,0.88)) +
xlab("Day of the week") +
ylab("No. of messages sent") +
geom_text(aes(label=freq,vjust=-0.25)) +
ggtitle("Days of the Week")
#----------- Most photos sent -----------
photos <- newmess
head(photos$Message)
#photos$Message<- grepl("image omitted",photos$Message,)
tfvect <- as.vector(grepl("image omitted",photos$Message))
photos["is.image"] <- tfvect
head(photos)
photos <- photos[photos$is.image == T, ]
photo.count <- count(photos$Sender)
photo.count <- photo.count[order(photo.count$freq,decreasing=T),]
photo.count
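#----------- Other media types (sketch) -----------
# The same grepl() trick used for photos above can be reused for other
# WhatsApp placeholders; "video omitted" is an assumed placeholder
# string here, by analogy with "image omitted".
videos <- newmess[grepl("video omitted", newmess$Message), ]
video.count <- count(videos$Sender)
video.count <- video.count[order(video.count$freq, decreasing = T), ]
video.count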
|
/Whatsapp Chat Plots.R
|
no_license
|
JackMcKechnie/WhatsApp-Chat-Plots
|
R
| false
| false
| 4,744
|
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{disease}
\alias{disease}
\title{Disease Data}
\format{A dataframe with 196 rows and 4 columns.
\describe{
\item{\code{disease_status}}{Indicator of whether the disease is present.
Coded as: \itemize{
\item{\code{1}}{Disease is present}
\item{\code{2}}{Disease is absent}
}
}
\item{\code{age}}{Age in years.}
\item{\code{ses}}{Socio-economic status of household. One of: \itemize{
\item{\code{upper}}
\item{\code{middle}}
\item{\code{lower}}
}}
\item{\code{sector}}{The sector within the city. One of: \itemize{
\item{\code{sector1}}
\item{\code{sector2}}
}}
}}
\usage{
data("disease")
}
\description{
In a health study to investigate an epidemic outbreak of a
disease that is spread by mosquitoes, individuals are randomly sampled
within two sectors in a city to determine if the person had recently
contracted the disease under study. This was ascertained by the
interviewer, who asked pertinent questions to assess whether certain
specific symptoms associated with the disease were present during the
specified period.
}
\note{
The documentation does not auto-update. If one changes the base file,
this documentation will no longer remain accurate.
36-202 Lab 11 Data.
}
\keyword{datasets}
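% A minimal usage sketch added for illustration (assumes the package
% that ships this dataset is attached):
\examples{
data("disease")
str(disease)
# cross-tabulate disease status against city sector
table(disease$disease_status, disease$sector)
}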
|
/man/disease.Rd
|
permissive
|
frank113/cmu202
|
R
| false
| true
| 1,398
|
rd
|
testlist <- list(A = structure(c(2.31584307392677e+77, 9.53818252170294e+295, 3.84343773421094e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 3L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result)
|
/multivariance/inst/testfiles/match_rows/AFL_match_rows/match_rows_valgrind_files/1613103477-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false
| false
| 282
|
r
|
#################### RICHNESS AND ABUNDANCE ####################
# BCI data
### area = 10 hectares
### total individuals with DBH >= 10 cm = 4510
### richness = 170
require(vegan)
data(BCI)
dim(BCI[1:10,apply(BCI[1:10,],2,sum)!=0])[2] # number of species found in 10 plots (10 hectares)
sum(BCI[1:10,apply(BCI[1:10,],2,sum)!=0]) # total abundance in the 10 plots
# Amazonia data (Steege 2013)
### area =
### total individuals with DBH >= 10 cm =
### richness =
# Atlantic Forest data (TREECO)
### area = 10 hectares
### total individuals with DBH >= 10 cm = 4817 and 4920
### richness = 108 and 325
mata<-read.table("tabmata.csv",header=T,sep=",")
mata10<-mata[mata$effort_ha==10&(mata$dbh_cutoff=="DBH>=10.0cm"|mata$dbh_cutoff=="DBH>10.0cm"),]
head(mata10)
mata10
# Rio Grande do Sul Floristic Inventory data
# Sta Barbara data
stababi<-read.table("stababi.csv",header=T,sep=",")
str(stababi)
head(stababi)
stababi10<-stababi[stababi$dbh>=10,]
str(stababi10)
#### CONCLUSION
### Fix total abundance at 5000 individuals
### Vary richness (hypercube) from 5 to 500 species
#################### MUTATION RATE ####################
# Rosindell 2015
### mutation rate ranged from 10e-2 to 10e-7
#### CONCLUSION
### Vary the mutation rate in the hypercube from 0 to 10e-6
#################### NUMBER OF PROPAGULES ####################
#################### TOTAL PER INDIVIDUAL #####################
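#################### PARAMETER SAMPLING (SKETCH) ####################
# A minimal sketch of how the conclusions above could be turned into a
# Latin hypercube sample of the parameter space. The 'lhs' package, the
# number of simulations and the scaling below are illustrative
# assumptions, not part of the original plan.
library(lhs)
n.sim <- 100 # number of parameter combinations (arbitrary choice)
lh <- randomLHS(n.sim, 2) # n.sim points in the unit square
pars <- data.frame(
  J = 5000, # total abundance, fixed (see conclusion above)
  S = round(5 + lh[, 1] * (500 - 5)), # richness: 5 to 500 species
  mu = lh[, 2] * 1e-6 # mutation rate: 0 to 10^-6
)
head(pars)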
|
/hipercubo/parametros.R
|
no_license
|
luisanovara/simula-neutra-step
|
R
| false
| false
| 1,453
|
r
|
library(bio3d)
### Name: dccm.pca
### Title: Dynamic Cross-Correlation from Principal Component Analysis
### Aliases: dccm.pca
### Keywords: analysis
### ** Examples
## Not run:
##D ##-- Read example trajectory file
##D trtfile <- system.file("examples/hivp.dcd", package="bio3d")
##D trj <- read.dcd(trtfile)
##D
##D ## Read the starting PDB file to determine atom correspondence
##D pdbfile <- system.file("examples/hivp.pdb", package="bio3d")
##D pdb <- read.pdb(pdbfile)
##D
##D ## Select residues 24 to 27 and 85 to 90 in both chains
##D inds <- atom.select(pdb, resno=c(24:27,85:90), elety='CA')
##D
##D ## lsq fit of trj on pdb
##D xyz <- fit.xyz(pdb$xyz, trj, fixed.inds=inds$xyz, mobile.inds=inds$xyz)
##D
##D ## Do PCA
##D pca <- pca.xyz(xyz)
##D
##D ## DCCM: only use first 10 PCs
##D cij <- dccm(pca, pc = c(1:10))
##D
##D ## Plot DCCM
##D plot(cij)
##D
##D ## DCCM: remove first 10 PCs
##D cij <- dccm(pca, pc = -c(1:10))
##D
##D ## Plot DCCM
##D plot(cij)
## End(Not run)
|
/data/genthat_extracted_code/bio3d/examples/dccm.pca.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 1,002
|
r
|
library(bio3d)
### Name: dccm.pca
### Title: Dynamic Cross-Correlation from Principal Component Analysis
### Aliases: dccm.pca
### Keywords: analysis
### ** Examples
## Not run:
##D ##-- Read example trajectory file
##D trtfile <- system.file("examples/hivp.dcd", package="bio3d")
##D trj <- read.dcd(trtfile)
##D
##D ## Read the starting PDB file to determine atom correspondence
##D pdbfile <- system.file("examples/hivp.pdb", package="bio3d")
##D pdb <- read.pdb(pdbfile)
##D
##D ## Select residues 24 to 27 and 85 to 90 in both chains
##D inds <- atom.select(pdb, resno=c(24:27,85:90), elety='CA')
##D
##D ## lsq fit of trj on pdb
##D xyz <- fit.xyz(pdb$xyz, trj, fixed.inds=inds$xyz, mobile.inds=inds$xyz)
##D
##D ## Do PCA
##D pca <- pca.xyz(xyz)
##D
##D ## DCCM: only use first 10 PCs
##D cij <- dccm(pca, pc = c(1:10))
##D
##D ## Plot DCCM
##D plot(cij)
##D
##D ## DCCM: remove first 10 PCs
##D cij <- dccm(pca, pc = -c(1:10))
##D
##D ## Plot DCCM
##D plot(cij)
## End(Not run)
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/internal.R
\name{get_dataset}
\alias{get_dataset}
\title{Internal function that retrieves a dataset from AzureML.}
\usage{
get_dataset(x, h, ...)
}
\arguments{
\item{x}{a list or data.frame with \code{DownloadLocation} and \code{DataTypeId} fields}
\item{h}{optional curl handle}
\item{...}{additional parameters to pass to \code{read.table}}
}
\value{
a data.frame
}
\description{
Internal function that retrieves a dataset from AzureML.
}
\keyword{Internal}
|
/man/get_dataset.Rd
|
no_license
|
rmhorton/AzureML
|
R
| false
| false
| 549
|
rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/internal.R
\name{get_dataset}
\alias{get_dataset}
\title{Internal function that retrieves a dataset from AzureML.}
\usage{
get_dataset(x, h, ...)
}
\arguments{
\item{x}{a list or data.frame with \code{DownloadLocation} and \code{DataTypeId} fields}
\item{h}{optional curl handle}
\item{...}{additional parameters to pass to \code{read.table}}
}
\value{
a data.frame
}
\description{
Internal function that retrieves a dataset from AzureML.
}
\keyword{Internal}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/build_D3partitionR.R
\name{set_continuous_color_scale}
\alias{set_continuous_color_scale}
\title{Add a custom continuous color scale}
\usage{
set_continuous_color_scale(D3partitionR_object, color_palette)
}
\arguments{
\item{D3partitionR_object}{The D3partitionR object to which the data should be appended}
\item{color_palette}{a vector of two colors; the first one is used at the bottom of the scale, the other at the top.}
}
\value{
A D3partitionR object
}
\description{
Add a custom continuous color scale
}
|
/man/set_continuous_color_scale.Rd
|
no_license
|
AntoineGuillot2/D3partitionR
|
R
| false
| true
| 586
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/build_D3partitionR.R
\name{set_continuous_color_scale}
\alias{set_continuous_color_scale}
\title{Add a custom continuous color scale}
\usage{
set_continuous_color_scale(D3partitionR_object, color_palette)
}
\arguments{
\item{D3partitionR_object}{The D3partitionR object to which the data should be appended}
\item{color_palette}{a vector of two colors; the first one is used at the bottom of the scale, the other at the top.}
}
\value{
A D3partitionR object
}
\description{
Add a custom continuous color scale
}
|
/5.Correlation Analysis of Classified Data/Correlation Analysis of Classified Data.R
|
no_license
|
luweihao/Nonparametric-statistics
|
R
| false
| false
| 2,612
|
r
| ||
library(tidyverse) # Utilities
library(openxlsx) # Excel exports
# Params
departement <- '75' # Department of your commune
commune <- '75105' # INSEE code of your commune/arrondissement
iris <- c('751052003', '751051905', '751051906') # Your IRIS
# Import data
dipl90 <- read.csv("data/diplomes-1990.csv") %>% as_tibble()
dipl99 <- read.csv("data/diplomes-1999.csv") %>% as_tibble()
dipl10 <- read.csv("data/diplomes-2010.csv") %>% as_tibble()
dipl15 <- read.csv("data/diplomes-2015.csv") %>% as_tibble()
# Simplify and merge
dipl90<- dipl90 %>%
select(-DEP,
-REG,
-TYP_IRIS,
-INDIC,
-INFRA,
-COMP9099,
-NOM_COM,
-DCIRISLI,
-IRIS,
-NOM_IRIS) %>%
rename(IRIS = DCOMIRIS,
COM = DEPCOM)
dipl99 <- dipl99 %>%
select(-DEP,
-REG,
-TYP_IRIS,
-INDIC,
-INFRA,
-COMP9099,
-NOM_COM,
-DCIRISLI,
-IRIS,
-NOM_IRIS) %>%
rename(IRIS = DCOMIRIS,
COM = DEPCOM)
dipl10 <- dipl10 %>%
select(-DEP,
-REG,
-UU2010,
-LIBCOM,
-TRIRIS,
-GRD_QUART,
-LIBIRIS,
-TYP_IRIS,
-MODIF_IRIS,
-LAB_IRIS)
dipl15 <- dipl15 %>%
select(-DEP,
-REG,
-UU2010,
-LIBCOM,
-TRIRIS,
-GRD_QUART,
-LIBIRIS,
-TYP_IRIS,
-MODIF_IRIS,
-LAB_IRIS)
dipl <- dipl90 %>%
full_join(dipl99) %>%
full_join(dipl10) %>%
full_join(dipl15)
# Prepare data
dipl <- dipl %>%
mutate(P90_NSCOL15P = AF90T15P - AF90TETU) %>% # 15+ 1990
mutate(P99_NSCOL15P = AF99T15P - AF99TETU) %>% # 15+ 1999
mutate(P90_NSCOL15P_S = AF90TSUP + AF90TBA2) %>% # 15+ dipl du sup 1990
mutate(P99_NSCOL15P_S = AF99TSUP + AF99TBA2) %>% # 15+ dipl du sup 1999
mutate(P10_NSCOL15P_S = P10_NSCOL15P_SUP + P10_NSCOL15P_BACP2) %>% # 15+ dipl du sup 2010
mutate(P15_NSCOL15P_S = P15_NSCOL15P_SUP) # 15+ dipl du sup 2015
sup <- dipl %>%
select(COM, IRIS,
P90_NSCOL15P,
P99_NSCOL15P,
P10_NSCOL15P,
P15_NSCOL15P,
P90_NSCOL15P_S,
P99_NSCOL15P_S,
P10_NSCOL15P_S,
P15_NSCOL15P_S)
# Paris
sup75 <- sup %>%
summarise(P90_NSCOL15P_S = sum(P90_NSCOL15P_S, na.rm = TRUE) / sum(P90_NSCOL15P, na.rm = TRUE) * 100,
P99_NSCOL15P_S = sum(P99_NSCOL15P_S, na.rm = TRUE) / sum(P99_NSCOL15P, na.rm = TRUE) * 100,
P10_NSCOL15P_S = sum(P10_NSCOL15P_S, na.rm = TRUE) / sum(P10_NSCOL15P, na.rm = TRUE) * 100,
P15_NSCOL15P_S = sum(P15_NSCOL15P_S, na.rm = TRUE) / sum(P15_NSCOL15P, na.rm = TRUE) * 100) %>%
mutate(id = 75)
# 5th arrondissement
sup75105 <- sup %>%
filter(COM == '75105') %>%
group_by(COM) %>%
summarise(P90_NSCOL15P_S = sum(P90_NSCOL15P_S, na.rm = TRUE) / sum(P90_NSCOL15P, na.rm = TRUE) * 100,
P99_NSCOL15P_S = sum(P99_NSCOL15P_S, na.rm = TRUE) / sum(P99_NSCOL15P, na.rm = TRUE) * 100,
P10_NSCOL15P_S = sum(P10_NSCOL15P_S, na.rm = TRUE) / sum(P10_NSCOL15P, na.rm = TRUE) * 100,
P15_NSCOL15P_S = sum(P15_NSCOL15P_S, na.rm = TRUE) / sum(P15_NSCOL15P, na.rm = TRUE) * 100) %>%
rename(id = COM)
# Our Neighborhood
sup751050000 <- sup %>%
filter(IRIS %in% c('751052003', '751051905', '751051906')) %>%
summarise(P90_NSCOL15P_S = sum(P90_NSCOL15P_S, na.rm = TRUE) / sum(P90_NSCOL15P, na.rm = TRUE) * 100,
P99_NSCOL15P_S = sum(P99_NSCOL15P_S, na.rm = TRUE) / sum(P99_NSCOL15P, na.rm = TRUE) * 100,
P10_NSCOL15P_S = sum(P10_NSCOL15P_S, na.rm = TRUE) / sum(P10_NSCOL15P, na.rm = TRUE) * 100,
P15_NSCOL15P_S = sum(P15_NSCOL15P_S, na.rm = TRUE) / sum(P15_NSCOL15P, na.rm = TRUE) * 100) %>%
mutate(id = 751050000)
# Our IRIS
supIRIS <- sup %>%
filter(IRIS %in% c('751052003', '751051905', '751051906')) %>%
group_by(IRIS) %>%
summarise(P90_NSCOL15P_S = sum(P90_NSCOL15P_S, na.rm = TRUE) / sum(P90_NSCOL15P, na.rm = TRUE) * 100,
P99_NSCOL15P_S = sum(P99_NSCOL15P_S, na.rm = TRUE) / sum(P99_NSCOL15P, na.rm = TRUE) * 100,
P10_NSCOL15P_S = sum(P10_NSCOL15P_S, na.rm = TRUE) / sum(P10_NSCOL15P, na.rm = TRUE) * 100,
P15_NSCOL15P_S = sum(P15_NSCOL15P_S, na.rm = TRUE) / sum(P15_NSCOL15P, na.rm = TRUE) * 100) %>%
rename(id = IRIS)
sup <- sup75 %>%
full_join(sup75105) %>%
full_join(sup751050000) %>%
full_join(supIRIS) %>%
select(id, everything())
write.xlsx(sup, "results/diplomes.xlsx")
sup <- sup %>%
rename('1990' = P90_NSCOL15P_S,
'1999' = P99_NSCOL15P_S,
'2010' = P10_NSCOL15P_S,
'2015' = P15_NSCOL15P_S) %>%
pivot_longer(c('1990', '1999', '2010', '2015'),
names_to = 'year',
values_to = 'NSCOL15P_S') %>%
mutate(id = as.factor(id),
year = as.numeric(year))
supFIL <- sup %>%
filter(id %in% c(75, 75105, 751050000))
ggplot(supFIL, aes(x = year, y = NSCOL15P_S, color = id, linetype = id, alpha = id)) +
#geom_col(position = "dodge") +
geom_line() +
geom_point() +
scale_linetype_manual(values = c('75' = 'dotted',
'75105' = 'dashed',
'751050000' = 'solid',
'751052003' = 'solid',
'751051905' = 'solid',
'751051906' = 'solid')) +
scale_alpha_manual(values = c('75' = 1,
'75105' = 1,
'751050000' = 1,
'751052003' = .4,
'751051905' = .4,
'751051906' = .4))
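# --- Editor's addition: the four summarise() blocks above repeat one pattern;
# a helper like this could factor it out. pct_sup() is our own name, a hedged
# sketch rather than part of the original script.
pct_sup <- function(df) {
  df %>%
    summarise(P90_NSCOL15P_S = sum(P90_NSCOL15P_S, na.rm = TRUE) / sum(P90_NSCOL15P, na.rm = TRUE) * 100,
              P99_NSCOL15P_S = sum(P99_NSCOL15P_S, na.rm = TRUE) / sum(P99_NSCOL15P, na.rm = TRUE) * 100,
              P10_NSCOL15P_S = sum(P10_NSCOL15P_S, na.rm = TRUE) / sum(P10_NSCOL15P, na.rm = TRUE) * 100,
              P15_NSCOL15P_S = sum(P15_NSCOL15P_S, na.rm = TRUE) / sum(P15_NSCOL15P, na.rm = TRUE) * 100)
}
# e.g.: sup %>% filter(COM == '75105') %>% group_by(COM) %>% pct_sup() %>% rename(id = COM)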
|
/dipl.R
|
no_license
|
gordesch/diag
|
R
| false
| false
| 5,679
|
r
|
library(tidyverse) # Utilities
library(openxlsx) # Excel exports
# Params
departement <- '75' # Department of your commune
commune <- '75105' # INSEE code of your commune/arrondissement
iris <- c('751052003', '751051905', '751051906') # Your IRIS
# Import data
dipl90 <- read.csv("data/diplomes-1990.csv") %>% as_tibble()
dipl99 <- read.csv("data/diplomes-1999.csv") %>% as_tibble()
dipl10 <- read.csv("data/diplomes-2010.csv") %>% as_tibble()
dipl15 <- read.csv("data/diplomes-2015.csv") %>% as_tibble()
# Simplify and merge
dipl90<- dipl90 %>%
select(-DEP,
-REG,
-TYP_IRIS,
-INDIC,
-INFRA,
-COMP9099,
-NOM_COM,
-DCIRISLI,
-IRIS,
-NOM_IRIS) %>%
rename(IRIS = DCOMIRIS,
COM = DEPCOM)
dipl99 <- dipl99 %>%
select(-DEP,
-REG,
-TYP_IRIS,
-INDIC,
-INFRA,
-COMP9099,
-NOM_COM,
-DCIRISLI,
-IRIS,
-NOM_IRIS) %>%
rename(IRIS = DCOMIRIS,
COM = DEPCOM)
dipl10 <- dipl10 %>%
select(-DEP,
-REG,
-UU2010,
-LIBCOM,
-TRIRIS,
-GRD_QUART,
-LIBIRIS,
-TYP_IRIS,
-MODIF_IRIS,
-LAB_IRIS)
dipl15 <- dipl15 %>%
select(-DEP,
-REG,
-UU2010,
-LIBCOM,
-TRIRIS,
-GRD_QUART,
-LIBIRIS,
-TYP_IRIS,
-MODIF_IRIS,
-LAB_IRIS)
dipl <- dipl90 %>%
full_join(dipl99) %>%
full_join(dipl10) %>%
full_join(dipl15)
# Prepare data
dipl <- dipl %>%
mutate(P90_NSCOL15P = AF90T15P - AF90TETU) %>% # 15+ 1990
mutate(P99_NSCOL15P = AF99T15P - AF99TETU) %>% # 15+ 1999
mutate(P90_NSCOL15P_S = AF90TSUP + AF90TBA2) %>% # 15+ dipl du sup 1990
mutate(P99_NSCOL15P_S = AF99TSUP + AF99TBA2) %>% # 15+ dipl du sup 1999
mutate(P10_NSCOL15P_S = P10_NSCOL15P_SUP + P10_NSCOL15P_BACP2) %>% # 15+ dipl du sup 2010
mutate(P15_NSCOL15P_S = P15_NSCOL15P_SUP) # 15+ dipl du sup 2015
sup <- dipl %>%
select(COM, IRIS,
P90_NSCOL15P,
P99_NSCOL15P,
P10_NSCOL15P,
P15_NSCOL15P,
P90_NSCOL15P_S,
P99_NSCOL15P_S,
P10_NSCOL15P_S,
P15_NSCOL15P_S)
# Paris
sup75 <- sup %>%
summarise(P90_NSCOL15P_S = sum(P90_NSCOL15P_S, na.rm = TRUE) / sum(P90_NSCOL15P, na.rm = TRUE) * 100,
P99_NSCOL15P_S = sum(P99_NSCOL15P_S, na.rm = TRUE) / sum(P99_NSCOL15P, na.rm = TRUE) * 100,
P10_NSCOL15P_S = sum(P10_NSCOL15P_S, na.rm = TRUE) / sum(P10_NSCOL15P, na.rm = TRUE) * 100,
P15_NSCOL15P_S = sum(P15_NSCOL15P_S, na.rm = TRUE) / sum(P15_NSCOL15P, na.rm = TRUE) * 100) %>%
mutate(id = 75)
# 5th arrondissement
sup75105 <- sup %>%
filter(COM == '75105') %>%
group_by(COM) %>%
summarise(P90_NSCOL15P_S = sum(P90_NSCOL15P_S, na.rm = TRUE) / sum(P90_NSCOL15P, na.rm = TRUE) * 100,
P99_NSCOL15P_S = sum(P99_NSCOL15P_S, na.rm = TRUE) / sum(P99_NSCOL15P, na.rm = TRUE) * 100,
P10_NSCOL15P_S = sum(P10_NSCOL15P_S, na.rm = TRUE) / sum(P10_NSCOL15P, na.rm = TRUE) * 100,
P15_NSCOL15P_S = sum(P15_NSCOL15P_S, na.rm = TRUE) / sum(P15_NSCOL15P, na.rm = TRUE) * 100) %>%
rename(id = COM)
# Our Neighborhood
sup751050000 <- sup %>%
filter(IRIS %in% c('751052003', '751051905', '751051906')) %>%
summarise(P90_NSCOL15P_S = sum(P90_NSCOL15P_S, na.rm = TRUE) / sum(P90_NSCOL15P, na.rm = TRUE) * 100,
P99_NSCOL15P_S = sum(P99_NSCOL15P_S, na.rm = TRUE) / sum(P99_NSCOL15P, na.rm = TRUE) * 100,
P10_NSCOL15P_S = sum(P10_NSCOL15P_S, na.rm = TRUE) / sum(P10_NSCOL15P, na.rm = TRUE) * 100,
P15_NSCOL15P_S = sum(P15_NSCOL15P_S, na.rm = TRUE) / sum(P15_NSCOL15P, na.rm = TRUE) * 100) %>%
mutate(id = 751050000)
# Our IRIS
supIRIS <- sup %>%
filter(IRIS %in% c('751052003', '751051905', '751051906')) %>%
group_by(IRIS) %>%
summarise(P90_NSCOL15P_S = sum(P90_NSCOL15P_S, na.rm = TRUE) / sum(P90_NSCOL15P, na.rm = TRUE) * 100,
P99_NSCOL15P_S = sum(P99_NSCOL15P_S, na.rm = TRUE) / sum(P99_NSCOL15P, na.rm = TRUE) * 100,
P10_NSCOL15P_S = sum(P10_NSCOL15P_S, na.rm = TRUE) / sum(P10_NSCOL15P, na.rm = TRUE) * 100,
P15_NSCOL15P_S = sum(P15_NSCOL15P_S, na.rm = TRUE) / sum(P15_NSCOL15P, na.rm = TRUE) * 100) %>%
rename(id = IRIS)
sup <- sup75 %>%
full_join(sup75105) %>%
full_join(sup751050000) %>%
full_join(supIRIS) %>%
select(id, everything())
write.xlsx(sup, "results/diplomes.xlsx")
sup <- sup %>%
rename('1990' = P90_NSCOL15P_S,
'1999' = P99_NSCOL15P_S,
'2010' = P10_NSCOL15P_S,
'2015' = P15_NSCOL15P_S) %>%
pivot_longer(c('1990', '1999', '2010', '2015'),
names_to = 'year',
values_to = 'NSCOL15P_S') %>%
mutate(id = as.factor(id),
year = as.numeric(year))
supFIL <- sup %>%
filter(id %in% c(75, 75105, 751050000))
ggplot(supFIL, aes(x = year, y = NSCOL15P_S, color = id, linetype = id, alpha = id)) +
#geom_col(position = "dodge") +
geom_line() +
geom_point() +
scale_linetype_manual(values = c('75' = 'dotted',
'75105' = 'dashed',
'751050000' = 'solid',
'751052003' = 'solid',
'751051905' = 'solid',
'751051906' = 'solid')) +
scale_alpha_manual(values = c('75' = 1,
'75105' = 1,
'751050000' = 1,
'751052003' = .4,
'751051905' = .4,
'751051906' = .4))
|
#' Write HTML report for DESeq2 analyses
#'
#' Write HTML report from graphs and tables created during the analysis with DESeq2
#'
#' @param target target \code{data.frame} of the project returned by \code{loadTargetFile()}
#' @param counts \code{matrix} of counts returned by \code{loadCountData()}
#' @param out.DESeq2 the result of \code{run.DESeq2()}
#' @param summaryResults the result of \code{summarizeResults.DESeq2()}
#' @param majSequences the result of \code{descriptionPlots()}
#' @param OutDir output directory
#' @param projectName name of the project
#' @param author name of the author of the analysis
#' @param targetFile path to the target file
#' @param rawDir path to the directory containing the counts files
#' @param featuresToRemove vector of features to remove from the counts matrix
#' @param varInt factor of interest (biological condition)
#' @param condRef reference condition for the factor of interest
#' @param batch variable to take as a batch effect
#' @param fitType mean-variance relationship: \code{"parametric"} (default) or \code{"local"}
#' @param cooksCutoff outliers detection threshold
#' @param independentFiltering \code{TRUE} or \code{FALSE} to perform the independent filtering or not
#' @param alpha threshold of statistical significance
#' @param pAdjustMethod p-value adjustment method: \code{"BH"} or \code{"BY"} for instance
#' @param typeTrans transformation for PCA/clustering: \code{"VST"} or \code{"rlog"}
#' @param locfunc \code{"median"} (default) or \code{"shorth"} to estimate the size factors
#' @param colors vector of colors of each biological condition on the plots
#' @details This function generates the HTML report for a statistical analysis with DESeq2. It uses the tables and graphs created during the workflow as well as the parameters defined at the beginning of the script.
#' @author Hugo Varet
writeReport.DESeq2 <- function(target, counts, out.DESeq2, summaryResults, majSequences, OutDir, projectName, author, targetFile, rawDir, featuresToRemove, varInt, condRef, batch, fitType, cooksCutoff, independentFiltering, alpha, pAdjustMethod, typeTrans, locfunc, colors)
{
output=paste0(projectName, "_deseq2-multifactorial_report.html")
  # knit2html() comes from the knitr package
  knitr::knit2html(input="/report_DESeq2.rmd", 
            output=output, quiet=TRUE, title="Statistical report")
# delete unwanted directory/file
unlink("cache",force=TRUE,recursive=TRUE)
  unlink("report_DESeq2.md", force=TRUE)
cat("HTML report created\n")
}
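# --- Editor's addition: hedged usage sketch. The argument values below are
# invented placeholders to illustrate the call, not values from the original
# workflow.
# writeReport.DESeq2(target = target, counts = counts, out.DESeq2 = out.DESeq2,
#                    summaryResults = summaryResults, majSequences = majSequences,
#                    OutDir = ".", projectName = "myProject", author = "J. Doe",
#                    targetFile = "target.txt", rawDir = "raw", featuresToRemove = NULL,
#                    varInt = "group", condRef = "WT", batch = NULL, fitType = "parametric",
#                    cooksCutoff = TRUE, independentFiltering = TRUE, alpha = 0.05,
#                    pAdjustMethod = "BH", typeTrans = "VST", locfunc = "median",
#                    colors = c("red", "blue"))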
|
/deseq2_multi/3.0/writeReport.DESeq2.r
|
no_license
|
cyverse/docker-builds
|
R
| false
| false
| 2,490
|
r
|
#' Write HTML report for DESeq2 analyses
#'
#' Write HTML report from graphs and tables created during the analysis with DESeq2
#'
#' @param target target \code{data.frame} of the project returned by \code{loadTargetFile()}
#' @param counts \code{matrix} of counts returned by \code{loadCountData()}
#' @param out.DESeq2 the result of \code{run.DESeq2()}
#' @param summaryResults the result of \code{summarizeResults.DESeq2()}
#' @param majSequences the result of \code{descriptionPlots()}
#' @param OutDir output directory
#' @param projectName name of the project
#' @param author name of the author of the analysis
#' @param targetFile path to the target file
#' @param rawDir path to the directory containing the counts files
#' @param featuresToRemove vector of features to remove from the counts matrix
#' @param varInt factor of interest (biological condition)
#' @param condRef reference condition for the factor of interest
#' @param batch variable to take as a batch effect
#' @param fitType mean-variance relationship: \code{"parametric"} (default) or \code{"local"}
#' @param cooksCutoff outliers detection threshold
#' @param independentFiltering \code{TRUE} or \code{FALSE} to perform the independent filtering or not
#' @param alpha threshold of statistical significance
#' @param pAdjustMethod p-value adjustment method: \code{"BH"} or \code{"BY"} for instance
#' @param typeTrans transformation for PCA/clustering: \code{"VST"} or \code{"rlog"}
#' @param locfunc \code{"median"} (default) or \code{"shorth"} to estimate the size factors
#' @param colors vector of colors of each biological condition on the plots
#' @details This function generates the HTML report for a statistical analysis with DESeq2. It uses the tables and graphs created during the workflow as well as the parameters defined at the beginning of the script.
#' @author Hugo Varet
writeReport.DESeq2 <- function(target, counts, out.DESeq2, summaryResults, majSequences, OutDir, projectName, author, targetFile, rawDir, featuresToRemove, varInt, condRef, batch, fitType, cooksCutoff, independentFiltering, alpha, pAdjustMethod, typeTrans, locfunc, colors)
{
output=paste0(projectName, "_deseq2-multifactorial_report.html")
  # knit2html() comes from the knitr package
  knitr::knit2html(input="/report_DESeq2.rmd", 
            output=output, quiet=TRUE, title="Statistical report")
# delete unwanted directory/file
unlink("cache",force=TRUE,recursive=TRUE)
  unlink("report_DESeq2.md", force=TRUE)
cat("HTML report created\n")
}
|
library(optparse)
option_list <- list(
make_option(c("-g", "--gene_list"), type="character", default=NULL,
help="gene list to begin with",
metavar="character"),
make_option(c("-a", "--asc_cutoff"), type="numeric", default=50,
help="ASC cutoff (will ignore asc observations below this cutoff)",
metavar="character"),
make_option(c("-s", "--asc_nobs_cutoff"), type="numeric", default=15,
help="Ignore genes with fewer than asc_nobs_cutoff number of ASC observations",
metavar="character"),
make_option(c("-c", "--gene_col"), type="character", default=NULL,
help="the column name of gene in gene_list",
metavar="character"),
make_option(c("-o", "--output"), type="character", default=NULL,
help="Output file",
metavar="character"),
make_option(c("-n", "--count_data_rds"), type="character", default=NULL,
help="Count data RDS (prepared in preprocessing)",
metavar="character")
)
opt_parser <- OptionParser(option_list=option_list)
opt <- parse_args(opt_parser)
library(dplyr)
trim_dot = function(ss) {
unlist(lapply(strsplit(ss, '\\.'), function(x) { x[1] }))
}
dl = readRDS(opt$count_data_rds)
# we impose restriction on TRC
trc_cutoff = 100
trc_nobs_cutoff = floor(0.8 * ncol(dl$df_trc))
# restriction on ASC
asc_cutoff = opt$asc_cutoff
asc_nobs_cutoff = opt$asc_nobs_cutoff
trc = rowSums(dl$df_trc >= trc_cutoff)
asc = rowSums(dl$df_ase1 >= asc_cutoff & dl$df_ase2 >= asc_cutoff)
df = data.frame(n_good_trc = trc, n_good_asc = asc, gene = rownames(dl$df_trc))
df = df %>% mutate(pass_trc_qc = n_good_trc >= trc_nobs_cutoff, pass_asc_qc = n_good_asc >= asc_nobs_cutoff)
trc_median_of_indiv_pass_qc = apply(dl$df_trc, 1, function(x) {
median(x[x >= trc_cutoff])
})
df2 = df %>% filter(pass_asc_qc, pass_trc_qc) %>% select(-pass_asc_qc, -pass_trc_qc)
df2$median_trc = trc_median_of_indiv_pass_qc[match(df2$gene, names(trc_median_of_indiv_pass_qc))]
print(head(df2))
gene_list = read.table(opt$gene_list, header = T, sep = '\t')
df2 = df2[ df2$gene %in% trim_dot(as.character(gene_list[, opt$gene_col])), ]
gz1 <- gzfile(opt$output, "w")
write.table(df2 %>% select(gene, median_trc), gz1, row.names = FALSE, col.names = TRUE, quote = FALSE, sep = '\t')
close(gz1)
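# --- Editor's addition: hedged illustration of the TRC/ASC filters above on a
# tiny synthetic 'dl' list. Field names mirror what the script expects from the
# RDS; all values are invented.
dl_demo <- list(
  df_trc  = matrix(c(150, 120, 90, 200, 30, 10), nrow = 2,
                   dimnames = list(c("geneA", "geneB"), NULL)),
  df_ase1 = matrix(c(60, 5, 70, 80, 55, 2), nrow = 2),
  df_ase2 = matrix(c(65, 4, 52, 90, 60, 1), nrow = 2)
)
rowSums(dl_demo$df_trc >= 100)                          # TRC observations passing the cutoff
rowSums(dl_demo$df_ase1 >= 50 & dl_demo$df_ase2 >= 50)  # ASC observations passing both cutoffs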
|
/mixfine/gene_qc.R
|
permissive
|
liangyy/mixqtl-gtex
|
R
| false
| false
| 2,370
|
r
|
library(optparse)
option_list <- list(
make_option(c("-g", "--gene_list"), type="character", default=NULL,
help="gene list to begin with",
metavar="character"),
make_option(c("-a", "--asc_cutoff"), type="numeric", default=50,
help="ASC cutoff (will ignore asc observations below this cutoff)",
metavar="character"),
make_option(c("-s", "--asc_nobs_cutoff"), type="numeric", default=15,
help="Ignore genes with fewer than asc_nobs_cutoff number of ASC observations",
metavar="character"),
make_option(c("-c", "--gene_col"), type="character", default=NULL,
help="the column name of gene in gene_list",
metavar="character"),
make_option(c("-o", "--output"), type="character", default=NULL,
help="Output file",
metavar="character"),
make_option(c("-n", "--count_data_rds"), type="character", default=NULL,
help="Count data RDS (prepared in preprocessing)",
metavar="character")
)
opt_parser <- OptionParser(option_list=option_list)
opt <- parse_args(opt_parser)
library(dplyr)
trim_dot = function(ss) {
unlist(lapply(strsplit(ss, '\\.'), function(x) { x[1] }))
}
dl = readRDS(opt$count_data_rds)
# we impose restriction on TRC
trc_cutoff = 100
trc_nobs_cutoff = floor(0.8 * ncol(dl$df_trc))
# restriction on ASC
asc_cutoff = opt$asc_cutoff
asc_nobs_cutoff = opt$asc_nobs_cutoff
trc = rowSums(dl$df_trc >= trc_cutoff)
asc = rowSums(dl$df_ase1 >= asc_cutoff & dl$df_ase2 >= asc_cutoff)
df = data.frame(n_good_trc = trc, n_good_asc = asc, gene = rownames(dl$df_trc))
df = df %>% mutate(pass_trc_qc = n_good_trc >= trc_nobs_cutoff, pass_asc_qc = n_good_asc >= asc_nobs_cutoff)
trc_median_of_indiv_pass_qc = apply(dl$df_trc, 1, function(x) {
median(x[x >= trc_cutoff])
})
df2 = df %>% filter(pass_asc_qc, pass_trc_qc) %>% select(-pass_asc_qc, -pass_trc_qc)
df2$median_trc = trc_median_of_indiv_pass_qc[match(df2$gene, names(trc_median_of_indiv_pass_qc))]
print(head(df2))
gene_list = read.table(opt$gene_list, header = T, sep = '\t')
df2 = df2[ df2$gene %in% trim_dot(as.character(gene_list[, opt$gene_col])), ]
gz1 <- gzfile(opt$output, "w")
write.table(df2 %>% select(gene, median_trc), gz1, row.names = FALSE, col.names = TRUE, quote = FALSE, sep = '\t')
close(gz1)
|
## A set of two functions: one that caches the inverse of a matrix and another
## that solves the inverse of a matrix, retrieving the cached inverse
## if it has already been calculated.
## This function creates a matrix object that can cache the inverse of a
## given matrix.
makeCacheMatrix <- function( x = matrix() ) {
        ## Initialization of the inverse matrix.
        m <- NULL
        ## Function that assigns the input matrix to x and resets the cache.
        set <- function(matrix) {
                x <<- matrix
                m <<- NULL
        }
        ## Function that returns x.
        get <- function() x
        ## Function that assigns the inverse of the matrix to m.
        setInverse <- function(inverse) {
                m <<- inverse
        }
        ## Function that returns the matrix m.
        getInverse <- function() m
        ## makeCacheMatrix returns a list of four elements, each
        ## element being a function defined earlier.
        list(set = set,
             get = get,
             setInverse = setInverse,
             getInverse = getInverse)
}
## This function solves the inverse matrix for the one returned
## by the previous function makeCacheMatrix. If it has already
## been solved and the matrix has not changed, it will retrieve
## the cached inverse.
cacheSolve <- function(x, ...) {
        ## Assign the cached inverse matrix of x to m
        m <- x$getInverse()
        ## Check whether an inverse has already been solved for x
        if( !is.null(m) ) {
                message("Retrieving cached inverse")
                return(m)
        }
        ## Reads the matrix
        df <- x$get()
        ## Solves the inverse of the matrix
        m <- solve(df)
        ## Sets the inverse matrix for x
        x$setInverse(m)
        ## Return the inverse matrix for x
        m
}
## Example for using the above functions.
##Generating a 5 by 5 random matrix.
set.seed(12345)
matrix <- stats::rnorm(25)
dim(matrix) <- c(5,5)
matrix
## Cache the generated matrix.
cachedMatrix = makeCacheMatrix(matrix)
## Call the cacheSolve function twice to show that first it
## solves the inverse and second time it returns the cached
## inverse matrix.
cacheSolve(cachedMatrix)
cacheSolve(cachedMatrix)
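## Editor's addition: sanity check that the cached result really is the
## inverse -- the product with the original matrix should be the identity
## (up to floating-point error).
round(matrix %*% cacheSolve(cachedMatrix), 10)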
|
/cachematrix.R
|
no_license
|
derzse/ProgrammingAssignment2
|
R
| false
| false
| 2,015
|
r
|
## A set of two functions: one that caches the inverse of a matrix and another
## that solves the inverse of a matrix, retrieving the cached inverse
## if it has already been calculated.
## This function creates a matrix object that can cache the inverse of a
## given matrix.
makeCacheMatrix <- function( x = matrix() ) {
        ## Initialization of the inverse matrix.
        m <- NULL
        ## Function that assigns the input matrix to x and resets the cache.
        set <- function(matrix) {
                x <<- matrix
                m <<- NULL
        }
        ## Function that returns x.
        get <- function() x
        ## Function that assigns the inverse of the matrix to m.
        setInverse <- function(inverse) {
                m <<- inverse
        }
        ## Function that returns the matrix m.
        getInverse <- function() m
        ## makeCacheMatrix returns a list of four elements, each
        ## element being a function defined earlier.
        list(set = set,
             get = get,
             setInverse = setInverse,
             getInverse = getInverse)
}
## This function solves the inverse matrix for the one returned
## by the previous function makeCacheMatrix. If it has already
## been solved and the matrix has not changed, it will retrieve
## the cached inverse.
cacheSolve <- function(x, ...) {
        ## Assign the cached inverse matrix of x to m
        m <- x$getInverse()
        ## Check whether an inverse has already been solved for x
        if( !is.null(m) ) {
                message("Retrieving cached inverse")
                return(m)
        }
        ## Reads the matrix
        df <- x$get()
        ## Solves the inverse of the matrix
        m <- solve(df)
        ## Sets the inverse matrix for x
        x$setInverse(m)
        ## Return the inverse matrix for x
        m
}
## Example for using the above functions.
##Generating a 5 by 5 random matrix.
set.seed(12345)
matrix <- stats::rnorm(25)
dim(matrix) <- c(5,5)
matrix
## Cache the generated matrix.
cachedMatrix = makeCacheMatrix(matrix)
## Call the cacheSolve function twice to show that first it
## solves the inverse and second time it returns the cached
## inverse matrix.
cacheSolve(cachedMatrix)
cacheSolve(cachedMatrix)
|
library(heemod)
### Name: define_surv_table
### Title: Define a survival distribution based on explicit survival
### probabilities
### Aliases: define_surv_table define_surv_table.data.frame
### define_surv_table.character
### ** Examples
x <- data.frame(time = c(0, 1, 5, 10), survival = c(1, 0.9, 0.7, 0.5))
define_surv_table(x)
|
/data/genthat_extracted_code/heemod/examples/define_surv_table.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 346
|
r
|
library(heemod)
### Name: define_surv_table
### Title: Define a survival distribution based on explicit survival
### probabilities
### Aliases: define_surv_table define_surv_table.data.frame
### define_surv_table.character
### ** Examples
x <- data.frame(time = c(0, 1, 5, 10), survival = c(1, 0.9, 0.7, 0.5))
define_surv_table(x)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mediaconvert_service.R
\name{mediaconvert}
\alias{mediaconvert}
\title{AWS Elemental MediaConvert}
\usage{
mediaconvert()
}
\description{
AWS Elemental MediaConvert
}
\section{Operations}{
\tabular{ll}{
\link[=mediaconvert_associate_certificate]{associate_certificate} \tab Associates an AWS Certificate Manager (ACM) Amazon Resource Name (ARN) with AWS Elemental MediaConvert \cr
\link[=mediaconvert_cancel_job]{cancel_job} \tab Permanently cancel a job \cr
\link[=mediaconvert_create_job]{create_job} \tab Create a new transcoding job \cr
\link[=mediaconvert_create_job_template]{create_job_template} \tab Create a new job template \cr
\link[=mediaconvert_create_preset]{create_preset} \tab Create a new preset \cr
\link[=mediaconvert_create_queue]{create_queue} \tab Create a new transcoding queue \cr
\link[=mediaconvert_delete_job_template]{delete_job_template} \tab Permanently delete a job template you have created \cr
\link[=mediaconvert_delete_preset]{delete_preset} \tab Permanently delete a preset you have created \cr
\link[=mediaconvert_delete_queue]{delete_queue} \tab Permanently delete a queue you have created \cr
\link[=mediaconvert_describe_endpoints]{describe_endpoints} \tab Send a request with an empty body to the regional API endpoint to get your account API endpoint \cr
\link[=mediaconvert_disassociate_certificate]{disassociate_certificate} \tab Removes an association between the Amazon Resource Name (ARN) of an AWS Certificate Manager (ACM) certificate and an AWS Elemental MediaConvert resource\cr
\link[=mediaconvert_get_job]{get_job} \tab Retrieve the JSON for a specific completed transcoding job \cr
\link[=mediaconvert_get_job_template]{get_job_template} \tab Retrieve the JSON for a specific job template \cr
\link[=mediaconvert_get_preset]{get_preset} \tab Retrieve the JSON for a specific preset \cr
\link[=mediaconvert_get_queue]{get_queue} \tab Retrieve the JSON for a specific queue \cr
\link[=mediaconvert_list_job_templates]{list_job_templates} \tab Retrieve a JSON array of up to twenty of your job templates \cr
\link[=mediaconvert_list_jobs]{list_jobs} \tab Retrieve a JSON array of up to twenty of your most recently created jobs \cr
\link[=mediaconvert_list_presets]{list_presets} \tab Retrieve a JSON array of up to twenty of your presets \cr
\link[=mediaconvert_list_queues]{list_queues} \tab Retrieve a JSON array of up to twenty of your queues \cr
\link[=mediaconvert_list_tags_for_resource]{list_tags_for_resource} \tab Retrieve the tags for a MediaConvert resource \cr
\link[=mediaconvert_tag_resource]{tag_resource} \tab Add tags to a MediaConvert queue, preset, or job template \cr
\link[=mediaconvert_untag_resource]{untag_resource} \tab Remove tags from a MediaConvert queue, preset, or job template \cr
\link[=mediaconvert_update_job_template]{update_job_template} \tab Modify one of your existing job templates \cr
\link[=mediaconvert_update_preset]{update_preset} \tab Modify one of your existing presets \cr
\link[=mediaconvert_update_queue]{update_queue} \tab Modify one of your existing queues
}
}
\examples{
\donttest{svc <- mediaconvert()
svc$associate_certificate(
Foo = 123
)}
}
|
/paws/man/mediaconvert.Rd
|
permissive
|
peoplecure/paws
|
R
| false
| true
| 3,233
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mediaconvert_service.R
\name{mediaconvert}
\alias{mediaconvert}
\title{AWS Elemental MediaConvert}
\usage{
mediaconvert()
}
\description{
AWS Elemental MediaConvert
}
\section{Operations}{
\tabular{ll}{
\link[=mediaconvert_associate_certificate]{associate_certificate} \tab Associates an AWS Certificate Manager (ACM) Amazon Resource Name (ARN) with AWS Elemental MediaConvert \cr
\link[=mediaconvert_cancel_job]{cancel_job} \tab Permanently cancel a job \cr
\link[=mediaconvert_create_job]{create_job} \tab Create a new transcoding job \cr
\link[=mediaconvert_create_job_template]{create_job_template} \tab Create a new job template \cr
\link[=mediaconvert_create_preset]{create_preset} \tab Create a new preset \cr
\link[=mediaconvert_create_queue]{create_queue} \tab Create a new transcoding queue \cr
\link[=mediaconvert_delete_job_template]{delete_job_template} \tab Permanently delete a job template you have created \cr
\link[=mediaconvert_delete_preset]{delete_preset} \tab Permanently delete a preset you have created \cr
\link[=mediaconvert_delete_queue]{delete_queue} \tab Permanently delete a queue you have created \cr
\link[=mediaconvert_describe_endpoints]{describe_endpoints} \tab Send a request with an empty body to the regional API endpoint to get your account API endpoint \cr
\link[=mediaconvert_disassociate_certificate]{disassociate_certificate} \tab Removes an association between the Amazon Resource Name (ARN) of an AWS Certificate Manager (ACM) certificate and an AWS Elemental MediaConvert resource\cr
\link[=mediaconvert_get_job]{get_job} \tab Retrieve the JSON for a specific completed transcoding job \cr
\link[=mediaconvert_get_job_template]{get_job_template} \tab Retrieve the JSON for a specific job template \cr
\link[=mediaconvert_get_preset]{get_preset} \tab Retrieve the JSON for a specific preset \cr
\link[=mediaconvert_get_queue]{get_queue} \tab Retrieve the JSON for a specific queue \cr
\link[=mediaconvert_list_job_templates]{list_job_templates} \tab Retrieve a JSON array of up to twenty of your job templates \cr
\link[=mediaconvert_list_jobs]{list_jobs} \tab Retrieve a JSON array of up to twenty of your most recently created jobs \cr
\link[=mediaconvert_list_presets]{list_presets} \tab Retrieve a JSON array of up to twenty of your presets \cr
\link[=mediaconvert_list_queues]{list_queues} \tab Retrieve a JSON array of up to twenty of your queues \cr
\link[=mediaconvert_list_tags_for_resource]{list_tags_for_resource} \tab Retrieve the tags for a MediaConvert resource \cr
\link[=mediaconvert_tag_resource]{tag_resource} \tab Add tags to a MediaConvert queue, preset, or job template \cr
\link[=mediaconvert_untag_resource]{untag_resource} \tab Remove tags from a MediaConvert queue, preset, or job template \cr
\link[=mediaconvert_update_job_template]{update_job_template} \tab Modify one of your existing job templates \cr
\link[=mediaconvert_update_preset]{update_preset} \tab Modify one of your existing presets \cr
\link[=mediaconvert_update_queue]{update_queue} \tab Modify one of your existing queues
}
}
\examples{
\donttest{svc <- mediaconvert()
svc$associate_certificate(
Foo = 123
)}
}
|
#
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
# Define server logic required to compute the circle's area from its diameter
shinyServer(function(input, output) {
calc_area <- reactive({
(input$Diameter/2)^2*pi
})
output$Area <- renderText(calc_area())
})
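# --- Editor's addition: this file defines only the server; a matching ui.R is
# assumed. A minimal hedged sketch (widget choice is our own, not from the repo):
# library(shiny)
# shinyUI(fluidPage(
#     titlePanel("Circle area calculator"),
#     numericInput("Diameter", "Diameter:", value = 1, min = 0),
#     textOutput("Area")
# ))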
|
/server.R
|
no_license
|
jkodavasal/DDP_W4_ShinyApp
|
R
| false
| false
| 451
|
r
|
#
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
# Define server logic required to compute the circle's area from its diameter
shinyServer(function(input, output) {
calc_area <- reactive({
(input$Diameter/2)^2*pi
})
output$Area <- renderText(calc_area())
})
|
testlist <- list(x = NA_integer_, y = 976894522L)
result <- do.call(diffrprojects:::dist_mat_absolute,testlist)
str(result)
|
/diffrprojects/inst/testfiles/dist_mat_absolute/libFuzzer_dist_mat_absolute/dist_mat_absolute_valgrind_files/1609962676-test.R
|
no_license
|
akhikolla/updated-only-Issues
|
R
| false
| false
| 123
|
r
|
testlist <- list(x = NA_integer_, y = 976894522L)
result <- do.call(diffrprojects:::dist_mat_absolute,testlist)
str(result)
|
u = function(q){
a=0
for (i in 1:21){
a[i]=(q[i]-11 + i)^2
}
return(sum(a))
}
du = function(q){
b = 0
for (i in 1:21){
b[i] = 2*(q[i] - 11 + i)
}
return(b)
}
hmc <- function(param_init, U, dU, e, L, iters) {
  # Hamiltonian Monte Carlo
  #
  # Arguments:
  #   param_init: initial parameter vector
  #   U:     potential energy function; dU: its gradient
  #   e:     leapfrog step size; L: number of leapfrog steps
  #   iters: number of iterations
mom_history <- matrix(data = NA, nrow = iters+1, ncol = length(param_init))
param_history <- matrix(data = NA, nrow = iters+1, ncol = length(param_init))
param_history[1, ] <- param_init
param_curr <- param_history[1, ]
for (k in 2:iters) {
q <- param_curr
p <- rnorm(length(param_curr), 0, 1)
p_curr <- p
mom_history[k, ] <- p_curr
# leapfrog steps
p <- hmc_step(p, dU(q), -e / 2)
for (i in 1:L) {
q <- hmc_step(q, p, e)
if (i != L) p <- hmc_step(p, dU(q), -e)
}
p <- -hmc_step(p, dU(q), -e / 2) #the negation is unnecessary
# start end energies
U_curr <- U(param_curr)
K_curr <- sum(p_curr ^ 2) / 2
U_prop <- U(q)
K_prop <- sum(p^2) / 2
# accept-reject
if (runif(1) < exp(U_curr + K_curr - U_prop - K_prop)) param_curr <- q
param_history[k, ] <- param_curr
}
# return(list(samples = param_history, momentum = mom_history))
return(param_history)
}
hmc_step <- function(a, b, e) {
# move a by e * b
a + e * b
}
set.seed(3)
qtracker = hmc(rep(0,21), u, du, 0.18, 10, 10000)
qtracker = qtracker[1:10000,]
length(unique(qtracker))/(21*10000)
xtracker = matrix(NA, nrow = 10000, ncol = 21)
xtracker[1,] = rep(0,21)
for (i in 2:10000){
xold = xtracker[i-1,]
xprop = xold + rnorm(length(xold), sd=0.3)
reject = runif(1)
if (-u(xprop) +u(xold) > log(reject)) xold = xprop
xtracker[i,] = xold
}
length(unique(xtracker))/(21*10000)
par(mfrow=c(4,6))
for(i in 1:21){
  plot(qtracker[,i], ylab = paste0("q", i))
}
par(mfrow=c(4,6))
for(i in 1:21){
  plot(xtracker[,i], ylab = paste0("x", i))
}
par(mfrow=c(4,6))
for(i in 1:21){
  acf(xtracker[2500:10000,i], main = paste0("x", i))
}
par(mfrow=c(4,6))
for(i in 1:21){
  acf(qtracker[2500:10000,i], main = paste0("q", i))
}
sum((colMeans(qtracker[2500:10000,]) - 10:-10)^2)
sum((colMeans(xtracker[2500:10000,]) - 10:-10)^2)
library(coda)            # mcmc(), effectiveSize()
library(microbenchmark)  # microbenchmark()
effectiveSize(mcmc(xtracker))
effectiveSize(mcmc(qtracker))
qmicro = microbenchmark(hmc(rep(0,21), u, du, 0.18, 10, 10000))
xmicro = microbenchmark(
  {xtracker[1,] = rep(0,21)
  for (i in 2:10000){
    xold = xtracker[i-1,]
    xprop = xold + rnorm(length(xold), sd=0.3)
    reject = runif(1)
    if (-u(xprop) +u(xold) > log(reject)) xold = xprop
    xtracker[i,] = xold
  }
  })
set.seed(42)
gmicro = microbenchmark(
{
gtracker = matrix(NA, nrow = 10000, ncol = 21)
gtracker[1,] = rep(0,21)
for (i in 2:10000){
for(j in 1:21)
gtracker[i,j] = rnorm(1, mean = 11 -j, sd = 1/sqrt(2))
}
})
xsizetime = mean(effectiveSize(xtracker))/median(xmicro$time)*10^(9)
qsizetime =mean(effectiveSize(qtracker))/median(qmicro$time)*10^(9)
gsizetime =mean(effectiveSize(gtracker))/median(gmicro$time)*10^(9)
qsizetime/xsizetime #HMC is this many times more computationally efficient than MH
qsizetime/gsizetime #HMC is this many times more computationally efficient than Gibbs
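# --- Editor's addition: quick numerical check (central differences) that du()
# is indeed the gradient of u(); the step size and tolerance are our choices.
q0 <- rnorm(21)
num_grad <- sapply(1:21, function(i) {
  h <- 1e-6
  e_i <- replace(rep(0, 21), i, h)
  (u(q0 + e_i) - u(q0 - e_i)) / (2 * h)
})
max(abs(num_grad - du(q0)))  # should be ~0 up to floating-point error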
|
/models/21dimmvn.r
|
no_license
|
delimited0/stat-689-project
|
R
| false
| false
| 3,117
|
r
|
u = function(q){
a=0
for (i in 1:21){
a[i]=(q[i]-11 + i)^2
}
return(sum(a))
}
du = function(q){
b = 0
for (i in 1:21){
b[i] = 2*(q[i] - 11 + i)
}
return(b)
}
hmc <- function(param_init, U, dU, e, L, iters) {
  # Hamiltonian Monte Carlo
  #
  # Arguments:
  #   param_init: initial parameter vector
  #   U:     potential energy function; dU: its gradient
  #   e:     leapfrog step size; L: number of leapfrog steps
  #   iters: number of iterations
mom_history <- matrix(data = NA, nrow = iters+1, ncol = length(param_init))
param_history <- matrix(data = NA, nrow = iters+1, ncol = length(param_init))
param_history[1, ] <- param_init
param_curr <- param_history[1, ]
for (k in 2:iters) {
q <- param_curr
p <- rnorm(length(param_curr), 0, 1)
p_curr <- p
mom_history[k, ] <- p_curr
# leapfrog steps
p <- hmc_step(p, dU(q), -e / 2)
for (i in 1:L) {
q <- hmc_step(q, p, e)
if (i != L) p <- hmc_step(p, dU(q), -e)
}
p <- -hmc_step(p, dU(q), -e / 2) #the negation is unnecessary
# start end energies
U_curr <- U(param_curr)
K_curr <- sum(p_curr ^ 2) / 2
U_prop <- U(q)
K_prop <- sum(p^2) / 2
# accept-reject
if (runif(1) < exp(U_curr + K_curr - U_prop - K_prop)) param_curr <- q
param_history[k, ] <- param_curr
}
# return(list(samples = param_history, momentum = mom_history))
return(param_history)
}
hmc_step <- function(a, b, e) {
# move a by e * b
a + e * b
}
set.seed(3)
qtracker = hmc(rep(0,21), u, du, 0.18, 10, 10000)
qtracker = qtracker[1:10000,]
length(unique(qtracker))/(21*10000)
xtracker = matrix(NA, nrow = 10000, ncol = 21)
xtracker[1,] = rep(0,21)
for (i in 2:10000){
xold = xtracker[i-1,]
xprop = xold + rnorm(length(xold), sd=0.3)
reject = runif(1)
if (-u(xprop) +u(xold) > log(reject)) xold = xprop
xtracker[i,] = xold
}
length(unique(xtracker))/(21*10000)
par(mfrow=c(4,6))
for(i in 1:21){
  plot(qtracker[,i], ylab = paste0("q", i))
}
par(mfrow=c(4,6))
for(i in 1:21){
  plot(xtracker[,i], ylab = paste0("x", i))
}
par(mfrow=c(4,6))
for(i in 1:21){
  acf(xtracker[2500:10000,i], main = paste0("x", i))
}
par(mfrow=c(4,6))
for(i in 1:21){
  acf(qtracker[2500:10000,i], main = paste0("q", i))
}
sum((colMeans(qtracker[2500:10000,]) - 10:-10)^2)
sum((colMeans(xtracker[2500:10000,]) - 10:-10)^2)
library(coda)            # mcmc(), effectiveSize()
library(microbenchmark)  # microbenchmark()
effectiveSize(mcmc(xtracker))
effectiveSize(mcmc(qtracker))
qmicro = microbenchmark(hmc(rep(0,21), u, du, 0.18, 10, 10000))
xmicro = microbenchmark(
  {xtracker[1,] = rep(0,21)
  for (i in 2:10000){
    xold = xtracker[i-1,]
    xprop = xold + rnorm(length(xold), sd=0.3)
    reject = runif(1)
    if (-u(xprop) +u(xold) > log(reject)) xold = xprop
    xtracker[i,] = xold
  }
  })
set.seed(42)
gmicro = microbenchmark(
{
gtracker = matrix(NA, nrow = 10000, ncol = 21)
gtracker[1,] = rep(0,21)
for (i in 2:10000){
for(j in 1:21)
gtracker[i,j] = rnorm(1, mean = 11 -j, sd = 1/sqrt(2))
}
})
xsizetime = mean(effectiveSize(xtracker))/median(xmicro$time)*10^(9)
qsizetime =mean(effectiveSize(qtracker))/median(qmicro$time)*10^(9)
gsizetime =mean(effectiveSize(gtracker))/median(gmicro$time)*10^(9)
qsizetime/xsizetime #HMC is this many times more computationally efficient than MH
qsizetime/gsizetime #HMC is this many times more computationally efficient than Gibbs
|
################################# PROCESSING OF FRENCH DATA #############################################
################################### 2012 LEGISLATIVE ELECTIONS ##########################################
###################################### FAZH ITAM 2017 130435 ############################################
#########################################################################################################
### PREAMBLE ####
library(tidyverse)
library(magrittr)
library(stringr)
library(forcats)
### POLLING-STATION VOTING DATA ####
legis_12 <- read_csv2(file = "DATOS_BRUTOS/LG12_Bvot_T1T2_FAZH.txt",
col_names = c("VUELTA",
"COD_DEPARTAMENTO","COD_COMUNA","NOM_COMUNA",
"CIRC_LEGIS","COD_CANT",
"CASILLA","INSCRITOS","VOTANTES","VOT_EF",
"NUM_CANDIDATO","APELLIDO_CANDIDATO","NOMBRE_CANDIDATO","ETIQUETA",
"VOT_CANDIDATO"),
col_types = c("ccccccciiicccci"),
locale = locale(encoding = "latin1")) %>%
  # Keep only the communes of continental France and Corsica
  filter(!(substr(COD_DEPARTAMENTO,1,1) == "Z")) %>% 
  # Keep only the first-round results
  filter(VUELTA == 1) %>% 
  # Adjust the commune names when they are arrondissements of Paris, Lyon and Marseille
mutate(NOM_COMUNA = if_else(NOM_COMUNA == "Paris",paste("Paris",substr(CASILLA,1,2), sep = " "),
if_else(NOM_COMUNA == "Lyon", paste("Lyon",substr(CASILLA,2,2), sep = " "),
if_else(NOM_COMUNA == "Marseille",paste("Marseille",substr(CASILLA,1,2), sep = " "),
NOM_COMUNA)))) %>%
  # Add the INSEE CODGEO for the communes (adjusting for the arrondissements of Paris, Lyon and Marseille)
mutate(CODGEO = paste(COD_DEPARTAMENTO,COD_COMUNA,sep="")) %>%
mutate(CODGEO = if_else(CODGEO == "75056",paste("751",substr(CASILLA,1,2),sep=""),
if_else(CODGEO == "69123", paste("6938",substr(CASILLA,2,2),sep=""),
if_else(CODGEO == "13055",paste("132",substr(CASILLA,1,2),sep=""),
CODGEO)))) %>%
  # Make the polling-station code unique
  mutate(CASILLA = paste(CODGEO,CASILLA,sep="_"))
# Add the departments
# https://www.insee.fr/fr/information/2560452
legis_12 <- read_delim(file = "DATOS_ADMINISTRATIVOS/depts2012.txt",
delim = "\t",locale = locale(encoding = "latin1")) %>%
transmute(COD_REGION = REGION,COD_DEPARTAMENTO = DEP, NOM_DEPARTAMENTO = NCCENR) %>%
left_join(legis_12,., by = "COD_DEPARTAMENTO")
# Add the old regions
legis_12 <- read_delim(file = "DATOS_ADMINISTRATIVOS/reg2012.txt",
delim = "\t",locale = locale(encoding = "latin1")) %>%
transmute(COD_REGION = REGION, NOM_REGION = NCCENR) %>%
left_join(legis_12,., by = "COD_REGION")
# Add the names of the new regions
legis_12 <- read_delim(file = "DATOS_ADMINISTRATIVOS/AUXILIAR_REG_DEPART.csv",
delim = ",",locale = locale(encoding = "latin1")) %>%
select(-NOM_REGION) %>%
left_join(legis_12,., by = "COD_REGION")
# Reorder the columns, keeping only the ones we need
legis_12 <- legis_12 %>%
select(CODGEO,
NOM_COMUNA,NOM_DEPARTAMENTO,NOM_REGION,NOM_NVA_REG,
COD_COMUNA,COD_DEPARTAMENTO,COD_REGION,COD_NVA_REG,
CASILLA,INSCRITOS,VOTANTES,VOT_EF,
ETIQUETA, APELLIDO_CANDIDATO, VOT_CANDIDATO)
### CENSUS BASE ####
IMG2A_12 <- read_csv2(file = "DATOS_BRUTOS/BTT_TD_IMG2A_2012.txt",
col_names = c("NIVEL","CODGEO","NOM_COM","C_SEXO","C_EDAD4_A","C_MIGRATORIA","C_TIPO_ACT","VALOR"),
col_types = "cccccccd",
locale = locale(decimal_mark = ",", encoding = "latin1"),
skip = 1) %>%
  # Mind the codes
mutate(C_SEXO = factor(C_SEXO,labels = c("HOMBRES","MUJERES")),
C_EDAD4_A = factor(C_EDAD4_A),
C_TIPO_ACT = factor(C_TIPO_ACT,labels = c("EMPLEADOS","DESEMPLEADOS","RETIRADOS","ESTUDIANTES","HOGAR","OTROS")),
C_MIGRATORIA = factor(C_MIGRATORIA, labels = c("INMIGRANTES","LOCALES"))) %>%
mutate(CAT = paste(C_SEXO,C_TIPO_ACT,C_MIGRATORIA,C_EDAD4_A,sep="_")) %>%
select(CODGEO,NIVEL,CAT,VALOR) %>%
spread(CAT,VALOR)
### COMMUNE ERRORS ####
# To reconcile election results and the census we need the official administrative geographies for each base
COMUNAS_12 <- read_delim(file = "DATOS_ADMINISTRATIVOS/comsimp2012.txt",
delim = "\t",locale = locale(encoding = "latin1")) %>%
transmute(CODGEO = paste(DEP,COM,sep=""), NOM_COMUNA = NCCENR, ART = ARTMIN) %>%
  # Add the article to the name
mutate(NOM_COMUNA = if_else(is.na(ART),
NOM_COMUNA,
paste(substring(ART,2,nchar(ART)-1),NOM_COMUNA,sep=" "))) %>%
transmute(CODGEO,
NOM_COMUNA = if_else(substring(NOM_COMUNA,2,3)=="' ",
paste(substring(NOM_COMUNA,1,2),substring(NOM_COMUNA,4),sep=""),
NOM_COMUNA)) %>%
  # Fix the OE and oe ligatures
mutate(NOM_COMUNA=str_replace_all(NOM_COMUNA,"\u008c","OE")) %>%
mutate(NOM_COMUNA=str_replace_all(NOM_COMUNA,"\u009c","oe"))
COMUNAS_14 <- read_delim(file = "DATOS_ADMINISTRATIVOS/comsimp2014.txt",
delim = "\t",locale = locale(encoding = "latin1")) %>%
transmute(CODGEO = paste(DEP,COM,sep=""), NOM_COMUNA = NCCENR, ART = ARTMIN) %>%
  # Add the article to the name
mutate(NOM_COMUNA = if_else(is.na(ART),
NOM_COMUNA,
paste(substring(ART,2,nchar(ART)-1),NOM_COMUNA,sep=" "))) %>%
transmute(CODGEO,
NOM_COMUNA = if_else(substring(NOM_COMUNA,2,3)=="' ",
paste(substring(NOM_COMUNA,1,2),substring(NOM_COMUNA,4),sep=""),
NOM_COMUNA)) %>%
  # Fix the OE and oe ligatures
mutate(NOM_COMUNA=str_replace_all(NOM_COMUNA,"\u008c","OE")) %>%
mutate(NOM_COMUNA=str_replace_all(NOM_COMUNA,"\u009c","oe"))
# Use the official 2012 names
legis_12 <- legis_12 %>%
left_join(COMUNAS_12,by="CODGEO") %>%
mutate(NOM_COMUNA.x=if_else(is.na(NOM_COMUNA.y),NOM_COMUNA.x,NOM_COMUNA.y)) %>%
select(-NOM_COMUNA.y) %>%
rename(NOM_COMUNA = NOM_COMUNA.x)
# Communes present in the election results but not in the census that did exist in 2012
ERRORES_COMUNAS <- anti_join(legis_12,IMG2A_12,by="CODGEO") %>%
semi_join(COMUNAS_12,by="CODGEO") %>%
select(CODGEO,NOM_COMUNA,NOM_DEPARTAMENTO) %>%
distinct %>%
cbind(MOTIVO_ERROR = NA)
# Errors because they were merged into another commune
ERRORES_COMUNAS$MOTIVO_ERROR <- c("01/01/2013 : Agnières-en-Dévoluy devient commune déléguée au sein de Dévoluy (commune nouvelle).",
"01/01/2013 : Bénévent-et-Charbillac devient commune déléguée au sein de Saint-Bonnet-en-Champsaur (commune nouvelle).",
"01/01/2013 : La Cluse devient commune déléguée au sein de Dévoluy (commune nouvelle).",
"01/01/2013 : Les Infournas devient commune déléguée au sein de Saint-Bonnet-en-Champsaur (commune nouvelle).",
"01/01/2013 : Saint-Disdier devient commune déléguée au sein de Dévoluy (commune nouvelle).",
"01/01/2013 : Melay devient commune déléguée au sein de Chemillé-Melay (commune nouvelle).",
"01/01/2013 : Montpollin devient commune déléguée au sein de Baugé-en-Anjou (commune nouvelle).",
"01/01/2013 : Pontigné devient commune déléguée au sein de Baugé-en-Anjou (commune nouvelle).",
"01/01/2013 : Saint-Martin-d'Arcé devient commune déléguée au sein de Baugé-en-Anjou (commune nouvelle).",
"01/01/2013 : Le Vieil-Baugé devient commune déléguée au sein de Baugé-en-Anjou (commune nouvelle).",
"01/01/2013 : Vaulandry devient commune déléguée au sein de Clefs-Val d'Anjou (commune nouvelle).",
"28/02/2013 : Pautaines-Augeville devient commune déléguée au sein d'Épizon (commune nouvelle).",
"01/01/2013 : Bourg-de-Thizy devient commune déléguée au sein de Thizy-les-Bourgs (commune nouvelle).",
"01/01/2013 : La Chapelle-de-Mardore devient commune déléguée au sein de Thizy-les-Bourgs (commune nouvelle).",
"01/01/2013 : Mardore devient commune déléguée au sein de Thizy-les-Bourgs (commune nouvelle).",
"01/01/2013 : Marnand devient commune déléguée au sein de Thizy-les-Bourgs (commune nouvelle).",
"01/01/2013 : Nuelles devient commune déléguée au sein de Saint-Germain-Nuelles (commune nouvelle).",
"01/01/2013 : Vitré devient commune déléguée au sein de Beaussais-Vitré (commune nouvelle).",
"01/01/2013 : Voultegon devient commune déléguée au sein de Voulmentin (commune nouvelle).",
"01/01/2013 : Le Magny devient commune déléguée au sein de Fontenoy-le-Château (commune nouvelle).")
# The mergers were checked on the INSEE website
# In the election results we first change the CODGEO of the merged communes
# Then we change the names to those existing in 2014.
legis_12 <- legis_12 %>%
mutate(CODGEO = factor(CODGEO) %>%
fct_collapse("05139" = c("05002","05042","05138","05139"),
"05132" = c("05020","05067","05132"),
"49092" = c("49092","49199"),
"49018" = c("49018","49213","49245","49303","49372"),
"49101" = c("49101","49380"),
"52187" = c("52187","52379"),
"69248" = c("69025","69041","69128","69129","69248"),
"69208" = c("69144","69208"),
"79030" = c("79030","79353"),
"79242" = c("79242","79356"),
"88176" = c("88176","88282")) %>%
as.character) %>%
mutate(COD_COMUNA = if_else(CODGEO == "05139", "139",
if_else(CODGEO == "05132", "132",
if_else(CODGEO == "49092", "092",
if_else(CODGEO == "49018", "018",
if_else(CODGEO == "49101", "101",
if_else(CODGEO == "52187", "187",
if_else(CODGEO == "69248", "248",
if_else(CODGEO == "69208", "208",
if_else(CODGEO == "79030", "030",
if_else(CODGEO == "79242", "242",
if_else(CODGEO == "88176", "176",COD_COMUNA)))))))))))) %>%
mutate(NOM_COMUNA = if_else(CODGEO == "05139", "Dévoluy",
if_else(CODGEO == "05132", "Saint-Bonnet-en-Champsaur",
if_else(CODGEO == "49092", "Chemillé-Melay",
if_else(CODGEO == "49018", "Baugé-en-Anjou",
if_else(CODGEO == "49101", "Clefs-Val d'Anjou",
if_else(CODGEO == "52187", "Épizon",
if_else(CODGEO == "69248", "Thizy-les-Bourgs",
if_else(CODGEO == "69208", "Saint-Germain-Nuelles",
if_else(CODGEO == "79030", "Beaussais-Vitré",
if_else(CODGEO == "79242", "Voulmentin",
if_else(CODGEO == "88176", "Fontenoy-le-Château",NOM_COMUNA))))))))))))
# Communes present in the census but not in the election results that did exist in 2012
ERRORES_COMUNAS <- anti_join(IMG2A_12,legis_12,by="CODGEO") %>%
select(CODGEO,NIVEL) %>%
distinct %>%
semi_join(COMUNAS_12,by="CODGEO") %>%
full_join(ERRORES_COMUNAS)
# Errors because they are communes of the big cities
ERRORES_COMUNAS <- ERRORES_COMUNAS %>%
mutate(MOTIVO_ERROR = if_else(CODGEO == "13055","Marseille",
if_else(CODGEO == "69123","Lyon",
if_else(CODGEO == "75056","Paris",MOTIVO_ERROR))))
# Errors because they are overseas ("Alta Mar") communes
ERRORES_COMUNAS <- ERRORES_COMUNAS %>%
mutate(MOTIVO_ERROR = if_else(substr(CODGEO,1,2) == "97","Alta Mar",MOTIVO_ERROR))
# Errors because they are "communes mortes pour la France" (dead communes)
ERRORES_COMUNAS <- ERRORES_COMUNAS %>%
mutate(MOTIVO_ERROR = if_else(is.na(MOTIVO_ERROR),"Comune Morte pour la France",MOTIVO_ERROR))
# Remove these communes from the census
IMG2A_12 <- ERRORES_COMUNAS %>%
filter(MOTIVO_ERROR %in% c("Marseille","Lyon","Paris","Alta Mar","Comune Morte pour la France")) %>%
extract2("CODGEO") %>%
{filter(IMG2A_12,!{CODGEO %in% .})}
# Communes present in the census but not in the election results that did exist in 2014
ERRORES_COMUNAS <- anti_join(IMG2A_12,legis_12,by="CODGEO") %>%
select(CODGEO,NIVEL) %>%
distinct %>%
semi_join(COMUNAS_14, by="CODGEO") %>%
full_join(ERRORES_COMUNAS)
# Errors because they were merged into another commune
ERRORES_COMUNAS <- ERRORES_COMUNAS %>%
mutate(MOTIVO_ERROR = if_else(CODGEO == "76095","01/01/2014 : Bihorel est rétablie.
01/01/2012 : Bihorel devient commune déléguée au sein de Bois-Guillaume-Bihorel (commune nouvelle).",
MOTIVO_ERROR))
# Fix the error by aggregating the census data under the name existing in 2012.
# Bihorel (76095) and Bois-Guillaume (76108) => Bois-Guillaume-Bihorel (76108)
IMG2A_12 <- IMG2A_12 %>%
mutate(CODGEO = if_else(CODGEO %in% c("76095","76108"),"76108",CODGEO))
# Aggregate the data
IMG2A_12 <- IMG2A_12 %>%
group_by(NIVEL,CODGEO) %>%
summarise_if(is.numeric,funs(sum(.,na.rm=TRUE))) %>%
ungroup
# Communes present in the census but not in the election results that existed NEITHER in 2012 NOR in 2014
ERRORES_COMUNAS <- anti_join(IMG2A_12,legis_12,by="CODGEO") %>%
select(CODGEO,NIVEL) %>%
distinct %>%
full_join(ERRORES_COMUNAS)
# Keep only the CODGEO and error-reason columns
ERRORES_COMUNAS <- ERRORES_COMUNAS %>%
select(CODGEO,MOTIVO_ERROR)
### ADMINISTRATIVE BASE OF COMMUNES ####
BASE_COMUNAS <- legis_12 %>%
select(CODGEO:COD_NVA_REG) %>%
distinct %>%
left_join(select(IMG2A_12,CODGEO,NIVEL),by="CODGEO")
### ELECTION RESULTS BY COMMUNE ####
VUELTA_1_LEGIS_12 <- legis_12 %>%
select(CODGEO,INSCRITOS:VOT_CANDIDATO) %>%
group_by(CODGEO,ETIQUETA,APELLIDO_CANDIDATO) %>%
summarise_if(is.numeric,funs(sum(.))) %>%
ungroup %>%
left_join(read_csv("BASES_CANDIDATOS/FAMILIAS_POLITICAS_12_LEGISLATIVAS.csv",
locale = locale(encoding = "latin1")),by="ETIQUETA") %>%
transmute(CODGEO,
INSCRITOS,VOTANTES,PCT_PART = VOTANTES/INSCRITOS,VOT_EF,
FAMILIA_CANDIDATO = FAMILIA, ETIQUETA_CANDIDATO = ETIQUETA, APELLIDO_CANDIDATO,
VOT_CANDIDATO,PCT_CANDIDATO = VOT_CANDIDATO/VOT_EF)
### COMPACT THE CENSUS BASE ####
IMG2A_12 <- IMG2A_12 %>%
select(-ends_with("00"),-NIVEL) %>%
gather(VARIABLE,PERSONAS,-CODGEO) %>%
separate(VARIABLE,c("SEXO","ACTIVIDAD","COND_MIGRATORIA","EDAD_4A")) %>%
arrange(CODGEO) %>%
filter(PERSONAS > 0) %>%
group_by(CODGEO) %>%
mutate(POB_TOTAL = sum(PERSONAS),
PCT_POB = PERSONAS/POB_TOTAL,
CAT_COMUNA = if_else(POB_TOTAL < 10000,"CHICA","GRANDE"))
### SAVE RESULTS ####
LEGISLATIVAS_2012 <- list(BASE_COMUNAS = BASE_COMUNAS,
ERRORES_COMUNAS = ERRORES_COMUNAS,
CENSO=IMG2A_12,
VUELTA_1=VUELTA_1_LEGIS_12)
save(LEGISLATIVAS_2012,file = "DATOS_PROCESADOS/LEGISLATIVAS_2012.RData")
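### VERIFICATION (editor's addition) ####
# Hedged sanity check, not part of the original script: the population shares
# built above should sum to 1 within each commune (IMG2A_12 is still grouped
# by CODGEO at this point).
IMG2A_12 %>%
  summarise(SUMA_PCT = sum(PCT_POB)) %>%
  filter(abs(SUMA_PCT - 1) > 1e-9)  # should return zero rows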
|
/SCRIPTS_R/01_PROCESAR_DATOS_FRANCESES/01_PROCESAR_LEGIS_2012.R
|
no_license
|
fazepher/datos-franceses
|
R
| false
| false
| 16,727
|
r
|
################################# PROCESSING OF FRENCH DATA #############################################
################################### 2012 LEGISLATIVE ELECTIONS ##########################################
###################################### FAZH ITAM 2017 130435 ############################################
#########################################################################################################
### PREAMBLE ####
library(tidyverse)
library(magrittr)
library(stringr)
library(forcats)
### POLLING-STATION VOTING DATA ####
legis_12 <- read_csv2(file = "DATOS_BRUTOS/LG12_Bvot_T1T2_FAZH.txt",
col_names = c("VUELTA",
"COD_DEPARTAMENTO","COD_COMUNA","NOM_COMUNA",
"CIRC_LEGIS","COD_CANT",
"CASILLA","INSCRITOS","VOTANTES","VOT_EF",
"NUM_CANDIDATO","APELLIDO_CANDIDATO","NOMBRE_CANDIDATO","ETIQUETA",
"VOT_CANDIDATO"),
col_types = c("ccccccciiicccci"),
locale = locale(encoding = "latin1")) %>%
  # Keep only the communes of continental France and Corsica
  filter(!(substr(COD_DEPARTAMENTO,1,1) == "Z")) %>% 
  # Keep only the first-round results
  filter(VUELTA == 1) %>% 
  # Adjust the commune names when they are arrondissements of Paris, Lyon and Marseille
mutate(NOM_COMUNA = if_else(NOM_COMUNA == "Paris",paste("Paris",substr(CASILLA,1,2), sep = " "),
if_else(NOM_COMUNA == "Lyon", paste("Lyon",substr(CASILLA,2,2), sep = " "),
if_else(NOM_COMUNA == "Marseille",paste("Marseille",substr(CASILLA,1,2), sep = " "),
NOM_COMUNA)))) %>%
# Agregamos el CODGEO del INSEE para las comunas (ajustando para los Arrondisements de París, Lyon y Marsella)
mutate(CODGEO = paste(COD_DEPARTAMENTO,COD_COMUNA,sep="")) %>%
mutate(CODGEO = if_else(CODGEO == "75056",paste("751",substr(CASILLA,1,2),sep=""),
if_else(CODGEO == "69123", paste("6938",substr(CASILLA,2,2),sep=""),
if_else(CODGEO == "13055",paste("132",substr(CASILLA,1,2),sep=""),
CODGEO)))) %>%
# Modificamos el código de casillas para hacerlo único
mutate(CASILLA = paste(CODGEO,CASILLA,sep="_"))
# Agregamos los Departamentos
# https://www.insee.fr/fr/information/2560452
legis_12 <- read_delim(file = "DATOS_ADMINISTRATIVOS/depts2012.txt",
delim = "\t",locale = locale(encoding = "latin1")) %>%
transmute(COD_REGION = REGION,COD_DEPARTAMENTO = DEP, NOM_DEPARTAMENTO = NCCENR) %>%
left_join(legis_12,., by = "COD_DEPARTAMENTO")
# Agregamos las Regiones Antiguas
legis_12 <- read_delim(file = "DATOS_ADMINISTRATIVOS/reg2012.txt",
delim = "\t",locale = locale(encoding = "latin1")) %>%
transmute(COD_REGION = REGION, NOM_REGION = NCCENR) %>%
left_join(legis_12,., by = "COD_REGION")
# Agregamos los nombres de las Regiones Nuevas
legis_12 <- read_delim(file = "DATOS_ADMINISTRATIVOS/AUXILIAR_REG_DEPART.csv",
delim = ",",locale = locale(encoding = "latin1")) %>%
select(-NOM_REGION) %>%
left_join(legis_12,., by = "COD_REGION")
# Se ordenan las columnas, conservando solo las que nos importan
legis_12 <- legis_12 %>%
select(CODGEO,
NOM_COMUNA,NOM_DEPARTAMENTO,NOM_REGION,NOM_NVA_REG,
COD_COMUNA,COD_DEPARTAMENTO,COD_REGION,COD_NVA_REG,
CASILLA,INSCRITOS,VOTANTES,VOT_EF,
ETIQUETA, APELLIDO_CANDIDATO, VOT_CANDIDATO)
### BASE DEL CENSO ####
IMG2A_12 <- read_csv2(file = "DATOS_BRUTOS/BTT_TD_IMG2A_2012.txt",
col_names = c("NIVEL","CODGEO","NOM_COM","C_SEXO","C_EDAD4_A","C_MIGRATORIA","C_TIPO_ACT","VALOR"),
col_types = "cccccccd",
locale = locale(decimal_mark = ",", encoding = "latin1"),
skip = 1) %>%
# Atención con los códigos
mutate(C_SEXO = factor(C_SEXO,labels = c("HOMBRES","MUJERES")),
C_EDAD4_A = factor(C_EDAD4_A),
C_TIPO_ACT = factor(C_TIPO_ACT,labels = c("EMPLEADOS","DESEMPLEADOS","RETIRADOS","ESTUDIANTES","HOGAR","OTROS")),
C_MIGRATORIA = factor(C_MIGRATORIA, labels = c("INMIGRANTES","LOCALES"))) %>%
mutate(CAT = paste(C_SEXO,C_TIPO_ACT,C_MIGRATORIA,C_EDAD4_A,sep="_")) %>%
select(CODGEO,NIVEL,CAT,VALOR) %>%
spread(CAT,VALOR)
### ERRORES DE COMUNAS ####
# Para homologar resultados electorales y censo necesitamos las geografías administrativas oficiales para cada base
COMUNAS_12 <- read_delim(file = "DATOS_ADMINISTRATIVOS/comsimp2012.txt",
delim = "\t",locale = locale(encoding = "latin1")) %>%
transmute(CODGEO = paste(DEP,COM,sep=""), NOM_COMUNA = NCCENR, ART = ARTMIN) %>%
# Añadimos el artículo al nombre
mutate(NOM_COMUNA = if_else(is.na(ART),
NOM_COMUNA,
paste(substring(ART,2,nchar(ART)-1),NOM_COMUNA,sep=" "))) %>%
transmute(CODGEO,
NOM_COMUNA = if_else(substring(NOM_COMUNA,2,3)=="' ",
paste(substring(NOM_COMUNA,1,2),substring(NOM_COMUNA,4),sep=""),
NOM_COMUNA)) %>%
# Corregimos las ligaduras OE y oe
mutate(NOM_COMUNA=str_replace_all(NOM_COMUNA,"\u008c","OE")) %>%
mutate(NOM_COMUNA=str_replace_all(NOM_COMUNA,"\u009c","oe"))
COMUNAS_14 <- read_delim(file = "DATOS_ADMINISTRATIVOS/comsimp2014.txt",
delim = "\t",locale = locale(encoding = "latin1")) %>%
transmute(CODGEO = paste(DEP,COM,sep=""), NOM_COMUNA = NCCENR, ART = ARTMIN) %>%
# Añadimos el artículo al nombre
mutate(NOM_COMUNA = if_else(is.na(ART),
NOM_COMUNA,
paste(substring(ART,2,nchar(ART)-1),NOM_COMUNA,sep=" "))) %>%
transmute(CODGEO,
NOM_COMUNA = if_else(substring(NOM_COMUNA,2,3)=="' ",
paste(substring(NOM_COMUNA,1,2),substring(NOM_COMUNA,4),sep=""),
NOM_COMUNA)) %>%
# Corregimos las ligaduras OE y oe
mutate(NOM_COMUNA=str_replace_all(NOM_COMUNA,"\u008c","OE")) %>%
mutate(NOM_COMUNA=str_replace_all(NOM_COMUNA,"\u009c","oe"))
# Utilizamos los nombres oficiales del 2012
legis_12 <- legis_12 %>%
left_join(COMUNAS_12,by="CODGEO") %>%
mutate(NOM_COMUNA.x=if_else(is.na(NOM_COMUNA.y),NOM_COMUNA.x,NOM_COMUNA.y)) %>%
select(-NOM_COMUNA.y) %>%
rename(NOM_COMUNA = NOM_COMUNA.x)
# Comunas que están en los resultados electorales, pero no en el censo y que sí existían en 2012
ERRORES_COMUNAS <- anti_join(legis_12,IMG2A_12,by="CODGEO") %>%
semi_join(COMUNAS_12,by="CODGEO") %>%
select(CODGEO,NOM_COMUNA,NOM_DEPARTAMENTO) %>%
distinct %>%
cbind(MOTIVO_ERROR = NA)
# Errores por que se agregaron a otra comuna
ERRORES_COMUNAS$MOTIVO_ERROR <- c("01/01/2013 : Agnières-en-Dévoluy devient commune déléguée au sein de Dévoluy (commune nouvelle).",
"01/01/2013 : Bénévent-et-Charbillac devient commune déléguée au sein de Saint-Bonnet-en-Champsaur (commune nouvelle).",
"01/01/2013 : La Cluse devient commune déléguée au sein de Dévoluy (commune nouvelle).",
"01/01/2013 : Les Infournas devient commune déléguée au sein de Saint-Bonnet-en-Champsaur (commune nouvelle).",
"01/01/2013 : Saint-Disdier devient commune déléguée au sein de Dévoluy (commune nouvelle).",
"01/01/2013 : Melay devient commune déléguée au sein de Chemillé-Melay (commune nouvelle).",
"01/01/2013 : Montpollin devient commune déléguée au sein de Baugé-en-Anjou (commune nouvelle).",
"01/01/2013 : Pontigné devient commune déléguée au sein de Baugé-en-Anjou (commune nouvelle).",
"01/01/2013 : Saint-Martin-d'Arcé devient commune déléguée au sein de Baugé-en-Anjou (commune nouvelle).",
"01/01/2013 : Le Vieil-Baugé devient commune déléguée au sein de Baugé-en-Anjou (commune nouvelle).",
"01/01/2013 : Vaulandry devient commune déléguée au sein de Clefs-Val d'Anjou (commune nouvelle).",
"28/02/2013 : Pautaines-Augeville devient commune déléguée au sein d'Épizon (commune nouvelle).",
"01/01/2013 : Bourg-de-Thizy devient commune déléguée au sein de Thizy-les-Bourgs (commune nouvelle).",
"01/01/2013 : La Chapelle-de-Mardore devient commune déléguée au sein de Thizy-les-Bourgs (commune nouvelle).",
"01/01/2013 : Mardore devient commune déléguée au sein de Thizy-les-Bourgs (commune nouvelle).",
"01/01/2013 : Marnand devient commune déléguée au sein de Thizy-les-Bourgs (commune nouvelle).",
"01/01/2013 : Nuelles devient commune déléguée au sein de Saint-Germain-Nuelles (commune nouvelle).",
"01/01/2013 : Vitré devient commune déléguée au sein de Beaussais-Vitré (commune nouvelle).",
"01/01/2013 : Voultegon devient commune déléguée au sein de Voulmentin (commune nouvelle).",
"01/01/2013 : Le Magny devient commune déléguée au sein de Fontenoy-le-Château (commune nouvelle).")
# Se revisaron, las agrupaciones en el sitio del INSEE
# En los resultados electorales cambiamos primero los CODGEO para las comunas que se agregaron
# Posteriormente cambiamos los nombres a los existentes en 2014.
legis_12 <- legis_12 %>%
mutate(CODGEO = factor(CODGEO) %>%
fct_collapse("05139" = c("05002","05042","05138","05139"),
"05132" = c("05020","05067","05132"),
"49092" = c("49092","49199"),
"49018" = c("49018","49213","49245","49303","49372"),
"49101" = c("49101","49380"),
"52187" = c("52187","52379"),
"69248" = c("69025","69041","69128","69129","69248"),
"69208" = c("69144","69208"),
"79030" = c("79030","79353"),
"79242" = c("79242","79356"),
"88176" = c("88176","88282")) %>%
as.character) %>%
mutate(COD_COMUNA = if_else(CODGEO == "05139", "139",
if_else(CODGEO == "05132", "132",
if_else(CODGEO == "49092", "092",
if_else(CODGEO == "49018", "018",
if_else(CODGEO == "49101", "101",
if_else(CODGEO == "52187", "187",
if_else(CODGEO == "69248", "248",
if_else(CODGEO == "69208", "208",
if_else(CODGEO == "79030", "030",
if_else(CODGEO == "79242", "242",
if_else(CODGEO == "88176", "176",COD_COMUNA)))))))))))) %>%
mutate(NOM_COMUNA = if_else(CODGEO == "05139", "Dévoluy",
if_else(CODGEO == "05132", "Saint-Bonnet-en-Champsaur",
if_else(CODGEO == "49092", "Chemillé-Melay",
if_else(CODGEO == "49018", "Baugé-en-Anjou",
if_else(CODGEO == "49101", "Clefs-Val d'Anjou",
if_else(CODGEO == "52187", "Épizon",
if_else(CODGEO == "69248", "Thizy-les-Bourgs",
if_else(CODGEO == "69208", "Saint-Germain-Nuelles",
if_else(CODGEO == "79030", "Beaussais-Vitré",
if_else(CODGEO == "79242", "Voulmentin",
if_else(CODGEO == "88176", "Fontenoy-le-Château",NOM_COMUNA))))))))))))
# Comunas que están en el censo, pero no en los resultados electorales y que sí existían en 2012
ERRORES_COMUNAS <- anti_join(IMG2A_12,legis_12,by="CODGEO") %>%
select(CODGEO,NIVEL) %>%
distinct %>%
semi_join(COMUNAS_12,by="CODGEO") %>%
full_join(ERRORES_COMUNAS)
# Errores por ser comunas de las grandes ciudades
ERRORES_COMUNAS <- ERRORES_COMUNAS %>%
mutate(MOTIVO_ERROR = if_else(CODGEO == "13055","Marseille",
if_else(CODGEO == "69123","Lyon",
if_else(CODGEO == "75056","Paris",MOTIVO_ERROR))))
# Errores por ser comunas de alta mar
ERRORES_COMUNAS <- ERRORES_COMUNAS %>%
mutate(MOTIVO_ERROR = if_else(substr(CODGEO,1,2) == "97","Alta Mar",MOTIVO_ERROR))
# Errores por ser "comunas muertas"
ERRORES_COMUNAS <- ERRORES_COMUNAS %>%
mutate(MOTIVO_ERROR = if_else(is.na(MOTIVO_ERROR),"Comune Morte pour la France",MOTIVO_ERROR))
# Se eliminan estas comunas del Censo
IMG2A_12 <- ERRORES_COMUNAS %>%
filter(MOTIVO_ERROR %in% c("Marseille","Lyon","Paris","Alta Mar","Comune Morte pour la France")) %>%
extract2("CODGEO") %>%
{filter(IMG2A_12,!{CODGEO %in% .})}
# Comunas que están en el censo, pero no en los resultados electorales y que sí existían en 2014
ERRORES_COMUNAS <- anti_join(IMG2A_12,legis_12,by="CODGEO") %>%
select(CODGEO,NIVEL) %>%
distinct %>%
semi_join(COMUNAS_14, by="CODGEO") %>%
full_join(ERRORES_COMUNAS)
# Errores por que se agregaron a otra comuna
ERRORES_COMUNAS <- ERRORES_COMUNAS %>%
mutate(MOTIVO_ERROR = if_else(CODGEO == "76095","01/01/2014 : Bihorel est rétablie.
01/01/2012 : Bihorel devient commune déléguée au sein de Bois-Guillaume-Bihorel (commune nouvelle).",
MOTIVO_ERROR))
# Corregimos el error agregando los datos en el censo con el nombre existente en 2012.
# Bihorel (76095) y Bois-Guillaume (76108) => Bois-Guillaume-Bihorel (76108)
IMG2A_12 <- IMG2A_12 %>%
mutate(CODGEO = if_else(CODGEO %in% c("76095","76108"),"76108",CODGEO))
# Agregamos los datos
IMG2A_12 <- IMG2A_12 %>%
group_by(NIVEL,CODGEO) %>%
summarise_if(is.numeric,funs(sum(.,na.rm=TRUE))) %>%
ungroup
# Comunas que están en el censo, pero no en los resultados electorales y que NO existían NI en 2012 NI en 2014
ERRORES_COMUNAS <- anti_join(IMG2A_12,legis_12,by="CODGEO") %>%
select(CODGEO,NIVEL) %>%
distinct %>%
full_join(ERRORES_COMUNAS)
# Conservamos solo las columnas de CODGEO y el motivo del error
ERRORES_COMUNAS <- ERRORES_COMUNAS %>%
select(CODGEO,MOTIVO_ERROR)
### BASE ADMINISTRATIVA DE COMUNAS ####
BASE_COMUNAS <- legis_12 %>%
select(CODGEO:COD_NVA_REG) %>%
distinct %>%
left_join(select(IMG2A_12,CODGEO,NIVEL),by="CODGEO")
### RESULTADOS ELECTORALES POR COMUNA ####
VUELTA_1_LEGIS_12 <- legis_12 %>%
select(CODGEO,INSCRITOS:VOT_CANDIDATO) %>%
group_by(CODGEO,ETIQUETA,APELLIDO_CANDIDATO) %>%
summarise_if(is.numeric,funs(sum(.))) %>%
ungroup %>%
left_join(read_csv("BASES_CANDIDATOS/FAMILIAS_POLITICAS_12_LEGISLATIVAS.csv",
locale = locale(encoding = "latin1")),by="ETIQUETA") %>%
transmute(CODGEO,
INSCRITOS,VOTANTES,PCT_PART = VOTANTES/INSCRITOS,VOT_EF,
FAMILIA_CANDIDATO = FAMILIA, ETIQUETA_CANDIDATO = ETIQUETA, APELLIDO_CANDIDATO,
VOT_CANDIDATO,PCT_CANDIDATO = VOT_CANDIDATO/VOT_EF)
### COMPACTAMOS BASE DEL CENSO ####
IMG2A_12 <- IMG2A_12 %>%
select(-ends_with("00"),-NIVEL) %>%
gather(VARIABLE,PERSONAS,-CODGEO) %>%
separate(VARIABLE,c("SEXO","ACTIVIDAD","COND_MIGRATORIA","EDAD_4A")) %>%
arrange(CODGEO) %>%
filter(PERSONAS > 0) %>%
group_by(CODGEO) %>%
mutate(POB_TOTAL = sum(PERSONAS),
PCT_POB = PERSONAS/POB_TOTAL,
CAT_COMUNA = if_else(POB_TOTAL < 10000,"CHICA","GRANDE"))
### GUARDAR RESULTADOS ####
LEGISLATIVAS_2012 <- list(BASE_COMUNAS = BASE_COMUNAS,
ERRORES_COMUNAS = ERRORES_COMUNAS,
CENSO=IMG2A_12,
VUELTA_1=VUELTA_1_LEGIS_12)
save(LEGISLATIVAS_2012,file = "DATOS_PROCESADOS/LEGISLATIVAS_2012.RData")
|
library(ggplot2)
library(RColorBrewer)
######## 73 variables
# regression model ---- 73 variables
# predictors (X)
X<-colnames(full_data_T)[c(2,6:7,16:24,33:93)]
# outcome (y)
y<-colnames(full_data_T)[15]
## regression model formula
fmla <- as.formula(paste(y,'~', paste(X, collapse= "+")))
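# e.g., if y were "outcome" and X were c("x1", "x2") (hypothetical names; the
# real ones come from full_data_T), fmla would evaluate to outcome ~ x1 + x2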
## per-person regression and prediction ----
full_data_T<-person_pred(full_data_T)
#plot_actual-boxplot
actual_boxplot(full_data_T)
#plot_prediction-boxplot
prediction_boxplot(full_data_T)
#plot_actual-hist
actual_hist(full_data_T,T)
#plot_prediction-hist
prediction_hist(full_data_T,T)
#plot-error actual-pred
error_density(full_data_T)
#cor
cor_plot(full_data_T)
unique(full_data_T$num.boxes.correct)
|
/treatment_73.R
|
no_license
|
rct-research/RCT
|
R
| false
| false
| 635
|
r
|
setwd("/Users/yfl_ex3/Desktop/KTL_RNA seq")
data_full = read.csv("FPKM.csv")
dim(data_full)
sample_var = colnames(data_full[, 5:16])
DAC_dummy = c(0,0, 0, 1, 1, 1, 0,0,0,1,1,1)
DEX_dummy = c(0,0,0,0,0,0,1,1,1,1,1,1)
sample_data =t(rbind(DAC_dummy, DEX_dummy))
rownames(sample_data) = sample_var
sample_data = as.data.frame((sample_data))
data = as.matrix(data_full[, 5:16])
mod = model.matrix(~as.factor(sample_data$DAC_dummy)*as.factor(sample_data$DEX_dummy))
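# mod has 4 columns (intercept, DAC main effect, DEX main effect, DAC:DEX
# interaction), so row 4 of the coefficient matrix below is the interaction term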
fit = lm.fit(mod, t(data))
cof = fit$coefficients
cof[, 1]
dim(cof)
max(cof[4, ])
min(cof[4, ])
interact = as.array(cof[4, ])
data_inter = cbind(data_full, interact)
data_inter[1, ]
write.csv(data_inter, file = "KTL_seq.csv")
library(broom) # tidy() below comes from the broom package
p_val = vector()
for (i in 1:dim(data)[1]){
  fit_single = lm(data[i, ] ~ sample_data$DAC_dummy*sample_data$DEX_dummy)
  p_val = rbind(p_val, tidy(fit_single)[4, 5]) # p-value of the interaction term
}
write.csv(p_val, file = "KTL_pval2.csv")
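# Optional follow-up (an assumption, not part of the original script): with one
# test per gene it is usually advisable to correct for multiple testing, e.g.
# p_adj <- p.adjust(as.numeric(unlist(p_val)), method = "BH")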
|
/interaction_script.R
|
no_license
|
Sheena0228/ISCIENCE-D-22-03411R1
|
R
| false
| false
| 907
|
r
|
setwd("/Users/yfl_ex3/Desktop/KTL_RNA seq")
data_full = read.csv("FPKM.csv")
dim(data_full)
sample_var = colnames(data_full[, 5:16])
DAC_dummy = c(0,0, 0, 1, 1, 1, 0,0,0,1,1,1)
DEX_dummy = c(0,0,0,0,0,0,1,1,1,1,1,1)
sample_data =t(rbind(DAC_dummy, DEX_dummy))
rownames(sample_data) = sample_var
sample_data = as.data.frame((sample_data))
data = as.matrix(data_full[, 5:16])
mod = model.matrix(~as.factor(sample_data$DAC_dummy)*as.factor(sample_data$DEX_dummy))
fit = lm.fit(mod, t(data))
cof = fit$coefficients
cof[, 1]
dim(cof)
max(cof[4, ])
min(cof[4, ])
interact = as.array(cof[4, ])
data_inter = cbind(data_full, interact)
data_inter[1, ]
write.csv(data_inter, file = "KTL_seq.csv")
p_val = vector()
for (i in 1:dim(data)[1]){
fit_single = lm(data[i, ] ~sample_data$DAC_dummy*sample_data$DEX_dummy)
p_val = rbind(p_val, tidy(fit_single)[4, 5])
}
write.csv(p_val, file = "KTL_pval2.csv")
|
nest.smdm <- function(x, constraints=NULL, weighted=FALSE, decreasing="fill", sort=TRUE){
### Checking inputs ####
if (!is.null(constraints)&length(unique(constraints))==1){
warning("Only one module. Nestedness calculated only for the entire matrix")
constraints = NULL
}
if(is.element(NA, constraints) | is.element(NaN, constraints)){
warning("NA or NaN in constraints. Nestedness calculated only for the entire matrix")
constraints = NULL
}
if (!is.null(constraints)&length(constraints)!=nrow(x)+ncol(x)){
stop("constraints vector is not of the same length that network vertices")
}
if (weighted == FALSE & any(x != 0 & x != 1)){
x[x>0] = 1
warning ("binary metric applied")
}
if (decreasing!="fill" & decreasing!="abund"){
stop("decreasing should be fill or abund")
}
if (!is.null(constraints)){constraints = as.character(constraints)}
if(is.null(dimnames(x))){
xnames=list(paste("R", 1:nrow(x), ""), paste("C", 1:ncol(x), ""))
dimnames(x) <- xnames
}
### Unweighted NODF Function ####
unweightednodf = function (x, constraints){
    # Sort the matrix by row and column sums
if (sort==TRUE){tab0=x[sort(rowSums(x), index=TRUE, decreasing=TRUE)$ix,
sort(colSums(x), index=TRUE, decreasing=TRUE)$ix]}
else {tab0=x}
# N for rows
MTrow = rowSums(tab0)
Nrow = matrix(rep(NA, times=nrow(tab0)^2), nrow(tab0), nrow(tab0))
dimnames(Nrow)=list(rownames(tab0), rownames(tab0))
for (jrow in 2:nrow(tab0)){
for (irow in 1:(jrow-1)){
if (MTrow[jrow]>=MTrow[irow]){Nrow[jrow, irow] = 0
} else {
S=0
for(i in 1:ncol(tab0)){
if (tab0[jrow, i]==1&tab0[jrow, i]==tab0[irow, i]) {
S = S+1
}
}
Nrow[jrow, irow] = S*100/MTrow[jrow]
}
}
}
Nrow = Nrow[rownames(x), rownames(x)]
# NODF for rows
NODFrow = mean(Nrow, na.rm = TRUE)
    # N for columns
MTcol = colSums(tab0)
Ncol = matrix(rep(NA, times=ncol(tab0)^2), ncol(tab0), ncol(tab0))
dimnames(Ncol) = list(colnames(tab0), colnames(tab0))
for (jcol in 2:ncol(tab0)){
for (icol in 1:(jcol-1)){
if (MTcol[jcol] >= MTcol[icol]){Ncol[jcol, icol]=0}
else {
S=0
for(i in 1:nrow(tab0)){
if (tab0[i,jcol]==1&tab0[i,jcol]==tab0[i,icol]) {
S=S+1
}
}
Ncol[jcol,icol]=S*100/MTcol[jcol]
}
}
}
Ncol=Ncol[colnames(x),colnames(x)]
    # NODF for columns
NODFcol= mean(Ncol,na.rm = TRUE)
# NODF for the entire matrix
NODFmatrix= mean(c(Ncol,Nrow),na.rm=TRUE)
#### NODF SM/DM ###
if (!is.null(constraints)){
# constraints for rows
rowcons=cbind (rownames(x),constraints[1:nrow(x)])
tabrcons=table(rowcons[,1],rowcons[,2])
distrcons= dist(tabrcons,method = "binary")
distrcons= as.matrix (distrcons)
distrcons=distrcons[rownames(x),rownames(x)]
rm(rowcons,tabrcons)
# NODF SM/DM for rows
SM_Nrow=0
SM_nrow=0
DM_Nrow=0
DM_nrow=0
for (i in 1:nrow(x)){
for (j in 1:nrow(x)){
if (!is.na(Nrow[i,j])){
if(distrcons[i,j]==0){
SM_Nrow=SM_Nrow+Nrow[i,j]
SM_nrow=SM_nrow+1
}
else{
DM_Nrow=DM_Nrow+Nrow[i,j]
DM_nrow=DM_nrow+1
}
}
}
}
NODF_SM_row= SM_Nrow/SM_nrow
NODF_DM_row= DM_Nrow/DM_nrow
      # constraints for columns
colcons=cbind (colnames(x),constraints[(nrow(x)+1):length(constraints)])
tabccons=table(colcons[,1],colcons[,2])
distccons= dist(tabccons,method = "binary")
distccons= as.matrix (distccons)
distccons=distccons[colnames(x),colnames(x)]
rm(colcons,tabccons)
      # NODF SM/DM for columns
SM_Ncol=0
SM_ncol=0
DM_Ncol=0
DM_ncol=0
for (i in 1:ncol(x)){
for (j in 1:ncol(x)){
if (!is.na(Ncol[i,j])){
if(distccons[i,j]==0){
SM_Ncol=SM_Ncol+Ncol[i,j]
SM_ncol=SM_ncol+1
}
else{
DM_Ncol=DM_Ncol+Ncol[i,j]
DM_ncol=DM_ncol+1
}
}
}
}
NODF_SM_col= SM_Ncol/SM_ncol
NODF_DM_col= DM_Ncol/DM_ncol
# NODF SM/DM for matrix
NODF_SM_matrix= (SM_Nrow+SM_Ncol)/(SM_nrow+SM_ncol)
NODF_DM_matrix= (DM_Nrow+DM_Ncol)/(DM_nrow+DM_ncol)
# return
return(list(NODFrow=NODFrow,NODFcol=NODFcol, NODFmatrix=NODFmatrix,
NODF_SM_row= NODF_SM_row, NODF_DM_row=NODF_DM_row,
NODF_SM_col= NODF_SM_col, NODF_DM_col=NODF_DM_col,
NODF_SM_matrix= NODF_SM_matrix, NODF_DM_matrix=NODF_DM_matrix))
}
else {
return(list(NODFrow=NODFrow,NODFcol=NODFcol, NODFmatrix=NODFmatrix))}
}
### Weighted NODF function ####
weightednodf=function (x, constraints){
    # Sort the matrix by row and column sums
if(sort==TRUE){tab0=x[sort(rowSums(x!=0), index=TRUE, decreasing=TRUE)$ix,
sort(colSums(x!=0), index=TRUE, decreasing=TRUE)$ix]}
else{tab0=x}
# N for rows
MTrow= rowSums(tab0)
Frow= rowSums(tab0!=0)
Nrow= matrix(rep(NA,times=nrow(tab0)^2),nrow(tab0),nrow(tab0))
dimnames(Nrow)=list(rownames(tab0),rownames(tab0))
for (jrow in 2:nrow(tab0)){
for (irow in 1:(jrow-1)){
if (Frow[jrow]>=Frow[irow]){Nrow[jrow,irow]=0}
else {
S=0
for(i in 1:ncol(tab0)){
if (tab0[jrow,i]!=0&tab0[jrow,i]<tab0[irow,i]) {
S=S+1
}
}
Nrow[jrow,irow]=S*100/Frow[jrow]
}
}
}
Nrow=Nrow[rownames(x), rownames(x)]
# WNODF for rows
NODFrow= mean(Nrow,na.rm = TRUE)
    # N for columns
MTcol= colSums(tab0)
Fcol= colSums(tab0!=0)
Ncol= matrix(rep(NA,times=ncol(tab0)^2),ncol(tab0),ncol(tab0))
dimnames(Ncol)=list(colnames(tab0),colnames(tab0))
for (jcol in 2:ncol(tab0)){
for (icol in 1:(jcol-1)){
if (Fcol[jcol]>=Fcol[icol]){Ncol[jcol,icol]=0}
else {
S=0
for(i in 1:nrow(tab0)){
if (tab0[i,jcol]!=0&tab0[i,jcol]<tab0[i,icol]) {
S=S+1
}
}
Ncol[jcol,icol]=S*100/Fcol[jcol]
}
}
}
Ncol=Ncol[colnames(x),colnames(x)]
    # WNODF for columns
NODFcol= mean(Ncol,na.rm = TRUE)
# WNODF for the entire matrix
NODFmatrix= mean(c(Ncol,Nrow),na.rm=TRUE)
#### WNODF SM/DM ###
if (!is.null(constraints)){
# constraints for rows
rowcons=cbind (rownames(x),constraints[1:nrow(x)])
tabrcons=table(rowcons[,1],rowcons[,2])
distrcons= dist(tabrcons,method = "binary")
distrcons= as.matrix (distrcons)
distrcons=distrcons[rownames(x),rownames(x)]
rm(rowcons,tabrcons)
# WNODF SM/DM for rows
SM_Nrow=0
SM_nrow=0
DM_Nrow=0
DM_nrow=0
for (i in 1:nrow(x)){
for (j in 1:nrow(x)){
if (!is.na(Nrow[i,j])){
if(distrcons[i,j]==0){
SM_Nrow=SM_Nrow+Nrow[i,j]
SM_nrow=SM_nrow+1
}
else{
DM_Nrow=DM_Nrow+Nrow[i,j]
DM_nrow=DM_nrow+1
}
}
}
}
NODF_SM_row= SM_Nrow/SM_nrow
NODF_DM_row= DM_Nrow/DM_nrow
      # constraints for columns
colcons=cbind (colnames(x),constraints[(nrow(x)+1):length(constraints)])
tabccons=table(colcons[,1],colcons[,2])
distccons= dist(tabccons,method = "binary")
distccons= as.matrix (distccons)
distccons=distccons[colnames(x),colnames(x)]
rm(colcons,tabccons)
      # WNODF SM/DM for columns
SM_Ncol=0
SM_ncol=0
DM_Ncol=0
DM_ncol=0
for (i in 1:ncol(x)){
for (j in 1:ncol(x)){
if (!is.na(Ncol[i,j])){
if(distccons[i,j]==0){
SM_Ncol=SM_Ncol+Ncol[i,j]
SM_ncol=SM_ncol+1
}
else{
DM_Ncol=DM_Ncol+Ncol[i,j]
DM_ncol=DM_ncol+1
}
}
}
}
NODF_SM_col= SM_Ncol/SM_ncol
NODF_DM_col= DM_Ncol/DM_ncol
# WNODF SM/DM for matrix
NODF_SM_matrix= (SM_Nrow+SM_Ncol)/(SM_nrow+SM_ncol)
NODF_DM_matrix= (DM_Nrow+DM_Ncol)/(DM_nrow+DM_ncol)
# return
return(list(WNODFrow=NODFrow,WNODFcol=NODFcol, WNODFmatrix=NODFmatrix,WNODF_SM_row= NODF_SM_row, WNODF_DM_row=NODF_DM_row,WNODF_SM_col= NODF_SM_col, WNODF_DM_col=NODF_DM_col,WNODF_SM_matrix= NODF_SM_matrix, WNODF_DM_matrix=NODF_DM_matrix))
}
else {
return(list(WNODFrow=NODFrow,WNODFcol=NODFcol, WNODFmatrix=NODFmatrix))}
}
  ### Weighted NODA function ####
weightednoda=function (x,constraints){
    # Sort the matrix by row and column sums
if(sort==TRUE){tab0=x[sort(rowSums(x), index=TRUE, decreasing=TRUE)$ix,
sort(colSums(x), index=TRUE, decreasing=TRUE)$ix]}
else{tab0=x}
# N for rows
MTrow= rowSums(tab0)
Frow= rowSums(tab0!=0)
Nrow= matrix(rep(NA,times=nrow(tab0)^2),nrow(tab0),nrow(tab0))
dimnames(Nrow)=list(rownames(tab0),rownames(tab0))
for (jrow in 2:nrow(tab0)){
for (irow in 1:(jrow-1)){
if (MTrow[jrow]>=MTrow[irow]){Nrow[jrow,irow]=0}
else {
S=0
for(i in 1:ncol(tab0)){
if (tab0[jrow,i]!=0&tab0[jrow,i]<tab0[irow,i]) {
S=S+1
}
}
Nrow[jrow,irow]=S*100/Frow[jrow]
}
}
}
Nrow=Nrow[rownames(x), rownames(x)]
# WNODA for rows
NODArow= mean(Nrow,na.rm = TRUE)
    # N for columns
MTcol= colSums(tab0)
Fcol= colSums(tab0!=0)
Ncol= matrix(rep(NA,times=ncol(tab0)^2),ncol(tab0),ncol(tab0))
dimnames(Ncol)=list(colnames(tab0),colnames(tab0))
for (jcol in 2:ncol(tab0)){
for (icol in 1:(jcol-1)){
if (MTcol[jcol]>=MTcol[icol]){Ncol[jcol,icol]=0}
else {
S=0
for(i in 1:nrow(tab0)){
if (tab0[i,jcol]!=0&tab0[i,jcol]<tab0[i,icol]) {
S=S+1
}
}
Ncol[jcol,icol]=S*100/Fcol[jcol]
}
}
}
Ncol=Ncol[colnames(x),colnames(x)]
    # NODA for columns
NODAcol= mean(Ncol,na.rm = TRUE)
# NODA for the entire matrix
NODAmatrix= mean(c(Ncol,Nrow),na.rm=TRUE)
#### WNODA SM/DM ###
if (!is.null(constraints)){
# constraints for rows
rowcons=cbind (rownames(x),constraints[1:nrow(x)])
tabrcons=table(rowcons[,1],rowcons[,2])
distrcons= dist(tabrcons,method = "binary")
distrcons= as.matrix (distrcons)
distrcons=distrcons[rownames(x),rownames(x)]
rm(rowcons,tabrcons)
# WNODA SM/DM for rows
SM_Nrow=0
SM_nrow=0
DM_Nrow=0
DM_nrow=0
for (i in 1:nrow(x)){
for (j in 1:nrow(x)){
if (!is.na(Nrow[i,j])){
if(distrcons[i,j]==0){
SM_Nrow=SM_Nrow+Nrow[i,j]
SM_nrow=SM_nrow+1
}
else{
DM_Nrow=DM_Nrow+Nrow[i,j]
DM_nrow=DM_nrow+1
}
}
}
}
NODA_SM_row= SM_Nrow/SM_nrow
NODA_DM_row= DM_Nrow/DM_nrow
      # constraints for columns
colcons=cbind (colnames(x),constraints[(nrow(x)+1):length(constraints)])
tabccons=table(colcons[,1],colcons[,2])
distccons= dist(tabccons,method = "binary")
distccons= as.matrix (distccons)
distccons=distccons[colnames(x),colnames(x)]
rm(colcons,tabccons)
      # WNODA SM/DM for columns
SM_Ncol=0
SM_ncol=0
DM_Ncol=0
DM_ncol=0
for (i in 1:ncol(x)){
for (j in 1:ncol(x)){
if (!is.na(Ncol[i,j])){
if(distccons[i,j]==0){
SM_Ncol=SM_Ncol+Ncol[i,j]
SM_ncol=SM_ncol+1
}
else{
DM_Ncol=DM_Ncol+Ncol[i,j]
DM_ncol=DM_ncol+1
}
}
}
}
NODA_SM_col= SM_Ncol/SM_ncol
NODA_DM_col= DM_Ncol/DM_ncol
# WNODA SM/DM for matrix
NODA_SM_matrix= (SM_Nrow+SM_Ncol)/(SM_nrow+SM_ncol)
NODA_DM_matrix= (DM_Nrow+DM_Ncol)/(DM_nrow+DM_ncol)
# return
return(list(WNODArow=NODArow,WNODAcol=NODAcol, WNODAmatrix=NODAmatrix,
WNODA_SM_row= NODA_SM_row, WNODA_DM_row=NODA_DM_row,
WNODA_SM_col= NODA_SM_col, WNODA_DM_col=NODA_DM_col,
WNODA_SM_matrix= NODA_SM_matrix, WNODA_DM_matrix=NODA_DM_matrix))
}
else {
return(list(WNODArow=NODArow,WNODAcol=NODAcol, WNODAmatrix=NODAmatrix))}
}
### Using functions ####
if(decreasing=="abund"){
return(weightednoda(x,constraints))
}
if (decreasing=="fill"){
    if (weighted==FALSE){
return(unweightednodf(x,constraints))
}
if (weighted==TRUE){
return(weightednodf(x,constraints))
}
}
}
module2constraints <- function(mod){
# helper function to extract the module to which a species belongs from a computeModule-object
# Note that row 1 and columns 1 and 2 of mod@modules are for book-keeping
# returns a single vector, with first rows and then columns
# This vector contains a number for each module, in the position of the species that belong to this module; i.e. it starts with 4, 3, 1, ... indicating that the first species belongs to module 4, the second to module 3, the third to module 1, and so forth.
apply(mod@modules[-1, -c(1,2)], 2, function(x) which(x > 0))
}
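# A minimal usage sketch (assumptions: the bipartite package is installed and
# `web` is a hypothetical quantitative interaction matrix):
# mod <- bipartite::computeModules(web)
# cons <- module2constraints(mod)
# nest.smdm(web, constraints = cons, weighted = TRUE)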
|
/bipartite/R/nest.smdm.R
|
no_license
|
afbarth/how-to-bipartite
|
R
| false
| false
| 14,010
|
r
|
setwd("C:/Users/lenovo/Desktop/652/Electronic homework/work3")
MEPS1 <- read.csv("HealthExpend.csv",header=TRUE)
MEPS <- subset(MEPS1,EXPENDOP>0)
library(MASS)
library(VGAM)
# Inference assuming a gamma Distribution
fit.gamma2 <- glm(EXPENDOP~1, data=MEPS,family=Gamma(link=log))
(theta<-exp(coef(fit.gamma2))*gamma.dispersion(fit.gamma2)) # scale: theta = mu/alpha, so mu = alpha*theta
(alpha<-1/gamma.dispersion(fit.gamma2) )
summary(fit.gamma2, dispersion = gamma.dispersion(fit.gamma2))
# Inference assuming a Pareto Distribution
fit.pareto <- vglm(EXPENDOP~ 1, paretoII, loc=0, data = MEPS)
exp(coef(fit.pareto))
summary(fit.pareto)
head(fitted(fit.pareto))
# Plotting the fit using densities (on a logarithmic scale)
x <- seq(0,12,by=0.01)
plot(density(log(MEPS$EXPENDOP)) ,main="", xlab="Log Expenditures",ylim=c(0, 0.3))
fgamma_ex = dgamma(exp(x), shape = alpha, scale=theta)*exp(x)
lines(x,fgamma_ex,col="blue")
fpareto_ex = dparetoII(exp(x),loc=0,shape = exp(coef(fit.pareto)[2]), scale = exp(coef(fit.pareto)[1]))*exp(x)
lines(x,fpareto_ex,col="purple")
legend("topright", c("gamma", "pareto"), lty=1, col = c("blue","purple"))
# PP Plot
Percentiles <- ecdf(MEPS$EXPENDOP)
par(mfrow=c(1, 2))
Fgamma_ex = pgamma(MEPS$EXPENDOP, shape = alpha, scale=theta)
plot(Percentiles(MEPS$EXPENDOP),Fgamma_ex, xlab="Empirical DF", ylab="Gamma DF",cex=0.4)
abline(0,1)
Fpareto_ex = pparetoII(MEPS$EXPENDOP,loc=0,shape = exp(coef(fit.pareto)[2]), scale = exp(coef(fit.pareto)[1]))
plot(Percentiles(MEPS$EXPENDOP),Fpareto_ex, xlab="Empirical DF", ylab="Pareto DF",cex=0.4)
abline(0,1)
# Kolmogorov-Smirnov tests; the test statistic is "D"
library(goftest)
ks.test(MEPS$EXPENDOP, "pgamma", shape = alpha, scale=theta)
ks.test(MEPS$EXPENDOP, "pparetoII",loc=0,shape = exp(coef(fit.pareto)[2]), scale = exp(coef(fit.pareto)[1]))
|
/week3/Q2.R
|
no_license
|
yyiyangnju/Loss-Model
|
R
| false
| false
| 1800
|
r
|
setwd("C:/Users/lenovo/Desktop/652/Electronic homework/work3")
MEPS1 <- read.csv("HealthExpend.csv",header=TRUE)
MEPS <- subset(MEPS1,EXPENDOP>0)
library(MASS)
library(VGAM)
# Inference assuming a gamma Distribution
fit.gamma2 <- glm(EXPENDOP~1, data=MEPS,family=Gamma(link=log))
(theta<-exp(coef(fit.gamma2))*gamma.dispersion(fit.gamma2)) #mu=theta/alpha
(alpha<-1/gamma.dispersion(fit.gamma2) )
summary(fit.gamma2, dispersion = gamma.dispersion(fit.gamma2))
# Inference assuming a Pareto Distribution
fit.pareto <- vglm(EXPENDOP~ 1, paretoII, loc=0, data = MEPS)
exp(coef(fit.pareto))
summary(fit.pareto)
head(fitted(fit.pareto))
# Plotting the fit using densities (on a logarithmic scale)
x <- seq(0,12,by=0.01)
plot(density(log(MEPS$EXPENDOP)) ,main="", xlab="Log Expenditures",ylim=c(0, 0.3))
fgamma_ex = dgamma(exp(x), shape = alpha, scale=theta)*exp(x)
lines(x,fgamma_ex,col="blue")
fpareto_ex = dparetoII(exp(x),loc=0,shape = exp(coef(fit.pareto)[2]), scale = exp(coef(fit.pareto)[1]))*exp(x)
lines(x,fpareto_ex,col="purple")
legend("topright", c("gamma", "pareto"), lty=1, col = c("blue","purple"))
# PP Plot
Percentiles <- ecdf(MEPS$EXPENDOP)
par(mfrow=c(1, 2))
Fgamma_ex = pgamma(MEPS$EXPENDOP, shape = alpha, scale=theta)
plot(Percentiles(MEPS$EXPENDOP),Fgamma_ex, xlab="Empirical DF", ylab="Gamma DF",cex=0.4)
abline(0,1)
Fpareto_ex = pparetoII(MEPS$EXPENDOP,loc=0,shape = exp(coef(fit.pareto)[2]), scale = exp(coef(fit.pareto)[1]))
plot(Percentiles(MEPS$EXPENDOP),Fpareto_ex, xlab="Empirical DF", ylab="Pareto DF",cex=0.4)
abline(0,1)
#Kolmogorov-Smirnov # the test statistic is "D"
library(goftest)
ks.test(MEPS$EXPENDOP, "pgamma", shape = alpha, scale=theta)
ks.test(MEPS$EXPENDOP, "pparetoII",loc=0,shape = exp(coef(fit.pareto)[2]), scale = exp(coef(fit.pareto)[1]))
|
#' List projects or components on OSF
#'
#' List the projects or components associated with a user or contained in the
#' top-level of another OSF project or component.
#'
#' @param x one of the following:
#' * An [`osf_tbl_node`] with a single project or component.
#' * An [`osf_tbl_user`] with a single OSF user.
#' @template filter-pattern
#' @template n_max
#' @template verbose
#'
#' @return An [`osf_tbl_node`] with one row for each OSF project or component,
#' ordered by modification time.
#' @examples
#' \dontrun{
#' # List your recent projects and components
#' osf_retrieve_user("me") %>%
#' osf_ls_nodes()
#'
#' # List the first 10 components in the #ScanAllFish project
#' fish_ctscans <- osf_retrieve_node("ecmz4")
#' osf_ls_nodes(fish_ctscans)
#'
#' # Now just the components with scans of species from the Sphyrna genus
#' osf_ls_nodes(fish_ctscans, pattern = "Sphyrna")
#' }
#' @seealso [`osf_ls_files()`] to generate a list of files and folders.
#' @export
osf_ls_nodes <-
function(x,
pattern = NULL,
n_max = 10,
verbose = FALSE) {
UseMethod("osf_ls_nodes")
}
#' @export
osf_ls_nodes.osf_tbl_node <-
function(x,
pattern = NULL,
n_max = 10,
verbose = FALSE) {
x <- make_single(x)
out <- .osf_node_children(
id = as_id(x),
n_max = n_max,
query = filter_nodes(pattern = html_encode(pattern)),
verbose = verbose
)
raise_error(out)
as_osf_tbl(out, "osf_tbl_node")
}
#' @export
osf_ls_nodes.osf_tbl_user <-
function(x,
pattern = NULL,
n_max = 10,
verbose = FALSE) {
x <- make_single(x)
out <- .osf_user_nodes(
id = as_id(x),
n_max = n_max,
query = filter_nodes(pattern = pattern),
verbose = verbose
)
raise_error(out)
as_osf_tbl(out, "osf_tbl_node")
}
|
/R/osf_ls_nodes.R
|
permissive
|
ropensci/osfr
|
R
| false
| false
| 1837
|
r
|
\name{soql-package}
\alias{soql-package}
\docType{package}
\title{
\packageTitle{soql}
}
\description{
\packageDescription{soql}
}
\details{
To create a SoQL URL, or just parameters for one, start with \code{soql()}. Then chain the result into other functions, such as \code{soql_where()} or \code{soql_order()}. When you're done, use \code{as.character()} to retrieve the finished URL, for use with any networking package.
}
\author{
\packageAuthor{soql}
Maintainer: \packageMaintainer{soql}
}
\references{
\href{https://dev.socrata.com/}{Documentation for the SODA API}
}
\keyword{ package }
\examples{
if (require(magrittr)) \{
# With pipes
my_url <- soql() \%>\%
soql_where("height > 30") \%>\%
soql_limit(20) \%>\%
as.character()
\} else \{
# Without pipes
soql_chain <- soql()
soql_chain <- soql_where(soql_chain, "height > 30")
  soql_chain <- soql_limit(soql_chain, 20)
my_url <- as.character(soql_chain)
\}
}
|
/man/soql-package.Rd
|
no_license
|
cran/soql
|
R
| false
| false
| 967
|
rd
|
\name{select.traces}
\alias{select.traces}
\title{
Select traces from a data set according to several criteria
}
\description{
For alignment purposes, it may be useful to select traces which show
clear features, and to throw away traces that contain mainly
noise. This function implements three ways to achieve this: CODA, a
criterion similar to varimax, and a criterion based on the highest
intensity.
}
\usage{
select.traces(X, criterion = c("coda", "var", "int"),
window = 5, smoothing = c("median", "mean"))
}
\arguments{
\item{X}{a data matrix or an array. The first dimension signifies the
traces from which a selection is to be made. If X is a matrix, the
first usually corresponds to samples and the second dimension
is the spectral dimension. If X is an array, the data are
assumed to come from a hyphenated experiment, with the first
dimension the chromatographic dimension, the second the spectral
dimension and the third dimension corresponding to samples}
\item{criterion}{either Windig's CODA algorithm, a criterion
calculating the variances of the length-scaled spectra, or a
criterion giving the height of the highest peak}
\item{window, smoothing}{arguments to the coda function.}
}
\value{
The function returns a list with components
\item{crit.val}{a vector containing the values of the criterion for
all traces. If X is an array, the function is recursively applied
to all samples (elements of the third dimension) - the results are
multiplied to obtain one criterion value per trace}
\item{trace.nrs}{the order of the traces (from large to small)}
}
\details{The CODA criterion in essence selects traces with no baseline
and no spikes, but still containing significant intensities. The
variance criterion aims at something similar: it calculates the
variance (or standard deviation) of every trace after length scaling -
traces with a high value show few highly structured features, whereas
traces with a low value show noise or a significant baseline. The
intensity criterion simply returns the intensity of the highest
peak. The latter two criteria are simpler than CODA but implicitly
assume that the traces have been preprocessed (i.c., spikes have been
removed).
}
\author{Ron Wehrens}
\seealso{\code{\link{coda}}}
\examples{
data(lcms)
ntrace <- dim(lcms)[1]
lcms.selection <- select.traces(lcms[,,1:2], criterion = "var")
good <- lcms.selection$trace.nrs[1]
bad <- lcms.selection$trace.nrs[ntrace]
par(mfrow = c(1,2))
matplot(lcms[good,,1:2], type = 'l', lty = 1)
matplot(lcms[bad,,1:2], type = 'l', lty = 1)
}
\keyword{manip}
|
/Rscript/R/ptw/man/select.traces.Rd
|
permissive
|
xieguigang/mzkit
|
R
| false
| false
| 2656
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/quantileCI.R
\name{quantileCI}
\alias{quantileCI}
\title{Estimate and confidence intervals of a quantile}
\usage{
quantileCI(y, prob, conf.level = 0.95, method = "binomial", na.rm = FALSE, ...)
}
\arguments{
\item{y}{A vector of sample data}
\item{prob}{Numeric probability in range [0, 1]}
\item{conf.level}{Confidence level for the interval (default 0.95)}
\item{method}{Method used to construct the interval (default "binomial")}
\item{na.rm}{If TRUE, NA values are removed before computation}
\item{\dots}{Further arguments}
}
\value{
Three-element, named vector: estimate, lowerCI, upperCI
}
\description{
Estimate and confidence intervals of a quantile
}
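\examples{
# A minimal sketch using the defaults (binomial method, 95\% confidence level)
quantileCI(rnorm(100), prob = 0.5)
}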
|
/man/quantileCI.Rd
|
no_license
|
pteetor/tutils
|
R
| false
| true
| 533
|
rd
|
# Quadruple Witching
# Quadruple witching refers to the third Friday of every March, June, September and December.
# On these days, market index futures, market index options, stock options and stock futures
# expire, usually resulting in increased volatility.
# Formatting Data
rm(list=ls())
# Read the data
data=as.data.frame(read.csv("Workbook2.csv"))
dates <- as.Date(as.character(data[,1]),"%d/%m/%y")
dates=format(dates, format="%d/%m/%Y")
data$Dates=dates
# Formatting Data
# Get the dates of the third Friday of March, June, September and December
library("RcppBDT")
dates=c()
for(i in 1991:2017){
dates=append(dates,format(getNthDayOfWeek(third, Fri, Mar, i), format="%d/%m/%Y"))
dates=append(dates,format(getNthDayOfWeek(third, Fri, Jun, i), format="%d/%m/%Y"))
dates=append(dates,format(getNthDayOfWeek(third, Fri, Sep, i), format="%d/%m/%Y"))
dates=append(dates,format(getNthDayOfWeek(third, Fri, Dec, i), format="%d/%m/%Y"))
}
dates=dates[1:(length(dates)-2)] # drop the last two dates (Sep/Dec 2017, presumably outside the sample)
dates.needed=data[data$Dates %in% dates,]
# Get the dates of the third Friday of March, June, September and December
# Levene test: leveneTest() is provided by the car package (loaded via Rcmdr)
library(Rcmdr)
# store values in variables
value1=dates.needed$Daily.Returns
value2=data$Daily.Returns
# Combine them
y <- c(value1, value2)
# group labels: 1 = witching days, 2 = all trading days
group <- as.factor(c(rep(1, length(value1)), rep(2, length(value2))))
# perform test
leveneTest(y, group)$`Pr(>F)`[1]
# Levene's Test for Homogeneity of Variance (center = median)
# Df F value Pr(>F)
# group 1 2.5474 0.1105
# 6974
# 0.1105245
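# Interpretation: p = 0.11 > 0.05, so the null hypothesis of equal variances is
# not rejected at the 5% level. One caveat: the "all days" group also contains
# the witching days themselves, so the two groups are not independent samples.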
|
/Quadruple Witching/Quadruple_Witching.r
|
no_license
|
KshitizSharmaV/Trading_Strategies
|
R
| false
| false
| 1513
|
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/legacy.R
\name{Exprq}
\alias{Exprq}
\title{Legacy function}
\usage{
Exprq(...)
}
\arguments{
\item{...}{not used}
}
\description{
Legacy function
}
\concept{legacy}
|
/man/Exprq.Rd
|
no_license
|
HeraclesHX/ryacas
|
R
| false
| true
| 243
|
rd
|
# The planets_df data frame from the previous exercise is pre-loaded
# Print out diameter of Mercury (row 1, column 3)
print(planets_df[1,3])
# Print out data for Mars (entire fourth row)
print(planets_df[4,])
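# Selection also works by column name, e.g. (assuming the diameter column is
# named "diameter"):
# print(planets_df[1, "diameter"])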
|
/DCamp/Select data frame elements.R
|
no_license
|
shinichimatsuda/R_Training
|
R
| false
| false
| 212
|
r
|
testlist <- list(bytes1 = integer(0), pmutation = 2.71615461559757e-312)
result <- do.call(mcga:::ByteCodeMutation,testlist)
str(result)
|
/mcga/inst/testfiles/ByteCodeMutation/libFuzzer_ByteCodeMutation/ByteCodeMutation_valgrind_files/1612887209-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false
| false
| 136
|
r
|
testlist <- list(bytes1 = integer(0), pmutation = 2.71615461559757e-312)
result <- do.call(mcga:::ByteCodeMutation,testlist)
str(result)
|
#' Percent Mediation
#'
#' To obtain the percent of the total effect that is mediated through the specified
#' indirect path: indirect / (direct + indirect) * 100.
#'
#' @param model mma fit object
#' @param effect the indirect effect to be compared to its direct path
#'
#' @export
perc_med <- function(model, effect){
if (!is.mma(model)){
stop("Model must be mma object", call. = FALSE)
}
ind = model$ind_effects[effect, 3]
dir = model$dir_effects[gsub("-.*$", "", effect), 1]
if ((is.pos(ind) & is.neg(dir)) |
(is.neg(ind) & is.pos(dir))){
warning("The indirect and direct effects are of opposite signs.\nThere is not a meaningful percent of mediation here.", call. = FALSE)
}
val = ind/(dir + ind) * 100
names(val) = effect
val
}
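# Worked sketch of perc_med()'s formula with illustrative numbers (not from
# a real mma fit): with ind = 0.2 and dir = 0.6, the total effect is 0.8,
# so perc_med() would return 0.2 / (0.6 + 0.2) * 100 = 25 -- that is, 25%
# of the total effect travels through the indirect path.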
#' Formula Extraction for MMA
#'
#' Extracts the formulas from an mma object
#'
#' @param model mma fit object
#'
#' @export
mma_formulas <- function(model){
if (!is.mma(model)){
stop("Model must be mma object", call. = FALSE)
}
model$model
}
#' Indirect Effects Extraction for MMA
#'
#' Extracts the indirect effects from an mma object
#'
#' @param model mma fit object
#'
#' @export
mma_ind_effects <- function(model){
if (!is.mma(model)){
stop("Model must be mma object", call. = FALSE)
}
model$ind_effects
}
#' Direct Effects Extraction for MMA
#'
#' Extracts the direct effects from an mma object
#'
#' @param model mma fit object
#'
#' @export
mma_dir_effects <- function(model){
if (!is.mma(model)){
stop("Model must be mma object", call. = FALSE)
}
model$dir_effects
}
#' Standardized Indirect Effects Extraction for MMA
#'
#' Extracts the standardized indirect effects from an mma object
#'
#' @param model mma fit object
#'
#' @export
mma_std_ind_effects <- function(model){
if (!is.mma(model)){
stop("Model must be mma object", call. = FALSE)
}
if (is.na(model$sigma_y)){
stop("Cannot produce standardized effects with non-numeric outcomes", call. = FALSE)
}
model$ind_effects/model$sigma_y
}
#' Standardized Direct Effects Extraction for MMA
#'
#' Extracts the standardized direct effects from an mma object
#'
#' @param model mma fit object
#'
#' @export
mma_std_dir_effects <- function(model){
if (!is.mma(model)){
stop("Model must be mma object", call. = FALSE)
}
if (is.na(model$sigma_y)){
stop("Cannot produce standardized effects with non-numeric outcomes", call. = FALSE)
}
  model$dir_effects/model$sigma_y
}
|
/R/comps.R
|
no_license
|
TysonStanley/MarginalMediation
|
R
| false
| false
| 2,945
|
r
|
#' Percent Mediation
#'
#' To obtain the percent of the total effect that is mediated through the specified
#' indirect path: indirect / (direct + indirect) * 100.
#'
#' @param model mma fit object
#' @param effect the indirect effect to be compared to its direct path
#'
#' @export
perc_med <- function(model, effect){
if (!is.mma(model)){
stop("Model must be mma object", call. = FALSE)
}
ind = model$ind_effects[effect, 3]
dir = model$dir_effects[gsub("-.*$", "", effect), 1]
if ((is.pos(ind) & is.neg(dir)) |
(is.neg(ind) & is.pos(dir))){
warning("The indirect and direct effects are of opposite signs.\nThere is not a meaningful percent of mediation here.", call. = FALSE)
}
val = ind/(dir + ind) * 100
names(val) = effect
val
}
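# Worked sketch of perc_med()'s formula with illustrative numbers (not from
# a real mma fit): with ind = 0.2 and dir = 0.6, the total effect is 0.8,
# so perc_med() would return 0.2 / (0.6 + 0.2) * 100 = 25 -- that is, 25%
# of the total effect travels through the indirect path.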
#' Formula Extraction for MMA
#'
#' Extracts the formulas from an mma object
#'
#' @param model mma fit object
#'
#' @export
mma_formulas <- function(model){
if (!is.mma(model)){
stop("Model must be mma object", call. = FALSE)
}
model$model
}
#' Indirect Effects Extraction for MMA
#'
#' Extracts the indirect effects from an mma object
#'
#' @param model mma fit object
#'
#' @export
mma_ind_effects <- function(model){
if (!is.mma(model)){
stop("Model must be mma object", call. = FALSE)
}
model$ind_effects
}
#' Direct Effects Extraction for MMA
#'
#' Extracts the direct effects from an mma object
#'
#' @param model mma fit object
#'
#' @export
mma_dir_effects <- function(model){
if (!is.mma(model)){
stop("Model must be mma object", call. = FALSE)
}
model$dir_effects
}
#' Standardized Indirect Effects Extraction for MMA
#'
#' Extracts the standardized indirect effects from an mma object
#'
#' @param model mma fit object
#'
#' @export
mma_std_ind_effects <- function(model){
if (!is.mma(model)){
stop("Model must be mma object", call. = FALSE)
}
if (is.na(model$sigma_y)){
stop("Cannot produce standardized effects with non-numeric outcomes", call. = FALSE)
}
model$ind_effects/model$sigma_y
}
#' Standardized Direct Effects Extraction for MMA
#'
#' Extracts the standardized direct effects from an mma object
#'
#' @param model mma fit object
#'
#' @export
mma_std_dir_effects <- function(model){
if (!is.mma(model)){
stop("Model must be mma object", call. = FALSE)
}
if (is.na(model$sigma_y)){
stop("Cannot produce standardized effects with non-numeric outcomes", call. = FALSE)
}
  model$dir_effects/model$sigma_y
}
|
{
x <- 10
y <- 20
sum <- x+y
print(sum)
}
|
/Lab 1/1.4.R
|
no_license
|
TheWrL0ck/R-files
|
R
| false
| false
| 49
|
r
|
{
x <- 10
y <- 20
sum <- x+y
print(sum)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MigConnectivity.R
\docType{package}
\name{MigConnectivity}
\alias{MigConnectivity}
\title{MigConnectivity: A package for quantifying migratory connectivity pattern and
strength for migratory animals}
\description{
The MigConnectivity package allows the user to estimate transition
probabilities for migratory animals between any two phases of the annual
cycle, using a variety of different data types with the function
\code{\link{estTransition}}. It also calculates or estimates the strength of
migratory connectivity (MC), a standardized metric to quantify the extent to
which populations co-occur between two phases of the annual cycle. MC is
independent of data type and accounts for the relative abundance of
populations distributed across a seasonal range. MC is applicable between
any two phases of the annual cycle that are temporally distinct, including
migratory phases. MC can be defined for entire seasonal ranges or for
subsets of those ranges, as long as those subsets of ranges can be further
divided into regional populations. The package includes functions to
estimate MC and the more traditional metric of migratory connectivity
strength (Mantel correlation; rM) incorporating uncertainty from multiple
sources of sampling error. Description of the MC metric can be found in Cohen
et al. (2018).
}
|
/man/MigConnectivity.Rd
|
no_license
|
SMBC-NZP/MigConnectivity
|
R
| false
| true
| 1,392
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MigConnectivity.R
\docType{package}
\name{MigConnectivity}
\alias{MigConnectivity}
\title{MigConnectivity: A package for quantifying migratory connectivity pattern and
strength for migratory animals}
\description{
The MigConnectivity package allows the user to estimate transition
probabilities for migratory animals between any two phases of the annual
cycle, using a variety of different data types with the function
\code{\link{estTransition}}. It also calculates or estimates the strength of
migratory connectivity (MC), a standardized metric to quantify the extent to
which populations co-occur between two phases of the annual cycle. MC is
independent of data type and accounts for the relative abundance of
populations distributed across a seasonal range. MC is applicable between
any two phases of the annual cycle that are temporally distinct, including
migratory phases. MC can be defined for entire seasonal ranges or for
subsets of those ranges, as long as those subsets of ranges can be further
divided into regional populations. The package includes functions to
estimate MC and the more traditional metric of migratory connectivity
strength (Mantel correlation; rM) incorporating uncertainty from multiple
sources of sampling error. Description of the MC metric can be found in Cohen
et al. (2018).
}
|
#' Get vector of node IDs
#' @description Provides information on the node IDs from one or several node data frames, edge data frames, or graph objects.
#' @param ... a collection of node data frames, edge data frames, or a single graph object.
#' @return a vector of node ID values
#' @export get_nodes
get_nodes <- function(...){
objects <- list(...)
# Determine the length of the 'objects' list
length_of_objects <- length(objects)
  # If there is more than one object supplied, check for existence of a graph object
if (length_of_objects > 1){
# Determine the classes of the first two objects
class_object_1 <- class(objects[[1]])
class_object_2 <- class(objects[[2]])
if (any("gv_graph" %in% c(class_object_1, class_object_2))){
stop("Only a single graph can be supplied.")
}
}
for (i in 1:length(objects)){
if (i == 1) node_ID <- vector(mode = "character")
object <- objects[[i]]
if (class(object) == "gv_graph"){
object_type <- "gv_graph"
}
if (class(object) == "data.frame"){
if (any(c("nodes", "node", "node_ID") %in% colnames(object))){
object_type <- "node_df"
}
if (any(c("edge_from", "edge_to", "from", "to") %in% colnames(object))){
object_type <- "edge_df"
}
}
if (object_type == "gv_graph"){
object <- object$nodes_df
if ("node" %in% colnames(object)){
nodes_column <- which("node" %in% colnames(object))
} else if ("nodes" %in% colnames(object)){
nodes_column <- which("nodes" %in% colnames(object))
} else if ("node_id" %in% colnames(object)){
nodes_column <- which("node_id" %in% colnames(object))
} else {
stop("There is no column with node ID information.")
}
node_ID <- c(node_ID, object[,nodes_column])
}
if (object_type == "node_df"){
if ("node" %in% colnames(object)){
nodes_column <- which("node" %in% colnames(object))
} else if ("nodes" %in% colnames(object)){
nodes_column <- which("nodes" %in% colnames(object))
} else if ("node_id" %in% colnames(object)){
nodes_column <- which("node_id" %in% colnames(object))
} else {
stop("There is no column with node ID information.")
}
node_ID <- c(node_ID, object[,nodes_column])
}
if (object_type == "edge_df"){
      both_from_to_columns <- all(any(c("edge_from", "from") %in%
                                        colnames(object)),
                                  any(c("edge_to", "to") %in%
                                        colnames(object)))
      if (both_from_to_columns == FALSE){
        stop("An edge data frame needs both a 'from'/'edge_from' and a 'to'/'edge_to' column.")
      }
      from_column <- which(colnames(object) %in% c("edge_from", "from"))[1]
      to_column <- which(colnames(object) %in% c("edge_to", "to"))[1]
      node_ID <- c(node_ID, unique(c(object[,from_column],
                                     object[,to_column])))
}
}
  all_ID_unique <- anyDuplicated(node_ID) == 0
  if (all_ID_unique){
    return(node_ID)
  }
}
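# Minimal usage sketch with hypothetical inputs (kept as comments so the
# package source stays side-effect free); note that get_nodes() returns a
# value only when the combined IDs are unique -- overlapping inputs
# silently yield NULL:
# nodes <- data.frame(nodes = c("a", "b", "c"), stringsAsFactors = FALSE)
# get_nodes(nodes)  # "a" "b" "c"
# edges <- data.frame(from = c("a", "b"), to = c("c", "d"), stringsAsFactors = FALSE)
# get_nodes(edges)  # "a" "b" "c" "d"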
|
/R/get_nodes.R
|
no_license
|
tomroh/DiagrammeR
|
R
| false
| false
| 3,148
|
r
|
#' Get vector of node IDs
#' @description Provides information on the node IDs from one or several node data frames, edge data frames, or graph objects.
#' @param ... a collection of node data frames, edge data frames, or a single graph object.
#' @return a vector of node ID values
#' @export get_nodes
get_nodes <- function(...){
objects <- list(...)
# Determine the length of the 'objects' list
length_of_objects <- length(objects)
  # If there is more than one object supplied, check for existence of a graph object
if (length_of_objects > 1){
# Determine the classes of the first two objects
class_object_1 <- class(objects[[1]])
class_object_2 <- class(objects[[2]])
if (any("gv_graph" %in% c(class_object_1, class_object_2))){
stop("Only a single graph can be supplied.")
}
}
for (i in 1:length(objects)){
if (i == 1) node_ID <- vector(mode = "character")
object <- objects[[i]]
if (class(object) == "gv_graph"){
object_type <- "gv_graph"
}
if (class(object) == "data.frame"){
if (any(c("nodes", "node", "node_ID") %in% colnames(object))){
object_type <- "node_df"
}
if (any(c("edge_from", "edge_to", "from", "to") %in% colnames(object))){
object_type <- "edge_df"
}
}
if (object_type == "gv_graph"){
object <- object$nodes_df
if ("node" %in% colnames(object)){
nodes_column <- which("node" %in% colnames(object))
} else if ("nodes" %in% colnames(object)){
nodes_column <- which("nodes" %in% colnames(object))
} else if ("node_id" %in% colnames(object)){
nodes_column <- which("node_id" %in% colnames(object))
} else {
stop("There is no column with node ID information.")
}
node_ID <- c(node_ID, object[,nodes_column])
}
if (object_type == "node_df"){
if ("node" %in% colnames(object)){
nodes_column <- which("node" %in% colnames(object))
} else if ("nodes" %in% colnames(object)){
nodes_column <- which("nodes" %in% colnames(object))
} else if ("node_id" %in% colnames(object)){
nodes_column <- which("node_id" %in% colnames(object))
} else {
stop("There is no column with node ID information.")
}
node_ID <- c(node_ID, object[,nodes_column])
}
if (object_type == "edge_df"){
      both_from_to_columns <- all(any(c("edge_from", "from") %in%
                                        colnames(object)),
                                  any(c("edge_to", "to") %in%
                                        colnames(object)))
      if (both_from_to_columns == FALSE){
        stop("An edge data frame needs both a 'from'/'edge_from' and a 'to'/'edge_to' column.")
      }
      from_column <- which(colnames(object) %in% c("edge_from", "from"))[1]
      to_column <- which(colnames(object) %in% c("edge_to", "to"))[1]
      node_ID <- c(node_ID, unique(c(object[,from_column],
                                     object[,to_column])))
}
}
  all_ID_unique <- anyDuplicated(node_ID) == 0
  if (all_ID_unique){
    return(node_ID)
  }
}
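# Minimal usage sketch with hypothetical inputs (kept as comments so the
# package source stays side-effect free); note that get_nodes() returns a
# value only when the combined IDs are unique -- overlapping inputs
# silently yield NULL:
# nodes <- data.frame(nodes = c("a", "b", "c"), stringsAsFactors = FALSE)
# get_nodes(nodes)  # "a" "b" "c"
# edges <- data.frame(from = c("a", "b"), to = c("c", "d"), stringsAsFactors = FALSE)
# get_nodes(edges)  # "a" "b" "c" "d"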
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/match_MRNs.R
\name{match_MRNs}
\alias{match_MRNs}
\title{Match MRNs from two EMRs}
\usage{
match_MRNs(x, y)
}
\arguments{
\item{x, y}{Two data frames with columns `mrn`, `first_name`, `last_name`, and
`birth_date` (yyyy-mm-dd)}
}
\value{
A data.frame with column names with suffixes '_x' and '_y' corresponding
to the x and y inputs. The dist column gives the string distance between the
two first names.
}
\description{
Takes two data.frame objects with patient information and matches them
}
\details{
The function cleans up the last names slightly and then matches exactly
on the cleaned up last names and the birth dates. Then fuzzy matching is done
on the first name, using the \code{stringdist} package.
}
\examples{
data(x_EMR)
data(y_EMR)
match_MRNs(x_EMR, y_EMR)
}
|
/man/match_MRNs.Rd
|
no_license
|
NateByers/IUHhelpers
|
R
| false
| true
| 855
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/match_MRNs.R
\name{match_MRNs}
\alias{match_MRNs}
\title{Match MRNs from two EMRs}
\usage{
match_MRNs(x, y)
}
\arguments{
\item{x, y}{Two data frames with columns `mrn`, `first_name`, `last_name`, and
`birth_date` (yyyy-mm-dd)}
}
\value{
A data.frame with column names with suffixes '_x' and '_y' corresponding
to the x and y inputs. The dist column gives the string distance between the
two first names.
}
\description{
Takes two data.frame objects with patient information and matches them
}
\details{
The function cleans up the last names slightly and then matches exactly
on the cleaned up last names and the birth dates. Then fuzzy matching is done
on the first name, using the \code{stringdist} package.
}
\examples{
data(x_EMR)
data(y_EMR)
match_MRNs(x_EMR, y_EMR)
}
|