content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
# Assignment: ASSIGNMENT 2
# Name: OKE, Osmond
# Date: 07/07/2020

## Check your current working directory using `getwd()`
getwd()
dir()

## If the current directory does not contain the `data` directory, set the
## working directory to the project root folder (the folder should contain
## the `data` directory). Use `setwd()` if needed.
setwd("/home/jdoe/Workspaces/dsc520")

## Load the file `data/tidynomicon/person.csv` to `person_df1` using `read.csv`
## Examine the structure of `person_df1` using `str()`
person_df1 <- read.csv(file = 'data/tidynomicon/person.csv')
str(person_df1)

## R interpreted names as factors, which is not the behavior we want.
## Load the same file to person_df2 setting `stringsAsFactors` to `FALSE`.
person_df2 <- read.csv(file = 'data/tidynomicon/person.csv', stringsAsFactors = FALSE)
str(person_df2)

## Read the file `data/scores.csv` to `scores_df`
## Display summary statistics using the `summary()` function
scores_df <- read.csv(file = 'data/scores.csv')
summary(scores_df)

## Load the `readxl` library
library("readxl")

## List the worksheets of the workbook. `excel_sheets()` takes the file path
## directly -- `readxl` is a package, not a function.
excel_sheets("data/G04ResultsDetail2004-11-02.xls")

## Read the "Voter Turnout" sheet. The header is in the second row, so skip
## the first row. `read_excel()` uses `sheet` and `skip` (numeric), not
## `skiprows`, and the path must not contain a stray backtick.
voter_turnout_df1 <- read_excel("data/G04ResultsDetail2004-11-02.xls",
                                sheet = "Voter Turnout", skip = 1)
str(voter_turnout_df1)

## Read the same sheet, skipping the first two rows and assigning the column
## names manually. Character vectors are built with `c(...)` -- `[...]` is
## not valid R syntax.
voter_turnout_df2 <- read_excel("data/G04ResultsDetail2004-11-02.xls",
                                sheet = "Voter Turnout", skip = 2,
                                col_names = c("ward_precint", "ballots_cast",
                                              "registered_voters", "voter_turnout"))
str(voter_turnout_df2)

## Load the `DBI` library
library(DBI)

## Create a connection to the on-disk database file (the exercise asks for
## `data/tidynomicon/example.db`, not an empty in-memory database).
db <- dbConnect(RSQLite::SQLite(), "data/tidynomicon/example.db")

## Query the Person table and look at the first few rows.
person_df <- dbGetQuery(db, "SELECT * FROM Person;")
head(person_df)

## List the tables
table_names <- dbListTables(db)

## Read all of the tables at once: iterate over the table *names*, applying
## `dbReadTable` to each, passing the connection via `conn =`.
tables <- lapply(table_names, dbReadTable, conn = db)
tables

## Disconnect from the database
dbDisconnect(db)

## Import the `jsonlite` library
library(jsonlite)

## Convert the scores_df dataframe to JSON
toJSON(scores_df)

## Convert the scores dataframe to JSON with the `pretty=TRUE` option
toJSON(scores_df, pretty = TRUE)
| /assignments/exercise_02_OkeOsmond.R | permissive | OsmondOke/dsc520 | R | false | false | 3,861 | r | # Assignment: ASSIGNMENT 2
# Name: OKE, Osmond
# Date: 07/07/2020

## Check your current working directory using `getwd()`
getwd()
dir()

## If the current directory does not contain the `data` directory, set the
## working directory to the project root folder (the folder should contain
## the `data` directory). Use `setwd()` if needed.
setwd("/home/jdoe/Workspaces/dsc520")

## Load the file `data/tidynomicon/person.csv` to `person_df1` using `read.csv`
## Examine the structure of `person_df1` using `str()`
person_df1 <- read.csv(file = 'data/tidynomicon/person.csv')
str(person_df1)

## R interpreted names as factors, which is not the behavior we want.
## Load the same file to person_df2 setting `stringsAsFactors` to `FALSE`.
person_df2 <- read.csv(file = 'data/tidynomicon/person.csv', stringsAsFactors = FALSE)
str(person_df2)

## Read the file `data/scores.csv` to `scores_df`
## Display summary statistics using the `summary()` function
scores_df <- read.csv(file = 'data/scores.csv')
summary(scores_df)

## Load the `readxl` library
library("readxl")

## List the worksheets of the workbook. `excel_sheets()` takes the file path
## directly -- `readxl` is a package, not a function.
excel_sheets("data/G04ResultsDetail2004-11-02.xls")

## Read the "Voter Turnout" sheet. The header is in the second row, so skip
## the first row. `read_excel()` uses `sheet` and `skip` (numeric), not
## `skiprows`, and the path must not contain a stray backtick.
voter_turnout_df1 <- read_excel("data/G04ResultsDetail2004-11-02.xls",
                                sheet = "Voter Turnout", skip = 1)
str(voter_turnout_df1)

## Read the same sheet, skipping the first two rows and assigning the column
## names manually. Character vectors are built with `c(...)` -- `[...]` is
## not valid R syntax.
voter_turnout_df2 <- read_excel("data/G04ResultsDetail2004-11-02.xls",
                                sheet = "Voter Turnout", skip = 2,
                                col_names = c("ward_precint", "ballots_cast",
                                              "registered_voters", "voter_turnout"))
str(voter_turnout_df2)

## Load the `DBI` library
library(DBI)

## Create a connection to the on-disk database file (the exercise asks for
## `data/tidynomicon/example.db`, not an empty in-memory database).
db <- dbConnect(RSQLite::SQLite(), "data/tidynomicon/example.db")

## Query the Person table and look at the first few rows.
person_df <- dbGetQuery(db, "SELECT * FROM Person;")
head(person_df)

## List the tables
table_names <- dbListTables(db)

## Read all of the tables at once: iterate over the table *names*, applying
## `dbReadTable` to each, passing the connection via `conn =`.
tables <- lapply(table_names, dbReadTable, conn = db)
tables

## Disconnect from the database
dbDisconnect(db)

## Import the `jsonlite` library
library(jsonlite)

## Convert the scores_df dataframe to JSON
toJSON(scores_df)

## Convert the scores dataframe to JSON with the `pretty=TRUE` option
toJSON(scores_df, pretty = TRUE)
|
################################################################################
#' Impute NAs using mice.
#'
#' This function fills missing NA values using common imputation methods.
#' The imputation method can be chosen by setting the meth parameter. For a
#' list of all the available imputation methods, please check the
#' documentation of the mice::mice function (?mice::mice).
#'
#' @param data a dplyr dataframe containing the data whose NAs are imputed
#' @param seed random seed for reproducibility purposes
#' @param exclude_variables variables excluded from the imputation process. A vector of characters.
#' @param m Number of multiple imputations. The default is m=5.
#' @param maxit A scalar giving the number of iterations. The default is 50.
#' @param meth method used for imputation. See mice::mice function description for more information.
#' @param verbose Be verbose?
#' @param ... other parameters passed directly to the mice::mice function
#' @importFrom dplyr select bind_cols as_tibble %>% all_of
#' @return A dplyr dataframe with NAs replaced; the full mice object is kept
#'   in the "imputed_dataset" attribute for later diagnostics.
#' @export
#'
approximate_NAs <- function(data, seed, exclude_variables = c("lon", "lat", "id_pixel"), m = 5, maxit = 50, meth = "pmm", verbose = FALSE, ...)
{
    # Fail fast with a clear message: requireNamespace() only returns FALSE
    # when the package is missing, it does not error by itself, so the
    # original call silently ignored a missing dependency.
    if (!requireNamespace("mice", quietly = TRUE)) {
        stop("Package 'mice' is required by approximate_NAs(); please install it.",
             call. = FALSE)
    }
    # Drop identifier columns: they must not take part in the imputation.
    data_to_fill <- data %>% select(!all_of(exclude_variables))
    # Impute data using mice
    tempData <- mice::mice(data_to_fill, m = m, maxit = maxit, meth = meth,
                           seed = seed, printFlag = verbose, ...)
    # Extract the first completed (filled) dataset.
    filled_data <- mice::complete(tempData, 1)
    filled_data <- as_tibble(filled_data)
    # Re-attach the excluded identifier columns in front.
    filled_data <- data %>% select(all_of(exclude_variables)) %>% bind_cols(filled_data)
    # Keep the mice object for diagnostic plots (see plot_density_imputed_na).
    attr(filled_data, "imputed_dataset") <- tempData
    return(filled_data)
}
################################################################################
#' Plot the density of imputed NAs vs actual data
#'
#' Density plot of imputed NAs vs actual data.
#'
#' @param data dataset obtained from the approximate_NAs function;
#'   must carry an "imputed_dataset" attribute.
#' @importFrom lattice densityplot stripplot
#' @export
#'
plot_density_imputed_na <- function(data)
{
    # The mice object is stored by approximate_NAs(); guard against being
    # called on a dataframe that was never imputed, which would otherwise
    # produce a confusing downstream error from lattice.
    tempData <- attr(data, "imputed_dataset")
    if (is.null(tempData)) {
        stop("No 'imputed_dataset' attribute found: run approximate_NAs() first.",
             call. = FALSE)
    }
    # First density plot
    density_plot_1 <- densityplot(tempData)
    # Second density plot
    density_plot2 <- stripplot(tempData)
    # Print the first plot
    print(density_plot_1)
    # Wait for user input
    readline(prompt = "Press anykey for next plot... It may take a while if there's a lot of data...")
    # Print the second plot
    print(density_plot2)
}
################################################################################
#' Standardize data before running kmeans
#'
#' Standardize the data before running the kmeans analysis function by
#' dividing every non-excluded column by its range. The output is a
#' dataframe ready to be used for the kmeans analysis.
#'
#' @param data a dplyr dataframe containing the data to be standardized
#' @param exclude_variables variables to be excluded from the standardization process. A vector of characters.
#' @importFrom dplyr select %>% as_tibble bind_cols all_of
#' @return A dplyr dataframe with the column ranges kept in the "rge"
#'   attribute for later un-scaling.
#' @export
#'
standardize_data <- function(data, exclude_variables = c("lon", "lat", "id_pixel"))
{
    # Split off the identifier columns; only the remaining ones are scaled.
    vars_to_scale <- data %>% select(!all_of(exclude_variables))
    # Range of every column. NOTE(review): a constant column has range 0 and
    # would yield Inf/NaN after division -- assumes all columns vary; confirm.
    rge <- sapply(vars_to_scale, function(col) diff(range(col)))
    # Divide each column by its range.
    rescaled <- sweep(x = vars_to_scale, MARGIN = 2, STATS = rge, FUN = "/")
    rescaled <- as_tibble(rescaled)
    # Put the identifier columns back in front.
    rescaled <- data %>% select(all_of(exclude_variables)) %>% bind_cols(rescaled)
    # Keep the ranges so kmeans_analysis() can un-scale the cluster centers.
    attr(rescaled, "rge") <- rge
    return(rescaled)
}
################################################################################
#' Run kmeans analysis
#'
#' Note: this function takes as argument all the arguments used by the stats::kmeans function.
#'
#' @param x a dataframe or a matrix containing the data. The data should be
#' standardized for better results
#' @param n_centers Number of clusters to be used. A vector such as 2:5
#' @param nstart how many random sets should be chosen?
#' @param seed Random seed set for reproducibility purposes. Numeric, NULL by default. (Integer)
#' @param exclude_variables variables to exclude from kmeans analysis. A vector of characters
#' @param ... Other arguments accepted by the stats::kmeans function.
#' @importFrom clusterSim index.G1
#' @importFrom dplyr select all_of
#' @return A named list of kmeans objects, one per value of n_centers. Each
#'   object gains a calinski_harabasz_index element and, when the input
#'   carries the "rge" attribute set by standardize_data(), the un-scaled
#'   centers in centers_not_scaled.
#'
#' @export
#'
kmeans_analysis <- function(x, n_centers, nstart = 1, seed = NULL, exclude_variables = c("lon", "lat", "id_pixel"), ...)
{
    # Capture the scaling ranges *before* any transformation, in case
    # select() drops custom attributes.
    rge <- attr(x, "rge")
    # Drop columns not needed. Direct call instead of %>%: the pipe was used
    # without being listed in @importFrom, which breaks in a package context.
    x <- select(x, !all_of(exclude_variables))
    # Set random seed if seed is not NULL
    if (!is.null(seed)) { set.seed(seed) }
    # Preallocate the output list.
    results <- vector("list", length(n_centers))
    # Run the model for each value in n_centers
    k <- 1
    for (i in n_centers)
    {
        # Apply kmeans
        model <- kmeans(x = x, centers = i, nstart = nstart, ...)
        # Add Calinski-Harabasz index to model results
        model$calinski_harabasz_index <- index.G1(x, model$cluster)
        # Un-scale the centers column-wise. The original `centers * rge`
        # recycled the range vector in column-major order down the matrix,
        # multiplying the wrong columns whenever more than one cluster was
        # fitted; sweep() applies rge per column, mirroring standardize_data.
        if (!is.null(rge)) {
            model$centers_not_scaled <- sweep(model$centers, MARGIN = 2,
                                              STATS = rge, FUN = "*")
        }
        # Add results to output list.
        results[[k]] <- model
        k <- k + 1
    }
    # Set names to output list
    names(results) <- as.character(n_centers)
    # Return
    return(results)
}
################################################################################
#' Plot results.
#'
#' Plot the number of clusters against the Calinski-Harabasz index.
#'
#' @param kmeans_results a list of results obtained from the function kmeans_analysis
#' @importFrom ggplot2 ggplot geom_point geom_line xlab ylab aes
#' @export
#'
plot_results <- function(kmeans_results)
{
    # The number of clusters is encoded in the list names (see kmeans_analysis).
    n_clusters <- as.numeric(names(kmeans_results))
    # Calinski-Harabasz index of each fitted model, as a numeric vector.
    ch_index <- vapply(kmeans_results,
                       function(res) res$calinski_harabasz_index,
                       numeric(1))
    # Build the plot: index vs. number of clusters.
    plot_df <- data.frame(x = n_clusters, y = ch_index)
    ggp <- ggplot(data = plot_df, mapping = aes(x = x, y = y)) +
        geom_point() +
        geom_line() +
        xlab("Number of clusters") +
        ylab("Calinski-Harabasz pseudo F-statistic")
    print(ggp)
}
################################################################################
#' Extract results from the kmeans analysis
#'
#' Extract the best result from the kmeans analysis. The best result is the
#' one characterized by the highest Calinski-Harabasz index.
#'
#' @param kmeans_results a list of results obtained from the function kmeans_analysis
#' @return A list with the best results (as defined above) of the kmeans analysis.
#' @export
#'
extract_results <- function(kmeans_results)
{
    # Candidate cluster counts, stored as the list names.
    cluster_counts <- names(kmeans_results)
    # Calinski-Harabasz index of every candidate solution.
    ch_index <- sapply(kmeans_results, function(res) res$calinski_harabasz_index)
    # Keep the solution whose index is largest (ties: first maximum wins,
    # matching which.max semantics).
    winner <- cluster_counts[which.max(ch_index)]
    return(kmeans_results[[winner]])
}
################################################################################
#' Export clustering results to a .csv file
#'
#' Attach the cluster assignment from the final kmeans result to the data,
#' join it with the unique pixel identifiers of the main dataframe, and
#' write the selected variables to a .csv file. (The previous title and
#' description were copy-pasted from extract_results.)
#'
#' @param x dataframe used in the kmeans analysis (prior to standardization)
#' @param nc_dataframe main dataframe
#' @param final_kmeans_results list of the final results obtained from the function extract_results
#' @param filename name of the .csv file to export
#' @param export_vars variables to export. A list of characters
#' @param join_by variables to join by. A vector of characters
#' @param unique_id a list of unique identifiers
#' @importFrom dplyr %>% select full_join distinct arrange all_of across
#' @export
#'
export_data <- function(x, nc_dataframe, final_kmeans_results, filename, export_vars = c("lon", "lat", "id_pixel", "gruppo"), join_by = c("id_pixel", "lon", "lat"), unique_id = c("lon", "lat", "id_pixel"))
{
# Attach the cluster assignment as the "gruppo" column
x$gruppo <- final_kmeans_results$cluster
# Unique lon/lat/id_pixel combinations from the main dataframe
new_nc <- nc_dataframe %>% select(all_of(unique_id)) %>% distinct()
# Keep only the export variables, join against the full pixel set
# (full_join so pixels absent from x still appear), then sort by id_pixel
new_x <- x %>% select(all_of(export_vars)) %>% full_join(new_nc, by = join_by) %>% arrange(across(id_pixel))
# Write data to csv
write.csv(new_x, filename, row.names = FALSE)
# Report success to the console
print("Data written to file successfully!")
}
| /R/kmeans-analysis.R | no_license | mick001/qchlorophyll | R | false | false | 9,451 | r | ################################################################################
#' Impute NAs using mice.
#'
#' This function fills missing NA values using common imputation methods.
#' The imputation method can be chosen by setting the meth parameter. For a
#' list of all the available imputation methods, please check the
#' documentation of the mice::mice function (?mice::mice).
#'
#' @param data a dplyr dataframe containing the data whose NAs are imputed
#' @param seed random seed for reproducibility purposes
#' @param exclude_variables variables excluded from the imputation process. A vector of characters.
#' @param m Number of multiple imputations. The default is m=5.
#' @param maxit A scalar giving the number of iterations. The default is 50.
#' @param meth method used for imputation. See mice::mice function description for more information.
#' @param verbose Be verbose?
#' @param ... other parameters passed directly to the mice::mice function
#' @importFrom dplyr select bind_cols as_tibble %>% all_of
#' @return A dplyr dataframe with NAs replaced; the full mice object is kept
#'   in the "imputed_dataset" attribute for later diagnostics.
#' @export
#'
approximate_NAs <- function(data, seed, exclude_variables = c("lon", "lat", "id_pixel"), m = 5, maxit = 50, meth = "pmm", verbose = FALSE, ...)
{
    # Fail fast with a clear message: requireNamespace() only returns FALSE
    # when the package is missing, it does not error by itself, so the
    # original call silently ignored a missing dependency.
    if (!requireNamespace("mice", quietly = TRUE)) {
        stop("Package 'mice' is required by approximate_NAs(); please install it.",
             call. = FALSE)
    }
    # Drop identifier columns: they must not take part in the imputation.
    data_to_fill <- data %>% select(!all_of(exclude_variables))
    # Impute data using mice
    tempData <- mice::mice(data_to_fill, m = m, maxit = maxit, meth = meth,
                           seed = seed, printFlag = verbose, ...)
    # Extract the first completed (filled) dataset.
    filled_data <- mice::complete(tempData, 1)
    filled_data <- as_tibble(filled_data)
    # Re-attach the excluded identifier columns in front.
    filled_data <- data %>% select(all_of(exclude_variables)) %>% bind_cols(filled_data)
    # Keep the mice object for diagnostic plots (see plot_density_imputed_na).
    attr(filled_data, "imputed_dataset") <- tempData
    return(filled_data)
}
################################################################################
#' Plot the density of imputed NAs vs actual data
#'
#' Density plot of imputed NAs vs actual data.
#'
#' @param data dataset obtained from the approximate_NAs function;
#'   must carry an "imputed_dataset" attribute.
#' @importFrom lattice densityplot stripplot
#' @export
#'
plot_density_imputed_na <- function(data)
{
    # The mice object is stored by approximate_NAs(); guard against being
    # called on a dataframe that was never imputed, which would otherwise
    # produce a confusing downstream error from lattice.
    tempData <- attr(data, "imputed_dataset")
    if (is.null(tempData)) {
        stop("No 'imputed_dataset' attribute found: run approximate_NAs() first.",
             call. = FALSE)
    }
    # First density plot
    density_plot_1 <- densityplot(tempData)
    # Second density plot
    density_plot2 <- stripplot(tempData)
    # Print the first plot
    print(density_plot_1)
    # Wait for user input
    readline(prompt = "Press anykey for next plot... It may take a while if there's a lot of data...")
    # Print the second plot
    print(density_plot2)
}
################################################################################
#' Standardize data before running kmeans
#'
#' Standardize the data before running the kmeans analysis function by
#' dividing every non-excluded column by its range. The output is a
#' dataframe ready to be used for the kmeans analysis.
#'
#' @param data a dplyr dataframe containing the data to be standardized
#' @param exclude_variables variables to be excluded from the standardization process. A vector of characters.
#' @importFrom dplyr select %>% as_tibble bind_cols all_of
#' @return A dplyr dataframe with the column ranges kept in the "rge"
#'   attribute for later un-scaling.
#' @export
#'
standardize_data <- function(data, exclude_variables = c("lon", "lat", "id_pixel"))
{
    # Split off the identifier columns; only the remaining ones are scaled.
    vars_to_scale <- data %>% select(!all_of(exclude_variables))
    # Range of every column. NOTE(review): a constant column has range 0 and
    # would yield Inf/NaN after division -- assumes all columns vary; confirm.
    rge <- sapply(vars_to_scale, function(col) diff(range(col)))
    # Divide each column by its range.
    rescaled <- sweep(x = vars_to_scale, MARGIN = 2, STATS = rge, FUN = "/")
    rescaled <- as_tibble(rescaled)
    # Put the identifier columns back in front.
    rescaled <- data %>% select(all_of(exclude_variables)) %>% bind_cols(rescaled)
    # Keep the ranges so kmeans_analysis() can un-scale the cluster centers.
    attr(rescaled, "rge") <- rge
    return(rescaled)
}
################################################################################
#' Run kmeans analysis
#'
#' Note: this function takes as argument all the arguments used by the stats::kmeans function.
#'
#' @param x a dataframe or a matrix containing the data. The data should be
#' standardized for better results
#' @param n_centers Number of clusters to be used. A vector such as 2:5
#' @param nstart how many random sets should be chosen?
#' @param seed Random seed set for reproducibility purposes. Numeric, NULL by default. (Integer)
#' @param exclude_variables variables to exclude from kmeans analysis. A vector of characters
#' @param ... Other arguments accepted by the stats::kmeans function.
#' @importFrom clusterSim index.G1
#' @importFrom dplyr select all_of
#' @return A named list of kmeans objects, one per value of n_centers. Each
#'   object gains a calinski_harabasz_index element and, when the input
#'   carries the "rge" attribute set by standardize_data(), the un-scaled
#'   centers in centers_not_scaled.
#'
#' @export
#'
kmeans_analysis <- function(x, n_centers, nstart = 1, seed = NULL, exclude_variables = c("lon", "lat", "id_pixel"), ...)
{
    # Capture the scaling ranges *before* any transformation, in case
    # select() drops custom attributes.
    rge <- attr(x, "rge")
    # Drop columns not needed. Direct call instead of %>%: the pipe was used
    # without being listed in @importFrom, which breaks in a package context.
    x <- select(x, !all_of(exclude_variables))
    # Set random seed if seed is not NULL
    if (!is.null(seed)) { set.seed(seed) }
    # Preallocate the output list.
    results <- vector("list", length(n_centers))
    # Run the model for each value in n_centers
    k <- 1
    for (i in n_centers)
    {
        # Apply kmeans
        model <- kmeans(x = x, centers = i, nstart = nstart, ...)
        # Add Calinski-Harabasz index to model results
        model$calinski_harabasz_index <- index.G1(x, model$cluster)
        # Un-scale the centers column-wise. The original `centers * rge`
        # recycled the range vector in column-major order down the matrix,
        # multiplying the wrong columns whenever more than one cluster was
        # fitted; sweep() applies rge per column, mirroring standardize_data.
        if (!is.null(rge)) {
            model$centers_not_scaled <- sweep(model$centers, MARGIN = 2,
                                              STATS = rge, FUN = "*")
        }
        # Add results to output list.
        results[[k]] <- model
        k <- k + 1
    }
    # Set names to output list
    names(results) <- as.character(n_centers)
    # Return
    return(results)
}
################################################################################
#' Plot results.
#'
#' Plot the number of clusters against the Calinski-Harabasz index.
#'
#' @param kmeans_results a list of results obtained from the function kmeans_analysis
#' @importFrom ggplot2 ggplot geom_point geom_line xlab ylab aes
#' @export
#'
plot_results <- function(kmeans_results)
{
    # The number of clusters is encoded in the list names (see kmeans_analysis).
    n_clusters <- as.numeric(names(kmeans_results))
    # Calinski-Harabasz index of each fitted model, as a numeric vector.
    ch_index <- vapply(kmeans_results,
                       function(res) res$calinski_harabasz_index,
                       numeric(1))
    # Build the plot: index vs. number of clusters.
    plot_df <- data.frame(x = n_clusters, y = ch_index)
    ggp <- ggplot(data = plot_df, mapping = aes(x = x, y = y)) +
        geom_point() +
        geom_line() +
        xlab("Number of clusters") +
        ylab("Calinski-Harabasz pseudo F-statistic")
    print(ggp)
}
################################################################################
#' Extract results from the kmeans analysis
#'
#' Extract the best result from the kmeans analysis. The best result is the
#' one characterized by the highest Calinski-Harabasz index.
#'
#' @param kmeans_results a list of results obtained from the function kmeans_analysis
#' @return A list with the best results (as defined above) of the kmeans analysis.
#' @export
#'
extract_results <- function(kmeans_results)
{
    # Candidate cluster counts, stored as the list names.
    cluster_counts <- names(kmeans_results)
    # Calinski-Harabasz index of every candidate solution.
    ch_index <- sapply(kmeans_results, function(res) res$calinski_harabasz_index)
    # Keep the solution whose index is largest (ties: first maximum wins,
    # matching which.max semantics).
    winner <- cluster_counts[which.max(ch_index)]
    return(kmeans_results[[winner]])
}
################################################################################
#' Export clustering results to a .csv file
#'
#' Attach the cluster assignment from the final kmeans result to the data,
#' join it with the unique pixel identifiers of the main dataframe, and
#' write the selected variables to a .csv file. (The previous title and
#' description were copy-pasted from extract_results.)
#'
#' @param x dataframe used in the kmeans analysis (prior to standardization)
#' @param nc_dataframe main dataframe
#' @param final_kmeans_results list of the final results obtained from the function extract_results
#' @param filename name of the .csv file to export
#' @param export_vars variables to export. A list of characters
#' @param join_by variables to join by. A vector of characters
#' @param unique_id a list of unique identifiers
#' @importFrom dplyr %>% select full_join distinct arrange all_of across
#' @export
#'
export_data <- function(x, nc_dataframe, final_kmeans_results, filename, export_vars = c("lon", "lat", "id_pixel", "gruppo"), join_by = c("id_pixel", "lon", "lat"), unique_id = c("lon", "lat", "id_pixel"))
{
# Attach the cluster assignment as the "gruppo" column
x$gruppo <- final_kmeans_results$cluster
# Unique lon/lat/id_pixel combinations from the main dataframe
new_nc <- nc_dataframe %>% select(all_of(unique_id)) %>% distinct()
# Keep only the export variables, join against the full pixel set
# (full_join so pixels absent from x still appear), then sort by id_pixel
new_x <- x %>% select(all_of(export_vars)) %>% full_join(new_nc, by = join_by) %>% arrange(across(id_pixel))
# Write data to csv
write.csv(new_x, filename, row.names = FALSE)
# Report success to the console
print("Data written to file successfully!")
}
|
# Load libraries
library("dplyr")
library("ggplot2")
library("lattice")

# Extract dataset
unzip(zipfile = "./repdata_data_activity.zip")

# Loading and preprocessing the data
#-------------------
# 1. Load the data (i.e. read.csv())
# read.csv() keeps the header row, so there is no need to skip it and
# re-assign the column names by hand as the original did.
dataset <- read.csv("activity.csv")
# Perform occular inspection on the dataset if it has been imported correctly
# View(dataset)

#-----------------
# 2. Process/transform the data (if necessary) into a format suitable for
# your analysis. na.rm = FALSE keeps days made entirely of NAs out of the
# histogram (their sum is NA and hist() drops it).
datasetGroupedByDaySums <- dataset %>%
    group_by(date) %>%
    summarise(Sum = sum(steps, na.rm = FALSE))
# Plot the histogram
hist(datasetGroupedByDaySums$Sum, xlab = "Steps per day",
     ylab = "Number of Days", main = "Total steps per day")

#---------------------------
# 3. Calculate and report the mean and median of the total number of steps
# taken per day
meanOfStepsPerDay <- mean(datasetGroupedByDaySums$Sum, na.rm = TRUE)
medianOfStepsPerDay <- median(datasetGroupedByDaySums$Sum, na.rm = TRUE)
print(meanOfStepsPerDay)
print(medianOfStepsPerDay)

#-------------------------
# What is the average daily activity pattern?
#------------------------
# 1. Time series plot of the 5-minute interval (x-axis) and the average
# number of steps taken, averaged across all days (y-axis)
datasetGroupedByIntervalMeans <- dataset %>%
    filter(!is.na(steps)) %>%
    group_by(interval) %>%
    summarise(Mean = mean(steps))
ggplot(datasetGroupedByIntervalMeans, aes(x = interval, y = Mean)) +
    geom_line(color = "blue")

#------------------------
# 2. Which 5-minute interval, on average across all days, contains the
# maximum number of steps?
head(arrange(datasetGroupedByIntervalMeans, desc(Mean)), 1)

# Imputing missing values
# ----------------
# 1. Total number of missing values in the dataset
sum(is.na(dataset$steps))

# -----------------
# 2. Strategy for filling in missing values: use the mean of the whole
# dataset so the plugged-in values do not heavily distort the distribution.

# -------------------------
# 3. New dataset equal to the original but with the missing data filled in
filledInNADataset <- dataset
filledInNADataset$steps[is.na(filledInNADataset$steps)] <- mean(filledInNADataset$steps, na.rm = TRUE)

# --------------------
# 4. Histogram, mean and median of total steps per day: original vs imputed
datasetGroupedByDaySums <- dataset %>% group_by(date) %>% summarise(Sum = sum(steps, na.rm = FALSE))
hist(datasetGroupedByDaySums$Sum, xlab = "Steps per day", ylab = "Number of Days", main = "Total steps per day")
mean(datasetGroupedByDaySums$Sum, na.rm = TRUE)
median(datasetGroupedByDaySums$Sum, na.rm = TRUE)

datasetWithoutNAGroupedByDaySums <- filledInNADataset %>% group_by(date) %>% summarise(Sum = sum(steps))
hist(datasetWithoutNAGroupedByDaySums$Sum, xlab = "Steps per day", ylab = "Number of Days", main = "Total steps per day")
mean(datasetWithoutNAGroupedByDaySums$Sum)
median(datasetWithoutNAGroupedByDaySums$Sum)

# Are there differences in activity patterns between weekdays and weekends?
#----------------------
# 1. New factor variable with two levels - "weekday" and "weekend"
# (derive from the same dataframe being mutated, not from `dataset`)
filledInNADataset$weekdayname <- weekdays(as.POSIXlt(filledInNADataset$date))
filledInNADataset$weekdayname[filledInNADataset$weekdayname %in% c("Saturday", "Sunday")] <- "weekend"
filledInNADataset$weekdayname[filledInNADataset$weekdayname != "weekend"] <- "weekday"
# BUG FIX: the original inspected an undefined object `test`.
View(filledInNADataset)

#-----------------------
# 2. Panel time-series plot (type = "l") of the 5-minute interval (x-axis)
# vs average steps (y-axis), averaged across weekday days or weekend days
plotNewDataframe <- aggregate(steps ~ weekdayname + interval, data = filledInNADataset, FUN = mean)
xyplot(steps ~ interval | factor(weekdayname),
       layout = c(1, 2),
       xlab = "Interval",
       ylab = "Number of steps",
       type = "l",
       lty = 1,
       data = plotNewDataframe)
| /script.R | no_license | GlennMatias/RepData_PeerAssessment1 | R | false | false | 5,273 | r | #load library
library("dplyr")
library("ggplot2")
library("lattice")
# Extract dataset
unzip(zipfile = "./repdata_data_activity.zip")
# Load the dataset
# Loading and preprocessing the data
#-------------------
# 1. Load the data (i.e. \color{red}{\verb|read.csv()|}read.csv())
#
dataset = read.table("activity.csv", sep = ",", skip=1)
# Perform occular inspection on the dataset if it has been imported correctly
# View(dataset)
#Set dataset column names
colnames(dataset) = c("steps","date","interval")
#-----------------
# 2. Process/transform the data (if necessary) into a format suitable for your analysis
#
datasetGroupedByDaySums = dataset %>% group_by(date) %>% summarise(Sum = sum(steps, na.rm=FALSE))
#Plot the histogram
hist(datasetGroupedByDaySums$Sum, xlab="Steps per day", ylab="Number of Days", main="Total steps per day")
#---------------------------
# 3. Calculate and report the mean and median of the total number of steps taken per day
#
meanOfStepsPerDay <- mean(datasetGroupedByDaySums$Sum, na.rm = TRUE)
medianOfStepsPerDay <- median(datasetGroupedByDaySums$Sum, na.rm = TRUE)
# View() shows the values in the data viewer (interactive sessions only)
View(meanOfStepsPerDay)
View(medianOfStepsPerDay)
#-------------------------
# What is the average daily activity pattern?
#------------------------
# 1. Make a time series plot of the 5-minute interval (x-axis) and the average
# number of steps taken, averaged across all days (y-axis).
# NA step counts are filtered out first so that every interval mean is defined.
datasetGroupedByIntervalMeans = dataset %>% filter(!is.na(steps)) %>% group_by(interval) %>% summarise(Mean = mean(steps, na.rm=FALSE))
View(datasetGroupedByIntervalMeans)
ggplot(datasetGroupedByIntervalMeans, aes(x=interval, y=Mean)) +
geom_line(color = "blue")
#------------------------
# 2. Which 5-minute interval, on average across all the days in the dataset,
# contains the maximum number of steps?
# Sort the interval means in decreasing order and show the top row.
View(head(arrange(datasetGroupedByIntervalMeans,desc(datasetGroupedByIntervalMeans$Mean)),1))
# Imputing missing values
# Note that there are a number of days/intervals where there are missing values (coded as \color{red}{\verb|NA|}NA). The presence of missing days may introduce bias into some calculations or summaries of the data.
# ----------------
# 1. Calculate and report the total number of missing values
# in the dataset (i.e. the total number of rows with NAs)
#
sum(is.na(dataset$steps))
# -----------------
# 2. Devise a strategy for filling in all of the missing values in the dataset. The strategy does not need to be sophisticated. For example, you could use the mean/median for that day, or the mean for that 5-minute interval, etc.
#
# I will use the average of the whole dataset so that the plugged-in values won't heavily impact the whole dataset
# -------------------------
# 3. Create a new dataset that is equal to the original dataset but with the missing data filled in.
#
# Copy the original dataset, then replace every NA step count with the
# overall mean number of steps.
filledInNADataset = dataset
filledInNADataset$steps[is.na(filledInNADataset$steps)] <- mean(filledInNADataset$steps, na.rm=TRUE)
# --------------------
# 4. Make a histogram of the total number of
# steps taken each day and Calculate and report
# the mean and median total number of steps taken
# per day. Do these values differ from the estimates
# from the first part of the assignment?
# What is the impact of imputing missing data on
# the estimates of the total daily number of steps?
# First recompute the daily totals on the ORIGINAL data (with NAs) so the
# two histograms / summary statistics can be compared side by side.
datasetGroupedByDaySums = dataset %>% group_by(date) %>% summarise(Sum = sum(steps, na.rm=FALSE))
hist(datasetGroupedByDaySums$Sum, xlab="Steps per day", ylab="Number of Days", main="Total steps per day")
mean(datasetGroupedByDaySums$Sum, na.rm = TRUE)
median(datasetGroupedByDaySums$Sum, na.rm = TRUE)
# Same summaries on the imputed data; no na.rm needed since all NAs are filled.
datasetWithoutNAGroupedByDaySums = filledInNADataset %>% group_by(date) %>% summarise(Sum = sum(steps, na.rm=FALSE))
hist(datasetWithoutNAGroupedByDaySums$Sum, xlab="Steps per day", ylab="Number of Days", main="Total steps per day")
mean(datasetWithoutNAGroupedByDaySums$Sum)
median(datasetWithoutNAGroupedByDaySums$Sum)
#
# Are there differences in activity patterns between weekdays and weekends?
# Use the dataset with the filled-in missing values for this part.
#----------------------
# 1. Create a new factor variable in the dataset with two levels - "weekday"
# and "weekend" indicating whether a given date is a weekday or weekend day.
#
# Derive the day name from the imputed dataset's own date column, then
# collapse Saturday/Sunday to "weekend" and everything else to "weekday".
# NOTE(review): weekdays() is locale-dependent; this assumes an English locale.
filledInNADataset$weekdayname = weekdays(as.POSIXlt(filledInNADataset$date))
filledInNADataset$weekdayname[filledInNADataset$weekdayname=="Saturday" | filledInNADataset$weekdayname=="Sunday"] = "weekend"
filledInNADataset$weekdayname[filledInNADataset$weekdayname!="weekend"] = "weekday"
# BUG FIX: the original called View(test), but no object `test` exists; the
# intent was to inspect the augmented dataset.
View(filledInNADataset)
#-----------------------
# 2. Make a panel plot containing a time series plot (type = "l") of the
# 5-minute interval (x-axis) and the average number of steps taken, averaged
# across all weekday days or weekend days (y-axis).
#
# Average steps within each (day type, interval) cell, then draw one line
# panel per day type.
plotNewDataframe <- aggregate(steps ~ weekdayname+interval, data=filledInNADataset, FUN=mean)
xyplot(steps ~ interval | factor(weekdayname),
       layout = c(1, 2),
       xlab = "Interval",
       ylab = "Number of steps",
       type = "l",
       lty = 1,
       data = plotNewDataframe)
|
# Contingency Analysis
# Mary Killilea
# Fall 2015
getwd()
########### Example 9.2 Aspirin and Cancer
# odds ratios
# Read the cancer data into R
cancer <- read.csv("ABD_all_data/chapter09/chap09e2AspirinCancer.csv")
head(cancer)
# Create a 2x2 contingency table: cancer outcome x aspirin treatment
cancerTable <- table(cancer$cancer, cancer$aspirinTreatment)
cancerTable
# Create a mosaic plot (table transposed so treatment forms the x-axis)
mosaicplot( t(cancerTable), col = c("firebrick", "goldenrod1"), cex.axis = 1,
sub = "Aspirin treatment", ylab = "Relative frequency", main = "")
??mosaicplot
# Odds ratios with the 95% confidence interval can be calculated
# using a library called epitools.
# IMPROVEMENT: only install epitools when it is missing; the original
# unconditionally ran install.packages() on every execution, which is slow
# and requires a network connection.
if (!requireNamespace("epitools", quietly = TRUE)) {
  install.packages("epitools", dependencies = TRUE)
}
library(epitools)
# The odds ratio command only works if the library is installed
oddsratio(cancerTable, method = "wald")
# $measure[-1,] drops the first row of the measure matrix (extra information)
oddsratio(cancerTable, method = "wald")$measure[-1,]
# How could you calculate the odds ratio without the above library, straight
# from the contingency table?
# Calculate relative risk using the epitools package.
# The layout expected by the riskratio function is the complete opposite of
# the layout used in the book.
# To use the command with a contingency table in book style (such as cancerTable),
# we need to flip (transpose) the table and reverse the order of the rows.
# We can do this all at once with the following arguments to the riskratio function.
riskratio(t(cancerTable), rev = "both", method = "wald")
riskratio(t(cancerTable), method = "wald", rev = "both")$measure[-1,]
# Notice that the result differs slightly from the relative risk value
# given in the book for these same data (1.007) because rounding error
# here is reduced.
########### Example 9.4
#
# Contingency tests: used when both variables are categorical,
# to test whether one variable is "contingent" on the other.
# The chi-square contingency test is the most commonly used test of association
# between 2 categorical variables.
# It tests the "Goodness of Fit" (GOF) of the data to the null model
# (null model: the variables are independent).
# Chapter 9 Example 3
worm <- read.csv( "ABD_all_data/chapter09/chap09e3ParasiteBrainWarp.csv" )
head(worm)
# What is wrong with this data? It didn't have the column title "fate"; mislabeled.
# We have made a correction to the data set.
worm <- read.csv( "ABD_all_data/chapter09/chap09e3ParasiteBrainWarp_corrected.csv" )
head(worm)
# Set the preferred order of infection categories in tables and graphs.
worm$infection <- factor(worm$infection, levels = c("uninfected", "light", "high"))
# Create a contingency table: fate x infection level
wormTable <- table(worm$fate, worm$infection)
wormTable
addmargins(wormTable)
# Transpose the table and draw a mosaic plot
mosaicplot( t(wormTable), col = c("firebrick", "goldenrod1"), cex.axis = 1,
sub = "Infection status", ylab = "Relative frequency")
??t()
# We want to test whether the probability of being eaten by birds differs
# according to infection status:
# are the categorical variables "infection level" and "being eaten" independent?
# ?
# H_0: the variables are independent!
# We have to calculate the expected frequencies for each cell under the null model.
test_results = chisq.test( worm$fate, worm$infection,correct = FALSE )
test_results
test_results2 = chisq.test( wormTable )
test_results2
E <- test_results$expected
E
O <- test_results$observed
O
# Compare these to table 9.3-2.
# What is this calculation? It is the chi-square statistic:
# the sum over all cells of (observed - expected)^2 / expected.
(O - E)^2/E
sum((O - E)^2/E)
# The chi-squared contingency test is a special case of the
# chi-squared Goodness of Fit test;
# here the probability model is that the variables are independent.
# If the table is 2x2, Fisher's exact test should be used: it is based on the
# exact distribution of the counts (like an exact binomial test) rather than
# a large-sample approximation, so it is valid when counts are not too large.
# Use it in cases where expected cell frequencies are too low to meet the rules.
fisher.test( wormTable )
# Try using Fisher's exact test on the following data
vampire <- read.csv("ABD_all_data/chapter09/chap09e5VampireBites.csv")
vampire
vampTable<-table(vampire$estrous, vampire$bitten)
vampTable
fisher.test(vampTable)
| /Source Code/Contingency.R | no_license | kendylbarron/Biostats-Projects | R | false | false | 4,108 | r | # Contingency Analysis
# Mary Killilea
# Fall 2015
getwd()
########### Example 9.2 Aspirin and Cancer
# odds ratios
# Read the cancer data into R
cancer <- read.csv("ABD_all_data/chapter09/chap09e2AspirinCancer.csv")
head(cancer)
# Create a 2x2 contingency table: cancer outcome x aspirin treatment
cancerTable <- table(cancer$cancer, cancer$aspirinTreatment)
cancerTable
# Create a mosaic plot (table transposed so treatment forms the x-axis)
mosaicplot( t(cancerTable), col = c("firebrick", "goldenrod1"), cex.axis = 1,
sub = "Aspirin treatment", ylab = "Relative frequency", main = "")
??mosaicplot
# Odds ratios with the 95% confidence interval can be calculated
# using a library called epitools.
# IMPROVEMENT: only install epitools when it is missing; the original
# unconditionally ran install.packages() on every execution, which is slow
# and requires a network connection.
if (!requireNamespace("epitools", quietly = TRUE)) {
  install.packages("epitools", dependencies = TRUE)
}
library(epitools)
# The odds ratio command only works if the library is installed
oddsratio(cancerTable, method = "wald")
# $measure[-1,] drops the first row of the measure matrix (extra information)
oddsratio(cancerTable, method = "wald")$measure[-1,]
# How could you calculate the odds ratio without the above library, straight
# from the contingency table?
# Calculate relative risk using the epitools package.
# The layout expected by the riskratio function is the complete opposite of
# the layout used in the book.
# To use the command with a contingency table in book style (such as cancerTable),
# we need to flip (transpose) the table and reverse the order of the rows.
# We can do this all at once with the following arguments to the riskratio function.
riskratio(t(cancerTable), rev = "both", method = "wald")
riskratio(t(cancerTable), method = "wald", rev = "both")$measure[-1,]
# Notice that the result differs slightly from the relative risk value
# given in the book for these same data (1.007) because rounding error
# here is reduced.
########### Example 9.4
#
# Contingency tests: used when both variables are categorical,
# to test whether one variable is "contingent" on the other.
# The chi-square contingency test is the most commonly used test of association
# between 2 categorical variables.
# It tests the "Goodness of Fit" (GOF) of the data to the null model
# (null model: the variables are independent).
# Chapter 9 Example 3
worm <- read.csv( "ABD_all_data/chapter09/chap09e3ParasiteBrainWarp.csv" )
head(worm)
# What is wrong with this data? It didn't have the column title "fate"; mislabeled.
# We have made a correction to the data set.
worm <- read.csv( "ABD_all_data/chapter09/chap09e3ParasiteBrainWarp_corrected.csv" )
head(worm)
# Set the preferred order of infection categories in tables and graphs.
worm$infection <- factor(worm$infection, levels = c("uninfected", "light", "high"))
# Create a contingency table: fate x infection level
wormTable <- table(worm$fate, worm$infection)
wormTable
addmargins(wormTable)
# Transpose the table and draw a mosaic plot
mosaicplot( t(wormTable), col = c("firebrick", "goldenrod1"), cex.axis = 1,
sub = "Infection status", ylab = "Relative frequency")
??t()
# We want to test whether the probability of being eaten by birds differs
# according to infection status:
# are the categorical variables "infection level" and "being eaten" independent?
# ?
# H_0: the variables are independent!
# We have to calculate the expected frequencies for each cell under the null model.
test_results = chisq.test( worm$fate, worm$infection,correct = FALSE )
test_results
test_results2 = chisq.test( wormTable )
test_results2
E <- test_results$expected
E
O <- test_results$observed
O
# Compare these to table 9.3-2.
# What is this calculation? It is the chi-square statistic:
# the sum over all cells of (observed - expected)^2 / expected.
(O - E)^2/E
sum((O - E)^2/E)
# The chi-squared contingency test is a special case of the
# chi-squared Goodness of Fit test;
# here the probability model is that the variables are independent.
# If the table is 2x2, Fisher's exact test should be used: it is based on the
# exact distribution of the counts (like an exact binomial test) rather than
# a large-sample approximation, so it is valid when counts are not too large.
# Use it in cases where expected cell frequencies are too low to meet the rules.
fisher.test( wormTable )
# Try using Fisher's exact test on the following data
vampire <- read.csv("ABD_all_data/chapter09/chap09e5VampireBites.csv")
vampire
vampTable<-table(vampire$estrous, vampire$bitten)
vampTable
fisher.test(vampTable)
|
#' @title Transform Dataframe to Matrix R
#' @description This function transforms a data frame containing sampling
#'   variances (and covariances) of direct estimators into the symmetric
#'   matrix R built from n x n diagonal blocks.
#' @param var.df data frame of sampling variances of direct estimators. It
#'   must have \code{r*(r+1)/2} columns: the \code{r} variances first,
#'   followed by the covariances in row-wise (upper-triangular) order.
#' @param r number of variables.
#' @return A symmetric \code{(r*n) x (r*n)} matrix R, where \code{n} is the
#'   number of rows of \code{var.df}.
#' @examples
#' df2matR(data.frame(v1 = c(1, 2), v2 = c(3, 4), v12 = c(5, 6)), 2)
#' @export df2matR
df2matR <- function(var.df , r){
  if(dim(var.df)[2] != sum(1:r))
    stop("Length of vardir is not appropriate with data")
  var.df <- as.data.frame(var.df)
  n <- nrow(var.df)
  # One n x n diagonal block per column. `nrow = n` is essential: for a
  # single-row data frame, diag(x) on a scalar would build an x-by-x
  # identity matrix instead of a 1 x 1 block (latent bug in the old code).
  R <- lapply(var.df, function(v) diag(v, nrow = n))
  R_1n <- matrix()
  for (i in seq_len(r)){
    R.row <- R[[i]]
    for (j in seq_len(r)){
      if(j > i)
        # Index of the covariance column that fills the (i, j) block.
        R.row <- cbind(R.row, R[[sum((r-i):r) + j - r]])
    }
    if(i == 1){
      R_1n <- R.row
    } else {
      # Left-pad with zero blocks; the lower triangle is mirrored in below.
      tmp <- matrix(0, n, n*(i-1))
      R.row <- cbind(tmp, R.row)
      R_1n <- rbind(R_1n, R.row)
    }
  }
  # Mirror the upper triangle into the (currently zero) lower triangle,
  # replacing the element-by-element loop of the original.
  R_1n[lower.tri(R_1n)] <- t(R_1n)[lower.tri(R_1n)]
  return(R_1n)
}
| /R/df2matR.R | no_license | cran/msae | R | false | false | 1,044 | r | #' @title Transform Dataframe to Matrix R
#' @title Transform Dataframe to Matrix R
#' @description This function transforms a data frame containing sampling
#'   variances (and covariances) of direct estimators into the symmetric
#'   matrix R built from n x n diagonal blocks.
#' @param var.df data frame of sampling variances of direct estimators. It
#'   must have \code{r*(r+1)/2} columns: the \code{r} variances first,
#'   followed by the covariances in row-wise (upper-triangular) order.
#' @param r number of variables.
#' @return A symmetric \code{(r*n) x (r*n)} matrix R, where \code{n} is the
#'   number of rows of \code{var.df}.
#' @examples
#' df2matR(data.frame(v1 = c(1, 2), v2 = c(3, 4), v12 = c(5, 6)), 2)
#' @export df2matR
df2matR <- function(var.df , r){
  if(dim(var.df)[2] != sum(1:r))
    stop("Length of vardir is not appropriate with data")
  var.df <- as.data.frame(var.df)
  n <- nrow(var.df)
  # One n x n diagonal block per column. `nrow = n` is essential: for a
  # single-row data frame, diag(x) on a scalar would build an x-by-x
  # identity matrix instead of a 1 x 1 block (latent bug in the old code).
  R <- lapply(var.df, function(v) diag(v, nrow = n))
  R_1n <- matrix()
  for (i in seq_len(r)){
    R.row <- R[[i]]
    for (j in seq_len(r)){
      if(j > i)
        # Index of the covariance column that fills the (i, j) block.
        R.row <- cbind(R.row, R[[sum((r-i):r) + j - r]])
    }
    if(i == 1){
      R_1n <- R.row
    } else {
      # Left-pad with zero blocks; the lower triangle is mirrored in below.
      tmp <- matrix(0, n, n*(i-1))
      R.row <- cbind(tmp, R.row)
      R_1n <- rbind(R_1n, R.row)
    }
  }
  # Mirror the upper triangle into the (currently zero) lower triangle,
  # replacing the element-by-element loop of the original.
  R_1n[lower.tri(R_1n)] <- t(R_1n)[lower.tri(R_1n)]
  return(R_1n)
}
|
#' Predict method for cross-validated hqreg models
#'
#' Dispatches to \code{predict} on the fitted model stored in the
#' cross-validation object, after resolving a keyword lambda to the value
#' selected by cross-validation.
#' @param object cross validated hqreg models
#' @param X new data (design matrix) to predict for
#' @param lambda either a keyword ("lambda.1se" or "lambda.min") selecting the
#'   corresponding cross-validated value stored in \code{object}, or a numeric
#'   penalty value used as-is
#' @param type type of prediction: "response", "coefficients" or "nvars"
#' @param ... further arguments passed on to \code{predict} on the fit
#' @return the value of \code{predict} on the underlying fit
#' @export
predict.cv.hqreg <- function(object, X, lambda = c("lambda.1se","lambda.min"), type = c("response","coefficients","nvars"), ...) {
  type = match.arg(type)
  if (is.character(lambda)) {
    lambda = match.arg(lambda)
    # look up the numeric lambda the cross-validation stored under that name
    lambda = object[[lambda]]
  } else if(!is.numeric(lambda)) stop("Invalid lambda")
  predict(object$fit, X, lambda = lambda, type = type, ...)
}
#' Extract coefficients from a cross-validated hqreg model
#'
#' @param object cross validated hqreg model
#' @param lambda choose "lambda.1se" or "lambda.min", or supply a numeric
#'   penalty value directly
#' @param ... further arguments passed on to \code{coef} on the underlying fit
#' @export
coef.cv.hqreg <- function(object, lambda = c("lambda.1se","lambda.min"), ...) {
  # Resolve the penalty: a keyword maps to the CV-selected value stored in
  # the object, a numeric value is used verbatim, anything else is an error.
  lam <- if (is.character(lambda)) {
    object[[match.arg(lambda)]]
  } else if (is.numeric(lambda)) {
    lambda
  } else {
    stop("Invalid lambda")
  }
  coef(object$fit, lambda = lam, ...)
}
| /R/predict.cv.hqreg.R | no_license | jsliu/hqreg | R | false | false | 925 | r | #' predict model by choosing the cross validated hqreg model
#' Predict method for cross-validated hqreg models
#'
#' Dispatches to \code{predict} on the fitted model stored in the
#' cross-validation object, after resolving a keyword lambda to the value
#' selected by cross-validation.
#' @param object cross validated hqreg models
#' @param X new data (design matrix) to predict for
#' @param lambda either a keyword ("lambda.1se" or "lambda.min") selecting the
#'   corresponding cross-validated value stored in \code{object}, or a numeric
#'   penalty value used as-is
#' @param type type of prediction: "response", "coefficients" or "nvars"
#' @param ... further arguments passed on to \code{predict} on the fit
#' @return the value of \code{predict} on the underlying fit
#' @export
predict.cv.hqreg <- function(object, X, lambda = c("lambda.1se","lambda.min"), type = c("response","coefficients","nvars"), ...) {
  type = match.arg(type)
  if (is.character(lambda)) {
    lambda = match.arg(lambda)
    # look up the numeric lambda the cross-validation stored under that name
    lambda = object[[lambda]]
  } else if(!is.numeric(lambda)) stop("Invalid lambda")
  predict(object$fit, X, lambda = lambda, type = type, ...)
}
#' Extract coefficients from a cross-validated hqreg model
#'
#' @param object cross validated hqreg model
#' @param lambda choose "lambda.1se" or "lambda.min", or supply a numeric
#'   penalty value directly
#' @param ... further arguments passed on to \code{coef} on the underlying fit
#' @export
coef.cv.hqreg <- function(object, lambda = c("lambda.1se","lambda.min"), ...) {
  # Resolve the penalty: a keyword maps to the CV-selected value stored in
  # the object, a numeric value is used verbatim, anything else is an error.
  lam <- if (is.character(lambda)) {
    object[[match.arg(lambda)]]
  } else if (is.numeric(lambda)) {
    lambda
  } else {
    stop("Invalid lambda")
  }
  coef(object$fit, lambda = lam, ...)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/trunc_inf.R
\name{intForZAll}
\alias{intForZAll}
\title{Compute the truncation set for \eqn{Z} statistic}
\usage{
intForZAll(method = c("cook", "dffits", "lasso"), constraint, v, z, sigma,
outlier.det)
}
\arguments{
\item{method, }{the outlier detection method, must be one of "cook", "dffits", "lasso".}
\item{constraint, }{the constraint in the response.}
\item{v, }{the contrast vector \eqn{\nu}.}
\item{z, }{\eqn{P_\nu^\perp y}.}
\item{sigma, }{the noise level \eqn{\sigma}.}
\item{outlier.det, }{indexes of detected outliers.}
}
\value{
This function returns an "Intervals" object.
}
\description{
This function computes the truncation set for the \eqn{Z} statistic.
}
\details{
Consider the \eqn{Z}-statistic \deqn{Z = \nu^T y / (\sigma ||\nu||_2).}
This function translates the constraints in the response into the truncation
set (which is a union of intervals) for the \eqn{Z} statistic.
}
\keyword{internal}
| /man/intForZAll.Rd | no_license | shuxiaoc/outference | R | false | true | 1,009 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/trunc_inf.R
\name{intForZAll}
\alias{intForZAll}
\title{Compute the truncation set for \eqn{Z} statistic}
\usage{
intForZAll(method = c("cook", "dffits", "lasso"), constraint, v, z, sigma,
outlier.det)
}
\arguments{
\item{method, }{the outlier detection method, must be one of "cook", "dffits", "lasso".}
\item{constraint, }{the constraint in the response.}
\item{v, }{the contrast vector \eqn{\nu}.}
\item{z, }{\eqn{P_\nu^\perp y}.}
\item{sigma, }{the noise level \eqn{\sigma}.}
\item{outlier.det, }{indexes of detected outliers.}
}
\value{
This function returns an "Intervals" object.
}
\description{
This function computes the truncation set for the \eqn{Z} statistic.
}
\details{
Consider the \eqn{Z}-statistic \deqn{Z = \nu^T y / (\sigma ||\nu||_2).}
This function translates the constraints in the response into the truncation
set (which is a union of intervals) for the \eqn{Z} statistic.
}
\keyword{internal}
|
################################################################################
.onLoad <- function(libname, pkgname) {
  # Package-level default options. Any option the user has already set
  # (i.e. already present in .Options) is left untouched.
  defaults <- list(
    bigstatsr.cluster.type     = "PSOCK",
    bigstatsr.check.args       = TRUE,
    bigstatsr.block.sizeGB     = 1,
    bigstatsr.downcast.warning = TRUE,
    FBM.dir                    = tempdir()
  )
  unset <- setdiff(names(defaults), names(.Options))
  if (length(unset) > 0) options(defaults[unset])
}
################################################################################
| /R/zzz.R | no_license | cran/bigstatsr | R | false | false | 539 | r | ################################################################################
.onLoad <- function(libname, pkgname) {
  # Package-level default options. Any option the user has already set
  # (i.e. already present in .Options) is left untouched.
  defaults <- list(
    bigstatsr.cluster.type     = "PSOCK",
    bigstatsr.check.args       = TRUE,
    bigstatsr.block.sizeGB     = 1,
    bigstatsr.downcast.warning = TRUE,
    FBM.dir                    = tempdir()
  )
  unset <- setdiff(names(defaults), names(.Options))
  if (length(unset) > 0) options(defaults[unset])
}
################################################################################
|
#' @title Order IJDATA spot sequences and growth lines
#' @description Reorders spot sequences and growth lines within object of class \code{\link[=read.ijdata]{IJDATA}}.
#' @details Reorders IJDATA \code{spot.x} and \code{spot.y} and/or \code{gbs.x} and \code{gbs.y} coordinate data.frames. Useful when order of ROIs does not correspond with the desired order of \code{\link{convert.ijdata}} or spot.dist output. Can also be used to print the order of spot sequences and growth lines within IJDATA object (see 'print.order'). In addition the function can also be used to drop spot sequences or growth lines from the data set by leaving out ROI names. In this case a warning is produced to confirm that the user has not forgotten something.
#' @param IJDATA an \code{\link[=read.ijdata]{IJDATA}} object.
#' @param spots a character or numeric vector specifying the desired order of sample spot sequences. The default \code{""} keeps the current order.
#' @param gbs a character or numeric vector specifying the desired order of growth lines. The default \code{""} keeps the current order.
#' @param print.order logical. Should the current order of spot sequences and growth lines be printed instead of changing the order?
#' @return The reordered \code{IJDATA} object, or (if \code{print.order = TRUE}) a list giving the current order.
#' @author Mikko Vihtakari
#' @seealso \code{\link{read.ijdata}} for reading zip files containing ImageJ ROIs.
#'
#' \code{\link{convert.ijdata}} for converting the coordinate information to \link[spatstat]{spatstat} point patterns.
#'
#' @examples data(shellspots)
#' order.ijdata(shellspots, print.order = TRUE) # Prints the current order. Does not change anything
#' dat <- order.ijdata(shellspots, gbs = c(1,3,6:14,4,5,2)) # Changes order of growth bands
#' order.ijdata(dat, print.order = TRUE)
#'
#' ## Subset the first sample spot sequence
#' dat2 <- order.ijdata(shellspots, gbs = 1:13)
#' ## Warning message:
#' ## In order.ijdata(shellspots, gbs = 1:13) :
#' ##   Length of gbs does not correspond the number of columns. Data removed.
#' order.ijdata(dat2, print.order = TRUE)
#' @export
order.ijdata <- function(IJDATA, spots = "", gbs = "", print.order = FALSE){
  ## Capture the current order BEFORE any reordering takes place
  if(print.order) order.list <- list(spots = coln(IJDATA$spots.x), gbs = coln(IJDATA$gbs.x))
  ## Column counts of the original data, used for the length-mismatch warnings
  spots.ncol <- ncol(IJDATA$spots.x)
  gbs.ncol <- ncol(IJDATA$gbs.x)
  ## TRUE when an argument is the default sentinel "" (keep current order)
  is.default <- function(sel) length(sel) == 1 && is.character(sel) && sel == ""
  ### Spots
  if(!is.default(spots)) {
    ## Character vectors reorder by name, numeric vectors by column position
    ## (replaces the old class(x) == "..." checks with is.character/is.numeric)
    if(is.character(spots) || is.numeric(spots)) {
      IJDATA$spots.x <- IJDATA$spots.x[spots]
      IJDATA$spots.y <- IJDATA$spots.y[spots]
    }
    ## Selecting fewer (or more) columns than present drops/duplicates data
    if(length(spots) != spots.ncol) warning("Length of spots does not correspond the number of columns. Data removed.")
  }
  ### Gbs
  if(!is.default(gbs)) {
    if(is.character(gbs) || is.numeric(gbs)) {
      IJDATA$gbs.x <- IJDATA$gbs.x[gbs]
      IJDATA$gbs.y <- IJDATA$gbs.y[gbs]
    }
    if(length(gbs) != gbs.ncol) warning("Length of gbs does not correspond the number of columns. Data removed.")
  }
  if(print.order) return(order.list)
  IJDATA}
| /R/order.ijdata.R | no_license | MikkoVihtakari/sclero | R | false | false | 3,640 | r | #' @title Order IJDATA spot sequences and growth lines
#' @title Order IJDATA spot sequences and growth lines
#' @description Reorders spot sequences and growth lines within object of class \code{\link[=read.ijdata]{IJDATA}}.
#' @details Reorders IJDATA \code{spot.x} and \code{spot.y} and/or \code{gbs.x} and \code{gbs.y} coordinate data.frames. Useful when order of ROIs does not correspond with the desired order of \code{\link{convert.ijdata}} or spot.dist output. Can also be used to print the order of spot sequences and growth lines within IJDATA object (see 'print.order'). In addition the function can also be used to drop spot sequences or growth lines from the data set by leaving out ROI names. In this case a warning is produced to confirm that the user has not forgotten something.
#' @param IJDATA an \code{\link[=read.ijdata]{IJDATA}} object.
#' @param spots a character or numeric vector specifying the desired order of sample spot sequences. The default \code{""} keeps the current order.
#' @param gbs a character or numeric vector specifying the desired order of growth lines. The default \code{""} keeps the current order.
#' @param print.order logical. Should the current order of spot sequences and growth lines be printed instead of changing the order?
#' @return The reordered \code{IJDATA} object, or (if \code{print.order = TRUE}) a list giving the current order.
#' @author Mikko Vihtakari
#' @seealso \code{\link{read.ijdata}} for reading zip files containing ImageJ ROIs.
#'
#' \code{\link{convert.ijdata}} for converting the coordinate information to \link[spatstat]{spatstat} point patterns.
#'
#' @examples data(shellspots)
#' order.ijdata(shellspots, print.order = TRUE) # Prints the current order. Does not change anything
#' dat <- order.ijdata(shellspots, gbs = c(1,3,6:14,4,5,2)) # Changes order of growth bands
#' order.ijdata(dat, print.order = TRUE)
#'
#' ## Subset the first sample spot sequence
#' dat2 <- order.ijdata(shellspots, gbs = 1:13)
#' ## Warning message:
#' ## In order.ijdata(shellspots, gbs = 1:13) :
#' ##   Length of gbs does not correspond the number of columns. Data removed.
#' order.ijdata(dat2, print.order = TRUE)
#' @export
order.ijdata <- function(IJDATA, spots = "", gbs = "", print.order = FALSE){
  ## Capture the current order BEFORE any reordering takes place
  if(print.order) order.list <- list(spots = coln(IJDATA$spots.x), gbs = coln(IJDATA$gbs.x))
  ## Column counts of the original data, used for the length-mismatch warnings
  spots.ncol <- ncol(IJDATA$spots.x)
  gbs.ncol <- ncol(IJDATA$gbs.x)
  ## TRUE when an argument is the default sentinel "" (keep current order)
  is.default <- function(sel) length(sel) == 1 && is.character(sel) && sel == ""
  ### Spots
  if(!is.default(spots)) {
    ## Character vectors reorder by name, numeric vectors by column position
    ## (replaces the old class(x) == "..." checks with is.character/is.numeric)
    if(is.character(spots) || is.numeric(spots)) {
      IJDATA$spots.x <- IJDATA$spots.x[spots]
      IJDATA$spots.y <- IJDATA$spots.y[spots]
    }
    ## Selecting fewer (or more) columns than present drops/duplicates data
    if(length(spots) != spots.ncol) warning("Length of spots does not correspond the number of columns. Data removed.")
  }
  ### Gbs
  if(!is.default(gbs)) {
    if(is.character(gbs) || is.numeric(gbs)) {
      IJDATA$gbs.x <- IJDATA$gbs.x[gbs]
      IJDATA$gbs.y <- IJDATA$gbs.y[gbs]
    }
    if(length(gbs) != gbs.ncol) warning("Length of gbs does not correspond the number of columns. Data removed.")
  }
  if(print.order) return(order.list)
  IJDATA}
|
test.data.table = function(verbose=FALSE, pkg="pkg") {
  # Run the main data.table test suite (inst/tests/tests.Rraw).
  # verbose: temporarily switches on the "datatable.verbose" option for the run.
  # pkg: name of the package directory when run from a developer checkout.
  if (exists("test.data.table",.GlobalEnv,inherits=FALSE)) {
    # package developer: locate the tests inside the source tree
    if ("package:data.table" %in% search()) stop("data.table package loaded")
    if (.Platform$OS.type == "unix" && Sys.info()['sysname'] != "Darwin")
      d = path.expand("~/R/gitdatatable/pkg/inst/tests")
    else {
      if (!pkg %in% dir()) stop(paste0(pkg, " not in dir()"))
      d = paste0(getwd(), "/", pkg, "/inst/tests")
    }
  } else {
    # user: locate the tests inside the installed package
    d = paste0(getNamespaceInfo("data.table","path"), "/tests")
  }
  # for (fn in dir(d,"*.[rR]$",full=TRUE)) {  # testthat runs those
  oldenc = options(encoding="UTF-8")[[1L]] # just for tests 708-712 on Windows
  on.exit(options(encoding=oldenc), add=TRUE)  # restore even if a test errors
  # TO DO: reinstate solution for C locale of CRAN's Mac (R-Forge's Mac is ok)
  # oldlocale = Sys.getlocale("LC_CTYPE")
  # Sys.setlocale("LC_CTYPE", "") # just for CRAN's Mac to get it off C locale (post to r-devel on 16 Jul 2012)
  olddir = setwd(d)
  on.exit(setwd(olddir), add=TRUE)
  for (fn in file.path(d, 'tests.Rraw')) {  # not testthat
    cat("Running",fn,"\n")
    oldverbose = getOption("datatable.verbose")
    if (verbose) options(datatable.verbose=TRUE)
    sys.source(fn,envir=new.env(parent=.GlobalEnv))
    # BUG FIX: the option toggled above is "datatable.verbose"; the previous
    # code restored "data.table.verbose" (wrong name), so verbose mode was
    # left switched on after a verbose run.
    options(datatable.verbose=oldverbose)
    # As from v1.7.2, testthat doesn't run the tests.Rraw (hence file name change to .Rraw).
    # There were environment issues with system.time() (when run by test_package) that only
    # showed up when CRAN maintainers tested on 64bit. Solution was simply to not run the
    # tests from testthat, which also speeds the check up (it previously ran twice).
  }
  # Sys.setlocale("LC_CTYPE", oldlocale)
  invisible()
}
## Tests that two data.tables (`target` and `current`) are equivalent.
## This method is used primarily to make life easy with a testing harness
## built around test_that. A call to test_that::{expect_equal|equal} will
## ultimately dispatch to this method when making an "equality" call.
all.equal.data.table <- function(target, current, trim.levels=TRUE, ...) {
  # Work on copies so the caller's tables are never modified by reference.
  target = copy(target)
  current = copy(current)
  if (trim.levels) {
    ## drop unused factor levels in both tables before comparing
    if (length(target)) {
      # vapply (not sapply) guarantees a logical vector for which()
      for (i in which(vapply(target, is.factor, logical(1L)))) {
        .xi = factor(target[[i]])
        target[,(i):=.xi]
      }
    }
    if (length(current)) {
      for (i in which(vapply(current, is.factor, logical(1L)))) {
        .xi = factor(current[[i]])
        current[,(i):=.xi]
      }
    }
  }
  ## Trim any extra row.names attributes that came from some inheritance
  setattr(target, "row.names", NULL)
  setattr(current, "row.names", NULL)
  # all.equal uses unclass which doesn't know about external pointers; there
  # doesn't seem to be an all.equal.externalptr method in base.
  setattr(target, ".internal.selfref", NULL)
  setattr(current, ".internal.selfref", NULL)
  all.equal.list(target, current, ...)
}
| /R/test.data.table.R | no_license | stefanfritsch/data.table | R | false | false | 3,274 | r |
test.data.table = function(verbose=FALSE, pkg="pkg") {
if (exists("test.data.table",.GlobalEnv,inherits=FALSE)) {
# package developer
if ("package:data.table" %in% search()) stop("data.table package loaded")
if (.Platform$OS.type == "unix" && Sys.info()['sysname'] != "Darwin")
d = path.expand("~/R/gitdatatable/pkg/inst/tests")
else {
if (!pkg %in% dir()) stop(paste(pkg, " not in dir()", sep=""))
d = paste(getwd(),"/", pkg, "/inst/tests",sep="")
}
} else {
# user
d = paste(getNamespaceInfo("data.table","path"),"/tests",sep="")
}
# for (fn in dir(d,"*.[rR]$",full=TRUE)) { # testthat runs those
oldenc = options(encoding="UTF-8")[[1L]] # just for tests 708-712 on Windows
# TO DO: reinstate solution for C locale of CRAN's Mac (R-Forge's Mac is ok)
# oldlocale = Sys.getlocale("LC_CTYPE")
# Sys.setlocale("LC_CTYPE", "") # just for CRAN's Mac to get it off C locale (post to r-devel on 16 Jul 2012)
olddir = setwd(d)
on.exit(setwd(olddir))
for (fn in file.path(d, 'tests.Rraw')) { # not testthat
cat("Running",fn,"\n")
oldverbose = getOption("datatable.verbose")
if (verbose) options(datatable.verbose=TRUE)
sys.source(fn,envir=new.env(parent=.GlobalEnv))
options(data.table.verbose=oldverbose)
# As from v1.7.2, testthat doesn't run the tests.Rraw (hence file name change to .Rraw).
# There were environment issues with system.time() (when run by test_package) that only
# showed up when CRAN maintainers tested on 64bit. Matt spent a long time including
# testing on 64bit in Amazon EC2. Solution was simply to not run the tests.R from
# testthat, which probably makes sense anyway to speed it up a bit (was running twice
# before).
}
options(encoding=oldenc)
# Sys.setlocale("LC_CTYPE", oldlocale)
invisible()
}
## Tests that two data.tables (`target` and `current`) are equivalent.
## This method is used primarily to make life easy with a testing harness
## built around test_that. A call to test_that::{expect_equal|equal} will
## ultimately dispatch to this method when making an "equality" call.
all.equal.data.table <- function(target, current, trim.levels=TRUE, ...) {
  # Work on copies so the caller's tables are never modified by reference.
  target = copy(target)
  current = copy(current)
  if (trim.levels) {
    ## drop unused factor levels in both tables before comparing
    if (length(target)) {
      # vapply (not sapply) guarantees a logical vector for which()
      for (i in which(vapply(target, is.factor, logical(1L)))) {
        .xi = factor(target[[i]])
        target[,(i):=.xi]
      }
    }
    if (length(current)) {
      for (i in which(vapply(current, is.factor, logical(1L)))) {
        .xi = factor(current[[i]])
        current[,(i):=.xi]
      }
    }
  }
  ## Trim any extra row.names attributes that came from some inheritance
  setattr(target, "row.names", NULL)
  setattr(current, "row.names", NULL)
  # all.equal uses unclass which doesn't know about external pointers; there
  # doesn't seem to be an all.equal.externalptr method in base.
  setattr(target, ".internal.selfref", NULL)
  setattr(current, ".internal.selfref", NULL)
  all.equal.list(target, current, ...)
}
|
\name{estimateDE}
\alias{estimateDE}
\title{Estimate degrees of differential expression (DE) for individual genes}
\usage{
estimateDE(tcc, test.method, FDR, paired,
full, reduced, # for DESeq2
design, contrast, # for edgeR, DESeq2, voom
coef, # for edgeR, voom
group, cl, # for baySeq
samplesize, # for baySeq, SAMseq
logged, floor, # for WAD
...
)
}
\description{
This function calculates \eqn{p}-values (or the related statistics) for
identifying differentially expressed genes (DEGs) from a
\link{TCC-class} object.
\code{estimateDE} internally calls a specified method
implemented in other R packages.
}
\arguments{
\item{tcc}{\link{TCC-class} object.}
\item{test.method}{character string specifying a method for identifying
DEGs: one of \code{"edger"}, \code{"deseq2"},
\code{"bayseq"}, \code{"voom"}, and \code{"wad"}.
See the "Details" field for detail.
The default is \code{"edger"}.}
\item{FDR}{numeric value (between 0 and 1) specifying the threshold
for determining DEGs.}
\item{paired}{logical. If \code{TRUE}, the input data are regarded as
(two-group) paired samples. If \code{FALSE}, the input data are
regarded as unpaired samples. The default is \code{FALSE}.}
\item{full}{a formula for creating full model described in
DESeq2. The right hand side can involve any column of \code{tcc$group},
    which is used as the model frame.
See the \code{\link[DESeq2]{nbinomLRT}} function in DESeq2 for details.}
\item{reduced}{a formula for creating reduced model described in DESeq2.
The right hand side can involve any column of \code{tcc$group} is
used as the model frame.
See the \code{\link[DESeq2]{nbinomLRT}} function in DESeq2 for details.}
\item{design}{the argument is used in edgeR, voom (limma) and DESeq2.
For edgeR and voom, it should be the numeric matrix giving the
design matrix for the generalized linear model.
See the \code{\link[edgeR]{glmFit}} function in edgeR or
the \code{\link[limma]{lmFit}} function in limma for details.
For DESeq2, it should be a formula specifying the design of the
experiment. See the \code{\link[DESeq2]{DESeqDataSet}} function
in DESeq2 for details.}
\item{contrast}{the argument is used in edgeR and DESeq2.
For edgeR, numeric vector specifying a contrast of the linear model
coefficients to be tested equal to zero.
See the \code{\link[edgeR]{glmLRT}} function in edgeR for details.
For DESeq2, the argument is the same as \code{contrast} used in the
    DESeq2 package to retrieve the results from the Wald test. See the
\code{\link[DESeq2]{results}} function in DESeq2 for details.}
\item{coef}{integer or character vector indicating which coefficients
of the linear model are to be tested equal to zero.
See the \code{\link[edgeR]{glmLRT}} function in edgeR for details.}
\item{group}{numeric or character string identifying the columns in
the \code{tcc$group} for analysis. See the \code{group} argument
of \code{\link[baySeq]{topCounts}} function in baySeq for details.}
\item{cl}{\code{snow} object when using multi processors if
\code{test.method = "bayseq"} is specified.
See the \code{\link[baySeq]{getPriors.NB}} function in baySeq
for details.}
\item{samplesize}{integer specifying the sample size for estimating the
prior parameters if \code{test.method = "bayseq"} (defaults to 10000).}
\item{logged}{logical. If \code{TRUE}, the input data are regarded as
log2-transformed. If \code{FALSE}, the log2-transformation is
performed after the floor setting. The default is
\code{logged = FALSE}.
Ignored if \code{test.method} is not \code{"wad"}.}
\item{floor}{numeric scalar (> 0) specifying the floor value for
taking logarithm. The default is \code{floor = 1}, indicating that
values less than 1 are replaced by 1. Ignored if
\code{logged = TRUE}.
Ignored if \code{test.method} is not \code{"wad"}.}
\item{...}{further parameters.}
}
\details{
\code{estimateDE} function is generally used after performing the
\code{\link{calcNormFactors}} function that calculates normalization factors.
\code{estimateDE} constructs a statistical model for differential expression
(DE) analysis with the calculated normalization factors and returns the
\eqn{p}-values (or the derivatives). The individual functions in other
packages are internally called according to the specified
\code{test.method} parameter.
\itemize{
\item \code{test.method = "edger"}\cr
There are two approaches (i.e., exact test and GLM) to identify DEGs
in edgeR. The two approaches are implemented in TCC. As a default,
the exact test approach is used for two-group data,
and GLM approach is used for multi-group or multi-factor data.
However, if \code{design} and the one of \code{coef} or
\code{contrast} are given, the GLM approach will be used for
two-group data. \cr
If the exact test approach is used,
\code{\link[edgeR]{estimateCommonDisp}},
\code{\link[edgeR]{estimateTagwiseDisp}}, and
\code{\link[edgeR]{exactTest}} are internally called.\cr
If the GLM approach is used,
\code{\link[edgeR]{estimateGLMCommonDisp}},
\code{\link[edgeR]{estimateGLMTrendedDisp}},\cr
\code{\link[edgeR]{estimateGLMTagwiseDisp}},
\code{\link[edgeR]{glmFit}}, and
\code{\link[edgeR]{glmLRT}}
are internally called.
\item \code{test.method = "deseq2"}\cr
\code{\link[DESeq2]{estimateDispersions}}, and
\code{\link[DESeq2]{nbinomWaldTest}} are internally called for
identifying DEGs.
However, if \code{full} and \code{reduced} are given,
the \code{\link[DESeq2]{nbinomLRT}} will be used.
\item \code{test.method = "bayseq"}\cr
\code{\link[baySeq]{getPriors.NB}} and
\code{\link[baySeq]{getLikelihoods}} in baySeq are internally
called for identifying DEGs.
If \code{paired = TRUE},
\code{\link[baySeq]{getPriors}} and
\code{\link[baySeq]{getLikelihoods}} in baySeq are used.
\item \code{test.method = "voom"}\cr
\code{\link[limma]{voom}}, \code{\link[limma]{lmFit}}, and
\code{\link[limma]{eBayes}} in limma are internally called
for identifying DEGs.
\item \code{test.method = "wad"}\cr
The \code{\link{WAD}} implemented in TCC is used for identifying
DEGs. Since \code{\link{WAD}} outputs test statistics instead of
\eqn{p}-values, the \code{tcc$stat$p.value} and
\code{tcc$stat$q.value} are \code{NA}.
Alternatively, the test statistics are stored in
\code{tcc$stat$testStat} field.
}
}
\value{
A \code{\link{TCC-class}} object containing following fields:
\item{stat$p.value}{numeric vector of \eqn{p}-values.}
\item{stat$q.value}{numeric vector of \eqn{q}-values calculated
based on the \eqn{p}-values using the \code{p.adjust} function
with default parameter settings.}
\item{stat$testStat}{numeric vector of test statistics if
\code{"wad"} is specified.}
\item{stat$rank}{gene rank in order of the \eqn{p}-values or
test statistics.}
\item{estimatedDEG}{numeric vector consisting of 0 or 1
depending on whether each gene is classified
as non-DEG or DEG. The threshold for classifying
DEGs or non-DEGs is preliminarily given as the
\code{FDR} argument.}
}
\examples{
# Analyzing a simulation data for comparing two groups
# (G1 vs. G2) with biological replicates
# The DE analysis is performed by an exact test in edgeR coupled
# with the DEGES/edgeR normalization factors.
# For retrieving the summaries of DE results, we recommend to use
# the getResult function.
data(hypoData)
group <- c(1, 1, 1, 2, 2, 2)
tcc <- new("TCC", hypoData, group)
tcc <- calcNormFactors(tcc, norm.method = "tmm", test.method = "edger",
iteration = 1, FDR = 0.1, floorPDEG = 0.05)
tcc <- estimateDE(tcc, test.method = "edger", FDR = 0.1)
head(tcc$stat$p.value)
head(tcc$stat$q.value)
head(tcc$estimatedDEG)
result <- getResult(tcc)
}
\keyword{methods}
| /man/estimateDE.Rd | no_license | swsoyee/TCC | R | false | false | 8,819 | rd | \name{estimateDE}
\alias{estimateDE}
\title{Estimate degrees of differential expression (DE) for individual genes}
\usage{
estimateDE(tcc, test.method, FDR, paired,
full, reduced, # for DESeq2
design, contrast, # for edgeR, DESeq2, voom
coef, # for edgeR, voom
group, cl, # for baySeq
samplesize, # for baySeq, SAMseq
logged, floor, # for WAD
...
)
}
\description{
This function calculates \eqn{p}-values (or the related statistics) for
identifying differentially expressed genes (DEGs) from a
\link{TCC-class} object.
\code{estimateDE} internally calls a specified method
implemented in other R packages.
}
\arguments{
\item{tcc}{\link{TCC-class} object.}
\item{test.method}{character string specifying a method for identifying
DEGs: one of \code{"edger"}, \code{"deseq2"},
\code{"bayseq"}, \code{"voom"}, and \code{"wad"}.
See the "Details" field for detail.
The default is \code{"edger"}.}
\item{FDR}{numeric value (between 0 and 1) specifying the threshold
for determining DEGs.}
\item{paired}{logical. If \code{TRUE}, the input data are regarded as
(two-group) paired samples. If \code{FALSE}, the input data are
regarded as unpaired samples. The default is \code{FALSE}.}
\item{full}{a formula for creating full model described in
DESeq2. The right hand side can involve any column of \code{tcc$group},
    which is used as the model frame.
See the \code{\link[DESeq2]{nbinomLRT}} function in DESeq2 for details.}
\item{reduced}{a formula for creating reduced model described in DESeq2.
The right hand side can involve any column of \code{tcc$group} is
used as the model frame.
See the \code{\link[DESeq2]{nbinomLRT}} function in DESeq2 for details.}
\item{design}{the argument is used in edgeR, voom (limma) and DESeq2.
For edgeR and voom, it should be the numeric matrix giving the
design matrix for the generalized linear model.
See the \code{\link[edgeR]{glmFit}} function in edgeR or
the \code{\link[limma]{lmFit}} function in limma for details.
For DESeq2, it should be a formula specifying the design of the
experiment. See the \code{\link[DESeq2]{DESeqDataSet}} function
in DESeq2 for details.}
\item{contrast}{the argument is used in edgeR and DESeq2.
For edgeR, numeric vector specifying a contrast of the linear model
coefficients to be tested equal to zero.
See the \code{\link[edgeR]{glmLRT}} function in edgeR for details.
For DESeq2, the argument is the same as \code{contrast} used in the
    DESeq2 package to retrieve the results from the Wald test. See the
\code{\link[DESeq2]{results}} function in DESeq2 for details.}
\item{coef}{integer or character vector indicating which coefficients
of the linear model are to be tested equal to zero.
See the \code{\link[edgeR]{glmLRT}} function in edgeR for details.}
\item{group}{numeric or character string identifying the columns in
the \code{tcc$group} for analysis. See the \code{group} argument
of \code{\link[baySeq]{topCounts}} function in baySeq for details.}
\item{cl}{\code{snow} object when using multi processors if
\code{test.method = "bayseq"} is specified.
See the \code{\link[baySeq]{getPriors.NB}} function in baySeq
for details.}
\item{samplesize}{integer specifying the sample size for estimating the
prior parameters if \code{test.method = "bayseq"} (defaults to 10000).}
\item{logged}{logical. If \code{TRUE}, the input data are regarded as
log2-transformed. If \code{FALSE}, the log2-transformation is
performed after the floor setting. The default is
\code{logged = FALSE}.
Ignored if \code{test.method} is not \code{"wad"}.}
\item{floor}{numeric scalar (> 0) specifying the floor value for
taking logarithm. The default is \code{floor = 1}, indicating that
values less than 1 are replaced by 1. Ignored if
\code{logged = TRUE}.
Ignored if \code{test.method} is not \code{"wad"}.}
\item{...}{further parameters.}
}
\details{
\code{estimateDE} function is generally used after performing the
\code{\link{calcNormFactors}} function that calculates normalization factors.
\code{estimateDE} constructs a statistical model for differential expression
(DE) analysis with the calculated normalization factors and returns the
\eqn{p}-values (or the derivatives). The individual functions in other
packages are internally called according to the specified
\code{test.method} parameter.
\itemize{
\item \code{test.method = "edger"}\cr
There are two approaches (i.e., exact test and GLM) to identify DEGs
in edgeR. The two approaches are implemented in TCC. As a default,
the exact test approach is used for two-group data,
and GLM approach is used for multi-group or multi-factor data.
However, if \code{design} and the one of \code{coef} or
\code{contrast} are given, the GLM approach will be used for
two-group data. \cr
If the exact test approach is used,
\code{\link[edgeR]{estimateCommonDisp}},
\code{\link[edgeR]{estimateTagwiseDisp}}, and
\code{\link[edgeR]{exactTest}} are internally called.\cr
If the GLM approach is used,
\code{\link[edgeR]{estimateGLMCommonDisp}},
\code{\link[edgeR]{estimateGLMTrendedDisp}},\cr
\code{\link[edgeR]{estimateGLMTagwiseDisp}},
\code{\link[edgeR]{glmFit}}, and
\code{\link[edgeR]{glmLRT}}
are internally called.
\item \code{test.method = "deseq2"}\cr
\code{\link[DESeq2]{estimateDispersions}}, and
\code{\link[DESeq2]{nbinomWaldTest}} are internally called for
identifying DEGs.
However, if \code{full} and \code{reduced} are given,
the \code{\link[DESeq2]{nbinomLRT}} will be used.
\item \code{test.method = "bayseq"}\cr
\code{\link[baySeq]{getPriors.NB}} and
\code{\link[baySeq]{getLikelihoods}} in baySeq are internally
called for identifying DEGs.
If \code{paired = TRUE},
\code{\link[baySeq]{getPriors}} and
\code{\link[baySeq]{getLikelihoods}} in baySeq are used.
\item \code{test.method = "voom"}\cr
\code{\link[limma]{voom}}, \code{\link[limma]{lmFit}}, and
\code{\link[limma]{eBayes}} in limma are internally called
for identifying DEGs.
\item \code{test.method = "wad"}\cr
The \code{\link{WAD}} implemented in TCC is used for identifying
DEGs. Since \code{\link{WAD}} outputs test statistics instead of
\eqn{p}-values, the \code{tcc$stat$p.value} and
\code{tcc$stat$q.value} are \code{NA}.
Alternatively, the test statistics are stored in
\code{tcc$stat$testStat} field.
}
}
\value{
A \code{\link{TCC-class}} object containing following fields:
\item{stat$p.value}{numeric vector of \eqn{p}-values.}
\item{stat$q.value}{numeric vector of \eqn{q}-values calculated
based on the \eqn{p}-values using the \code{p.adjust} function
with default parameter settings.}
\item{stat$testStat}{numeric vector of test statistics if
\code{"wad"} is specified.}
\item{stat$rank}{gene rank in order of the \eqn{p}-values or
test statistics.}
\item{estimatedDEG}{numeric vector consisting of 0 or 1
depending on whether each gene is classified
as non-DEG or DEG. The threshold for classifying
DEGs or non-DEGs is preliminarily given as the
\code{FDR} argument.}
}
\examples{
# Analyzing a simulation data for comparing two groups
# (G1 vs. G2) with biological replicates
# The DE analysis is performed by an exact test in edgeR coupled
# with the DEGES/edgeR normalization factors.
# For retrieving the summaries of DE results, we recommend to use
# the getResult function.
data(hypoData)
group <- c(1, 1, 1, 2, 2, 2)
tcc <- new("TCC", hypoData, group)
tcc <- calcNormFactors(tcc, norm.method = "tmm", test.method = "edger",
iteration = 1, FDR = 0.1, floorPDEG = 0.05)
tcc <- estimateDE(tcc, test.method = "edger", FDR = 0.1)
head(tcc$stat$p.value)
head(tcc$stat$q.value)
head(tcc$estimatedDEG)
result <- getResult(tcc)
}
\keyword{methods}
|
# GTA 24 report, chapter 4: builds a per-intervention trade-coverage base
# table used by all figures/tables below.
# NOTE(review): rm(list=ls()) plus the working-directory change in
# gta_setwd() make this script non-reentrant; acceptable for a one-off
# report script but do not source() it from other code.
rm(list=ls())
library(gtalibrary)
library(ggplot2)
library(openxlsx)
library(RColorBrewer)
library(dplyr)
library(splitstackshape)
gta_setwd()
# Output folder is "<chapter number> - <chapter title>".
chapter.number = 4
chapter.title = 'Covert jumbo protectionism is the norm'
output.path = paste(chapter.number, chapter.title, sep=' - ')
# Loads `master` (intervention-level GTA data) and helper definitions
# (including `cutoff` and the trade-war id vectors used further down).
load('data/master_plus.Rdata')
source("0 report production/GTA 24/help files/GTA 24 cutoff and definitions.R")
# Loads `final` (tariff-line trade values used for coverage weighting).
load("data/support tables/Final goods support table.Rdata")
# "Jumbo" intervention thresholds in USD of affected trade.
jumbo.threshold.1 = 10e9
jumbo.threshold.2 = 100e9
approach = 'conservative'
# "EC: GSP for certain countries' interventions as one
# 30038 state.act.id for EC: GSP for certain countries sectors revoked for 2014-2016 period
ec.revoked.gsp.ids = unique(subset(master, state.act.id == '30038')$intervention.id)
# indian export incentive 2.3 trillion
# Manually identified interventions whose trade coverage is inflated and
# must be excluded from the jumbo counts.
false.jumbos = c(70350, 18891, 16819, 71578, 58794, 18254, 13633, 15366, 13512, 18892)
# NOTE(review): remove.ids is not referenced again in this chunk — confirm
# it is used later or drop it.
remove.ids = c(ec.revoked.gsp.ids,false.jumbos)
# coverage by intervention computations -----------------------------------
## importing trade data
# Average 2005-2007 trade per importer/exporter/tariff-line triple.
trade=subset(final, Year %in% c(2005:2007))[,c("Reporter.un","Partner.un","Year","Tariff.line","Value")]
rm(final)
names(trade)=c("i.un","a.un","year","affected.product","trade.value")
trade=aggregate(trade.value ~ i.un + a.un + affected.product, trade, sum)
# Divide the 3-year sum by 3 to get the annual average.
trade$trade.value= trade$trade.value/3
# gtalibrary::elig.firms
## preparing GTA data
# Populates `master.sliced` as a side effect: red/amber interventions
# implemented between Nov 2008 and the report cutoff.
gta_data_slicer(gta.evaluation = c("red","amber"),
implementation.period = c("2008-11-01",cutoff),
keep.implementation.na = F)
# we need to implement sector-specific search in data slicer
master.sliced = subset(master.sliced, eligible.firms %in% c('all','sector-specific'))
coverage.by.intervention=unique(master.sliced[,c("intervention.id","date.implemented","currently.in.force")])
# NOTE(review): year() is not base R — presumably supplied by a package
# attached via gtalibrary (lubridate/data.table); confirm it is available.
coverage.by.intervention$year.implemented=year(coverage.by.intervention$date.implemented)
coverage.by.intervention$date.implemented=NULL
coverage.by.intervention$value.usd=NA
# Style note: T/F used for TRUE/FALSE throughout this script.
coverage.by.intervention$found.trade=T
master.temp=subset(master.sliced, intervention.id %in% coverage.by.intervention$intervention.id)
## Generating the base file
# Populates `master.tuple` (importer/exporter/product tuples) as a side effect.
gta_imp_exp_hs_tuples(master.path="master.temp",
master.data.frame = T)
master.tuple=merge(master.tuple, trade, by=c("i.un","a.un","affected.product"))
coverage.by.intervention$value.usd=NULL
# Sum affected trade per intervention; interventions with no matched trade
# get NA here and are flagged below.
coverage.by.intervention=merge(coverage.by.intervention, aggregate(trade.value ~ intervention.id, master.tuple, sum),by="intervention.id", all.x=T)
coverage.by.intervention$found.trade[is.na(coverage.by.intervention$trade.value)]=F
coverage.by.intervention$trade.value[is.na(coverage.by.intervention$trade.value)]=0
trade.coverage.base = subset(coverage.by.intervention, found.trade==T)
# remove ec.gsp.ids but keep highest
# Keep only the single EC-GSP intervention with the largest coverage so
# the revoked-GSP act is not counted multiple times.
trade.coverage.base = subset(trade.coverage.base, trade.value==max(subset(trade.coverage.base,intervention.id %in% ec.revoked.gsp.ids)$trade.value)| !(intervention.id %in% ec.revoked.gsp.ids))
# remove false jumbos
trade.coverage.base = subset(trade.coverage.base, !(intervention.id %in% false.jumbos))
## different subsets
# "all" vs "conservative" (national/supranational scope, broad eligibility)
# intervention id sets used to switch figure variants below.
ids.all=unique(master.sliced$intervention.id)
ids.conservative=unique(subset(master.sliced, implementation.level %in% c("national", "supranational") &
eligible.firms %in% c("all", "sector-specific"))$intervention.id)
trade.jumbo.intervention = subset(trade.coverage.base, trade.value >= jumbo.threshold.1 & intervention.id %in% ids.conservative)
jumbo.ids = unique(trade.jumbo.intervention$intervention.id)
save(jumbo.ids, trade.jumbo.intervention, file = paste0("0 report production/GTA 24/data/", output.path,"/trade per jumbo(10bn) intervention.Rdata"))
### XLSX specs
year.range = 2008:2019
# Populates `gta_colour` (palette list) as a side effect.
gta_colour_palette()
# PDF/CDF plots -----------------------------------------------------------------
# For each inclusion approach: a CDF plot, a PDF plot (with trade-war
# interventions marked) and an XLSX of intervention counts per
# affected-trade bracket. Only "all" is looped here; the "conservative"
# and "non-conservative" branches below are kept for manual re-runs.
for(approach in c("all")){
# Trade-value bracket edges (USD) for the XLSX count table; last edge is
# max+1 so the top bracket always catches the largest observation.
thresholds = c(0,1e7,1e8,1e9,1e10,1e11,1e12,max(trade.coverage.base$trade.value)+1)
trade.thresholds.by.year = data.frame(Lower.threshold = thresholds[-length(thresholds)], Upper.threshold = thresholds[-1])
if(approach=="all"){
ids=ids.all
cdf.file.name="All interventions - CDF of harmful intervention trade coverage"
pdf.file.name="All interventions - PDF of harmful intervention trade coverage"
table.path = paste0('0 report production/GTA 24/tables & figures/', output.path, '/',chapter.number,'.1 All interventions - Interventions by year and affected trade thresholds.xlsx')
}
if(approach=="conservative"){
ids=ids.conservative
cdf.file.name="Conservative interventions - CDF of harmful intervention trade coverage"
pdf.file.name="Conservative interventions - PDF of harmful intervention trade coverage"
table.path = paste0('0 report production/GTA 24/tables & figures/', output.path, '/',chapter.number,'.1 Conservative interventions - Interventions by year and affected trade thresholds.xlsx')
}
if(approach=="non-conservative"){
ids=setdiff(ids.all,ids.conservative)
cdf.file.name="Non-conservative interventions - CDF of harmful intervention trade coverage"
pdf.file.name="Non-conservative interventions - PDF of harmful intervention trade coverage"
table.path = paste0('0 report production/GTA 24/tables & figures/', output.path, '/',chapter.number,'.1 Non-conservative interventions - Interventions by year and affected trade thresholds.xlsx')
}
## loop data
loop.data=subset(trade.coverage.base, intervention.id %in% ids)
## ecdf
# Empirical CDF of log10(trade value), x-axis relabelled in USD units.
log10.cdf=ggplot(loop.data, aes(x=log10(trade.value))) +
stat_ecdf(geom = "step", position = "identity",size=1.1) + xlab('Trade value in USD') + ylab('Share of discriminatory policy intervention since November 2008') +
ggtitle('Cumulative density function of the value of trade harmed \nby harmful interventions implemented 2008-2019') +
theme(plot.title = element_text(hjust = 0.5)) +
coord_cartesian(xlim = c(5, max(log10(loop.data$trade.value)))+0.01) +
scale_x_continuous(breaks = 5:12,labels= c('100\'000','1 million','10 million','100 million','1 billion', '10 billion', '100 billion', '1 trillion'))+
gta_theme() +
scale_y_continuous(sec.axis = dup_axis())
log10.cdf
gta_plot_saver(plot=log10.cdf,
path=paste("0 report production/GTA 24/tables & figures/",output.path, sep=""),
name=paste(chapter.number, ".1 ",cdf.file.name, sep=""))
## PDF (final version)
# NOTE(review): trade.war.us / trade.war.chn are only defined further down
# (Task 1 section), so this loop fails on a clean first run of the script —
# confirm the intended execution order or move their definition up.
trade.values.war.us <- trade.coverage.base[trade.coverage.base$intervention.id %in% trade.war.us,]$trade.value
trade.values.war.chn <- trade.coverage.base[trade.coverage.base$intervention.id %in% trade.war.chn,]$trade.value
trade.values.war.us = log10(trade.values.war.us)
trade.values.war.chn = log10(trade.values.war.chn)
# Kernel-density plot with vertical dashed lines marking each trade-war
# intervention's (log) trade value.
log10.pdf = ggplot() +
geom_density(data=loop.data,aes(x=log10(trade.value)),trim=F, size=1) + xlab('Trade value in billions of USD') + ylab('Probability Density') +
ggtitle('Probability density function of the value of trade harmed \nby harmful interventions implemented 2008-2019') +
theme(plot.title = element_text(hjust = 0.5)) +
coord_cartesian(xlim = c(9, max(log10(loop.data$trade.value)))+0.01) +
scale_x_continuous(breaks = 9:12,labels=c('1','10','100', '1\'000'))+
gta_theme() +
theme(plot.title = element_text(size = 11)) +
geom_vline(aes(xintercept = trade.values.war.chn, color = gta_colour$qualitative[3]), size=0.7,linetype='twodash', show.legend = TRUE) +
geom_vline(aes(xintercept = trade.values.war.us, color = gta_colour$qualitative[6]), size=0.7,linetype='twodash', show.legend = TRUE) +
scale_color_manual(name='',values=gta_colour$qualitative[c(7,2)],labels=c('China harmful interventions \nimplemented in 2018 trade war with the US','US harmful interventions \nimplemented in 2018 trade war with China')) +
scale_y_continuous(sec.axis = dup_axis())
log10.pdf
gta_plot_saver(plot=log10.pdf,
path=paste("0 report production/GTA 24/tables & figures/",output.path, sep=""),
name=paste(chapter.number,".3 ",pdf.file.name, sep=""))
## XLSX
# an XSLX with summary stats about how many interventions affected between x1 and x2 worth of trade for several brackets eg. less than 1bn, 1-2bn, 2-3bn or so
# choose those brackets as they make sense
# Count interventions per (bracket, implementation year). Bounds are
# strict (> lower, < upper), so values exactly on an edge are not counted.
# NOTE(review): 1:nrow()/1:length() would misbehave on empty inputs
# (seq_len/seq_along are safer), but both are non-empty here.
for (i in 1:nrow(trade.thresholds.by.year)){
for (year in 1:length(year.range)){
trade.thresholds.by.year[i,year+2] = length(which(loop.data[loop.data$year.implemented==year.range[year],]$trade.value > trade.thresholds.by.year$Lower.threshold[i] &
loop.data[loop.data$year.implemented==year.range[year],]$trade.value < trade.thresholds.by.year$Upper.threshold[i]))
names(trade.thresholds.by.year)[year+2] = paste(as.character(year.range[year]),'Number of interventions harming trade between threshold values' )
}
}
colnames(trade.thresholds.by.year)[1:2] = c('Lower Threshold', 'Upper Threshold')
# class(trade.thresholds.by.year$`Lower Threshold`) <- "scientific"
# class(trade.thresholds.by.year$`Upper Threshold`) <- "scientific"
loop.data=loop.data[order(loop.data$trade.value, decreasing=T),c('intervention.id','year.implemented','trade.value')]
# NOTE(review): writes via the xlsx package although openxlsx is the one
# attached above — confirm xlsx (and Java) is installed where this runs.
xlsx::write.xlsx(trade.thresholds.by.year, file=table.path, row.names = F, sheetName = "Harmed trade" )
# xlsx::write.xlsx(trade.thresholds.by.year, file=table.path, row.names = F, sheetName = "Underlying data", append=T)
}
# Task 1 ---------------------------------------------------------------------
# SE request: For the discriminatory measures imposed over the past 10 years, a bar chart showing the frequency of measures harming 10 billion USD
# or more of trade would be produced. The trade war interventions (enumerated in chapter 2, stored in the GTA 24’s definitions file)
# would be highlighted and compared to other jumbo measures.
# The *.act.ids / *.intervention.ids vectors come from the sourced
# definitions file (see setup above).
trade.war.state.ids = c(china.us.trade.war.act.ids, eu.sg.steel.act.ids)
trade.war.intervention.ids = c(new.actions.intervention.ids ,china.us.trade.war.intervention.ids)
# Re-slice without date/evaluation filters to identify trade-war ids;
# this overwrites master.sliced from the earlier, filtered slice.
gta_data_slicer()
master.sliced = subset(master.sliced, eligible.firms %in% c('all','sector-specific'))
trade.war.intervention.ids = unique(subset(master.sliced, (gta.evaluation == 'Red')&((state.act.id %in% trade.war.state.ids)|(intervention.id %in% trade.war.intervention.ids)))$intervention.id)
# US-implemented measures hitting China, and vice versa (red only).
trade.war.us = unique(subset(master.sliced,(implementing.jurisdiction == 'United States of America')&(affected.jurisdiction == 'China')&(gta.evaluation == 'Red')&((state.act.id %in% trade.war.state.ids)|(intervention.id %in% trade.war.intervention.ids)))$intervention.id)
trade.war.chn = unique(subset(master.sliced,(implementing.jurisdiction == 'China')&(affected.jurisdiction == 'United States of America')&(gta.evaluation == 'Red')&((state.act.id %in% trade.war.state.ids)|(intervention.id %in% trade.war.intervention.ids)))$intervention.id)
## plotting
## For the discriminatory measures imposed over the past 10 years, a bar chart showing the frequency of measures harming 10 billion USD or more of trade would be produced.
## The trade war interventions (enumerated in chapter 2, stored in the GTA 24’s definitions file) would be highlighted and compared to other jumbo measures.
# for(approach in c("all", "conservative", "non-conservative")){
# Loop body kept, but run once for the conservative variant only.
approach="conservative"
plot.name = "Number of interventions per year harming trade for over 10bn USD"
if(approach=="all"){
ids=ids.all
fig1.file.name= paste(chapter.number, ".2 All interventions - ",plot.name, sep="")
table.path = paste0('0 report production/GTA 24/tables & figures/', output.path,'/',fig1.file.name,'.xlsx')
}
if(approach=="conservative"){
ids=ids.conservative
fig1.file.name= paste(chapter.number, ".2 Conservative interventions - ",plot.name, sep="")
table.path = paste0('0 report production/GTA 24/tables & figures/', output.path,'/',fig1.file.name,'.xlsx')
}
if(approach=="non-conservative"){
ids=setdiff(ids.all,ids.conservative)
fig1.file.name= paste(chapter.number, ".2 Non-conservative interventions - ",plot.name, sep="")
table.path = paste0('0 report production/GTA 24/tables & figures/', output.path,'/',fig1.file.name,'.xlsx')
}
## loop data
loop.data=subset(trade.coverage.base, intervention.id %in% ids)
threshold = jumbo.threshold.1
# Annual counts of jumbo interventions, split into non-trade-war ("2")
# and trade-war ("1") status; the status codes drive the fill legend.
annual.jumbos=aggregate(intervention.id ~ year.implemented, subset(loop.data, trade.value>=threshold & !intervention.id %in% trade.war.intervention.ids), function(x) length(unique(x)))
annual.jumbos$intervention.status="2"
tw.jumbos = subset(loop.data, trade.value>=threshold & intervention.id %in% trade.war.intervention.ids)
if (nrow(tw.jumbos)>0){
tw.jumbos=aggregate(intervention.id ~ year.implemented, tw.jumbos , function(x) length(unique(x)))
tw.jumbos$intervention.status="1"
annual.jumbos=rbind(tw.jumbos,annual.jumbos)
color.values = c(gta_colour$qualitative[2:1])
} else {color.values=gta_colour$qualitative[1]}
# Side computations (200bn/500bn cutoffs) for the conservative run only.
if (approach == 'conservative'){annual.jumbos.over.200b = aggregate(intervention.id ~ year.implemented, subset(loop.data, trade.value>=200e9), function(x) length(unique(x)))}
if (approach == 'conservative'){annual.jumbos.over.500b = aggregate(intervention.id ~ year.implemented, subset(loop.data, trade.value>=500e9), function(x) length(unique(x)))}
if (approach == 'conservative'){annual.jumbos.over.200b.ids = subset(loop.data, trade.value>=200e9 & !intervention.id %in% false.jumbos)$intervention.id}
if (approach == 'conservative'){annual.jumbos.over.500b.ids = subset(loop.data, trade.value>=500e9 & !intervention.id %in% false.jumbos)$intervention.id}
# Stacked bar chart: jumbo interventions per implementation year.
fig.1 =ggplot(annual.jumbos, aes(x=year.implemented,y=intervention.id,fill=intervention.status)) + geom_col() +
scale_x_continuous(breaks=2008:2019,labels=2008:2019) + xlab('Year of implementation of the harmful intervention') +
ylab(paste('Number of jumbo protectionist measures implemented')) +
scale_fill_manual(name='',values = color.values, labels=c('Trade war interventions','Non-trade war interventions')) +
gta_theme() +
scale_y_continuous(sec.axis = dup_axis())
fig.1
gta_plot_saver(plot=fig.1,
path=paste("0 report production/GTA 24/tables & figures/",output.path, sep=""),
name=fig1.file.name)
# Re-code status values to readable labels for the exported table.
annual.jumbos$intervention.status[annual.jumbos$intervention.status=="1"]="Trade war"
annual.jumbos$intervention.status[annual.jumbos$intervention.status=="2"]="Non-trade war"
xlsx::write.xlsx(annual.jumbos, row.names=F, file = table.path)
# }
names(annual.jumbos.over.200b) = c('year.implemented','number.of.interventions')
names(annual.jumbos.over.500b) = c('year.implemented','number.of.interventions')
# Append an "all years" total row to each cutoff table.
annual.jumbos.over.200b = rbind(annual.jumbos.over.200b,data.frame(year.implemented = 'all',number.of.interventions = sum(annual.jumbos.over.200b$number.of.interventions)))
annual.jumbos.over.500b = rbind(annual.jumbos.over.500b,data.frame(year.implemented = 'all',number.of.interventions = sum(annual.jumbos.over.500b$number.of.interventions)))
annual.jumbos.over.200b.ids = data.frame(annual.jumbos.over.200b.ids = annual.jumbos.over.200b.ids)
annual.jumbos.over.500b.ids = data.frame(annual.jumbos.over.500b.ids = annual.jumbos.over.500b.ids)
annual.jumbos.over.500b.ids$url = paste0("https://www.globaltradealert.org/intervention/", annual.jumbos.over.500b.ids$annual.jumbos.over.500b.ids)
## NOT IN FINAL REPORT
# xlsx::write.xlsx(annual.jumbos.over.200b, row.names=F, file=paste0('0 report production/GTA 24/tables & figures/',output.path,'/archive/annual jumbos over 200b.xlsx'))
# xlsx::write.xlsx(annual.jumbos.over.200b.ids, row.names=F, file=paste0('0 report production/GTA 24/tables & figures/ppt/ids annual jumbos over 200b.xlsx'))
# xlsx::write.xlsx(annual.jumbos.over.500b, row.names=F, file=paste0('0 report production/GTA 24/tables & figures/',output.path,'/archive/annual jumbos over 500b.xlsx'))
# xlsx::write.xlsx(annual.jumbos.over.500b.ids, row.names=F, file=paste0('0 report production/GTA 24/tables & figures/ppt/ids annual jumbos over 500b.xlsx'))
# Task 2 ------------------------------------------------------------------
# SE request: Please prepare a pie chart of the types of jumbo protectionist measures by MAST category.
# SE never specified a threshold and keeps going back and forward so i just did it for both thresholds of 10b and 100b
# for(approach in c("all", "conservative", "non-conservative")){
approach="conservative"
plot.name = "jumbo threshold MAST Chapter distribution harmful interventions"
if(approach=="all"){
ids=ids.all
fig2.file.name.between.thresholds= paste(chapter.number,".4 All interventions - between ",paste0(as.character(jumbo.threshold.1/1e9), 'bn'),'-',paste0(as.character(jumbo.threshold.2/1e9), 'bn '),' ',plot.name, sep="")
fig2.file.name.over.upper.threshold= paste(chapter.number,".4 All interventions - over ", paste0(as.character(jumbo.threshold.2/1e9), 'bn '),plot.name,' ', sep="")
table.path.between.thresholds = paste0('0 report production/GTA 24/tables & figures/', output.path,'/',fig2.file.name.between.thresholds,'.xlsx')
table.path.over.upper.threshold = paste0('0 report production/GTA 24/tables & figures/', output.path,'/',fig2.file.name.over.upper.threshold,'.xlsx')
}
if(approach=="conservative"){
ids=ids.conservative
fig2.file.name.between.thresholds= paste(chapter.number,".4 Conservative interventions - between ",paste0(as.character(jumbo.threshold.1/1e9), 'bn'),'-',paste0(as.character(jumbo.threshold.2/1e9), 'bn '),' ',plot.name, sep="")
fig2.file.name.over.upper.threshold= paste(chapter.number,".4 Conservative interventions - over ", paste0(as.character(jumbo.threshold.2/1e9), 'bn '),plot.name,' ', sep="")
table.path.between.thresholds = paste0('0 report production/GTA 24/tables & figures/', output.path,'/',fig2.file.name.between.thresholds,'.xlsx')
table.path.over.upper.threshold = paste0('0 report production/GTA 24/tables & figures/', output.path,'/',fig2.file.name.over.upper.threshold,'.xlsx')
}
if(approach=="non-conservative"){
ids=setdiff(ids.all,ids.conservative)
fig2.file.name.between.thresholds= paste(chapter.number,".4 Non-conservative interventions - between ",paste0(as.character(jumbo.threshold.1/1e9), 'bn'),'-',paste0(as.character(jumbo.threshold.2/1e9), 'bn '),' ',plot.name, sep="")
fig2.file.name.over.upper.threshold= paste(chapter.number,".4 Non-conservative interventions - over ", paste0(as.character(jumbo.threshold.2/1e9), 'bn '),plot.name,' ', sep="")
table.path.between.thresholds = paste0('0 report production/GTA 24/tables & figures/', output.path,'/',fig2.file.name.between.thresholds,'.xlsx')
table.path.over.upper.threshold = paste0('0 report production/GTA 24/tables & figures/', output.path,'/',fig2.file.name.over.upper.threshold,'.xlsx')
}
## Pie-chart input: interventions for the current approach that have a
## trade-coverage estimate.
loop.data=subset(trade.coverage.base, intervention.id %in% ids)
# SE: please do make a pie chart for the values between 10e9 and 100e9
## between 10e9 - 100e9 threshold
# Attach each intervention's MAST chapter; all.x=T keeps interventions without
# a chapter match (they would surface as an NA slice).
mast.trade.coverage.base=merge(subset(loop.data,trade.value >= jumbo.threshold.1 & trade.value < jumbo.threshold.2),
unique(master.sliced[,c('intervention.id','mast.chapter')]),
by="intervention.id", all.x=T)
# Count distinct interventions per MAST chapter, then convert to percentages.
fig.2.data=aggregate(intervention.id ~ mast.chapter, mast.trade.coverage.base, function(x) length(unique(x)))
fig.2.data$perc.value=fig.2.data$intervention.id/sum(fig.2.data$intervention.id)*100
fig.2.data = fig.2.data[order(fig.2.data$perc.value,decreasing=T),]
# One colour per chapter, interpolated from the GTA qualitative palette.
fig.2.data$col = colorRampPalette(gta_colour$qualitative)(nrow(fig.2.data))
#changing names to fit your ggplot code.
# NOTE(review): setnames() is a data.table function; data.table is not loaded
# explicitly in this script -- presumably attached via a dependency. Confirm.
setnames(fig.2.data, "intervention.id","value")
setnames(fig.2.data, "mast.chapter","group")
# Freeze the chapter ordering of THIS chart; `levels` and `cols` are reused by
# the >100bn chart below so each chapter keeps the same colour across charts.
levels = fig.2.data$group
fig.2.data$group = factor(fig.2.data$group,levels=levels)
cols = fig.2.data[,c('group','col')]
# Minimal theme used as a base for both pies.
blank_theme <- theme_minimal()+
theme(
axis.title.x = element_blank(),
axis.title.y = element_blank(),
panel.border = element_blank(),
panel.grid=element_blank(),
axis.ticks = element_blank(),
plot.title=element_text(size=14, face="bold")
)
# Pie chart = stacked bar chart in polar coordinates; slice labels are only
# drawn for slices of at least 1%.
fig.2 <- ggplot(fig.2.data, aes(x="", y=perc.value, fill=group)) +
geom_bar(width = 1, stat = "identity") + coord_polar("y", start=4) + blank_theme + gta_theme() +
scale_fill_manual(name='',values=fig.2.data$col) + xlab('') + ylab('') +
theme(axis.ticks=element_blank(),panel.border = element_blank(),panel.grid=element_blank()) + theme(axis.text.x=element_blank()) +
geom_text(aes(x=1.7,y = perc.value,label = ifelse(perc.value>=1,scales::percent(perc.value/100),'')), size=3,position = position_stack(vjust = 0.5)) +
theme(legend.spacing.x = unit (.5, 'cm'),
panel.background = element_blank(),
axis.line = element_line(colour = "white")
)
fig.2
gta_plot_saver(plot=fig.2,
path=paste("0 report production/GTA 24/tables & figures/",output.path, sep=""),
name= fig2.file.name.between.thresholds)
# Drop the helper colour column before writing the underlying data to xlsx.
fig.2.data = fig.2.data[,!(colnames(fig.2.data) %in% c('col'))]
names(fig.2.data) = c('Mast Chapter','Count','Percentage')
xlsx::write.xlsx(fig.2.data, row.names=F, file = table.path.between.thresholds)
## over upper threshold
# Same pipeline for interventions covering >= 100bn USD of trade.
mast.trade.coverage.base=merge(subset(loop.data,trade.value >= jumbo.threshold.2),
unique(master.sliced[,c('intervention.id','mast.chapter')]),
by="intervention.id", all.x=T)
fig.2.data=aggregate(intervention.id ~ mast.chapter, mast.trade.coverage.base, function(x) length(unique(x)))
fig.2.data$perc.value=fig.2.data$intervention.id/sum(fig.2.data$intervention.id)*100
fig.2.data = fig.2.data[order(fig.2.data$perc.value,decreasing=T),]
fig.2.data$col = colorRampPalette(gta_colour$qualitative)(nrow(fig.2.data))
#changing names to fit your ggplot code.
setnames(fig.2.data, "intervention.id","value")
setnames(fig.2.data, "mast.chapter","group")
# Reuse the factor levels from the 10bn-100bn chart so chapters line up.
fig.2.data$group = factor(fig.2.data$group,levels=levels)
blank_theme <- theme_minimal()+
theme(
axis.title.x = element_blank(),
axis.title.y = element_blank(),
panel.border = element_blank(),
panel.grid=element_blank(),
axis.ticks = element_blank(),
plot.title=element_text(size=14, face="bold"),
panel.grid.major = element_blank(), panel.grid.minor = element_blank()
)
# Colours looked up from the first chart's `cols` table so they stay consistent.
# NOTE(review): labels here use perc.value>0 while the first chart uses >=1 --
# confirm the differing label cutoff is intended.
fig.2 <- ggplot(fig.2.data, aes(x="", y=perc.value, fill=group)) +
geom_bar(width = 1, stat = "identity") + coord_polar("y", start=4) + blank_theme + gta_theme() +
scale_fill_manual(name='',values=cols[cols$group %in% fig.2.data$group,]$col) + xlab('') + ylab('') +
theme(axis.ticks=element_blank(),panel.border = element_blank(),panel.grid=element_blank()) + theme(axis.text.x=element_blank()) +
geom_text(aes(x=1.7,y = perc.value,label = ifelse(perc.value>0,scales::percent(perc.value/100),'')), size=3,position = position_stack(vjust = 0.5)) +
theme(legend.spacing.x = unit (.5, 'cm'),
panel.background = element_blank(),
axis.line = element_line(colour = "white"))
fig.2
gta_plot_saver(plot=fig.2,
path=paste("0 report production/GTA 24/tables & figures/",output.path, sep=""),
name= fig2.file.name.over.upper.threshold)
fig.2.data = fig.2.data[,!(colnames(fig.2.data) %in% c('col'))]
names(fig.2.data) = c('Mast Chapter','Count','Percentage')
xlsx::write.xlsx(fig.2.data, row.names=F, file = table.path.over.upper.threshold)
# }
# Task 3 ------------------------------------------------------------------
# SE request: Please compute the number of jumbo protectionist measures that affect only one trading partner.
# Conservative-criteria interventions that have a trade-coverage estimate.
conservative.trade.coverage.base=subset(trade.coverage.base, intervention.id %in% ids.conservative)
# Re-slice the GTA master keeping red/amber interventions that affect exactly
# one jurisdiction (nr.affected=c(1,1)) and were actually implemented.
gta_data_slicer(gta.evaluation=c('Red','Amber'),
keep.implementation.na=F,
nr.affected=c(1,1)
)
master.sliced = subset(master.sliced, eligible.firms %in% c('all','sector-specific'))
master.sliced=master.sliced[!is.na(master.sliced$affected.jurisdiction),]
# IDs of interventions hitting a single trading partner; reused by Task 4.
unique.affected.partner.interventions = unique(master.sliced$intervention.id)
unique.affected.partner.jumbo = conservative.trade.coverage.base[conservative.trade.coverage.base$intervention.id %in% unique.affected.partner.interventions,]
# Count those interventions per trade-value bracket.
# NOTE(review): brackets here use `> threshold.1 & <= threshold.2`, while the
# pie charts above use `>= threshold.1 & < threshold.2` -- confirm which
# boundary convention is intended.
total.unique.affected.partner.jumbo = data.frame(thresholds = c('10b','100b'),
'Number of remaining protectionist interventions affecting one partner' =
c(length(which(unique.affected.partner.jumbo$trade.value > jumbo.threshold.1 & unique.affected.partner.jumbo$trade.value <= jumbo.threshold.2)),
length(which(unique.affected.partner.jumbo$trade.value > jumbo.threshold.2))))
xlsx::write.xlsx(total.unique.affected.partner.jumbo, row.names=F, file = paste("0 report production/GTA 24/tables & figures/",output.path,"/",chapter.number,".3 Number of jumbo protectionist interventions affecting one trading partner.xlsx",sep=''))
# Task 4 -----------------------------------------------------------------
# Request: Please send SE the number of jumbo protectionist measures affecting 10 billion USD of trade.
# Add the value of trade affected to that list. If the number is not too large (SE will decide) then please produce a table of all of the jumbo protectionist
# measures listed in descending order of trade coverage today, indicating the implementing jurisdiction, the name of the measure, MAST chapter,
# date of implementation, whether the measure is still in force, and whether the measure affects only 1 trading partner
# (if so, identify which trading partner.)
##
# Modified his request to 100 billion USD
# Re-slice without filters (full master), then apply the eligible-firms screen
# used throughout this chapter.
gta_data_slicer()
master.sliced = subset(master.sliced, eligible.firms %in% c('all','sector-specific'))
conservative.trade.coverage.base=subset(trade.coverage.base, intervention.id %in% ids.conservative)
# Jumbo interventions above the 10bn threshold, enriched with descriptive columns.
trade.coverage.base.10b.threshold = conservative.trade.coverage.base[conservative.trade.coverage.base$trade.value > jumbo.threshold.1,]
trade.coverage.base.10b.threshold = merge(trade.coverage.base.10b.threshold,
master.sliced[,c('intervention.id','mast.chapter','implementing.jurisdiction','title','date.implemented')],
by ='intervention.id')
trade.coverage.base.10b.threshold = trade.coverage.base.10b.threshold[,c('intervention.id','implementing.jurisdiction','title','mast.chapter','date.implemented','currently.in.force','trade.value')]
# Flag interventions affecting exactly one trading partner (set computed in
# Task 3). Vectorised ifelse() replaces the previous subset-assignment
# (df[cond,]$col <- value), which errors with "replacement has 0 rows"
# whenever no intervention matches.
trade.coverage.base.10b.threshold$affects.one.partner = ifelse(trade.coverage.base.10b.threshold$intervention.id %in% unique.affected.partner.interventions, 'TRUE', 'FALSE')
# Attach the (single) affected jurisdiction for one-partner interventions.
add.unique.affected.partner = subset(master.sliced, intervention.id %in% unique.affected.partner.interventions)[,c('intervention.id','affected.jurisdiction')]
trade.coverage.base.10b.threshold = merge(trade.coverage.base.10b.threshold, add.unique.affected.partner, by='intervention.id', all.x=T)
trade.coverage.base.10b.threshold = trade.coverage.base.10b.threshold[!duplicated(trade.coverage.base.10b.threshold),]
# Sort AFTER the merge above: merge() re-sorts rows by the key column, which
# previously undid the requested descending trade-value ordering.
trade.coverage.base.10b.threshold = trade.coverage.base.10b.threshold[order(trade.coverage.base.10b.threshold$trade.value, decreasing=T),]
names(trade.coverage.base.10b.threshold) = c('Intervention ID','Implementing Jurisdiction','Title','Mast Chapter','Implemented Date','Currently in Force','Trade Value','Affects unique partner','Unique affected partner')
# NOT IN FINAL REPORT
# xlsx::write.xlsx(trade.coverage.base.10b.threshold, row.names=F, file = paste("0 report production/GTA 24/tables & figures/",output.path,"/",chapter.number,".4 Table of (10b threshold) jumbo protectionist measures.xlsx",sep=''))
# identify jumbo ids -----------------------------------------------------
# Intervention IDs whose covered trade exceeds the 10bn / 100bn thresholds.
conservative.jumbo.threshold.1.ids <- unique(
  conservative.trade.coverage.base$intervention.id[
    which(conservative.trade.coverage.base$trade.value > jumbo.threshold.1)])
conservative.jumbo.threshold.2.ids <- unique(
  conservative.trade.coverage.base$intervention.id[
    which(conservative.trade.coverage.base$trade.value > jumbo.threshold.2)])
# number computations for text --------------------------------------------
# Rebuild a tuple-level coverage base (importer x exporter x product) for all
# red/amber interventions implemented since Nov 2008; used by the removal
# computations below.
gta_data_slicer(gta.evaluation = c("red","amber"),
implementation.period = c("2008-11-01",cutoff),
keep.implementation.na = F)
coverage.base=unique(master.sliced[,c("intervention.id","date.implemented","currently.in.force","mast.id")])
coverage.base$year.implemented=year(coverage.base$date.implemented)
coverage.base$date.implemented=NULL
coverage.base$value.usd=NA
coverage.base$found.trade=T
master.temp=subset(master.sliced, intervention.id %in% coverage.base$intervention.id)
## Generating the base file
# NOTE(review): master.path is given the literal string "master.temp";
# presumably the helper resolves it to the in-memory data frame when
# master.data.frame = T -- confirm against gtalibrary.
gta_imp_exp_hs_tuples(master.path="master.temp",
master.data.frame = T)
master.tuple=merge(master.tuple, trade, by=c("i.un","a.un","affected.product"))
coverage.base$value.usd=NULL
# Unlike the earlier per-intervention aggregation, trade is aggregated per
# (intervention, importer, exporter, product) tuple; merging by intervention.id
# alone therefore expands coverage.base to one row per tuple.
coverage.base=merge(coverage.base, aggregate(trade.value ~ intervention.id+i.un+a.un+affected.product, master.tuple, sum),by="intervention.id", all.x=T)
coverage.base$found.trade[is.na(coverage.base$trade.value)]=F
coverage.base$trade.value[is.na(coverage.base$trade.value)]=0
# Keep tuples with trade data; drop known mis-coded "false jumbo" interventions.
coverage.base = subset(coverage.base, found.trade==T & !intervention.id %in% false.jumbos)
# Checkpoint: save and immediately reload so the file exists for later re-runs.
save(coverage.base, file = paste0("0 report production/GTA 24/data/", output.path,"/coverage base.Rdata"))
load(paste0("0 report production/GTA 24/data/", output.path,"/coverage base.Rdata"))
# Trade value that would be freed if various groups of interventions were
# removed. Each figure sums the trade value of the distinct
# (importer, exporter, product) tuples so overlapping interventions are not
# double-counted.
data.path = paste(chapter.number, chapter.title, sep=' - ')
load(paste0("0 report production/GTA 24/data/",data.path,"/conservative jumbos 10bn.Rdata"))
# Helper: total trade value over the distinct importer/exporter/product tuples
# of an already-filtered slice of coverage.base. Factors out the expression
# previously repeated twelve times below.
sum.unique.trade = function(df) {
sum(unique(df[,c('i.un','a.un','affected.product','trade.value')])$trade.value)
}
# removal jumbo measures (threshold 10b)
removal.df = data.frame(ten.yr.removal.jumbo.10b = sum.unique.trade(subset(coverage.base, intervention.id %in% conservative.jumbo.threshold.1.ids)))
removal.df$in.f.removal.jumbo.10b = sum.unique.trade(subset(coverage.base, intervention.id %in% conservative.jumbo.threshold.1.ids & currently.in.force == 'Yes'))
# removal jumbo measures (threshold 100b)
removal.df$all.removal.jumbo.100b = sum.unique.trade(subset(coverage.base, intervention.id %in% conservative.jumbo.threshold.2.ids))
removal.df$in.f.removal.jumbo.100b = sum.unique.trade(subset(coverage.base, intervention.id %in% conservative.jumbo.threshold.2.ids & currently.in.force == 'Yes'))
# removal export incentives
removal.df$all.removal.P7.P8 = sum.unique.trade(subset(coverage.base, mast.id %in% c('P7','P8')))
removal.df$in.f.removal.P7.P8 = sum.unique.trade(subset(coverage.base, mast.id %in% c('P7','P8') & currently.in.force == 'Yes'))
# removal L
removal.df$all.removal.L = sum.unique.trade(subset(coverage.base, mast.id == 'L'))
removal.df$in.f.removal.L = sum.unique.trade(subset(coverage.base, mast.id == 'L' & currently.in.force == 'Yes'))
# removal TARIFF
removal.df$all.removal.TARIFF = sum.unique.trade(subset(coverage.base, mast.id == 'TARIFF'))
removal.df$in.f.removal.TARIFF = sum.unique.trade(subset(coverage.base, mast.id == 'TARIFF' & currently.in.force == 'Yes'))
# removal p7 & p8 & L & tariff
removal.df$all.removal.p7.p8.L.TARIFF = sum.unique.trade(subset(coverage.base, mast.id %in% c('P7','P8','L','TARIFF')))
removal.df$in.f.removal.p7.p8.L.TARIFF = sum.unique.trade(subset(coverage.base, mast.id %in% c('P7','P8','L','TARIFF') & currently.in.force == 'Yes'))
## NOT IN FINAL REPORT
# xlsx::write.xlsx(removal.df, row.names=F, file = paste("0 report production/GTA 24/tables & figures/",output.path,"/Table trade affected with removal Jumbo p7 p8 L TARIFF.xlsx",sep=''))
# removal mast chapters ---------------------------------------------------
# Map each MAST sub-chapter id (e.g. "P7") onto its parent chapter id.
coverage.base$mast.chapter = plyr::mapvalues(coverage.base$mast.id, as.character(gtalibrary::int.mast.types$mast.subchapter.id), as.character(gtalibrary::int.mast.types$mast.chapter.id))
# Trade affected by currently-in-force interventions, per MAST chapter.
# Built in one pass with vapply() instead of growing a data.frame with rbind()
# inside a loop (quadratic copying).
mast.chapter.ids = as.character(unique(gtalibrary::int.mast.types$mast.chapter.id))
removal.in.force.mast.chapters = data.frame(
  mast.chapter = mast.chapter.ids,
  trade.affected = vapply(mast.chapter.ids, function(mast) {
    sum(unique(subset(coverage.base, mast.chapter == mast & currently.in.force == 'Yes', select=c('i.un','a.un','affected.product','trade.value')))$trade.value)
  }, numeric(1), USE.NAMES = FALSE)
)
## NOT IN FINAL REPORT
# xlsx::write.xlsx(removal.in.force.mast.chapters, row.names=F, file = paste("0 report production/GTA 24/tables & figures/",output.path,"/Table trade affected with removal mast chapter.xlsx",sep=''))
# removal all in force interventions --------------------------------------
# Trade value freed if every currently-in-force intervention were removed,
# summed over distinct importer/exporter/product tuples.
removal.in.force.interventions = data.frame(
  in.force.interventions.removed = sum(unique(
    coverage.base[which(coverage.base$currently.in.force == 'Yes'),
                  c('i.un','a.un','affected.product','trade.value')]
  )$trade.value)
)
## NOT IN FINAL REPORT
# xlsx::write.xlsx(removal.in.force.interventions, row.names=F, file = paste("0 report production/GTA 24/tables & figures/",output.path,"/Table trade affected with removal in force interventions.xlsx",sep=''))
| /code/4 - Covert jumbo protectionism is the norm/Chapter 4 script.R | no_license | global-trade-alert/gta-24 | R | false | false | 34,461 | r | rm(list=ls())
# Chapter setup: libraries, chapter metadata, input data and shared constants.
library(gtalibrary)
library(ggplot2)
library(openxlsx)
library(RColorBrewer)
library(dplyr)
library(splitstackshape)
# Set the working directory to the GTA project root.
gta_setwd()
chapter.number = 4
chapter.title = 'Covert jumbo protectionism is the norm'
# All tables/figures and intermediate data for this chapter land under this sub-folder.
output.path = paste(chapter.number, chapter.title, sep=' - ')
load('data/master_plus.Rdata')
# Shared report definitions (cutoff date, trade-war act/intervention ids, ...).
source("0 report production/GTA 24/help files/GTA 24 cutoff and definitions.R")
load("data/support tables/Final goods support table.Rdata")
# "Jumbo" trade-coverage thresholds: 10bn and 100bn USD.
jumbo.threshold.1 = 10e9
jumbo.threshold.2 = 100e9
approach = 'conservative'
# "EC: GSP for certain countries' interventions as one
# 30038 state.act.id for EC: GSP for certain countries sectors revoked for 2014-2016 period
ec.revoked.gsp.ids = unique(subset(master, state.act.id == '30038')$intervention.id)
# indian export incentive 2.3 trillion
# Interventions with implausibly large coverage estimates; excluded throughout.
false.jumbos = c(70350, 18891, 16819, 71578, 58794, 18254, 13633, 15366, 13512, 18892)
remove.ids = c(ec.revoked.gsp.ids,false.jumbos)
# coverage by intervention computations -----------------------------------
## importing trade data
# Pre-crisis (2005-2007) trade flows, averaged over the three years to give a
# baseline value per importer/exporter/product.
trade=subset(final, Year %in% c(2005:2007))[,c("Reporter.un","Partner.un","Year","Tariff.line","Value")]
rm(final)
names(trade)=c("i.un","a.un","year","affected.product","trade.value")
trade=aggregate(trade.value ~ i.un + a.un + affected.product, trade, sum)
trade$trade.value= trade$trade.value/3
# gtalibrary::elig.firms
## preparing GTA data
# Red/amber interventions implemented between Nov 2008 and the report cutoff.
gta_data_slicer(gta.evaluation = c("red","amber"),
implementation.period = c("2008-11-01",cutoff),
keep.implementation.na = F)
# we need to implement sector-specific search in data slicer
master.sliced = subset(master.sliced, eligible.firms %in% c('all','sector-specific'))
# One row per intervention; covered trade value is merged on below.
coverage.by.intervention=unique(master.sliced[,c("intervention.id","date.implemented","currently.in.force")])
coverage.by.intervention$year.implemented=year(coverage.by.intervention$date.implemented)
coverage.by.intervention$date.implemented=NULL
coverage.by.intervention$value.usd=NA
coverage.by.intervention$found.trade=T
master.temp=subset(master.sliced, intervention.id %in% coverage.by.intervention$intervention.id)
## Generating the base file
# NOTE(review): master.path is the literal string "master.temp"; presumably the
# helper uses the in-memory frame when master.data.frame = T -- confirm.
gta_imp_exp_hs_tuples(master.path="master.temp",
master.data.frame = T)
master.tuple=merge(master.tuple, trade, by=c("i.un","a.un","affected.product"))
coverage.by.intervention$value.usd=NULL
# Total trade value covered per intervention; interventions without matching
# trade data get trade.value 0 and found.trade FALSE.
coverage.by.intervention=merge(coverage.by.intervention, aggregate(trade.value ~ intervention.id, master.tuple, sum),by="intervention.id", all.x=T)
coverage.by.intervention$found.trade[is.na(coverage.by.intervention$trade.value)]=F
coverage.by.intervention$trade.value[is.na(coverage.by.intervention$trade.value)]=0
trade.coverage.base = subset(coverage.by.intervention, found.trade==T)
# remove ec.gsp.ids but keep highest
# The EC GSP state act was recorded as many interventions; keep only the one
# with the largest coverage so the act is counted once.
trade.coverage.base = subset(trade.coverage.base, trade.value==max(subset(trade.coverage.base,intervention.id %in% ec.revoked.gsp.ids)$trade.value)| !(intervention.id %in% ec.revoked.gsp.ids))
# remove false jumbos
trade.coverage.base = subset(trade.coverage.base, !(intervention.id %in% false.jumbos))
## different subsets
ids.all=unique(master.sliced$intervention.id)
# "Conservative" = national/supranational measures open to all firms or to a
# whole sector (excludes firm-specific and sub-national measures).
ids.conservative=unique(subset(master.sliced, implementation.level %in% c("national", "supranational") &
eligible.firms %in% c("all", "sector-specific"))$intervention.id)
trade.jumbo.intervention = subset(trade.coverage.base, trade.value >= jumbo.threshold.1 & intervention.id %in% ids.conservative)
jumbo.ids = unique(trade.jumbo.intervention$intervention.id)
save(jumbo.ids, trade.jumbo.intervention, file = paste0("0 report production/GTA 24/data/", output.path,"/trade per jumbo(10bn) intervention.Rdata"))
### XLSX specs
year.range = 2008:2019
gta_colour_palette()
# PDF/CDF plots -----------------------------------------------------------------
# Cumulative / probability density plots of per-intervention trade coverage.
# Only the "all" branch runs; the other two branches are kept for re-runs.
for(approach in c("all")){
# Bracket boundaries for the summary table (0, 10m, 100m, 1bn, 10bn, 100bn,
# 1tn, max+1).
thresholds = c(0,1e7,1e8,1e9,1e10,1e11,1e12,max(trade.coverage.base$trade.value)+1)
trade.thresholds.by.year = data.frame(Lower.threshold = thresholds[-length(thresholds)], Upper.threshold = thresholds[-1])
if(approach=="all"){
ids=ids.all
cdf.file.name="All interventions - CDF of harmful intervention trade coverage"
pdf.file.name="All interventions - PDF of harmful intervention trade coverage"
table.path = paste0('0 report production/GTA 24/tables & figures/', output.path, '/',chapter.number,'.1 All interventions - Interventions by year and affected trade thresholds.xlsx')
}
if(approach=="conservative"){
ids=ids.conservative
cdf.file.name="Conservative interventions - CDF of harmful intervention trade coverage"
pdf.file.name="Conservative interventions - PDF of harmful intervention trade coverage"
table.path = paste0('0 report production/GTA 24/tables & figures/', output.path, '/',chapter.number,'.1 Conservative interventions - Interventions by year and affected trade thresholds.xlsx')
}
if(approach=="non-conservative"){
ids=setdiff(ids.all,ids.conservative)
cdf.file.name="Non-conservative interventions - CDF of harmful intervention trade coverage"
pdf.file.name="Non-conservative interventions - PDF of harmful intervention trade coverage"
table.path = paste0('0 report production/GTA 24/tables & figures/', output.path, '/',chapter.number,'.1 Non-conservative interventions - Interventions by year and affected trade thresholds.xlsx')
}
## loop data
loop.data=subset(trade.coverage.base, intervention.id %in% ids)
## ecdf
# Empirical CDF of trade coverage on a log10 x-axis (100'000 ... 1 trillion).
log10.cdf=ggplot(loop.data, aes(x=log10(trade.value))) +
stat_ecdf(geom = "step", position = "identity",size=1.1) + xlab('Trade value in USD') + ylab('Share of discriminatory policy intervention since November 2008') +
ggtitle('Cumulative density function of the value of trade harmed \nby harmful interventions implemented 2008-2019') +
theme(plot.title = element_text(hjust = 0.5)) +
coord_cartesian(xlim = c(5, max(log10(loop.data$trade.value)))+0.01) +
scale_x_continuous(breaks = 5:12,labels= c('100\'000','1 million','10 million','100 million','1 billion', '10 billion', '100 billion', '1 trillion'))+
gta_theme() +
scale_y_continuous(sec.axis = dup_axis())
log10.cdf
gta_plot_saver(plot=log10.cdf,
path=paste("0 report production/GTA 24/tables & figures/",output.path, sep=""),
name=paste(chapter.number, ".1 ",cdf.file.name, sep=""))
## PDF (final version)
# NOTE(review): trade.war.us / trade.war.chn are first defined in Task 1
# further down this script, so these lines fail on a clean top-to-bottom run.
# Move the Task 1 id computation above this loop or confirm execution order.
trade.values.war.us <- trade.coverage.base[trade.coverage.base$intervention.id %in% trade.war.us,]$trade.value
trade.values.war.chn <- trade.coverage.base[trade.coverage.base$intervention.id %in% trade.war.chn,]$trade.value
trade.values.war.us = log10(trade.values.war.us)
trade.values.war.chn = log10(trade.values.war.chn)
# Density of log10 trade coverage with dashed vertical lines marking the 2018
# US/China trade-war interventions.
log10.pdf = ggplot() +
geom_density(data=loop.data,aes(x=log10(trade.value)),trim=F, size=1) + xlab('Trade value in billions of USD') + ylab('Probability Density') +
ggtitle('Probability density function of the value of trade harmed \nby harmful interventions implemented 2008-2019') +
theme(plot.title = element_text(hjust = 0.5)) +
coord_cartesian(xlim = c(9, max(log10(loop.data$trade.value)))+0.01) +
scale_x_continuous(breaks = 9:12,labels=c('1','10','100', '1\'000'))+
gta_theme() +
theme(plot.title = element_text(size = 11)) +
geom_vline(aes(xintercept = trade.values.war.chn, color = gta_colour$qualitative[3]), size=0.7,linetype='twodash', show.legend = TRUE) +
geom_vline(aes(xintercept = trade.values.war.us, color = gta_colour$qualitative[6]), size=0.7,linetype='twodash', show.legend = TRUE) +
scale_color_manual(name='',values=gta_colour$qualitative[c(7,2)],labels=c('China harmful interventions \nimplemented in 2018 trade war with the US','US harmful interventions \nimplemented in 2018 trade war with China')) +
scale_y_continuous(sec.axis = dup_axis())
log10.pdf
gta_plot_saver(plot=log10.pdf,
path=paste("0 report production/GTA 24/tables & figures/",output.path, sep=""),
name=paste(chapter.number,".3 ",pdf.file.name, sep=""))
## XLSX
# an XSLX with summary stats about how many interventions affected between x1 and x2 worth of trade for several brackets eg. less than 1bn, 1-2bn, 2-3bn or so
# choose those brackets as they make sense
# Count interventions per implementation year within each value bracket.
# NOTE(review): the comparison is strictly `> lower & < upper`, so a value
# exactly equal to a bracket boundary is counted in neither bracket -- confirm.
for (i in 1:nrow(trade.thresholds.by.year)){
for (year in 1:length(year.range)){
trade.thresholds.by.year[i,year+2] = length(which(loop.data[loop.data$year.implemented==year.range[year],]$trade.value > trade.thresholds.by.year$Lower.threshold[i] &
loop.data[loop.data$year.implemented==year.range[year],]$trade.value < trade.thresholds.by.year$Upper.threshold[i]))
names(trade.thresholds.by.year)[year+2] = paste(as.character(year.range[year]),'Number of interventions harming trade between threshold values' )
}
}
colnames(trade.thresholds.by.year)[1:2] = c('Lower Threshold', 'Upper Threshold')
# class(trade.thresholds.by.year$`Lower Threshold`) <- "scientific"
# class(trade.thresholds.by.year$`Upper Threshold`) <- "scientific"
loop.data=loop.data[order(loop.data$trade.value, decreasing=T),c('intervention.id','year.implemented','trade.value')]
xlsx::write.xlsx(trade.thresholds.by.year, file=table.path, row.names = F, sheetName = "Harmed trade" )
# xlsx::write.xlsx(trade.thresholds.by.year, file=table.path, row.names = F, sheetName = "Underlying data", append=T)
}
# Task 1 ---------------------------------------------------------------------
# SE request: For the discriminatory measures imposed over the past 10 years, a bar chart showing the frequency of measures harming 10 billion USD
# or more of trade would be produced. The trade war interventions (enumerated in chapter 2, stored in the GTA 24’s definitions file)
# would be highlighted and compared to other jumbo measures.
# Trade-war identifiers: state acts and interventions flagged in the shared
# definitions file (US-China tariff war plus EU safeguard steel acts).
trade.war.state.ids = c(china.us.trade.war.act.ids, eu.sg.steel.act.ids)
trade.war.intervention.ids = c(new.actions.intervention.ids ,china.us.trade.war.intervention.ids)
# Full master slice (no filters), then the chapter's eligible-firms screen.
gta_data_slicer()
master.sliced = subset(master.sliced, eligible.firms %in% c('all','sector-specific'))
# Red-evaluated interventions belonging to a trade-war state act or listed
# individually as trade-war interventions.
trade.war.intervention.ids = unique(subset(master.sliced, (gta.evaluation == 'Red')&((state.act.id %in% trade.war.state.ids)|(intervention.id %in% trade.war.intervention.ids)))$intervention.id)
# Direction-specific subsets: US measures hitting China, and vice versa; these
# feed the vertical markers in the PDF plot above and Task 1 below.
trade.war.us = unique(subset(master.sliced,(implementing.jurisdiction == 'United States of America')&(affected.jurisdiction == 'China')&(gta.evaluation == 'Red')&((state.act.id %in% trade.war.state.ids)|(intervention.id %in% trade.war.intervention.ids)))$intervention.id)
trade.war.chn = unique(subset(master.sliced,(implementing.jurisdiction == 'China')&(affected.jurisdiction == 'United States of America')&(gta.evaluation == 'Red')&((state.act.id %in% trade.war.state.ids)|(intervention.id %in% trade.war.intervention.ids)))$intervention.id)
## plotting
## For the discriminatory measures imposed over the past 10 years, a bar chart showing the frequency of measures harming 10 billion USD or more of trade would be produced.
## The trade war interventions (enumerated in chapter 2, stored in the GTA 24’s definitions file) would be highlighted and compared to other jumbo measures.
# for(approach in c("all", "conservative", "non-conservative")){
# The multi-approach loop is disabled; only the conservative variant is produced.
approach="conservative"
plot.name = "Number of interventions per year harming trade for over 10bn USD"
if(approach=="all"){
ids=ids.all
fig1.file.name= paste(chapter.number, ".2 All interventions - ",plot.name, sep="")
table.path = paste0('0 report production/GTA 24/tables & figures/', output.path,'/',fig1.file.name,'.xlsx')
}
if(approach=="conservative"){
ids=ids.conservative
fig1.file.name= paste(chapter.number, ".2 Conservative interventions - ",plot.name, sep="")
table.path = paste0('0 report production/GTA 24/tables & figures/', output.path,'/',fig1.file.name,'.xlsx')
}
if(approach=="non-conservative"){
ids=setdiff(ids.all,ids.conservative)
fig1.file.name= paste(chapter.number, ".2 Non-conservative interventions - ",plot.name, sep="")
table.path = paste0('0 report production/GTA 24/tables & figures/', output.path,'/',fig1.file.name,'.xlsx')
}
## loop data
loop.data=subset(trade.coverage.base, intervention.id %in% ids)
threshold = jumbo.threshold.1
# Jumbo (>=10bn) interventions per implementation year, split into trade-war
# ("1") and non-trade-war ("2") stacks.
annual.jumbos=aggregate(intervention.id ~ year.implemented, subset(loop.data, trade.value>=threshold & !intervention.id %in% trade.war.intervention.ids), function(x) length(unique(x)))
annual.jumbos$intervention.status="2"
tw.jumbos = subset(loop.data, trade.value>=threshold & intervention.id %in% trade.war.intervention.ids)
# Only add the trade-war stack (and its second colour) if any such jumbo exists.
if (nrow(tw.jumbos)>0){
tw.jumbos=aggregate(intervention.id ~ year.implemented, tw.jumbos , function(x) length(unique(x)))
tw.jumbos$intervention.status="1"
annual.jumbos=rbind(tw.jumbos,annual.jumbos)
color.values = c(gta_colour$qualitative[2:1])
} else {color.values=gta_colour$qualitative[1]}
# Side tables (conservative run only): annual counts above 200bn / 500bn, and
# the corresponding intervention ids.
if (approach == 'conservative'){annual.jumbos.over.200b = aggregate(intervention.id ~ year.implemented, subset(loop.data, trade.value>=200e9), function(x) length(unique(x)))}
if (approach == 'conservative'){annual.jumbos.over.500b = aggregate(intervention.id ~ year.implemented, subset(loop.data, trade.value>=500e9), function(x) length(unique(x)))}
if (approach == 'conservative'){annual.jumbos.over.200b.ids = subset(loop.data, trade.value>=200e9 & !intervention.id %in% false.jumbos)$intervention.id}
if (approach == 'conservative'){annual.jumbos.over.500b.ids = subset(loop.data, trade.value>=500e9 & !intervention.id %in% false.jumbos)$intervention.id}
# Stacked bar chart of jumbo interventions per year.
fig.1 =ggplot(annual.jumbos, aes(x=year.implemented,y=intervention.id,fill=intervention.status)) + geom_col() +
scale_x_continuous(breaks=2008:2019,labels=2008:2019) + xlab('Year of implementation of the harmful intervention') +
ylab(paste('Number of jumbo protectionist measures implemented')) +
scale_fill_manual(name='',values = color.values, labels=c('Trade war interventions','Non-trade war interventions')) +
gta_theme() +
scale_y_continuous(sec.axis = dup_axis())
fig.1
gta_plot_saver(plot=fig.1,
path=paste("0 report production/GTA 24/tables & figures/",output.path, sep=""),
name=fig1.file.name)
# Replace the numeric status codes with readable labels for the xlsx output.
annual.jumbos$intervention.status[annual.jumbos$intervention.status=="1"]="Trade war"
annual.jumbos$intervention.status[annual.jumbos$intervention.status=="2"]="Non-trade war"
xlsx::write.xlsx(annual.jumbos, row.names=F, file = table.path)
# }
# Finalise the 200bn/500bn side tables: add an "all years" total row and a
# GTA website URL per intervention id.
names(annual.jumbos.over.200b) = c('year.implemented','number.of.interventions')
names(annual.jumbos.over.500b) = c('year.implemented','number.of.interventions')
annual.jumbos.over.200b = rbind(annual.jumbos.over.200b,data.frame(year.implemented = 'all',number.of.interventions = sum(annual.jumbos.over.200b$number.of.interventions)))
annual.jumbos.over.500b = rbind(annual.jumbos.over.500b,data.frame(year.implemented = 'all',number.of.interventions = sum(annual.jumbos.over.500b$number.of.interventions)))
annual.jumbos.over.200b.ids = data.frame(annual.jumbos.over.200b.ids = annual.jumbos.over.200b.ids)
annual.jumbos.over.500b.ids = data.frame(annual.jumbos.over.500b.ids = annual.jumbos.over.500b.ids)
annual.jumbos.over.500b.ids$url = paste0("https://www.globaltradealert.org/intervention/", annual.jumbos.over.500b.ids$annual.jumbos.over.500b.ids)
## NOT IN FINAL REPORT
# xlsx::write.xlsx(annual.jumbos.over.200b, row.names=F, file=paste0('0 report production/GTA 24/tables & figures/',output.path,'/archive/annual jumbos over 200b.xlsx'))
# xlsx::write.xlsx(annual.jumbos.over.200b.ids, row.names=F, file=paste0('0 report production/GTA 24/tables & figures/ppt/ids annual jumbos over 200b.xlsx'))
# xlsx::write.xlsx(annual.jumbos.over.500b, row.names=F, file=paste0('0 report production/GTA 24/tables & figures/',output.path,'/archive/annual jumbos over 500b.xlsx'))
# xlsx::write.xlsx(annual.jumbos.over.500b.ids, row.names=F, file=paste0('0 report production/GTA 24/tables & figures/ppt/ids annual jumbos over 500b.xlsx'))
# Task 2 ------------------------------------------------------------------
# SE request: Please prepare a pie chart of the types of jumbo protectionist measures by MAST category.
# SE never specified a threshold and keeps going back and forward so i just did it for both thresholds of 10b and 100b
# for(approach in c("all", "conservative", "non-conservative")){
# The multi-approach loop is disabled; only the conservative variant is produced.
approach="conservative"
plot.name = "jumbo threshold MAST Chapter distribution harmful interventions"
if(approach=="all"){
ids=ids.all
fig2.file.name.between.thresholds= paste(chapter.number,".4 All interventions - between ",paste0(as.character(jumbo.threshold.1/1e9), 'bn'),'-',paste0(as.character(jumbo.threshold.2/1e9), 'bn '),' ',plot.name, sep="")
fig2.file.name.over.upper.threshold= paste(chapter.number,".4 All interventions - over ", paste0(as.character(jumbo.threshold.2/1e9), 'bn '),plot.name,' ', sep="")
table.path.between.thresholds = paste0('0 report production/GTA 24/tables & figures/', output.path,'/',fig2.file.name.between.thresholds,'.xlsx')
table.path.over.upper.threshold = paste0('0 report production/GTA 24/tables & figures/', output.path,'/',fig2.file.name.over.upper.threshold,'.xlsx')
}
if(approach=="conservative"){
ids=ids.conservative
fig2.file.name.between.thresholds= paste(chapter.number,".4 Conservative interventions - between ",paste0(as.character(jumbo.threshold.1/1e9), 'bn'),'-',paste0(as.character(jumbo.threshold.2/1e9), 'bn '),' ',plot.name, sep="")
fig2.file.name.over.upper.threshold= paste(chapter.number,".4 Conservative interventions - over ", paste0(as.character(jumbo.threshold.2/1e9), 'bn '),plot.name,' ', sep="")
table.path.between.thresholds = paste0('0 report production/GTA 24/tables & figures/', output.path,'/',fig2.file.name.between.thresholds,'.xlsx')
table.path.over.upper.threshold = paste0('0 report production/GTA 24/tables & figures/', output.path,'/',fig2.file.name.over.upper.threshold,'.xlsx')
}
if(approach=="non-conservative"){
ids=setdiff(ids.all,ids.conservative)
fig2.file.name.between.thresholds= paste(chapter.number,".4 Non-conservative interventions - between ",paste0(as.character(jumbo.threshold.1/1e9), 'bn'),'-',paste0(as.character(jumbo.threshold.2/1e9), 'bn '),' ',plot.name, sep="")
fig2.file.name.over.upper.threshold= paste(chapter.number,".4 Non-conservative interventions - over ", paste0(as.character(jumbo.threshold.2/1e9), 'bn '),plot.name,' ', sep="")
table.path.between.thresholds = paste0('0 report production/GTA 24/tables & figures/', output.path,'/',fig2.file.name.between.thresholds,'.xlsx')
table.path.over.upper.threshold = paste0('0 report production/GTA 24/tables & figures/', output.path,'/',fig2.file.name.over.upper.threshold,'.xlsx')
}
## Pie-chart input: interventions for the current approach with coverage data.
loop.data=subset(trade.coverage.base, intervention.id %in% ids)
# SE: please do make a pie chart for the values between 10e9 and 100e9
## between 10e9 - 100e9 threshold
# Attach each intervention's MAST chapter; all.x=T keeps unmatched interventions.
mast.trade.coverage.base=merge(subset(loop.data,trade.value >= jumbo.threshold.1 & trade.value < jumbo.threshold.2),
unique(master.sliced[,c('intervention.id','mast.chapter')]),
by="intervention.id", all.x=T)
# Count distinct interventions per MAST chapter, then convert to percentages.
fig.2.data=aggregate(intervention.id ~ mast.chapter, mast.trade.coverage.base, function(x) length(unique(x)))
fig.2.data$perc.value=fig.2.data$intervention.id/sum(fig.2.data$intervention.id)*100
fig.2.data = fig.2.data[order(fig.2.data$perc.value,decreasing=T),]
# One colour per chapter, interpolated from the GTA qualitative palette.
fig.2.data$col = colorRampPalette(gta_colour$qualitative)(nrow(fig.2.data))
#changing names to fit your ggplot code.
# NOTE(review): setnames() is a data.table function; data.table is not loaded
# explicitly in this script -- presumably attached via a dependency. Confirm.
setnames(fig.2.data, "intervention.id","value")
setnames(fig.2.data, "mast.chapter","group")
# Freeze the chapter ordering; `levels` and `cols` are reused by the >100bn
# chart so each chapter keeps the same colour across charts.
levels = fig.2.data$group
fig.2.data$group = factor(fig.2.data$group,levels=levels)
cols = fig.2.data[,c('group','col')]
# Minimal theme used as a base for the pies.
blank_theme <- theme_minimal()+
theme(
axis.title.x = element_blank(),
axis.title.y = element_blank(),
panel.border = element_blank(),
panel.grid=element_blank(),
axis.ticks = element_blank(),
plot.title=element_text(size=14, face="bold")
)
# Pie chart = stacked bar chart in polar coordinates; slice labels are only
# drawn for slices of at least 1%.
fig.2 <- ggplot(fig.2.data, aes(x="", y=perc.value, fill=group)) +
geom_bar(width = 1, stat = "identity") + coord_polar("y", start=4) + blank_theme + gta_theme() +
scale_fill_manual(name='',values=fig.2.data$col) + xlab('') + ylab('') +
theme(axis.ticks=element_blank(),panel.border = element_blank(),panel.grid=element_blank()) + theme(axis.text.x=element_blank()) +
geom_text(aes(x=1.7,y = perc.value,label = ifelse(perc.value>=1,scales::percent(perc.value/100),'')), size=3,position = position_stack(vjust = 0.5)) +
theme(legend.spacing.x = unit (.5, 'cm'),
panel.background = element_blank(),
axis.line = element_line(colour = "white")
)
fig.2
gta_plot_saver(plot=fig.2,
path=paste("0 report production/GTA 24/tables & figures/",output.path, sep=""),
name= fig2.file.name.between.thresholds)
# Drop the helper colour column before writing the underlying data to xlsx.
fig.2.data = fig.2.data[,!(colnames(fig.2.data) %in% c('col'))]
names(fig.2.data) = c('Mast Chapter','Count','Percentage')
xlsx::write.xlsx(fig.2.data, row.names=F, file = table.path.between.thresholds)
## over upper threshold
# Same pipeline for interventions covering >= 100bn USD of trade.
mast.trade.coverage.base=merge(subset(loop.data,trade.value >= jumbo.threshold.2),
unique(master.sliced[,c('intervention.id','mast.chapter')]),
by="intervention.id", all.x=T)
fig.2.data=aggregate(intervention.id ~ mast.chapter, mast.trade.coverage.base, function(x) length(unique(x)))
fig.2.data$perc.value=fig.2.data$intervention.id/sum(fig.2.data$intervention.id)*100
fig.2.data = fig.2.data[order(fig.2.data$perc.value,decreasing=T),]
fig.2.data$col = colorRampPalette(gta_colour$qualitative)(nrow(fig.2.data))
#changing names to fit your ggplot code.
setnames(fig.2.data, "intervention.id","value")
setnames(fig.2.data, "mast.chapter","group")
fig.2.data$group = factor(fig.2.data$group,levels=levels)
blank_theme <- theme_minimal()+
theme(
axis.title.x = element_blank(),
axis.title.y = element_blank(),
panel.border = element_blank(),
panel.grid=element_blank(),
axis.ticks = element_blank(),
plot.title=element_text(size=14, face="bold"),
panel.grid.major = element_blank(), panel.grid.minor = element_blank()
)
fig.2 <- ggplot(fig.2.data, aes(x="", y=perc.value, fill=group)) +
geom_bar(width = 1, stat = "identity") + coord_polar("y", start=4) + blank_theme + gta_theme() +
scale_fill_manual(name='',values=cols[cols$group %in% fig.2.data$group,]$col) + xlab('') + ylab('') +
theme(axis.ticks=element_blank(),panel.border = element_blank(),panel.grid=element_blank()) + theme(axis.text.x=element_blank()) +
geom_text(aes(x=1.7,y = perc.value,label = ifelse(perc.value>0,scales::percent(perc.value/100),'')), size=3,position = position_stack(vjust = 0.5)) +
theme(legend.spacing.x = unit (.5, 'cm'),
panel.background = element_blank(),
axis.line = element_line(colour = "white"))
fig.2
gta_plot_saver(plot=fig.2,
path=paste("0 report production/GTA 24/tables & figures/",output.path, sep=""),
name= fig2.file.name.over.upper.threshold)
fig.2.data = fig.2.data[,!(colnames(fig.2.data) %in% c('col'))]
names(fig.2.data) = c('Mast Chapter','Count','Percentage')
xlsx::write.xlsx(fig.2.data, row.names=F, file = table.path.over.upper.threshold)
# }
# Task 3 ------------------------------------------------------------------
# SE request: Please compute the number of jumbo protectionist measures that affect only one trading partner.
conservative.trade.coverage.base=subset(trade.coverage.base, intervention.id %in% ids.conservative)
# Re-slice the master data down to harmful (Red/Amber) interventions that hit
# exactly one affected jurisdiction (nr.affected = c(1,1)).
# NOTE(review): gta_data_slicer() appears to (re)populate `master.sliced` in
# the calling environment -- confirm against the gtalibrary helper.
gta_data_slicer(gta.evaluation=c('Red','Amber'),
keep.implementation.na=F,
nr.affected=c(1,1)
)
master.sliced = subset(master.sliced, eligible.firms %in% c('all','sector-specific'))
master.sliced=master.sliced[!is.na(master.sliced$affected.jurisdiction),]
unique.affected.partner.interventions = unique(master.sliced$intervention.id)
# Jumbo rows belonging to single-partner interventions, counted per threshold
# bucket.  NOTE(review): the buckets here are (t1, t2] and (t2, Inf) while the
# pie charts above use [t1, t2) -- confirm the boundary convention.
unique.affected.partner.jumbo = conservative.trade.coverage.base[conservative.trade.coverage.base$intervention.id %in% unique.affected.partner.interventions,]
total.unique.affected.partner.jumbo = data.frame(thresholds = c('10b','100b'),
'Number of remaining protectionist interventions affecting one partner' =
c(length(which(unique.affected.partner.jumbo$trade.value > jumbo.threshold.1 & unique.affected.partner.jumbo$trade.value <= jumbo.threshold.2)),
length(which(unique.affected.partner.jumbo$trade.value > jumbo.threshold.2))))
xlsx::write.xlsx(total.unique.affected.partner.jumbo, row.names=F, file = paste("0 report production/GTA 24/tables & figures/",output.path,"/",chapter.number,".3 Number of jumbo protectionist interventions affecting one trading partner.xlsx",sep=''))
# Task 4 -----------------------------------------------------------------
# Request: Please send SE the number of jumbo protectionist measures affecting 10 billion USD of trade.
# Add the value of trade affected to that list. If the number is not too large (SE will decide) then please produce a table of all of the jumbo protectionist
# measures listed in descending order of trade coverage today, indicating the implementing jurisdiction, the name of the measure, MAST chapter,
# date of implementation, whether the measure is still in force, and whether the measure affects only 1 trading partner
# (if so, identify which trading partner.)
##
# Modified his request to 100 billion USD
# Unfiltered slice: all interventions, restricted below to firm-level scope.
gta_data_slicer()
master.sliced = subset(master.sliced, eligible.firms %in% c('all','sector-specific'))
conservative.trade.coverage.base=subset(trade.coverage.base, intervention.id %in% ids.conservative)
# Build the descriptive table of jumbos above the 10bn threshold.
trade.coverage.base.10b.threshold = conservative.trade.coverage.base[conservative.trade.coverage.base$trade.value > jumbo.threshold.1,]
trade.coverage.base.10b.threshold = merge(trade.coverage.base.10b.threshold,
master.sliced[,c('intervention.id','mast.chapter','implementing.jurisdiction','title','date.implemented')],
by ='intervention.id')
trade.coverage.base.10b.threshold = trade.coverage.base.10b.threshold[,c('intervention.id','implementing.jurisdiction','title','mast.chapter','date.implemented','currently.in.force','trade.value')]
# Single-partner flag.  NOTE(review): stored as the character strings
# 'TRUE'/'FALSE', not logicals -- downstream consumers must expect strings.
trade.coverage.base.10b.threshold$affects.one.partner = 'FALSE'
trade.coverage.base.10b.threshold[trade.coverage.base.10b.threshold$intervention.id %in% unique.affected.partner.interventions,]$affects.one.partner = 'TRUE'
trade.coverage.base.10b.threshold = trade.coverage.base.10b.threshold[order(trade.coverage.base.10b.threshold$trade.value, decreasing=T),]
# For single-partner interventions, attach the identity of that partner.
add.unique.affected.partner = subset(master.sliced, intervention.id %in% unique.affected.partner.interventions)[,c('intervention.id','affected.jurisdiction')]
trade.coverage.base.10b.threshold = merge(trade.coverage.base.10b.threshold, add.unique.affected.partner, by='intervention.id', all.x=T)
trade.coverage.base.10b.threshold = trade.coverage.base.10b.threshold[!duplicated(trade.coverage.base.10b.threshold),]
names(trade.coverage.base.10b.threshold) = c('Intervention ID','Implementing Jurisdiction','Title','Mast Chapter','Implemented Date','Currently in Force','Trade Value','Affects unique partner','Unique affected partner')
# NOT IN FINAL REPORT
# xlsx::write.xlsx(trade.coverage.base.10b.threshold, row.names=F, file = paste("0 report production/GTA 24/tables & figures/",output.path,"/",chapter.number,".4 Table of (10b threshold) jumbo protectionist measures.xlsx",sep=''))
# identify jumbo ids -----------------------------------------------------
# Ids of conservative interventions strictly above each threshold.
conservative.jumbo.threshold.1.ids = unique(subset(conservative.trade.coverage.base,trade.value>jumbo.threshold.1)$intervention.id)
conservative.jumbo.threshold.2.ids = unique(subset(conservative.trade.coverage.base,trade.value>jumbo.threshold.2)$intervention.id)
# number computations for text --------------------------------------------
# Slice the crisis-era (Nov 2008 to cutoff) harmful interventions and build a
# per-intervention trade-coverage base.
gta_data_slicer(gta.evaluation = c("red","amber"),
implementation.period = c("2008-11-01",cutoff),
keep.implementation.na = F)
coverage.base=unique(master.sliced[,c("intervention.id","date.implemented","currently.in.force","mast.id")])
coverage.base$year.implemented=year(coverage.base$date.implemented)
coverage.base$date.implemented=NULL
# NOTE(review): value.usd is set to NA here and dropped again a few lines
# below before the merge -- the column is effectively unused.
coverage.base$value.usd=NA
coverage.base$found.trade=T
master.temp=subset(master.sliced, intervention.id %in% coverage.base$intervention.id)
## Generating the base file
# NOTE(review): master.path is passed as the *string* "master.temp"; the
# gtalibrary helper presumably resolves that name when master.data.frame = T
# -- confirm.  It is expected to create `master.tuple`.
gta_imp_exp_hs_tuples(master.path="master.temp",
master.data.frame = T)
master.tuple=merge(master.tuple, trade, by=c("i.un","a.un","affected.product"))
coverage.base$value.usd=NULL
# Merging by intervention.id only expands coverage.base to one row per
# (importer, exporter, product) tuple; the tuple columns come from the
# aggregate() result.
coverage.base=merge(coverage.base, aggregate(trade.value ~ intervention.id+i.un+a.un+affected.product, master.tuple, sum),by="intervention.id", all.x=T)
coverage.base$found.trade[is.na(coverage.base$trade.value)]=F
coverage.base$trade.value[is.na(coverage.base$trade.value)]=0
# Drop interventions without matched trade and the manually flagged false
# jumbos (`false.jumbos` is defined upstream, outside this excerpt).
coverage.base = subset(coverage.base, found.trade==T & !intervention.id %in% false.jumbos)
# Checkpoint: save then immediately re-load (a no-op round trip that lets the
# expensive steps above be skipped on manual re-runs).
save(coverage.base, file = paste0("0 report production/GTA 24/data/", output.path,"/coverage base.Rdata"))
load(paste0("0 report production/GTA 24/data/", output.path,"/coverage base.Rdata"))
data.path = paste(chapter.number, chapter.title, sep=' - ')
load(paste0("0 report production/GTA 24/data/",data.path,"/conservative jumbos 10bn.Rdata"))
# Each "removal" figure below sums the trade value of the unique
# (importer, exporter, product) tuples covered by the given intervention set,
# i.e. the trade freed if those measures were removed.
# removal jumbo measures (threshold 10b)
removal.df = data.frame(ten.yr.removal.jumbo.10b = sum(unique(subset(coverage.base, intervention.id %in% conservative.jumbo.threshold.1.ids, select=c('i.un','a.un','affected.product','trade.value')))$trade.value))
removal.df$in.f.removal.jumbo.10b = sum(unique(subset(coverage.base, intervention.id %in% conservative.jumbo.threshold.1.ids & currently.in.force == 'Yes', select=c('i.un','a.un','affected.product','trade.value')))$trade.value)
# removal jumbo measures (threshold 100b)
removal.df$all.removal.jumbo.100b = sum(unique(subset(coverage.base, intervention.id %in% conservative.jumbo.threshold.2.ids, select=c('i.un','a.un','affected.product','trade.value')))$trade.value)
removal.df$in.f.removal.jumbo.100b = sum(unique(subset(coverage.base, intervention.id %in% conservative.jumbo.threshold.2.ids & currently.in.force == 'Yes', select=c('i.un','a.un','affected.product','trade.value')))$trade.value)
# removal export incentives
removal.df$all.removal.P7.P8 = sum(unique(subset(coverage.base, mast.id %in% c('P7','P8'), select=c('i.un','a.un','affected.product','trade.value')))$trade.value)
removal.df$in.f.removal.P7.P8 = sum(unique(subset(coverage.base, mast.id %in% c('P7','P8') & currently.in.force == 'Yes', select=c('i.un','a.un','affected.product','trade.value')))$trade.value)
# removal L
removal.df$all.removal.L = sum(unique(subset(coverage.base, mast.id == 'L', select=c('i.un','a.un','affected.product','trade.value')))$trade.value)
removal.df$in.f.removal.L = sum(unique(subset(coverage.base, mast.id == 'L'& currently.in.force == 'Yes', select=c('i.un','a.un','affected.product','trade.value')))$trade.value)
# removal TARIFF
removal.df$all.removal.TARIFF = sum(unique(subset(coverage.base, mast.id == 'TARIFF', select=c('i.un','a.un','affected.product','trade.value')))$trade.value)
removal.df$in.f.removal.TARIFF = sum(unique(subset(coverage.base, mast.id == 'TARIFF'& currently.in.force == 'Yes', select=c('i.un','a.un','affected.product','trade.value')))$trade.value)
# removal p7 & p8 & L & tariff
removal.df$all.removal.p7.p8.L.TARIFF = sum(unique(subset(coverage.base, mast.id %in% c('P7','P8','L','TARIFF'), select=c('i.un','a.un','affected.product','trade.value')))$trade.value)
removal.df$in.f.removal.p7.p8.L.TARIFF = sum(unique(subset(coverage.base, mast.id %in% c('P7','P8','L','TARIFF') & currently.in.force == 'Yes', select=c('i.un','a.un','affected.product','trade.value')))$trade.value)
## NOT IN FINAL REPORT
# xlsx::write.xlsx(removal.df, row.names=F, file = paste("0 report production/GTA 24/tables & figures/",output.path,"/Table trade affected with removal Jumbo p7 p8 L TARIFF.xlsx",sep=''))
# removal mast chapters ---------------------------------------------------
# Map MAST subchapter ids onto chapter ids, then compute the in-force removal
# figure per chapter.
coverage.base$mast.chapter = plyr::mapvalues(coverage.base$mast.id, as.character(gtalibrary::int.mast.types$mast.subchapter.id), as.character(gtalibrary::int.mast.types$mast.chapter.id))
removal.in.force.mast.chapters = data.frame()
for (mast in unique(gtalibrary::int.mast.types$mast.chapter.id)){
temp = sum(unique(subset(coverage.base, mast.chapter == mast & currently.in.force == 'Yes', select=c('i.un','a.un','affected.product','trade.value')))$trade.value)
removal.in.force.mast.chapters = rbind(removal.in.force.mast.chapters,
data.frame(mast.chapter = mast,
trade.affected = temp))
}
## NOT IN FINAL REPORT
# xlsx::write.xlsx(removal.in.force.mast.chapters, row.names=F, file = paste("0 report production/GTA 24/tables & figures/",output.path,"/Table trade affected with removal mast chapter.xlsx",sep=''))
# removal all in force interventions --------------------------------------
# Trade covered by every currently-in-force intervention in the base.
removal.in.force.interventions = data.frame(
in.force.interventions.removed = sum(unique(subset(coverage.base, currently.in.force == 'Yes', select=c('i.un','a.un','affected.product','trade.value')))$trade.value))
## NOT IN FINAL REPORT
# xlsx::write.xlsx(removal.in.force.interventions, row.names=F, file = paste("0 report production/GTA 24/tables & figures/",output.path,"/Table trade affected with removal in force interventions.xlsx",sep=''))
|
############################################################
#                                                          #
#                    Employment status                     #
#                                                          #
############################################################
# Supplementary figure H: prevalence of chronic pain by employment status,
# drawn as point estimates with confidence-interval error bars and annotated
# with unweighted sample sizes.  Expects data/employment.csv to provide the
# columns employment, estimate_chronic, ci.low_chronic, ci.high_chronic and
# unweighted_n (percentages on a 0-100 scale).
# Load packages
library(tidyverse)
# NOTE(review): magrittr is attached but no %>% pipe is used in this script.
library(magrittr)
# Palettes
# Two-colour palette; show_col() previews it in the graphics device.
pal_za <- c('#9ecae1', '#3182bd')
scales::show_col(pal_za)
# Load data
data <- read_csv('data/employment.csv')
# Glimpse
glimpse(data)
# Plot
# Error bars + filled points; sample-size labels are pinned at y = 30, which
# is also the y-axis ceiling set below, so they sit along the top edge.
pp <- ggplot(data = data) +
aes(x = employment,
y = estimate_chronic,
ymin = ci.low_chronic,
ymax = ci.high_chronic) +
geom_errorbar(size = 1,
width = 0.25,
colour = '#000000') +
geom_point(size = 8,
shape = 21,
stroke = 1,
fill = pal_za[[2]],
colour = '#000000') +
geom_text(aes(y = 30,
label = paste0('(', unweighted_n, ')')),
size = 6) +
labs(title = 'H: Prevalence by employment status',
subtitle = 'Numbers in parentheses show the unweighted sample sizes',
x = 'Has had employment in the past 12 months',
y = 'Prevalence of chronic pain (%)') +
scale_y_continuous(limits = c(0, 30)) +
theme_bw(base_size = 20) +
theme(panel.grid = element_blank(),
plot.subtitle = element_text(size = 14),
axis.title.y = element_text(margin = margin(r = 1,
unit = 'lines')),
axis.title.x = element_text(margin = margin(t = 1,
unit = 'lines')),
axis.text.y = element_text(colour = '#000000'),
axis.text.x = element_text(colour = '#000000'))
# Save
# 8x8 inch PNG for the supplement.
ggsave(filename = 'figures/supplement-1-H-employment.png',
plot = pp,
height = 8,
width = 8)
| /supplement-plot-H-employment.R | permissive | kamermanpr/za-pain-epidemiology | R | false | false | 1,935 | r | ############################################################
#                                                          #
#                    Employment status                     #
#                                                          #
############################################################
# Supplementary figure H: prevalence of chronic pain by employment status as
# point estimates with CI error bars, annotated with unweighted sample sizes
# read from data/employment.csv.
# Load packages
library(tidyverse)
# NOTE(review): magrittr is attached but no %>% pipe appears in this script.
library(magrittr)
# Palettes
pal_za <- c('#9ecae1', '#3182bd')
scales::show_col(pal_za)
# Load data
data <- read_csv('data/employment.csv')
# Glimpse
glimpse(data)
# Plot
# Sample-size labels are pinned at y = 30, the axis ceiling set below.
pp <- ggplot(data = data) +
aes(x = employment,
y = estimate_chronic,
ymin = ci.low_chronic,
ymax = ci.high_chronic) +
geom_errorbar(size = 1,
width = 0.25,
colour = '#000000') +
geom_point(size = 8,
shape = 21,
stroke = 1,
fill = pal_za[[2]],
colour = '#000000') +
geom_text(aes(y = 30,
label = paste0('(', unweighted_n, ')')),
size = 6) +
labs(title = 'H: Prevalence by employment status',
subtitle = 'Numbers in parentheses show the unweighted sample sizes',
x = 'Has had employment in the past 12 months',
y = 'Prevalence of chronic pain (%)') +
scale_y_continuous(limits = c(0, 30)) +
theme_bw(base_size = 20) +
theme(panel.grid = element_blank(),
plot.subtitle = element_text(size = 14),
axis.title.y = element_text(margin = margin(r = 1,
unit = 'lines')),
axis.title.x = element_text(margin = margin(t = 1,
unit = 'lines')),
axis.text.y = element_text(colour = '#000000'),
axis.text.x = element_text(colour = '#000000'))
# Save
ggsave(filename = 'figures/supplement-1-H-employment.png',
plot = pp,
height = 8,
width = 8)
|
# Load the full energy-vs-lost-data measurements and the pre-computed Pareto
# front, then restrict both to the 2 Mbit/s bitrate case.
data <- read.csv('energy2lostData_all.csv')
data.pareto <- read.csv('energy2lostData_pareto.csv')
# Treat the buffer parameters and bitrate as categorical; bitrate levels are
# ordered explicitly so legends/facets read 2, 6, 10.
data$buffer.size <- factor(data$buffer.size)
data$buffer.lower <- factor(data$buffer.lower)
data$bitrate <- factor(data$bitrate, levels = c('2', '6', '10'))
data.pareto$bitrate <- factor(data.pareto$bitrate, levels = c('2', '6', '10'))
data <- subset(data, bitrate == 2)
data.pareto <- subset(data.pareto, bitrate == 2)
# Build a facet labeller that suffixes each bitrate value with a unit.
#
# `unit` is received unevaluated (the commented-out facet_grid() call below
# passes it as labeller.bitrate((Mbit^-1))) and is captured once with
# substitute(), so every label splices the same plotmath unit expression via
# bquote().
#
# Fix: the previous version called substitute(passed.unit) inside the inner
# closure, but `passed.unit` was never defined (its assignment had been
# commented out), so labels rendered the literal symbol `passed.unit` instead
# of the unit.  The capture now happens in the factory scope, as the original
# commented-out draft intended, and per-value labels use levels(v)[v].
#
# NOTE(review): the inner function assumes it receives the facet values as a
# factor (old-style ggplot2 labeller); confirm against the ggplot2 version in
# use before re-enabling the facet_grid() call.
labeller.bitrate <- function(unit) {
  passed.unit <- substitute(unit)
  function(v) {
    do.call(expression, lapply(levels(v)[v], function(lvl) {
      bquote(paste(.(lvl), ' ', .(passed.unit)))
    }))
  }
}
# Trade-off scatter: power consumption vs wasted traffic, one point per
# buffer configuration (colour = lower buffer threshold, shape = buffer
# size), with the Pareto front overlaid as a line.
# NOTE(review): label.wasted.traffic, label.energy, label.buffer.*,
# color.palette and save.full.row.plot() are not defined in this file --
# presumably supplied by a common plotting setup sourced upstream; confirm.
p <- ggplot(data) +
geom_line(data = data.pareto, aes(x = wasted.data,
y = power)) +
geom_point(aes(x = wasted.data,
y = power,
color = buffer.lower,
shape = buffer.size)) +
#facet_grid(. ~ bitrate, scale = "free", labeller = labeller.bitrate((Mbit^-1))) +
labs(x = label.wasted.traffic,
y = label.energy,
color = label.buffer.lower,
shape = label.buffer.size) +
scale_color_manual(values = color.palette) +
guides(shape = guide_legend(order = 2), color=guide_legend(order = 1))
save.full.row.plot(p) | /figures/application/lte_video/trade_offs/energy2lostData_deutsch.R | no_license | cschwartz/dissertation | R | false | false | 1,438 | r | data <- read.csv('energy2lostData_all.csv')
# Load the pre-computed Pareto front, coerce the buffer parameters and
# bitrate to factors (bitrate level order fixed to 2, 6, 10), and restrict
# both data sets to the 2 Mbit/s case.
data.pareto <- read.csv('energy2lostData_pareto.csv')
data$buffer.size <- factor(data$buffer.size)
data$buffer.lower <- factor(data$buffer.lower)
data$bitrate <- factor(data$bitrate, levels = c('2', '6', '10'))
data.pareto$bitrate <- factor(data.pareto$bitrate, levels = c('2', '6', '10'))
data <- subset(data, bitrate == 2)
data.pareto <- subset(data.pareto, bitrate == 2)
# Build a facet labeller that suffixes each bitrate value with a unit.
#
# `unit` is received unevaluated (the commented-out facet_grid() call below
# passes it as labeller.bitrate((Mbit^-1))) and is captured once with
# substitute(), so every label splices the same plotmath unit expression via
# bquote().
#
# Fix: the previous version called substitute(passed.unit) inside the inner
# closure, but `passed.unit` was never defined (its assignment had been
# commented out), so labels rendered the literal symbol `passed.unit` instead
# of the unit.  The capture now happens in the factory scope, as the original
# commented-out draft intended, and per-value labels use levels(v)[v].
#
# NOTE(review): the inner function assumes it receives the facet values as a
# factor (old-style ggplot2 labeller); confirm against the ggplot2 version in
# use before re-enabling the facet_grid() call.
labeller.bitrate <- function(unit) {
  passed.unit <- substitute(unit)
  function(v) {
    do.call(expression, lapply(levels(v)[v], function(lvl) {
      bquote(paste(.(lvl), ' ', .(passed.unit)))
    }))
  }
}
# Power-vs-wasted-traffic trade-off scatter with the Pareto front overlaid;
# colour encodes the lower buffer threshold, shape the buffer size.
# NOTE(review): the label.* constants, color.palette and save.full.row.plot()
# come from outside this file (likely a sourced plotting setup) -- confirm.
p <- ggplot(data) +
geom_line(data = data.pareto, aes(x = wasted.data,
y = power)) +
geom_point(aes(x = wasted.data,
y = power,
color = buffer.lower,
shape = buffer.size)) +
#facet_grid(. ~ bitrate, scale = "free", labeller = labeller.bitrate((Mbit^-1))) +
labs(x = label.wasted.traffic,
y = label.energy,
color = label.buffer.lower,
shape = label.buffer.size) +
scale_color_manual(values = color.palette) +
guides(shape = guide_legend(order = 2), color=guide_legend(order = 1))
save.full.row.plot(p) |
# Unit tests (testthat-style) for the project's n-gram builders.
# clean_sentences() and create_ngrams() are project functions defined
# elsewhere; the returned object is indexed data.table-style
# ([, sum(phrase_count)]).  The expected counts encode the tokenizer
# contract: each cleaned sentence contributes 5 unigrams (including the "^"
# sentence-start marker) and 4 bigrams -- exact tokenization is defined by
# clean_sentences().
# testing data set
sentences <- c("I am Sam.", "Sam I am.")
sentences <- clean_sentences (sentences)
# bigram testing
bigrams <- create_ngrams (sentences, 2)
expect_equal (bigrams [, sum (phrase_count)], 8)
expect_equal (bigrams [phrase == "^ i", phrase_count], 1)
expect_equal (bigrams [phrase == "i am", phrase_count], 2)
# unigram testing
unigrams <- create_ngrams (sentences, 1)
expect_equal (unigrams [, sum (phrase_count)], 10)
expect_equal (unigrams [phrase == "^", phrase_count], 2)
expect_equal (unigrams [phrase == "i", phrase_count], 2)
# contractions should be maintained
# Four contracted words must survive as four single tokens.
unigrams <- create_ngrams ("don't he'll they've she's", 1)
expect_equal (unigrams [, sum (phrase_count)], 4) | /tests/create-ngrams-tests.R | no_license | Hybrid-Rom/swiftkey-capstone | R | false | false | 698 | r |
# Unit tests (testthat-style) for the project's n-gram builders.
# clean_sentences() and create_ngrams() are project functions defined
# elsewhere; results are indexed data.table-style.  The counts encode the
# tokenizer contract: 5 unigrams per cleaned sentence (including the "^"
# sentence-start marker) and 4 bigrams -- exact tokenization is defined by
# clean_sentences().
# testing data set
sentences <- c("I am Sam.", "Sam I am.")
sentences <- clean_sentences (sentences)
# bigram testing
bigrams <- create_ngrams (sentences, 2)
expect_equal (bigrams [, sum (phrase_count)], 8)
expect_equal (bigrams [phrase == "^ i", phrase_count], 1)
expect_equal (bigrams [phrase == "i am", phrase_count], 2)
# unigram testing
unigrams <- create_ngrams (sentences, 1)
expect_equal (unigrams [, sum (phrase_count)], 10)
expect_equal (unigrams [phrase == "^", phrase_count], 2)
expect_equal (unigrams [phrase == "i", phrase_count], 2)
# contractions should be maintained
# Four contracted words must survive as four single tokens.
unigrams <- create_ngrams ("don't he'll they've she's", 1)
expect_equal (unigrams [, sum (phrase_count)], 4)
# Ensure the modelling dependencies are present, then attach them.
# The original unconditionally ran install.packages() for every package on
# every execution, which re-downloads from CRAN each run and can fail
# outright in batch mode when no mirror is configured.  Install only what is
# missing, then attach as before.
for (pkg in c("e1071", "leaps", "caret", "randomForest")) {
  if (!requireNamespace(pkg, quietly = TRUE)) {
    install.packages(pkg)
  }
}
library(e1071)
library(leaps)
library(caret)
library(randomForest)
# Walk-forward evaluation of three regressors on weekly price changes.
#
# data: a data frame sorted-able by (date, name) containing `date`, `name`
#   (product identifier), the numeric target `weekly_change`, and any number
#   of numeric predictor columns.
#
# The last ~100 weeks x (number of products) rows form the test window.  The
# loop's `by` equals the window length, so it executes exactly once at the
# window start: each model is trained a single time on everything before the
# window and then predicts the whole remaining tail (see the AWS-cost
# comment below).  Three models are fitted: backward-stepwise OLS, an
# epsilon/cost-tuned SVM (e1071), and a CV-tuned random forest (caret).
#
# Returns either the string "Not enough data to model" (fewer than 200 rows)
# or a 3-row matrix (linear / svm / rf) of the seven metrics produced by the
# project helper metrics() -- which is defined elsewhere; the column names
# assigned here are assumed to match its output order (TODO confirm).
# NOTE(review): predictions_* are seeded with a leading NA and stripped with
# [2:length(...)] afterwards; ground_truth alignment relies on the test
# window running to the last row.  `1:nrow(...)` and the mixed use of `=`
# and `<-` are further cleanup candidates.
get_metrics <- function(data) {
if (nrow(data) < 200) {
return("Not enough data to model")
} else {
# Chronological order, ties broken by product name, so the train/test
# split below is a clean time split.
data <- data[order(data$date, data$name, decreasing = FALSE), ]
count_products = length(unique(data$name))
test_range = c((nrow(data) - count_products*100 + 1):(nrow(data) - count_products))
# the testing range is roughly the last 2 years
predictions_linear <- NA
predictions_svm <- NA
predictions_rf <- NA
# Drop the identifier columns so the model formulas use predictors only.
data$date <- NULL
data$name <- NULL
for (i in seq(from=min(test_range), to=max(test_range), by=(max(test_range) - min(test_range) + 1))) {
#Model trains only once. (Need to create very costly AWS instances!).
train <- data[1:(i-1), ]
test <- data[i:nrow(data), -which(colnames(data)=="weekly_change")]
# Backward stepwise selection from the full OLS model down toward the
# intercept-only model.
b1.null <- lm(weekly_change~1, data=train)
b1.full <- lm(weekly_change ~., data=train)
b1.bs <- step(b1.full, scope=list(lower=b1.null, upper=b1.full), direction="backward")
predictions_linear <- c(predictions_linear, predict(b1.bs, newdata = test))
# SVM with a small grid over epsilon and cost.
svm_tune <- tune(svm, weekly_change ~. , data = train, ranges = list(epsilon = c(0.0001, 0.01, 0.1, 1), cost = 1.5^(1:3)))
best_svm_model <- svm_tune$best.model
predictions_svm <- c(predictions_svm, predict(best_svm_model, newdata = test))
# Random forest tuned over mtry via 10-fold CV repeated 3 times.
cvCtrl = trainControl(method = "repeatedcv",number = 10, repeats = 3, search = "grid")
newGrid = expand.grid(mtry = c(1,3))
classifierRandomForest <- train(weekly_change ~. , data=train, trControl = cvCtrl, method="rf", tuneGrid=newGrid)
curClassifier = classifierRandomForest
predictions_rf = c(predictions_rf, predict(curClassifier, newdata = test))
}
# Strip the NA seeds prepended above.
pred_linear <- predictions_linear[2:length(predictions_linear)]
pred_svm <- predictions_svm[2:length(predictions_svm)]
pred_rf <- predictions_rf[2:length(predictions_rf)]
# Align actuals with the predicted tail of the series.
ground_truth = data$weekly_change[(nrow(data) - length(pred_linear) + 1): nrow(data)]
metrics_linear <- t(as.data.frame(metrics(ground_truth, pred_linear)))
metrics_svm <- t(as.data.frame(metrics(ground_truth, pred_svm)))
metrics_rf <- t(as.data.frame(metrics(ground_truth, pred_rf)))
metrics_colnames <- c("Prediction_length", "RMSE", "dir_wrong_percnt", "corr_preds", "big_pos_percnt", "big_neg_percnt", "annual_return")
colnames(metrics_linear) <- metrics_colnames
colnames(metrics_svm) <- metrics_colnames
colnames(metrics_rf) <- metrics_colnames
combined_metrics <- rbind(metrics_linear, metrics_svm, metrics_rf)
# Report prediction length per product rather than in pooled rows.
for (i in 1:nrow(combined_metrics)) {
combined_metrics[i, 1] = (combined_metrics[i, 1])/count_products
}
return(combined_metrics)
}
}
# Column indices to drop before modelling, one pair per asset family.  The
# "*one_rem" variants apply to the single-lag data sets (s1.1/s5.1/s6.1),
# the full variants to s2/s3/s4.  NOTE(review): the index lists are
# positional -- they assume a fixed column layout in the *_s* data frames
# built upstream; confirm before reordering any columns there.
commodities_rem <- c(3,6,7,8,11,14,19,20,21,23,24,25,26,27,28)
commoditiesone_rem <- c(3,6,7,8,11,14,19,20,21,23,24,25)
financial_rem <- (c(3,6,7,8,9,12,15,20,21,22,23,25,26,27,28,29,30))
financialone_rem <- c(3,6,7,8,9,12,15,20,21,22,23,25,26,27)
# preparing the data sets prior to modeling
# Financial families use the financial index lists...
currency_s1.1 <- currency_s1.1[, -financialone_rem]
currency_s2 <- currency_s2[, -financial_rem]
currency_s3 <- currency_s3[, -financial_rem]
currency_s4 <- currency_s4[, -financial_rem]
currency_s5.1 <- currency_s5.1[, -financialone_rem]
currency_s6.1 <- currency_s6.1[, -financialone_rem]
bonds_s1.1 <- bonds_s1.1[, -financialone_rem]
bonds_s2 <- bonds_s2[, -financial_rem]
bonds_s3 <- bonds_s3[, -financial_rem]
bonds_s4 <- bonds_s4[, -financial_rem]
bonds_s5.1 <- bonds_s5.1[, -financialone_rem]
bonds_s6.1 <- bonds_s6.1[, -financialone_rem]
US_stocks_s1.1 <- US_stocks_s1.1[, -financialone_rem]
US_stocks_s2 <- US_stocks_s2[, -financial_rem]
US_stocks_s3 <- US_stocks_s3[, -financial_rem]
US_stocks_s4 <- US_stocks_s4[, -financial_rem]
US_stocks_s5.1 <- US_stocks_s5.1[, -financialone_rem]
US_stocks_s6.1 <- US_stocks_s6.1[, -financialone_rem]
all_financials_s1.1 <- all_financials_s1.1[, -financialone_rem]
all_financials_s2 <- all_financials_s2[, -financial_rem]
all_financials_s3 <- all_financials_s3[, -financial_rem]
all_financials_s4 <- all_financials_s4[, -financial_rem]
all_financials_s5.1 <- all_financials_s5.1[, -financialone_rem]
all_financials_s6.1 <- all_financials_s6.1[, -financialone_rem]
# ...commodity families use the commodity index lists.
grains_s1.1 <- grains_s1.1[, -commoditiesone_rem]
grains_s2 <- grains_s2[, -commodities_rem]
grains_s3 <- grains_s3[, -commodities_rem]
grains_s4 <- grains_s4[, -commodities_rem]
grains_s5.1 <- grains_s5.1[, -commoditiesone_rem]
grains_s6.1 <- grains_s6.1[, -commoditiesone_rem]
metals_s1.1 <- metals_s1.1[, -commoditiesone_rem]
metals_s2 <- metals_s2[, -commodities_rem]
metals_s3 <- metals_s3[, -commodities_rem]
metals_s4 <- metals_s4[, -commodities_rem]
metals_s5.1 <- metals_s5.1[, -commoditiesone_rem]
metals_s6.1 <- metals_s6.1[, -commoditiesone_rem]
meats_s1.1 <- meats_s1.1[, -commoditiesone_rem]
meats_s2 <- meats_s2[, -commodities_rem]
meats_s3 <- meats_s3[, -commodities_rem]
meats_s4 <- meats_s4[, -commodities_rem]
meats_s5.1 <- meats_s5.1[, -commoditiesone_rem]
meats_s6.1 <- meats_s6.1[, -commoditiesone_rem]
softs_s1.1 <- softs_s1.1[, -commoditiesone_rem]
softs_s2 <- softs_s2[, -commodities_rem]
softs_s3 <- softs_s3[, -commodities_rem]
softs_s4 <- softs_s4[, -commodities_rem]
softs_s5.1 <- softs_s5.1[, -commoditiesone_rem]
softs_s6.1 <- softs_s6.1[, -commoditiesone_rem]
energies_s1.1 <- energies_s1.1[, -commoditiesone_rem]
energies_s2 <- energies_s2[, -commodities_rem]
energies_s3 <- energies_s3[, -commodities_rem]
energies_s4 <- energies_s4[, -commodities_rem]
energies_s5.1 <- energies_s5.1[, -commoditiesone_rem]
energies_s6.1 <- energies_s6.1[, -commoditiesone_rem]
all_commodities_s1.1 <- all_commodities_s1.1[, -commoditiesone_rem]
all_commodities_s2 <- all_commodities_s2[, -commodities_rem]
all_commodities_s3 <- all_commodities_s3[, -commodities_rem]
all_commodities_s4 <- all_commodities_s4[, -commodities_rem]
all_commodities_s5.1 <- all_commodities_s5.1[, -commoditiesone_rem]
all_commodities_s6.1 <- all_commodities_s6.1[, -commoditiesone_rem]
# block 1 ----------------------------------------------------------------
# Score each strategy-1 ("s1.1") data set with get_metrics().
# Fixes two defects in the original:
#   * the all-financials call referenced `all_financials_1.1`, which does not
#     exist (the object prepared above is `all_financials_s1.1`), so the
#     script stopped with an "object not found" error;
#   * the commodities result was assigned to `sfsefsef`, silently overwriting
#     the currency result stored under the same name.
metrics_currency_s1.1 <- get_metrics(currency_s1.1)
metrics_bonds_s1.1 <- get_metrics(bonds_s1.1)
metrics_US_stocks_s1.1 <- get_metrics(US_stocks_s1.1)
metrics_all_financials_s1.1 <- get_metrics(all_financials_s1.1)
metrics_all_commodities_s1.1 <- get_metrics(all_commodities_s1.1)
| /SYS6018 Final Project/AWS - Grouped data set Models/Grouped data sets modeling code.R | no_license | alizaidia/SysProject | R | false | false | 6,692 | r | install.packages("e1071")
# Install and attach the modelling dependencies (the e1071 install sits on
# the preceding line of the file).  NOTE(review): install.packages() runs
# unconditionally on every execution -- consider guarding each install with
# requireNamespace(pkg, quietly = TRUE).
install.packages("leaps")
install.packages("caret")
install.packages("randomForest")
library(e1071)
library(leaps)
library(caret)
library(randomForest)
# Walk-forward evaluation of three regressors on weekly price changes.
#
# data: a data frame sorted-able by (date, name) containing `date`, `name`
#   (product identifier), the numeric target `weekly_change`, and any number
#   of numeric predictor columns.
#
# The last ~100 weeks x (number of products) rows form the test window; the
# loop's `by` equals the window length, so it runs exactly once -- each model
# is trained a single time and predicts the whole remaining tail (see the
# AWS-cost comment below).  Models: backward-stepwise OLS, tuned SVM (e1071),
# CV-tuned random forest (caret).
#
# Returns "Not enough data to model" (fewer than 200 rows) or a 3-row matrix
# (linear / svm / rf) of the seven metrics produced by the project helper
# metrics(), defined elsewhere; the column names assigned here are assumed to
# match its output order (TODO confirm).
# NOTE(review): predictions_* are seeded with a leading NA and stripped with
# [2:length(...)] afterwards; `1:nrow(...)` and the mixed `=`/`<-` usage are
# further cleanup candidates.
get_metrics <- function(data) {
if (nrow(data) < 200) {
return("Not enough data to model")
} else {
# Chronological order so the split below is a clean time split.
data <- data[order(data$date, data$name, decreasing = FALSE), ]
count_products = length(unique(data$name))
test_range = c((nrow(data) - count_products*100 + 1):(nrow(data) - count_products))
# the testing range is roughly the last 2 years
predictions_linear <- NA
predictions_svm <- NA
predictions_rf <- NA
# Drop identifier columns so the model formulas use predictors only.
data$date <- NULL
data$name <- NULL
for (i in seq(from=min(test_range), to=max(test_range), by=(max(test_range) - min(test_range) + 1))) {
#Model trains only once. (Need to create very costly AWS instances!).
train <- data[1:(i-1), ]
test <- data[i:nrow(data), -which(colnames(data)=="weekly_change")]
# Backward stepwise selection from the full OLS model.
b1.null <- lm(weekly_change~1, data=train)
b1.full <- lm(weekly_change ~., data=train)
b1.bs <- step(b1.full, scope=list(lower=b1.null, upper=b1.full), direction="backward")
predictions_linear <- c(predictions_linear, predict(b1.bs, newdata = test))
# SVM tuned over a small epsilon/cost grid.
svm_tune <- tune(svm, weekly_change ~. , data = train, ranges = list(epsilon = c(0.0001, 0.01, 0.1, 1), cost = 1.5^(1:3)))
best_svm_model <- svm_tune$best.model
predictions_svm <- c(predictions_svm, predict(best_svm_model, newdata = test))
# Random forest tuned over mtry via repeated 10-fold CV.
cvCtrl = trainControl(method = "repeatedcv",number = 10, repeats = 3, search = "grid")
newGrid = expand.grid(mtry = c(1,3))
classifierRandomForest <- train(weekly_change ~. , data=train, trControl = cvCtrl, method="rf", tuneGrid=newGrid)
curClassifier = classifierRandomForest
predictions_rf = c(predictions_rf, predict(curClassifier, newdata = test))
}
# Strip the NA seeds prepended above.
pred_linear <- predictions_linear[2:length(predictions_linear)]
pred_svm <- predictions_svm[2:length(predictions_svm)]
pred_rf <- predictions_rf[2:length(predictions_rf)]
# Align actuals with the predicted tail of the series.
ground_truth = data$weekly_change[(nrow(data) - length(pred_linear) + 1): nrow(data)]
metrics_linear <- t(as.data.frame(metrics(ground_truth, pred_linear)))
metrics_svm <- t(as.data.frame(metrics(ground_truth, pred_svm)))
metrics_rf <- t(as.data.frame(metrics(ground_truth, pred_rf)))
metrics_colnames <- c("Prediction_length", "RMSE", "dir_wrong_percnt", "corr_preds", "big_pos_percnt", "big_neg_percnt", "annual_return")
colnames(metrics_linear) <- metrics_colnames
colnames(metrics_svm) <- metrics_colnames
colnames(metrics_rf) <- metrics_colnames
combined_metrics <- rbind(metrics_linear, metrics_svm, metrics_rf)
# Report prediction length per product rather than in pooled rows.
for (i in 1:nrow(combined_metrics)) {
combined_metrics[i, 1] = (combined_metrics[i, 1])/count_products
}
return(combined_metrics)
}
}
# Column indices to drop before modelling.  The "*one_rem" variants apply
# to the s1.1 / s5.1 / s6.1 data sets, which carry fewer columns than the
# s2 / s3 / s4 variants.
commodities_rem <- c(3,6,7,8,11,14,19,20,21,23,24,25,26,27,28)
commoditiesone_rem <- c(3,6,7,8,11,14,19,20,21,23,24,25)
financial_rem <- c(3,6,7,8,9,12,15,20,21,22,23,25,26,27,28,29,30)
financialone_rem <- c(3,6,7,8,9,12,15,20,21,22,23,25,26,27)
# Preparing the data sets prior to modelling: drop the unused columns from
# every grouped data set.  Behaviour (including assignment order) is
# identical to spelling out the 60 individual `x <- x[, -rem]` statements;
# the loop only removes the repetition.
financial_groups <- c("currency", "bonds", "US_stocks", "all_financials")
commodity_groups <- c("grains", "metals", "meats", "softs", "energies", "all_commodities")
short_suffixes <- c("s1.1", "s5.1", "s6.1")
for (grp in c(financial_groups, commodity_groups)) {
  for (sfx in c("s1.1", "s2", "s3", "s4", "s5.1", "s6.1")) {
    # Pick the removal vector matching this group's family and suffix.
    if (grp %in% financial_groups) {
      rem <- if (sfx %in% short_suffixes) financialone_rem else financial_rem
    } else {
      rem <- if (sfx %in% short_suffixes) commoditiesone_rem else commodities_rem
    }
    dataset_name <- paste(grp, sfx, sep = "_")
    assign(dataset_name, get(dataset_name)[, -rem])
  }
}
# block 1: compute model metrics for each grouped "s1.1" data set.
# Fixes from review:
#  - `all_financials_1.1` was a typo for `all_financials_s1.1` (created in
#    the preparation section above), which would raise "object not found"
#    at run time.
#  - the currency and all-commodities results were both assigned to the
#    same throwaway name (`sfsefsef`), so the currency metrics were
#    silently overwritten; results now use descriptive, distinct names.
metrics_currency <- get_metrics(currency_s1.1)
metrics_bonds <- get_metrics(bonds_s1.1)
metrics_us_stocks <- get_metrics(US_stocks_s1.1)
metrics_all_financials <- get_metrics(all_financials_s1.1)
metrics_all_commodities <- get_metrics(all_commodities_s1.1)
|
# Auto-generated fuzzer/valgrind reproducer for the internal C++ routine
# biwavelet:::rcpp_row_quantile.  The 1x7 input matrix mixes doubles of
# extreme magnitude (including subnormal-range values), and `q` lies far
# outside the usual [0, 1] quantile range, to probe error handling in the
# compiled code.
testlist <- list(data = structure(c(3.56011975237639e-305, 5.57359594480446e-308, 1.11503775174378e+45, 3.87206539118314e-310, 2.49230169981509e+35, 2.84878985323839e-306, 2.84830445732284e-306), .Dim = c(1L, 7L )), q = -3.38084311184731e+221)
# Invoke the unexported function with the raw fuzzed argument list.
result <- do.call(biwavelet:::rcpp_row_quantile,testlist)
str(result) | /biwavelet/inst/testfiles/rcpp_row_quantile/libFuzzer_rcpp_row_quantile/rcpp_row_quantile_valgrind_files/1610554311-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 315 | r | testlist <- list(data = structure(c(3.56011975237639e-305, 5.57359594480446e-308, 1.11503775174378e+45, 3.87206539118314e-310, 2.49230169981509e+35, 2.84878985323839e-306, 2.84830445732284e-306), .Dim = c(1L, 7L )), q = -3.38084311184731e+221)
result <- do.call(biwavelet:::rcpp_row_quantile,testlist)
str(result) |
#######################################################
### Solutions to week 1 homework
################################################
# Set working directory to make it easier to load data.
# NOTE(review): the path is hard-coded to one machine; anyone else running
# this script must edit it (or use a project-relative path instead).
setwd('C:\\Users\\dbarron\\Dropbox\\Advanced Quant')
# Import data
peru <- read.csv('PeruMicroWeek1.csv')
# Review structure: column names, types and dimensions
str(peru)
# Note, has 9 variables, 31 observations
head(peru)
# Note, first three are integers, others numeric. regulated looks like a dummy variable
summary(peru)
## Means & standard deviations
# Simple way: with() evaluates each expression inside the data frame, so
# columns can be referenced without the peru$ prefix.
with(peru, mean(avgloanbal))
# na.rm = TRUE drops missing values first (femaleperc evidently has NAs,
# given it is the only column treated this way -- confirm against data)
with(peru, mean(femaleperc, na.rm = TRUE))
with(peru, sd(avgloanbal))
with(peru, sd(femaleperc, na.rm = TRUE))
# Using package dplyr
library(dplyr)
# Compute the mean and sd of every column at once.
# FIX: funs() was deprecated in dplyr 0.8.0 and later removed; pass a
# named list of functions instead.  Output columns keep the same
# <variable>_mean / <variable>_sd naming as before.
peru %>% summarise_all(list(mean = mean, sd = sd), na.rm = TRUE)
# Histogram of a single column
hist(peru$avgloanbal, xlab = 'Average loan balance')
# Draw a histogram of every column (lapply also returns the list of
# histogram objects, which prints at the console)
lapply(peru, hist)
# Scatter plot; I(avgloanbal / 10000) rescales x so the fitted slope is
# interpreted per 10,000 currency units
plot(selfsuff ~ I(avgloanbal / 10000), data = peru, ylim = c(0.7, 1.8))
# Overlay the fitted least-squares line on the scatter plot
abline(lm(selfsuff ~ I(avgloanbal / 10000), data = peru))
## Using ggplot2: same scatter plot plus a linear fit with its
## confidence band
library(ggplot2)
ggplot(peru, aes(x = avgloanbal, y = selfsuff)) +
geom_point() +
geom_smooth(method = 'lm')
# Regression of self-sufficiency on rescaled loan balance and % female
lm1 <- lm(selfsuff ~ I(avgloanbal / 10000) + femaleperc, data = peru)
summary(lm1)
# avgloanbal is statistically significant
#
library(car)
# Diagnostics from the car package: variance inflation factors
# (collinearity check) and a QQ plot of the model residuals
car::vif(lm1)
qqPlot(lm1)
| /Lecture 1/Homework1.R | no_license | dnbarron/Advanced-Quant | R | false | false | 1,311 | r | #######################################################
### Solutions to week 1 homework
################################################
# Set working director to make it easier to load data
setwd('C:\\Users\\dbarron\\Dropbox\\Advanced Quant')
# Import data
peru <- read.csv('PeruMicroWeek1.csv')
# Review
str(peru)
# Note, has 9 variables, 31 observations
head(peru)
# Note, first three are integers, others numeric. regulated looks like a dummy variable
summary(peru)
## Means & standard deviations
# Simple way
with(peru, mean(avgloanbal))
with(peru, mean(femaleperc, na.rm = TRUE))
with(peru, sd(avgloanbal))
with(peru, sd(femaleperc, na.rm = TRUE))
# Using package dplyr
library(dplyr)
peru %>% summarise_all(funs(mean, sd), na.rm = TRUE)
# Histogram
hist(peru$avgloanbal, xlab = 'Average loan balance')
lapply(peru, hist)
# Scatter plot
plot(selfsuff ~ I(avgloanbal / 10000), data = peru, ylim = c(0.7, 1.8))
abline(lm(selfsuff ~ I(avgloanbal / 10000), data = peru))
## Using ggplot2
library(ggplot2)
ggplot(peru, aes(x = avgloanbal, y = selfsuff)) +
geom_point() +
geom_smooth(method = 'lm')
# Regression
lm1 <- lm(selfsuff ~ I(avgloanbal / 10000) + femaleperc, data = peru)
summary(lm1)
# avgloanbal is statistically significant
#
library(car)
car::vif(lm1)
qqPlot(lm1)
|
# Load the trophic-annotation tables for the three regions plus the
# taxonomy/trophy lookup used for joining.
# NOTE(review): paths are hard-coded to the author's machine.
Arctic <- read.csv("/Users/corahoerstmann/Documents/AWI_ArcticFjords/Submission_CommBio/Supplementary/Table_S5_Trophic_annotation.csv", sep = ",")
SubArctic <- read.csv("/Users/corahoerstmann/Documents/AWI_ArcticFjords/Submission_CommBio/Supplementary/Table_S5_Subarctic.csv", sep = ",")
Temperate <- read.csv("/Users/corahoerstmann/Documents/AWI_ArcticFjords/Submission_CommBio/Supplementary/Table_S5_Trophic_annotation_Temperate.csv", sep = ",")
Trophic_annotation <- read.csv("/Users/corahoerstmann/Documents/AWI_ArcticFjords/Submission_CommBio/Supplementary/Arctic_fjords_taxonomy_network.csv", sep = ",")
# Normalise the Tax_6 join key: replace the Unicode minus sign (U+2212)
# with an ASCII hyphen so keys match across tables.
Arctic$Tax_6 <- gsub("−", "-", Arctic$Tax_6)
SubArctic$Tax_6 <- gsub("−", "-", SubArctic$Tax_6)
Temperate$Tax_6 <- gsub("−", "-", Temperate$Tax_6)
# Attach the trophy annotation to each regional table, then count rows per
# trophy class and per Tax_1 group (presumably the eukaryote/prokaryote
# split, judging by the *_eukprok result names -- confirm).
Arctic <- dplyr::left_join(Arctic, Trophic_annotation, by = "Tax_6")
Arctic_stats <- Arctic%>%group_by(Trophy)%>%count()
Arctic_stats_eukprok <- Arctic%>%group_by(Tax_1)%>%count()
SubArctic <- dplyr::left_join(SubArctic, Trophic_annotation, by = "Tax_6")
SubArctic_stats <- SubArctic%>%group_by(Trophy)%>%count()
SubArctic_stats_eukprok <- SubArctic%>%group_by(Tax_1)%>%count()
Temperate <- dplyr::left_join(Temperate, Trophic_annotation, by = "Tax_6")
Temperate_stats <- Temperate%>%group_by(Trophy)%>%count()
Temperate_stats_eukprok <- Temperate%>%group_by(Tax_1)%>%count()
## Only look at heterotrophic components ("het" and "prelim het").
het_anno <- c("het", "prelim het")
# BUG FIX: `filter(Trophy == het_anno)` recycled the length-2 vector
# against the column, so a row only survived when its Trophy value
# happened to line up with the recycled element at that position --
# silently dropping roughly half of the matching rows.  Set membership
# requires %in%.
Arctic_het <- Arctic %>% filter(Trophy %in% het_anno)
Arctic_het_stats <- Arctic_het %>% group_by(Tax_1) %>% count()
Temperate_het <- Temperate %>% dplyr::filter(Trophy %in% het_anno)
Temperate_het_stats <- Temperate_het %>% group_by(Tax_1) %>% count()
##Temperate
# f__2(SAR86 clade) 3738 het prol
#f__5(Flavobacteriaceae) 2626 prelim het prok
#Tontoniidae_A 1841 mixo euk
#Dino-Group-II-Clade-2 1804 het euk
#Picozoa_XXX 1781 prelim unknown
#TAGIRI1-lineage 1613 het euk
#f__2(SAR86 clade) 41.72975 het prok
#Ascidiaceihabitans 37.97399 prelim het prok
#f__5(Flavobacteriaceae) 37.47814 het prok
#f__99(NA) 35.99627 het < Rhizaria euk
#f__8(AEGEAN-169 marine group) 35.77350 prelim het prok
#Pseudohongiella 34.36061 prelim het prok
#f__6(Arenicellaceae) 33.93672 prelim het prok
#Planktomarina 33.39229 prelim hhet prok
#Dino-Group-II-Clade-10-and-11 32.40841 het euk
#f__98(MAST-4) 32.25261 het euk
##Subarctic
#SAR92 prelim het
#f9 prelim het
#MAST-3L het
#f4 prelim het
#f11 prelim het
#NS5 prelim het
#Candidatus Actinomarina prelim het
##ARCTIC
#Marinoscillum prelim het
#f__1(Nitrincolaceae) prelim het
#Dictyochophyceae_XX auto
#Candidatus Aquiluna het
#f7 prelim het
#Sulfitobacter prelim het
#f__17(Kordiimonadales) prelim het
#Parmales_env_1 auto
#Ulvibacter het
#IS-44 prelim het (prok)
#SAR92 het
#Brevundimonas het (prok)
#Loktanella prelim het (prok)
| /superseded/network_stats.R | no_license | AGJohnAWI/ArcticPicos | R | false | false | 2,973 | r | Arctic <- read.csv("/Users/corahoerstmann/Documents/AWI_ArcticFjords/Submission_CommBio/Supplementary/Table_S5_Trophic_annotation.csv", sep = ",")
SubArctic <- read.csv("/Users/corahoerstmann/Documents/AWI_ArcticFjords/Submission_CommBio/Supplementary/Table_S5_Subarctic.csv", sep = ",")
Temperate <- read.csv("/Users/corahoerstmann/Documents/AWI_ArcticFjords/Submission_CommBio/Supplementary/Table_S5_Trophic_annotation_Temperate.csv", sep = ",")
Trophic_annotation <- read.csv("/Users/corahoerstmann/Documents/AWI_ArcticFjords/Submission_CommBio/Supplementary/Arctic_fjords_taxonomy_network.csv", sep = ",")
Arctic$Tax_6 <- gsub("−", "-", Arctic$Tax_6)
SubArctic$Tax_6 <- gsub("−", "-", SubArctic$Tax_6)
Temperate$Tax_6 <- gsub("−", "-", Temperate$Tax_6)
Arctic <- dplyr::left_join(Arctic, Trophic_annotation, by = "Tax_6")
Arctic_stats <- Arctic%>%group_by(Trophy)%>%count()
Arctic_stats_eukprok <- Arctic%>%group_by(Tax_1)%>%count()
SubArctic <- dplyr::left_join(SubArctic, Trophic_annotation, by = "Tax_6")
SubArctic_stats <- SubArctic%>%group_by(Trophy)%>%count()
SubArctic_stats_eukprok <- SubArctic%>%group_by(Tax_1)%>%count()
Temperate <- dplyr::left_join(Temperate, Trophic_annotation, by = "Tax_6")
Temperate_stats <- Temperate%>%group_by(Trophy)%>%count()
Temperate_stats_eukprok <- Temperate%>%group_by(Tax_1)%>%count()
##only look at het components
het_anno <- c("het", "prelim het")
Arctic_het <- Arctic%>%filter(Trophy == het_anno)
Arctic_het_stats <- Arctic_het%>%group_by(Tax_1)%>%count()
Temperate_het <- Temperate%>%dplyr::filter(Trophy == het_anno)
Temperate_het_stats <- Temperate_het%>%group_by(Tax_1)%>%count()
##Temperate
# f__2(SAR86 clade) 3738 het prol
#f__5(Flavobacteriaceae) 2626 prelim het prok
#Tontoniidae_A 1841 mixo euk
#Dino-Group-II-Clade-2 1804 het euk
#Picozoa_XXX 1781 prelim unknown
#TAGIRI1-lineage 1613 het euk
#f__2(SAR86 clade) 41.72975 het prok
#Ascidiaceihabitans 37.97399 prelim het prok
#f__5(Flavobacteriaceae) 37.47814 het prok
#f__99(NA) 35.99627 het < Rhizaria euk
#f__8(AEGEAN-169 marine group) 35.77350 prelim het prok
#Pseudohongiella 34.36061 prelim het prok
#f__6(Arenicellaceae) 33.93672 prelim het prok
#Planktomarina 33.39229 prelim hhet prok
#Dino-Group-II-Clade-10-and-11 32.40841 het euk
#f__98(MAST-4) 32.25261 het euk
##Subarctic
#SAR92 prelim het
#f9 prelim het
#MAST-3L het
#f4 prelim het
#f11 prelim het
#NS5 prelim het
#Candidatus Actinomarina prelim het
##ARCTIC
#Marinoscillum prelim het
#f__1(Nitrincolaceae) prelim het
#Dictyochophyceae_XX auto
#Candidatus Aquiluna het
#f7 prelim het
#Sulfitobacter prelim het
#f__17(Kordiimonadales) prelim het
#Parmales_env_1 auto
#Ulvibacter het
#IS-44 prelim het (prok)
#SAR92 het
#Brevundimonas het (prok)
#Loktanella prelim het (prok)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utf8.R
\name{utf8_substr}
\alias{utf8_substr}
\title{Substring of an UTF-8 string}
\usage{
utf8_substr(x, start, stop)
}
\arguments{
\item{x}{Character vector.}
\item{start}{Starting index or indices, recycled to match the length
of \code{x}.}
\item{stop}{Ending index or indices, recycled to match the length of
\code{x}.}
}
\value{
Character vector of the same length as \code{x}, containing
the requested substrings.
}
\description{
This function uses grapheme clusters instead of Unicode code points in
UTF-8 strings.
}
\examples{
# Five grapheme clusters, select the middle three
str <- paste0(
"\U0001f477\U0001f3ff\u200d\u2640\ufe0f",
"\U0001f477\U0001f3ff",
"\U0001f477\u200d\u2640\ufe0f",
"\U0001f477\U0001f3fb",
"\U0001f477\U0001f3ff")
cat(str)
str24 <- utf8_substr(str, 2, 4)
cat(str24)
}
\seealso{
Other UTF-8 string manipulation:
\code{\link{utf8_graphemes}()},
\code{\link{utf8_nchar}()}
}
\concept{UTF-8 string manipulation}
| /man/utf8_substr.Rd | permissive | isabella232/cli-12 | R | false | true | 1,031 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utf8.R
\name{utf8_substr}
\alias{utf8_substr}
\title{Substring of an UTF-8 string}
\usage{
utf8_substr(x, start, stop)
}
\arguments{
\item{x}{Character vector.}
\item{start}{Starting index or indices, recycled to match the length
of \code{x}.}
\item{stop}{Ending index or indices, recycled to match the length of
\code{x}.}
}
\value{
Character vector of the same length as \code{x}, containing
the requested substrings.
}
\description{
This function uses grapheme clusters instead of Unicode code points in
UTF-8 strings.
}
\examples{
# Five grapheme clusters, select the middle three
str <- paste0(
"\U0001f477\U0001f3ff\u200d\u2640\ufe0f",
"\U0001f477\U0001f3ff",
"\U0001f477\u200d\u2640\ufe0f",
"\U0001f477\U0001f3fb",
"\U0001f477\U0001f3ff")
cat(str)
str24 <- utf8_substr(str, 2, 4)
cat(str24)
}
\seealso{
Other UTF-8 string manipulation:
\code{\link{utf8_graphemes}()},
\code{\link{utf8_nchar}()}
}
\concept{UTF-8 string manipulation}
|
## Functions that enable caching of the inverse of a matrix.

## Create a special "matrix" object that can cache its inverse.
## Returns a list of four accessor closures sharing the enclosing
## environment, where both the matrix `x` and the cached inverse live.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    # Replacing the matrix invalidates any previously cached inverse.
    x <<- y
    cached_inverse <<- NULL
  }
  get <- function() {
    x
  }
  setSolve <- function(solve) {
    cached_inverse <<- solve
  }
  getSolve <- function() {
    cached_inverse
  }
  list(set = set,
       get = get,
       setSolve = setSolve,
       getSolve = getSolve)
}
## Return the inverse of the special "matrix" `x` (as built by
## makeCacheMatrix).  If the inverse is already cached it is returned
## directly; otherwise it is computed with solve(), stored in the cache,
## and returned.  Extra arguments are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getSolve()
  if (is.null(cached)) {
    # Cache miss: compute, store, and return the inverse.
    message("solving inverse of matrix")
    cached <- solve(x$get(), ...)
    x$setSolve(cached)
  } else {
    # Cache hit: reuse the stored inverse.
    message("getting cached data")
  }
  cached
}
| /cachematrix.R | no_license | abhinav5/ProgrammingAssignment2 | R | false | false | 854 | r | ## Functions that enable caching of inverse of a matrix
## Create a special matrix object, with ability to cache its inverse
makeCacheMatrix <- function(x = matrix()) {
m <- NULL
set <- function(y) {
x <<- y
m <<- NULL
}
get <- function() x
setSolve <- function(solve) m <<- solve
getSolve <- function() m
list (set = set, get = get,
setSolve = setSolve,
getSolve = getSolve)
}
## If inverse already exists, return it.
## else calculate inverse, set and return it
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
m<- x$getSolve()
if(!is.null(m)) {
message("getting cached data")
return(m)
}
# get the data
data <- x$get()
# Find the inverse
message("solving inverse of matrix")
m <- solve(data, ...)
# Set value of x
x$setSolve(m)
# return
m
}
|
#SGN
#K-Means Clustering Project
#SGN
# NOTE(review): rm(list = ls()) wipes the user's entire workspace as a
# side effect of sourcing this script -- generally discouraged; kept as-is.
rm(list = ls())
##############################################################################################
###### Build an analytical model to create clusters of airline travellers ####################
##############################################################################################
#---------------------Step1: Loading the Data in R
# setwd() returns the *previous* working directory, so Path holds the old
# location, not the new one.  The path is hard-coded to one machine.
Path<-setwd("C:/Ganesha_Accenture/Ganesha_IVY/R/20180112-20180114 Ivy Data Science Hackshop/03 CLUSTERING/CASE STUDY1/02DATA")
# as.is = TRUE keeps character columns as strings; empty cells become NA
airlines<-read.csv("AirlinesCluster.csv", header = TRUE, as.is = TRUE, na.strings = c(""))
#Understand the data type and summary of each column
str(airlines)
summary(airlines)
#Checking missing values (count of NAs per column)
as.data.frame(colSums(is.na(airlines)))
#Normalizing the Data for clustering: caret::preProcess with default
#methods centres and scales each variable so no single large-valued
#column dominates the distance computation
library(caret)
preproc<-preProcess(airlines)
airlinesNorm<-predict(preproc,airlines)
summary(airlinesNorm)
#Hierarchical Clustering on Euclidean distances with Ward linkage
distan<-dist(airlinesNorm, method = "euclidean")
ClusterAirline<-hclust(distan, method = "ward.D")
plot(ClusterAirline)
#Assigning points to the clusters: cut the dendrogram into 5 groups
AirlineCluster<-cutree(ClusterAirline, k = 5)
table(AirlineCluster)
#Computing the average values of the cluster groups
# Apply `meas` (e.g. mean) to `var` within each level of `clustergrp`
# and print the per-group summary.  The printed value is also returned
# (invisibly), so the result can be captured by assignment.
MeanComp <- function(var, clustergrp, meas) {
  grouped_summary <- tapply(var, clustergrp, meas)
  print(grouped_summary)
}
# Per-cluster means for selected variables (hierarchical solution)
Bal_mean<-MeanComp(airlines$Balance, AirlineCluster, mean)
Bal_DaysSinceEnroll<-MeanComp(airlines$DaysSinceEnroll, AirlineCluster, mean)
#Appending the Clusters Assignment to the raw data and saving to CSV
Airlines_H<-data.frame(airlines,AirlineCluster)
write.csv(Airlines_H,"Airlines_Hierarchical.csv", row.names = FALSE)
#k-Means Clustering (seed fixed so the cluster assignment is reproducible)
set.seed(88)
k<-5
AirlineCluster_K<-kmeans(airlinesNorm, centers = k,iter.max = 1000)
table(AirlineCluster_K$cluster)
AirlineCluster_K$centers
#Finding out the Mean Values of the Variables in the Clusters
Bal_mean_k<-aggregate(airlines, by=list(cluster=AirlineCluster_K$cluster), mean)
# NOTE(review): the next line repeats the hierarchical-cluster computation
# from above (it groups by AirlineCluster, not AirlineCluster_K$cluster);
# this looks like a copy-paste slip -- confirm the intended grouping.
Bal_mean<-MeanComp(airlines$Balance, AirlineCluster, mean)
#Appending the k-means Cluster Assignment and saving to CSV
airlines_new_k <- data.frame(airlines, AirlineCluster_K$cluster)
write.csv(airlines_new_k,"Airlines_k-Means.csv", row.names = FALSE)
| /Hackshop/clustering/Clustering_R_V1_AIRLINES.R | no_license | apoorvakarn/R-in-Action | R | false | false | 2,139 | r | #SGN
#K-Means Clustering Project
#SGN
rm(list = ls())
##############################################################################################
###### Build an analytical model to create clusters of airline travellers ####################
##############################################################################################
#---------------------Step1: Loading the Data in R
Path<-setwd("C:/Ganesha_Accenture/Ganesha_IVY/R/20180112-20180114 Ivy Data Science Hackshop/03 CLUSTERING/CASE STUDY1/02DATA")
airlines<-read.csv("AirlinesCluster.csv", header = TRUE, as.is = TRUE, na.strings = c(""))
#Understand the data type and summary of each coloumn
str(airlines)
summary(airlines)
#Checking missing values
as.data.frame(colSums(is.na(airlines)))
#Normalizing the Data for clustering
library(caret)
preproc<-preProcess(airlines)
airlinesNorm<-predict(preproc,airlines)
summary(airlinesNorm)
#Hiearchical Clustering
distan<-dist(airlinesNorm, method = "euclidean")
ClusterAirline<-hclust(distan, method = "ward.D")
plot(ClusterAirline)
#Assigning points to the clusters
AirlineCluster<-cutree(ClusterAirline, k = 5)
table(AirlineCluster)
#Computing the average values of the cluster groups
MeanComp<-function(var, clustergrp, meas){
z<-tapply(var, clustergrp, meas)
print(z)
}
Bal_mean<-MeanComp(airlines$Balance, AirlineCluster, mean)
Bal_DaysSinceEnroll<-MeanComp(airlines$DaysSinceEnroll, AirlineCluster, mean)
#Appending the Clusters Assignment
Airlines_H<-data.frame(airlines,AirlineCluster)
write.csv(Airlines_H,"Airlines_Hierarchical.csv", row.names = FALSE)
#k-Means Clustersing
set.seed(88)
k<-5
AirlineCluster_K<-kmeans(airlinesNorm, centers = k,iter.max = 1000)
table(AirlineCluster_K$cluster)
AirlineCluster_K$centers
#Finding out the Mean Values of the Variables in the Clusters
Bal_mean_k<-aggregate(airlines, by=list(cluster=AirlineCluster_K$cluster), mean)
Bal_mean<-MeanComp(airlines$Balance, AirlineCluster, mean)
#Appending the Clusters Assignment
airlines_new_k <- data.frame(airlines, AirlineCluster_K$cluster)
write.csv(airlines_new_k,"Airlines_k-Means.csv", row.names = FALSE)
|
# This is the user-interface definition of a Shiny web application.
# You can find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com
#
library(shiny)
library(shinydashboard)
library(leaflet)
library(leaflet.extras)
library(leaflet.minicharts)
library(DT)
library(sp)
library(sf)
library(rgdal)
library(rgeos)
library(dplyr)
library(stplanr)
library(RcppArmadillo)
library(mapview)
library(rmapshaper)
library(doParallel)
ui <- (dashboardPage(
dashboardHeader(title = "Albatross4 Viewer"
),
dashboardSidebar(
sidebarMenu(
menuItem(
"Data Preparation",
tabName = "data",
icon = icon("database")
# menuSubItem("Watersheds", tabName = "m_water", icon = icon("map")),
# menuSubItem("Population", tabName = "m_pop", icon = icon("map"))
),
menuItem(
"View Household",
tabName = "household",
icon = icon("users",lib = "font-awesome"),
menuSubItem("Map", tabName = "map-household", icon = icon("globe")),
# menuSubItem("Graph", tabName = "graph-household", icon = icon("signal")),
menuSubItem("List", tabName = "list-household", icon = icon("table",lib="font-awesome"))
),
menuItem(
"View Schedule",
tabName = "schedule",
icon = icon("list-ol", lib = "font-awesome"),
menuSubItem("Map", tabName = "map-schedule", icon = icon("globe")),
# menuSubItem("Graph", tabName = "graph-schedule", icon = icon("signal")),
menuSubItem("List", tabName = "list-schedule", icon = icon("table",lib="font-awesome"))
),
menuItem(
"Summary Statistics",
tabName = "statistics",
icon = icon("stats",lib = "glyphicon"),
menuSubItem("Map", tabName = "map-summary", icon = icon("globe"))
),
# menuItem(
# "Animated map",
# tabName = "animate",
# icon = icon("facetime-video",lib = "glyphicon")
# ),
menuItem(
"About",
tabName = "about",
icon = icon("envelope",lib = "glyphicon")
)
)
),
dashboardBody(
tags$head(tags$style(type="text/css", "
#loadmessage {
border: 16px solid #f3f3f3; /* Light grey */
border-radius: 50%;
border-top: 16px solid #3c8dbc; /* Blue */
width: 100px;
height: 100px;
-webkit-animation: spin 2s linear infinite;
margin: auto;
margin-left: auto;
position: relative;
z-index: 100;
top: 350px;
left: 600px;
}
@keyframes spin {
` 0% { transform: rotate(0deg); }
100% { transform: rotate(360deg); }
}
")),
absolutePanel(
conditionalPanel(condition="$('html').hasClass('shiny-busy')",
tags$div(id="loadmessage"))
),
tabItems(
tabItem(tabName = "data",
tabsetPanel(type = "tabs",
tabPanel("Overview",
img(src="Albatross.jpg", align = "left", width = "50%", height = "50%")
),
tabPanel("Household",
sidebarLayout(
sidebarPanel(
# Input: Select a file ----
fileInput("household", "Choose household data (less than 500MB)",
multiple = FALSE,
accept = c("text/csv",
"text/comma-separated-values,text/plain",
".csv")),
# Input: Select separator ----
radioButtons("householdsep", "Separator",
choices = c(Comma = ",",
Semicolon = ";",
Tab = "\t"),
selected = ","),
# Input: Select quotes ----
radioButtons("householdquote", "Quote",
choices = c(None = "",
"Double Quote" = '"',
"Single Quote" = "'"),
selected = '"'),
# Input: Checkbox if file has header ----
checkboxInput("householdheader", "Header", TRUE),
# Input: Select number of rows to display ----
radioButtons("householddisp", "Display",
choices = c(Head = "head",
All = "all"),
selected = "head")
),
mainPanel(
tableOutput("previewHousehold"),
h3(textOutput("previewHouseholdTotal")),
width = 12
)
)
),
tabPanel("Household coordinates",
sidebarLayout(
sidebarPanel(
# Input: Select a file ----
fileInput("hhcoords", "Choose household coordinates data (less than 500MB)",
multiple = FALSE,
accept = c("text/csv",
"text/comma-separated-values,text/plain",
".csv")),
# Input: Select separator ----
radioButtons("hhcoordssep", "Separator",
choices = c(Comma = ",",
Semicolon = ";",
Tab = "\t"),
selected = ","),
# Input: Select quotes ----
radioButtons("hhcoordsquote", "Quote",
choices = c(None = "",
"Double Quote" = '"',
"Single Quote" = "'"),
selected = '"'),
# Input: Checkbox if file has header ----
checkboxInput("hhcoordsheader", "Header", TRUE),
# Input: Select number of rows to display ----
radioButtons("hhcoordsdisp", "Display",
choices = c(Head = "head",
All = "all"),
selected = "head")
),
mainPanel(
)
)
),
tabPanel("Schedule",
sidebarLayout(
sidebarPanel(
# Input: Select a file ----
fileInput("schedule", "Choose schedule data (less than 500MB)",
multiple = FALSE,
accept = c("text/csv",
"text/comma-separated-values,text/plain",
".csv")),
# Input: Select separator ----
radioButtons("schedulesep", "Separator",
choices = c(Comma = ",",
Semicolon = ";",
Tab = "\t"),
selected = ","),
# Input: Select quotes ----
radioButtons("schedulequote", "Quote",
choices = c(None = "",
"Double Quote" = '"',
"Single Quote" = "'"),
selected = '"'),
# Input: Checkbox if file has header ----
checkboxInput("scheduleheader", "Header", TRUE),
# Input: Select number of rows to display ----
radioButtons("scheduledisp", "Display",
choices = c(Head = "head",
All = "all"),
selected = "head")
),
mainPanel(
tableOutput("previewSchedule"),
h3(textOutput("previewScheduleTotal")),
width = 12
)
)
),
tabPanel("Schedule coordinates",
sidebarLayout(
sidebarPanel(
# Input: Select a file ----
fileInput("scheduleCoords", "Choose schedule coordinates data (less than 500MB)",
multiple = FALSE,
accept = c("text/csv",
"text/comma-separated-values,text/plain",
".csv")),
# Input: Select separator ----
radioButtons("scheduleCoordssep", "Separator",
choices = c(Comma = ",",
Semicolon = ";",
Tab = "\t"),
selected = ","),
# Input: Select quotes ----
radioButtons("scheduleCoordsquote", "Quote",
choices = c(None = "",
"Double Quote" = '"',
"Single Quote" = "'"),
selected = '"'),
# Input: Checkbox if file has header ----
checkboxInput("scheduleCoordsheader", "Header", TRUE),
# Input: Select number of rows to display ----
radioButtons("scheduleCoordsdisp", "Display",
choices = c(Head = "head",
All = "all"),
selected = "head")
),
mainPanel(
tableOutput("previewScheduleCoords"),
h3(textOutput("previewScheduleTotalCoords")),
width = 12
)
)
),
tabPanel("Shape files - PC4",
sidebarLayout(
sidebarPanel(
# Input: Select a file ----
fileInput("shpFilePc4", "Choose shape files (.prj, .shp, .shx, .dbf)",
multiple = TRUE,
accept = c(".prj",
".shp",
".shx",
".dbf"),
placeholder = "4 files"),
helpText("Note: Multiple files should be uploaded at once.")
),
mainPanel(
h3("")
)
)
)
)
),
tabItem(tabName = "list-household",
fluidPage(div(DT::dataTableOutput("householdTotal"),
style = "font-size: 100%; width: 100%"))
),
tabItem(tabName = "list-schedule",
tabsetPanel(type = "tabs",
tabPanel("Schedule",
fluidRow(
column(width = 12,
DT::dataTableOutput("scheduleTotal")
)
)
),
tabPanel("O-D flow",
fluidRow(
column(width = 7,
box(width = 12,
DT::dataTableOutput("scheduleod")
),
downloadButton("downloadDTsched", "Download")
),
column(width = 5,
verticalLayout(
box(width = 12,
verticalLayout(
fluidRow(
column(width = 12,
box(width = 6, title = "Activity type",status = "primary",
solidHeader = TRUE,collapsible = TRUE,collapsed = FALSE,
# Copy the line below to make a set of radio buttons
checkboxGroupInput("listschedact",label = NULL,
choices = list("Home" = "Home",
"Work" = "Work",
"Business" = "Business",
"Bring/Get" = "BringGet",
"Groceries" = "Groceries",
"Non-Daily Shopping" = "NonGroc",
"Services" = "Services",
"Social" = "Social",
"Leisure" = "Leisure",
"Touring" = "Touring",
"Other" = "Other"
),
selected = c("Home","Work","Business","BringGet",
"Groceries","NonGroc","Services",
"Social","Leisure","Touring","Other"))),
box(width = 6, title = "Transport mode",status = "primary",
solidHeader = TRUE,collapsible = TRUE,collapsed = FALSE,
# Copy the line below to make a set of radio buttons
checkboxGroupInput("listschedmode",label = NULL,
choices = list("Staying Home" = "Missing",
"Car" = "Car",
"Car as Passenger" = "Car as Passenger",
"Public Transport" = "Public Transport",
"Walking or Biking" = "Walking or Biking"
),
selected = c("Missing","Car","Car as Passenger",
"Public Transport","Walking or Biking")))
)
),
box(width = 12, title = "Time of day",status = "primary",
solidHeader = TRUE,collapsible = TRUE,collapsed = TRUE,
sliderInput("listschedtime", label = NULL, min =180 , max = 1620, value = c(180,1620))
),
actionButton("submitscheduleod", "Submit")
# submitButton("Submit")
)
)
)
)
)
)
)
),
tabItem(tabName = "map-household",
fluidRow(
column(width = 8,
box(width = 12,leafletOutput("maphh",width = "100%", height = 750))
),
# box(width = 12, title = "Number of households to query",status = "primary",
# solidHeader = FALSE,collapsible = TRUE,collapsed = TRUE,
# fluidRow(width = 12,
# column(width = 7,
# checkboxInput("maphhshow", label = "Show only specified", value = FALSE)
# ),
# column(width = 5,
# numericInput("maphhnum", label = NULL, value = 10)
# )
# )
# ),
column(width = 4,
verticalLayout(
box(width = 12, title = "Household information",status = "primary",
solidHeader = TRUE,collapsible = TRUE,collapsed = FALSE,
verticalLayout(
fluidRow(
column(width = 12,
verbatimTextOutput("clickedhhId")
),
column(width = 12,
dataTableOutput("clickedhhTable")
)
)
)
)
)
),
actionButton("submitmaphh", "Submit")
# submitButton("Submit")
)
),
tabItem(tabName = "map-schedule",
tabsetPanel(type = "tabs",
tabPanel("Overview"
),
tabPanel("Activity location",
fluidRow(
column(width = 8,
box(width = 12,leafletOutput("mapactloc",width = "100%", height = 750))
),
column(width = 4,
verticalLayout(
box(width = 12,title = "Data filter",status = "primary",
solidHeader = TRUE,collapsible = TRUE,collapsed = FALSE,
verticalLayout(
fluidRow(
column(width = 12,
box(width = 6, title = "Activity type",status = "primary",
solidHeader = FALSE,collapsible = TRUE,collapsed = FALSE,
# Copy the line below to make a set of radio buttons
checkboxGroupInput("mapactlocact",label = NULL,
choices = list("Home" = "Home",
"Work" = "Work",
"Business" = "Business",
"Bring/Get" = "BringGet",
"Groceries" = "Groceries",
"Non-Daily Shopping" = "NonGroc",
"Services" = "Services",
"Social" = "Social",
"Leisure" = "Leisure",
"Touring" = "Touring",
"Other" = "Other"
),
selected = NULL)),
box(width = 6, title = "Transport mode",status = "primary",
solidHeader = FALSE,collapsible = TRUE,collapsed = FALSE,
# Copy the line below to make a set of radio buttons
checkboxGroupInput("mapactlocmode",label = NULL,
choices = list("Staying Home" = "Missing",
"Car" = "Car",
"Car as Passenger" = "Car as Passenger",
"Public Transport" = "Public Transport",
"Walking or Biking" = "Walking or Biking"
),
selected = NULL))
)
),
box(width = 12, title = "Charging",status = "primary",
solidHeader = FALSE,collapsible = TRUE,collapsed = TRUE,
checkboxGroupInput("mapactloccharging", label = NULL,
choices = list("No charging" = "NoCharging",
"Private charging" = "PrivateCharging",
"Public charging" = "PublicCharging",
"Semi-public charging" = "SemiPublicCharging",
"Fast charging" = "FastCharging"
),
selected = c("NoCharging","PrivateCharging", "PublicCharging", "SemiPublicCharging", "FastCharging"))
),
box(width = 12, title = "Time of day",status = "primary",
solidHeader = FALSE,collapsible = TRUE,collapsed = TRUE,
sliderInput("mapactloctime", label = NULL, min =180 , max = 1620, value = c(180,1620))
),
actionButton("submitmapactloc", "Submit")
# submitButton("Submit")
)
),
box(width = 12,title = "Activity information",status = "primary",
solidHeader = TRUE,collapsible = TRUE,collapsed = FALSE,
verticalLayout(
fluidRow(
column(width = 12,
verbatimTextOutput("clickedactlocId")
),
column(width = 12,
dataTableOutput("clickedactlocTable")
)
)
)
)
)
)
)
),
tabPanel("O-D flow(PC4)",
fluidRow(
column(width = 8,
box(width = 12,leafletOutput("mapodflow",width = "100%", height = 750))
),
column(width = 4,
box(width = 12,title = "Search by O-D pair ID",status = "primary",
solidHeader = TRUE,collapsible = TRUE,collapsed = TRUE,
# Input: Specify the trip id to view ----
numericInput("odid", label = NULL, value = 1),
actionButton("update", "Search")
),
hr(),
verticalLayout(
box(width = 12,title = "Data filter",status = "primary",
solidHeader = TRUE,collapsible = TRUE,collapsed = FALSE,
verticalLayout(
fluidRow(
column(width = 12,
box(width = 6, title = "Activity type",status = "primary",
solidHeader = FALSE,collapsible = TRUE,collapsed = FALSE,
# Copy the line below to make a set of radio buttons
checkboxGroupInput("mapodflowact",label = NULL,
choices = list("Home" = "Home",
"Work" = "Work",
"Business" = "Business",
"Bring/Get" = "BringGet",
"Groceries" = "Groceries",
"Non-Daily Shopping" = "NonGroc",
"Services" = "Services",
"Social" = "Social",
"Leisure" = "Leisure",
"Touring" = "Touring",
"Other" = "Other"
),
selected = NULL)),
box(width = 6, title = "Transport mode",status = "primary",
solidHeader = FALSE,collapsible = TRUE,collapsed = FALSE,
# Copy the line below to make a set of radio buttons
checkboxGroupInput("mapodflowmode",label = NULL,
choices = list("Staying Home" = "Missing",
"Car" = "Car",
"Car as Passenger" = "Car as Passenger",
"Public Transport" = "Public Transport",
"Walking or Biking" = "Walking or Biking"
),
selected = NULL))
)
),
box(width = 12, title = "Charging",status = "primary",
solidHeader = FALSE,collapsible = TRUE,collapsed = TRUE,
checkboxGroupInput("mapodflowcharging", label = NULL,
choices = list("No charging" = "NoCharging",
"Private charging" = "PrivateCharging",
"Public charging" = "PublicCharging",
"Semi-public charging" = "SemiPublicCharging",
"Fast charging" = "FastCharging"
),
selected = c("NoCharging","PrivateCharging", "PublicCharging", "SemiPublicCharging", "FastCharging"))
),
box(width = 12, title = "Time of day",status = "primary",
solidHeader = FALSE,collapsible = TRUE,collapsed = TRUE,
sliderInput("mapodflowtime", label = NULL, min =180 , max = 1620, value = c(180,1620))
),
box(width = 12, title = "Number of O-D pairs to query",status = "primary",
solidHeader = FALSE,collapsible = TRUE,collapsed = TRUE,
fluidRow(width = 12,
helpText("Note: O-D pairs are sorted largest to smallest by n.trips"),
column(width = 7,
checkboxInput("mapodflowshow", label = "Show only specified", value = FALSE)
),
column(width = 5,
numericInput("mapodflownum", label = NULL, value = 10)
)
)
),
actionButton("submitmapodflow", "Submit")
# submitButton("Submit")
)
)
),
box(width = 12, title = "Option",status = "primary",
solidHeader = TRUE,collapsible = TRUE,collapsed = TRUE,
downloadButton("dlmapodflow", label = "Export map")
)
)
)
),
# tabPanel("O-D flow(Animated)",
# fluidRow(
# column(width = 8,
# box(width = 12,leafletOutput("mapodflowanim",width = "100%", height = 750))
# ),
# column(width = 4, actionButton("submitmapodflowanim", "Submit")
#
# )
# )
#
# ),
tabPanel("Route-Individual(PC4)",
fluidRow(
column(width = 8,
box(width = 12,leafletOutput("maprouteind",width = "100%", height = 750))
),
column(width = 4,
verticalLayout(
box(width = 12,title = "Data filter",status = "primary",
solidHeader = TRUE,collapsible = TRUE,collapsed = FALSE,
verticalLayout(
fluidRow(
column(width = 12,
box(width = 6, title = "Activity type",status = "primary",
solidHeader = FALSE,collapsible = TRUE,collapsed = FALSE,
# Copy the line below to make a set of radio buttons
checkboxGroupInput("maprouteindact",label = NULL,
choices = list("Home" = "Home",
"Work" = "Work",
"Business" = "Business",
"Bring/Get" = "BringGet",
"Groceries" = "Groceries",
"Non-Daily Shopping" = "NonGroc",
"Services" = "Services",
"Social" = "Social",
"Leisure" = "Leisure",
"Touring" = "Touring",
"Other" = "Other"
),
selected = NULL)),
box(width = 6, title = "Transport mode",status = "primary",
solidHeader = FALSE,collapsible = TRUE,collapsed = FALSE,
# Copy the line below to make a set of radio buttons
radioButtons("maprouteindmode",label = NULL,
choices = list("All" = "All",
"Car" = "Car",
"Car as Passenger" = "Car as Passenger",
"Public Transport" = "Public Transport",
"Walking or Biking" = "Walking or Biking"
),
selected = "All"))
)
),
box(width = 12, title = "Charging",status = "primary",
solidHeader = FALSE,collapsible = TRUE,collapsed = TRUE,
checkboxGroupInput("maprouteindcharging", label = NULL,
choices = list("No charging" = "NoCharging",
"Private charging" = "PrivateCharging",
"Public charging" = "PublicCharging",
"Semi-public charging" = "SemiPublicCharging",
"Fast charging" = "FastCharging"
),
selected = c("NoCharging","PrivateCharging", "PublicCharging", "SemiPublicCharging", "FastCharging"))
),
box(width = 12, title = "Time of day",status = "primary",
solidHeader = FALSE,collapsible = TRUE,collapsed = TRUE,
sliderInput("maprouteindtime", label = NULL, min =180 , max = 1620, value = 800)
),
box(width = 12, title = "Number of O-D pairs to route",status = "primary",
solidHeader = FALSE,collapsible = TRUE,collapsed = TRUE,
fluidRow(width = 12,
helpText("Note: O-D pairs are sorted largest to smallest by n.trips"),
column(width = 7,
checkboxInput("maprouteindshow", label = "Show only specified", value = TRUE)
),
column(width = 5,
numericInput("maprouteindnum", label = NULL, value = 5)
)
)
),
actionButton("submitmaprouteind", "Submit")
# submitButton("Submit")
)
)
),
box(width = 12, title = "Route information",status = "primary",
solidHeader = TRUE,collapsible = TRUE,collapsed = TRUE,
verticalLayout(
fluidRow(
column(width = 12,
verbatimTextOutput("clickedrouteindId")
),
column(width = 12,
dataTableOutput("clickedrouteindTable")
)
)
)
),
box(width = 12, title = "Option",status = "primary",
solidHeader = TRUE,collapsible = TRUE,collapsed = TRUE,
downloadButton("dlmaprouteind", label = "Export map")
)
)
)
),
tabPanel("Route-Individual(PC6)",
fluidRow(
column(width = 8,
box(width = 12,leafletOutput("maprouteindpc6",width = "100%", height = 750))
),
column(width = 4,
verticalLayout(
box(width = 12,title = "Data filter",status = "primary",
solidHeader = TRUE,collapsible = TRUE,collapsed = FALSE,
verticalLayout(
fluidRow(
column(width = 12,
box(width = 6, title = "Activity type",status = "primary",
solidHeader = FALSE,collapsible = TRUE,collapsed = FALSE,
# Copy the line below to make a set of radio buttons
checkboxGroupInput("maprouteindpc6act",label = NULL,
choices = list("Home" = "Home",
"Work" = "Work",
"Business" = "Business",
"Bring/Get" = "BringGet",
"Groceries" = "Groceries",
"Non-Daily Shopping" = "NonGroc",
"Services" = "Services",
"Social" = "Social",
"Leisure" = "Leisure",
"Touring" = "Touring",
"Other" = "Other"
),
selected = NULL)),
box(width = 6, title = "Transport mode",status = "primary",
solidHeader = FALSE,collapsible = TRUE,collapsed = FALSE,
# Copy the line below to make a set of radio buttons
radioButtons("maprouteindpc6mode",label = NULL,
choices = list("All" = "All",
"Car" = "Car",
"Car as Passenger" = "Car as Passenger",
"Public Transport" = "Public Transport",
"Walking or Biking" = "Walking or Biking"
),
selected = "All"))
)
),
box(width = 12, title = "Charging",status = "primary",
solidHeader = FALSE,collapsible = TRUE,collapsed = TRUE,
checkboxGroupInput("maprouteindpc6charging", label = NULL,
choices = list("No charging" = "NoCharging",
"Private charging" = "PrivateCharging",
"Public charging" = "PublicCharging",
"Semi-public charging" = "SemiPublicCharging",
"Fast charging" = "FastCharging"
),
selected = c("NoCharging","PrivateCharging", "PublicCharging", "SemiPublicCharging", "FastCharging"))
),
box(width = 12, title = "Time of day",status = "primary",
solidHeader = FALSE,collapsible = TRUE,collapsed = TRUE,
sliderInput("maprouteindtimepc6", label = NULL, min =180 , max = 1620, value = 800)
),
box(width = 12, title = "Number of trips to route",status = "primary",
solidHeader = FALSE,collapsible = TRUE,collapsed = TRUE,
fluidRow(width = 12,
column(width = 7,
checkboxInput("maprouteindpc6show", label = "Show only specified", value = TRUE)
),
column(width = 5,
numericInput("maprouteindpc6num", label = NULL, value = 5)
)
)
),
actionButton("submitmaprouteindpc6", "Submit")
# submitButton("Submit")
)
)
),
box(width = 12, title = "Route information",status = "primary",
solidHeader = TRUE,collapsible = TRUE,collapsed = TRUE,
verticalLayout(
fluidRow(
column(width = 12,
verbatimTextOutput("clickedrouteindIdpc6")
),
column(width = 12,
dataTableOutput("clickedrouteindTablepc6")
)
)
)
),
box(width = 12, title = "Option",status = "primary",
solidHeader = TRUE,collapsible = TRUE,collapsed = TRUE,
downloadButton("dlmaprouteindpc6", label = "Export map")
)
)
)
),
tabPanel("Route-Aggregated(PC4)",
fluidRow(
column(width = 8,
box(width = 12,leafletOutput("maprouteagg",width = "100%", height = 750))
),
column(width = 4,
verticalLayout(
box(width = 12,title = "Data filter",status = "primary",
solidHeader = TRUE,collapsible = TRUE,collapsed = FALSE,
verticalLayout(
fluidRow(
column(width = 12,
box(width = 6, title = "Activity type",status = "primary",
solidHeader = FALSE,collapsible = TRUE,collapsed = FALSE,
# Copy the line below to make a set of radio buttons
checkboxGroupInput("maprouteaggact",label = NULL,
choices = list("Home" = "Home",
"Work" = "Work",
"Business" = "Business",
"Bring/Get" = "BringGet",
"Groceries" = "Groceries",
"Non-Daily Shopping" = "NonGroc",
"Services" = "Services",
"Social" = "Social",
"Leisure" = "Leisure",
"Touring" = "Touring",
"Other" = "Other"
),
selected = NULL)),
box(width = 6, title = "Transport mode",status = "primary",
solidHeader = FALSE,collapsible = TRUE,collapsed = FALSE,
# Copy the line below to make a set of radio buttons
radioButtons("maprouteaggmode",label = NULL,
choices = list("All" = "All",
"Car" = "Car",
"Car as Passenger" = "Car as Passenger",
"Public Transport" = "Public Transport",
"Walking or Biking" = "Walking or Biking"
),
selected = "All"))
)
),
box(width = 12, title = "Charging",status = "primary",
solidHeader = FALSE,collapsible = TRUE,collapsed = TRUE,
checkboxGroupInput("maprouteaggcharging", label = NULL,
choices = list("No charging" = "NoCharging",
"Private charging" = "PrivateCharging",
"Public charging" = "PublicCharging",
"Semi-public charging" = "SemiPublicCharging",
"Fast charging" = "FastCharging"
),
selected = c("NoCharging","PrivateCharging", "PublicCharging", "SemiPublicCharging", "FastCharging"))
),
box(width = 12, title = "Time of day",status = "primary",
solidHeader = FALSE,collapsible = TRUE,collapsed = TRUE,
sliderInput("maprouteaggtime", label = NULL, min =180 , max = 1620, value = 800)
),
box(width = 12, title = "Number of O-D pairs to route",status = "primary",
solidHeader = FALSE,collapsible = TRUE,collapsed = TRUE,
fluidRow(width = 12,
helpText("Note: O-D pairs are sorted largest to smallest by n.trips"),
column(width = 7,
checkboxInput("maprouteaggshow", label = "Show only specified", value = TRUE)
),
column(width = 5,
numericInput("maprouteaggnum", label = NULL, value = 5)
)
)
),
actionButton("submitmaprouteagg", "Submit")
# submitButton("Submit")
)
)
),
box(width = 12, title = "Route information",status = "primary",
solidHeader = TRUE,collapsible = TRUE,collapsed = TRUE,
verticalLayout(
fluidRow(
column(width = 12,
verbatimTextOutput("clickedrouteaggId")
),
column(width = 12,
dataTableOutput("clickedrouteaggTable")
)
)
)
),
box(width = 12, title = "Option",status = "primary",
solidHeader = TRUE,collapsible = TRUE,collapsed = TRUE,
downloadButton("dlmaprouteagg", label = "Export map")
)
)
)
)
)
),
tabItem(tabName = "map-summary",
tabsetPanel(type = "tabs",
tabPanel("Overview"
),
tabPanel("Activity duration (minutes)",
fluidRow(
column(width = 8,
box(width = 12,leafletOutput("mapact",width = "100%", height = 750))
),
column(width = 4,
verticalLayout(
checkboxGroupInput("checkActivity",
h3("Activity type"),
choices = list("Home" = "ActDur.Home",
"Work" = "ActDur.Work",
"Business" = "ActDur.Business",
"Bring/Get" = "ActDur.BringGet",
"Groceries" = "ActDur.Groceries",
"Non-Daily Shopping" = "ActDur.NonGroc",
"Services" = "ActDur.Services",
"Social" = "ActDur.Social",
"Leisure" = "ActDur.Leisure",
"Touring" = "ActDur.Touring",
"Other" = "ActDur.Other"),
selected = c("ActDur.Business","ActDur.BringGet", "ActDur.Groceries",
"ActDur.NonGroc", "ActDur.Services", "ActDur.Social",
"ActDur.Leisure", "ActDur.Touring", "ActDur.Other"))
# actionButton("submitmapact", "Submit")
# submitButton("Submit")
)
)
)
),
tabPanel("Electricity consumption (kWh)",
fluidRow(
column(width = 8,
box(width = 12,leafletOutput("mapcharging",width = "100%", height = 750))
),
column(width = 4,
verticalLayout(
numericInput("privateCharging", h5("Private charging rate (kWh)"), value = 3.3),
numericInput("publicCharging", h5("Public charging rate (kWh)"), value = 6.6),
numericInput("semiPublicCharging", h5("Semi-pubic charging rate (kWh)"), value = 6.6),
numericInput("fastCharging", h5("Fast charging rate (kWh)"), value = 50)
# actionButton("submitmapcharging", "Submit")
# submitButton("Submit")
)
)
)
)
)
),
tabItem(tabName = "graph-household",
h2("Show graph")
),
tabItem(tabName = "graph-schedule",
h2("Show graph")
),
tabItem(tabName = "about",
h2("About")
),
tabItem(tabName = "animate",
tabsetPanel(type = "tabs",
tabPanel("Overview"
)
# tabPanel("Animate map",
# fluidRow(
# column(width = 8,
# box(width = 12,leafletOutput("mapanim",width = "100%", height = 750))
# ),
# column(width = 4,
# verticalLayout(
# box(width = 12,title = "Data filter",status = "primary",
# solidHeader = TRUE,collapsible = TRUE,collapsed = FALSE,
# verticalLayout(
# fluidRow(
# column(width = 12,
# box(width = 6, title = "Activity type",status = "primary",
# solidHeader = FALSE,collapsible = TRUE,collapsed = FALSE,
# # Copy the line below to make a set of radio buttons
# checkboxGroupInput("mapanimact",label = NULL,
# choices = list("Home" = "Home",
# "Work" = "Work",
# "Business" = "Business",
# "Bring/Get" = "BringGet",
# "Groceries" = "Groceries",
# "Non-Daily Shopping" = "NonGroc",
# "Services" = "Services",
# "Social" = "Social",
# "Leisure" = "Leisure",
# "Touring" = "Touring",
# "Other" = "Other"
# ),
# selected = c("Home","Work","Business","BringGet",
# "Groceries","NonGroc","Services",
# "Social","Leisure","Touring","Other"))),
# box(width = 6, title = "Transport mode",status = "primary",
# solidHeader = FALSE,collapsible = TRUE,collapsed = FALSE,
# # Copy the line below to make a set of radio buttons
# checkboxGroupInput("mapanimmode",label = NULL,
# choices = list("Staying Home" = "Missing",
# "Car" = "Car",
# "Car as Passenger" = "Car as Passenger",
# "Public Transport" = "Public Transport",
# "Walking or Biking" = "Walking or Biking"
# ),
# selected = c("Missing","Car","Car as Passenger",
# "Public Transport","Walking or Biking")))
# )
# )
# )
# ),
# box(width = 12,title = "Time line",status = "primary",
# solidHeader = TRUE,collapsible = TRUE,collapsed = FALSE,
# sliderInput("mapanimtime", label = NULL, min = 180, max = 1620,
# value = c(180,195),
# step=15,
# animate= animationOptions(interval = 200, loop = FALSE,
# playButton = NULL, pauseButton = NULL))
# )
# )
# )
# )
# )
)
)
)
)
)) | /ui.R | no_license | tpgjs66/Albatross4Viewer | R | false | false | 76,818 | r |
# This is the user-interface definition of a Shiny web application.
# You can find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com
#
library(shiny)
library(shinydashboard)
library(leaflet)
library(leaflet.extras)
library(leaflet.minicharts)
library(DT)
library(sp)
library(sf)
library(rgdal)
library(rgeos)
library(dplyr)
library(stplanr)
library(RcppArmadillo)
library(mapview)
library(rmapshaper)
library(doParallel)
ui <- (dashboardPage(
dashboardHeader(title = "Albatross4 Viewer"
),
dashboardSidebar(
sidebarMenu(
menuItem(
"Data Preparation",
tabName = "data",
icon = icon("database")
# menuSubItem("Watersheds", tabName = "m_water", icon = icon("map")),
# menuSubItem("Population", tabName = "m_pop", icon = icon("map"))
),
menuItem(
"View Household",
tabName = "household",
icon = icon("users",lib = "font-awesome"),
menuSubItem("Map", tabName = "map-household", icon = icon("globe")),
# menuSubItem("Graph", tabName = "graph-household", icon = icon("signal")),
menuSubItem("List", tabName = "list-household", icon = icon("table",lib="font-awesome"))
),
menuItem(
"View Schedule",
tabName = "schedule",
icon = icon("list-ol", lib = "font-awesome"),
menuSubItem("Map", tabName = "map-schedule", icon = icon("globe")),
# menuSubItem("Graph", tabName = "graph-schedule", icon = icon("signal")),
menuSubItem("List", tabName = "list-schedule", icon = icon("table",lib="font-awesome"))
),
menuItem(
"Summary Statistics",
tabName = "statistics",
icon = icon("stats",lib = "glyphicon"),
menuSubItem("Map", tabName = "map-summary", icon = icon("globe"))
),
# menuItem(
# "Animated map",
# tabName = "animate",
# icon = icon("facetime-video",lib = "glyphicon")
# ),
menuItem(
"About",
tabName = "about",
icon = icon("envelope",lib = "glyphicon")
)
)
),
dashboardBody(
tags$head(tags$style(type="text/css", "
#loadmessage {
border: 16px solid #f3f3f3; /* Light grey */
border-radius: 50%;
border-top: 16px solid #3c8dbc; /* Blue */
width: 100px;
height: 100px;
-webkit-animation: spin 2s linear infinite;
margin: auto;
margin-left: auto;
position: relative;
z-index: 100;
top: 350px;
left: 600px;
}
@keyframes spin {
` 0% { transform: rotate(0deg); }
100% { transform: rotate(360deg); }
}
")),
absolutePanel(
conditionalPanel(condition="$('html').hasClass('shiny-busy')",
tags$div(id="loadmessage"))
),
tabItems(
tabItem(tabName = "data",
tabsetPanel(type = "tabs",
tabPanel("Overview",
img(src="Albatross.jpg", align = "left", width = "50%", height = "50%")
),
tabPanel("Household",
sidebarLayout(
sidebarPanel(
# Input: Select a file ----
fileInput("household", "Choose household data (less than 500MB)",
multiple = FALSE,
accept = c("text/csv",
"text/comma-separated-values,text/plain",
".csv")),
# Input: Select separator ----
radioButtons("householdsep", "Separator",
choices = c(Comma = ",",
Semicolon = ";",
Tab = "\t"),
selected = ","),
# Input: Select quotes ----
radioButtons("householdquote", "Quote",
choices = c(None = "",
"Double Quote" = '"',
"Single Quote" = "'"),
selected = '"'),
# Input: Checkbox if file has header ----
checkboxInput("householdheader", "Header", TRUE),
# Input: Select number of rows to display ----
radioButtons("householddisp", "Display",
choices = c(Head = "head",
All = "all"),
selected = "head")
),
mainPanel(
tableOutput("previewHousehold"),
h3(textOutput("previewHouseholdTotal")),
width = 12
)
)
),
tabPanel("Household coordinates",
sidebarLayout(
sidebarPanel(
# Input: Select a file ----
fileInput("hhcoords", "Choose household coordinates data (less than 500MB)",
multiple = FALSE,
accept = c("text/csv",
"text/comma-separated-values,text/plain",
".csv")),
# Input: Select separator ----
radioButtons("hhcoordssep", "Separator",
choices = c(Comma = ",",
Semicolon = ";",
Tab = "\t"),
selected = ","),
# Input: Select quotes ----
radioButtons("hhcoordsquote", "Quote",
choices = c(None = "",
"Double Quote" = '"',
"Single Quote" = "'"),
selected = '"'),
# Input: Checkbox if file has header ----
checkboxInput("hhcoordsheader", "Header", TRUE),
# Input: Select number of rows to display ----
radioButtons("hhcoordsdisp", "Display",
choices = c(Head = "head",
All = "all"),
selected = "head")
),
mainPanel(
)
)
),
tabPanel("Schedule",
sidebarLayout(
sidebarPanel(
# Input: Select a file ----
fileInput("schedule", "Choose schedule data (less than 500MB)",
multiple = FALSE,
accept = c("text/csv",
"text/comma-separated-values,text/plain",
".csv")),
# Input: Select separator ----
radioButtons("schedulesep", "Separator",
choices = c(Comma = ",",
Semicolon = ";",
Tab = "\t"),
selected = ","),
# Input: Select quotes ----
radioButtons("schedulequote", "Quote",
choices = c(None = "",
"Double Quote" = '"',
"Single Quote" = "'"),
selected = '"'),
# Input: Checkbox if file has header ----
checkboxInput("scheduleheader", "Header", TRUE),
# Input: Select number of rows to display ----
radioButtons("scheduledisp", "Display",
choices = c(Head = "head",
All = "all"),
selected = "head")
),
mainPanel(
tableOutput("previewSchedule"),
h3(textOutput("previewScheduleTotal")),
width = 12
)
)
),
tabPanel("Schedule coordinates",
sidebarLayout(
sidebarPanel(
# Input: Select a file ----
fileInput("scheduleCoords", "Choose schedule coordinates data (less than 500MB)",
multiple = FALSE,
accept = c("text/csv",
"text/comma-separated-values,text/plain",
".csv")),
# Input: Select separator ----
radioButtons("scheduleCoordssep", "Separator",
choices = c(Comma = ",",
Semicolon = ";",
Tab = "\t"),
selected = ","),
# Input: Select quotes ----
radioButtons("scheduleCoordsquote", "Quote",
choices = c(None = "",
"Double Quote" = '"',
"Single Quote" = "'"),
selected = '"'),
# Input: Checkbox if file has header ----
checkboxInput("scheduleCoordsheader", "Header", TRUE),
# Input: Select number of rows to display ----
radioButtons("scheduleCoordsdisp", "Display",
choices = c(Head = "head",
All = "all"),
selected = "head")
),
mainPanel(
tableOutput("previewScheduleCoords"),
h3(textOutput("previewScheduleTotalCoords")),
width = 12
)
)
),
tabPanel("Shape files - PC4",
sidebarLayout(
sidebarPanel(
# Input: Select a file ----
fileInput("shpFilePc4", "Choose shape files (.prj, .shp, .shx, .dbf)",
multiple = TRUE,
accept = c(".prj",
".shp",
".shx",
".dbf"),
placeholder = "4 files"),
helpText("Note: Multiple files should be uploaded at once.")
),
mainPanel(
h3("")
)
)
)
)
),
tabItem(tabName = "list-household",
fluidPage(div(DT::dataTableOutput("householdTotal"),
style = "font-size: 100%; width: 100%"))
),
tabItem(tabName = "list-schedule",
tabsetPanel(type = "tabs",
tabPanel("Schedule",
fluidRow(
column(width = 12,
DT::dataTableOutput("scheduleTotal")
)
)
),
tabPanel("O-D flow",
fluidRow(
column(width = 7,
box(width = 12,
DT::dataTableOutput("scheduleod")
),
downloadButton("downloadDTsched", "Download")
),
column(width = 5,
verticalLayout(
box(width = 12,
verticalLayout(
fluidRow(
column(width = 12,
box(width = 6, title = "Activity type",status = "primary",
solidHeader = TRUE,collapsible = TRUE,collapsed = FALSE,
# Copy the line below to make a set of radio buttons
checkboxGroupInput("listschedact",label = NULL,
choices = list("Home" = "Home",
"Work" = "Work",
"Business" = "Business",
"Bring/Get" = "BringGet",
"Groceries" = "Groceries",
"Non-Daily Shopping" = "NonGroc",
"Services" = "Services",
"Social" = "Social",
"Leisure" = "Leisure",
"Touring" = "Touring",
"Other" = "Other"
),
selected = c("Home","Work","Business","BringGet",
"Groceries","NonGroc","Services",
"Social","Leisure","Touring","Other"))),
box(width = 6, title = "Transport mode",status = "primary",
solidHeader = TRUE,collapsible = TRUE,collapsed = FALSE,
# Copy the line below to make a set of radio buttons
checkboxGroupInput("listschedmode",label = NULL,
choices = list("Staying Home" = "Missing",
"Car" = "Car",
"Car as Passenger" = "Car as Passenger",
"Public Transport" = "Public Transport",
"Walking or Biking" = "Walking or Biking"
),
selected = c("Missing","Car","Car as Passenger",
"Public Transport","Walking or Biking")))
)
),
box(width = 12, title = "Time of day",status = "primary",
solidHeader = TRUE,collapsible = TRUE,collapsed = TRUE,
sliderInput("listschedtime", label = NULL, min =180 , max = 1620, value = c(180,1620))
),
actionButton("submitscheduleod", "Submit")
# submitButton("Submit")
)
)
)
)
)
)
)
),
tabItem(tabName = "map-household",
fluidRow(
column(width = 8,
box(width = 12,leafletOutput("maphh",width = "100%", height = 750))
),
# box(width = 12, title = "Number of households to query",status = "primary",
# solidHeader = FALSE,collapsible = TRUE,collapsed = TRUE,
# fluidRow(width = 12,
# column(width = 7,
# checkboxInput("maphhshow", label = "Show only specified", value = FALSE)
# ),
# column(width = 5,
# numericInput("maphhnum", label = NULL, value = 10)
# )
# )
# ),
column(width = 4,
verticalLayout(
box(width = 12, title = "Household information",status = "primary",
solidHeader = TRUE,collapsible = TRUE,collapsed = FALSE,
verticalLayout(
fluidRow(
column(width = 12,
verbatimTextOutput("clickedhhId")
),
column(width = 12,
dataTableOutput("clickedhhTable")
)
)
)
)
)
),
actionButton("submitmaphh", "Submit")
# submitButton("Submit")
)
),
tabItem(tabName = "map-schedule",
tabsetPanel(type = "tabs",
tabPanel("Overview"
),
tabPanel("Activity location",
fluidRow(
column(width = 8,
box(width = 12,leafletOutput("mapactloc",width = "100%", height = 750))
),
column(width = 4,
verticalLayout(
box(width = 12,title = "Data filter",status = "primary",
solidHeader = TRUE,collapsible = TRUE,collapsed = FALSE,
verticalLayout(
fluidRow(
column(width = 12,
box(width = 6, title = "Activity type",status = "primary",
solidHeader = FALSE,collapsible = TRUE,collapsed = FALSE,
# Copy the line below to make a set of radio buttons
checkboxGroupInput("mapactlocact",label = NULL,
choices = list("Home" = "Home",
"Work" = "Work",
"Business" = "Business",
"Bring/Get" = "BringGet",
"Groceries" = "Groceries",
"Non-Daily Shopping" = "NonGroc",
"Services" = "Services",
"Social" = "Social",
"Leisure" = "Leisure",
"Touring" = "Touring",
"Other" = "Other"
),
selected = NULL)),
box(width = 6, title = "Transport mode",status = "primary",
solidHeader = FALSE,collapsible = TRUE,collapsed = FALSE,
# Copy the line below to make a set of radio buttons
checkboxGroupInput("mapactlocmode",label = NULL,
choices = list("Staying Home" = "Missing",
"Car" = "Car",
"Car as Passenger" = "Car as Passenger",
"Public Transport" = "Public Transport",
"Walking or Biking" = "Walking or Biking"
),
selected = NULL))
)
),
box(width = 12, title = "Charging",status = "primary",
solidHeader = FALSE,collapsible = TRUE,collapsed = TRUE,
checkboxGroupInput("mapactloccharging", label = NULL,
choices = list("No charging" = "NoCharging",
"Private charging" = "PrivateCharging",
"Public charging" = "PublicCharging",
"Semi-public charging" = "SemiPublicCharging",
"Fast charging" = "FastCharging"
),
selected = c("NoCharging","PrivateCharging", "PublicCharging", "SemiPublicCharging", "FastCharging"))
),
box(width = 12, title = "Time of day",status = "primary",
solidHeader = FALSE,collapsible = TRUE,collapsed = TRUE,
sliderInput("mapactloctime", label = NULL, min =180 , max = 1620, value = c(180,1620))
),
actionButton("submitmapactloc", "Submit")
# submitButton("Submit")
)
),
box(width = 12,title = "Activity information",status = "primary",
solidHeader = TRUE,collapsible = TRUE,collapsed = FALSE,
verticalLayout(
fluidRow(
column(width = 12,
verbatimTextOutput("clickedactlocId")
),
column(width = 12,
dataTableOutput("clickedactlocTable")
)
)
)
)
)
)
)
),
tabPanel("O-D flow(PC4)",
fluidRow(
column(width = 8,
box(width = 12,leafletOutput("mapodflow",width = "100%", height = 750))
),
column(width = 4,
box(width = 12,title = "Search by O-D pair ID",status = "primary",
solidHeader = TRUE,collapsible = TRUE,collapsed = TRUE,
# Input: Specify the trip id to view ----
numericInput("odid", label = NULL, value = 1),
actionButton("update", "Search")
),
hr(),
verticalLayout(
box(width = 12,title = "Data filter",status = "primary",
solidHeader = TRUE,collapsible = TRUE,collapsed = FALSE,
verticalLayout(
fluidRow(
column(width = 12,
box(width = 6, title = "Activity type",status = "primary",
solidHeader = FALSE,collapsible = TRUE,collapsed = FALSE,
# Copy the line below to make a set of radio buttons
checkboxGroupInput("mapodflowact",label = NULL,
choices = list("Home" = "Home",
"Work" = "Work",
"Business" = "Business",
"Bring/Get" = "BringGet",
"Groceries" = "Groceries",
"Non-Daily Shopping" = "NonGroc",
"Services" = "Services",
"Social" = "Social",
"Leisure" = "Leisure",
"Touring" = "Touring",
"Other" = "Other"
),
selected = NULL)),
box(width = 6, title = "Transport mode",status = "primary",
solidHeader = FALSE,collapsible = TRUE,collapsed = FALSE,
# Copy the line below to make a set of radio buttons
checkboxGroupInput("mapodflowmode",label = NULL,
choices = list("Staying Home" = "Missing",
"Car" = "Car",
"Car as Passenger" = "Car as Passenger",
"Public Transport" = "Public Transport",
"Walking or Biking" = "Walking or Biking"
),
selected = NULL))
)
),
box(width = 12, title = "Charging",status = "primary",
solidHeader = FALSE,collapsible = TRUE,collapsed = TRUE,
checkboxGroupInput("mapodflowcharging", label = NULL,
choices = list("No charging" = "NoCharging",
"Private charging" = "PrivateCharging",
"Public charging" = "PublicCharging",
"Semi-public charging" = "SemiPublicCharging",
"Fast charging" = "FastCharging"
),
selected = c("NoCharging","PrivateCharging", "PublicCharging", "SemiPublicCharging", "FastCharging"))
),
box(width = 12, title = "Time of day",status = "primary",
solidHeader = FALSE,collapsible = TRUE,collapsed = TRUE,
sliderInput("mapodflowtime", label = NULL, min =180 , max = 1620, value = c(180,1620))
),
box(width = 12, title = "Number of O-D pairs to query",status = "primary",
solidHeader = FALSE,collapsible = TRUE,collapsed = TRUE,
fluidRow(width = 12,
helpText("Note: O-D pairs are sorted largest to smallest by n.trips"),
column(width = 7,
checkboxInput("mapodflowshow", label = "Show only specified", value = FALSE)
),
column(width = 5,
numericInput("mapodflownum", label = NULL, value = 10)
)
)
),
actionButton("submitmapodflow", "Submit")
# submitButton("Submit")
)
)
),
box(width = 12, title = "Option",status = "primary",
solidHeader = TRUE,collapsible = TRUE,collapsed = TRUE,
downloadButton("dlmapodflow", label = "Export map")
)
)
)
),
# tabPanel("O-D flow(Animated)",
# fluidRow(
# column(width = 8,
# box(width = 12,leafletOutput("mapodflowanim",width = "100%", height = 750))
# ),
# column(width = 4, actionButton("submitmapodflowanim", "Submit")
#
# )
# )
#
# ),
tabPanel("Route-Individual(PC4)",
fluidRow(
column(width = 8,
box(width = 12,leafletOutput("maprouteind",width = "100%", height = 750))
),
column(width = 4,
verticalLayout(
box(width = 12,title = "Data filter",status = "primary",
solidHeader = TRUE,collapsible = TRUE,collapsed = FALSE,
verticalLayout(
fluidRow(
column(width = 12,
box(width = 6, title = "Activity type",status = "primary",
solidHeader = FALSE,collapsible = TRUE,collapsed = FALSE,
# Copy the line below to make a set of radio buttons
checkboxGroupInput("maprouteindact",label = NULL,
choices = list("Home" = "Home",
"Work" = "Work",
"Business" = "Business",
"Bring/Get" = "BringGet",
"Groceries" = "Groceries",
"Non-Daily Shopping" = "NonGroc",
"Services" = "Services",
"Social" = "Social",
"Leisure" = "Leisure",
"Touring" = "Touring",
"Other" = "Other"
),
selected = NULL)),
box(width = 6, title = "Transport mode",status = "primary",
solidHeader = FALSE,collapsible = TRUE,collapsed = FALSE,
# Copy the line below to make a set of radio buttons
radioButtons("maprouteindmode",label = NULL,
choices = list("All" = "All",
"Car" = "Car",
"Car as Passenger" = "Car as Passenger",
"Public Transport" = "Public Transport",
"Walking or Biking" = "Walking or Biking"
),
selected = "All"))
)
),
box(width = 12, title = "Charging",status = "primary",
solidHeader = FALSE,collapsible = TRUE,collapsed = TRUE,
checkboxGroupInput("maprouteindcharging", label = NULL,
choices = list("No charging" = "NoCharging",
"Private charging" = "PrivateCharging",
"Public charging" = "PublicCharging",
"Semi-public charging" = "SemiPublicCharging",
"Fast charging" = "FastCharging"
),
selected = c("NoCharging","PrivateCharging", "PublicCharging", "SemiPublicCharging", "FastCharging"))
),
box(width = 12, title = "Time of day",status = "primary",
solidHeader = FALSE,collapsible = TRUE,collapsed = TRUE,
sliderInput("maprouteindtime", label = NULL, min =180 , max = 1620, value = 800)
),
box(width = 12, title = "Number of O-D pairs to route",status = "primary",
solidHeader = FALSE,collapsible = TRUE,collapsed = TRUE,
fluidRow(width = 12,
helpText("Note: O-D pairs are sorted largest to smallest by n.trips"),
column(width = 7,
checkboxInput("maprouteindshow", label = "Show only specified", value = TRUE)
),
column(width = 5,
numericInput("maprouteindnum", label = NULL, value = 5)
)
)
),
actionButton("submitmaprouteind", "Submit")
# submitButton("Submit")
)
)
),
box(width = 12, title = "Route information",status = "primary",
solidHeader = TRUE,collapsible = TRUE,collapsed = TRUE,
verticalLayout(
fluidRow(
column(width = 12,
verbatimTextOutput("clickedrouteindId")
),
column(width = 12,
dataTableOutput("clickedrouteindTable")
)
)
)
),
box(width = 12, title = "Option",status = "primary",
solidHeader = TRUE,collapsible = TRUE,collapsed = TRUE,
downloadButton("dlmaprouteind", label = "Export map")
)
)
)
),
tabPanel("Route-Individual(PC6)",
fluidRow(
column(width = 8,
box(width = 12,leafletOutput("maprouteindpc6",width = "100%", height = 750))
),
column(width = 4,
verticalLayout(
box(width = 12,title = "Data filter",status = "primary",
solidHeader = TRUE,collapsible = TRUE,collapsed = FALSE,
verticalLayout(
fluidRow(
column(width = 12,
box(width = 6, title = "Activity type",status = "primary",
solidHeader = FALSE,collapsible = TRUE,collapsed = FALSE,
# Copy the line below to make a set of radio buttons
checkboxGroupInput("maprouteindpc6act",label = NULL,
choices = list("Home" = "Home",
"Work" = "Work",
"Business" = "Business",
"Bring/Get" = "BringGet",
"Groceries" = "Groceries",
"Non-Daily Shopping" = "NonGroc",
"Services" = "Services",
"Social" = "Social",
"Leisure" = "Leisure",
"Touring" = "Touring",
"Other" = "Other"
),
selected = NULL)),
box(width = 6, title = "Transport mode",status = "primary",
solidHeader = FALSE,collapsible = TRUE,collapsed = FALSE,
# Copy the line below to make a set of radio buttons
radioButtons("maprouteindpc6mode",label = NULL,
choices = list("All" = "All",
"Car" = "Car",
"Car as Passenger" = "Car as Passenger",
"Public Transport" = "Public Transport",
"Walking or Biking" = "Walking or Biking"
),
selected = "All"))
)
),
box(width = 12, title = "Charging",status = "primary",
solidHeader = FALSE,collapsible = TRUE,collapsed = TRUE,
checkboxGroupInput("maprouteindpc6charging", label = NULL,
choices = list("No charging" = "NoCharging",
"Private charging" = "PrivateCharging",
"Public charging" = "PublicCharging",
"Semi-public charging" = "SemiPublicCharging",
"Fast charging" = "FastCharging"
),
selected = c("NoCharging","PrivateCharging", "PublicCharging", "SemiPublicCharging", "FastCharging"))
),
box(width = 12, title = "Time of day",status = "primary",
solidHeader = FALSE,collapsible = TRUE,collapsed = TRUE,
sliderInput("maprouteindtimepc6", label = NULL, min =180 , max = 1620, value = 800)
),
box(width = 12, title = "Number of trips to route",status = "primary",
solidHeader = FALSE,collapsible = TRUE,collapsed = TRUE,
fluidRow(width = 12,
column(width = 7,
checkboxInput("maprouteindpc6show", label = "Show only specified", value = TRUE)
),
column(width = 5,
numericInput("maprouteindpc6num", label = NULL, value = 5)
)
)
),
actionButton("submitmaprouteindpc6", "Submit")
# submitButton("Submit")
)
)
),
box(width = 12, title = "Route information",status = "primary",
solidHeader = TRUE,collapsible = TRUE,collapsed = TRUE,
verticalLayout(
fluidRow(
column(width = 12,
verbatimTextOutput("clickedrouteindIdpc6")
),
column(width = 12,
dataTableOutput("clickedrouteindTablepc6")
)
)
)
),
box(width = 12, title = "Option",status = "primary",
solidHeader = TRUE,collapsible = TRUE,collapsed = TRUE,
downloadButton("dlmaprouteindpc6", label = "Export map")
)
)
)
),
tabPanel("Route-Aggregated(PC4)",
fluidRow(
column(width = 8,
box(width = 12,leafletOutput("maprouteagg",width = "100%", height = 750))
),
column(width = 4,
verticalLayout(
box(width = 12,title = "Data filter",status = "primary",
solidHeader = TRUE,collapsible = TRUE,collapsed = FALSE,
verticalLayout(
fluidRow(
column(width = 12,
box(width = 6, title = "Activity type",status = "primary",
solidHeader = FALSE,collapsible = TRUE,collapsed = FALSE,
# Copy the line below to make a set of radio buttons
checkboxGroupInput("maprouteaggact",label = NULL,
choices = list("Home" = "Home",
"Work" = "Work",
"Business" = "Business",
"Bring/Get" = "BringGet",
"Groceries" = "Groceries",
"Non-Daily Shopping" = "NonGroc",
"Services" = "Services",
"Social" = "Social",
"Leisure" = "Leisure",
"Touring" = "Touring",
"Other" = "Other"
),
selected = NULL)),
box(width = 6, title = "Transport mode",status = "primary",
solidHeader = FALSE,collapsible = TRUE,collapsed = FALSE,
# Copy the line below to make a set of radio buttons
radioButtons("maprouteaggmode",label = NULL,
choices = list("All" = "All",
"Car" = "Car",
"Car as Passenger" = "Car as Passenger",
"Public Transport" = "Public Transport",
"Walking or Biking" = "Walking or Biking"
),
selected = "All"))
)
),
box(width = 12, title = "Charging",status = "primary",
solidHeader = FALSE,collapsible = TRUE,collapsed = TRUE,
checkboxGroupInput("maprouteaggcharging", label = NULL,
choices = list("No charging" = "NoCharging",
"Private charging" = "PrivateCharging",
"Public charging" = "PublicCharging",
"Semi-public charging" = "SemiPublicCharging",
"Fast charging" = "FastCharging"
),
selected = c("NoCharging","PrivateCharging", "PublicCharging", "SemiPublicCharging", "FastCharging"))
),
box(width = 12, title = "Time of day",status = "primary",
solidHeader = FALSE,collapsible = TRUE,collapsed = TRUE,
sliderInput("maprouteaggtime", label = NULL, min =180 , max = 1620, value = 800)
),
box(width = 12, title = "Number of O-D pairs to route",status = "primary",
solidHeader = FALSE,collapsible = TRUE,collapsed = TRUE,
fluidRow(width = 12,
helpText("Note: O-D pairs are sorted largest to smallest by n.trips"),
column(width = 7,
checkboxInput("maprouteaggshow", label = "Show only specified", value = TRUE)
),
column(width = 5,
numericInput("maprouteaggnum", label = NULL, value = 5)
)
)
),
actionButton("submitmaprouteagg", "Submit")
# submitButton("Submit")
)
)
),
box(width = 12, title = "Route information",status = "primary",
solidHeader = TRUE,collapsible = TRUE,collapsed = TRUE,
verticalLayout(
fluidRow(
column(width = 12,
verbatimTextOutput("clickedrouteaggId")
),
column(width = 12,
dataTableOutput("clickedrouteaggTable")
)
)
)
),
box(width = 12, title = "Option",status = "primary",
solidHeader = TRUE,collapsible = TRUE,collapsed = TRUE,
downloadButton("dlmaprouteagg", label = "Export map")
)
)
)
)
)
),
tabItem(tabName = "map-summary",
tabsetPanel(type = "tabs",
tabPanel("Overview"
),
tabPanel("Activity duration (minutes)",
fluidRow(
column(width = 8,
box(width = 12,leafletOutput("mapact",width = "100%", height = 750))
),
column(width = 4,
verticalLayout(
checkboxGroupInput("checkActivity",
h3("Activity type"),
choices = list("Home" = "ActDur.Home",
"Work" = "ActDur.Work",
"Business" = "ActDur.Business",
"Bring/Get" = "ActDur.BringGet",
"Groceries" = "ActDur.Groceries",
"Non-Daily Shopping" = "ActDur.NonGroc",
"Services" = "ActDur.Services",
"Social" = "ActDur.Social",
"Leisure" = "ActDur.Leisure",
"Touring" = "ActDur.Touring",
"Other" = "ActDur.Other"),
selected = c("ActDur.Business","ActDur.BringGet", "ActDur.Groceries",
"ActDur.NonGroc", "ActDur.Services", "ActDur.Social",
"ActDur.Leisure", "ActDur.Touring", "ActDur.Other"))
# actionButton("submitmapact", "Submit")
# submitButton("Submit")
)
)
)
),
tabPanel("Electricity consumption (kWh)",
fluidRow(
column(width = 8,
box(width = 12,leafletOutput("mapcharging",width = "100%", height = 750))
),
column(width = 4,
verticalLayout(
numericInput("privateCharging", h5("Private charging rate (kWh)"), value = 3.3),
numericInput("publicCharging", h5("Public charging rate (kWh)"), value = 6.6),
numericInput("semiPublicCharging", h5("Semi-pubic charging rate (kWh)"), value = 6.6),
numericInput("fastCharging", h5("Fast charging rate (kWh)"), value = 50)
# actionButton("submitmapcharging", "Submit")
# submitButton("Submit")
)
)
)
)
)
),
tabItem(tabName = "graph-household",
h2("Show graph")
),
tabItem(tabName = "graph-schedule",
h2("Show graph")
),
tabItem(tabName = "about",
h2("About")
),
tabItem(tabName = "animate",
tabsetPanel(type = "tabs",
tabPanel("Overview"
)
# tabPanel("Animate map",
# fluidRow(
# column(width = 8,
# box(width = 12,leafletOutput("mapanim",width = "100%", height = 750))
# ),
# column(width = 4,
# verticalLayout(
# box(width = 12,title = "Data filter",status = "primary",
# solidHeader = TRUE,collapsible = TRUE,collapsed = FALSE,
# verticalLayout(
# fluidRow(
# column(width = 12,
# box(width = 6, title = "Activity type",status = "primary",
# solidHeader = FALSE,collapsible = TRUE,collapsed = FALSE,
# # Copy the line below to make a set of radio buttons
# checkboxGroupInput("mapanimact",label = NULL,
# choices = list("Home" = "Home",
# "Work" = "Work",
# "Business" = "Business",
# "Bring/Get" = "BringGet",
# "Groceries" = "Groceries",
# "Non-Daily Shopping" = "NonGroc",
# "Services" = "Services",
# "Social" = "Social",
# "Leisure" = "Leisure",
# "Touring" = "Touring",
# "Other" = "Other"
# ),
# selected = c("Home","Work","Business","BringGet",
# "Groceries","NonGroc","Services",
# "Social","Leisure","Touring","Other"))),
# box(width = 6, title = "Transport mode",status = "primary",
# solidHeader = FALSE,collapsible = TRUE,collapsed = FALSE,
# # Copy the line below to make a set of radio buttons
# checkboxGroupInput("mapanimmode",label = NULL,
# choices = list("Staying Home" = "Missing",
# "Car" = "Car",
# "Car as Passenger" = "Car as Passenger",
# "Public Transport" = "Public Transport",
# "Walking or Biking" = "Walking or Biking"
# ),
# selected = c("Missing","Car","Car as Passenger",
# "Public Transport","Walking or Biking")))
# )
# )
# )
# ),
# box(width = 12,title = "Time line",status = "primary",
# solidHeader = TRUE,collapsible = TRUE,collapsed = FALSE,
# sliderInput("mapanimtime", label = NULL, min = 180, max = 1620,
# value = c(180,195),
# step=15,
# animate= animationOptions(interval = 200, loop = FALSE,
# playButton = NULL, pauseButton = NULL))
# )
# )
# )
# )
# )
)
)
)
)
)) |
## Below are two functions that are used to create a special object that stores inverse matrix
## and caches it. You would test this by inputing the following in the R Console:
# > source('../ProgrammingAssignment2/cachematrix.R') # You would first source this file
# > xm <- matrix(1:4,2,2) # Creates a 2 x 2 matrix
# > cache_matrix_object <- makeCacheMatrix(xm) # Passes matrix and gets special matrix object
# > cacheSolve(cache_matrix_object) # Solves inverse and stores it in 'special' 'cache' object
# > cacheSolve(cache_matrix_object) # Returns a cached inverse
## This is a function that creates a 'special' object made of a list of functions that are invoked
## from a different environment but belong to this environment. They're used to set 'msolve'
## at runtime from the other environment that calls 'setmsolve()'.
## What this function returns is a list of functions that are available in the invoking environment.
## This environment remains available to the calling environment because this 'special'
## object is passed as a parameter to the calling code, and that's how we read or
## assign a value to the object in this environment.
## Create a 'special' matrix object that can cache its inverse.
##
## Returns a list of closures that all share this call's environment, where
## the matrix `x` and the cached inverse `msolve` live:
##   get()               -- return the stored matrix
##   setmsolve(inverse)  -- store a computed inverse in the cache
##   getmsolve()         -- return the cached inverse (NULL if none cached yet)
##   set(y)              -- replace the stored matrix and invalidate the cache
##                          (added; appended last so positional access to the
##                          original three elements still works)
makeCacheMatrix <- function(x = matrix()) {
  # Let's default to NULL so we can test if we get something in cacheSolve
  msolve <- NULL
  get <- function() { x }
  setmsolve <- function(cachemsolve) { msolve <<- cachemsolve }
  getmsolve <- function() { msolve }
  # Replace the wrapped matrix; any cached inverse is stale, so clear it
  set <- function(y) {
    x <<- y
    msolve <<- NULL
  }
  list(get = get,
       setmsolve = setmsolve,
       getmsolve = getmsolve,
       set = set)
}
## The following function calculates inverse matrix created with the solve() function.
## However, it first checks to see if the inverse matrix has already been solved.
## If so, it gets the inverse matrix from the cache and skips the computation.
## Otherwise, it calls solve(x$get()) with matrix as a parameter and saves inverse matrix
## into cache by calling x$setmsolve(msolve) before returning msolve which is inverse matrix.
cacheSolve <- function(x, ...) {
# Getting the cached inverse; NULL means nothing has been cached yet
msolve <- x$getmsolve()
# Cache hit: return the stored inverse and skip the computation entirely
if(!is.null(msolve)) {
message("getting cached data")
return(msolve)
}
# Cache miss: fetch the matrix via the 'get' closure and invert it.
# NOTE(review): '...' is accepted but never forwarded to solve(); presumably
# it was meant to be passed through as solve(x$get(), ...) -- confirm.
message("calculate inverse matrix")
msolve <- solve(x$get())
# Store the inverse back in the cache object via its 'setmsolve' closure
# (see makeCacheMatrix) so later calls can reuse it
x$setmsolve(msolve)
## Return a matrix that is the inverse of 'x'
msolve
} | /cachematrix.R | no_license | donalddominko/ProgrammingAssignment2 | R | false | false | 2,789 | r | ## Below are two functions that are used to create a special object that stores inverse matrix
## and caches it. You would test this by inputing the following in the R Console:
# > source('../ProgrammingAssignment2/cachematrix.R') # You would first source this file
# > xm <- matrix(1:4,2,2) # Creates a 2 x 2 matrix
# > cache_matrix_object <- makeCacheMatrix(xm) # Passes matrix and gets special matrix object
# > cacheSolve(cache_matrix_object) # Solves inverse and stores it in 'special' 'cache' object
# > cacheSolve(cache_matrix_object) # Returns a cached inverse
## This is a function that creates a 'special' object made of list of functions that are invoked
## from different enviroment and are belonging to this enviroment. They're used to set 'msolve'
## in runtime of that other enviroment that is calling 'setmsolve()'.
## What this function returns is a list of functions that are available in invoking enviroment.
## This enviroment will be available to that other running enviroment because this 'special'
## object will be passed as a parameter to the running enviroment and that's how we read or
## assign a value to the object in this enviroment.
## Build a cache-aware matrix wrapper: the returned accessor closures all
## share this call's environment, which holds the matrix `x` and the cached
## inverse `msolve`.
makeCacheMatrix <- function(x = matrix()) {
  # The cache starts out empty; cacheSolve tests for NULL to detect a miss
  msolve <- NULL
  list(
    # Hand back the wrapped matrix
    get = function() x,
    # Stash a computed inverse in this environment for later reuse
    setmsolve = function(cachemsolve) msolve <<- cachemsolve,
    # Retrieve the cached inverse (NULL while the cache is empty)
    getmsolve = function() msolve
  )
}
## The following function calculates inverse matrix created with the solve() function.
## However, it first checks to see if the inverse matrix has already been solved.
## If so, it gets the inverse matrix from the cache and skips the computation.
## Otherwise, it calls solve(x$get()) with matrix as a parameter and saves inverse matrix
## into cache by calling x$setmsolve(msolve) before returning msolve which is inverse matrix.
## Return the inverse of the 'special' matrix object 'x' created by
## makeCacheMatrix: computed with solve() and cached on the first call,
## served from the cache afterwards.
## FIX: '...' is now forwarded to solve(); previously the extra arguments
## were accepted but silently ignored.
cacheSolve <- function(x, ...) {
  # Getting the cached matrix, hopefully not NULL
  msolve <- x$getmsolve()
  # If not NULL then we got msolve and just return msolve .. done
  if (!is.null(msolve)) {
    message("getting cached data")
    return(msolve)
  }
  # Cache miss: fetch the matrix via the 'get' closure and solve it,
  # forwarding any extra solve() arguments supplied by the caller
  message("calculate inverse matrix")
  msolve <- solve(x$get(), ...)
  # Store the result in the cache object so subsequent calls hit the cache
  x$setmsolve(msolve)
  ## Return a matrix that is the inverse of 'x'
  msolve
}
library(SAFD)
### Name: decomposer
### Title: Decomposer
### Aliases: decomposer
### Keywords: nonparametric datagen
### ** Examples
# Example: run decomposer() on two fuzzy numbers from the bundled XX data
data(XX)
A <- decomposer(XX[[2]])
A <- decomposer(XX[[1]])  # A is overwritten; it finally holds XX[[1]]'s result
head(A)
| /data/genthat_extracted_code/SAFD/examples/decomposer.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 216 | r | library(SAFD)
### Name: decomposer
### Title: Decomposer
### Aliases: decomposer
### Keywords: nonparametric datagen
### ** Examples
# Example: decompose two fuzzy numbers from the SAFD example dataset XX;
# A is overwritten, so it finally holds the decomposition of XX[[1]]
data(XX)
A<-decomposer(XX[[2]])
A<-decomposer(XX[[1]])
head(A)
|
\name{visualizePOISSONPlot}
\alias{visualizePOISSONPlot}
\title{Poisson Numbers ggplot Visualization}
\usage{
visualizePOISSONPlot(lambda, t)
}
\description{
This function generates 1000 Poisson random numbers and then plots the probability density of the generated random numbers using ggplot.
}
\examples{
visualizePOISSONPlot(10,20)
} | /Project/Phase2/package/familiarDistribiution/man/visualizePOISSONPlot.Rd | no_license | Ashkan-Soleymani98/ProbabilityStatistics---Fall2017-2018 | R | false | false | 332 | rd | \name{visualizePOISSONPlot}
\alias{visualizePOISSONPlot}
\title{Poisson Numbers ggplot Visualization}
\usage{
visualizePOISSONPlot(lambda, t)
}
\description{
this function generates 1000 poisson random numbers and then shows the plot for the pdf of generated random numbers using ggplot.
}
\examples{
visualizePOISSONPlot(10,20)
} |
test_that("function check", {
  # Scanning this toy customer table for variables matching "id" is
  # expected to come back empty (NULL)
  toy_df <- data.frame(CUSTOMER_ID = 1001:1010,
                       CUSTOMER_NAME = LETTERS[1:10],
                       ADRESSE_ID = 1:10)
  expect_equal(SR_search_variables(df = toy_df, var = "id"), NULL)
})
| /tests/testthat/test-SR_search_variables.R | permissive | samuelreuther/SRfunctions | R | false | false | 331 | r | test_that("function check", {
expect_equal(SR_search_variables(df = data.frame(CUSTOMER_ID = 1001:1010,
CUSTOMER_NAME = LETTERS[1:10],
ADRESSE_ID = 1:10),
var = "id"),
NULL)
})
|
## Dan Auerbach, spring 2017
## --------------------------------------
#### Wetlands-related Thematic extract function
## --------------------------------------
##Extract multigeom spdf against PWA (TWI+PWSL), NLCD2011, and NEF, then convert the per-polygon matrices of pixel values to percentages by class
##see original IRTscreen script (e.g., flipped "*.vals" to "*.lvls"; elements of those direct from native attrib tables; etc.)
##cropping gives minor speedup for single poly geom, but parallelized faster for multirow spdf via beginCluster/endCluster
##note differences in missing data between rasters can mean that summed areas (across classes) are not necessarily identical for a given bank poly
##Elected to focus on PWA due to a) consistency with TWI & PWSL separately b) conceptual simplicity and c) already have CONUS@30m, whereas PWSL only R4@10m
calcLndscpFtr = function(
  spdf #target polys (could modify to points)
  ,pwa = raster("dataRaster/PWA/PWA.tif")
  ,pwa.lvls = setNames(as.character(0:3), c("no","lo","md","hi"))
  ,nef = raster("dataRaster/National Ecological Framework/nef3_0b.tif")
  ,nef.lvls = setNames(as.character(0:3), c("bckg","hubs","crrd","auxC"))
  ,nlcd = raster("../NHDplus/NLCD2011/nlcd_2011_landcover_2011_edition_2014_10_10.img")
  ,nlcd.lvls = setNames(as.character(c(0,11,12,21,22,23,24,31,41,42,43,52,71,81,82,90,95)), c("Unclassified","OpenWater","PerennialSnowIce","Developed,OpenSpace","Developed,LowIntensity","Developed,MediumIntensity","Developed,HighIntensity","BarrenLand","DeciduousForest","EvergreenForest","MixedForest","ShrubScrub","Herbaceuous","HayPasture","CultivatedCrops","WoodyWetlands","EmergentHerbaceuousWetlands"))
  ,nc = 4 #cores
  ,dirRun = "/Users/dauerbac/Google Drive/404MitigationScreen"
  ,dirOut = "dataRobjects" #subdirectory (not currently smart about checking/creating)
){
  # BUGFIX: capture the caller's working directory *now*. The original
  # on.exit(setwd(getwd())) evaluated getwd() lazily at exit time, so it
  # "restored" the directory to wherever we already were (dirRun) and never
  # put the caller's working directory back.
  old_wd <- getwd()
  on.exit(setwd(old_wd), add = TRUE)
  setwd(dirRun)
  #### internal helper: extract raster r over polygons p (reprojected to r's
  #### CRS), then turn each polygon's 2-col value/weight matrix into
  #### percentages per thematic level plus absolute areas in sq. km
  rextr = function(r, p, lvl, lbl){
    # list of 2-col matrices (colnames "r" & "weight", not "value" & "weight")
    rx = raster::extract(r, spTransform(p, r@crs), weights = TRUE, normalizeWeights = TRUE)
    z = sapply(rx, function(m) {
      # share of the (normalized) pixel weight falling in each level
      pct = tapply(m[,2], factor(m[,1], levels = lvl, labels = paste0("pct_",lbl)), sum, na.rm = TRUE)
      pct = round(pct, 3)
      pct[is.na(pct)] = 0 #the tapply only returns NA when all NA for a value/level
      # absolute areas: share * n cells in poly * cell area (m^2 -> km^2);
      # note this may differ slightly from gArea(poly) * pct
      c(pct, setNames(round((pct * nrow(m) * prod(res(r))/1e+6), 3), paste0("sqkm_",lbl)))
    })
    return(t(z))
  }
  # now execute that against the 3 focal rasters, parallelized via raster's
  # beginCluster/endCluster helpers
  library(parallel)
  beginCluster(nc)
  m1 = rextr(pwa, spdf, lvl = pwa.lvls, lbl = names(pwa.lvls))
  m2 = rextr(nef, spdf, lvl = nef.lvls, lbl = names(nef.lvls))
  m3 = rextr(nlcd, spdf, lvl = nlcd.lvls, lbl = names(nlcd.lvls))
  endCluster()
  # one row per polygon, pct_*/sqkm_* columns per class across all 3 rasters
  m = cbind(m1, m2, m3)
  saveRDS(m, paste0(dirOut, "/", deparse(substitute(spdf)), "_LndscpFtr.rds"))
  return(m)
} #end calcLndscpFtr
| /func_calcLndscpFtrWtlndThematicExtract.R | permissive | daauerbach/miscR | R | false | false | 3,349 | r | ## Dan Auerbach, spring 2017
## --------------------------------------
#### Wetlands-related Thematic extract function
## --------------------------------------
##Extract multigeom spdf against PWA (TWI+PWSL), NLCD2011, and NEF, then convert the per-polygon matrices of pixel values to percentages by class
##see original IRTscreen script (e.g., flipped "*.vals" to "*.lvls"; elements of those direct from native attrib tables; etc.)
##cropping gives minor speedup for single poly geom, but parallelized faster for multirow spdf via beginCluster/endCluster
##note differences in missing data between rasters can mean that summed areas (across classes) are not necessarily identical for a given bank poly
##Elected to focus on PWA due to a) consistency with TWI & PWSL separately b) conceptual simplicity and c) already have CONUS@30m, whereas PWSL only R4@10m
calcLndscpFtr = function(
  spdf #target polys (could modify to points)
  ,pwa = raster("dataRaster/PWA/PWA.tif")
  ,pwa.lvls = setNames(as.character(0:3), c("no","lo","md","hi"))
  ,nef = raster("dataRaster/National Ecological Framework/nef3_0b.tif")
  ,nef.lvls = setNames(as.character(0:3), c("bckg","hubs","crrd","auxC"))
  ,nlcd = raster("../NHDplus/NLCD2011/nlcd_2011_landcover_2011_edition_2014_10_10.img")
  ,nlcd.lvls = setNames(as.character(c(0,11,12,21,22,23,24,31,41,42,43,52,71,81,82,90,95)), c("Unclassified","OpenWater","PerennialSnowIce","Developed,OpenSpace","Developed,LowIntensity","Developed,MediumIntensity","Developed,HighIntensity","BarrenLand","DeciduousForest","EvergreenForest","MixedForest","ShrubScrub","Herbaceuous","HayPasture","CultivatedCrops","WoodyWetlands","EmergentHerbaceuousWetlands"))
  ,nc = 4 #cores
  ,dirRun = "/Users/dauerbac/Google Drive/404MitigationScreen"
  ,dirOut = "dataRobjects" #subdirectory (not currently smart about checking/creating)
){
  # BUGFIX: capture the caller's working directory *now*. The original
  # on.exit(setwd(getwd())) evaluated getwd() lazily at exit time, so it
  # "restored" the directory to wherever we already were (dirRun) and never
  # put the caller's working directory back.
  old_wd <- getwd()
  on.exit(setwd(old_wd), add = TRUE)
  setwd(dirRun)
  #### internal helper: extract raster r over polygons p (reprojected to r's
  #### CRS), then turn each polygon's 2-col value/weight matrix into
  #### percentages per thematic level plus absolute areas in sq. km
  rextr = function(r, p, lvl, lbl){
    # list of 2-col matrices (colnames "r" & "weight", not "value" & "weight")
    rx = raster::extract(r, spTransform(p, r@crs), weights = TRUE, normalizeWeights = TRUE)
    z = sapply(rx, function(m) {
      # share of the (normalized) pixel weight falling in each level
      pct = tapply(m[,2], factor(m[,1], levels = lvl, labels = paste0("pct_",lbl)), sum, na.rm = TRUE)
      pct = round(pct, 3)
      pct[is.na(pct)] = 0 #the tapply only returns NA when all NA for a value/level
      # absolute areas: share * n cells in poly * cell area (m^2 -> km^2);
      # note this may differ slightly from gArea(poly) * pct
      c(pct, setNames(round((pct * nrow(m) * prod(res(r))/1e+6), 3), paste0("sqkm_",lbl)))
    })
    return(t(z))
  }
  # now execute that against the 3 focal rasters, parallelized via raster's
  # beginCluster/endCluster helpers
  library(parallel)
  beginCluster(nc)
  m1 = rextr(pwa, spdf, lvl = pwa.lvls, lbl = names(pwa.lvls))
  m2 = rextr(nef, spdf, lvl = nef.lvls, lbl = names(nef.lvls))
  m3 = rextr(nlcd, spdf, lvl = nlcd.lvls, lbl = names(nlcd.lvls))
  endCluster()
  # one row per polygon, pct_*/sqkm_* columns per class across all 3 rasters
  m = cbind(m1, m2, m3)
  saveRDS(m, paste0(dirOut, "/", deparse(substitute(spdf)), "_LndscpFtr.rds"))
  return(m)
} #end calcLndscpFtr
|
## Script to analyze samples of glucose and corticosterone from ##
## tree swallow adult and nestlings collected in NY, TN, WY, & AK ##
## from 2016-2019. The main purpose is to ask if increase in cort ##
## is directly correlated with increase in glucose. Different ##
## subsets of data are used for different questions because not ##
## all samples were always collected and some birds were part of ##
## manipulative experiments that could have influenced measures. ##
## ##
## Code by Conor Taff, last updated 8 November 2021 ##
# Notes ----
## Some differences in samples collected by age, year, and location
# AK, TN, WY: no dex or acth glucose; no nestlings
# NY: dex & acth glucose in some years; acth only in 2019 and only
# on 3rd caputure of females. Some years second or third capture
# did not have glucose measures.
# Nestlings: only from NY in 2019. acth is 3 days after b/s/d series
## Load packages ----
# NOTE(review): 'scales' appears twice in this list -- harmless but redundant
pacman::p_load(plyr, lme4, ggplot2, here, scales, lmerTest, sjPlot, scales, emmeans, emojifont,
tidyverse, raincloudplots, viridis, ggExtra, MASS, rethinking, rptR, DHARMa)
## Load & clean data ----
# ACTH validation experiment data
d_acth_fem <- read.delim(here::here("1_raw_data", "adult_acth_validation.txt"))
d_acth_nest <- read.delim(here::here("1_raw_data", "nestling_acth_validation.txt"))
# Main data
d <- read.delim(here::here("1_raw_data/data_glucose_cort.txt"))
## Substituting 0.47 as the minimum detectable amount of corticosterone.
# In some cases this was done manually when running ELISA, but this is
# checking to make sure all are set the same way here.
# which() (rather than a bare logical index) is used so rows with NA cort
# values are left untouched instead of being assigned
d[which(d$b_cort < 0.47), "b_cort"] <- 0.47
d[which(d$s_cort < 0.47), "s_cort"] <- 0.47
d[which(d$d_cort < 0.47), "d_cort"] <- 0.47
## Calculate delta values for cort and glucose
# stress response = induced minus base; feedback = post-dex minus induced;
# acth increase = post-acth minus induced (same pattern for glucose below)
d$s_resp <- d$s_cort - d$b_cort
d$n_feed <- d$d_cort - d$s_cort
d$a_inc <- d$a_cort - d$s_cort
d$gluc_resp <- d$s_gluc - d$b_gluc
d$gluc_feed <- d$d_gluc - d$s_gluc
d$gluc_ainc <- d$a_gluc - d$s_gluc
## Make a separate dataset for adults and exclude any post treatment measures.
# Also make a separate dataset just for New York.
da <- subset(d, d$class == "adult")
# Remove post treatment samples from corticosterone dosed birds
da$post_trt2 <- paste(da$post_trt, da$treatment, sep = "_")
da2 <- subset(da, da$post_trt2 != "yes_CORT_3d")
da2 <- subset(da2, da2$post_trt2 != "yes_CORT_6d")
# NOTE(review): da2n is subset from da (which still includes post-CORT
# samples), not from da2 -- confirm this is intended
da2n <- subset(da, da$state == "NY") # New York only
# Make a separate dataframe for nestlings
dn <- subset(d, d$class == "nestling")
dn$post_trt2 <- paste(dn$post_trt, dn$treatment, sep = "_")
## Glucose repeatability ----
# Keep only adults with complete glucose measures (base, stress, and response)
da2nx <- subset(da2, is.na(da2$b_gluc) == FALSE & is.na(da2$s_gluc) == FALSE & is.na(da2$gluc_resp) == FALSE)
# Count how many complete samples each bird (band) contributes. This replaces
# the original per-row subset() loop, which was O(n^2) and used the
# 1:nrow(da2nx) footgun (it iterated c(1, 0) when the data frame was empty).
# Assumes band is never NA (leg-band IDs), as the original loop effectively did.
band_counts <- table(da2nx$band)
da2nx$count <- as.integer(band_counts[as.character(da2nx$band)])
# Restrict to birds sampled more than once, then estimate repeatability of
# each glucose and corticosterone measure across repeated captures (rptR)
da2x <- subset(da2nx, da2nx$count > 1)
r_base <- rpt(b_gluc ~ (1|band), grname = "band", data = da2x, npermut = 0, datatype = "Gaussian")
r_str <- rpt(s_gluc ~ (1|band), grname = "band", data = da2x, npermut = 0, datatype = "Gaussian")
r_resp <- rpt(gluc_resp ~ (1|band), grname = "band", data = da2x, npermut = 0, datatype = "Gaussian")
r_basec <- rpt(b_cort ~ (1|band), grname = "band", data = da2x, npermut = 0, datatype = "Gaussian")
r_strc <- rpt(s_cort ~ (1|band), grname = "band", data = da2x, npermut = 0, datatype = "Gaussian")
r_respc <- rpt(s_resp ~ (1|band), grname = "band", data = da2x, npermut = 0, datatype = "Gaussian")
## Model group means NY ----
recode <- data.frame(type = c("b_cort", "s_cort", "d_cort", "a_cort",
"b_gluc", "s_gluc", "d_gluc", "a_gluc"),
timepoint = rep(c("base", "induce", "dex", "acth"), 2))
recode$timepoint <- factor(recode$timepoint, levels = c("base", "induce", "dex", "acth"))
#Adults
d1 <- da2n %>%
pivot_longer(cols = c("b_cort", "s_cort", "a_cort"), names_to = "type", values_to = "cort")
d1$type <- factor(d1$type, levels = c("b_cort", "s_cort", "a_cort"))
d1$unique <- paste(d1$band, d1$year, d1$date, sep = "_")
d1 <- plyr::join(d1, recode, "type")
m1 <- lmer(cort ~ timepoint + sex + (1|band), data = d1)
m1_em <- as.data.frame(emmeans(m1, "timepoint", lmer.df = "satterthwaite"))
m1_em$type <- factor(c("b_cort", "s_cort", "a_cort"), levels = c("b_cort", "s_cort", "a_cort"))
m1_eml <- pivot_longer(m1_em, cols = c("lower.CL", "upper.CL"), values_to = "y")
d1 <- da2n %>%
pivot_longer(cols = c("b_gluc", "s_gluc", "a_gluc"), names_to = "type", values_to = "glucose")
d1$type <- factor(d1$type, levels = c("b_gluc", "s_gluc", "a_gluc"))
d1$unique <- paste(d1$band, d1$year, d1$date, sep = "_")
d1 <- plyr::join(d1, recode, "type")
m2 <- lmer(glucose ~ timepoint + sex + (1|band), data = d1)
m2_em <- as.data.frame(emmeans(m2, "timepoint", lmer.df = "satterthwaite"))
m2_em$type <- factor(c("b_gluc", "s_gluc", "a_gluc"), levels = c("b_gluc", "s_gluc", "a_gluc"))
m2_eml <- pivot_longer(m2_em, cols = c("lower.CL", "upper.CL"), values_to = "y")
#Nestlings
# Same approach as the adult models above, but nestlings get a random intercept
# per nest (site_box) instead of per individual, and no sex term.
d1 <- dn %>%
pivot_longer(cols = c("b_cort", "s_cort", "a_cort"), names_to = "type", values_to = "cort")
d1$type <- factor(d1$type, levels = c("b_cort", "s_cort", "a_cort"))
d1$unique <- paste(d1$band, d1$year, d1$date, sep = "_")
d1$nest <- paste(d1$site, d1$box, sep = "_")
d1 <- plyr::join(d1, recode, "type")
m3 <- lmer(cort ~ timepoint + (1|nest), data = d1)
m3_em <- as.data.frame(emmeans(m3, "timepoint", lmer.df = "satterthwaite"))
m3_em$type <- factor(c("b_cort", "s_cort", "a_cort"), levels = c("b_cort", "s_cort", "a_cort"))
m3_eml <- pivot_longer(m3_em, cols = c("lower.CL", "upper.CL"), values_to = "y")
# Same structure for glucose
d1 <- dn %>%
pivot_longer(cols = c("b_gluc", "s_gluc", "a_gluc"), names_to = "type", values_to = "glucose")
d1$type <- factor(d1$type, levels = c("b_gluc", "s_gluc", "a_gluc"))
d1$unique <- paste(d1$band, d1$year, d1$date, sep = "_")
d1$nest <- paste(d1$site, d1$box, sep = "_")
d1 <- plyr::join(d1, recode, "type")
m4 <- lmer(glucose ~ timepoint + (1|nest), data = d1)
m4_em <- as.data.frame(emmeans(m4, "timepoint", lmer.df = "satterthwaite"))
m4_em$type <- factor(c("b_gluc", "s_gluc", "a_gluc"), levels = c("b_gluc", "s_gluc", "a_gluc"))
m4_eml <- pivot_longer(m4_em, cols = c("lower.CL", "upper.CL"), values_to = "y")
# Combined sjPlot coefficient table of the four group-mean models (adult and
# nestling cort / glucose); saved as RDS for the supplementary materials.
t1 <- tab_model(m1, m2, m3, m4, show.re.var = FALSE, show.p = FALSE,
dv.labels = c("Adult Corticosterone", "Adult Glucose", "Nestling Corticosterone", "Nestling Glucose"),
pred.labels = c("Intercept (Base / Female)", "Induced", "Post-Cortrosyn", "Sex (Male)"))
saveRDS(t1,
here::here("2_r_scripts/table_s3.rds"))
## Plot group means NY ----
# NY nestling
# Corticosterone
# Boxplots of raw values (nudged left) with jittered points, plus model-based
# emmeans estimates and CIs (nudged right) drawn on top.
d1 <- dn %>%
pivot_longer(cols = c("b_cort", "s_cort", "a_cort"), names_to = "type", values_to = "cort")
d1$type <- factor(d1$type, levels = c("b_cort", "s_cort", "a_cort"))
d1$unique <- paste(d1$band, d1$year, d1$date, sep = "_")
p1 <- ggplot(data = d1, mapping = aes(x = type, y = cort, fill = type, color = type)) +
geom_boxplot(alpha = 0.2, outlier.shape = NA, position = position_nudge(x = -0.3), width = 0.25) +
geom_jitter(width = 0.1, alpha = 0.4, size = 0.2) +
theme_classic() +
scale_fill_viridis(discrete = TRUE) +
scale_color_viridis(discrete = TRUE) + guides(fill = FALSE, color = FALSE) +
xlab("") + ylab("Corticosterone (ng/mL)") +
scale_x_discrete(labels = c("Base", "Induced", "Cortrosyn")) +
annotate("text", x = -Inf, y = Inf, label = "A", hjust = -0.5, vjust = 1.5) +
theme(axis.title = element_text(size = 14), axis.text = element_text(size = 13), axis.text.x = element_text(angle = 30, hjust = 1))
# add in confidence intervals from emmeans model (m3 = nestling cort model)
p1 <- p1 + geom_line(data = m3_eml, mapping = aes(x = type, y = y), col = "black", size = 1, position = position_nudge(x = 0.2)) +
geom_point(data = m3_em, mapping = aes(x = type, y = emmean), color = "black", shape = 23, position = position_nudge(x = 0.2))
# Glucose
# Same layout as the cort panel above; panel B of figure 2b.
d1 <- dn %>%
pivot_longer(cols = c("b_gluc", "s_gluc", "a_gluc"), names_to = "type", values_to = "glucose")
d1$type <- factor(d1$type, levels = c("b_gluc", "s_gluc", "a_gluc"))
d1$unique <- paste(d1$band, d1$year, d1$date, sep = "_")
p2 <- ggplot(data = d1, mapping = aes(x = type, y = glucose, fill = type, color = type)) +
geom_boxplot(alpha = 0.2, outlier.shape = NA, position = position_nudge(x = -0.3), width = 0.25) +
geom_jitter(width = 0.1, alpha = 0.4, size = 0.2) +
theme_classic() +
scale_fill_viridis(discrete = TRUE) +
scale_color_viridis(discrete = TRUE) + guides(fill = FALSE, color = FALSE) +
xlab("") + ylab("Glucose (mg/dl)") +
scale_x_discrete(labels = c("Base", "Induced", "Cortrosyn"))+
annotate("text", x = -Inf, y = Inf, label = "B", hjust = -0.5, vjust = 1.5) +
theme(axis.title = element_text(size = 14), axis.text = element_text(size = 13), axis.text.x = element_text(angle = 30, hjust = 1))
# add in emmeans intervals (m4 = nestling glucose model)
p2 <- p2 + geom_line(data = m4_eml, mapping = aes(x = type, y = y), col = "black", size = 1, position = position_nudge(x = 0.2)) +
geom_point(data = m4_em, mapping = aes(x = type, y = emmean), color = "black", shape = 23, position = position_nudge(x = 0.2))
# Two-panel nestling figure (cort + glucose)
ggsave(here::here("2_r_scripts/figure_2b.pdf"),
ggpubr::ggarrange(p1, p2),
device = "pdf", width = 5, height = 4, units = "in")
# NY adult
# Corticosterone
# Adult version of the group-mean panels; note p1/p2 are reused (overwritten)
# after the nestling figure has already been saved above.
d1 <- da2n %>%
pivot_longer(cols = c("b_cort", "s_cort", "a_cort"), names_to = "type", values_to = "cort")
d1$type <- factor(d1$type, levels = c("b_cort", "s_cort", "a_cort"))
d1$unique <- paste(d1$band, d1$year, d1$date, sep = "_")
p1 <- ggplot(data = d1, mapping = aes(x = type, y = cort, fill = type, color = type)) +
geom_boxplot(alpha = 0.2, position = position_nudge(x = -0.3), width = 0.25, outlier.shape = NA) +
geom_jitter(width = 0.1, alpha = 0.4, size = 0.2) +
theme_classic() +
scale_fill_viridis(discrete = TRUE) +
scale_color_viridis(discrete = TRUE) + guides(fill = FALSE, color = FALSE) +
xlab("") + ylab(paste("Corticosterone (ng/mL)")) +
scale_x_discrete(labels = c("Base", "Induced", "Cortrosyn")) +
# y-axis clipped at 110; points above this are dropped from the panel
ylim(0, 110) +
annotate("text", x = -Inf, y = Inf, label = "A", hjust = -0.5, vjust = 1.5) +
theme(axis.title = element_text(size = 14), axis.text = element_text(size = 13), axis.text.x = element_text(angle = 30, hjust = 1))
# add in confidence intervals from emmeans model (m1 = adult cort model)
p1 <- p1 + geom_line(data = m1_eml, mapping = aes(x = type, y = y), col = "black", size = 1, position = position_nudge(x = 0.2)) +
geom_point(data = m1_em, mapping = aes(x = type, y = emmean), color = "black", shape = 23, position = position_nudge(x = 0.2))
# Glucose
# Panel B of the adult figure; same layout as the cort panel above.
d1 <- da2n %>%
pivot_longer(cols = c("b_gluc", "s_gluc", "a_gluc"), names_to = "type", values_to = "glucose")
d1$type <- factor(d1$type, levels = c("b_gluc", "s_gluc", "a_gluc"))
d1$unique <- paste(d1$band, d1$year, d1$date, sep = "_")
p2 <- ggplot(data = d1, mapping = aes(x = type, y = glucose, fill = type, color = type)) +
geom_boxplot(alpha = 0.2, outlier.shape = NA, width = 0.25, position = position_nudge(x = -0.3)) +
geom_jitter(width = 0.1, alpha = 0.4, size = 0.2) +
theme_classic() +
scale_fill_viridis(discrete = TRUE) +
scale_color_viridis(discrete = TRUE) + guides(fill = FALSE, color = FALSE) +
xlab("") + ylab("Glucose (mg/dl)") +
scale_x_discrete(labels = c("Base", "Induced", "Cortrosyn"))+
annotate("text", x = -Inf, y = Inf, label = "B", hjust = -0.5, vjust = 1.5) +
theme(axis.title = element_text(size = 14), axis.text = element_text(size = 13), axis.text.x = element_text(angle = 30, hjust = 1))
# add in emmeans intervals (m2 = adult glucose model)
p2 <- p2 + geom_line(data = m2_eml, mapping = aes(x = type, y = y), col = "black", size = 1, position = position_nudge(x = 0.2)) +
geom_point(data = m2_em, mapping = aes(x = type, y = emmean), color = "black", shape = 23, position = position_nudge(x = 0.2))
# Two-panel adult figure (cort + glucose)
ggsave(here::here("2_r_scripts/figure_1b.pdf"),
ggpubr::ggarrange(p1, p2),
device = "pdf", width = 5, height = 4, units = "in")
## Plot individual variation NY ----
# Combine NY adults and nestlings; `class` distinguishes the two groups in the
# scatterplots below (per-class lm fits with ribbons).
dny <- rbind(da2n, dn)
# Panel A: baseline cort vs baseline glucose.
# NOTE(review): the x label says "log ng/mL" but b_cort is plotted
# untransformed here (x clipped to 0-20 via coord_cartesian) — confirm whether
# the label or the variable should change.
p1 <- ggplot(data = dny, mapping = aes(x = b_cort, y = b_gluc, color = class, fill = class)) +
geom_point(alpha = 0.5, size = 0.7) +
geom_smooth(method = "lm") +
theme_classic() +
scale_fill_manual(values = c("slateblue", "orange")) +
scale_color_manual(values = c("slateblue", "orange")) +
#guides(fill = FALSE, color = FALSE) +
theme(legend.position = c(0.8, 0.8), legend.title = element_blank()) +
annotate("text", x = -Inf, y = Inf, label = "A", hjust = -0.5, vjust = 1.5) +
xlab("Baseline cort \n (log ng/mL)") +
ylab("Baseline glucose (mg/dl)") +
theme(axis.title = element_text(size = 14), axis.text = element_text(size = 13),
legend.text = element_text(size = 12)) +
coord_cartesian(xlim = c(0, 20))
# Panel B: stress response (induced - base) in cort vs glucose, with dashed
# zero lines marking no change on each axis.
p2 <- ggplot(data = dny, mapping = aes(x = s_resp, y = gluc_resp, color = class, fill = class)) +
geom_point(alpha = 0.5, size = 0.7) +
geom_smooth(method = "lm") +
theme_classic() +
scale_fill_manual(values = c("slateblue", "orange")) +
scale_color_manual(values = c("slateblue", "orange")) +
guides(fill = FALSE, color = FALSE) +
annotate("text", x = -Inf, y = Inf, label = "B", hjust = -0.5, vjust = 1.5) +
xlim(-50, 100) +
xlab("Induced - baseline \n corticosterone (ng/mL)") +
ylab("Induced - baseline \n glucose (mg/dl)") +
geom_hline(yintercept = 0, linetype = "dashed", color = "gray60") +
geom_vline(xintercept = 0, linetype = "dashed", color = "gray60") +
theme(axis.title = element_text(size = 14), axis.text = element_text(size = 13))
# Panel: negative feedback (induced - post-dex) in cort vs glucose.
# This panel (p3) is built but dropped from the saved figure below in favor of
# p4, which is why both carry the "C" label.
p3 <- ggplot(data = dny, mapping = aes(x = n_feed, y = gluc_feed, color = class, fill = class)) +
geom_point(alpha = 0.5, size = 0.7) +
geom_smooth(method = "lm") +
theme_classic() +
scale_fill_manual(values = c("slateblue", "orange")) +
scale_color_manual(values = c("slateblue", "orange")) +
guides(fill = FALSE, color = FALSE) +
annotate("text", x = -Inf, y = Inf, label = "C", hjust = -0.5, vjust = 1.5) +
xlim(-100, 25) +
xlab("Induced - post-dex \n corticosterone (ng/mL)") +
ylab("Induced - post-dex \n glucose (mg/dl)") +
geom_hline(yintercept = 0, linetype = "dashed", color = "gray60") +
geom_vline(xintercept = 0, linetype = "dashed", color = "gray60") +
theme(axis.title = element_text(size = 14), axis.text = element_text(size = 13))
# Panel: ACTH-induced increase (post-cortrosyn - induced) in cort vs glucose.
# Fix: the x-axis label misspelled the hormone ("cortosyn"); it now reads
# "cortrosyn", matching the paired y-axis label and every other label in this
# script. Labeled "C" (not "D") because the saved figure uses p1, p2, p4 as
# its three panels (p3 is dropped below).
p4 <- ggplot(data = dny, mapping = aes(x = a_inc, y = gluc_ainc, color = class, fill = class)) +
geom_point(alpha = 0.5, size = 0.7) +
geom_smooth(method = "lm") +
theme_classic() +
scale_fill_manual(values = c("slateblue", "orange")) +
scale_color_manual(values = c("slateblue", "orange")) +
guides(fill = FALSE, color = FALSE) +
annotate("text", x = -Inf, y = Inf, label = "C", hjust = -0.5, vjust = 1.5) +
xlab("Post-cortrosyn - induced \n corticosterone (ng/mL)") +
ylab("Post-cortrosyn - induced \n glucose (mg/dl)") +
geom_hline(yintercept = 0, linetype = "dashed", color = "gray60") +
geom_vline(xintercept = 0, linetype = "dashed", color = "gray60") +
theme(axis.title = element_text(size = 14), axis.text = element_text(size = 13))
# Marginal boxplot variants were tried and abandoned (kept for reference):
#p2m <- ggMarginal(p2, type = "boxplot", margins = "y", groupColour = TRUE, groupFill = TRUE)
#p3m <- ggMarginal(p3, type = "boxplot", margins = "y", groupColour = TRUE, groupFill = TRUE)
#p4m <- ggMarginal(p4, type = "boxplot", margins = "y", groupColour = TRUE, groupFill = TRUE, xparams = list(varwidth = FALSE))
# Figure 3b uses p1, p2, p4 — the post-dex panel (p3) is intentionally omitted.
ggsave(here::here("2_r_scripts/figure_3b.pdf"),
ggpubr::ggarrange(p1, p2, p4, nrow = 1, ncol = 3),
device = "pdf", width = 10.5, height = 3.75, units = "in")
## Modeling individual variation NY ----
# Adults
# For each phase of the stress response, regress the glucose measure on the
# matching cort measure (copied into `predictor` each time so formulas stay
# uniform), with mass / sex covariates and a random intercept per individual.
# Baseline
da2n$predictor <- da2n$b_cort
mb <- lmer(b_gluc ~ scale(predictor) + scale(mass) + sex + (1|band), data = da2n)
# DHARMa residual diagnostics for the baseline model
res_mb <- simulateResiduals(mb)
plot(res_mb)
# plotQQunif(mb)
# plotResiduals(mb)
# Induced
da2n$predictor <- da2n$s_resp
ms <- lmer(gluc_resp ~ scale(predictor) * scale(mass) + sex + (1|band), data = da2n)
# plotQQunif(ms)
# plotResiduals(ms)
# Post-dex
da2n$predictor <- da2n$n_feed
md <- lmer(gluc_feed ~ scale(predictor) + sex + (1|band), data = da2n)
# plotQQunif(md)
# plotResiduals(md)
# Post-cortrosyn (plain lm; no random effect here)
da2n$predictor <- da2n$a_inc
ma <- lm(gluc_ainc ~ scale(predictor) + scale(mass), data = da2n)
# plotQQunif(ma)
# plotResiduals(ma)
# Supplementary table of the baseline / induced / post-cortrosyn models
# (the post-dex model md is not tabled here).
ta <- tab_model(mb, ms, ma, show.re.var = FALSE,
dv.labels = c("Baseline Glucose", "Induced - Base Glucose", "Post-Cortrosyn - Induced Glucose"),
pred.labels = c("Intercept", "Corticosterone", "Mass", "Sex (male)", "Corticosterone * Mass"))
saveRDS(ta, here::here("2_r_scripts/table_s4.rds"))
#emmeans(ms, "sex", lmer.df = "Satterthwaite")
# interaction for induced by base
# Visualize the cort-response x mass interaction from `ms`
# (gluc_resp ~ scale(predictor) * scale(mass) + sex) by simulating from the
# multivariate normal approximation to the fixed-effect posterior.
post <- mvrnorm(n = 1e6, mu = fixef(ms), vcov(ms))
# Fix: the original indexed coefficients positionally and used post[, 4] as
# the interaction term, but in this model matrix the main effects (predictor,
# mass, sex) precede the interaction, so column 4 is the *sex* coefficient and
# the interaction is the final column. Index by coefficient name instead so
# predictions use the intended terms (at the reference sex level).
colnames(post) <- names(fixef(ms))
b0 <- post[, "(Intercept)"]
bc <- post[, "scale(predictor)"]
bm <- post[, "scale(mass)"]
bi <- post[, "scale(predictor):scale(mass)"]
# Predicted glucose response across cort response (r, SD units) at three mass
# levels; HPDI (rethinking) gives the highest posterior density interval.
r <- seq(-3, 3, 0.1)
pred_mu <- function(m) sapply(r, function(z) mean(b0 + bc * z + bm * m + bi * z * m))
pred_ci <- function(m) sapply(r, function(z) HPDI(b0 + bc * z + bm * m + bi * z * m))
mu_neg1 <- pred_mu(-1)
ci_neg1 <- pred_ci(-1)
mu_zero <- pred_mu(0)
ci_zero <- pred_ci(0)
mu_pos1 <- pred_mu(1)
ci_pos1 <- pred_ci(1)
colss <- viridis(n = 5, option = "C")
pdf(here::here("2_r_scripts/figure_4b.pdf"), width = 6.5, height = 6.5)
plot(r, mu_neg1, lwd = 2, type = "n", xaxs = "i", yaxs = "i", xaxt = "n", yaxt = "n", xlim = c(-2.1, 2.1),
ylim = c(-40, 100), bty = "n", xlab = "Induced - baseline corticosterone (SD units)", ylab = "Induced - baseline glucose (mg/dl)",
cex.lab = 1.5)
axis(1, seq(-5, 5, 1))
axis(2, seq(-500, 500, 20), las = 2)
abline(h = 0, lty = 2, col = "gray60")
# Shaded HPDI ribbon + mean line for each mass level
shade(ci_neg1, r, col = alpha(colss[2], 0.3))
lines(r, mu_neg1, lwd = 2, col = colss[2])
shade(ci_zero, r, col = alpha(colss[3], 0.3))
lines(r, mu_zero, lwd = 2, col = colss[3])
shade(ci_pos1, r, col = alpha(colss[4], 0.3))
lines(r, mu_pos1, lwd = 2, col = colss[4])
legend(-0.7, -12, c("mass -1 SD", "mass 0 SD", "mass 1 SD"), bty = "n", col = colss[2:4], lwd = 2, cex = 1.2)
dev.off()
# Nestling
# Same per-phase glucose ~ cort models for nestlings, with a random intercept
# per nest (and per chick ID for baseline, where repeated measures exist).
# Note: mb / ms / md / ma overwrite the adult model objects above; the adult
# figure using `ms` has already been produced at this point.
dn$nest <- paste(dn$site, dn$box, sep = "_")
# Baseline
dn$predictor <- dn$b_cort
mb <- lmer(b_gluc ~ scale(predictor) * scale(mass) + (1|nest) + (1|ID), data = dn)
# plotQQunif(mb)
# plotResiduals(mb)
# Induced
dn$predictor <- dn$s_resp
ms <- lmer(gluc_resp ~ scale(predictor) + scale(mass) + (1|nest), data = dn)
# Post-dex
dn$predictor <- dn$n_feed
md <- lmer(gluc_feed ~ scale(predictor) * scale(mass) + (1|nest), data = dn)
# Post-cortrosyn
dn$predictor <- dn$a_inc
ma <- lmer(gluc_ainc ~ scale(predictor) + scale(mass) + (1|nest), data = dn)
# Supplementary table (post-dex model md again omitted, matching table_s4)
tn <- tab_model(mb, ms, ma, show.re.var = FALSE,
dv.labels = c("Baseline Glucose", "Induced - Base Glucose",
"Post-Cortrosyn - Induced Glucose"),
pred.labels = c("Intercept", "Corticosterone", "Mass", "Corticosterone * Mass"))
saveRDS(tn, here::here("2_r_scripts/table_s5.rds"))
## Modeling population comparison ----
# models for each state
# Fit baseline and induced glucose ~ cort x mass models separately per state,
# females only. WY and TN use plain lm for the induced model while AK and NY
# use lmer — presumably because those subsets lack repeated captures; verify
# against the data.
dwy <- subset(da2, da2$state == "WY")
dak <- subset(da2, da2$state == "AK")
dtn <- subset(da2, da2$state == "TN")
dwy$cort_pred <- dwy$b_cort
mwyb <- lmer(b_gluc ~ scale(cort_pred) * scale(mass) + (1|band), data = subset(dwy, dwy$sex == "F"))
dwy$cort_pred <- dwy$s_resp
mwyr <- lm(gluc_resp ~ scale(cort_pred) * scale(mass), data = subset(dwy, dwy$sex == "F"))
dak$cort_pred <- dak$b_cort
makb <- lmer(b_gluc ~ scale(cort_pred) * scale(mass) + (1|band), data = subset(dak, dak$sex == "F"))
dak$cort_pred <- dak$s_resp
makr <- lmer(gluc_resp ~ scale(cort_pred) * scale(mass) + (1|band), data = subset(dak, dak$sex == "F"))
dtn$cort_pred <- dtn$b_cort
mtnb <- lmer(b_gluc ~ scale(cort_pred) * scale(mass) + (1|band), data = subset(dtn, dtn$sex == "F"))
dtn$cort_pred <- dtn$s_resp
mtnr <- lm(gluc_resp ~ scale(cort_pred) * scale(mass), data = subset(dtn, dtn$sex == "F"))
da2n$cort_pred <- da2n$b_cort
mnyb <- lmer(b_gluc ~ scale(cort_pred) * scale(mass) + (1|band), data = subset(da2n, da2n$sex == "F"))
da2n$cort_pred <- da2n$s_resp
mnyr <- lmer(gluc_resp ~ scale(cort_pred) * scale(mass) + (1|band), data = subset(da2n, da2n$sex == "F"))
# Supplementary tables: one for baseline models, one for induced-response models
tc1 <- tab_model(makb, mnyb, mtnb, mwyb, show.re.var = FALSE,
dv.labels = c("AK Base Glucose", "NY Base Glucose", "TN Base Glucose", "WY Base Glucose"),
pred.labels = c("Intercept", "Base Corticosterone", "Mass", "Corticosterone * Mass"))
tc2 <- tab_model(makr, mnyr, mtnr, mwyr, show.re.var = FALSE,
dv.labels = c("AK Induced - Base Glucose", "NY Induced - Base Glucose", "TN Induced - Base Glucose", "WY Induced - Base Glucose"),
pred.labels = c("Intercept", "Induced - Base Corticosterone", "Mass", "Corticosterone * Mass"))
saveRDS(tc1,
here::here("2_r_scripts/table_s6.rds"))
saveRDS(tc2,
here::here("2_r_scripts/table_s7.rds"))
# Do states differ in glucose levels
# Baseline, induced, and delta glucose each modeled on state with a random
# intercept per individual; emmeans per-state estimates are extracted and their
# CI columns pivoted long for plotting below.
md <- lmer(b_gluc ~ state + (1|band), data = da2)
md2 <- lmer(s_gluc ~ state + (1|band), data = da2)
md3 <- lmer(s_gluc - b_gluc ~ state + (1|band), data = da2)
em_md <- as.data.frame(emmeans(md, "state"))
em_md$type <- factor(c("AK", "NY", "TN", "WY"), levels = c("AK", "NY", "TN", "WY"))
em_mdl <- pivot_longer(em_md, cols = c("lower.CL", "upper.CL"), values_to = "y")
em_md2 <- as.data.frame(emmeans(md2, "state"))
em_md2$type <- factor(c("AK", "NY", "TN", "WY"), levels = c("AK", "NY", "TN", "WY"))
em_md2l <- pivot_longer(em_md2, cols = c("lower.CL", "upper.CL"), values_to = "y")
em_md3 <- as.data.frame(emmeans(md3, "state"))
em_md3$type <- factor(c("AK", "NY", "TN", "WY"), levels = c("AK", "NY", "TN", "WY"))
em_md3l <- pivot_longer(em_md3, cols = c("lower.CL", "upper.CL"), values_to = "y")
# Interactive check of the three state models (not saved)
tab_model(md, md2, md3)
## Plotting population comparison ----
# base glucose
# Base glucose
# Per-state boxplots of raw values (nudged left) with jittered points, plus
# emmeans estimates and CIs (nudged right); panel A of figure 5b.
pop1 <- ggplot(data = da2, mapping = aes(x = state, y = b_gluc, fill = state, color = state)) +
geom_boxplot(alpha = 0.4, outlier.shape = NA, position = position_nudge(x = -0.3), width = 0.25) +
geom_jitter(width = 0.1, alpha = 0.6, size = 0.2) +
theme_classic() +
scale_fill_manual(values = c("slateblue", "coral3", "goldenrod", "purple")) +
scale_color_manual(values = c("slateblue", "coral3", "goldenrod", "purple")) +
xlab("") + ylab("Baseline glucose (mg/dl)") +
guides(color = FALSE, fill = FALSE) +
#scale_x_discrete(labels = c("Base", "Induced", "Dex.", "Cortrosyn"))+
annotate("text", x = -Inf, y = Inf, label = "A", hjust = -0.5, vjust = 1.5) +
theme(axis.title = element_text(size = 14), axis.text.x = element_text(size = 12))
# add in emmeans intervals (md = baseline glucose ~ state model)
pop1 <- pop1 + geom_line(data = em_mdl, mapping = aes(x = state, y = y), col = "black", size = 1, position = position_nudge(x = 0.2)) +
geom_point(data = em_md, mapping = aes(x = state, y = emmean), color = "black", shape = 23, position = position_nudge(x = 0.2))
# Stress glucose
# Panel B: induced glucose per state, same layout as panel A.
pop2 <- ggplot(data = da2, mapping = aes(x = state, y = s_gluc, fill = state, color = state)) +
geom_boxplot(alpha = 0.4, outlier.shape = NA, position = position_nudge(x = -0.3), width = 0.25) +
geom_jitter(width = 0.1, alpha = 0.6, size = 0.2) +
theme_classic() +
scale_fill_manual(values = c("slateblue", "coral3", "goldenrod", "purple")) +
scale_color_manual(values = c("slateblue", "coral3", "goldenrod", "purple")) +
xlab("") + ylab("Induced glucose (mg/dl)") +
guides(color = FALSE, fill = FALSE) +
#scale_x_discrete(labels = c("Base", "Induced", "Dex.", "Cortrosyn"))+
annotate("text", x = -Inf, y = Inf, label = "B", hjust = -0.5, vjust = 1.5) +
theme(axis.title = element_text(size = 14), axis.text.x = element_text(size = 12))
# add in emmeans intervals (md2 = induced glucose ~ state model)
pop2 <- pop2 + geom_line(data = em_md2l, mapping = aes(x = state, y = y), col = "black", size = 1, position = position_nudge(x = 0.2)) +
geom_point(data = em_md2, mapping = aes(x = state, y = emmean), color = "black", shape = 23, position = position_nudge(x = 0.2))
# delta glucose
# Panel C: glucose stress response (induced - base) per state, with a dashed
# zero line marking no change.
pop3 <- ggplot(data = da2, mapping = aes(x = state, y = s_gluc - b_gluc, fill = state, color = state)) +
geom_boxplot(alpha = 0.4, outlier.shape = NA, position = position_nudge(x = -0.3), width = 0.25) +
geom_jitter(width = 0.1, alpha = 0.6, size = 0.2) +
theme_classic() +
scale_fill_manual(values = c("slateblue", "coral3", "goldenrod", "purple")) +
scale_color_manual(values = c("slateblue", "coral3", "goldenrod", "purple")) +
xlab("") + ylab("Induced - base glucose (mg/dl)") +
guides(color = FALSE, fill = FALSE) +
#scale_x_discrete(labels = c("Base", "Induced", "Dex.", "Cortrosyn"))+
annotate("text", x = -Inf, y = Inf, label = "C", hjust = -0.5, vjust = 1.5) +
theme(axis.title = element_text(size = 14), axis.text.x = element_text(size = 12))
# add in emmeans intervals (md3 = delta glucose ~ state model)
pop3 <- pop3 + geom_line(data = em_md3l, mapping = aes(x = state, y = y), col = "black", size = 1, position = position_nudge(x = 0.2)) +
geom_point(data = em_md3, mapping = aes(x = state, y = emmean), color = "black", shape = 23, position = position_nudge(x = 0.2)) +
geom_hline(yintercept = 0, linetype = "dashed", col = "gray60")
# save figure
ggsave(here::here("2_r_scripts/figure_5b.pdf"),
ggpubr::ggarrange(pop1, pop2, pop3, nrow = 1),
device = "pdf", width = 7.5, height = 4, units = "in")
## Within-individual covariance ----
# Restrict NY adults to rows with complete base/induced cort and glucose, keep
# only individuals sampled more than 3 times, then standardize each measure
# within individual so the models below estimate purely within-individual
# covariation.
library(standardize)
da2n$byd <- paste(da2n$band, da2n$year, da2n$date, sep = "_")
da2nw <- subset(da2n, !is.na(da2n$b_cort) & !is.na(da2n$s_cort) &
!is.na(da2n$b_gluc) & !is.na(da2n$s_gluc))
# Number of samples per individual (band). Replaces an O(n^2) per-row subset()
# loop that also used the unsafe 1:nrow() idiom (breaks on zero-row input);
# ave() computes the per-group count in one vectorized pass with identical
# results.
da2nw$num_samps <- ave(seq_len(nrow(da2nw)), da2nw$band, FUN = length)
da2nw2 <- subset(da2nw, da2nw$num_samps > 3)
#Standardize within individuals (mean 0, sd 1 per band via standardize::scale_by)
da2nw2$b_gluc_s <- scale_by(b_gluc ~ as.factor(band), data = da2nw2)
da2nw2$b_cort_s <- scale_by(b_cort ~ as.factor(band), data = da2nw2)
da2nw2$s_gluc_s <- scale_by(s_gluc ~ as.factor(band), data = da2nw2)
da2nw2$s_cort_s <- scale_by(s_cort ~ as.factor(band), data = da2nw2)
da2nw2$s_resp_s <- scale_by(s_resp ~ as.factor(band), data = da2nw2)
da2nw2$gluc_resp_s <- scale_by(gluc_resp ~ as.factor(band), data = da2nw2)
# Within-individual covariance plots: one faint lm line per individual (band)
# plus an overall lm fit (coral). Panels: A = baseline, B = induced,
# C = delta (stress response), all on within-individual standardized scales.
# NOTE(review): the overall geom_smooth uses `by = state` while the per-bird
# lines use `by = as.factor(band)` — confirm the intended grouping for the
# coral fit.
p1 <- ggplot(data = da2nw2, mapping = aes(x = b_cort_s, y = b_gluc_s, by = as.factor(band))) +
geom_point(alpha = 0.5) +
geom_line(stat = "smooth", method = "lm", alpha = 0.9, color = "lightblue") +
theme_classic() +
guides(fill = FALSE, color = FALSE) +
annotate("text", x = -Inf, y = Inf, label = "A", hjust = -0.5, vjust = 1.5) +
geom_smooth(data = da2nw2, mapping = aes(x = b_cort_s, y = b_gluc_s, by = state), method = "lm", color = "coral3",
fill = "coral3") +
geom_hline(yintercept = 0, linetype = "dashed", color = "black") +
ylim(-2.5, 2.5) +
xlab("Within-individual \n base corticosterone") +
ylab("Within-individual \n base glucose") +
theme(axis.title = element_text(size = 13), axis.text = element_text(size = 12))
p2 <- ggplot(data = da2nw2, mapping = aes(x = s_cort_s, y = s_gluc_s, by = as.factor(band))) +
geom_point(alpha = 0.5) +
geom_line(stat = "smooth", method = "lm", alpha = 0.9, color = "lightblue") +
theme_classic() +
guides(fill = FALSE, color = FALSE) +
annotate("text", x = -Inf, y = Inf, label = "B", hjust = -0.5, vjust = 1.5) +
geom_smooth(data = da2nw2, mapping = aes(x = s_cort_s, y = s_gluc_s, by = state), method = "lm", color = "coral3",
fill = "coral3") +
geom_hline(yintercept = 0, linetype = "dashed", color = "black") +
ylim(-2.5, 2.5) +
xlab("Within-individual \n induced corticosterone") +
ylab("Within-individual \n induced glucose") +
theme(axis.title = element_text(size = 13), axis.text = element_text(size = 12))
p3 <- ggplot(data = da2nw2, mapping = aes(x = s_resp_s, y = gluc_resp_s, by = as.factor(band))) +
geom_point(alpha = 0.5) +
geom_line(stat = "smooth", method = "lm", alpha = 0.9, color = "lightblue") +
theme_classic() +
guides(fill = FALSE, color = FALSE) +
annotate("text", x = -Inf, y = Inf, label = "C", hjust = -0.5, vjust = 1.5) +
geom_smooth(data = da2nw2, mapping = aes(x = s_resp_s, y = gluc_resp_s, by = state), method = "lm", color = "coral3",
fill = "coral3") +
geom_hline(yintercept = 0, linetype = "dashed", color = "black") +
ylim(-2.5, 2.1) +
xlab("Within-individual \n \u0394 corticosterone") +
ylab("Within-individual \n \u0394 glucose") +
theme(axis.title = element_text(size = 13), axis.text = element_text(size = 12))
# Interactive preview of the three-panel figure (saved to file below)
ggpubr::ggarrange(p1, p2, p3, nrow = 1, ncol = 3)
# Within-individual covariance models: glucose (response) regressed on
# corticosterone (predictor), both standardized within individual; the
# intercept is suppressed (~ 0 + x) because both variables are already
# centered within band.
wi_b <- lm(b_gluc_s ~ 0 + b_cort_s, data = da2nw2)
wi_s <- lm(s_gluc_s ~ 0 + s_cort_s, data = da2nw2)
# Fix: the delta model had response and predictor swapped
# (s_resp_s ~ gluc_resp_s) relative to wi_b / wi_s and to panel C above
# (glucose on y, cort on x); it now regresses glucose change on cort change.
wi_sr <- lm(gluc_resp_s ~ 0 + s_resp_s, data = da2nw2)
tab_model(wi_b, wi_s, wi_sr)
ggsave(here::here("2_r_scripts/figure_6b.pdf"),
ggpubr::ggarrange(p1, p2, p3, nrow = 1, ncol = 3),
device = "pdf", width = 10.5, height = 3.75, units = "in")
## ACTH Validation Models ----
# Nestlings
# Pivot the three serial cort samples long and model cort by timepoint x
# treatment (Cortrosyn vs Saline, releveled so Saline is the reference), with
# random intercepts for chick (band) and nest (unit_box).
nest_l <- d_acth_nest %>%
pivot_longer(cols = c("cort1", "cort2", "cort3"), names_to = "timepoint",
values_to = "cort", values_drop_na = TRUE)
nest_l <- as.data.frame(nest_l)
nest_l$treatment <- as.factor(nest_l$treatment)
nest_l$treatment <- relevel(nest_l$treatment, ref = "Saline")
m_n_acth <- lmer(cort ~ timepoint*treatment + (1|band) + (1|unit_box), data = nest_l)
# Adults
# Same model for adult females; treatment codes BCC/BCA are first recoded to
# Saline/ACTH labels.
fem_l <- d_acth_fem %>%
pivot_longer(cols = c("cort1", "cort2", "cort3"), names_to = "timepoint",
values_to = "cort", values_drop_na = TRUE)
fem_l <- as.data.frame(fem_l)
fem_l$treatment <- gsub("BCC", "Saline", fem_l$treatment)
fem_l$treatment <- gsub("BCA", "ACTH", fem_l$treatment)
fem_l$treatment <- as.factor(fem_l$treatment)
fem_l$treatment <- relevel(fem_l$treatment, ref = "Saline")
m_a_acth <- lmer(cort ~ timepoint*treatment + (1|band), data = fem_l)
# Make a table
# Note: t1 overwrites the group-means table object created earlier; that table
# has already been saved to table_s3.rds by this point.
t1 <- tab_model(m_n_acth, m_a_acth,
pred.labels = c("Intercept (Baseline)", "Timepoint 2", "Timepoint 3",
"Cortrosyn at Baseline", "Cortrosyn at Timepoint 2", "Cortrosyn at Timepoint 3"),
dv.labels = c("Nestling Corticosterone", "Adult Corticosterone"))
saveRDS(t1, here::here("2_r_scripts/table_s1.rds"))
# Note that I want to put this table in the pdf output supplementary materials, but there is no direct way to
# use sjplot to put html tables into a markdown pdf. I manually saved the html as a pdf to put it in. That
# means if this code is modified the saved pdf file needs to be overwritten with a new version.
## ACTH Validation Plots ----
# These are plotted and saved to file here then pasted into the rmarkdown file for the supplemental materials
# Nestlings
# Per-bird trajectories (thin lines) plus per-timepoint boxplots by treatment;
# the dashed vertical line marks when the injection was given.
nest_a <- d_acth_nest %>%
pivot_longer(cols = c("cort1", "cort2", "cort3"), names_to = "timepoint",
values_to = "cort", values_drop_na = TRUE) %>%
ggplot(aes(x = timepoint, y = cort, fill = treatment)) +
geom_line(mapping = aes(x = timepoint, y = cort, group = band, color = treatment), alpha = 0.45) +
geom_boxplot(width = 0.25) + theme_classic() + xlab("Minutes After Capture") + ylab("Corticosterone ng/mL") +
scale_x_discrete(labels = c("<3", "15", "30")) + geom_vline(xintercept = 1.15, lty = 2, col = "gray40") +
annotate("text", x = 1.1, y = 40, label = "Cortrosyn or Saline Injection", angle = 90) + labs(fill = "Treatment") +
ggtitle("15 Day Old Nestlings") +
scale_fill_discrete(name = "Treatment", labels = c("Cortrosyn", "Saline")) + guides(color = FALSE) +
theme(legend.position = c(0.12, 0.9))
ggsave(here::here("2_r_scripts/figure_s2b.pdf"), plot = nest_a, width = 6, height = 5, units = "in", device = "pdf")
# Adults
# Adult females received a saline injection first, then Cortrosyn or saline at
# the second timepoint — hence the two dashed injection lines.
fem_a <- d_acth_fem %>%
pivot_longer(cols = c("cort1", "cort2", "cort3"), names_to = "timepoint",
values_to = "cort", values_drop_na = TRUE) %>%
ggplot(aes(x = timepoint, y = cort, fill = treatment)) +
geom_line(mapping = aes(x = timepoint, y = cort, group = band, color = treatment), alpha = 0.45) +
geom_boxplot(width = 0.25, outlier.size = 0, outlier.stroke = 0) + theme_classic() + xlab("Minutes After Capture") +
ylab("Corticosterone ng/mL") +
scale_x_discrete(labels = c("<3", "30", "60")) + geom_vline(xintercept = 1.15, lty = 2, col = "gray40") +
annotate("text", x = 1.1, y = 45, label = "Saline Injection", angle = 90) +
geom_vline(xintercept = 2.15, lty = 2, col = "gray40") +
annotate("text", x = 2.1, y = 50, label = "Cortrosyn or Saline Injection", angle = 90) +
labs(fill = "Treatment") + ggtitle("Adult Females") +
scale_fill_discrete(name = "Treatment", labels = c("Cortrosyn", "Saline")) + guides(color = FALSE) +
theme(legend.position = c(0.12, 0.9))
ggsave(here::here("2_r_scripts/figure_s1b.pdf"), plot = fem_a, width = 6, height = 5, units = "in", device = "pdf")
| /2_r_scripts/1_main_cort_glucose_analysis.R | no_license | cct663/glucose_cort | R | false | false | 39,362 | r | ## Script to analyze samples of glucose and corticosterone from ##
## tree swallow adult and nestlings collected in NY, TN, WY, & AK ##
## from 2016-2019. The main purpose is to ask if increase in cort ##
## is directly correlated with increase in glucose. Different ##
## subsets of data are used for different questions becaues not ##
## all samples were always collected and some birds were part of ##
## manipulative experiments that could have influenced measures. ##
## ##
## Code by Conor Taff, last updated 8 November 2021 ##
# Notes ----
## Some differences in samples collected by age, year, and location
# AK, TN, WY: no dex or acth glucose; no nestlings
# NY: dex & acth glucose in some years; acth only in 2019 and only
# on 3rd caputure of females. Some years second or third capture
# did not have glucose measures.
# Nestlings: only from NY in 2019. acth is 3 days after b/s/d series
## Load packages ----
# Attach all analysis dependencies; pacman::p_load installs any that are
# missing before attaching. Fix: "scales" was listed twice in the original
# call (harmless but redundant).
pacman::p_load(plyr, lme4, ggplot2, here, scales, lmerTest, sjPlot, emmeans, emojifont,
tidyverse, raincloudplots, viridis, ggExtra, MASS, rethinking, rptR, DHARMa)
## Load & clean data ----
# ACTH validation experiment data
d_acth_fem <- read.delim(here::here("1_raw_data", "adult_acth_validation.txt"))
d_acth_nest <- read.delim(here::here("1_raw_data", "nestling_acth_validation.txt"))
# Main data
d <- read.delim(here::here("1_raw_data/data_glucose_cort.txt"))
## Substituting 0.47 as the minimum detectable amount of corticosterone.
# In some cases this was done manually when running ELISA, but this is
# checking to make sure all are set the same way here.
d[which(d$b_cort < 0.47), "b_cort"] <- 0.47
d[which(d$s_cort < 0.47), "s_cort"] <- 0.47
d[which(d$d_cort < 0.47), "d_cort"] <- 0.47
## Calculate delta values for cort and glucose
# s_resp: stress response (induced - base); n_feed: negative feedback
# (post-dex - induced); a_inc: ACTH-induced increase (post-cortrosyn -
# induced); gluc_* are the matching glucose deltas.
d$s_resp <- d$s_cort - d$b_cort
d$n_feed <- d$d_cort - d$s_cort
d$a_inc <- d$a_cort - d$s_cort
d$gluc_resp <- d$s_gluc - d$b_gluc
d$gluc_feed <- d$d_gluc - d$s_gluc
d$gluc_ainc <- d$a_gluc - d$s_gluc
## Make a separate dataset for adults and exclude any post treatment measures.
# Also make a separate dataset just for New York.
da <- subset(d, d$class == "adult")
# Remove post treatment samples from corticosterone dosed birds
da$post_trt2 <- paste(da$post_trt, da$treatment, sep = "_")
da2 <- subset(da, da$post_trt2 != "yes_CORT_3d")
da2 <- subset(da2, da2$post_trt2 != "yes_CORT_6d")
# NOTE(review): da2n is subset from `da` (all adults), not `da2`, so the NY
# dataset still includes post-treatment samples from CORT-dosed birds —
# confirm this is intentional.
da2n <- subset(da, da$state == "NY") # New York only
# Make a separate dataframe for nestlings
dn <- subset(d, d$class == "nestling")
dn$post_trt2 <- paste(dn$post_trt, dn$treatment, sep = "_")
## Glucose repeatability ----
# Adults with complete baseline glucose, induced glucose, and glucose response,
# restricted to individuals captured more than once (repeatability requires
# repeated measures).
da2nx <- subset(da2, is.na(da2$b_gluc) == FALSE & is.na(da2$s_gluc) == FALSE & is.na(da2$gluc_resp) == FALSE)
# Captures per individual (band). Replaces an O(n^2) per-row subset() loop
# that also used the unsafe 1:nrow() idiom (breaks on zero-row input); ave()
# computes the per-group count in one vectorized pass with identical results.
da2nx$count <- ave(seq_len(nrow(da2nx)), da2nx$band, FUN = length)
da2x <- subset(da2nx, da2nx$count > 1)
# Agreement repeatability (rptR) of each glucose and corticosterone measure
# across repeated captures; grouping factor band = individual, npermut = 0
# skips permutation tests.
r_base <- rpt(b_gluc ~ (1|band), grname = "band", data = da2x, npermut = 0, datatype = "Gaussian")
r_str <- rpt(s_gluc ~ (1|band), grname = "band", data = da2x, npermut = 0, datatype = "Gaussian")
r_resp <- rpt(gluc_resp ~ (1|band), grname = "band", data = da2x, npermut = 0, datatype = "Gaussian")
r_basec <- rpt(b_cort ~ (1|band), grname = "band", data = da2x, npermut = 0, datatype = "Gaussian")
r_strc <- rpt(s_cort ~ (1|band), grname = "band", data = da2x, npermut = 0, datatype = "Gaussian")
r_respc <- rpt(s_resp ~ (1|band), grname = "band", data = da2x, npermut = 0, datatype = "Gaussian")
## Model group means NY ----
# Lookup table mapping each measurement column onto an ordered timepoint
# factor, joined after pivoting to long format.
recode <- data.frame(type = c("b_cort", "s_cort", "d_cort", "a_cort",
"b_gluc", "s_gluc", "d_gluc", "a_gluc"),
timepoint = rep(c("base", "induce", "dex", "acth"), 2))
recode$timepoint <- factor(recode$timepoint, levels = c("base", "induce", "dex", "acth"))
#Adults
# Cort / glucose modeled on timepoint + sex with random intercept per
# individual; emmeans per-timepoint estimates extracted for plotting.
d1 <- da2n %>%
pivot_longer(cols = c("b_cort", "s_cort", "a_cort"), names_to = "type", values_to = "cort")
d1$type <- factor(d1$type, levels = c("b_cort", "s_cort", "a_cort"))
d1$unique <- paste(d1$band, d1$year, d1$date, sep = "_")
d1 <- plyr::join(d1, recode, "type")
m1 <- lmer(cort ~ timepoint + sex + (1|band), data = d1)
m1_em <- as.data.frame(emmeans(m1, "timepoint", lmer.df = "satterthwaite"))
m1_em$type <- factor(c("b_cort", "s_cort", "a_cort"), levels = c("b_cort", "s_cort", "a_cort"))
m1_eml <- pivot_longer(m1_em, cols = c("lower.CL", "upper.CL"), values_to = "y")
d1 <- da2n %>%
pivot_longer(cols = c("b_gluc", "s_gluc", "a_gluc"), names_to = "type", values_to = "glucose")
d1$type <- factor(d1$type, levels = c("b_gluc", "s_gluc", "a_gluc"))
d1$unique <- paste(d1$band, d1$year, d1$date, sep = "_")
d1 <- plyr::join(d1, recode, "type")
m2 <- lmer(glucose ~ timepoint + sex + (1|band), data = d1)
m2_em <- as.data.frame(emmeans(m2, "timepoint", lmer.df = "satterthwaite"))
m2_em$type <- factor(c("b_gluc", "s_gluc", "a_gluc"), levels = c("b_gluc", "s_gluc", "a_gluc"))
m2_eml <- pivot_longer(m2_em, cols = c("lower.CL", "upper.CL"), values_to = "y")
#Nestlings
d1 <- dn %>%
pivot_longer(cols = c("b_cort", "s_cort", "a_cort"), names_to = "type", values_to = "cort")
d1$type <- factor(d1$type, levels = c("b_cort", "s_cort", "a_cort"))
d1$unique <- paste(d1$band, d1$year, d1$date, sep = "_")
d1$nest <- paste(d1$site, d1$box, sep = "_")
d1 <- plyr::join(d1, recode, "type")
m3 <- lmer(cort ~ timepoint + (1|nest), data = d1)
m3_em <- as.data.frame(emmeans(m3, "timepoint", lmer.df = "satterthwaite"))
m3_em$type <- factor(c("b_cort", "s_cort", "a_cort"), levels = c("b_cort", "s_cort", "a_cort"))
m3_eml <- pivot_longer(m3_em, cols = c("lower.CL", "upper.CL"), values_to = "y")
d1 <- dn %>%
pivot_longer(cols = c("b_gluc", "s_gluc", "a_gluc"), names_to = "type", values_to = "glucose")
d1$type <- factor(d1$type, levels = c("b_gluc", "s_gluc", "a_gluc"))
d1$unique <- paste(d1$band, d1$year, d1$date, sep = "_")
d1$nest <- paste(d1$site, d1$box, sep = "_")
d1 <- plyr::join(d1, recode, "type")
m4 <- lmer(glucose ~ timepoint + (1|nest), data = d1)
m4_em <- as.data.frame(emmeans(m4, "timepoint", lmer.df = "satterthwaite"))
m4_em$type <- factor(c("b_gluc", "s_gluc", "a_gluc"), levels = c("b_gluc", "s_gluc", "a_gluc"))
m4_eml <- pivot_longer(m4_em, cols = c("lower.CL", "upper.CL"), values_to = "y")
t1 <- tab_model(m1, m2, m3, m4, show.re.var = FALSE, show.p = FALSE,
dv.labels = c("Adult Corticosterone", "Adult Glucose", "Nestling Corticosterone", "Nestling Glucose"),
pred.labels = c("Intercept (Base / Female)", "Induced", "Post-Cortrosyn", "Sex (Male)"))
saveRDS(t1,
here::here("2_r_scripts/table_s3.rds"))
## Plot group means NY ----
# NY nestling
# Corticosterone
## Raw nestling values by timepoint: boxplots nudged left, jittered raw points
## in the middle, and the model-based (emmeans) mean +/- CI overlaid to the
## right of each group as a black diamond with a vertical segment.
d1 <- dn %>%
pivot_longer(cols = c("b_cort", "s_cort", "a_cort"), names_to = "type", values_to = "cort")
d1$type <- factor(d1$type, levels = c("b_cort", "s_cort", "a_cort"))
d1$unique <- paste(d1$band, d1$year, d1$date, sep = "_")
p1 <- ggplot(data = d1, mapping = aes(x = type, y = cort, fill = type, color = type)) +
geom_boxplot(alpha = 0.2, outlier.shape = NA, position = position_nudge(x = -0.3), width = 0.25) +
geom_jitter(width = 0.1, alpha = 0.4, size = 0.2) +
theme_classic() +
scale_fill_viridis(discrete = TRUE) +
scale_color_viridis(discrete = TRUE) + guides(fill = FALSE, color = FALSE) +
xlab("") + ylab("Corticosterone (ng/mL)") +
scale_x_discrete(labels = c("Base", "Induced", "Cortrosyn")) +
annotate("text", x = -Inf, y = Inf, label = "A", hjust = -0.5, vjust = 1.5) +
theme(axis.title = element_text(size = 14), axis.text = element_text(size = 13), axis.text.x = element_text(angle = 30, hjust = 1))
# add in confidence intevals from emmeans model
## (m3_eml / m3_em come from the nestling cort model fit earlier)
p1 <- p1 + geom_line(data = m3_eml, mapping = aes(x = type, y = y), col = "black", size = 1, position = position_nudge(x = 0.2)) +
geom_point(data = m3_em, mapping = aes(x = type, y = emmean), color = "black", shape = 23, position = position_nudge(x = 0.2))
# Glucose
## Same layout for nestling glucose (panel B), using the m4 emmeans overlay.
d1 <- dn %>%
pivot_longer(cols = c("b_gluc", "s_gluc", "a_gluc"), names_to = "type", values_to = "glucose")
d1$type <- factor(d1$type, levels = c("b_gluc", "s_gluc", "a_gluc"))
d1$unique <- paste(d1$band, d1$year, d1$date, sep = "_")
p2 <- ggplot(data = d1, mapping = aes(x = type, y = glucose, fill = type, color = type)) +
geom_boxplot(alpha = 0.2, outlier.shape = NA, position = position_nudge(x = -0.3), width = 0.25) +
geom_jitter(width = 0.1, alpha = 0.4, size = 0.2) +
theme_classic() +
scale_fill_viridis(discrete = TRUE) +
scale_color_viridis(discrete = TRUE) + guides(fill = FALSE, color = FALSE) +
xlab("") + ylab("Glucose (mg/dl)") +
scale_x_discrete(labels = c("Base", "Induced", "Cortrosyn"))+
annotate("text", x = -Inf, y = Inf, label = "B", hjust = -0.5, vjust = 1.5) +
theme(axis.title = element_text(size = 14), axis.text = element_text(size = 13), axis.text.x = element_text(angle = 30, hjust = 1))
# add in emmeans intervals
p2 <- p2 + geom_line(data = m4_eml, mapping = aes(x = type, y = y), col = "black", size = 1, position = position_nudge(x = 0.2)) +
geom_point(data = m4_em, mapping = aes(x = type, y = emmean), color = "black", shape = 23, position = position_nudge(x = 0.2))
## Figure 2b: nestling corticosterone (A) and glucose (B), side by side.
ggsave(here::here("2_r_scripts/figure_2b.pdf"),
ggpubr::ggarrange(p1, p2),
device = "pdf", width = 5, height = 4, units = "in")
# NY adult
# Corticosterone
## Same plot layout as the nestling panels, for NY adults (da2n), with the
## emmeans overlay coming from models m1 (cort) and m2 (glucose).
d1 <- da2n %>%
pivot_longer(cols = c("b_cort", "s_cort", "a_cort"), names_to = "type", values_to = "cort")
d1$type <- factor(d1$type, levels = c("b_cort", "s_cort", "a_cort"))
d1$unique <- paste(d1$band, d1$year, d1$date, sep = "_")
p1 <- ggplot(data = d1, mapping = aes(x = type, y = cort, fill = type, color = type)) +
geom_boxplot(alpha = 0.2, position = position_nudge(x = -0.3), width = 0.25, outlier.shape = NA) +
geom_jitter(width = 0.1, alpha = 0.4, size = 0.2) +
theme_classic() +
scale_fill_viridis(discrete = TRUE) +
scale_color_viridis(discrete = TRUE) + guides(fill = FALSE, color = FALSE) +
xlab("") + ylab(paste("Corticosterone (ng/mL)")) +
scale_x_discrete(labels = c("Base", "Induced", "Cortrosyn")) +
## NOTE: ylim() drops (not just hides) any cort values above 110 from the
## panel and from the boxplot statistics.
ylim(0, 110) +
annotate("text", x = -Inf, y = Inf, label = "A", hjust = -0.5, vjust = 1.5) +
theme(axis.title = element_text(size = 14), axis.text = element_text(size = 13), axis.text.x = element_text(angle = 30, hjust = 1))
# add in confidence intevals from emmeans model
p1 <- p1 + geom_line(data = m1_eml, mapping = aes(x = type, y = y), col = "black", size = 1, position = position_nudge(x = 0.2)) +
geom_point(data = m1_em, mapping = aes(x = type, y = emmean), color = "black", shape = 23, position = position_nudge(x = 0.2))
# Glucose
d1 <- da2n %>%
pivot_longer(cols = c("b_gluc", "s_gluc", "a_gluc"), names_to = "type", values_to = "glucose")
d1$type <- factor(d1$type, levels = c("b_gluc", "s_gluc", "a_gluc"))
d1$unique <- paste(d1$band, d1$year, d1$date, sep = "_")
p2 <- ggplot(data = d1, mapping = aes(x = type, y = glucose, fill = type, color = type)) +
geom_boxplot(alpha = 0.2, outlier.shape = NA, width = 0.25, position = position_nudge(x = -0.3)) +
geom_jitter(width = 0.1, alpha = 0.4, size = 0.2) +
theme_classic() +
scale_fill_viridis(discrete = TRUE) +
scale_color_viridis(discrete = TRUE) + guides(fill = FALSE, color = FALSE) +
xlab("") + ylab("Glucose (mg/dl)") +
scale_x_discrete(labels = c("Base", "Induced", "Cortrosyn"))+
annotate("text", x = -Inf, y = Inf, label = "B", hjust = -0.5, vjust = 1.5) +
theme(axis.title = element_text(size = 14), axis.text = element_text(size = 13), axis.text.x = element_text(angle = 30, hjust = 1))
# add in emmeans intervals
p2 <- p2 + geom_line(data = m2_eml, mapping = aes(x = type, y = y), col = "black", size = 1, position = position_nudge(x = 0.2)) +
geom_point(data = m2_em, mapping = aes(x = type, y = emmean), color = "black", shape = 23, position = position_nudge(x = 0.2))
## Figure 1b: adult corticosterone (A) and glucose (B), side by side.
ggsave(here::here("2_r_scripts/figure_1b.pdf"),
ggpubr::ggarrange(p1, p2),
device = "pdf", width = 5, height = 4, units = "in")
## Plot individual variation NY ----
## Combine NY adults (da2n) and nestlings (dn); 'class' distinguishes the two
## groups in scatterplots of each cort measure (x) against the matching
## glucose measure (y), with per-class linear fits.
dny <- rbind(da2n, dn)
## Panel A: baseline cort vs baseline glucose.
p1 <- ggplot(data = dny, mapping = aes(x = b_cort, y = b_gluc, color = class, fill = class)) +
geom_point(alpha = 0.5, size = 0.7) +
geom_smooth(method = "lm") +
theme_classic() +
scale_fill_manual(values = c("slateblue", "orange")) +
scale_color_manual(values = c("slateblue", "orange")) +
#guides(fill = FALSE, color = FALSE) +
theme(legend.position = c(0.8, 0.8), legend.title = element_blank()) +
annotate("text", x = -Inf, y = Inf, label = "A", hjust = -0.5, vjust = 1.5) +
xlab("Baseline cort \n (log ng/mL)") +
ylab("Baseline glucose (mg/dl)") +
theme(axis.title = element_text(size = 14), axis.text = element_text(size = 13),
legend.text = element_text(size = 12)) +
## coord_cartesian zooms without dropping points from the fitted lines
coord_cartesian(xlim = c(0, 20))
## Panel B: cort response (induced - base) vs glucose response.
p2 <- ggplot(data = dny, mapping = aes(x = s_resp, y = gluc_resp, color = class, fill = class)) +
geom_point(alpha = 0.5, size = 0.7) +
geom_smooth(method = "lm") +
theme_classic() +
scale_fill_manual(values = c("slateblue", "orange")) +
scale_color_manual(values = c("slateblue", "orange")) +
guides(fill = FALSE, color = FALSE) +
annotate("text", x = -Inf, y = Inf, label = "B", hjust = -0.5, vjust = 1.5) +
xlim(-50, 100) +
xlab("Induced - baseline \n corticosterone (ng/mL)") +
ylab("Induced - baseline \n glucose (mg/dl)") +
geom_hline(yintercept = 0, linetype = "dashed", color = "gray60") +
geom_vline(xintercept = 0, linetype = "dashed", color = "gray60") +
theme(axis.title = element_text(size = 14), axis.text = element_text(size = 13))
## Post-dex panel: built but NOT included in the saved figure below (only
## p1, p2, p4 are arranged into figure 3b).
p3 <- ggplot(data = dny, mapping = aes(x = n_feed, y = gluc_feed, color = class, fill = class)) +
geom_point(alpha = 0.5, size = 0.7) +
geom_smooth(method = "lm") +
theme_classic() +
scale_fill_manual(values = c("slateblue", "orange")) +
scale_color_manual(values = c("slateblue", "orange")) +
guides(fill = FALSE, color = FALSE) +
annotate("text", x = -Inf, y = Inf, label = "C", hjust = -0.5, vjust = 1.5) +
xlim(-100, 25) +
xlab("Induced - post-dex \n corticosterone (ng/mL)") +
ylab("Induced - post-dex \n glucose (mg/dl)") +
geom_hline(yintercept = 0, linetype = "dashed", color = "gray60") +
geom_vline(xintercept = 0, linetype = "dashed", color = "gray60") +
theme(axis.title = element_text(size = 14), axis.text = element_text(size = 13))
## Post-Cortrosyn panel; labeled "C" because it is the third panel of the
## saved figure (p3 is omitted).
p4 <- ggplot(data = dny, mapping = aes(x = a_inc, y = gluc_ainc, color = class, fill = class)) +
geom_point(alpha = 0.5, size = 0.7) +
geom_smooth(method = "lm") +
theme_classic() +
scale_fill_manual(values = c("slateblue", "orange")) +
scale_color_manual(values = c("slateblue", "orange")) +
guides(fill = FALSE, color = FALSE) +
annotate("text", x = -Inf, y = Inf, label = "C", hjust = -0.5, vjust = 1.5) +
xlab("Post-cortosyn - induced \n corticosterone (ng/mL)") +
ylab("Post-cortrosyn - induced \n glucose (mg/dl)") +
geom_hline(yintercept = 0, linetype = "dashed", color = "gray60") +
geom_vline(xintercept = 0, linetype = "dashed", color = "gray60") +
theme(axis.title = element_text(size = 14), axis.text = element_text(size = 13))
#p2m <- ggMarginal(p2, type = "boxplot", margins = "y", groupColour = TRUE, groupFill = TRUE)
#p3m <- ggMarginal(p3, type = "boxplot", margins = "y", groupColour = TRUE, groupFill = TRUE)
#p4m <- ggMarginal(p4, type = "boxplot", margins = "y", groupColour = TRUE, groupFill = TRUE, xparams = list(varwidth = FALSE))
## Figure 3b: baseline (A), response (B), and post-Cortrosyn (C) panels.
ggsave(here::here("2_r_scripts/figure_3b.pdf"),
ggpubr::ggarrange(p1, p2, p4, nrow = 1, ncol = 3),
device = "pdf", width = 10.5, height = 3.75, units = "in")
## Modeling individual variation NY ----
# Adults
## Does glucose (or its change) track the corresponding cort measure, after
## accounting for mass and sex? The 'predictor' column is overwritten before
## each fit so the same column name can be reused across models.
# Baseline
da2n$predictor <- da2n$b_cort
mb <- lmer(b_gluc ~ scale(predictor) + scale(mass) + sex + (1|band), data = da2n)
## DHARMa residual diagnostics (run here only for the baseline model).
res_mb <- simulateResiduals(mb)
plot(res_mb)
# plotQQunif(mb)
# plotResiduals(mb)
# Induced
## Note the interaction with mass here (predictor * mass), explored below.
da2n$predictor <- da2n$s_resp
ms <- lmer(gluc_resp ~ scale(predictor) * scale(mass) + sex + (1|band), data = da2n)
# plotQQunif(ms)
# plotResiduals(ms)
# Post-dex
da2n$predictor <- da2n$n_feed
md <- lmer(gluc_feed ~ scale(predictor) + sex + (1|band), data = da2n)
# plotQQunif(md)
# plotResiduals(md)
# Post-cortrosyn
da2n$predictor <- da2n$a_inc
ma <- lm(gluc_ainc ~ scale(predictor) + scale(mass), data = da2n)
# plotQQunif(ma)
# plotResiduals(ma)
## Table S4 includes mb, ms, ma; the post-dex model (md) is fit above but
## intentionally not tabulated here.
ta <- tab_model(mb, ms, ma, show.re.var = FALSE,
dv.labels = c("Baseline Glucose", "Induced - Base Glucose", "Post-Cortrosyn - Induced Glucose"),
pred.labels = c("Intercept", "Corticosterone", "Mass", "Sex (male)", "Corticosterone * Mass"))
saveRDS(ta, here::here("2_r_scripts/table_s4.rds"))
#emmeans(ms, "sex", lmer.df = "Satterthwaite")
# interaction for induced by base
## Visualize the cort-response x mass interaction in ms by simulating from the
## sampling distribution of the fixed effects (MASS::mvrnorm), then computing
## the predicted response at mass = -1, 0, +1 SD across a grid of scaled cort
## responses. HPDI() (rethinking) gives highest-posterior-density intervals.
## Column order in 'post' follows fixef(ms): intercept, predictor, mass,
## (sexM), predictor:mass -- indices [ , 1:4] used below assume this layout.
post <- mvrnorm(n = 1e6, mu = fixef(ms), vcov(ms))
r <- seq(-3, 3, 0.1)
mu_neg1 <- sapply(r, function(z)mean(post[, 1] + post[, 2]*z + post[, 3] * -1 + post[, 4] * z * -1))
ci_neg1 <- sapply(r, function(z)HPDI(post[, 1] + post[, 2]*z + post[, 3] * -1 + post[, 4] * z * -1))
mu_zero <- sapply(r, function(z)mean(post[, 1] + post[, 2]*z + post[, 3] * 0 + post[, 4] * z * 0))
ci_zero <- sapply(r, function(z)HPDI(post[, 1] + post[, 2]*z + post[, 3] * 0 + post[, 4] * z * 0))
mu_pos1 <- sapply(r, function(z)mean(post[, 1] + post[, 2]*z + post[, 3] * 1 + post[, 4] * z * 1))
ci_pos1 <- sapply(r, function(z)HPDI(post[, 1] + post[, 2]*z + post[, 3] * 1 + post[, 4] * z * 1))
## Figure 4b (base graphics): predicted glucose response vs cort response,
## one shaded HPDI band + line per mass level.
colss <- viridis(n = 5, option = "C")
pdf(here::here("2_r_scripts/figure_4b.pdf"), width = 6.5, height = 6.5)
plot(r, mu_neg1, lwd = 2, type = "n", xaxs = "i", yaxs = "i", xaxt = "n", yaxt = "n", xlim = c(-2.1, 2.1),
ylim = c(-40, 100), bty = "n", xlab = "Induced - baseline corticosterone (SD units)", ylab = "Induced - baseline glucose (mg/dl)",
cex.lab = 1.5)
axis(1, seq(-5, 5, 1))
axis(2, seq(-500, 500, 20), las = 2)
abline(h = 0, lty = 2, col = "gray60")
shade(ci_neg1, r, col = alpha(colss[2], 0.3))
lines(r, mu_neg1, lwd = 2, col = colss[2])
shade(ci_zero, r, col = alpha(colss[3], 0.3))
lines(r, mu_zero, lwd = 2, col = colss[3])
shade(ci_pos1, r, col = alpha(colss[4], 0.3))
lines(r, mu_pos1, lwd = 2, col = colss[4])
legend(-0.7, -12, c("mass -1 SD", "mass 0 SD", "mass 1 SD"), bty = "n", col = colss[2:4], lwd = 2, cex = 1.2)
dev.off()
# Nestling
## Same glucose-vs-cort models for nestlings; random intercept per nest
## (site_box), and the baseline model also includes a per-individual (ID)
## random intercept.
dn$nest <- paste(dn$site, dn$box, sep = "_")
# Baseline
dn$predictor <- dn$b_cort
mb <- lmer(b_gluc ~ scale(predictor) * scale(mass) + (1|nest) + (1|ID), data = dn)
# plotQQunif(mb)
# plotResiduals(mb)
# Induced
dn$predictor <- dn$s_resp
ms <- lmer(gluc_resp ~ scale(predictor) + scale(mass) + (1|nest), data = dn)
# Post-dex
dn$predictor <- dn$n_feed
md <- lmer(gluc_feed ~ scale(predictor) * scale(mass) + (1|nest), data = dn)
# Post-cortrosyn
dn$predictor <- dn$a_inc
ma <- lmer(gluc_ainc ~ scale(predictor) + scale(mass) + (1|nest), data = dn)
## Table S5: mb, ms, ma (the post-dex model md is again left out of the table).
tn <- tab_model(mb, ms, ma, show.re.var = FALSE,
dv.labels = c("Baseline Glucose", "Induced - Base Glucose",
"Post-Cortrosyn - Induced Glucose"),
pred.labels = c("Intercept", "Corticosterone", "Mass", "Corticosterone * Mass"))
saveRDS(tn, here::here("2_r_scripts/table_s5.rds"))
## Modeling population comparison ----
# models for each state
## Per-population (state) models of female glucose vs cort x mass. WY and TN
## response models use plain lm() rather than lmer(); NOTE(review): presumably
## because those subsets lack repeated measures per band -- confirm.
dwy <- subset(da2, da2$state == "WY")
dak <- subset(da2, da2$state == "AK")
dtn <- subset(da2, da2$state == "TN")
dwy$cort_pred <- dwy$b_cort
mwyb <- lmer(b_gluc ~ scale(cort_pred) * scale(mass) + (1|band), data = subset(dwy, dwy$sex == "F"))
dwy$cort_pred <- dwy$s_resp
mwyr <- lm(gluc_resp ~ scale(cort_pred) * scale(mass), data = subset(dwy, dwy$sex == "F"))
dak$cort_pred <- dak$b_cort
makb <- lmer(b_gluc ~ scale(cort_pred) * scale(mass) + (1|band), data = subset(dak, dak$sex == "F"))
dak$cort_pred <- dak$s_resp
makr <- lmer(gluc_resp ~ scale(cort_pred) * scale(mass) + (1|band), data = subset(dak, dak$sex == "F"))
dtn$cort_pred <- dtn$b_cort
mtnb <- lmer(b_gluc ~ scale(cort_pred) * scale(mass) + (1|band), data = subset(dtn, dtn$sex == "F"))
dtn$cort_pred <- dtn$s_resp
mtnr <- lm(gluc_resp ~ scale(cort_pred) * scale(mass), data = subset(dtn, dtn$sex == "F"))
da2n$cort_pred <- da2n$b_cort
mnyb <- lmer(b_gluc ~ scale(cort_pred) * scale(mass) + (1|band), data = subset(da2n, da2n$sex == "F"))
da2n$cort_pred <- da2n$s_resp
mnyr <- lmer(gluc_resp ~ scale(cort_pred) * scale(mass) + (1|band), data = subset(da2n, da2n$sex == "F"))
## Tables S6 (baseline glucose) and S7 (glucose response), one column per state.
tc1 <- tab_model(makb, mnyb, mtnb, mwyb, show.re.var = FALSE,
dv.labels = c("AK Base Glucose", "NY Base Glucose", "TN Base Glucose", "WY Base Glucose"),
pred.labels = c("Intercept", "Base Corticosterone", "Mass", "Corticosterone * Mass"))
tc2 <- tab_model(makr, mnyr, mtnr, mwyr, show.re.var = FALSE,
dv.labels = c("AK Induced - Base Glucose", "NY Induced - Base Glucose", "TN Induced - Base Glucose", "WY Induced - Base Glucose"),
pred.labels = c("Intercept", "Induced - Base Corticosterone", "Mass", "Corticosterone * Mass"))
saveRDS(tc1,
here::here("2_r_scripts/table_s6.rds"))
saveRDS(tc2,
here::here("2_r_scripts/table_s7.rds"))
# Do states differ in glucose levels
## md: baseline, md2: induced, md3: induced - baseline (the arithmetic on the
## formula LHS is evaluated before fitting).
md <- lmer(b_gluc ~ state + (1|band), data = da2)
md2 <- lmer(s_gluc ~ state + (1|band), data = da2)
md3 <- lmer(s_gluc - b_gluc ~ state + (1|band), data = da2)
## Per-state emmeans, reshaped (as for the earlier plots) so CI bounds can be
## drawn as vertical segments.
em_md <- as.data.frame(emmeans(md, "state"))
em_md$type <- factor(c("AK", "NY", "TN", "WY"), levels = c("AK", "NY", "TN", "WY"))
em_mdl <- pivot_longer(em_md, cols = c("lower.CL", "upper.CL"), values_to = "y")
em_md2 <- as.data.frame(emmeans(md2, "state"))
em_md2$type <- factor(c("AK", "NY", "TN", "WY"), levels = c("AK", "NY", "TN", "WY"))
em_md2l <- pivot_longer(em_md2, cols = c("lower.CL", "upper.CL"), values_to = "y")
em_md3 <- as.data.frame(emmeans(md3, "state"))
em_md3$type <- factor(c("AK", "NY", "TN", "WY"), levels = c("AK", "NY", "TN", "WY"))
em_md3l <- pivot_longer(em_md3, cols = c("lower.CL", "upper.CL"), values_to = "y")
## Printed to the viewer only; this table is not saved to disk.
tab_model(md, md2, md3)
## Plotting population comparison ----
# base glucose
# Base glucose
## Per-state boxplots + jittered raw points with the emmeans mean/CI overlay,
## matching the style of the earlier timepoint plots.
pop1 <- ggplot(data = da2, mapping = aes(x = state, y = b_gluc, fill = state, color = state)) +
geom_boxplot(alpha = 0.4, outlier.shape = NA, position = position_nudge(x = -0.3), width = 0.25) +
geom_jitter(width = 0.1, alpha = 0.6, size = 0.2) +
theme_classic() +
scale_fill_manual(values = c("slateblue", "coral3", "goldenrod", "purple")) +
scale_color_manual(values = c("slateblue", "coral3", "goldenrod", "purple")) +
xlab("") + ylab("Baseline glucose (mg/dl)") +
guides(color = FALSE, fill = FALSE) +
#scale_x_discrete(labels = c("Base", "Induced", "Dex.", "Cortrosyn"))+
annotate("text", x = -Inf, y = Inf, label = "A", hjust = -0.5, vjust = 1.5) +
theme(axis.title = element_text(size = 14), axis.text.x = element_text(size = 12))
# add in emmeans intervals
pop1 <- pop1 + geom_line(data = em_mdl, mapping = aes(x = state, y = y), col = "black", size = 1, position = position_nudge(x = 0.2)) +
geom_point(data = em_md, mapping = aes(x = state, y = emmean), color = "black", shape = 23, position = position_nudge(x = 0.2))
# Stress glucose
pop2 <- ggplot(data = da2, mapping = aes(x = state, y = s_gluc, fill = state, color = state)) +
geom_boxplot(alpha = 0.4, outlier.shape = NA, position = position_nudge(x = -0.3), width = 0.25) +
geom_jitter(width = 0.1, alpha = 0.6, size = 0.2) +
theme_classic() +
scale_fill_manual(values = c("slateblue", "coral3", "goldenrod", "purple")) +
scale_color_manual(values = c("slateblue", "coral3", "goldenrod", "purple")) +
xlab("") + ylab("Induced glucose (mg/dl)") +
guides(color = FALSE, fill = FALSE) +
#scale_x_discrete(labels = c("Base", "Induced", "Dex.", "Cortrosyn"))+
annotate("text", x = -Inf, y = Inf, label = "B", hjust = -0.5, vjust = 1.5) +
theme(axis.title = element_text(size = 14), axis.text.x = element_text(size = 12))
# add in emmeans intervals
pop2 <- pop2 + geom_line(data = em_md2l, mapping = aes(x = state, y = y), col = "black", size = 1, position = position_nudge(x = 0.2)) +
geom_point(data = em_md2, mapping = aes(x = state, y = emmean), color = "black", shape = 23, position = position_nudge(x = 0.2))
# delta glucose
## Change in glucose (induced - base), computed directly in the y aesthetic.
pop3 <- ggplot(data = da2, mapping = aes(x = state, y = s_gluc - b_gluc, fill = state, color = state)) +
geom_boxplot(alpha = 0.4, outlier.shape = NA, position = position_nudge(x = -0.3), width = 0.25) +
geom_jitter(width = 0.1, alpha = 0.6, size = 0.2) +
theme_classic() +
scale_fill_manual(values = c("slateblue", "coral3", "goldenrod", "purple")) +
scale_color_manual(values = c("slateblue", "coral3", "goldenrod", "purple")) +
xlab("") + ylab("Induced - base glucose (mg/dl)") +
guides(color = FALSE, fill = FALSE) +
#scale_x_discrete(labels = c("Base", "Induced", "Dex.", "Cortrosyn"))+
annotate("text", x = -Inf, y = Inf, label = "C", hjust = -0.5, vjust = 1.5) +
theme(axis.title = element_text(size = 14), axis.text.x = element_text(size = 12))
# add in emmeans intervals
pop3 <- pop3 + geom_line(data = em_md3l, mapping = aes(x = state, y = y), col = "black", size = 1, position = position_nudge(x = 0.2)) +
geom_point(data = em_md3, mapping = aes(x = state, y = emmean), color = "black", shape = 23, position = position_nudge(x = 0.2)) +
geom_hline(yintercept = 0, linetype = "dashed", col = "gray60")
# save figure
## Figure 5b: baseline (A), induced (B), and delta (C) glucose by state.
ggsave(here::here("2_r_scripts/figure_5b.pdf"),
ggpubr::ggarrange(pop1, pop2, pop3, nrow = 1),
device = "pdf", width = 7.5, height = 4, units = "in")
## Within-individual covariance ----
library(standardize)
## Unique band-year-date key for each sampling event.
da2n$byd <- paste(da2n$band, da2n$year, da2n$date, sep = "_")
## Keep only rows with complete baseline and induced cort + glucose measures.
da2nw <- subset(da2n, is.na(da2n$b_cort) == FALSE & is.na(da2n$s_cort) == FALSE &
is.na(da2n$b_gluc) == FALSE & is.na(da2n$s_gluc) == FALSE)
## Number of complete samples per individual (band). Vectorized with ave()
## (per-row group size) in place of the original row-by-row subset() loop,
## which was O(n^2); the resulting values are identical.
da2nw$num_samps <- ave(seq_len(nrow(da2nw)), da2nw$band, FUN = length)
## Restrict to individuals sampled more than 3 times.
da2nw2 <- subset(da2nw, da2nw$num_samps > 3)
#Standardize within individuals
## scale_by() (standardize package) centers and scales each measure within
## individual (band), yielding within-individual z-scores so covariance is
## assessed net of among-individual differences.
da2nw2$b_gluc_s <- scale_by(b_gluc ~ as.factor(band), data = da2nw2)
da2nw2$b_cort_s <- scale_by(b_cort ~ as.factor(band), data = da2nw2)
da2nw2$s_gluc_s <- scale_by(s_gluc ~ as.factor(band), data = da2nw2)
da2nw2$s_cort_s <- scale_by(s_cort ~ as.factor(band), data = da2nw2)
da2nw2$s_resp_s <- scale_by(s_resp ~ as.factor(band), data = da2nw2)
da2nw2$gluc_resp_s <- scale_by(gluc_resp ~ as.factor(band), data = da2nw2)
## Within-individual scatterplots: thin light-blue per-individual regression
## lines (grouped via the 'by' aesthetic on band) over a single overall fit
## in coral. NOTE(review): the overall geom_smooth uses 'by = state', which
## for this NY-only subset yields one line -- confirm that is the intent.
p1 <- ggplot(data = da2nw2, mapping = aes(x = b_cort_s, y = b_gluc_s, by = as.factor(band))) +
geom_point(alpha = 0.5) +
geom_line(stat = "smooth", method = "lm", alpha = 0.9, color = "lightblue") +
theme_classic() +
guides(fill = FALSE, color = FALSE) +
annotate("text", x = -Inf, y = Inf, label = "A", hjust = -0.5, vjust = 1.5) +
geom_smooth(data = da2nw2, mapping = aes(x = b_cort_s, y = b_gluc_s, by = state), method = "lm", color = "coral3",
fill = "coral3") +
geom_hline(yintercept = 0, linetype = "dashed", color = "black") +
ylim(-2.5, 2.5) +
xlab("Within-individual \n base corticosterone") +
ylab("Within-individual \n base glucose") +
theme(axis.title = element_text(size = 13), axis.text = element_text(size = 12))
## Panel B: induced cort vs induced glucose.
p2 <- ggplot(data = da2nw2, mapping = aes(x = s_cort_s, y = s_gluc_s, by = as.factor(band))) +
geom_point(alpha = 0.5) +
geom_line(stat = "smooth", method = "lm", alpha = 0.9, color = "lightblue") +
theme_classic() +
guides(fill = FALSE, color = FALSE) +
annotate("text", x = -Inf, y = Inf, label = "B", hjust = -0.5, vjust = 1.5) +
geom_smooth(data = da2nw2, mapping = aes(x = s_cort_s, y = s_gluc_s, by = state), method = "lm", color = "coral3",
fill = "coral3") +
geom_hline(yintercept = 0, linetype = "dashed", color = "black") +
ylim(-2.5, 2.5) +
xlab("Within-individual \n induced corticosterone") +
ylab("Within-individual \n induced glucose") +
theme(axis.title = element_text(size = 13), axis.text = element_text(size = 12))
## Panel C: cort response vs glucose response (deltas).
p3 <- ggplot(data = da2nw2, mapping = aes(x = s_resp_s, y = gluc_resp_s, by = as.factor(band))) +
geom_point(alpha = 0.5) +
geom_line(stat = "smooth", method = "lm", alpha = 0.9, color = "lightblue") +
theme_classic() +
guides(fill = FALSE, color = FALSE) +
annotate("text", x = -Inf, y = Inf, label = "C", hjust = -0.5, vjust = 1.5) +
geom_smooth(data = da2nw2, mapping = aes(x = s_resp_s, y = gluc_resp_s, by = state), method = "lm", color = "coral3",
fill = "coral3") +
geom_hline(yintercept = 0, linetype = "dashed", color = "black") +
ylim(-2.5, 2.1) +
xlab("Within-individual \n \u0394 corticosterone") +
ylab("Within-individual \n \u0394 glucose") +
theme(axis.title = element_text(size = 13), axis.text = element_text(size = 12))
## On-screen preview of the three panels (saved to file further below).
ggpubr::ggarrange(p1, p2, p3, nrow = 1, ncol = 3)
## Within-individual regressions through the origin (`~ 0 +`): the variables
## are already centered and scaled within individual, so the intercept is 0
## by construction and the slope estimates the within-individual covariance.
wi_b <- lm(b_gluc_s ~ 0 + b_cort_s, data = da2nw2)
wi_s <- lm(s_gluc_s ~ 0 + s_cort_s, data = da2nw2)
## Fixed: the original fit s_resp_s ~ 0 + gluc_resp_s, with response and
## predictor swapped relative to wi_b/wi_s and to the plots above (cort on x,
## glucose on y). Glucose response is the outcome here, as in the other two.
wi_sr <- lm(gluc_resp_s ~ 0 + s_resp_s, data = da2nw2)
## Print the three within-individual regression fits to the viewer (this
## table is not saved to disk) and save the 3-panel figure 6b.
tab_model(wi_b, wi_s, wi_sr)
ggsave(here::here("2_r_scripts/figure_6b.pdf"),
ggpubr::ggarrange(p1, p2, p3, nrow = 1, ncol = 3),
device = "pdf", width = 10.5, height = 3.75, units = "in")
## ACTH Validation Models ----
## Did the Cortrosyn (ACTH) injection elevate cort relative to saline at each
## sampling timepoint? Treatment is releveled so Saline is the reference.
# Nestlings
nest_l <- d_acth_nest %>%
pivot_longer(cols = c("cort1", "cort2", "cort3"), names_to = "timepoint",
values_to = "cort", values_drop_na = TRUE)
nest_l <- as.data.frame(nest_l)
nest_l$treatment <- as.factor(nest_l$treatment)
nest_l$treatment <- relevel(nest_l$treatment, ref = "Saline")
## Random intercepts for individual (band) and nest box (unit_box).
m_n_acth <- lmer(cort ~ timepoint*treatment + (1|band) + (1|unit_box), data = nest_l)
# Adults
fem_l <- d_acth_fem %>%
pivot_longer(cols = c("cort1", "cort2", "cort3"), names_to = "timepoint",
values_to = "cort", values_drop_na = TRUE)
fem_l <- as.data.frame(fem_l)
## Recode the raw treatment codes (BCC/BCA) to readable labels.
fem_l$treatment <- gsub("BCC", "Saline", fem_l$treatment)
fem_l$treatment <- gsub("BCA", "ACTH", fem_l$treatment)
fem_l$treatment <- as.factor(fem_l$treatment)
fem_l$treatment <- relevel(fem_l$treatment, ref = "Saline")
m_a_acth <- lmer(cort ~ timepoint*treatment + (1|band), data = fem_l)
# Make a table
## Table S1 (note: this reuses/overwrites the 't1' object from table S3 above).
t1 <- tab_model(m_n_acth, m_a_acth,
pred.labels = c("Intercept (Baseline)", "Timepoint 2", "Timepoint 3",
"Cortrosyn at Baseline", "Cortrosyn at Timepoint 2", "Cortrosyn at Timepoint 3"),
dv.labels = c("Nestling Corticosterone", "Adult Corticosterone"))
saveRDS(t1, here::here("2_r_scripts/table_s1.rds"))
# Note that I want to put this table in the pdf output supplementary materials, but there is no direct way to
# use sjplot to put html tables into a markdown pdf. I manually saved the html as a pdf to put it in. That
# means if this code is modified the saved pdf file needs to be overwritten with a new version.
## ACTH Validation Plots ----
# These are plotted and saved to file here then pasted into the rmarkdown file for the supplemental materials
# Nestlings
## Per-bird cort trajectories (thin lines) with treatment-colored boxplots at
## each timepoint; the dashed vertical line marks the injection time.
nest_a <- d_acth_nest %>%
pivot_longer(cols = c("cort1", "cort2", "cort3"), names_to = "timepoint",
values_to = "cort", values_drop_na = TRUE) %>%
ggplot(aes(x = timepoint, y = cort, fill = treatment)) +
geom_line(mapping = aes(x = timepoint, y = cort, group = band, color = treatment), alpha = 0.45) +
geom_boxplot(width = 0.25) + theme_classic() + xlab("Minutes After Capture") + ylab("Corticosterone ng/mL") +
scale_x_discrete(labels = c("<3", "15", "30")) + geom_vline(xintercept = 1.15, lty = 2, col = "gray40") +
annotate("text", x = 1.1, y = 40, label = "Cortrosyn or Saline Injection", angle = 90) + labs(fill = "Treatment") +
ggtitle("15 Day Old Nestlings") +
scale_fill_discrete(name = "Treatment", labels = c("Cortrosyn", "Saline")) + guides(color = FALSE) +
theme(legend.position = c(0.12, 0.9))
ggsave(here::here("2_r_scripts/figure_s2b.pdf"), plot = nest_a, width = 6, height = 5, units = "in", device = "pdf")
# Adults
## Adults get two injections (saline at ~15 min for all, then Cortrosyn or
## saline at ~30 min), hence the two dashed reference lines.
fem_a <- d_acth_fem %>%
pivot_longer(cols = c("cort1", "cort2", "cort3"), names_to = "timepoint",
values_to = "cort", values_drop_na = TRUE) %>%
ggplot(aes(x = timepoint, y = cort, fill = treatment)) +
geom_line(mapping = aes(x = timepoint, y = cort, group = band, color = treatment), alpha = 0.45) +
geom_boxplot(width = 0.25, outlier.size = 0, outlier.stroke = 0) + theme_classic() + xlab("Minutes After Capture") +
ylab("Corticosterone ng/mL") +
scale_x_discrete(labels = c("<3", "30", "60")) + geom_vline(xintercept = 1.15, lty = 2, col = "gray40") +
annotate("text", x = 1.1, y = 45, label = "Saline Injection", angle = 90) +
geom_vline(xintercept = 2.15, lty = 2, col = "gray40") +
annotate("text", x = 2.1, y = 50, label = "Cortrosyn or Saline Injection", angle = 90) +
labs(fill = "Treatment") + ggtitle("Adult Females") +
scale_fill_discrete(name = "Treatment", labels = c("Cortrosyn", "Saline")) + guides(color = FALSE) +
theme(legend.position = c(0.12, 0.9))
ggsave(here::here("2_r_scripts/figure_s1b.pdf"), plot = fem_a, width = 6, height = 5, units = "in", device = "pdf")
|
# Hands-on 25
# Sign test for paired before/after measurements: count positive and negative
# paired differences, then (below) test whether they are balanced with an
# exact binomial test; ties are counted separately and excluded from the test.
ho25_data <- read.csv('data/ho25_data.csv')
# 'delta' rather than 'diff' so the base R function diff() is not shadowed.
delta <- ho25_data$after - ho25_data$before
pos <- sum(delta > 0)    # improvements
neg <- sum(delta < 0)    # declines
zero <- sum(delta == 0)  # ties (reported but not used by the sign test)
binom.test(pos,pos+neg) | /R/hands_on25.R | no_license | yusriy/R_stat_analysis | R | false | false | 192 | r | # Hands-on 25
ho25_data <- read.csv('data/ho25_data.csv')
diff <- ho25_data$after - ho25_data$before
pos <- sum(diff > 0)
neg <- sum(diff < 0)
zero <- sum(diff == 0)
binom.test(pos,pos+neg) |
## ----initialization, include=FALSE---------------------------------------
## knitr setup chunk (purled from WindInSOCRATES.Rnw): chunk/figure defaults,
## required packages, and local helper functions sourced from ./chunks.
## NOTE(review): require() is used for loading; it returns FALSE rather than
## erroring when a package is missing -- library() would fail fast.
require(knitr)
opts_chunk$set(fig.path='figure/SO-', echo=FALSE, include=FALSE, fig.lp="fig:", dev='png', dpi=100, fig.show='hold', size='footnotesize', replace.assign=TRUE, width=49)
opts_chunk$set(fig.width=6, fig.height=5, fig.align="center", digits=4)
options(digits=5)
thisFileName <- "WindInSOCRATES"
require(Ranadu, quietly = TRUE, warn.conflicts=FALSE)
require(ggplot2)
require(grid)
require(ggthemes)
require(zoo)
library(scales)
source('chunks/VSpec.R') ## temporary, pending inclusion in Ranadu
source('chunks/removeSpikes.R')
source('chunks/DemingFit.R') ## temporary, pending Ranadu update
source('chunks/SplitDV.R')
## Draw a dashed orange 1:1 reference line on an existing base-graphics plot.
refline <- function (vmin=-100, vmax=100) {
lines(c(vmin, vmax), c(vmin, vmax), col='darkorange', lwd=2, lty=2)
}
ReviseProjects <- c('SOCRATES') ## these are the projects to process
Directory <- DataDirectory()
## ----nVarCalc, eval=FALSE------------------------------------------------
##
Directory <- DataDirectory ()
source('chunks/AddWind.R')
for (Project in ReviseProjects) {
## get the list of flights, low rate:
Fl <- sort (list.files (sprintf ("%s%s/", Directory, Project),
sprintf ("%srf...nc$", Project)))
for (fn in Fl[1]) {
fname <- sprintf('%s%s/%s', Directory, Project, fn)
fnew <- sub ('.nc', 'Y.nc', fname)
Z <- file.copy (fname, fnew, overwrite=TRUE) ## BEWARE: overwrites without warning!!
## read variables needed for the calculation
FI <- DataFileInfo (fname, LLrange=FALSE)
## for some old projects:
if (!('GGVSPD' %in% FI$Variables)) {
if ('GGVSPDB' %in% FI$Variables) {
VR [which (VR == 'GGVSPD')] <- 'GGVSPDB'
} else if ('VSPD_A' %in% FI$Variables) {
VR [which (VR == 'GGVSPD')] <- 'VSPD_A'
} else if ('VSPD_G' %in% FI$Variables) {
VR [which (VR == 'GGVSPD')] <- 'VSPD_G'
} else {
print ('ERROR: no VSPD variable found')
exit()
}
}
for (Var in VR) {
if (!(Var %in% FI$Variables)) {
print (sprintf (' required variable %s not found in file %s; skipping...', Var, fname))
exit()
}
}
##
DY <- getNetCDF(fname, VR)
## ----newNetCDF, eval=FALSE-----------------------------------------------
##
source ('chunks/copyAttributes.R')
##
netCDFfile <- nc_open (fnew, write=TRUE)
Dimensions <- attr (Data, "Dimensions")
Dim <- Dimensions[["Time"]]
Rate <- 1
if ("sps25" %in% names (Dimensions)) {
Rate <- 25
Dim <- list(Dimensions[["sps25"]], Dimensions[["Time"]])
}
if ("sps50" %in% names (Dimensions)) {
Rate <- 50
Dim <- list(Dimensions[["sps50"]], Dimensions[["Time"]])
}
addAKY <- TRUE
addGP <- FALSE
addTC <- TRUE
addROC <- TRUE
Data <- AddWind(DY, Rate, addAKY, addGP, addTC, addROC) ## default adds everything
##
DATT <- Data ## save to ensure that attributes are preserved
## variables to add to the netCDF file:
VarNew <- c('AKY', 'WIY', 'QCTC', 'AK_GP', 'SS_GP', 'WIG', 'WDG', 'WSG', 'TASG', 'UXG', 'VYG', 'ROC', 'TASTC', 'WDTC', 'WSTC', 'WITC', 'UXTC', 'VYTC')
VarOld <- c('AKRD', 'WIC', 'QCFC', 'AKRD', 'SSRD', 'WIC', 'WDC', 'WSC', 'TASX', 'UXC', 'VYC', 'GGVSPD', 'TASX', 'WDC', 'WSC', 'WIC', 'UXC', 'VYC')
VarUnits <- c('degrees', 'm/s', 'degrees', 'degrees', 'm/s', 'm/s', 'm/s', 'm/s', 'm/s', 'm/s', 'm/s', 'm/s', 'degrees', 'm/s', 'm/s', 'm/s', 'm/s')
VarStdName <- c('angle-of-attack, CF', 'vertical wind, CF', 'dynamic pressure, pitot-static, corrected',
'angle-of-attack, GP', 'sideslip angle, GP',
'vertical wind, GP', 'wind direction, GP', 'wind speed, GP', 'true airspeed, GP',
'wind longitudinal component, GP', 'wind lateral component, GP', 'rate of climb',
'true airspeed, pitot-static', 'wind direction, pitot-static', 'wind speed, pitot-static',
'vertical wind, pitot-static', 'wind longitudinal component, pitot-static', 'wind lateral component, pitot-static')
VarLongName <- c('angle of attack, complementary-filter',
'vertical wind using comp-filter angle of attack',
'dynamic pressure from the pitot-static sensor, corrected',
'angle of attack from the gustpod',
'sideslip angle from the gustpod',
'vertical wind from the gustpod',
'horizontal wind direction from the gustpod',
'horizontal wind speed from the gustpod',
'true airspeed from the gustpod',
'horizontal wind, longitudinal component, gustpod',
'horizontal wind, lateral component, gustpod',
'rate of climb of the aircraft from pressure',
'true airspeed from the pitot-static sensor',
'wind direction based on the pitot-static airspeed',
'wind speed based on the pitot-static airspeed',
'vertical wind based on TASTC and AKY',
'horizontal wind, longitudinal component, pitot-static',
'horizontal wind, lateral component, pitot-static')
## create the new variables and write them into the copied netCDF file
varCDF <- list ()
firstAdded <- TRUE  ## the first variable actually added must start from netCDFfile
for (i in seq_along(VarNew)) {
  ## skip variable groups that were not computed:
  if (!addAKY && (i <= 2)) {next}
  if (!addGP && (i %in% 4:11)) {next}
  if (!addTC && (i %in% c(3, 13:18))) {next}
  if (!addROC && (i == 12)) {next}
  print (sprintf ('new-netcdf %d%% done', as.integer(100*(i-1)/length(VarNew))))
  varCDF[[i]] <- ncvar_def (VarNew[i],
                            units=VarUnits[i],
                            dim=Dim,
                            missval=as.single(-32767.), prec='float',
                            longname=VarLongName[i])
  ## the original tested (i == 1) here, which leaves 'newfile' undefined
  ## whenever variable 1 is skipped (e.g., when addAKY is FALSE)
  if (firstAdded) {
    newfile <- ncvar_add (netCDFfile, varCDF[[i]])
    firstAdded <- FALSE
  } else {
    newfile <- ncvar_add (newfile, varCDF[[i]])
  }
  ATV <- ncatt_get (netCDFfile, VarOld[i])
  copy_attributes (ATV, VarNew[i], newfile)
  ncatt_put (newfile, VarNew[i], attname="standard_name",
             attval=VarStdName[i])
  ## high-rate values are dimensioned (Rate, nrow/Rate); the original handled
  ## only Rate 1 and 25, silently leaving 50-Hz variables unfilled
  if (Rate == 1) {
    ncvar_put (newfile, varCDF[[i]], Data[, VarNew[i]])
  } else {
    ncvar_put (newfile, varCDF[[i]], Data[, VarNew[i]], count=c(Rate, nrow(Data)/Rate))
  }
}
if (firstAdded) {
  nc_close (netCDFfile)  ## nothing was added; close the copied file as-is
} else {
  nc_close (newfile)
}
}
}
## ----make-zip-archive, INCLUDE=TRUE, eval=FALSE--------------------------
##
## cat (toLatex(sessionInfo()), file="SessionInfo")
## system (sprintf("zip WindInSOCRATES.zip WindInSOCRATES.Rnw WindInSOCRATES.pdf NoteReSOCRATESwindProcessing.pdf WorkflowWindInSocrates.pdf WAC.bib ./chunks/* SessionInfo"))
##
| /AddVariablesSocrates.R | permissive | WilliamCooper/SocratesQA | R | false | false | 6,417 | r | ## ----initialization, include=FALSE---------------------------------------
## ----initialization chunk (extracted from a knitr .Rnw document) ---------
## knitr/figure options for the document this code was extracted from:
require(knitr)
opts_chunk$set(fig.path='figure/SO-', echo=FALSE, include=FALSE, fig.lp="fig:", dev='png', dpi=100, fig.show='hold', size='footnotesize', replace.assign=TRUE, width=49)
opts_chunk$set(fig.width=6, fig.height=5, fig.align="center", digits=4)
options(digits=5)
thisFileName <- "WindInSOCRATES"
## NOTE(review): require() returns FALSE instead of erroring when a package
## is missing; library() would fail fast here.
require(Ranadu, quietly = TRUE, warn.conflicts=FALSE)
require(ggplot2)
require(grid)
require(ggthemes)
require(zoo)
library(scales)
## helper functions not (yet) part of the Ranadu package:
source('chunks/VSpec.R') ## temporary, pending inclusion in Ranadu
source('chunks/removeSpikes.R')
source('chunks/DemingFit.R') ## temporary, pending Ranadu update
source('chunks/SplitDV.R')
## Draw a dashed 1:1 reference line (y = x) on the current base-graphics plot,
## spanning vmin..vmax on both axes.
refline <- function(vmin = -100, vmax = 100) {
  endpoints <- c(vmin, vmax)
  lines(endpoints, endpoints, col = 'darkorange', lwd = 2, lty = 2)
}
ReviseProjects <- c('SOCRATES') ## these are the projects to process
Directory <- DataDirectory()
## ----nVarCalc, eval=FALSE------------------------------------------------
##
## NOTE(review): Directory is assigned twice (once per extracted knitr
## chunk); the second assignment is redundant but harmless.
Directory <- DataDirectory ()
source('chunks/AddWind.R')
## Process each project: copy every selected low-rate flight file, recompute
## the wind variables via AddWind(), and append them to the copy as new
## netCDF variables with units, standard names, and copied attributes.
for (Project in ReviseProjects) {
  ## get the list of flights, low rate:
  Fl <- sort (list.files (sprintf ("%s%s/", Directory, Project),
                          sprintf ("%srf...nc$", Project)))
  ## NOTE(review): Fl[1] restricts processing to the first flight only --
  ## presumably left over from testing; iterate over Fl to process all flights.
  for (fn in Fl[1]) {
    fname <- sprintf('%s%s/%s', Directory, Project, fn)
    ## insert 'Y' before the extension; the original pattern '.nc' treated
    ## the dot as a regex wildcard, so anchor and escape it
    fnew <- sub ('\\.nc$', 'Y.nc', fname)
    if (!file.copy (fname, fnew, overwrite=TRUE)) {  ## BEWARE: overwrites without warning!!
      stop ('failed to copy ', fname, ' to ', fnew)
    }
    ## read variables needed for the calculation
    FI <- DataFileInfo (fname, LLrange=FALSE)
    ## NOTE(review): VR (the list of required variable names) is not defined
    ## in this script; it must be supplied by an earlier chunk -- confirm.
    ## for some old projects, substitute whichever vertical-speed variable exists:
    if (!('GGVSPD' %in% FI$Variables)) {
      if ('GGVSPDB' %in% FI$Variables) {
        VR [which (VR == 'GGVSPD')] <- 'GGVSPDB'
      } else if ('VSPD_A' %in% FI$Variables) {
        VR [which (VR == 'GGVSPD')] <- 'VSPD_A'
      } else if ('VSPD_G' %in% FI$Variables) {
        VR [which (VR == 'GGVSPD')] <- 'VSPD_G'
      } else {
        ## exit() is not an R function; stop() aborts with a message instead
        stop ('ERROR: no VSPD variable found')
      }
    }
    for (Var in VR) {
      if (!(Var %in% FI$Variables)) {
        ## exit() is not an R function; stop() aborts with a message instead
        stop (sprintf (' required variable %s not found in file %s; skipping...', Var, fname))
      }
    }
    ##
    DY <- getNetCDF(fname, VR)
    ## ----newNetCDF, eval=FALSE-----------------------------------------------
    ##
    source ('chunks/copyAttributes.R')
    ##
    netCDFfile <- nc_open (fnew, write=TRUE)
    ## dimensions come from the data frame just read (DY); the original
    ## referenced 'Data', which is not assigned until the AddWind() call below
    Dimensions <- attr (DY, "Dimensions")
    Dim <- Dimensions[["Time"]]
    Rate <- 1
    if ("sps25" %in% names (Dimensions)) {
      Rate <- 25
      Dim <- list(Dimensions[["sps25"]], Dimensions[["Time"]])
    }
    if ("sps50" %in% names (Dimensions)) {
      Rate <- 50
      Dim <- list(Dimensions[["sps50"]], Dimensions[["Time"]])
    }
    ## flags selecting which variable groups to compute and add:
    addAKY <- TRUE   ## complementary-filter angle of attack / vertical wind
    addGP <- FALSE   ## gust-pod wind variables
    addTC <- TRUE    ## pitot-static ("TC") wind variables
    addROC <- TRUE   ## pressure-based rate of climb
    Data <- AddWind(DY, Rate, addAKY, addGP, addTC, addROC) ## default adds everything
    ##
    DATT <- Data ## save to ensure that attributes are preserved
    ## variables to add to the netCDF file:
    VarNew <- c('AKY', 'WIY', 'QCTC', 'AK_GP', 'SS_GP', 'WIG', 'WDG', 'WSG', 'TASG', 'UXG', 'VYG', 'ROC', 'TASTC', 'WDTC', 'WSTC', 'WITC', 'UXTC', 'VYTC')
    ## source variables whose attributes are copied to the new ones:
    VarOld <- c('AKRD', 'WIC', 'QCFC', 'AKRD', 'SSRD', 'WIC', 'WDC', 'WSC', 'TASX', 'UXC', 'VYC', 'GGVSPD', 'TASX', 'WDC', 'WSC', 'WIC', 'UXC', 'VYC')
    ## One unit per entry of VarNew (18 entries; the original vector had only
    ## 17, which shifted every unit after QCTC by one position). Angles and
    ## wind directions are degrees, dynamic pressure is hPa, speeds are m/s.
    VarUnits <- c('degrees', 'm/s', 'hPa', 'degrees', 'degrees', 'm/s', 'degrees', 'm/s', 'm/s', 'm/s', 'm/s', 'm/s', 'm/s', 'degrees', 'm/s', 'm/s', 'm/s', 'm/s')
    VarStdName <- c('angle-of-attack, CF', 'vertical wind, CF', 'dynamic pressure, pitot-static, corrected',
                    'angle-of-attack, GP', 'sideslip angle, GP',
                    'vertical wind, GP', 'wind direction, GP', 'wind speed, GP', 'true airspeed, GP',
                    'wind longitudinal component, GP', 'wind lateral component, GP', 'rate of climb',
                    'true airspeed, pitot-static', 'wind direction, pitot-static', 'wind speed, pitot-static',
                    'vertical wind, pitot-static', 'wind longitudinal component, pitot-static', 'wind lateral component, pitot-static')
    VarLongName <- c('angle of attack, complementary-filter',
                     'vertical wind using comp-filter angle of attack',
                     'dynamic pressure from the pitot-static sensor, corrected',
                     'angle of attack from the gustpod',
                     'sideslip angle from the gustpod',
                     'vertical wind from the gustpod',
                     'horizontal wind direction from the gustpod',
                     'horizontal wind speed from the gustpod',
                     'true airspeed from the gustpod',
                     'horizontal wind, longitudinal component, gustpod',
                     'horizontal wind, lateral component, gustpod',
                     'rate of climb of the aircraft from pressure',
                     'true airspeed from the pitot-static sensor',
                     'wind direction based on the pitot-static airspeed',
                     'wind speed based on the pitot-static airspeed',
                     'vertical wind based on TASTC and AKY',
                     'horizontal wind, longitudinal component, pitot-static',
                     'horizontal wind, lateral component, pitot-static')
    ## create the new variables and write them into the copied file
    varCDF <- list ()
    firstAdded <- TRUE  ## the first variable actually added must start from netCDFfile
    for (i in seq_along(VarNew)) {
      ## skip variable groups that were not computed:
      if (!addAKY && (i <= 2)) {next}
      if (!addGP && (i %in% 4:11)) {next}
      if (!addTC && (i %in% c(3, 13:18))) {next}
      if (!addROC && (i == 12)) {next}
      print (sprintf ('new-netcdf %d%% done', as.integer(100*(i-1)/length(VarNew))))
      varCDF[[i]] <- ncvar_def (VarNew[i],
                                units=VarUnits[i],
                                dim=Dim,
                                missval=as.single(-32767.), prec='float',
                                longname=VarLongName[i])
      ## the original tested (i == 1) here, which leaves 'newfile' undefined
      ## whenever variable 1 is skipped (e.g., when addAKY is FALSE)
      if (firstAdded) {
        newfile <- ncvar_add (netCDFfile, varCDF[[i]])
        firstAdded <- FALSE
      } else {
        newfile <- ncvar_add (newfile, varCDF[[i]])
      }
      ATV <- ncatt_get (netCDFfile, VarOld[i])
      copy_attributes (ATV, VarNew[i], newfile)
      ncatt_put (newfile, VarNew[i], attname="standard_name",
                 attval=VarStdName[i])
      ## high-rate values are dimensioned (Rate, nrow/Rate); the original
      ## handled only Rate 1 and 25, silently leaving 50-Hz variables unfilled
      if (Rate == 1) {
        ncvar_put (newfile, varCDF[[i]], Data[, VarNew[i]])
      } else {
        ncvar_put (newfile, varCDF[[i]], Data[, VarNew[i]], count=c(Rate, nrow(Data)/Rate))
      }
    }
    if (firstAdded) {
      nc_close (netCDFfile)  ## nothing was added; close the copied file as-is
    } else {
      nc_close (newfile)
    }
  }
}
## ----make-zip-archive, INCLUDE=TRUE, eval=FALSE--------------------------
##
## cat (toLatex(sessionInfo()), file="SessionInfo")
## system (sprintf("zip WindInSOCRATES.zip WindInSOCRATES.Rnw WindInSOCRATES.pdf NoteReSOCRATESwindProcessing.pdf WorkflowWindInSocrates.pdf WAC.bib ./chunks/* SessionInfo"))
##
|
## Waxwing sightings: load plotting/data packages and the observation data,
## then drop records that cannot be plotted on the maps below.
library("ggplot2")
library("ggalt")
library("magick")
library("ggmap")
library("ggthemes")
library("dplyr")
waxwings <- readr::read_csv("waxwings.csv")
## drop the Japanese waxwing and far-eastern longitudes
waxwings <- filter(waxwings, name != "Bombycilla japonica",
longitude < 50)
## (0, 0) coordinates are treated as placeholder/bad geocodes
waxwings <- filter(waxwings, !(longitude == 0 & latitude == 0))
############################################################
# #
# map ####
# #
############################################################
## world basemap without Antarctica
wax_map <- map_data("world")
wax_map <- wax_map[wax_map$region != "Antarctica",]
p <- ggplot()
p <- p + geom_map(data = wax_map,
map = wax_map,
aes(x = long, y = lat, map_id = region),
color = "white", fill = "#7f7f7f",
size = 0.05, alpha = 1/4)
p <- p + theme_map()
## one point per sighting, colored by species
p <- p + geom_point(aes(longitude, latitude,
col = name),
data = waxwings)
p <- p + ylim(0, 80) +
xlim(-165, 40)
## NOTE(review): the legend removal below is only displayed, not assigned to
## p, so the saved map.png keeps the legend -- confirm this is intended.
p + theme(legend.position = "none")
ggsave(p, file = "map.png",
width = 8, height = 5)
############################################################
# #
# day of the week ####
# #
############################################################
## sightings per week x weekday, then the weekday distribution as boxplots
waxwings <- mutate(waxwings,
wday = lubridate::wday(date, label = TRUE))
waxwings %>%
group_by(update(date, wday = 1), wday) %>%
summarize(n = n()) %>%
ggplot() +
geom_boxplot(aes(wday, n))
## ggsave() with no plot argument saves the last displayed plot
ggsave(file = "wday.png", width = 8, height = 6)
############################################################
# #
# second map ####
# #
############################################################
## per-month animations for each species; start with the cedar waxwing
waxwings <- mutate(waxwings,
month = lubridate::month(date))
cedrorum <- filter(waxwings,
name == "Bombycilla cedrorum")
## Render one month of sightings on the world basemap and save it as a PNG.
## `df` holds a single month of records; `cedrorum` supplies the fixed
## lat/lon window so every frame of the animation shares the same extent.
## Returns the output filename (used downstream to assemble the GIF).
plot_month_cedrorum <- function(df, cedrorum){
  lat_window <- c(min(cedrorum$latitude), max(cedrorum$latitude))
  lon_window <- c(min(cedrorum$longitude), max(cedrorum$longitude))
  frame_plot <- ggplot() +
    ggtitle(df$month[1]) +
    geom_map(data = wax_map,
             map = wax_map,
             aes(x = long, y = lat, map_id = region),
             color = "white", fill = "#7f7f7f",
             size = 0.05, alpha = 1/4) +
    theme_map() +
    geom_point(aes(longitude, latitude),
               data = df,
               size = .5,
               col = "red") +
    ylim(lat_window[1], lat_window[2]) +
    xlim(lon_window[1], lon_window[2])
  outfil <- paste0("cedrorum_", df$month[1], ".png")
  ggsave(outfil, frame_plot, width=5, height=5)
  outfil
}
## one frame per month, assembled into an animated GIF at 1 frame/s
cedrorum_l <- split(cedrorum, cedrorum$month)
cedrorum_l %>%
purrr::map(plot_month_cedrorum, cedrorum = cedrorum) %>%
purrr::map(image_read) %>%
image_join() %>%
image_animate(fps=1) %>%
image_write("cedrorum.gif")
garrulus <- filter(waxwings,
name == "Bombycilla garrulus")
garrulus_l <- split(garrulus, garrulus$month)
## the same frame function is reused; passing garrulus as 'cedrorum' just
## fixes the map extent to that species' range
garrulus_l %>%
purrr::map(plot_month_cedrorum, cedrorum = garrulus) %>%
purrr::map(image_read) %>%
image_join() %>%
image_animate(fps=1) %>%
image_write("garrulus.gif") | /plot_waxwings.R | no_license | maelle/waxwings | R | false | false | 3,563 | r | library("ggplot2")
## NOTE: this file appears a second time in this dump; same script as above.
library("ggalt")
library("magick")
library("ggmap")
library("ggthemes")
library("dplyr")
waxwings <- readr::read_csv("waxwings.csv")
## drop the Japanese waxwing and far-eastern longitudes
waxwings <- filter(waxwings, name != "Bombycilla japonica",
longitude < 50)
## (0, 0) coordinates are treated as placeholder/bad geocodes
waxwings <- filter(waxwings, !(longitude == 0 & latitude == 0))
############################################################
# #
# map ####
# #
############################################################
## world basemap without Antarctica
wax_map <- map_data("world")
wax_map <- wax_map[wax_map$region != "Antarctica",]
p <- ggplot()
p <- p + geom_map(data = wax_map,
map = wax_map,
aes(x = long, y = lat, map_id = region),
color = "white", fill = "#7f7f7f",
size = 0.05, alpha = 1/4)
p <- p + theme_map()
## one point per sighting, colored by species
p <- p + geom_point(aes(longitude, latitude,
col = name),
data = waxwings)
p <- p + ylim(0, 80) +
xlim(-165, 40)
## NOTE(review): the legend removal below is only displayed, not assigned to
## p, so the saved map.png keeps the legend -- confirm this is intended.
p + theme(legend.position = "none")
ggsave(p, file = "map.png",
width = 8, height = 5)
############################################################
# #
# day of the week ####
# #
############################################################
## sightings per week x weekday, then the weekday distribution as boxplots
waxwings <- mutate(waxwings,
wday = lubridate::wday(date, label = TRUE))
waxwings %>%
group_by(update(date, wday = 1), wday) %>%
summarize(n = n()) %>%
ggplot() +
geom_boxplot(aes(wday, n))
ggsave(file = "wday.png", width = 8, height = 6)
############################################################
# #
# second map ####
# #
############################################################
## per-month animations for each species; start with the cedar waxwing
waxwings <- mutate(waxwings,
month = lubridate::month(date))
cedrorum <- filter(waxwings,
name == "Bombycilla cedrorum")
## Render one month of sightings on the world basemap and save it as a PNG.
## `df` holds a single month of records; `cedrorum` supplies the fixed
## lat/lon window so every frame of the animation shares the same extent.
## Returns the output filename (used downstream to assemble the GIF).
plot_month_cedrorum <- function(df, cedrorum){
  lat_window <- c(min(cedrorum$latitude), max(cedrorum$latitude))
  lon_window <- c(min(cedrorum$longitude), max(cedrorum$longitude))
  frame_plot <- ggplot() +
    ggtitle(df$month[1]) +
    geom_map(data = wax_map,
             map = wax_map,
             aes(x = long, y = lat, map_id = region),
             color = "white", fill = "#7f7f7f",
             size = 0.05, alpha = 1/4) +
    theme_map() +
    geom_point(aes(longitude, latitude),
               data = df,
               size = .5,
               col = "red") +
    ylim(lat_window[1], lat_window[2]) +
    xlim(lon_window[1], lon_window[2])
  outfil <- paste0("cedrorum_", df$month[1], ".png")
  ggsave(outfil, frame_plot, width=5, height=5)
  outfil
}
## one frame per month, assembled into an animated GIF at 1 frame/s
cedrorum_l <- split(cedrorum, cedrorum$month)
cedrorum_l %>%
purrr::map(plot_month_cedrorum, cedrorum = cedrorum) %>%
purrr::map(image_read) %>%
image_join() %>%
image_animate(fps=1) %>%
image_write("cedrorum.gif")
garrulus <- filter(waxwings,
name == "Bombycilla garrulus")
garrulus_l <- split(garrulus, garrulus$month)
## the same frame function is reused; passing garrulus as 'cedrorum' just
## fixes the map extent to that species' range
garrulus_l %>%
purrr::map(plot_month_cedrorum, cedrorum = garrulus) %>%
purrr::map(image_read) %>%
image_join() %>%
image_animate(fps=1) %>%
image_write("garrulus.gif") |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ts.nrobust.R
\name{ts.nrobust}
\alias{ts.nrobust}
\title{Apply the robust-based Bayesian two-stage causal model with instrumental variables.}
\usage{
ts.nrobust(formula, data, advanced = FALSE, adv.model, b0 = 1,
B0 = 1e-06, g0 = 0, G0 = 1e-06, u0 = 0.001, U0 = 0.001,
e0 = 0.001, E0 = 0.001, v0 = 0, V0 = 100, beta.start = NULL,
gamma.start = NULL, u.start = NULL, e.start = NULL, df.start = 5,
n.chains = 1, n.burnin = floor(n.iter/2), n.iter = 10000,
n.thin = 1, DIC, debug = FALSE, codaPkg = FALSE)
}
\arguments{
\item{formula}{An object of class formula: a symbolic description of the model to be fitted.
The details of the model specification are given under "Details".}
\item{data}{A dataframe with the variables to be used in the model.}
\item{advanced}{Logical; if FALSE (default), the model is specified using the formula argument,
if TRUE, self-defined models can be specified using the adv.model argument.}
\item{adv.model}{Specify the self-defined model. Used when advanced=TRUE.}
\item{b0}{The mean hyperparameter of the normal distribution (prior distribution)
for the first-stage causal model coefficients, i.e., coefficients for the instrumental variables.
This can either be a numerical value or a vector with dimensions equal to the number of coefficients
for the instrumental variables. If this takes a numerical value, then that values will
serve as the mean hyperparameter for all of the coefficients for the instrumental variables.
Default value of 0 is equivalent to a noninformative prior for the normal distributions.
Used when advanced=FALSE.}
\item{B0}{The precision hyperparameter of the normal distribution (prior distribution)
for the first stage causal model coefficients.
This can either be a numerical value or a vector with dimensions equal to the number of coefficients
for the instrumental variables. If this takes a numerical value, then that values will
serve as the precision hyperparameter for all of the coefficients for the instrumental variables.
Default value of 1e-06 is equivalent to a noninformative prior for the normal distributions.
Used when advanced=FALSE.}
\item{g0}{The mean hyperparameter of the normal distribution (prior distribution)
for the second-stage causal model coefficients,
i.e., coefficients for the treatment variable and other regression covariates).
This can either be a numerical value if there is only one treatment variable in the model,
or a vector if there is a treatment variable and multiple regression covariates,
with dimensions equal to the total number of coefficients for the treatment variable and covariates.
Default value of 0 is equivalent to a noninformative prior for the normal distributions.
Used when advanced=FALSE.}
\item{G0}{The precision hyperparameter of the normal distribution (prior distribution)
for the second-stage causal model coefficients.
This can either be a numerical value if there is only one treatment variable in the model,
or a vector if there is a treatment variable and multiple regression covariates,
with dimensions equal to the total number of coefficients for the treatment variable and covariates.
Default value of 1e-06 is equivalent to a noninformative prior for the normal distributions.
Used when advanced=FALSE.}
\item{u0}{The location hyperparameter of the inverse Gamma distribution (prior for the variance of the
normal distribution on the model residuals at the first stage).
Default of 0.001 is equivalent to the noninformative prior for the inverse Gamma distribution.}
\item{U0}{The shape hyperparameter of the inverse Gamma distribution (prior for the variance of the
normal distribution on the model residuals at the first stage).
Default of 0.001 is equivalent to the noninformative prior for the inverse Gamma distribution.}
\item{e0}{The location hyperparameter of the inverse Gamma distribution (prior for the scale parameter
of Student's t distribution on the model residuals at the second stage).
Default of 0.001 is equivalent to the noninformative prior for the inverse Gamma distribution.}
\item{E0}{The shape hyperparameter of the inverse Gamma distribution (prior for the scale parameter
of Student's t distribution on the model residuals at the second stage).
Default of 0.001 is equivalent to the noninformative prior for the inverse Gamma distribution.}
\item{v0}{The lower boundary hyperparameter of the uniform distribution (prior for the degrees of freedom
parameter of Student's t distribution).}
\item{V0}{The upper boundary hyperparameter of the uniform distribution (prior for the degrees of freedom
parameter of Student's t distribution).}
\item{beta.start}{The starting values for the first-stage causal model coefficients,
i.e., coefficients for the instrumental variables.
This can either be a numerical value or a column vector with dimensions
equal to the number of first-stage coefficients.
The default value of NA will use the OLS estimate of first-stage coefficients as the starting value.
If this is a numerical value, that value will
serve as the starting value mean for all the first-stage beta coefficients.}
\item{gamma.start}{The starting values for the second-stage causal model coefficients,
i.e., coefficients for the treatment variable and the model covariates.
This can either be a numerical value or a column vector with dimensions
equal to the number of second-stage coefficients.
The default value of NA will use the OLS estimate of second-stage coefficients as the starting value.
If this is a numerical value, that value will
serve as the starting value mean for all the second-stage gamma coefficients.}
\item{u.start}{The starting value for the precision hyperparameter of the inverse gamma distribution
(prior for the variance of the normal distribution of the first-stage residual term).
The default value of NA will use the inverse of the residual variance from the OLS estimate of the first-stage model.}
\item{e.start}{The starting value for the precision hyperparameter of the inverse gamma distribution
(prior for the scale parameter of Student's t distribution of the second-stage residual term).
The default value of NA will use the inverse of the residual variance from the OLS estimate
of the second-stage model.}
\item{df.start}{The starting value for the degrees of freedom of Student's t distribution.}
\item{n.chains}{Number of Markov chains. The default is 1.}
\item{n.burnin}{Length of burn in, i.e., number of iterations to discard at the beginning.
Default is n.iter/2, that is, discarding the first half of the simulations.}
\item{n.iter}{Number of total iterations per chain (including burnin). The default is 10000.}
\item{n.thin}{Thinning rate. Must be a positive integer. The default is 1.}
\item{DIC}{Logical; if TRUE (default), compute deviance, pD, and DIC. The rule pD=Dbar-Dhat is used.}
\item{codaPkg}{Logical; if FALSE (default), an object is returned; if TRUE,
file names of the output are returned.}
}
\value{
If \emph{codaPkg=FALSE}(default), returns an object containing summary statistics of
the saved parameters, including
\item{s1.intercept}{Estimate of the intercept from the first stage.}
\item{s1.slopeP}{Estimate of the pth slope from the first stage. }
\item{s2.intercept}{Estimate of the intercept from the second stage.}
\item{s2.slopeP}{Estimate of the pth slope from the second stage (the first slope is always
the \strong{LATE}).}
\item{var.e.s1}{Estimate of the residual variance at the first stage.}
\item{var.e.s2}{Estimate of the residual variance at the second stage.}
\item{df.est}{Estimate of the degrees of freedom for the Student's t distribution.}
\item{DIC}{Deviance Information Criterion.}
If \emph{codaPkg=TRUE}, the returned value is the path for the output file
containing the Markov chain Monte Carlo output.
}
\description{
The \code{ts.nrobust} function applies the robust-based Bayesian two-stage causal model
to the continuous treatment data. The model best suits the outcome data that contain outliers and
are complete or ignorably missing (i.e., missing completely at random or missing at random).
}
\details{
\enumerate{
\item{The formula takes the form \emph{response ~ terms|instrumental_variables}.}
\code{\link{ts.nnormal}} provides a detailed description of the formula rule.
\item{DIC is computed as \emph{mean(deviance)+pD}.}
\item{Prior distributions used in ALMOND.}
\itemize{
\item Causal model coefficients at both stages: normal distributions.
\item The causal model residual at the first stage: normal distribution;
the causal model residual at the second stage: Student's t distribution.
}
}
}
\examples{
\donttest{
# Run the model
model1 <- ts.nrobust(outcome~treatment|instrument,data=subECLSK)
# Run the robust model with the self-defined advanced feature
my.robust.model<- function(){
for (i in 1:N){
mu[i] <- beta0 + beta1*z[i]
x[i] ~ dnorm(mu[i], pre.u1)
muY[i] <- gamma0 + gamma1*mu[i]
y[i] ~ dt(muY[i], pre.u2, df)
}
beta0 ~ dnorm(0,1)
beta1 ~ dnorm(1, 1)
gamma0 ~ dnorm(0, 1)
gamma1 ~ dnorm(.5, 1)
pre.u1 ~ dgamma(.001, .001)
pre.u2 ~ dgamma(.001, .001)
df ~ dunif(0,50)
s1.intercept <- beta0
s1.slope1 <- beta1
s2.intercept <- gamma0
s2.slope1 <- gamma1
df.est <- df
var.e.s1 <- 1/pre.u1
var.e.s2 <- 1/pre.u2
}
model2 <- ts.nrobust(outcome~treatment|instrument,data=subECLSK,
advanced=TRUE,adv.model=my.robust.model)
# Extract the model DIC
model1$DIC
# Extract the MCMC output
ts.nrobust(outcome~treatment|instrument,data=subECLSK,codaPkg=TRUE)
}
}
| /man/ts.nrobust.Rd | no_license | dingjshi/ALMOND | R | false | true | 9,594 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ts.nrobust.R
\name{ts.nrobust}
\alias{ts.nrobust}
\title{Apply the robust-based Bayesian two-stage causal model with instrumental variables.}
\usage{
ts.nrobust(formula, data, advanced = FALSE, adv.model, b0 = 1,
B0 = 1e-06, g0 = 0, G0 = 1e-06, u0 = 0.001, U0 = 0.001,
e0 = 0.001, E0 = 0.001, v0 = 0, V0 = 100, beta.start = NULL,
gamma.start = NULL, u.start = NULL, e.start = NULL, df.start = 5,
n.chains = 1, n.burnin = floor(n.iter/2), n.iter = 10000,
n.thin = 1, DIC, debug = FALSE, codaPkg = FALSE)
}
\arguments{
\item{formula}{An object of class formula: a symbolic description of the model to be fitted.
The details of the model specification are given under "Details".}
\item{data}{A dataframe with the variables to be used in the model.}
\item{advanced}{Logical; if FALSE (default), the model is specified using the formula argument,
if TRUE, self-defined models can be specified using the adv.model argument.}
\item{adv.model}{Specify the self-defined model. Used when advanced=TRUE.}
\item{b0}{The mean hyperparameter of the normal distribution (prior distribution)
for the first-stage causal model coefficients, i.e., coefficients for the instrumental variables.
This can either be a numerical value or a vector with dimensions equal to the number of coefficients
for the instrumental variables. If this takes a numerical value, then that values will
serve as the mean hyperparameter for all of the coefficients for the instrumental variables.
Default value of 0 is equivalent to a noninformative prior for the normal distributions.
Used when advanced=FALSE.}
\item{B0}{The precision hyperparameter of the normal distribution (prior distribution)
for the first stage causal model coefficients.
This can either be a numerical value or a vector with dimensions equal to the number of coefficients
for the instrumental variables. If this takes a numerical value, then that values will
serve as the precision hyperparameter for all of the coefficients for the instrumental variables.
Default value of 1e-06 is equivalent to a noninformative prior for the normal distributions.
Used when advanced=FALSE.}
\item{g0}{The mean hyperparameter of the normal distribution (prior distribution)
for the second-stage causal model coefficients,
i.e., coefficients for the treatment variable and other regression covariates).
This can either be a numerical value if there is only one treatment variable in the model,
or a vector if there is a treatment variable and multiple regression covariates,
with dimensions equal to the total number of coefficients for the treatment variable and covariates.
Default value of 0 is equivalent to a noninformative prior for the normal distributions.
Used when advanced=FALSE.}
\item{G0}{The precision hyperparameter of the normal distribution (prior distribution)
for the second-stage causal model coefficients.
This can either be a numerical value if there is only one treatment variable in the model,
or a vector if there is a treatment variable and multiple regression covariates,
with dimensions equal to the total number of coefficients for the treatment variable and covariates.
Default value of 1e-06 is equivalent to a noninformative prior for the normal distributions.
Used when advanced=FALSE.}
\item{u0}{The location hyperparameter of the inverse Gamma distribution (prior for the variance of the
normal distribution on the model residuals at the first stage).
Default of 0.001 is equivalent to the noninformative prior for the inverse Gamma distribution.}
\item{U0}{The shape hyperparameter of the inverse Gamma distribution (prior for the variance of the
normal distribution on the model residuals at the first stage).
Default of 0.001 is equivalent to the noninformative prior for the inverse Gamma distribution.}
\item{e0}{The location hyperparameter of the inverse Gamma distribution (prior for the scale parameter
of Student's t distribution on the model residuals at the second stage).
Default of 0.001 is equivalent to the noninformative prior for the inverse Gamma distribution.}
\item{E0}{The shape hyperparameter of the inverse Gamma distribution (prior for the scale parameter
of Student's t distribution on the model residuals at the second stage).
Default of 0.001 is equivalent to the noninformative prior for the inverse Gamma distribution.}
\item{v0}{The lower boundary hyperparameter of the uniform distribution (prior for the degrees of freedom
parameter of Student's t distribution).}
\item{V0}{The upper boundary hyperparameter of the uniform distribution (prior for the degrees of freedom
parameter of Student's t distribution).}
\item{beta.start}{The starting values for the first-stage causal model coefficients,
i.e., coefficients for the instrumental variables.
This can either be a numerical value or a column vector with dimensions
equal to the number of first-stage coefficients.
The default value of NA will use the OLS estimate of first-stage coefficients as the starting value.
If this is a numerical value, that value will
serve as the starting value mean for all the first-stage beta coefficients.}
\item{gamma.start}{The starting values for the second-stage causal model coefficients,
i.e., coefficients for the treatment variable and the model covariates.
This can either be a numerical value or a column vector with dimensions
equal to the number of second-stage coefficients.
The default value of NA will use the OLS estimate of second-stage coefficients as the starting value.
If this is a numerical value, that value will
serve as the starting value mean for all the second-stage gamma coefficients.}
\item{u.start}{The starting value for the precision hyperparameter of the inverse gamma distribution
(prior for the variance of the normal distribution of the first-stage residual term).
The default value of NA will use the inverse of the residual variance from the OLS estimate of the first-stage model.}
\item{e.start}{The starting value for the precision hyperparameter of the inverse gamma distribution
(prior for the scale parameter of Student's t distribution of the second-stage residual term).
The default value of NA will use the inverse of the residual variance from the OLS estimate
of the second-stage model.}
\item{df.start}{The starting value for the degrees of freedom of Student's t distribution.}
\item{n.chains}{Number of Markov chains. The default is 1.}
\item{n.burnin}{Length of burn in, i.e., number of iterations to discard at the beginning.
Default is n.iter/2, that is, discarding the first half of the simulations.}
\item{n.iter}{Number of total iterations per chain (including burnin). The default is 10000.}
\item{n.thin}{Thinning rate. Must be a positive integer. The default is 1.}
\item{DIC}{Logical; if TRUE (default), compute deviance, pD, and DIC. The rule pD=Dbar-Dhat is used.}
\item{codaPkg}{Logical; if FALSE (default), an object is returned; if TRUE,
file names of the output are returned.}
}
\value{
If \emph{codaPkg=FALSE}(default), returns an object containing summary statistics of
the saved parameters, including
\item{s1.intercept}{Estimate of the intercept from the first stage.}
\item{s1.slopeP}{Estimate of the pth slope from the first stage. }
\item{s2.intercept}{Estimate of the intercept from the second stage.}
\item{s2.slopeP}{Estimate of the pth slope from the second stage (the first slope is always
the \strong{LATE}).}
\item{var.e.s1}{Estimate of the residual variance at the first stage.}
\item{var.e.s2}{Estimate of the residual variance at the second stage.}
\item{df.est}{Estimate of the degrees of freedom for the Student's t distribution.}
\item{DIC}{Deviance Information Criterion.}
If \emph{codaPkg=TRUE}, the returned value is the path for the output file
containing the Markov chain Monte Carlo output.
}
\description{
The \code{ts.nrobust} function applies the robust-based Bayesian two-stage causal model
to the continuous treatment data. The model best suits the outcome data that contain outliers and
are complete or ignorably missing (i.e., missing completely at random or missing at random).
}
\details{
\enumerate{
\item{The formula takes the form \emph{response ~ terms|instrumental_variables}.}
\code{\link{ts.nnormal}} provides a detailed description of the formula rule.
\item{DIC is computed as \emph{mean(deviance)+pD}.}
\item{Prior distributions used in ALMOND.}
\itemize{
\item Causal model coefficients at both stages: normal distributions.
\item The causal model residual at the first stage: normal distribution;
the causal model residual at the second stage: Student's t distribution.
}
}
}
\examples{
\donttest{
# Run the model
model1 <- ts.nrobust(outcome~treatment|instrument,data=subECLSK)
# Run the robust model with the self-defined advanced feature
my.robust.model<- function(){
for (i in 1:N){
mu[i] <- beta0 + beta1*z[i]
x[i] ~ dnorm(mu[i], pre.u1)
muY[i] <- gamma0 + gamma1*mu[i]
y[i] ~ dt(muY[i], pre.u2, df)
}
beta0 ~ dnorm(0,1)
beta1 ~ dnorm(1, 1)
gamma0 ~ dnorm(0, 1)
gamma1 ~ dnorm(.5, 1)
pre.u1 ~ dgamma(.001, .001)
pre.u2 ~ dgamma(.001, .001)
df ~ dunif(0,50)
s1.intercept <- beta0
s1.slope1 <- beta1
s2.intercept <- gamma0
s2.slope1 <- gamma1
df.est <- df
var.e.s1 <- 1/pre.u1
var.e.s2 <- 1/pre.u2
}
model2 <- ts.nrobust(outcome~treatment|instrument,data=subECLSK,
advanced=TRUE,adv.model=my.robust.model)
# Extract the model DIC
model1$DIC
# Extract the MCMC output
ts.nrobust(outcome~treatment|instrument,data=subECLSK,codaPkg=TRUE)
}
}
|
# Title : Extracts statistical measurements from displacement maps by pre-defined polygons
# Created by: Konstantin Schellenberg
# Created on: 17.11.2020
# data structure
# storage of gomez' data
# ./ data / disp_gomez / *.tif
# vector geometries
# ./ vector_geometry / glacier.gpkg
# Load packages. require() returns FALSE instead of erroring when a package
# is missing, so check the results explicitly and fail fast with the names
# of any packages that could not be loaded.
packages <- c("tidyverse", "gridExtra", "grid", "raster", "sf", "exactextractr", "stringr", "data.table", "RColorBrewer", "ggplot2")
loaded <- vapply(packages, require, logical(1), character.only = TRUE)
if (!all(loaded)) {
  stop("Failed to load packages: ", paste(packages[!loaded], collapse = ", "))
}
library(ggthemes)
theme_set(theme_minimal())  # minimal ggplot2 theme for every plot below
source("./R/functions.R")   # project helper functions
# CREATE PATHS -----------------------------
plotdir <- "./plots/glacier_movement"
# The original called dir.create(recursive = TRUE) without the path argument,
# which errors whenever the directory is missing; pass the directory to create.
if (!dir.exists(plotdir)) dir.create(plotdir, recursive = TRUE)
# LOAD DATA -----------------------------
# glacier vectors
path_sf = "./vector_geometry/glacier.gpkg"
## NOTE(review): 'sf' shadows the sf package name; consider renaming.
sf = st_read(path_sf, layer = "parts")
extent = st_read(path_sf, layer = "crop_extent")
extent_only_glacier = st_read(path_sf, layer = "crop_onlyglacier")
# displacement data gomez et al.
dir_gom = "./data/disp_gomez"
## NOTE(review): the '.' in ".tif$" is an unescaped regex wildcard.
files_gom = list.files(dir_gom, pattern = ".tif$", full.names = TRUE)
# our displacement data with GAMMA software
dir_disp = "./results/pwr_thresh0.01"
files_disp = list.files(dir_disp, pattern = "\\.mag.geo_32627.tif$", full.names = TRUE)
# Import DEM for slope analyses
dir_dem = "./data/DEM/LMI_Haedarlikan_DEM_16bit_subset_32627.tif"
dem_big = raster(dir_dem)
crs(dem_big)  ## printed as a quick interactive check of the projection
# Standard Deviation of Cross-Correlation
dir_ccs = "./results/accuracy"
## NOTE(review): the leading '*' in "*ccs..." looks like glob syntax, not a
## valid regex quantifier -- confirm the intended pattern (e.g. ".*ccs").
files_ccs = list.files(dir_ccs, pattern = "*ccs.geo_32627.tif$", full.names = TRUE)
# check for crs
#all
## stop (with no message) if the GAMMA rasters and the vectors disagree in CRS
if (!as.character(crs(raster(files_disp[1]))) == as.character(crs(sf))) stop()
# RASTER PREPROCESSING -----------------------------
# creating dummy to refer on:
dummy = raster(crs = "+proj=utm +zone=27 +datum=WGS84 +units=m +no_defs",
res = c(30, 30), ext = raster::extent(extent_only_glacier))
dem = dem_big %>% resample(dummy)
slope = raster::terrain(dem, opt = "slope", unit = "degrees")
par(mfrow = (c(1, 2)))
plot(slope)
plot(dem)
plot(sf, add = TRUE)
# crop all disp to extent
#disps = map(files_disp, function(x){
# raster(x) %>%
# resample(dummy)
#})
## stacking
#disp = stack(disps)
#disp_dailyVelocities = disp / 12
#writeRaster(disp, filename = "./results/pwr_thresh0.01/gamma_stack.tif", overwrite = TRUE)
#writeRaster(disp_dailyVelocities, filename = "./results/pwr_thresh0.01/gamma_stack_dailyVelocities.tif", overwrite = TRUE)
#saveRDS(disp, file = "./results/pwr_thresh0.01/gamma_stack.RDS")
disp = readRDS("./results/pwr_thresh0.01/gamma_stack.RDS")
#disp = readRDS("./results/pwr_thres0.1/gamma_stack.RDS")
#rasterVis::levelplot(disp)
# accuracy
#ccss = map(files_ccs, function(x){
# raster(x) %>%
# resample(dummy)
#})
## stacking
#ccs = stack(ccss)
#writeRaster(ccs, filename = "./results/accuracy/ccs_stack.tif", overwrite = TRUE)
#saveRDS(ccs, file = "./results/accuracy/ccs_stack.RDS")
ccs = readRDS(file = "./results/accuracy/ccs_stack.RDS")
# gomez displacements
#goms = map(files_gom, function (x){
# raster(x) %>%
# resample(dummy)
#})
#
## stacking
#gom = stack(goms)
#saveRDS(gom, file = "./data/disp_gomez/gomez_stack.RDS")
gom = readRDS("./data/disp_gomez/gomez_stack.RDS")
#rasterVis::levelplot(gom)
# NAMING -----------------------------
#' test stringr if they match...
# assign dates to raster bands
pairs = names(gom) %>%
stringr::str_extract(pattern = "\\d+.\\d+") %>%
str_split(pattern = "\\.") %>%
map(., ~ as.Date(.x, format = "%Y%m%d"))
disp.pairs = names(disp) %>%
stringr::str_extract(pattern = "\\d+_\\d+") %>%
str_split(pattern = "\\_") %>%
map(., ~ as.Date(.x, format = "%Y%m%d"))
names(pairs) = map(pairs, ~ .x[1] %>% as.character())
names(disp.pairs) = map(disp.pairs, ~ .x[1] %>% as.character())
# calculating date difference
diffs = map_dbl(disp.pairs, ~ diff(as.Date(.x, format = "")))
# some have longer baselines than 12 day, the latter is the usual though
pairs_diff = as.data.frame(disp.pairs) %>%
t() %>%
as.data.frame(row.names = FALSE) %>%
dplyr::select(main = V1, secondary = V2) %>%
mutate(temporal.baseline = diffs, year = year(main),
month = month(main),
week = week(main),
doy = (yday(main) + yday(secondary)) / 2) %>%
arrange(main)
write_csv(pairs_diff, file = "./results/tables/dates.csv")
# EXTRACTION -----------------------------
# for now, exatraction of all gomez displacements, no matter what
tidying = function(exact_extract_output){
# makes a tidy dataframe from exactextract output
ex = exact_extract_output %>% mutate(subset = row_number()) %>%
pivot_longer(-subset, names_to = "dates", values_to = "observation")
# grep dates
col = ex$dates %>% stringr::str_extract(pattern = "\\d+") %>%
str_split(pattern = "\\.") %>%
map(., ~ as.Date(.x, format = "%Y%m%d") %>% as.character()) %>%
flatten_chr() %>%
as.Date()
tidy = ex %>% mutate(start = col) %>%
dplyr::select(c(-dates)) %>%
dplyr::select(c(start, subset, observation)) %>%
mutate_at(vars(subset), as.factor)
return(tidy)
}
extraction_disp = exact_extract(disp, sf, "mean")
extraction_disp_px = exact_extract(disp, sf, include_xy = TRUE)
extraction_ccs = exact_extract(ccs, sf, "mean")
extraction_gomez = exact_extract(gom, sf, "mean")
# subsetwise
gamma = tidying(extraction_disp) %>% mutate(observation = observation / 12) # per day
gomez = tidying(extraction_gomez)
df.ccs = tidying(extraction_ccs)
# add ccs to gamma, confbands added
df.uncertainty = left_join(gamma, df.ccs, by = c("start", "subset")) %>%
dplyr::select(c(start, subset, observation = observation.x, uncertainty = observation.y)) %>%
mutate(lower.confint = observation - 1.96 * uncertainty,
upper.confint = observation + 1.96 * uncertainty)
#' add uncertainty measure here
merged = rbindlist(list(df.uncertainty, gomez), use.names = TRUE, idcol = "dataset", fill = TRUE) %>%
mutate(name = if_else(dataset == 1, "Gamma workflow", "SNAP workflow Goméz et al. 2020") %>% as.factor()) %>%
mutate_at(vars(subset), as.factor)
summary_subset = merged %>% group_by(name, subset) %>% summarise_all(function(x) mean(x, na.rm = TRUE))
summary_observation = df.uncertainty %>% group_by(start) %>% summarise_all(~mean(.x, na.rm = TRUE))
# entire entire glacier
gamma.full = gamma %>% group_by(start) %>% summarise(mean = mean(observation))
gomez.full = gomez %>% group_by(start) %>% summarise(mean = mean(observation))
merged.full = merged %>% group_by(dataset, start) %>% summarise(mean = mean(observation), mean_lower = mean(lower.confint, na.rm = TRUE),
mean_upper = mean(upper.confint, na.rm = TRUE)) %>%
mutate(dataset, name = if_else(dataset == 1,"GAMMA workflow", "SNAP workflow Goméz et al. 2020") %>% as.factor)
# ---------------------------------------------------
# deviation analysis
# average standard deviation of the results
mean(df.ccs$observation)
# bias between gomez and gamma mean glacier velocity
mean(gamma$observation) - mean(gomez$observation)
# Plotting ------------------------------------------
#colr <- colorRampPalette(brewer.pal(9, 'Blues'))
# quick viz
#rasterVis::levelplot(disp,
# margin=FALSE,
# colorkey=list(
# space='bottom',
# labels=list(at=0:20, font=4),
# axis.line=list(col='black'),
# width=0.75
# ),
# par.settings=list(
# strip.border=list(col='transparent'),
# strip.background=list(col='transparent'),
# axis.line=list(col='transparent')
# ),
# scales=list(draw=FALSE),
# col.regions=colr,
# at=seq(0, 20, len=101),
# names.attr=rep('', nlayers(disp)))
(subsets = ggplot() +
geom_sf(aes(fill = as.factor(ID)), sf) +
labs(fill = "Subset area") +
scale_fill_brewer(type = "seq", palette = 1, direction = -1))
ggplot(df.uncertainty, aes(subset, observation)) +
geom_point()
(a1 = ggplot(summary_subset, aes(subset, observation, group = name, fill = name)) +
geom_bar(stat = "identity", position = "dodge") +
theme(legend.position = "bottom")+
labs(fill = "Dataset", x = "Glacier Subset", y = "Mean Velocity [m / day]"))
# Overall movement
ggplot(gomez.full, aes(start, mean)) +
geom_point() +
geom_line()
# ---------------------------
# Uncertainty
# gamma with uncertainty
ggplot(summary_observation, aes(start, observation)) +
geom_point() +
geom_line() +
geom_errorbar(aes(ymin = lower.confint, ymax = upper.confint))
(gg0a = ggplot(df.uncertainty, aes(start, observation, group = subset, color = subset)) +
geom_point(size = 2) +
geom_line() +
#geom_smooth(method = "lm", se = F) +
geom_errorbar(aes(ymin = lower.confint, ymax = upper.confint)) +
labs(x = "Date", y = "Velocity [m / day]", color = "Glacier Subset") +
theme(legend.position = "bottom"))
# only uncertainty
ggplot(df.ccs, aes(start, observation, group = subset, color = subset)) +
geom_point() +
geom_line()
(gg0b = ggplot(merged.full, aes(start, mean, group = name, color = name)) +
geom_point(size = 3) +
geom_line() +
geom_errorbar(aes(ymin = mean_lower, ymax = mean_upper)) +
labs(color = "Dataset", x = "Year", y = "Velocity [m / day]") +
theme(legend.position = "bottom"))
# gomez per subset
(gg1 = ggplot(gomez) +
geom_point(aes(start, observation, group = subset), color = "grey15")+
geom_line(aes(start, observation, group = subset)) +
facet_wrap(~ subset) +
#geom_smooth(aes(start, observation), method = "lm", color = "darkgreen", fill = "grey60") +
theme_minimal() +
labs(title = "Glacier movement by geographical subset",
subtitle = "Data from Gómez et al. (2020)",
y = "Velocity [m / day]", x = "Date") +
theme(strip.text.x = element_text(size = 20),
axis.text.x = element_text(angle = 65)) +
ylim(c(-0.5, 2.5)))
# gamma per subset
#
(gg2 = ggplot(gamma) +
geom_point(aes(start, observation, group = subset), color = "grey15")+
geom_line(aes(start, observation, group = subset)) +
facet_wrap(~ subset) +
#geom_smooth(aes(start, observation), method = "lm", color = "darkgreen", fill = "grey60") +
theme_minimal() +
labs(y = "Velocity [m / day]", x = "Date") +
theme(strip.text.x = element_text(size = 20),
axis.text.x = element_text(angle = 65)))
# merged per subset
(gg3 = ggplot(merged, aes(start, observation, group = name, color = name)) +
geom_point() +
geom_line() +
facet_grid(cols = vars(subset)) +
theme(legend.position = "bottom") +
labs(color = "Dataset", x = "Date", y = "Velocity [m / day]"))
# only 2020
filtered2020 = merged %>% filter(start >= "2020-01-01")
filtered2020 %>% arrange(subset)
# monthly aggregation
months = merged.full %>% mutate(year = year(start), month = month(start))
summary_months = months %>% group_by(month, name) %>% summarise(observation = mean(mean)) %>%
mutate_at(vars(month), as.factor)
(gg4 = ggplot(filtered2020, aes(start, observation, group = subset, color = subset)) +
geom_point() +
geom_line() +
labs(x = "Date", y = "Velocity [m / day]", color = "Glacier Subset") +
theme(legend.position = "bottom"))
# include gomez' dataset
(gg5 = ggplot(summary_months, aes(month, observation, group = name, color = name)) +
geom_point() +
geom_line() +
labs(x = "Date", y = "Velocity [m / day]", color = "Glacier Subset") +
theme(legend.position = "bottom"))
# marrangeGrob(list(subsets, gg3), nrow = 2, ncol = 1, top = "")
ggsave(file.path(plotdir, "Gomez_glacier_movement.png"), plot = gg1, device = "png", scale = 0.5, height = 10, width = 15)
ggsave(file.path(plotdir, "Gamma_glacier_movement.png"), plot = gg2, device = "png", scale = 0.5, height = 10, width = 15)
ggsave(file.path(plotdir, "Gamma_subsetMeans_lines.png"), plot = gg0a, device = "png", height = 5, width = 5)
ggsave(file.path(plotdir, "MergedDS_subsetMeans_lines.png"), plot = gg0b, device = "png", height = 5, width = 5)
ggsave(file.path(plotdir, "MergedDS_subsetFacets.png"), plot = gg3, device = "png", height = 5, width = 10)
ggsave(file.path(plotdir, "Subsets.png"), plot = subsets, device = "png", height = 5, width = 5)
ggsave(file.path(plotdir, "Gamma_glacier_movement_only2020.png"), plot = gg4, device = "png", height = 5, width = 5)
ggsave(file.path(plotdir, "MergedDS_monthly.png"), plot = gg5, device = "png", height = 5, width = 5)
# Analyses
ggsave(a1, filename = file.path(plotdir, "MergedDS_subsetMeans_bars_2020.png"), device = "png", height = 4, width = 10)
# -------------------------------------------------------
# SLOPE ANALYSIS -----------------------------
# aggregate slope to segments
df.slope.extract = exact_extract(x = slope, y = sf, include_xy = TRUE)
dt.slope = rbindlist(df.slope.extract, idcol = "Segment")
dt.slope100 = dt.slope[coverage_fraction == 1.0, .(slope = value, x, y)]
# aggregate DEM to segments
df.dem.extract = exact_extract(x = dem, y = sf, include_xy = TRUE)
dt.dem = rbindlist(df.dem.extract, idcol = "Segment")
dt.dem100 = dt.dem[coverage_fraction == 1.0, .(dem = value, x, y)]
# query those only covering 100 with the glacier segments
# mit displacements verschneiden
dt.disp.extracted = rbindlist(extraction_disp_px, idcol = "Segment")
dt.disp.extracted100 = dt.disp.extracted[coverage_fraction == 1.0, ]
dt.disp.extracted100.select = dt.disp.extracted100[, -c("x", "y")]
ggplot(dt.disp.extracted100, aes(X20160827_20160908.disp.mag.geo_32627))+
geom_histogram()
# fuse here
dt.bind.pre = cbind(dt.slope100, dt.dem100[, .(dem)], dt.disp.extracted100.select)
dt.bind = dt.bind.pre[, -"coverage_fraction"]
# rename variable names
nm = names(dt.bind)
disps = nm[str_detect(nm, pattern = "^X")]
rest = nm[!str_detect(nm, pattern = "^X")]
disps.split = str_split_fixed(disps, pattern = "_", 2)[,1]
vec = c(rest, disps.split)
names(dt.bind) = vec
names(dt.bind)
# to longer format
dt.longer = dt.bind %>% pivot_longer(cols = -c(slope, dem, x, y, Segment))
# -----------------------------------------------------------
# Visualisation of slope to movement
# average slope of the segments
(slope.segment = dt.bind %>% group_by(Segment) %>% summarise(mean.slope = mean(slope), mean.height = mean(dem)))
write_csv(slope.segment, file = "./results/tables/slope_at_segments.csv")
# dem
ggplot(dt.dem100, aes(y, dem)) + geom_point(size = .5)
ggplot(dt.dem100, aes(x, dem)) + geom_point(size = .5)
# slope
ggplot(dt.slope100, aes(y, slope)) + geom_point(size = .5)
ggplot(dt.slope100, aes(x, slope)) + geom_point(size = .5)
# dem x slope
ggplot(dt.bind, aes(dem, slope)) + geom_point() + geom_smooth(method = "gam")
ggplot(dt.longer, aes(slope, value, color = as.factor(name)))+
geom_point() +
xlim(c(0,25)) +
facet_grid(rows = vars(Segment))
ggplot(dt.longer, aes(slope, value, color = as.factor(name)))+
geom_point() | /R/segment_extraction.R | no_license | geonamsoil/gammaGlacierOffset | R | false | false | 15,008 | r | # Title : Extracts statistical measurements from displacement maps by pre-defined polygons
# Created by: Konstantin Schellenberg
# Created on: 17.11.2020
# data structure
# storage of gomez' data
# ./ data / disp_gomez / *.tif
# vector geometries
# ./ vector_geometry / glacier.gpkg
# load packages
packages = c("tidyverse", "gridExtra", "grid", "raster", "sf", "exactextractr", "stringr", "data.table", "RColorBrewer", "ggplot2")
lapply(packages, require, character.only = TRUE)
library(ggthemes)
theme_set(theme_minimal())
source("./R/functions.R")
# CREATE PATHS -----------------------------
plotdir = "./plots/glacier_movement"
if (!dir.exists(plotdir)) dir.create(recursive = TRUE)
# LOAD DATA -----------------------------
# glacier vectors
path_sf = "./vector_geometry/glacier.gpkg"
sf = st_read(path_sf, layer = "parts")
extent = st_read(path_sf, layer = "crop_extent")
extent_only_glacier = st_read(path_sf, layer = "crop_onlyglacier")
# displacement data gomez et al.
dir_gom = "./data/disp_gomez"
files_gom = list.files(dir_gom, pattern = ".tif$", full.names = TRUE)
# our displacement data with GAMMA software
dir_disp = "./results/pwr_thresh0.01"
files_disp = list.files(dir_disp, pattern = "\\.mag.geo_32627.tif$", full.names = TRUE)
# Import DEM for slope analyses
dir_dem = "./data/DEM/LMI_Haedarlikan_DEM_16bit_subset_32627.tif"
dem_big = raster(dir_dem)
crs(dem_big)
# Standard Deviation of Cross-Correlation
dir_ccs = "./results/accuracy"
files_ccs = list.files(dir_ccs, pattern = "*ccs.geo_32627.tif$", full.names = TRUE)
# check for crs
#all
if (!as.character(crs(raster(files_disp[1]))) == as.character(crs(sf))) stop()
# RASTER PREPROCESSING -----------------------------
# creating dummy to refer on:
dummy = raster(crs = "+proj=utm +zone=27 +datum=WGS84 +units=m +no_defs",
res = c(30, 30), ext = raster::extent(extent_only_glacier))
dem = dem_big %>% resample(dummy)
slope = raster::terrain(dem, opt = "slope", unit = "degrees")
par(mfrow = (c(1, 2)))
plot(slope)
plot(dem)
plot(sf, add = TRUE)
# crop all disp to extent
#disps = map(files_disp, function(x){
# raster(x) %>%
# resample(dummy)
#})
## stacking
#disp = stack(disps)
#disp_dailyVelocities = disp / 12
#writeRaster(disp, filename = "./results/pwr_thresh0.01/gamma_stack.tif", overwrite = TRUE)
#writeRaster(disp_dailyVelocities, filename = "./results/pwr_thresh0.01/gamma_stack_dailyVelocities.tif", overwrite = TRUE)
#saveRDS(disp, file = "./results/pwr_thresh0.01/gamma_stack.RDS")
disp = readRDS("./results/pwr_thresh0.01/gamma_stack.RDS")
#disp = readRDS("./results/pwr_thres0.1/gamma_stack.RDS")
#rasterVis::levelplot(disp)
# accuracy
#ccss = map(files_ccs, function(x){
# raster(x) %>%
# resample(dummy)
#})
## stacking
#ccs = stack(ccss)
#writeRaster(ccs, filename = "./results/accuracy/ccs_stack.tif", overwrite = TRUE)
#saveRDS(ccs, file = "./results/accuracy/ccs_stack.RDS")
ccs = readRDS(file = "./results/accuracy/ccs_stack.RDS")
# gomez displacements
#goms = map(files_gom, function (x){
# raster(x) %>%
# resample(dummy)
#})
#
## stacking
#gom = stack(goms)
#saveRDS(gom, file = "./data/disp_gomez/gomez_stack.RDS")
gom = readRDS("./data/disp_gomez/gomez_stack.RDS")
#rasterVis::levelplot(gom)
# NAMING -----------------------------
#' test stringr if they match...
# assign dates to raster bands
pairs = names(gom) %>%
stringr::str_extract(pattern = "\\d+.\\d+") %>%
str_split(pattern = "\\.") %>%
map(., ~ as.Date(.x, format = "%Y%m%d"))
disp.pairs = names(disp) %>%
stringr::str_extract(pattern = "\\d+_\\d+") %>%
str_split(pattern = "\\_") %>%
map(., ~ as.Date(.x, format = "%Y%m%d"))
names(pairs) = map(pairs, ~ .x[1] %>% as.character())
names(disp.pairs) = map(disp.pairs, ~ .x[1] %>% as.character())
# calculating date difference
diffs = map_dbl(disp.pairs, ~ diff(as.Date(.x, format = "")))
# some have longer baselines than 12 day, the latter is the usual though
pairs_diff = as.data.frame(disp.pairs) %>%
t() %>%
as.data.frame(row.names = FALSE) %>%
dplyr::select(main = V1, secondary = V2) %>%
mutate(temporal.baseline = diffs, year = year(main),
month = month(main),
week = week(main),
doy = (yday(main) + yday(secondary)) / 2) %>%
arrange(main)
write_csv(pairs_diff, file = "./results/tables/dates.csv")
# EXTRACTION -----------------------------
# for now, exatraction of all gomez displacements, no matter what
tidying = function(exact_extract_output){
# makes a tidy dataframe from exactextract output
ex = exact_extract_output %>% mutate(subset = row_number()) %>%
pivot_longer(-subset, names_to = "dates", values_to = "observation")
# grep dates
col = ex$dates %>% stringr::str_extract(pattern = "\\d+") %>%
str_split(pattern = "\\.") %>%
map(., ~ as.Date(.x, format = "%Y%m%d") %>% as.character()) %>%
flatten_chr() %>%
as.Date()
tidy = ex %>% mutate(start = col) %>%
dplyr::select(c(-dates)) %>%
dplyr::select(c(start, subset, observation)) %>%
mutate_at(vars(subset), as.factor)
return(tidy)
}
extraction_disp = exact_extract(disp, sf, "mean")
extraction_disp_px = exact_extract(disp, sf, include_xy = TRUE)
extraction_ccs = exact_extract(ccs, sf, "mean")
extraction_gomez = exact_extract(gom, sf, "mean")
# subsetwise
gamma = tidying(extraction_disp) %>% mutate(observation = observation / 12) # per day
gomez = tidying(extraction_gomez)
df.ccs = tidying(extraction_ccs)
# add ccs to gamma, confbands added
df.uncertainty = left_join(gamma, df.ccs, by = c("start", "subset")) %>%
dplyr::select(c(start, subset, observation = observation.x, uncertainty = observation.y)) %>%
mutate(lower.confint = observation - 1.96 * uncertainty,
upper.confint = observation + 1.96 * uncertainty)
#' add uncertainty measure here
merged = rbindlist(list(df.uncertainty, gomez), use.names = TRUE, idcol = "dataset", fill = TRUE) %>%
mutate(name = if_else(dataset == 1, "Gamma workflow", "SNAP workflow Goméz et al. 2020") %>% as.factor()) %>%
mutate_at(vars(subset), as.factor)
summary_subset = merged %>% group_by(name, subset) %>% summarise_all(function(x) mean(x, na.rm = TRUE))
summary_observation = df.uncertainty %>% group_by(start) %>% summarise_all(~mean(.x, na.rm = TRUE))
# entire entire glacier
gamma.full = gamma %>% group_by(start) %>% summarise(mean = mean(observation))
gomez.full = gomez %>% group_by(start) %>% summarise(mean = mean(observation))
merged.full = merged %>% group_by(dataset, start) %>% summarise(mean = mean(observation), mean_lower = mean(lower.confint, na.rm = TRUE),
mean_upper = mean(upper.confint, na.rm = TRUE)) %>%
mutate(dataset, name = if_else(dataset == 1,"GAMMA workflow", "SNAP workflow Goméz et al. 2020") %>% as.factor)
# ---------------------------------------------------
# deviation analysis
# average standard deviation of the results
mean(df.ccs$observation)
# bias between gomez and gamma mean glacier velocity
mean(gamma$observation) - mean(gomez$observation)
# Plotting ------------------------------------------
#colr <- colorRampPalette(brewer.pal(9, 'Blues'))
# quick viz
#rasterVis::levelplot(disp,
# margin=FALSE,
# colorkey=list(
# space='bottom',
# labels=list(at=0:20, font=4),
# axis.line=list(col='black'),
# width=0.75
# ),
# par.settings=list(
# strip.border=list(col='transparent'),
# strip.background=list(col='transparent'),
# axis.line=list(col='transparent')
# ),
# scales=list(draw=FALSE),
# col.regions=colr,
# at=seq(0, 20, len=101),
# names.attr=rep('', nlayers(disp)))
(subsets = ggplot() +
geom_sf(aes(fill = as.factor(ID)), sf) +
labs(fill = "Subset area") +
scale_fill_brewer(type = "seq", palette = 1, direction = -1))
ggplot(df.uncertainty, aes(subset, observation)) +
geom_point()
(a1 = ggplot(summary_subset, aes(subset, observation, group = name, fill = name)) +
geom_bar(stat = "identity", position = "dodge") +
theme(legend.position = "bottom")+
labs(fill = "Dataset", x = "Glacier Subset", y = "Mean Velocity [m / day]"))
# Overall movement
ggplot(gomez.full, aes(start, mean)) +
geom_point() +
geom_line()
# ---------------------------
# Uncertainty
# gamma with uncertainty
ggplot(summary_observation, aes(start, observation)) +
geom_point() +
geom_line() +
geom_errorbar(aes(ymin = lower.confint, ymax = upper.confint))
(gg0a = ggplot(df.uncertainty, aes(start, observation, group = subset, color = subset)) +
geom_point(size = 2) +
geom_line() +
#geom_smooth(method = "lm", se = F) +
geom_errorbar(aes(ymin = lower.confint, ymax = upper.confint)) +
labs(x = "Date", y = "Velocity [m / day]", color = "Glacier Subset") +
theme(legend.position = "bottom"))
# only uncertainty
ggplot(df.ccs, aes(start, observation, group = subset, color = subset)) +
geom_point() +
geom_line()
(gg0b = ggplot(merged.full, aes(start, mean, group = name, color = name)) +
geom_point(size = 3) +
geom_line() +
geom_errorbar(aes(ymin = mean_lower, ymax = mean_upper)) +
labs(color = "Dataset", x = "Year", y = "Velocity [m / day]") +
theme(legend.position = "bottom"))
# gomez per subset
(gg1 = ggplot(gomez) +
geom_point(aes(start, observation, group = subset), color = "grey15")+
geom_line(aes(start, observation, group = subset)) +
facet_wrap(~ subset) +
#geom_smooth(aes(start, observation), method = "lm", color = "darkgreen", fill = "grey60") +
theme_minimal() +
labs(title = "Glacier movement by geographical subset",
subtitle = "Data from Gómez et al. (2020)",
y = "Velocity [m / day]", x = "Date") +
theme(strip.text.x = element_text(size = 20),
axis.text.x = element_text(angle = 65)) +
ylim(c(-0.5, 2.5)))
# gamma per subset
#
(gg2 = ggplot(gamma) +
geom_point(aes(start, observation, group = subset), color = "grey15")+
geom_line(aes(start, observation, group = subset)) +
facet_wrap(~ subset) +
#geom_smooth(aes(start, observation), method = "lm", color = "darkgreen", fill = "grey60") +
theme_minimal() +
labs(y = "Velocity [m / day]", x = "Date") +
theme(strip.text.x = element_text(size = 20),
axis.text.x = element_text(angle = 65)))
# merged per subset
(gg3 = ggplot(merged, aes(start, observation, group = name, color = name)) +
geom_point() +
geom_line() +
facet_grid(cols = vars(subset)) +
theme(legend.position = "bottom") +
labs(color = "Dataset", x = "Date", y = "Velocity [m / day]"))
# only 2020
filtered2020 = merged %>% filter(start >= "2020-01-01")
filtered2020 %>% arrange(subset)
# monthly aggregation
months = merged.full %>% mutate(year = year(start), month = month(start))
summary_months = months %>% group_by(month, name) %>% summarise(observation = mean(mean)) %>%
mutate_at(vars(month), as.factor)
(gg4 = ggplot(filtered2020, aes(start, observation, group = subset, color = subset)) +
geom_point() +
geom_line() +
labs(x = "Date", y = "Velocity [m / day]", color = "Glacier Subset") +
theme(legend.position = "bottom"))
# include gomez' dataset
(gg5 = ggplot(summary_months, aes(month, observation, group = name, color = name)) +
geom_point() +
geom_line() +
labs(x = "Date", y = "Velocity [m / day]", color = "Glacier Subset") +
theme(legend.position = "bottom"))
# marrangeGrob(list(subsets, gg3), nrow = 2, ncol = 1, top = "")
ggsave(file.path(plotdir, "Gomez_glacier_movement.png"), plot = gg1, device = "png", scale = 0.5, height = 10, width = 15)
ggsave(file.path(plotdir, "Gamma_glacier_movement.png"), plot = gg2, device = "png", scale = 0.5, height = 10, width = 15)
ggsave(file.path(plotdir, "Gamma_subsetMeans_lines.png"), plot = gg0a, device = "png", height = 5, width = 5)
ggsave(file.path(plotdir, "MergedDS_subsetMeans_lines.png"), plot = gg0b, device = "png", height = 5, width = 5)
ggsave(file.path(plotdir, "MergedDS_subsetFacets.png"), plot = gg3, device = "png", height = 5, width = 10)
ggsave(file.path(plotdir, "Subsets.png"), plot = subsets, device = "png", height = 5, width = 5)
ggsave(file.path(plotdir, "Gamma_glacier_movement_only2020.png"), plot = gg4, device = "png", height = 5, width = 5)
ggsave(file.path(plotdir, "MergedDS_monthly.png"), plot = gg5, device = "png", height = 5, width = 5)
# Analyses
ggsave(a1, filename = file.path(plotdir, "MergedDS_subsetMeans_bars_2020.png"), device = "png", height = 4, width = 10)
# -------------------------------------------------------
# SLOPE ANALYSIS -----------------------------
# aggregate slope to segments
df.slope.extract = exact_extract(x = slope, y = sf, include_xy = TRUE)
dt.slope = rbindlist(df.slope.extract, idcol = "Segment")
dt.slope100 = dt.slope[coverage_fraction == 1.0, .(slope = value, x, y)]
# aggregate DEM to segments
df.dem.extract = exact_extract(x = dem, y = sf, include_xy = TRUE)
dt.dem = rbindlist(df.dem.extract, idcol = "Segment")
dt.dem100 = dt.dem[coverage_fraction == 1.0, .(dem = value, x, y)]
# query those only covering 100 with the glacier segments
# mit displacements verschneiden
dt.disp.extracted = rbindlist(extraction_disp_px, idcol = "Segment")
dt.disp.extracted100 = dt.disp.extracted[coverage_fraction == 1.0, ]
dt.disp.extracted100.select = dt.disp.extracted100[, -c("x", "y")]
ggplot(dt.disp.extracted100, aes(X20160827_20160908.disp.mag.geo_32627))+
geom_histogram()
# fuse here
dt.bind.pre = cbind(dt.slope100, dt.dem100[, .(dem)], dt.disp.extracted100.select)
dt.bind = dt.bind.pre[, -"coverage_fraction"]
# rename variable names
nm = names(dt.bind)
disps = nm[str_detect(nm, pattern = "^X")]
rest = nm[!str_detect(nm, pattern = "^X")]
disps.split = str_split_fixed(disps, pattern = "_", 2)[,1]
vec = c(rest, disps.split)
names(dt.bind) = vec
names(dt.bind)
# to longer format
dt.longer = dt.bind %>% pivot_longer(cols = -c(slope, dem, x, y, Segment))
# -----------------------------------------------------------
# Visualisation of slope to movement
# average slope of the segments
(slope.segment = dt.bind %>% group_by(Segment) %>% summarise(mean.slope = mean(slope), mean.height = mean(dem)))
write_csv(slope.segment, file = "./results/tables/slope_at_segments.csv")
# dem
ggplot(dt.dem100, aes(y, dem)) + geom_point(size = .5)
ggplot(dt.dem100, aes(x, dem)) + geom_point(size = .5)
# slope
ggplot(dt.slope100, aes(y, slope)) + geom_point(size = .5)
ggplot(dt.slope100, aes(x, slope)) + geom_point(size = .5)
# dem x slope
ggplot(dt.bind, aes(dem, slope)) + geom_point() + geom_smooth(method = "gam")
ggplot(dt.longer, aes(slope, value, color = as.factor(name)))+
geom_point() +
xlim(c(0,25)) +
facet_grid(rows = vars(Segment))
ggplot(dt.longer, aes(slope, value, color = as.factor(name)))+
geom_point() |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pjp_data_pob_mun_1996_2019.R
\docType{data}
\name{pjp_data_pob_mun_1996_2019}
\alias{pjp_data_pob_mun_1996_2019}
\title{Población municipal(Padrón INE)}
\format{
A data frame with 11 variables
\itemize{
\item year:
\item INECodMuni: Codigo INE del municipio (5 digitos)
\item INECodMuni.n: Nombre del municipio
\item capital_prov: 1 = el municipio es capital de provincia
\item capital_CCAA: 1 = el municipio es capital de CA
\item INECodProv: Codigo INE de la provincia (2 digitos)
\item INECodProv.n: Nombre de la provincia
\item INECodCCAA: Código INE de la C.A. (2 digitos)
\item NombreCCAA: Nombre de la C.A.
\item poblacion: 3 categorias(Total, Hombresy Mujeres)
\item pob_values: numero de personas
}
}
\source{
\url{http://www.ine.es/dyngs/INEbase/es/operacion.htm?c=Estadistica_C&cid=1254736177011&menu=resultados&idp=1254734710990}
}
\usage{
pjp_data_pob_mun_1996_2019
}
\description{
Para los años 1996, 1998-2019 y para el Total, Hombres y Mujeres
}
\examples{
\dontrun{
pob_mun_1996_2019 <- pjp_data_pob_mun_1996_2019
}
}
\keyword{datasets}
| /man/pjp_data_pob_mun_1996_2019.Rd | permissive | perezp44/pjpv2020.01 | R | false | true | 1,148 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pjp_data_pob_mun_1996_2019.R
\docType{data}
\name{pjp_data_pob_mun_1996_2019}
\alias{pjp_data_pob_mun_1996_2019}
\title{Población municipal(Padrón INE)}
\format{
A data frame with 11 variables
\itemize{
\item year:
\item INECodMuni: Codigo INE del municipio (5 digitos)
\item INECodMuni.n: Nombre del municipio
\item capital_prov: 1 = el municipio es capital de provincia
\item capital_CCAA: 1 = el municipio es capital de CA
\item INECodProv: Codigo INE de la provincia (2 digitos)
\item INECodProv.n: Nombre de la provincia
\item INECodCCAA: Código INE de la C.A. (2 digitos)
\item NombreCCAA: Nombre de la C.A.
\item poblacion: 3 categorias(Total, Hombresy Mujeres)
\item pob_values: numero de personas
}
}
\source{
\url{http://www.ine.es/dyngs/INEbase/es/operacion.htm?c=Estadistica_C&cid=1254736177011&menu=resultados&idp=1254734710990}
}
\usage{
pjp_data_pob_mun_1996_2019
}
\description{
Para los años 1996, 1998-2019 y para el Total, Hombres y Mujeres
}
\examples{
\dontrun{
pob_mun_1996_2019 <- pjp_data_pob_mun_1996_2019
}
}
\keyword{datasets}
|
#==============================================================================
#
# 07_stage1_model.r -- first stage of Bayesian Spatial Following model
#
# Fits the first stage of the model presented in Pablo Barberá's paper "Birds of
# the Same Feather Tweet Together: Bayesian Ideal Point Estimation Using Twitter
# Data", Political Analysis (2015) 23:76–91.
#
# Author: Pablo Barbera
# Source: https://github.com/pablobarbera/twitter_ideology/tree/master/code
#
#==============================================================================
library(rstan)
options(scipen = 20)
# load likes matrix
load("model/matrix_selected.rda")
# parameters for Stan model
n.iter = 250
n.warmup = 100
thin = 1
# show sample
cat(date(), ": fitting", nrow(y), "x", ncol(y), "matrix...\n")
timing = Sys.time()
J = dim(y)[1]
K = dim(y)[2]
N = J * K
jj = rep(1:J, times = K)
kk = rep(1:K, each = J)
stan.code = '
data {
int<lower=1> J; // number of twitter users
int<lower=1> K; // number of elite twitter accounts
int<lower=1> N; // N = J x K
int<lower=1,upper=J> jj[N]; // twitter user for observation n
int<lower=1,upper=K> kk[N]; // elite account for observation n
int<lower=0,upper=1> y[N]; // dummy if user i follows elite j
}
parameters {
vector[K] alpha;
vector[K] phi;
vector[J] theta;
vector[J] beta;
real mu_beta;
real<lower=0.1> sigma_beta;
real mu_phi;
real<lower=0.1> sigma_phi;
real gamma;
}
model {
alpha ~ normal(0, 1);
beta ~ normal(mu_beta, sigma_beta);
phi ~ normal(mu_phi, sigma_phi);
theta ~ normal(0, 1);
for (n in 1:N)
y[n] ~ bernoulli_logit( alpha[kk[n]] + beta[jj[n]] -
gamma * square( theta[jj[n]] - phi[kk[n]] ) );
}
'
stan.data = list(J = J, K = K, N = N, jj = jj, kk = kk, y = c(y))
colK = colSums(y)
rowJ = rowSums(y)
normalize = function(x){
(x-mean(x))/sd(x)
}
## RUNNING MODEL
inits = list(list(alpha = normalize(log(colK + 0.0001)),
beta = normalize(log(rowJ + 0.0001)),
theta = rep(0, J), phi = start.phi,
mu_beta = 0,
sigma_beta = 1,
gamma = 2,
mu_phi = 0,
sigma_phi = 1))
stan.fit = stan(model_code = stan.code,
data = stan.data,
iter = n.iter,
warmup = n.warmup,
chains = 1,
thin = 1,
init = inits,
seed = 83398)
# save results
save(stan.fit, file = "model/stanfit.rda")
cat(date(), ": finished.\n")
print(Sys.time() - timing)
rm(list = ls())
gc()
| /code/07_stage1_model.r | no_license | 3wen/elus | R | false | false | 2,597 | r | #==============================================================================
#
# 07_stage1_model.r -- first stage of Bayesian Spatial Following model
#
# Fits the first stage of the model presented in Pablo Barberá's paper "Birds of
# the Same Feather Tweet Together: Bayesian Ideal Point Estimation Using Twitter
# Data", Political Analysis (2015) 23:76–91.
#
# Author: Pablo Barbera
# Source: https://github.com/pablobarbera/twitter_ideology/tree/master/code
#
#==============================================================================
library(rstan)
options(scipen = 20)
# load likes matrix
load("model/matrix_selected.rda")
# parameters for Stan model
n.iter = 250
n.warmup = 100
thin = 1
# show sample
cat(date(), ": fitting", nrow(y), "x", ncol(y), "matrix...\n")
timing = Sys.time()
J = dim(y)[1]
K = dim(y)[2]
N = J * K
jj = rep(1:J, times = K)
kk = rep(1:K, each = J)
stan.code = '
data {
int<lower=1> J; // number of twitter users
int<lower=1> K; // number of elite twitter accounts
int<lower=1> N; // N = J x K
int<lower=1,upper=J> jj[N]; // twitter user for observation n
int<lower=1,upper=K> kk[N]; // elite account for observation n
int<lower=0,upper=1> y[N]; // dummy if user i follows elite j
}
parameters {
vector[K] alpha;
vector[K] phi;
vector[J] theta;
vector[J] beta;
real mu_beta;
real<lower=0.1> sigma_beta;
real mu_phi;
real<lower=0.1> sigma_phi;
real gamma;
}
model {
alpha ~ normal(0, 1);
beta ~ normal(mu_beta, sigma_beta);
phi ~ normal(mu_phi, sigma_phi);
theta ~ normal(0, 1);
for (n in 1:N)
y[n] ~ bernoulli_logit( alpha[kk[n]] + beta[jj[n]] -
gamma * square( theta[jj[n]] - phi[kk[n]] ) );
}
'
stan.data = list(J = J, K = K, N = N, jj = jj, kk = kk, y = c(y))
colK = colSums(y)
rowJ = rowSums(y)
normalize = function(x){
(x-mean(x))/sd(x)
}
## RUNNING MODEL
inits = list(list(alpha = normalize(log(colK + 0.0001)),
beta = normalize(log(rowJ + 0.0001)),
theta = rep(0, J), phi = start.phi,
mu_beta = 0,
sigma_beta = 1,
gamma = 2,
mu_phi = 0,
sigma_phi = 1))
stan.fit = stan(model_code = stan.code,
data = stan.data,
iter = n.iter,
warmup = n.warmup,
chains = 1,
thin = 1,
init = inits,
seed = 83398)
# save results
save(stan.fit, file = "model/stanfit.rda")
cat(date(), ": finished.\n")
print(Sys.time() - timing)
rm(list = ls())
gc()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/caret_pre_model.R
\docType{data}
\name{caret_pre_model}
\alias{caret_pre_model}
\title{Model set up for train function of package caret}
\format{
An object of class \code{list} of length 17.
}
\usage{
caret_pre_model
}
\description{
\code{caret_pre_model} provides a model setup for function \code{train} of
package caret. It allows for tuning arguments sampfrac, maxdepth, learnrate,
mtry, use.grad and penalty.par.val.
}
\details{
When tuning parameters of \code{pre()} with caret's \code{train()}
function, always use the default S3 method (i.e., specify predictors and response
variables through arguments \code{x} and \code{y}). When \code{train.formula()}
is used (i.e., if \code{formula} and \code{data} arguments are specified),
\code{train} will internally call \code{model.matrix()} on \code{data}, which
will code all categorical (factor) predictor variables as dummy variables,
which will yield a different result than inputting the original factors, for most
tree-based methods.
\code{caret_pre_model$parameters} provides an overview of the parameters that
can be tuned for function \code{pre} using \code{caret}. \code{caret_pre_model$grid}
provides a function for creating a tuning grid (see Examples below).
}
\examples{
\dontrun{
library("caret")
## Prepare data:
airq <- airquality[complete.cases(airquality),]
y <- airq$Ozone
x <- airq[,-1]
## Apply caret with only pre's default settings (trControl and ntrees argument
## are employed here only to reduce computation time):
set.seed(42)
prefit1 <- train(x = x, y = y, method = caret_pre_model,
trControl = trainControl(number = 1),
ntrees = 25L)
prefit1
## Create custom tuneGrid:
set.seed(42)
tuneGrid <- caret_pre_model$grid(x = x, y = y,
maxdepth = 3L:5L,
learnrate = c(.01, .1),
penalty.par.val = c("lambda.1se", "lambda.min"))
tuneGrid
## Apply caret (again, ntrees and trControl set only to reduce computation time):
prefit2 <- train(x = x, y = y, method = caret_pre_model,
trControl = trainControl(number = 1),
tuneGrid = tuneGrid, ntrees = 25L)
prefit2
## Get best tuning parameter values:
prefit2$bestTune
## Get predictions from model with best tuning parameters:
predict(prefit2, newdata = x[1:10, ])
plot(prefit2)
## Obtain tuning grid through random search over the tuning parameter space:
set.seed(42)
tuneGrid2 <- caret_pre_model$grid(x = x, y = y, search = "random", len = 10)
tuneGrid2
set.seed(42)
prefit3 <- train(x = x, y = y, method = caret_pre_model,
trControl = trainControl(number = 1, verboseIter = TRUE),
tuneGrid = tuneGrid2, ntrees = 25L)
prefit3
## Count response:
set.seed(42)
prefit4 <- train(x = x, y = y, method = caret_pre_model,
trControl = trainControl(number = 1),
ntrees = 25L, family = "poisson")
prefit4
## Binary factor response:
y_bin <- factor(airq$Ozone > mean(airq$Ozone))
set.seed(42)
prefit5 <- train(x = x, y = y_bin, method = caret_pre_model,
trControl = trainControl(number = 1),
ntrees = 25L, family = "binomial")
prefit5
## Factor response with > 2 levels:
x_multin <- airq[,-5]
y_multin <- factor(airq$Month)
set.seed(42)
prefit6 <- train(x = x_multin, y = y_multin, method = caret_pre_model,
trControl = trainControl(number = 1),
ntrees = 25L, family = "multinomial")
prefit6
}
}
\keyword{datasets}
| /man/caret_pre_model.Rd | no_license | ZhilinJin/pre | R | false | true | 3,622 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/caret_pre_model.R
\docType{data}
\name{caret_pre_model}
\alias{caret_pre_model}
\title{Model set up for train function of package caret}
\format{
An object of class \code{list} of length 17.
}
\usage{
caret_pre_model
}
\description{
\code{caret_pre_model} provides a model setup for function \code{train} of
package caret. It allows for tuning arguments sampfrac, maxdepth, learnrate,
mtry, use.grad and penalty.par.val.
}
\details{
When tuning parameters of \code{pre()} with caret's \code{train()}
function, always use the default S3 method (i.e., specify predictors and response
variables through arguments \code{x} and \code{y}). When \code{train.formula()}
is used (i.e., if \code{formula} and \code{data} arguments are specified),
\code{train} will internally call \code{model.matrix()} on \code{data}, which
will code all categorical (factor) predictor variables as dummy variables,
which will yield a different result than inputting the original factors, for most
tree-based methods.
\code{caret_pre_model$parameters} provides an overview of the parameters that
can be tuned for function \code{pre} using \code{caret}. \code{caret_pre_model$grid}
provides a function for creating a tuning grid (see Examples below).
}
\examples{
\dontrun{
library("caret")
## Prepare data:
airq <- airquality[complete.cases(airquality),]
y <- airq$Ozone
x <- airq[,-1]
## Apply caret with only pre's default settings (trControl and ntrees argument
## are employed here only to reduce computation time):
set.seed(42)
prefit1 <- train(x = x, y = y, method = caret_pre_model,
trControl = trainControl(number = 1),
ntrees = 25L)
prefit1
## Create custom tuneGrid:
set.seed(42)
tuneGrid <- caret_pre_model$grid(x = x, y = y,
maxdepth = 3L:5L,
learnrate = c(.01, .1),
penalty.par.val = c("lambda.1se", "lambda.min"))
tuneGrid
## Apply caret (again, ntrees and trControl set only to reduce computation time):
prefit2 <- train(x = x, y = y, method = caret_pre_model,
trControl = trainControl(number = 1),
tuneGrid = tuneGrid, ntrees = 25L)
prefit2
## Get best tuning parameter values:
prefit2$bestTune
## Get predictions from model with best tuning parameters:
predict(prefit2, newdata = x[1:10, ])
plot(prefit2)
## Obtain tuning grid through random search over the tuning parameter space:
set.seed(42)
tuneGrid2 <- caret_pre_model$grid(x = x, y = y, search = "random", len = 10)
tuneGrid2
set.seed(42)
prefit3 <- train(x = x, y = y, method = caret_pre_model,
trControl = trainControl(number = 1, verboseIter = TRUE),
tuneGrid = tuneGrid2, ntrees = 25L)
prefit3
## Count response:
set.seed(42)
prefit4 <- train(x = x, y = y, method = caret_pre_model,
trControl = trainControl(number = 1),
ntrees = 25L, family = "poisson")
prefit4
## Binary factor response:
y_bin <- factor(airq$Ozone > mean(airq$Ozone))
set.seed(42)
prefit5 <- train(x = x, y = y_bin, method = caret_pre_model,
trControl = trainControl(number = 1),
ntrees = 25L, family = "binomial")
prefit5
## Factor response with > 2 levels:
x_multin <- airq[,-5]
y_multin <- factor(airq$Month)
set.seed(42)
prefit6 <- train(x = x_multin, y = y_multin, method = caret_pre_model,
trControl = trainControl(number = 1),
ntrees = 25L, family = "multinomial")
prefit6
}
}
\keyword{datasets}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/createFolds.R
\name{.createFolds}
\alias{.createFolds}
\title{Create folds for k-fold validation}
\usage{
.createFolds(y, k = 10, list = TRUE, returnTrain = FALSE)
}
\arguments{
\item{y}{split sample by balancing y}
\item{k}{number of folds}
\item{list}{logical whether to return folds in a list}
\item{returnTrain}{logical whether to return training indices (T)
or the test samples (F)}
}
\description{
Used to create balanced folds with respect to y input
for k-fold validation.
}
\author{
Caret Package
}
| /man/createFolds.Rd | permissive | mbowren/LESYMAP | R | false | true | 589 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/createFolds.R
\name{.createFolds}
\alias{.createFolds}
\title{Create folds for k-fold validation}
\usage{
.createFolds(y, k = 10, list = TRUE, returnTrain = FALSE)
}
\arguments{
\item{y}{split sample by balancing y}
\item{k}{number of folds}
\item{list}{logical whether to return folds in a list}
\item{returnTrain}{logical whether to return training indices (T)
or the test samples (F)}
}
\description{
Used to create balanced folds with respect to y input
for k-fold validation.
}
\author{
Caret Package
}
|
# Example script extracted from the RanglaPunjab package help page.
library(RanglaPunjab)
### Name: RanglaPunjab
### Title: Palette of 5 Colors
### Aliases: RanglaPunjab
### ** Examples
# Look up three palettes by name; results are printed at top level.
# (Per the Title above, each is presumably a palette of 5 colors -- confirm
# against the package documentation.)
RanglaPunjab("GoldenTemple")
RanglaPunjab("SohniMahiwal")
RanglaPunjab("Teej")
| /data/genthat_extracted_code/RanglaPunjab/examples/RanglaPunjab.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 204 | r | library(RanglaPunjab)
### Name: RanglaPunjab
### Title: Palette of 5 Colors
### Aliases: RanglaPunjab
### ** Examples
RanglaPunjab("GoldenTemple")
RanglaPunjab("SohniMahiwal")
RanglaPunjab("Teej")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/VariablesGroupCreationDTO.r
\docType{data}
\name{VariablesGroupCreationDTO}
\alias{VariablesGroupCreationDTO}
\title{VariablesGroupCreationDTO Class}
\format{An object of class \code{R6ClassGenerator} of length 24.}
\usage{
VariablesGroupCreationDTO
}
\description{
VariablesGroupCreationDTO Class
}
\section{Fields}{
\describe{
\item{\code{uri}}{}
\item{\code{name}}{}
\item{\code{description}}{}
\item{\code{variables}}{}
}}
\keyword{datasets}
| /man/VariablesGroupCreationDTO.Rd | no_license | OpenSILEX/opensilexClientToolsR | R | false | true | 529 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/VariablesGroupCreationDTO.r
\docType{data}
\name{VariablesGroupCreationDTO}
\alias{VariablesGroupCreationDTO}
\title{VariablesGroupCreationDTO Class}
\format{An object of class \code{R6ClassGenerator} of length 24.}
\usage{
VariablesGroupCreationDTO
}
\description{
VariablesGroupCreationDTO Class
}
\section{Fields}{
\describe{
\item{\code{uri}}{}
\item{\code{name}}{}
\item{\code{description}}{}
\item{\code{variables}}{}
}}
\keyword{datasets}
|
# Figure script: compare PeakSeg peak calls against baseline peak callers
# (macs / hmcan.broad) on selected genomic windows and save the comparison
# plot as figure-good-bad.png.
source("packages.R")
# Load pre-computed error/peak data.
# NOTE(review): these .RData files are expected to define (at least) `groups`,
# `dp.peaks.train`, `dp.peaks.error`, and `dp.peaks` -- confirm contents.
(objs <- load("../PeakSeg-paper/dp.peaks.train.RData"))
(objs <- load("dp.peaks.error.RData"))
(objs <- load("dp.peaks.RData"))
library(dplyr)
# Keep only groups with a single distinct `regions` value.
good.group.df <- groups %>%
group_by(group) %>%
summarise(region.values=length(unique(regions))) %>%
filter(region.values == 1)
good.groups <- good.group.df$group
# Minimum label-error per (chunk, cell type, algorithm, regions); derive the
# data-set name and experiment (histone mark) from the chunk name.
min.error <- groups %>%
filter(group %in% good.groups) %>%
group_by(chunk.name, cell.type, algorithm, regions) %>%
summarise(min=min(errors)) %>%
mutate(set.name=sub("/.*", "", chunk.name),
experiment=sub("_.*", "", set.name))
# Wide table: one column per algorithm. The baseline caller depends on the
# mark (hmcan.broad for broad H3K36me3, macs otherwise); `advantage` is how
# many fewer errors PeakSeg makes than the baseline.
wide <-
dcast(min.error,
chunk.name + cell.type + experiment ~ algorithm,
value.var="min") %>%
mutate(baseline=ifelse(experiment=="H3K36me3",
hmcan.broad.trained, macs.trained),
advantage=baseline-PeakSeg) %>%
arrange(advantage)
zero.error <- data.table(wide) %>%
filter(PeakSeg==0)
# For each experiment, pick the chunk where PeakSeg has zero errors and the
# largest advantage over the baseline.
biggest <- zero.error %>%
group_by(experiment) %>%
mutate(rank=rank(-advantage)) %>%
filter(rank==1)
# NOTE(review): manual override of the second selected chunk -- presumably a
# deliberate choice of display window; confirm intent.
biggest$chunk.name[2] <- "H3K36me3_AM_immune/8"
data.frame(biggest)
zero.error %>%
filter(chunk.name=="H3K36me3_AM_immune/8")
## We will make a plot for the window for which we have the biggest
## advantage, for each mark type.
# Chunk metadata is fetched from the public chip-seq-chunk-db server.
prefix <- "http://cbio.ensmp.fr/~thocking/chip-seq-chunk-db"
chunks.file <- paste0(prefix, "/chunks.RData")
u <- url(chunks.file)
load(u)
close(u)
rownames(chunks) <- with(chunks, paste0(set.name, "/", chunk.id))
# Fill colors for the four annotation/label types.
ann.colors <-
c(noPeaks="#f6f4bf",
peakStart="#ffafaf",
peakEnd="#ff4c4c",
peaks="#a445ee")
# Accumulators filled once per experiment by the loop below.
region.list <- list()
count.list <- list()
max.list <- list()
peak.list <- list()
error.list <- list()
text.list <- list()
# For each selected experiment/chunk: gather peaks, label regions, coverage
# counts, and per-algorithm error summaries for plotting.
# NOTE(review): 1:nrow(biggest) would misbehave if biggest were empty;
# seq_len(nrow(biggest)) is the safe idiom (not changed here).
for(experiment.i in 1:nrow(biggest)){
chunk.info <- biggest[experiment.i, ]
experiment <- as.character(chunk.info$experiment)
chunk.name <- as.character(chunk.info$chunk.name)
more.info <- chunks[chunk.name, ]
chunkChrom <- as.character(more.info$chunkChrom)
cell.type <- as.character(chunk.info$cell.type)
# Baseline caller depends on the histone mark (sharp vs broad pattern).
other.algo <-
ifelse(experiment=="H3K4me3", "macs.trained", "hmcan.broad.trained")
algorithms <- c("PeakSeg", other.algo)
param.err <- dp.peaks.train %>%
inner_join(chunk.info) %>%
mutate(param.name=as.character(param.num))
# Best (minimum-error) parameter per algorithm for this chunk.
min.params <- param.err %>%
filter(algorithm %in% algorithms) %>%
group_by(algorithm) %>%
filter(seq_along(errors) == which.min(errors))
# Untuned default parameter of the baseline caller (log10(0.05)=1.30103 for
# macs; log(10)=2.302... for hmcan.broad -- confirm against training code).
default.param.val <-
ifelse(other.algo=="macs.trained", "1.30103", "2.30258509299405")
default.param <- param.err %>%
filter(algorithm==other.algo,
param.name==default.param.val) %>%
mutate(algorithm=sub("trained", "default", algorithm))
show.params <- rbind(default.param, min.params)
rownames(show.params) <- show.params$algorithm
## TODO: download peaks and error regions for baseline, plot them
## alongside PeakSeg model.
dp.error <- dp.peaks.error[[chunk.name]][[cell.type]]
dp.param <- show.params["PeakSeg", "param.name"]
dp.regions <- subset(dp.error, param.name==dp.param)
sample.ids <- as.character(unique(dp.error$sample.id))
## Try to show only a subset of samples.
sample.ids <- sprintf("McGill%04d", c(26))
# Collect PeakSeg peaks at the chosen parameter for the displayed samples.
dp.peaks.samples <- dp.peaks[[chunk.name]]
dp.peak.list <- list()
for(sample.id in sample.ids){
dp.peak.list[[sample.id]] <-
data.frame(sample.id, dp.peaks.samples[[sample.id]][[dp.param]]) %>%
select(sample.id, chromStart, chromEnd)
}
## Download count signal data.
counts.file <- file.path("data", chunk.name, "counts.RData")
load(counts.file)
# Hand-picked "bad" peak calls and display window per mark; coordinates are
# base positions on the chunk chromosome.
if(experiment=="H3K36me3"){
chromStart <- c(111780000, 111795000, 111900000)
chromEnd <- c(111790000, 111840000, 111960000)
start <- 111550000
end <- 111970000
}else{
chromStart <- c(175459000)
chromEnd <- c(175500000)
start <- 175000000
end <- 175505000
}
# Clip label regions and coverage to the display window.
dp.regions <- dp.regions %>%
mutate(chromStart=ifelse(chromStart < start, start, chromStart),
chromEnd=ifelse(chromEnd < end, chromEnd, end))
counts <- subset(counts,
start < chromStart &
chromEnd < end)
sample.counts <- counts %>%
filter(sample.id %in% sample.ids) %>%
group_by(sample.id) %>%
mutate(coverage.norm=coverage/max(coverage))
# Facet title, e.g. "H3K4me3 data (sharp pattern)".
tit <-
sprintf("%s data (%s pattern)",
experiment, ifelse(experiment=="H3K4me3", "sharp", "broad"))
# Per-sample maximum coverage (raw and normalized), used for placement.
sample.max.df <- sample.counts %>%
group_by(sample.id) %>%
summarise(count=max(coverage),
norm=max(coverage.norm))
sample.max <- sample.max.df$count
names(sample.max) <- as.character(sample.max.df$sample.id)
sample.max.df$chromStart <- sample.counts$chromStart[1]
sample.max.df$chromEnd <- sample.counts$chromEnd[nrow(sample.counts)]
other.params <- subset(show.params, algorithm != "PeakSeg")
trained.param <- subset(other.params, grepl("trained", algorithm))
trained.algo <- as.character(trained.param$algorithm)
label.sample <- "McGill0026"
# Two peak sets to compare: "bad" = the hand-picked calls above,
# "good" = PeakSeg calls at the best parameter.
show.peak.list <-
list(
bad=data.frame(sample.id=label.sample, chromStart, chromEnd),
good=do.call(rbind, dp.peak.list))
# Score the "bad" peaks against the labels (PeakErrorChrom from PeakError).
label.regions <- subset(dp.regions, sample.id==label.sample)
label.err <- PeakErrorChrom(show.peak.list$bad, label.regions)
label.err$sample.id <- label.sample
show.region.list <-
list(good=dp.regions,
bad=label.err)
compare.region.list <- list()
compare.peak.list <- list()
compare.label.list <- list()
# Build plot-ready rows for each of the two peak sets, stacking them at
# different vertical offsets (y.mid for normalized, count.mid for raw plots).
for(algorithm.i in seq_along(show.peak.list)){
peak.df <- show.peak.list[[algorithm.i]]
sample.id <- as.character(peak.df$sample.id)[[1]]
max.count <- sample.max[[sample.id]]
algorithm <- names(show.peak.list)[[algorithm.i]]
short.algo <- sub("[.].*", "", algorithm)
y.mid <- algorithm.i*0.15 + 1.05
count.mid <- algorithm.i * 3 + 32
compare.peak.list[[algorithm]] <-
data.frame(tit, algorithm, y.mid, count.mid, peak.df)
## Also make regions.
height <- 1
region.df <- show.region.list[[algorithm]]
this.param <- show.params[algorithm, ]
# Error-count labels (false positives / false negatives) per peak set.
compare.label.list[[algorithm]] <- with(region.df, {
data.frame(tit, fp=sum(fp), fn=sum(fn),
algorithm,
y.mid, count.mid,
param.name=this.param$param.name)
})
sample.id <- as.character(region.df$sample.id)
max.count <- sample.max[sample.id]
# NOTE(review): y.min/y.max are computed but never used below.
y.min <- (-algorithm.i*4-height)*max.count/10
y.max <- (-algorithm.i*4+height)*max.count/10
compare.region.list[[algorithm]] <-
data.frame(tit, algorithm, y.mid, count.mid, region.df) %>%
select(tit, sample.id, y.mid, count.mid,
chromStart, chromEnd, annotation, status) %>%
mutate(chromStart=ifelse(chromStart < start, start, chromStart),
chromEnd=ifelse(chromEnd < end, chromEnd, end))
}
# Append this experiment's rows to the plot accumulators.
error.list[[experiment.i]] <- do.call(rbind, compare.region.list)
peak.list[[experiment.i]] <- do.call(rbind, compare.peak.list)
first <- dp.regions %>%
filter(sample.id==label.sample,
annotation=="peakStart")
text.list[[experiment.i]] <- do.call(rbind, compare.label.list) %>%
mutate(chromStart=first$chromStart[1],
sample.id=label.sample)
# ColorBrewer Paired-style palette; named entries map algorithms to colors.
# NOTE(review): reassigned identically every iteration -- loop-invariant.
algo.colors <-
c(macs.default="#A6CEE3", macs.trained="#1F78B4", #lite dark blue
hmcan.broad.default="#A6CEE3", hmcan.broad.trained="#1F78B4", #lite dark blue
bad="#1F78B4", #lite dark blue
"#B2DF8A", "#33A02C", #green
"#FB9A99", "#E31A1C", #red
"#FDBF6F", "#FF7F00", #orange
"#CAB2D6", PeakSeg="#6A3D9A", #purple
good="#6A3D9A", #purple
"#FFFF99", "#B15928") #yellow/brown
region.list[[experiment.i]] <-
data.frame(tit, subset(dp.regions, sample.id %in% sample.ids))
count.list[[experiment.i]] <-
data.frame(tit, sample.counts)
max.list[[experiment.i]] <-
data.frame(tit, sample.max.df)
}#experiment.i
# Combine per-experiment accumulators into single data frames for ggplot.
peaks <- do.call(rbind, peak.list)
txt <- do.call(rbind, text.list)
error <- do.call(rbind, error.list)
counts <- do.call(rbind, count.list)
regions <- do.call(rbind, region.list)
maxes <- do.call(rbind, max.list)
# Scale-bar annotation (50 kb segment) for each facet; positions are in kb
# to match the plotted x axis.
scales <-
data.frame(chromStart=c(175450, 111750),
y=-2,
sample.id="McGill0026",
tit=c("H3K4me3 data (sharp pattern)",
"H3K36me3 data (broad pattern)")) %>%
mutate(chromEnd=chromStart+50)
rect.h <- 0.06
# First version of the figure: normalized coverage (0..max scaled to 0..1).
# NOTE(review): this selectedPlot is immediately overwritten by the raw-count
# version below and is never printed -- dead code, presumably kept for reference.
selectedPlot <-
ggplot()+
geom_tallrect(aes(xmin=chromStart/1e3, xmax=chromEnd/1e3,
fill=annotation),
data=regions,
color="grey",
alpha=1/2)+
geom_step(aes(chromStart/1e3, coverage.norm),
data=counts, color="grey50")+
geom_text(aes(chromStart/1e3, 1, label=sprintf("max=%d", count)),
vjust=1, hjust=0, data=maxes, size=3)+
geom_text(aes(chromStart, y, label="scale: 50 kb "),
data=scales, hjust=1, vjust=0.5, size=3)+
geom_segment(aes(chromStart, y, xend=chromEnd, yend=y),
data=scales, size=2)+
geom_text(aes(chromStart/1e3, y.mid,
label=paste0(algorithm, " ")),
data=txt,
##vjust=0.25, size=2,
size=2.5,
hjust=1)+
geom_rect(aes(xmin=chromStart/1e3, xmax=chromEnd/1e3,
ymin=y.mid -rect.h, ymax=y.mid + rect.h,
linetype=status),
data=subset(error, sample.id %in% sample.ids),
fill=NA, color="black", size=0.5)+
scale_linetype_manual("error type",
values=c(correct=0,
"false negative"=3,
"false positive"=1))+
## geom_point(aes(chromStart/1e3, y.mid, color=algorithm),
## data=peaks,
## pch=1, size=2)+
geom_segment(aes(chromStart/1e3, y.mid,
xend=chromEnd/1e3, yend=y.mid,
color=algorithm),
data=peaks, size=1)+
scale_color_manual(values=algo.colors)+
theme_bw()+
theme(panel.margin=grid::unit(0, "cm"))+
facet_grid(. ~ tit, scales="free", space="free_y")+
scale_y_continuous("count of aligned reads",
labels=function(x){
c("0", "max")
},
breaks=c(0, 1))+
guides(color="none")+
xlab(paste("position on chromosome (kb = kilo bases)"))+
scale_fill_manual("label", values=ann.colors,
breaks=names(ann.colors))
# Second (final) version: raw read counts; peak bars positioned at count.mid.
rect.h <- 1
selectedPlot <-
ggplot()+
geom_tallrect(aes(xmin=chromStart/1e3, xmax=chromEnd/1e3,
fill=annotation),
data=regions,
color="grey",
alpha=1/2)+
geom_step(aes(chromStart/1e3, coverage),
data=counts, color="grey50")+
geom_text(aes(chromStart, y, label="scale: 50 kb "),
data=scales, hjust=1, vjust=0.5, size=3)+
geom_segment(aes(chromStart, y, xend=chromEnd, yend=y),
data=scales, size=2)+
geom_text(aes(chromStart/1e3, count.mid,
label=paste0(algorithm, " ")),
data=txt,
##vjust=0.25, size=2,
size=2.5,
hjust=1)+
geom_rect(aes(xmin=chromStart/1e3, xmax=chromEnd/1e3,
ymin=count.mid -rect.h, ymax=count.mid + rect.h,
linetype=status),
data=subset(error, sample.id %in% sample.ids),
fill=NA, color="black", size=0.5)+
scale_linetype_manual("error type",
values=c(correct=0,
"false negative"=3,
"false positive"=1))+
geom_segment(aes(chromStart/1e3, count.mid,
xend=chromEnd/1e3, yend=count.mid,
color=algorithm),
data=peaks, size=1)+
scale_color_manual(values=algo.colors)+
theme_bw()+
theme(panel.margin=grid::unit(0, "cm"))+
facet_grid(. ~ tit, scales="free", space="free_y")+
scale_y_continuous("count of aligned reads", breaks=seq(0, 30, by=10))+
guides(color="none")+
xlab(paste("position on chromosome (kb = kilo bases)"))+
scale_fill_manual("label", values=ann.colors,
breaks=names(ann.colors))
# Render interactively, then write a 200-dpi PNG of the same plot.
print(selectedPlot)
png(png.file <- "figure-good-bad.png",
units="in", res=200, width=8, height=2.5)
print(selectedPlot)
dev.off()
##system(paste("firefox", png.file))
| /figure-good-bad.R | no_license | tdhock/PeakSegFPOP-paper | R | false | false | 12,276 | r | source("packages.R")
(objs <- load("../PeakSeg-paper/dp.peaks.train.RData"))
(objs <- load("dp.peaks.error.RData"))
(objs <- load("dp.peaks.RData"))
library(dplyr)
good.group.df <- groups %>%
group_by(group) %>%
summarise(region.values=length(unique(regions))) %>%
filter(region.values == 1)
good.groups <- good.group.df$group
min.error <- groups %>%
filter(group %in% good.groups) %>%
group_by(chunk.name, cell.type, algorithm, regions) %>%
summarise(min=min(errors)) %>%
mutate(set.name=sub("/.*", "", chunk.name),
experiment=sub("_.*", "", set.name))
wide <-
dcast(min.error,
chunk.name + cell.type + experiment ~ algorithm,
value.var="min") %>%
mutate(baseline=ifelse(experiment=="H3K36me3",
hmcan.broad.trained, macs.trained),
advantage=baseline-PeakSeg) %>%
arrange(advantage)
zero.error <- data.table(wide) %>%
filter(PeakSeg==0)
biggest <- zero.error %>%
group_by(experiment) %>%
mutate(rank=rank(-advantage)) %>%
filter(rank==1)
biggest$chunk.name[2] <- "H3K36me3_AM_immune/8"
data.frame(biggest)
zero.error %>%
filter(chunk.name=="H3K36me3_AM_immune/8")
## We will make a plot for the window for which we have the biggest
## advantage, for each mark type.
prefix <- "http://cbio.ensmp.fr/~thocking/chip-seq-chunk-db"
chunks.file <- paste0(prefix, "/chunks.RData")
u <- url(chunks.file)
load(u)
close(u)
rownames(chunks) <- with(chunks, paste0(set.name, "/", chunk.id))
ann.colors <-
c(noPeaks="#f6f4bf",
peakStart="#ffafaf",
peakEnd="#ff4c4c",
peaks="#a445ee")
region.list <- list()
count.list <- list()
max.list <- list()
peak.list <- list()
error.list <- list()
text.list <- list()
for(experiment.i in 1:nrow(biggest)){
chunk.info <- biggest[experiment.i, ]
experiment <- as.character(chunk.info$experiment)
chunk.name <- as.character(chunk.info$chunk.name)
more.info <- chunks[chunk.name, ]
chunkChrom <- as.character(more.info$chunkChrom)
cell.type <- as.character(chunk.info$cell.type)
other.algo <-
ifelse(experiment=="H3K4me3", "macs.trained", "hmcan.broad.trained")
algorithms <- c("PeakSeg", other.algo)
param.err <- dp.peaks.train %>%
inner_join(chunk.info) %>%
mutate(param.name=as.character(param.num))
min.params <- param.err %>%
filter(algorithm %in% algorithms) %>%
group_by(algorithm) %>%
filter(seq_along(errors) == which.min(errors))
default.param.val <-
ifelse(other.algo=="macs.trained", "1.30103", "2.30258509299405")
default.param <- param.err %>%
filter(algorithm==other.algo,
param.name==default.param.val) %>%
mutate(algorithm=sub("trained", "default", algorithm))
show.params <- rbind(default.param, min.params)
rownames(show.params) <- show.params$algorithm
## TODO: download peaks and error regions for baseline, plot them
## alongside PeakSeg model.
dp.error <- dp.peaks.error[[chunk.name]][[cell.type]]
dp.param <- show.params["PeakSeg", "param.name"]
dp.regions <- subset(dp.error, param.name==dp.param)
sample.ids <- as.character(unique(dp.error$sample.id))
## Try to show only a subset of samples.
sample.ids <- sprintf("McGill%04d", c(26))
dp.peaks.samples <- dp.peaks[[chunk.name]]
dp.peak.list <- list()
for(sample.id in sample.ids){
dp.peak.list[[sample.id]] <-
data.frame(sample.id, dp.peaks.samples[[sample.id]][[dp.param]]) %>%
select(sample.id, chromStart, chromEnd)
}
## Download count signal data.
counts.file <- file.path("data", chunk.name, "counts.RData")
load(counts.file)
if(experiment=="H3K36me3"){
chromStart <- c(111780000, 111795000, 111900000)
chromEnd <- c(111790000, 111840000, 111960000)
start <- 111550000
end <- 111970000
}else{
chromStart <- c(175459000)
chromEnd <- c(175500000)
start <- 175000000
end <- 175505000
}
dp.regions <- dp.regions %>%
mutate(chromStart=ifelse(chromStart < start, start, chromStart),
chromEnd=ifelse(chromEnd < end, chromEnd, end))
counts <- subset(counts,
start < chromStart &
chromEnd < end)
sample.counts <- counts %>%
filter(sample.id %in% sample.ids) %>%
group_by(sample.id) %>%
mutate(coverage.norm=coverage/max(coverage))
tit <-
sprintf("%s data (%s pattern)",
experiment, ifelse(experiment=="H3K4me3", "sharp", "broad"))
sample.max.df <- sample.counts %>%
group_by(sample.id) %>%
summarise(count=max(coverage),
norm=max(coverage.norm))
sample.max <- sample.max.df$count
names(sample.max) <- as.character(sample.max.df$sample.id)
sample.max.df$chromStart <- sample.counts$chromStart[1]
sample.max.df$chromEnd <- sample.counts$chromEnd[nrow(sample.counts)]
other.params <- subset(show.params, algorithm != "PeakSeg")
trained.param <- subset(other.params, grepl("trained", algorithm))
trained.algo <- as.character(trained.param$algorithm)
label.sample <- "McGill0026"
show.peak.list <-
list(
bad=data.frame(sample.id=label.sample, chromStart, chromEnd),
good=do.call(rbind, dp.peak.list))
label.regions <- subset(dp.regions, sample.id==label.sample)
label.err <- PeakErrorChrom(show.peak.list$bad, label.regions)
label.err$sample.id <- label.sample
show.region.list <-
list(good=dp.regions,
bad=label.err)
compare.region.list <- list()
compare.peak.list <- list()
compare.label.list <- list()
for(algorithm.i in seq_along(show.peak.list)){
peak.df <- show.peak.list[[algorithm.i]]
sample.id <- as.character(peak.df$sample.id)[[1]]
max.count <- sample.max[[sample.id]]
algorithm <- names(show.peak.list)[[algorithm.i]]
short.algo <- sub("[.].*", "", algorithm)
y.mid <- algorithm.i*0.15 + 1.05
count.mid <- algorithm.i * 3 + 32
compare.peak.list[[algorithm]] <-
data.frame(tit, algorithm, y.mid, count.mid, peak.df)
## Also make regions.
height <- 1
region.df <- show.region.list[[algorithm]]
this.param <- show.params[algorithm, ]
compare.label.list[[algorithm]] <- with(region.df, {
data.frame(tit, fp=sum(fp), fn=sum(fn),
algorithm,
y.mid, count.mid,
param.name=this.param$param.name)
})
sample.id <- as.character(region.df$sample.id)
max.count <- sample.max[sample.id]
y.min <- (-algorithm.i*4-height)*max.count/10
y.max <- (-algorithm.i*4+height)*max.count/10
compare.region.list[[algorithm]] <-
data.frame(tit, algorithm, y.mid, count.mid, region.df) %>%
select(tit, sample.id, y.mid, count.mid,
chromStart, chromEnd, annotation, status) %>%
mutate(chromStart=ifelse(chromStart < start, start, chromStart),
chromEnd=ifelse(chromEnd < end, chromEnd, end))
}
error.list[[experiment.i]] <- do.call(rbind, compare.region.list)
peak.list[[experiment.i]] <- do.call(rbind, compare.peak.list)
first <- dp.regions %>%
filter(sample.id==label.sample,
annotation=="peakStart")
text.list[[experiment.i]] <- do.call(rbind, compare.label.list) %>%
mutate(chromStart=first$chromStart[1],
sample.id=label.sample)
algo.colors <-
c(macs.default="#A6CEE3", macs.trained="#1F78B4", #lite dark blue
hmcan.broad.default="#A6CEE3", hmcan.broad.trained="#1F78B4", #lite dark blue
bad="#1F78B4", #lite dark blue
"#B2DF8A", "#33A02C", #green
"#FB9A99", "#E31A1C", #red
"#FDBF6F", "#FF7F00", #orange
"#CAB2D6", PeakSeg="#6A3D9A", #purple
good="#6A3D9A", #purple
"#FFFF99", "#B15928") #yellow/brown
region.list[[experiment.i]] <-
data.frame(tit, subset(dp.regions, sample.id %in% sample.ids))
count.list[[experiment.i]] <-
data.frame(tit, sample.counts)
max.list[[experiment.i]] <-
data.frame(tit, sample.max.df)
}#experiment.i
peaks <- do.call(rbind, peak.list)
txt <- do.call(rbind, text.list)
error <- do.call(rbind, error.list)
counts <- do.call(rbind, count.list)
regions <- do.call(rbind, region.list)
maxes <- do.call(rbind, max.list)
scales <-
data.frame(chromStart=c(175450, 111750),
y=-2,
sample.id="McGill0026",
tit=c("H3K4me3 data (sharp pattern)",
"H3K36me3 data (broad pattern)")) %>%
mutate(chromEnd=chromStart+50)
rect.h <- 0.06
selectedPlot <-
ggplot()+
geom_tallrect(aes(xmin=chromStart/1e3, xmax=chromEnd/1e3,
fill=annotation),
data=regions,
color="grey",
alpha=1/2)+
geom_step(aes(chromStart/1e3, coverage.norm),
data=counts, color="grey50")+
geom_text(aes(chromStart/1e3, 1, label=sprintf("max=%d", count)),
vjust=1, hjust=0, data=maxes, size=3)+
geom_text(aes(chromStart, y, label="scale: 50 kb "),
data=scales, hjust=1, vjust=0.5, size=3)+
geom_segment(aes(chromStart, y, xend=chromEnd, yend=y),
data=scales, size=2)+
geom_text(aes(chromStart/1e3, y.mid,
label=paste0(algorithm, " ")),
data=txt,
##vjust=0.25, size=2,
size=2.5,
hjust=1)+
geom_rect(aes(xmin=chromStart/1e3, xmax=chromEnd/1e3,
ymin=y.mid -rect.h, ymax=y.mid + rect.h,
linetype=status),
data=subset(error, sample.id %in% sample.ids),
fill=NA, color="black", size=0.5)+
scale_linetype_manual("error type",
values=c(correct=0,
"false negative"=3,
"false positive"=1))+
## geom_point(aes(chromStart/1e3, y.mid, color=algorithm),
## data=peaks,
## pch=1, size=2)+
geom_segment(aes(chromStart/1e3, y.mid,
xend=chromEnd/1e3, yend=y.mid,
color=algorithm),
data=peaks, size=1)+
scale_color_manual(values=algo.colors)+
theme_bw()+
theme(panel.margin=grid::unit(0, "cm"))+
facet_grid(. ~ tit, scales="free", space="free_y")+
scale_y_continuous("count of aligned reads",
labels=function(x){
c("0", "max")
},
breaks=c(0, 1))+
guides(color="none")+
xlab(paste("position on chromosome (kb = kilo bases)"))+
scale_fill_manual("label", values=ann.colors,
breaks=names(ann.colors))
rect.h <- 1
selectedPlot <-
ggplot()+
geom_tallrect(aes(xmin=chromStart/1e3, xmax=chromEnd/1e3,
fill=annotation),
data=regions,
color="grey",
alpha=1/2)+
geom_step(aes(chromStart/1e3, coverage),
data=counts, color="grey50")+
geom_text(aes(chromStart, y, label="scale: 50 kb "),
data=scales, hjust=1, vjust=0.5, size=3)+
geom_segment(aes(chromStart, y, xend=chromEnd, yend=y),
data=scales, size=2)+
geom_text(aes(chromStart/1e3, count.mid,
label=paste0(algorithm, " ")),
data=txt,
##vjust=0.25, size=2,
size=2.5,
hjust=1)+
geom_rect(aes(xmin=chromStart/1e3, xmax=chromEnd/1e3,
ymin=count.mid -rect.h, ymax=count.mid + rect.h,
linetype=status),
data=subset(error, sample.id %in% sample.ids),
fill=NA, color="black", size=0.5)+
scale_linetype_manual("error type",
values=c(correct=0,
"false negative"=3,
"false positive"=1))+
geom_segment(aes(chromStart/1e3, count.mid,
xend=chromEnd/1e3, yend=count.mid,
color=algorithm),
data=peaks, size=1)+
scale_color_manual(values=algo.colors)+
theme_bw()+
theme(panel.margin=grid::unit(0, "cm"))+
facet_grid(. ~ tit, scales="free", space="free_y")+
scale_y_continuous("count of aligned reads", breaks=seq(0, 30, by=10))+
guides(color="none")+
xlab(paste("position on chromosome (kb = kilo bases)"))+
scale_fill_manual("label", values=ann.colors,
breaks=names(ann.colors))
print(selectedPlot)
png(png.file <- "figure-good-bad.png",
units="in", res=200, width=8, height=2.5)
print(selectedPlot)
dev.off()
##system(paste("firefox", png.file))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rs_get_ind_first_selected_col.R
\name{rs_get_ind_first_selected_row}
\alias{rs_get_ind_first_selected_row}
\title{Get index of the first row in the selection}
\usage{
rs_get_ind_first_selected_row()
}
\description{
Get index of the first row in the selection
}
| /man/rs_get_ind_first_selected_row.Rd | permissive | cran/spAddins | R | false | true | 350 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rs_get_ind_first_selected_col.R
\name{rs_get_ind_first_selected_row}
\alias{rs_get_ind_first_selected_row}
\title{Get index of the first row in the selection}
\usage{
rs_get_ind_first_selected_row()
}
\description{
Get index of the first row in the selection
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getGenome.R
\name{getGenome}
\alias{getGenome}
\title{Genome Retrieval}
\usage{
getGenome(
db = "refseq",
organism,
reference = FALSE,
release = NULL,
gunzip = FALSE,
path = file.path("_ncbi_downloads", "genomes"),
assembly_type = "toplevel"
)
}
\arguments{
\item{db}{a character string specifying the database from which the genome
shall be retrieved:
\itemize{
\item \code{db = "refseq"}
\item \code{db = "genbank"}
\item \code{db = "ensembl"}
}}
\item{organism}{there are three options to characterize an organism:
\itemize{
\item by \code{scientific name}: e.g. \code{organism = "Homo sapiens"}
\item by \code{database specific accession identifier}: e.g. \code{organism = "GCF_000001405.37"} (= NCBI RefSeq identifier for \code{Homo sapiens})
\item by \code{taxonomic identifier from NCBI Taxonomy}: e.g. \code{organism = "9606"} (= taxid of \code{Homo sapiens})
}}
\item{reference}{a logical value indicating whether or not a genome shall be downloaded if it isn't marked in the database as either a reference genome or a representative genome.}
\item{release}{the database release version of ENSEMBL (\code{db = "ensembl"}). Default is \code{release = NULL} meaning
that the most recent database version is used.}
\item{gunzip}{a logical value indicating whether or not files should be unzipped.}
\item{path}{a character string specifying the location (a folder) in which
the corresponding genome shall be stored. Default is
\code{path} = \code{file.path("_ncbi_downloads","genomes")}.}
\item{assembly_type}{a character string specifying from which assembly type the genome
shall be retrieved from (ensembl only, else this argument is ignored):
Default is
\code{assembly_type = "toplevel"}.
This will give you all multi-chromosomes (copies of the same chromosome with small variations).
As an example the toplevel fasta genome in human is over 70 GB uncompressed.
To get primary assembly with 1 chromosome variant per chromosome:
\code{assembly_type = "primary_assembly"}.
As an example, the primary_assembly fasta genome in human is only a few GB uncompressed.}
}
\value{
File path to downloaded genome.
}
\description{
Main genome retrieval function for an organism of interest.
By specifying the scientific name of an organism of interest the
corresponding fasta-file storing the genome of the organism of interest
can be downloaded and stored locally. Genome files can be retrieved from
several databases. In addition, the genome summary statistics for the
retrieved species is stored locally to provide users with
insights regarding the genome assembly quality (see \code{\link{summary_genome}} for details).
This is useful when comparing genomes with large difference in genome assembly qualities.
}
\details{
Internally this function loads the overview.txt file from NCBI:
refseq: ftp://ftp.ncbi.nlm.nih.gov/genomes/refseq/
genbank: ftp://ftp.ncbi.nlm.nih.gov/genomes/genbank/
and creates a directory '_ncbi_downloads/genomes' to store
the genome of interest as fasta file for future processing.
In case the corresponding fasta file already exists within the
'_ncbi_downloads/genomes' folder and is accessible within the workspace,
no download process will be performed.
}
\examples{
\dontrun{
# download the genome of Arabidopsis thaliana from refseq
# and store the corresponding genome file in '_ncbi_downloads/genomes'
file_path <- getGenome( db = "refseq",
organism = "Arabidopsis thaliana",
path = file.path("_ncbi_downloads","genomes"))
Ath_genome <- read_genome(file_path, format = "fasta")
# download the genome of Arabidopsis thaliana from genbank
# and store the corresponding genome file in '_ncbi_downloads/genomes'
file_path <- getGenome( db = "genbank",
organism = "Arabidopsis thaliana",
path = file.path("_ncbi_downloads","genomes"))
Ath_genome <- read_genome(file_path, format = "fasta")
}
}
\seealso{
\code{\link{getGenomeSet}}, \code{\link{getProteome}}, \code{\link{getCDS}},
\code{\link{getGFF}}, \code{\link{getRNA}}, \code{\link{getRepeatMasker}},
\code{\link{getAssemblyStats}}, \code{\link{summary_genome}},
\code{\link{meta.retrieval}}, \code{\link{meta.retrieval.all}}, \code{\link{read_genome}}
}
\author{
Hajk-Georg Drost
}
| /man/getGenome.Rd | no_license | zpeng1989/biomartr | R | false | true | 4,350 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getGenome.R
\name{getGenome}
\alias{getGenome}
\title{Genome Retrieval}
\usage{
getGenome(
db = "refseq",
organism,
reference = FALSE,
release = NULL,
gunzip = FALSE,
path = file.path("_ncbi_downloads", "genomes"),
assembly_type = "toplevel"
)
}
\arguments{
\item{db}{a character string specifying the database from which the genome
shall be retrieved:
\itemize{
\item \code{db = "refseq"}
\item \code{db = "genbank"}
\item \code{db = "ensembl"}
}}
\item{organism}{there are three options to characterize an organism:
\itemize{
\item by \code{scientific name}: e.g. \code{organism = "Homo sapiens"}
\item by \code{database specific accession identifier}: e.g. \code{organism = "GCF_000001405.37"} (= NCBI RefSeq identifier for \code{Homo sapiens})
\item by \code{taxonomic identifier from NCBI Taxonomy}: e.g. \code{organism = "9606"} (= taxid of \code{Homo sapiens})
}}
\item{reference}{a logical value indicating whether or not a genome shall be downloaded if it isn't marked in the database as either a reference genome or a representative genome.}
\item{release}{the database release version of ENSEMBL (\code{db = "ensembl"}). Default is \code{release = NULL} meaning
that the most recent database version is used.}
\item{gunzip}{a logical value indicating whether or not files should be unzipped.}
\item{path}{a character string specifying the location (a folder) in which
the corresponding genome shall be stored. Default is
\code{path} = \code{file.path("_ncbi_downloads","genomes")}.}
\item{assembly_type}{a character string specifying from which assembly type the genome
shall be retrieved from (ensembl only, else this argument is ignored):
Default is
\code{assembly_type = "toplevel"}.
This will give you all multi-chromosomes (copies of the same chromosome with small variations).
As an example the toplevel fasta genome in human is over 70 GB uncompressed.
To get primary assembly with 1 chromosome variant per chromosome:
\code{assembly_type = "primary_assembly"}.
As an example, the primary_assembly fasta genome in human is only a few GB uncompressed.}
}
\value{
File path to downloaded genome.
}
\description{
Main genome retrieval function for an organism of interest.
By specifying the scientific name of an organism of interest the
corresponding fasta-file storing the genome of the organism of interest
can be downloaded and stored locally. Genome files can be retrieved from
several databases. In addition, the genome summary statistics for the
retrieved species is stored locally to provide users with
insights regarding the genome assembly quality (see \code{\link{summary_genome}} for details).
This is useful when comparing genomes with large difference in genome assembly qualities.
}
\details{
Internally this function loads the overview.txt file from NCBI:
refseq: ftp://ftp.ncbi.nlm.nih.gov/genomes/refseq/
genbank: ftp://ftp.ncbi.nlm.nih.gov/genomes/genbank/
and creates a directory '_ncbi_downloads/genomes' to store
the genome of interest as fasta file for future processing.
In case the corresponding fasta file already exists within the
'_ncbi_downloads/genomes' folder and is accessible within the workspace,
no download process will be performed.
}
\examples{
\dontrun{
# download the genome of Arabidopsis thaliana from refseq
# and store the corresponding genome file in '_ncbi_downloads/genomes'
file_path <- getGenome( db = "refseq",
organism = "Arabidopsis thaliana",
path = file.path("_ncbi_downloads","genomes"))
Ath_genome <- read_genome(file_path, format = "fasta")
# download the genome of Arabidopsis thaliana from genbank
# and store the corresponding genome file in '_ncbi_downloads/genomes'
file_path <- getGenome( db = "genbank",
organism = "Arabidopsis thaliana",
path = file.path("_ncbi_downloads","genomes"))
Ath_genome <- read_genome(file_path, format = "fasta")
}
}
\seealso{
\code{\link{getGenomeSet}}, \code{\link{getProteome}}, \code{\link{getCDS}},
\code{\link{getGFF}}, \code{\link{getRNA}}, \code{\link{getRepeatMasker}},
\code{\link{getAssemblyStats}}, \code{\link{summary_genome}},
\code{\link{meta.retrieval}}, \code{\link{meta.retrieval.all}}, \code{\link{read_genome}}
}
\author{
Hajk-Georg Drost
}
|
## Exploratory scratch work on the hospital 30-day mortality data (PA3).
## "Not Available" entries become NA; all columns are read as character so
## we can coerce selectively below.
outcome <- read.csv("outcome-of-care-measures.csv", na.strings="Not Available", colClasses = "character")
head(outcome)
outcome_names <- names(outcome)
## Column 11: 30-day death (mortality) rate from heart attack.
temp <- outcome$Hospital.30.Day.Death..Mortality..Rates.from.Heart.Attack
temp_na <- is.na(temp)
mean(temp_na)  # proportion of missing values in the heart-attack column
outcome[, 11] <- as.numeric(outcome[, 11])
## You may get a warning about NAs being introduced; that is okay
hist(outcome[, 11])
state <- "TX"
## Bug fix: the data frame loaded above is `outcome`; `outcome_data` was
## never defined in this file and made both checks error.
any(state == outcome$State)
all(state != outcome$State)
## NOTE(review): `states` is not defined anywhere in this file; this line
## only works if a `states` vector already exists in the workspace -- confirm.
all(state != states)
# D U M M Y S ------------------------------------------------------------------
outcome <- "pneumonia"
num <- "worst"
state <- "NJ"
character <- "NJ"  # NOTE(review): masks base R's character(); prefer another name
| /PA3/playground.R | no_license | EverMN/datasciencecoursera | R | false | false | 648 | r | outcome <- read.csv("outcome-of-care-measures.csv", na.strings="Not Available", colClasses = "character")
## Exploratory scratch work on the hospital 30-day mortality data (PA3);
## `outcome` is the data frame read.csv'd on the preceding line.
head(outcome)
outcome_names <- names(outcome)
## Column 11: 30-day death (mortality) rate from heart attack.
temp <- outcome$Hospital.30.Day.Death..Mortality..Rates.from.Heart.Attack
temp_na <- is.na(temp)
mean(temp_na)  # proportion of missing values in the heart-attack column
outcome[, 11] <- as.numeric(outcome[, 11])
## You may get a warning about NAs being introduced; that is okay
hist(outcome[, 11])
state <- "TX"
## Bug fix: the data frame is `outcome`; `outcome_data` was never defined in
## this file and made both checks error.
any(state == outcome$State)
all(state != outcome$State)
## NOTE(review): `states` is not defined anywhere in this file; this line
## only works if a `states` vector already exists in the workspace -- confirm.
all(state != states)
# D U M M Y S ------------------------------------------------------------------
outcome <- "pneumonia"
num <- "worst"
state <- "NJ"
character <- "NJ"  # NOTE(review): masks base R's character(); prefer another name
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calculate_likelihoods.R
\name{calculate_likelihoods}
\alias{calculate_likelihoods}
\title{Calculates likelihood for every row of data with list of RAM matrices for
every moderator value}
\usage{
calculate_likelihoods(data, RAM_list, moderator_name)
}
\arguments{
\item{data}{dataframe for which likelihoods have to be calculated}
\item{RAM_list}{list of RAM matrices for every moderator value}
\item{moderator_name}{name of the moderator in the dataframe}
}
\value{
vector of loglikelihoods for every row of data
}
\description{
Calculates likelihood for every row of data with list of RAM matrices for
every moderator value
}
| /man/calculate_likelihoods.Rd | permissive | xanthematthijssen/lsembandwidth | R | false | true | 707 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calculate_likelihoods.R
\name{calculate_likelihoods}
\alias{calculate_likelihoods}
\title{Calculates likelihood for every row of data with list of RAM matrices for
every moderator value}
\usage{
calculate_likelihoods(data, RAM_list, moderator_name)
}
\arguments{
\item{data}{dataframe for which likelihoods have to be calculated}
\item{RAM_list}{list of RAM matrices for every moderator value}
\item{moderator_name}{name of the moderator in the dataframe}
}
\value{
vector of loglikelihoods for every row of data
}
\description{
Calculates likelihood for every row of data with list of RAM matrices for
every moderator value
}
|
AnalyseSPIData = function(GBP.file,GOI.file, k = 0.4, writename = "Merge.file.csv"){
  # Merge GBP- and GOI-control SPI screen result files, add mean-LGR and
  # mean-Z-score columns, write the merged table to `writename`, and draw two
  # diagnostic plots (ranked-LGR curve and control-vs-control scatter).
  #
  # Args:
  #   GBP.file, GOI.file: paths to the two per-control CSV result files.
  #   k: LGR cutoff above which a strain is called an SPI (default 0.4).
  #   writename: output path for the merged CSV.
  # Returns: the merged data frame.
  #
  # NOTE(review): relies on readr (read_csv) and dplyr (inner_join, mutate,
  # arrange, filter) being attached by the caller; neither is loaded here.
  GBP.file = read_csv(GBP.file)
  GOI.file = read_csv(GOI.file)
  Merge.file <- inner_join(GBP.file, GOI.file, by = c("Plate", "Row", "Column", "ORF"), suffix = c(".GBP",".GOI"))
  # Row-wise means across the two comparisons, ignoring missing values.
  Mean_LGR = rowMeans(Merge.file[c('mean_LGR.GBP', 'mean_LGR.GOI')], na.rm=TRUE)
  Mean_Z = rowMeans(Merge.file[c('mean_Z_score.GBP', 'mean_Z_score.GOI')], na.rm=TRUE)
  Merge.File = mutate(Merge.file, Mean_LGR, Mean_Z)
  # Bug fix: the original wrote to sprintf("Merge.File_%s.csv", Var) with
  # `Var` undefined, so write.csv always errored while the `writename`
  # parameter went unused; honour the parameter instead.
  write.csv(Merge.File, file = writename)
  SPIplots = function(Merge.File){
    # Plot 1: strains ranked by mean LGR, with horizontal cutoffs at +/- k.
    A = arrange(Merge.File, desc(Mean_LGR)) #arrange largest to smallest
    lim = ceiling(max(A$Mean_LGR)) # find best limits for graph
    plot(A$Mean_LGR, xlab = "GFP strains", ylab = "LGR", pch = 19,col = rainbow(1),cex = .4,panel.first=grid(20,10),ylim = c(-1*lim,lim))
    abline(h = c(k,-k))
    # Plot 2: GOI-control LGR vs GBP-control LGR for rows with both values.
    B = filter(Merge.File, (mean_LGR.GOI != "n/a")&(mean_LGR.GBP != "n/a"))
    Xlim = ceiling(as.numeric(max(B$mean_LGR.GOI))) # Best limits for graph
    Ylim = ceiling(as.numeric(max(B$mean_LGR.GBP)))
    # Red (col 10) = SPI (Mean_LGR >= k), black (col 1) = non-SPI. The
    # original assigned Color three times and only the final expression took
    # effect; the two dead assignments were removed (behavior unchanged).
    Color = (9*(B$Mean_LGR>= k)+1)
    plot(B$mean_LGR.GOI ,B$mean_LGR.GBP ,pch = 19,panel.first=grid(12,12),asp =1,cex = .4, xlim = c(-1*Xlim,Xlim),ylim = c(-1*Ylim,Ylim), xlab = "GOI control LGR", ylab = "GBP control LGR", col = Color, main = "Control Correlation" )
    abline(h = k ,v= k)
    lines(c((k*102),(-k*100)),c((-k*100),(k*102))) # Line showing (x+y)/2 = 2, line showing SPI limit
    legend("bottomleft",c("SPI","non-SPI"), col = c(10,1), pch =19)
  }
  SPIplots(Merge.File)
  return(Merge.File)
}
| /ComparisonScript.R | no_license | CinziaK/SPIanalyser | R | false | false | 1,924 | r | AnalyseSPIData = function(GBP.file,GOI.file, k = 0.4, writename = "Merge.file.csv"){
# GOIfile and GBP file should be in form ".txt"
# Merge GBP- and GOI-control SPI screen result files, add mean-LGR and
# mean-Z-score columns, write the merged table to `writename`, and draw two
# diagnostic plots. Returns the merged data frame.
# NOTE(review): relies on readr (read_csv) and dplyr being attached by the caller.
GBP.file = read_csv(GBP.file)
GOI.file = read_csv(GOI.file)
Merge.file <- inner_join(GBP.file, GOI.file, by = c("Plate", "Row", "Column", "ORF"), suffix = c(".GBP",".GOI"))
# Row-wise means across the two comparisons, ignoring missing values.
Mean_LGR = rowMeans(Merge.file[c('mean_LGR.GBP', 'mean_LGR.GOI')], na.rm=TRUE)
Mean_Z = rowMeans(Merge.file[c('mean_Z_score.GBP', 'mean_Z_score.GOI')], na.rm=TRUE)
Merge.File = mutate( Merge.file, Mean_LGR, Mean_Z)
# Bug fix: the original used sprintf("Merge.File_%s.csv", Var) with `Var`
# undefined, so write.csv always errored; honour the `writename` parameter.
write.csv(Merge.File, file = writename)
SPIplots = function(Merge.File){
# Plot 1: strains ranked by mean LGR, with horizontal cutoffs at +/- k.
A = arrange(Merge.File, desc(Mean_LGR)) #arrange largest to smallest
lim = ceiling(max(A$Mean_LGR)) # find best limits for graph
plot(A$Mean_LGR, xlab = "GFP strains", ylab = "LGR", pch = 19,col = rainbow(1),cex = .4,panel.first=grid(20,10),ylim = c(-1*lim,lim))
abline(h = c(k,-k))
# Plot 2: GOI-control LGR vs GBP-control LGR for rows with both values.
B = filter(Merge.File, (mean_LGR.GOI != "n/a")&(mean_LGR.GBP != "n/a"))
Xlim = ceiling(as.numeric(max(B$mean_LGR.GOI))) # Best limits for graph
Ylim = ceiling(as.numeric(max(B$mean_LGR.GBP)))
# Red (col 10) = SPI (Mean_LGR >= k), black (col 1) = non-SPI. The original
# assigned Color three times; only the final one took effect, so the two
# dead assignments were removed (behavior unchanged).
Color = (9*(B$Mean_LGR>= k)+1)
plot(B$mean_LGR.GOI ,B$mean_LGR.GBP ,pch = 19,panel.first=grid(12,12),asp =1,cex = .4, xlim = c(-1*Xlim,Xlim),ylim = c(-1*Ylim,Ylim), xlab = "GOI control LGR", ylab = "GBP control LGR", col = Color, main = "Control Correlation" )
abline(h = k ,v= k)
lines(c((k*102),(-k*100)),c((-k*100),(k*102))) # Line showing (x+y)/2 = 2, line showing SPI limit
legend("bottomleft",c("SPI","non-SPI"), col = c(10,1), pch =19)
}
SPIplots(Merge.File)
return(Merge.File)
}
|
#Suma de los numeros impares de un vector
suma_impar<-function(y){
# Devuelve la suma de los numeros impares del vector `y` (sum of the
# odd-valued entries), vectorised. Equivalent to the original loop (zero out
# the evens, then accumulate) but also fixes the empty-vector case: the old
# `for (i in 1:length(y))` iterated over c(1, 0) when length(y) was 0 and
# errored, whereas sum(numeric(0)) is 0. Negative odd numbers still count
# because -3 %% 2 is 1 in R.
return(sum(y[y %% 2 != 0]))
} | /funcion suma numero impares de vector.R | no_license | b62ropaa/R | R | false | false | 300 | r | #Suma de los numeros impares de un vector
suma_impar<-function(y){
# Devuelve la suma de los numeros impares del vector `y` (sum of the
# odd-valued entries), vectorised. Equivalent to the original loop (zero out
# the evens, then accumulate) but also fixes the empty-vector case: the old
# `for (i in 1:length(y))` iterated over c(1, 0) when length(y) was 0 and
# errored, whereas sum(numeric(0)) is 0. Negative odd numbers still count
# because -3 %% 2 is 1 in R.
return(sum(y[y %% 2 != 0]))
}
# Set working directory to results folder
# NOTE(review): interactive analysis script; the second setwd() overrides the
# first, so only the last uncommented combo folder is actually used.
setwd("~/Desktop/HIV/results_broad_sweep/Combo 3")
setwd("~/Desktop/HIV/results_broad_sweep/Combo 1")
# Sweep values of the reproduction parameter r; the plots below label the
# x-axis as Rvalues * 0.3 (see the axis() call), i.e. regimen effectiveness %.
Rvalues <- c(150, 175, 187, 191, 192, 193,
194, 195, 200, 210, 220, 230, 260,
270, 280, 290, 300, 310, 320, 330)
####################### Broad Sweep Plot mean and Sd #######################
# One summary CSV per scenario. Object names appear to encode the scenario as
# r_<total infection pr>_<d1>_<d2>_<d3>_<beyond> in percent (cf. how the plot
# title is parsed from the name further below) -- confirm against the sim code.
r_100_99.0_0.6_0.3_0.1 = read.csv("./results99_06_03_01/computed_mean_sd6.csv", sep = ",")
r_100_70.0_25.0_4.0_1.0 = read.csv("./results70_25_4_1/computed_mean_sd6.csv", sep = ",")
r_100_70.0_15.0_13.0_2.0 = read.csv("./results70_15_13_2/computed_mean_sd6.csv", sep = ",")
r_100_50.0_25.0_12.5_12.5 = read.csv("./results50_25_125_125/computed_mean_sd6.csv", sep = ",")
r_85_84.0_0.6_0.3_0.1 = read.csv("./results84_06_03_01/computed_mean_sd6.csv", sep = ",")
r_85_59.5_21.3_3.5_0.8 = read.csv("./results595_2125_35_075/computed_mean_sd6.csv", sep = ",")
r_85_59.5_12.8_11.1_1.6 = read.csv("./results595_1275_1105_16/computed_mean_sd6.csv", sep = ",")
r_85_42.5_21.3_10.6_10.6 = read.csv("./results425_2125_1063_1063/computed_mean_sd6.csv", sep = ",")
r_70_69.0_0.6_0.3_0.1 = read.csv("./results69_06_03_01/computed_mean_sd6.csv", sep = ",")
r_70_49.0_17.1_2.5_0.7 = read.csv("./results49_171_25_07/computed_mean_sd6.csv", sep = ",")
r_70_49.0_10.5_9.10_1.4 = read.csv("./results49_105_910_14/computed_mean_sd6.csv", sep = ",")
r_70_35.0_17.5_8.8_8.8 = read.csv("./results35_175_875_875/computed_mean_sd6.csv", sep = ",")
r_55_54.0_0.6_0.3_0.1 = read.csv("./results54_06_03_01/computed_mean_sd6.csv", sep = ",")
r_55_38.4_29.8_2.2_0.6 = read.csv("./results384_2975_22_055/computed_mean_sd6.csv", sep = ",")
r_55_38.4_8.3_7.2_1.1 = read.csv("./results384_825_715_11/computed_mean_sd6.csv", sep = ",")
r_55_27.5_13.8_6.9_6.9 = read.csv("./results275_1375_688_688/computed_mean_sd6.csv", sep = ",")
r_85_85.0_0_0_0 = read.csv("./results85_0_0_0/computed_mean_sd6.csv", sep = ",")
r_85_59_21.3_3.5_0.9 = read.csv("./results59_213_35_9/computed_mean_sd6.csv", sep = ",")
r_70_70.0_0_0_0 = read.csv("./results70_0_0_0/computed_mean_sd6.csv", sep = ",")
r_55_55.0_0_0_0 = read.csv("./results55_0_0_0/computed_mean_sd6.csv", sep = ",")
r_55_38.4_13.8_2.2_0.6 = read.csv("./results384_138_22_6/computed_mean_sd6.csv", sep = ",")
# Two alternative groupings of the scenarios (they differ in which variant is
# used for the 85/70/55 totals), with parallel lists of their names.
sims3 = list(r_100_99.0_0.6_0.3_0.1, r_100_70.0_25.0_4.0_1.0, r_100_70.0_15.0_13.0_2.0, r_100_50.0_25.0_12.5_12.5,
r_85_85.0_0_0_0, r_85_59_21.3_3.5_0.9, r_85_59.5_12.8_11.1_1.6, r_85_42.5_21.3_10.6_10.6,
r_70_70.0_0_0_0, r_70_49.0_17.1_2.5_0.7, r_70_49.0_10.5_9.10_1.4, r_70_35.0_17.5_8.8_8.8,
r_55_55.0_0_0_0, r_55_38.4_13.8_2.2_0.6, r_55_38.4_8.3_7.2_1.1, r_55_27.5_13.8_6.9_6.9)
sims12 = list(r_100_99.0_0.6_0.3_0.1, r_100_70.0_25.0_4.0_1.0, r_100_70.0_15.0_13.0_2.0, r_100_50.0_25.0_12.5_12.5,
r_85_84.0_0.6_0.3_0.1, r_85_59.5_21.3_3.5_0.8, r_85_59.5_12.8_11.1_1.6, r_85_42.5_21.3_10.6_10.6,
r_70_69.0_0.6_0.3_0.1, r_70_49.0_17.1_2.5_0.7, r_70_49.0_10.5_9.10_1.4, r_70_35.0_17.5_8.8_8.8,
r_55_54.0_0.6_0.3_0.1, r_55_38.4_29.8_2.2_0.6, r_55_38.4_8.3_7.2_1.1, r_55_27.5_13.8_6.9_6.9)
sims3_names = list("r_100_99.0_0.6_0.3_0.1", "r_100_70.0_25.0_4.0_1.0", "r_100_70.0_15.0_13.0_2.0", "r_100_50.0_25.0_12.5_12.5",
"r_85_85.0_0_0_0", "r_85_59_21.3_3.5_0.9", "r_85_59.5_12.8_11.1_1.6", "r_85_42.5_21.3_10.6_10.6",
"r_70_70.0_0_0_0", "r_70_49.0_17.1_2.5_0.7", "r_70_49.0_10.5_9.10_1.4", "r_70_35.0_17.5_8.8_8.8",
"r_55_55.0_0_0_0", "r_55_38.4_13.8_2.2_0.6", "r_55_38.4_8.3_7.2_1.1", "r_55_27.5_13.8_6.9_6.9")
sims12_names = list("r_100_99.0_0.6_0.3_0.1", "r_100_70.0_25.0_4.0_1.0", "r_100_70.0_15.0_13.0_2.0", "r_100_50.0_25.0_12.5_12.5",
"r_85_84.0_0.6_0.3_0.1", "r_85_59.5_21.3_3.5_0.8", "r_85_59.5_12.8_11.1_1.6", "r_85_42.5_21.3_10.6_10.6",
"r_70_69.0_0.6_0.3_0.1", "r_70_49.0_17.1_2.5_0.7", "r_70_49.0_10.5_9.10_1.4", "r_70_35.0_17.5_8.8_8.8",
"r_55_54.0_0.6_0.3_0.1", "r_55_38.4_29.8_2.2_0.6", "r_55_38.4_8.3_7.2_1.1", "r_55_27.5_13.8_6.9_6.9")
####################### CIed Plot mean and Sd #######################
library(reshape)   # melt()
library(ggplot2)
# Combo 1
# NOTE(review): as above, only the last setwd() takes effect (Combo 3 here);
# comment out the unwanted ones when switching combos.
setwd("~/Desktop/HIV/CIed_results/Combo 1")
setwd("~/Desktop/HIV/CIed_results/Combo 2")
setwd("~/Desktop/HIV/CIed_results/Combo 3")
# Re-load the two scenarios of interest from the CI'ed result folders.
r_70_69.0_0.6_0.3_0.1 = read.csv("./results69_06_03_01/computed_mean_sd6.csv", sep = ",")
r_55_54.0_0.6_0.3_0.1 = read.csv("./results54_06_03_01/computed_mean_sd6.csv", sep = ",")
####################### Graph Setup and Lables #######################
next_sim = 11  # NOTE(review): assigned but never used below
x = 1:20       # plot positions, one per entry of Rvalues
next_graph = "r_70_69.0_0.6_0.3_0.1"  # scenario to plot; its name is parsed for the title
computed_mean_sd = r_70_69.0_0.6_0.3_0.1
# Total across the three resistance classes: means summed, sds combined in quadrature.
computed_mean_sd$total = computed_mean_sd$R1_mean+computed_mean_sd$R2_mean+computed_mean_sd$R3_mean
computed_mean_sd$total_sd = sqrt(computed_mean_sd$R1_sd^2 +computed_mean_sd$R2_sd^2+computed_mean_sd$R3_sd^2)
# Joint Plots
# Build the plot title from the scenario name: underscore-separated parts
# 2..6 are the total infection probability and the d1/d2/d3/beyond splits.
title_parts = strsplit(next_graph, "_")
title = paste('Total Infection Pr (%): ', title_parts[[1]][2],
'
d1 =', title_parts[[1]][3],
', d2 =', title_parts[[1]][4],
', d3 =', title_parts[[1]][5],
', beyond =', title_parts[[1]][6]
, sep = " ")
####################### Stacked graphs #######################
# Reshape computed_mean_sd into long format for an overlaid area plot.
# NOTE(review): columns are dropped/overwritten purely by position below, so
# this block silently breaks if the CSV column order ever changes; verify
# the indices against computed_mean_sd6.csv before reusing.
reshaped_data = computed_mean_sd
reshaped_data = reshaped_data[-1]
reshaped_data = reshaped_data[-3]
reshaped_data = reshaped_data[-4]
reshaped_data = reshaped_data[-5]
reshaped_data = reshaped_data[-6]
reshaped_data = reshaped_data[-7]
# Presumably "total infected" minus "resistant to at least 1" (cf. the names
# assigned below) giving infected-but-not-resistant -- confirm column layout.
reshaped_data[2] = reshaped_data[5] - reshaped_data[6]
reshaped_data = reshaped_data[-3]
reshaped_data[1] = reshaped_data[1]*3/10  # r -> regimen effectiveness in % (r * 0.3)
names(reshaped_data) = c("r", "Infected, not Resistant ", "Resistant to 3", "Total Infected", "Resistant to at least 1")
reshaped_data <- melt(reshaped_data, id="r")
# Reorder factor levels by descending value so larger series draw first
# (i.e. behind the smaller ones in the overlaid areas).
reshaped_data$variable <- factor(reshaped_data$variable, unique(reshaped_data[order(reshaped_data$value, decreasing = T),"variable"]) )
p <- ggplot(reshaped_data, aes(r, value))
p <- p + labs(x = "Effectiveness of triple regimen(%)", y = "# of cells with resistance") + ggtitle(title)
# position = 'identity': areas overlap rather than being truly stacked.
p <- p + geom_area(aes(colour = variable, fill= variable), position = 'identity')
p <- p + theme(legend.position="none")
p <- p + ylim(0, 5200)
p
####################### Line graph with error bars #######################
# Base-graphics version: one line per resistance class plus the total, with
# error bars drawn as mean +/- sd/2 via arrows(angle = 90, code = 3).
plot(computed_mean_sd$R1_mean, cex=1,xaxt='n', ylim=c(0, 1800),
xlab='Effectiveness of triple regimen(%)',ylab='# of cells with resistance',
main= title, col='olivedrab3', pch=16, type = 'l')
legend("topleft", inset = 0.03, title = "Resistance to:",
legend = c("1 drug","2 drugs","3 drugs","At least 1"),
lty=c(1,1,1,1),lwd=c(2.5,2.5,2.5,2.5), pch = c(16, 15, 17, 18),
col=c("olivedrab3","orange", "purple", "deepskyblue2"),
cex = 0.8)
points(computed_mean_sd$R1_mean, cex=1,xaxt='n',col='olivedrab3',pch=16)
# Replace index positions on the x-axis with effectiveness (%) = Rvalues * 0.3.
axis(1, at=x, labels=Rvalues*0.3)
# Resistance to 1 drug (olive).
se.up = computed_mean_sd$R1_mean+(computed_mean_sd$R1_sd/2)
se.dn = computed_mean_sd$R1_mean-(computed_mean_sd$R1_sd/2)
arrows(x,se.dn,x,se.up,code=3,length=0.2,angle=90,col='olivedrab3')
# Resistance to 2 drugs (orange).
lines(computed_mean_sd$R2_mean, cex=1,xaxt='n',col='orange',type = 'l')
points(computed_mean_sd$R2_mean, cex=1,xaxt='n',col='orange',pch=15)
se.up = computed_mean_sd$R2_mean+(computed_mean_sd$R2_sd/2)
se.dn = computed_mean_sd$R2_mean-(computed_mean_sd$R2_sd/2)
arrows(x,se.dn,x,se.up,code=3,length=0.2,angle=90,col='orange')
# Resistance to 3 drugs (purple).
se.up = computed_mean_sd$R3_mean+(computed_mean_sd$R3_sd/2)
se.dn = computed_mean_sd$R3_mean-(computed_mean_sd$R3_sd/2)
lines(computed_mean_sd$R3_mean, cex=1,xaxt='n',col='purple',type = 'l')
points(computed_mean_sd$R3_mean, cex=1,xaxt='n',col='purple',pch=17)
arrows(x,se.dn,x,se.up,code=3,length=0.2,angle=90,col='purple')
# Total resistant (blue); total/total_sd were derived in the setup section.
se.up = computed_mean_sd$total+(computed_mean_sd$total_sd/2)
se.dn = computed_mean_sd$total-(computed_mean_sd$total_sd/2)
lines(computed_mean_sd$total, cex=1,xaxt='n',col='deepskyblue2',type = 'l')
points(computed_mean_sd$total, cex=1,xaxt='n',col='deepskyblue2',pch=18)
arrows(x,se.dn,x,se.up,code=3,length=0.2,angle=90,col='deepskyblue2')
| /cellStatesGraphs.R | no_license | p24601/HIV_Simulation | R | false | false | 8,535 | r | # Set working directory to results folder
# Set the working directory to the results folder of interest.
# NOTE(review): interactive analysis script; the second setwd() overrides the
# first, so only the last uncommented combo folder is actually used.
setwd("~/Desktop/HIV/results_broad_sweep/Combo 3")
setwd("~/Desktop/HIV/results_broad_sweep/Combo 1")
# Sweep values of the reproduction parameter r; the plots below label the
# x-axis as Rvalues * 0.3 (see the axis() call), i.e. regimen effectiveness %.
Rvalues <- c(150, 175, 187, 191, 192, 193,
194, 195, 200, 210, 220, 230, 260,
270, 280, 290, 300, 310, 320, 330)
####################### Broad Sweep Plot mean and Sd #######################
# One summary CSV per scenario. Object names appear to encode the scenario as
# r_<total infection pr>_<d1>_<d2>_<d3>_<beyond> in percent (cf. how the plot
# title is parsed from the name further below) -- confirm against the sim code.
r_100_99.0_0.6_0.3_0.1 = read.csv("./results99_06_03_01/computed_mean_sd6.csv", sep = ",")
r_100_70.0_25.0_4.0_1.0 = read.csv("./results70_25_4_1/computed_mean_sd6.csv", sep = ",")
r_100_70.0_15.0_13.0_2.0 = read.csv("./results70_15_13_2/computed_mean_sd6.csv", sep = ",")
r_100_50.0_25.0_12.5_12.5 = read.csv("./results50_25_125_125/computed_mean_sd6.csv", sep = ",")
r_85_84.0_0.6_0.3_0.1 = read.csv("./results84_06_03_01/computed_mean_sd6.csv", sep = ",")
r_85_59.5_21.3_3.5_0.8 = read.csv("./results595_2125_35_075/computed_mean_sd6.csv", sep = ",")
r_85_59.5_12.8_11.1_1.6 = read.csv("./results595_1275_1105_16/computed_mean_sd6.csv", sep = ",")
r_85_42.5_21.3_10.6_10.6 = read.csv("./results425_2125_1063_1063/computed_mean_sd6.csv", sep = ",")
r_70_69.0_0.6_0.3_0.1 = read.csv("./results69_06_03_01/computed_mean_sd6.csv", sep = ",")
r_70_49.0_17.1_2.5_0.7 = read.csv("./results49_171_25_07/computed_mean_sd6.csv", sep = ",")
r_70_49.0_10.5_9.10_1.4 = read.csv("./results49_105_910_14/computed_mean_sd6.csv", sep = ",")
r_70_35.0_17.5_8.8_8.8 = read.csv("./results35_175_875_875/computed_mean_sd6.csv", sep = ",")
r_55_54.0_0.6_0.3_0.1 = read.csv("./results54_06_03_01/computed_mean_sd6.csv", sep = ",")
r_55_38.4_29.8_2.2_0.6 = read.csv("./results384_2975_22_055/computed_mean_sd6.csv", sep = ",")
r_55_38.4_8.3_7.2_1.1 = read.csv("./results384_825_715_11/computed_mean_sd6.csv", sep = ",")
r_55_27.5_13.8_6.9_6.9 = read.csv("./results275_1375_688_688/computed_mean_sd6.csv", sep = ",")
r_85_85.0_0_0_0 = read.csv("./results85_0_0_0/computed_mean_sd6.csv", sep = ",")
r_85_59_21.3_3.5_0.9 = read.csv("./results59_213_35_9/computed_mean_sd6.csv", sep = ",")
r_70_70.0_0_0_0 = read.csv("./results70_0_0_0/computed_mean_sd6.csv", sep = ",")
r_55_55.0_0_0_0 = read.csv("./results55_0_0_0/computed_mean_sd6.csv", sep = ",")
r_55_38.4_13.8_2.2_0.6 = read.csv("./results384_138_22_6/computed_mean_sd6.csv", sep = ",")
# Two alternative groupings of the scenarios (they differ in which variant is
# used for the 85/70/55 totals), with parallel lists of their names.
sims3 = list(r_100_99.0_0.6_0.3_0.1, r_100_70.0_25.0_4.0_1.0, r_100_70.0_15.0_13.0_2.0, r_100_50.0_25.0_12.5_12.5,
r_85_85.0_0_0_0, r_85_59_21.3_3.5_0.9, r_85_59.5_12.8_11.1_1.6, r_85_42.5_21.3_10.6_10.6,
r_70_70.0_0_0_0, r_70_49.0_17.1_2.5_0.7, r_70_49.0_10.5_9.10_1.4, r_70_35.0_17.5_8.8_8.8,
r_55_55.0_0_0_0, r_55_38.4_13.8_2.2_0.6, r_55_38.4_8.3_7.2_1.1, r_55_27.5_13.8_6.9_6.9)
sims12 = list(r_100_99.0_0.6_0.3_0.1, r_100_70.0_25.0_4.0_1.0, r_100_70.0_15.0_13.0_2.0, r_100_50.0_25.0_12.5_12.5,
r_85_84.0_0.6_0.3_0.1, r_85_59.5_21.3_3.5_0.8, r_85_59.5_12.8_11.1_1.6, r_85_42.5_21.3_10.6_10.6,
r_70_69.0_0.6_0.3_0.1, r_70_49.0_17.1_2.5_0.7, r_70_49.0_10.5_9.10_1.4, r_70_35.0_17.5_8.8_8.8,
r_55_54.0_0.6_0.3_0.1, r_55_38.4_29.8_2.2_0.6, r_55_38.4_8.3_7.2_1.1, r_55_27.5_13.8_6.9_6.9)
sims3_names = list("r_100_99.0_0.6_0.3_0.1", "r_100_70.0_25.0_4.0_1.0", "r_100_70.0_15.0_13.0_2.0", "r_100_50.0_25.0_12.5_12.5",
"r_85_85.0_0_0_0", "r_85_59_21.3_3.5_0.9", "r_85_59.5_12.8_11.1_1.6", "r_85_42.5_21.3_10.6_10.6",
"r_70_70.0_0_0_0", "r_70_49.0_17.1_2.5_0.7", "r_70_49.0_10.5_9.10_1.4", "r_70_35.0_17.5_8.8_8.8",
"r_55_55.0_0_0_0", "r_55_38.4_13.8_2.2_0.6", "r_55_38.4_8.3_7.2_1.1", "r_55_27.5_13.8_6.9_6.9")
sims12_names = list("r_100_99.0_0.6_0.3_0.1", "r_100_70.0_25.0_4.0_1.0", "r_100_70.0_15.0_13.0_2.0", "r_100_50.0_25.0_12.5_12.5",
"r_85_84.0_0.6_0.3_0.1", "r_85_59.5_21.3_3.5_0.8", "r_85_59.5_12.8_11.1_1.6", "r_85_42.5_21.3_10.6_10.6",
"r_70_69.0_0.6_0.3_0.1", "r_70_49.0_17.1_2.5_0.7", "r_70_49.0_10.5_9.10_1.4", "r_70_35.0_17.5_8.8_8.8",
"r_55_54.0_0.6_0.3_0.1", "r_55_38.4_29.8_2.2_0.6", "r_55_38.4_8.3_7.2_1.1", "r_55_27.5_13.8_6.9_6.9")
####################### CIed Plot mean and Sd #######################
library(reshape)   # melt()
library(ggplot2)
# Combo 1
# NOTE(review): as above, only the last setwd() takes effect (Combo 3 here);
# comment out the unwanted ones when switching combos.
setwd("~/Desktop/HIV/CIed_results/Combo 1")
setwd("~/Desktop/HIV/CIed_results/Combo 2")
setwd("~/Desktop/HIV/CIed_results/Combo 3")
# Re-load the two scenarios of interest from the CI'ed result folders.
r_70_69.0_0.6_0.3_0.1 = read.csv("./results69_06_03_01/computed_mean_sd6.csv", sep = ",")
r_55_54.0_0.6_0.3_0.1 = read.csv("./results54_06_03_01/computed_mean_sd6.csv", sep = ",")
####################### Graph Setup and Lables #######################
next_sim = 11  # NOTE(review): assigned but never used below
x = 1:20       # plot positions, one per entry of Rvalues
next_graph = "r_70_69.0_0.6_0.3_0.1"  # scenario to plot; its name is parsed for the title
computed_mean_sd = r_70_69.0_0.6_0.3_0.1
# Total across the three resistance classes: means summed, sds combined in quadrature.
computed_mean_sd$total = computed_mean_sd$R1_mean+computed_mean_sd$R2_mean+computed_mean_sd$R3_mean
computed_mean_sd$total_sd = sqrt(computed_mean_sd$R1_sd^2 +computed_mean_sd$R2_sd^2+computed_mean_sd$R3_sd^2)
# Joint Plots
# Build the plot title from the scenario name: underscore-separated parts
# 2..6 are the total infection probability and the d1/d2/d3/beyond splits.
title_parts = strsplit(next_graph, "_")
title = paste('Total Infection Pr (%): ', title_parts[[1]][2],
'
d1 =', title_parts[[1]][3],
', d2 =', title_parts[[1]][4],
', d3 =', title_parts[[1]][5],
', beyond =', title_parts[[1]][6]
, sep = " ")
####################### Stacked graphs #######################
# Reshape computed_mean_sd into long format for an overlaid area plot.
# NOTE(review): columns are dropped/overwritten purely by position below, so
# this block silently breaks if the CSV column order ever changes; verify
# the indices against computed_mean_sd6.csv before reusing.
reshaped_data = computed_mean_sd
reshaped_data = reshaped_data[-1]
reshaped_data = reshaped_data[-3]
reshaped_data = reshaped_data[-4]
reshaped_data = reshaped_data[-5]
reshaped_data = reshaped_data[-6]
reshaped_data = reshaped_data[-7]
# Presumably "total infected" minus "resistant to at least 1" (cf. the names
# assigned below) giving infected-but-not-resistant -- confirm column layout.
reshaped_data[2] = reshaped_data[5] - reshaped_data[6]
reshaped_data = reshaped_data[-3]
reshaped_data[1] = reshaped_data[1]*3/10  # r -> regimen effectiveness in % (r * 0.3)
names(reshaped_data) = c("r", "Infected, not Resistant ", "Resistant to 3", "Total Infected", "Resistant to at least 1")
reshaped_data <- melt(reshaped_data, id="r")
# Reorder factor levels by descending value so larger series draw first
# (i.e. behind the smaller ones in the overlaid areas).
reshaped_data$variable <- factor(reshaped_data$variable, unique(reshaped_data[order(reshaped_data$value, decreasing = T),"variable"]) )
p <- ggplot(reshaped_data, aes(r, value))
p <- p + labs(x = "Effectiveness of triple regimen(%)", y = "# of cells with resistance") + ggtitle(title)
# position = 'identity': areas overlap rather than being truly stacked.
p <- p + geom_area(aes(colour = variable, fill= variable), position = 'identity')
p <- p + theme(legend.position="none")
p <- p + ylim(0, 5200)
p
####################### Line graph with error bars #######################
# Base-graphics version: one line per resistance class plus the total, with
# error bars drawn as mean +/- sd/2 via arrows(angle = 90, code = 3).
plot(computed_mean_sd$R1_mean, cex=1,xaxt='n', ylim=c(0, 1800),
xlab='Effectiveness of triple regimen(%)',ylab='# of cells with resistance',
main= title, col='olivedrab3', pch=16, type = 'l')
legend("topleft", inset = 0.03, title = "Resistance to:",
legend = c("1 drug","2 drugs","3 drugs","At least 1"),
lty=c(1,1,1,1),lwd=c(2.5,2.5,2.5,2.5), pch = c(16, 15, 17, 18),
col=c("olivedrab3","orange", "purple", "deepskyblue2"),
cex = 0.8)
points(computed_mean_sd$R1_mean, cex=1,xaxt='n',col='olivedrab3',pch=16)
# Replace index positions on the x-axis with effectiveness (%) = Rvalues * 0.3.
axis(1, at=x, labels=Rvalues*0.3)
# Resistance to 1 drug (olive).
se.up = computed_mean_sd$R1_mean+(computed_mean_sd$R1_sd/2)
se.dn = computed_mean_sd$R1_mean-(computed_mean_sd$R1_sd/2)
arrows(x,se.dn,x,se.up,code=3,length=0.2,angle=90,col='olivedrab3')
# Resistance to 2 drugs (orange).
lines(computed_mean_sd$R2_mean, cex=1,xaxt='n',col='orange',type = 'l')
points(computed_mean_sd$R2_mean, cex=1,xaxt='n',col='orange',pch=15)
se.up = computed_mean_sd$R2_mean+(computed_mean_sd$R2_sd/2)
se.dn = computed_mean_sd$R2_mean-(computed_mean_sd$R2_sd/2)
arrows(x,se.dn,x,se.up,code=3,length=0.2,angle=90,col='orange')
# Resistance to 3 drugs (purple).
se.up = computed_mean_sd$R3_mean+(computed_mean_sd$R3_sd/2)
se.dn = computed_mean_sd$R3_mean-(computed_mean_sd$R3_sd/2)
lines(computed_mean_sd$R3_mean, cex=1,xaxt='n',col='purple',type = 'l')
points(computed_mean_sd$R3_mean, cex=1,xaxt='n',col='purple',pch=17)
arrows(x,se.dn,x,se.up,code=3,length=0.2,angle=90,col='purple')
# Total resistant (blue); total/total_sd were derived in the setup section.
se.up = computed_mean_sd$total+(computed_mean_sd$total_sd/2)
se.dn = computed_mean_sd$total-(computed_mean_sd$total_sd/2)
lines(computed_mean_sd$total, cex=1,xaxt='n',col='deepskyblue2',type = 'l')
points(computed_mean_sd$total, cex=1,xaxt='n',col='deepskyblue2',pch=18)
arrows(x,se.dn,x,se.up,code=3,length=0.2,angle=90,col='deepskyblue2')
|
#### ---- Project: APHRC Wash Data ----
#### ---- Task: Modeling real data ----
#### ---- Extract predicted effect sizes ----
#### ---- By: Steve and Jonathan ----
#### ---- Date: 2020 Jan 11 (Sat) ----
library(splines)
library(effects)
library(glmmTMB)
load("water_tmb.rda")
## Which scale to plot the predictions
### See ?plot.effects
linearpredictor <- TRUE
## water glm model
mod <- water_tmb_model
### Conditionaled on all other predictors
pred_vars <- attr(terms(model_form), "term.labels")
pred_vars <- pred_vars[!grepl("\\|", pred_vars)]
pred_vars <- gsub(".*\\:|.*\\(|\\,.*|\\).*", "", pred_vars)
names(pred_vars) <- pred_vars
pred_vars
## Code below can extract all the effects but too slow and requires more memory. Use for loop instead
# effect_df <- predictorEffects(mod)
effect_df <- lapply(pred_vars, function(x){
mod <- Effect(x, xlevels = 50, mod = mod, latent = TRUE)
if(linearpredictor){
mod_df <- data.frame(mod$x, fit = mod$fit, lower = mod$lower, upper = mod$upper)
mod_df$method <- "effects"
} else {
mod_df <- as.data.frame(mod)
mod_df$method <- "effects"
}
return(mod_df)
})
save(file = "water_condeffect_tmb.rda"
, effect_df
, scale_mean
, scale_scale
, base_year
)
| /water_condeffect_tmb.R | no_license | CYGUBICKO/hh | R | false | false | 1,218 | r | #### ---- Project: APHRC Wash Data ----
#### ---- Task: Modeling real data ----
#### ---- Extract predicted effect sizes ----
#### ---- By: Steve and Jonathan ----
#### ---- Date: 2020 Jan 11 (Sat) ----
library(splines)
library(effects)
library(glmmTMB)
load("water_tmb.rda")
## Which scale to plot the predictions
### See ?plot.effects
linearpredictor <- TRUE
## water glm model
mod <- water_tmb_model
### Conditionaled on all other predictors
pred_vars <- attr(terms(model_form), "term.labels")
pred_vars <- pred_vars[!grepl("\\|", pred_vars)]
pred_vars <- gsub(".*\\:|.*\\(|\\,.*|\\).*", "", pred_vars)
names(pred_vars) <- pred_vars
pred_vars
## Code below can extract all the effects but too slow and requires more memory. Use for loop instead
# effect_df <- predictorEffects(mod)
effect_df <- lapply(pred_vars, function(x){
mod <- Effect(x, xlevels = 50, mod = mod, latent = TRUE)
if(linearpredictor){
mod_df <- data.frame(mod$x, fit = mod$fit, lower = mod$lower, upper = mod$upper)
mod_df$method <- "effects"
} else {
mod_df <- as.data.frame(mod)
mod_df$method <- "effects"
}
return(mod_df)
})
save(file = "water_condeffect_tmb.rda"
, effect_df
, scale_mean
, scale_scale
, base_year
)
|
/R/EDA/dplyr y ggplots 2.R | no_license | edisongom/Data-Science | R | false | false | 4,294 | r | ||
## Fit a linear model of Ozone on Temperature and Wind speed from a CSV
## export of air-quality data.
## NOTE(review): the absolute Windows path makes this non-portable --
## consider a relative path or file.path().
ozoneData <-read.csv("C:\\Users\\nbhosale\\Downloads\\Big_Data-master\\Big_Data-master\\Ozone_data.csv")
ozoneData
summary(lm(Ozone~Temp+Wind,data=ozoneData))
ozoneData
summary(lm(Ozone~Temp+Wind,data=ozoneData)) |
## This script describes two functions which demonstrate caching capabilities in R to help
## optimize processing by leveraging cache for base information that is used repeatedly as is
## demonstrated in calculating the inverse of a matrix which can be a very costly operation.
## Two functions defined will demonstrate caching function 1 creates the special matrix object
## and the second uses cache to compute the inverse of the matrix.
## Create a cacheable matrix: a list of four closures sharing one
## environment that holds the matrix `x` and its lazily computed inverse.
## The cached inverse starts as NULL and is invalidated whenever the
## matrix is replaced via set().
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  # Replace the stored matrix and drop any stale cached inverse.
  set <- function(new_matrix) {
    x <<- new_matrix
    cached_inverse <<- NULL
  }
  # Return the stored matrix.
  get <- function() {
    x
  }
  # Store a freshly computed inverse in the shared environment.
  setinverse <- function(value) {
    cached_inverse <<- value
  }
  # Return the cached inverse, or NULL if none has been computed yet.
  getinverse <- function() {
    cached_inverse
  }
  list(set = set,
       get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## Return the inverse of a "cacheMatrix" created by makeCacheMatrix().
## If the inverse has already been computed (and the matrix has not
## changed), the cached value is returned directly with a message;
## otherwise it is computed with solve(), cached for future calls,
## and returned.
##
## x   : object produced by makeCacheMatrix()
## ... : additional arguments forwarded to solve() (e.g. `tol =`)
cacheSolve <- function(x, ...)
{
  inv <- x$getinverse()
  if (!is.null(inv)) {
    message("getting cached data.")
    return(inv)
  }
  data <- x$get()
  # Bug fix: `...` was accepted by the signature but silently dropped;
  # forward it so solve() options actually take effect.
  inv <- solve(data, ...)
  x$setinverse(inv)
  inv
}
| /cachematrix.R | no_license | adityaem/ProgrammingAssignment2 | R | false | false | 1,551 | r | ## This script describes two functions which demonstrate caching capabilities in R to help
## optimize processing by leveraging cache for base information that is used repeatedly as is
## demonstrated in calculating the inverse of a matrix which can be a very costly operation.
## Two functions defined will demonstrate caching function 1 creates the special matrix object
## and the second uses cache to compute the inverse of the matrix.
## Function makeCacheMatrix:This function creates a special "matrix" object that can cache its inverse.
makeCacheMatrix <- function(x = matrix())
{
inv <- NULL
# 1. set the value of the matrix
set <- function(y)
{
x <<- y
inv <<- NULL
}
# 2. get the value of the matrix
get <- function() x
# 3. set the value of inverse of the matrix
setinverse <- function(inverse) inv <<- inverse
# 4. get the value of inverse of the matrix
getinverse <- function() inv
list(set=set, get=get, setinverse=setinverse, getinverse=getinverse)
}
## Function cacheSolve:This function computes the inverse of the special "matrix" returned by makeCacheMatrix above.
## If the inverse has already been calculated (and the matrix has not changed),
## then the cachesolve should retrieve the inverse from the cache.
cacheSolve <- function(x, ...)
{
inv <- x$getinverse()
if(!is.null(inv))
{
message("getting cached data.")
return(inv)
}
data <- x$get()
inv <- solve(data)
x$setinverse(inv)
inv
}
|
## Example code extracted from the nlme help page ?ACF.lme: empirical
## autocorrelation of within-group residuals for lme fits.
library(nlme)
### Name: ACF.lme
### Title: Autocorrelation Function for lme Residuals
### Aliases: ACF.lme
### Keywords: models
### ** Examples
## Mixed model for the Ovary hormone data with a sinusoidal time trend
## and a random intercept + sin slope per Mare.
fm1 <- lme(follicles ~ sin(2*pi*Time) + cos(2*pi*Time),
Ovary, random = ~ sin(2*pi*Time) | Mare)
ACF(fm1, maxLag = 11)
# Pinheiro and Bates, p240-241
## Same fixed effects but a diagonal random-effects covariance (pdDiag).
fm1Over.lme <- lme(follicles ~ sin(2*pi*Time) +
cos(2*pi*Time), data=Ovary,
random=pdDiag(~sin(2*pi*Time)) )
## Outer parentheses print the ACF object while assigning it.
(ACF.fm1Over <- ACF(fm1Over.lme, maxLag=10))
plot(ACF.fm1Over, alpha=0.01)
| /data/genthat_extracted_code/nlme/examples/ACF.lme.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 515 | r | library(nlme)
### Name: ACF.lme
### Title: Autocorrelation Function for lme Residuals
### Aliases: ACF.lme
### Keywords: models
### ** Examples
fm1 <- lme(follicles ~ sin(2*pi*Time) + cos(2*pi*Time),
Ovary, random = ~ sin(2*pi*Time) | Mare)
ACF(fm1, maxLag = 11)
# Pinheiro and Bates, p240-241
fm1Over.lme <- lme(follicles ~ sin(2*pi*Time) +
cos(2*pi*Time), data=Ovary,
random=pdDiag(~sin(2*pi*Time)) )
(ACF.fm1Over <- ACF(fm1Over.lme, maxLag=10))
plot(ACF.fm1Over, alpha=0.01)
|
## Simulate N draws from a 3-component Gaussian mixture, visualise the
## components, then fit a finite mixture model with Stan.
library(dplyr)
library(ggplot2)
library(ggthemes)
library(rstan)
options(mc.cores = 2)
# Number of data points
N <- 400
# Component means and standard deviations of the three latent states
mu <- c(3, 6, 9)
sigma <- c(2, 4, 3)
# Mixing proportions (must sum to 1)
Theta <- c(.5, .2, .3)
# Draw the latent component membership of each observation
z <- sample(1:3, size = N, prob = Theta, replace = TRUE)
# Standard-normal noise
epsilon <- rnorm(N)
# Simulate the data using the fact that y ~ normal(mu, sigma) can be
# expressed as y = mu + sigma*epsilon for epsilon ~ normal(0, 1)
y <- mu[z] + sigma[z]*epsilon
## Fix: data_frame() is deprecated in current tibble/dplyr; tibble() is
## the drop-in replacement with identical behaviour here.
tibble(y, z = as.factor(z)) %>%
  ggplot(aes(x = y, fill = z)) +
  geom_density(alpha = 0.3) +
  theme_economist() +
  ggtitle("Three data generating processes")
## Compile and sample the Stan mixture model.
compiled_model <- stan_model("finite_mixture_linear_regression.stan")
estimated_model <- sampling(
  compiled_model,
  data = list(N = N, y = y, n_groups = 3),
  iter = 6000)
traceplot(estimated_model)
summary(estimated_model)
| /finite_mixture_model.R | no_license | anhnguyendepocen/Statistical_Rethinking_Exercises | R | false | false | 922 | r | library(dplyr)
library(ggplot2)
library(ggthemes)
library(rstan)
options(mc.cores = 2)
# Number of data points
N <- 400
# Let's make three states
mu <- c(3, 6, 9)
sigma <- c(2, 4, 3)
# with probability
Theta <- c(.5, .2, .3)
# Draw which model each belongs to
z <- sample(1:3, size = N, prob = Theta, replace = T)
# Some white noise
epsilon <- rnorm(N)
# Simulate the data using the fact that y ~ normal(mu, sigma) can be
# expressed as y = mu + sigma*epsilon for epsilon ~ normal(0, 1)
y <- mu[z] + sigma[z]*epsilon
data_frame(y, z = as.factor(z)) %>%
ggplot(aes(x = y, fill = z)) +
geom_density(alpha = 0.3) +
theme_economist() +
ggtitle("Three data generating processes")
compiled_model <- stan_model("finite_mixture_linear_regression.stan")
estimated_model <- sampling(
compiled_model,
data = list(N = N, y = y, n_groups = 3),
iter = 6000)
traceplot(estimated_model)
summary(estimated_model)
|
## Train and evaluate a multi-class random forest on seas_features.csv,
## then export the variable-importance table.
library(randomForest)
n_trees <- 1500
m_try <- 2
## Uncomment for reproducible splits and forests:
# set.seed(123)
df <- read.csv("seas_features.csv", sep = ',')
df$label <- factor(df$label, levels = c(1, 2, 3, 4, 5),
                   labels = c("1", "2", "3", "4", "5"))
## 80/20 train/test split by row.
ind <- sample(2, nrow(df), replace = TRUE, prob = c(0.8, 0.2))
## Bug fix: randomForest()'s argument is `ntree`, not `ntrees`.  The
## misspelled name was silently absorbed by `...`, so the forest used the
## default of 500 trees instead of the intended 1500.
df.rf <- randomForest(label ~ ., data = df[ind == 1, ],
                      ntree = n_trees, mtry = m_try, importance = TRUE)
df.pred <- predict(df.rf, df[ind == 2, ])
r <- table(observed = df[ind == 2, "label"], predicted = df.pred)
## Overall accuracy: correct predictions (confusion-matrix diagonal) over
## the number of test rows.  sum(diag(.)) replaces the hard-coded
## 5-iteration loop and works for any number of classes.
accuracy <- sum(diag(r)) / sum(ind == 2)
imp <- importance(df.rf)
write.csv(imp, "feat_importance.csv")
varImpPlot(df.rf)
| /fs_random_forest.R | no_license | elidina/TesiRepo | R | false | false | 625 | r | library(randomForest)
n_trees <- 1500
m_try <- 2
df <- read.csv("seas_features.csv", sep=',')
df$label <- factor(df$label,levels = c(1,2,3,4,5), labels = c("1", "2", "3","4","5"))
ind <- sample(2, nrow(df), replace = TRUE, prob=c(0.8, 0.2))
df.rf <- randomForest(label ~ ., data=df[ind == 1,], ntrees=n_trees, mtry= m_try, importance=TRUE)
df.pred <- predict(df.rf, df[ind == 2,])
r <- table(observed = df[ind==2, "label"], predicted = df.pred)
acc <- 0
for (j in 1:5) {
acc <- acc + r[j,j]
}
accuracy <- acc/length(df[ind == 2,1])
imp <- importance(df.rf)
write.csv(imp,"feat_importance.csv")
varImpPlot(df.rf)
|
## Code extracted from the package vignette (knitr purl output).  The
## install chunks target the legacy biocLite installer and were tangled
## with eval=FALSE, hence the commented-out lines.
## ----src, eval=FALSE-----------------------------------------------------
# source("http://bioconductor.org/biocLite.R")
# useDevel()
# biocLite("devtools")
# biocLite("BioinformaticsFMRP/Bioc2017.TCGAbiolinks.ELMER",
# dependencies = TRUE, build_vignettes = TRUE)
## ----vignette, eval=FALSE------------------------------------------------
# library("Bioc2017.TCGAbiolinks.ELMER")
# Biobase::openVignette("Bioc2017.TCGAbiolinks.ELMER")
## ----sessioninfo, eval=TRUE----------------------------------------------
## The only chunk that actually runs: record the session for reproducibility.
sessionInfo()
| /docs/index.R | no_license | BioinformaticsFMRP/Bioc2017.TCGAbiolinks.ELMER | R | false | false | 548 | r | ## ----src, eval=FALSE-----------------------------------------------------
# source("http://bioconductor.org/biocLite.R")
# useDevel()
# biocLite("devtools")
# biocLite("BioinformaticsFMRP/Bioc2017.TCGAbiolinks.ELMER",
# dependencies = TRUE, build_vignettes = TRUE)
## ----vignette, eval=FALSE------------------------------------------------
# library("Bioc2017.TCGAbiolinks.ELMER")
# Biobase::openVignette("Bioc2017.TCGAbiolinks.ELMER")
## ----sessioninfo, eval=TRUE----------------------------------------------
sessionInfo()
|
#
# Copyright (C) 2005-2008 Friedrich Leisch
# $Id: utils.R 228 2017-04-11 07:08:48Z leisch $
#
## Build an S4 object of class `to` from the named list `from`.
## List names are partially matched against the slot names of `to`;
## any name that matches no slot raises an error.
list2object <- function(from, to) {
  slot_names <- slotNames(to)
  matched <- pmatch(names(from), slot_names)
  if (any(is.na(matched))) {
    bad <- names(from)[is.na(matched)]
    stop(paste("\nInvalid slot name(s) for class", to, ":",
               paste(bad, collapse = " ")))
  }
  names(from) <- slot_names[matched]
  do.call("new", c(from, Class = to))
}
## Print one formatted progress line: iteration number (right-padded to
## width 6), a label, and a numeric value (e.g. the current
## log-likelihood of an iterative fit).
printIter <- function(iter, logLik, label = "Log-likelihood",
                      format = "f", width = 12) {
  iter_txt <- formatC(iter, width = 6)
  val_txt <- formatC(logLik, width = width, format = format)
  cat(iter_txt, label, ":", val_txt, "\n")
}
## Colour palettes used throughout flexclust.  Generated once with
## colorspace::hcl() at four chroma/luminance settings and reordered
## (ORDER = c(1,3,5,7,2,4,6,8)) so neighbouring indices get well
## separated hues.
## library(colorspace)
## x = hcl(seq(0, 360*7/8, length = 8), c=30, l=85); dput(x[ORDER])
LightColors <- c("#FAC8D1", "#D4D8AE", "#A3E0D8", "#D5D0F6",
                 "#EECEB7", "#B5DFBD", "#B2DAEF", "#F1C8EA")
## hcl(..., c=65, l=85)
MedColors <- c("#FFB8CC", "#D4DB76", "#2BEDDC", "#D5CBFF",
               "#FFC88F", "#88E99F", "#72E2FF", "#FFB7FF")
## hcl(..., c=100, l=65)
FullColors <- c("#FF6C91", "#9DA700", "#00C1A9", "#9F8CFF",
                "#DE8C00", "#00BA38", "#00B4F0", "#F564E3")
## hcl(..., c=40, l=65)
DarkColors <- c("#CC8D99", "#9DA268", "#4EADA2", "#9E98CA",
                "#BE9675", "#71AB7E", "#69A6C0", "#C28DBA")
## Return flexclust palette colours.
## n     : indices (1..8) into the chosen palette
## color : palette variant ("full", "medium", "light" or "dark")
## grey  : if TRUE, return a single grey matching the variant's
##         luminance instead of colours (n is then ignored)
flxColors <- function(n = 1:8, color = c("full", "medium", "light", "dark"),
                      grey = FALSE)
{
    color <- match.arg(color)
    if (color %in% c("light", "medium")) {
        ## both high-luminance variants share the light grey
        if (grey) return("#D4D4D4")
        if (color == "light") LightColors[n] else MedColors[n]
    } else {
        if (grey) return("#9E9E9E")
        if (color == "full") FullColors[n] else DarkColors[n]
    }
}
flxPalette <- function(n, ...) flxColors(1:n)
###**********************************************************
## Extract the design matrix stored in a cluster object's data slot.
## Returns NULL when no data is attached, unless `error = TRUE`, in
## which case an error is raised instead.
getData <- function(x, error = FALSE)
{
  if (!empty(x@data)) {
    return(x@data@get("designMatrix"))
  }
  if (error) stop("Cluster object contains no data.")
  NULL
}
###**********************************************************
## Map a colour specification to one colour per observation.
## - col == NULL        : start from up to 8 default "full" colours.
## - length(col) <= k   : recycle to one colour per cluster, then index
##                        by cluster membership, so every point of a
##                        cluster shares a colour.
## - length(col) > k    : recycle col over the observations directly
##                        (one entry per row of object@cldist).
expandColors <- function(col, object)
{
  k <- object@k
  if (is.null(col)) {
    col <- flxColors(n = 1:min(k, 8), color = "full")
  }
  if (length(col) > k) {
    rep(col, length = nrow(object@cldist))
  } else {
    rep(col, length = k)[object@cluster]
  }
}
###**********************************************************
## Apply FUN over X sequentially or in parallel, depending on `multicore`:
##   - a `cluster` object (from parallel::makeCluster): parLapply()
##   - TRUE : mclapply() (forking; degrades to serial on Windows)
##   - FALSE: plain lapply()
## Extra arguments in `...` are forwarded to FUN in every branch.
## Bug fix: `...` was previously not forwarded in the parLapply() branch,
## so extra arguments to FUN were silently dropped on a cluster.
MClapply <- function(X, FUN, multicore = TRUE, ...)
{
    if (inherits(multicore, "cluster"))
        parLapply(multicore, X, FUN, ...)
    else if (multicore)
        mclapply(X, FUN, ...)
    else
        lapply(X, FUN, ...)
}
| /R/utils.R | no_license | cran/flexclust | R | false | false | 3,078 | r | #
# Copyright (C) 2005-2008 Friedrich Leisch
# $Id: utils.R 228 2017-04-11 07:08:48Z leisch $
#
list2object = function(from, to){
n = names(from)
s = slotNames(to)
p = pmatch(n, s)
if(any(is.na(p)))
stop(paste("\nInvalid slot name(s) for class",
to, ":", paste(n[is.na(p)], collapse=" ")))
names(from) = s[p]
do.call("new", c(from, Class=to))
}
printIter <- function(iter, logLik, label="Log-likelihood",
format="f", width=12)
cat(formatC(iter, width=6),
label, ":", formatC(logLik, width=width, format=format),"\n")
## library(colorspace)
## ORDER=c(1,3,5,7,2,4,6,8)
## dput(x[ORDER])
## x = hcl(seq(0, 360*7/8, length = 8), c=30, l=85)
LightColors <- c("#FAC8D1", "#D4D8AE", "#A3E0D8", "#D5D0F6",
"#EECEB7", "#B5DFBD", "#B2DAEF", "#F1C8EA")
## dput(hcl(seq(0, 360*7/8, length = 8), c=65, l=85)[ORDER])
MedColors <- c("#FFB8CC", "#D4DB76", "#2BEDDC", "#D5CBFF",
"#FFC88F", "#88E99F", "#72E2FF", "#FFB7FF")
## x = hcl(seq(0, 360*7/8, length = 8), c=100, l=65)
FullColors <- c("#FF6C91", "#9DA700", "#00C1A9", "#9F8CFF",
"#DE8C00", "#00BA38", "#00B4F0", "#F564E3")
## x=hcl(seq(0, 360*7/8, length = 8), c=40, l=65)
DarkColors <- c("#CC8D99", "#9DA268", "#4EADA2", "#9E98CA",
"#BE9675", "#71AB7E", "#69A6C0", "#C28DBA")
flxColors <- function(n=1:8, color=c("full","medium", "light","dark"),
grey=FALSE)
{
color <- match.arg(color)
if(color=="light"){
if(grey)
return("#D4D4D4")
else
return(LightColors[n])
}
if(color=="medium"){
if(grey)
return("#D4D4D4")
else
return(MedColors[n])
}
else{
if(grey) return("#9E9E9E")
if(color=="full"){
return(FullColors[n])
}
else{
return(DarkColors[n])
}
}
}
flxPalette <- function(n, ...) flxColors(1:n)
###**********************************************************
getData <- function(x, error=FALSE)
{
if(empty(x@data)){
if(error) stop("Cluster object contains no data.")
z <- NULL
}
else{
z <- x@data@get("designMatrix")
}
z
}
###**********************************************************
## if length(col)<=k first recycle to k, then do col[cluster]
## else simply recycle to number of observations
expandColors <- function(col, object)
{
k <- object@k
if(is.null(col))
col <- flxColors(n=1:min(k, 8) , color="full")
if(length(col) <= k){
col <- rep(col, length=k)
col <- col[object@cluster]
}
else{
col <- rep(col, length=nrow(object@cldist))
}
col
}
###**********************************************************
MClapply <- function(X, FUN, multicore=TRUE, ...)
{
if(inherits(multicore, "cluster"))
parLapply(multicore, X, FUN)
else if(multicore)
mclapply(X, FUN, ...)
else
lapply(X, FUN, ...)
}
|
## Stepwise exploration of linear models for medical charges.
## Response: charges; candidate predictors: age, sex, bmi, children, region.
data_medical_charges <- read.csv("~/Documents/Review phase/Block 1/R.R/Course material/data_medical_charges.csv")
View(data_medical_charges)
## Baseline model with three predictors.
model1 <- lm(charges ~ age + bmi + sex, data =data_medical_charges)
summary(model1)
# check the assumption 3 (linear association): residuals vs fitted
plot(model1, which=1)
# check the assumption 4 (homoskedasticity): scale-location plot
plot(model1, which=3)
# check the assumption 5 (normality of residuals): normal Q-Q plot
plot(model1, which=2)
# check potential outliers / influential points (Cook's distance)
plot(model1, which=4)
## Larger model adding children and region.
model2 <- lm(charges~ age+sex+bmi+children+region,data = data_medical_charges)
summary(model2)
# region is not statistically significant (p-value > 5%, no significance
# stars), so it is dropped in the next model
model3 <- lm(charges~ age+sex+bmi+children,data = data_medical_charges)
summary(model3)
## F-test comparing the nested models (model1 is nested in model3).
anova(model1, model3)
| /lab5.R | no_license | Emminey/R-code | R | false | false | 798 | r | data_medical_charges <- read.csv("~/Documents/Review phase/Block 1/R.R/Course material/data_medical_charges.csv")
View(data_medical_charges)
model1 <- lm(charges ~ age + bmi + sex, data =data_medical_charges)
summary(model1)
# check the assumption 3 (linear association):
plot(model1, which=1)
# check the assumption 4 (homoskedastic):
plot(model1, which=3)
# check the assumption 5 (normality):
plot(model1, which=2)
# check potential outliers
plot(model1, which=4)
model2 <- lm(charges~ age+sex+bmi+children+region,data = data_medical_charges)
summary(model2)
#we can remove region because it is has no significant; here look on p- value if it is great than 5%, or not having ***
model3 <- lm(charges~ age+sex+bmi+children,data = data_medical_charges)
summary(model3)
anova(model1, model3)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{bwght}
\alias{bwght}
\title{bwght}
\format{
A data frame with 1388 observations.
\describe{
\item{faminc}{1988 family income, $1000s}
\item{cigtax}{cig. tax in home state, 1988}
\item{cigprice}{cig. price in home state, 1988}
\item{bwght}{birth weight, ounces}
\item{fatheduc}{father's yrs of educ}
\item{motheduc}{mother's yrs of educ}
\item{parity}{birth order of child}
\item{male}{=1 if male child}
\item{white}{=1 if white}
\item{cigs}{cigs smked per day while preg}
}
}
\source{
Jeffrey M. Wooldridge (2006): \emph{Introductory Econometrics: A Modern Approach},
3rd ed., Thomson South-Western.
}
\usage{
data("bwght")
}
\description{
Birth weight and cigarette smoking.
}
\details{
Data from J. Mullahy (1997), “Instrumental-Variable Estimation of
Count Data Models: Applications to Models of Cigarette Smoking Behavior,”
\emph{Review of Economics and Statistics} 79, 586-593. Professor Mullahy kindly
provided the data. He obtained them from the 1988 National Health
Interview Survey.
}
\keyword{datasets}
| /man/bwght.Rd | permissive | jcpernias/ec1027 | R | false | true | 1,122 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{bwght}
\alias{bwght}
\title{bwght}
\format{
A data frame with 1388 observations.
\describe{
\item{faminc}{1988 family income, $1000s}
\item{cigtax}{cig. tax in home state, 1988}
\item{cigprice}{cig. price in home state, 1988}
\item{bwght}{birth weight, ounces}
\item{fatheduc}{father's yrs of educ}
\item{motheduc}{mother's yrs of educ}
\item{parity}{birth order of child}
\item{male}{=1 if male child}
\item{white}{=1 if white}
\item{cigs}{cigs smked per day while preg}
}
}
\source{
Jeffrey M. Wooldridge (2006): \emph{Introductory Econometrics: A Modern Approach},
3rd ed., Thomson South-Western.
}
\usage{
data("bwght")
}
\description{
Birth weight and cigarette smoking.
}
\details{
Data from J. Mullahy (1997), “Instrumental-Variable Estimation of
Count Data Models: Applications to Models of Cigarette Smoking Behavior,”
\emph{Review of Economics and Statistics} 79, 586-593. Professor Mullahy kindly
provided the data. He obtained them from the 1988 National Health
Interview Survey.
}
\keyword{datasets}
|
rm(list = ls())
# Load Library ------------------------------------------------------------
library(tidyverse)
library(GGally)
library(rstan)
theme_set(theme_bw() +
theme(axis.text.x = element_text(size=15),
axis.text.y = element_text(size=15)) +
theme(axis.title = element_text(size = 15)) +
theme(text = element_text(family = "MEI"))
)
windowsFonts("MEI"=windowsFont("Meiryo"))
# Load data ---------------------------------------------------------------
d <- airquality %>%
as_tibble() %>%
select(-Month, -Day) %>%
na.omit()
d <- d %>%
mutate(log_Ozone = log(Ozone)) %>%
select(Ozone, log_Ozone, everything())
# Rough plot --------------------------------------------------------------
# ggpairs(d)
ggpairs(d,
aes(alpha = 0.5),
upper=list(continuous=wrap("cor",size=5))) +
theme(strip.text=element_text(size=10, colour = "white"),
strip.background = element_rect(fill = "navy"))
# Settings of MCMC ----
dir <- "001__Model/"
scr <- str_c("./", dir, "QR_repara_LinearModel.stan")
par <- c("alpha", "theta", "beta", "sigma", "y_pred")
war <- 1000
ite <- 11000
see <- 123
dig <- 3
cha <- 4
# Data For MCMC ----
# Scaling data for Model selection by coefficient velue
d <- d %>%
scale() %>%
as_tibble() %>%
filter(log_Ozone > -3.8)
data = list(N = NROW(d),
K = NCOL(d) - 2,
y = d$log_Ozone,
x = d %>% select(Solar.R, Wind, Temp) %>% as.matrix())
stanmodel <- stan_model(file = scr)
fit <- sampling(stanmodel,
data=data,
pars=par,
warmup=war,
iter=ite,
seed=see,
chains=cha,
verbose=F)
print(fit, pars=par, digits_summary=dig)
ms <- summary(fit)$summary %>%
as_tibble(rownames = NA) %>%
mutate(name = row.names(.)) %>%
filter(grepl("y_pred", name)) %>%
bind_cols(d, .)
ms %>% glimpse()
model <- ms %>% lm(mean ~ log_Ozone, data = .)
r2 <- model %>%
summary %>%
.$r.squared %>%
round(., 2)
ggplot(ms, aes(x = log_Ozone)) +
geom_ribbon(aes(ymin = `2.5%`, ymax = `97.5%`),
fill = "lightblue",
alpha = 0.4) +
geom_ribbon(aes(ymin = `25%`, ymax = `75%`),
fill = "lightblue",
alpha = 0.8) +
geom_point(aes(y = mean),
alpha = 0.6,
size = 3) +
geom_text(aes(x = -2.5, y = 2.5,
label = str_c("R^2 : ", r2)),
hjust = 0,
size = 10) +
coord_cartesian(xlim = c(-3, 3),
ylim = c(-3, 3)) +
geom_abline(slope = 1,
colour = "red") +
NULL
# Formattable ----
library(formattable)
Summary <- summary(fit)$summary[, c(1,3,4,8,9,10)]
df <- data.frame(Summary)
df$Rhat[is.nan(df$Rhat)] <- 0
for(i in 1:length(df)){
df[, i] <- digits(df[, i], 3)
}
df <- df[-length(df$mean), ]
formattable(df[1:8, ], list(
mean = formatter('span', style=x ~ style(color=ifelse(x<0, 'steelblue', 'tomato'))),
sd = color_bar('orange'),
X2.5. = formatter('span', style=x ~ ifelse(x<0, style(color='steelblue', font.weight='bold'), NA)),
X97.5. = formatter('span', style=x ~ ifelse(x<0, style(color='steelblue', font.weight='bold'), NA)),
n_eff = formatter('span', style = x ~ style(color=ifelse(rank(-x)<=c(length(df$n_eff)-1),
'gray', 'tomato')),
x ~ sprintf('%.2f (rank: %g)', x, rank(-x))),
Rhat = formatter('span', x ~ digits(x, 2),
style = x ~ style(color=ifelse(x>=1.1, 'tomato', 'green')),
x ~ icontext(ifelse(x<1.1, 'ok', 'remove'), ifelse(x<1.1, 'OK', 'NG')))
))
# Fin ---------------------------------------------------------------------
| /Stan_airquality_QRLM.R | no_license | ShuheiAzuma/Bayes-Modeling-by-rstan | R | false | false | 3,958 | r |
rm(list = ls())
# Load Library ------------------------------------------------------------
library(tidyverse)
library(GGally)
library(rstan)
theme_set(theme_bw() +
theme(axis.text.x = element_text(size=15),
axis.text.y = element_text(size=15)) +
theme(axis.title = element_text(size = 15)) +
theme(text = element_text(family = "MEI"))
)
windowsFonts("MEI"=windowsFont("Meiryo"))
# Load data ---------------------------------------------------------------
d <- airquality %>%
as_tibble() %>%
select(-Month, -Day) %>%
na.omit()
d <- d %>%
mutate(log_Ozone = log(Ozone)) %>%
select(Ozone, log_Ozone, everything())
# Rough plot --------------------------------------------------------------
# ggpairs(d)
ggpairs(d,
aes(alpha = 0.5),
upper=list(continuous=wrap("cor",size=5))) +
theme(strip.text=element_text(size=10, colour = "white"),
strip.background = element_rect(fill = "navy"))
# Settings of MCMC ----
dir <- "001__Model/"
scr <- str_c("./", dir, "QR_repara_LinearModel.stan")
par <- c("alpha", "theta", "beta", "sigma", "y_pred")
war <- 1000
ite <- 11000
see <- 123
dig <- 3
cha <- 4
# Data For MCMC ----
# Scaling data for Model selection by coefficient velue
d <- d %>%
scale() %>%
as_tibble() %>%
filter(log_Ozone > -3.8)
data = list(N = NROW(d),
K = NCOL(d) - 2,
y = d$log_Ozone,
x = d %>% select(Solar.R, Wind, Temp) %>% as.matrix())
stanmodel <- stan_model(file = scr)
fit <- sampling(stanmodel,
data=data,
pars=par,
warmup=war,
iter=ite,
seed=see,
chains=cha,
verbose=F)
print(fit, pars=par, digits_summary=dig)
ms <- summary(fit)$summary %>%
as_tibble(rownames = NA) %>%
mutate(name = row.names(.)) %>%
filter(grepl("y_pred", name)) %>%
bind_cols(d, .)
ms %>% glimpse()
model <- ms %>% lm(mean ~ log_Ozone, data = .)
r2 <- model %>%
summary %>%
.$r.squared %>%
round(., 2)
ggplot(ms, aes(x = log_Ozone)) +
geom_ribbon(aes(ymin = `2.5%`, ymax = `97.5%`),
fill = "lightblue",
alpha = 0.4) +
geom_ribbon(aes(ymin = `25%`, ymax = `75%`),
fill = "lightblue",
alpha = 0.8) +
geom_point(aes(y = mean),
alpha = 0.6,
size = 3) +
geom_text(aes(x = -2.5, y = 2.5,
label = str_c("R^2 : ", r2)),
hjust = 0,
size = 10) +
coord_cartesian(xlim = c(-3, 3),
ylim = c(-3, 3)) +
geom_abline(slope = 1,
colour = "red") +
NULL
# Formattable ----
library(formattable)
Summary <- summary(fit)$summary[, c(1,3,4,8,9,10)]
df <- data.frame(Summary)
df$Rhat[is.nan(df$Rhat)] <- 0
for(i in 1:length(df)){
df[, i] <- digits(df[, i], 3)
}
df <- df[-length(df$mean), ]
formattable(df[1:8, ], list(
mean = formatter('span', style=x ~ style(color=ifelse(x<0, 'steelblue', 'tomato'))),
sd = color_bar('orange'),
X2.5. = formatter('span', style=x ~ ifelse(x<0, style(color='steelblue', font.weight='bold'), NA)),
X97.5. = formatter('span', style=x ~ ifelse(x<0, style(color='steelblue', font.weight='bold'), NA)),
n_eff = formatter('span', style = x ~ style(color=ifelse(rank(-x)<=c(length(df$n_eff)-1),
'gray', 'tomato')),
x ~ sprintf('%.2f (rank: %g)', x, rank(-x))),
Rhat = formatter('span', x ~ digits(x, 2),
style = x ~ style(color=ifelse(x>=1.1, 'tomato', 'green')),
x ~ icontext(ifelse(x<1.1, 'ok', 'remove'), ifelse(x<1.1, 'OK', 'NG')))
))
# Fin ---------------------------------------------------------------------
|
# idPIneg
#' Phosphoinositols (PI) annotation for ESI-
#'
#' PI identification based on fragmentation patterns for LC-MS/MS
#' AIF data acquired in negative mode.
#'
#' @param MS1 list with two data frames cointaining all peaks from the full MS
#' function ("peaklist" data frame) and the raw MS scans data ("rawScans" data
#' frame). They must have four columns: m.z, RT (in seconds), int (intensity)
#' and peakID (link between both data frames). "rawScans" data frame also needs
#' a extra column named "Scan", which indicates the scan order number. Output
#' of \link{dataProcessing} function. In case no coelution score needs to be
#' applied, this argument can be just the peaklist data frame.
#' @param MSMS1 list with two data frames cointaining all peaks from the high
#' energy function ("peaklist" data frame) and the raw MS scans data ("rawScans"
#' data frame). They must have four columns: m.z, RT (in seconds), int (intensity)
#' and peakID (link between both data frames). "rawScans" data frame also needs
#' a extra column named "Scan", which indicates the scan order number. Output
#' of \link{dataProcessing} function. In case no coelution score needs to be
#' applied, this argument can be just the peaklist data frame.
#' @param MSMS2 list with two data frames cointaining all peaks from a second high
#' energy function ("peaklist" data frame) and the raw MS scans data ("rawScans"
#' data frame). They must have four columns: m.z, RT (in seconds), int (intensity)
#' and peakID (link between both data frames). "rawScans" data frame also needs
#' a extra column named "Scan", which indicates the scan order number. Output
#' of \link{dataProcessing} function. In case no coelution score needs to be
#' applied, this argument can be just the peaklist data frame. Optional.
#' @param ppm_precursor mass tolerance for precursor ions. By default, 5 ppm.
#' @param ppm_products mass tolerance for product ions. By default, 10 ppm.
#' @param rttol total rt window for coelution between precursor and product
#' ions. By default, 3 seconds.
#' @param rt rt range where the function will look for candidates. By default,
#' it will search within all RT range in MS1.
#' @param adducts expected adducts for PI in ESI-. Adducts allowed can
#' be modified in adductsTable (dbs argument).
#' @param clfrags vector containing the expected fragments for a given lipid
#' class. See \link{checkClass} for details.
#' @param ftype character vector indicating the type of fragments in clfrags.
#' It can be: "F" (fragment), "NL" (neutral loss) or "BB" (building block).
#' See \link{checkClass} for details.
#' @param clrequired logical vector indicating if each class fragment is
#' required or not. If any of them is required, at least one of them must be
#' present within the coeluting fragments. See \link{checkClass} for details.
#' @param chainfrags_sn1 character vector containing the fragmentation rules for
#' the chain fragments in sn1 position. See \link{chainFrags} for details.
#' @param chainfrags_sn2 character vector containing the fragmentation rules for
#' the chain fragments in sn2 position. See \link{chainFrags} for details. If
#' empty, it will be estimated based on the difference between precursors and
#' sn1 chains.
#' @param intrules character vector specifying the fragments to compare. See
#' \link{checkIntensityRules}.
#' @param rates character vector with the expected rates between fragments given
#' as a string (i.e. "3/1"). See \link{checkIntensityRules}.
#' @param intrequired logical vector indicating if any of the rules is required.
#' If not, at least one must be verified to confirm the structure.
#' @param coelCutoff coelution score threshold between parent and fragment ions.
#' Only applied if rawData info is supplied. By default, 0.8.
#' @param dbs list of data bases required for annotation. By default, dbs
#' contains the required data frames based on the default fragmentation rules.
#' If these rules are modified, dbs may need to be supplied. See \link{createLipidDB}
#' and \link{assignDB}.
#'
#' @return List with PI annotations (results) and some additional information
#' (class fragments and chain fragments).
#'
#' @details \code{idPIneg} function involves 5 steps. 1) FullMS-based
#' identification of candidate PI as M-H. 2) Search of PI class fragments:
#' 241.0115, 223.0008, 259.0219 and 297.0375 coeluting with the precursor
#' ion. 3) Search of specific fragments that inform about chain composition at
#' sn1 (lysoPI as M-H resulting from the loss of the FA chain at sn2 or lysoPA
#' as M-H if it also losses the head group) and sn2 (lysoPI or lysoPA as M-H
#' resulting from the loss of the FA chain at sn1 or FA chain as M-H). 4) Look
#' for possible chains structure based on the combination of chain fragments.
#' 5) Check intensity rules to confirm chains position. In this case, lysoPI or
#' lysoPA from sn1 is at least 3 times more intense than lysoPI or lysoPA from
#' sn2.
#'
#' Results data frame shows: ID, class of lipid, CDB (total number
#' of carbons and double bounds), FA composition (specific chains composition if
#' it has been confirmed), mz, RT (in seconds), I (intensity, which comes
#' directly from de input), Adducts, ppm (m.z error), confidenceLevel (Subclass,
#' FA level, where chains are known but not their positions, or FA position
#' level) and PFCS (parent-fragment coelution score mean of all fragments used
#' for the identification).
#'
#' @note Isotopes should be removed before identification to avoid false
#' positives.
#' This function has been written based on fragmentation patterns observed for
#' two different platforms (QTOF 6550 from Agilent and Sinapt G2-Si from Waters),
#' but it may need to be customized for other platforms or acquisition settings.
#'
#' @examples
#' \donttest{
#' library(LipidMSdata)
#' idPIneg(MS1 = MS1_neg, MSMS1 = MSMS1_neg, MSMS2 = MSMS2_neg)
#' }
#'
#' @author M Isabel Alcoriza-Balaguer <maialba@alumni.uv.es>
idPIneg <- function(MS1, MSMS1, MSMS2, ppm_precursor = 5,
                    ppm_products = 10, rttol = 3, rt,
                    adducts = c("M-H"),
                    clfrags = c(241.0115, 223.0008, 259.0219, 297.0375),
                    clrequired = c(FALSE, FALSE, FALSE, FALSE),
                    ftype = c("F", "F", "F", "F"),
                    chainfrags_sn1 = c("lysopi_M-H", "lysopa_M-H"),
                    chainfrags_sn2 = c("lysopi_M-H", "lysopa_M-H", "fa_M-H"),
                    intrules = c("lysopi_sn1/lysopi_sn2",
                                 "lysopa_sn1/lysopa_sn2"),
                    rates = c("3/1", "3/1"),
                    intrequired = c(FALSE, FALSE),
                    coelCutoff = 0.8,
                    dbs){
  # Load the default fragmentation-rule databases when none are supplied
  if (missing(dbs)){
    dbs <- assignDB()
  }
  # Fixed: use `||` so is.null() is never evaluated on a missing argument
  # (`|` evaluates both operands and errors when MSMS2 is absent).
  if (missing(MSMS2) || is.null(MSMS2)){
    rawDataMSMS2 <- MSMS2 <- data.frame()
  }
  # Reorder MS data: inputs may be plain peaklist data frames or
  # list(peaklist, rawScans). inherits() replaces `class(x) == "list"`,
  # which breaks for objects carrying more than one class.
  if (inherits(MS1, "list") && length(MS1) == 2){
    if (!all(c("peaklist", "rawScans") %in% names(MS1))){
      stop("MS1, MSMS1 and MSMS2 (if supplied) lists should have two elements
           named as peaklist and rawScans")
    }
    if(!all(c("m.z", "RT", "int", "peakID") %in% colnames(MS1$peaklist))){
      stop("peaklist element of MS1, MSMS1 and MSMS2 needs to have at least 4
           columns: m.z, RT, int and peakID")
    }
    if(!all(c("m.z", "RT", "int", "peakID", "Scan") %in% colnames(MS1$rawScans))){
      stop("rawScans element of MS1, MSMS1 and MSMS2 needs to have at least 5
           columns: m.z, RT, int, peakID and Scan")
    }
    # Tag peak IDs with the acquisition function so IDs remain unique
    # once the MS1/MSMS1/MSMS2 tables are combined
    rawDataMS1 <- MS1$rawScans
    rawDataMS1$peakID <- as.vector(paste(rawDataMS1$peakID, "MS1", sep = "_"))
    MS1 <- MS1$peaklist
    MS1$peakID <- as.vector(paste(MS1$peakID, "MS1", sep = "_"))
    rawDataMSMS1 <- MSMS1$rawScans
    rawDataMSMS1$peakID <- as.vector(paste(rawDataMSMS1$peakID, "MSMS1", sep = "_"))
    MSMS1 <- MSMS1$peaklist
    MSMS1$peakID <- as.vector(paste(MSMS1$peakID, "MSMS1", sep = "_"))
    if (!missing(MSMS2) && !is.data.frame(MSMS2)){
      rawDataMSMS2 <- MSMS2$rawScans
      rawDataMSMS2$peakID <- as.vector(paste(rawDataMSMS2$peakID, "MSMS2", sep = "_"))
      MSMS2 <- MSMS2$peaklist
      MSMS2$peakID <- as.vector(paste(MSMS2$peakID, "MSMS2", sep = "_"))
    }
  } else {
    # No raw scan data available: disable parent-fragment coelution scoring
    rawDataMS1 <- rawDataMSMS1 <- rawDataMSMS2 <- data.frame()
    coelCutoff <- 0
  }
  if(!"peakID" %in% colnames(MS1)){
    MS1$peakID <- as.vector(rep("", nrow(MS1)))
    MSMS1$peakID <- as.vector(rep("", nrow(MSMS1)))
    if (nrow(MSMS2) != 0){
      MSMS2$peakID <- as.vector(rep("", nrow(MSMS2)))
    }
  }
  # Fixed: wrap in all() so if() receives a scalar, and validate MSMS1 too
  # (original passed a length-3 logical to || and tested MS1 twice).
  if (!all(c("m.z", "RT", "int") %in% colnames(MS1)) ||
      !all(c("m.z", "RT", "int") %in% colnames(MSMS1))){
    stop("Peaklists (MS1, MSMS1 and MSMS2 if supplied, should have at least
         3 columns with the following names: m.z, RT, int.")
  }
  if (!all(adducts %in% dbs[["adductsTable"]]$adduct)){
    stop("Some adducts can't be found at the aductsTable. Add them.")
  }
  # Validate the class-fragment rule vectors
  if (length(clfrags) > 0){
    if (length(clfrags) != length(clrequired) || length(clfrags) !=
        length(ftype)){
      stop("clfrags, clrequired and ftype should have the same length")
    }
    if (!all(ftype %in% c("F", "NL", "BB"))){
      stop("ftype values allowed are: \"F\", \"NL\" or\"BB\"")
    }
    # Rules written as "db_adduct" need both parts to exist in dbs
    strfrag <- which(grepl("_", clfrags))
    if (length(strfrag) > 0){
      d <- unlist(lapply(strsplit(clfrags[strfrag], "_"), "[[", 1))
      a <- unlist(lapply(strsplit(clfrags[strfrag], "_"), "[[", 2))
      if (!all(a %in% dbs[["adductsTable"]]$adduct)){
        stop("Adducts employed in clfrags also need to be at adductsTable.")
      }
      if (!all(paste(d, "db", sep="") %in% names(dbs))){
        stop("All required dbs must be supplied through dbs argument.")
      }
    }
  }
  # Default RT window: the whole MS1 RT range
  if (missing(rt)){
    rt <- c(min(MS1$RT), max(MS1$RT))
  }
  rawData <- rbind(rawDataMS1, rawDataMSMS1, rawDataMSMS2)
  rawData <- rawData[!rawData$peakID %in% c("0_MS1", "0_MSMS1", "0_MSMS2"),]
  # Candidates search against the PI database
  candidates <- findCandidates(MS1, dbs$pidb, ppm = ppm_precursor, rt = rt,
                               adducts = adducts, rttol = rttol, dbs = dbs,
                               rawData = rawData, coelCutoff = coelCutoff)
  if (nrow(candidates) > 0){
    # Isolation of fragments coeluting with each candidate precursor
    MSMS <- rbind(MSMS1, MSMS2)
    coelfrags <- coelutingFrags(candidates, MSMS, rttol, rawData,
                                coelCutoff = coelCutoff)
    # Check class fragments
    classConf <- checkClass(candidates, coelfrags, clfrags, ftype, clrequired,
                            ppm_products, dbs)
    # Search chain fragments for each sn position
    sn1 <- chainFrags(coelfrags, chainfrags_sn1, ppm_products, dbs = dbs,
                      candidates = candidates)
    sn2 <- chainFrags(coelfrags, chainfrags_sn2, ppm_products, candidates, sn1,
                      dbs)
    # Combine chain fragments into possible chain structures
    chainsComb <- combineChains(candidates, nchains=2, sn1, sn2)
    # Check chain positions based on intensity ratios
    intConf <- checkIntensityRules(intrules, rates, intrequired, nchains=2,
                                   chainsComb)
    # Prepare output
    res <- organizeResults(candidates, clfrags, classConf, chainsComb, intrules,
                           intConf, nchains = 2, class="PI")
    if (length(clfrags) > 0){
      classfragments <- classConf$presence
      colnames(classfragments) <- clfrags
    } else {
      classfragments <- data.frame()
    }
    if (length(chainfrags_sn1) > 0){
      chainfragments <- mapply(rbind, sn1, sn2, SIMPLIFY = FALSE)
    } else {
      # Fixed typo: was `chainsfrags`, which left `chainfragments` undefined
      # and broke the return value whenever no sn1 rules were supplied.
      chainfragments <- list()
    }
    return(list(results = res, candidates = candidates,
                classfragments = classfragments,
                chainfragments = chainfragments))
  } else {
    return(list(results = data.frame()))
  }
}
| /R/idPIneg.R | no_license | 13479776/LipidMS | R | false | false | 12,046 | r | # idPIneg
#' Phosphoinositols (PI) annotation for ESI-
#'
#' PI identification based on fragmentation patterns for LC-MS/MS
#' AIF data acquired in negative mode.
#'
#' @param MS1 list with two data frames containing all peaks from the full MS
#' function ("peaklist" data frame) and the raw MS scans data ("rawScans" data
#' frame). They must have four columns: m.z, RT (in seconds), int (intensity)
#' and peakID (link between both data frames). "rawScans" data frame also needs
#' an extra column named "Scan", which indicates the scan order number. Output
#' of \link{dataProcessing} function. In case no coelution score needs to be
#' applied, this argument can be just the peaklist data frame.
#' @param MSMS1 list with two data frames containing all peaks from the high
#' energy function ("peaklist" data frame) and the raw MS scans data ("rawScans"
#' data frame). They must have four columns: m.z, RT (in seconds), int (intensity)
#' and peakID (link between both data frames). "rawScans" data frame also needs
#' an extra column named "Scan", which indicates the scan order number. Output
#' of \link{dataProcessing} function. In case no coelution score needs to be
#' applied, this argument can be just the peaklist data frame.
#' @param MSMS2 list with two data frames containing all peaks from a second high
#' energy function ("peaklist" data frame) and the raw MS scans data ("rawScans"
#' data frame). They must have four columns: m.z, RT (in seconds), int (intensity)
#' and peakID (link between both data frames). "rawScans" data frame also needs
#' an extra column named "Scan", which indicates the scan order number. Output
#' of \link{dataProcessing} function. In case no coelution score needs to be
#' applied, this argument can be just the peaklist data frame. Optional.
#' @param ppm_precursor mass tolerance for precursor ions. By default, 5 ppm.
#' @param ppm_products mass tolerance for product ions. By default, 10 ppm.
#' @param rttol total rt window for coelution between precursor and product
#' ions. By default, 3 seconds.
#' @param rt rt range where the function will look for candidates. By default,
#' it will search within all RT range in MS1.
#' @param adducts expected adducts for PI in ESI-. Adducts allowed can
#' be modified in adductsTable (dbs argument).
#' @param clfrags vector containing the expected fragments for a given lipid
#' class. See \link{checkClass} for details.
#' @param ftype character vector indicating the type of fragments in clfrags.
#' It can be: "F" (fragment), "NL" (neutral loss) or "BB" (building block).
#' See \link{checkClass} for details.
#' @param clrequired logical vector indicating if each class fragment is
#' required or not. If any of them is required, at least one of them must be
#' present within the coeluting fragments. See \link{checkClass} for details.
#' @param chainfrags_sn1 character vector containing the fragmentation rules for
#' the chain fragments in sn1 position. See \link{chainFrags} for details.
#' @param chainfrags_sn2 character vector containing the fragmentation rules for
#' the chain fragments in sn2 position. See \link{chainFrags} for details. If
#' empty, it will be estimated based on the difference between precursors and
#' sn1 chains.
#' @param intrules character vector specifying the fragments to compare. See
#' \link{checkIntensityRules}.
#' @param rates character vector with the expected rates between fragments given
#' as a string (i.e. "3/1"). See \link{checkIntensityRules}.
#' @param intrequired logical vector indicating if any of the rules is required.
#' If not, at least one must be verified to confirm the structure.
#' @param coelCutoff coelution score threshold between parent and fragment ions.
#' Only applied if rawData info is supplied. By default, 0.8.
#' @param dbs list of data bases required for annotation. By default, dbs
#' contains the required data frames based on the default fragmentation rules.
#' If these rules are modified, dbs may need to be supplied. See \link{createLipidDB}
#' and \link{assignDB}.
#'
#' @return List with PI annotations (results) and some additional information
#' (class fragments and chain fragments).
#'
#' @details \code{idPIneg} function involves 5 steps. 1) FullMS-based
#' identification of candidate PI as M-H. 2) Search of PI class fragments:
#' 241.0115, 223.0008, 259.0219 and 297.0375 coeluting with the precursor
#' ion. 3) Search of specific fragments that inform about chain composition at
#' sn1 (lysoPI as M-H resulting from the loss of the FA chain at sn2 or lysoPA
#' as M-H if it also loses the head group) and sn2 (lysoPI or lysoPA as M-H
#' resulting from the loss of the FA chain at sn1 or FA chain as M-H). 4) Look
#' for possible chains structure based on the combination of chain fragments.
#' 5) Check intensity rules to confirm chains position. In this case, lysoPI or
#' lysoPA from sn1 is at least 3 times more intense than lysoPI or lysoPA from
#' sn2.
#'
#' Results data frame shows: ID, class of lipid, CDB (total number
#' of carbons and double bonds), FA composition (specific chains composition if
#' it has been confirmed), mz, RT (in seconds), I (intensity, which comes
#' directly from the input), Adducts, ppm (m.z error), confidenceLevel (Subclass,
#' FA level, where chains are known but not their positions, or FA position
#' level) and PFCS (parent-fragment coelution score mean of all fragments used
#' for the identification).
#'
#' @note Isotopes should be removed before identification to avoid false
#' positives.
#' This function has been written based on fragmentation patterns observed for
#' two different platforms (QTOF 6550 from Agilent and Synapt G2-Si from Waters),
#' but it may need to be customized for other platforms or acquisition settings.
#'
#' @examples
#' \donttest{
#' library(LipidMSdata)
#' idPIneg(MS1 = MS1_neg, MSMS1 = MSMS1_neg, MSMS2 = MSMS2_neg)
#' }
#'
#' @author M Isabel Alcoriza-Balaguer <maialba@alumni.uv.es>
idPIneg <- function(MS1, MSMS1, MSMS2, ppm_precursor = 5,
                    ppm_products = 10, rttol = 3, rt,
                    adducts = c("M-H"),
                    clfrags = c(241.0115, 223.0008, 259.0219, 297.0375),
                    clrequired = c(FALSE, FALSE, FALSE, FALSE),
                    ftype = c("F", "F", "F", "F"),
                    chainfrags_sn1 = c("lysopi_M-H", "lysopa_M-H"),
                    chainfrags_sn2 = c("lysopi_M-H", "lysopa_M-H", "fa_M-H"),
                    intrules = c("lysopi_sn1/lysopi_sn2",
                                 "lysopa_sn1/lysopa_sn2"),
                    rates = c("3/1", "3/1"),
                    intrequired = c(FALSE, FALSE),
                    coelCutoff = 0.8,
                    dbs){
  # Load the default fragmentation-rule databases when none are supplied
  if (missing(dbs)){
    dbs <- assignDB()
  }
  # Fixed: use `||` so is.null() is never evaluated on a missing argument
  # (`|` evaluates both operands and errors when MSMS2 is absent).
  if (missing(MSMS2) || is.null(MSMS2)){
    rawDataMSMS2 <- MSMS2 <- data.frame()
  }
  # Reorder MS data: inputs may be plain peaklist data frames or
  # list(peaklist, rawScans). inherits() replaces `class(x) == "list"`,
  # which breaks for objects carrying more than one class.
  if (inherits(MS1, "list") && length(MS1) == 2){
    if (!all(c("peaklist", "rawScans") %in% names(MS1))){
      stop("MS1, MSMS1 and MSMS2 (if supplied) lists should have two elements
           named as peaklist and rawScans")
    }
    if(!all(c("m.z", "RT", "int", "peakID") %in% colnames(MS1$peaklist))){
      stop("peaklist element of MS1, MSMS1 and MSMS2 needs to have at least 4
           columns: m.z, RT, int and peakID")
    }
    if(!all(c("m.z", "RT", "int", "peakID", "Scan") %in% colnames(MS1$rawScans))){
      stop("rawScans element of MS1, MSMS1 and MSMS2 needs to have at least 5
           columns: m.z, RT, int, peakID and Scan")
    }
    # Tag peak IDs with the acquisition function so IDs remain unique
    # once the MS1/MSMS1/MSMS2 tables are combined
    rawDataMS1 <- MS1$rawScans
    rawDataMS1$peakID <- as.vector(paste(rawDataMS1$peakID, "MS1", sep = "_"))
    MS1 <- MS1$peaklist
    MS1$peakID <- as.vector(paste(MS1$peakID, "MS1", sep = "_"))
    rawDataMSMS1 <- MSMS1$rawScans
    rawDataMSMS1$peakID <- as.vector(paste(rawDataMSMS1$peakID, "MSMS1", sep = "_"))
    MSMS1 <- MSMS1$peaklist
    MSMS1$peakID <- as.vector(paste(MSMS1$peakID, "MSMS1", sep = "_"))
    if (!missing(MSMS2) && !is.data.frame(MSMS2)){
      rawDataMSMS2 <- MSMS2$rawScans
      rawDataMSMS2$peakID <- as.vector(paste(rawDataMSMS2$peakID, "MSMS2", sep = "_"))
      MSMS2 <- MSMS2$peaklist
      MSMS2$peakID <- as.vector(paste(MSMS2$peakID, "MSMS2", sep = "_"))
    }
  } else {
    # No raw scan data available: disable parent-fragment coelution scoring
    rawDataMS1 <- rawDataMSMS1 <- rawDataMSMS2 <- data.frame()
    coelCutoff <- 0
  }
  if(!"peakID" %in% colnames(MS1)){
    MS1$peakID <- as.vector(rep("", nrow(MS1)))
    MSMS1$peakID <- as.vector(rep("", nrow(MSMS1)))
    if (nrow(MSMS2) != 0){
      MSMS2$peakID <- as.vector(rep("", nrow(MSMS2)))
    }
  }
  # Fixed: wrap in all() so if() receives a scalar, and validate MSMS1 too
  # (original passed a length-3 logical to || and tested MS1 twice).
  if (!all(c("m.z", "RT", "int") %in% colnames(MS1)) ||
      !all(c("m.z", "RT", "int") %in% colnames(MSMS1))){
    stop("Peaklists (MS1, MSMS1 and MSMS2 if supplied, should have at least
         3 columns with the following names: m.z, RT, int.")
  }
  if (!all(adducts %in% dbs[["adductsTable"]]$adduct)){
    stop("Some adducts can't be found at the aductsTable. Add them.")
  }
  # Validate the class-fragment rule vectors
  if (length(clfrags) > 0){
    if (length(clfrags) != length(clrequired) || length(clfrags) !=
        length(ftype)){
      stop("clfrags, clrequired and ftype should have the same length")
    }
    if (!all(ftype %in% c("F", "NL", "BB"))){
      stop("ftype values allowed are: \"F\", \"NL\" or\"BB\"")
    }
    # Rules written as "db_adduct" need both parts to exist in dbs
    strfrag <- which(grepl("_", clfrags))
    if (length(strfrag) > 0){
      d <- unlist(lapply(strsplit(clfrags[strfrag], "_"), "[[", 1))
      a <- unlist(lapply(strsplit(clfrags[strfrag], "_"), "[[", 2))
      if (!all(a %in% dbs[["adductsTable"]]$adduct)){
        stop("Adducts employed in clfrags also need to be at adductsTable.")
      }
      if (!all(paste(d, "db", sep="") %in% names(dbs))){
        stop("All required dbs must be supplied through dbs argument.")
      }
    }
  }
  # Default RT window: the whole MS1 RT range
  if (missing(rt)){
    rt <- c(min(MS1$RT), max(MS1$RT))
  }
  rawData <- rbind(rawDataMS1, rawDataMSMS1, rawDataMSMS2)
  rawData <- rawData[!rawData$peakID %in% c("0_MS1", "0_MSMS1", "0_MSMS2"),]
  # Candidates search against the PI database
  candidates <- findCandidates(MS1, dbs$pidb, ppm = ppm_precursor, rt = rt,
                               adducts = adducts, rttol = rttol, dbs = dbs,
                               rawData = rawData, coelCutoff = coelCutoff)
  if (nrow(candidates) > 0){
    # Isolation of fragments coeluting with each candidate precursor
    MSMS <- rbind(MSMS1, MSMS2)
    coelfrags <- coelutingFrags(candidates, MSMS, rttol, rawData,
                                coelCutoff = coelCutoff)
    # Check class fragments
    classConf <- checkClass(candidates, coelfrags, clfrags, ftype, clrequired,
                            ppm_products, dbs)
    # Search chain fragments for each sn position
    sn1 <- chainFrags(coelfrags, chainfrags_sn1, ppm_products, dbs = dbs,
                      candidates = candidates)
    sn2 <- chainFrags(coelfrags, chainfrags_sn2, ppm_products, candidates, sn1,
                      dbs)
    # Combine chain fragments into possible chain structures
    chainsComb <- combineChains(candidates, nchains=2, sn1, sn2)
    # Check chain positions based on intensity ratios
    intConf <- checkIntensityRules(intrules, rates, intrequired, nchains=2,
                                   chainsComb)
    # Prepare output
    res <- organizeResults(candidates, clfrags, classConf, chainsComb, intrules,
                           intConf, nchains = 2, class="PI")
    if (length(clfrags) > 0){
      classfragments <- classConf$presence
      colnames(classfragments) <- clfrags
    } else {
      classfragments <- data.frame()
    }
    if (length(chainfrags_sn1) > 0){
      chainfragments <- mapply(rbind, sn1, sn2, SIMPLIFY = FALSE)
    } else {
      # Fixed typo: was `chainsfrags`, which left `chainfragments` undefined
      # and broke the return value whenever no sn1 rules were supplied.
      chainfragments <- list()
    }
    return(list(results = res, candidates = candidates,
                classfragments = classfragments,
                chainfragments = chainfragments))
  } else {
    return(list(results = data.frame()))
  }
}
|
#!/usr/bin/Rscript
# Andres Chang
######################################################################################################################.
# TROPICS-scenario_prep
######################################################################################################################.
# Description: This script reads, transforms, and generates metadata for SR15 scenario data.
# Section 1 defines path to SR15 data and ETP data, which is used for a couple intensity
# denominators (e.g., steel production)
# Section 2 loads all libraries
# Section 3 contains functions that load and interpolate data, as well as combining variables and generating
# meta-data
# Section 4.1-4.3 prepare variable lists to import from SR15
# Section 4.4 pulls data from the SR15 scenario database and ETP 2017
# Section 4.5 calculates the slopes for all chosen variables between 2020 and future years in 5-year increments
# Section 4.6 produces different subsets of all scenarios based on different combinations of filters, all exported
# as csv w/ a summary sheet to compare their characteristics. csv's can then be loaded into regression model script
######################################################################################################################.
#1. Preamble =================================================================================
# SR15 database fpaths (relative to the project root)
f_sr15 <- 'input/iamc15_scenario_data_world_r2.0.xlsx'
f_sr15_meta <- 'input/sr15_metadata_indicators_r2.0.xlsx'
# f_sr15_all_regions <- 'input/iamc15_scenario_data_all_regions_r2.0.xlsx'
# NOTE(review): f_sr15_all_regions is commented out above, but get.all.data()
# references it when called with all_regions = TRUE -- define it before using that path.
# By default, ETP data is not used; but if a data path is provided and use_etp is set to TRUE,
# ETP data is combined with SR15 data to calculate a few intensity indicators currently under
# testing
f_etp <- 'input/ETP2017_industry_summary.xlsx'
use_etp <- FALSE
write_csvs <- TRUE
# 2. Library =================================================================================
library(plyr)
library(dplyr)
library(readr)
library(zeallot)
library(futile.logger)
library(magrittr)
library(ggplot2)
library(openxlsx)
library(reshape2)
library(tidyr)
# 3. Functions ===============================================================================
# 3.1 Data loading functions =================================================================
get.SR15.meta <- function() {
  # Read SR15 scenario metadata and key it by "Model-Scenario" so it can be
  # merged onto the interpolated output data frame later in the script.
  meta <- read.xlsx(f_sr15_meta, sheet = 'meta')
  meta <- mutate(meta, `Model-Scenario` = paste(model, scenario, sep = '-'))
  # Drop the first three columns (as in the original selection)
  select(meta, -c(1:3))
}
get.all.data <- function(refresh, all_regions=FALSE) {
  # Returns a df of SR15 scenario data with an added "Model-Scenario" key column.
  # Args:
  #   refresh (bool): if FALSE, reuse sr15_all_data / sr15_all_regions_all_data
  #     found in the enclosing environment; otherwise read fresh from xlsx.
  #   all_regions (bool): if TRUE, pulls from the all-regions xlsx; otherwise
  #     pulls world data.
  # NOTE(review): f_sr15_all_regions is commented out in the preamble, so the
  # all_regions = TRUE branch fails unless that path is defined -- confirm.
  if(!all_regions) {
    # Fixed: `||` (scalar, short-circuiting) instead of elementwise `|`
    if(!exists("sr15_all_data") || refresh) {
      flog.debug('Pulling SR15 world data from Excel')
      sr15_all_data <- read.xlsx(f_sr15, 2)
      # Make model-scenario column to match with scenarios in SBTi scenario file
      sr15_ms <- paste(sr15_all_data$Model, sr15_all_data$Scenario, sep='-')
      sr15_all_data <- cbind(sr15_all_data, sr15_ms)
      colnames(sr15_all_data)[ncol(sr15_all_data)] <- "Model-Scenario"
    } else {flog.debug('Using existing sr_15_all_data var in environment')}
    # When not refreshed, sr15_all_data resolves to the global copy via scoping
    return(sr15_all_data)
  } else {
    if(!exists("sr15_all_regions_all_data") || refresh) {
      flog.debug('Pulling SR15 all regions data from Excel')
      sr15_all_regions_all_data <- read.xlsx(f_sr15_all_regions, 2)
      # Make model-scenario column to match with scenarios in SBTi scenario file
      sr15_ms <- paste(sr15_all_regions_all_data$Model, sr15_all_regions_all_data$Scenario, sep='-')
      sr15_all_regions_all_data <- cbind(sr15_all_regions_all_data, sr15_ms)
      colnames(sr15_all_regions_all_data)[ncol(sr15_all_regions_all_data)] <- "Model-Scenario"
    } else{flog.debug('Using existing sr_15_all_regions_all_data var in environment')}
    return(sr15_all_regions_all_data)
  }
}
get.ETP.data <- function() {
  # Pull the world industry-production summary table from the ETP 2017
  # workbook (fixed cell block: rows 103-109, columns 15-24) and label the
  # columns as Variable plus the reported years.
  sheet <- read.xlsx(f_etp, sheet = 'WORLD', skipEmptyRows = FALSE,
                     skipEmptyCols = FALSE)
  etp <- sheet[103:109, 15:24]
  colnames(etp) <- c('Variable', '2014', seq(2025, 2060, 5))
  etp
}
# 3.2 Gap-filler functions ===================================================================
interp.all <- function(df, id.cols=5, cdata_yrs_out=FALSE) {
  # Returns a dataframe with one column per year between 2000 and 2100
  # where data is interpolated linearly based on spacing of available
  # SR15 data per model-scenario. (I.e., works with 5-year or 10-year
  # data or mixed.)
  # Args:
  #  * df (dataframe): non-interpolated data -- generally from filter.IPCC
  #  * id.cols (num): number of leading columns to be kept as "ID" columns in
  #    the returned dataframe
  #  * cdata_yrs_out (bool): if TRUE, returns a list where the second item is
  #    a dataframe of "keystone years," i.e., for each row, which years are
  #    reported data and which are interpolated. If FALSE, function just returns
  #    df of interpolated data
  # One row per input row, one column per year 2000-2100
  int_df <- matrix(0, nrow(df), length(c(2000:2100)))
  # Parallel 0/1 matrix flagging reported (1) vs interpolated (0) cells
  cd_out <- matrix(0, nrow(df), length(c(2000:2100)))
  for(i in 1:nrow(df)) {
    # Column names of non-NA cells in this row that parse as numbers = reported years
    data_yrs <- colnames(df[i,])[!is.na(df[i,])]
    data_yrs <- unlist(lapply(data_yrs, function(x) {
      x0 <- type.convert(x)
      if(is.numeric(x0)) {return(x0)} else {return(NULL)}
    }))
    cdata_yrs <- as.character(data_yrs)
    for(k in 2000:2100) {
      yr_col <- as.character(k)
      if(yr_col %in% cdata_yrs) {
        # Reported year: copy the value through and flag it as real data
        int_df[i, k-1999] <- df[i, yr_col]
        cd_out[i, k-1999] <- 1
      } else {
        # Interpolate linearly between the nearest reported years on each side
        back_yr <- data_yrs[data_yrs < k][
          length(data_yrs[data_yrs < k])]
        forward_yr <- data_yrs[data_yrs > k][1]
        n_yrs <- forward_yr - back_yr
        int_yr <- k - back_yr
        int_data <- (
          df[i, as.character(back_yr)] +
            (int_yr/n_yrs) * (df[i, as.character(forward_yr)] - df[i, as.character(back_yr)]))
        # Debug aid: a non-scalar result suggests duplicated year columns
        if(length(int_data) != 0 & length(int_data) != 1) {
          print(df[i])
          print(int_data)
        }
        # Years outside the reported range have no bracketing data -> NA
        if(length(int_data) == 0) {
          int_df[i, k-1999] <- NA
        } else{
          int_df[i, k-1999] <- int_data}
      }
    }
  }
  # Reattach the ID columns and name the year columns "2000".."2100"
  if(id.cols >1) {
    int_df <- bind_cols(df[,c(1:id.cols)], as.data.frame(int_df))
  } else {
    int_df <- cbind(df[,1], as.data.frame(int_df))
    colnames(int_df)[1] <- colnames(df)[1]
  }
  colnames(int_df)[c((id.cols+1):ncol(int_df))] <- sapply(c(2000:2100), as.character)
  if(cdata_yrs_out) {
    return(list(int_df, cd_out))
  } else {
    return(int_df)
  }
}
calculate.AFOLU.cs <- function(df) {
  # Estimate land-use-related carbon sequestration as the magnitude of net
  # negative AFOLU CO2 emissions, compensating for sparse reporting of
  # 'Carbon Sequestration|Land Use' in the SR15 database.
  afolu <- df[["Emissions|CO2|AFOLU"]]
  # case_when keeps the original NA behavior: NA emissions fall through to 0
  df[["Carbon Sequestration|Land Use2"]] <- case_when(
    afolu < 0 ~ -afolu,
    TRUE ~ 0
  )
  df
}
calculate.CDR <- function(df) {
  # Add a `cdr` column: the row-wise sum of all carbon dioxide removal (CDR)
  # categories present in the data. CO2 captured at the point of emissions
  # (e.g., fossil CCS) is excluded, following the definition in SR15 Fig 2.10.
  CDR_subs <- c('CCS|Biomass', 'Land Use2', 'Feedstocks',
                'Direct Air Capture', 'Enhanced Weathering', 'Other')
  all_CDR <- generate.varnames('Carbon Sequestration', CDR_subs, FALSE)
  # Keep only categories actually reported in this data frame
  all_CDR <- all_CDR[all_CDR %in% colnames(df)]
  if (length(all_CDR) > 0) {
    # Treat unreported values as zero removal (as before)
    df[, all_CDR][is.na(df[, all_CDR])] <- 0
    # rowSums replaces apply(df, 1, ...), which coerced the entire data frame
    # to a character matrix before summing (slow and fragile).
    df$cdr <- rowSums(df[, all_CDR, drop = FALSE])
  } else {
    # No CDR columns present: the sum over an empty set is zero for every row
    df$cdr <- 0
  }
  return(df)
}
calculate.intensity.vars <- function(df, use_etp) {
  # Compute emissions-intensity indicators (emissions per unit of GDP, primary
  # energy or electricity generation). ETP-based industrial intensities are
  # appended only when `use_etp` is TRUE (they require ETP production columns).
  out <- mutate(
    df,
    INT.emKyoto_gdp = `Emissions|Kyoto Gases`/`GDP|PPP`,
    INT.emCO2EI_PE = `Emissions|CO2|Energy and Industrial Processes`/`Primary Energy`,
    INT.emCO2Elec_elecGen = `Emissions|CO2|Energy|Supply|Electricity`/`Secondary Energy|Electricity`,
    INT.emCO2EI_elecGen = `Emissions|CO2|Energy and Industrial Processes`/`Secondary Energy|Electricity`,
    INT.emCO2Transport_gdp = `Emissions|CO2|Energy|Demand|Transportation`/`GDP|PPP`
  )
  if (use_etp) {
    out <- mutate(
      out,
      INT.emCO2EI_cement = `Emissions|CO2|Energy and Industrial Processes`/`Cement`,
      INT.emCO2IndDemand_cement = `Emissions|CO2|Energy|Demand|Industry`/`Cement`,
      INT.emCO2EI_steel = `Emissions|CO2|Energy and Industrial Processes`/`Crude steel`,
      INT.emCO2IndDemand_steel = `Emissions|CO2|Energy|Demand|Industry`/`Crude steel`,
      INT.emCO2EI_aluminum = `Emissions|CO2|Energy and Industrial Processes`/`Total aluminium (primary and secondary)`,
      INT.emCO2IndDemand_aluminum = `Emissions|CO2|Energy|Demand|Industry`/`Total aluminium (primary and secondary)`
    )
  }
  out
}
# 3.3 New meta-data ==========================================================================
calculate.new.meta <- function(df, slope_vars = NULL, slope_year_pairs = NULL) {
  # Add per-scenario meta columns to the long scenario data frame `df`:
  #   "cdr|cumulative": sum of the cdr column over all years of the scenario
  #   "cdr|max": maximum annual cdr
  #   "Year of max Kyoto emissions" / "Year of max EI CO2 emissions": peak
  #     year(s); set to NA when the 2030 value of the variable is missing
  # Fixed: `slope_vars` and `slope_year_pairs` are unused but had no defaults,
  # so the pipeline call calculate.new.meta(df) only worked through lazy
  # evaluation. They now default to NULL explicitly (interface-compatible).
  df[, c("cdr|cumulative")] <- NA
  for(si in unique(df$`Model-Scenario`)) {
    df[df$`Model-Scenario` == si, "cdr|cumulative"] <- (
      sum(df[df$`Model-Scenario` == si, "cdr"], na.rm = TRUE))
    df[df$`Model-Scenario` == si, "cdr|max"] <- (
      max(df[df$`Model-Scenario` == si, "cdr"], na.rm = TRUE))
    # Peak-year columns: NA if the 2030 value is missing, otherwise the
    # year(s) at which the variable reaches its maximum
    if(is.na(df[df$`Model-Scenario` == si & df$Year == 2030, "Emissions|Kyoto Gases"])) {
      df[df$`Model-Scenario` == si, "Year of max Kyoto emissions"] <- NA
    } else {
      df[df$`Model-Scenario` == si, "Year of max Kyoto emissions"] <- (
        df[df$`Model-Scenario` == si, "Year"][
          df[df$`Model-Scenario` == si, "Emissions|Kyoto Gases"] == max(
            df[df$`Model-Scenario` == si, "Emissions|Kyoto Gases"], na.rm=TRUE
          ) & !is.na(df[df$`Model-Scenario` == si, "Emissions|Kyoto Gases"])])
    }
    if(is.na(df[df$`Model-Scenario` == si & df$Year == 2030, "Emissions|CO2|Energy and Industrial Processes"])) {
      df[df$`Model-Scenario` == si, "Year of max EI CO2 emissions"] <- NA
    } else {
      df[df$`Model-Scenario` == si, "Year of max EI CO2 emissions"] <- (
        df[df$`Model-Scenario` == si, "Year"][
          df[df$`Model-Scenario` == si, "Emissions|CO2|Energy and Industrial Processes"] == max(
            df[df$`Model-Scenario` == si, "Emissions|CO2|Energy and Industrial Processes"], na.rm=TRUE
          ) & !is.na(df[df$`Model-Scenario` == si, "Emissions|CO2|Energy and Industrial Processes"])])
    }
  }
  return(df)
}
# 3.4 Utility functions ======================================================================
generate.varnames <- function(var0, subvars, include.var0=TRUE) {
  # Returns a character vector of IPCC SR15 variable names from a nested category.
  # Args
  #   var0 (character): 'Parent' var, e.g. 'Emissions|CO2'
  #   subvars (chr vector): 'Child' vars, e.g., c('Energy|Supply', 'Energy|Demand')
  #   include.var0 (bool): whether or not to include var0 w/o any subvars in return
  # Fixed: paste0() is vectorized and returns character(0) for empty input,
  # unlike sapply(), which returned list() and made c() silently produce a list.
  subvars <- paste0(var0, '|', subvars)
  if(include.var0) {
    var_all <- c(var0, subvars)
  } else {
    var_all <- subvars
  }
  return(var_all)
}
# 4. Script ==================================================================================
#___4.1 Logging settings =====================================================================
# Emit DEBUG-level messages from futile.logger; assignment silences the returned value
null.result <- flog.threshold(DEBUG, name="ROOT")
#___4.2 Variable lists =======================================================================
#___4.2.1 Emissions variables to include in output dataframe =================================
em0 <- 'Emissions|CO2'
em_subs <- c('Energy and Industrial Processes', 'Energy', 'Industrial Processes',
             'Energy|Supply', 'Energy|Demand', 'Energy|Demand|Industry', 'Energy|Demand|Transportation',
             'Energy|Supply|Electricity', 'AFOLU')
#___4.2.2 Carbon seq variables to include in output dataframe ================================
cs0 <- 'Carbon Sequestration'
cs_subs <- c('CCS|Biomass', 'CCS|Biomass|Energy', 'CCS|Biomass|Energy|Supply',
             'CCS|Biomass|Energy|Supply|Electricity', 'CCS|Fossil', 'Land Use',
             'Feedstocks', 'Direct Air Capture', 'Enhanced Weathering', 'Other')
#___4.2.3 Any variables still missing ========================================================
other_vars <- c('Primary Energy', 'Secondary Energy|Electricity',
                'Emissions|Kyoto Gases', 'Emissions|CH4|AFOLU', 'Emissions|N2O|AFOLU', 'Price|Carbon',
                'Carbon Sequestration|CCS|Biomass|Energy|Supply|Electricity', 'GDP|PPP')
#___4.3 Prepare arguments for get.scenario.data ==============================================
all0 <- list(
  em0, cs0
)
all_subs <- list(
  em_subs, cs_subs
)
# Expand each parent variable with its children, then append the stand-alone vars
all_varnames <- c((lapply(c(1:length(all0)),
                          function(X) generate.varnames(all0[[X]], all_subs[[X]]))
                   %>% unlist()),
                  other_vars)
flog.debug('Looking for %s variables', length(all_varnames))
#____4.4 Pull and transform data according to Andres specs ===================================
# Get all SR15 data and filter to only the variables selected
ss0 <- get.all.data(TRUE) %>% filter(Variable %in% all_varnames)
# Interpolate and transform SR15 data:
# wide (years as columns) -> long -> wide (variables as columns), one row per
# model-scenario-year, with the "Model-Scenario" key column moved to the front
flog.debug('Interpolating SR15 data')
interp_sr15_data <- (ss0 %>% interp.all(id.cols=5) %>%
                       as.data.frame() %>%
                       melt(id = c(1:5), variable.name='Year') %>%
                       dcast(`Model` + `Scenario` + `Year` ~ `Variable`, value.var='value') %>%
                       mutate(`Model-Scenario`=paste(`Model`, `Scenario`, sep='-'),
                              Year = as.numeric(as.character(Year))
                       ) %>%
                       select(c(ncol(.), 1:(ncol(.)-1)))
)
# Get ETP data, interpolate, and transform to same structure as SR15
if(use_etp) {
  flog.debug('Pulling ETP data')
  interp_etp_data <- (get.ETP.data()
                      %>% interp.all(id.cols=1)
                      %>% melt(id.vars='Variable', variable.name = 'Year')
                      %>% dcast(Year ~ Variable)
                      %>% mutate(
                        `Model-Scenario`='ETP-2DS',
                        `Model`='ETP',
                        `Scenario`='2DS',
                        Year = as.numeric(as.character(Year))
                      )
                      %>% select(c((ncol(.)-2):ncol(.), 1:(ncol(.)-2)))
                      %>% rename(Cement = 'Cement ')
                      %>% arrange(Year))
  # Now merge them. We are dropping "Model-Scenario" from the ETP dataframe because
  # it is being combined with SR15 scenario data to estimate intensity of certain
  # industrial sectors
  interp_data <- (interp_sr15_data
                  %>% merge(interp_etp_data[, -c(1:3)], by='Year'))
} else {
  interp_data <- (interp_sr15_data)
}
# Derived variables: AFOLU-based sequestration estimate, total CDR, intensities
flog.debug('Calculating new vars')
sr15_out <- (interp_data
             %>% calculate.AFOLU.cs()
             %>% calculate.CDR()
             %>% calculate.intensity.vars(use_etp))
sr15_meta0 <- get.SR15.meta()
sr15_new_meta <- (sr15_out
                  %>% calculate.new.meta())
# Columns added by calculate.new.meta() but absent from sr15_out = new meta columns
meta_cols_new <- colnames(sr15_new_meta)[!colnames(sr15_new_meta) %in% colnames(sr15_out)]
# One meta row per scenario: new meta columns joined to the SR15 metadata sheet
sr15_meta <- (sr15_new_meta
              %>% select(c('Model-Scenario', meta_cols_new))
              %>% unique()
              %>% merge(sr15_meta0, by='Model-Scenario'))
#____4.5 Calculate slopes of each variable and transform to Chris specs ======================
keep_years <- seq(2020, 2050, 5)
# One row per model-scenario-variable, years 2020..2050 as columns
sr15_var_long <- (sr15_out %>% filter(Year %in% keep_years)
                  %>% melt(id.vars=c(1:4))
                  %>% mutate(value=as.numeric(value))
                  %>% dcast(Model + Scenario + `Model-Scenario` + variable ~ Year)
)
# Indicate a base year and final year to calculate all slopes
slope_byr <- 2020
slope_yrf_all <-seq(2025, 2050, 5)
# Slopes calculated linearly and compound, generate column names
slope_colsLinear <- paste0('slope', sapply(slope_yrf_all, function(X) X-slope_byr))
slope_colsCA <- paste0('slopeCA', sapply(slope_yrf_all, function(X) X-slope_byr))
all_slope_cols <- list(slope_colsLinear, slope_colsCA)
sr15_var_long[, unlist(all_slope_cols)] <- NA
# Pull all data for base year of slope calculation
byr_data <- sr15_var_long[, as.character(slope_byr)]
# Loop through each final year of slope calculation
for(i in c(1:length(slope_colsLinear))) {
  # Get column names to be filled (zeallot multi-assign)
  c(colL, colCA) %<-% c(all_slope_cols[[1]][i], all_slope_cols[[2]][i])
  # Final year of slope calculation in loop
  slope_yrf <- slope_yrf_all[i]
  # Pull data for final year of slope calculation in loop
  yrf_data <- sr15_var_long[, as.character(slope_yrf)]
  # Calculate linear reduction and compound (annualized) reduction, % per year
  sr15_var_long[, colL] <- 100 * (yrf_data - byr_data)/byr_data/(slope_yrf-slope_byr)
  sr15_var_long[, colCA] <- 100 * ((yrf_data/byr_data)^(1/(slope_yrf-slope_byr))-1)
  # Replace with NA if infinite or over 1000% growth
  # NOTE(review): the second line masks colCA using colL's values; if the
  # compound column should be screened on its own values, this is a bug -- confirm.
  sr15_var_long[, colL][is.infinite(sr15_var_long[, colL]) | sr15_var_long[, colL] > 1000] <- NA
  sr15_var_long[, colCA][is.infinite(sr15_var_long[, colL]) | sr15_var_long[, colL] > 1000] <- NA
}
#____4.6 Filter based on different combinations of parameters ================================
sr15_prefilter <- sr15_var_long %>% merge(sr15_meta, by='Model-Scenario')
# Latest allowed peak year of emissions
latest_peak <- c(2100, 2020, 2025, 2030)
# Which mitigation scenarios are included by latest peak year filter?
applied_to <- list(c('Below 1.5C' , '1.5C low overshoot', '1.5C high overshoot', 'Lower 2C', 'Higher 2C'),
                   c('Below 1.5C' , '1.5C low overshoot', '1.5C high overshoot', 'Lower 2C'),
                   c('Below 1.5C', '1.5C low overshoot', '1.5C high overshoot')
)
# Which emissions variable? All Kyoto GHGs or just Energy & Industrial CO2?
which_peak_var <- c("Year of max Kyoto emissions", "Year of max EI CO2 emissions")
# Min annual CO2 (or max annual CDR, negative) (GT CO2/yr)
min_co2 <- c(-1000, -20, -15, -10)
which_cdr_var <- c("cdr|max", "minimum.net.CO2.emissions.(Gt.CO2/yr)")
# Data collected for output df: preallocate one slot per filter combination
nvariations <- (length(latest_peak)*length(applied_to)*length(which_peak_var)*
                  length(min_co2)*length(which_cdr_var))
filtered_dfs <- vector(mode='list', length = nvariations)
# Preallocate parallel vectors describing each filter combination
c(peak_yrv, peak_varv, appliedv, cdr_limv, cdr_varv, nscenariosv) %<-% (
  lapply(c(1:6), function(X) vector(mode='character', length=nvariations)))
counter <- 0
for(pi in which_peak_var){
for(ci in which_cdr_var) {
for(ai in applied_to) {
for(peak_yr in latest_peak) {
for(cdr_val in min_co2) {
counter <- counter + 1
if(length(ai) > 4) {
pscenarios <- '1.5C and 2C'
} else if(length(ai) > 3){
pscenarios <- '1.5C and lower 2C'} else {
pscenarios <- '1.5C'
}
set_name <- paste0(pi,' for ', pscenarios, ' scenarios: ', peak_yr,
'. CDR less than ', -cdr_val, ' GT CO2/yr based on ', ci)
peak_yrv[counter] <- peak_yr
peak_varv[counter] <- pi
appliedv[counter] <- pscenarios
cdr_limv[counter] <- ci
cdr_varv[counter] <- cdr_val
if(ci == 'cdr|max') {
filtered_dfi <- sr15_prefilter %>% filter(
((!! rlang::sym(pi) <= peak_yr) & (`cdr|max` <= -cdr_val*1000) & (category %in% ai)) |
((`cdr|max` <= -cdr_val*1000) & !category %in% ai))
} else {
filtered_dfi <- sr15_prefilter %>% filter(
((!! rlang::sym(pi) <= peak_yr) & (`minimum.net.CO2.emissions.(Gt.CO2/yr)` >= cdr_val) & (category %in% ai)) |
((`minimum.net.CO2.emissions.(Gt.CO2/yr)` >= cdr_val) & (!category %in% ai)))
}
filtered_dfs[[counter]] <- filtered_dfi
names(filtered_dfs)[counter] <- set_name
# print(length(unique(filtered_dfi$`Model-Scenario`)))
nscenariosv[counter] <- length(unique(filtered_dfi$`Model-Scenario`))
if(write_csvs) {
write_excel_csv(filtered_dfi, path=paste0('TROPICS-scenario_data_csv/TROPICS_dataset-', counter, '.csv'))
}
}
}
}
}
}
mapping <- cbind(names(filtered_dfs), peak_yrv, peak_varv, appliedv, cdr_limv,
cdr_varv, nscenariosv, seq(1, length(filtered_dfs)))
if(write_csvs) {
write.xlsx(mapping, 'TROPICS-scenario_data_csv/TROPICS_dataset_mapping.xlsx')
}
| /TROPICS-scenario_prep.R | no_license | tildtr/TROPICS-regression | R | false | false | 20,885 | r | #!/usr/bin/Rscript
# Andres Chang
######################################################################################################################.
# TROPICS-scenario_prep
######################################################################################################################.
# Description: This script reads, transforms, and generates metadata for SR15 scenario data.
# Section 1 defines path to SR15 data and ETP data, which is used for a couple intensity
# denominators (e.g., steel production)
# Section 2 loads all libraries
# Section 3 contains functions that load and interpolate data, as well as combining variables and generating
# meta-data
# Section 4.1-4.3 prepare variable lists to import from SR15
# Section 4.4 pulls data from the SR15 scenario database and ETP 2017
# Section 4.5 calculates the slopes for all chosen variables between 2020 and future years in 5-year increments
# Section 4.6 produces different subsets of all scenarios based on different combinations of filters, all exported
# as csv w/ a summary sheet to compare their characteristics. csv's can then be loaded into regression model script
######################################################################################################################.
#1. Preamble =================================================================================
# SR15 database fpaths
f_sr15 <- 'input/iamc15_scenario_data_world_r2.0.xlsx'
f_sr15_meta <- 'input/sr15_metadata_indicators_r2.0.xlsx'
# f_sr15_all_regions <- 'input/iamc15_scenario_data_all_regions_r2.0.xlsx'
# NOTE(review): f_sr15_all_regions is commented out above, but
# get.all.data(all_regions = TRUE) reads it -- uncomment the path before
# using the all-regions branch.
# By default, ETP data is not used; but if a data path is provided and use_etp is set to TRUE,
# ETP data is combined with SR15 data to calculate a few intensity indicators currently under
# testing
f_etp <- 'input/ETP2017_industry_summary.xlsx'
use_etp <- FALSE
# Write the filtered scenario csv's and the mapping workbook to disk?
write_csvs <- TRUE
# 2. Library =================================================================================
library(plyr)
library(dplyr)
library(readr)
library(zeallot)
library(futile.logger)
library(magrittr)
library(ggplot2)
library(openxlsx)
library(reshape2)
library(tidyr)
# 3. Functions ===============================================================================
# 3.1 Data loading functions =================================================================
get.SR15.meta <- function() {
  # Load the SR15 scenario metadata ('meta' sheet of f_sr15_meta) and key it
  # for merging with the scenario output: a "Model-Scenario" column (model and
  # scenario joined by "-") is appended, then the first three columns are
  # dropped.
  raw_meta <- read.xlsx(f_sr15_meta, sheet = 'meta')
  keyed_meta <- mutate(raw_meta, `Model-Scenario` = paste(model, scenario, sep = '-'))
  select(keyed_meta, -c(1:3))
}
get.all.data <- function(refresh, all_regions=FALSE) {
  # Load SR15 scenario data and append a "Model-Scenario" key column.
  #
  # Args:
  #   refresh (bool): if FALSE, reuse an sr15_all_data /
  #     sr15_all_regions_all_data variable already visible on the search
  #     path (if any); if TRUE (or no such variable exists), read fresh
  #     from the source xlsx.
  #   all_regions (bool): if TRUE, read the all-regions workbook
  #     (f_sr15_all_regions); otherwise read the world workbook (f_sr15).
  #
  # Returns: data.frame of scenario data with a trailing "Model-Scenario"
  #   column (Model and Scenario joined by "-") used for metadata merges.
  # Helper: make model-scenario column to match with scenarios in the
  # SBTi scenario file (shared by both branches).
  add.model.scenario <- function(df) {
    sr15_ms <- paste(df$Model, df$Scenario, sep='-')
    df <- cbind(df, sr15_ms)
    colnames(df)[ncol(df)] <- "Model-Scenario"
    df
  }
  if(!all_regions) {
    # `||` (scalar, short-circuiting) instead of `|`: the condition is a
    # single logical, and since R 4.3 `||` errors on length > 1 inputs,
    # which surfaces accidental vector conditions here.
    if(!exists("sr15_all_data") || refresh) {
      flog.debug('Pulling SR15 world data from Excel')
      sr15_all_data <- add.model.scenario(read.xlsx(f_sr15, 2))
    } else {flog.debug('Using existing sr_15_all_data var in environment')}
    return(sr15_all_data)
  } else {
    if(!exists("sr15_all_regions_all_data") || refresh) {
      # f_sr15_all_regions is commented out in the preamble; fail with a
      # clear message rather than an opaque "object not found" error.
      if(!exists("f_sr15_all_regions")) {
        stop("f_sr15_all_regions is not defined; uncomment its path in the preamble before calling get.all.data(all_regions = TRUE)",
             call. = FALSE)
      }
      flog.debug('Pulling SR15 all regions data from Excel')
      sr15_all_regions_all_data <- add.model.scenario(read.xlsx(f_sr15_all_regions, 2))
    } else{flog.debug('Using existing sr_15_all_regions_all_data var in environment')}
    return(sr15_all_regions_all_data)
  }
}
get.ETP.data <- function() {
  # Read the ETP 2017 industry summary ('WORLD' sheet of f_etp) and slice out
  # the fixed cell block holding the industry-production series
  # (rows 103:109, columns 15:24). Columns are relabelled as a variable name
  # followed by the years 2014 and 2025..2060 in 5-year steps.
  world_sheet <- read.xlsx(f_etp, sheet = 'WORLD', skipEmptyRows = FALSE, skipEmptyCols = FALSE)
  etp_block <- world_sheet[103:109, 15:24]
  colnames(etp_block) <- c('Variable', '2014', seq(2025, 2060, 5))
  etp_block
}
# 3.2 Gap-filler functions ===================================================================
interp.all <- function(df, id.cols=5, cdata_yrs_out=FALSE) {
# Returns a dataframe with one column per year between 2000 and 2100
# Where data is interpolated linearly based on spacing of available
# SR15 data per model-scenario. (I.e., works with 5-year or 10-year
# data or mixed.)
# Args:
# * df (dataframe): non-interpolated data -- generally from filter.IPCC
# * id.cols (num vector): numb vector of leading columns to be kept as "ID" columns in
# returned dataframe
# cdata_yrs_out (bool): if TRUE, returns a list where the second item is
# a dataframe of "keystone years," i.e., for each row, which years are
# reported data and which are interpolated. If FALSE, function just returns
# df of interpolated data
# int_df holds interpolated values; cd_out flags (1/0) which cells were
# reported vs interpolated. One column per year 2000..2100.
int_df <- matrix(0, nrow(df), length(c(2000:2100)))
cd_out <- matrix(0, nrow(df), length(c(2000:2100)))
for(i in 1:nrow(df)) {
# Year columns that carry reported (non-NA) data for this row; non-year
# (ID) column names are filtered out below by the numeric conversion.
data_yrs <- colnames(df[i,])[!is.na(df[i,])]
# NOTE(review): bare type.convert() without `as.is` warns on recent R
# versions; consider type.convert(x, as.is = TRUE).
data_yrs <- unlist(lapply(data_yrs, function(x) {
x0 <- type.convert(x)
if(is.numeric(x0)) {return(x0)} else {return(NULL)}
}))
cdata_yrs <- as.character(data_yrs)
for(k in 2000:2100) {
yr_col <- as.character(k)
if(yr_col %in% cdata_yrs) {
# Reported year: copy through and flag as real data
int_df[i, k-1999] <- df[i, yr_col]
cd_out[i, k-1999] <- 1
} else {
# Interpolated year: find nearest reported years on either side
back_yr <- data_yrs[data_yrs < k][
length(data_yrs[data_yrs < k])]
forward_yr <- data_yrs[data_yrs > k][1]
n_yrs <- forward_yr - back_yr
int_yr <- k - back_yr
# Linear interpolation between the two bracketing reported values
int_data <- (
df[i, as.character(back_yr)] +
(int_yr/n_yrs) * (df[i, as.character(forward_yr)] - df[i, as.character(back_yr)]))
# Debug guard for unexpected interpolation results.
# NOTE(review): df[i] selects *column* i, not row i -- likely meant
# df[i, ] here. Debug-only, but worth fixing.
if(length(int_data) != 0 & length(int_data) != 1) {
print(df[i])
print(int_data)
}
# Outside the reported range (no bracketing year) -> NA
if(length(int_data) == 0) {
int_df[i, k-1999] <- NA
} else{
int_df[i, k-1999] <- int_data}
}
}
}
# Re-attach the leading ID columns and name the year columns
if(id.cols >1) {
int_df <- bind_cols(df[,c(1:id.cols)], as.data.frame(int_df))
} else {
int_df <- cbind(df[,1], as.data.frame(int_df))
colnames(int_df)[1] <- colnames(df)[1]
}
colnames(int_df)[c((id.cols+1):ncol(int_df))] <- sapply(c(2000:2100), as.character)
if(cdata_yrs_out) {
return(list(int_df, cd_out))
} else {
return(int_df)
}
}
calculate.AFOLU.cs <- function(df) {
  # Add a column estimating land use-related carbon sequestration due to
  # poor reporting of 'Carbon Sequestration|Land Use' in SR15 database.
  # The estimate is the magnitude of net-negative AFOLU CO2:
  # -Emissions|CO2|AFOLU where that value is negative, and 0 otherwise --
  # including when the AFOLU value is NA, matching case_when's fall-through
  # to the TRUE ~ 0 default in the previous implementation.
  afolu <- df[['Emissions|CO2|AFOLU']]
  df[['Carbon Sequestration|Land Use2']] <- ifelse(!is.na(afolu) & afolu < 0, -afolu, 0)
  df
}
calculate.CDR <- function(df) {
  # Return a df with a CDR variable, which is a sum of all CDR categories present in
  # the scenario. Note that this does not include CO2 captured at the point of
  # emissions, e.g., fossil CCS; it is strictly focused on net negative CO2 via the
  # definition in SR15 Figure 2.10.
  #
  # Side effect on the returned df (preserved from the original): NAs in the
  # individual CDR columns are replaced with 0, so missing categories count
  # as zero contribution.
  CDR_subs <- c('CCS|Biomass', 'Land Use2', 'Feedstocks',
                'Direct Air Capture', 'Enhanced Weathering', 'Other')
  all_CDR <- generate.varnames('Carbon Sequestration', CDR_subs, FALSE)
  # Keep only the CDR columns actually present in this data set
  all_CDR <- all_CDR[all_CDR %in% colnames(df)]
  df[, all_CDR][is.na(df[, all_CDR])] <- 0
  # Sum across the CDR columns with rowSums instead of apply(df, 1, ...):
  # apply() coerces the *entire* data.frame to a character matrix (formatting
  # the numbers and risking precision loss) before converting back with
  # as.numeric(). Coerce just the CDR columns to numeric and sum them.
  cdr_vals <- df[, all_CDR, drop = FALSE]
  cdr_vals[] <- lapply(cdr_vals, as.numeric)
  df$cdr <- rowSums(cdr_vals)
  return(df)
}
calculate.intensity.vars <- function(df, use_etp) {
# Append emissions-intensity indicators (emissions per unit of activity).
# Args:
#   df: interpolated data with one column per SR15 variable (plus merged
#       ETP production columns when use_etp is TRUE)
#   use_etp (bool): if TRUE, also compute industrial intensities whose
#       denominators (Cement, Crude steel, aluminium) come from ETP 2017
# Note: divisions can produce Inf/NaN where a denominator is 0 or missing;
# extreme values are screened later in the slope-calculation step.
df_out <- (df %>% mutate(
INT.emKyoto_gdp=`Emissions|Kyoto Gases`/`GDP|PPP`,
INT.emCO2EI_PE=`Emissions|CO2|Energy and Industrial Processes`/`Primary Energy`,
INT.emCO2Elec_elecGen = `Emissions|CO2|Energy|Supply|Electricity`/`Secondary Energy|Electricity`,
INT.emCO2EI_elecGen = `Emissions|CO2|Energy and Industrial Processes`/`Secondary Energy|Electricity`,
INT.emCO2Transport_gdp = `Emissions|CO2|Energy|Demand|Transportation`/`GDP|PPP`
))
if(use_etp) {
df_out <- (df_out %>% mutate(
INT.emCO2EI_cement = `Emissions|CO2|Energy and Industrial Processes`/`Cement`,
INT.emCO2IndDemand_cement = `Emissions|CO2|Energy|Demand|Industry`/`Cement`,
INT.emCO2EI_steel = `Emissions|CO2|Energy and Industrial Processes`/`Crude steel`,
INT.emCO2IndDemand_steel = `Emissions|CO2|Energy|Demand|Industry`/`Crude steel`,
INT.emCO2EI_aluminum = `Emissions|CO2|Energy and Industrial Processes`/`Total aluminium (primary and secondary)`,
INT.emCO2IndDemand_aluminum = `Emissions|CO2|Energy|Demand|Industry`/`Total aluminium (primary and secondary)`
))
}
return(df_out)
}
# 3.3 New meta-data ==========================================================================
calculate.new.meta <- function(df, slope_vars, slope_year_pairs) {
# Derive per-scenario metadata columns from the interpolated data:
# cumulative CDR, maximum annual CDR, and the year of maximum Kyoto /
# energy-and-industrial CO2 emissions.
# NOTE(review): slope_vars and slope_year_pairs are never used in the body;
# the pipeline calls calculate.new.meta(df) with one argument, which only
# works because unused arguments are never evaluated. Confirm whether they
# can be dropped from the signature.
df[, c("cdr|cumulative")] <- NA
for(si in unique(df$`Model-Scenario`)) {
# Cumulative and peak CDR across all years of this scenario
df[df$`Model-Scenario` == si, "cdr|cumulative"] <- (
sum(df[df$`Model-Scenario` == si, "cdr"], na.rm = TRUE))
df[df$`Model-Scenario` == si, "cdr|max"] <- (
max(df[df$`Model-Scenario` == si, "cdr"], na.rm = TRUE))
# The 2030 value is used as a proxy for "is this series reported at all":
# if it is NA, the peak year is set to NA instead of computed.
# NOTE(review): a series with data elsewhere but NA in 2030 would be
# skipped by this check -- confirm that is acceptable.
if(is.na(df[df$`Model-Scenario` == si & df$Year == 2030, "Emissions|Kyoto Gases"])) {
df[df$`Model-Scenario` == si, "Year of max Kyoto emissions"] <- NA
} else {
# Year(s) where Kyoto emissions hit their maximum for this scenario
df[df$`Model-Scenario` == si, "Year of max Kyoto emissions"] <- (
df[df$`Model-Scenario` == si, "Year"][
df[df$`Model-Scenario` == si, "Emissions|Kyoto Gases"] == max(
df[df$`Model-Scenario` == si, "Emissions|Kyoto Gases"], na.rm=T
) & !is.na(df[df$`Model-Scenario` == si, "Emissions|Kyoto Gases"])])
}
if(is.na(df[df$`Model-Scenario` == si & df$Year == 2030, "Emissions|CO2|Energy and Industrial Processes"])) {
df[df$`Model-Scenario` == si, "Year of max EI CO2 emissions"] <- NA
} else {
# Same computation for energy-and-industrial CO2
df[df$`Model-Scenario` == si, "Year of max EI CO2 emissions"] <- (
df[df$`Model-Scenario` == si, "Year"][
df[df$`Model-Scenario` == si, "Emissions|CO2|Energy and Industrial Processes"] == max(
df[df$`Model-Scenario` == si, "Emissions|CO2|Energy and Industrial Processes"], na.rm=T
) & !is.na(df[df$`Model-Scenario` == si, "Emissions|CO2|Energy and Industrial Processes"])])
}
}
return(df)
}
# 3.4 Utility functions ======================================================================
generate.varnames <- function(var0, subvars, include.var0=TRUE) {
  # Returns a character vector of IPCC SR15 variables from a nested category.
  # Args
  #   var0 (character): 'Parent' var, e.g. 'Emissions|CO2'
  #   subvars (chr vector): 'Child' vars, e.g., c('Energy|Supply', 'Energy|Demand')
  #   include.var0 (bool): whether or not to include var0 w/o any subvars in return
  #
  # paste0() is vectorized, so the sapply() loop is unnecessary. This also
  # fixes an edge case: sapply(character(0), ...) returns an empty *list*,
  # which made the original return a list rather than a character vector
  # when subvars was empty.
  full_subvars <- paste0(var0, '|', subvars)
  if (include.var0) {
    c(var0, full_subvars)
  } else {
    full_subvars
  }
}
# 4. Script ==================================================================================
#___4.1 Logging settings =====================================================================
# Show DEBUG-level messages from futile.logger (assignment just suppresses
# the return value from printing)
null.result <- flog.threshold(DEBUG, name="ROOT")
#___4.2 Variable lists =======================================================================
#___4.2.1 Emissions variables to include in output dataframe =================================
em0 <- 'Emissions|CO2'
em_subs <- c('Energy and Industrial Processes', 'Energy', 'Industrial Processes',
'Energy|Supply', 'Energy|Demand', 'Energy|Demand|Industry', 'Energy|Demand|Transportation',
'Energy|Supply|Electricity', 'AFOLU')
#___4.2.2 Carbon seq variables to include in output dataframe ================================
cs0 <- 'Carbon Sequestration'
cs_subs <- c('CCS|Biomass', 'CCS|Biomass|Energy', 'CCS|Biomass|Energy|Supply',
'CCS|Biomass|Energy|Supply|Electricity', 'CCS|Fossil', 'Land Use',
'Feedstocks', 'Direct Air Capture', 'Enhanced Weathering', 'Other')
#___4.2.3 Any variables still missing ========================================================
other_vars <- c('Primary Energy', 'Secondary Energy|Electricity',
'Emissions|Kyoto Gases', 'Emissions|CH4|AFOLU', 'Emissions|N2O|AFOLU', 'Price|Carbon',
'Carbon Sequestration|CCS|Biomass|Energy|Supply|Electricity', 'GDP|PPP')
#___4.3 Prepare arguments for get.scenario.data ==============================================
# NOTE(review): header mentions get.scenario.data, which is not defined in
# this file -- the names feed get.all.data()/filter below.
all0 <- list(
em0, cs0
)
all_subs <- list(
em_subs, cs_subs
)
# Expand each parent/child pair into full SR15 variable names, then append
# the stand-alone variables
all_varnames <- c((lapply(c(1:length(all0)),
function(X) generate.varnames(all0[[X]], all_subs[[X]]))
%>% unlist()),
other_vars)
flog.debug('Looking for %s variables', length(all_varnames))
#____4.4 Pull and transform data according to Andres specs ===================================
# Get all SR15 data and filter to only the variables selected
ss0 <- get.all.data(TRUE) %>% filter(Variable %in% all_varnames)
# Interpolate and transform SR15 data
flog.debug('Interpolating SR15 data')
# Long-then-wide reshape: one row per Model/Scenario/Year, one column per
# variable, with a Model-Scenario key moved to the first column
interp_sr15_data <- (ss0 %>% interp.all(id.cols=5) %>%
as.data.frame() %>%
melt(id = c(1:5), variable.name='Year') %>%
dcast(`Model` + `Scenario` + `Year` ~ `Variable`, value.var='value') %>%
mutate(`Model-Scenario`=paste(`Model`, `Scenario`, sep='-'),
Year = as.numeric(as.character(Year))
) %>%
select(c(ncol(.), 1:(ncol(.)-1)))
)
# Get ETP data, interpolate, and transform to same structure as SR15
if(use_etp) {
flog.debug('Pulling ETP data')
interp_etp_data <- (get.ETP.data()
%>% interp.all(id.cols=1)
%>% melt(id.vars='Variable', variable.name = 'Year')
%>% dcast(Year ~ Variable)
%>% mutate(
`Model-Scenario`='ETP-2DS',
`Model`='ETP',
`Scenario`='2DS',
Year = as.numeric(as.character(Year))
)
%>% select(c((ncol(.)-2):ncol(.), 1:(ncol(.)-2)))
%>% rename(Cement = 'Cement ')
%>% arrange(Year))
# Now merge them. We are dropping "Model-Scenario" from the ETP dataframe because
# it is being combined with SR15 scenario data to estimate intensity of certain
# industrial sectors
# (the 'Cement ' source column name carries a trailing space -- rename above)
interp_data <- (interp_sr15_data
%>% merge(interp_etp_data[, -c(1:3)], by='Year'))
} else {
interp_data <- (interp_sr15_data)
}
flog.debug('Calculating new vars')
# Derived columns: estimated land-use sequestration, total CDR, intensities
sr15_out <- (interp_data
%>% calculate.AFOLU.cs()
%>% calculate.CDR()
%>% calculate.intensity.vars(use_etp))
# ---- 4.4 (cont.) Build the combined per-scenario metadata table ----
# Official SR15 metadata, keyed by Model-Scenario
sr15_meta0 <- get.SR15.meta()
# Derived per-scenario indicators (cumulative/max CDR, peak-emission years)
sr15_new_meta <- (sr15_out
%>% calculate.new.meta())
# Columns added by calculate.new.meta (i.e. not already in sr15_out)
meta_cols_new <- colnames(sr15_new_meta)[!colnames(sr15_new_meta) %in% colnames(sr15_out)]
# One row per scenario: new indicators merged with the official metadata
sr15_meta <- (sr15_new_meta
%>% select(c('Model-Scenario', meta_cols_new))
%>% unique()
%>% merge(sr15_meta0, by='Model-Scenario'))
#____4.5 Calculate slopes of each variable and transform to Chris specs ======================
keep_years <- seq(2020, 2050, 5)
# Wide layout: one row per Model/Scenario/variable, one column per year
sr15_var_long <- (sr15_out %>% filter(Year %in% keep_years)
%>% melt(id.vars=c(1:4))
%>% mutate(value=as.numeric(value))
%>% dcast(Model + Scenario + `Model-Scenario` + variable ~ Year)
)
# Indicate a base year and final year to calculate all slopes
slope_byr <- 2020
slope_yrf_all <-seq(2025, 2050, 5)
# Slopes calculated linearly and compound, generate column names
# (slope5..slope30 and slopeCA5..slopeCA30)
slope_colsLinear <- paste0('slope', sapply(slope_yrf_all, function(X) X-slope_byr))
slope_colsCA <- paste0('slopeCA', sapply(slope_yrf_all, function(X) X-slope_byr))
all_slope_cols <- list(slope_colsLinear, slope_colsCA)
sr15_var_long[, unlist(all_slope_cols)] <- NA
# Pull all data for base year of slope calculation
byr_data <- sr15_var_long[, as.character(slope_byr)]
# Loop through each final year of slope calculation
for(i in c(1:length(slope_colsLinear))) {
# Get column names to be filled (zeallot multi-assign)
c(colL, colCA) %<-% c(all_slope_cols[[1]][i], all_slope_cols[[2]][i])
# Final year of slope calculation in loop
slope_yrf <- slope_yrf_all[i]
# Pull data for final year of slope calculation in loop
yrf_data <- sr15_var_long[, as.character(slope_yrf)]
# Calculate linear reduction and compound reduction (% change per year)
sr15_var_long[, colL] <- 100 * (yrf_data - byr_data)/byr_data/(slope_yrf-slope_byr)
sr15_var_long[, colCA] <- 100 * ((yrf_data/byr_data)^(1/(slope_yrf-slope_byr))-1)
# Replace with NA if infinite or over 1000% growth
# NOTE(review): both masks below test the *linear* column (colL), and the
# second line runs after colL's offending entries were already set to NA,
# so its condition evaluates to NA there and assigns nothing -- i.e. colCA
# is effectively never masked. Likely a copy-paste slip; the colCA mask
# probably should test colCA's own values (and be computed before colL is
# overwritten). Confirm before changing.
sr15_var_long[, colL][is.infinite(sr15_var_long[, colL]) | sr15_var_long[, colL] > 1000] <- NA
sr15_var_long[, colCA][is.infinite(sr15_var_long[, colL]) | sr15_var_long[, colL] > 1000] <- NA
}
#____4.6 Filter based on different combinations of parameters ================================
sr15_prefilter <- sr15_var_long %>% merge(sr15_meta, by='Model-Scenario')
# Latest allowed peak year of emissions
latest_peak <- c(2100, 2020, 2025, 2030)
# Which mitigation scenarios are included by latest peak year filter?
applied_to <- list(c('Below 1.5C' , '1.5C low overshoot', '1.5C high overshoot', 'Lower 2C', 'Higher 2C'),
c('Below 1.5C' , '1.5C low overshoot', '1.5C high overshoot', 'Lower 2C'),
c('Below 1.5C', '1.5C low overshoot', '1.5C high overshoot')
)
# Which emissions variable? All Kyoto GHGs or just Energy & Industrial CO2?
which_peak_var <- c("Year of max Kyoto emissions", "Year of max EI CO2 emissions")
# Min annual CO2 (or max annual CDR, negative) (GT CO2/yr)
min_co2 <- c(-1000, -20, -15, -10)
which_cdr_var <- c("cdr|max", "minimum.net.CO2.emissions.(Gt.CO2/yr)")
# Data collected for output df
nvariations <- (length(latest_peak)*length(applied_to)*length(which_peak_var)*
length(min_co2)*length(which_cdr_var))
filtered_dfs <- vector(mode='list', length = nvariations)
# Preallocate one record vector per mapping column (zeallot multi-assign)
c(peak_yrv, peak_varv, appliedv, cdr_limv, cdr_varv, nscenariosv) %<-% (
lapply(c(1:6), function(X) vector(mode='character', length=nvariations)))
counter <- 0
for(pi in which_peak_var){
for(ci in which_cdr_var) {
for(ai in applied_to) {
for(peak_yr in latest_peak) {
for(cdr_val in min_co2) {
counter <- counter + 1
# Human-readable label for which categories the peak-year filter covers
if(length(ai) > 4) {
pscenarios <- '1.5C and 2C'
} else if(length(ai) > 3){
pscenarios <- '1.5C and lower 2C'} else {
pscenarios <- '1.5C'
}
set_name <- paste0(pi,' for ', pscenarios, ' scenarios: ', peak_yr,
'. CDR less than ', -cdr_val, ' GT CO2/yr based on ', ci)
peak_yrv[counter] <- peak_yr
peak_varv[counter] <- pi
appliedv[counter] <- pscenarios
# NOTE(review): these two look swapped relative to their names --
# cdr_limv records the CDR *variable name* (ci) and cdr_varv records
# the numeric *limit* (cdr_val). The mapping workbook inherits this;
# confirm intent before relying on those columns.
cdr_limv[counter] <- ci
cdr_varv[counter] <- cdr_val
# Peak-year constraint applies only to categories in ai; the CDR cap
# applies to all scenarios. The *1000 suggests cdr|max is in Mt CO2/yr
# while cdr_val is in Gt -- confirm units.
if(ci == 'cdr|max') {
filtered_dfi <- sr15_prefilter %>% filter(
((!! rlang::sym(pi) <= peak_yr) & (`cdr|max` <= -cdr_val*1000) & (category %in% ai)) |
((`cdr|max` <= -cdr_val*1000) & !category %in% ai))
} else {
filtered_dfi <- sr15_prefilter %>% filter(
((!! rlang::sym(pi) <= peak_yr) & (`minimum.net.CO2.emissions.(Gt.CO2/yr)` >= cdr_val) & (category %in% ai)) |
((`minimum.net.CO2.emissions.(Gt.CO2/yr)` >= cdr_val) & (!category %in% ai)))
}
filtered_dfs[[counter]] <- filtered_dfi
names(filtered_dfs)[counter] <- set_name
# print(length(unique(filtered_dfi$`Model-Scenario`)))
nscenariosv[counter] <- length(unique(filtered_dfi$`Model-Scenario`))
# NOTE(review): readr deprecated write_excel_csv(path=) in favor of
# file= (readr >= 1.4); still works with a warning.
if(write_csvs) {
write_excel_csv(filtered_dfi, path=paste0('TROPICS-scenario_data_csv/TROPICS_dataset-', counter, '.csv'))
}
}
}
}
}
}
# Summary sheet mapping each numbered csv to its filter settings
mapping <- cbind(names(filtered_dfs), peak_yrv, peak_varv, appliedv, cdr_limv,
cdr_varv, nscenariosv, seq(1, length(filtered_dfs)))
if(write_csvs) {
write.xlsx(mapping, 'TROPICS-scenario_data_csv/TROPICS_dataset_mapping.xlsx')
}
|
# --- GONAD HISTOLOGY -----#
# Read in histology data. Categorical columns are kept as factors so they can
# be cross-tabulated below.
Histology <- read.csv("Data/2017-Oly-Histo-Results-REDO-redostage5.csv", header=T, stringsAsFactors = T, na.strings = "NA")
Histology$TEMPERATURE <- as.factor(Histology$TEMPERATURE) #Convert a few columns to factors
Histology$Dominant.Stage <- as.factor(Histology$Dominant.Stage)
# --------------- QUESTION 1. are there gonad differences between pre-OA treatment temperature groups (chilled, not chilled).
Histo.pre <- droplevels(subset(Histology, SAMPLING == "FEBRUARY")) #subset only pre-treatment data (sampled february, 2017)
CT.SEX.pre <- table(Histo.pre$TREATMENT, Histo.pre$SEX) #create table of gonad sex by chilled/not chilled (pre-OA)
CT.domstage.pre <- table(Histo.pre$TREATMENT, Histo.pre$Dominant.Stage) #create table of gonad dominant stage by temperature (pre-OA)
CT.secstage.pre <- table(Histo.pre$TREATMENT, Histo.pre$Secondary.Stage)#create table of gonad secondary stage by temperature (pre-OA)
# Compare the sex and stages after chilling / not chilling (pre-OA) via Fisher's test due to low sample size (<200)
fisher.test(CT.SEX.pre) #p=0.1903 - sex not different
fisher.test(CT.domstage.pre) #p=7.413e-05 - stages different between temperature. 10C more advanced.
fisher.test(CT.secstage.pre) #p=0.4291 - secondary stages not different
# Apply second test on dominant stage using chi-square test (simulated p-value
# because of small expected cell counts)
print(Pre.domstage.chisq <- chisq.test(CT.domstage.pre, simulate.p.value = TRUE)) #X-squared = 23.962, df = NA, p-value = 0.0004998 - VERY DIFFERENT
# ---------------> QUESTION 2. Are there gonad differences between pH treatments? Analyze temp groups separately
# Note: at this point PH has no "PRE" level yet (it is added before plotting
# below); PH!="PRE" also drops pre-treatment rows because their PH is NA and
# subset() drops rows whose condition is NA.
# NOTE(review): the same subset() expression is repeated for each table; a
# small helper would remove the duplication.
# --- compare chilled-amb pH to chilled-low pH
CT.sex.6.pH <- table(subset(Histology, PH!="PRE" & TEMPERATURE==6)$PH, subset(Histology, PH!="PRE" & TEMPERATURE==6)$SEX)
CT.domstage.6.pH <- table(subset(Histology, PH!="PRE" & TEMPERATURE==6)$PH, subset(Histology, PH!="PRE" & TEMPERATURE==6)$Dominant.Stage)
CT.secstage.6.pH <- table(subset(Histology, PH!="PRE" & TEMPERATURE==6)$PH, subset(Histology, PH!="PRE" & TEMPERATURE==6)$Secondary.Stage)
fisher.test(CT.sex.6.pH) #0.2398 No diff.
fisher.test(CT.domstage.6.pH) #0.008287 Yes, difference in dominant gonad stage between pH treatment
fisher.test(CT.secstage.6.pH) #0.9157 No diff.
# Apply second test on dominant stage using chi-square test
print(pH.domstage.chisq <- chisq.test(CT.domstage.6.pH, simulate.p.value = TRUE)) #X-squared = 14.778, df = NA, p-value = 0.004498
# --- compare not chilled-amb pH to not chilled-low pH
CT.sex.10.pH <- table(subset(Histology, PH!="PRE" & TEMPERATURE==10)$PH, subset(Histology, PH!="PRE" & TEMPERATURE==10)$SEX)
CT.domstage.10.pH <- table(subset(Histology, PH!="PRE" & TEMPERATURE==10)$PH, subset(Histology, PH!="PRE" & TEMPERATURE==10)$Dominant.Stage)
CT.secstage.10.pH <- table(subset(Histology, PH!="PRE" & TEMPERATURE==10)$PH, subset(Histology, PH!="PRE" & TEMPERATURE==10)$Secondary.Stage)
fisher.test(CT.sex.10.pH) #0.8848 - no diff between sex ratios.
fisher.test(CT.domstage.10.pH) #0.08 - no diff in dominant stage
fisher.test(CT.secstage.10.pH) #0.04915 - almost but not quite diff in secondary stage
# ---------------> QUESTION 3. Did gonads develop/regress during pH exposure? AKA was there a change from before to after pH treatment? Assess chilled/not chilled groups separately.
# Pre-treatment rows enter each comparison via is.na(PH) (their PH is NA at
# this point); SAMPLING distinguishes pre vs post rows in each table.
# compare chilled pre-pH to chilled amb pH (chilled = 6C)
CT.sex.6.amb <- table(subset(Histology, TEMPERATURE==6 & (PH=="AMBIENT" | is.na(PH)))$SAMPLING, subset(Histology, TEMPERATURE==6 & (PH=="AMBIENT" | is.na(PH)))$SEX)
CT.domstage.6.amb <- table(subset(Histology, TEMPERATURE==6 & (PH=="AMBIENT" | is.na(PH)))$SAMPLING, subset(Histology, TEMPERATURE==6 & (PH=="AMBIENT" | is.na(PH)))$Dominant.Stage)
CT.secstage.6.amb <- table(subset(Histology, TEMPERATURE==6 & (PH=="AMBIENT" | is.na(PH)))$SAMPLING, subset(Histology, TEMPERATURE==6 & (PH=="AMBIENT" | is.na(PH)))$Secondary.Stage)
fisher.test(CT.sex.6.amb) #0.08151, not diff.
fisher.test(CT.domstage.6.amb) #1.122e-06, different <----
fisher.test(CT.secstage.6.amb) #0.03731, different
# compare chilled pre-pH to chilled low pH
CT.sex.6.low <- table(subset(Histology, TEMPERATURE==6 & (PH=="LOW" | is.na(PH)))$SAMPLING, subset(Histology, TEMPERATURE==6 & (PH=="LOW" | is.na(PH)))$SEX)
CT.domstage.6.low <- table(subset(Histology, TEMPERATURE==6 & (PH=="LOW" | is.na(PH)))$SAMPLING, subset(Histology, TEMPERATURE==6 & (PH=="LOW" | is.na(PH)))$Dominant.Stage)
CT.secstage.6.low <- table(subset(Histology, TEMPERATURE==6 & (PH=="LOW" | is.na(PH)))$SAMPLING, subset(Histology, TEMPERATURE==6 & (PH=="LOW" | is.na(PH)))$Secondary.Stage)
fisher.test(CT.sex.6.low) #0.5861, not diff.
fisher.test(CT.domstage.6.low) #0.01797, different <----
fisher.test(CT.secstage.6.low) #0.081, not diff.
# compare not chilled pre-pH to not chilled amb pH (not chilled = 10C)
CT.sex.10.amb <- table(subset(Histology, TEMPERATURE==10 & (PH=="AMBIENT" | is.na(PH)))$SAMPLING, subset(Histology, TEMPERATURE==10 & (PH=="AMBIENT" | is.na(PH)))$SEX)
CT.domstage.10.amb <- table(subset(Histology, TEMPERATURE==10 & (PH=="AMBIENT" | is.na(PH)))$SAMPLING, subset(Histology, TEMPERATURE==10 & (PH=="AMBIENT" | is.na(PH)))$Dominant.Stage)
CT.secstage.10.amb <- table(subset(Histology, TEMPERATURE==10 & (PH=="AMBIENT" | is.na(PH)))$SAMPLING, subset(Histology, TEMPERATURE==10 & (PH=="AMBIENT" | is.na(PH)))$Secondary.Stage)
fisher.test(CT.sex.10.amb) #0.2929, not diff.
fisher.test(CT.domstage.10.amb) #0.1683, not diff.
fisher.test(CT.secstage.10.amb) #0.4686, not diff.
# compare not chilled pre-pH to not chilled low pH
CT.sex.10.low <- table(subset(Histology, TEMPERATURE==10 & (PH=="LOW" | is.na(PH)))$SAMPLING, subset(Histology, TEMPERATURE==10 & (PH=="LOW" | is.na(PH)))$SEX)
CT.domstage.10.low <- table(subset(Histology, TEMPERATURE==10 & (PH=="LOW" | is.na(PH)))$SAMPLING, subset(Histology, TEMPERATURE==10 & (PH=="LOW" | is.na(PH)))$Dominant.Stage)
CT.secstage.10.low <- table(subset(Histology, TEMPERATURE==10 & (PH=="LOW" | is.na(PH)))$SAMPLING, subset(Histology, TEMPERATURE==10 & (PH=="LOW" | is.na(PH)))$Secondary.Stage)
fisher.test(CT.sex.10.low) #0.5202, not diff.
fisher.test(CT.domstage.10.low) #0.08615, not diff.
fisher.test(CT.secstage.10.low) #0.06494, not diff.
# 2 bar plots - chilled (pre, amb, low), and not chilled (pre, amb, low)
# tables for stacked bar plots
# Recode the pre-treatment rows (PH == NA) as an explicit "PRE" level and make
# it the reference so it plots first.
levels(Histology$PH) <- c(levels(Histology$PH),"PRE")
Histology$PH <- relevel(Histology$PH, ref="PRE")
Histology[is.na(Histology$PH),"PH"] <- "PRE"
CT.domstage.6 <- table(subset(Histology, TEMPERATURE==6)$PH, subset(Histology, TEMPERATURE==6)$Dominant.Stage)
CT.domstage.10 <- table(subset(Histology, TEMPERATURE==10)$PH, subset(Histology, TEMPERATURE==10)$Dominant.Stage)
colnames(CT.domstage.6) <- c("Undifferentiated (0)", "Early (1)", "Advanced (2)", "Ripe (3)", "Partially Spawned (4)", "Regressing (5)")
colnames(CT.domstage.10) <- c("Undifferentiated (0)", "Early (1)", "Advanced (2)", "Ripe (3)", "Partially Spawned (4)", "Regressing (5)")
# NOTE(review): the png filenames below carry no ".png" extension, so the
# files are written without one -- likely meant "...-stage.png".
png(filename = "Results/Gonad-barplot-chilled-stage", height = 750, width = 420)
par(mfrow=c(1, 1), mar=c(5, 5, 4, 2))
print(chilled.stage <- barplot(t(prop.table(CT.domstage.6, 1)), main="A) Stage, pre- & post-pH treatment\nchilled group (6C)", xlab="pH Treatment", ylab="% Sampled", las=1, col=c("#E2E6BD", "#EAAB28", "#E78A38","#D33F6A", "#DF6753", "lightsalmon"), cex.main=1.5, cex.lab=1.5, cex.axis = 1.2, cex.names = 1.2, legend.text = F, cex=1.5))
dev.off()
png(filename = "Results/Gonad-barplot-notchilled-stage", height = 750, width = 420)
par(mfrow=c(1, 1), mar=c(5, 5, 4, 2))
print(notchilled.stage <- barplot(t(prop.table(CT.domstage.10,1)), main="B) Stage, pre- & post-pH treatment\nnot chilled (10C)", xlab="pH Treatment", ylab="% Sampled", las=1, col=c("#E2E6BD", "#EAAB28", "#E78A38","#D33F6A", "#DF6753", "lightsalmon"), cex.main=1.5, cex.lab=1.5, cex.axis = 1.2, cex.names = 1.2, legend.text = F, cex=1.5))
dev.off()
# to plot this one with legend!
# png(filename = "Results/Gonad-barplot-notchilled-stage", height = 750, width = 650)
# par(mfrow=c(1, 1), mar=c(5, 5, 4, 15))
# print(notchilled.stage <- barplot(t(prop.table(CT.domstage.10,1)), main="B) Stage, pre- & post-pH treatment\nnot chilled (10C)", xlab="pH Treatment", ylab="% Sampled", las=1, col=c("#E2E6BD", "#EAAB28", "#E78A38","#D33F6A", "#DF6753", "lightsalmon"), cex.main=1.5, cex.lab=1.5, cex.axis = 1.2, cex.names = 1.2, legend.text = TRUE, args.legend = list(x = "topright", bty = "n", inset=c(-0.58, 0), title="Gonad Stage", cex=1.5)))
# dev.off()
# Conclusion: chilled group gonad developed less in low pH treatment. Not chilled group was already developed prior to pH treatment, no significant development or resorption occurred during pH treatment.
# ---------------> Summary statistics
100*round(prop.table(summary(Histology$SEX)), 3) # Percent of each sex (all oysters)
length(Histology$SEX) # Number of oysters sampled for histology
###-------------- LARVAL RELEASE (FECUNDITY) --------------###
# NOTE(review): `larvae`, `spawning_group_total`, and `spawning_group_sum`
# are not created anywhere in this file -- presumably loaded/built by an
# earlier script. Confirm the expected upstream source.
sum(aggregate(broodstock ~ Spawning.Group, larvae, mean)$broodstock) #number of broodstock total
summarise(larvae, total.by.date = sum(total.released)) #total larvae released
aggregate(Tot.Larvae ~ pH + Temperature, larvae, sum, na.rm=TRUE) #By pH & Temperature
aggregate(Tot.Larvae ~ pH + Temperature + Population, larvae, sum, na.rm=TRUE) #By population
aggregate(Tot.Larvae ~ Spawning.Group, larvae, sum, na.rm=TRUE) #By population & treatment
nrow(subset(larvae, total.released >= 10000)) #Number of times >10k larvae were collected (all grps)
median(na.omit(larvae$Tot.Larvae))
range(na.omit(larvae$Tot.Larvae))
# Comparing larval release metrics between pH groups (two-way ANOVA,
# pH x Temperature; log(x+1) where the response is right-skewed)
summary(total.released.aov <- aov(log(total.released+1) ~ pH*Temperature, data=spawning_group_total)) # <-- daily release data NO DIFF
summary(overall_Total.aov <- aov(cum.total ~ pH*Temperature, data=spawning_group_total)) # <-- cumulative release NO DIFF
summary(total.percap.aov <- aov(cum.percap ~ pH*Temperature, data=spawning_group_total)) # <-- cumulative release per oyster*cm NO DIFF
summary(mean.larvae.aov <- aov(mean.larvae ~ pH*Temperature, data=spawning_group_sum)) # <-- NO DIFF
summary(cum.total.aov <- aov(log(cum.total+1) ~ pH*Temperature, data=spawning_group_total)) # <-- NO DIFF
summary(cum.percap.aov <- aov(log(cum.percap+1) ~ pH*Temperature, data=spawning_group_total)) # <-- NO DIFF
summary(first.big.aov <- aov(first.big ~ pH*Temperature, data=spawning_group_sum)) # <-- NO DIFF
summary(max.aov <- aov(max ~ pH*Temperature, data=spawning_group_sum)) # <-- NO DIFF
summary(maxday.aov <- aov(maxday ~ pH*Temperature, data=spawning_group_sum)) # <-- Temperature diff ... remove pH
summary(maxday.aov <- aov(maxday ~ Temperature, data=spawning_group_sum)) # <-- Temperature difference
summary(release.days <- aov(release.days ~ pH*Temperature, data=spawning_group_sum)) # <-- NO DIFF
nrow(spawning_group_sum)
# inspect date of maximum larval production by temperature
aggregate(maxday ~ Temperature, spawning_group_sum, mean, na.rm=TRUE)
# scratch arithmetic: difference between the two group means printed above
152.167-143.833
aggregate(maxday ~ Temperature, spawning_group_sum, sd, na.rm=TRUE)
plot(x=spawning_group_sum$Temperature, y=spawning_group_sum$maxday, main="Calendar day of maximum larval relase\nby temperature treatment")
# Compare timing and # released
# cross correlation function --- try this later ...
# https://onlinecourses.science.psu.edu/stat510/node/74/
# NOTE(review): the bare `ccf` below just prints the function definition;
# placeholder for a future cross-correlation analysis.
ccf
# summarize data for each spawning bucket
# Per pH x Temperature group: cumulative totals, per-capita cumulative
# release, and calendar day of year, ordered by date
fecundity <- group_by(larvae, pH, Temperature) %>% mutate(cum.total=cumsum(total.released),cum.percap = cumsum(larvae.per.broodcm),CalDay = as.numeric(format(Date,"%j"))) %>% arrange(Date) %>% dplyr::select(Date,CalDay,pH,Temperature,Treatment,total.released,larvae.per.broodcm,cum.total,cum.percap)
# barplots of larvae released - low and ambient separately
# SAVE SIZE 1000W X 500H
#c("gray60", "lightsteelblue3", "gray40", "steelblue")
colors3 <- c("10-Ambient"="gray40", "10-Low"="steelblue", "6-Ambient"="gray60", "6-Low"="lightsteelblue3")
# Interactive check of treatment level order (duplicated line left as-is)
levels(fecundity$Treatment)
levels(fecundity$Treatment)
spawning.titles <- c("Warm winter temp, ambient pH", "Warm winter temp, low pH", "Cool winter temp, ambient pH", "Cool winter temp, low pH")
# save dimensions: 1000x400
# One bar plot per treatment. The x/y limits always use Treatment level [1]
# so all four panels share the same axes -- presumably intentional; confirm.
for (i in 1:4) {
print(ggplot(data=subset(fecundity, Treatment==levels(fecundity$Treatment)[i]), aes(x=Date, y=total.released)) +
geom_bar(stat="identity",width=1, position = position_dodge(width=2), fill=colors3[i], col="gray30") +
ylab("No. of larvae\n(summed across replicates)") + xlab(label=element_blank()) + ggtitle(spawning.titles[i]) + theme_minimal(base_size = 16) +
theme(plot.title = element_text(size = 16, hjust = 0, colour = "gray30"),
axis.title = element_text(size=16, colour = "gray30")) +
scale_x_date(date_breaks = "1 week",date_labels ="%b-%d",
limits=c(min=min(subset(fecundity, Treatment==levels(fecundity$Treatment)[1])$Date)-1,max=max(subset(fecundity, Treatment==levels(fecundity$Treatment)[1])$Date)+1)) +
scale_y_continuous(limits=c(min=0,max=max(subset(fecundity, Treatment==levels(fecundity$Treatment)[1])$total.released))) +
theme(panel.grid.minor = element_blank(),
panel.grid.major.x = element_blank(), panel.border = element_blank()))
}
| /Analyses/13_Stats-for-Gene-Expression-Paper.R | no_license | laurahspencer/O.lurida_Stress | R | false | false | 13,681 | r |
# --- GONAD HISTOLOGY -----#
# Read in histology data.
Histology <- read.csv("Data/2017-Oly-Histo-Results-REDO-redostage5.csv", header=T, stringsAsFactors = T, na.strings = "NA")
Histology$TEMPERATURE <- as.factor(Histology$TEMPERATURE) #Convert a few columns to factors
Histology$Dominant.Stage <- as.factor(Histology$Dominant.Stage)
# --------------- QUESITON 1. are there gonad differences between pre-OA treatment temperature groups (chilled, not chilled).
Histo.pre <- droplevels(subset(Histology, SAMPLING == "FEBRUARY")) #subset only pre-treatment data (sampled february, 2017)
CT.SEX.pre <- table(Histo.pre$TREATMENT, Histo.pre$SEX) #create table of gonad sex by chilled/not chilled (pre-OA)
CT.domstage.pre <- table(Histo.pre$TREATMENT, Histo.pre$Dominant.Stage) #create table of gonad dominant stage by temperature (pre-OA)
CT.secstage.pre <- table(Histo.pre$TREATMENT, Histo.pre$Secondary.Stage)#create table of gonad secondary stage by temperature (pre-OA)
# Compare the sex and stages after chilling / not chilling (pre-OA) via Fisher's test due to low sample size (<200)
fisher.test(CT.SEX.pre) #p=0.1903 - sex not different
fisher.test(CT.domstage.pre) #p=7.413e-05 - stages different between temperature. 10C more advanced.
fisher.test(CT.secstage.pre) #p=0.4291 - secondary stages not different
# Apply second test on dominant stage using chi-square test
print(Pre.domstage.chisq <- chisq.test(CT.domstage.pre, simulate.p.value = TRUE)) #X-squared = 23.962, df = NA, p-value = 0.0004998 - VERY DIFFERENT
# ---------------> QUESTION 2. Are there gonad differences between pH treatments? Analyze temp groups separately
# --- compare chilled-amb pH to chilled-low pH
CT.sex.6.pH <- table(subset(Histology, PH!="PRE" & TEMPERATURE==6)$PH, subset(Histology, PH!="PRE" & TEMPERATURE==6)$SEX)
CT.domstage.6.pH <- table(subset(Histology, PH!="PRE" & TEMPERATURE==6)$PH, subset(Histology, PH!="PRE" & TEMPERATURE==6)$Dominant.Stage)
CT.secstage.6.pH <- table(subset(Histology, PH!="PRE" & TEMPERATURE==6)$PH, subset(Histology, PH!="PRE" & TEMPERATURE==6)$Secondary.Stage)
fisher.test(CT.sex.6.pH) #0.2398 No diff.
fisher.test(CT.domstage.6.pH) #0.008287 Yes, difference in dominant gonad stage between pH treatment
fisher.test(CT.secstage.6.pH) #0.9157 No diff.
# Apply second test on dominant stage using chi-square test
print(pH.domstage.chisq <- chisq.test(CT.domstage.6.pH, simulate.p.value = TRUE)) #X-squared = 14.778, df = NA, p-value = 0.004498
# --- compare not chilled-amb pH to not chilled-low pH
CT.sex.10.pH <- table(subset(Histology, PH!="PRE" & TEMPERATURE==10)$PH, subset(Histology, PH!="PRE" & TEMPERATURE==10)$SEX)
CT.domstage.10.pH <- table(subset(Histology, PH!="PRE" & TEMPERATURE==10)$PH, subset(Histology, PH!="PRE" & TEMPERATURE==10)$Dominant.Stage)
CT.secstage.10.pH <- table(subset(Histology, PH!="PRE" & TEMPERATURE==10)$PH, subset(Histology, PH!="PRE" & TEMPERATURE==10)$Secondary.Stage)
fisher.test(CT.sex.10.pH) #0.8848 - no diff between sex ratios.
fisher.test(CT.domstage.10.pH) #0.08 - no diff in dominant stage
fisher.test(CT.secstage.10.pH) #0.04915 - almost but not quite diff in secondary stage
# ---------------> QUESTION 3. Did gonads develop/regress during pH exposure? AKA was there a change from before to after pH treatment? Assess chilled/not chilled groups separately.
# compare chilled pre-pH to chilled amb pH (chilled = 6C)
CT.sex.6.amb <- table(subset(Histology, TEMPERATURE==6 & (PH=="AMBIENT" | is.na(PH)))$SAMPLING, subset(Histology, TEMPERATURE==6 & (PH=="AMBIENT" | is.na(PH)))$SEX)
CT.domstage.6.amb <- table(subset(Histology, TEMPERATURE==6 & (PH=="AMBIENT" | is.na(PH)))$SAMPLING, subset(Histology, TEMPERATURE==6 & (PH=="AMBIENT" | is.na(PH)))$Dominant.Stage)
CT.secstage.6.amb <- table(subset(Histology, TEMPERATURE==6 & (PH=="AMBIENT" | is.na(PH)))$SAMPLING, subset(Histology, TEMPERATURE==6 & (PH=="AMBIENT" | is.na(PH)))$Secondary.Stage)
fisher.test(CT.sex.6.amb) #0.08151, not diff.
fisher.test(CT.domstage.6.amb) #1.122e-06, different <----
fisher.test(CT.secstage.6.amb) #0.03731, different
# compare chilled pre-pH to chilled low pH
CT.sex.6.low <- table(subset(Histology, TEMPERATURE==6 & (PH=="LOW" | is.na(PH)))$SAMPLING, subset(Histology, TEMPERATURE==6 & (PH=="LOW" | is.na(PH)))$SEX)
CT.domstage.6.low <- table(subset(Histology, TEMPERATURE==6 & (PH=="LOW" | is.na(PH)))$SAMPLING, subset(Histology, TEMPERATURE==6 & (PH=="LOW" | is.na(PH)))$Dominant.Stage)
CT.secstage.6.low <- table(subset(Histology, TEMPERATURE==6 & (PH=="LOW" | is.na(PH)))$SAMPLING, subset(Histology, TEMPERATURE==6 & (PH=="LOW" | is.na(PH)))$Secondary.Stage)
fisher.test(CT.sex.6.low) #0.5861, not diff.
fisher.test(CT.domstage.6.low) #0.01797, different <----
fisher.test(CT.secstage.6.low) #0.081, not diff.
# compare not chilled pre-pH to not chilled amb pH (not chilled = 10C)
CT.sex.10.amb <- table(subset(Histology, TEMPERATURE==10 & (PH=="AMBIENT" | is.na(PH)))$SAMPLING, subset(Histology, TEMPERATURE==10 & (PH=="AMBIENT" | is.na(PH)))$SEX)
CT.domstage.10.amb <- table(subset(Histology, TEMPERATURE==10 & (PH=="AMBIENT" | is.na(PH)))$SAMPLING, subset(Histology, TEMPERATURE==10 & (PH=="AMBIENT" | is.na(PH)))$Dominant.Stage)
CT.secstage.10.amb <- table(subset(Histology, TEMPERATURE==10 & (PH=="AMBIENT" | is.na(PH)))$SAMPLING, subset(Histology, TEMPERATURE==10 & (PH=="AMBIENT" | is.na(PH)))$Secondary.Stage)
fisher.test(CT.sex.10.amb) #0.2929, not diff.
fisher.test(CT.domstage.10.amb) #0.1683, not diff.
fisher.test(CT.secstage.10.amb) #0.4686, not diff.
# compare not chilled pre-pH to not chilled low pH
CT.sex.10.low <- table(subset(Histology, TEMPERATURE==10 & (PH=="LOW" | is.na(PH)))$SAMPLING, subset(Histology, TEMPERATURE==10 & (PH=="LOW" | is.na(PH)))$SEX)
CT.domstage.10.low <- table(subset(Histology, TEMPERATURE==10 & (PH=="LOW" | is.na(PH)))$SAMPLING, subset(Histology, TEMPERATURE==10 & (PH=="LOW" | is.na(PH)))$Dominant.Stage)
CT.secstage.10.low <- table(subset(Histology, TEMPERATURE==10 & (PH=="LOW" | is.na(PH)))$SAMPLING, subset(Histology, TEMPERATURE==10 & (PH=="LOW" | is.na(PH)))$Secondary.Stage)
fisher.test(CT.sex.10.low) #0.5202, not diff.
fisher.test(CT.domstage.10.low) #0.08615, not diff.
fisher.test(CT.secstage.10.low) #0.06494, not diff.
# 2 bar plots - chilled (pre, amb, low), and not chilled (pre, amb, low)
# tables for stacked bar plots
levels(Histology$PH) <- c(levels(Histology$PH),"PRE")
Histology$PH <- relevel(Histology$PH, ref="PRE")
Histology[is.na(Histology$PH),"PH"] <- "PRE"
CT.domstage.6 <- table(subset(Histology, TEMPERATURE==6)$PH, subset(Histology, TEMPERATURE==6)$Dominant.Stage)
CT.domstage.10 <- table(subset(Histology, TEMPERATURE==10)$PH, subset(Histology, TEMPERATURE==10)$Dominant.Stage)
colnames(CT.domstage.6) <- c("Undifferentiated (0)", "Early (1)", "Advanced (2)", "Ripe (3)", "Partially Spawned (4)", "Regressing (5)")
colnames(CT.domstage.10) <- c("Undifferentiated (0)", "Early (1)", "Advanced (2)", "Ripe (3)", "Partially Spawned (4)", "Regressing (5)")
png(filename = "Results/Gonad-barplot-chilled-stage", height = 750, width = 420)
par(mfrow=c(1, 1), mar=c(5, 5, 4, 2))
print(chilled.stage <- barplot(t(prop.table(CT.domstage.6, 1)), main="A) Stage, pre- & post-pH treatment\nchilled group (6C)", xlab="pH Treatment", ylab="% Sampled", las=1, col=c("#E2E6BD", "#EAAB28", "#E78A38","#D33F6A", "#DF6753", "lightsalmon"), cex.main=1.5, cex.lab=1.5, cex.axis = 1.2, cex.names = 1.2, legend.text = F, cex=1.5))
dev.off()
png(filename = "Results/Gonad-barplot-notchilled-stage", height = 750, width = 420)
par(mfrow=c(1, 1), mar=c(5, 5, 4, 2))
print(notchilled.stage <- barplot(t(prop.table(CT.domstage.10,1)), main="B) Stage, pre- & post-pH treatment\nnot chilled (10C)", xlab="pH Treatment", ylab="% Sampled", las=1, col=c("#E2E6BD", "#EAAB28", "#E78A38","#D33F6A", "#DF6753", "lightsalmon"), cex.main=1.5, cex.lab=1.5, cex.axis = 1.2, cex.names = 1.2, legend.text = F, cex=1.5))
dev.off()
# to plot this one with legend!
# png(filename = "Results/Gonad-barplot-notchilled-stage", height = 750, width = 650)
# par(mfrow=c(1, 1), mar=c(5, 5, 4, 15))
# print(notchilled.stage <- barplot(t(prop.table(CT.domstage.10,1)), main="B) Stage, pre- & post-pH treatment\nnot chilled (10C)", xlab="pH Treatment", ylab="% Sampled", las=1, col=c("#E2E6BD", "#EAAB28", "#E78A38","#D33F6A", "#DF6753", "lightsalmon"), cex.main=1.5, cex.lab=1.5, cex.axis = 1.2, cex.names = 1.2, legend.text = TRUE, args.legend = list(x = "topright", bty = "n", inset=c(-0.58, 0), title="Gonad Stage", cex=1.5)))
# dev.off()
# Conclusion: chilled group gonad developed less in low pH treatment. Not chilled group was already developed prior to pH treatment, no significant development or resorption occurred during pH treatment.
# ---------------> Summary statistics
100*round(prop.table(summary(Histology$SEX)), 3) # Percent of each sex (all oysters)
length(Histology$SEX) # Number of oysters sampled for histology
###-------------- LARVAL RELEASE (FECUNDITY) --------------###
sum(aggregate(broodstock ~ Spawning.Group, larvae, mean)$broodstock) #number of broodstock total
summarise(larvae, total.by.date = sum(total.released)) #total larvae released
aggregate(Tot.Larvae ~ pH + Temperature, larvae, sum, na.rm=TRUE) #By pH & Temperature
aggregate(Tot.Larvae ~ pH + Temperature + Population, larvae, sum, na.rm=TRUE) #By population
aggregate(Tot.Larvae ~ Spawning.Group, larvae, sum, na.rm=TRUE) #By population & treatment
nrow(subset(larvae, total.released >= 10000)) #Number of times >10k larvae were collected (all grps)
median(na.omit(larvae$Tot.Larvae))
range(na.omit(larvae$Tot.Larvae))
# Comparing larval release metrics between pH groups
summary(total.released.aov <- aov(log(total.released+1) ~ pH*Temperature, data=spawning_group_total)) # <-- daily release data NO DIFF
summary(overall_Total.aov <- aov(cum.total ~ pH*Temperature, data=spawning_group_total)) # <-- cumulative release NO DIFF
summary(total.percap.aov <- aov(cum.percap ~ pH*Temperature, data=spawning_group_total)) # <-- cumulative release per oyster*cm NO DIFF
summary(mean.larvae.aov <- aov(mean.larvae ~ pH*Temperature, data=spawning_group_sum)) # <-- NO DIFF
summary(cum.total.aov <- aov(log(cum.total+1) ~ pH*Temperature, data=spawning_group_total)) # <-- NO DIFF
summary(cum.percap.aov <- aov(log(cum.percap+1) ~ pH*Temperature, data=spawning_group_total)) # <-- NO DIFF
summary(first.big.aov <- aov(first.big ~ pH*Temperature, data=spawning_group_sum)) # <-- NO DIFF
summary(max.aov <- aov(max ~ pH*Temperature, data=spawning_group_sum)) # <-- NO DIFF
summary(maxday.aov <- aov(maxday ~ pH*Temperature, data=spawning_group_sum)) # <-- Temperature diff ... remove pH
summary(maxday.aov <- aov(maxday ~ Temperature, data=spawning_group_sum)) # <-- Temperature difference
summary(release.days <- aov(release.days ~ pH*Temperature, data=spawning_group_sum)) # <-- NO DIFF
nrow(spawning_group_sum)
# inspect date of maximum larval production by temperature
aggregate(maxday ~ Temperature, spawning_group_sum, mean, na.rm=TRUE)
152.167-143.833
aggregate(maxday ~ Temperature, spawning_group_sum, sd, na.rm=TRUE)
plot(x=spawning_group_sum$Temperature, y=spawning_group_sum$maxday, main="Calendar day of maximum larval relase\nby temperature treatment")
# Compare timing and # released
# cross correlation function --- try this later ...
# https://onlinecourses.science.psu.edu/stat510/node/74/
ccf
# summarize data for each spawning bucket
fecundity <- group_by(larvae, pH, Temperature) %>% mutate(cum.total=cumsum(total.released),cum.percap = cumsum(larvae.per.broodcm),CalDay = as.numeric(format(Date,"%j"))) %>% arrange(Date) %>% dplyr::select(Date,CalDay,pH,Temperature,Treatment,total.released,larvae.per.broodcm,cum.total,cum.percap)
# barplots of larvae released - low and ambient separately
# SAVE SIZE 1000W X 500H
#c("gray60", "lightsteelblue3", "gray40", "steelblue")
colors3 <- c("10-Ambient"="gray40", "10-Low"="steelblue", "6-Ambient"="gray60", "6-Low"="lightsteelblue3")
levels(fecundity$Treatment)
levels(fecundity$Treatment)
spawning.titles <- c("Warm winter temp, ambient pH", "Warm winter temp, low pH", "Cool winter temp, ambient pH", "Cool winter temp, low pH")
# save dimensions: 1000x400
for (i in 1:4) {
print(ggplot(data=subset(fecundity, Treatment==levels(fecundity$Treatment)[i]), aes(x=Date, y=total.released)) +
geom_bar(stat="identity",width=1, position = position_dodge(width=2), fill=colors3[i], col="gray30") +
ylab("No. of larvae\n(summed across replicates)") + xlab(label=element_blank()) + ggtitle(spawning.titles[i]) + theme_minimal(base_size = 16) +
theme(plot.title = element_text(size = 16, hjust = 0, colour = "gray30"),
axis.title = element_text(size=16, colour = "gray30")) +
scale_x_date(date_breaks = "1 week",date_labels ="%b-%d",
limits=c(min=min(subset(fecundity, Treatment==levels(fecundity$Treatment)[1])$Date)-1,max=max(subset(fecundity, Treatment==levels(fecundity$Treatment)[1])$Date)+1)) +
scale_y_continuous(limits=c(min=0,max=max(subset(fecundity, Treatment==levels(fecundity$Treatment)[1])$total.released))) +
theme(panel.grid.minor = element_blank(),
panel.grid.major.x = element_blank(), panel.border = element_blank()))
}
|
#' @rdname pcps
#' @encoding UTF-8
#' @export
plot.pcps<-function(x,display=c("text","points"),groups,showlabel=TRUE,choices=c(1,2),...){
sco<-scores.pcps(x,choices = choices)
plot(sco$scores.sites,type="n",ylim=c(min(sco$scores.sites[,2],sco$scores.species[,2],na.rm=TRUE)-0.05, max(sco$scores.sites[,2],sco$scores.species[,2],na.rm=TRUE)+0.05),xlim=c(min(sco$scores.sites[,1],sco$scores.species[,1],na.rm=TRUE)-0.05,max(sco$scores.sites[,1],sco$sco[,1],na.rm=TRUE)+0.05),...)
if(display=="text"){
text(sco$scores.sites,labels=rownames(sco$scores.sites),...)
}
if(display=="points"){
points(sco$scores.sites,...)
}
vegan::ordispider(sco$scores.species,groups=groups,label=showlabel,...)
if(showlabel){
g1<-ifelse(table(groups)==1,1,0)
g1_groups<-names(g1)[g1==1]
if(sum(g1)>0){
for(i in 1:sum(g1)){
position<-which(groups==g1_groups[i])
vegan::ordilabel(sco$scores.species[position,],labels=groups[position],...)
}
}
}
} | /PCPS/R/plot.pcps.R | no_license | ingted/R-Examples | R | false | false | 958 | r | #' @rdname pcps
#' @encoding UTF-8
#' @export
plot.pcps <- function(x, display = c("text", "points"), groups, showlabel = TRUE, choices = c(1, 2), ...) {
  # Collapse the length-2 default for `display` to a single validated choice.
  # The original compared the whole default vector with "==", which errors in
  # R >= 4.2 ("condition has length > 1") and silently used only the first
  # element before that.
  display <- match.arg(display)
  sco <- scores.pcps(x, choices = choices)
  # Axis limits must span both site and species scores, with a small margin.
  ylim <- c(min(sco$scores.sites[, 2], sco$scores.species[, 2], na.rm = TRUE) - 0.05,
            max(sco$scores.sites[, 2], sco$scores.species[, 2], na.rm = TRUE) + 0.05)
  # BUG FIX: the upper x limit previously read `sco$sco[, 1]`; `$sco` is an
  # ambiguous partial match (both "scores.sites" and "scores.species" start
  # with "sco") and resolves to NULL, so `NULL[, 1]` errored. Use the species
  # scores, mirroring the y-axis computation.
  xlim <- c(min(sco$scores.sites[, 1], sco$scores.species[, 1], na.rm = TRUE) - 0.05,
            max(sco$scores.sites[, 1], sco$scores.species[, 1], na.rm = TRUE) + 0.05)
  plot(sco$scores.sites, type = "n", ylim = ylim, xlim = xlim, ...)
  if (display == "text") {
    text(sco$scores.sites, labels = rownames(sco$scores.sites), ...)
  }
  if (display == "points") {
    points(sco$scores.sites, ...)
  }
  vegan::ordispider(sco$scores.species, groups = groups, label = showlabel, ...)
  if (showlabel) {
    # ordispider does not draw labels for singleton groups, so label any
    # group of size one manually with ordilabel.
    g1 <- ifelse(table(groups) == 1, 1, 0)
    g1_groups <- names(g1)[g1 == 1]
    if (sum(g1) > 0) {
      for (i in 1:sum(g1)) {
        position <- which(groups == g1_groups[i])
        vegan::ordilabel(sco$scores.species[position, ], labels = groups[position], ...)
      }
    }
  }
}
# Generic: coerce a value `x` to the type described by `type`.
# Dispatch is on the class of `type`; `cast.default` is the fallback.
cast <- function(type, x) {
  UseMethod("cast")
}
# Fallback cast method: normalize `type` into a schema, then delegate to the
# record implementation for record schemas and the vector implementation
# otherwise.
cast.default <- function(type, x) {
  target <- schema(type)
  if (!is.record(target)) {
    return(cast.vector(target, x))
  }
  cast.record(target, x)
}
# Cast `x` to a plain vector of the same type as `type`.
# A NULL destination type accepts only an empty source; for bare atomic
# destinations the values are first coerced to the destination's storage
# mode before being copied in.
cast.vector <- function(type, x) {
  type <- as.vector.type(type)
  values <- as.vector.value(x)
  n <- length(values)
  if (is.null(type)) {
    # Only a zero-length source can be cast to NULL.
    if (n != 0) {
      stop(sprintf("cannot cast from length-%.0f vector to NULL", n))
    }
    return(NULL)
  }
  if (is.atomic(type) && !is.object(type)) {
    # Match the destination's storage mode (logical, integer, double,
    # character, ...) so the assignment below cannot up-convert `type`.
    values <- as.vector(values, storage.mode(type))
  }
  type[seq_len(n)] <- values
  type
}
# Cast `x` to a Date vector, honouring any time zone attached to the
# source values (falling back to "UTC" via get_tzone).
cast.Date <- function(type, x) {
  type <- as.vector.type(type)
  values <- as.vector.value(x)
  values <- as.Date(values, tz = get_tzone(values), origin = "1970-01-01")
  type[seq_along(values)] <- values
  type
}
# Cast `x` to POSIXct. Source values are parsed in their own time zone
# (defaulting to the destination's), then re-tagged with the destination
# zone while keeping the underlying numeric instant unchanged.
cast.POSIXct <- function(type, x) {
  type <- as.vector.type(type)
  values <- as.vector.value(x)
  dest_tz <- get_tzone(type)
  src_tz <- get_tzone(values, dest_tz)  # fall back to destination zone
  values <- as.POSIXct(values, src_tz, origin = "1970-01-01")
  values <- structure(as.numeric(values),
                      class = c("POSIXct", "POSIXt"), tzone = dest_tz)
  type[seq_along(values)] <- values
  type
}
# Return the first element of `x`'s "tzone" attribute, or the first element
# of `default` when no time zone is attached. (NULL[[1L]] is NULL, so a
# missing attribute falls through to the default.)
get_tzone <- function(x, default = "UTC") {
  zone <- attr(x, "tzone")[[1L]]
  if (is.null(zone)) zone <- default[[1L]]
  zone
}
# Cast the components of `x` to the component types of the record `type`.
# The destination and source must have the same number of components and,
# when both carry names, identical names in identical order; each component
# is then cast recursively via the generic.
cast.record <- function(type, x)
{
    type <- as.record.type(type)
    # One-dimensional (or dimensionless) sources become records; anything
    # with two or more dimensions is treated as tabular and becomes a dataset.
    if (length(dim(x)) <= 1) {
        x <- as.record(x)
    } else {
        x <- as.dataset(x)
    }
    nx <- length(x)
    n <- length(type)
    if (n != nx) {
        stop(sprintf("mismatch: destination has %.0f components, source has %.0f",
                     n, nx))
    }
    names <- names(type)
    if (is.null(names)) {
        # Unnamed destination: strip any names from the source as well.
        names(x) <- NULL
    } else {
        namesx <- names(x)
        if (is.null(namesx)) {
            # Unnamed source adopts the destination's names.
            names(x) <- names
        } else if (!identical(names, namesx)) {
            # Report the first position where the two name vectors differ,
            # rendering NA names as <NA> and others in backticks.
            i <- which(!mapply(identical, names, namesx))[[1]]
            nfmt <- function(nm) if (is.na(nm)) "<NA>"
                                 else paste0('`', nm, '`')
            fmt <- "mismatch: destination component %.0f has name %s, source has name %s"
            stop(sprintf(fmt, i, nfmt(names[[i]]), nfmt(namesx[[i]])))
        }
    }
    # Recursively cast each component to its declared type.
    for (i in seq_len(n)) {
        x[[i]] <- cast(type[[i]], x[[i]])
    }
    x
}
| /R/cast.R | permissive | patperry/r-frame | R | false | false | 2,377 | r |
cast <- function(type, x)
{
UseMethod("cast")
}
cast.default <- function(type, x)
{
type <- schema(type)
if (is.record(type)) {
cast.record(type, x)
} else {
cast.vector(type, x)
}
}
cast.vector <- function(type, x)
{
type <- as.vector.type(type)
x <- as.vector.value(x)
n <- length(x)
if (is.null(type)) {
if (n != 0) {
stop(sprintf("cannot cast from length-%.0f vector to NULL", n))
}
return(type)
} else if (is.atomic(type) && !is.object(type)) {
mode <- storage.mode(type)
x <- as.vector(x, mode)
}
type[seq_len(n)] <- x
type
}
cast.Date <- function(type, x)
{
type <- as.vector.type(type)
x <- as.vector.value(x)
tz <- get_tzone(x)
x <- as.Date(x, tz = tz, origin = "1970-01-01")
type[seq_along(x)] <- x
type
}
cast.POSIXct <- function(type, x)
{
type <- as.vector.type(type)
x <- as.vector.value(x)
tz <- get_tzone(type)
tz0 <- get_tzone(x, tz)
x <- as.POSIXct(x, tz0, origin = "1970-01-01")
x <- structure(as.numeric(x), class = c("POSIXct", "POSIXt"), tzone = tz)
type[seq_along(x)] <- x
type
}
get_tzone <- function(x, default = "UTC")
{
tz <- attr(x, "tzone")[[1L]]
if (is.null(tz)) {
tz <- default[[1L]]
}
tz
}
cast.record <- function(type, x)
{
type <- as.record.type(type)
if (length(dim(x)) <= 1) {
x <- as.record(x)
} else {
x <- as.dataset(x)
}
nx <- length(x)
n <- length(type)
if (n != nx) {
stop(sprintf("mismatch: destination has %.0f components, source has %.0f",
n, nx))
}
names <- names(type)
if (is.null(names)) {
names(x) <- NULL
} else {
namesx <- names(x)
if (is.null(namesx)) {
names(x) <- names
} else if (!identical(names, namesx)) {
i <- which(!mapply(identical, names, namesx))[[1]]
nfmt <- function(nm) if (is.na(nm)) "<NA>"
else paste0('`', nm, '`')
fmt <- "mismatch: destination component %.0f has name %s, source has name %s"
stop(sprintf(fmt, i, nfmt(names[[i]]), nfmt(namesx[[i]])))
}
}
for (i in seq_len(n)) {
x[[i]] <- cast(type[[i]], x[[i]])
}
x
}
|
#' Convert angle and radius to xend and yend.
#'
#' @section Aesthetics:
#' \Sexpr[results=rd,stage=build]{ggplot2:::rd_aesthetics("stat", "spoke")}
#'
#' @inheritParams stat_identity
#' @return a data.frame with additional columns
#' \item{xend}{x position of end of line segment}
#' \item{yend}{x position of end of line segment}
#' @export
#' @examples
#' df <- expand.grid(x = 1:10, y=1:10)
#' df$angle <- runif(100, 0, 2*pi)
#' df$speed <- runif(100, 0, 0.5)
#'
#' ggplot(df, aes(x, y)) +
#' geom_point() +
#' stat_spoke(aes(angle = angle), radius = 0.5)
#'
#' last_plot() + scale_y_reverse()
#'
#' ggplot(df, aes(x, y)) +
#' geom_point() +
#' stat_spoke(aes(angle = angle, radius = speed))
# Layer constructor for stat_spoke: forwards its arguments to ggplot2's
# layer(), dispatching computation to the StatSpoke ggproto object. Extra
# arguments in `...` are passed on as layer params (e.g. `radius`).
stat_spoke <- function(mapping = NULL, data = NULL, geom = "segment",
                       position = "identity", show.legend = NA,
                       inherit.aes = TRUE, ...) {
  extra_params <- list(...)
  layer(
    stat = StatSpoke,
    geom = geom,
    data = data,
    mapping = mapping,
    position = position,
    show.legend = show.legend,
    inherit.aes = inherit.aes,
    params = extra_params
  )
}
#' @rdname ggplot2-ggproto
#' @format NULL
#' @usage NULL
#' @export
# ggproto object implementing the spoke statistic: converts polar offsets
# (angle, radius) at each (x, y) into cartesian segment endpoints.
StatSpoke <- ggproto("StatSpoke", Stat,
  # Do not re-run scale transformation on the computed output
  # (endpoints are already in data coordinates).
  retransform = FALSE,
  calculate = function(data, scales, radius = 1, ...) {
    # Endpoint = origin + polar offset; radius may come from an aesthetic
    # or from the layer-level `radius` parameter.
    transform(data,
      xend = x + cos(angle) * radius,
      yend = y + sin(angle) * radius
    )
  },
  # Wire the computed columns to the segment-end aesthetics by default.
  default_aes = aes(xend = ..xend.., yend = ..yend..),
  required_aes = c("x", "y", "angle", "radius")
)
| /R/stat-spoke.r | no_license | cyang-2014/ggplot2 | R | false | false | 1,499 | r | #' Convert angle and radius to xend and yend.
#'
#' @section Aesthetics:
#' \Sexpr[results=rd,stage=build]{ggplot2:::rd_aesthetics("stat", "spoke")}
#'
#' @inheritParams stat_identity
#' @return a data.frame with additional columns
#' \item{xend}{x position of end of line segment}
#' \item{yend}{x position of end of line segment}
#' @export
#' @examples
#' df <- expand.grid(x = 1:10, y=1:10)
#' df$angle <- runif(100, 0, 2*pi)
#' df$speed <- runif(100, 0, 0.5)
#'
#' ggplot(df, aes(x, y)) +
#' geom_point() +
#' stat_spoke(aes(angle = angle), radius = 0.5)
#'
#' last_plot() + scale_y_reverse()
#'
#' ggplot(df, aes(x, y)) +
#' geom_point() +
#' stat_spoke(aes(angle = angle, radius = speed))
stat_spoke <- function(mapping = NULL, data = NULL, geom = "segment",
position = "identity", show.legend = NA,
inherit.aes = TRUE, ...) {
layer(
data = data,
mapping = mapping,
stat = StatSpoke,
geom = geom,
position = position,
show.legend = show.legend,
inherit.aes = inherit.aes,
params = list(...)
)
}
#' @rdname ggplot2-ggproto
#' @format NULL
#' @usage NULL
#' @export
StatSpoke <- ggproto("StatSpoke", Stat,
retransform = FALSE,
calculate = function(data, scales, radius = 1, ...) {
transform(data,
xend = x + cos(angle) * radius,
yend = y + sin(angle) * radius
)
},
default_aes = aes(xend = ..xend.., yend = ..yend..),
required_aes = c("x", "y", "angle", "radius")
)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/metalog.R
\name{metalog}
\alias{metalog}
\title{Fit the metalog distribution to data}
\usage{
metalog(x, bounds = c(0, 1), boundedness = "u", term_limit = 13,
term_lower_bound = 2, step_len = 0.01, probs = NA,
fit_method = "any")
}
\arguments{
\item{x}{vector of numeric data}
\item{bounds}{numeric vector specifying lower or upper bounds, none required
if the distribution is unbounded}
\item{boundedness}{character string specifying unbounded, semi-bounded upper,
semi-bounded lower or bounded; accepts values \code{u}, \code{su},
\code{sl} and \code{b} (default: 'u')}
\item{term_limit}{integer between 3 and 30, specifying the number of metalog
distributions to generate. Larger term distributions have more flexibility
(default: 13)}
\item{term_lower_bound}{(Optional) the smallest term to generate, used to
minimize computation of unwanted terms must be less than term_limit (default is 2)}
\item{step_len}{(Optional) size of steps to summarize the distribution
(between 0 and 0.01) this is only used if the data vector length is greater
than 100. Use this if a specific fine grid fit is required. (default is
0.01)}
\item{probs}{(Optional) probability quantiles, same length as \code{x}}
\item{fit_method}{(Optional) preferred method of fitting distribution: accepts values
\code{OLS}, \code{LP} or \code{any} (defaults to any)}
}
\value{
A \code{metalog} object with elements
\item{params}{A list of the parameters used to create the metalog object}
\item{dataValues}{a dataframe with the first column the raw data, second
column the cumulative probabilities and the third the z
vector}
\item{Y}{The Y matrix values for each quantile and term}
\item{A}{a dataframe of coefficients for each metalog distribution}
\item{M}{a dataframe of quantiles (M) and probabilities (m) indexed for each
term (i.e. M3,m3 for the third term)}
\item{GridPlotCDF()}{a function that displays a grid plot of the CDF for each
term}
\item{VGridPlotPDF()}{a function that displays a gird plot of the PDF for
each term}
\item{Validation}{a vector of yes/no indicators of the valid distributions
for each term}
}
\description{
Fit the metalog distribution to data
}
\examples{
# Load example data
data("fishSize")
# Create a bounded metalog object
myMetalog <- metalog(fishSize$FishSize,
bounds=c(0, 60),
boundedness = 'b',
term_limit = 13)
}
| /man/metalog.Rd | permissive | jongbinjung/RMetalog | R | false | true | 2,585 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/metalog.R
\name{metalog}
\alias{metalog}
\title{Fit the metalog distribution to data}
\usage{
metalog(x, bounds = c(0, 1), boundedness = "u", term_limit = 13,
term_lower_bound = 2, step_len = 0.01, probs = NA,
fit_method = "any")
}
\arguments{
\item{x}{vector of numeric data}
\item{bounds}{numeric vector specifying lower or upper bounds, none required
if the distribution is unbounded}
\item{boundedness}{character string specifying unbounded, semi-bounded upper,
semi-bounded lower or bounded; accepts values \code{u}, \code{su},
\code{sl} and \code{b} (default: 'u')}
\item{term_limit}{integer between 3 and 30, specifying the number of metalog
distributions to generate. Larger term distributions have more flexibility
(default: 13)}
\item{term_lower_bound}{(Optional) the smallest term to generate, used to
minimize computation of unwanted terms must be less than term_limit (default is 2)}
\item{step_len}{(Optional) size of steps to summarize the distribution
(between 0 and 0.01) this is only used if the data vector length is greater
than 100. Use this if a specific fine grid fit is required. (default is
0.01)}
\item{probs}{(Optional) probability quantiles, same length as \code{x}}
\item{fit_method}{(Optional) preferred method of fitting distribution: accepts values
\code{OLS}, \code{LP} or \code{any} (defaults to any)}
}
\value{
A \code{metalog} object with elements
\item{params}{A list of the parameters used to create the metalog object}
\item{dataValues}{a dataframe with the first column the raw data, second
column the cumulative probabilities and the third the z
vector}
\item{Y}{The Y matrix values for each quantile and term}
\item{A}{a dataframe of coefficients for each metalog distribution}
\item{M}{a dataframe of quantiles (M) and probabilities (m) indexed for each
term (i.e. M3,m3 for the third term)}
\item{GridPlotCDF()}{a function that displays a grid plot of the CDF for each
term}
\item{VGridPlotPDF()}{a function that displays a grid plot of the PDF for
each term}
\item{Validation}{a vector of yes/no indicators of the valid distributions
for each term}
}
\description{
Fit the metalog distribution to data
}
\examples{
# Load example data
data("fishSize")
# Create a bounded metalog object
myMetalog <- metalog(fishSize$FishSize,
bounds=c(0, 60),
boundedness = 'b',
term_limit = 13)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/filter_relationships.R
\name{filter_for_taxonomy_relationships}
\alias{filter_for_taxonomy_relationships}
\title{FUNCTION_TITLE}
\usage{
filter_for_taxonomy_relationships(omop_relationships)
}
\arguments{
\item{omop_relationships}{PARAM_DESCRIPTION}
}
\value{
OUTPUT_DESCRIPTION
}
\description{
FUNCTION_DESCRIPTION
}
\details{
DETAILS
}
\examples{
\dontrun{
if(interactive()){
#EXAMPLE1
}
}
}
| /man/filter_for_taxonomy_relationships.Rd | no_license | meerapatelmd/chariotViz | R | false | true | 474 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/filter_relationships.R
\name{filter_for_taxonomy_relationships}
\alias{filter_for_taxonomy_relationships}
\title{FUNCTION_TITLE}
\usage{
filter_for_taxonomy_relationships(omop_relationships)
}
\arguments{
\item{omop_relationships}{PARAM_DESCRIPTION}
}
\value{
OUTPUT_DESCRIPTION
}
\description{
FUNCTION_DESCRIPTION
}
\details{
DETAILS
}
\examples{
\dontrun{
if(interactive()){
#EXAMPLE1
}
}
}
|
# storagecalc.R
# calculating subsurface storage over ParFlow domain
# Assumes hydrostatic pressure
storagecalc <- function(press, bot_thickness, area, porosity){
  # Hydrostatic head at the mid-point of the bottom cell
  # (press + bot_thickness / 2), scaled by the cell's plan area and
  # effective porosity to give a storage volume. Vectorized over `press`.
  # NOTE(review): the result is only assigned, so the function returns it
  # invisibly; a top-level call prints nothing unless wrapped in print().
  storage <- (press+(bot_thickness/2))*area*porosity
} | /scripts/storagecalc.R | no_license | grapp1/mb_sensitivity | R | false | false | 217 | r | # storagecalc.R
# calculating subsurface storage over ParFlow domain
# Assumes hydrostatic pressure
storagecalc <- function(press, bot_thickness, area, porosity){
  # Subsurface storage in a ParFlow bottom-layer cell, assuming hydrostatic
  # pressure: head at the cell mid-point (press + bot_thickness / 2) scaled
  # by the cell's plan area and effective porosity.
  #
  # Args:
  #   press:         pressure head at the cell bottom (vectorized)
  #   bot_thickness: thickness of the bottom cell
  #   area:          horizontal (plan) cell area
  #   porosity:      effective porosity
  #
  # Returns the storage volume visibly. The original assigned the result to
  # a dead local (`storage`) which made the return value invisible at the
  # top level; returning the expression directly fixes that.
  (press + (bot_thickness / 2)) * area * porosity
}
#' @title Read ECG records
#'
#' @description Read the ECG records contained in the ecgX_raw.bin files and
#' outputs a dataframe with columns for each of the three ECG leads
#' (ECG1, ECG2, ECG3). Sampled at 250 Hz.
#'
#' NOTE: This is raw ECG data. Nothing is currently done with values
#' greater than 32676 - in the matlab scripts they seem to be set to 0.
#'
#' @param dirname The path to the directory containing the ecgX_raw.bin files
#' (assumes bat_adc.txt is in the same directory). File names are built with
#' paste0(), so the path must end with a separator.
#' @param recordType The type of recorded signal to read -
#' "raw" (data only high-pass filtered at 0.3Hz) or
#' "filtered" (data further low-pass filtered at 40Hz). Defaults to "raw".
#'
#' @details Returns the start and end of measurement as the attributes
#' measurementStart and measurementEnd.
#'
#' @export
readECG <- function (dirname, recordType="raw") {
  # Fail fast on an unknown recordType; previously this surfaced later as a
  # confusing "object 'filename' not found" error.
  if (!recordType %in% c("raw", "filtered")) {
    stop("recordType must be \"raw\" or \"filtered\"", call. = FALSE)
  }
  suffix <- if (recordType == "raw") "_raw.bin" else "_filt.bin"
  # Read one binary lead file: 16-bit signed integers, two bytes per sample
  # (hence n = fileSize / 2).
  read_lead <- function(i) {
    filename <- paste0(dirname, "ecg", i, suffix)
    fileSize <- file.info(filename)$size
    con <- file(filename, open = "rb")
    # Guarantee the connection is closed even if readBin() fails.
    on.exit(close(con), add = TRUE)
    readBin(con = con, what = integer(), n = fileSize / 2, size = 2,
            signed = TRUE)
  }
  # One column per ECG lead; replaces the original assign()-into-frame trick.
  leads <- lapply(1:3, read_lead)
  names(leads) <- paste0("ECG", 1:3)
  results <- data.frame(leads)
  # Attach start/end of recording as attributes; at 250 Hz the duration in
  # seconds is the number of samples divided by 250.
  measurementStart <- getStartofMeasurement(dirname)
  measurementEnd <- measurementStart + nrow(results) / 250
  attr(results, which = "measurementStart") <- measurementStart
  attr(results, which = "measurementEnd") <- measurementEnd
  # return ECG records
  return(results)
} | /R/readECG.R | no_license | crtahlin/readCortrium | R | false | false | 1,755 | r | #' @title Read ECG records
#'
#' @description Read the ECG records contained in the ecgX_raw.bin files and
#' outputs a dataframe with columns for each of the three ECG leads
#' (ECG1, ECG2, ECG3). Sampled at 250 Hz.
#'
#' NOTE: This is raw ECG data. Nothing is currently done with values
#' greater than 32676 - in the matlab scripts they seem to be set to 0.
#'
#' @param dirname The path to the directory containing the ecgX_raw.bin files
#' (assumes bat_adc.txt is in the same directory). File names are built with
#' paste0(), so the path must end with a separator.
#' @param recordType The type of recorded signal to read -
#' "raw" (data only high-pass filtered at 0.3Hz) or
#' "filtered" (data further low-pass filtered at 40Hz). Defaults to "raw".
#'
#' @details Returns the start and end of measurement as the attributes
#' measurementStart and measurementEnd.
#'
#' @export
readECG <- function (dirname, recordType="raw") {
  # Fail fast on an unknown recordType; previously this surfaced later as a
  # confusing "object 'filename' not found" error.
  if (!recordType %in% c("raw", "filtered")) {
    stop("recordType must be \"raw\" or \"filtered\"", call. = FALSE)
  }
  suffix <- if (recordType == "raw") "_raw.bin" else "_filt.bin"
  # Read one binary lead file: 16-bit signed integers, two bytes per sample
  # (hence n = fileSize / 2).
  read_lead <- function(i) {
    filename <- paste0(dirname, "ecg", i, suffix)
    fileSize <- file.info(filename)$size
    con <- file(filename, open = "rb")
    # Guarantee the connection is closed even if readBin() fails.
    on.exit(close(con), add = TRUE)
    readBin(con = con, what = integer(), n = fileSize / 2, size = 2,
            signed = TRUE)
  }
  # One column per ECG lead; replaces the original assign()-into-frame trick.
  leads <- lapply(1:3, read_lead)
  names(leads) <- paste0("ECG", 1:3)
  results <- data.frame(leads)
  # Attach start/end of recording as attributes; at 250 Hz the duration in
  # seconds is the number of samples divided by 250.
  measurementStart <- getStartofMeasurement(dirname)
  measurementEnd <- measurementStart + nrow(results) / 250
  attr(results, which = "measurementStart") <- measurementStart
  attr(results, which = "measurementEnd") <- measurementEnd
  # return ECG records
  return(results)
} |
#' Trains a simple convnet on the MNIST dataset.
#'
#' Gets to 99.25% test accuracy after 12 epochs
#' Note: There is still a large margin for parameter tuning
#'
#' 16 seconds per epoch on a GRID K520 GPU.
library(keras)
library(reticulate)
library(tensorflow)
# install_tensorflow(method = "conda", version = '1.13.0-gpu')
# install_keras(tensorflow = "1.11.0-gpu")
# install_keras(version = 'gpu')
# Data Preparation -----------------------------------------------------
use_python("/home/ubuntu/anaconda3/envs/r-tensorflow")
# use_python('/home/ubuntu/.virtualenvs/r-tensorflow/bin/python')
# Training hyperparameters
batch_size <- 128
num_classes <- 10
epochs <- 12
# Input image dimensions
img_rows <- 28
img_cols <- 28
# The data, shuffled and split between train and test sets
mnist <- dataset_mnist()
x_train <- mnist$train$x
y_train <- mnist$train$y
x_test <- mnist$test$x
y_test <- mnist$test$y
# Add a trailing channel axis (28 x 28 x 1) as expected by layer_conv_2d()
x_train <- array_reshape(x_train, c(nrow(x_train), img_rows, img_cols, 1))
x_test <- array_reshape(x_test, c(nrow(x_test), img_rows, img_cols, 1))
input_shape <- c(img_rows, img_cols, 1)
# Scale 8-bit greyscale values into the [0,1] range
x_train <- x_train / 255
x_test <- x_test / 255
cat('x_train_shape:', dim(x_train), '\n')
cat(nrow(x_train), 'train samples\n')
cat(nrow(x_test), 'test samples\n')
# Convert class vectors to binary class matrices (one-hot encoding)
y_train <- to_categorical(y_train, num_classes)
y_test <- to_categorical(y_test, num_classes)
# Define Model -----------------------------------------------------------
# Two convolution layers + max-pooling, then a dense classifier with dropout
model <- keras_model_sequential() %>%
  layer_conv_2d(filters = 32, kernel_size = c(3,3), activation = 'relu',
                input_shape = input_shape) %>%
  layer_conv_2d(filters = 64, kernel_size = c(3,3), activation = 'relu') %>%
  layer_max_pooling_2d(pool_size = c(2, 2)) %>%
  layer_dropout(rate = 0.25) %>%
  layer_flatten() %>%
  layer_dense(units = 128, activation = 'relu') %>%
  layer_dropout(rate = 0.5) %>%
  layer_dense(units = num_classes, activation = 'softmax')
# Uncomment the line below to enable multi-GPU support. It requires a
# machine with 8 GPUs; leaving it active (as the original did, despite its
# "remove the comment to enable" note) crashes everywhere else.
# model <- multi_gpu_model(model, gpus = 8)
# Compile model
model %>% compile(
  loss = loss_categorical_crossentropy,
  optimizer = optimizer_adadelta(),
  metrics = 'accuracy'
)
# Train model, holding out 20% of the training data for validation
model %>% fit(
  x_train, y_train,
  batch_size = batch_size,
  epochs = epochs,
  validation_split = 0.2
)
scores <- model %>% evaluate(
  x_test, y_test, verbose = 0
)
# Output metrics
cat('Test loss:', scores[[1]], '\n')
cat('Test accuracy:', scores[[2]], '\n') | /keras.R | no_license | fdrennan/ndexr-scripts | R | false | false | 2,560 | r | #' Trains a simple convnet on the MNIST dataset.
#'
#' Gets to 99.25% test accuracy after 12 epochs
#' Note: There is still a large margin for parameter tuning
#'
#' 16 seconds per epoch on a GRID K520 GPU.
library(keras)
library(reticulate)
library(tensorflow)
# install_tensorflow(method = "conda", version = '1.13.0-gpu')
# install_keras(tensorflow = "1.11.0-gpu")
# install_keras(version = 'gpu')
# Data Preparation -----------------------------------------------------
use_python("/home/ubuntu/anaconda3/envs/r-tensorflow")
# use_python('/home/ubuntu/.virtualenvs/r-tensorflow/bin/python')
# Training hyperparameters
batch_size <- 128
num_classes <- 10
epochs <- 12
# Input image dimensions
img_rows <- 28
img_cols <- 28
# The data, shuffled and split between train and test sets
mnist <- dataset_mnist()
x_train <- mnist$train$x
y_train <- mnist$train$y
x_test <- mnist$test$x
y_test <- mnist$test$y
# Add a trailing channel axis (28 x 28 x 1) as expected by layer_conv_2d()
x_train <- array_reshape(x_train, c(nrow(x_train), img_rows, img_cols, 1))
x_test <- array_reshape(x_test, c(nrow(x_test), img_rows, img_cols, 1))
input_shape <- c(img_rows, img_cols, 1)
# Scale 8-bit greyscale values into the [0,1] range
x_train <- x_train / 255
x_test <- x_test / 255
cat('x_train_shape:', dim(x_train), '\n')
cat(nrow(x_train), 'train samples\n')
cat(nrow(x_test), 'test samples\n')
# Convert class vectors to binary class matrices (one-hot encoding)
y_train <- to_categorical(y_train, num_classes)
y_test <- to_categorical(y_test, num_classes)
# Define Model -----------------------------------------------------------
# Two convolution layers + max-pooling, then a dense classifier with dropout
model <- keras_model_sequential() %>%
  layer_conv_2d(filters = 32, kernel_size = c(3,3), activation = 'relu',
                input_shape = input_shape) %>%
  layer_conv_2d(filters = 64, kernel_size = c(3,3), activation = 'relu') %>%
  layer_max_pooling_2d(pool_size = c(2, 2)) %>%
  layer_dropout(rate = 0.25) %>%
  layer_flatten() %>%
  layer_dense(units = 128, activation = 'relu') %>%
  layer_dropout(rate = 0.5) %>%
  layer_dense(units = num_classes, activation = 'softmax')
# Uncomment the line below to enable multi-GPU support. It requires a
# machine with 8 GPUs; leaving it active (as the original did, despite its
# "remove the comment to enable" note) crashes everywhere else.
# model <- multi_gpu_model(model, gpus = 8)
# Compile model
model %>% compile(
  loss = loss_categorical_crossentropy,
  optimizer = optimizer_adadelta(),
  metrics = 'accuracy'
)
# Train model, holding out 20% of the training data for validation
model %>% fit(
  x_train, y_train,
  batch_size = batch_size,
  epochs = epochs,
  validation_split = 0.2
)
scores <- model %>% evaluate(
  x_test, y_test, verbose = 0
)
# Output metrics
cat('Test loss:', scores[[1]], '\n')
cat('Test accuracy:', scores[[2]], '\n') |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/model_GxE.R
\name{model_GxE}
\alias{model_GxE}
\title{Run AMMI or GGE model}
\usage{
model_GxE(data, variable, gxe_analysis = "AMMI")
}
\arguments{
\item{data}{The data frame on which the model is run. It should come from \code{\link{format_data_PPBstats.data_agro}}}
\item{variable}{variable to analyse}
\item{gxe_analysis}{the analysis to carry out: "AMMI" or "GGE"}
}
\value{
The function returns a list with three elements :
\itemize{
\item info : a list with variable and gxe_analysis
\item ANOVA a list with five elements:
\itemize{
\item model
\item anova_model
\item germplasm_effects a list of two elements:
\itemize{
\item effects
\item intra_variance
}
\item location_effects
\item interaction_matrix
}
\item PCA : PCA object from FactoMineR
}
}
\description{
\code{model_GxE} runs AMMI or GGE model
}
\details{
scaling for interaction.matrix is not useful as the column mean is equal to 0 because of model constraints and all the values are regarding one variable, so it is possible to compare it into the PCA.
More information can be found in the book regarding \href{https://priviere.github.io/PPBstats_book/family-2.html#ammi}{AMMI} and \href{https://priviere.github.io/PPBstats_book/family-2.html#gge}{GGE}.
}
\seealso{
\itemize{
\item \code{\link{GxE_build_interaction_matrix}},
\item \code{\link{check_model}},
\item \code{\link{check_model.fit_model_GxE}}
}
}
\author{
Pierre Riviere
}
| /man/model_GxE.Rd | no_license | gaelleVF/PPBstats-PPBmelange | R | false | true | 1,528 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/model_GxE.R
\name{model_GxE}
\alias{model_GxE}
\title{Run AMMI or GGE model}
\usage{
model_GxE(data, variable, gxe_analysis = "AMMI")
}
\arguments{
\item{data}{The data frame on which the model is run. It should come from \code{\link{format_data_PPBstats.data_agro}}}
\item{variable}{variable to analyse}
\item{gxe_analysis}{the analysis to carry out: "AMMI" or "GGE"}
}
\value{
The function returns a list with three elements :
\itemize{
\item info : a list with variable and gxe_analysis
\item ANOVA a list with five elements:
\itemize{
\item model
\item anova_model
\item germplasm_effects a list of two elements:
\itemize{
\item effects
\item intra_variance
}
\item location_effects
\item interaction_matrix
}
\item PCA : PCA object from FactoMineR
}
}
\description{
\code{model_GxE} runs AMMI or GGE model
}
\details{
scaling for interaction.matrix is not useful as the column mean is equal to 0 because of model constraints and all the values are regarding one variable, so it is possible to compare it into the PCA.
More information can be found in the book regarding \href{https://priviere.github.io/PPBstats_book/family-2.html#ammi}{AMMI} and \href{https://priviere.github.io/PPBstats_book/family-2.html#gge}{GGE}.
}
\seealso{
\itemize{
\item \code{\link{GxE_build_interaction_matrix}},
\item \code{\link{check_model}},
\item \code{\link{check_model.fit_model_GxE}}
}
}
\author{
Pierre Riviere
}
|
#source("https://bioconductor.org/biocLite.R")
####install.packages(c("binomTools","feather", "readr"),lib=c("/uufs/chpc.utah.edu/common/home/u6011224/software/pkg/RLibs/3.3.2i"),
#### repos=c("http://cran.us.r-project.org"),verbose=TRUE)
source("Interact Functions.R")
library(snpStats)
library(R.utils)
library(base)
library(boot)
library(binomTools)
library(data.table)
library(dplyr)
library(magrittr)
library(feather) # v0.0.0.9000
library(readr) # v0.2.2
###Although this BIM table is only associated with the Y = 1 group, the BIM table for the control (Y = 0) group is identical.
###Because of this, I just use the BIM table from the Y = 1 group as the main BIM table.
# Read the PLINK .bim marker table: one row per SNP.
BIMTable = fread("NICHD_PolycysticOvary_c1_PCOS.bim",
                 col.names = c("Chromosome", "ID","GeneticDist", "Position", "Allele1", "Allele2"))
BIMTable = BIMTable[,-3] ###Remove the GeneticDistance column. Has no information.
#dim(filter(BIMTable, Chromosome %in% 1:23, Position>0))###The "cleansed" table we want. 729286 (maybe messed up by find replace) by 5
# Keep chromosomes 1-23 with a valid (positive) position.
cleanBIMTable = filter(BIMTable, Chromosome %in% 1:23, Position>0)
# SNP IDs on chromosome 7 only; used below to subset the .bed genotype files.
filter(cleanBIMTable, Chromosome == 7) %>%
  select(ID) ->
  name7
# genotypes
########Clean bed 1
# Cases (Y = 1) and controls (Y = 0) live in separate .bed files; stack them
# into one genotype object (rows = subjects, columns = SNPs).
bed1 = read.plink("NICHD_PolycysticOvary_c1_PCOS.bed", select.snps = name7[,1])
bed1 = bed1$genotypes
bed0 = read.plink("NICHD_PolycysticOvary_c2_NUGENE.bed", select.snps = name7[,1])
bed0 = bed0$genotypes
genotype = rbind(bed1, bed0)
# Per-SNP summary statistics (call rate, MAF, ...) from snpStats.
snpsum.col <- col.summary(genotype)
call <- 0.95 ###Why remove low call rate?
# Keep SNPs genotyped in at least 95% of subjects.
use <- with(snpsum.col, (!is.na(Call.rate) & Call.rate >= call))
use[is.na(use)] <- FALSE
cat(ncol(genotype)-sum(use),"SNPs will be removed due to low call rate.\n")
genotype <- genotype[,use]
snpsum.col <- snpsum.col[use,]
# Keep SNPs with minor-allele frequency above 10%.
minor <- 0.1
use1 <- with(snpsum.col, (!is.na(MAF) & MAF > minor) )
use1[is.na(use1)] <- FALSE
cat(ncol(genotype)-sum(use1),"SNPs will be removed due to low MAF .\n" )
genotype <- genotype[,use1]
snpsum.col <- snpsum.col[use1,]
name7 = rownames(snpsum.col)###Update the SNP IDs we are looking at.
class(genotype) = "matrix"
###################################################################
###################################################################
###################################################################
###################################################################
###################################################################
###################################################################
###################################################################
###################################################################
###print("Line 39 executed")
# Convert raw genotype codes to a numeric matrix and prepend the phenotype
# column Y (1043 cases followed by 3056 controls -- these counts must match
# the row order produced by rbind(bed1, bed0) above; TODO confirm).
X = apply(genotype, 2, as.numeric)
Y = c(rep(1, 1043), rep(0, 3056))
data = cbind(Y, X)
##rm(X)####delete X, it does not need to be reference again.
#smallData = data[,c("Y", name7[1:4000,1])]###Dimensions are off here.
#dim(smallData)
# Iterative CA
# R3LongPrint() comes from "Interact Functions.R" (sourced above); it writes
# its results to chrom7LEAN.csv. The timestamp() calls bracket the run so
# its duration appears in the console log.
timestamp()
R3LongPrint(data, names = name7, resultFileName = "chrom7LEAN.csv")
timestamp()
###which(result != 0, arr.ind = TRUE)
##fwrite(as.data.frame(result), file = "chrom7Short.csv", row.names = TRUE, col.names = TRUE
| /JCIS/RealData/LongFormChrom7.R | no_license | rr1964/Research | R | false | false | 3,213 | r | #source("https://bioconductor.org/biocLite.R")
####install.packages(c("binomTools","feather", "readr"),lib=c("/uufs/chpc.utah.edu/common/home/u6011224/software/pkg/RLibs/3.3.2i"),
#### repos=c("http://cran.us.r-project.org"),verbose=TRUE)
source("Interact Functions.R")
library(snpStats)
library(R.utils)
library(base)
library(boot)
library(binomTools)
library(data.table)
library(dplyr)
library(magrittr)
library(feather) # v0.0.0.9000
library(readr) # v0.2.2
###Although this BIM table is only assciated with the Y = 1 group, the BIM table for the control (Y = 0) group is identical.
###Because of this, I just use the BIM table from the Y = 1 group as the main BIM table.
BIMTable = fread("NICHD_PolycysticOvary_c1_PCOS.bim",
col.names = c("Chromosome", "ID","GeneticDist", "Position", "Allele1", "Allele2"))
BIMTable = BIMTable[,-3] ###Remove the GeneticDistance column. Has no information.
#dim(filter(BIMTable, Chromosome %in% 1:23, Position>0))###The "cleansed" table we want. 729286 (maybe messed up by find replace) by 5
cleanBIMTable = filter(BIMTable, Chromosome %in% 1:23, Position>0)
filter(cleanBIMTable, Chromosome == 7) %>%
select(ID) ->
name7
# genotypes
########Clean bed 1
bed1 = read.plink("NICHD_PolycysticOvary_c1_PCOS.bed", select.snps = name7[,1])
bed1 = bed1$genotypes
bed0 = read.plink("NICHD_PolycysticOvary_c2_NUGENE.bed", select.snps = name7[,1])
bed0 = bed0$genotypes
genotype = rbind(bed1, bed0)
snpsum.col <- col.summary(genotype)
call <- 0.95 ###Why remove low call rate?
use <- with(snpsum.col, (!is.na(Call.rate) & Call.rate >= call))
use[is.na(use)] <- FALSE
cat(ncol(genotype)-sum(use),"SNPs will be removed due to low call rate.\n")
genotype <- genotype[,use]
snpsum.col <- snpsum.col[use,]
minor <- 0.1
use1 <- with(snpsum.col, (!is.na(MAF) & MAF > minor) )
use1[is.na(use1)] <- FALSE
cat(ncol(genotype)-sum(use1),"SNPs will be removed due to low MAF .\n" )
genotype <- genotype[,use1]
snpsum.col <- snpsum.col[use1,]
name7 = rownames(snpsum.col)###Update the chromsomes we are looking at.
class(genotype) = "matrix"
###################################################################
###################################################################
###################################################################
###################################################################
###################################################################
###################################################################
###################################################################
###################################################################
###print("Line 39 executed")
X = apply(genotype, 2, as.numeric)
Y = c(rep(1, 1043), rep(0, 3056))
data = cbind(Y, X)
##rm(X)####delete X, it does not need to be reference again.
#smallData = data[,c("Y", name7[1:4000,1])]###Dimensions are off here.
#dim(smallData)
# Iterative CA
timestamp()
R3LongPrint(data, names = name7, resultFileName = "chrom7LEAN.csv")
timestamp()
###which(result != 0, arr.ind = TRUE)
##fwrite(as.data.frame(result), file = "chrom7Short.csv", row.names = TRUE, col.names = TRUE
|
num_vec <- (c(1:10))^2
plot(num_vec)
# plot factor
set.seed(4)
abc <- factor(sample(c('A', 'B', 'C'), 20, replace = TRUE))
plot(abc)
abcde
abc_table <- table(abc)
plot(abc_table)
abcd <- sample(c('A', 'B', 'C'), 20, replace = TRUE)
abce <-factor(abcd)
plot(abce)
barplot(num_vec)
ab <- sample(c('A', 'B', 'C'), 20, replace = TRUE)
pie(num_vec:abce)
dotchart(num_vec)
boxplot(num_vec)
boxplot(num_vec)
# pie chart
hist(num_vec)
# dot plot
stripchart(num_vec)
# stem-and-leaf
stem(num_vec)
boxplot(iris$Sepal.Length)
hist(iris$Sepal.Length)
# kernel density curve
dens <- density(num_vec)
plot(dens)
scatterplot(num_vec)
plot(iris$Petal.Length, iris$Sepal.Length)
plot(iris$Petal.Length, iris$Species)
# some fake data
set.seed(1)
# hair color
hair <- factor(sample(c('blond', 'black', 'brown'), 100, replace = TRUE))
# eye color
eye <- factor(sample(c('blue', 'brown', 'green'), 100, replace = TRUE))
plot(hair, eye)
sunflowerplot(num_vec, num_vec)
boxplot(hair, eye)
dotchart(hair, eye)
sunflowerplot(hair, eye)
m <- matrix(1:8, 4, 2)
barplot(m)
barplot(m, beside = TRUE)
x <- margin.table(HairEyeColor, c(2, 1))
mosaicplot(x, main = "Relation between hair and eye color")
plot(iris[ , 1:4])
head(mtcars)
plot(mtcars$mpg,mtcars$hp)
# xlab and ylab
plot(mtcars$mpg, mtcars$hp, xlab = "miles per gallon",
ylab = "horsepower")
# title and subtitle
plot(mtcars$mpg, mtcars$hp, xlab = "miles per gallon",ylab = "horsepower", main = "Simple Scatterplot",
sub = 'data matcars')
plot(mtcars$mpg, mtcars$hp, xlim = c(10, 35), ylim = c(50, 400))
# using 'type' (e.g. lines)
plot(mtcars$mpg, mtcars$hp, type = "l")
# character expansion 'cex'
# and 'point character'
plot(mtcars$mpg, mtcars$hp, cex = 3.5, pch = 20)#cex is the weight of elemnts and pch is style we have them in R many
plot(mtcars$mpg, mtcars$hp,cex=5, pch = "@")
plot(mtcars$mpg, mtcars$hp, cex=3, pch = 1:25)
plot(mtcars$mpg, mtcars$hp, pch = 19, col = "red", cex = 1.2)#red colour
plot(mtcars$mpg, mtcars$hp, pch = 19, col = 1:25, cex = 1.2)#colour in sequence
plot(mtcars$mpg, mtcars$hp,
xlim = c(10, 35), ylim = c(50, 400),
xlab = "miles per gallon",
ylab = "horsepower",
main = "Simple Scatterplot",
sub = 'data matcars',
pch = 1:25, cex = 1.2, col = "blue")
# simple scatter-plot
plot(mtcars$mpg, mtcars$hp)
# adding text
text(mtcars$mpg, mtcars$hp, labels = rownames(mtcars))
# dummy legend
legend("topright", legend = "a legend")
title("Miles Per Galon -vs- Horsepower")
# simple scatter-plot
plot(mtcars$mpg, mtcars$hp, type = "n",
xlab = "miles per gallon", ylab = "horsepower")
# grid lines
abline(v = seq(from = 10, to = 30, by = 5), col = 'gray')
abline(h = seq(from = 50, to = 300, by = 50), col = ' gray')
# plot points
points(mtcars$mpg, mtcars$hp, pch = 19, col = "blue")
# plot text
text(mtcars$mpg, mtcars$hp, labels = rownames(mtcars),
pos = 4, col = "gray50")
# graphic title
title("Miles Per Galon -vs- Horsepower")
plot(mtcars$mpg, mtcars$hp, type = "n")
points(mtcars$mpg, mtcars$hp,col="red", pch=4, cex=2)
lines(mtcars$mpg, mtcars$hp, lty = "solid", lwd = 2, col = "red")
plot(mtcars$mpg, mtcars$hp, type = "n")
lines(mtcars$mpg, mtcars$hp, type = "l", lwd = 2)
x <- 2005:2015
y <- c(81, 83, 84.3, 85, 85.4, 86.5, 88.3, 88.6, 90.8, 91.1, 91.3)
plot(x, y, type = 'n', xlab = "Time", ylab = "Values")
lines(x, y, lwd = 2)
points(x,y, pch=3, cex=3, col="blue")
title(main = "Line Graph Example")
# drawing straight lines
plot(mtcars$mpg, mtcars$hp, type = "n")
abline(v = seq(10, 30, by = 5), h = seq(50, 300, by = 50))
points(mtcars$mpg, mtcars$hp, pch = 19, col = "red")
text(mtcars$mpg, mtcars$hp, row.names(mtcars))
n <- 11
theta <- seq(0, 2 * pi, length = n + 1)[1:n]
x <- sin(theta)
y <- cos(theta)
v1 <- rep(1:n, n)
v2 <- rep(1:n, rep(n, n))
plot(x, y, type = 'n')
segments(x[v1], y[v1], x[v2], y[v2])
plot(0.5, 0.5, xlim = c(0, 1), ylim = c(0, 1), type = 'n')
abline(h = c(.2, .5, .8),
v = c(.5, .2, .8), col = "lightgrey")
text(0.5, 0.5, "srt = 45, adj = c(.5, .5)",
srt = 45, adj = c(.5, .5))
text(0.5, 0.8, "adj = c(0, .5)", adj = c(0, .5))
text(0.5, 0.2, "adj = c(1, .5)", adj = c(1, .5))
text(0.2, 0.5, "adj = c(1, 1)", adj = c(1, 1))
text(0.8, 0.5, "adj = c(0, 0)", adj = c(0, 0))
plot.new()
plot.window(xlim = c(0, 10), ylim = c(-2, 4), xaxs = "i")
axis(1, col.axis = "grey30")
axis(2, col.axis = "grey30", las = 1)
title(main = "Main Title",
col.main = "tomato",
sub = "Plot Subtitle",
col.sub = "orange",
xlab = "x-axis", ylab = "y-axis",
col.lab = "blue", font.lab = 3)
box("figure", col = "grey90")
set.seed(5)
x <- rnorm(200)
y <- x + rnorm(200)
plot.new()
plot.window(xlim = c(-4.5, 4.5), xaxs = "i",
ylim = c(-4.5, 4.5), yaxs = "i")
z <- lm(y ~ x)
abline(h = -4:4, v = -4:4, col = "lightgrey")
abline(a = coef(z)[1], b = coef(z)[2], lwd = 2, col = "red")
points(x, y)
axis(1)
axis(2, las = 1)
box()
title(main = "A Fitted Regression Line")
plot.window(xlim = c(0, 1), ylim = c(0, 1))
plot.new()
plot.window(xlim = c(0, 1), ylim = c(0, 1))
arrows(.05, .075, .45, .9, code = 1)
arrows(.55, .9, .95, .075, code = 2)
arrows(.1, 0, .9, 0, code = 3)
text(.5, 1, "A", cex = 1.5)
text(0, 0, "B", cex = 1.5)
text(1, 0, "C", cex = 1.5)
install.packages("ggplot2")
# load ggplot2
library(ggplot2)
starwars
ggplot(data = starwars) +
geom_point(aes(x = height, y = weight, color = jedi,))
ggplot(data = mtcars, aes(x = mpg, y = hp)) +
geom_point()
ggplot(data = mtcars) +
geom_point(aes(x = mpg, y = hp))
ggplot() +
geom_point(data = mtcars, aes(x = mpg, y = hp))
ggplot(data = mtcars, aes(x = mpg, y = hp)) +
geom_line()
ggplot(data = mtcars) +
geom_line(aes(x = mpg, y = hp))
ggplot(data = mtcars, aes(x = mpg, y = hp)) +
geom_point(size = 3, col="green")
# 'shape' accepts 'pch' values
ggplot(data = mtcars, aes(x = mpg, y = hp)) +
geom_point(size = 3, color = "tomato", shape = 15)
mtcars<-mtcars
# mapping aesthetic color
ggplot(mtcars, aes(x = mpg, y = hp)) +
geom_point(aes(color = cyl))
ggplot(mtcars, aes(x = mpg, y = hp)) +
geom_point(aes(color = carb))
ggplot(data = mtcars, aes(x = mpg, y = hp)) +
geom_point(size = 3, color = "tomato", shape=24) +
xlab("miles per gallon") +
ylab("horse power") +
ggtitle("Scatter plot with ggplot2")+
theme_bw()
ggplot(data = mtcars,aes(x=mpg, y=hp))+
geom_point(aes(size=disp),shape=17, color = "tomato")+
xlab("miles per gallon")+
ylab("horse power")+
geom_smooth(method = "lm")
ggplot(data = mtcars, aes(x = mpg, y = hp)) +
geom_point(aes(color = cyl))
ggplot(data = mtcars, aes(x = mpg, y = hp)) +
geom_point(aes(color = as.factor(cyl)))
ggplot(data = mtcars, aes(x=mpg, y=disp))+
geom_point(aes(color =factor(am), size = hp), alpha=0.7)+
xlab("miles per gallon")+
ylab("displacement")+
title("Scatter plot with ggplot2")
ggplot(data = mtcars, aes(x = mpg)) +
geom_histogram(binwidth = 2)
ggplot(data = mtcars, aes(x = mpg)) +
geom_density(fill="blue", alpha=0.5)+
theme_bw()
ggplot(data = mtcars, aes(x = mpg)) +
geom_line(stat = 'density', col = "#a868c0", size = 2)
mpg_hp <- ggplot(data = mtcars, aes(x = mpg, y = hp)) +
geom_point(size = 3, color = "tomato")
print(mpg_hp)
# ggplot object
obj <- ggplot(data = mtcars, aes(x = mpg, y = hp, label = rownames(mtcars)))
obj+ ggtitle("Scatter plot") +xlab("miles per gallon")+ ylab("horse power")+
geom_text(aes(color=factor(am)))
ggplot(data = mtcars, aes(x = mpg, y = hp)) +
geom_point(aes(color = factor(am))) +
scale_x_continuous(name = "miles per gallon",
limits = c(10, 40),
breaks = c(10, 15, 20,25, 30,35, 40))+
scale_y_continuous(name="horse power",
limits=c(0,400),
breaks = c(100,200,300,400))
ggplot(data = mtcars, aes(x = mpg, y = hp)) +
geom_line(aes(color = factor(am))) +
scale_color_manual(values = c("orange", "purple"))
ggplot(data = mtcars, aes(x = mpg, y = hp)) +
geom_point(aes(color = factor(am))) +
scale_color_manual(values = c("orange", "purple"),
name = "transmission",
labels = c('no', 'yes'))
ggplot(data = mtcars, aes(x = mpg, y = hp)) +
geom_point(color = "#3088f0") +
facet_wrap(~ cyl)
tents <- read.csv("tents1.csv")
ggplot(data = tents, aes(x = Year, y = Percent_opposed)) +
geom_line(aes(color=Race)) +
scale_color_manual(values = c("red","green","blue"),
labels=c("black", "other", "white"))+
scale_y_continuous(breaks = c(10,20,30,40,50))+
xlab("Year") +
ylab("%Opposed to harsher gun law") +
ggtitle("Would you oppose harsher gun law?") +
theme_bw()
median(1:10)
a <- 12345
525 + 34 - 280
median(1:10)
a <- 10
b <- 20
d <- 30
a <- 10; b <- 20; d <- 30
{
a <- 10
b <- 20
d <- 30
}
mtcars[1:10]
{5 + 3; 4 * 2; 1 + 3}
z <- {x <- 4; y <- x^2; x + y}
# Square each element of a numeric vector.
square <- function(x) x^2
square(10)
# Return the sum of x and y together with the square of that sum,
# as a named list: list(sum = x + y, sumsqr = (x + y)^2).
sum_sqr <- function(x, y) {
  total <- x + y
  list(sum = total, sumsqr = total^2)
}
sum_sqr(3, 5)
# Add two numbers (vectorized over both arguments).
add <- function(x, y) x + y
add(2, 3)
# function with 2 arguments
add <- function(x, y) x + y  # fixed: the original's `z <- x + y` bound a dead local, so the sum was returned invisibly
# function with no arguments
# Print a fixed greeting to the console (returns the string invisibly, via print).
hi <- function() {
  print("Hi there!")
}
hi()
add(1,1)
# Print a two-line greeting addressed to `x` (defaults to an empty name).
hey <- function(x = "") {
  # paste() joins with single spaces, matching cat()'s default separator.
  msg <- paste("Hey", x, "\nHow is it going?")
  cat(msg)
}
hey()
## Hey
## How is it going?
hey("Gaston Sinan")
# Compute a * b + c, where b falls back to 2 when not supplied.
# Uses missing() (rather than a default value) to detect the absent b.
abc <- function(a, b, c = 3) {
  multiplier <- if (missing(b)) 2 else b
  a * multiplier + c
}
abc(2)
abc(2,2)
# Compute a * b + c, or a * b + c * d when the optional d is supplied.
# NULL serves as the "not supplied" sentinel for d.
abcd <- function(a, b = 2, c = 3, d = NULL) {
  if (is.null(d)) {
    a * b + c
  } else {
    a * b + c * d
  }
}
abcd(2)
abcd(2,2,2,2)
# Scatter-plot wrapper with a blue default colour and solid-circle points.
myplot <- function(x, y, col = "#3488ff", pch = 19) {
  plot(x = x, y = y, col = col, pch = pch)
}
myplot(1:5, 1:5)
# encapsulate your code
# Sample variance: mean squared deviation with Bessel's correction (n - 1).
variance <- function(x) {
  deviations <- x - mean(x)
  sum(deviations * deviations) / (length(x) - 1)
}
# check that it works
variance(1:2)
mean(2:8)
# adapt it gradually
# Sample variance with optional NA removal (mirrors base var()'s na.rm).
variance <- function(x, na.rm = FALSE) {
  if (na.rm) x <- x[!is.na(x)]
  d <- x - mean(x)
  sum(d^2) / (length(x) - 1)
}
variance(c(1:9, NA), na.rm = TRUE)
# Area of a rectangle; both sides default to 1 (unit square).
area_rect <- function(length = 1, width = 1) length * width
# default
area_rect()
# specifying argument values
area_rect(length = 10, width = 2)
area_rect(10,2)
x <- 7
if (x >= 0)
print("it is positive")
set.seed(9)
x <- round(rnorm(1), 1)
if (x > 0) {
print("x is positive")
} else if (x < 0) {
print("x is negative")
} else if (x == 0) {
print("x is zero")
}
true_false <- c(TRUE, FALSE)
ifelse(true_false, "false", "false")
first_name <- "harry"
last_name <- switch(
first_name,
harry = "potter",
ron = "weasley",
hermione = "granger",
"not available")
last_name
switch(
3,
"one",
"two",
"three",
"four")
student <- "ron"
house <- switch(
dracoo = "sinan",
student,
harry = ,
ron = ,
hermione = "gryffindor",
draco = "slytherin")
house
x <- 1:5
y <- x
for (i in 1:5) {
y[i] <- x[i]^2
}
y
x <- 1:5
y <- x^2
y
set.seed(6354)
dat <- data.frame(
replicate(6, sample(c(1:10, -99), 6, rep = TRUE))
)
names(dat) <- letters[1:6]
dat
for (i in 1:5) {
x[i] <- i
}
x[2]
x <- numeric(5)
x
for (i in 1:5) {
x[i] <- i
}
x
# Infix set-union operator: x %u% y is union(x, y).
`%u%` <- function(x, y) union(x, y)
1:5 %u% c(1, 3, 5, 7, 9)
| /kviz2_preparation.R | no_license | EdinZecevic/dataanalysis | R | false | false | 11,308 | r | num_vec <- (c(1:10))^2
plot(num_vec)
# plot factor
set.seed(4)
abc <- factor(sample(c('A', 'B', 'C'), 20, replace = TRUE))
plot(abc)
abcde
abc_table <- table(abc)
plot(abc_table)
abcd <- sample(c('A', 'B', 'C'), 20, replace = TRUE)
abce <-factor(abcd)
plot(abce)
barplot(num_vec)
ab <- sample(c('A', 'B', 'C'), 20, replace = TRUE)
pie(num_vec:abce)
dotchart(num_vec)
boxplot(num_vec)
boxplot(num_vec)
# pie chart
hist(num_vec)
# dot plot
stripchart(num_vec)
# stem-and-leaf
stem(num_vec)
boxplot(iris$Sepal.Length)
hist(iris$Sepal.Length)
# kernel density curve
dens <- density(num_vec)
plot(dens)
scatterplot(num_vec)
plot(iris$Petal.Length, iris$Sepal.Length)
plot(iris$Petal.Length, iris$Species)
# some fake data
set.seed(1)
# hair color
hair <- factor(sample(c('blond', 'black', 'brown'), 100, replace = TRUE))
# eye color
eye <- factor(sample(c('blue', 'brown', 'green'), 100, replace = TRUE))
plot(hair, eye)
sunflowerplot(num_vec, num_vec)
boxplot(hair, eye)
dotchart(hair, eye)
sunflowerplot(hair, eye)
m <- matrix(1:8, 4, 2)
barplot(m)
barplot(m, beside = TRUE)
x <- margin.table(HairEyeColor, c(2, 1))
mosaicplot(x, main = "Relation between hair and eye color")
plot(iris[ , 1:4])
head(mtcars)
plot(mtcars$mpg,mtcars$hp)
# xlab and ylab
plot(mtcars$mpg, mtcars$hp, xlab = "miles per gallon",
ylab = "horsepower")
# title and subtitle
plot(mtcars$mpg, mtcars$hp, xlab = "miles per gallon",ylab = "horsepower", main = "Simple Scatterplot",
sub = 'data matcars')
plot(mtcars$mpg, mtcars$hp, xlim = c(10, 35), ylim = c(50, 400))
# using 'type' (e.g. lines)
plot(mtcars$mpg, mtcars$hp, type = "l")
# character expansion 'cex'
# and 'point character'
plot(mtcars$mpg, mtcars$hp, cex = 3.5, pch = 20)#cex is the weight of elemnts and pch is style we have them in R many
plot(mtcars$mpg, mtcars$hp,cex=5, pch = "@")
plot(mtcars$mpg, mtcars$hp, cex=3, pch = 1:25)
plot(mtcars$mpg, mtcars$hp, pch = 19, col = "red", cex = 1.2)#red colour
plot(mtcars$mpg, mtcars$hp, pch = 19, col = 1:25, cex = 1.2)#colour in sequence
plot(mtcars$mpg, mtcars$hp,
xlim = c(10, 35), ylim = c(50, 400),
xlab = "miles per gallon",
ylab = "horsepower",
main = "Simple Scatterplot",
sub = 'data matcars',
pch = 1:25, cex = 1.2, col = "blue")
# simple scatter-plot
plot(mtcars$mpg, mtcars$hp)
# adding text
text(mtcars$mpg, mtcars$hp, labels = rownames(mtcars))
# dummy legend
legend("topright", legend = "a legend")
title("Miles Per Galon -vs- Horsepower")
# simple scatter-plot
plot(mtcars$mpg, mtcars$hp, type = "n",
xlab = "miles per gallon", ylab = "horsepower")
# grid lines
abline(v = seq(from = 10, to = 30, by = 5), col = 'gray')
abline(h = seq(from = 50, to = 300, by = 50), col = ' gray')
# plot points
points(mtcars$mpg, mtcars$hp, pch = 19, col = "blue")
# plot text
text(mtcars$mpg, mtcars$hp, labels = rownames(mtcars),
pos = 4, col = "gray50")
# graphic title
title("Miles Per Galon -vs- Horsepower")
plot(mtcars$mpg, mtcars$hp, type = "n")
points(mtcars$mpg, mtcars$hp,col="red", pch=4, cex=2)
lines(mtcars$mpg, mtcars$hp, lty = "solid", lwd = 2, col = "red")
plot(mtcars$mpg, mtcars$hp, type = "n")
lines(mtcars$mpg, mtcars$hp, type = "l", lwd = 2)
x <- 2005:2015
y <- c(81, 83, 84.3, 85, 85.4, 86.5, 88.3, 88.6, 90.8, 91.1, 91.3)
plot(x, y, type = 'n', xlab = "Time", ylab = "Values")
lines(x, y, lwd = 2)
points(x,y, pch=3, cex=3, col="blue")
title(main = "Line Graph Example")
# drawing straight lines
plot(mtcars$mpg, mtcars$hp, type = "n")
abline(v = seq(10, 30, by = 5), h = seq(50, 300, by = 50))
points(mtcars$mpg, mtcars$hp, pch = 19, col = "red")
text(mtcars$mpg, mtcars$hp, row.names(mtcars))
n <- 11
theta <- seq(0, 2 * pi, length = n + 1)[1:n]
x <- sin(theta)
y <- cos(theta)
v1 <- rep(1:n, n)
v2 <- rep(1:n, rep(n, n))
plot(x, y, type = 'n')
segments(x[v1], y[v1], x[v2], y[v2])
plot(0.5, 0.5, xlim = c(0, 1), ylim = c(0, 1), type = 'n')
abline(h = c(.2, .5, .8),
v = c(.5, .2, .8), col = "lightgrey")
text(0.5, 0.5, "srt = 45, adj = c(.5, .5)",
srt = 45, adj = c(.5, .5))
text(0.5, 0.8, "adj = c(0, .5)", adj = c(0, .5))
text(0.5, 0.2, "adj = c(1, .5)", adj = c(1, .5))
text(0.2, 0.5, "adj = c(1, 1)", adj = c(1, 1))
text(0.8, 0.5, "adj = c(0, 0)", adj = c(0, 0))
plot.new()
plot.window(xlim = c(0, 10), ylim = c(-2, 4), xaxs = "i")
axis(1, col.axis = "grey30")
axis(2, col.axis = "grey30", las = 1)
title(main = "Main Title",
col.main = "tomato",
sub = "Plot Subtitle",
col.sub = "orange",
xlab = "x-axis", ylab = "y-axis",
col.lab = "blue", font.lab = 3)
box("figure", col = "grey90")
set.seed(5)
x <- rnorm(200)
y <- x + rnorm(200)
plot.new()
plot.window(xlim = c(-4.5, 4.5), xaxs = "i",
ylim = c(-4.5, 4.5), yaxs = "i")
z <- lm(y ~ x)
abline(h = -4:4, v = -4:4, col = "lightgrey")
abline(a = coef(z)[1], b = coef(z)[2], lwd = 2, col = "red")
points(x, y)
axis(1)
axis(2, las = 1)
box()
title(main = "A Fitted Regression Line")
plot.window(xlim = c(0, 1), ylim = c(0, 1))
plot.new()
plot.window(xlim = c(0, 1), ylim = c(0, 1))
arrows(.05, .075, .45, .9, code = 1)
arrows(.55, .9, .95, .075, code = 2)
arrows(.1, 0, .9, 0, code = 3)
text(.5, 1, "A", cex = 1.5)
text(0, 0, "B", cex = 1.5)
text(1, 0, "C", cex = 1.5)
install.packages("ggplot2")
# load ggplot2
library(ggplot2)
starwars
ggplot(data = starwars) +
geom_point(aes(x = height, y = weight, color = jedi,))
ggplot(data = mtcars, aes(x = mpg, y = hp)) +
geom_point()
ggplot(data = mtcars) +
geom_point(aes(x = mpg, y = hp))
ggplot() +
geom_point(data = mtcars, aes(x = mpg, y = hp))
ggplot(data = mtcars, aes(x = mpg, y = hp)) +
geom_line()
ggplot(data = mtcars) +
geom_line(aes(x = mpg, y = hp))
ggplot(data = mtcars, aes(x = mpg, y = hp)) +
geom_point(size = 3, col="green")
# 'shape' accepts 'pch' values
ggplot(data = mtcars, aes(x = mpg, y = hp)) +
geom_point(size = 3, color = "tomato", shape = 15)
mtcars<-mtcars
# mapping aesthetic color
ggplot(mtcars, aes(x = mpg, y = hp)) +
geom_point(aes(color = cyl))
ggplot(mtcars, aes(x = mpg, y = hp)) +
geom_point(aes(color = carb))
ggplot(data = mtcars, aes(x = mpg, y = hp)) +
geom_point(size = 3, color = "tomato", shape=24) +
xlab("miles per gallon") +
ylab("horse power") +
ggtitle("Scatter plot with ggplot2")+
theme_bw()
ggplot(data = mtcars,aes(x=mpg, y=hp))+
geom_point(aes(size=disp),shape=17, color = "tomato")+
xlab("miles per gallon")+
ylab("horse power")+
geom_smooth(method = "lm")
ggplot(data = mtcars, aes(x = mpg, y = hp)) +
geom_point(aes(color = cyl))
ggplot(data = mtcars, aes(x = mpg, y = hp)) +
geom_point(aes(color = as.factor(cyl)))
ggplot(data = mtcars, aes(x=mpg, y=disp))+
geom_point(aes(color =factor(am), size = hp), alpha=0.7)+
xlab("miles per gallon")+
ylab("displacement")+
title("Scatter plot with ggplot2")
ggplot(data = mtcars, aes(x = mpg)) +
geom_histogram(binwidth = 2)
ggplot(data = mtcars, aes(x = mpg)) +
geom_density(fill="blue", alpha=0.5)+
theme_bw()
ggplot(data = mtcars, aes(x = mpg)) +
geom_line(stat = 'density', col = "#a868c0", size = 2)
mpg_hp <- ggplot(data = mtcars, aes(x = mpg, y = hp)) +
geom_point(size = 3, color = "tomato")
print(mpg_hp)
# ggplot object
obj <- ggplot(data = mtcars, aes(x = mpg, y = hp, label = rownames(mtcars)))
obj+ ggtitle("Scatter plot") +xlab("miles per gallon")+ ylab("horse power")+
geom_text(aes(color=factor(am)))
ggplot(data = mtcars, aes(x = mpg, y = hp)) +
geom_point(aes(color = factor(am))) +
scale_x_continuous(name = "miles per gallon",
limits = c(10, 40),
breaks = c(10, 15, 20,25, 30,35, 40))+
scale_y_continuous(name="horse power",
limits=c(0,400),
breaks = c(100,200,300,400))
ggplot(data = mtcars, aes(x = mpg, y = hp)) +
geom_line(aes(color = factor(am))) +
scale_color_manual(values = c("orange", "purple"))
ggplot(data = mtcars, aes(x = mpg, y = hp)) +
geom_point(aes(color = factor(am))) +
scale_color_manual(values = c("orange", "purple"),
name = "transmission",
labels = c('no', 'yes'))
ggplot(data = mtcars, aes(x = mpg, y = hp)) +
geom_point(color = "#3088f0") +
facet_wrap(~ cyl)
tents <- read.csv("tents1.csv")
ggplot(data = tents, aes(x = Year, y = Percent_opposed)) +
geom_line(aes(color=Race)) +
scale_color_manual(values = c("red","green","blue"),
labels=c("black", "other", "white"))+
scale_y_continuous(breaks = c(10,20,30,40,50))+
xlab("Year") +
ylab("%Opposed to harsher gun law") +
ggtitle("Would you oppose harsher gun law?") +
theme_bw()
median(1:10)
a <- 12345
525 + 34 - 280
median(1:10)
a <- 10
b <- 20
d <- 30
a <- 10; b <- 20; d <- 30
{
a <- 10
b <- 20
d <- 30
}
mtcars[1:10]
{5 + 3; 4 * 2; 1 + 3}
z <- {x <- 4; y <- x^2; x + y}
## Square a number (vectorised: works elementwise on vectors).
square <- function(x) x^2
square(10)
## Add two numbers and square the total; returns a list with elements
## `sum` (x + y) and `sumsqr` ((x + y)^2).
sum_sqr <- function(x, y) {
  total <- x + y
  list(sum = total, sumsqr = total^2)
}
sum_sqr(3, 5)
## Return the sum of two numbers (vectorised).
add <- function(x, y) x + y
add(2, 3)
# function with 2 arguments
# NOTE(review): the body is an assignment (`z <- x + y`), so the result is
# returned *invisibly* -- `add(1, 1)` below computes 2 but prints nothing.
# This may be a deliberate demonstration of invisible returns; if a visible
# result is intended, drop the `z <-` part.
add <- function(x, y) z<-x + y
# function with no arguments
hi <- function() print("Hi there!")
hi()
add(1,1)
## Print a two-line greeting to the console; `x` is an optional name
## (defaults to the empty string).  Called for its side effect only.
hey <- function(x = "") {
  parts <- list("Hey", x, "\nHow is it going?")
  do.call(cat, parts)
}
hey()
## Hey
## How is it going?
hey("Gaston Sinan")
## Compute a*b + c, where b is optional: when b is missing the first
## argument is doubled instead (a*2 + c).  c defaults to 3.
abc <- function(a, b, c = 3) {
  if (missing(b)) {
    return(a * 2 + c)
  }
  a * b + c
}
abc(2)
abc(2,2)
## Compute a*b + c by default; when the optional d is supplied, c is
## first scaled by d (a*b + c*d).  NULL is used as the "not given" sentinel.
abcd <- function(a, b = 2, c = 3, d = NULL) {
  offset <- if (is.null(d)) c else c * d
  a * b + offset
}
abcd(2)
abcd(2,2,2,2)
## Scatter-plot wrapper with project defaults: blue (#3488ff) filled
## circles (pch = 19).  Both defaults can be overridden per call.
myplot <- function(x, y, col = "#3488ff", pch = 19) {
  plot(x, y, col = col, pch = pch)
}
myplot(1:5, 1:5)
# encapsulate your code
## Sample variance of x (denominator n - 1).  No NA handling: any NA in
## x propagates to the result.
variance <- function(x) {
  deviations <- x - mean(x)
  sum(deviations^2) / (length(x) - 1)
}
# check that it works
variance(1:2)
mean(2:8)
# adapt it gradually
## Sample variance of x (denominator n - 1).  When na.rm = TRUE, missing
## values are dropped before the computation; otherwise NAs propagate.
variance <- function(x, na.rm = FALSE) {
  if (na.rm) x <- x[!is.na(x)]
  dev <- x - mean(x)
  sum(dev^2) / (length(x) - 1)
}
variance(c(1:9, NA), na.rm = TRUE)
## Area of a rectangle; both sides default to 1 (unit square).
area_rect <- function(length = 1, width = 1) length * width
# default
area_rect()
# specifying argument values
area_rect(length = 10, width = 2)
area_rect(10,2)
x <- 7
if (x >= 0)
print("it is positive")
set.seed(9)
x <- round(rnorm(1), 1)
if (x > 0) {
print("x is positive")
} else if (x < 0) {
print("x is negative")
} else if (x == 0) {
print("x is zero")
}
true_false <- c(TRUE, FALSE)
# Vectorised selection: "true" where the element is TRUE, "false" where
# FALSE.  (The original passed "false" for both branches, making the
# ifelse() call a no-op.)
ifelse(true_false, "true", "false")
first_name <- "harry"
last_name <- switch(
first_name,
harry = "potter",
ron = "weasley",
hermione = "granger",
"not available")
last_name
switch(
3,
"one",
"two",
"three",
"four")
student <- "ron"
house <- switch(
dracoo = "sinan",
student,
harry = ,
ron = ,
hermione = "gryffindor",
draco = "slytherin")
house
x <- 1:5
y <- x
for (i in 1:5) {
y[i] <- x[i]^2
}
y
x <- 1:5
y <- x^2
y
set.seed(6354)
dat <- data.frame(
replicate(6, sample(c(1:10, -99), 6, rep = TRUE))
)
names(dat) <- letters[1:6]
dat
for (i in 1:5) {
x[i] <- i
}
x[2]
x <- numeric(5)
x
for (i in 1:5) {
x[i] <- i
}
x
## Infix set-union operator: `x %u% y` is shorthand for union(x, y).
"%u%" <- function(x, y) union(x, y)
1:5 %u% c(1, 3, 5, 7, 9)
|
1251fbfa17df28430ec87eaedb51ec72 p5-1.pddl_planlen=19.qdimacs 594 1408 | /code/dcnf-ankit-optimized/Results/QBFLIB-2018/A1/Database/Kronegger-Pfandler-Pichler/bomb/p5-1.pddl_planlen=19/p5-1.pddl_planlen=19.R | no_license | arey0pushpa/dcnf-autarky | R | false | false | 70 | r | 1251fbfa17df28430ec87eaedb51ec72 p5-1.pddl_planlen=19.qdimacs 594 1408 |
library(XML)
library(dplyr)
library(purrr)
library(data.table)
## Read a GPX track file and return a data.table with one row per track
## point and double columns ele (elevation), time (seconds since the
## first fix), lat and lon.
## NOTE(review): the column order after unlist() is assumed to be
## ele, time, lat, lon -- confirm against the GPX files actually used.
load_GPX <- function(file_name) {
  gpx <- file_name %>%
    xmlTreeParse(useInternalNodes = TRUE) %>%
    xmlRoot %>%
    xmlToList %>%
    (function(x) x$trk) %>%  # keep only the <trk> node
    (function(x) unlist(x[names(x) == "trkseg"], recursive = FALSE)) %>%
    map_df(function(x) as.data.frame(t(unlist(x)), stringsAsFactors=FALSE)) %>%
    mutate(time = lubridate::parse_date_time(time, orders = "ymdHMS")) %>%
    mutate(time = time - min(time)) %>%  # rebase time to the first fix
    sapply(as.double) %>%
    as.data.table()
  colnames(gpx) <- c('ele', 'time', 'lat', 'lon')
  gpx
}
## Smooth the elevation trace and derive per-segment deltas from a
## load_GPX() table: dT (time, s), dH (elevation, m), dS (geodesic
## distance, m), cumulative dist (km) and grade (%).  Returns columns
## id, time, dist, grade, dT, dS, dH.
calculate_diffs <- function(gpx) {
  gpx[, ele:= smooth(ele, kind = '3RS3R'),] %>%
    .[, c('ele_next', 'lat_next', 'lon_next', 'time_next'):=
        list(lead(ele), lead(lat), lead(lon), lead(time)),] %>%
    na.omit(.) %>%
    .[, id:= .I,] %>%
    .[, dT:= time_next - time, by = id] %>%
    .[, dH:= as.numeric(ele_next - ele), by = id] %>%
    ## geosphere::distGeo() expects points as (longitude, latitude);
    ## the original passed (lat, lon), silently computing wrong distances.
    .[, dS:= geosphere::distGeo(c(lon, lat), c(lon_next, lat_next)),
      by = id] %>%
    .[, dist:= round(cumsum(dS)*0.001,3),] %>%
    .[, grade:= round(dH/dS * 100, 1),] %>%
    select(id, time, dist, grade, dT, dS, dH)
}
# should give data table in the same schema as calculate_diffs returns
# data should be of a race effort over trail terain throughout
# new column speed modifier, 1 for flat, less than one for ascents (with different
# levels), and more or less than one on descents( steep descents are slow)
## NOTE(review): unimplemented stub -- currently returns NULL invisibly.
get_gap_matrix <- function(data) {
}
## Effort coefficient for a (scalar) grade percentage.  The grade is
## binned to the nearest lower even integer (2 % table resolution) and
## clamped to [-30, 30]; out-of-range grades use the boundary value.
## The original nested `return()` calls inside ifelse(); ifelse() is
## vectorised selection, not control flow, so plain if/else is used here.
gap_coefficient <- function(grade) {
  grade <- floor(grade / 2) * 2
  ## Coefficient table, one entry per 2 % bin from -30 % to +30 %.
  grades <- seq(-30, 30, by = 2)
  coefs  <- c(2, 2, 2, 2, 2, 1, 1, 1,
              0, 0, 0, -1, -1, -1, 0, 0,
              0, 0, 1, 1, 1, 2, 2, 2,
              2, 2, 3, 3, 3, 3, 3)
  if (grade > 30) {
    coefs[length(coefs)]   # steeper than +30 % behaves like +30 %
  } else if (grade < -30) {
    coefs[1]               # steeper than -30 % behaves like -30 %
  } else {
    coefs[match(grade, grades)]
  }
}
## Estimated race time per segment (seconds): horizontal distance plus
## elevation change weighted by the grade coefficient, at flat_pace
## (min/km).  NOTE(review): the original called an undefined `fun()`;
## gap_coefficient() is the only grade-weight function in this file and
## appears to be what was intended -- confirm against callers.
grade_adjusted_effort <- function(data, flat_pace) {
  data[, race_time:= (dS + dH*gap_coefficient(grade))*flat_pace * 60 / 1000,]
}
| /scripts/R/funcs.R | no_license | jonark/forecaster | R | false | false | 2,269 | r | library(XML)
library(dplyr)
library(purrr)
library(data.table)
## Read a GPX track file and return a data.table with one row per track
## point and double columns ele (elevation), time (seconds since the
## first fix), lat and lon.
## NOTE(review): the column order after unlist() is assumed to be
## ele, time, lat, lon -- confirm against the GPX files actually used.
load_GPX <- function(file_name) {
  gpx <- file_name %>%
    xmlTreeParse(useInternalNodes = TRUE) %>%
    xmlRoot %>%
    xmlToList %>%
    (function(x) x$trk) %>%  # keep only the <trk> node
    (function(x) unlist(x[names(x) == "trkseg"], recursive = FALSE)) %>%
    map_df(function(x) as.data.frame(t(unlist(x)), stringsAsFactors=FALSE)) %>%
    mutate(time = lubridate::parse_date_time(time, orders = "ymdHMS")) %>%
    mutate(time = time - min(time)) %>%  # rebase time to the first fix
    sapply(as.double) %>%
    as.data.table()
  colnames(gpx) <- c('ele', 'time', 'lat', 'lon')
  gpx
}
## Smooth the elevation trace and derive per-segment deltas from a
## load_GPX() table: dT (time, s), dH (elevation, m), dS (geodesic
## distance, m), cumulative dist (km) and grade (%).  Returns columns
## id, time, dist, grade, dT, dS, dH.
calculate_diffs <- function(gpx) {
  gpx[, ele:= smooth(ele, kind = '3RS3R'),] %>%
    .[, c('ele_next', 'lat_next', 'lon_next', 'time_next'):=
        list(lead(ele), lead(lat), lead(lon), lead(time)),] %>%
    na.omit(.) %>%
    .[, id:= .I,] %>%
    .[, dT:= time_next - time, by = id] %>%
    .[, dH:= as.numeric(ele_next - ele), by = id] %>%
    ## geosphere::distGeo() expects points as (longitude, latitude);
    ## the original passed (lat, lon), silently computing wrong distances.
    .[, dS:= geosphere::distGeo(c(lon, lat), c(lon_next, lat_next)),
      by = id] %>%
    .[, dist:= round(cumsum(dS)*0.001,3),] %>%
    .[, grade:= round(dH/dS * 100, 1),] %>%
    select(id, time, dist, grade, dT, dS, dH)
}
# should give data table in the same schema as calculate_diffs returns
# data should be of a race effort over trail terain throughout
# new column speed modifier, 1 for flat, less than one for ascents (with different
# levels), and more or less than one on descents( steep descents are slow)
## NOTE(review): unimplemented stub -- currently returns NULL invisibly.
get_gap_matrix <- function(data) {
}
## Effort coefficient for a (scalar) grade percentage.  The grade is
## binned to the nearest lower even integer (2 % table resolution) and
## clamped to [-30, 30]; out-of-range grades use the boundary value.
## The original nested `return()` calls inside ifelse(); ifelse() is
## vectorised selection, not control flow, so plain if/else is used here.
gap_coefficient <- function(grade) {
  grade <- floor(grade / 2) * 2
  ## Coefficient table, one entry per 2 % bin from -30 % to +30 %.
  grades <- seq(-30, 30, by = 2)
  coefs  <- c(2, 2, 2, 2, 2, 1, 1, 1,
              0, 0, 0, -1, -1, -1, 0, 0,
              0, 0, 1, 1, 1, 2, 2, 2,
              2, 2, 3, 3, 3, 3, 3)
  if (grade > 30) {
    coefs[length(coefs)]   # steeper than +30 % behaves like +30 %
  } else if (grade < -30) {
    coefs[1]               # steeper than -30 % behaves like -30 %
  } else {
    coefs[match(grade, grades)]
  }
}
## Estimated race time per segment (seconds): horizontal distance plus
## elevation change weighted by the grade coefficient, at flat_pace
## (min/km).  NOTE(review): the original called an undefined `fun()`;
## gap_coefficient() is the only grade-weight function in this file and
## appears to be what was intended -- confirm against callers.
grade_adjusted_effort <- function(data, flat_pace) {
  data[, race_time:= (dS + dH*gap_coefficient(grade))*flat_pace * 60 / 1000,]
}
|
## testthat unit tests for getMeasurementTypes().
context("Test getMeasurementTypes")

## Invalid inputs: a logical measurementType must raise an error and an
## unknown name must only warn.  The regexps match the package's runtime
## messages verbatim (including the "charater" typo in the message text).
test_that("Testing error is given", {
  skip_on_cran()
  expect_error(getMeasurementTypes(measurementType = TRUE),
               regexp = 'argument must be numeric, integer, charater or NULL')
  expect_warning(getMeasurementTypes(measurementType = 'foo'),
                 regexp = 'no matching measurement types found')
})

## Happy path: a full listing, a lookup by numeric Id, and a lookup by
## name each return a two-column (Id, Name) data.frame.
test_that("Testing data is returned", {
  skip_on_cran()
  expect_is(test <- getMeasurementTypes(), 'data.frame')
  expect_equal(ncol(test), 2)
  expect_true(all(c('Activity Cycle', 'Diet','Litter Size','Ranging Behaviour','Wing Morphology')
                  %in% test$Name))
  expect_is(test <- getMeasurementTypes(1), 'data.frame')
  expect_equal(ncol(test), 2)
  expect_equal(nrow(test), 1)
  expect_equal(test, data.frame(Id = 1, Name = 'Body Mass', stringsAsFactors = FALSE))
  expect_is(test <- getMeasurementTypes('Diet'), 'data.frame')
  expect_equal(ncol(test), 2)
  expect_equal(nrow(test), 1)
  expect_equal(test, data.frame(Id = 21, Name = 'Diet', stringsAsFactors = FALSE))
})
}) | /tests/testthat/testgetMeasurementTypes.R | no_license | BiologicalRecordsCentre/rYoutheria | R | false | false | 1,063 | r | context("Test getMeasurementTypes")
test_that("Testing error is given", {
skip_on_cran()
expect_error(getMeasurementTypes(measurementType = TRUE),
regexp = 'argument must be numeric, integer, charater or NULL')
expect_warning(getMeasurementTypes(measurementType = 'foo'),
regexp = 'no matching measurement types found')
})
test_that("Testing data is returned", {
skip_on_cran()
expect_is(test <- getMeasurementTypes(), 'data.frame')
expect_equal(ncol(test), 2)
expect_true(all(c('Activity Cycle', 'Diet','Litter Size','Ranging Behaviour','Wing Morphology')
%in% test$Name))
expect_is(test <- getMeasurementTypes(1), 'data.frame')
expect_equal(ncol(test), 2)
expect_equal(nrow(test), 1)
expect_equal(test, data.frame(Id = 1, Name = 'Body Mass', stringsAsFactors = FALSE))
expect_is(test <- getMeasurementTypes('Diet'), 'data.frame')
expect_equal(ncol(test), 2)
expect_equal(nrow(test), 1)
expect_equal(test, data.frame(Id = 21, Name = 'Diet', stringsAsFactors = FALSE))
}) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/load.R
\name{get_datadir}
\alias{get_datadir}
\title{Get data directory}
\usage{
get_datadir(datadir)
}
\arguments{
\item{datadir}{character. Directory as passed to \code{\link{load_layers}}.
This does not change the data directory used by \code{\link{load_layers}}
but only shows the result of passing a specific datadir. If \code{NULL} is
passed then the \code{sdmpredictors_datadir} option is read. To set this
run \code{options(sdmpredictors_datadir = "<your preferred directory>")} in
every session or in your .RProfile.}
}
\value{
Path to the data directory.
}
\description{
Find out where the environmental data is stored
}
\seealso{
\code{\link{load_layers}}
}
\keyword{internal}
| /man/get_datadir.Rd | permissive | cran/sdmpredictors | R | false | true | 792 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/load.R
\name{get_datadir}
\alias{get_datadir}
\title{Get data directory}
\usage{
get_datadir(datadir)
}
\arguments{
\item{datadir}{character. Directory as passed to \code{\link{load_layers}}.
This does not change the data directory used by \code{\link{load_layers}}
but only shows the result of passing a specific datadir. If \code{NULL} is
passed then the \code{sdmpredictors_datadir} option is read. To set this
run \code{options(sdmpredictors_datadir = "<your preferred directory>")} in
every session or in your .RProfile.}
}
\value{
Path to the data directory.
}
\description{
Find out where the environmental data is stored
}
\seealso{
\code{\link{load_layers}}
}
\keyword{internal}
|
# $Id: mvt.R 334 2017-03-01 14:59:37Z thothorn $
##' Is x a correlation matrix?  TRUE iff x is a square numeric matrix
##' with all entries in [-1, 1] (up to floating-point tolerance) and a
##' unit diagonal.
##' @param x typically a matrix
chkcorr <- function(x) {
    if (!is.matrix(x)) {
        return(FALSE)
    }
    n <- nrow(x)
    if (n != ncol(x)) {
        return(FALSE)
    }
    dimnames(x) <- NULL
    storage.mode(x) <- "numeric"
    tol_one <- 1 + sqrt(.Machine$double.eps)
    in_range <- (min(x) >= -tol_one) && (max(x) <= tol_one)
    unit_diag <- isTRUE(all.equal(diag(x), rep(1, n)))
    in_range && unit_diag
}
## Validate and normalise the argument set shared by pmvnorm()/pmvt():
## recycles lower/upper/mean to a common length, enforces lower <= upper,
## accepts at most one of `corr`/`sigma` (corr wins when both are given;
## a missing pair defaults to the identity), and detects the univariate
## case.  Returns list(lower, upper, mean, corr, sigma, uni).
checkmvArgs <- function(lower, upper, mean, corr, sigma)
{
    if (!is.numeric(lower) || !is.vector(lower))
        stop(sQuote("lower"), " is not a numeric vector")
    if (!is.numeric(upper) || !is.vector(upper))
        stop(sQuote("upper"), " is not a numeric vector")
    if (!is.numeric(mean) || !is.vector(mean))
        stop(sQuote("mean"), " is not a numeric vector")
    if (is.null(lower) || any(is.na(lower)))
        stop(sQuote("lower"), " not specified or contains NA")
    if (is.null(upper) || any(is.na(upper)))
        stop(sQuote("upper"), " not specified or contains NA")
    rec <- cbind(lower, upper, mean)# <--> recycling to same length
    lower <- rec[,"lower"]
    upper <- rec[,"upper"]
    if (!all(lower <= upper))
        stop("at least one element of ", sQuote("lower"), " is larger than ",
             sQuote("upper"))
    mean <- rec[,"mean"]
    if (any(is.na(mean)))
        stop("mean contains NA")
    if (is.null(corr) && is.null(sigma)) {
        corr <- diag(length(lower))
        # warning("both ", sQuote("corr"), " and ", sQuote("sigma"),
        #         " not specified: using sigma=diag(length(lower))")
    }
    if (!is.null(corr) && !is.null(sigma)) {
        sigma <- NULL
        warning("both ", sQuote("corr"), " and ", sQuote("sigma"),
                " specified: ignoring ", sQuote("sigma"))
    }
    ## UNI flags the univariate case (scalar corr/sigma given).
    UNI <- FALSE
    if (!is.null(corr)) {
        if (!is.numeric(corr))
            stop(sQuote("corr"), " is not numeric")
        if (!is.matrix(corr)) {
            if (length(corr) == 1)
                UNI <- TRUE
            if (length(corr) != length(lower))
                stop(sQuote("diag(corr)"), " and ", sQuote("lower"),
                     " are of different length")
        } else {
            if (length(corr) == 1) {
                UNI <- TRUE
                corr <- corr[1,1]
                if (length(lower) != 1)
                    stop(sQuote("corr"), " and ", sQuote("lower"),
                         " are of different length")
            } else {
                if (length(diag(corr)) != length(lower))
                    stop(sQuote("diag(corr)"), " and ", sQuote("lower"),
                         " are of different length")
                if (!chkcorr(corr))
                    stop(sQuote("corr"), " is not a correlation matrix")
            }
        }
    }
    if (!is.null(sigma)) {
        if (!is.numeric(sigma))
            stop(sQuote("sigma"), " is not numeric")
        if (!is.matrix(sigma)) {
            if (length(sigma) == 1)
                UNI <- TRUE
            if (length(sigma) != length(lower))
                stop(sQuote("diag(sigma)"), " and ", sQuote("lower"),
                     " are of different length")
        } else {
            if (length(sigma) == 1) {
                UNI <- TRUE
                sigma <- sigma[1,1]
                if (length(lower) != 1)
                    stop(sQuote("sigma"), " and ", sQuote("lower"),
                         " are of different length")
            } else {
                if (length(diag(sigma)) != length(lower))
                    stop(sQuote("diag(sigma)"), " and ", sQuote("lower"),
                         " are of different length")
                if (!isTRUE(all.equal(sigma, t(sigma))) || any(diag(sigma) < 0))
                    stop(sQuote("sigma"), " is not a covariance matrix")
            }
        }
    }
    list(lower=lower, upper=upper, mean=mean, corr=corr, sigma=sigma, uni=UNI)
}
## Multivariate normal distribution function P(lower < X < upper).
## Arguments are validated/normalised via checkmvArgs(); the univariate
## case falls back to pnorm(), otherwise the work is delegated to mvt()
## with df = 0 (the internal code for the normal limit) after centering
## at the mean and, for a covariance sigma, standardising to a
## correlation matrix.  Returns the probability with "error" and "msg"
## attributes from the underlying algorithm.
pmvnorm <- function(lower=-Inf, upper=Inf, mean=rep(0, length(lower)), corr=NULL, sigma=NULL,
                    algorithm = GenzBretz(), ...)
{
    carg <- checkmvArgs(lower=lower, upper=upper, mean=mean, corr=corr,
                        sigma=sigma)
    if (!is.null(carg$corr)) {
        corr <- carg$corr
        if (carg$uni) {
            stop(sQuote("sigma"), " not specified: cannot compute pnorm")
        } else {
            ## center at the mean; a correlation matrix needs no scaling
            lower <- carg$lower - carg$mean
            upper <- carg$upper - carg$mean
            mean <- rep(0, length(lower))
            RET <- mvt(lower=lower, upper=upper, df=0, corr=corr, delta=mean,
                       algorithm = algorithm, ...)
        }
    } else {
        if (carg$uni) {
            RET <- list(value = pnorm(carg$upper, mean=carg$mean, sd=sqrt(carg$sigma)) -
                                pnorm(carg$lower, mean=carg$mean, sd=sqrt(carg$sigma)),
                        error = 0, msg="univariate: using pnorm")
        } else {
            ## standardise: shift by the mean, scale by the std deviations
            lower <- (carg$lower - carg$mean)/sqrt(diag(carg$sigma))
            upper <- (carg$upper - carg$mean)/sqrt(diag(carg$sigma))
            mean <- rep(0, length(lower))
            corr <- cov2cor(carg$sigma)
            RET <- mvt(lower=lower, upper=upper, df=0, corr=corr, delta=mean,
                       algorithm = algorithm, ...)
        }
    }
    ## return
    structure(RET$value, "error" = RET$error, "msg" = RET$msg)
}
## Multivariate t distribution function P(lower < X < upper) with df
## degrees of freedom and noncentrality/shift `delta`.
## type = "Kshirsagar" is the noncentral t; type = "shifted" is a central
## t shifted by delta, handled by transforming the integration bounds.
## df = 0 or Inf gives the normal limit; the univariate case falls back
## to pt()/pnorm().  Returns the probability with "error"/"msg" attributes.
pmvt <- function(lower=-Inf, upper=Inf, delta=rep(0, length(lower)),
                 df=1, corr=NULL, sigma=NULL,
                 algorithm = GenzBretz(),
                 type = c("Kshirsagar", "shifted"), ...)
{
    type <- match.arg(type)
    carg <- checkmvArgs(lower=lower, upper=upper, mean=delta, corr=corr,
                        sigma=sigma)
    if (type == "shifted") { # can be handled by integrating over central t
        if(!is.null(carg$corr)){ # using transformed integration bounds
            d <- 1
        } else {
            if(!is.null(carg$sigma)){
                d <- sqrt(diag(carg$sigma))
                carg$corr <- cov2cor(carg$sigma)
            }
        }
        carg$lower <- (carg$lower - carg$mean)/d
        carg$upper <- (carg$upper - carg$mean)/d
        carg$mean <- rep(0, length(carg$mean))
    }
    if (is.null(df))
        stop(sQuote("df"), " not specified")
    if (df < 0) # MH: was any(..)
        stop("cannot compute multivariate t distribution with ",
             sQuote("df"), " < 0")
    if(is.finite(df) && (df != as.integer(df))) # MH: was !isTRUE(all.equal(as.integer(df), df))
        stop(sQuote("df"), " is not an integer")
    if (carg$uni) {
        if (df > 0) # df = Inf is taken care of by pt()
            RET <- list(value = pt(carg$upper, df=df, ncp=carg$mean) -
                                pt(carg$lower, df=df, ncp=carg$mean),
                        error = 0, msg="univariate: using pt")
        else
            RET <- list(value = pnorm(carg$upper, mean = carg$mean) -
                                pnorm(carg$lower, mean=carg$mean),
                        error = 0, msg="univariate: using pnorm")
    } else { # mvt() takes care of df = 0 || df = Inf
        if (!is.null(carg$corr)) {
            RET <- mvt(lower=carg$lower, upper=carg$upper, df=df, corr=carg$corr,
                       delta=carg$mean, algorithm = algorithm, ...)
        } else { # need to transform integration bounds and delta
            d <- sqrt(diag(carg$sigma))
            lower <- carg$lower/d
            upper <- carg$upper/d
            corr <- cov2cor(carg$sigma)
            RET <- mvt(lower=lower, upper=upper, df=df, corr=corr,
                       delta=carg$mean/d, algorithm = algorithm, ...)
        }
    }
    attr(RET$value, "error") <- RET$error
    attr(RET$value, "msg") <- RET$msg
    return(RET$value)
}
## identical(., Inf) would be faster but not vectorized
isInf  <- function(x) is.infinite(x) & x > 0  # elementwise +Inf test
isNInf <- function(x) is.infinite(x) & x < 0  # elementwise -Inf test
## Internal workhorse: compute the multivariate t (or, for df = 0/Inf,
## normal) probability over the box [lower, upper] for correlation
## matrix `corr` and noncentrality `delta`, dispatching to the selected
## algorithm via probval().  Encodes the bound types into the `infin`
## vector expected by the FORTRAN code (2 = both finite, 1 = upper Inf,
## 0 = lower -Inf, -1 = both infinite).  Returns
## list(value, error, msg).
mvt <- function(lower, upper, df, corr, delta, algorithm = GenzBretz(), ...)
{
    ### only for compatibility with older versions
    addargs <- list(...)
    if (length(addargs) > 0)
        algorithm <- GenzBretz(...)
    else if (is.function(algorithm) || is.character(algorithm))
        algorithm <- do.call(algorithm, list())
    ### handle cases where the support is the empty set
    ## Note: checkmvArgs() has been called ==> lower, upper are *not* NA
    if (any(abs(d <- lower - upper) < sqrt(.Machine$double.eps)*(abs(lower)+abs(upper)) |
            lower == upper)) ## e.g. Inf == Inf
        return(list(value = 0, error = 0, msg = "lower == upper"))
    n <- ncol(corr)
    if (is.null(n) || n < 2) stop("dimension less then n = 2")
    if (length(lower) != n) stop("wrong dimensions")
    if (length(upper) != n) stop("wrong dimensions")
    if (n > 1000) stop("only dimensions 1 <= n <= 1000 allowed")
    ## encode bound types for the FORTRAN routine
    infin <- rep(2, n)
    infin[ isInf(upper)] <- 1
    infin[isNInf(lower)] <- 0
    infin[isNInf(lower) & isInf(upper)] <- -1
    ### fix for Miwa algo:
    ### pmvnorm(lower=c(-Inf, 0, 0), upper=c(0, Inf, Inf),
    ###         mean=c(0, 0, 0), sigma=S, algorithm = Miwa())
    ### returned NA
    if (inherits(algorithm, "Miwa")) {
        ## drop unconstrained margins and flip one-sided -Inf bounds so
        ## that Miwa only sees upper-bounded or two-sided margins
        if (n >= 3 && any(infin == -1)) {
            WhereBothInfIs <- which(infin == -1)
            n <- n - length(WhereBothInfIs)
            corr <- corr[-WhereBothInfIs, -WhereBothInfIs]
            upper <- upper[-WhereBothInfIs]
            lower <- lower[-WhereBothInfIs]
        }
        if (n >= 2 && any(infin == 0)) {
            WhereNegativInfIs <- which(infin==0)
            inversecorr <- rep(1, n)
            inversecorr[WhereNegativInfIs] <- -1
            corr <- diag(inversecorr) %*% corr %*% diag(inversecorr) ## MM_FIXME
            infin[WhereNegativInfIs] <- 1
            tempsaveupper <- upper[WhereNegativInfIs]
            upper[WhereNegativInfIs] <- -lower[WhereNegativInfIs]
            lower[WhereNegativInfIs] <- -tempsaveupper
        }
    }
    ### this is a bug in `mvtdst' not yet fixed
    if (all(infin < 0))
        return(list(value = 1, error = 0, msg = "Normal Completion"))
    ## pass only the upper triangle of the correlation matrix
    if (n > 1) {
        corrF <- matrix(as.vector(corr), ncol=n, byrow=TRUE)
        corrF <- corrF[upper.tri(corrF)]
    } else corrF <- corr
    ret <- probval(algorithm, n, df, lower, upper, infin, corr, corrF, delta)
    inform <- ret$inform
    msg <-
        if (inform == 0) "Normal Completion"
        else if (inform == 1) "Completion with error > abseps"
        else if (inform == 2) "N greater 1000 or N < 1"
        else if (inform == 3) "Covariance matrix not positive semidefinite"
        else inform
    ## return including error est. and msg:
    list(value = ret$value, error = ret$error, msg = msg)
}
## Draw n samples from a multivariate t distribution with scale matrix
## `sigma`, df degrees of freedom and noncentrality/shift `delta`.
## "Kshirsagar": (normal with mean delta) / sqrt(chi^2_df / df);
## "shifted":    central t sample shifted by delta afterwards.
## df = 0 or Inf degenerates to rmvnorm().  Passing `mean` is rejected
## because it would sample a normal mean-variance mixture, not a t.
rmvt <- function(n, sigma = diag(2), df = 1,
                 delta = rep(0, nrow(sigma)),
                 type = c("shifted", "Kshirsagar"), ...)
{
    if (length(delta) != nrow(sigma))
        stop("delta and sigma have non-conforming size")
    if (hasArg(mean)) # MH: normal mean variance mixture != t distribution (!)
        stop("Providing 'mean' does *not* sample from a multivariate t distribution!")
    if (df == 0 || isInf(df)) # MH: now (also) properly allow df = Inf
        return(rmvnorm(n, mean = delta, sigma = sigma, ...))
    type <- match.arg(type)
    switch(type,
           "Kshirsagar" = {
               return(rmvnorm(n, mean = delta, sigma = sigma, ...)/
                      sqrt(rchisq(n, df)/df))
           },
           "shifted" = {
               sims <- rmvnorm(n, sigma = sigma, ...)/sqrt(rchisq(n, df)/df)
               return(sweep(sims, 2, delta, "+"))
           },
           stop("wrong 'type'"))
}
## Density of the (shifted) multivariate t distribution at the rows of
## x, with location `delta`, scale matrix `sigma` and df degrees of
## freedom; log density by default.  df = 0/Inf degenerates to dmvnorm().
## Computation uses the Cholesky factor of sigma; a singular sigma gives
## density Inf at delta and 0 (=-Inf on log scale) elsewhere.
dmvt <- function(x, delta = rep(0, p), sigma = diag(p), df = 1,
                 log = TRUE, type = "shifted")
{
    if (is.vector(x))
        x <- matrix(x, ncol = length(x))
    p <- ncol(x)
    if (df == 0 || isInf(df)) # MH: now (also) properly allow df = Inf
        return(dmvnorm(x, mean = delta, sigma = sigma, log = log))
    if(!missing(delta)) {
        if(!is.null(dim(delta))) dim(delta) <- NULL
        if (length(delta) != p)
            stop("delta and sigma have non-conforming size")
    }
    if(!missing(sigma)) {
        if (p != ncol(sigma))
            stop("x and sigma have non-conforming size")
        if (!isSymmetric(sigma, tol = sqrt(.Machine$double.eps),
                         check.attributes = FALSE))
            stop("sigma must be a symmetric matrix")
    }
    type <- match.arg(type)
    dec <- tryCatch(chol(sigma), error=function(e)e)
    if (inherits(dec, "error")) {
        ## sigma is singular: point mass behaviour at delta
        x.is.d <- colSums(t(x) != delta) == 0
        logretval <- rep.int(-Inf, nrow(x))
        logretval[x.is.d] <- Inf # and all other f(.) == 0
    } else {
        ## rss = squared Mahalanobis distances via the Cholesky factor
        R.x_m <- backsolve(dec, t(x) - delta, transpose = TRUE)
        rss <- colSums(R.x_m ^ 2)
        logretval <- lgamma((p + df)/2) -
            (lgamma(df / 2) + sum(log(diag(dec))) + p/2 * log(pi * df)) -
            0.5 * (df + p) * log1p(rss / df)
    }
    names(logretval) <- rownames(x)
    if (log) logretval else exp(logretval)
}
## get start interval for root-finder used in qmvnorm and qmvt
## Uses univariate marginal quantiles to bracket the multivariate
## quantile: the product of marginal probabilities (independence bound)
## gives one endpoint via uniroot(), the extreme marginal quantile the
## other.  df = 0 or Inf selects the normal marginals.
getInt <- function(p, delta, sigma, tail,
                   type = c("Kshirsagar", "shifted"), df){
    type <- match.arg(type)
    sds <- sqrt(diag(sigma))
    if(df == 0 | df == Inf){
        df <- Inf
        cdf <- function(x, ...)
            pnorm(x, delta, sds, ...)
    } else {
        if(type == "shifted"){
            cdf <- function(x, ...)
                pt((x-delta)/sds, df=df, ...)
        }
        if(type == "Kshirsagar"){
            cdf <- function(x, ...)
                pt(x, ncp=delta/sds, df=df, ...)
        }
    }
    ## func(x) is the independence approximation of the target
    ## probability for each tail variant; UB is a marginal-quantile bound
    switch(tail, both.tails = {
        interval <- c(0,10)
        func <- function(x, delta, sds)
            prod(cdf(x)-cdf(-x))
        UB <- max(abs(delta+sds*qt(1-(1-p)/2, df=df)))
    }, upper.tail = {
        interval <- c(-10,10)
        func <- function(x, delta, sds)
            prod(cdf(x, lower.tail=FALSE))
        UB <- min(delta+sds*qt(1-p, df=df))
    }, lower.tail = {
        interval <- c(-10,10)
        func <- function(x, delta, sds)
            prod(cdf(x))
        UB <- max(delta+sds*qt(p, df=df))
    }, )
    LB <- uniroot(function(x)
        func(x, delta=delta, sds=sds)-p,
        interval, extendInt = "yes")$root
    sort(c(LB, UB))
}
## Equicoordinate quantile of the multivariate normal: find q with
## P(X <= q ...) = p for the chosen tail.  The univariate case uses
## qnorm(); otherwise a monotone probability function built on pmvnorm()
## is inverted with get_quant_loclin(), using a start interval from
## getInt() unless one is supplied.  Returns list(quantile, f.quantile).
qmvnorm <- function(p, interval = NULL,
                    tail = c("lower.tail", "upper.tail", "both.tails"),
                    mean = 0, corr = NULL, sigma = NULL, algorithm =
                    GenzBretz(),
                    ptol = 0.001, maxiter = 500, trace = FALSE, ...)
{
    if (length(p) != 1 || p < 0 || p > 1)
        stop(sQuote("p"), " is not a double between zero and one")
    dots <- dots2GenzBretz(...)
    if (!is.null(dots$algorithm) && !is.null(algorithm))
        algorithm <- dots$algorithm
    tail <- match.arg(tail)
    if (tail == "both.tails" && p < 0.5)
        stop("cannot compute two-sided quantile for p < 0.5")
    dim <- length(mean)
    if (is.matrix(corr)) dim <- nrow(corr)
    if (is.matrix(sigma)) dim <- nrow(sigma)
    lower <- upper <- rep.int(0, dim)
    args <- checkmvArgs(lower, upper, mean, corr, sigma)
    if (args$uni) {
        if (is.null(args$sigma))
            stop(sQuote("sigma"), " not specified: cannot compute qnorm")
        if (tail == "both.tails") p <- ifelse(p < 0.5, p / 2, 1 - (1 - p)/2)
        q <- qnorm(p, mean = args$mean, sd = args$sigma,
                   lower.tail = (tail != "upper.tail"))
        return( list(quantile = q, f.quantile = p) )
    }
    dim <- length(args$mean)
    ## make sure a seed exists, then freeze it so pfct() is deterministic
    if (!exists(".Random.seed", envir = .GlobalEnv, inherits = FALSE))
        runif(1)
    R.seed <- get(".Random.seed", envir = .GlobalEnv)
    pfct <- function(q) {
        ### use the same seed for different values of q
        assign(".Random.seed", R.seed, envir = .GlobalEnv)
        switch(tail, "both.tails" = {
            low <- rep(-abs(q), dim)
            upp <- rep( abs(q), dim)
        }, "upper.tail" = {
            low <- rep(      q, dim)
            upp <- rep(    Inf, dim)
        }, "lower.tail" = {
            low <- rep(   -Inf, dim)
            upp <- rep(      q, dim)
        },)
        ret <- pmvnorm(lower = low, upper = upp, mean = args$mean,
                       corr = args$corr, sigma = args$sigma,
                       algorithm = algorithm)
        if(tail == "upper.tail") ## get_quant_loclin assumes an increasing function
            ret <- 1-ret
        return(ret)
    }
    if(is.null(interval)){
        if(is.null(args$sigma)){
            sig <- args$corr
        } else {
            sig <- args$sigma
        }
        interval <- getInt(p=p, delta=args$mean, sigma=sig,
                           tail=tail, df=Inf)
        dif <- diff(interval)
        interval <- interval+c(-1,1)*0.2*max(dif,0.1) ## extend range slightly
    }
    if(tail == "upper.tail") ## get_quant_loclin assumes an increasing function
        p <- 1-p
    minx <- ifelse(tail == "both.tails", 0, -Inf)
    qroot <- get_quant_loclin(pfct, p, interval=interval,
                              link="probit",
                              ptol=ptol, maxiter=maxiter,
                              minx=minx, verbose=trace)
    qroot$f.quantile <- qroot$f.quantile - p
    qroot
}
## Equicoordinate quantile of the multivariate t, analogous to qmvnorm():
## the univariate case uses qt()/qnorm(); otherwise a monotone
## probability function built on pmvt() is inverted with
## get_quant_loclin().  A cauchit link is used for small df (heavier
## tails), probit otherwise.  Returns list(quantile, f.quantile).
qmvt <- function(p, interval = NULL,
                 tail = c("lower.tail", "upper.tail", "both.tails"),
                 df = 1, delta = 0, corr = NULL, sigma = NULL,
                 algorithm = GenzBretz(),
                 type = c("Kshirsagar", "shifted"),
                 ptol = 0.001, maxiter = 500, trace = FALSE, ...) {
    if (length(p) != 1 || (p <= 0 || p >= 1))
        stop(sQuote("p"), " is not a double between zero and one")
    dots <- dots2GenzBretz(...)
    if (!is.null(dots$algorithm) && !is.null(algorithm))
        algorithm <- dots$algorithm
    type <- match.arg(type)
    tail <- match.arg(tail)
    if (tail == "both.tails" && p < 0.5)
        stop("cannot compute two-sided quantile for p < 0.5")
    dim <- 1
    if (!is.null(corr)) dim <- NROW(corr)
    if (!is.null(sigma)) dim <- NROW(sigma)
    lower <- upper <- rep.int(0, dim)
    args <- checkmvArgs(lower, upper, delta, corr, sigma)
    if (args$uni) {
        if (tail == "both.tails") p <- ifelse(p < 0.5, p / 2, 1 - (1 - p)/2)
        if (df == 0 || isInf(df)) { # MH: now (also) properly allow df = Inf
            q <- qnorm(p, mean = args$mean, lower.tail = (tail != "upper.tail"))
        } else {
            q <- qt(p, df = df, ncp = args$mean, lower.tail = (tail != "upper.tail"))
        }
        qroot <- list(quantile = q, f.quantile = p)
        return(qroot)
    }
    dim <- length(args$mean)
    ## make sure a seed exists, then freeze it so pfct() is deterministic
    if (!exists(".Random.seed", envir = .GlobalEnv, inherits = FALSE))
        runif(1)
    R.seed <- get(".Random.seed", envir = .GlobalEnv)
    pfct <- function(q) {
        ### use the same seed for different values of q
        assign(".Random.seed", R.seed, envir = .GlobalEnv)
        switch(tail, "both.tails" = {
            low <- rep(-abs(q), dim)
            upp <- rep( abs(q), dim)
        }, "upper.tail" = {
            low <- rep(      q, dim)
            upp <- rep(    Inf, dim)
        }, "lower.tail" = {
            low <- rep(   -Inf, dim)
            upp <- rep(      q, dim)
        },)
        ret <- pmvt(lower = low, upper = upp, df = df, delta = args$mean,
                    corr = args$corr, sigma = args$sigma,
                    algorithm = algorithm, type = type)
        if(tail == "upper.tail") ## get_quant_loclin assumes an increasing function
            ret <- 1-ret
        return(ret)
    }
    if(is.null(interval)){
        if(is.null(args$sigma)){
            sig <- args$corr
        } else {
            sig <- args$sigma
        }
        interval <- getInt(p=p, delta=args$mean, sigma=sig,
                           tail=tail, type=type, df=df)
        dif <- diff(interval)
        interval <- interval+c(-1,1)*0.2*max(dif,0.1) ## extend range slightly
    }
    if(tail == "upper.tail") ## get_quant_loclin assumes an increasing function
        p <- 1-p
    minx <- ifelse(tail == "both.tails", 0, -Inf)
    link <- ifelse(df <= 7 & df > 0, "cauchit", "probit")
    qroot <- get_quant_loclin(pfct, p, interval=interval,
                              link=link,
                              ptol=ptol, maxiter=maxiter,
                              minx=minx, verbose=trace)
    qroot$f.quantile <- qroot$f.quantile - p
    qroot
}
## Control object for the Genz-Bretz quasi-Monte-Carlo algorithm:
## maxpts = max number of function evaluations, abseps/releps = absolute
## and relative error tolerances.
GenzBretz <- function(maxpts = 25000, abseps = 0.001, releps = 0) {
    ctrl <- list(maxpts = maxpts, abseps = abseps, releps = releps)
    class(ctrl) <- "GenzBretz"
    ctrl
}
##' Control settings for Miwa's deterministic algorithm.
##'
##' @param steps number of grid points, limited to 4098.
##' @return A list with element `steps`, classed "Miwa" for S3 dispatch
##'   by probval().
Miwa <- function(steps = 128) {
    if (steps > 4098) {
        stop("maximum number of steps is 4098")
    }
    settings <- list(steps = steps)
    class(settings) <- "Miwa"
    settings
}
## Generic dispatcher: compute the multivariate probability using the
## algorithm described by `x` (a "GenzBretz" or "Miwa" control object).
probval <- function(x, ...)
    UseMethod("probval")
## probval() method for the Genz & Bretz quasi-Monte-Carlo algorithm.
## Thin wrapper around the compiled routine `mvtdst`; returns the full .C()
## result list, of which callers use $value, $error and $inform.
probval.GenzBretz <- function(x, n, df, lower, upper, infin, corr, corrF, delta) {
    if(isInf(df)) df <- 0 # MH: deal with df=Inf (internally requires df=0!)
    ## Infinite bounds are signalled via `infin`; the compiled code ignores
    ## the corresponding bound values, so replace Inf by 0 before passing.
    lower[isNInf(lower)] <- 0
    upper[ isInf(upper)] <- 0
    ## output slots, filled in by the compiled routine
    error <- 0; value <- 0; inform <- 0
    .C(C_mvtdst,
       N = as.integer(n),              # dimension
       NU = as.integer(df),            # degrees of freedom (0 = normal case)
       LOWER = as.double(lower),
       UPPER = as.double(upper),
       INFIN = as.integer(infin),      # per-margin bound-type flags
       CORREL = as.double(corrF),      # upper triangle of corr, row-wise
       DELTA = as.double(delta),       # noncentrality vector
       MAXPTS = as.integer(x$maxpts),
       ABSEPS = as.double(x$abseps),
       RELEPS = as.double(x$releps),
       error = as.double(error),       # estimated absolute error (output)
       value = as.double(value),       # estimated probability (output)
       inform = as.integer(inform),    # status code (output)
       RND = as.integer(1)) ### init RNG
}
## probval() method for Miwa's deterministic algorithm. Only normal
## probabilities (df = 0 or Inf), dimension <= 20 and non-singular
## correlation matrices are supported.
probval.Miwa <- function(x, n, df, lower, upper, infin, corr, corrF, delta) {
    if (!( df==0 || isInf(df) ))
        stop("Miwa algorithm cannot compute t-probabilities")
    if (n > 20)
        stop("Miwa algorithm cannot compute probabilities for dimension n > 20")
    ## invertibility check: the compiled code assumes a non-singular matrix
    sc <- try(solve(corr))
    if (inherits(sc, "try-error"))
        stop("Miwa algorithm cannot compute probabilities for singular problems")
    p <- .Call(C_miwa, steps = as.integer(x$steps),
               corr = as.double(corr),
               upper = as.double(upper),
               lower = as.double(lower),
               infin = as.integer(infin))
    ## deterministic algorithm: no error estimate is available (NA)
    list(value = p, inform = 0, error = NA)
}
## Split `...` into arguments meant for GenzBretz() and arguments meant for
## uniroot(), matched (partially, via grep against the formal argument names,
## keeping the historical behaviour) to the respective functions.
## Returns list(algorithm =, uniroot =); each element is NULL when no
## matching argument was supplied.
dots2GenzBretz <- function(...) {
    addargs <- list(...)
    ## vapply instead of sapply: guarantees a logical vector even when `...`
    ## is empty or unnamed (sapply would return an empty *list* there, which
    ## breaks any() and the logical subsetting below).
    matches <- function(fun) {
        vapply(names(addargs),
               function(x) length(grep(x, names(formals(fun)))) == 1,
               logical(1))
    }
    fm1 <- matches(GenzBretz)
    fm2 <- matches(uniroot)
    algorithm <- NULL
    uniroot <- NULL
    if (any(fm1))
        algorithm <- do.call("GenzBretz", addargs[fm1])
    if (any(fm2))
        uniroot <- addargs[fm2]
    list(algorithm = algorithm, uniroot = uniroot)
}
| /R/mvt.R | no_license | dmahasen/mvtnorm | R | false | false | 22,606 | r | # $Id: mvt.R 334 2017-03-01 14:59:37Z thothorn $
##' Do we have a correlation matrix? Checks that `x` is a square numeric
##' matrix with all entries in [-1, 1] (up to floating-point fuzz) and a
##' unit diagonal.
##' @param x typically a matrix
##' @return TRUE or FALSE
chkcorr <- function(x) {
    if (!is.matrix(x))
        return(FALSE)
    d <- dim(x)
    if (d[1] != d[2])
        return(FALSE)
    ## strip names so that the diagonal comparison below is name-insensitive
    dimnames(x) <- NULL
    storage.mode(x) <- "numeric"
    ## allow a tiny numerical fuzz beyond +/- 1
    lim <- 1 + sqrt(.Machine$double.eps)
    inside <- -lim <= min(x) && max(x) <= lim
    inside && isTRUE(all.equal(diag(x), rep(1, d[1])))
}
## Validate and normalize the arguments shared by pmvnorm()/pmvt()/
## qmvnorm()/qmvt(): recycles lower/upper/mean to a common length, checks
## corr/sigma for type and conforming size, and detects the univariate
## special case.
## Returns list(lower, upper, mean, corr, sigma, uni) where `uni` flags
## the univariate case (handled by pnorm()/pt() in the callers).
checkmvArgs <- function(lower, upper, mean, corr, sigma)
{
    if (!is.numeric(lower) || !is.vector(lower))
        stop(sQuote("lower"), " is not a numeric vector")
    if (!is.numeric(upper) || !is.vector(upper))
        stop(sQuote("upper"), " is not a numeric vector")
    if (!is.numeric(mean) || !is.vector(mean))
        stop(sQuote("mean"), " is not a numeric vector")
    if (is.null(lower) || any(is.na(lower)))
        stop(sQuote("lower"), " not specified or contains NA")
    if (is.null(upper) || any(is.na(upper)))
        stop(sQuote("upper"), " not specified or contains NA")
    rec <- cbind(lower, upper, mean)# <--> recycling to same length
    lower <- rec[,"lower"]
    upper <- rec[,"upper"]
    if (!all(lower <= upper))
        stop("at least one element of ", sQuote("lower"), " is larger than ",
             sQuote("upper"))
    mean <- rec[,"mean"]
    if (any(is.na(mean)))
        stop("mean contains NA")
    ## neither corr nor sigma given: default to independence (identity)
    if (is.null(corr) && is.null(sigma)) {
        corr <- diag(length(lower))
        # warning("both ", sQuote("corr"), " and ", sQuote("sigma"),
        # " not specified: using sigma=diag(length(lower))")
    }
    ## both given: corr wins, sigma is dropped with a warning
    if (!is.null(corr) && !is.null(sigma)) {
        sigma <- NULL
        warning("both ", sQuote("corr"), " and ", sQuote("sigma"),
                " specified: ignoring ", sQuote("sigma"))
    }
    UNI <- FALSE
    if (!is.null(corr)) {
        if (!is.numeric(corr))
            stop(sQuote("corr"), " is not numeric")
        if (!is.matrix(corr)) {
            ## non-matrix corr: scalar means the univariate case
            if (length(corr) == 1)
                UNI <- TRUE
            if (length(corr) != length(lower))
                stop(sQuote("diag(corr)"), " and ", sQuote("lower"),
                     " are of different length")
        } else {
            if (length(corr) == 1) {
                ## 1x1 matrix: also univariate; unwrap to a scalar
                UNI <- TRUE
                corr <- corr[1,1]
                if (length(lower) != 1)
                    stop(sQuote("corr"), " and ", sQuote("lower"),
                         " are of different length")
            } else {
                if (length(diag(corr)) != length(lower))
                    stop(sQuote("diag(corr)"), " and ", sQuote("lower"),
                         " are of different length")
                if (!chkcorr(corr))
                    stop(sQuote("corr"), " is not a correlation matrix")
            }
        }
    }
    ## sigma handling mirrors the corr branch above
    if (!is.null(sigma)) {
        if (!is.numeric(sigma))
            stop(sQuote("sigma"), " is not numeric")
        if (!is.matrix(sigma)) {
            if (length(sigma) == 1)
                UNI <- TRUE
            if (length(sigma) != length(lower))
                stop(sQuote("diag(sigma)"), " and ", sQuote("lower"),
                     " are of different length")
        } else {
            if (length(sigma) == 1) {
                UNI <- TRUE
                sigma <- sigma[1,1]
                if (length(lower) != 1)
                    stop(sQuote("sigma"), " and ", sQuote("lower"),
                         " are of different length")
            } else {
                if (length(diag(sigma)) != length(lower))
                    stop(sQuote("diag(sigma)"), " and ", sQuote("lower"),
                         " are of different length")
                ## symmetry + nonnegative diagonal; positive definiteness is
                ## checked later by the integration routines
                if (!isTRUE(all.equal(sigma, t(sigma))) || any(diag(sigma) < 0))
                    stop(sQuote("sigma"), " is not a covariance matrix")
            }
        }
    }
    list(lower=lower, upper=upper, mean=mean, corr=corr, sigma=sigma, uni=UNI)
}
## Distribution function of the multivariate normal: P(lower < X < upper).
## Delegates to pnorm() in the univariate case and to mvt() (with df = 0,
## i.e. the normal case) otherwise. When sigma is given, bounds are
## standardized by the marginal standard deviations and sigma is converted
## to a correlation matrix.
## Returns the probability with "error" and "msg" attributes attached.
pmvnorm <- function(lower=-Inf, upper=Inf, mean=rep(0, length(lower)), corr=NULL, sigma=NULL,
                    algorithm = GenzBretz(), ...)
{
    carg <- checkmvArgs(lower=lower, upper=upper, mean=mean, corr=corr,
                        sigma=sigma)
    if (!is.null(carg$corr)) {
        corr <- carg$corr
        if (carg$uni) {
            ## a scalar correlation carries no scale information
            stop(sQuote("sigma"), " not specified: cannot compute pnorm")
        } else {
            ## center the bounds; a correlation matrix needs no rescaling
            lower <- carg$lower - carg$mean
            upper <- carg$upper - carg$mean
            mean <- rep(0, length(lower))
            RET <- mvt(lower=lower, upper=upper, df=0, corr=corr, delta=mean,
                       algorithm = algorithm, ...)
        }
    } else {
        if (carg$uni) {
            ## univariate: sigma is a variance, hence sqrt() for pnorm's sd
            RET <- list(value = pnorm(carg$upper, mean=carg$mean, sd=sqrt(carg$sigma)) -
                                pnorm(carg$lower, mean=carg$mean, sd=sqrt(carg$sigma)),
                        error = 0, msg="univariate: using pnorm")
        } else {
            ## standardize by the marginal sds and switch to the correlation
            lower <- (carg$lower - carg$mean)/sqrt(diag(carg$sigma))
            upper <- (carg$upper - carg$mean)/sqrt(diag(carg$sigma))
            mean <- rep(0, length(lower))
            corr <- cov2cor(carg$sigma)
            RET <- mvt(lower=lower, upper=upper, df=0, corr=corr, delta=mean,
                       algorithm = algorithm, ...)
        }
    }
    ## return
    structure(RET$value, "error" = RET$error, "msg" = RET$msg)
}
## Distribution function of the multivariate t distribution.
## type = "Kshirsagar": noncentral t with noncentrality `delta`;
## type = "shifted": central t shifted by `delta`, handled by transforming
## the integration bounds and integrating the central t.
## df = 0 or Inf is treated as the multivariate normal (by mvt()/pnorm()).
## Returns the probability with "error" and "msg" attributes attached.
pmvt <- function(lower=-Inf, upper=Inf, delta=rep(0, length(lower)),
                 df=1, corr=NULL, sigma=NULL,
                 algorithm = GenzBretz(),
                 type = c("Kshirsagar", "shifted"), ...)
{
    type <- match.arg(type)
    carg <- checkmvArgs(lower=lower, upper=upper, mean=delta, corr=corr,
                        sigma=sigma)
    if (type == "shifted") { # can be handled by integrating over central t
        if(!is.null(carg$corr)){ # using transformed integration bounds
            d <- 1
        } else {
            if(!is.null(carg$sigma)){
                d <- sqrt(diag(carg$sigma))
                carg$corr <- cov2cor(carg$sigma)
            }
        }
        ## standardize the bounds and absorb the shift into them
        carg$lower <- (carg$lower - carg$mean)/d
        carg$upper <- (carg$upper - carg$mean)/d
        carg$mean <- rep(0, length(carg$mean))
    }
    if (is.null(df))
        stop(sQuote("df"), " not specified")
    if (df < 0) # MH: was any(..)
        stop("cannot compute multivariate t distribution with ",
             sQuote("df"), " < 0")
    if(is.finite(df) && (df != as.integer(df))) # MH: was !isTRUE(all.equal(as.integer(df), df))
        stop(sQuote("df"), " is not an integer")
    if (carg$uni) {
        ## univariate case: delegate to pt()/pnorm()
        if (df > 0) # df = Inf is taken care of by pt()
            RET <- list(value = pt(carg$upper, df=df, ncp=carg$mean) -
                                pt(carg$lower, df=df, ncp=carg$mean),
                        error = 0, msg="univariate: using pt")
        else
            RET <- list(value = pnorm(carg$upper, mean = carg$mean) -
                                pnorm(carg$lower, mean=carg$mean),
                        error = 0, msg="univariate: using pnorm")
    } else { # mvt() takes care of df = 0 || df = Inf
        if (!is.null(carg$corr)) {
            RET <- mvt(lower=carg$lower, upper=carg$upper, df=df, corr=carg$corr,
                       delta=carg$mean, algorithm = algorithm, ...)
        } else { # need to transform integration bounds and delta
            d <- sqrt(diag(carg$sigma))
            lower <- carg$lower/d
            upper <- carg$upper/d
            corr <- cov2cor(carg$sigma)
            RET <- mvt(lower=lower, upper=upper, df=df, corr=corr,
                       delta=carg$mean/d, algorithm = algorithm, ...)
        }
    }
    attr(RET$value, "error") <- RET$error
    attr(RET$value, "msg") <- RET$msg
    return(RET$value)
}
## Vectorized checks for +Inf / -Inf; NA maps to FALSE.
## (identical(., Inf) would be faster but only works on scalars.)
isInf <- function(x) is.infinite(x) & x > 0 # TRUE where x == Inf
isNInf <- function(x) is.infinite(x) & x < 0 # TRUE where x == -Inf
## Internal workhorse shared by pmvnorm()/pmvt(): normalizes the algorithm
## argument, encodes the bound types for the compiled routines, applies
## Miwa-specific workarounds, and dispatches via probval().
## Assumes checkmvArgs() has already validated lower/upper (no NA).
## Returns list(value, error, msg).
mvt <- function(lower, upper, df, corr, delta, algorithm = GenzBretz(), ...)
{
    ### only for compatibility with older versions
    addargs <- list(...)
    if (length(addargs) > 0)
        algorithm <- GenzBretz(...)
    else if (is.function(algorithm) || is.character(algorithm))
        algorithm <- do.call(algorithm, list())
    ### handle cases where the support is the empty set
    ## Note: checkmvArgs() has been called ==> lower, upper are *not* NA
    if (any(abs(d <- lower - upper) < sqrt(.Machine$double.eps)*(abs(lower)+abs(upper)) |
            lower == upper)) ## e.g. Inf == Inf
        return(list(value = 0, error = 0, msg = "lower == upper"))
    n <- ncol(corr)
    if (is.null(n) || n < 2) stop("dimension less then n = 2")
    if (length(lower) != n) stop("wrong dimensions")
    if (length(upper) != n) stop("wrong dimensions")
    if (n > 1000) stop("only dimensions 1 <= n <= 1000 allowed")
    ## bound-type flags per margin: 2 = finite both sides, 1 = lower only,
    ## 0 = upper only, -1 = unbounded on both sides
    infin <- rep(2, n)
    infin[ isInf(upper)] <- 1
    infin[isNInf(lower)] <- 0
    infin[isNInf(lower) & isInf(upper)] <- -1
    ### fix for Miwa algo:
    ### pmvnorm(lower=c(-Inf, 0, 0), upper=c(0, Inf, Inf),
    ### mean=c(0, 0, 0), sigma=S, algorithm = Miwa())
    ### returned NA
    if (inherits(algorithm, "Miwa")) {
        ## drop margins integrated over the whole real line
        if (n >= 3 && any(infin == -1)) {
            WhereBothInfIs <- which(infin == -1)
            n <- n - length(WhereBothInfIs)
            corr <- corr[-WhereBothInfIs, -WhereBothInfIs]
            upper <- upper[-WhereBothInfIs]
            lower <- lower[-WhereBothInfIs]
        }
        ## mirror (-Inf, u] margins into [l, Inf) by sign-flipping the
        ## corresponding rows/columns of corr and swapping the bounds
        if (n >= 2 && any(infin == 0)) {
            WhereNegativInfIs <- which(infin==0)
            inversecorr <- rep(1, n)
            inversecorr[WhereNegativInfIs] <- -1
            corr <- diag(inversecorr) %*% corr %*% diag(inversecorr) ## MM_FIXME
            infin[WhereNegativInfIs] <- 1
            tempsaveupper <- upper[WhereNegativInfIs]
            upper[WhereNegativInfIs] <- -lower[WhereNegativInfIs]
            lower[WhereNegativInfIs] <- -tempsaveupper
        }
    }
    ### this is a bug in `mvtdst' not yet fixed
    if (all(infin < 0))
        return(list(value = 1, error = 0, msg = "Normal Completion"))
    ## pack the upper triangle of corr row-wise, as expected by the
    ## compiled routines
    if (n > 1) {
        corrF <- matrix(as.vector(corr), ncol=n, byrow=TRUE)
        corrF <- corrF[upper.tri(corrF)]
    } else corrF <- corr
    ret <- probval(algorithm, n, df, lower, upper, infin, corr, corrF, delta)
    inform <- ret$inform
    ## map the integer status code to a human-readable message
    msg <-
        if (inform == 0) "Normal Completion"
        else if (inform == 1) "Completion with error > abseps"
        else if (inform == 2) "N greater 1000 or N < 1"
        else if (inform == 3) "Covariance matrix not positive semidefinite"
        else inform
    ## return including error est. and msg:
    list(value = ret$value, error = ret$error, msg = msg)
}
## Draw n samples from a multivariate t distribution.
## type = "Kshirsagar": noncentral t (delta enters the normal draw before
## mixing with the chi-square); type = "shifted": central t shifted by
## delta afterwards. df = 0 or Inf falls back to the multivariate normal.
rmvt <- function(n, sigma = diag(2), df = 1,
                 delta = rep(0, nrow(sigma)),
                 type = c("shifted", "Kshirsagar"), ...)
{
    if (length(delta) != nrow(sigma))
        stop("delta and sigma have non-conforming size")
    ## a 'mean' argument would be forwarded to rmvnorm() and yield a normal
    ## mean-variance mixture rather than a t distribution -- refuse it
    if (hasArg(mean)) # MH: normal mean variance mixture != t distribution (!)
        stop("Providing 'mean' does *not* sample from a multivariate t distribution!")
    if (df == 0 || isInf(df)) # MH: now (also) properly allow df = Inf
        return(rmvnorm(n, mean = delta, sigma = sigma, ...))
    type <- match.arg(type)
    if (type == "Kshirsagar") {
        normal_draws <- rmvnorm(n, mean = delta, sigma = sigma, ...)
        return(normal_draws / sqrt(rchisq(n, df)/df))
    }
    ## type == "shifted" (match.arg() rules out anything else)
    sims <- rmvnorm(n, sigma = sigma, ...)/sqrt(rchisq(n, df)/df)
    sweep(sims, 2, delta, "+")
}
## Density of the multivariate t distribution, evaluated at the rows of `x`
## (a vector is treated as a single p-dimensional point). df = 0 or Inf
## falls back to the multivariate normal density. Only the "shifted"
## parameterization is available. Returns log-density by default.
dmvt <- function(x, delta = rep(0, p), sigma = diag(p), df = 1,
                 log = TRUE, type = "shifted")
{
    if (is.vector(x))
        x <- matrix(x, ncol = length(x))
    p <- ncol(x)
    if (df == 0 || isInf(df)) # MH: now (also) properly allow df = Inf
        return(dmvnorm(x, mean = delta, sigma = sigma, log = log))
    if(!missing(delta)) {
        if(!is.null(dim(delta))) dim(delta) <- NULL
        if (length(delta) != p)
            stop("delta and sigma have non-conforming size")
    }
    if(!missing(sigma)) {
        if (p != ncol(sigma))
            stop("x and sigma have non-conforming size")
        if (!isSymmetric(sigma, tol = sqrt(.Machine$double.eps),
                         check.attributes = FALSE))
            stop("sigma must be a symmetric matrix")
    }
    type <- match.arg(type)
    ## Cholesky factor of sigma; failure means sigma is (numerically)
    ## singular, in which case the density degenerates
    dec <- tryCatch(chol(sigma), error=function(e)e)
    if (inherits(dec, "error")) {
        ## singular sigma: density is +Inf exactly at x == delta, 0 elsewhere
        x.is.d <- colSums(t(x) != delta) == 0
        logretval <- rep.int(-Inf, nrow(x))
        logretval[x.is.d] <- Inf # and all other f(.) == 0
    } else {
        ## squared Mahalanobis distances via a triangular solve
        R.x_m <- backsolve(dec, t(x) - delta, transpose = TRUE)
        rss <- colSums(R.x_m ^ 2)
        ## log of the multivariate t density; sum(log(diag(dec))) is
        ## log(det(sigma))/2 from the Cholesky factor
        logretval <- lgamma((p + df)/2) -
            (lgamma(df / 2) + sum(log(diag(dec))) + p/2 * log(pi * df)) -
            0.5 * (df + p) * log1p(rss / df)
    }
    names(logretval) <- rownames(x)
    if (log) logretval else exp(logretval)
}
## get start interval for root-finder used in qmvnorm and qmvt
## Uses the product of marginal cdfs as a cheap surrogate for the joint
## probability: the surrogate's root (LB) and a marginal quantile bound (UB)
## bracket the true quantile. Returns the sorted pair c(LB, UB).
getInt <- function(p, delta, sigma, tail,
                   type = c("Kshirsagar", "shifted"), df){
    type <- match.arg(type)
    sds <- sqrt(diag(sigma))
    ## marginal cdf: normal for df = 0/Inf, otherwise t (central for
    ## "shifted", noncentral for "Kshirsagar")
    if(df == 0 | df == Inf){
        df <- Inf
        cdf <- function(x, ...)
            pnorm(x, delta, sds, ...)
    } else {
        if(type == "shifted"){
            cdf <- function(x, ...)
                pt((x-delta)/sds, df=df, ...)
        }
        if(type == "Kshirsagar"){
            cdf <- function(x, ...)
                pt(x, ncp=delta/sds, df=df, ...)
        }
    }
    ## surrogate objective and marginal quantile bound, per tail
    switch(tail, both.tails = {
        interval <- c(0,10)
        func <- function(x, delta, sds)
            prod(cdf(x)-cdf(-x))
        UB <- max(abs(delta+sds*qt(1-(1-p)/2, df=df)))
    }, upper.tail = {
        interval <- c(-10,10)
        func <- function(x, delta, sds)
            prod(cdf(x, lower.tail=FALSE))
        UB <- min(delta+sds*qt(1-p, df=df))
    }, lower.tail = {
        interval <- c(-10,10)
        func <- function(x, delta, sds)
            prod(cdf(x))
        UB <- max(delta+sds*qt(p, df=df))
    }, )
    ## root of the surrogate; extendInt lets uniroot grow the bracket
    LB <- uniroot(function(x)
                      func(x, delta=delta, sds=sds)-p,
                  interval, extendInt = "yes")$root
    sort(c(LB, UB))
}
## Equicoordinate quantile of the multivariate normal: finds q with
## P(X <= q) = p ("lower.tail"), P(X >= q) = p ("upper.tail") or
## P(-q <= X <= q) = p ("both.tails"), via a local-linearization root
## search (get_quant_loclin) on pmvnorm().
## Returns list(quantile, f.quantile).
qmvnorm <- function(p, interval = NULL,
                    tail = c("lower.tail", "upper.tail", "both.tails"),
                    mean = 0, corr = NULL, sigma = NULL, algorithm =
                    GenzBretz(),
                    ptol = 0.001, maxiter = 500, trace = FALSE, ...)
{
    if (length(p) != 1 || p < 0 || p > 1)
        stop(sQuote("p"), " is not a double between zero and one")
    dots <- dots2GenzBretz(...)
    if (!is.null(dots$algorithm) && !is.null(algorithm))
        algorithm <- dots$algorithm
    tail <- match.arg(tail)
    if (tail == "both.tails" && p < 0.5)
        stop("cannot compute two-sided quantile for p < 0.5")
    dim <- length(mean)
    if (is.matrix(corr)) dim <- nrow(corr)
    if (is.matrix(sigma)) dim <- nrow(sigma)
    lower <- upper <- rep.int(0, dim)
    args <- checkmvArgs(lower, upper, mean, corr, sigma)
    if (args$uni) {
        if (is.null(args$sigma))
            stop(sQuote("sigma"), " not specified: cannot compute qnorm")
        if (tail == "both.tails") p <- ifelse(p < 0.5, p / 2, 1 - (1 - p)/2)
        ## BUG FIX: args$sigma holds a *variance* (cf. pmvnorm()'s univariate
        ## branch, which uses sd = sqrt(carg$sigma)); qnorm() expects the
        ## standard deviation, so take the square root here as well.
        q <- qnorm(p, mean = args$mean, sd = sqrt(args$sigma),
                   lower.tail = (tail != "upper.tail"))
        return( list(quantile = q, f.quantile = p) )
    }
    dim <- length(args$mean)
    ## make sure .Random.seed exists, then freeze it so that every call of
    ## pfct() below re-uses the same quasi-random points
    if (!exists(".Random.seed", envir = .GlobalEnv, inherits = FALSE))
        runif(1)
    R.seed <- get(".Random.seed", envir = .GlobalEnv)
    ## probability at candidate quantile q (monotone increasing in q)
    pfct <- function(q) {
        ### use the same seed for different values of q
        assign(".Random.seed", R.seed, envir = .GlobalEnv)
        switch(tail, "both.tails" = {
            low <- rep(-abs(q), dim)
            upp <- rep( abs(q), dim)
        }, "upper.tail" = {
            low <- rep( q, dim)
            upp <- rep( Inf, dim)
        }, "lower.tail" = {
            low <- rep( -Inf, dim)
            upp <- rep( q, dim)
        },)
        ret <- pmvnorm(lower = low, upper = upp, mean = args$mean,
                       corr = args$corr, sigma = args$sigma,
                       algorithm = algorithm)
        if(tail == "upper.tail") ## get_quant_loclin assumes an increasing function
            ret <- 1-ret
        return(ret)
    }
    ## default search interval from the marginal surrogate in getInt()
    if(is.null(interval)){
        if(is.null(args$sigma)){
            sig <- args$corr
        } else {
            sig <- args$sigma
        }
        interval <- getInt(p=p, delta=args$mean, sigma=sig,
                           tail=tail, df=Inf)
        dif <- diff(interval)
        interval <- interval+c(-1,1)*0.2*max(dif,0.1) ## extend range slightly
    }
    if(tail == "upper.tail") ## get_quant_loclin assumes an increasing function
        p <- 1-p
    ## two-sided quantiles are nonnegative by construction
    minx <- ifelse(tail == "both.tails", 0, -Inf)
    qroot <- get_quant_loclin(pfct, p, interval=interval,
                              link="probit",
                              ptol=ptol, maxiter=maxiter,
                              minx=minx, verbose=trace)
    ## report the residual probability error rather than the raw value
    qroot$f.quantile <- qroot$f.quantile - p
    qroot
}
## Equicoordinate quantile of the multivariate t distribution, found by a
## local-linearization root search (get_quant_loclin) on pmvt().
## Returns list(quantile, f.quantile).
qmvt <- function(p, interval = NULL,
                 tail = c("lower.tail", "upper.tail", "both.tails"),
                 df = 1, delta = 0, corr = NULL, sigma = NULL,
                 algorithm = GenzBretz(),
                 type = c("Kshirsagar", "shifted"),
                 ptol = 0.001, maxiter = 500, trace = FALSE, ...) {
    if (length(p) != 1 || (p <= 0 || p >= 1))
        stop(sQuote("p"), " is not a double between zero and one")
    dots <- dots2GenzBretz(...)
    if (!is.null(dots$algorithm) && !is.null(algorithm))
        algorithm <- dots$algorithm
    type <- match.arg(type)
    tail <- match.arg(tail)
    if (tail == "both.tails" && p < 0.5)
        stop("cannot compute two-sided quantile for p < 0.5")
    dim <- 1
    if (!is.null(corr)) dim <- NROW(corr)
    if (!is.null(sigma)) dim <- NROW(sigma)
    lower <- upper <- rep.int(0, dim)
    args <- checkmvArgs(lower, upper, delta, corr, sigma)
    if (args$uni) {
        ## univariate case: closed form via qt()/qnorm()
        ## NOTE(review): unlike qmvnorm(), a scalar sigma is ignored here
        ## (qnorm/qt are called without a scale) -- confirm this is intended.
        if (tail == "both.tails") p <- ifelse(p < 0.5, p / 2, 1 - (1 - p)/2)
        if (df == 0 || isInf(df)) { # MH: now (also) properly allow df = Inf
            q <- qnorm(p, mean = args$mean, lower.tail = (tail != "upper.tail"))
        } else {
            q <- qt(p, df = df, ncp = args$mean, lower.tail = (tail != "upper.tail"))
        }
        qroot <- list(quantile = q, f.quantile = p)
        return(qroot)
    }
    dim <- length(args$mean)
    ## make sure .Random.seed exists, then freeze it so every call of
    ## pfct() below re-uses the same quasi-random points
    if (!exists(".Random.seed", envir = .GlobalEnv, inherits = FALSE))
        runif(1)
    R.seed <- get(".Random.seed", envir = .GlobalEnv)
    ## probability at candidate quantile q (monotone increasing in q)
    pfct <- function(q) {
        ### use the same seed for different values of q
        assign(".Random.seed", R.seed, envir = .GlobalEnv)
        switch(tail, "both.tails" = {
            low <- rep(-abs(q), dim)
            upp <- rep( abs(q), dim)
        }, "upper.tail" = {
            low <- rep( q, dim)
            upp <- rep( Inf, dim)
        }, "lower.tail" = {
            low <- rep( -Inf, dim)
            upp <- rep( q, dim)
        },)
        ret <- pmvt(lower = low, upper = upp, df = df, delta = args$mean,
                    corr = args$corr, sigma = args$sigma,
                    algorithm = algorithm, type = type)
        if(tail == "upper.tail") ## get_quant_loclin assumes an increasing function
            ret <- 1-ret
        return(ret)
    }
    ## default search interval from the marginal surrogate in getInt()
    if(is.null(interval)){
        if(is.null(args$sigma)){
            sig <- args$corr
        } else {
            sig <- args$sigma
        }
        interval <- getInt(p=p, delta=args$mean, sigma=sig,
                           tail=tail, type=type, df=df)
        dif <- diff(interval)
        interval <- interval+c(-1,1)*0.2*max(dif,0.1) ## extend range slightly
    }
    if(tail == "upper.tail") ## get_quant_loclin assumes an increasing function
        p <- 1-p
    ## two-sided quantiles are nonnegative by construction
    minx <- ifelse(tail == "both.tails", 0, -Inf)
    ## heavy-tailed t (small df): the cauchit link tracks the cdf better
    link <- ifelse(df <= 7 & df > 0, "cauchit", "probit")
    qroot <- get_quant_loclin(pfct, p, interval=interval,
                              link=link,
                              ptol=ptol, maxiter=maxiter,
                              minx=minx, verbose=trace)
    ## report the residual probability error rather than the raw value
    qroot$f.quantile <- qroot$f.quantile - p
    qroot
}
##' Control settings for the Genz & Bretz quasi-Monte-Carlo algorithm.
##'
##' @param maxpts maximum number of integrand evaluations.
##' @param abseps absolute error tolerance.
##' @param releps relative error tolerance.
##' @return A list of the three settings, classed "GenzBretz" for S3 dispatch
##'   by probval().
GenzBretz <- function(maxpts = 25000, abseps = 0.001, releps = 0) {
    settings <- list(maxpts = maxpts, abseps = abseps, releps = releps)
    class(settings) <- "GenzBretz"
    settings
}
##' Control settings for Miwa's deterministic algorithm.
##'
##' @param steps number of grid points, limited to 4098.
##' @return A list with element `steps`, classed "Miwa" for S3 dispatch
##'   by probval().
Miwa <- function(steps = 128) {
    if (steps > 4098) {
        stop("maximum number of steps is 4098")
    }
    settings <- list(steps = steps)
    class(settings) <- "Miwa"
    settings
}
## Generic dispatcher: compute the multivariate probability using the
## algorithm described by `x` (a "GenzBretz" or "Miwa" control object).
probval <- function(x, ...)
    UseMethod("probval")
## probval() method for the Genz & Bretz quasi-Monte-Carlo algorithm.
## Thin wrapper around the compiled routine `mvtdst`; returns the full .C()
## result list, of which callers use $value, $error and $inform.
probval.GenzBretz <- function(x, n, df, lower, upper, infin, corr, corrF, delta) {
    if(isInf(df)) df <- 0 # MH: deal with df=Inf (internally requires df=0!)
    ## Infinite bounds are signalled via `infin`; the compiled code ignores
    ## the corresponding bound values, so replace Inf by 0 before passing.
    lower[isNInf(lower)] <- 0
    upper[ isInf(upper)] <- 0
    ## output slots, filled in by the compiled routine
    error <- 0; value <- 0; inform <- 0
    .C(C_mvtdst,
       N = as.integer(n),              # dimension
       NU = as.integer(df),            # degrees of freedom (0 = normal case)
       LOWER = as.double(lower),
       UPPER = as.double(upper),
       INFIN = as.integer(infin),      # per-margin bound-type flags
       CORREL = as.double(corrF),      # upper triangle of corr, row-wise
       DELTA = as.double(delta),       # noncentrality vector
       MAXPTS = as.integer(x$maxpts),
       ABSEPS = as.double(x$abseps),
       RELEPS = as.double(x$releps),
       error = as.double(error),       # estimated absolute error (output)
       value = as.double(value),       # estimated probability (output)
       inform = as.integer(inform),    # status code (output)
       RND = as.integer(1)) ### init RNG
}
## probval() method for Miwa's deterministic algorithm. Only normal
## probabilities (df = 0 or Inf), dimension <= 20 and non-singular
## correlation matrices are supported.
probval.Miwa <- function(x, n, df, lower, upper, infin, corr, corrF, delta) {
    if (!( df==0 || isInf(df) ))
        stop("Miwa algorithm cannot compute t-probabilities")
    if (n > 20)
        stop("Miwa algorithm cannot compute probabilities for dimension n > 20")
    ## invertibility check: the compiled code assumes a non-singular matrix
    sc <- try(solve(corr))
    if (inherits(sc, "try-error"))
        stop("Miwa algorithm cannot compute probabilities for singular problems")
    p <- .Call(C_miwa, steps = as.integer(x$steps),
               corr = as.double(corr),
               upper = as.double(upper),
               lower = as.double(lower),
               infin = as.integer(infin))
    ## deterministic algorithm: no error estimate is available (NA)
    list(value = p, inform = 0, error = NA)
}
## Split `...` into arguments meant for GenzBretz() and arguments meant for
## uniroot(), matched (partially, via grep against the formal argument names,
## keeping the historical behaviour) to the respective functions.
## Returns list(algorithm =, uniroot =); each element is NULL when no
## matching argument was supplied.
dots2GenzBretz <- function(...) {
    addargs <- list(...)
    ## vapply instead of sapply: guarantees a logical vector even when `...`
    ## is empty or unnamed (sapply would return an empty *list* there, which
    ## breaks any() and the logical subsetting below).
    matches <- function(fun) {
        vapply(names(addargs),
               function(x) length(grep(x, names(formals(fun)))) == 1,
               logical(1))
    }
    fm1 <- matches(GenzBretz)
    fm2 <- matches(uniroot)
    algorithm <- NULL
    uniroot <- NULL
    if (any(fm1))
        algorithm <- do.call("GenzBretz", addargs[fm1])
    if (any(fm2))
        uniroot <- addargs[fm2]
    list(algorithm = algorithm, uniroot = uniroot)
}
|
# Generalizing Functions

# Build an xts time series of Pct for one country/status combination.
# The values are log-transformed unless scale_ == "Linear".
create_xts_series <- function(dfm, country, status, scale_) {
  keep <- dfm$Country == country & dfm$Status == status
  selected <- dfm[keep, ]
  values <- if (scale_ == "Linear") selected$Pct else log(selected$Pct)
  xts(values, order.by = selected$Date)
}
# Combine the per-country series for one status into a single multi-column
# xts object, one column per country.
#
# dfm:       long-format data with Country/Status/Pct/Date columns
# status_df: data frame whose Country column lists the countries to plot
# status:    case status (e.g. "Confirmed", "Fatal", "Recovered")
# scale_:    "Linear" or "Log" (passed through to create_xts_series)
# n_top:     number of countries taken from status_df (default 6, as before)
create_seriesObject <- function(dfm, status_df, status, scale_, n_top = 6) {
  # Build all columns first, then bind once -- avoids growing the object
  # with cbind() inside the loop.
  series_list <- lapply(seq_len(n_top), function(i) {
    create_xts_series(dfm, status_df$Country[i], status, scale_)
  })
  seriesObject <- do.call(cbind, series_list)
  names(seriesObject) <- status_df$Country[seq_len(n_top)]
  seriesObject
}
# Render an interactive dygraph of the top countries' series for one case
# status, on a linear or log scale, with a range selector underneath.
plot_interactive_df <- function(dfm, status_df, status, scale_) {
  seriesObject <- create_seriesObject(dfm
                                      , status_df
                                      , status
                                      , scale_)
  # y-axis label prefix: "Log " when plotting on the log scale
  ylab_txt <- if (scale_ == "Linear") {
    ""
  } else {
    "Log "
  }
  interactive_df <- dygraph(seriesObject
                            , main=paste0("Top Countries - "
                                          , status, " Cases ("
                                          , scale_, " Scale)")
                            , xlab=""
                            , ylab=paste0(ylab_txt, "Percentage Of "
                                          , status, " Cases")
                            ) %>%
    dyOptions(colors=brewer.pal(6, "Dark2")
              ) %>%
    dyRangeSelector()
  interactive_df
}
# Confirmed Cases -- linear and log views
plot_interactive_df(percap, top_confirmed, "Confirmed", "Linear")
plot_interactive_df(percap, top_confirmed, "Confirmed", "Log")
# Fatal Cases -- linear and log views
plot_interactive_df(percap, top_fatal, "Fatal", "Linear")
plot_interactive_df(percap, top_fatal, "Fatal", "Log")
# Recovered Cases -- linear and log views
plot_interactive_df(percap, top_recovered, "Recovered", "Linear")
plot_interactive_df(percap, top_recovered, "Recovered", "Log")
| /OLD_DATA_ANALYSES/CODE CHUNKS/generalizing_functions.R | no_license | robintux/CoronavirusDataAnalysis | R | false | false | 2,039 | r |
# Generalizing Functions

# Build an xts time series of Pct for one country/status combination.
# The values are log-transformed unless scale_ == "Linear".
create_xts_series <- function(dfm, country, status, scale_) {
  keep <- dfm$Country == country & dfm$Status == status
  selected <- dfm[keep, ]
  values <- if (scale_ == "Linear") selected$Pct else log(selected$Pct)
  xts(values, order.by = selected$Date)
}
# Combine the per-country series for one status into a single multi-column
# xts object, one column per country.
#
# dfm:       long-format data with Country/Status/Pct/Date columns
# status_df: data frame whose Country column lists the countries to plot
# status:    case status (e.g. "Confirmed", "Fatal", "Recovered")
# scale_:    "Linear" or "Log" (passed through to create_xts_series)
# n_top:     number of countries taken from status_df (default 6, as before)
create_seriesObject <- function(dfm, status_df, status, scale_, n_top = 6) {
  # Build all columns first, then bind once -- avoids growing the object
  # with cbind() inside the loop.
  series_list <- lapply(seq_len(n_top), function(i) {
    create_xts_series(dfm, status_df$Country[i], status, scale_)
  })
  seriesObject <- do.call(cbind, series_list)
  names(seriesObject) <- status_df$Country[seq_len(n_top)]
  seriesObject
}
# Render an interactive dygraph of the top countries' series for one case
# status, on a linear or log scale, with a range selector underneath.
plot_interactive_df <- function(dfm, status_df, status, scale_) {
  seriesObject <- create_seriesObject(dfm
                                      , status_df
                                      , status
                                      , scale_)
  # y-axis label prefix: "Log " when plotting on the log scale
  ylab_txt <- if (scale_ == "Linear") {
    ""
  } else {
    "Log "
  }
  interactive_df <- dygraph(seriesObject
                            , main=paste0("Top Countries - "
                                          , status, " Cases ("
                                          , scale_, " Scale)")
                            , xlab=""
                            , ylab=paste0(ylab_txt, "Percentage Of "
                                          , status, " Cases")
                            ) %>%
    dyOptions(colors=brewer.pal(6, "Dark2")
              ) %>%
    dyRangeSelector()
  interactive_df
}
# Confirmed Cases -- linear and log views
plot_interactive_df(percap, top_confirmed, "Confirmed", "Linear")
plot_interactive_df(percap, top_confirmed, "Confirmed", "Log")
# Fatal Cases -- linear and log views
plot_interactive_df(percap, top_fatal, "Fatal", "Linear")
plot_interactive_df(percap, top_fatal, "Fatal", "Log")
# Recovered Cases -- linear and log views
plot_interactive_df(percap, top_recovered, "Recovered", "Linear")
plot_interactive_df(percap, top_recovered, "Recovered", "Log")
|
# Plant Growth Case Study ----
# Load packages ----
# This should already be loaded if you executed the commands in the previous file.
library(tidyverse)
# A built-in data set ----
data(PlantGrowth)
# 1. Descriptive Statistics ----
# The "global mean", e.g. the ANOVA null hypothesis
mean(PlantGrowth$weight)
# group-wise stats (mean and standard deviation per treatment group)
# Here, using functions from dplyr, a part of the Tidyverse
# Thus, using Tidyverse notation
# %>% is the "pipe operator"
# Pronounce it as "... and then ..."
# Type it using shift + ctrl + m
PlantGrowth %>%
  group_by(group) %>%
  summarise(avg = mean(weight),
            stdev = sd(weight))
# 2. Data Visualization ----
# Here, using functions from ggplot2, a part of the Tidyverse
# 3 essential components:
# 1 - The data
# 2 - Aesthetics - "mapping" variables onto scales
#     aes == scales == axes == "encoding elements"
#     scales: x, y, color, size, shape, linetype
# 3 - Geometry - how the plot will look
# box plot
ggplot(PlantGrowth, aes(x = group, y = weight)) +
  geom_boxplot()
# "dot plot" (jittered to reduce overplotting; alpha adds transparency)
ggplot(PlantGrowth, aes(x = group, y = weight)) +
  geom_jitter(width = 0.25, alpha = 0.5)
# Q-Q plot (useful, but a bit more advanced): checks normality per group
ggplot(PlantGrowth, aes(sample = weight)) +
  geom_qq() +
  geom_qq_line(color = "red") +
  facet_wrap(~ group, scales = "free_y")
# 3. Inferential Statistics ----
# first step: define a linear model
# ~ means "described by"
plant_lm <- lm(weight ~ group, data = PlantGrowth)
plant_lm
# t-tests
# Typically, use t.test(), but here, we can use:
summary(plant_lm) # p-values are labeled Pr(>|t|)
# 1-way ANOVA
anova(plant_lm)
# For all pair-wise comparisons (Tukey's HSD) use
plant_aov <- aov(weight ~ group, data = PlantGrowth)
TukeyHSD(plant_aov) | /2 - The Plant Growth Case Study.R | no_license | Scavetta/DAwR-2105-IMPRS-LS-01 | R | false | false | 1,711 | r | # Plant Growth Case Study ----
# Load packages ----
# This should already be loaded if you executed the commands in the previous file.
library(tidyverse)
# A built-in data set ----
data(PlantGrowth)
# 1. Descriptive Statistics ----
# The "global mean", e.g. the ANOVA null hypothesis
mean(PlantGrowth$weight)
# group-wise stats (mean and standard deviation per treatment group)
# Here, using functions from dplyr, a part of the Tidyverse
# Thus, using Tidyverse notation
# %>% is the "pipe operator"
# Pronounce it as "... and then ..."
# Type it using shift + ctrl + m
PlantGrowth %>%
  group_by(group) %>%
  summarise(avg = mean(weight),
            stdev = sd(weight))
# 2. Data Visualization ----
# Here, using functions from ggplot2, a part of the Tidyverse
# 3 essential components:
# 1 - The data
# 2 - Aesthetics - "mapping" variables onto scales
#     aes == scales == axes == "encoding elements"
#     scales: x, y, color, size, shape, linetype
# 3 - Geometry - how the plot will look
# box plot
ggplot(PlantGrowth, aes(x = group, y = weight)) +
  geom_boxplot()
# "dot plot" (jittered to reduce overplotting; alpha adds transparency)
ggplot(PlantGrowth, aes(x = group, y = weight)) +
  geom_jitter(width = 0.25, alpha = 0.5)
# Q-Q plot (useful, but a bit more advanced): checks normality per group
ggplot(PlantGrowth, aes(sample = weight)) +
  geom_qq() +
  geom_qq_line(color = "red") +
  facet_wrap(~ group, scales = "free_y")
# 3. Inferential Statistics ----
# first step: define a linear model
# ~ means "described by"
plant_lm <- lm(weight ~ group, data = PlantGrowth)
plant_lm
# t-tests
# Typically, use t.test(), but here, we can use:
summary(plant_lm) # p-values are labeled Pr(>|t|)
# 1-way ANOVA
anova(plant_lm)
# For all pair-wise comparisons (Tukey's HSD) use
plant_aov <- aov(weight ~ group, data = PlantGrowth)
TukeyHSD(plant_aov)
#### 07-1 Cleaning missing values ####
# Finding missing values
df <- data.frame(sex = c("M", "F", NA, "M", "F"),
                 score = c(5, 4, 3, 4, NA))
df
is.na(df) # missing values show up as TRUE
table(is.na(df))
table(is.na(df$sex)) # 1 missing value
table(is.na(df$score)) # 1 missing value
mean(df$score) # NA -- missing values propagate through aggregates
sum(df$score) # NA
# Removing missing values
library(dplyr)
df %>% filter(is.na(score)) # extract rows that have missing values
df %>% filter(!is.na(score)) # drop rows where score is NA
df_nomiss <- df %>% filter(!is.na(score))
mean(df_nomiss$score)
sum(df_nomiss$score)
df_nomiss <- df %>% filter(!is.na(score) & !is.na(sex)) # drop rows where score or sex is NA
df_nomiss
df_nomiss2 <- na.omit(df) # drop rows containing any missing value
df_nomiss2
# Using a function's built-in option to skip missing values
mean(df$score, na.rm=T)
sum(df$score, na.rm=T)
exam <- read.csv("csv_exam.csv")
exam[c(3, 8, 15), "math"] <- NA # introduce some missing math scores
exam
exam %>% summarise(mean_math = mean(math))
exam %>% summarise(mean_math = mean(math, na.rm=T))
exam %>% summarise(mean_math = mean(math, na.rm=T),
                   sum_math = sum(math, na.rm=T),
                   median_math = median(math, na.rm=T))
# Imputing missing values #
# impute missing values with the (rounded) mean
mean(exam$math, na.rm=T)
exam$math <- ifelse(is.na(exam$math), 55, exam$math)
table(is.na(exam$math))
exam
mean(exam$math)
# Try it yourself #
mpg <- as.data.frame(ggplot2::mpg)
mpg[c(65, 124, 131, 153, 212), "hwy"] <- NA
# Q1: how many missing values per column?
table(is.na(mpg$drv))
table(is.na(mpg$hwy))
# Q2: mean highway mileage by drivetrain, excluding missing values
mpg %>%
  filter(!is.na(hwy)) %>%
  group_by(drv) %>%
  summarise(mean_hwy = mean(hwy))
#### 07-2 Cleaning outliers ####
# Removing outliers
outlier <- data.frame(sex = c(1, 2, 1, 3, 2, 1),
                      score = c(5, 4, 3, 4, 2, 6))
outlier
table(outlier$sex)
table(outlier$score)
outlier$sex <- ifelse(outlier$sex == 3, NA, outlier$sex) # 3 is not a valid sex code
outlier
outlier$score <- ifelse(outlier$score > 5, NA, outlier$score) # scores above 5 are invalid
outlier
outlier %>%
  filter(!is.na(sex) & !is.na(score)) %>%
  group_by(sex) %>%
  summarise(mean_score = mean(score))
# Removing outliers - extreme values
# use a box plot to choose the extreme-value cutoffs
boxplot(mpg$hwy)
boxplot(mpg$hwy)$stats # print the box plot statistics
# recode extreme values as missing
mpg$hwy <- ifelse(mpg$hwy < 12 | mpg$hwy > 37, NA, mpg$hwy)
table(is.na(mpg$hwy))
mpg %>%
  group_by(drv) %>%
  summarise(mean_hwy = mean(hwy, na.rm=T))
# Try it yourself #
mpg <- as.data.frame(ggplot2::mpg)
mpg[c(10, 14, 58, 93), "drv"] <- "k"
mpg[c(29, 43, 129, 203), "cty"] <- c(3, 4, 39, 42)
# Q1: replace invalid drv codes with NA
table(mpg$drv)
mpg$drv <- ifelse(mpg$drv %in% c("4", "f", "r"), mpg$drv, NA)
table(mpg$drv)
# Q2: recode extreme cty values as NA using box plot cutoffs
boxplot(mpg$cty)
boxplot(mpg$cty)$stats
mpg$cty <- ifelse(mpg $ cty < 9 | mpg $ cty > 26, NA, mpg$cty)
boxplot(mpg$cty)
# Q3: mean city mileage by drivetrain, excluding missing values
mpg %>%
  filter(!is.na(drv) & !is.na(cty)) %>%
  group_by(drv) %>%
  summarise(mean_cty = mean(cty))
| /R/chapter07.R | no_license | gmltn97/DoRPractice | R | false | false | 2,981 | r | ####07-1 결측치 정제하기####
# Finding missing values
df <- data.frame(sex = c("M", "F", NA, "M", "F"),
                 score = c(5, 4, 3, 4, NA))
df
is.na(df) # missing values show up as TRUE
table(is.na(df))
table(is.na(df$sex)) # 1 missing value
table(is.na(df$score)) # 1 missing value
mean(df$score) # NA -- missing values propagate through aggregates
sum(df$score) # NA
# Removing missing values
library(dplyr)
df %>% filter(is.na(score)) # extract rows that have missing values
df %>% filter(!is.na(score)) # drop rows where score is NA
df_nomiss <- df %>% filter(!is.na(score))
mean(df_nomiss$score)
sum(df_nomiss$score)
df_nomiss <- df %>% filter(!is.na(score) & !is.na(sex)) # drop rows where score or sex is NA
df_nomiss
df_nomiss2 <- na.omit(df) # drop rows containing any missing value
df_nomiss2
# Using a function's built-in option to skip missing values
mean(df$score, na.rm=T)
sum(df$score, na.rm=T)
exam <- read.csv("csv_exam.csv")
exam[c(3, 8, 15), "math"] <- NA # introduce some missing math scores
exam
exam %>% summarise(mean_math = mean(math))
exam %>% summarise(mean_math = mean(math, na.rm=T))
exam %>% summarise(mean_math = mean(math, na.rm=T),
                   sum_math = sum(math, na.rm=T),
                   median_math = median(math, na.rm=T))
# Imputing missing values #
# impute missing values with the (rounded) mean
mean(exam$math, na.rm=T)
exam$math <- ifelse(is.na(exam$math), 55, exam$math)
table(is.na(exam$math))
exam
mean(exam$math)
# Try it yourself #
mpg <- as.data.frame(ggplot2::mpg)
mpg[c(65, 124, 131, 153, 212), "hwy"] <- NA
# Q1: how many missing values per column?
table(is.na(mpg$drv))
table(is.na(mpg$hwy))
# Q2: mean highway mileage by drivetrain, excluding missing values
mpg %>%
  filter(!is.na(hwy)) %>%
  group_by(drv) %>%
  summarise(mean_hwy = mean(hwy))
#### 07-2 Cleaning outliers ####
# Removing outliers
outlier <- data.frame(sex = c(1, 2, 1, 3, 2, 1),
                      score = c(5, 4, 3, 4, 2, 6))
outlier
table(outlier$sex)
table(outlier$score)
outlier$sex <- ifelse(outlier$sex == 3, NA, outlier$sex) # 3 is not a valid sex code
outlier
outlier$score <- ifelse(outlier$score > 5, NA, outlier$score) # scores above 5 are invalid
outlier
outlier %>%
  filter(!is.na(sex) & !is.na(score)) %>%
  group_by(sex) %>%
  summarise(mean_score = mean(score))
# Removing outliers - extreme values
# use a box plot to choose the extreme-value cutoffs
boxplot(mpg$hwy)
boxplot(mpg$hwy)$stats # print the box plot statistics
# recode extreme values as missing
mpg$hwy <- ifelse(mpg$hwy < 12 | mpg$hwy > 37, NA, mpg$hwy)
table(is.na(mpg$hwy))
mpg %>%
  group_by(drv) %>%
  summarise(mean_hwy = mean(hwy, na.rm=T))
# Try it yourself #
mpg <- as.data.frame(ggplot2::mpg)
mpg[c(10, 14, 58, 93), "drv"] <- "k"
mpg[c(29, 43, 129, 203), "cty"] <- c(3, 4, 39, 42)
# Q1: replace invalid drv codes with NA
table(mpg$drv)
mpg$drv <- ifelse(mpg$drv %in% c("4", "f", "r"), mpg$drv, NA)
table(mpg$drv)
# Q2: recode extreme cty values as NA using box plot cutoffs
boxplot(mpg$cty)
boxplot(mpg$cty)$stats
mpg$cty <- ifelse(mpg $ cty < 9 | mpg $ cty > 26, NA, mpg$cty)
boxplot(mpg$cty)
# Q3: mean city mileage by drivetrain, excluding missing values
mpg %>%
  filter(!is.na(drv) & !is.na(cty)) %>%
  group_by(drv) %>%
  summarise(mean_cty = mean(cty))
|
###########################################
# Victor Barbosa
# Data Analysis elective
# Exercise 2
##########################################
# 1. Extract, in ff format, all of the student final-status datasets available at:
#    http://dados.recife.pe.gov.br/dataset/situacao-final-dos-alunos-por-periodo-letivo
# 2. Combine all extracted datasets into a single ff object
# 3. Clean the staging area
# 4. Export the combined dataset in R's native format
###########################################
###########################################
# 1. ff-format extraction of the student final-status datasets (2011-2020)
###########################################
# load the packages
library(ff)
library(ffbase)
# Build the file paths and read each yearly CSV into an ffdf object.
# A loop replaces the ten hand-written read.csv.ffdf() calls of the
# original, so adding a new year only means extending `anos`.
anos <- 2011:2020
arquivos <- sprintf('bases_originais/resultado_final_%d.csv', anos)
extracoes <- lapply(arquivos, function(arquivo) read.csv.ffdf(file = arquivo))
#########################################################
#########################################################
# 2. Combining all extracted datasets into a single ff object
#########################################################
# ffdfrbind.fill() row-binds similar ffdf tables, filling missing columns
# (ffdfappend() failed on these files, per the original author's note)
extracao_merge <- do.call(ffdfrbind.fill, extracoes)
head(extracao_merge) # quick sanity check
##########################################################
##########################################################
# 3. Cleaning the staging area: keep only the merged table
#########################################################
rm(list=(ls()[ls()!="extracao_merge"]))
##########################################################
##########################################################
# 4. Exporting the combined dataset in R's native format
##########################################################
saveRDS(extracao_merge, "bases_tratadas/resultados_finais.rds")
| /exercicio_2.R | no_license | victortbarbosa/eletiva_analise_de_dados | R | false | false | 3,166 | r | ###########################################
# Victor Barbosa
# Data Analysis elective
# Exercise 2
##########################################
# 1. Extract, in ff format, all of the student final-status datasets available at:
#    http://dados.recife.pe.gov.br/dataset/situacao-final-dos-alunos-por-periodo-letivo
# 2. Combine all extracted datasets into a single ff object
# 3. Clean the staging area
# 4. Export the combined dataset in R's native format
###########################################
###########################################
# 1. ff-format extraction of the student final-status datasets (2011-2020)
###########################################
# load the packages
library(ff)
library(ffbase)
# file paths, one per school year
# NOTE(review): these ten near-identical path/read pairs could be a single
# loop over 2011:2020
resultado_2011 <- 'bases_originais/resultado_final_2011.csv'
resultado_2012 <- 'bases_originais/resultado_final_2012.csv'
resultado_2013 <- 'bases_originais/resultado_final_2013.csv'
resultado_2014 <- 'bases_originais/resultado_final_2014.csv'
resultado_2015 <- 'bases_originais/resultado_final_2015.csv'
resultado_2016 <- 'bases_originais/resultado_final_2016.csv'
resultado_2017 <- 'bases_originais/resultado_final_2017.csv'
resultado_2018 <- 'bases_originais/resultado_final_2018.csv'
resultado_2019 <- 'bases_originais/resultado_final_2019.csv'
resultado_2020 <- 'bases_originais/resultado_final_2020.csv'
# reading each CSV into an out-of-memory ffdf object
extracao_2011 <- read.csv.ffdf(file=resultado_2011)
extracao_2012 <- read.csv.ffdf(file=resultado_2012)
extracao_2013 <- read.csv.ffdf(file=resultado_2013)
extracao_2014 <- read.csv.ffdf(file=resultado_2014)
extracao_2015 <- read.csv.ffdf(file=resultado_2015)
extracao_2016 <- read.csv.ffdf(file=resultado_2016)
extracao_2017 <- read.csv.ffdf(file=resultado_2017)
extracao_2018 <- read.csv.ffdf(file=resultado_2018)
extracao_2019 <- read.csv.ffdf(file=resultado_2019)
extracao_2020 <- read.csv.ffdf(file=resultado_2020)
#########################################################
#########################################################
# 2. Combining all extracted datasets into a single ff object
#########################################################
# ffdfrbind.fill() row-binds similar ffdf tables, filling missing columns
# (the author notes ffdfappend() produced an error here)
extracao_merge <- ffdfrbind.fill(extracao_2011, extracao_2012, extracao_2013,
extracao_2014, extracao_2015, extracao_2016,
extracao_2017, extracao_2018, extracao_2019,
extracao_2020)
head(extracao_merge) # quick sanity check
##########################################################
##########################################################
# 3. Cleaning the staging area: keep only the merged table
#########################################################
rm(list=(ls()[ls()!="extracao_merge"]))
##########################################################
##########################################################
# 4. Exporting the combined dataset in R's native format
##########################################################
saveRDS(extracao_merge, "bases_tratadas/resultados_finais.rds")
|
# Test for expression variability along the CD4EM -> CD4EX pseudotime,
# using E-MDSC proportion as the sample-level covariate of interest.
library(Matrix)
source('/home-4/whou10@jhu.edu/scratch/Wenpin/trajectory_variability/function/01_function.R')
# pseudotime[,1] holds cell names ordered along the trajectory
pseudotime <- readRDS('/home-4/whou10@jhu.edu/scratch/Wenpin/GBM_t/data/order/CD4EM_CD4EX_pseudotime.rds')
# output directory
rdir <- '/home-4/whou10@jhu.edu/scratch/Wenpin/GBM_t/result/testvar/E_MDSC/CD4EM_CD4EX/'
dir.create(rdir, showWarnings = FALSE, recursive = TRUE)
setwd(rdir)
# lymphoid expression matrix and (unused) metadata
cnt <- readRDS('/home-4/whou10@jhu.edu/data2/whou10/GBM/singleObject/Lymph/L.rds')
meta <- readRDS('/home-4/whou10@jhu.edu/data2/whou10/GBM/singleObject/Lymph/meta.rds') # NOTE(review): `meta` is never used below
# keep only the cells on the trajectory, in pseudotime order
cnt <- cnt[, pseudotime[,1]]
# sample id = barcode piece between the last '-' and the first '_'
# NOTE(review): sub() is vectorized, so the sapply() wrapper is redundant;
# kept because its names attribute sets cellanno's row names - confirm
# testpt() does not rely on them before simplifying
cellanno <- data.frame(cell = colnames(cnt), sample = sapply(colnames(cnt), function(i) sub('_.*','',sub('.*-','',i))), stringsAsFactors = FALSE)
# sample-level design: one covariate, the E-MDSC proportion
mdsc <- read.csv('/home-4/whou10@jhu.edu/data2/whou10/GBM/meta/mdsc_proportions.csv', header = TRUE)
design <- data.frame(MdscProp = mdsc[,3]) ## 3 is E-MDSC, 8 is M-MDSC
rownames(design) <- as.character(mdsc[,2])
# keep only cells whose sample appears in the design
cellanno <- cellanno[cellanno[,2] %in% rownames(design),]
cnt <- cnt[, cellanno[,1]]
pseudotime <- pseudotime[pseudotime[,1] %in% cellanno[,1], ]
cnt <- as.matrix(cnt)
cnt <- cnt[rowMeans(cnt>0.1)>0.01,] ## keep genes expressed (>0.1) in more than 1% of cells
### run the test
design <- cbind(1, design) # prepend an intercept column
res <- testpt(expr=cnt,cellanno=cellanno,pseudotime=pseudotime,design=design,ncores=8, permuiter=100)
saveRDS(res, 'final.rds')
| /code/21_CD4EM_CD4EX_testvar_EMDSC.R | no_license | Winnie09/GBM_t | R | false | false | 1,315 | r | library(Matrix)
# Test for expression variability along the CD4EM -> CD4EX pseudotime,
# using E-MDSC proportion as the sample-level covariate of interest.
source('/home-4/whou10@jhu.edu/scratch/Wenpin/trajectory_variability/function/01_function.R')
# pseudotime[,1] holds cell names ordered along the trajectory
pseudotime = readRDS('/home-4/whou10@jhu.edu/scratch/Wenpin/GBM_t/data/order/CD4EM_CD4EX_pseudotime.rds')
# output directory
rdir <- '/home-4/whou10@jhu.edu/scratch/Wenpin/GBM_t/result/testvar/E_MDSC/CD4EM_CD4EX/'
dir.create(rdir, showWarnings = F, recursive = T)
setwd(rdir)
cnt <- readRDS('/home-4/whou10@jhu.edu/data2/whou10/GBM/singleObject/Lymph/L.rds')
meta <- readRDS('/home-4/whou10@jhu.edu/data2/whou10/GBM/singleObject/Lymph/meta.rds') # NOTE(review): `meta` is never used below
# keep only the cells on the trajectory, in pseudotime order
cnt <- cnt[, pseudotime[,1]]
# sample id = barcode piece between the last '-' and the first '_'
# (sub() is vectorized; the sapply() wrapper only adds a names attribute)
cellanno <- data.frame(cell = colnames(cnt), sample = sapply(colnames(cnt), function(i) sub('_.*','',sub('.*-','',i))), stringsAsFactors = FALSE)
# sample-level design: one covariate, the E-MDSC proportion
mdsc <- read.csv('/home-4/whou10@jhu.edu/data2/whou10/GBM/meta/mdsc_proportions.csv', header = T)
design <- data.frame(MdscProp = mdsc[,3]) ## 3 is E-MDSC, 8 is M-MDSC
rownames(design) <- as.character(mdsc[,2])
# keep only cells whose sample appears in the design
cellanno <- cellanno[cellanno[,2] %in% rownames(design),]
cnt <- cnt[, cellanno[,1]]
pseudotime = pseudotime[pseudotime[,1] %in% cellanno[,1], ]
cnt <- as.matrix(cnt)
cnt <- cnt[rowMeans(cnt>0.1)>0.01,] ## keep genes expressed (>0.1) in more than 1% of cells
### run the test (prepend an intercept column first)
design = cbind(1, design)
res <- testpt(expr=cnt,cellanno=cellanno,pseudotime=pseudotime,design=design,ncores=8, permuiter=100)
saveRDS(res, 'final.rds')
|
library(simstudy)
library(tidyverse)
library(tableone)
library(datapasta)
library(broom)
library(Matching)
set.seed(31)
comorb <- c("hypertension", "diabetes", "CHF", "dementia")
# Simulated confounded cohort: comorbidities drive unobserved smoking,
# treatment assignment (intervention), age, and the outcome.
def_lin <- defData(varname = 'hypertension', dist='binary', formula= '0.3 ') %>%
  defData(varname = 'male', dist='binary', formula= '0.52 ') %>%
  defData(varname = 'diabetes', dist='binary', formula= '0.1 ') %>%
  defData(varname = 'dementia', dist='binary', formula= '0.25 ') %>%
  defData(varname = 'smoking_unobserved', dist='binary', formula = "0.01 + hypertension*0.08 + dementia*0.12 + diabetes*0.07 + male*0.05") %>%
  defData(varname='intervention', dist= 'binary', formula='0.05+hypertension*0.08 + diabetes*0.08 + dementia*0.07 + smoking_unobserved*0.05 + male*.01') %>%
  defData(varname='age', dist = "normal", formula = "50 + male*3+ intervention*5 + diabetes*2 + dementia*5 - hypertension*3", variance=10) %>%
  # BUG FIX: the outcome was generated under the name `outcome`, but every
  # model below (and the matched analysis) refers to `died`; generate it as
  # `died` so the script runs end to end.
  # NOTE(review): `died` is continuous (normal) while the glm() calls below
  # use binomial(link = "logit"), which expects a 0/1 outcome - confirm intent.
  defData(varname= 'died', dist='normal',
          formula='0.1- 0.1*intervention + male*0.03+ hypertension*0.2 + diabetes*0.02 + dementia*0.3 +0.001*age**2 + smoking_unobserved*0.15')
df <- genData(5000, def_lin)
saveRDS(df, 'df.rds')
# Linear models: crude vs hypertension-adjusted effect of intervention
crude_lin <- lm(died ~ intervention, data=df)
adj_lin <- lm(died ~ intervention + hypertension, data=df)
tidy(crude_lin)
tidy(adj_lin)
filter(df, is.na(died)) %>% View()
# BUG FIX: sum(is.na(.)) is an integer, so map_chr() errors; map_int() is correct
map_int(df, ~sum(is.na(.x)))
# Table 1 by treatment arm
# NOTE(review): "nr_comorb" is never created in df - confirm CreateTableOne
# simply warns and drops it
table_var <- c("age", "hypertension", "diabetes", "dementia", "smoking_unobserved", 'died', "nr_comorb")
median_var <- c('age') # was a redundant double assignment
factor_var <- c("hypertension", "diabetes","dementia","smoking_unobserved")
table1<- CreateTableOne(table_var, data = df, strata = "intervention", factorVars = factor_var,
                        testNonNormal = kruskal.test, argsNonNormal = median_var)
table1
# Logistic models on the unmatched data: crude, adjusted, adjusted + smoking
crude <- glm(died ~ intervention, data=df, binomial(link = "logit"))
adj <- glm(died ~ intervention +age + hypertension + diabetes + dementia , data=df, binomial(link = "logit"))
adj_smoking <- glm(died ~ intervention +age + hypertension + diabetes + dementia + smoking_unobserved, data=df, binomial(link = "logit"))
tidy(crude, exponentiate = TRUE, conf.int = TRUE)
tidy(adj, exponentiate = TRUE, conf.int = TRUE)
tidy(adj_smoking, exponentiate = TRUE, conf.int = TRUE)
# Genetic matching (Matching::GenMatch + Match) wrapper.
#
# Args:
#   intervention:  name of the treatment column in `data`.
#   data:          data frame holding treatment, matching and balance columns.
#   matching_vars: columns matched on.
#   balance_vars:  columns whose balance GenMatch optimizes.
#   exact_vars:    subset of matching_vars to match exactly.
#   name:          NOTE(review): only used by the commented-out saveRDS below.
#   replace:       match with replacement?
#   ...:           NOTE(review): accepted but never forwarded - confirm intent.
#
# Returns: the Match object, augmented with $spec (run metadata),
#   $genmatch_weight_matrix, and $matched_df (matched rows of `data` plus
#   their match weights).
gen_match <- function(intervention="intervention", data, matching_vars, balance_vars, exact_vars, name, replace=TRUE,...) {
  time_start <- Sys.time()
  data_name <- quote(data) # NOTE(review): quote(data) always yields the symbol `data`, not the caller's expression
  data2 <- eval(data)
  # Numeric matrices GenMatch expects: matching variables, balance variables,
  # and the treatment indicator.
  matching_matrix <- data2 %>%
    dplyr::select_at(matching_vars) %>% # select_at keeps only the requested columns
    data.matrix()
  balance_matrix <- data2 %>%
    dplyr::select_at(balance_vars) %>% # select_at keeps only the requested columns
    data.matrix()
  treatment_status <- data2 %>%
    dplyr::select_at(intervention) %>% # select_at keeps only the requested columns
    data.matrix()
  exact_vars2 <- colnames(matching_matrix) %in% exact_vars # Logical flags: which matching columns are matched exactly
  list(matching_matrix,balance_matrix,treatment_status,exact_vars2) # NOTE(review): dead expression - its result is discarded
  gen <- GenMatch(Tr=treatment_status, X=matching_matrix, BalanceMatrix=balance_matrix,
                  exact=exact_vars2, estimand='ATT', M=1, pop.size=100,
                  wait.generations=10, hard.generation.limit=TRUE, max.generations=500,
                  replace=replace, ties=FALSE, weight=NULL, print.level=1, balance=TRUE) # Carry out genetic matching
  mw <- Match(Tr=treatment_status, X=matching_matrix, exact=exact_vars2, Weight.matrix=as.matrix(gen$Weight.matrix),
              estimand="ATT", replace=replace, ties=FALSE, M=1) # Carry out matching with genetic matching weights
  # Assemble the matched data set: matched treated + control rows with weights
  treated <- data.frame(index=mw$index.treated, weights=mw$weights)
  control <- data.frame(index=mw$index.control, weights=mw$weights)
  bind_treated_control <- rbind(control, treated) %>%
    as.data.frame() %>%
    tbl_df() # NOTE(review): tbl_df() is deprecated in newer dplyr; as_tibble() is the replacement
  data2 <-dplyr:: mutate(data, index=row_number())
  match_dat<- inner_join(bind_treated_control,data2, by="index") %>%
    tbl_df()
  # NOTE(review): `orig.treated.nob` relies on `$` partial matching against
  # the Match object's `orig.treated.nobs` field - confirm and spell it out
  if(length(mw$index.treated)!=mw$orig.treated.nob) warning("Treated individuals being matched to more than 1 person")
  time_end <- Sys.time()- time_start
  # Attach run metadata and results to the Match object before returning
  mw[["spec"]] <- list(data_name,matching_vars=matching_vars, balance_vars=balance_vars, exact_vars=exact_vars, time_run=time_end, nobs=dim(data2))
  mw[["genmatch_weight_matrix"]] <- list(gen$Weight.matrix)
  mw[['matched_df']] <- match_dat
  # saveRDS(mw, file=here::here("data", "matching",'base', paste0(name,".rds" )))
  return(mw)
}
# Variables to match and balance on (smoking is "unobserved" in the analysis
# models but used for matching here - NOTE(review): confirm that is intentional)
base_match_var <- c("hypertension", "diabetes", "dementia", "smoking_unobserved", "age")
# Genetic matching, no exact-match variables, without replacement
# NOTE(review): `exact_var=` relies on partial argument matching against the
# function's `exact_vars` parameter - spell it out
dat_neighbourhood_match_base_exact_none_replacement <- df %>%
gen_match(data=., matching_vars=base_match_var, exact_var=NULL,
balance_vars=base_match_var,
replace=FALSE)
matched_df <- dat_neighbourhood_match_base_exact_none_replacement$matched_df
# Age distribution by arm after matching (..density.. is legacy ggplot2
# notation; after_stat(density) in current versions)
ggplot(matched_df, aes(x=age, ..density..)) + geom_histogram(binwidth = 1) + facet_grid(~intervention)
# Table 1 on the matched data (table_var / factor_var / median_var defined above)
table2<- CreateTableOne(table_var, data = matched_df, strata = "intervention", factorVars = factor_var,
testNonNormal = kruskal.test, argsNonNormal = median_var)
table2
# Logistic models re-fit on the matched data
crude2 <- glm(died ~ intervention, data=matched_df, binomial(link = "logit"))
adj2 <- glm(died ~ intervention +age + hypertension + diabetes + dementia , data=matched_df, binomial(link = "logit"))
adj_smoking2 <- glm(died ~ intervention +age + hypertension + diabetes + dementia +smoking_unobserved, data=matched_df, binomial(link = "logit"))
tidy(crude2, exponentiate = TRUE, conf.int = TRUE)
tidy(adj2, exponentiate = TRUE, conf.int = TRUE)
tidy(adj_smoking2, exponentiate = TRUE, conf.int = TRUE)
# Pre-matching age distributions for comparison
# NOTE(review): alpha=0.5 inside aes() maps a constant as an aesthetic;
# it belongs in geom_histogram(alpha = 0.5)
ggplot(df, aes(x=age, ..density..)) + geom_histogram(binwidth = 1) + facet_grid(~intervention)
ggplot(df, aes(x=age, ..density.., group=factor(intervention), fill=factor(intervention), alpha=0.5)) + geom_histogram(binwidth = 1)
| /genmatch/fake_data1.R | no_license | RichardPilbery/r-nhs-workshop | R | false | false | 6,064 | r | library(simstudy)
library(tidyverse)
library(tableone)
library(datapasta)
library(broom)
library(Matching)
set.seed(31)
comorb <- c("hypertension", "diabetes", "CHF", "dementia")
# Simulated confounded cohort: comorbidities drive unobserved smoking,
# treatment assignment (intervention), age, and the outcome.
def_lin <- defData(varname = 'hypertension', dist='binary', formula= '0.3 ') %>%
  defData(varname = 'male', dist='binary', formula= '0.52 ') %>%
  defData(varname = 'diabetes', dist='binary', formula= '0.1 ') %>%
  defData(varname = 'dementia', dist='binary', formula= '0.25 ') %>%
  defData(varname = 'smoking_unobserved', dist='binary', formula = "0.01 + hypertension*0.08 + dementia*0.12 + diabetes*0.07 + male*0.05") %>%
  defData(varname='intervention', dist= 'binary', formula='0.05+hypertension*0.08 + diabetes*0.08 + dementia*0.07 + smoking_unobserved*0.05 + male*.01') %>%
  defData(varname='age', dist = "normal", formula = "50 + male*3+ intervention*5 + diabetes*2 + dementia*5 - hypertension*3", variance=10) %>%
  # NOTE(review): BUG - the outcome is generated as `outcome`, but every model
  # below refers to `died`, so the lm()/glm() calls cannot find the column.
  defData(varname= 'outcome', dist='normal',
          formula='0.1- 0.1*intervention + male*0.03+ hypertension*0.2 + diabetes*0.02 + dementia*0.3 +0.001*age**2 + smoking_unobserved*0.15')
df <- genData(5000, def_lin)
saveRDS(df, 'df.rds')
# Linear models: crude vs hypertension-adjusted effect of intervention
crude_lin <- lm(died ~ intervention, data=df)
adj_lin <- lm(died ~ intervention + hypertension, data=df)
tidy(crude_lin)
tidy(adj_lin)
filter(df, is.na(died)) %>% View()
# NOTE(review): BUG - sum(is.na(.)) is numeric, so map_chr() errors;
# map_int() is the correct mapper here.
map_chr(df, ~sum(is.na(.x)))
# Table 1 by treatment arm
# NOTE(review): "nr_comorb" is never created in df
table_var <- c("age", "hypertension", "diabetes", "dementia", "smoking_unobserved", 'died', "nr_comorb")
median_var <- median_var<- c('age') # NOTE(review): redundant double assignment
factor_var <- c("hypertension", "diabetes","dementia","smoking_unobserved")
table1<- CreateTableOne(table_var, data = df, strata = "intervention", factorVars = factor_var,
testNonNormal = kruskal.test, argsNonNormal = median_var)
table1
# Logistic models on the unmatched data: crude, adjusted, adjusted + smoking
# NOTE(review): binomial(link = "logit") expects a 0/1 outcome, but the
# generated outcome is continuous normal - confirm intent.
crude <- glm(died ~ intervention, data=df, binomial(link = "logit"))
adj <- glm(died ~ intervention +age + hypertension + diabetes + dementia , data=df, binomial(link = "logit"))
adj_smoking <- glm(died ~ intervention +age + hypertension + diabetes + dementia + smoking_unobserved, data=df, binomial(link = "logit"))
tidy(crude, exponentiate = TRUE, conf.int = TRUE)
tidy(adj, exponentiate = TRUE, conf.int = TRUE)
tidy(adj_smoking, exponentiate = TRUE, conf.int = TRUE)
# Genetic matching (Matching::GenMatch + Match) wrapper: returns the Match
# object augmented with $spec, $genmatch_weight_matrix and $matched_df
# (matched rows of `data` with their match weights).
# NOTE(review): `name` is only used by the commented-out saveRDS; `...` is
# accepted but never forwarded.
gen_match <- function(intervention="intervention", data, matching_vars, balance_vars, exact_vars, name, replace=TRUE,...) {
  time_start <- Sys.time()
  data_name <- quote(data) # NOTE(review): always the symbol `data`, not the caller's expression
  data2 <- eval(data)
  # Numeric matrices GenMatch expects
  matching_matrix <- data2 %>%
    dplyr::select_at(matching_vars) %>% # keep only the requested columns
    data.matrix()
  balance_matrix <- data2 %>%
    dplyr::select_at(balance_vars) %>% # keep only the requested columns
    data.matrix()
  treatment_status <- data2 %>%
    dplyr::select_at(intervention) %>% # keep only the requested columns
    data.matrix()
  exact_vars2 <- colnames(matching_matrix) %in% exact_vars # Logical flags: which matching columns are matched exactly
  list(matching_matrix,balance_matrix,treatment_status,exact_vars2) # NOTE(review): dead expression - result discarded
  gen <- GenMatch(Tr=treatment_status, X=matching_matrix, BalanceMatrix=balance_matrix,
                  exact=exact_vars2, estimand='ATT', M=1, pop.size=100,
                  wait.generations=10, hard.generation.limit=TRUE, max.generations=500,
                  replace=replace, ties=FALSE, weight=NULL, print.level=1, balance=TRUE) # Carry out genetic matching
  mw <- Match(Tr=treatment_status, X=matching_matrix, exact=exact_vars2, Weight.matrix=as.matrix(gen$Weight.matrix),
              estimand="ATT", replace=replace, ties=FALSE, M=1) # Carry out matching with genetic matching weights
  # Assemble the matched data set: matched treated + control rows with weights
  treated <- data.frame(index=mw$index.treated, weights=mw$weights)
  control <- data.frame(index=mw$index.control, weights=mw$weights)
  bind_treated_control <- rbind(control, treated) %>%
    as.data.frame() %>%
    tbl_df() # NOTE(review): tbl_df() is deprecated; as_tibble() is the replacement
  data2 <-dplyr:: mutate(data, index=row_number())
  match_dat<- inner_join(bind_treated_control,data2, by="index") %>%
    tbl_df()
  # NOTE(review): `orig.treated.nob` relies on `$` partial matching - confirm field name
  if(length(mw$index.treated)!=mw$orig.treated.nob) warning("Treated individuals being matched to more than 1 person")
  time_end <- Sys.time()- time_start
  # Attach run metadata and results before returning
  mw[["spec"]] <- list(data_name,matching_vars=matching_vars, balance_vars=balance_vars, exact_vars=exact_vars, time_run=time_end, nobs=dim(data2))
  mw[["genmatch_weight_matrix"]] <- list(gen$Weight.matrix)
  mw[['matched_df']] <- match_dat
  # saveRDS(mw, file=here::here("data", "matching",'base', paste0(name,".rds" )))
  return(mw)
}
# Variables to match and balance on
base_match_var <- c("hypertension", "diabetes", "dementia", "smoking_unobserved", "age")
# Genetic matching, no exact-match variables, without replacement
# NOTE(review): `exact_var=` relies on partial argument matching against
# `exact_vars` - spell it out
dat_neighbourhood_match_base_exact_none_replacement <- df %>%
gen_match(data=., matching_vars=base_match_var, exact_var=NULL,
balance_vars=base_match_var,
replace=FALSE)
matched_df <- dat_neighbourhood_match_base_exact_none_replacement$matched_df
# Age distribution by arm after matching
ggplot(matched_df, aes(x=age, ..density..)) + geom_histogram(binwidth = 1) + facet_grid(~intervention)
# Table 1 on the matched data (table_var / factor_var / median_var defined above)
table2<- CreateTableOne(table_var, data = matched_df, strata = "intervention", factorVars = factor_var,
testNonNormal = kruskal.test, argsNonNormal = median_var)
table2
# Logistic models re-fit on the matched data
crude2 <- glm(died ~ intervention, data=matched_df, binomial(link = "logit"))
adj2 <- glm(died ~ intervention +age + hypertension + diabetes + dementia , data=matched_df, binomial(link = "logit"))
adj_smoking2 <- glm(died ~ intervention +age + hypertension + diabetes + dementia +smoking_unobserved, data=matched_df, binomial(link = "logit"))
tidy(crude2, exponentiate = TRUE, conf.int = TRUE)
tidy(adj2, exponentiate = TRUE, conf.int = TRUE)
tidy(adj_smoking2, exponentiate = TRUE, conf.int = TRUE)
# Pre-matching age distributions for comparison
# NOTE(review): alpha=0.5 belongs outside aes()
ggplot(df, aes(x=age, ..density..)) + geom_histogram(binwidth = 1) + facet_grid(~intervention)
ggplot(df, aes(x=age, ..density.., group=factor(intervention), fill=factor(intervention), alpha=0.5)) + geom_histogram(binwidth = 1)
|
\name{rec.reg}
\alias{rec.reg}
\title{Computes Recursive Regression.}
\description{
This function computes Recursive Regression.
}
\details{It might happen during computations that \code{\link[stats]{lm}} (which is used inside \code{rec.reg}) will produce \code{\link[base]{NA}} or \code{\link[base]{NaN}}. In such a case regression coefficients for a given period are taken as \eqn{0} and p-values for t-test for statistical significance of regression coefficients are taken as \eqn{1}.
It is not possible to set \code{c=FALSE} if \code{x=NULL}. In such a case the function will automatically reset \code{c=TRUE} inside the code.}
\usage{
rec.reg(y,x=NULL,c=NULL)
}
\arguments{
\item{y}{\code{\link[base]{numeric}} or a column \code{\link[base]{matrix}} of a dependent variable}
\item{x}{\code{\link[base]{matrix}} of independent variables, different columns should correspond to different variables, if not specified only a constant will be used}
\item{c}{optional, \code{\link[base]{logical}}, a parameter indicating whether constant is included, if not specified \code{c=TRUE} is used, i.e., constant is included}
}
\value{class \code{reg} object, \code{\link[base]{list}} of
\item{$y.hat}{fitted (forecasted) values}
\item{$AIC}{Akaike Information Criterion (from the current set of observations)}
\item{$AICc}{Akaike Information Criterion with a correction for finite sample sizes (from the current set of observations)}
\item{$BIC}{Bayesian Information Criterion (from the current set of observations)}
\item{$MSE}{Mean Squared Error (from the current set of observations)}
\item{$coeff.}{regression coefficients}
\item{$p.val}{p-values for t-test for statistical significance of regression coefficients}
\item{$y}{\code{y}, forecasted time-series}
}
\examples{
wti <- crudeoil[-1,1]
drivers <- (lag(crudeoil[,-1],k=1))[-1,]
ld.wti <- (diff(log(wti)))[-1,]
ld.drivers <- (diff(log(drivers)))[-1,]
rec1 <- rec.reg(y=ld.wti,x=ld.drivers)
rec2 <- rec.reg(y=ld.wti)
}
\seealso{\code{\link{print.reg}}, \code{\link{summary.reg}}, \code{\link{plot.reg}}.}
| /fuzzedpackages/fDMA/man/rec.reg.Rd | no_license | akhikolla/testpackages | R | false | false | 2,113 | rd | \name{rec.reg}
\alias{rec.reg}
\title{Computes Recursive Regression.}
\description{
This function computes Recursive Regression.
}
\details{It might happen during computations that \code{\link[stats]{lm}} (which is used inside \code{rec.reg}) will produce \code{\link[base]{NA}} or \code{\link[base]{NaN}}. In such a case regression coefficients for a given period are taken as \eqn{0} and p-values for t-test for statistical significance of regression coefficients are taken as \eqn{1}.
It is not possible to set \code{c=FALSE} if \code{x=NULL}. In such a case the function will automatically reset \code{c=TRUE} inside the code.}
\usage{
rec.reg(y,x=NULL,c=NULL)
}
\arguments{
\item{y}{\code{\link[base]{numeric}} or a column \code{\link[base]{matrix}} of a dependent variable}
\item{x}{\code{\link[base]{matrix}} of independent variables, different columns should correspond to different variables, if not specified only a constant will be used}
\item{c}{optional, \code{\link[base]{logical}}, a parameter indicating whether constant is included, if not specified \code{c=TRUE} is used, i.e., constant is included}
}
\value{class \code{reg} object, \code{\link[base]{list}} of
\item{$y.hat}{fitted (forecasted) values}
\item{$AIC}{Akaike Information Criterion (from the current set of observations)}
\item{$AICc}{Akaike Information Criterion with a correction for finite sample sizes (from the current set of observations)}
\item{$BIC}{Bayesian Information Criterion (from the current set of observations)}
\item{$MSE}{Mean Squared Error (from the current set of observations)}
\item{$coeff.}{regression coefficients}
\item{$p.val}{p-values for t-test for statistical significance of regression coefficients}
\item{$y}{\code{y}, forecasted time-series}
}
\examples{
wti <- crudeoil[-1,1]
drivers <- (lag(crudeoil[,-1],k=1))[-1,]
ld.wti <- (diff(log(wti)))[-1,]
ld.drivers <- (diff(log(drivers)))[-1,]
rec1 <- rec.reg(y=ld.wti,x=ld.drivers)
rec2 <- rec.reg(y=ld.wti)
}
\seealso{\code{\link{print.reg}}, \code{\link{summary.reg}}, \code{\link{plot.reg}}.}
|
#Spatializing the results: rasterize per-assemblage NRI and mean age, then
#export a two-panel figure.
library(raster)
library(sp)
library(rnaturalearthdata)
costline <- coastline50
nri<- ses.mpd_res_noNA
names(nri)<- names(mean_age) # align NRI naming with the age vector
rownames(coords)<- 1:5567
# Empty 1-degree template raster covering the Americas
r <- raster(vals=NA, xmn = -170.2166 , xmx = -13.21288, ymn = -55.37714, ymx = 83.6236, resolution=1.0)
# Fill a copy of `template` with `vals` at the cells of the matching points.
# BUG FIX: the original assigned through a logical mask built with %in%, which
# pairs values with cells in ascending cell order rather than in point order,
# scrambling the map wherever cellFromXY() output is not already sorted.
# Direct indexing keeps each value with its own cell (for duplicated cells the
# last point wins).
fill_raster <- function(template, xy, vals) {
  cells <- cellFromXY(template, xy)
  out <- rep(NA_real_, ncell(template))
  out[cells] <- vals
  raster::setValues(template, values = out)
}
### NRI map ####
r.nri <- fill_raster(r, coords[names(nri),], nri)
### AGE map ####
r.age <- fill_raster(r, coords[names(mean_age),], mean_age)
### Figure ####
#exporting as tif image: (a) NRI, (b) mean assemblage age
tiff("Fig3_Ses_Age_06-07.tif", units = 'cm', width = 17.4, height = 12, res = 300)
par(mfrow = c(1,2), mar=c(6,4,4,5))
plot(r.nri, xlab = "Longitude", ylab = "Latitude")
plot(costline, add=T)
mtext("a",side = 3, line = 0.5, font = 2, adj = 0, cex = 1.5)
plot(r.age, xlab = "Longitude", ylab = "Latitude")
plot(costline, add=T)
mtext("b",side = 3, line = 0.5, font = 2, adj = 0, cex = 1.5)
dev.off()
| /R/S_maps_Age_NRI_20-06-20.R | no_license | kostask84/MS_Tyrannidae_AgeAssemblage | R | false | false | 1,249 | r | #Spatializing the results
library(raster)
library(sp)
library(rnaturalearthdata)
costline <- coastline50
ages <- mean_age # NOTE(review): never used below
nri<- ses.mpd_res_noNA
names(nri)<- names(mean_age) # align NRI naming with the age vector
rownames(coords)<- 1:5567
# Empty 1-degree template raster covering the Americas
r <- raster(vals=NA, xmn = -170.2166 , xmx = -13.21288, ymn = -55.37714, ymx = 83.6236, resolution=1.0)
### NRI map ####
cell.r <- cellFromXY(r, coords[names(nri),])
values_cell <- rep(NA, ncell(r))
names(values_cell) <- 1:ncell(r) # cosmetic: names are not used by setValues
# NOTE(review): assigning through this %in% mask pairs values with cells in
# ascending cell order, not in point order - correct only if cellFromXY()
# already returns sorted, unique cells; direct values_cell[cell.r] <- nri
# would preserve the pairing.
nri.cells <- 1:ncell(r) %in% cell.r
values_cell[nri.cells] <- nri
r.nri <- raster::setValues(r, values = values_cell)
### AGE map ####
cell.r <- cellFromXY(r, coords[names(mean_age),])
values_cell <- rep(NA, ncell(r))
names(values_cell) <- 1:ncell(r)
# NOTE(review): same ordering caveat as for the NRI map above
age.cells <- 1:ncell(r) %in% cell.r
values_cell[age.cells] <- mean_age
r.age <- raster::setValues(r, values = values_cell)
### Figure ####
#exporting as tif image: (a) NRI, (b) mean assemblage age
tiff("Fig3_Ses_Age_06-07.tif", units = 'cm', width = 17.4, height = 12, res = 300)
par(mfrow = c(1,2), mar=c(6,4,4,5))
plot(r.nri, xlab = "Longitude", ylab = "Latitude")
plot(costline, add=T)
mtext("a",side = 3, line = 0.5, font = 2, adj = 0, cex = 1.5)
plot(r.age, xlab = "Longitude", ylab = "Latitude")
plot(costline, add=T)
mtext("b",side = 3, line = 0.5, font = 2, adj = 0, cex = 1.5)
dev.off()
|
# Test plot_scatter and plot_trends functions.
# Each test first passes one column argument as a bare symbol (which must
# raise an error), then passes the fully quoted argument set (which must
# return a ggplot object).
test_that("plot_scatter", {
  expect_error(
    plot_scatter(trend_changes_USA,
      x_var = old_mean, y_var = "new_mean", facet_x = "ihme_loc_id",
      pt_size = 2, facet_type = "wrap", fix_scale = TRUE,
      color_pt = "year_id"
    )
  )
  scatter_plot <- plot_scatter(trend_changes_USA,
    x_var = "old_mean", y_var = "new_mean", facet_x = "ihme_loc_id",
    pt_size = 2, facet_type = "wrap", fix_scale = TRUE,
    color_pt = "year_id"
  )
  expect_true(is.ggplot(scatter_plot))
})
test_that("plot_trends", {
  # `facet_type = wrap` is a bare symbol on purpose: the call must error
  expect_error(plot_trends(trend_changes_USA,
                           time_var = "year_id",
                           y1_var = "old_mean",
                           y2_var = "new_mean",
                           facet_x = "ihme_loc_id",
                           facet_type = wrap,
                           line_size = 2))
  # with all arguments quoted, the function must return a ggplot object
  expect_true(is.ggplot(plot_trends(trend_changes_USA,
                                    time_var = "year_id",
                                    y1_var = "old_mean",
                                    y2_var = "new_mean",
                                    facet_x = "ihme_loc_id",
                                    facet_type = "wrap",
                                    line_size = 2)))
}
) | /tests/testthat/test-plotting.R | permissive | hmsuw-learn-r/DiagnosticReportR | R | false | false | 1,643 | r | # Test plot_scatter and plot_trends functions
# Tests for plot_scatter() and plot_trends(): an unquoted column argument
# must error; a fully quoted argument set must return a ggplot object.
test_that("plot_scatter", {
  # `x_var = old_mean` is a bare symbol on purpose: the call must error
  expect_error(plot_scatter(trend_changes_USA,
                            x_var = old_mean,
                            y_var = "new_mean",
                            facet_x = "ihme_loc_id",
                            pt_size = 2,
                            facet_type = "wrap",
                            fix_scale = TRUE,
                            color_pt = "year_id"))
  expect_true(is.ggplot(plot_scatter(trend_changes_USA,
                                     x_var = "old_mean",
                                     y_var = "new_mean",
                                     facet_x = "ihme_loc_id",
                                     pt_size = 2,
                                     facet_type = "wrap",
                                     fix_scale = TRUE,
                                     color_pt = "year_id")))
}
)
test_that("plot_trends", {
  # `facet_type = wrap` is a bare symbol on purpose: the call must error
  expect_error(plot_trends(trend_changes_USA,
                           time_var = "year_id",
                           y1_var = "old_mean",
                           y2_var = "new_mean",
                           facet_x = "ihme_loc_id",
                           facet_type = wrap,
                           line_size = 2))
  expect_true(is.ggplot(plot_trends(trend_changes_USA,
                                    time_var = "year_id",
                                    y1_var = "old_mean",
                                    y2_var = "new_mean",
                                    facet_x = "ihme_loc_id",
                                    facet_type = "wrap",
                                    line_size = 2)))
}
)
#8: simple linear regression of mpg on horsepower
# NOTE(review): `Auto` needs library(ISLR), which is only loaded further down
# (exercise 10) - load ISLR first when running this file top to bottom.
#(a) fit the model, then predict at horsepower = 98 with both interval types
lm.fit <- lm(mpg~horsepower, data = Auto) # `<-` for assignment (was `=`)
summary(lm.fit)
summary(Auto)
# confidence interval: uncertainty in the mean response at horsepower = 98
predict(lm.fit, data.frame(horsepower=98), interval = 'confidence')
# prediction interval: also includes irreducible error, so it is wider
predict(lm.fit, data.frame(horsepower=98), interval = 'prediction')
#(b) scatterplot with the fitted least-squares line
plot(Auto$horsepower, Auto$mpg)
abline(lm.fit)
#(c) standard 2x2 panel of regression diagnostic plots
par(mfrow=c(2,2))
plot(lm.fit)
#9: multiple regression of mpg on all Auto predictors
#(a) scatterplot matrix of every variable pair
pairs(Auto)
#(b) correlation matrix, excluding the qualitative `name` column
cor(subset(Auto, select = -name))
#(c) regress mpg on everything except name
lm.fit = lm(mpg~.-name, data=Auto)
summary(lm.fit)
#(d) diagnostics: residual plots, studentized residuals, leverage
plot(lm.fit) # still on the 2x2 layout set up in exercise 8
par(mfrow=c(1,1))
plot(predict(lm.fit), rstudent(lm.fit))
plot(hatvalues(lm.fit))
which.max(hatvalues(lm.fit)) # observation with the highest leverage
#(e) models with interaction terms (`*` adds main effects + interaction,
# `:` adds the interaction only)
lm.fit1 = lm(mpg~cylinders*displacement+cylinders:weight, data=Auto)
summary(lm.fit1)
#(f) transformed predictors: sqrt, log, and a squared term via I()
lm.fit2 = lm(mpg~sqrt(cylinders)+log(weight)+I(acceleration^2), data=Auto)
summary(lm.fit2)
par(mfrow=c(2,2))
plot(lm.fit2)
plot(predict(lm.fit2), rstudent(lm.fit2))
lm.fit3 = lm(log(mpg)~cylinders*weight+acceleration+year+origin, data=Auto)
summary(lm.fit3)
plot(lm.fit3)
#10: Carseats sales model with qualitative predictors
#(a) regress Sales on Price plus the Urban and US dummies
library(ISLR)
lm.fit1 = lm(Sales~Price+Urban+US, data=Carseats)
summary(lm.fit1)
#(e) smaller model: drop the non-significant Urban term
lm.fit2 = lm(Sales~Price+US, data = Carseats)
summary(lm.fit2)
#(g) 95% confidence intervals for the smaller model's coefficients
confint(lm.fit2)
#(h) check for outliers (studentized residuals) and high-leverage points
par(mfrow=c(1,1))
plot(predict(lm.fit2), rstudent(lm.fit2))
plot(hatvalues(lm.fit2))
#11: regression through the origin (no intercept, `+0` in the formula)
set.seed(1)
x=rnorm(100)
y=2*x+rnorm(100)
#(a) y on x without an intercept
lm.fit = lm(y~x+0)
summary(lm.fit)
#(b) x on y without an intercept (different coefficient, same t-statistic)
lm.fit1 = lm(x~y+0)
summary(lm.fit1)
#(f) with an intercept: compare t-statistics of the slope in both directions
lm.fit1 = lm(y~x)
lm.fit2 = lm(x~y)
summary(lm.fit1)
summary(lm.fit2)
#12: when are the two no-intercept slopes equal?
#(b) sum(x^2) != sum(y^2), so the y~x and x~y coefficients differ
set.seed(1)
x=rnorm(100)
y=2*x + rnorm(100)
lm.fit1 = lm(y~x)
lm.fit2 = lm(x~y)
summary(lm.fit1)
summary(lm.fit2)
#(c) y is an exact copy of x, so both regressions give the same coefficient
set.seed(1)
x=rnorm(100)
y=x
lm.fit1 = lm(y~x)
lm.fit2 = lm(x~y)
summary(lm.fit1)
summary(lm.fit2)
#13: simulated simple regression at three noise levels
#(a) predictor
set.seed(1)
x=rnorm(100)
#(b) noise with sd = 0.5
eps=rnorm(100,0,0.5)
#(c) true model: y = -1 + 0.5*x + eps
y=-1+0.5*x+eps
#(d) scatterplot of the simulated data
plot(x,y)
#(e) least-squares fit of y on x
lm.fit = lm(y~x)
summary(lm.fit)
#(f) overlay the fitted line (red) and the true population line (blue)
abline(lm.fit, lwd=3, col='red')
abline(-1, 0.5, lwd=3, col='blue')
legend('right',legend=c('least squared line', 'population regression'),col = c('red','blue'),lwd=3)
#(g) add a quadratic term
# NOTE(review): this overwrites lm.fit, so confint(lm.fit) in (j) below
# reports the quadratic model, not the linear fit from (e) - confirm intent
lm.fit = lm(y~x+I(x^2))
summary(lm.fit)
#(h) repeat with less noise (sd = 0.1)
set.seed(1)
x=rnorm(100)
eps=rnorm(100,0,0.1)
y=-1+0.5*x+eps
plot(x,y)
lm.fit1 = lm(y~x)
summary(lm.fit1)
abline(lm.fit1, lwd=3, col='red')
abline(-1, 0.5, lwd=3, col='blue')
legend('right',legend=c('least squared line', 'population regression'),col = c('red','blue'),lwd=3)
#(i) repeat with more noise (sd = 1)
set.seed(1)
x=rnorm(100)
eps=rnorm(100,0,1)
y=-1+0.5*x+eps
plot(x,y)
lm.fit2 = lm(y~x)
summary(lm.fit2)
abline(lm.fit2, lwd=3, col='red')
abline(-1, 0.5, lwd=3, col='blue')
# NOTE(review): here -1 is passed positionally as legend's x coordinate
# (the other calls use a keyword like 'right') - confirm this draws as intended
legend(-1,legend=c('least squared line', 'population regression'),col = c('red','blue'),lwd=3)
#(j) confidence intervals narrow as the noise level drops
confint(lm.fit)
confint(lm.fit1)
confint(lm.fit2)
#14: collinearity between predictors
#(a) x2 is built from x1, so the two predictors are highly correlated
set.seed(1)
x1=runif(100)
x2=0.5*x1+rnorm(100)/10
y=2+2*x1+0.3*x2+rnorm(100)
#(b) quantify and visualize the collinearity
cor(x1,x2)
plot(x1,x2)
#(c) joint model: collinearity inflates the coefficient standard errors
lm.fit = lm(y~x1+x2)
summary(lm.fit)
#(d) x1 alone
lm.fit1 = lm(y~x1)
summary(lm.fit1)
#(e) x2 alone
lm.fit2 = lm(y~x2)
summary(lm.fit2)
#(g) append one mismeasured observation (obs. 101) and refit everything
x1=c(x1, 0.1)
x2=c(x2, 0.8)
y=c(y,6)
lm.fit = lm(y~x1+x2)
summary(lm.fit)
lm.fit1 = lm(y~x1)
summary(lm.fit1)
lm.fit2 = lm(y~x2)
summary(lm.fit2)
# diagnostics for the refitted models: residuals, studentized residuals, leverage
par(mfrow=c(2,2))
plot(lm.fit)
plot(lm.fit1)
plot(lm.fit2)
plot(predict(lm.fit),rstudent(lm.fit))
plot(predict(lm.fit1), rstudent(lm.fit1))
plot(predict(lm.fit2), rstudent(lm.fit2))
plot(hatvalues(lm.fit))
plot(hatvalues(lm.fit1))
plot(hatvalues(lm.fit2))
#15
#(a)
library(MASS)
summary(Boston)
Boston$chas <- as.factor(Boston$chas)
lm.fit1 = lm(crim~zn, data=Boston)
summary(lm.fit1)
lm.fit2 = lm(crim~indus, data=Boston)
summary(lm.fit2)
lm.fit3 = lm(crim~chas, data=Boston)
summary(lm.fit3)
lm.fit4 = lm(crim~nox, data=Boston)
summary(lm.fit4)
lm.fit5 = lm(crim~rm, data=Boston)
summary(lm.fit5)
lm.fit6 = lm(crim~age, data=Boston)
summary(lm.fit6)
lm.fit7 = lm(crim~dis, data=Boston)
summary(lm.fit7)
lm.fit8 = lm(crim~rad, data=Boston)
summary(lm.fit8)
lm.fit9 = lm(crim~tax, data=Boston)
summary(lm.fit9)
lm.fit10 = lm(crim~ptratio, data=Boston)
summary(lm.fit10)
lm.fit11 = lm(crim~black, data=Boston)
summary(lm.fit11)
lm.fit12 = lm(crim~lstat, data=Boston)
summary(lm.fit12)
lm.fit13 = lm(crim~medv, data=Boston)
summary(lm.fit13)
#(b)
lm.fit = lm(crim~., data=Boston)
summary(lm.fit)
#(c)
x <- c(coef(lm.fit1)[2], coef(lm.fit2)[2], coef(lm.fit3)[2], coef(lm.fit4)[2], coef(lm.fit5)[2],
coef(lm.fit6)[2], coef(lm.fit7)[2], coef(lm.fit8)[2], coef(lm.fit9)[2], coef(lm.fit10)[2],
coef(lm.fit11)[2],coef(lm.fit12)[2],coef(lm.fit13)[2])
y <- c(coef(lm.fit)[-1])
par(mfrow=c(1,1))
plot(x,y)
#(d)
lm.zn = lm(crim~poly(zn,3),data=Boston)
summary(lm.zn)
lm.indus = lm(crim~poly(indus), data=Boston)
summary(lm.indus)
lm.nox = lm(crim~poly(nox), data=Boston)
summary(lm.nox)
lm.rm = lm(crim~poly(rm), data=Boston)
lm.age = lm(crim~poly(age), data=Boston)
summary(lm.age)
lm.dis = lm(crim~poly(dis), data=Boston)
summary(lm.dis)
lm.rad = lm(crim~poly(rad), data=Boston)
summary(lm.rad)
lm.tax = lm(crim~poly(tax), data=Boston)
summary(lm.tax)
lm.ptratio = lm(crim~poly(ptratio), data=Boston)
summary(lm.ptratio)
lm.black = lm(crim~poly(black), data=Boston)
summary(lm.black)
lm.lstat = lm(crim~poly(lstat), data=Boston)
summary(lm.lstat)
lm.medv = lm(crim~poly(medv), data=Boston)
summary(lm.medv)
| /ch3/ch3_exercise.R | no_license | triangel8866/Introduction-to-statistical-learning | R | false | false | 5,101 | r | #8
#(a)
lm.fit = lm(mpg~horsepower, data = Auto)
summary(lm.fit)
summary(Auto)
predict(lm.fit, data.frame(horsepower=98), interval = 'confidence')
predict(lm.fit, data.frame(horsepower=98), interval = 'prediction')
#(b)
plot(Auto$horsepower, Auto$mpg)
abline(lm.fit)
#(c)
par(mfrow=c(2,2))
plot(lm.fit)
#9
#(a)
pairs(Auto)
#(b)
cor(subset(Auto, select = -name))
#(c)
lm.fit = lm(mpg~.-name, data=Auto)
summary(lm.fit)
#(d)
plot(lm.fit)
par(mfrow=c(1,1))
plot(predict(lm.fit), rstudent(lm.fit))
plot(hatvalues(lm.fit))
which.max(hatvalues(lm.fit))
#(e)
lm.fit1 = lm(mpg~cylinders*displacement+cylinders:weight, data=Auto)
summary(lm.fit1)
#(f)
lm.fit2 = lm(mpg~sqrt(cylinders)+log(weight)+I(acceleration^2), data=Auto)
summary(lm.fit2)
par(mfrow=c(2,2))
plot(lm.fit2)
plot(predict(lm.fit2), rstudent(lm.fit2))
lm.fit3 = lm(log(mpg)~cylinders*weight+acceleration+year+origin, data=Auto)
summary(lm.fit3)
plot(lm.fit3)
#10
#(a)
library(ISLR)
lm.fit1 = lm(Sales~Price+Urban+US, data=Carseats)
summary(lm.fit1)
#(e)
lm.fit2 = lm(Sales~Price+US, data = Carseats)
summary(lm.fit2)
#(g)
confint(lm.fit2)
#(h)
par(mfrow=c(1,1))
plot(predict(lm.fit2), rstudent(lm.fit2))
plot(hatvalues(lm.fit2))
#11
set.seed(1)
x=rnorm(100)
y=2*x+rnorm(100)
#(a)
lm.fit = lm(y~x+0)
summary(lm.fit)
#(b)
lm.fit1 = lm(x~y+0)
summary(lm.fit1)
#(f)
lm.fit1 = lm(y~x)
lm.fit2 = lm(x~y)
summary(lm.fit1)
summary(lm.fit2)
#12
#(b)
set.seed(1)
x=rnorm(100)
y=2*x + rnorm(100)
lm.fit1 = lm(y~x)
lm.fit2 = lm(x~y)
summary(lm.fit1)
summary(lm.fit2)
#(c)
set.seed(1)
x=rnorm(100)
y=x
lm.fit1 = lm(y~x)
lm.fit2 = lm(x~y)
summary(lm.fit1)
summary(lm.fit2)
#13
#(a)
set.seed(1)
x=rnorm(100)
#(b)
eps=rnorm(100,0,0.5)
#(c)
y=-1+0.5*x+eps
#(d)
plot(x,y)
#(e)
lm.fit = lm(y~x)
summary(lm.fit)
#(f)
abline(lm.fit, lwd=3, col='red')
abline(-1, 0.5, lwd=3, col='blue')
legend('right',legend=c('least squared line', 'population regression'),col = c('red','blue'),lwd=3)
#(g)
lm.fit = lm(y~x+I(x^2))
summary(lm.fit)
#(h)
set.seed(1)
x=rnorm(100)
eps=rnorm(100,0,0.1)
y=-1+0.5*x+eps
plot(x,y)
lm.fit1 = lm(y~x)
summary(lm.fit1)
abline(lm.fit1, lwd=3, col='red')
abline(-1, 0.5, lwd=3, col='blue')
legend('right',legend=c('least squared line', 'population regression'),col = c('red','blue'),lwd=3)
#(i)
set.seed(1)
x=rnorm(100)
eps=rnorm(100,0,1)
y=-1+0.5*x+eps
plot(x,y)
lm.fit2 = lm(y~x)
summary(lm.fit2)
abline(lm.fit2, lwd=3, col='red')
abline(-1, 0.5, lwd=3, col='blue')
legend(-1,legend=c('least squared line', 'population regression'),col = c('red','blue'),lwd=3)
#(j)
confint(lm.fit)
confint(lm.fit1)
confint(lm.fit2)
#14
#(a)
set.seed(1)
x1=runif(100)
x2=0.5*x1+rnorm(100)/10
y=2+2*x1+0.3*x2+rnorm(100)
#(b)
cor(x1,x2)
plot(x1,x2)
#(c)
lm.fit = lm(y~x1+x2)
summary(lm.fit)
#(d)
lm.fit1 = lm(y~x1)
summary(lm.fit1)
#(e)
lm.fit2 = lm(y~x2)
summary(lm.fit2)
#(g)
x1=c(x1, 0.1)
x2=c(x2, 0.8)
y=c(y,6)
lm.fit = lm(y~x1+x2)
summary(lm.fit)
lm.fit1 = lm(y~x1)
summary(lm.fit1)
lm.fit2 = lm(y~x2)
summary(lm.fit2)
par(mfrow=c(2,2))
plot(lm.fit)
plot(lm.fit1)
plot(lm.fit2)
plot(predict(lm.fit),rstudent(lm.fit))
plot(predict(lm.fit1), rstudent(lm.fit1))
plot(predict(lm.fit2), rstudent(lm.fit2))
plot(hatvalues(lm.fit))
plot(hatvalues(lm.fit1))
plot(hatvalues(lm.fit2))
#15
#(a)
library(MASS)
summary(Boston)
Boston$chas <- as.factor(Boston$chas)
lm.fit1 = lm(crim~zn, data=Boston)
summary(lm.fit1)
lm.fit2 = lm(crim~indus, data=Boston)
summary(lm.fit2)
lm.fit3 = lm(crim~chas, data=Boston)
summary(lm.fit3)
lm.fit4 = lm(crim~nox, data=Boston)
summary(lm.fit4)
lm.fit5 = lm(crim~rm, data=Boston)
summary(lm.fit5)
lm.fit6 = lm(crim~age, data=Boston)
summary(lm.fit6)
lm.fit7 = lm(crim~dis, data=Boston)
summary(lm.fit7)
lm.fit8 = lm(crim~rad, data=Boston)
summary(lm.fit8)
lm.fit9 = lm(crim~tax, data=Boston)
summary(lm.fit9)
lm.fit10 = lm(crim~ptratio, data=Boston)
summary(lm.fit10)
lm.fit11 = lm(crim~black, data=Boston)
summary(lm.fit11)
lm.fit12 = lm(crim~lstat, data=Boston)
summary(lm.fit12)
lm.fit13 = lm(crim~medv, data=Boston)
summary(lm.fit13)
#(b)
lm.fit = lm(crim~., data=Boston)
summary(lm.fit)
#(c)
x <- c(coef(lm.fit1)[2], coef(lm.fit2)[2], coef(lm.fit3)[2], coef(lm.fit4)[2], coef(lm.fit5)[2],
coef(lm.fit6)[2], coef(lm.fit7)[2], coef(lm.fit8)[2], coef(lm.fit9)[2], coef(lm.fit10)[2],
coef(lm.fit11)[2],coef(lm.fit12)[2],coef(lm.fit13)[2])
y <- c(coef(lm.fit)[-1])
par(mfrow=c(1,1))
plot(x,y)
#(d)
lm.zn = lm(crim~poly(zn,3),data=Boston)
summary(lm.zn)
lm.indus = lm(crim~poly(indus), data=Boston)
summary(lm.indus)
lm.nox = lm(crim~poly(nox), data=Boston)
summary(lm.nox)
lm.rm = lm(crim~poly(rm), data=Boston)
lm.age = lm(crim~poly(age), data=Boston)
summary(lm.age)
lm.dis = lm(crim~poly(dis), data=Boston)
summary(lm.dis)
lm.rad = lm(crim~poly(rad), data=Boston)
summary(lm.rad)
lm.tax = lm(crim~poly(tax), data=Boston)
summary(lm.tax)
lm.ptratio = lm(crim~poly(ptratio), data=Boston)
summary(lm.ptratio)
lm.black = lm(crim~poly(black), data=Boston)
summary(lm.black)
lm.lstat = lm(crim~poly(lstat), data=Boston)
summary(lm.lstat)
lm.medv = lm(crim~poly(medv), data=Boston)
summary(lm.medv)
|
x <- c(1,2,4)
q <- c(x,x,8)
x
x[3]
x[2:3]
mean(x)
sd(x)
y <- mean(x)
y # print out y
data()
mean(Nile)
sd(Nile)
hist(Nile)
| /ch01/ex1-2.r | no_license | freebz/The-Art-of-R-Programming | R | false | false | 131 | r | x <- c(1,2,4)
q <- c(x,x,8)
x
x[3]
x[2:3]
mean(x)
sd(x)
y <- mean(x)
y # print out y
data()
mean(Nile)
sd(Nile)
hist(Nile)
|
library(discharge)
### Name: sigmaHighFlows
### Title: Estimate catastrophic flow variability
### Aliases: sigmaHighFlows
### ** Examples
# load data
data(sycamore)
# get streamflow object
sf = asStreamflow(sycamore)
# estimate catastrophic high flow variability
sigmaHighFlows(sf)
| /data/genthat_extracted_code/discharge/examples/sigmaHighFlows.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 291 | r | library(discharge)
### Name: sigmaHighFlows
### Title: Estimate catastrophic flow variability
### Aliases: sigmaHighFlows
### ** Examples
# load data
data(sycamore)
# get streamflow object
sf = asStreamflow(sycamore)
# estimate catastrophic high flow variability
sigmaHighFlows(sf)
|
#'Places an order on Betfair
#'
#'\code{placeOrders} places an order (single bet or multiple bets) on the
#'Betfair betting exchange.
#'
#'\code{placeOrders} places an order (bet) on the Betfair betting exchange. When
#'using this function, be careful not to have logged in with your "DELAY"
#'application key. It is not possible to place bets with the DELAY application
#'key. Executing this function, after a successful login, will place real money
#'bets on the Betfair betting exchange. You do this at your own risk. The author
#'of this package accepts no responsibility if things go wrong. Be careful!
#'Running tests, by placing small bets, is not only a good idea to begin with,
#'it will also probably save you money.
#'
#'@seealso \code{\link{loginBF}}, which must be executed first. Do NOT use the
#' DELAY application key. The DELAY application key does not support placing
#' bets.
#'
#'@param marketId String. The market ID these orders are to be placed on. Can
#' currently only accept a single market ID. IDs can be obtained via
#' \code{\link{listMarketCatalogue}}. Required. No default.
#'@param selectionId String. The selection id of the desired item to bet on. If
#' betting on a Horse Race, this will be a single number to identify a horse.
#' In a Football match, this will be a single ID for one team. IDs can be
#' obtained via \code{\link{listMarketCatalogue}}. Required. No default.
#'@param betSide Sting. Specififies whether the bet is a back or lay. This
#' argument accepts one of two options - BACK or LAY. Must be upper case.See
#' note below explaining each of these options. Required. No default.
#'@param betType String. Supports three order types, one of which must be
#' specified. Valid order types are LIMIT, LIMIT_ON_CLOSE and MARKET_ON_CLOSE.
#' Must be upper case. See note below explaining each of these options.
#' Required. Default is set to LIMIT.
#'@param betSize String. The size of the bet in the currency of your account.
#' Generally, the minimum size for GB accounts is 2 Pounds.
#'@param reqPrice String. The lowest price at which you wish to place your bet.
#' If unmatched higher prices are available on the opposite side of the bet,
#' your order will be matched at those higher prices. Required. No default.
#'@param persistenceType String. What to do with the order, when the market
#' turns in-play. Supports three persistence types, one of which must be
#' specified. Valid persistence types are LAPSE, PERSIST and MARKET_ON_CLOSE.
#' Must be upper case. See note below explaining each of these options.
#' Required. Default is set to LAPSE.
#'@param handicap String. The handicap applied to the selection, if on an
#' asian-style market. Optional. Defaults to 0, meaning no handicap.
#' \code{handicap} must only be manually specified if betting on an asian-style
#' market.
#'@param customerRef String. Optional parameter allowing the client to pass a
#' unique string (up to 32 chars) that is used to de-dupe mistaken
#' re-submissions. CustomerRef can contain: upper/lower chars, digits, chars :
#' - . _ + * : ; ~ only. Optional. Defaults to current system date and time.
#'@param suppress Boolean. By default, this parameter is set to FALSE, meaning
#' that a warning is posted when the placeOrders call throws an error. Changing
#' this parameter to TRUE will suppress this warning.
#'@param sslVerify Boolean. This argument defaults to TRUE and is optional. In
#' some cases, where users have a self signed SSL Certificate, for example they
#' may be behind a proxy server, Betfair will fail login with "SSL certificate
#' problem: self signed certificate in certificate chain". If this error occurs
#' you may set sslVerify to FALSE. This does open a small security risk of a
#' man-in-the-middle intercepting your login credentials.
#'
#'@return Response from Betfair is stored in listMarketCatalogue variable, which
#' is then parsed from JSON as a list. Only the first item of this list
#' contains the required event type identification details.
#'
#'@section Notes on \code{betType} options: There are three options for this
#' argument and one of them must be specified. All upper case letters must be
#' used. \describe{ \item{LIMIT}{A normal exchange limit order for immediate
#' execution. Essentially a bet which will be either matched immediately if
#' possible, or will wait unmatched until the event begins. It will then either
#' remain open in play or expire, depending on \code{persistenceType}}
#' \item{LIMIT_ON_CLOSE}{Limit order for the auction (SP). If the Starting
#' Price (SP) is greater than the value specified in \code{reqPrice} and there
#' is enough market volume, the bet will be matched when the event begins.}
#' \item{MARKET_ON_CLOSE}{Market order for the auction (SP). The bet amount, as
#' specified in \code{betSize}, will be matched at the Starting Price,
#' regardless of price, assuming there is enough market volume.} }
#'
#'@section Notes on \code{betSide} options: There are just two options for this
#' argument and one of them must be specified. All upper case letters must be
#' used. \describe{ \item{BACK}{To back a team, horse or outcome is to bet on
#' the selection to win.} \item{LAY}{To lay a team, horse, or outcome is to bet
#' on the selection to lose.} }
#'
#'@section Notes on \code{persistenceType} options: There are three options for
#' this argument and one of them must be specified. All upper case letters must
#' be used. \describe{ \item{LAPSE}{Lapse the order when the market is turned
#' in-play. Order is canceled if not matched prior to the market turning
#' in-play.} \item{PERSIST}{Persist the order to in-play. The bet will be place
#' automatically into the in-play market at the start of the event.}
#' \item{MARKET_ON_CLOSE}{Put the order into the auction (SP) at turn-in-play.
#' The bet amount, as specified in \code{betSize}, will be matched at the
#' Starting Price, regardless of price, assuming there is enough market
#' volume.} }
#'
#'@section Note on \code{listPlaceOrdersOps} variable: The
#' \code{listPlaceOrdersOps} variable is used to firstly build an R data frame
#' containing all the data to be passed to Betfair, in order for the function
#' to execute successfully. The data frame is then converted to JSON and
#' included in the HTTP POST request. If the placeOrders call throws an error,
#' a data frame containing error information is returned.
#'
#' @examples
#' \dontrun{
#' placeOrders(marketId = "yourMarketId",
#' selectionId = "yourSelectionId",
#' betSide = "BACKORLAY",
#' betType = "LIMITORONCLOSE",
#' betSize = "2",
#' reqPrice = "yourRequestedPrice",
#' persistenceType = "LAPSEORPERSIST")
#'
#' # Place a LIMIT_ON_CLOSE lay bet on a selection on a horse racing market (note that
#' # LIMIT_ON_CLOSE orders only work on markets with Betfair Starting Price (BSP)
#' # enabled):
#'
#' placeOrders(marketId = "1.124156004",
#' selectionId = "8877720", betType = "LIMIT_ON_CLOSE",
#' betSide="LAY",
#' betSize ="2",
#' reqPrice = "1.1")
#'
#' # Place a MARKET_ON_CLOSE lay bet on a selection on a horse racing market (note that
#' # LIMIT_ON_CLOSE orders only work on markets with Betfair Starting Price (BSP)
#' # enabled):
#'
#' placeOrders(marketId = "1.124156004",
#' selectionId = "8877720",
#' betType = "MARKET_ON_CLOSE",
#' betSide="LAY",
#' betSize ="2")
#'
#' # Note that in both MARKET_ON_CLOSE and LIMIT_ON_CLOSE orders, the betSize parameter
#' # specifies the liability of the order. For example, a LIMIT_ON_CLOSE order of betSize=2
#' # and reqPrice = 1.1 is equivalent to a lay bet of 20 at 1.1 (i.e. max liability of
#' # 2 and a minimum profit of 20 if the selection doesn't win).
#'
#' # Place one single lay LIMIT bet on a specific selection on a specific market,
#' # which is set to LAPSE:
#'
#' placeOrders(marketId = "1.123982139",
#' selectionId = "58805",
#' betSide = "LAY",
#' betSize = "2",
#' reqPrice = "1.1")
#'
#' # Place two lay bet at different prices on the same selection in the same market:
#'
#' placeOrders(marketId = "1.123982139",
#' selectionId = "58805",
#' betSide = "LAY",
#' betSize = "2",
#' reqPrice = c("1.1","1.2"))
#'
#' # Place two lay bets of different sizes and at different prices on the same seleciton
#' # in the same market:
#'
#' placeOrders(marketId = "1.123982139",
#' selectionId = "58805",
#' betSide = "LAY",
#' betSize = c("2","3"),
#' reqPrice = c("1.1","1.2"))
#'
#' # Place two lay bets of different sizes and at different prices on different
#' # selections on the same market:
#'
#' placeOrders(marketId = "1.123982139",
#' selectionId = c("58805","68244"),
#' betSide = "LAY",
#' betSize = c("2","3"),
#' reqPrice = c("1.1","1.2"))
#'
#' # Place two lay bets (the first is set to "LAPSE", while the other will "PERSIST") of
#' # different sizes and at different prices on different selections on the same market:
#'
#' placeOrders(marketId = "1.123982139",
#' selectionId = c("58805","68244"),
#' betSide = "LAY",
#' betSize = c("2","3"),
#' reqPrice = c("1.1","1.2"),
#' persistenceType = c("LAPSE","PERSIST"))
#'
#' # Note: The following call should be applied carefully, as incorrect indexing of the
#' # betSide vector (i.e. mixing up the "BACK" and "LAY" positions) could cause
#' # significant problems
#'
#' # Place one back and one lay bet (the back is set to "LAPSE", while lay will "PERSIST") of
#' # different sizes and at different prices on different selections on the same market:
#'
#' placeOrders(marketId = "1.123982139",
#' selectionId = c("58805","68244"),
#' betSide = c("BACK","LAY"), betSize = c("2","3"),
#' reqPrice = c("10","1.2"),
#' persistenceType = c("LAPSE","PERSIST"))
#'
#' }
#'
placeOrders <-
function(marketId, selectionId, betSide, betSize, reqPrice,
betType = "LIMIT", persistenceType = "LAPSE",
handicap = "0", customerRef = (format(Sys.time(), "%Y-%m-%dT%TZ")),
suppress = FALSE, sslVerify = TRUE) {
options(stringsAsFactors = FALSE)
placeOrdersOps <-
data.frame(jsonrpc = "2.0", method = "SportsAPING/v1.0/placeOrders", id = 1)
placeOrdersOps$params <-
data.frame(
marketId = marketId, instructions = c(""), customerRef = customerRef
)
if (betType == "MARKET_ON_CLOSE") {
placeOrdersOps$params$instructions <-
data.frame(
selectionId = selectionId,
handicap = handicap,
side = betSide,
orderType = betType,
marketOnCloseOrder = c("")
)
placeOrdersOps$params$instructions$marketOnCloseOrder <-
data.frame(liability = betSize)
}
else if (betType == "LIMIT_ON_CLOSE") {
placeOrdersOps$params$instructions <-
data.frame(
selectionId = selectionId,
handicap = handicap,
side = betSide,
orderType = betType,
limitOnCloseOrder = c("")
)
placeOrdersOps$params$instructions$limitOnCloseOrder <-
data.frame(liability = betSize, price = reqPrice)
}
else {
instructions.data.frame <-
data.frame(
limitOrder = rep("",max(sapply(list(betSide,betSize,reqPrice,persistenceType),length)))
)
instructions.data.frame$limitOrder <-
data.frame(price = reqPrice)
instructions.data.frame$limitOrder$persistenceType <-
persistenceType
instructions.data.frame$limitOrder$size <- betSize
instructions.data.frame$selectionId <- selectionId
instructions.data.frame$handicap <- handicap
instructions.data.frame$side <- betSide
instructions.data.frame$orderType <- betType
}
if(betType == "LIMIT"){
placeOrdersOps$params$instructions <-
list(instructions.data.frame)}
else(placeOrdersOps$params$instructions <-
list(placeOrdersOps$params$instructions))
placeOrdersOps <-
placeOrdersOps[c("jsonrpc", "method", "params", "id")]
placeOrdersOps <- jsonlite::toJSON(placeOrdersOps, pretty = TRUE)
# Read Environment variables for authorisation details
product <- Sys.getenv('product')
token <- Sys.getenv('token')
headers <- list(
'Accept' = 'application/json', 'X-Application' = product, 'X-Authentication' = token, 'Content-Type' = 'application/json',
'Expect' = ''
)
placeOrders <-
as.list(jsonlite::fromJSON(
RCurl::postForm(
"https://api.betfair.com/exchange/betting/json-rpc/v1", .opts = list(
postfields = placeOrdersOps, httpheader = headers, ssl.verifypeer = sslVerify
)
)
))
if(is.null(placeOrders$error))
as.data.frame(placeOrders$result)
else({
if(!suppress)
warning("Error- See output for details")
as.data.frame(placeOrders$error)})
}
| /R/placeOrders.R | no_license | mihaikondort/abettor | R | false | false | 13,290 | r | #'Places an order on Betfair
#'
#'\code{placeOrders} places an order (single bet or multiple bets) on the
#'Betfair betting exchange.
#'
#'\code{placeOrders} places an order (bet) on the Betfair betting exchange. When
#'using this function, be careful not to have logged in with your "DELAY"
#'application key. It is not possible to place bets with the DELAY application
#'key. Executing this function, after a successful login, will place real money
#'bets on the Betfair betting exchange. You do this at your own risk. The author
#'of this package accepts no responsibility if things go wrong. Be careful!
#'Running tests, by placing small bets, is not only a good idea to begin with,
#'it will also probably save you money.
#'
#'@seealso \code{\link{loginBF}}, which must be executed first. Do NOT use the
#' DELAY application key. The DELAY application key does not support placing
#' bets.
#'
#'@param marketId String. The market ID these orders are to be placed on. Can
#' currently only accept a single market ID. IDs can be obtained via
#' \code{\link{listMarketCatalogue}}. Required. No default.
#'@param selectionId String. The selection id of the desired item to bet on. If
#' betting on a Horse Race, this will be a single number to identify a horse.
#' In a Football match, this will be a single ID for one team. IDs can be
#' obtained via \code{\link{listMarketCatalogue}}. Required. No default.
#'@param betSide Sting. Specififies whether the bet is a back or lay. This
#' argument accepts one of two options - BACK or LAY. Must be upper case.See
#' note below explaining each of these options. Required. No default.
#'@param betType String. Supports three order types, one of which must be
#' specified. Valid order types are LIMIT, LIMIT_ON_CLOSE and MARKET_ON_CLOSE.
#' Must be upper case. See note below explaining each of these options.
#' Required. Default is set to LIMIT.
#'@param betSize String. The size of the bet in the currency of your account.
#' Generally, the minimum size for GB accounts is 2 Pounds.
#'@param reqPrice String. The lowest price at which you wish to place your bet.
#' If unmatched higher prices are available on the opposite side of the bet,
#' your order will be matched at those higher prices. Required. No default.
#'@param persistenceType String. What to do with the order, when the market
#' turns in-play. Supports three persistence types, one of which must be
#' specified. Valid persistence types are LAPSE, PERSIST and MARKET_ON_CLOSE.
#' Must be upper case. See note below explaining each of these options.
#' Required. Default is set to LAPSE.
#'@param handicap String. The handicap applied to the selection, if on an
#' asian-style market. Optional. Defaults to 0, meaning no handicap.
#' \code{handicap} must only be manually specified if betting on an asian-style
#' market.
#'@param customerRef String. Optional parameter allowing the client to pass a
#' unique string (up to 32 chars) that is used to de-dupe mistaken
#' re-submissions. CustomerRef can contain: upper/lower chars, digits, chars :
#' - . _ + * : ; ~ only. Optional. Defaults to current system date and time.
#'@param suppress Boolean. By default, this parameter is set to FALSE, meaning
#' that a warning is posted when the placeOrders call throws an error. Changing
#' this parameter to TRUE will suppress this warning.
#'@param sslVerify Boolean. This argument defaults to TRUE and is optional. In
#' some cases, where users have a self signed SSL Certificate, for example they
#' may be behind a proxy server, Betfair will fail login with "SSL certificate
#' problem: self signed certificate in certificate chain". If this error occurs
#' you may set sslVerify to FALSE. This does open a small security risk of a
#' man-in-the-middle intercepting your login credentials.
#'
#'@return Response from Betfair is stored in listMarketCatalogue variable, which
#' is then parsed from JSON as a list. Only the first item of this list
#' contains the required event type identification details.
#'
#'@section Notes on \code{betType} options: There are three options for this
#' argument and one of them must be specified. All upper case letters must be
#' used. \describe{ \item{LIMIT}{A normal exchange limit order for immediate
#' execution. Essentially a bet which will be either matched immediately if
#' possible, or will wait unmatched until the event begins. It will then either
#' remain open in play or expire, depending on \code{persistenceType}}
#' \item{LIMIT_ON_CLOSE}{Limit order for the auction (SP). If the Starting
#' Price (SP) is greater than the value specified in \code{reqPrice} and there
#' is enough market volume, the bet will be matched when the event begins.}
#' \item{MARKET_ON_CLOSE}{Market order for the auction (SP). The bet amount, as
#' specified in \code{betSize}, will be matched at the Starting Price,
#' regardless of price, assuming there is enough market volume.} }
#'
#'@section Notes on \code{betSide} options: There are just two options for this
#' argument and one of them must be specified. All upper case letters must be
#' used. \describe{ \item{BACK}{To back a team, horse or outcome is to bet on
#' the selection to win.} \item{LAY}{To lay a team, horse, or outcome is to bet
#' on the selection to lose.} }
#'
#'@section Notes on \code{persistenceType} options: There are three options for
#' this argument and one of them must be specified. All upper case letters must
#' be used. \describe{ \item{LAPSE}{Lapse the order when the market is turned
#' in-play. Order is canceled if not matched prior to the market turning
#' in-play.} \item{PERSIST}{Persist the order to in-play. The bet will be place
#' automatically into the in-play market at the start of the event.}
#' \item{MARKET_ON_CLOSE}{Put the order into the auction (SP) at turn-in-play.
#' The bet amount, as specified in \code{betSize}, will be matched at the
#' Starting Price, regardless of price, assuming there is enough market
#' volume.} }
#'
#'@section Note on \code{listPlaceOrdersOps} variable: The
#' \code{listPlaceOrdersOps} variable is used to firstly build an R data frame
#' containing all the data to be passed to Betfair, in order for the function
#' to execute successfully. The data frame is then converted to JSON and
#' included in the HTTP POST request. If the placeOrders call throws an error,
#' a data frame containing error information is returned.
#'
#' @examples
#' \dontrun{
#' placeOrders(marketId = "yourMarketId",
#' selectionId = "yourSelectionId",
#' betSide = "BACKORLAY",
#' betType = "LIMITORONCLOSE",
#' betSize = "2",
#' reqPrice = "yourRequestedPrice",
#' persistenceType = "LAPSEORPERSIST")
#'
#' # Place a LIMIT_ON_CLOSE lay bet on a selection on a horse racing market (note that
#' # LIMIT_ON_CLOSE orders only work on markets with Betfair Starting Price (BSP)
#' # enabled):
#'
#' placeOrders(marketId = "1.124156004",
#' selectionId = "8877720", betType = "LIMIT_ON_CLOSE",
#' betSide="LAY",
#' betSize ="2",
#' reqPrice = "1.1")
#'
#' # Place a MARKET_ON_CLOSE lay bet on a selection on a horse racing market (note that
#' # LIMIT_ON_CLOSE orders only work on markets with Betfair Starting Price (BSP)
#' # enabled):
#'
#' placeOrders(marketId = "1.124156004",
#' selectionId = "8877720",
#' betType = "MARKET_ON_CLOSE",
#' betSide="LAY",
#' betSize ="2")
#'
#' # Note that in both MARKET_ON_CLOSE and LIMIT_ON_CLOSE orders, the betSize parameter
#' # specifies the liability of the order. For example, a LIMIT_ON_CLOSE order of betSize=2
#' # and reqPrice = 1.1 is equivalent to a lay bet of 20 at 1.1 (i.e. max liability of
#' # 2 and a minimum profit of 20 if the selection doesn't win).
#'
#' # Place one single lay LIMIT bet on a specific selection on a specific market,
#' # which is set to LAPSE:
#'
#' placeOrders(marketId = "1.123982139",
#' selectionId = "58805",
#' betSide = "LAY",
#' betSize = "2",
#' reqPrice = "1.1")
#'
#' # Place two lay bet at different prices on the same selection in the same market:
#'
#' placeOrders(marketId = "1.123982139",
#' selectionId = "58805",
#' betSide = "LAY",
#' betSize = "2",
#' reqPrice = c("1.1","1.2"))
#'
#' # Place two lay bets of different sizes and at different prices on the same seleciton
#' # in the same market:
#'
#' placeOrders(marketId = "1.123982139",
#' selectionId = "58805",
#' betSide = "LAY",
#' betSize = c("2","3"),
#' reqPrice = c("1.1","1.2"))
#'
#' # Place two lay bets of different sizes and at different prices on different
#' # selections on the same market:
#'
#' placeOrders(marketId = "1.123982139",
#' selectionId = c("58805","68244"),
#' betSide = "LAY",
#' betSize = c("2","3"),
#' reqPrice = c("1.1","1.2"))
#'
#' # Place two lay bets (the first is set to "LAPSE", while the other will "PERSIST") of
#' # different sizes and at different prices on different selections on the same market:
#'
#' placeOrders(marketId = "1.123982139",
#' selectionId = c("58805","68244"),
#' betSide = "LAY",
#' betSize = c("2","3"),
#' reqPrice = c("1.1","1.2"),
#' persistenceType = c("LAPSE","PERSIST"))
#'
#' # Note: The following call should be applied carefully, as incorrect indexing of the
#' # betSide vector (i.e. mixing up the "BACK" and "LAY" positions) could cause
#' # significant problems
#'
#' # Place one back and one lay bet (the back is set to "LAPSE", while lay will "PERSIST") of
#' # different sizes and at different prices on different selections on the same market:
#'
#' placeOrders(marketId = "1.123982139",
#' selectionId = c("58805","68244"),
#' betSide = c("BACK","LAY"), betSize = c("2","3"),
#' reqPrice = c("10","1.2"),
#' persistenceType = c("LAPSE","PERSIST"))
#'
#' }
#'
placeOrders <-
  function(marketId, selectionId, betSide, betSize, reqPrice,
           betType = "LIMIT", persistenceType = "LAPSE",
           handicap = "0", customerRef = (format(Sys.time(), "%Y-%m-%dT%TZ")),
           suppress = FALSE, sslVerify = TRUE) {
    # Place one or more bets on a Betfair market via the placeOrders method
    # of the Betfair exchange JSON-RPC API.
    #
    # Args:
    #   marketId:        market to bet on (e.g. "1.123456789").
    #   selectionId:     runner(s) to back or lay.
    #   betSide:         "BACK" or "LAY".
    #   betSize:         stake (used as liability for *_ON_CLOSE order types).
    #   reqPrice:        requested price (not used for MARKET_ON_CLOSE).
    #   betType:         "LIMIT" (default), "LIMIT_ON_CLOSE" or "MARKET_ON_CLOSE".
    #   persistenceType: what happens to an unmatched LIMIT bet in-play.
    #   handicap:        runner handicap; "0" for none.
    #   customerRef:     unique reference for this request (defaults to a timestamp).
    #   suppress:        if TRUE, do not emit a warning when the API returns an error.
    #   sslVerify:       toggle SSL peer verification for the HTTPS request.
    #
    # Returns:
    #   A data.frame built from the API "result" component on success, or
    #   from the "error" component on failure.
    #
    # The nested data.frame construction below relies on
    # stringsAsFactors = FALSE. Set it locally and restore the caller's
    # options on exit -- the original left the option changed globally,
    # a side effect on the whole session.
    old_opts <- options(stringsAsFactors = FALSE)
    on.exit(options(old_opts), add = TRUE)
    placeOrdersOps <-
      data.frame(jsonrpc = "2.0", method = "SportsAPING/v1.0/placeOrders", id = 1)
    placeOrdersOps$params <-
      data.frame(
        marketId = marketId, instructions = c(""), customerRef = customerRef
      )
    if (betType == "MARKET_ON_CLOSE") {
      placeOrdersOps$params$instructions <-
        data.frame(
          selectionId = selectionId,
          handicap = handicap,
          side = betSide,
          orderType = betType,
          marketOnCloseOrder = c("")
        )
      placeOrdersOps$params$instructions$marketOnCloseOrder <-
        data.frame(liability = betSize)
    } else if (betType == "LIMIT_ON_CLOSE") {
      placeOrdersOps$params$instructions <-
        data.frame(
          selectionId = selectionId,
          handicap = handicap,
          side = betSide,
          orderType = betType,
          limitOnCloseOrder = c("")
        )
      placeOrdersOps$params$instructions$limitOnCloseOrder <-
        data.frame(liability = betSize, price = reqPrice)
    } else {
      # LIMIT orders: vector arguments are allowed; size the instruction
      # frame to the longest of the per-bet arguments.
      instructions.data.frame <-
        data.frame(
          limitOrder = rep("", max(sapply(list(betSide, betSize, reqPrice, persistenceType), length)))
        )
      instructions.data.frame$limitOrder <-
        data.frame(price = reqPrice)
      instructions.data.frame$limitOrder$persistenceType <- persistenceType
      instructions.data.frame$limitOrder$size <- betSize
      instructions.data.frame$selectionId <- selectionId
      instructions.data.frame$handicap <- handicap
      instructions.data.frame$side <- betSide
      instructions.data.frame$orderType <- betType
    }
    if (betType == "LIMIT") {
      placeOrdersOps$params$instructions <- list(instructions.data.frame)
    } else {
      placeOrdersOps$params$instructions <-
        list(placeOrdersOps$params$instructions)
    }
    # Fix the key order expected by the JSON-RPC endpoint before serialising.
    placeOrdersOps <-
      placeOrdersOps[c("jsonrpc", "method", "params", "id")]
    placeOrdersOps <- jsonlite::toJSON(placeOrdersOps, pretty = TRUE)
    # Read Environment variables for authorisation details
    product <- Sys.getenv('product')
    token <- Sys.getenv('token')
    headers <- list(
      'Accept' = 'application/json', 'X-Application' = product, 'X-Authentication' = token, 'Content-Type' = 'application/json',
      'Expect' = ''
    )
    placeOrders <-
      as.list(jsonlite::fromJSON(
        RCurl::postForm(
          "https://api.betfair.com/exchange/betting/json-rpc/v1", .opts = list(
            postfields = placeOrdersOps, httpheader = headers, ssl.verifypeer = sslVerify
          )
        )
      ))
    if (is.null(placeOrders$error)) {
      as.data.frame(placeOrders$result)
    } else {
      if (!suppress)
        warning("Error- See output for details")
      as.data.frame(placeOrders$error)
    }
  }
|
# R Script for compiling and tidying data as part of
# Peer Reviewed Course Project for
# coursera Getting and Cleaning Data Short Course
# May 2014
library(plyr)       # arrange()
library(data.table) # grouped means via .SD
# import raw data files
# assumes downloaded data file is already 'unzipped' into R working directory
subject_test <- read.table("./UCI HAR Dataset/test/subject_test.txt")
X_test <- read.table("./UCI HAR Dataset/test/X_test.txt")
y_test <- read.table("./UCI HAR Dataset/test/y_test.txt")
subject_train <- read.table("./UCI HAR Dataset/train/subject_train.txt")
X_train <- read.table("./UCI HAR Dataset/train/X_train.txt")
y_train <- read.table("./UCI HAR Dataset/train/y_train.txt")
# import features.txt which lists the column headings
features <- read.table("./UCI HAR Dataset/features.txt")
# Select only data columns containing '-mean()' or '-std()':
# NOTE I have specifically excluded features with 'Mean' as they are
# derived measurements and not the mean or std of actual measurements
# which is how I interpret the requirement 2 of the project
# (the pattern matches feature names containing the text "mean()" or "std()")
X_test <- X_test[,grep("mean\\()|std\\()",features$V2)]
X_train <- X_train[,grep("mean\\()|std\\()",features$V2)]
# combine the test and training sets for 'X', 'y' and 'subject'
X_combined <- rbind(X_test,X_train)
y_combined <- rbind(y_test,y_train)
subject_combined <- rbind(subject_test,subject_train)
# Subset the feature descriptions to use as the column names
# (same pattern as the column selection above, so names align with the data)
columnHeadings <- subset(features, grepl("mean\\()|std\\()",features$V2)==TRUE)
colnames(X_combined) <- as.character(columnHeadings$V2)
Activity <- function(x) {
  # Map a numeric activity code (1-6, as listed in activity_labels.txt of
  # the UCI HAR dataset) to its descriptive label.
  #
  # Args:
  #   x: a single activity code in 1..6.
  # Returns:
  #   The matching label string.
  # Raises:
  #   An explicit error for codes outside 1..6 (the original if-chain
  #   failed with the opaque "object 'y' not found" instead).
  labels <- c("WALKING", "WALKING_UPSTAIRS", "WALKING_DOWNSTAIRS",
              "SITTING", "STANDING", "LAYING")
  if (!x %in% seq_along(labels)) {
    stop("Unknown activity code: ", x)
  }
  labels[[x]]
}
# Replace the numeric activity codes in y with descriptive labels
y_combined$V1 <- sapply(y_combined$V1,Activity)
# Adding an Activity column to the X_combined data
colnames(y_combined) <- c("Activity")
data_yX <- cbind(y_combined,X_combined)
# Adding a Subject column and then sorting to provide a final completed dataset:
colnames(subject_combined) <- c("Subject")
data_complete <- cbind(subject_combined,data_yX)
data_complete <- arrange(data_complete,Activity)
data_complete <- arrange(data_complete,Subject)
# Combine Subject and Activity into a combined identifier
Subject.Activity <- interaction(data_complete$Subject,data_complete$Activity)
# NOTE(review): columns 3:68 (the 66 feature columns after Subject and
# Activity) are hard-coded -- update if the selection pattern changes.
data_complete <- cbind(Subject.Activity,data_complete[,3:68])
# Calculate mean of groups by combinations of Subject and Activity
data_complete2 <- data.table(data_complete)
data_complete3 <- data_complete2[, lapply(.SD, mean), by = Subject.Activity]
data_complete4 <- as.data.frame(data_complete3)
# Separate back out the Subject and Activity into different columns to satisfy
# one of the tidy data requirements
SA.List <- strsplit(as.character(data_complete4[,1]),".",fixed=TRUE)
Subject <- sapply(SA.List, "[[", 1)
# NOTE(review): this assignment shadows the Activity() function defined
# above; harmless here because the function is no longer needed after this.
Activity <- sapply(SA.List, "[[", 2)
# Combine all columns into final dataset
tidy_data <- cbind(Subject,Activity,data_complete4[,2:67])
# Simplify the column label text to remove duplication and extra characters
# NOTE(review): the trailing comma in these gsub() calls passes an empty
# argument -- it works, but should be removed.
names(tidy_data) <- gsub("BodyBody","Body",names(tidy_data),)
names(tidy_data) <- gsub("\\()","",names(tidy_data),)
# Write tidy data to file:
write.table(tidy_data, file = "tidy_data.txt", quote=FALSE, row.names = FALSE) | /run_analysis.R | no_license | matthew9691/Getting-and-Cleaning-Data-Course-Project | R | false | false | 3,432 | r | # R Script for compiling and tidying data as part of
# Peer Reviewed Course Project for
# coursera Getting and Cleaning Data Short Course
# May 2014
library(plyr)
library(data.table)
# import raw data files
# assumes downloaded data file is already 'unzipped' into R working directory
subject_test <- read.table("./UCI HAR Dataset/test/subject_test.txt")
X_test <- read.table("./UCI HAR Dataset/test/X_test.txt")
y_test <- read.table("./UCI HAR Dataset/test/y_test.txt")
subject_train <- read.table("./UCI HAR Dataset/train/subject_train.txt")
X_train <- read.table("./UCI HAR Dataset/train/X_train.txt")
y_train <- read.table("./UCI HAR Dataset/train/y_train.txt")
# import features.txt which lists the column headings
features <- read.table("./UCI HAR Dataset/features.txt")
# Select only data columns containing '-mean()' or '-std()':
# NOTE I have specifically excluded features with 'Mean' as they are
# derived measurements and not the mean or std of actual measurements
# which is how I interpret the requirement 2 of the project
X_test <- X_test[,grep("mean\\()|std\\()",features$V2)]
X_train <- X_train[,grep("mean\\()|std\\()",features$V2)]
# combine the test and training sets for 'X', 'y' and 'subject'
X_combined <- rbind(X_test,X_train)
y_combined <- rbind(y_test,y_train)
subject_combined <- rbind(subject_test,subject_train)
# Subset the feature descriptions to use as the column names
columnHeadings <- subset(features, grepl("mean\\()|std\\()",features$V2)==TRUE)
colnames(X_combined) <- as.character(columnHeadings$V2)
# Replace y_combined Activity code with correct Activity descriptor
Activity <- function(x) {
  # Convert a numeric activity code (1-6) into its descriptive label.
  # Codes outside 1-6 raise an error, since no label gets assigned.
  if (x == 1) label <- "WALKING"
  if (x == 2) label <- "WALKING_UPSTAIRS"
  if (x == 3) label <- "WALKING_DOWNSTAIRS"
  if (x == 4) label <- "SITTING"
  if (x == 5) label <- "STANDING"
  if (x == 6) label <- "LAYING"
  return(label)
}
y_combined$V1 <- sapply(y_combined$V1,Activity)
# Adding an Activity column to the X_combined data
colnames(y_combined) <- c("Activity")
data_yX <- cbind(y_combined,X_combined)
# Adding a Subject column and then sorting to provide a final completed dataset:
colnames(subject_combined) <- c("Subject")
data_complete <- cbind(subject_combined,data_yX)
data_complete <- arrange(data_complete,Activity)
data_complete <- arrange(data_complete,Subject)
# Combine Subject and Activity into a combined identifier
Subject.Activity <- interaction(data_complete$Subject,data_complete$Activity)
data_complete <- cbind(Subject.Activity,data_complete[,3:68])
# Calculate mean of groups by combinations of Subject and Activity
data_complete2 <- data.table(data_complete)
data_complete3 <- data_complete2[, lapply(.SD, mean), by = Subject.Activity]
data_complete4 <- as.data.frame(data_complete3)
# Seperate back out the Subject and Activity into different columns to satisfy
# one of the tidy data requirements
SA.List <- strsplit(as.character(data_complete4[,1]),".",fixed=TRUE)
Subject <- sapply(SA.List, "[[", 1)
Activity <- sapply(SA.List, "[[", 2)
# Combine all columns into final dataset
tidy_data <- cbind(Subject,Activity,data_complete4[,2:67])
# Simplify the column label text to remove duplicatation and extra characters
names(tidy_data) <- gsub("BodyBody","Body",names(tidy_data),)
names(tidy_data) <- gsub("\\()","",names(tidy_data),)
# Write tidy data to file:
write.table(tidy_data, file = "tidy_data.txt", quote=FALSE, row.names = FALSE) |
# Fuzzer-generated regression input for esreg::G1_fun: an integer type
# selector and a subnormal double argument (stress-tests edge handling).
testlist <- list(type = 1L, z = 2.71827660824939e-310)
result <- do.call(esreg::G1_fun,testlist)
str(result) | /esreg/inst/testfiles/G1_fun/libFuzzer_G1_fun/G1_fun_valgrind_files/1609893517-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 108 | r | testlist <- list(type = 1L, z = 2.71827660824939e-310)
result <- do.call(esreg::G1_fun,testlist)  # invoke with the fuzzed argument list
str(result)  # print the structure; the fuzz harness checks for crashes, not values
baseline.als <- function(spectra, lambda=6, p=0.05, maxit = 20){
  ## Eilers baseline correction for Asymmetric Least Squares
  ## Migrated from MATLAB original by Kristian Hovde Liland
  ## $Id: baseline.als.R 170 2011-01-03 20:38:25Z bhm $
  #
  # INPUT:
  # spectra - rows of spectra
  # lambda - 2nd derivative constraint (the penalty weight is 10^lambda)
  # p - regression weight for positive residuals
  # maxit - maximum number of reweighting iterations per spectrum
  #
  # OUTPUT:
  # baseline - proposed baseline
  # corrected - baseline corrected spectra
  # wgts - final regression weights
  #
  # NOTE: relies on a sparse matrix backend providing as.matrix.csr and
  # its diag/diff/solve methods (SparseM).
  mn <- dim(spectra)
  baseline <- matrix(0,mn[1],mn[2])
  wgts <- matrix(0,mn[1],mn[2])
  # Sparse empty matrix (m x m)
  empt <- as.matrix.csr(0,mn[2],mn[2])
  # Diagonal sparse matrix (m x m)
  speye <- empt
  diag(speye) <- 1
  # Second-difference operator; DD = 10^lambda * D'D is the smoothness
  # penalty matrix of the Whittaker smoother.
  D <- diff(speye,differences=2)
  DD <- 10^lambda*t(D)%*%D
  # Iterate through spectra
  for(i in 1:mn[1]){
    w <- rep.int(1, mn[2])
    y <- spectra[i,]
    # Iterate restriction and weighting
    for(it in 1:maxit){
      W <- empt
      diag(W) <- w
      # Restricted regression: solve the penalized weighted least-squares
      # system (W + DD) z = w*y for the smooth baseline estimate z
      z <- solve(W+DD,w*y)
      w_old <- w
      # Weights for next regression: asymmetric -- points above the
      # baseline (peaks) get the small weight p, points below get 1-p,
      # and points exactly on the baseline get weight 0
      w <- p * (y > z) + (1 - p) * (y < z)
      sw <- sum(w_old != w)
      # Break if no change from last iteration
      if(sw == 0)
        break;
    }
    baseline[i,] <- z
    wgts[i,] <- w
  }
  corrected <- spectra-baseline
  list(baseline=baseline,corrected=corrected,wgts=wgts)
}
| /baseline/R/baseline.als.R | no_license | ingted/R-Examples | R | false | false | 1,461 | r | baseline.als <- function(spectra, lambda=6, p=0.05, maxit = 20){
## Eilers baseline correction for Asymmetric Least Squares
## Migrated from MATLAB original by Kristian Hovde Liland
## $Id: baseline.als.R 170 2011-01-03 20:38:25Z bhm $
#
# INPUT:
# spectra - rows of spectra
# lambda - 2nd derivative constraint
# p - regression weight for positive residuals
#
# OUTPUT:
# baseline - proposed baseline
# corrected - baseline corrected spectra
# wgts - final regression weights
mn <- dim(spectra)
baseline <- matrix(0,mn[1],mn[2])
wgts <- matrix(0,mn[1],mn[2])
# Sparse empty matrix (m x m)
empt <- as.matrix.csr(0,mn[2],mn[2])
# Diagonal sparse matrix (m x m)
speye <- empt
diag(speye) <- 1
D <- diff(speye,differences=2)
DD <- 10^lambda*t(D)%*%D
# Iterate through spectra
for(i in 1:mn[1]){
w <- rep.int(1, mn[2])
y <- spectra[i,]
# Iterate restriction and weighting
for(it in 1:maxit){
W <- empt
diag(W) <- w
# Restricted regression
z <- solve(W+DD,w*y)
w_old <- w
# Weights for next regression
w <- p * (y > z) + (1 - p) * (y < z)
sw <- sum(w_old != w)
# Break if no change from last iteration
if(sw == 0)
break;
}
baseline[i,] <- z
wgts[i,] <- w
}
corrected <- spectra-baseline
list(baseline=baseline,corrected=corrected,wgts=wgts)
}
|
#' @title circacompare
#' @name circacompare
#'
#' @description \code{circacompare} performs a comparison between two rhythmic groups of data. It tests for rhythmicity and then fits a nonlinear model with parametrization to estimate and statistically support differences in mesor, amplitude, and phase between groups.
#'
#' @param x data.frame. This is the data.frame which contains the rhythmic data for two groups in a tidy format.
#' @param col_time The name of the column within the data.frame, x, which contains time in hours at which the data were collected.
#' @param col_group The name of the column within the data.frame, x, which contains the grouping variable. This should only have two levels.
#' @param col_outcome The name of the column within the data.frame, x, which contains outcome measure of interest.
#' @param period The period of the rhythm. For circadian rhythms, leave this as the default value, 24.
#' @param alpha_threshold The level of alpha for which the presence of rhythmicity is considered. Default is 0.05.
#' @param timeout_n The upper limit for the model fitting attempts. Default is 10,000.
#' @param control \code{list}. Used to control the parameterization of the model.
#'
#' @return list
#' @export
#'
#' @examples
#' df <- make_data(phi1 = 6)
#' out <- circacompare(x = df, col_time = "time", col_group = "group",
#' col_outcome = "measure")
#' out
circacompare <- function(x,
                         col_time,
                         col_group,
                         col_outcome,
                         period = 24,
                         alpha_threshold = 0.05,
                         timeout_n = 10000,
                         control = list()){
  # Compare two groups of rhythmic data: test each group for rhythmicity,
  # then fit a joint nonlinear model whose grouped parameters quantify
  # differences in mesor, amplitude and phase.
  # Returns list(plot, summary, fit); see the roxygen block above for
  # argument documentation.
  #
  # requireNamespace() is the canonical availability check (the original
  # scanned installed.packages(), which is slow and touches the disk).
  if(!requireNamespace("ggplot2", quietly = TRUE)){
    return(message("Please install 'ggplot2'"))
  }
  controlVals <- circacompare_control()
  controlVals[names(control)] <- control
  # Scalar conditions use && / || (short-circuit); the original used the
  # element-wise & operator.
  if(controlVals$period_param && !"tau" %in% controlVals$main_params){
    controlVals$main_params <- c(controlVals$main_params, "tau")
  }
  if("tau" %in% controlVals$main_params){
    controlVals$period_param <- TRUE
  }
  x <- x[c(col_time, col_group, col_outcome)]
  colnames(x) <- c("time", "group", "measure")
  if(length(levels(as.factor(x$group))) != 2){
    return(message("Your grouping variable had more or less than 2 levels! \nThis function is used to compare two groups of data. \nTo avoid me having to guess, please send data with only two possible values in your grouping variable to this function."))
  }
  if(!inherits(x$time, c("numeric", "integer"))){
    return(message(paste("The time variable which you gave was a '",
                         class(x$time),
                         "' \nThis function expects time to be given as hours and be of class 'integer' or 'numeric'.",
                         "\nPlease convert the time variable in your dataframe to be of one of these classes",
                         sep = "")))
  }
  if(!inherits(x$measure, c("numeric", "integer"))){
    return(message(paste("The measure variable which you gave was a '",
                         class(x$measure),
                         "' \nThis function expects measure to be number and be of class 'integer' or 'numeric'.",
                         "\nPlease convert the measure variable in your dataframe to be of one of these classes",
                         sep = "")))
  }
  if(!inherits(period, c("numeric", "integer")) && !controlVals$period_param){
    return(message(paste0("The period argument must be a number representing the period of the rhythm in hours\n",
                          "If you would like the period to be estimated as part of the model, use:\ncontrol=list(period_param=TRUE)")))
  }
  if(controlVals$period_param && !is.na(period)){
    message(paste0("control$period_param is TRUE\n'period=", period, "' is being ignored.\nSet 'period=NA' to avoid this message"))
  }
  x$time_r <- x$time*2*pi
  if(!controlVals$period_param){
    x$period <- period
  }else{
    # BUG FIX: the original tested is.null(controlVals$period_min) twice;
    # the second test must be against period_max.
    if(is.null(controlVals$period_min) || is.null(controlVals$period_max)){
      message(paste0("If you want the model to estimate the period using a parameter,",
                     "you may get faster convergence if you provide an approximate range using 'period_min' and 'period_max' in control()",
                     "\nCurrently assuming period is between: period_min=", controlVals$period_min,
                     "and period_max=", controlVals$period_max))
    }
  }
  group_1_text <- levels(as.factor(x$group))[1]
  group_2_text <- levels(as.factor(x$group))[2]
  x$x_group <- ifelse(x$group == group_1_text, 0, 1)
  dat_group_1 <- x[x$group == group_1_text,]
  dat_group_2 <- x[x$group == group_2_text,]
  # Fit each group separately first: tests for rhythmicity and provides
  # starting values for the joint model.
  form_single <- create_formula(main_params=controlVals$main_params, decay_params=controlVals$decay_params)$formula
  g1_model <- model_each_group(data=dat_group_1, type="nls", form=form_single,
                               controlVals=controlVals,
                               args=list(
                                 timeout_n=timeout_n,
                                 alpha_threshold=alpha_threshold
                               ))
  if(g1_model$timeout){return(message("Failed to converge", group_1_text, " model prior to timeout. \nYou may try to increase the allowed attempts before timeout by increasing the value of the 'timeout_n' argument or setting a new seed before this function.\nIf you have repeated difficulties, please contact me (via github) or Oliver Rawashdeh (contact details in manuscript)."))}
  g2_model <- model_each_group(data=dat_group_2, type="nls", form=form_single,
                               controlVals=controlVals,
                               args=list(
                                 timeout_n=timeout_n,
                                 alpha_threshold=alpha_threshold
                               ))
  if(g2_model$timeout){return(message("Failed to converge", group_2_text, " model prior to timeout. \nYou may try to increase the allowed attempts before timeout by increasing the value of the 'timeout_n' argument or setting a new seed before this function.\nIf you have repeated difficulties, please contact me (via github) or Oliver Rawashdeh (contact details in manuscript)."))}
  both_groups_rhythmic <- g1_model$rhythmic && g2_model$rhythmic
  if(!both_groups_rhythmic){
    if(!g1_model$rhythmic && !g2_model$rhythmic){
      return(message("Both groups of data were arrhythmic (to the power specified by the argument 'alpha_threshold').\nThe data was, therefore, not used for a comparison between the two groups."))
    }
    if(!g1_model$rhythmic){
      return(message(group_1_text, " was arrhythmic (to the power specified by the argument 'alpha_threshold').\nThe data was, therefore, not used for a comparison between the two groups."))
    }else{
      return(message(group_2_text, " was arrhythmic (to the power specified by the argument 'alpha_threshold').\nThe data was, therefore, not used for a comparison between the two groups."))
    }
  }
  n <- 0
  success <- FALSE
  form_group <- create_formula(main_params=controlVals$main_params, decay_params=controlVals$decay_params, grouped_params=controlVals$grouped_params)$formula
  # Re-attempt the joint fit until the parameter estimates pass
  # assess_model_estimates() or the attempt budget is exhausted.
  while(!success){
    fit.nls <- try({stats::nls(formula=form_group,
                               data=x,
                               start=start_list_grouped(g1=g1_model$model, g2=g2_model$model, grouped_params=controlVals$grouped_params),
                               control=stats::nls.control(maxiter = 100, minFactor = 1/10000)
    )},
    silent = FALSE)
    # inherits() is the robust test for try() failures; comparing class()
    # with == is fragile because class() can return a vector.
    if(inherits(fit.nls, "try-error")){
      n <- n + 1
    }else{
      nls_coefs <- extract_model_coefs(fit.nls)
      V <- nls_coefs[, 'estimate']
      success <- assess_model_estimates(param_estimates=V)
      n <- n + 1
    }
    if(n > timeout_n){
      return(message("Both groups of data were rhythmic but the curve fitting procedure failed due to timing out. \nYou may try to increase the allowed attempts before timeout by increasing the value of the 'timeout_n' argument or setting a new seed before this function.\nIf you have repeated difficulties, please contact me (via github) or Oliver Rawashdeh (contact details in manuscript)."))
    }
  }
  if(!controlVals$period_param){V['tau'] <- period}
  # Per-group prediction curves: the eval() calls below define eq_1/eq_2,
  # which are overlaid on the raw data.
  eq_expression <- create_formula(main_params=controlVals$main_params,
                                  decay_params=controlVals$decay_params,
                                  grouped_params=controlVals$grouped_params)$f_equation
  eval(parse(text=eq_expression$g1))
  eval(parse(text=eq_expression$g2))
  fig_out <- ggplot2::ggplot(x, ggplot2::aes(time, measure)) +
    ggplot2::stat_function(ggplot2::aes(colour = group_1_text), fun = eq_1, size = 1) +
    ggplot2::stat_function(ggplot2::aes(colour = group_2_text), fun = eq_2, size = 1) +
    ggplot2::geom_point(ggplot2::aes(colour = group)) +
    ggplot2::scale_colour_manual(breaks = c(group_1_text, group_2_text),
                                 values = c("blue", "red")) +
    ggplot2::labs(colour = 'Legend',
                  x = "time (hours)")+
    ggplot2::xlim(min(floor(x$time/V['tau']) * V['tau']),
                  max(ceiling(x$time/V['tau']) * V['tau']))
  results_summary <-
    circa_summary(model=fit.nls, period=period, control=controlVals,
                  g1=g1_model, g2=g2_model, g1_text=group_1_text, g2_text=group_2_text)
  return(list(plot=fig_out, summary=results_summary, fit=fit.nls))
}
circacompare_control <- function(period_param = FALSE, period_min = 20, period_max = 28,
                                 main_params = c("k", "alpha", "phi"),
                                 grouped_params = c("k", "alpha", "phi"),
                                 decay_params = c()){
  # Build the control list consumed by circacompare().
  # FALSE is spelled out (the original used `F`, which user code can
  # reassign, silently changing the default).
  list(period_param = period_param, period_min = period_min, period_max = period_max,
       main_params = main_params, grouped_params = grouped_params,
       decay_params = decay_params)
}
| /R/circacompare.R | no_license | cran/circacompare | R | false | false | 10,220 | r | #' @title circacompare
#' @name circacompare
#'
#' @description \code{circacompare} performs a comparison between two rhythmic groups of data. It tests for rhythmicity and then fits a nonlinear model with parametrization to estimate and statistically support differences in mesor, amplitude, and phase between groups.
#'
#' @param x data.frame. This is the data.frame which contains the rhythmic data for two groups in a tidy format.
#' @param col_time The name of the column within the data.frame, x, which contains time in hours at which the data were collected.
#' @param col_group The name of the column within the data.frame, x, which contains the grouping variable. This should only have two levels.
#' @param col_outcome The name of the column within the data.frame, x, which contains outcome measure of interest.
#' @param period The period of the rhythm. For circadian rhythms, leave this as the default value, 24.
#' @param alpha_threshold The level of alpha for which the presence of rhythmicity is considered. Default is 0.05.
#' @param timeout_n The upper limit for the model fitting attempts. Default is 10,000.
#' @param control \code{list}. Used to control the parameterization of the model.
#'
#' @return list
#' @export
#'
#' @examples
#' df <- make_data(phi1 = 6)
#' out <- circacompare(x = df, col_time = "time", col_group = "group",
#' col_outcome = "measure")
#' out
circacompare <- function(x,
                         col_time,
                         col_group,
                         col_outcome,
                         period = 24,
                         alpha_threshold = 0.05,
                         timeout_n = 10000,
                         control = list()){
  # Compare two groups of rhythmic data: test each group for rhythmicity,
  # then fit a joint nonlinear model whose grouped parameters quantify
  # differences in mesor, amplitude and phase.
  # Returns list(plot, summary, fit); see the roxygen block above for
  # argument documentation.
  #
  # requireNamespace() is the canonical availability check (the original
  # scanned installed.packages(), which is slow and touches the disk).
  if(!requireNamespace("ggplot2", quietly = TRUE)){
    return(message("Please install 'ggplot2'"))
  }
  controlVals <- circacompare_control()
  controlVals[names(control)] <- control
  # Scalar conditions use && / || (short-circuit); the original used the
  # element-wise & operator.
  if(controlVals$period_param && !"tau" %in% controlVals$main_params){
    controlVals$main_params <- c(controlVals$main_params, "tau")
  }
  if("tau" %in% controlVals$main_params){
    controlVals$period_param <- TRUE
  }
  x <- x[c(col_time, col_group, col_outcome)]
  colnames(x) <- c("time", "group", "measure")
  if(length(levels(as.factor(x$group))) != 2){
    return(message("Your grouping variable had more or less than 2 levels! \nThis function is used to compare two groups of data. \nTo avoid me having to guess, please send data with only two possible values in your grouping variable to this function."))
  }
  if(!inherits(x$time, c("numeric", "integer"))){
    return(message(paste("The time variable which you gave was a '",
                         class(x$time),
                         "' \nThis function expects time to be given as hours and be of class 'integer' or 'numeric'.",
                         "\nPlease convert the time variable in your dataframe to be of one of these classes",
                         sep = "")))
  }
  if(!inherits(x$measure, c("numeric", "integer"))){
    return(message(paste("The measure variable which you gave was a '",
                         class(x$measure),
                         "' \nThis function expects measure to be number and be of class 'integer' or 'numeric'.",
                         "\nPlease convert the measure variable in your dataframe to be of one of these classes",
                         sep = "")))
  }
  if(!inherits(period, c("numeric", "integer")) && !controlVals$period_param){
    return(message(paste0("The period argument must be a number representing the period of the rhythm in hours\n",
                          "If you would like the period to be estimated as part of the model, use:\ncontrol=list(period_param=TRUE)")))
  }
  if(controlVals$period_param && !is.na(period)){
    message(paste0("control$period_param is TRUE\n'period=", period, "' is being ignored.\nSet 'period=NA' to avoid this message"))
  }
  x$time_r <- x$time*2*pi
  if(!controlVals$period_param){
    x$period <- period
  }else{
    # BUG FIX: the original tested is.null(controlVals$period_min) twice;
    # the second test must be against period_max.
    if(is.null(controlVals$period_min) || is.null(controlVals$period_max)){
      message(paste0("If you want the model to estimate the period using a parameter,",
                     "you may get faster convergence if you provide an approximate range using 'period_min' and 'period_max' in control()",
                     "\nCurrently assuming period is between: period_min=", controlVals$period_min,
                     "and period_max=", controlVals$period_max))
    }
  }
  group_1_text <- levels(as.factor(x$group))[1]
  group_2_text <- levels(as.factor(x$group))[2]
  x$x_group <- ifelse(x$group == group_1_text, 0, 1)
  dat_group_1 <- x[x$group == group_1_text,]
  dat_group_2 <- x[x$group == group_2_text,]
  # Fit each group separately first: tests for rhythmicity and provides
  # starting values for the joint model.
  form_single <- create_formula(main_params=controlVals$main_params, decay_params=controlVals$decay_params)$formula
  g1_model <- model_each_group(data=dat_group_1, type="nls", form=form_single,
                               controlVals=controlVals,
                               args=list(
                                 timeout_n=timeout_n,
                                 alpha_threshold=alpha_threshold
                               ))
  if(g1_model$timeout){return(message("Failed to converge", group_1_text, " model prior to timeout. \nYou may try to increase the allowed attempts before timeout by increasing the value of the 'timeout_n' argument or setting a new seed before this function.\nIf you have repeated difficulties, please contact me (via github) or Oliver Rawashdeh (contact details in manuscript)."))}
  g2_model <- model_each_group(data=dat_group_2, type="nls", form=form_single,
                               controlVals=controlVals,
                               args=list(
                                 timeout_n=timeout_n,
                                 alpha_threshold=alpha_threshold
                               ))
  if(g2_model$timeout){return(message("Failed to converge", group_2_text, " model prior to timeout. \nYou may try to increase the allowed attempts before timeout by increasing the value of the 'timeout_n' argument or setting a new seed before this function.\nIf you have repeated difficulties, please contact me (via github) or Oliver Rawashdeh (contact details in manuscript)."))}
  both_groups_rhythmic <- g1_model$rhythmic && g2_model$rhythmic
  if(!both_groups_rhythmic){
    if(!g1_model$rhythmic && !g2_model$rhythmic){
      return(message("Both groups of data were arrhythmic (to the power specified by the argument 'alpha_threshold').\nThe data was, therefore, not used for a comparison between the two groups."))
    }
    if(!g1_model$rhythmic){
      return(message(group_1_text, " was arrhythmic (to the power specified by the argument 'alpha_threshold').\nThe data was, therefore, not used for a comparison between the two groups."))
    }else{
      return(message(group_2_text, " was arrhythmic (to the power specified by the argument 'alpha_threshold').\nThe data was, therefore, not used for a comparison between the two groups."))
    }
  }
  n <- 0
  success <- FALSE
  form_group <- create_formula(main_params=controlVals$main_params, decay_params=controlVals$decay_params, grouped_params=controlVals$grouped_params)$formula
  # Re-attempt the joint fit until the parameter estimates pass
  # assess_model_estimates() or the attempt budget is exhausted.
  while(!success){
    fit.nls <- try({stats::nls(formula=form_group,
                               data=x,
                               start=start_list_grouped(g1=g1_model$model, g2=g2_model$model, grouped_params=controlVals$grouped_params),
                               control=stats::nls.control(maxiter = 100, minFactor = 1/10000)
    )},
    silent = FALSE)
    # inherits() is the robust test for try() failures; comparing class()
    # with == is fragile because class() can return a vector.
    if(inherits(fit.nls, "try-error")){
      n <- n + 1
    }else{
      nls_coefs <- extract_model_coefs(fit.nls)
      V <- nls_coefs[, 'estimate']
      success <- assess_model_estimates(param_estimates=V)
      n <- n + 1
    }
    if(n > timeout_n){
      return(message("Both groups of data were rhythmic but the curve fitting procedure failed due to timing out. \nYou may try to increase the allowed attempts before timeout by increasing the value of the 'timeout_n' argument or setting a new seed before this function.\nIf you have repeated difficulties, please contact me (via github) or Oliver Rawashdeh (contact details in manuscript)."))
    }
  }
  if(!controlVals$period_param){V['tau'] <- period}
  # Per-group prediction curves: the eval() calls below define eq_1/eq_2,
  # which are overlaid on the raw data.
  eq_expression <- create_formula(main_params=controlVals$main_params,
                                  decay_params=controlVals$decay_params,
                                  grouped_params=controlVals$grouped_params)$f_equation
  eval(parse(text=eq_expression$g1))
  eval(parse(text=eq_expression$g2))
  fig_out <- ggplot2::ggplot(x, ggplot2::aes(time, measure)) +
    ggplot2::stat_function(ggplot2::aes(colour = group_1_text), fun = eq_1, size = 1) +
    ggplot2::stat_function(ggplot2::aes(colour = group_2_text), fun = eq_2, size = 1) +
    ggplot2::geom_point(ggplot2::aes(colour = group)) +
    ggplot2::scale_colour_manual(breaks = c(group_1_text, group_2_text),
                                 values = c("blue", "red")) +
    ggplot2::labs(colour = 'Legend',
                  x = "time (hours)")+
    ggplot2::xlim(min(floor(x$time/V['tau']) * V['tau']),
                  max(ceiling(x$time/V['tau']) * V['tau']))
  results_summary <-
    circa_summary(model=fit.nls, period=period, control=controlVals,
                  g1=g1_model, g2=g2_model, g1_text=group_1_text, g2_text=group_2_text)
  return(list(plot=fig_out, summary=results_summary, fit=fit.nls))
}
circacompare_control <- function(period_param = FALSE, period_min = 20, period_max = 28,
                                 main_params = c("k", "alpha", "phi"),
                                 grouped_params = c("k", "alpha", "phi"),
                                 decay_params = c()){
  # Build the control list consumed by circacompare().
  # FALSE is spelled out (the original used `F`, which user code can
  # reassign, silently changing the default).
  list(period_param = period_param, period_min = period_min, period_max = period_max,
       main_params = main_params, grouped_params = grouped_params,
       decay_params = decay_params)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bin.R
\name{bin_variance}
\alias{bin_variance}
\title{Variance of a Binomial Random Variable}
\usage{
bin_variance(trials, prob)
}
\arguments{
\item{trials}{number of trials}
\item{prob}{probability for success in each trial}
}
\value{
the variance of the binomial random variable
}
\description{
Calculates the variance of a binomial random variable given the number of trials and the probability of success in each trial.
}
| /binomial/man/bin_variance.Rd | no_license | stat133-sp19/hw-stat133-Jason-Zhen | R | false | true | 483 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bin.R
\name{bin_variance}
\alias{bin_variance}
\title{Variance of a Binomial Random Variable}
\usage{
bin_variance(trials, prob)
}
\arguments{
\item{trials}{number of trials}
\item{prob}{probability for success in each trial}
}
\value{
the variance of the binomial random variable
}
\description{
Calculates the variance of a binomial random variable given the number of trials and the probability of success in each trial.
}
|
setwd("~/code/skola/tdde01/adam")
library(geosphere)
set.seed(1234567890)
stations = read.csv("lab5/stations.csv")
temps = read.csv("lab5/temps50k.csv")
st = merge(stations, temps, by="station_number")[sample(1:50000, 50000),]
n=dim(st)[1]
id=sample(1:n, floor(n*0.75))
train=st[id,]
test=st[-id,]
difftime_distance = function(x, y, unit_, format_) {
  # Signed time difference (y - x) between two timestamp strings parsed
  # with format_, returned as a double in the requested difftime unit
  # (e.g. "secs", "hours", "days").
  t_first = strptime(x, format = format_)
  t_second = strptime(y, format = format_)
  as.double(difftime(t_second, t_first, units = unit_))
}
diff_hour = function(t1, t2) {
  # Absolute difference between two "HH:MM:SS" clock times, in seconds.
  # BUG FIX: difftime() without `units` picks units automatically
  # (secs/mins/hours depending on magnitude), so the value fed to the
  # kernel was inconsistent with the bandwidth h_time, which this script
  # defines in seconds (3*3600). Units are now pinned to seconds.
  return(abs(as.numeric(difftime(strptime(t1, "%H:%M:%S"),
                                 strptime(t2, "%H:%M:%S"),
                                 units = "secs"))))
}
diff_date = function(t1, t2) {
  # Signed difference (t1 - t2) between two "YYYY-MM-DD" date strings,
  # in days. BUG FIX: units are pinned to "days" (consistent with h_date
  # and the %% 365 wrap in date_distance) instead of letting difftime()
  # choose them from the magnitude of the difference.
  return(as.numeric(difftime(strptime(t1, "%Y-%m-%d "),
                             strptime(t2, "%Y-%m-%d "),
                             units = "days")))
}
k_gaussian = function(u, h) {
  # Gaussian kernel: the weight decays as exp(-(|u/h|)^2), where h is
  # the bandwidth controlling how quickly distant points lose influence.
  scaled = abs(u / h)
  exp(-(scaled^2))
}
station_distance = function(p1, p2) {
  # Gaussian-kernel weight for spatial closeness: the Haversine
  # great-circle distance (geosphere::distHaversine) pushed through the
  # kernel with the global bandwidth h_distance.
  # NOTE(review): confirm the coordinate column order passed by the
  # caller (train[4:5]) matches what distHaversine expects.
  return(k_gaussian(distHaversine(p1,p2), h_distance))
}
date_distance = function(d1, d2) {
  # Gaussian-kernel weight for day-of-year closeness: the day difference
  # is folded into [0, 365) with %% before kernelising with the global
  # bandwidth h_date.
  # NOTE(review): %% maps a difference of -1 day to 364, so dates one day
  # apart across a year boundary get almost no weight -- confirm intended.
  return(k_gaussian((diff_date(d1, d2) %% 365), h_date))
}
hour_distance = function(h1, h2) {
  # Gaussian-kernel weight for time-of-day closeness with the global
  # bandwidth h_time (defined in seconds elsewhere in this script).
  # NOTE(review): the unit of diff_hour()'s result depends on difftime's
  # automatic unit selection -- verify it is consistent with h_time.
  return(k_gaussian(diff_hour(h1, h2), h_time))
}
calc_temp = function(dist, Y) {
  # Kernel-weighted mean of the observations Y: normalise the combined
  # kernel weights to sum to one, then take their inner product with Y.
  total = sum(dist)
  weights = dist / total
  as.vector(weights) %*% as.vector(Y)
}
h_distance <- 60000 # These three values are up to the students
h_date <- 40
h_time <- 3*3600
c_distance = 0.1
c_date = 1
c_time = 0.1
a = 58.413497 # The point to predict (up to the students)
b = 15.582597
point = c(a, b)
date = "2013-02-02" # The date to predict (up to the students)
times = c("04:00:00",
"06:00:00",
"08:00:00",
"10:00:00",
"12:00:00",
"14:00:00",
"16:00:00",
"18:00:00",
"20:00:00",
"22:00:00",
"23:00:00")
temp = vector(length=length(times))
# Students’ code here
predict_temps = function(point_, date_, hour_) {
  # Kernel-smoothed temperature prediction for one location and date.
  # If hour_ is missing, a prediction is made for every entry of the
  # global `times` vector and a numeric vector is returned; otherwise a
  # single prediction for hour_ is returned.
  # Relies on globals: train (station data), times, and the c_* mixing
  # coefficients that combine the three kernels additively.
  station_dist = station_distance(point_, train[4:5])
  date_dist = date_distance(date_, train$date)
  temps_ = c()
  if (missing(hour_)) {
    # One prediction per time of day in the global `times` vector
    for(i in 1:length(times)) {
      hour_dist = hour_distance(times[i], train$time)
      sum_dist = c_distance*station_dist + c_date*date_dist + c_time*hour_dist
      temps_[i] = calc_temp(sum_dist, train$air_temperature)
    }
    return(temps_)
  } else {
    hour_dist = hour_distance(hour_, train$time)
    sum_dist = c_distance*station_dist + c_date*date_dist + c_time*hour_dist
    temp = calc_temp(sum_dist, train$air_temperature)
    return(temp)
  }
}
# Predict only one day
# One prediction per entry of the global `times` vector for the target point/date.
pred_one_day = predict_temps(point, date)
# NOTE(review): seq(4, 24, 2) has 11 points like `times`, but the last time
# in `times` is 23:00, not 24:00, so the final point is plotted 1 h late.
plot(seq(4, 24, 2), pred_one_day, main="predicted temperatures for 2013-02-02", ylab="Temperature", xlab="Time of day")
# train test
# Hold-out evaluation, left commented out: predict each test row and plot
# predicted vs. real temperature.
# NOTE(review): `test_row$longitud` looks like a typo for `longitude` -- if a
# column of that name does not exist, `$` returns NULL; verify before enabling.
#Yhat = c()
#for(i in 1:nrow(test)) {
# test_row = test[i,]
# Yhat[i] = predict_temps(c(test_row$latitude, test_row$longitud), test_row$date, test_row$time)
#}
#plot(Yhat, test$air_temperature, ylim=range(-30,30), xlim=range(-30,30), ylab="Real temperature", xlab="Predicted temperature")
# NOTE(review): abline(1,1) draws intercept 1, slope 1; the identity
# reference line would be abline(0,1).
#abline(1,1)
# Diagnostic plots of the three kernels' weight curves, used to sanity-check
# the chosen bandwidths h_distance, h_date and h_time.
plot_h_values = function() {
# Distances generated in steps of 10 000 (one step per plotted index);
# NOTE(review): the x axis shows the sequence index, not the raw distance.
plot(k_gaussian(matrix(seq(1,600000,10000)), h_distance), main="Weights for geographical distance", ylab="Weight", xlab="Distance (swedish miles)", col="blue")
# Day-of-year differences 1..365, zoomed to the first 100 days.
plot(k_gaussian(matrix(seq(1,365,1)), h_date), xlim=range(0,100), main="Weights for difference in day of the year", ylab="Weight", xlab="Difference (days)", col="blue")
# Time-of-day differences in 2-hour steps over a full day (values in seconds);
# NOTE(review): here too the x axis is the index, not hours -- confirm labels.
plot(k_gaussian(matrix(seq(0,24*3600,2*3600)), h_time), main="Weights for difference in hour of the day", ylab="Weight", xlab="Difference (hours)", col="blue")
}
| /adam/lab5/51.r | no_license | adamnyberg/machine-learning | R | false | false | 3,619 | r | setwd("~/code/skola/tdde01/adam")
library(geosphere)
set.seed(1234567890)
# Station metadata and temperature readings for the kernel-regression lab.
stations = read.csv("lab5/stations.csv")
temps = read.csv("lab5/temps50k.csv")
# Join readings with their station info, then shuffle the rows.
# NOTE(review): hard-codes 50 000 rows; sample() errors if the merge yields
# fewer -- verify against the input files.
st = merge(stations, temps, by="station_number")[sample(1:50000, 50000),]
n=dim(st)[1]
# 75/25 train/test split on the shuffled rows.
id=sample(1:n, floor(n*0.75))
train=st[id,]
test=st[-id,]
difftime_distance = function(x, y, unit_, format_) {
date_strings = c(x, y)
datetimes = strptime(date_strings, format=format_)
diff = as.double(difftime(datetimes[2], datetimes[1], units=unit_))
return(diff)
}
# Absolute gap between two "HH:MM:SS" clock times, in SECONDS.
#
# Bug fix: the original relied on difftime()'s default units = "auto", so
# the returned unit flipped between seconds, minutes and hours depending on
# the size of the gap, while the bandwidth h_time = 3*3600 is expressed in
# seconds. Pinning units = "secs" makes the two consistent and the function
# unit-stable.
diff_hour = function(t1, t2) {
  return(abs(as.numeric(difftime(strptime(t1, "%H:%M:%S"),
                                 strptime(t2, "%H:%M:%S"),
                                 units = "secs"))))
}
# Signed difference t1 - t2 between two "YYYY-MM-DD" strings, as a plain
# number of days for date-level gaps (difftime's default "auto" units).
# NOTE(review): "auto" units could resolve to hours for a gap that is not a
# whole number of days (e.g. across a DST change) -- confirm acceptable.
diff_date = function(t1, t2) {
  first = strptime(t1, "%Y-%m-%d ")
  second = strptime(t2, "%Y-%m-%d ")
  return(as.numeric(difftime(first, second)))
}
# Gaussian (RBF) kernel: weight in (0, 1] decaying with |u| relative to
# the bandwidth h. Vectorised over u.
k_gaussian = function(u, h) {
  exp(-abs(u / h)^2)
}
# Kernel weight for the great-circle distance between two lon/lat points
# (geosphere's distHaversine; metres by its default Earth radius -- confirm).
# Uses the global bandwidth h_distance.
station_distance = function(p1, p2) {
  k_gaussian(distHaversine(p1, p2), h = h_distance)
}
# Kernel weight for the day-of-year distance between two dates.
# Uses the globals diff_date() and h_date.
#
# Bug fix: the original used diff %% 365 directly, which is not a circular
# distance -- a date one day EARLIER gives (-1) %% 365 = 364, so two nearly
# identical dates received almost zero weight. Wrap the difference onto the
# yearly circle and take the shorter arc, so the distance is symmetric in
# d1/d2 and always lies in [0, 182.5].
date_distance = function(d1, d2) {
  wrapped = diff_date(d1, d2) %% 365        # now in [0, 365)
  circular = pmin(wrapped, 365 - wrapped)   # shorter way around the year
  return(k_gaussian(circular, h_date))
}
# Kernel weight for the clock-time gap between h1 and h2.
# Uses the globals diff_hour() and h_time.
hour_distance = function(h1, h2) {
  k_gaussian(diff_hour(h1, h2), h = h_time)
}
# Kernel-weighted (Nadaraya-Watson) average of the observations Y, using
# the per-observation kernel weights in dist.
#
# dist: non-negative kernel weights, one per observation.
# Y:    observed temperatures, same length as dist.
# Returns a single numeric value.
#
# Fixes: the original ended on an assignment, so the result was only
# returned invisibly, and the matrix product yielded a 1x1 matrix rather
# than a scalar. The weighted mean is now computed directly and returned
# explicitly as a plain numeric.
calc_temp = function(dist, Y) {
  weights = as.vector(dist) / sum(dist)   # normalise so the weights sum to 1
  return(sum(weights * as.vector(Y)))
}
# Kernel bandwidths (the three values are tuning choices left to the students).
h_distance <- 60000      # presumably metres, matching distHaversine -- confirm
h_date     <- 40         # days
h_time     <- 3 * 3600   # presumably seconds -- confirm against diff_hour's unit
# Relative weights of the three kernels in the summed kernel.
c_distance <- 0.1
c_date     <- 1
c_time     <- 0.1
# The point to predict (up to the students).
a <- 58.413497   # latitude
b <- 15.582597   # longitude
point <- c(a, b)
# The date to predict (up to the students).
date <- "2013-02-02"
# Times of day at which predictions are made.
times <- c(
  "04:00:00", "06:00:00", "08:00:00", "10:00:00", "12:00:00",
  "14:00:00", "16:00:00", "18:00:00", "20:00:00", "22:00:00",
  "23:00:00"
)
temp <- vector(length = length(times))
# Students’ code here
# Predict temperature(s) at a location and date by a sum of three Gaussian
# kernels (geographic distance, day of year, hour of day) over the training
# set. If hour_ is omitted, one prediction is made for every entry of the
# global `times` vector and a numeric vector is returned; otherwise a single
# prediction is returned.
# Relies on the globals train, times, c_distance, c_date and c_time.
predict_temps = function(point_, date_, hour_) {
  # The spatial and date weights do not depend on the hour; compute them once.
  geo_w = station_distance(point_, train[4:5])
  day_w = date_distance(date_, train$date)
  weights_at = function(clock) {
    c_distance * geo_w + c_date * day_w + c_time * hour_distance(clock, train$time)
  }
  if (missing(hour_)) {
    preds = c()
    for (clock in times) {
      preds = c(preds, calc_temp(weights_at(clock), train$air_temperature))
    }
    return(preds)
  }
  return(calc_temp(weights_at(hour_), train$air_temperature))
}
# Predict only one day
# One prediction per entry of the global `times` vector for the target point/date.
pred_one_day = predict_temps(point, date)
# NOTE(review): seq(4, 24, 2) has 11 points like `times`, but the last time
# in `times` is 23:00, not 24:00, so the final point is plotted 1 h late.
plot(seq(4, 24, 2), pred_one_day, main="predicted temperatures for 2013-02-02", ylab="Temperature", xlab="Time of day")
# train test
# Hold-out evaluation, left commented out: predict each test row and plot
# predicted vs. real temperature.
# NOTE(review): `test_row$longitud` looks like a typo for `longitude` -- if a
# column of that name does not exist, `$` returns NULL; verify before enabling.
#Yhat = c()
#for(i in 1:nrow(test)) {
# test_row = test[i,]
# Yhat[i] = predict_temps(c(test_row$latitude, test_row$longitud), test_row$date, test_row$time)
#}
#plot(Yhat, test$air_temperature, ylim=range(-30,30), xlim=range(-30,30), ylab="Real temperature", xlab="Predicted temperature")
# NOTE(review): abline(1,1) draws intercept 1, slope 1; the identity
# reference line would be abline(0,1).
#abline(1,1)
# Diagnostic plots of the three kernels' weight curves, used to sanity-check
# the chosen bandwidths h_distance, h_date and h_time.
plot_h_values = function() {
# Distances generated in steps of 10 000 (one step per plotted index);
# NOTE(review): the x axis shows the sequence index, not the raw distance.
plot(k_gaussian(matrix(seq(1,600000,10000)), h_distance), main="Weights for geographical distance", ylab="Weight", xlab="Distance (swedish miles)", col="blue")
# Day-of-year differences 1..365, zoomed to the first 100 days.
plot(k_gaussian(matrix(seq(1,365,1)), h_date), xlim=range(0,100), main="Weights for difference in day of the year", ylab="Weight", xlab="Difference (days)", col="blue")
# Time-of-day differences in 2-hour steps over a full day (values in seconds);
# NOTE(review): here too the x axis is the index, not hours -- confirm labels.
plot(k_gaussian(matrix(seq(0,24*3600,2*3600)), h_time), main="Weights for difference in hour of the day", ylab="Weight", xlab="Difference (hours)", col="blue")
}
|
# Fuzzer-generated regression input for ggforce:::enclose_points: an integer
# id vector alongside an EMPTY x and a single NaN y (mismatched component
# lengths), exercising the function's handling of degenerate input.
testlist <- list(id = c(131072L, 1895825408L, 524543L, -10261403L, 1667968622L, 1634559347L, -16777216L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), x = numeric(0), y = NaN)
# Invoke the internal (:::) function with the list entries as arguments.
result <- do.call(ggforce:::enclose_points,testlist)
str(result) | /ggforce/inst/testfiles/enclose_points/libFuzzer_enclose_points/enclose_points_valgrind_files/1610030019-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 383 | r | testlist <- list(id = c(131072L, 1895825408L, 524543L, -10261403L, 1667968622L, 1634559347L, -16777216L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), x = numeric(0), y = NaN)
result <- do.call(ggforce:::enclose_points,testlist)
str(result) |
library(glmnet)
# Training data: response in column 1, predictors from column 4 onward.
mydata = read.table("./TrainingSet/RF/cervix.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
# 10-fold cross-validated elastic net with alpha = 0.01 (nearly ridge),
# gaussian response, MAE as the CV loss, standardised predictors.
glm = cv.glmnet(x,y,nfolds=10,type.measure="mae",alpha=0.01,family="gaussian",standardize=TRUE)
# Append the fitted path summary to the model log file.
# NOTE(review): if print() errors, the sink is left open; consider
# on.exit(sink()) or capture.output() in future revisions.
sink('./Model/EN/Classifier/cervix/cervix_007.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
| /Model/EN/Classifier/cervix/cervix_007.R | no_license | leon1003/QSMART | R | false | false | 350 | r | library(glmnet)
mydata = read.table("./TrainingSet/RF/cervix.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mae",alpha=0.01,family="gaussian",standardize=TRUE)
sink('./Model/EN/Classifier/cervix/cervix_007.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.