content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
#' Measure the distribution of narrow-ranged or endemic species.
#'
#' \code{weighted_endemism} is species richness inversely weighted
#' by species ranges.
#
#' @param x A (sparse) community matrix.
#' @keywords bioregion
#' @importFrom Matrix rowSums Diagonal Matrix colSums
#'
#' @return A data frame of species traits by site.
#'
#' @references
#' Crisp, M.D., Laffan, S., Linder, H.P. & Monro, A. (2001) Endemism in the
#' Australian flora. \emph{Journal of Biogeography} \strong{28}: 183–198.
#'
#' @examples
#' require(raster)
#' data(africa)
#' Endm <- weighted_endemism(africa$comm)
#' m <- merge(africa$polys, data.frame(grids=names(Endm), WE=Endm), by="grids")
#' m <- m[!is.na(m@data$WE),]
#'
#' plot_swatch(m, values = m$WE, k=20)
#'
#' @export
weighted_endemism <- function(x) {
  # Coerce dense matrices (with more than two columns) to sparse storage.
  if (inherits(x, "matrix") && ncol(x) > 2) {
    x <- Matrix(x, sparse = TRUE)
  }
  if (!is(x, "sparseMatrix")) {
    stop("x needs to be a sparse matrix!")
  }
  # Binarise abundances: count species presences, not occurrence totals.
  x@x[x@x > 1e-8] <- 1
  # Weight each species (column) by the inverse of its range size (number
  # of occupied sites), then sum those weights per site (row).
  range_weights <- Diagonal(x = 1 / colSums(x))
  rowSums(x %*% range_weights)
}
| /R/WeightedEndemism.R | no_license | kostask84/phyloregion | R | false | false | 1,062 | r | #' Measure the distribution of narrow-ranged or endemic species.
#'
#' \code{weighted_endemism} is species richness inversely weighted
#' by species ranges.
#
#' @param x A (sparse) community matrix.
#' @keywords bioregion
#' @importFrom Matrix rowSums Diagonal Matrix colSums
#'
#' @return A data frame of species traits by site.
#'
#' @references
#' Crisp, M.D., Laffan, S., Linder, H.P. & Monro, A. (2001) Endemism in the
#' Australian flora. \emph{Journal of Biogeography} \strong{28}: 183–198.
#'
#' @examples
#' require(raster)
#' data(africa)
#' Endm <- weighted_endemism(africa$comm)
#' m <- merge(africa$polys, data.frame(grids=names(Endm), WE=Endm), by="grids")
#' m <- m[!is.na(m@data$WE),]
#'
#' plot_swatch(m, values = m$WE, k=20)
#'
#' @export
# Weighted endemism: species richness inversely weighted by species range
# size (the number of sites each species occupies).
weighted_endemism <- function(x){
# dense community matrices are converted to sparse storage first
if(inherits(x, "matrix") && ncol(x)>2) x <- Matrix(x, sparse=TRUE)
if(!is(x, "sparseMatrix")) stop("x needs to be a sparse matrix!")
x@x[x@x > 1e-8] <- 1 # we want to count species and not occurrences
# each species (column) contributes 1 / (its number of occupied sites);
# the per-site (row) sum of these weights is the weighted endemism score
y <- rowSums(x %*% Diagonal(x = 1 / colSums(x) ) )
y
}
|
# run_analysis.R
# requires library(dplyr), library(plyr), and library(reshape2)
# install package if necessary
if("plyr" %in% rownames(installed.packages()) == FALSE) {install.packages("plyr")};library(plyr)
if("dplyr" %in% rownames(installed.packages()) == FALSE) {install.packages("dplyr")};library(dplyr)
if("reshape2" %in% rownames(installed.packages()) == FALSE) {install.packages("reshape2")};library(reshape2)
############################
# Overview of this code:
############################
# Merges the training and the test sets to create one data set.
# Extracts only the measurements on the mean and standard deviation for each measurement.
# Uses descriptive activity names to name the activities in the data set
# Appropriately labels the data set with descriptive variable names.
# From the data set in step 4, creates a second, independent tidy data set with the
# average of each variable (measurement) for each activity and each subject.
############################
# Function: set_up_file_names
############################
# takes 3 arguments: a main data directory, a list of sub directories, and a list of file types
# returns a table of data files for each sub directory (columns) and file type (rows)
# systematizes file names from sub directories (test, train) and file types (X, Y, subject)
# Build a table of data-file paths: one column per sub directory (data set)
# and one row per file type.  Cell [j, i] holds
# "<data_dir>/<sub_dir i>/<file_type j>_<sub_dir i>.txt".
set_up_file_names <- function(data_dir, sub_dirs, file_types) {
  file_tbl <- as.data.frame(matrix(rep(file_types, length(sub_dirs)),
                                   ncol = length(sub_dirs)),
                            row.names = file_types, stringsAsFactors = F)
  names(file_tbl) <- sub_dirs
  # Fill each data set's column in one vectorized step instead of cell by cell.
  for (i in seq_along(sub_dirs)) {
    sub_dir <- sub_dirs[i]
    file_tbl[[i]] <- file.path(data_dir, sub_dir,
                               paste0(file_types, "_", sub_dir, ".txt"))
  }
  file_tbl
}
############################
# Function: check_data_exists
############################
# takes 2 arguments: name of the data directory and the name of the zipped file
# if the data directory doesn't exist, we check if the zipped file exists
# if the zipped file exists, the file is unzipped
# Ensure the data directory exists: when it is missing but the downloaded
# zip archive is present, extract the archive into the working directory.
check_data_exists <- function(data_dir, zip_filename) {
  needs_unzip <- !file.exists(data_dir) && file.exists(zip_filename)
  if (needs_unzip) {
    unzip(zip_filename)
  }
}
############################
# Function: get_features
############################
# takes 1 argument: name of feature file
# returns vector of feature names (ordered by feature number)
# Read the feature list ("<id> <name>" per line, space separated) and
# return the feature names in file order (i.e. ordered by feature number).
get_features <- function(feature_file) {
  features <- read.table(feature_file, header = F, sep = " ")
  names(features) <- c("FeatureID", "FeatureName")
  features$FeatureName
}
############################
# Function: get_activities
############################
# takes 1 argument: name of labeled activity file
# returns vector of activity names (ordered by activity number)
# Read the activity-label file ("<id> <name>" per line) and return the
# activity names as a vector ordered by activity number (via plyr/dplyr
# `arrange`, so the file's own line order does not matter).
get_activities <- function(activity_file) {
  activities <- read.table(activity_file, header = F, sep = " ")
  names(activities) <- c("ActivityID", "ActivityName")
  as.vector(t(arrange(activities, ActivityID)["ActivityName"]))
}
############################
# Function: annotate_data_set
############################
# takes 3 arguments: table of file names, list of feature names, list of activity names
# returns a list of tables (one table per data set)
# uses descriptive activity names to name the activities in the data set
# appropriately labels the data set with descriptive variable names.
# Build one tidy (long-format) table per data set.
#
# Arguments:
#   all_file_names - table of file paths with rows "X"/"Y"/"subject" and one
#                    column per data set (as produced by set_up_file_names)
#   feature_names  - measurement (feature) names, ordered by feature number
#   activity_names - activity labels, ordered by activity number
# Returns a list with one data frame per data set, each melted to the columns
# SubjectID, ActivityName, MeasurementName, MeasurementValue.
annotate_data_set <- function(all_file_names, feature_names, activity_names) {
  # Collect results in a local list.  (The original version read a
  # pre-existing global `data_table_list` from the calling environment,
  # which errored if the caller forgot to create it first.)
  data_table_list <- list()
  for (data_set in seq_len(ncol(all_file_names))) {
    # read data (X file) and annotate features as column names
    measurements <- read.table(all_file_names["X", data_set])
    names(measurements) <- feature_names
    # read activity info (Y file) for this data set
    activity_info <- read.table(all_file_names["Y", data_set], header = F)
    names(activity_info) <- "ActivityID"
    # The row indices in the X and Y files pair measurements with activities,
    # so cbind (not merge) keeps that pairing intact.
    data_table <- cbind(activity_info, measurements)
    # Replace numeric activity ids with descriptive activity names.
    data_table$ActivityID <- factor(data_table$ActivityID, labels = activity_names)
    names(data_table)[1] <- "ActivityName"
    # read subject info (subject file) for this data set
    subject_data <- read.table(all_file_names["subject", data_set], header = F, sep = " ")
    names(subject_data) <- "SubjectID"
    data_table <- cbind(subject_data, data_table)
    # Tidy: melt to SubjectID / ActivityName / MeasurementName / MeasurementValue.
    data_table <- melt(data_table, id.vars = c("SubjectID", "ActivityName"),
                       variable.name = "MeasurementName", value.name = "MeasurementValue")
    data_table_list[[data_set]] <- data_table
  }
  data_table_list
}
############################
# Load data
############################
# Locate the data directory, unzipping the downloaded archive if needed.
# NOTE: the original `"UCI\ HAR\ Dataset"` is not valid R -- "\ " is an
# unrecognized escape and fails to parse; spaces need no escaping inside
# an R string literal.
data_dir <- "UCI HAR Dataset"
zip_filename <- "getdata-projectfiles-UCI HAR Dataset.zip"
check_data_exists(data_dir, zip_filename)
# indicate which sub-directories (data sets) to consider
sub_dirs <- c("test", "train")
# indicate which types of files to consider for data sets
file_types <- c("X", "Y", "subject")
# read in feature list
feature_file <- paste(data_dir, "features.txt", sep = "/")
feature_names <- get_features(feature_file)
# read in activity labels
activity_label_file <- paste(data_dir, "activity_labels.txt", sep = "/")
activity_names <- get_activities(activity_label_file)
# each data set's table will be added to an overall list
data_table_list <- list()
# get the file names for all data sets
all_file_names <- set_up_file_names(data_dir, sub_dirs, file_types)
###############################
# Annotate the data sets
###############################
data_table_list <- annotate_data_set(all_file_names, feature_names, activity_names)
###############################
# Merge the data sets (training and the test sets) to create one data set
###############################
# do.call(rbind, ...) concatenates however many data sets were produced;
# the original `for(i in 2:length(...))` loop broke for a single data set
# because 2:1 counts downwards.
all_data <- do.call(rbind, data_table_list)
# clean up intermediate data structures
rm(data_table_list)
############################
# Extract only the measurements on the mean and standard deviation
############################
# NOTE(review): this pattern also keeps meanFreq() features and any name
# containing "std" -- confirm that is the intended selection.
tidy_data <- all_data[grepl("-mean|std", all_data$MeasurementName, ignore.case = TRUE), ]
# clean up intermediate data structures
rm(all_data)
############################
# Create a second, independent tidy data set with the average of each variable
# for each activity and each subject
############################
# NOTE(review): tbl_df()/summarise_each()/funs() are deprecated in current
# dplyr; kept for compatibility with the dplyr version this script targets.
second_tidy_data <- tbl_df(tidy_data)
by_subject_activity <- second_tidy_data %>%
  group_by(SubjectID, ActivityName, MeasurementName) %>%
  summarise_each(funs(mean), matches("Value"))
names(by_subject_activity)[4] <- "MeasurementAverage"
# write second tidy data set to txt file (`row.names` spelled out; the
# original relied on partial matching of `row.name`)
write.table(by_subject_activity, file = "subject_activity_averages.txt",
            row.names = FALSE, quote = FALSE, sep = "\t")
| /run_analysis.R | no_license | radams21/GetDataCourseProject | R | false | false | 8,135 | r | # run_analysis.R
# requires library(dplyr), library(plyr), and library(reshape2)
# install package if necessary
if("plyr" %in% rownames(installed.packages()) == FALSE) {install.packages("plyr")};library(plyr)
if("dplyr" %in% rownames(installed.packages()) == FALSE) {install.packages("dplyr")};library(dplyr)
if("reshape2" %in% rownames(installed.packages()) == FALSE) {install.packages("reshape2")};library(reshape2)
############################
# Overview of this code:
############################
# Merges the training and the test sets to create one data set.
# Extracts only the measurements on the mean and standard deviation for each measurement.
# Uses descriptive activity names to name the activities in the data set
# Appropriately labels the data set with descriptive variable names.
# From the data set in step 4, creates a second, independent tidy data set with the
# average of each variable (measurement) for each activity and each subject.
############################
# Function: set_up_file_names
############################
# takes 3 arguments: a main data directory, a list of sub directories, and a list of file types
# returns a table of data files for each sub directory (columns) and file type (rows)
# systematizes file names from sub directories (test, train) and file types (X, Y, subject)
# Build a table of data-file paths: one column per sub directory (data set)
# and one row per file type.  Cell [j, i] holds
# "<data_dir>/<sub_dir i>/<file_type j>_<sub_dir i>.txt".
set_up_file_names <- function(data_dir, sub_dirs, file_types) {
  file_tbl <- as.data.frame(matrix(rep(file_types, length(sub_dirs)),
                                   ncol = length(sub_dirs)),
                            row.names = file_types, stringsAsFactors = F)
  names(file_tbl) <- sub_dirs
  # Fill each data set's column in one vectorized step instead of cell by cell.
  for (i in seq_along(sub_dirs)) {
    sub_dir <- sub_dirs[i]
    file_tbl[[i]] <- file.path(data_dir, sub_dir,
                               paste0(file_types, "_", sub_dir, ".txt"))
  }
  file_tbl
}
############################
# Function: check_data_exists
############################
# takes 2 arguments: name of the data directory and the name of the zipped file
# if the data directory doesn't exist, we check if the zipped file exists
# if the zipped file exists, the file is unzipped
# Ensure the data directory exists: when it is missing but the downloaded
# zip archive is present, extract the archive into the working directory.
check_data_exists <- function(data_dir, zip_filename) {
  needs_unzip <- !file.exists(data_dir) && file.exists(zip_filename)
  if (needs_unzip) {
    unzip(zip_filename)
  }
}
############################
# Function: get_features
############################
# takes 1 argument: name of feature file
# returns vector of feature names (ordered by feature number)
# Read the feature list ("<id> <name>" per line, space separated) and
# return the feature names in file order (i.e. ordered by feature number).
get_features <- function(feature_file) {
  features <- read.table(feature_file, header = F, sep = " ")
  names(features) <- c("FeatureID", "FeatureName")
  features$FeatureName
}
############################
# Function: get_activities
############################
# takes 1 argument: name of labeled activity file
# returns vector of activity names (ordered by activity number)
# Read the activity-label file ("<id> <name>" per line) and return the
# activity names as a vector ordered by activity number (via plyr/dplyr
# `arrange`, so the file's own line order does not matter).
get_activities <- function(activity_file) {
  activities <- read.table(activity_file, header = F, sep = " ")
  names(activities) <- c("ActivityID", "ActivityName")
  as.vector(t(arrange(activities, ActivityID)["ActivityName"]))
}
############################
# Function: annotate_data_set
############################
# takes 3 arguments: table of file names, list of feature names, list of activity names
# returns a list of tables (one table per data set)
# uses descriptive activity names to name the activities in the data set
# appropriately labels the data set with descriptive variable names.
# Build one tidy (long-format) table per data set.
#
# Arguments:
#   all_file_names - table of file paths with rows "X"/"Y"/"subject" and one
#                    column per data set (as produced by set_up_file_names)
#   feature_names  - measurement (feature) names, ordered by feature number
#   activity_names - activity labels, ordered by activity number
# Returns a list with one data frame per data set, each melted to the columns
# SubjectID, ActivityName, MeasurementName, MeasurementValue.
annotate_data_set <- function(all_file_names, feature_names, activity_names) {
  # Collect results in a local list.  (The original version read a
  # pre-existing global `data_table_list` from the calling environment,
  # which errored if the caller forgot to create it first.)
  data_table_list <- list()
  for (data_set in seq_len(ncol(all_file_names))) {
    # read data (X file) and annotate features as column names
    measurements <- read.table(all_file_names["X", data_set])
    names(measurements) <- feature_names
    # read activity info (Y file) for this data set
    activity_info <- read.table(all_file_names["Y", data_set], header = F)
    names(activity_info) <- "ActivityID"
    # The row indices in the X and Y files pair measurements with activities,
    # so cbind (not merge) keeps that pairing intact.
    data_table <- cbind(activity_info, measurements)
    # Replace numeric activity ids with descriptive activity names.
    data_table$ActivityID <- factor(data_table$ActivityID, labels = activity_names)
    names(data_table)[1] <- "ActivityName"
    # read subject info (subject file) for this data set
    subject_data <- read.table(all_file_names["subject", data_set], header = F, sep = " ")
    names(subject_data) <- "SubjectID"
    data_table <- cbind(subject_data, data_table)
    # Tidy: melt to SubjectID / ActivityName / MeasurementName / MeasurementValue.
    data_table <- melt(data_table, id.vars = c("SubjectID", "ActivityName"),
                       variable.name = "MeasurementName", value.name = "MeasurementValue")
    data_table_list[[data_set]] <- data_table
  }
  data_table_list
}
############################
# Load data
############################
# Locate the data directory, unzipping the downloaded archive if needed.
# NOTE: the original `"UCI\ HAR\ Dataset"` is not valid R -- "\ " is an
# unrecognized escape and fails to parse; spaces need no escaping inside
# an R string literal.
data_dir <- "UCI HAR Dataset"
zip_filename <- "getdata-projectfiles-UCI HAR Dataset.zip"
check_data_exists(data_dir, zip_filename)
# indicate which sub-directories (data sets) to consider
sub_dirs <- c("test", "train")
# indicate which types of files to consider for data sets
file_types <- c("X", "Y", "subject")
# read in feature list
feature_file <- paste(data_dir, "features.txt", sep = "/")
feature_names <- get_features(feature_file)
# read in activity labels
activity_label_file <- paste(data_dir, "activity_labels.txt", sep = "/")
activity_names <- get_activities(activity_label_file)
# each data set's table will be added to an overall list
data_table_list <- list()
# get the file names for all data sets
all_file_names <- set_up_file_names(data_dir, sub_dirs, file_types)
###############################
# Annotate the data sets
###############################
data_table_list <- annotate_data_set(all_file_names, feature_names, activity_names)
###############################
# Merge the data sets (training and the test sets) to create one data set
###############################
# do.call(rbind, ...) concatenates however many data sets were produced;
# the original `for(i in 2:length(...))` loop broke for a single data set
# because 2:1 counts downwards.
all_data <- do.call(rbind, data_table_list)
# clean up intermediate data structures
rm(data_table_list)
############################
# Extract only the measurements on the mean and standard deviation
############################
# NOTE(review): this pattern also keeps meanFreq() features and any name
# containing "std" -- confirm that is the intended selection.
tidy_data <- all_data[grepl("-mean|std", all_data$MeasurementName, ignore.case = TRUE), ]
# clean up intermediate data structures
rm(all_data)
############################
# Create a second, independent tidy data set with the average of each variable
# for each activity and each subject
############################
# NOTE(review): tbl_df()/summarise_each()/funs() are deprecated in current
# dplyr; kept for compatibility with the dplyr version this script targets.
second_tidy_data <- tbl_df(tidy_data)
by_subject_activity <- second_tidy_data %>%
  group_by(SubjectID, ActivityName, MeasurementName) %>%
  summarise_each(funs(mean), matches("Value"))
names(by_subject_activity)[4] <- "MeasurementAverage"
# write second tidy data set to txt file (`row.names` spelled out; the
# original relied on partial matching of `row.name`)
write.table(by_subject_activity, file = "subject_activity_averages.txt",
            row.names = FALSE, quote = FALSE, sep = "\t")
|
#' ClusterHierarchy class to manage treeviz cluster data
# S4 class definition: ClusterHierarchy extends S4Vectors::DataFrame and
# stores the cleaned cluster-assignment table built by the
# ClusterHierarchy() constructor.
setClass(
"ClusterHierarchy",
contains = c("DataFrame")
)
#' create a new ClusterHierarchy object. User can give either
#' col_regex or columns option to filter the columns or specify
#' the column order
#' @param hierarchy hierarchy as a dataFrame
#' @param col_regex Regular Expression for choosing columns
#' @param columns Vector containing list of columns to choose from with ordering
#' @return `ClusterHierarchy`` return an object of class ClusterHierarchy containing cluster information
#' @importFrom methods new
#' @importFrom S4Vectors DataFrame
#' @export
#'
ClusterHierarchy <- function(hierarchy, col_regex=NULL, columns =NULL) {
#will se later if this is needed
#
# if (is.null(hierarchy)) {
# stop("No hirerarchy")
#
# }
#
# if (ncol(hierarchy) == 0) {
# return(
# new(
# "TreeCluster",
# DataFrame(hierarchy)
# )
# )
# }
#
#Filter key words
if(!is.null(col_regex) && !is.null(columns)){
message("Cannot use both")
}
if(!is.null(col_regex)){
hierarchy <- hierarchy[, grep( col_regex, colnames(hierarchy))]
}
# ordering
if(!is.null(columns)){
hierarchy <- hierarchy[columns]
}
for(cols in colnames(hierarchy)){
hierarchy[[cols]]<- as.factor(hierarchy[[cols]])
}
hierarchy_dt <- as.data.table(hierarchy)
hierarchy_dt$samples <- rownames(hierarchy_dt) <- rownames(hierarchy)
cols <- colnames(hierarchy_dt)[1:length(colnames(hierarchy_dt))-1]
order <- rep(1, length(hierarchy_dt)-1)
hierarchy_dt <- setorderv(hierarchy_dt, cols = cols, order = order)
hierarchy_df <- as.data.frame(hierarchy_dt)
rownames(hierarchy_df) <- hierarchy_df$samples
hierarchy <- hierarchy_df[,cols]
# hierarchy<- as.data.frame(hierarchy)
hierarchy <- rename_clusters(hierarchy)
uniqeness <- check_unique_parent(hierarchy)
#print(str(hierarchy))
# Create clustree object
hierarchy_graph <-
clustree(
hierarchy ,
prefix = "cluster",
prop_filter = 0,
return = "graph"
)
if(uniqeness==FALSE){
# prune the graph with only core edges (this makes it a ~tree)
graph_df <- as_long_data_frame(hierarchy_graph)
hierarchy <- prune_tree(graph_df, hierarchy)
hierarchy_graph = hierarchy$Clustree_obj
hierarchy <- DataFrame(hierarchy$Cluster_obj)
}
# collapses tree if the levels are the same at different resolutions
collapsed_graph <- collapse_tree(hierarchy_graph)
cluster_names <-
unique(sapply(strsplit(collapsed_graph$node, "C"), '[', 1))
#take only uncollapsed columns
hierarchy <- hierarchy[, cluster_names]
digits<- sub(pattern = "cluster", replacement = "", x=cluster_names)
cluster_names <- paste0(digits, ".",cluster_names, sep="")
#renaming nodes from numbers to cluster1C1 cluster1C2 so on..
for (clusnames in names(hierarchy)) {
hierarchy[[clusnames]] <-
paste(clusnames, hierarchy[[clusnames]], sep = 'C')
}
names(hierarchy)<- cluster_names
samples <- rownames(hierarchy)
if(is.null(rownames(hierarchy))){
samples<- 1:nrow(hierarchy)
}
hierarchy <- cbind(hierarchy, samples)
hierarchy <- checkRoot(hierarchy)
new(
"ClusterHierarchy",
hierarchy
)
} | /R/ClusterHierarchy-class.R | no_license | kzintas/TreeSE | R | false | false | 3,325 | r | #' ClusterHierarchy class to manage treeviz cluster data
# S4 class definition: ClusterHierarchy extends S4Vectors::DataFrame and
# stores the cleaned cluster-assignment table built by the
# ClusterHierarchy() constructor.
setClass(
  "ClusterHierarchy",
  contains = c("DataFrame")
)
#' create a new ClusterHierarchy object. User can give either
#' col_regex or columns option to filter the columns or specify
#' the column order
#' @param hierarchy hierarchy as a dataFrame
#' @param col_regex Regular Expression for choosing columns
#' @param columns Vector containing list of columns to choose from with ordering
#' @return `ClusterHierarchy`` return an object of class ClusterHierarchy containing cluster information
#' @importFrom methods new
#' @importFrom S4Vectors DataFrame
#' @export
#'
# Constructor: cleans a table of per-resolution cluster assignments
# (rows = samples, one column per clustering resolution), orders samples by
# their memberships, prunes/collapses the implied cluster tree via clustree,
# and wraps the result in the ClusterHierarchy S4 class.  `col_regex` and
# `columns` are mutually exclusive ways to select (and, for `columns`,
# order) the hierarchy columns used.
ClusterHierarchy <- function(hierarchy, col_regex=NULL, columns =NULL) {
  # (kept for reference) input validation that may be needed later:
  #
  # if (is.null(hierarchy)) {
  #   stop("No hirerarchy")
  #
  # }
  #
  # if (ncol(hierarchy) == 0) {
  #   return(
  #     new(
  #       "TreeCluster",
  #       DataFrame(hierarchy)
  #     )
  #   )
  # }
  #
  # Filter key words: the two column-selection mechanisms are exclusive.
  # NOTE(review): only a message is emitted and execution continues (the
  # col_regex branch then wins) -- confirm whether this should be an error.
  if(!is.null(col_regex) && !is.null(columns)){
    message("Cannot use both")
  }
  if(!is.null(col_regex)){
    hierarchy <- hierarchy[, grep( col_regex, colnames(hierarchy))]
  }
  # ordering
  if(!is.null(columns)){
    hierarchy <- hierarchy[columns]
  }
  # cluster labels are categorical, so store every column as a factor
  for(cols in colnames(hierarchy)){
    hierarchy[[cols]]<- as.factor(hierarchy[[cols]])
  }
  hierarchy_dt <- as.data.table(hierarchy)
  hierarchy_dt$samples <- rownames(hierarchy_dt) <- rownames(hierarchy)
  # NOTE(review): `1:length(x)-1` parses as `(1:length(x)) - 1`, i.e.
  # 0:(n-1); the 0 index is silently dropped, so this selects the first
  # n-1 column names (everything except `samples`).  It works, but
  # seq_len(n - 1) would state the intent explicitly.
  cols <- colnames(hierarchy_dt)[1:length(colnames(hierarchy_dt))-1]
  order <- rep(1, length(hierarchy_dt)-1)
  # sort samples by cluster membership, all columns ascending
  hierarchy_dt <- setorderv(hierarchy_dt, cols = cols, order = order)
  hierarchy_df <- as.data.frame(hierarchy_dt)
  rownames(hierarchy_df) <- hierarchy_df$samples
  hierarchy <- hierarchy_df[,cols]
  # hierarchy<- as.data.frame(hierarchy)
  hierarchy <- rename_clusters(hierarchy)
  uniqeness <- check_unique_parent(hierarchy)
  #print(str(hierarchy))
  # Create clustree object
  hierarchy_graph <-
    clustree(
      hierarchy ,
      prefix = "cluster",
      prop_filter = 0,
      return = "graph"
    )
  if(uniqeness==FALSE){
    # prune the graph with only core edges (this makes it a ~tree)
    graph_df <- as_long_data_frame(hierarchy_graph)
    hierarchy <- prune_tree(graph_df, hierarchy)
    hierarchy_graph = hierarchy$Clustree_obj
    hierarchy <- DataFrame(hierarchy$Cluster_obj)
  }
  # collapses tree if the levels are the same at different resolutions
  collapsed_graph <- collapse_tree(hierarchy_graph)
  cluster_names <-
    unique(sapply(strsplit(collapsed_graph$node, "C"), '[', 1))
  # take only uncollapsed columns
  hierarchy <- hierarchy[, cluster_names]
  digits<- sub(pattern = "cluster", replacement = "", x=cluster_names)
  # NOTE(review): paste0() has no `sep` argument; the `sep=""` below is
  # simply concatenated as an extra (empty) string, which is harmless.
  cluster_names <- paste0(digits, ".",cluster_names, sep="")
  # rename nodes from bare numbers to cluster1C1, cluster1C2, and so on
  for (clusnames in names(hierarchy)) {
    hierarchy[[clusnames]] <-
      paste(clusnames, hierarchy[[clusnames]], sep = 'C')
  }
  names(hierarchy)<- cluster_names
  # sample ids fall back to row indices when the input had no row names
  samples <- rownames(hierarchy)
  if(is.null(rownames(hierarchy))){
    samples<- 1:nrow(hierarchy)
  }
  hierarchy <- cbind(hierarchy, samples)
  hierarchy <- checkRoot(hierarchy)
  new(
    "ClusterHierarchy",
    hierarchy
  )
}
\name{crisp}
\alias{crisp}
\title{
Crisp the fuzzy membership degrees
}
\description{
Crisps the fuzzy and possibilistic membership degrees from the fuzzy or possibilistic clustering algorithms.
}
\usage{
crisp(u, method, tv)
}
\arguments{
\item{u}{a numeric matrix containing the fuzzy or possibilistic membership degrees of the data objects.}
\item{method}{a string for selection of the crisping method. The default is \option{max} that assigns the data object to the cluster in which the object has maximum membership. The alternative is \option{threshold} that assigns the objects to a cluster if its maximum membership degree is greater than \code{tv}, a threshold value.}
  \item{tv}{a number for the threshold membership degree. The default is 0.5 when \code{method} is \option{threshold}, if it is not specified by the user.}
}
\value{
\item{cluster}{a numeric vector containing the indexes (labels) of clusters for the maximum membership of the objects.}
}
\details{
The function \code{crisp} produces the crisp or hard membership degrees of the objects in order to place them into only one cluster.
}
\author{
Zeynel Cebeci
}
\examples{
data(iris)
x <- iris[,1:4]
# Run FCM
res.fcm <- fcm(x, centers=3)
# Crisp the fuzzy membership degrees and plot the crisp memberships
cllabels <- crisp(res.fcm$u)
plot(x, col=cllabels)
}
\concept{fuzzy c-means clustering}
\concept{prototype-based clustering}
\concept{partitioning clustering}
\concept{cluster analysis}
\keyword{cluster}
| /man/crisp.Rd | no_license | cran/ppclust | R | false | false | 1,554 | rd | \name{crisp}
\alias{crisp}
\title{
Crisp the fuzzy membership degrees
}
\description{
Crisps the fuzzy and possibilistic membership degrees from the fuzzy or possibilistic clustering algorithms.
}
\usage{
crisp(u, method, tv)
}
\arguments{
\item{u}{a numeric matrix containing the fuzzy or possibilistic membership degrees of the data objects.}
\item{method}{a string for selection of the crisping method. The default is \option{max} that assigns the data object to the cluster in which the object has maximum membership. The alternative is \option{threshold} that assigns the objects to a cluster if its maximum membership degree is greater than \code{tv}, a threshold value.}
\item{tv}{a number for the threshold membership degree. The default is 0.5 with the \code{method} is \option{threshold} if it is not speficied by the user.}
}
\value{
\item{cluster}{a numeric vector containing the indexes (labels) of clusters for the maximum membership of the objects.}
}
\details{
The function \code{crisp} produces the crisp or hard membership degrees of the objects in order to place them into only one cluster.
}
\author{
Zeynel Cebeci
}
\examples{
data(iris)
x <- iris[,1:4]
# Run FCM
res.fcm <- fcm(x, centers=3)
# Crisp the fuzzy membership degrees and plot the crisp memberships
cllabels <- crisp(res.fcm$u)
plot(x, col=cllabels)
}
\concept{fuzzy c-means clustering}
\concept{prototype-based clustering}
\concept{partitioning clustering}
\concept{cluster analysis}
\keyword{cluster}
|
\name{LPKsample-package}
\alias{LPKsample-package}
\alias{LPKsample}
\docType{package}
\title{
\packageTitle{LPKsample}
}
\description{
This package performs high dimensional K-sample comparison using graph-based LP nonparametric (GLP) method.
}
\author{ Mukhopadhyay, S. and Wang, K.
Maintainer: \packageMaintainer{LPKsample}
}
\references{
Mukhopadhyay, S. and Wang, K. (2020), "A Nonparametric Approach to High-dimensional K-sample Comparison Problem", arXiv:1810.01724.
Mukhopadhyay, S. (2017+), "Unified Statistical Theory of Spectral Graph Analysis".
Mukhopadhyay, S. and Parzen, E. (2014), "LP Approach to Statistical Modeling", arXiv:1405.2601.
}
\keyword{ package } | /man/LPKsample-package.Rd | no_license | cran/LPKsample | R | false | false | 707 | rd | \name{LPKsample-package}
\alias{LPKsample-package}
\alias{LPKsample}
\docType{package}
\title{
\packageTitle{LPKsample}
}
\description{
This package performs high dimensional K-sample comparison using graph-based LP nonparametric (GLP) method.
}
\author{ Mukhopadhyay, S. and Wang, K.
Maintainer: \packageMaintainer{LPKsample}
}
\references{
Mukhopadhyay, S. and Wang, K. (2020), "A Nonparametric Approach to High-dimensional K-sample Comparison Problem", arXiv:1810.01724.
Mukhopadhyay, S. (2017+), "Unified Statistical Theory of Spectral Graph Analysis".
Mukhopadhyay, S. and Parzen, E. (2014), "LP Approach to Statistical Modeling", arXiv:1405.2601.
}
\keyword{ package } |
# Setup
# dplyr supplies the recode/filter/group_by pipeline used below
library(dplyr)
avg.change.raw <- read.csv("../data/global-climate-models/GCM-avg-change.csv", stringsAsFactors = F)
time.evolv.raw <- read.csv("../data/global-climate-models/GCM-TEP.csv", stringsAsFactors = F)
# Finding appropriate model
# NOTE(review): `time.evolv.proj` and `avg.change.proj` are only created
# further down this script, so when run top-to-bottom in a fresh session the
# next two lines fail with "object not found".  They look exploratory (the
# model is hard-coded below) -- confirm, then either move this section after
# the clean-up sections or derive the model lists from the *.raw tables.
te.models <- unique(time.evolv.proj$Model)
ac.models <- unique(avg.change.proj$`Model Name`)
models.in.both <- ac.models %in% te.models
model.names <- ac.models[models.in.both]
model.chosen <- "GFDL-ESM2G"
# Average Change Projections Clean Up
# Promote the second raw row to column names (data starts on row 3), then
# translate coded seasons/variables/scenarios into readable labels.
avg.change.proj <- avg.change.raw[3:length(avg.change.raw$X), ]
colnames(avg.change.proj) <- avg.change.raw[2, ]
avg.change.proj$Season <- recode(avg.change.proj$Season,
  "ANN" = "Annual", "DJF" = "Winter", "MAM" = "Spring",
  "JJA" = "Summer", "SON" = "Autumn")
avg.change.proj$Variable <- recode(avg.change.proj$Variable,
  "tas" = "Near-Surface Air Temperature", "pr" = "Precipitation")
avg.change.proj$Scenario <- recode(avg.change.proj$Scenario,
  "rcp26" = "RCP2.6", "rcp45" = "RCP4.5", "rcp60" = "RCP6", "rcp85" = "RCP8.5")
# Keep only rows for the chosen model that have a defined projected change.
avg.change.proj <- avg.change.proj %>%
  filter(`Projected Change` != "NaN", `Model Name` == model.chosen) %>%
  select(-`Model Name`)
# Split average change into two datasets
avg.change.temp <- avg.change.proj %>%
  filter(Variable == "Near-Surface Air Temperature") %>%
  select(-Variable)
avg.change.prec <- avg.change.proj %>%
  filter(Variable == "Precipitation") %>%
  select(-Variable)
# Time Evolving Projections Clean Up
# Name the raw columns, then translate coded values into readable labels.
time.evolv.proj <- time.evolv.raw
colnames(time.evolv.proj) <- c("Variable", "Epoch", "Scenario", "Season", "Model", "Year", "Value")
time.evolv.proj$Scenario <- recode(time.evolv.proj$Scenario,
  "rcp26" = "RCP2.6", "rcp45" = "RCP4.5", "rcp60" = "RCP6", "rcp85" = "RCP8.5")
time.evolv.proj$Variable <- recode(time.evolv.proj$Variable,
  "tas" = "Near-Surface Air Temperature", "pr" = "Precipitation")
time.evolv.proj$Season <- recode(time.evolv.proj$Season,
  "ANN" = "Annual", "DJF" = "Winter", "MAM" = "Spring",
  "JJA" = "Summer", "SON" = "Autumn")
time.evolv.proj$Epoch <- recode(time.evolv.proj$Epoch,
  "historical" = "Historical", "future" = "Future")
time.evolv.proj <- time.evolv.proj %>%
  filter(Model == model.chosen) %>%
  select(-Model)
# Split time evolving projections into two datasets, averaging values
# within five-year bins (trunc(Year / 5) * 5 floors to the bin start).
time.evolv.temp <- time.evolv.proj %>%
  filter(Variable == "Near-Surface Air Temperature") %>%
  select(-Variable) %>%
  group_by(Year = trunc((Year / 5)) * 5, Epoch, Season, Scenario) %>%
  summarise(Value = mean(Value))
time.evolv.prec <- time.evolv.proj %>%
  filter(Variable == "Precipitation") %>%
  select(-Variable) %>%
  group_by(Year = trunc((Year / 5)) * 5, Epoch, Season, Scenario) %>%
  summarise(Value = mean(Value))
# Create new data Files to use
write.csv(avg.change.prec, "../data/global-climate-models/GCM-avg-precipitation.csv", row.names = F)
write.csv(avg.change.temp, "../data/global-climate-models/GCM-avg-temp.csv", row.names = F)
write.csv(time.evolv.prec, "../data/global-climate-models/GCM-TEP-precipitation.csv", row.names = F)
write.csv(time.evolv.temp, "../data/global-climate-models/GCM-TEP-temp.csv", row.names = F)
| /scripts/GCMprojections-preprocessor.R | permissive | andreybutenko/pnw-climate-change | R | false | false | 3,935 | r | # Setup
library(dplyr)

# Raw GCM exports: the average-change table and the time-evolving projections.
avg.change.raw <- read.csv("../data/global-climate-models/GCM-avg-change.csv", stringsAsFactors = FALSE)
time.evolv.raw <- read.csv("../data/global-climate-models/GCM-TEP.csv", stringsAsFactors = FALSE)

# Average Change Projections Clean Up ----
# The real header sits in row 2 of the raw export; data start in row 3.
avg.change.proj <- avg.change.raw[3:nrow(avg.change.raw), ]
colnames(avg.change.proj) <- avg.change.raw[2, ]
avg.change.proj$Season <- recode(avg.change.proj$Season,
                                 "ANN" = "Annual",
                                 "DJF" = "Winter",
                                 "MAM" = "Spring",
                                 "JJA" = "Summer",
                                 "SON" = "Autumn")
avg.change.proj$Variable <- recode(avg.change.proj$Variable,
                                   "tas" = "Near-Surface Air Temperature",
                                   "pr" = "Precipitation")
avg.change.proj$Scenario <- recode(avg.change.proj$Scenario,
                                   "rcp26" = "RCP2.6",
                                   "rcp45" = "RCP4.5",
                                   "rcp60" = "RCP6",
                                   "rcp85" = "RCP8.5")
# Drop rows with no projected value ("NaN" is stored as text in the export)
avg.change.proj <- avg.change.proj %>%
  filter(`Projected Change` != "NaN")

# Time Evolving Projections Clean Up ----
time.evolv.proj <- time.evolv.raw
colnames(time.evolv.proj) <- c("Variable", "Epoch", "Scenario", "Season", "Model", "Year", "Value")
time.evolv.proj$Scenario <- recode(time.evolv.proj$Scenario,
                                   "rcp26" = "RCP2.6",
                                   "rcp45" = "RCP4.5",
                                   "rcp60" = "RCP6",
                                   "rcp85" = "RCP8.5")
time.evolv.proj$Variable <- recode(time.evolv.proj$Variable,
                                   "tas" = "Near-Surface Air Temperature",
                                   "pr" = "Precipitation")
time.evolv.proj$Season <- recode(time.evolv.proj$Season,
                                 "ANN" = "Annual",
                                 "DJF" = "Winter",
                                 "MAM" = "Spring",
                                 "JJA" = "Summer",
                                 "SON" = "Autumn")
time.evolv.proj$Epoch <- recode(time.evolv.proj$Epoch,
                                "historical" = "Historical",
                                "future" = "Future")

# Finding appropriate model ----
# BUG FIX: this section originally ran BEFORE avg.change.proj and
# time.evolv.proj were created, so a fresh top-to-bottom run errored.
# It now runs after both cleaned tables exist and before the model filter.
te.models <- unique(time.evolv.proj$Model)
ac.models <- unique(avg.change.proj$`Model Name`)
models.in.both <- ac.models %in% te.models
model.names <- ac.models[models.in.both]  # models present in both datasets
model.chosen <- "GFDL-ESM2G"              # model used for all downstream files

# Keep only the chosen model and drop the now-constant model column
avg.change.proj <- avg.change.proj %>%
  filter(`Model Name` == model.chosen) %>%
  select(-`Model Name`)
time.evolv.proj <- time.evolv.proj %>%
  filter(Model == model.chosen) %>%
  select(-Model)

# Split average change into two datasets (temperature vs precipitation)
avg.change.temp <- avg.change.proj %>%
  filter(Variable == "Near-Surface Air Temperature") %>%
  select(-Variable)
avg.change.prec <- avg.change.proj %>%
  filter(Variable == "Precipitation") %>%
  select(-Variable)

# Split time evolving projections into two datasets, averaging Value within
# 5-year bins (Year truncated down to the bin start) per epoch/season/scenario
time.evolv.temp <- time.evolv.proj %>%
  filter(Variable == "Near-Surface Air Temperature") %>%
  select(-Variable) %>%
  group_by(Year = trunc((Year / 5)) * 5, Epoch, Season, Scenario) %>%
  summarise(Value = mean(Value))
time.evolv.prec <- time.evolv.proj %>%
  filter(Variable == "Precipitation") %>%
  select(-Variable) %>%
  group_by(Year = trunc((Year / 5)) * 5, Epoch, Season, Scenario) %>%
  summarise(Value = mean(Value))

# Create new data files to use downstream
write.csv(avg.change.prec, "../data/global-climate-models/GCM-avg-precipitation.csv", row.names = FALSE)
write.csv(avg.change.temp, "../data/global-climate-models/GCM-avg-temp.csv", row.names = FALSE)
write.csv(time.evolv.prec, "../data/global-climate-models/GCM-TEP-precipitation.csv", row.names = FALSE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/biglasso.R
\name{COPY_biglasso_main}
\alias{COPY_biglasso_main}
\title{Sparse regression path}
\usage{
COPY_biglasso_main(X, y.train, ind.train, ind.col, covar.train,
family = c("gaussian", "binomial"), alphas = 1, K = 10,
ind.sets = sample(rep_len(1:K, n)), nlambda = 200, lambda.min = if
(n > p) 1e-04 else 0.001, nlam.min = 50, n.abort = 10,
base.train = NULL, eps = 1e-05, max.iter = 1000, dfmax = 50000,
warn = FALSE, return.all = FALSE, ncores = 1)
}
\arguments{
\item{family}{Either "gaussian" (linear) or "binomial" (logistic).}
\item{alphas}{The elastic-net mixing parameter that controls the relative
contribution from the lasso (l1) and the ridge (l2) penalty. The penalty is
defined as \deqn{ \alpha||\beta||_1 + (1-\alpha)/2||\beta||_2^2.}
\code{alpha = 1} is the lasso penalty and \code{alpha} in between \code{0}
(\code{1e-4}) and \code{1} is the elastic-net penalty. Default is \code{1}. \strong{You can
pass multiple values, and only one will be used (optimized by grid-search).}}
\item{K}{Number of sets used in the Cross-Model Selection and Averaging
(CMSA) procedure. Default is \code{10}.}
\item{ind.sets}{Integer vector of values between \code{1} and \code{K} specifying
which set each index of the training set is in. Default randomly assigns
these values.}
\item{nlambda}{The number of lambda values. Default is \code{200}.}
\item{lambda.min}{The smallest value for lambda, \strong{as a fraction of
lambda.max}. Default is \code{.0001} if the number of observations is larger than
the number of variables and \code{.001} otherwise.}
\item{nlam.min}{Minimum number of lambda values to investigate. Default is \code{50}.}
\item{n.abort}{Number of lambda values for which prediction on the validation
set must decrease before stopping. Default is \code{10}.}
\item{eps}{Convergence threshold for inner coordinate descent.
The algorithm iterates until the maximum change in the objective after any
coefficient update is less than \code{eps} times the null deviance.
Default value is \code{1e-5}.}
\item{max.iter}{Maximum number of iterations. Default is \code{1000}.}
\item{dfmax}{Upper bound for the number of nonzero coefficients. Default is
\code{50e3} because, for large data sets, computational burden may be
heavy for models with a large number of nonzero coefficients.}
\item{warn}{Return warning messages for failures to converge and model
saturation? Default is \code{FALSE}.}
\item{return.all}{Whether to return coefficients for all alpha and lambda
values. Default is \code{FALSE} and returns only coefficients which maximize
prediction on the validation sets.}
}
\description{
Fit solution paths for linear or logistic regression models penalized by
lasso (alpha = 1) or elastic-net (1e-4 < alpha < 1) over a grid of values
for the regularization parameter lambda.
}
\details{
The objective function for linear regression (\code{family = "gaussian"}) is
\deqn{\frac{1}{2n}\textrm{RSS} + \textrm{penalty},} for logistic regression
(\code{family = "binomial"}) it is \deqn{-\frac{1}{n} loglike +
\textrm{penalty}.}
}
\keyword{internal}
| /man/COPY_biglasso_main.Rd | no_license | gridl/bigstatsr | R | false | true | 3,167 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/biglasso.R
\name{COPY_biglasso_main}
\alias{COPY_biglasso_main}
\title{Sparse regression path}
\usage{
COPY_biglasso_main(X, y.train, ind.train, ind.col, covar.train,
family = c("gaussian", "binomial"), alphas = 1, K = 10,
ind.sets = sample(rep_len(1:K, n)), nlambda = 200, lambda.min = if
(n > p) 1e-04 else 0.001, nlam.min = 50, n.abort = 10,
base.train = NULL, eps = 1e-05, max.iter = 1000, dfmax = 50000,
warn = FALSE, return.all = FALSE, ncores = 1)
}
\arguments{
\item{family}{Either "gaussian" (linear) or "binomial" (logistic).}
\item{alphas}{The elastic-net mixing parameter that controls the relative
contribution from the lasso (l1) and the ridge (l2) penalty. The penalty is
defined as \deqn{ \alpha||\beta||_1 + (1-\alpha)/2||\beta||_2^2.}
\code{alpha = 1} is the lasso penalty and \code{alpha} in between \code{0}
(\code{1e-4}) and \code{1} is the elastic-net penalty. Default is \code{1}. \strong{You can
pass multiple values, and only one will be used (optimized by grid-search).}}
\item{K}{Number of sets used in the Cross-Model Selection and Averaging
(CMSA) procedure. Default is \code{10}.}
\item{ind.sets}{Integer vector of values between \code{1} and \code{K} specifying
which set each index of the training set is in. Default randomly assigns
these values.}
\item{nlambda}{The number of lambda values. Default is \code{200}.}
\item{lambda.min}{The smallest value for lambda, \strong{as a fraction of
lambda.max}. Default is \code{.0001} if the number of observations is larger than
the number of variables and \code{.001} otherwise.}
\item{nlam.min}{Minimum number of lambda values to investigate. Default is \code{50}.}
\item{n.abort}{Number of lambda values for which prediction on the validation
set must decrease before stopping. Default is \code{10}.}
\item{eps}{Convergence threshold for inner coordinate descent.
The algorithm iterates until the maximum change in the objective after any
coefficient update is less than \code{eps} times the null deviance.
Default value is \code{1e-5}.}
\item{max.iter}{Maximum number of iterations. Default is \code{1000}.}
\item{dfmax}{Upper bound for the number of nonzero coefficients. Default is
\code{50e3} because, for large data sets, computational burden may be
heavy for models with a large number of nonzero coefficients.}
\item{warn}{Return warning messages for failures to converge and model
saturation? Default is \code{FALSE}.}
\item{return.all}{Whether to return coefficients for all alpha and lambda
values. Default is \code{FALSE} and returns only coefficients which maximize
prediction on the validation sets.}
}
\description{
Fit solution paths for linear or logistic regression models penalized by
lasso (alpha = 1) or elastic-net (1e-4 < alpha < 1) over a grid of values
for the regularization parameter lambda.
}
\details{
The objective function for linear regression (\code{family = "gaussian"}) is
\deqn{\frac{1}{2n}\textrm{RSS} + \textrm{penalty},} for logistic regression
(\code{family = "binomial"}) it is \deqn{-\frac{1}{n} loglike +
\textrm{penalty}.}
}
\keyword{internal}
|
\name{mlprof.fn}
\alias{mlprof.fn}
\alias{remlprof.fn}
\alias{mlprof.gr}
\alias{remlprof.gr}
\alias{iter.igls}
\title{ Likelihood Functions for mvmeta Models }
\description{
These functions compute the value of the log-likelihood and the related vectors of first partial derivatives for random-effects multivariate and univariate meta-analysis and meta-regression, in terms of model parameters. They are meant to be used internally and not directly run by the users.
}
\usage{
mlprof.fn(par, Xlist, ylist, Slist, nalist, k, m, p, nall, bscov, ctrl)
mlprof.gr(par, Xlist, ylist, Slist, nalist, k, m, p, nall, bscov, ctrl)
remlprof.fn(par, Xlist, ylist, Slist, nalist, k, m, p, nall, bscov, ctrl)
remlprof.gr(par, Xlist, ylist, Slist, nalist, k, m, p, nall, bscov, ctrl)
iter.igls(Psi, Xlist, ylist, Slist, nalist, k, m)
}
\arguments{
Assuming a meta-analysis or meta-regression based on \eqn{m} studies, \eqn{k} outcomes and \eqn{p} predictors:
\item{par }{ a vector representing the random-effects parameters defining the between-study (co)variance matrix.}
\item{Psi }{ a \eqn{k \times k}{k x k} matrix representing the current estimate of the between-study (co)variance matrix.}
\item{Xlist }{ a \eqn{m}-dimensional list of study-specific design matrices for the fixed-effects part of the model. Rows corresponding to missing outcomes have been excluded.}
\item{ylist }{ an \eqn{m}-dimensional list of study-specific vectors of estimated outcomes. Entries corresponding to missing outcomes have been excluded.}
\item{Slist }{ a \eqn{m}-dimensional list of within-study (co)variance matrices of estimated outcomes. Rows and columns corresponding to missing outcomes have been excluded.}
\item{nalist }{ a \eqn{m}-dimensional list of \eqn{k}-dimensional study-specific logical vectors, identifying missing outcomes.}
\item{k, m, p, nall }{ numeric scalars: number of outcomes, number of studies included in estimation (equal to the length of lists above), number of predictors (including the intercept), number of observations (excluding missing).}
\item{bscov }{ a string defining the between-study (co)variance structure in likelihood based models. See Details.}
\item{ctrl }{ list of parameters for controlling the fitting process, usually internally set to default values by \code{\link{mvmeta.control}}. The name is chosen to avoid conflicts with the argument \code{control} in \code{\link{optim}}.}
}
\details{
These functions are called internally by the fitting functions \code{\link{mvmeta.ml}} and \code{\link{mvmeta.reml}} to perform iterative optimization algorithms for estimating random effects meta-analytical models.
The maximization of the (restricted) likelihood starts with a few runs of an iterative generalized least squares algorithm implemented in \code{iter.igls}. This can be regarded as a fast and stable way to get starting values close to the maximum for the Quasi-Newton iterative algorithm, implemented in \code{\link{optim}}. Alternatively, starting values can be provided by the user in the control list (see \code{\link{mvmeta.control}}). The function \code{\link{optim}} requires the algorithms to compute the value of the (restricted) likelihood and (optionally) the vector of its first partial derivatives, provided by the related likelihood functions.
These functions actually specify the \emph{profiled} version of the (restricted) likelihood, expressed only in terms of random-effects parameters, while the estimate of the fixed-effects coefficients is provided at each iteration by the internal function \code{glsfit}, based on the current value of the between-study (co)variance matrix. At convergence, the value of this profiled version is identical to the full (restricted) likelihood. This approach is computationally efficient, as it reduces the number of parameters in the optimization routine, especially for meta-regression models.
The random-effects parameters in \code{par} depends on the chosen \code{\link[=mvmetaCovStruct]{structure}} for the between-study (co)variance matrix. The parameterization ensures the positive-definiteness of the estimated matrix. A Cholesky decomposition is then performed on the marginal (co)variance matrix in order to re-express the problem as standard least square equations, an approach which speeds up the computation of matrix inverses and determinants. These equations are finally solved through a QR decomposition, which guarantees stability. More details are provided in the references below.
Some parameters of the fitting procedures are determined through \code{\link{mvmeta.control}}. Specifically, the user can obtain the Hessian matrix of the estimated parameters (appropriately transformed, see \code{\link{mvmetaCovStruct}}) in the optimization function by setting \code{hessian=TRUE}, and specific control settings in the optimization process can be defined by the control list argument \code{optim}. These values are passed to the optimization function \code{\link{optim}}.
}
\value{
\code{mlprof.fn} and \code{remlprof.fn} return the value of the (restricted) log-likelihood for a given set of parameters in \code{par}. \code{mlprof.gr} and \code{remlprof.gr} return instead the related vector of first partial derivatives. \code{iter.igls} returns an updated estimate of \code{Psi} given its initial value or the value at the previous iteration.
}
\references{
Sera F, Armstrong B, Blangiardo M, Gasparrini A (2019). An extended mixed-effects framework for meta-analysis.\emph{Statistics in Medicine}. 2019;38(29):5429-5444. [Freely available \href{http://www.ag-myresearch.com/2019_sera_statmed.html}{\bold{here}}].
Gasparrini A, Armstrong B, Kenward MG (2012). Multivariate meta-analysis for non-linear and other multi-parameter associations. \emph{Statistics in Medicine}. \bold{31}(29):3821--3839. [Freely available \href{http://www.ag-myresearch.com/2012_gasparrini_statmed.html}{\bold{here}}].
Goldstein H (1986). Multilevel mixed linear model analysis using iterative generalized least squares. \emph{Biometrika}. \bold{73}(1):43.
Lindstrom MJ and Bates DM (1988). Newton-Raphson and EM algorithms for linear mixed-effects models for repeated-measures data. \emph{Journal of the American Statistical Association}. \bold{83}(404):1014--1022.
Pinheiro JC and Bates DM (2000). \emph{Mixed-Effects Models in S and S-PLUS}. New York, Springer Verlag.
}
\author{Antonio Gasparrini, \email{antonio.gasparrini@lshtm.ac.uk}}
\note{
As stated earlier, these functions are called internally by \code{\link{mvmeta.ml}} and \code{\link{mvmeta.reml}}, and are not meant to be used directly. In particular, their code does not contain any check on the arguments provided, which are expected in specific formats. They are however exported in the namespace and documented for completeness.
}
\seealso{
See \code{\link{mvmeta.fit}} and \code{\link{mvmeta.ml}} for additional info on the fitting procedures. See \code{\link{mvmeta.control}} to determine specific parameters of the fitting procedures. See \code{\link{mvmetaCovStruct}} for (co)variance structures. See \code{\link{chol}} and \code{\link{qr}} for info on the Cholesky and QR decomposition.
See \code{\link{mvmeta-package}} for an overview of the package and modelling framework.
}
\keyword{models}
\keyword{regression}
\keyword{multivariate}
| /man/mlprof.fn.Rd | no_license | cran/mvmeta | R | false | false | 7,419 | rd | \name{mlprof.fn}
\alias{mlprof.fn}
\alias{remlprof.fn}
\alias{mlprof.gr}
\alias{remlprof.gr}
\alias{iter.igls}
\title{ Likelihood Functions for mvmeta Models }
\description{
These functions compute the value of the log-likelihood and the related vectors of first partial derivatives for random-effects multivariate and univariate meta-analysis and meta-regression, in terms of model parameters. They are meant to be used internally and not directly run by the users.
}
\usage{
mlprof.fn(par, Xlist, ylist, Slist, nalist, k, m, p, nall, bscov, ctrl)
mlprof.gr(par, Xlist, ylist, Slist, nalist, k, m, p, nall, bscov, ctrl)
remlprof.fn(par, Xlist, ylist, Slist, nalist, k, m, p, nall, bscov, ctrl)
remlprof.gr(par, Xlist, ylist, Slist, nalist, k, m, p, nall, bscov, ctrl)
iter.igls(Psi, Xlist, ylist, Slist, nalist, k, m)
}
\arguments{
Assuming a meta-analysis or meta-regression based on \eqn{m} studies, \eqn{k} outcomes and \eqn{p} predictors:
\item{par }{ a vector representing the random-effects parameters defining the between-study (co)variance matrix.}
\item{Psi }{ a \eqn{k \times k}{k x k} matrix representing the current estimate of the between-study (co)variance matrix.}
\item{Xlist }{ a \eqn{m}-dimensional list of study-specific design matrices for the fixed-effects part of the model. Rows corresponding to missing outcomes have been excluded.}
\item{ylist }{ an \eqn{m}-dimensional list of study-specific vectors of estimated outcomes. Entries corresponding to missing outcomes have been excluded.}
\item{Slist }{ a \eqn{m}-dimensional list of within-study (co)variance matrices of estimated outcomes. Rows and columns corresponding to missing outcomes have been excluded.}
\item{nalist }{ a \eqn{m}-dimensional list of \eqn{k}-dimensional study-specific logical vectors, identifying missing outcomes.}
\item{k, m, p, nall }{ numeric scalars: number of outcomes, number of studies included in estimation (equal to the length of lists above), number of predictors (including the intercept), number of observations (excluding missing).}
\item{bscov }{ a string defining the between-study (co)variance structure in likelihood based models. See Details.}
\item{ctrl }{ list of parameters for controlling the fitting process, usually internally set to default values by \code{\link{mvmeta.control}}. The name is chosen to avoid conflicts with the argument \code{control} in \code{\link{optim}}.}
}
\details{
These functions are called internally by the fitting functions \code{\link{mvmeta.ml}} and \code{\link{mvmeta.reml}} to perform iterative optimization algorithms for estimating random effects meta-analytical models.
The maximization of the (restricted) likelihood starts with a few runs of an iterative generalized least squares algorithm implemented in \code{iter.igls}. This can be regarded as a fast and stable way to get starting values close to the maximum for the Quasi-Newton iterative algorithm, implemented in \code{\link{optim}}. Alternatively, starting values can be provided by the user in the control list (see \code{\link{mvmeta.control}}). The function \code{\link{optim}} requires the algorithms to compute the value of the (restricted) likelihood and (optionally) the vector of its first partial derivatives, provided by the related likelihood functions.
These functions actually specify the \emph{profiled} version of the (restricted) likelihood, expressed only in terms of random-effects parameters, while the estimate of the fixed-effects coefficients is provided at each iteration by the internal function \code{glsfit}, based on the current value of the between-study (co)variance matrix. At convergence, the value of this profiled version is identical to the full (restricted) likelihood. This approach is computationally efficient, as it reduces the number of parameters in the optimization routine, especially for meta-regression models.
The random-effects parameters in \code{par} depends on the chosen \code{\link[=mvmetaCovStruct]{structure}} for the between-study (co)variance matrix. The parameterization ensures the positive-definiteness of the estimated matrix. A Cholesky decomposition is then performed on the marginal (co)variance matrix in order to re-express the problem as standard least square equations, an approach which speeds up the computation of matrix inverses and determinants. These equations are finally solved through a QR decomposition, which guarantees stability. More details are provided in the references below.
Some parameters of the fitting procedures are determined through \code{\link{mvmeta.control}}. Specifically, the user can obtain the Hessian matrix of the estimated parameters (appropriately transformed, see \code{\link{mvmetaCovStruct}}) in the optimization function by setting \code{hessian=TRUE}, and specific control settings in the optimization process can be defined by the control list argument \code{optim}. These values are passed to the optimization function \code{\link{optim}}.
}
\value{
\code{mlprof.fn} and \code{remlprof.fn} return the value of the (restricted) log-likelihood for a given set of parameters in \code{par}. \code{mlprof.gr} and \code{remlprof.gr} return instead the related vector of first partial derivatives. \code{iter.igls} returns an updated estimate of \code{Psi} given its initial value or the value at the previous iteration.
}
\references{
Sera F, Armstrong B, Blangiardo M, Gasparrini A (2019). An extended mixed-effects framework for meta-analysis.\emph{Statistics in Medicine}. 2019;38(29):5429-5444. [Freely available \href{http://www.ag-myresearch.com/2019_sera_statmed.html}{\bold{here}}].
Gasparrini A, Armstrong B, Kenward MG (2012). Multivariate meta-analysis for non-linear and other multi-parameter associations. \emph{Statistics in Medicine}. \bold{31}(29):3821--3839. [Freely available \href{http://www.ag-myresearch.com/2012_gasparrini_statmed.html}{\bold{here}}].
Goldstein H (1986). Multilevel mixed linear model analysis using iterative generalized least squares. \emph{Biometrika}. \bold{73}(1):43.
Lindstrom MJ and Bates DM (1988). Newton-Raphson and EM algorithms for linear mixed-effects models for repeated-measures data. \emph{Journal of the American Statistical Association}. \bold{83}(404):1014--1022.
Pinheiro JC and Bates DM (2000). \emph{Mixed-Effects Models in S and S-PLUS}. New York, Springer Verlag.
}
\author{Antonio Gasparrini, \email{antonio.gasparrini@lshtm.ac.uk}}
\note{
As stated earlier, these functions are called internally by \code{\link{mvmeta.ml}} and \code{\link{mvmeta.reml}}, and are not meant to be used directly. In particular, their code does not contain any check on the arguments provided, which are expected in specific formats. They are however exported in the namespace and documented for completeness.
}
\seealso{
See \code{\link{mvmeta.fit}} and \code{\link{mvmeta.ml}} for additional info on the fitting procedures. See \code{\link{mvmeta.control}} to determine specific parameters of the fitting procedures. See \code{\link{mvmetaCovStruct}} for (co)variance structures. See \code{\link{chol}} and \code{\link{qr}} for info on the Cholesky and QR decomposition.
See \code{\link{mvmeta-package}} for an overview of the package and modelling framework.
}
\keyword{models}
\keyword{regression}
\keyword{multivariate}
|
# Assemble a 2x2 grid of b-t plots for two target scenarios (5466 and 7952).
# plot_all.f(), plot_sim.f() and plot_grid() (cowplot) are provided by the
# sourced helper script.
source("Plot_functions_for_simulations.r")

# Load cached simulation objects only when they are absent from the workspace.
# BUG FIX: the original assigned all three readRDS() results to `list_bt`,
# so `pers_df` and `sim_res` were never populated.
if (!exists("list_bt")) list_bt <- readRDS("list_tb.RDS")
if (!exists("pers_df")) pers_df <- readRDS("pers_df.RDS")
if (!exists("sim_res")) sim_res <- readRDS("sim_res.RDS")

# Panels for the two targets
all_p1 <- plot_all.f(targ = 5466)
sim_p1 <- plot_sim.f(targ = 5466)
all_p2 <- plot_all.f(targ = 7952)
sim_p2 <- plot_sim.f(targ = 7952)

# Combine: simulation panels on the top row, "all" panels on the bottom
plot_b_t_all <- plot_grid(
  sim_p1,
  sim_p2,
  all_p1,
  all_p2,
  labels = c("(a)", "(b)", "(c)", "(d)"),
  ncol = 2, align = "v", hjust = -4)

plot_b_t_all
# 16 9 | /Plot_b_t_all.r | no_license | simonevincenzi/Contemporary_Extinction | R | false | false | 653 | r | source("Plot_functions_for_simulations.r")
# Load cached simulation objects only when they are absent from the workspace.
# BUG FIX: the original assigned all three readRDS() results to `list_bt`,
# so `pers_df` and `sim_res` were never populated.
if (!exists("list_bt")) list_bt <- readRDS("list_tb.RDS")
if (!exists("pers_df")) pers_df <- readRDS("pers_df.RDS")
if (!exists("sim_res")) sim_res <- readRDS("sim_res.RDS")

# Panels for the two targets (5466 and 7952)
all_p1 <- plot_all.f(targ = 5466)
sim_p1 <- plot_sim.f(targ = 5466)
all_p2 <- plot_all.f(targ = 7952)
sim_p2 <- plot_sim.f(targ = 7952)

# Combine: simulation panels on the top row, "all" panels on the bottom
plot_b_t_all <- plot_grid(
  sim_p1,
  sim_p2,
  all_p1,
  all_p2,
  labels = c("(a)", "(b)", "(c)", "(d)"),
  ncol = 2, align = "v", hjust = -4)

plot_b_t_all
# 16 9 |
# K-means clustering (k = 3) on the first two iris measurements
iris_data <- iris[, 1:2]
km.res <- kmeans(iris_data, centers = 3)
# If factoextra is missing, run install.packages("factoextra") first
library(factoextra)
fviz_cluster(km.res, data = iris_data,palette = "jco",ggtheme = theme_minimal()) | /Lesson01/exercise2.r | permissive | SmithaShivakumar/Unsupervised-Learning-with-R | R | false | false | 238 | r | iris_data<-iris[,1:2]
km.res<-kmeans(iris_data,3)
#if you have not installed library factoextra then use packages.install("factoextra")
library("factoextra")
fviz_cluster(km.res, data = iris_data,palette = "jco",ggtheme = theme_minimal()) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Class.R
\name{Class}
\alias{Class}
\title{Extract class smartly}
\usage{
Class(x)
}
\arguments{
\item{x}{an R object (data.frame or list)}
}
\value{
A vector of classes
}
\description{
Conveniently extract classes from more complex data objects (e.g. a
data.frame or list) rather than from a single atomic vector
}
\examples{
data(airquality)
Class(airquality)
}
| /man/Class.Rd | no_license | strategist922/yapomif | R | false | true | 432 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Class.R
\name{Class}
\alias{Class}
\title{Extract class smartly}
\usage{
Class(x)
}
\arguments{
\item{x}{an R object (data.frame or list)}
}
\value{
A vector of classes
}
\description{
Conveniently extract classes from more complex data objects (e.g. a
data.frame or list) rather than from a single atomic vector
}
\examples{
data(airquality)
Class(airquality)
}
|
######################################################################################
############################ GIS CLEANING MIC 2011 ##############################
######################################################################################
#Notes: MICs 2011 has only been processed at this point owing to inability to automate
#loading of MODIS datasets. 2014 tiles are being loaded and processed currently and shall be
#uploaded when available. For more information regarding automated scripts for MODIS downloads
#please see MODISDownload.zip file in the github
# Clean environment
# NOTE(review): wiping the global environment is kept for backward
# compatibility with the original workflow, but is discouraged in shared code.
rm(list = ls(all = TRUE))

# Install any packages that are missing, then load them
list.of.packages <- c("dplyr", "rgdal", "raster", "RCurl", "tools", "gtools", "sp")
new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[, "Package"])]
if (length(new.packages) > 0) install.packages(new.packages)

# Import libraries
library(dplyr)
library(rgdal)
library(raster)
library(RCurl)
library(tools)
library(gtools)
library(sp)

# Bring in original raster data.
# The source rasters are not cropped and do not share an extent or projection,
# so each one is:
#   a) cropped/masked to Ghana,
#   b) resampled to a common extent (taken from the altitude raster), and
#   c) reprojected (projection taken from the population-density raster).
rasters <- list.files("Data/Rasters/Original/", ".tif", full.names = TRUE, recursive = TRUE)
# Keep raster names: file name without directory or extension.
# (basename() is robust to any nesting depth, unlike the original
# split-on-"/" indexing, which assumed exactly four path components.)
raster_names <- file_path_sans_ext(basename(rasters))
# Import Ghana shapefile (country outline used for crop/mask)
Ghana <- shapefile("Data/Rasters/GHA_adm/GHA_adm0.shp")

# Crop, mask, resample, reproject and export each raster to the cleaned folder.
# NOTE: this loop takes a while; an already-run version is stored in "cleaned".
for (i in seq_along(rasters)) {
  r <- raster(rasters[i])
  r <- crop(r, extent(Ghana))
  r <- mask(r, Ghana)
  # BUG FIX: raster::resample()'s argument is `method`, not `resample`.
  # The misnamed argument was silently absorbed by `...` and only worked
  # because "bilinear" happens to be the default method.
  r <- resample(r, raster("Data/Rasters/to_fix/Extent.tif"), method = "bilinear")  # altitude raster fixes the extent
  r <- projectRaster(r, raster("Data/Rasters/to_fix/Projection.tif"))  # pop-density raster fixes the projection
  show(r)
  destfile <- paste0("Data/Rasters/cleaned/", raster_names[i], "_crop.tiff")
  writeRaster(r, destfile, format = "GTiff", overwrite = TRUE)
}

# Stack the cleaned rasters into one multi-layer object
list.rasters <- list.files("Data/Rasters/cleaned/", ".tif", full.names = TRUE, recursive = TRUE)
raster_stack <- stack(list.rasters)  # renamed from `all` to avoid masking base::all()
# Plot to inspect
plot(raster_stack)

# Bring in the survey .csv and convert it to a SpatialPointsDataFrame
MICS_gps <- read.csv("Data/GIS/MICS_mal.csv")
MICS_gps$ID <- as.numeric(row.names(MICS_gps))
names(MICS_gps)
proj4 <- crs(raster_stack)  # renamed from `projection` to avoid masking raster::projection()
# Columns 4:5 are taken as the point coordinates -- TODO confirm against MICS_mal.csv
MICS_2011_loc <- SpatialPointsDataFrame(coords = MICS_gps[, 4:5], data = MICS_gps, proj4string = proj4)
crs(MICS_2011_loc)
plot(MICS_2011_loc, main = "MICS locations")

# Extract raster values at the survey points
MICS_2011 <- extract(raster_stack, MICS_2011_loc, df = TRUE, method = "simple")
# Covariate values for the GS model need to be scaled
# (columns 2:12 assume 11 raster layers -- TODO confirm)
MICS_2011 <- cbind(MICS_2011[1], scale(MICS_2011[, 2:12]))
# Merge back to .csv file and export
MICS_gps %>%
left_join(MICS_2011, by="ID") %>%
write.csv("Data/Final_for analysis/MICS_2011.csv", row.names=F) | /GIS data cleaning.R | no_license | PunamA/DC_Ghana_mapping | R | false | false | 3,367 | r | ######################################################################################
############################ GIS CLEANING MIC 2011 ##############################
######################################################################################
#Notes: MICs 2011 has only been processed at this point owing to inability to automate
#loading of MODIS datasets. 2014 tiles are being loaded and processed currently and shall be
#uploaded when available. For more information regarding automated scripts for MODIS downloads
#please see MODISDownload.zip file in the github
# Clean environment
# NOTE(review): wiping the global environment is kept for backward
# compatibility with the original workflow, but is discouraged in shared code.
rm(list = ls(all = TRUE))

# Install any packages that are missing, then load them
list.of.packages <- c("dplyr", "rgdal", "raster", "RCurl", "tools", "gtools", "sp")
new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[, "Package"])]
if (length(new.packages) > 0) install.packages(new.packages)

# Import libraries
library(dplyr)
library(rgdal)
library(raster)
library(RCurl)
library(tools)
library(gtools)
library(sp)

# Bring in original raster data.
# The source rasters are not cropped and do not share an extent or projection,
# so each one is:
#   a) cropped/masked to Ghana,
#   b) resampled to a common extent (taken from the altitude raster), and
#   c) reprojected (projection taken from the population-density raster).
rasters <- list.files("Data/Rasters/Original/", ".tif", full.names = TRUE, recursive = TRUE)
# Keep raster names: file name without directory or extension.
# (basename() is robust to any nesting depth, unlike the original
# split-on-"/" indexing, which assumed exactly four path components.)
raster_names <- file_path_sans_ext(basename(rasters))
# Import Ghana shapefile (country outline used for crop/mask)
Ghana <- shapefile("Data/Rasters/GHA_adm/GHA_adm0.shp")

# Crop, mask, resample, reproject and export each raster to the cleaned folder.
# NOTE: this loop takes a while; an already-run version is stored in "cleaned".
for (i in seq_along(rasters)) {
  r <- raster(rasters[i])
  r <- crop(r, extent(Ghana))
  r <- mask(r, Ghana)
  # BUG FIX: raster::resample()'s argument is `method`, not `resample`.
  # The misnamed argument was silently absorbed by `...` and only worked
  # because "bilinear" happens to be the default method.
  r <- resample(r, raster("Data/Rasters/to_fix/Extent.tif"), method = "bilinear")  # altitude raster fixes the extent
  r <- projectRaster(r, raster("Data/Rasters/to_fix/Projection.tif"))  # pop-density raster fixes the projection
  show(r)
  destfile <- paste0("Data/Rasters/cleaned/", raster_names[i], "_crop.tiff")
  writeRaster(r, destfile, format = "GTiff", overwrite = TRUE)
}

# Stack the cleaned rasters into one multi-layer object
list.rasters <- list.files("Data/Rasters/cleaned/", ".tif", full.names = TRUE, recursive = TRUE)
raster_stack <- stack(list.rasters)  # renamed from `all` to avoid masking base::all()
# Plot to inspect
plot(raster_stack)

# Bring in the survey .csv and convert it to a SpatialPointsDataFrame
MICS_gps <- read.csv("Data/GIS/MICS_mal.csv")
MICS_gps$ID <- as.numeric(row.names(MICS_gps))
names(MICS_gps)
proj4 <- crs(raster_stack)  # renamed from `projection` to avoid masking raster::projection()
# Columns 4:5 are taken as the point coordinates -- TODO confirm against MICS_mal.csv
MICS_2011_loc <- SpatialPointsDataFrame(coords = MICS_gps[, 4:5], data = MICS_gps, proj4string = proj4)
crs(MICS_2011_loc)
plot(MICS_2011_loc, main = "MICS locations")

# Extract raster values at the survey points
MICS_2011 <- extract(raster_stack, MICS_2011_loc, df = TRUE, method = "simple")
# Covariate values for the GS model need to be scaled
# (columns 2:12 assume 11 raster layers -- TODO confirm)
MICS_2011 <- cbind(MICS_2011[1], scale(MICS_2011[, 2:12]))
# Merge back to .csv file and export
MICS_gps %>%
left_join(MICS_2011, by="ID") %>%
write.csv("Data/Final_for analysis/MICS_2011.csv", row.names=F) |
#####################################################################################################
#
# Summarize Adoption and Welfare Loss
# Xiliang Lin
# Jan, 2017
#
#####################################################################################################
# Settings
# NOTE(review): rm(list = ls()) and setwd() in scripts are discouraged but kept
# to preserve the original workflow.
rm(list = ls())
coffee_modules = c(1463)
maker_modules = 7755
# Nielsen market (DMA) codes used in the analysis
big_markets = c(501, 506, 504, 602, 803, 511, 539, 623, 618, 505,
613, 819, 524, 534, 533, 753, 510, 508, 514, 512,
517, 807, 751, 862, 535, 521, 548, 609, 566, 641)
# Load Necessary Packages
library(parallel)
library(data.table)
setNumericRounding(0)
library(ggplot2)
library(grid)
library(gridExtra)
# Set Working Folder Path Here
setwd("~/Keurig")
meta_dir = "Data/Meta-Data"
HMS_input_dir = "Data/HMS-Transactions"
mlogit_dir = "Data/MLogit-Data"
input_dir = "Data/Machine-Adoption"
output_dir = "Data/Counterfactual"
graph_dir = "Tabfigs/Counterfactuals"
# types of counterfactuals:
# ctype indexes the household scenario (1 = original households,
#   2 = homogeneous but variety seeking, 3 = heterogeneous but not variety
#   seeking -- see the plot titles below);
# mtype indexes product availability (1 = GMCR only, 2 = GMCR + third party,
#   3 = GMCR + licensed).
ctype = c(1,2,3)
mtype = c(1,2,3)
# Source function
source('Scripts/Counterfactuals/machine-functions.R')
#---------------------------------------------------------------------------------------------------#
# Load and combine data.
# HW-Full-Panel.RData provides `hw_panel` (household x period panel).  The loop
# below merges in one probability column per (c, m) counterfactual, read from
# Prob_type_<c>_mu_<m>.csv whose unnamed columns V1-V3 are household_code, t and
# the per-period probability.
load(paste(output_dir, "/HW-Full-Panel.RData", sep=""))
setkey(hw_panel, household_code, t)
for (c in ctype){
for (m in mtype){
hnames = names(hw_panel)
dtemp = fread(paste(output_dir, "/Prob_type_", c, "_mu_", m, ".csv",sep=""))
knames = c("household_code", "t", paste0("c",c,"m",m))
setnames(dtemp, paste0("V", 1:3), knames)
setkeyv(dtemp, c("household_code", "t"))
# inner join: nomatch=0L drops household/period rows missing from either side
hw_panel = dtemp[hw_panel, nomatch=0L]
setkey(hw_panel, household_code, t)
coln = c(hnames, knames[3])
setcolorder(hw_panel, coln)
}
}
# Aggregate to get summary adoption by date.
# NOTE(review): c*m relies on the loop variables leaking out of the loop above
# (both equal 3 here, so 9 slots); length(ctype)*length(mtype) would be safer.
hh_sum = as.list(1:(c*m))
k = 0
maxt = max(hw_panel[, t])
for (c in ctype){
for (m in mtype){
k = k+1
kvar = paste0("c",c,"m",m)
setnames(hw_panel, kvar, "pvar")
hw_panel[, prob:=as.numeric(NA)]
# prob at period `it` is the household's cumulative product of per-period
# probabilities up to it (survival of non-adoption through period it)
for (it in 1:maxt){
hw_panel[t<=it, prb_temp := prod(pvar), by = c("household_code")]
hw_panel[t==it, prob := prb_temp]
}
hw_panel[, `:=`(ct = as.integer(c), mt = as.integer(m))]
hh_sum[[k]] = hw_panel[, .(prob = mean(prob)), by = c("t", "ct", "mt")]
# stash the cumulative probability under a scenario-specific name and restore
# the scenario column name for the next iteration
setnames(hw_panel, "prob", paste0("prob",c,"_",m))
setnames(hw_panel, "pvar", kvar)
}
}
hh_sum = rbindlist(hh_sum)
# convert "not yet adopted" survival probability into an adoption rate
hh_sum[, prob := 1-prob]
setkey(hh_sum, ct, mt, t)
#---------------------------------------------------------------------------------------------------#
# Plot adoption rate over time: one panel per household scenario (ct), one line
# per product-availability scenario (mt).
# Fix: labs() takes its labels as named arguments; the original labs(list(...))
# form is not a supported calling convention and current ggplot2 ignores such
# labels, leaving the panels untitled.
hh_sum[, Type := ifelse(mt==1, "GMCR Only", ifelse(mt==2, "GMCR+Third Party", "GMCR+Licensed"))]
adpt1 = ggplot(hh_sum[.(1), ], aes(x=t, y=prob, group = mt, colour= Type))+
theme(legend.position=c(0.3,0.8), plot.title = element_text(size=12))+
geom_line(aes(linetype=Type))+ylim(0, 0.80)+labs(title="Original Households", x="Time", y="Adoption Rate")
adpt2 = ggplot(hh_sum[.(2), ], aes(x=t, y=prob, group = mt, colour= Type))+
theme(legend.position="none", plot.title = element_text(size=12), axis.text.y=element_blank(),
axis.ticks=element_blank(), axis.title.y=element_blank())+
geom_line(aes(linetype=Type))+ylim(0, 0.80)+labs(title="Homogeneous but Variety Seeking", x="Time")
adpt3 = ggplot(hh_sum[.(3), ], aes(x=t, y=prob, group = mt, colour= Type))+
theme(legend.position="none", plot.title = element_text(size=12), axis.text.y=element_blank(),
axis.ticks=element_blank(), axis.title.y=element_blank())+
geom_line(aes(linetype=Type))+ylim(0, 0.80)+labs(title="Heterogeneous but Not Variety Seeking", x="Time")
# Arrange the three panels side by side and save
multplot = marrangeGrob(list(adpt1, adpt2, adpt3), ncol=3, nrow=1, top="")
ggsave(paste(graph_dir, "/figs/adoption_rate.pdf", sep=""), multplot, width=10, height=4)
#---------------------------------------------------------------------------------------------------#
# Given adoption probability, compute expected per-household revenue by brand.
# One revenue panel file is loaded per household scenario c; each provides
# `hh_br_rev` with columns rev1/rev2/rev3 (revenue under each product scenario m).
# NOTE(review): as with hh_sum above, c*m depends on the leaked loop variables
# (both 3 here).
hh_rev = as.list(1:(c*m))
k = 0
# nh = number of panel rows (households) per week, used as the denominator below
hw_panel[, nh:=.N, by = c("week_end")]
for (c in ctype){
if (c==1){
filename = paste(output_dir, "/HH-Rev-Panel.RData", sep="")
} else if (c==2) {
filename = paste(output_dir, "/HH-Rev-Homo-Panel.RData", sep="")
} else{
filename = paste(output_dir, "/HH-Rev-No-Variety-Panel.RData", sep="")
}
load(filename)
# drop outlier revenue rows and keep Keurig-platform brands only
hh_br_rev = hh_br_rev[rev1<=100 & rev2<=100 & rev3<=100, ]
hh_br_rev = hh_br_rev[grepl("KEURIG", brand_descr), ]
setkey(hh_br_rev, household_code, week_end)
setkey(hw_panel, household_code, week_end)
hw_panel_merged = hw_panel[hh_br_rev, nomatch=0L]
for (m in mtype){
k = k+1
fvar1 = paste0("rev", m)
fvar2 = paste0("prob",c,"_",m)
# temporarily rename the scenario columns so the expressions below are generic
setnames(hw_panel_merged, c(fvar1, fvar2), c("fv1", "fv2"))
# expected revenue = revenue conditional on adoption * P(adopted by t),
# where fv2 is the cumulative non-adoption probability
hw_panel_merged[, rev := fv1*(1-fv2)]
hw_panel_merged[, `:=`(ct = as.integer(c), mt = as.integer(m))]
hh_rev[[k]] = hw_panel_merged[, .(rev = sum(rev)/mean(nh)), by = c("brand_descr", "t", "ct", "mt")]
setnames(hw_panel_merged, c("fv1", "fv2"), c(fvar1, fvar2))
}
}
hh_rev = rbindlist(hh_rev)
# total revenue across brands, and the GMCR-owned subset
hh_rev_agg = hh_rev[, .(rev = sum(rev)), by=c("t", "ct", "mt")]
hh_rev_gmcr = hh_rev[brand_descr%in%c("GREEN MOUNTAIN KEURIG", "KEURIG"),
.(rev = sum(rev)), by=c("t", "ct", "mt")]
setkey(hh_rev_agg, ct, mt, t)
setkey(hh_rev_gmcr, ct, mt, t)
#---------------------------------------------------------------------------------------------------#
# Plot total expected revenue over time: one panel per household scenario (ct),
# one line per product-availability scenario (mt).
# Fix: labs() takes named arguments; the original labs(list(...)) form is
# ignored by current ggplot2.
hh_rev_agg[, Type := ifelse(mt==1, "GMCR Only", ifelse(mt==2, "GMCR+Third Party", "GMCR+Licensed"))]
orev1 = ggplot(hh_rev_agg[.(1), ], aes(x=t, y=rev, group = mt, colour= Type))+
theme(legend.position=c(0.3,0.8), plot.title = element_text(size=12))+
geom_line(aes(linetype=Type))+ylim(0, 0.50)+labs(title="Original Households", x="Time", y="Revenue")
orev2 = ggplot(hh_rev_agg[.(2), ], aes(x=t, y=rev, group = mt, colour= Type))+
theme(legend.position="none", plot.title = element_text(size=12), axis.text.y=element_blank(),
axis.ticks=element_blank(), axis.title.y=element_blank())+
geom_line(aes(linetype=Type))+ylim(0, 0.50) + labs(title="Homogeneous but Variety Seeking", x="Time")
orev3 = ggplot(hh_rev_agg[.(3), ], aes(x=t, y=rev, group = mt, colour= Type))+
theme(legend.position="none", plot.title = element_text(size=12), axis.text.y=element_blank(),
axis.ticks=element_blank(), axis.title.y=element_blank())+
geom_line(aes(linetype=Type))+ylim(0, 0.50)+labs(title="Heterogeneous but Not Variety Seeking", x="Time")
multplot = marrangeGrob(list(orev1, orev2, orev3), ncol=3, nrow=1, top="")
ggsave(paste(graph_dir, "/figs/overall_rev.pdf", sep=""), multplot, width=10, height=4)
#---------------------------------------------------------------------------------------------------#
# Plot GMCR-owned-brand expected revenue over time (same layout as the overall
# revenue figure above).
# Fix: labs() takes named arguments; the original labs(list(...)) form is
# ignored by current ggplot2.
hh_rev_gmcr[, Type := ifelse(mt==1, "GMCR Only", ifelse(mt==2, "GMCR+Third Party", "GMCR+Licensed"))]
grev1 = ggplot(hh_rev_gmcr[.(1), ], aes(x=t, y=rev, group = mt, colour= Type))+
theme(legend.position=c(0.3,0.8), plot.title = element_text(size=12))+
geom_line(aes(linetype=Type))+ylim(0, 0.50)+labs(title="Original Households", x="Time", y="Revenue")
grev2 = ggplot(hh_rev_gmcr[.(2), ], aes(x=t, y=rev, group = mt, colour= Type))+
theme(legend.position="none", plot.title = element_text(size=12), axis.text.y=element_blank(),
axis.ticks=element_blank(), axis.title.y=element_blank())+
geom_line(aes(linetype=Type))+ylim(0, 0.50) + labs(title="Homogeneous but Variety Seeking", x="Time")
grev3 = ggplot(hh_rev_gmcr[.(3), ], aes(x=t, y=rev, group = mt, colour= Type))+
theme(legend.position="none", plot.title = element_text(size=12), axis.text.y=element_blank(),
axis.ticks=element_blank(), axis.title.y=element_blank())+
geom_line(aes(linetype=Type))+ylim(0, 0.50)+labs(title="Heterogeneous but Not Variety Seeking", x="Time")
multplot = marrangeGrob(list(grev1, grev2, grev3), ncol=3, nrow=1, top="")
ggsave(paste(graph_dir, "/figs/gmcr_rev.pdf", sep=""), multplot, width=10, height=4)
| /Counterfactuals/Adoption-Revenue-Summary.R | no_license | linxiliang/Keurig | R | false | false | 8,059 | r | #####################################################################################################
#
# Summarize Adoption and Welfare Loss
# Xiliang Lin
# Jan, 2017
#
#####################################################################################################
# Settings
rm(list = ls())
coffee_modules = c(1463)
maker_modules = 7755
big_markets = c(501, 506, 504, 602, 803, 511, 539, 623, 618, 505,
613, 819, 524, 534, 533, 753, 510, 508, 514, 512,
517, 807, 751, 862, 535, 521, 548, 609, 566, 641)
# Load Necessary Packages
library(parallel)
library(data.table)
setNumericRounding(0)
library(ggplot2)
library(grid)
library(gridExtra)
# Set Working Folder Path Here
setwd("~/Keurig")
meta_dir = "Data/Meta-Data"
HMS_input_dir = "Data/HMS-Transactions"
mlogit_dir = "Data/MLogit-Data"
input_dir = "Data/Machine-Adoption"
output_dir = "Data/Counterfactual"
graph_dir = "Tabfigs/Counterfactuals"
# types of counterfactuals
ctype = c(1,2,3)
mtype = c(1,2,3)
# Source function
source('Scripts/Counterfactuals/machine-functions.R')
#---------------------------------------------------------------------------------------------------#
# Load and combine data
load(paste(output_dir, "/HW-Full-Panel.RData", sep=""))
setkey(hw_panel, household_code, t)
for (c in ctype){
for (m in mtype){
hnames = names(hw_panel)
dtemp = fread(paste(output_dir, "/Prob_type_", c, "_mu_", m, ".csv",sep=""))
knames = c("household_code", "t", paste0("c",c,"m",m))
setnames(dtemp, paste0("V", 1:3), knames)
setkeyv(dtemp, c("household_code", "t"))
hw_panel = dtemp[hw_panel, nomatch=0L]
setkey(hw_panel, household_code, t)
coln = c(hnames, knames[3])
setcolorder(hw_panel, coln)
}
}
# Aggregate to get summary adoption by date
hh_sum = as.list(1:(c*m))
k = 0
maxt = max(hw_panel[, t])
for (c in ctype){
for (m in mtype){
k = k+1
kvar = paste0("c",c,"m",m)
setnames(hw_panel, kvar, "pvar")
hw_panel[, prob:=as.numeric(NA)]
for (it in 1:maxt){
hw_panel[t<=it, prb_temp := prod(pvar), by = c("household_code")]
hw_panel[t==it, prob := prb_temp]
}
hw_panel[, `:=`(ct = as.integer(c), mt = as.integer(m))]
hh_sum[[k]] = hw_panel[, .(prob = mean(prob)), by = c("t", "ct", "mt")]
setnames(hw_panel, "prob", paste0("prob",c,"_",m))
setnames(hw_panel, "pvar", kvar)
}
}
hh_sum = rbindlist(hh_sum)
hh_sum[, prob := 1-prob]
setkey(hh_sum, ct, mt, t)
#---------------------------------------------------------------------------------------------------#
# Plot adoption rate over time
hh_sum[, Type := ifelse(mt==1, "GMCR Only", ifelse(mt==2, "GMCR+Third Party", "GMCR+Licensed"))]
adpt1 = ggplot(hh_sum[.(1), ], aes(x=t, y=prob, group = mt, colour= Type))+
theme(legend.position=c(0.3,0.8), plot.title = element_text(size=12))+
geom_line(aes(linetype=Type))+ylim(0, 0.80)+labs(list(title="Original Households", x="Time", y="Adoption Rate"))
adpt2 = ggplot(hh_sum[.(2), ], aes(x=t, y=prob, group = mt, colour= Type))+
theme(legend.position="none", plot.title = element_text(size=12), axis.text.y=element_blank(),
axis.ticks=element_blank(), axis.title.y=element_blank())+
geom_line(aes(linetype=Type))+ylim(0, 0.80)+labs(list(title="Homogeneous but Variety Seeking", x="Time"))
adpt3 = ggplot(hh_sum[.(3), ], aes(x=t, y=prob, group = mt, colour= Type))+
theme(legend.position="none", plot.title = element_text(size=12), axis.text.y=element_blank(),
axis.ticks=element_blank(), axis.title.y=element_blank())+
geom_line(aes(linetype=Type))+ylim(0, 0.80)+labs(list(title="Heterogeneous but Not Variety Seeking", x="Time"))
multplot = marrangeGrob(list(adpt1, adpt2, adpt3), ncol=3, nrow=1, top="")
ggsave(paste(graph_dir, "/figs/adoption_rate.pdf", sep=""), multplot, width=10, height=4)
#---------------------------------------------------------------------------------------------------#
# Given adoption probability
hh_rev = as.list(1:(c*m))
k = 0
hw_panel[, nh:=.N, by = c("week_end")]
for (c in ctype){
if (c==1){
filename = paste(output_dir, "/HH-Rev-Panel.RData", sep="")
} else if (c==2) {
filename = paste(output_dir, "/HH-Rev-Homo-Panel.RData", sep="")
} else{
filename = paste(output_dir, "/HH-Rev-No-Variety-Panel.RData", sep="")
}
load(filename)
hh_br_rev = hh_br_rev[rev1<=100 & rev2<=100 & rev3<=100, ]
hh_br_rev = hh_br_rev[grepl("KEURIG", brand_descr), ]
setkey(hh_br_rev, household_code, week_end)
setkey(hw_panel, household_code, week_end)
hw_panel_merged = hw_panel[hh_br_rev, nomatch=0L]
for (m in mtype){
k = k+1
fvar1 = paste0("rev", m)
fvar2 = paste0("prob",c,"_",m)
setnames(hw_panel_merged, c(fvar1, fvar2), c("fv1", "fv2"))
hw_panel_merged[, rev := fv1*(1-fv2)]
hw_panel_merged[, `:=`(ct = as.integer(c), mt = as.integer(m))]
hh_rev[[k]] = hw_panel_merged[, .(rev = sum(rev)/mean(nh)), by = c("brand_descr", "t", "ct", "mt")]
setnames(hw_panel_merged, c("fv1", "fv2"), c(fvar1, fvar2))
}
}
hh_rev = rbindlist(hh_rev)
hh_rev_agg = hh_rev[, .(rev = sum(rev)), by=c("t", "ct", "mt")]
hh_rev_gmcr = hh_rev[brand_descr%in%c("GREEN MOUNTAIN KEURIG", "KEURIG"),
.(rev = sum(rev)), by=c("t", "ct", "mt")]
setkey(hh_rev_agg, ct, mt, t)
setkey(hh_rev_gmcr, ct, mt, t)
#---------------------------------------------------------------------------------------------------#
# Plot revenue over time
hh_rev_agg[, Type := ifelse(mt==1, "GMCR Only", ifelse(mt==2, "GMCR+Third Party", "GMCR+Licensed"))]
orev1 = ggplot(hh_rev_agg[.(1), ], aes(x=t, y=rev, group = mt, colour= Type))+
theme(legend.position=c(0.3,0.8), plot.title = element_text(size=12))+
geom_line(aes(linetype=Type))+ylim(0, 0.50)+labs(list(title="Original Households", x="Time", y="Revenue"))
orev2 = ggplot(hh_rev_agg[.(2), ], aes(x=t, y=rev, group = mt, colour= Type))+
theme(legend.position="none", plot.title = element_text(size=12), axis.text.y=element_blank(),
axis.ticks=element_blank(), axis.title.y=element_blank())+
geom_line(aes(linetype=Type))+ylim(0, 0.50) + labs(list(title="Homogeneous but Variety Seeking", x="Time"))
orev3 = ggplot(hh_rev_agg[.(3), ], aes(x=t, y=rev, group = mt, colour= Type))+
theme(legend.position="none", plot.title = element_text(size=12), axis.text.y=element_blank(),
axis.ticks=element_blank(), axis.title.y=element_blank())+
geom_line(aes(linetype=Type))+ylim(0, 0.50)+labs(list(title="Heterogeneous but Not Variety Seeking", x="Time"))
multplot = marrangeGrob(list(orev1, orev2, orev3), ncol=3, nrow=1, top="")
ggsave(paste(graph_dir, "/figs/overall_rev.pdf", sep=""), multplot, width=10, height=4)
#---------------------------------------------------------------------------------------------------#
# Plot revenue over GMCR
hh_rev_gmcr[, Type := ifelse(mt==1, "GMCR Only", ifelse(mt==2, "GMCR+Third Party", "GMCR+Licensed"))]
grev1 = ggplot(hh_rev_gmcr[.(1), ], aes(x=t, y=rev, group = mt, colour= Type))+
theme(legend.position=c(0.3,0.8), plot.title = element_text(size=12))+
geom_line(aes(linetype=Type))+ylim(0, 0.50)+labs(list(title="Original Households", x="Time", y="Revenue"))
grev2 = ggplot(hh_rev_gmcr[.(2), ], aes(x=t, y=rev, group = mt, colour= Type))+
theme(legend.position="none", plot.title = element_text(size=12), axis.text.y=element_blank(),
axis.ticks=element_blank(), axis.title.y=element_blank())+
geom_line(aes(linetype=Type))+ylim(0, 0.50) + labs(list(title="Homogeneous but Variety Seeking", x="Time"))
grev3 = ggplot(hh_rev_gmcr[.(3), ], aes(x=t, y=rev, group = mt, colour= Type))+
theme(legend.position="none", plot.title = element_text(size=12), axis.text.y=element_blank(),
axis.ticks=element_blank(), axis.title.y=element_blank())+
geom_line(aes(linetype=Type))+ylim(0, 0.50)+labs(list(title="Heterogeneous but Not Variety Seeking", x="Time"))
multplot = marrangeGrob(list(grev1, grev2, grev3), ncol=3, nrow=1, top="")
ggsave(paste(graph_dir, "/figs/gmcr_rev.pdf", sep=""), multplot, width=10, height=4)
|
#! /usr/bin/Rscript
# Differential-expression volcano plot for GEO series GSE44639 (miRNA counts),
# comparing pre-T1D patient vs. healthy Naive cell samples.
library(limma)
library(reshape2)
library(Glimma)
library(ggplot2)
library(ggrepel)
library(edgeR)
library(gplots)
library(amap)
library(rjson)
# All paths/URLs come from config/config.json
json <- fromJSON(file="config/config.json")
# ingestion inputs
# NOTE(review): raw_dir/temp_dir are concatenated with file names via paste0
# below, so they must end with a trailing "/".
file_url <- json["file_url"]$file_url
raw_dir <- json["raw_dir"]$raw_dir
temp_dir <- json["temp_dir"]$temp_dir
out_dir <- json["out_dir"]$out_dir
file_name <- json["file_name"]$file_name
# Example configuration values:
# file_url = 'https://www.ncbi.nlm.nih.gov/geo/download/?acc=GSE44639&format=file'
# raw_dir = "data/raw/"
# temp_dir = "data/temp/"
# out_dir = "data/out/"
# file_name = 'GSE44639_RAW.tar'
ingest_data <- function(file_url, file_name, raw_dir, temp_dir) {
if (!dir.exists(raw_dir)) {
dir.create(raw_dir, recursive = TRUE)
}
out = paste(raw_dir, file_name, sep="")
utils::download.file(file_url, destfile=out, mode="wb")
utils::untar(out, exdir=temp_dir)
}
# Download and unpack the GEO archive (side effects only)
ingest_data(file_url, file_name, raw_dir, temp_dir)
# transform ingested data into one concat df
# Read every gzipped tab-delimited sample file in `file_dir` and row-bind them
# into a single data.frame annotated with metadata parsed from the file name.
#
# Args:
#   file_dir:         directory holding the *.txt.gz sample files; must end in "/".
#   required_columns: list/vector of the only column names a file may contain;
#                     files carrying any other column are skipped.
# Returns: a data.frame with the original columns plus `healthy`, `cellType`
#   and `sampleName`; an empty list() when no file qualifies (same as the
#   original behaviour for an empty directory).
read_all_files_in_dir_with_columns <- function(file_dir, required_columns) {
  required_columns <- unlist(required_columns)
  files <- list.files(path = file_dir)
  # preallocate instead of growing via rbind inside the loop (O(n) not O(n^2))
  chunks <- vector("list", length(files))
  for (i in seq_along(files)) {
    fp <- paste0(file_dir, files[i])
    seqdata <- read.delim(gzfile(fp), stringsAsFactors = FALSE)
    # skip samples that carry any column outside the required set
    if (!all(colnames(seqdata) %in% required_columns)) {
      next
    }
    # Parse metadata from the file name itself; basename() is robust to the
    # depth of file_dir (the original indexed a fixed path component).
    base <- basename(fp)
    sampleName <- strsplit(base, ".txt")[[1]][1]
    # file names look like GSMxxxxxxx_<patient>_<cellType>.txt.gz, so the cell
    # type is the third "_"-separated token
    cellType <- strsplit(strsplit(base, "_")[[1]][3], ".txt")[[1]][1]
    # "P" in the file name marks patient (non-healthy) samples
    healthy <- !grepl("P", base, fixed = TRUE)
    seqdata$healthy <- healthy
    seqdata$cellType <- cellType
    seqdata$sampleName <- sampleName
    chunks[[i]] <- seqdata
  }
  keep <- chunks[!vapply(chunks, is.null, logical(1))]
  if (length(keep) == 0) {
    return(list())
  }
  do.call(rbind, keep)
}
# Columns a sample file is allowed to contain (anything else => file skipped)
required_columns <- list("miRNA", "Chromosome", "Position", "Strand", "Total.miRNA.reads", "RPM..reads.per.million.")
# NOTE(review): `all` shadows base::all() as a value (function calls still work)
all <- read_all_files_in_dir_with_columns(temp_dir, required_columns)
# SELECT NAIVE CELLS GROUP ONLY
pret1 <- all[all$healthy == FALSE,]
naive <- all[all$cellType == 'Naive',]
# TRANSFORM THE CONCAT DATAFRAME INTO PIVOTED COUNTS TABLE (SAMPLE ~ MIRNA)
countdata <- dcast(naive, miRNA ~ sampleName, value.var= "Total.miRNA.reads", fill= 0)
rownames(countdata) <- countdata$miRNA
countdata$miRNA <- NULL
# countdata
# TO TRANSFORM THE COUNTS INTO LOG(RPM)
# add 0.5 to counts to prevent log(0) = -inf
countdata <- countdata + 0.5
# NOTE(review): the 6/7 split hard-codes 6 pre-T1 samples followed by 7 healthy
# samples; this assumes dcast's alphabetical column order puts them in that
# order -- TODO confirm against the actual GSM file names.
group<-factor(c(rep("pret1Naive",6),rep("healthyNaive",7)))
d <- DGEList(counts=countdata,group=group) # DGEList helps us transform/separate the dataframe into multiple with summary info (libsize etc).
# #filter, need counts over 1 in 6 samples NOTE: NOT USING
# keep <- rowSums(cpm(d)>1) >= 6
# d <- d[keep,]
# dim (d)
# d$samples
# FILTER all mirna with less than 1.5*(# of total samples) of counts across all samples. removes lowly expressing mirna
dd <- d[rowSums(d$counts) >= 1.5*ncol(d), ]
# multiply by million and divide by column library size to get RPM
# NOTE(review): dd$count resolves to dd$counts only via $ partial matching, and
# the library sizes are taken from the unfiltered object d (same samples).
d<-1e+06 * dd$count/expandAsMatrix(d$samples$lib.size,dim(dd))
# take the log RPM
d<-log2(d)
# d
# FILTER USING ABS(F) FUNCTION
# CALCULATE ABS-F FUNCTION. has hard coded values for group size. helps us choose from the top DE mirna, which to talk about
# GOT FROM AUTHOR
# NOTE(review): the name F shadows R's built-in alias for FALSE from here on;
# renaming it (and the hard-coded 1:6 pre-T1 / 7:13 healthy column ranges)
# would be safer but is left untouched to preserve the author's statistic.
F<-data.frame(nrow(d))
x<-data.frame(nrow(d),2) # for calculating fold change later
for (i in 1:nrow(d)) {
# between-group deviation: group-size-weighted absolute deviations of each
# group mean from the grand mean (columns 1:6 = pre-T1, 7:13 = healthy)
a<-6*abs(mean(d[i,1:6])-mean(d[i,1:13]))+7*abs(mean(d[i,7:13])-mean(d[i,1:13]))
# within-group deviation: sum of absolute deviations from each group mean
bb = 0
for (j in 1:6) {
b<-abs(d[i,j]-mean(d[i,1:6]))
bb = bb +b
}
for (j in 7:13) {
b<-abs(d[i,j]-mean(d[i,7:13]))
bb = bb +b
}
F[i]<-12*a/bb
# X contains the mean of the logRPM of each group.
x[i,2]<-mean(d[i,1:6])
x[i,1]<-mean(d[i,7:13])
}
names(F)<-rownames(d)
rownames(x)<-rownames(d)
# F is a 1-row data.frame, so this counts how many miRNAs exceed the cutoff
absFOver9.6<-length(F[,F>9.6]) # 9.6 chosen from paper. corresponds to less than 1% false discovery rate in limma package
# absFOver9.6
#### VOLCANO PLOT
# f contains the fold change, F contains the abs-F value
f <- x[names(-sort(-F))[1:absFOver9.6],1]-x[names(-sort(-F))[1:absFOver9.6],2] # fold change on the mirna with ABS F>9.6
names(f)<-names(-sort(-F))[1:absFOver9.6]
# -sort(-f)
# FILTER by abs(logFC) >= 1
id<-c(names(f[f>=1]),names(f[-f>=1]))
# length(id)
# calculate logFC again for mirna which meet filters, might be redundant
y<-d[id,]
de<-rownames(x)%in%id
f<-c(x[de,1]-x[de,2])
saved_mirna<- rownames(x[de,])
names(f)<- saved_mirna
# f gives us the filtered names of mirna with absF > 9.6 with abs(logFC) >=1. helps us choose which to talk about
#f
# CREATE X AND Y VALUES FOR VOLCANO PLOT. X= LOGFC, Y= ABS(F).
fval<-as.numeric(F)
fold<-as.numeric(x[,1]-x[,2])
volcanodf<-data.frame(fold,fval)
## 20 highest F-values labeled
numToLabel = 20
# Scatter of all miRNAs; only the numToLabel points with the highest F get a
# text label (ifelse() yields NA for the rest, which ggrepel leaves unlabeled).
ggplot(volcanodf, aes(fold,fval)) +
geom_point(color='red',alpha = 0.5) +
geom_text_repel(aes(fold,fval),segment.size = 1, min.segment.length=0.5, box.padding=1,
segment.alpha = .8,
force = 1,segment.color = "grey50",label=as.character(ifelse(F >= as.numeric(-sort(-F)[numToLabel]), colnames(F), NA)),hjust=0,vjust=0) +
labs(y= "F-Value", x = "Log2 Fold Change") +
ggtitle("F-like Stat vs. Log2FC (Pre-T1 to Healthy Naive Cells)") + theme(text = element_text(size = 13))
# Earlier base-graphics version of the same plot, kept for reference:
# plot(fold,fval,main="F-like Stat vs. Log2FC (Pre-T1 to Healthy Naive Cells)",xlab="Log2 Fold change",ylab="F value",pch=20,col="blue")
# # abline(h=c(2,-2),v=10,col="red")
# # get top 10
# f10<- as.numeric(-sort(-F)[1:10])
# F10<- as.numeric(f[1:10])
# #only puts labels if the F-value is greater than absF 9.6
# # text(fold,fval,labels=as.character(ifelse(F >= 9.6, colnames(F), NA)),cex=0.87)
# ## 20 highest F-values labeled
# numToLabel = 20
# text(fold,fval,labels=as.character(ifelse(F >= as.numeric(-sort(-F)[numToLabel]), colnames(F), NA)),cex=0.87) # cex is fontsize
# # text(f10,F10,labels=colnames(-sort(-F)[1:10]),cex=0.8)
# Output path for the figure (saved by the ggsave() call that follows)
out_path = paste(out_dir, "NAIVE_VOLCANO_PLOT.png", sep="")
ggsave(out_path) | /project_18/src/volcano_plot_script.r | no_license | brianvi-98/projects-2019-2020 | R | false | false | 6,610 | r | #! /usr/bin/Rscript
library(limma)
library(reshape2)
library(Glimma)
library(ggplot2)
library(ggrepel)
library(edgeR)
library(gplots)
library(amap)
library(rjson)
json <- fromJSON(file="config/config.json")
# ingestion inputs
file_url <- json["file_url"]$file_url
raw_dir <- json["raw_dir"]$raw_dir
temp_dir <- json["temp_dir"]$temp_dir
out_dir <- json["out_dir"]$out_dir
file_name <- json["file_name"]$file_name
# file_url = 'https://www.ncbi.nlm.nih.gov/geo/download/?acc=GSE44639&format=file'
# raw_dir = "data/raw/"
# temp_dir = "data/temp/"
# out_dir = "data/out/"
# file_name = 'GSE44639_RAW.tar'
# ingest data function
ingest_data <- function(file_url, file_name, raw_dir, temp_dir) {
if (!dir.exists(raw_dir)) {
dir.create(raw_dir, recursive = TRUE)
}
out = paste(raw_dir, file_name, sep="")
utils::download.file(file_url, destfile=out, mode="wb")
utils::untar(out, exdir=temp_dir)
}
ingest_data(file_url, file_name, raw_dir, temp_dir)
# transform ingested data into one concat df
read_all_files_in_dir_with_columns <- function(file_dir, required_columns) {
files <- list.files(path=file_dir)
all <- list()
for (i in files) {
fp <- paste(file_dir, i, sep='')
seqdata <- read.delim(gzfile(fp), stringsAsFactors = FALSE)
columns <- colnames(seqdata)
has_unidentified_col <- FALSE
# ignore samples with columns not present in required_columns
for (c in columns) {
if (!any(required_columns==c)) {
has_unidentified_col <- TRUE
break
}
}
if (has_unidentified_col) {
next
}
sampleName = strsplit(fp, "/")[[1]][3]
sampleName = strsplit(sampleName, ".txt")[[1]][1]
cellType = strsplit(fp, "_")[1]
cellType = cellType[[1]][3]
cellType = strsplit(cellType, ".txt")
cellType = cellType[[1]][1]
healthy <- !grepl("P", fp, fixed=TRUE)
# add healthy column
seqdata$healthy <- healthy
seqdata$cellType <- cellType
seqdata$sampleName <- sampleName
if (length(all) == 0) {
all <- seqdata
} else {
all <- rbind(all, seqdata)
}
}
return(all)
}
required_columns <- list("miRNA", "Chromosome", "Position", "Strand", "Total.miRNA.reads", "RPM..reads.per.million.")
all <- read_all_files_in_dir_with_columns(temp_dir, required_columns)
# SELECT NAIVE CELLS GROUP ONLY
pret1 <- all[all$healthy == FALSE,]
naive <- all[all$cellType == 'Naive',]
# TRANSFORM THE CONCAT DATAFRAME INTO PIVOTED COUNTS TABLE (SAMPLE ~ MIRNA)
countdata <- dcast(naive, miRNA ~ sampleName, value.var= "Total.miRNA.reads", fill= 0)
rownames(countdata) <- countdata$miRNA
countdata$miRNA <- NULL
# countdata
# TO TRANSFORM THE COUNTS INTO LOG(RPM)
# add 0.5 to counts to prevent log(0) = -inf
countdata <- countdata + 0.5
group<-factor(c(rep("pret1Naive",6),rep("healthyNaive",7)))
# Differential-expression screen on miRNA counts (edgeR-style), followed by a
# volcano plot of a hand-rolled "abs-F" statistic vs. log2 fold change.
# NOTE(review): the column layout is assumed to be samples 1-6 = one condition
# and samples 7-13 = the other ("Pre-T1" vs. "Healthy Naive" per the plot
# title) -- confirm against how `countdata`/`group` are built upstream.
d <- DGEList(counts=countdata,group=group) # DGEList bundles counts with per-sample summaries (group, lib.size, etc.)
# #filter, need counts over 1 in 6 samples NOTE: NOT USING
# keep <- rowSums(cpm(d)>1) >= 6
# d <- d[keep,]
# dim (d)
# d$samples
# FILTER: drop lowly expressed miRNA with fewer than 1.5*(# of samples) total counts across all samples
dd <- d[rowSums(d$counts) >= 1.5*ncol(d), ]
# Counts -> reads-per-million: multiply by 1e6, divide by each sample's library size.
# NOTE(review): `dd$count` only resolves to dd$counts via partial `$` matching (fragile);
# also the library sizes come from the unfiltered `d`, not `dd` -- confirm that is intended.
d<-1e+06 * dd$count/expandAsMatrix(d$samples$lib.size,dim(dd))
# take the log2 of the RPM values
d<-log2(d)
# d
# FILTER USING ABS(F) FUNCTION
# CALCULATE ABS-F FUNCTION (obtained from the original authors). Group sizes (6 and 7)
# are hard coded; the statistic ranks differentially expressed miRNA.
# NOTE(review): the name F shadows base R's F/FALSE shorthand -- avoid relying on bare F below.
F<-data.frame(nrow(d))
x<-data.frame(nrow(d),2) # grown row-by-row below; holds per-group mean log2-RPM for the fold-change calculation
for (i in 1:nrow(d)) {
# between-group spread: each group's deviation from the overall mean, weighted by group size (6 and 7)
a<-6*abs(mean(d[i,1:6])-mean(d[i,1:13]))+7*abs(mean(d[i,7:13])-mean(d[i,1:13]))
bb = 0
# within-group spread: sum of absolute deviations from each group's own mean
for (j in 1:6) {
b<-abs(d[i,j]-mean(d[i,1:6]))
bb = bb +b
}
for (j in 7:13) {
b<-abs(d[i,j]-mean(d[i,7:13]))
bb = bb +b
}
# abs-F = scaled (between spread)/(within spread); the factor 12 is presumably N - 1 = 13 - 1 -- confirm with the source paper
F[i]<-12*a/bb
# x holds each group's mean log2-RPM: column 2 = samples 1:6, column 1 = samples 7:13
x[i,2]<-mean(d[i,1:6])
x[i,1]<-mean(d[i,7:13])
}
names(F)<-rownames(d)
rownames(x)<-rownames(d)
absFOver9.6<-length(F[,F>9.6]) # 9.6 taken from the paper: corresponds to < 1% false-discovery rate in the limma package
# absFOver9.6
#### VOLCANO PLOT
# f holds the log2 fold change (x col 1 minus col 2); F holds the abs-F values
f <- x[names(-sort(-F))[1:absFOver9.6],1]-x[names(-sort(-F))[1:absFOver9.6],2] # fold change for the miRNA with abs-F > 9.6
names(f)<-names(-sort(-F))[1:absFOver9.6]
# -sort(-f)
# FILTER by abs(logFC) >= 1: f >= 1 keeps up-regulated, -f >= 1 (i.e. f <= -1) keeps down-regulated
id<-c(names(f[f>=1]),names(f[-f>=1]))
# length(id)
# recompute logFC for the miRNA passing both filters (redundant, but keeps names aligned)
y<-d[id,]
de<-rownames(x)%in%id
f<-c(x[de,1]-x[de,2])
saved_mirna<- rownames(x[de,])
names(f)<- saved_mirna
# f now names the miRNA with abs-F > 9.6 AND abs(logFC) >= 1 -- the candidates to discuss
#f
# Volcano plot coordinates: x-axis = log2 fold change, y-axis = abs-F
fval<-as.numeric(F)
fold<-as.numeric(x[,1]-x[,2])
volcanodf<-data.frame(fold,fval)
## label only the 20 miRNA with the highest abs-F values
numToLabel = 20
ggplot(volcanodf, aes(fold,fval)) +
  geom_point(color='red',alpha = 0.5) +
  geom_text_repel(aes(fold,fval),segment.size = 1, min.segment.length=0.5, box.padding=1,
                  segment.alpha = .8,
                  force = 1,segment.color = "grey50",label=as.character(ifelse(F >= as.numeric(-sort(-F)[numToLabel]), colnames(F), NA)),hjust=0,vjust=0) +
  labs(y= "F-Value", x = "Log2 Fold Change") +
  ggtitle("F-like Stat vs. Log2FC (Pre-T1 to Healthy Naive Cells)") + theme(text = element_text(size = 13))
# plot(fold,fval,main="F-like Stat vs. Log2FC (Pre-T1 to Healthy Naive Cells)",xlab="Log2 Fold change",ylab="F value",pch=20,col="blue")
# # abline(h=c(2,-2),v=10,col="red")
# # get top 10
# f10<- as.numeric(-sort(-F)[1:10])
# F10<- as.numeric(f[1:10])
# #only puts labels if the F-value is greater than absF 9.6
# # text(fold,fval,labels=as.character(ifelse(F >= 9.6, colnames(F), NA)),cex=0.87)
# ## 20 highest F-values labeled
# numToLabel = 20
# text(fold,fval,labels=as.character(ifelse(F >= as.numeric(-sort(-F)[numToLabel]), colnames(F), NA)),cex=0.87) # cex is fontsize
# # text(f10,F10,labels=colnames(-sort(-F)[1:10]),cex=0.8)
# Save the most recent ggplot to <out_dir>/NAIVE_VOLCANO_PLOT.png
out_path = paste(out_dir, "NAIVE_VOLCANO_PLOT.png", sep="")
ggsave(out_path) |
## Peak flow reduction calculation.
## Loads per-year, per-scenario surface-runoff and underdrain time series
## produced by the simulation runs under ./results/year<j>/.
if (!require("pacman")) install.packages("pacman")
pacman::p_load(tidyverse, xts, zoo, lubridate)
# Read surface runoff time series -----------------------------------------
org_wd <- getwd()
x_gis_aggre <- list() # x_gis_aggre[[j]][[i]]: j = year (1..10), i = bioretention-cell (BC) area scenario
x_ud_aggre <- list() # underdrain flow, same [[year]][[scenario]] layout
x_org_aggre <- list() # original (baseline) runoff without BC cover, one series per year
x_runoff_aggre <- list() # total runoff = surface + underdrain, [[year]][[scenario]]
for (j in 1:10){
  # NOTE(review): the doubled "/" in the built path ("<wd>//results/...") is harmless on most systems
  file_location <- str_c(org_wd, "/", "/results/year",j,"/",collapse = "")
  files_surf <- list.files(pattern='surf.*\\.txt', recursive=TRUE, path = file_location)
  # rebuild file names by index: surf1..surfN (surf0 is the no-BC baseline, read separately below)
  files_surf <- paste("surf", c(1:(length(files_surf)-1)),".txt",sep="")
  files_ud <- paste("ud", c(1:(length(files_surf))),".txt",sep="")
  x_gis <- list()
  x_uds <- list()
  x_runoffs <- list()
  # NOTE(review): 81 scenarios are hard coded here; confirm this always equals length(files_surf)
  for (i in 1:81) {
    fid <- i
    file_name_surf <- paste0(file_location, files_surf[fid])
    file_name_ud <- paste0(file_location, files_ud[fid])
    x_gis[[i]] <- unname(unlist(read.table(file_name_surf, header = F)))/0.03 # convert from m3 per 1 min per 5000 m2 to L/s/ha
    x_uds[[i]] <- unname(unlist(read.table(file_name_ud, header = F)))/0.03
    x_runoffs[[i]] <- x_gis[[i]] + x_uds[[i]]
  }
  # baseline (no bioretention) series for this year
  org_name <- paste0(file_location,"surf0.txt")
  x_org_aggre[[j]] <- unname(unlist(read.table(org_name)))/0.03
  x_gis_aggre[[j]] <- x_gis
  x_ud_aggre[[j]] <- x_uds
  x_runoff_aggre[[j]] <- x_runoffs
}
rm(list=setdiff(ls(), c("x_runoff_aggre","x_org_aggre"))) # keep only the two aggregates; everything else is dropped
# Identify storm starts and ends ------------------------------------------
org_wd <- getwd()
read_rain <- function(file) {
  # Read one rain-gauge text file and return rain depth per 5-min step [mm].
  # The depth value (inch/h) starts at character 18 of each line; the first
  # 17 characters are skipped (presumably a fixed-width timestamp field --
  # confirm against the raw files).
  raw_lines <- readLines(file)
  depth_inch_per_h <- as.numeric(str_sub(raw_lines, 18, -1))
  # inch/h -> mm per 5-min step: 5/60 h per step (divide by 12), 25.4 mm/inch
  depth_inch_per_h / 12 * 25.4
}
sta_end <- function(x, par.inter) {
  # Find the start and end time of every storm event in a rainfall series.
  # Input:
  #   x         -- rainfall time series (one value per time step; 0 = dry)
  #   par.inter -- minimum dry-spell length (in time steps) separating events
  # Output:
  #   a data.frame with one row per event; column `sat` (sic, "start") holds
  #   the event's first wet index and `end` its last wet index.
  # NOTE(review): assumes x contains at least two non-zero entries; with 0 or 1
  # wet steps the 2:le loop and the indexing below misbehave -- confirm callers
  # guarantee this.
  loc.r <- which (x != 0)          # indices of all wet time steps
  le <- length(loc.r)
  no.r <- rep(0, le)               # number of wet steps belonging to each event
  dur.r <- rep (0, le)             # duration (first to last wet step) of each event
  j <- 1                           # index of the event currently being built
  for (i in 2:le) {
    # gap (in time steps) between consecutive wet steps
    temp <- loc.r[i] - loc.r[i - 1]
    no.r[j] <- no.r[j] + 1
    if (temp > par.inter) {
      # dry spell long enough: close the current event and start a new one
      if (no.r[j] == 1) {
        dur.r[j] <- 1
      } else {
        dur.r[j] <- loc.r[i - 1] - loc.r[i - no.r[j]] + 1
      }
      j <- j + 1
    }
    if (i == le) {
      # the last wet step always closes the final event
      no.r[j] <- no.r[j] + 1
      dur.r[j] <-
        loc.r[length(loc.r)] - loc.r[length(loc.r) - no.r[j] + 1] + 1
    }
  }
  # trim the per-event vectors to the number of events actually found
  temp <- length(which(no.r != 0))
  no.r <- no.r[1:temp]
  dur.r <- dur.r[1:temp]
  rm(i, j, le, temp)               # local clean-up only; has no effect on the caller
  # m holds each event's rainfall values in its own column (zero padded)
  m <- matrix(0, ncol = length(dur.r), nrow = max(dur.r))
  a <- ncol(m) # the number of rainfall events
  j = 1
  locmax <- rep(0, a)              # index of each event's peak (computed but not returned)
  sta_end_data <- data.frame(sat = rep(0, a),
                             end = rep(0, a))
  for (i in 1:a) {
    temp <- loc.r[j] + dur.r[i] - 1
    m[1:dur.r[i], i] <- x[loc.r[j]:temp]
    sta_end_data[i, ] <- c(loc.r[j], temp)
    tempmax <- which.max(m[, i])
    locmax [i] <- loc.r[j] + tempmax - 1
    j <- j + no.r[i]
  }
  sta_end_data
}
# Read all rain gauge files from ./data (rain_1.csv .. rain_N.csv).
# NOTE(review): file names are rebuilt from the index i, so the files must be
# numbered contiguously from 1; `files` is only used for its length.
setwd(paste0(org_wd,"/data"))
files <- list.files(pattern = '^rain.*\\.csv')
rains_list <- vector("list", length(files))
for (i in seq_along(files)){
  file <- paste0("rain_",i, ".csv")
  if (i != 1){
    rains_list[[i]] <- read_rain(file)
  } else {
    # the first gauge series is padded with a leading 0 -- presumably an
    # alignment fix specific to rain_1.csv; confirm against the raw data
    rains_list[[i]] <- c(0,read_rain(file))
  }
}
# Candidate inter-event times (IETD): 1..4 * 96 five-minute steps (= 8/16/24/32 h,
# matching the factor levels used for Figure 6 below)
inter <- as.list((1:4)*96)
sta_ends <- list() # sta_ends[[gauge]][[ietd]]: storm start/end indices per dry-spell definition
for (i in seq_along(rains_list)){
  rain <- c(rains_list[[i]])
  sta_ends[[i]] <- lapply(inter, sta_end, x = rain)
}
setwd(org_wd)
# Identify storm starts and ends 2nd part----------------------------------------------------
counter = 0 # NOTE(review): never used below -- candidate for removal
x_intervals <- list() # storm break points for one year, one element per IETD
x_intervals_aggre <- list() # x_intervals_aggre[[year]][[ietd]]
# NOTE(review): year 10's series length is applied to every year -- confirm all years are equal length
runoff_series_le <- length(x_org_aggre[[10]])
for (j in 1:10) {
  for (i in 1:length(inter)) {
    x_interval <- (sta_ends[[j]][[i]] - 1) * 5 + 1# rain is per 5 min, runoff per minute: rescale the indices; +1 re-anchors to 1-based indexing
    x_interval <- x_interval[, 1] # the start time
    x_intervals[[i]] <- c(0, x_interval, runoff_series_le)
  }
  x_intervals_aggre[[j]] <- x_intervals
}
find_peak_volume <- function(x, breaks){
  # Summarise each storm event in the minute-resolution runoff series `x`.
  # `breaks` holds the event boundary indices; every event gets its runoff
  # volume (m3/ha) and its peak discharge (same unit as `x`).
  event_labels <- 1:(length(breaks) - 1)
  per_minute <- data.frame(x = x, id = seq_along(x))
  per_minute %>%
    mutate(interval = cut(id, breaks = breaks, include.lowest = TRUE,
                          right = F, labels = event_labels)) %>%
    group_by(interval) %>%
    summarise(volume = sum(x)/1000*60, # l/s/ha to m3/ha
              peak = max(x))
}
find_peak_volumes <- function(x_aggre, x_intervals_aggre){
  # Compute per-storm runoff volume and peak for every year and every IETD
  # (inter-event time definition) interval set.
  #
  # x_aggre           -- list (one element per year) of minute runoff series
  # x_intervals_aggre -- list (one element per year) of lists of event break
  #                      vectors, one per IETD
  # Returns a single data frame with columns interval, volume, peak, year, ietd.
  #
  # Fix: loop bounds were hard coded to 10 years and 4 IETDs; they now follow
  # the lengths of the inputs (identical behaviour for the current data).
  n_years <- length(x_aggre)
  peak_volumes <- vector("list", n_years)
  for (i in seq_len(n_years)) {
    x <- x_aggre[[i]]
    n_ietd <- length(x_intervals_aggre[[i]])
    peak_volume <- vector("list", n_ietd)
    for (j in seq_len(n_ietd)) {
      breaks <- x_intervals_aggre[[i]][[j]]
      temp <- find_peak_volume(x, breaks)
      temp$interval <- as.numeric(temp$interval)
      temp$year <- i   # year index
      temp$ietd <- j   # IETD index (mapped to 8/16/24/32 h labels downstream)
      peak_volume[[j]] <- temp
    }
    peak_volumes[[i]] <- bind_rows(peak_volume)
  }
  bind_rows(peak_volumes)
}
reduc_percentage <- function(x, y) {
  # Percentage reduction of x relative to the baseline y (vectorised).
  # Negative results mean x exceeds the baseline.
  100 * (y - x) / y
}
# Per-storm volume/peak for the no-BC baseline, then for all 81 BC scenarios.
org_peak_volume <- find_peak_volumes(x_aggre = x_org_aggre, x_intervals_aggre = x_intervals_aggre) # baseline (no bioretention) peaks/volumes
BR_peak_volume <- vector("list", 81) # Bioretention cases
for (i in 1:81){
  # collect scenario i's series across all 10 years (re-nesting from [[year]][[scenario]])
  x_aggre <- vector("list", 10)
  for (j in 1:10){
    x_aggre[[j]] <- x_runoff_aggre[[j]][[i]]
  }
  df <- find_peak_volumes(x_aggre = x_aggre, x_intervals_aggre = x_intervals_aggre)
  df$gi <- i * 0.1 + 1.9 # bioretention area [%]: scenarios 1..81 map to 2.0, 2.1, ..., 10.0
  BR_peak_volume[[i]] <- df
}
BR_peak_volumes <- bind_rows(BR_peak_volume)
# Join each scenario storm to its baseline storm and express reductions in %.
BR_peak_volumes <- BR_peak_volumes %>%
  left_join(org_peak_volume, by = c("year","ietd","interval")) %>%
  select(ietd, year, interval, gi, everything()) %>%
  rename(volume = volume.x,
         peak = peak.x,
         org_volume = volume.y,
         org_peak = peak.y) %>%
  filter(org_volume != 0) %>%
  mutate(volume_perc = map2_dbl(volume, org_volume, reduc_percentage),
         peak_perc = map2_dbl(peak, org_peak, reduc_percentage),
         volume_perc = ifelse(volume_perc < 0, 0, volume_perc), # clamp: increases count as 0 % reduction
         peak_perc = ifelse(peak_perc < 0, 0, peak_perc))
# Figure 5 ----------------------------------------------------------------
# Per-storm peak-flow reduction vs. bioretention area for one IETD, faceted
# by quartile of the baseline storm peak, with quantile performance curves.
ietd_ind <- 2 # IETD index 2 (= 16 h; see the factor levels set for Figure 6)
# points: whole-percent BC areas only, to keep the jitter cloud readable
data_plot <- BR_peak_volumes %>%
  filter(ietd == ietd_ind,
         gi %% 1 == 0) %>%
  mutate(peak_percentile = cut_number(org_peak,4))
levels(data_plot$peak_percentile) <- c("[0,25)", "[25,50)","[50,75)","[75,100]")
# curves: all BC areas, summarised per facet into q10/q50/q90 and a
# baseline-peak-weighted mean
data_plot2 <- BR_peak_volumes %>%
  filter(ietd == ietd_ind) %>%
  mutate(peak_percentile = cut_number(org_peak,4))
levels(data_plot2$peak_percentile) <- c("[0,25)", "[25,50)","[50,75)","[75,100]")
data_plot2 <- data_plot2 %>%
  group_by(peak_percentile, gi) %>%
  summarise(q50 = median(peak_perc),
            q90 = quantile(peak_perc, 0.9),
            q10 = quantile(peak_perc, 0.1),
            q.weighted = (weighted.mean(peak_perc, org_peak))) %>%
  gather(Quantiles, value, q50:q.weighted)
# the weighted-mean curve is drawn as a separate layer, so split it out
data_plot3 <- data_plot2 %>%
  filter(Quantiles == "q.weighted")
data_plot2 <- data_plot2 %>%
  filter(Quantiles != "q.weighted")
ggplot(data_plot, aes(x = gi, y = peak_perc)) +
  geom_jitter(size = 1.4, shape = 1, colour = "steelblue", alpha = 0.5, width = 0.2, height = 0, stroke = 0.3) +
  geom_line(data = data_plot2, aes(x = gi, y = value, linetype = Quantiles), size = 0.5, colour = "red") +
  geom_line(data = data_plot3, aes(x = gi, y = value, linetype = Quantiles), size = 0.5, colour = "red") +
  scale_linetype_manual(values = c("solid","dashed","dotdash","dotted")) +
  facet_wrap(~ peak_percentile) +
  labs(x = "Bioretention cell area [%]",
       y = "Reduction percentage of peak flow [%]",
       linetype = "Performance curves") +
  scale_x_continuous(breaks = c(2:10)) +
  theme_bw(base_size = 7) +
  theme(legend.key.width = unit(0.9, "cm"))
ggsave("./figures/figure5.pdf", width = 190, height = 120, units = "mm")
save(BR_peak_volumes,data_plot,data_plot2, data_plot3, file='./figures/Figure5.Rda')
load("./figures/Figure5.Rda") # NOTE(review): reloading what was just saved is a no-op -- candidate for removal
# Figure 6
# Look-up curves: required BC area for a target peak-flow reduction, per IETD.
library(modelr) # for add_predictions() used below
data_plot3 <- BR_peak_volumes %>%
  mutate(peak_percentile = cut_number(org_peak,4))
levels(data_plot3$peak_percentile) <- c("[0,25)", "[25,50)","[50,75)","[75,100]")
# same quantile summaries as Figure 5, but keeping all four IETDs
data_plot3 <- data_plot3 %>%
  group_by(peak_percentile, gi, ietd) %>%
  summarise(q50 = median(peak_perc),
            q90 = quantile(peak_perc, 0.9),
            q10 = quantile(peak_perc, 0.1),
            q.weighted = (weighted.mean(peak_perc, org_peak))) %>%
  gather(Quantiles, value, q50:q.weighted)
data_plot3$ietd <- as.factor(data_plot3$ietd)
levels(data_plot3$ietd) <- c("8 h", "16 h","24 h","32 h")
# point markers: subsample to every half-percent BC area
data_plot4 <- data_plot3 %>%
  filter(gi %% 0.5 == 0)
loess_model <- function(data) {
  # Fit a local-regression smoother of `value` on `gi` (default loess span);
  # used to turn the scattered quantile points into smooth look-up curves.
  stats::loess(value ~ gi, data = data)
}
# Smooth each quantile curve with loess, then force it to be monotone
# non-decreasing (cummax) and clamp predictions into [0, 100] %.
data_plot3 <- data_plot3 %>%
  group_by(peak_percentile, Quantiles, ietd) %>%
  nest()%>%
  mutate(model = map(data, loess_model),
         pred = map2(data, model, add_predictions))%>%
  unnest(pred) %>%
  mutate(value = pred) %>%
  select(-pred) %>%
  group_by(peak_percentile, Quantiles, ietd)%>%
  mutate(value = cummax(value)) %>%
  ungroup()
data_plot3$value[data_plot3$value > 100] <- 100
data_plot3$value[data_plot3$value < 0] <- 0
# Flipped axes: read "target reduction -> required BC area" off the curves.
ggplot(data_plot3, aes(gi, value, linetype = Quantiles, colour = ietd))+
  geom_line(size = 0.5) +
  guides(linetype = guide_legend(override.aes= list(color = "black"))) +
  scale_linetype_manual(values = c("solid","longdash","dotted","dotdash")) +
  geom_point(data = data_plot4, aes(gi, value, color = ietd, shape = ietd), stroke = 0.3) +
  scale_shape_discrete(solid = F) +
  facet_wrap(~peak_percentile) +
  scale_y_continuous(breaks = c(0:5)*20) +
  theme_bw(base_size = 7) +
  coord_flip() +
  theme(legend.key.width = unit(0.8, "cm")) +
  labs(y = "Target peak flow reduction percentage [%]",
       x = "Required bioretention cell area [%]",
       colour = "IETD",
       shape = "IETD",
       linetype = "Look-up curves"
  )
ggsave("./figures/figure6.pdf", width = 190, height = 120, units = "mm")
save(BR_peak_volumes,data_plot,data_plot2, data_plot3, data_plot4, file='./figures/Figure6.Rda')
# Analysis ----------------------------------------------------------------
# Export the 16-h look-up curves (q10 and weighted mean) to CSV for analysis.
library(tidyverse)
load("./figures/Figure6.Rda")
data_analysis <- data_plot3 %>%
  filter(ietd == "16 h",
         Quantiles %in% c("q10","q.weighted")) %>%
  select(Quantiles, gi, value, peak_percentile )
write.csv(data_analysis, "./figures/fig6.csv")
| /peak_flow.R | permissive | stsfk/bioretention_optimize | R | false | false | 10,847 | r | ## peak flow reduction calculation
if (!require("pacman")) install.packages("pacman")
pacman::p_load(tidyverse, xts, zoo, lubridate)
# Read surface runoff time series -----------------------------------------
org_wd <- getwd()
x_gis_aggre <- list() # x_gis_aggre[[j]][[i]], j is year, i is different BC %
x_ud_aggre <- list() # x_ud[[j]][[i]]
x_org_aggre <- list() # orginal runoff without BC cover
x_runoff_aggre <- list()
for (j in 1:10){
file_location <- str_c(org_wd, "/", "/results/year",j,"/",collapse = "")
files_surf <- list.files(pattern='surf.*\\.txt', recursive=TRUE, path = file_location)
files_surf <- paste("surf", c(1:(length(files_surf)-1)),".txt",sep="")
files_ud <- paste("ud", c(1:(length(files_surf))),".txt",sep="")
x_gis <- list()
x_uds <- list()
x_runoffs <- list()
for (i in 1:81) {
fid <- i
file_name_surf <- paste0(file_location, files_surf[fid])
file_name_ud <- paste0(file_location, files_ud[fid])
x_gis[[i]] <- unname(unlist(read.table(file_name_surf, header = F)))/0.03 # convert from m3 per 1 min per 5000 m2 to L/s/ha
x_uds[[i]] <- unname(unlist(read.table(file_name_ud, header = F)))/0.03
x_runoffs[[i]] <- x_gis[[i]] + x_uds[[i]]
}
org_name <- paste0(file_location,"surf0.txt")
x_org_aggre[[j]] <- unname(unlist(read.table(org_name)))/0.03
x_gis_aggre[[j]] <- x_gis
x_ud_aggre[[j]] <- x_uds
x_runoff_aggre[[j]] <- x_runoffs
}
rm(list=setdiff(ls(), c("x_runoff_aggre","x_org_aggre"))) # clean
# Identify storm starts and ends ------------------------------------------
org_wd <- getwd()
read_rain <- function(file){
temp <- readLines(file)
rain_depths <- as.numeric(str_sub(temp, 18, -1))
rain_depths<- rain_depths/12*25.4 # convert from inch/h to mm per 5 min
return(c(rain_depths))
}
sta_end <- function(x, par.inter) {
# This function is used to find start and end time of a storm event
# Input:
# x is time series
# par.inter is the dry spell interval
# Output:
# sta_end_data is a list containing the start end information
loc.r <- which (x != 0)
le <- length(loc.r)
no.r <- rep(0, le)
dur.r <- rep (0, le)
j <- 1
for (i in 2:le) {
temp <- loc.r[i] - loc.r[i - 1]
no.r[j] <- no.r[j] + 1
if (temp > par.inter) {
if (no.r[j] == 1) {
dur.r[j] <- 1
} else {
dur.r[j] <- loc.r[i - 1] - loc.r[i - no.r[j]] + 1
}
j <- j + 1
}
if (i == le) {
no.r[j] <- no.r[j] + 1
dur.r[j] <-
loc.r[length(loc.r)] - loc.r[length(loc.r) - no.r[j] + 1] + 1
}
}
temp <- length(which(no.r != 0))
no.r <- no.r[1:temp]
dur.r <- dur.r[1:temp]
rm(i, j, le, temp)
m <- matrix(0, ncol = length(dur.r), nrow = max(dur.r))
a <- ncol(m) # the number of rainfall event
j = 1
locmax <- rep(0, a)
sta_end_data <- data.frame(sat = rep(0, a),
end = rep(0, a))
for (i in 1:a) {
temp <- loc.r[j] + dur.r[i] - 1
m[1:dur.r[i], i] <- x[loc.r[j]:temp]
sta_end_data[i, ] <- c(loc.r[j], temp)
tempmax <- which.max(m[, i])
locmax [i] <- loc.r[j] + tempmax - 1
j <- j + no.r[i]
}
sta_end_data
}
setwd(paste0(org_wd,"/data"))
files <- list.files(pattern = '^rain.*\\.csv')
rains_list <- vector("list", length(files))
for (i in seq_along(files)){
file <- paste0("rain_",i, ".csv")
if (i != 1){
rains_list[[i]] <- read_rain(file)
} else {
rains_list[[i]] <- c(0,read_rain(file))
}
}
inter <- as.list((1:4)*96)
sta_ends <- list() # sta_ends stores the start and end index for different par.inter
for (i in seq_along(rains_list)){
rain <- c(rains_list[[i]])
sta_ends[[i]] <- lapply(inter, sta_end, x = rain)
}
setwd(org_wd)
# Identify storm starts and ends 2nd part----------------------------------------------------
counter = 0
x_intervals <- list() # x_intervals is different storm interval
x_intervals_aggre <- list() # aggregated x_intervals
runoff_series_le <- length(x_org_aggre[[10]])
for (j in 1:10) {
for (i in 1:length(inter)) {
x_interval <- (sta_ends[[j]][[i]] - 1) * 5 + 1# every 5 mins, times 5 to convert to every minute, plus 1 is for correct indexing
x_interval <- x_interval[, 1] # the start time
x_intervals[[i]] <- c(0, x_interval, runoff_series_le)
}
x_intervals_aggre[[j]] <- x_intervals
}
# Discharge rate calculation --------------------------------------------------------
find_peak_volume <- function(x, breaks){
# This function is used to find runoff peak and volume for each individual storms
labels = 1:(length(breaks) - 1)
x <- data.frame(x = x, id = seq_along(x)) %>%
mutate(interval = cut(id, breaks=breaks, include.lowest = TRUE, right = F, labels = labels)) %>%
group_by(interval) %>%
summarise(volume = sum(x)/1000*60, # l/s/ha to m3/ha
peak = max(x))
return(x)
}
find_peak_volumes <- function(x_aggre, x_intervals_aggre){
peak_volumes <- vector("list", 10)
for (i in 1:10){
x <- x_aggre[[i]]
peak_volume <- vector("list", 4) # 4 interval
for (j in 1:4) {
breaks <- x_intervals_aggre[[i]][[j]]
temp <- find_peak_volume(x, breaks)
temp$interval <- as.numeric(temp$interval)
temp$year <- i
temp$ietd <- j
peak_volume[[j]] <- temp
}
peak_volume <- bind_rows(peak_volume)
peak_volumes[[i]] <- peak_volume
}
temp <- bind_rows(peak_volumes)
return(temp)
}
reduc_percentage <- function (x, y){
(y - x)/y * 100
}
org_peak_volume <- find_peak_volumes(x_aggre = x_org_aggre, x_intervals_aggre = x_intervals_aggre) # original peak volume
BR_peak_volume <- vector("list", 81) # Bioretention cases
for (i in 1:81){
x_aggre <- vector("list", 10)
for (j in 1:10){
x_aggre[[j]] <- x_runoff_aggre[[j]][[i]]
}
df <- find_peak_volumes(x_aggre = x_aggre, x_intervals_aggre = x_intervals_aggre)
df$gi <- i * 0.1 + 1.9
BR_peak_volume[[i]] <- df
}
BR_peak_volumes <- bind_rows(BR_peak_volume)
BR_peak_volumes <- BR_peak_volumes %>%
left_join(org_peak_volume, by = c("year","ietd","interval")) %>%
select(ietd, year, interval, gi, everything()) %>%
rename(volume = volume.x,
peak = peak.x,
org_volume = volume.y,
org_peak = peak.y) %>%
filter(org_volume != 0) %>%
mutate(volume_perc = map2_dbl(volume, org_volume, reduc_percentage),
peak_perc = map2_dbl(peak, org_peak, reduc_percentage),
volume_perc = ifelse(volume_perc < 0, 0, volume_perc),
peak_perc = ifelse(peak_perc < 0, 0, peak_perc))
# Figure 5 ----------------------------------------------------------------
ietd_ind <- 2
data_plot <- BR_peak_volumes %>%
filter(ietd == ietd_ind,
gi %% 1 == 0) %>%
mutate(peak_percentile = cut_number(org_peak,4))
levels(data_plot$peak_percentile) <- c("[0,25)", "[25,50)","[50,75)","[75,100]")
data_plot2 <- BR_peak_volumes %>%
filter(ietd == ietd_ind) %>%
mutate(peak_percentile = cut_number(org_peak,4))
levels(data_plot2$peak_percentile) <- c("[0,25)", "[25,50)","[50,75)","[75,100]")
data_plot2 <- data_plot2 %>%
group_by(peak_percentile, gi) %>%
summarise(q50 = median(peak_perc),
q90 = quantile(peak_perc, 0.9),
q10 = quantile(peak_perc, 0.1),
q.weighted = (weighted.mean(peak_perc, org_peak))) %>%
gather(Quantiles, value, q50:q.weighted)
data_plot3 <- data_plot2 %>%
filter(Quantiles == "q.weighted")
data_plot2 <- data_plot2 %>%
filter(Quantiles != "q.weighted")
ggplot(data_plot, aes(x = gi, y = peak_perc)) +
geom_jitter(size = 1.4, shape = 1, colour = "steelblue", alpha = 0.5, width = 0.2, height = 0, stroke = 0.3) +
geom_line(data = data_plot2, aes(x = gi, y = value, linetype = Quantiles), size = 0.5, colour = "red") +
geom_line(data = data_plot3, aes(x = gi, y = value, linetype = Quantiles), size = 0.5, colour = "red") +
scale_linetype_manual(values = c("solid","dashed","dotdash","dotted")) +
facet_wrap(~ peak_percentile) +
labs(x = "Bioretention cell area [%]",
y = "Reduction percentage of peak flow [%]",
linetype = "Performance curves") +
scale_x_continuous(breaks = c(2:10)) +
theme_bw(base_size = 7) +
theme(legend.key.width = unit(0.9, "cm"))
ggsave("./figures/figure5.pdf", width = 190, height = 120, units = "mm")
save(BR_peak_volumes,data_plot,data_plot2, data_plot3, file='./figures/Figure5.Rda')
load("./figures/Figure5.Rda")
# Figure 6
library(modelr)
data_plot3 <- BR_peak_volumes %>%
mutate(peak_percentile = cut_number(org_peak,4))
levels(data_plot3$peak_percentile) <- c("[0,25)", "[25,50)","[50,75)","[75,100]")
data_plot3 <- data_plot3 %>%
group_by(peak_percentile, gi, ietd) %>%
summarise(q50 = median(peak_perc),
q90 = quantile(peak_perc, 0.9),
q10 = quantile(peak_perc, 0.1),
q.weighted = (weighted.mean(peak_perc, org_peak))) %>%
gather(Quantiles, value, q50:q.weighted)
data_plot3$ietd <- as.factor(data_plot3$ietd)
levels(data_plot3$ietd) <- c("8 h", "16 h","24 h","32 h")
data_plot4 <- data_plot3 %>%
filter(gi %% 0.5 == 0)
loess_model <- function(data) {
loess(value ~ gi, data = data)
}
data_plot3 <- data_plot3 %>%
group_by(peak_percentile, Quantiles, ietd) %>%
nest()%>%
mutate(model = map(data, loess_model),
pred = map2(data, model, add_predictions))%>%
unnest(pred) %>%
mutate(value = pred) %>%
select(-pred) %>%
group_by(peak_percentile, Quantiles, ietd)%>%
mutate(value = cummax(value)) %>%
ungroup()
data_plot3$value[data_plot3$value > 100] <- 100
data_plot3$value[data_plot3$value < 0] <- 0
ggplot(data_plot3, aes(gi, value, linetype = Quantiles, colour = ietd))+
geom_line(size = 0.5) +
guides(linetype = guide_legend(override.aes= list(color = "black"))) +
scale_linetype_manual(values = c("solid","longdash","dotted","dotdash")) +
geom_point(data = data_plot4, aes(gi, value, color = ietd, shape = ietd), stroke = 0.3) +
scale_shape_discrete(solid = F) +
facet_wrap(~peak_percentile) +
scale_y_continuous(breaks = c(0:5)*20) +
theme_bw(base_size = 7) +
coord_flip() +
theme(legend.key.width = unit(0.8, "cm")) +
labs(y = "Target peak flow reduction percentage [%]",
x = "Required bioretention cell area [%]",
colour = "IETD",
shape = "IETD",
linetype = "Look-up curves"
)
ggsave("./figures/figure6.pdf", width = 190, height = 120, units = "mm")
save(BR_peak_volumes,data_plot,data_plot2, data_plot3, data_plot4, file='./figures/Figure6.Rda')
# Analysis ----------------------------------------------------------------
library(tidyverse)
load("./figures/Figure6.Rda")
data_analysis <- data_plot3 %>%
filter(ietd == "16 h",
Quantiles %in% c("q10","q.weighted")) %>%
select(Quantiles, gi, value, peak_percentile )
write.csv(data_analysis, "./figures/fig6.csv")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fgsea.R
\name{fgseaSimple}
\alias{fgseaSimple}
\title{Runs preranked gene set enrichment analysis.}
\usage{
fgseaSimple(
pathways,
stats,
nperm,
minSize = 1,
maxSize = length(stats) - 1,
scoreType = c("std", "pos", "neg"),
nproc = 0,
gseaParam = 1,
BPPARAM = NULL
)
}
\arguments{
\item{pathways}{List of gene sets to check.}
\item{stats}{Named vector of gene-level stats. Names should be the same as in 'pathways'}
\item{nperm}{Number of permutations to do. Minimal possible nominal p-value is about 1/nperm}
\item{minSize}{Minimal size of a gene set to test. All pathways below the threshold are excluded.}
\item{maxSize}{Maximal size of a gene set to test. All pathways above the threshold are excluded.}
\item{scoreType}{This parameter defines the GSEA score type.
Possible options are ("std", "pos", "neg").
By default ("std") the enrichment score is computed as in the original GSEA.
The "pos" and "neg" score types are intended to be used for one-tailed tests
(i.e. when one is interested only in positive ("pos") or negative ("neg") enrichment).}
\item{nproc}{If not equal to zero sets BPPARAM to use nproc workers (default = 0).}
\item{gseaParam}{GSEA parameter value, all gene-level statistics are raised to the power of `gseaParam`
before calculation of GSEA enrichment scores.}
\item{BPPARAM}{Parallelization parameter used in bplapply.
Can be used to specify cluster to run. If not initialized explicitly or
by setting `nproc` default value `bpparam()` is used.}
}
\value{
A table with GSEA results. Each row corresponds to a tested pathway.
The columns are the following:
\itemize{
\item pathway -- name of the pathway as in `names(pathway)`;
\item pval -- an enrichment p-value;
\item padj -- a BH-adjusted p-value;
\item ES -- enrichment score, same as in Broad GSEA implementation;
\item NES -- enrichment score normalized to mean enrichment of random samples of the same size;
\item nMoreExtreme -- a number of times a random gene set had a more
extreme enrichment score value;
\item size -- size of the pathway after removing genes not present in `names(stats)`.
\item leadingEdge -- vector with indexes of leading edge genes that drive the enrichment, see \url{http://software.broadinstitute.org/gsea/doc/GSEAUserGuideTEXT.htm#_Running_a_Leading}.
}
}
\description{
The function takes about \emph{O(nk^\{3/2\})} time,
where \emph{n} is number of permutations and \emph{k} is a maximal
size of the pathways. That means that setting `maxSize` parameter with a value of ~500
is strongly recommended.
}
\examples{
data(examplePathways)
data(exampleRanks)
fgseaRes <- fgseaSimple(examplePathways, exampleRanks, nperm=10000, maxSize=500)
# Testing only one pathway is implemented in a more efficient manner
fgseaRes1 <- fgseaSimple(examplePathways[1], exampleRanks, nperm=10000)
}
| /man/fgseaSimple.Rd | permissive | ctlab/fgsea | R | false | true | 2,910 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fgsea.R
\name{fgseaSimple}
\alias{fgseaSimple}
\title{Runs preranked gene set enrichment analysis.}
\usage{
fgseaSimple(
pathways,
stats,
nperm,
minSize = 1,
maxSize = length(stats) - 1,
scoreType = c("std", "pos", "neg"),
nproc = 0,
gseaParam = 1,
BPPARAM = NULL
)
}
\arguments{
\item{pathways}{List of gene sets to check.}
\item{stats}{Named vector of gene-level stats. Names should be the same as in 'pathways'}
\item{nperm}{Number of permutations to do. Minimal possible nominal p-value is about 1/nperm}
\item{minSize}{Minimal size of a gene set to test. All pathways below the threshold are excluded.}
\item{maxSize}{Maximal size of a gene set to test. All pathways above the threshold are excluded.}
\item{scoreType}{This parameter defines the GSEA score type.
Possible options are ("std", "pos", "neg").
By default ("std") the enrichment score is computed as in the original GSEA.
The "pos" and "neg" score types are intended to be used for one-tailed tests
(i.e. when one is interested only in positive ("pos") or negative ("neg") enrichment).}
\item{nproc}{If not equal to zero sets BPPARAM to use nproc workers (default = 0).}
\item{gseaParam}{GSEA parameter value, all gene-level statistics are raised to the power of `gseaParam`
before calculation of GSEA enrichment scores.}
\item{BPPARAM}{Parallelization parameter used in bplapply.
Can be used to specify cluster to run. If not initialized explicitly or
by setting `nproc` default value `bpparam()` is used.}
}
\value{
A table with GSEA results. Each row corresponds to a tested pathway.
The columns are the following:
\itemize{
\item pathway -- name of the pathway as in `names(pathway)`;
\item pval -- an enrichment p-value;
\item padj -- a BH-adjusted p-value;
\item ES -- enrichment score, same as in Broad GSEA implementation;
\item NES -- enrichment score normalized to mean enrichment of random samples of the same size;
\item nMoreExtreme -- a number of times a random gene set had a more
extreme enrichment score value;
\item size -- size of the pathway after removing genes not present in `names(stats)`.
\item leadingEdge -- vector with indexes of leading edge genes that drive the enrichment, see \url{http://software.broadinstitute.org/gsea/doc/GSEAUserGuideTEXT.htm#_Running_a_Leading}.
}
}
\description{
The function takes about \emph{O(nk^\{3/2\})} time,
where \emph{n} is number of permutations and \emph{k} is a maximal
size of the pathways. That means that setting `maxSize` parameter with a value of ~500
is strongly recommended.
}
\examples{
data(examplePathways)
data(exampleRanks)
fgseaRes <- fgseaSimple(examplePathways, exampleRanks, nperm=10000, maxSize=500)
# Testing only one pathway is implemented in a more efficient manner
fgseaRes1 <- fgseaSimple(examplePathways[1], exampleRanks, nperm=10000)
}
|
# Binomial distribution: dbinom(m, N, mu) = P(X = m) for X ~ Binomial(N, mu)
dbinom(50, 100, 1/2)
# Upper-tail probability P(X >= 51) for X ~ Binomial(100, 1/2)
sum(dbinom(51:100, 100, 1/2))
# Multinomial distribution: dmultinom(c(m1, m2,..., mk), prob = c(mu1, mu2,..., muk))
dmultinom(c(5, 5, 5, 5), prob = c(0.28, 0.24, 0.24, 0.24))
# Accumulate P(X1 <= 4) for (X1,...,X4) ~ Multinomial(20, (0.28, 0.24, 0.24, 0.24))
# by summing dmultinom over all count triples with i <= 4 and i + j + k <= 20;
# the fourth count is the remainder 20 - i - j - k. The inner `break` stops the
# k loop once i + j + k exceeds 20, since no larger k can satisfy the constraint.
p = 0
for (i in 0:4) {
  for (j in 0:20) {
    for (k in 0:20) {
      if (i + j + k <= 20) {
        p <- p + dmultinom(c(i, j, k, 20-i-j-k)
                           , prob = c(0.28, 0.24, 0.24, 0.24))
      }
      else {break}
    }
  }
}
print(p)
| /Probability_distribution.R | no_license | zawato/Pattern_Recognition | R | false | false | 469 | r | # 二項分布 dbinom(m, N, mu)
dbinom(50, 100, 1/2)
sum(dbinom(51:100, 100, 1/2))
# 多項分布 dmultinom(c(m1, m2,..., mk), prob = c(mu1, mu2,..., muk))
dmultinom(c(5, 5, 5, 5), prob = c(0.28, 0.24, 0.24, 0.24))
p = 0
for (i in 0:4) {
for (j in 0:20) {
for (k in 0:20) {
if (i + j + k <= 20) {
p <- p + dmultinom(c(i, j, k, 20-i-j-k)
, prob = c(0.28, 0.24, 0.24, 0.24))
}
else {break}
}
}
}
print(p)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/legend.R
\name{legend}
\alias{legend}
\title{Modify plot elements that relate to the second y-axis.}
\usage{
legend(x, ...)
}
\description{
This is an S3 generic function.
}
\seealso{
Other legend: \code{\link{legend.c3}}
}
| /man/legend.Rd | no_license | drninjamommy/c3 | R | false | true | 293 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/legend.R
\name{legend}
\alias{legend}
\title{Modify plot elements that relate to the second y-axis.}
\usage{
legend(x, ...)
}
\description{
This is an S3 method.
}
\seealso{
Other legend: \code{\link{legend.c3}}
}
|
# Auto-extracted example code from the pscl package's predict.hurdle Rd page.
library(pscl)
### Name: predict.hurdle
### Title: Methods for hurdle Objects
### Aliases: predict.hurdle residuals.hurdle terms.hurdle
###   model.matrix.hurdle coef.hurdle vcov.hurdle summary.hurdle
###   print.summary.hurdle logLik.hurdle fitted.hurdle predprob.hurdle
###   extractAIC.hurdle
### Keywords: regression
### ** Examples
# Fit a hurdle count model of article counts (art) on all other covariates
data("bioChemists", package = "pscl")
fm <- hurdle(art ~ ., data = bioChemists)
# residuals-vs-fitted diagnostic plot
plot(residuals(fm) ~ fitted(fm))
# coefficients: default set, then the zero-hurdle component only
coef(fm)
coef(fm, model = "zero")
summary(fm)
logLik(fm)
| /data/genthat_extracted_code/pscl/examples/predict.hurdle.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 516 | r | library(pscl)
### Name: predict.hurdle
### Title: Methods for hurdle Objects
### Aliases: predict.hurdle residuals.hurdle terms.hurdle
### model.matrix.hurdle coef.hurdle vcov.hurdle summary.hurdle
### print.summary.hurdle logLik.hurdle fitted.hurdle predprob.hurdle
### extractAIC.hurdle
### Keywords: regression
### ** Examples
data("bioChemists", package = "pscl")
fm <- hurdle(art ~ ., data = bioChemists)
plot(residuals(fm) ~ fitted(fm))
coef(fm)
coef(fm, model = "zero")
summary(fm)
logLik(fm)
|
context("webservice tests")
source("utils.R")
test_that("create, get, generate keys of, and delete webservice", {
skip('skip')
ws <- existing_ws
tmp_dir_name <- "tmp_dir"
model_name <- "dummy_model.data"
dir.create(tmp_dir_name)
file.create(file.path(tmp_dir_name, model_name))
# register the model
model <- register_model(ws, tmp_dir_name, model_name)
# Create a new environment
env <- r_environment(name = "newenv")
env$register(ws)
# Create the inference config to use for Webservice
config <- inference_config(entry_script = "dummy_score.R", environment = env)
# Create ACI deployment config
tags <- reticulate::py_dict('name', 'temp')
aciconfig <-
azureml$core$webservice$AciWebservice$deploy_configuration(cpu_cores = 1,
memory_gb = 1,
tags = tags,
auth_enabled = T)
# Deploy the model
service_name <- paste("svc", build_num, sep="")
service <- deploy_model(ws,
service_name,
models = c(model),
inference_config = config,
deployment_config = aciconfig)
wait_for_deployment(service, show_output = TRUE)
# Get webservice
service <- get_webservice(ws, name = service_name)
# Check the logs
logs <- get_webservice_logs(service)
expect_equal(length(logs), 1)
# Get the service keys
keys <- get_webservice_keys(service)
expect_equal(length(keys), 2)
# Try changing secondary key
generate_new_webservice_key(service, key_type = 'Secondary')
new_keys <- get_webservice_keys(service)
expect_equal(length(new_keys), 2)
# check if the new secondary key is different from the previous one
expect_false(keys[[2]] == new_keys[[2]])
# delete the webservice
delete_webservice(service)
}) | /tests/testthat/test_webservice.R | permissive | sdgilley/azureml-sdk-for-r | R | false | false | 1,982 | r | context("webservice tests")
# Integration test for the Azure ML webservice lifecycle:
# register a model, deploy it to ACI with key auth, inspect logs and keys,
# rotate the secondary key, and finally tear the service down.
source("utils.R")

test_that("create, get, generate keys of, and delete webservice", {
  skip("skip")
  ws <- existing_ws

  tmp_dir_name <- "tmp_dir"
  model_name <- "dummy_model.data"
  dir.create(tmp_dir_name)
  file.create(file.path(tmp_dir_name, model_name))

  # Register the model with the workspace.
  model <- register_model(ws, tmp_dir_name, model_name)

  # Create a new environment and register it.
  env <- r_environment(name = "newenv")
  env$register(ws)

  # Create the inference config to use for the webservice.
  config <- inference_config(entry_script = "dummy_score.R", environment = env)

  # Create the ACI deployment config; auth is enabled so the key
  # retrieval/rotation assertions below are meaningful.
  tags <- reticulate::py_dict("name", "temp")
  aciconfig <-
    azureml$core$webservice$AciWebservice$deploy_configuration(cpu_cores = 1,
                                                               memory_gb = 1,
                                                               tags = tags,
                                                               auth_enabled = TRUE)

  # Deploy the model and block until the deployment finishes.
  service_name <- paste0("svc", build_num)
  service <- deploy_model(ws,
                          service_name,
                          models = c(model),
                          inference_config = config,
                          deployment_config = aciconfig)
  wait_for_deployment(service, show_output = TRUE)

  # Retrieve the webservice by name and check its logs.
  service <- get_webservice(ws, name = service_name)
  logs <- get_webservice_logs(service)
  expect_equal(length(logs), 1)

  # An auth-enabled service exposes a primary and a secondary key.
  keys <- get_webservice_keys(service)
  expect_equal(length(keys), 2)

  # Rotating the secondary key must yield a different value.
  generate_new_webservice_key(service, key_type = "Secondary")
  new_keys <- get_webservice_keys(service)
  expect_equal(length(new_keys), 2)
  # Check that the new secondary key differs from the previous one.
  expect_false(keys[[2]] == new_keys[[2]])

  # Clean up the deployed webservice.
  delete_webservice(service)
})
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/testHelpers.R
\name{run_servr}
\alias{run_servr}
\title{Spawn a child R session that runs a 'blocking' command}
\usage{
run_servr(directory = ".", port = 4848,
code = "servr::httd(dir='\%s', port=\%d)")
}
\arguments{
\item{directory}{path that the server should map to.}
\item{port}{port number to _attempt_ to run server on.}
\item{code}{R code to execute in a child session}
}
\value{
port number of the successful attempt
}
\description{
Run a blocking command in a child R session (for example a
file server or shiny app)
}
| /man/run_servr.Rd | no_license | tokareff/animint | R | false | false | 623 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/testHelpers.R
\name{run_servr}
\alias{run_servr}
\title{Spawn a child R session that runs a 'blocking' command}
\usage{
run_servr(directory = ".", port = 4848,
code = "servr::httd(dir='\%s', port=\%d)")
}
\arguments{
\item{directory}{path that the server should map to.}
\item{port}{port number to _attempt_ to run server on.}
\item{code}{R code to execute in a child session}
}
\value{
port number of the successful attempt
}
\description{
Run a blocking command in a child R session (for example a
file server or shiny app)
}
|
# STANDARD PIPELINE to derive de novo mutational signatures of a specific
# tumour type (here Lung-AdenoCA) via NMF, and to select the optimal number
# of signatures k by consensus of several quality metrics.
#
# NOTE(review): helper functions such as select_tumour(), plot_heatmap_SSM96(),
# SSM96toSSM6matrix(), EstMaxNumOfSignatures(), kSelection*() and
# wrapup_results_NMFks() are project-specific and must be sourced before
# running this script -- confirm against the repository.
suppressPackageStartupMessages(library(MutationalPatterns))
suppressPackageStartupMessages(library(gridExtra))
# nmf() and consensushc() live in the NMF package; it is not attached by
# MutationalPatterns, so load it explicitly.
suppressPackageStartupMessages(library(NMF))

# Prepare Data for specific Tumour Type -----------------------------------

#(1) Get the mutational profile (SSM96) for the tumour type of interest
mutSSM96 <- select_tumour(Path_mut96_mat = "/project/devel/PCAWG/mmaqueda/Data/SSM96Matrix/mut96_matALL.txt",
                          resultsPath = "/project/devel/PCAWG/mmaqueda/Results/Lung/AdenoCA/Data/",
                          whichtumour = c("Lung-AdenoCA"))

# Plotting Tumour Type Data -----------------------------------------------

#(2) Plot a heatmap with the SSM96 mutational profile - all samples
pdf("Heatmap_SSM96.pdf")
plot_heatmap_SSM96(mutSSM96)
dev.off()

#(3) Plot the SSM6 spectrum (counts and freq) for all samples (together and separate)
# A prior step is to get the SSM6 matrix
mutSSM6 <- SSM96toSSM6matrix(Path_mut96_mat =
  "/project/devel/PCAWG/mmaqueda/Results/Lung/AdenoCA/Data/mut96_mat_Lung-AdenoCA.txt")

pdf("Spectrum_SSM6_CountsSamples.pdf")
plot_spectrum_SSM6(mutSSM6 = mutSSM6, plotby = "SAMPLE",
                   legend = TRUE, relative = FALSE)
dev.off()

pdf("Spectrum_SSM6_FreqSamples.pdf")
plot_spectrum_SSM6(mutSSM6 = mutSSM6, plotby = "SAMPLE",
                   legend = TRUE, relative = TRUE)
dev.off()

# This plot function is from the MutationalPatterns package
pdf("Spectrum_SSM6_FreqALL.pdf")
plot_spectrum(as.data.frame(t(mutSSM6)))
dev.off()

#(4) Plot the SSM96 profile for 10 samples (randomly chosen)
# This plot function is from the MutationalPatterns package
pdf("SSM96_RandomSamples.pdf")
plot_96_profile(mutSSM96[, sample(seq_len(ncol(mutSSM96)), size = 10)], condensed = TRUE)
dev.off()

#(5) Plot the first 4 PCs of PCA over all samples (SSM96 info)
# A prior step is to normalize (freq not counts) the SSM96 matrix
mutSSM96_norm <- normalize_ssm96mat(Path_mut96_mat =
  "/project/devel/PCAWG/mmaqueda/Results/Lung/AdenoCA/Data/mut96_mat_S.txt",
  resultsPath = "/project/devel/PCAWG/mmaqueda/Results/Lung/AdenoCA/Data/")

pdf("PCA_SSM96_Norm.pdf")
plot_PCA_SSM96(mutSSM96_norm)
dev.off()

#(6) Compute the hierarchical clustering (complete method) over all samples
# and plot the dendrogram
pdf("Dendrogram.pdf")
plot(cluster_SSM96profile(mutSSM96))
dev.off()

# Estimate NMF for different k signatures ---------------------------------

#(7) Estimate the max number of signatures that can be retrieved with
# the number of genomes to be analyzed
kmax <- EstMaxNumOfSignatures(n.genomes = ncol(mutSSM96))

#(8) Estimate NMF for different k values considering kmax from previous point
# We will consider 100 iterations
estimate <- nmf(mutSSM96, rank = 2:kmax, method = "brunet", nrun = 100,
                seed = 123456, .options = "kvp2")

# Selection of optimal k value from NMF -----------------------------------

#(9) First, let's plot the different default measures from the nmf function
pdf("Estimatek.pdf")
plot(estimate)
dev.off()

#(10a) Selection of k values based on CCC
selCCC <- kSelectionCCC(nmf_measures = estimate$measures, cutoffCCCrel = 0.85)

#(10b) Selection of k values based on RSS
selRSS <- kSelectionRSS(nmf_measures = estimate$measures, cutoffRSSrel = 0.80)

#(10c) Selection of k values based on Frobenius norm stability
selFrob <- kSelectionStableFrob(mut96_mat = mutSSM96,
                                nmf_estimation = estimate)
# selFrob[[1]] #selected
pdf("Stability_Frobnorm.pdf")
boxplot(selFrob[[2]], use.cols = TRUE, main = "Lung AdenoCA - Frobenius Norm") # plot Frob norm values
dev.off()

#(10d) Selection of k values based on similarity Original and Reconstructed
# First, let's do the wrap-up
NMFks_results <- wrapup_results_NMFks(selFrob, estimate)
# Cos Sim for all k's and samples
CSOriReco_ks <- kCosSimOriReco(mutSSM96, NMFks_results)
# Summary of Cos Sim values
selSimOriRec <- summaryCosSim(CSOriReco_ks, estimate)
pdf("summCS_OriRec.pdf")
plot_summaryCosSim_OrRec(selSimOriRec)
dev.off()

#(11) Selection of optimum k value - Consensus among previous metrics
optimumK <- kSelection_Consensus(selCCC, selRSS, selFrob, selSimOriRec, estimate$measures)
# optimumK[[1]] #selected
pdf("Votes4K.pdf")
barplot(optimumK[[2]], main = "Lung AdenoCA - K votes")
dev.off()

# Cos Sim Values per k candidates -----------------------------------------

# After visualizing previous results, we'll plot this for k=2,3,4
pdf("CS_OriRec_k4.pdf")
plot_CS_OriRec_forK(CSOriReco_ks, kvalue = 4, estimate$measures)
dev.off()

# Retrieve results for optimum k ------------------------------------------
# Based on optimumK[[1]] (selected k value), in this case k=2

#(12) Plot the samples dendrogram based on the consensus matrix from the NMF
# estimation for the selected k, in this case k=2
pdf("consensushc_k2.pdf")
plot(consensushc(object = estimate$fit$`2`, method = "complete",
                 what = "consensus", dendrogram = TRUE))
dev.off()
# Remark: the previous plot does not render well. Review!!!

#(13) Plot the contribution of each signature per sample in relative and
# absolute mode for the selected k
# BUGFIX: the original indexed into an undefined object `estimateLung`;
# the NMF estimation in this script is stored in `estimate`.
results_k_selected <- NMFks_results[[which(estimate$measures$rank == optimumK[[1]])]]
pc1 <- plot_contribution(results_k_selected$contribution,
                         results_k_selected$signatures, mode = "relative")
pc2 <- plot_contribution(results_k_selected$contribution,
                         results_k_selected$signatures, mode = "absolute")
pdf("Contribution_k2.pdf")
grid.arrange(pc1, pc2)
dev.off()

#(14) Plot the SSM96 profile of the signatures
pdf("Signatures_k2.pdf")
plot_96_profile(results_k_selected$signatures, condensed = TRUE)
dev.off()

#(15) Plot the signatures contribution per sample in a heatmap
pdf("Contribution_Heat_k2.pdf")
plot_contribution_heatmap(results_k_selected$contribution)
dev.off()
| /deNovo_Signatures/custom_workflow/Gral_Pipeline_NMF.R | no_license | MMaqueda/MutSig_WGSvsWES | R | false | false | 5,868 | r |
# STANDARD PIPELINE to derive de novo mutational signatures of a specific
# tumour type (here Lung-AdenoCA) via NMF, and to select the optimal number
# of signatures k by consensus of several quality metrics.
#
# NOTE(review): helper functions such as select_tumour(), plot_heatmap_SSM96(),
# SSM96toSSM6matrix(), EstMaxNumOfSignatures(), kSelection*() and
# wrapup_results_NMFks() are project-specific and must be sourced before
# running this script -- confirm against the repository.
suppressPackageStartupMessages(library(MutationalPatterns))
suppressPackageStartupMessages(library(gridExtra))
# nmf() and consensushc() live in the NMF package; it is not attached by
# MutationalPatterns, so load it explicitly.
suppressPackageStartupMessages(library(NMF))

# Prepare Data for specific Tumour Type -----------------------------------

#(1) Get the mutational profile (SSM96) for the tumour type of interest
mutSSM96 <- select_tumour(Path_mut96_mat = "/project/devel/PCAWG/mmaqueda/Data/SSM96Matrix/mut96_matALL.txt",
                          resultsPath = "/project/devel/PCAWG/mmaqueda/Results/Lung/AdenoCA/Data/",
                          whichtumour = c("Lung-AdenoCA"))

# Plotting Tumour Type Data -----------------------------------------------

#(2) Plot a heatmap with the SSM96 mutational profile - all samples
pdf("Heatmap_SSM96.pdf")
plot_heatmap_SSM96(mutSSM96)
dev.off()

#(3) Plot the SSM6 spectrum (counts and freq) for all samples (together and separate)
# A prior step is to get the SSM6 matrix
mutSSM6 <- SSM96toSSM6matrix(Path_mut96_mat =
  "/project/devel/PCAWG/mmaqueda/Results/Lung/AdenoCA/Data/mut96_mat_Lung-AdenoCA.txt")

pdf("Spectrum_SSM6_CountsSamples.pdf")
plot_spectrum_SSM6(mutSSM6 = mutSSM6, plotby = "SAMPLE",
                   legend = TRUE, relative = FALSE)
dev.off()

pdf("Spectrum_SSM6_FreqSamples.pdf")
plot_spectrum_SSM6(mutSSM6 = mutSSM6, plotby = "SAMPLE",
                   legend = TRUE, relative = TRUE)
dev.off()

# This plot function is from the MutationalPatterns package
pdf("Spectrum_SSM6_FreqALL.pdf")
plot_spectrum(as.data.frame(t(mutSSM6)))
dev.off()

#(4) Plot the SSM96 profile for 10 samples (randomly chosen)
# This plot function is from the MutationalPatterns package
pdf("SSM96_RandomSamples.pdf")
plot_96_profile(mutSSM96[, sample(seq_len(ncol(mutSSM96)), size = 10)], condensed = TRUE)
dev.off()

#(5) Plot the first 4 PCs of PCA over all samples (SSM96 info)
# A prior step is to normalize (freq not counts) the SSM96 matrix
mutSSM96_norm <- normalize_ssm96mat(Path_mut96_mat =
  "/project/devel/PCAWG/mmaqueda/Results/Lung/AdenoCA/Data/mut96_mat_S.txt",
  resultsPath = "/project/devel/PCAWG/mmaqueda/Results/Lung/AdenoCA/Data/")

pdf("PCA_SSM96_Norm.pdf")
plot_PCA_SSM96(mutSSM96_norm)
dev.off()

#(6) Compute the hierarchical clustering (complete method) over all samples
# and plot the dendrogram
pdf("Dendrogram.pdf")
plot(cluster_SSM96profile(mutSSM96))
dev.off()

# Estimate NMF for different k signatures ---------------------------------

#(7) Estimate the max number of signatures that can be retrieved with
# the number of genomes to be analyzed
kmax <- EstMaxNumOfSignatures(n.genomes = ncol(mutSSM96))

#(8) Estimate NMF for different k values considering kmax from previous point
# We will consider 100 iterations
estimate <- nmf(mutSSM96, rank = 2:kmax, method = "brunet", nrun = 100,
                seed = 123456, .options = "kvp2")

# Selection of optimal k value from NMF -----------------------------------

#(9) First, let's plot the different default measures from the nmf function
pdf("Estimatek.pdf")
plot(estimate)
dev.off()

#(10a) Selection of k values based on CCC
selCCC <- kSelectionCCC(nmf_measures = estimate$measures, cutoffCCCrel = 0.85)

#(10b) Selection of k values based on RSS
selRSS <- kSelectionRSS(nmf_measures = estimate$measures, cutoffRSSrel = 0.80)

#(10c) Selection of k values based on Frobenius norm stability
selFrob <- kSelectionStableFrob(mut96_mat = mutSSM96,
                                nmf_estimation = estimate)
# selFrob[[1]] #selected
pdf("Stability_Frobnorm.pdf")
boxplot(selFrob[[2]], use.cols = TRUE, main = "Lung AdenoCA - Frobenius Norm") # plot Frob norm values
dev.off()

#(10d) Selection of k values based on similarity Original and Reconstructed
# First, let's do the wrap-up
NMFks_results <- wrapup_results_NMFks(selFrob, estimate)
# Cos Sim for all k's and samples
CSOriReco_ks <- kCosSimOriReco(mutSSM96, NMFks_results)
# Summary of Cos Sim values
selSimOriRec <- summaryCosSim(CSOriReco_ks, estimate)
pdf("summCS_OriRec.pdf")
plot_summaryCosSim_OrRec(selSimOriRec)
dev.off()

#(11) Selection of optimum k value - Consensus among previous metrics
optimumK <- kSelection_Consensus(selCCC, selRSS, selFrob, selSimOriRec, estimate$measures)
# optimumK[[1]] #selected
pdf("Votes4K.pdf")
barplot(optimumK[[2]], main = "Lung AdenoCA - K votes")
dev.off()

# Cos Sim Values per k candidates -----------------------------------------

# After visualizing previous results, we'll plot this for k=2,3,4
pdf("CS_OriRec_k4.pdf")
plot_CS_OriRec_forK(CSOriReco_ks, kvalue = 4, estimate$measures)
dev.off()

# Retrieve results for optimum k ------------------------------------------
# Based on optimumK[[1]] (selected k value), in this case k=2

#(12) Plot the samples dendrogram based on the consensus matrix from the NMF
# estimation for the selected k, in this case k=2
pdf("consensushc_k2.pdf")
plot(consensushc(object = estimate$fit$`2`, method = "complete",
                 what = "consensus", dendrogram = TRUE))
dev.off()
# Remark: the previous plot does not render well. Review!!!

#(13) Plot the contribution of each signature per sample in relative and
# absolute mode for the selected k
# BUGFIX: the original indexed into an undefined object `estimateLung`;
# the NMF estimation in this script is stored in `estimate`.
results_k_selected <- NMFks_results[[which(estimate$measures$rank == optimumK[[1]])]]
pc1 <- plot_contribution(results_k_selected$contribution,
                         results_k_selected$signatures, mode = "relative")
pc2 <- plot_contribution(results_k_selected$contribution,
                         results_k_selected$signatures, mode = "absolute")
pdf("Contribution_k2.pdf")
grid.arrange(pc1, pc2)
dev.off()

#(14) Plot the SSM96 profile of the signatures
pdf("Signatures_k2.pdf")
plot_96_profile(results_k_selected$signatures, condensed = TRUE)
dev.off()

#(15) Plot the signatures contribution per sample in a heatmap
pdf("Contribution_Heat_k2.pdf")
plot_contribution_heatmap(results_k_selected$contribution)
dev.off()
|
---
title: "Projet Dataminig"
author: "Asma-Abir-Younes-Wilfried"
date: "02/06/2020"
output: rmdformats::readthedown
rmarkdown::html_document:
theme: journal
highlight: github
---
```{r include=FALSE}
require(readr)
require(dplyr)
require(questionr)
require(base)
require(vcd)
require(rpart)
require(TH.data)
require(tidyr)
require(tidyverse)
require(MASS)
require(partykit)
require(rattle)
require(rpart.plot)
require(RColorBrewer)
require(lubridate)
require(broom)
require(GGally)
require(forestmodel)
require(stringr)
require(readxl)
require(ggplot2)
require(kableExtra)
require(formattable)
require(rmdformats)
```
```{r include=FALSE}
Data2 <- Base <- read_delim("C:/Users/Wilfried/Downloads/Base.csv",
";", escape_double = FALSE, trim_ws = TRUE)
```
```{r include=FALSE}
Data2$DATEDEBUTADHESION<-as.Date(Data2$DATEDEBUTADHESION,"%d/%m/%Y %H:%M")
Data2$DATEFINADHESION <- as.Date(Data2$DATEFINADHESION,"%d/%m/%Y %H:%M")
Data2$DATENAISSANCE <- as.Date(Data2$DATENAISSANCE,"%d/%m/%Y %H:%M")
Data2$DATEREADHESION <- as.Date(Data2$DATEREADHESION,"%d/%m/%Y %H:%M")
```
```{r include=FALSE}
Data_me <-Data2 %>% group_by(IDCLIENT) %>% mutate( Rentabilit =case_when(CA_2016_S1+CA_2016_S2>= 400 ~1,
CA_2016_S1+CA_2016_S2 < 400 ~ 0),REGION=str_sub(CODEINSEE))
```
```{r include=FALSE}
Data_me$VIP <- as.factor(Data_me$VIP)
Data_me$Rentabilit<- as.factor(Data_me$Rentabilit)
```
# Partie Exploration des donnée:
Nous avons choisi la problématique suivante : quel est le profil potentiellement le plus rentable, c'est-à-dire celui qui peut générer plus de 400 euros ?
Ce sujet pourrait aider le departement marketing à mieux cibler sa clientèle et gérer le budget de ses campagnes marketing en réalisant une segmentation par catégorie la plus rentable, choisir le bon moment pendant lequel un profil potentiel génera un max de chiffre d'affaire oubien définir pour chaque année la saisonnalité de promotion,recrutement.
Notre base de donnée contient 157 variables. Nous allons choisir notre variable cible « Rentabilit= CA_2016_S1+CA_2016_S2>= 400 ~ 1 ».
Afin de construire le modèle, bien évidement nous n’allons pas intégrer les 157 variables.
Nous commençons alors à distinguer les variables les plus significatives et influençant notre variable cible.
Nous avons : 25 variables qualitatives, 114 variables continues et 18 variables discrètes.
Afin de trouver les variables signficatives, nous allons analyser et interpreter les données par trimestre et chercher des corrélations
```{r echo=FALSE, message=FALSE, warning=FALSE, fig.width=15 , fig.height=10}
ggpairs(Data_me[, c("prix_ticket_moy_2016","CA_2016_T1","CA_2016_T2","CA_2016_T3","CA_2016_T4","CA_2016","CA_2017","Rentabilit")], aes(colour= Rentabilit))
```
On remarque que le comportement de la visualisation du **Prix moyen tickect** VS **Chiffre d'affaire en trimestre 2** ressemble à celui du **Prix moyen tickect** VS **Chiffre d'affaire 2016 global**.
Le comportement du client en trimestre 2 a t- il une certaine influence ?
*1ère reflexion*: garder le prix moyen des tickets
On va creuser dans les tickets et analyser la relation entre nombre de tickets et la rentabilité par trimestre :
```{r}
qinf<- quantile(Data_me$nbtic_2016_T1,0.01)
qsup<- quantile(Data_me$nbtic_2016_T1,0.99)
Data_tic <- Data_me %>% filter(nbtic_2016_T1 >= qinf & nbtic_2016_T1<=qsup)
```
```{r echo=FALSE}
ggplot(Data_tic,aes(Rentabilit,nbtic_2016_T1,colour=Rentabilit))+
geom_jitter(width=0.25)+
geom_boxplot(alpha=0.5, outlier.shape=NA)+
xlab(label = "Rentabilité") +
ylab(label = " Nombre ticket Trimestre 1") +
theme(axis.text.x = element_text(angle=30, hjust=1, vjust=1))+
theme(legend.position="none")+
scale_fill_brewer(palette="Blues")+
theme_classic()+
ggtitle("Boxplot avec les observations")
```
```{r echo=FALSE}
qinf<- quantile(Data_me$nbtic_2016_T2,0.01)
qsup<- quantile(Data_me$nbtic_2016_T2,0.99)
Data_tic2 <- Data_me %>% filter(nbtic_2016_T2 >= qinf & nbtic_2016_T2<=qsup)
ggplot(Data_tic2,aes(Rentabilit,nbtic_2016_T2,colour=Rentabilit))+
geom_jitter(width=0.25)+
geom_boxplot(alpha=0.5, outlier.shape=NA)+
xlab(label = "Rentabilité") +
ylab(label = " Nombre ticket Trimestre 2") +
theme(axis.text.x = element_text(angle=30, hjust=1, vjust=1))+
theme(legend.position="none")+
scale_fill_brewer(palette="Blues")+
theme_classic()+
ggtitle("Boxplot avec les observations")
```
```{r echo=FALSE}
qinf<- quantile(Data_me$nbtic_2016_T3,0.01)
qsup<- quantile(Data_me$nbtic_2016_T3,0.99)
Data_tic3 <- Data_me %>% filter(nbtic_2016_T3 >= qinf & nbtic_2016_T3<=qsup)
ggplot(Data_tic3,aes(Rentabilit,nbtic_2016_T3,colour=Rentabilit))+
geom_jitter(width=0.25)+
geom_boxplot(alpha=0.5, outlier.shape=NA)+
xlab(label = "Rentabilité") +
ylab(label = " Nombre ticket Trimestre 3") +
theme(axis.text.x = element_text(angle=30, hjust=1, vjust=1))+
theme(legend.position="none")+
scale_fill_brewer(palette="Blues")+
theme_classic()+
ggtitle("Boxplot avec les observations")
```
```{r echo=FALSE}
qinf<- quantile(Data_me$nbtic_2016_T4,0.01)
qsup<- quantile(Data_me$nbtic_2016_T4,0.99)
Data_tic4 <- Data_me %>% filter(nbtic_2016_T4 >= qinf & nbtic_2016_T4<=qsup)
ggplot(Data_tic4,aes(Rentabilit,nbtic_2016_T4,colour=Rentabilit))+
geom_jitter(width=0.25)+
geom_boxplot(alpha=0.5, outlier.shape=NA)+
xlab(label = "Rentabilité") +
ylab(label = " Nombre ticket Trimestre 4") +
theme(axis.text.x = element_text(angle=30, hjust=1, vjust=1))+
theme(legend.position="none")+
scale_fill_brewer(palette="Blues")+
theme_classic()+
ggtitle("Boxplot avec les observations")
```
Premier constat après l'analyse de ces boxplots à travers les 4 trimestres (T1, T2, T3, T4) : une importante différence, à la fois au niveau de la médiane du nombre de tickets mais également de la dispersion des deux groupes.
En effet, on peut constater que la répartition du nombre de tickets chez les individus dits *rentables* varie **entre 1 et plus de 4 tickets** ; à l'inverse, les individus dits *non rentables* dépassent assez rarement les **1,5 tickets** par trimestre.
On pourrait donc faire l'hypothèse ici que la variable *nombre de tickets* a une importance dans la rentabilité ou non d'un individu.
On va examiner ensuite la relation des variables Civilité, Date debut d’adhesion et VIP
```{r echo=FALSE, message=FALSE, warning=FALSE, fig.width=15 , fig.height=10}
ggpairs(Data_me[, c("CIVILITE2","DATEDEBADH_mois", "CA_2016_T1","CA_2016_T2","CA_2016_T3","CA_2016_T4","Rentabilit")], aes(colour= Rentabilit ))
```
Les femmes génèrent un CA > 400 plus que les hommes dans tous les trimestres >> on va garder la variable CIVILITE2
Les adhérents de la fin des trimestres 1 et 2 semblent concorder avec notre problématique au vu du rythme d'achat durant l'année.
A l'inverse, les profiles adherant entre les trimestres 3 et 4 réalisent une rentabilité vers la fin de l'année. On pourrait supposer que ces clients sont essentiellement des saisonniers.
Dans le cadre de notre étude on va conserver les variables liés à l'adhésion des clients.
## Relation de la variable: VIP
```{r echo=FALSE, message=FALSE, warning=FALSE, fig.width=15 , fig.height=10}
ggpairs(Data_me[, c("VIP", "CA_2016_T1","CA_2016_T2","CA_2016_T3","CA_2016_T4","Rentabilit")], aes(colour= Rentabilit ))
```
Pour cette variable *VIP*, il faut la croiser avec d'autres informations de l'entreprise pour savoir sur quel critère, ce statut à été attribué :
- Volume d'achat
- Frequence d'achat
- Valeur d'achat
- Date d'adhesion.
Ici, selon le graphe , il y a par exemple des "NON VIP" qui sont rentable selon notre condition et cette population est presque égale en terme de rentabilité à ceux considéré VIP. Par contre les VIP génèrent un max de chiffre d'affaire pendant la trimestre 4 de 2016.
```{r echo=FALSE}
qinf<- quantile(Data_me$CA_2016,0.01)
qsup<- quantile(Data_me$CA_2016,0.99)
Data_CA <- Data_me %>% filter(CA_2016 >= qinf & CA_2016<=qsup)
ggplot(Data_CA,aes(VIP,CA_2016,colour=VIP))+
geom_jitter(width=0.25)+
geom_boxplot(alpha=0.5, outlier.shape=NA)+
xlab(label = "Rentabilité") +
ylab(label = " Nombre ticket Trimestre 1") +
theme(axis.text.x = element_text(angle=30, hjust=1, vjust=1))+
theme(legend.position="none")+
scale_fill_brewer(palette="Blues")+
theme_classic()+
ggtitle("Boxplot avec les observations")
```
## Relation avec l'age :
```{r echo=FALSE, message=FALSE, warning=FALSE, fig.width=15 , fig.height=10}
ggpairs(Data_me[, c("age_QL","Rentabilit")], aes(colour= Rentabilit ))
```
La valeur du test de corrélation nous confirme bien que les variables âge et rentabilité sont corrélées.
De plus, on constate à l'aide du graphe que la tranche d'âge [50-70] ans génère en général plus de 400 euros de chiffre d'affaires.
## Relation avec localité (MAGASIN,PAYS et Région)
```{r include=FALSE}
Repart_magain <- Data_me %>% group_by(MAGASIN) %>% summarise(nb_client = n_distinct(IDCLIENT),nb_VIP = (table(VIP==1,exclude = FALSE)),nb_rentable= (table(Rentabilit==1,exclude = FALSE)),nb_rentable_vip =(table(VIP==1,Rentabilit==1,exclude = FALSE)))
Repart_magain$nb_rentable_vip <- as.numeric(Repart_magain$nb_rentable_vip)
Repart_magain$nb_VIP <- as.numeric(Repart_magain$nb_VIP)
Repart_magain <-Repart_magain %>% group_by(MAGASIN) %>% mutate(percent = round((nb_rentable_vip/nb_VIP)*100)) %>% arrange(desc(percent))
```
```{r echo=FALSE, fig.width=15 , fig.height=10}
Repart_magain[,1:6]%>%kable(escape = F,align = "r") %>%
row_spec(2:4, bold = T, color = "white", background = "#D7261E")%>% kable_styling(bootstrap_options = c("striped", "hover","condensed","responsive"),full_width = F,position = "c",font_size = 11 )%>% row_spec(0, bold = T, color = "white", background = "grey") %>% kable_styling(c("striped", "bordered")) %>% scroll_box(width = "800px", height = "300px")
```
Comme on peut le voir les MAGASIN *CLI* , *VIV* et *GEX* ont une proportion(*percent*) de VIP rentable assez élevé au alentour de 84-88%.
C'est un constat pour le moment mais on pourrai supposer que la proportion de VIP rentable joue un rôle plus ou moins important dans notre modèle.
```{r include=FALSE}
Repart_pays <- Data_me %>% group_by(PAYS) %>% summarise(nb_client = n_distinct(IDCLIENT),nb_VIP = (table(VIP==1,exclude = FALSE)),nb_rentable= (table(Rentabilit==1,exclude = FALSE)),nb_rentable_vip =sum(table(VIP==1,Rentabilit==1,exclude = FALSE)))
Repart_pays$nb_rentable_vip <- as.numeric(Repart_pays$nb_rentable_vip)
Repart_pays$nb_VIP <- as.numeric(Repart_pays$nb_VIP)
Repart_pays <-Repart_pays %>% group_by(PAYS) %>% mutate(percent = round(sum((nb_rentable_vip/nb_VIP))*100)) %>% arrange(desc(percent))
```
```{r echo=FALSE}
Repart_pays[,1:6]%>%kable(escape = F,align = "r") %>%
row_spec(2:3, bold = T, color = "white", background = "#D7261E")%>% kable_styling(bootstrap_options = c("striped", "hover","condensed","responsive"),full_width = F,position = "c",font_size = 11 )%>% row_spec(0, bold = T, color = "white", background = "grey") %>% kable_styling(c("striped", "bordered")) %>% scroll_box(width = "800px", height = "300px")
```
On fait quasiment le même constat ici concernant les variables VIP et rentabilité à travers une répartition par pays ; cependant, notre échantillon est concentré essentiellement sur la FRANCE. Il aurait été préférable d'avoir plus de données afin d'évaluer l'évolution et la répartition de ces métriques en Suisse (*CHE*).
```{r include=FALSE}
Repart_region <- Data_me %>% group_by(REGION=str_sub(CODEINSEE,1,2)) %>% summarise(nb_client = n_distinct(IDCLIENT),nb_VIP = (table(VIP==1,exclude = FALSE)),nb_rentable= (table(Rentabilit==1,exclude = FALSE)),nb_rentable_vip =sum(table(VIP==1,Rentabilit==1,exclude = FALSE)))
Repart_region$nb_rentable_vip <- as.numeric(Repart_region$nb_rentable_vip)
Repart_region$nb_VIP <- as.numeric(Repart_region$nb_VIP)
Repart_region <-Repart_region %>% group_by(REGION) %>% mutate(percent = round(sum((nb_rentable_vip/nb_VIP))*100)) %>% arrange(desc(nb_rentable_vip))
```
```{r echo=FALSE}
Repart_region[,1:6]%>%kable(escape = F,align = "r") %>%
row_spec(1:6, bold = T, color = "white", background = "#D7261E")%>% kable_styling(bootstrap_options = c("striped", "hover","condensed","responsive"),full_width = F,position = "c",font_size = 11) %>% row_spec(0, bold = T, color = "white", background = "grey") %>% kable_styling(c("striped", "bordered")) %>%
kable_styling()%>% scroll_box(width = "800px", height = "300px")
```
Conclusion du travail d'exploration:
Nous allons garder pour notre modèle les variables suivantes :
- CIVILITE2
- toutes les variables en relation avec l'adhesion(DATEDEBADH_mois, DATEREADH_mois DATEDEBUTADHESION,DATEREADHESION,DATEFINADHESION)
- AGE
- VIP,
- MAGASIN
- PAYS
- Moy_ticket
- nombre ticket
- Région
Afin de confirmer nos hypothèses, nous allons effectuer une régression logistique (modèle stepwise).
# Regression logistique méthode stepwise
## Construction data.set
```{r}
Test_data <- Data2 %>% group_by(IDCLIENT) %>% summarise(CIVILITE2,
DATENAISSANCE,
Age=2017-year(DATENAISSANCE),
DATEDEBUTADHESION=DATEDEBUTADHESION,
DATEREADHESION=DATEREADHESION,
DATEFINADHESION=DATEFINADHESION,
Dureé_réadhésion= DATEFINADH_an - DATEREADH_an,
Duree_adhesion=DATEREADH_an-DATEDEBADH_an,
Region=str_sub(CODEINSEE,1,2),
Ca_global= CA_2016_S1+CA_2016_S2,
Rentabilit =case_when(CA_2016_S1+CA_2016_S2>= 400 ~1,
CA_2016_S1+CA_2016_S2 < 400 ~ 0),
nb_tickets=nbtic_2016_S1+nbtic_2016_S2,
VIP=VIP,
MAGASIN=MAGASIN,
PAYS=PAYS,
Moy_ticket=round((CA_2016_S1+CA_2016_S2)/(nbtic_2016_S1+nbtic_2016_S2),2),
age_disc10=age_disc10,
nb_ticket = nbtic_2016,
nbtic_2016_T1=nbtic_2016_T1,
nbtic_2016_T2=nbtic_2016_T2,
nbtic_2016_T3=nbtic_2016_T3,
nbtic_2016_T4=nbtic_2016_T4,
DATEDEBADH_mois=DATEDEBADH_mois,
DATEREADH_mois=DATEREADH_mois)
```
```{r include=FALSE}
Test_data$age_disc10 <- as.factor(Test_data$age_disc10)
Test_data$Region <- as.factor(Test_data$Region)
Test_data$CIVILITE2 <- as.factor((Test_data$CIVILITE2))
Test_data$MAGASIN <- as.factor(Test_data$MAGASIN)
Test_data$VIP <- as.factor(Test_data$VIP)
Test_data$PAYS <- as.factor(Test_data$PAYS)
Test_data$DATEDEBADH_mois <- as.factor(Test_data$DATEDEBADH_mois)
Test_data$DATEREADH_mois <- as.factor((Test_data$DATEREADH_mois))
Test_data$Rentabilit <- as.factor(Test_data$Rentabilit)
```
## Data.set pour la régression
```{r}
Test_data2 <- Test_data %>% dplyr::select(IDCLIENT,CIVILITE2,
Age,
Dureé_réadhésion,
Duree_adhesion,
Region,MAGASIN,
Rentabilit,
VIP,
PAYS,
Moy_ticket,
nb_ticket,
age_disc10,
nb_ticket,
nbtic_2016_T1,
nbtic_2016_T2,
nbtic_2016_T3,
nbtic_2016_T4,
DATEDEBADH_mois,
DATEREADH_mois)
Test_data2 <- na.omit(Test_data)
```
Afin d'optimiser le scoring, on a fait le choix de supprimer les valeurs manquantes à ce niveau dans notre jeu de donnée.
## Construction echantillon d'apprentissage et de test
Pour la mise en place de notre dataset d'apprentissage, nous avons réparti l'échantillon en 75 % train et donc 25 % test.
```{r}
set.seed(200)
nb_lignes <- floor((nrow(Test_data2)*0.75)) #Nombre de lignes de l’échantillon d’apprentissage : 75% du dataset
Add_lignes <- Test_data2[sample(nrow(Test_data2)), ] #Ajout de numéros de lignes
Data.train <- Add_lignes[1:nb_lignes, ] #Echantillon d’apprentissage
Data.test <- Add_lignes[(nb_lignes+1):nrow(Add_lignes), ] #Echantillon de test
```
```{r include=FALSE}
# modèle trivial réduit à la constante
str_constant <- "~ 1"
# modèle complet incluant toutes les explicatives potentielles
str_all <- "~CIVILITE2+Age+MAGASIN+VIP+PAYS+Region+age_disc10+nb_ticket+nbtic_2016_T1+nbtic_2016_T2+nbtic_2016_T3+nbtic_2016_T4+DATEDEBADH_mois+DATEREADH_mois"
```
## Affichage score final
```{r include=FALSE}
modele <- glm(Rentabilit~1,data=Data.train,family=binomial)
modele.stepwise <- stepAIC(modele, scope = list(lower = str_constant, upper = str_all), trace = TRUE, data = appren, direction = "both")
```
```{r echo=FALSE}
summary(modele.stepwise)
```
A la suite du modèle stepwise, on a obtenue le résultat suivant pour l'AIC le plus faible (5641.9):
**Rentabilit ~ nb_ticket + VIP + nbtic_2016_T1 + age_disc10 + MAGASIN + nbtic_2016_T4 + PAYS + CIVILITE2**
Il semble être le modéle le plus performant.
Premier constat par rapport à notre première partie exploration, les metrics:
- CIVILITE2
- AGE
- VIP,
- MAGASIN
- PAYS
- nombre ticket
Elles ont une influence dans la modélisation de notre variable y~Rentabilité.
## Ods Ratio
```{r include=FALSE}
odds.ratio(modele.stepwise)
```
```{r include=FALSE}
tmp <- tidy(modele.stepwise, conf.int = TRUE, exponentiate = TRUE)
```
```{r echo=FALSE, warning=TRUE}
knitr::kable(tmp)
```
# Construction de l’arbre de décision
```{r echo=FALSE}
Reg_tree <- rpart(Rentabilit~ VIP + age_disc10 + PAYS+
nbtic_2016_T1 + nbtic_2016_T4 ,data=Data.train, control = rpart.control(minsplit = 15,cp=0.003),parms = list(split = "gini"),method = "class")
```
## Choix cp
```{r echo=FALSE}
printcp(Reg_tree)
plotcp(Reg_tree)
```
## Arbre de décision
```{r echo=FALSE}
library(RColorBrewer)
library(rattle)
fancyRpartPlot(Reg_tree, caption = NULL)
```
```{r echo=FALSE}
plot(Reg_tree, uniform= TRUE, branch = 0.5, margin= 0.1 )
text(Reg_tree, all = FALSE, use.n = TRUE)
```
## Prédiction
```{r}
Reg_test_predict<-predict(Reg_tree,newdata=Data.test,type="class")
summary(Reg_test_predict)
```
## Matrice de confusion
```{r echo=FALSE}
mc<-table(Data.test$Rentabilit,Reg_test_predict)
print(mc)
```
## Erreur de classement
```{r echo=FALSE}
erreur.classement<-1.0-(mc[1,1]+mc[2,2])/sum(mc)
print(erreur.classement)
```
## Taux de prédiction
```{r echo=FALSE}
prediction=mc[2,2]/sum(mc[2,])
print(prediction)
```
## Qualité du modèle
```{r echo=FALSE}
Qualit=(mc[1,1]+mc[2,2])/sum(mc)
print(Qualit)
```
| /customer_analysis.r | no_license | WilfriedBd/example_of_data_analysis | R | false | false | 19,962 | r | ---
title: "Projet Dataminig"
author: "Asma-Abir-Younes-Wilfried"
date: "02/06/2020"
output: rmdformats::readthedown
rmarkdown::html_document:
theme: journal
highlight: github
---
```{r include=FALSE}
require(readr)
require(dplyr)
require(questionr)
require(base)
require(vcd)
require(rpart)
require(TH.data)
require(tidyr)
require(tidyverse)
require(MASS)
require(partykit)
require(rattle)
require(rpart.plot)
require(RColorBrewer)
require(lubridate)
require(broom)
require(GGally)
require(forestmodel)
require(stringr)
require(readxl)
require(ggplot2)
require(kableExtra)
require(formattable)
require(rmdformats)
```
```{r include=FALSE}
Data2 <- Base <- read_delim("C:/Users/Wilfried/Downloads/Base.csv",
";", escape_double = FALSE, trim_ws = TRUE)
```
```{r include=FALSE}
Data2$DATEDEBUTADHESION<-as.Date(Data2$DATEDEBUTADHESION,"%d/%m/%Y %H:%M")
Data2$DATEFINADHESION <- as.Date(Data2$DATEFINADHESION,"%d/%m/%Y %H:%M")
Data2$DATENAISSANCE <- as.Date(Data2$DATENAISSANCE,"%d/%m/%Y %H:%M")
Data2$DATEREADHESION <- as.Date(Data2$DATEREADHESION,"%d/%m/%Y %H:%M")
```
```{r include=FALSE}
Data_me <-Data2 %>% group_by(IDCLIENT) %>% mutate( Rentabilit =case_when(CA_2016_S1+CA_2016_S2>= 400 ~1,
CA_2016_S1+CA_2016_S2 < 400 ~ 0),REGION=str_sub(CODEINSEE))
```
```{r include=FALSE}
Data_me$VIP <- as.factor(Data_me$VIP)
Data_me$Rentabilit<- as.factor(Data_me$Rentabilit)
```
# Partie Exploration des donnée:
Nous avons choisi la problématique suivant : quel est le profil potentiellement le plus rentable et qui peut générer plus de 400 euro.
Ce sujet pourrait aider le département marketing à mieux cibler sa clientèle et gérer le budget de ses campagnes marketing en réalisant une segmentation par catégorie la plus rentable, choisir le bon moment pendant lequel un profil potentiel générera un max de chiffre d'affaires ou bien définir pour chaque année la saisonnalité de promotion, recrutement.
Notre base de donnée contient 157 variables. Nous allons choisir notre variable cible « Rentabilit= CA_2016_S1+CA_2016_S2>= 400 ~ 1 ».
Afin de construire le modèle, bien évidement nous n’allons pas intégrer les 157 variables.
Nous commençons alors à distinguer les variables les plus significatives et influençant notre variable cible.
Nous avons : 25 variables qualitatives, 114 variables continues et 18 variables discrètes.
Afin de trouver les variables signficatives, nous allons analyser et interpreter les données par trimestre et chercher des corrélations
```{r echo=FALSE, message=FALSE, warning=FALSE, fig.width=15 , fig.height=10}
ggpairs(Data_me[, c("prix_ticket_moy_2016","CA_2016_T1","CA_2016_T2","CA_2016_T3","CA_2016_T4","CA_2016","CA_2017","Rentabilit")], aes(colour= Rentabilit))
```
On remarque que le comportement de la visualisation du **Prix moyen ticket** VS **Chiffre d'affaire en trimestre 2** ressemble à celui du **Prix moyen ticket** VS **Chiffre d'affaire 2016 global**.
Le comportement du client en trimestre 2 a t- il une certaine influence ?
*1ère reflexion*: garder le prix moyen des tickets
On va creuser dans les tickets et analyser la relation entre nombre de tickets et la rentabilité par trimestre :
```{r}
qinf<- quantile(Data_me$nbtic_2016_T1,0.01)
qsup<- quantile(Data_me$nbtic_2016_T1,0.99)
Data_tic <- Data_me %>% filter(nbtic_2016_T1 >= qinf & nbtic_2016_T1<=qsup)
```
```{r echo=FALSE}
ggplot(Data_tic,aes(Rentabilit,nbtic_2016_T1,colour=Rentabilit))+
geom_jitter(width=0.25)+
geom_boxplot(alpha=0.5, outlier.shape=NA)+
xlab(label = "Rentabilité") +
ylab(label = " Nombre ticket Trimestre 1") +
theme(axis.text.x = element_text(angle=30, hjust=1, vjust=1))+
theme(legend.position="none")+
scale_fill_brewer(palette="Blues")+
theme_classic()+
ggtitle("Boxplot avec les observations")
```
```{r echo=FALSE}
qinf<- quantile(Data_me$nbtic_2016_T2,0.01)
qsup<- quantile(Data_me$nbtic_2016_T2,0.99)
Data_tic2 <- Data_me %>% filter(nbtic_2016_T2 >= qinf & nbtic_2016_T2<=qsup)
ggplot(Data_tic2,aes(Rentabilit,nbtic_2016_T2,colour=Rentabilit))+
geom_jitter(width=0.25)+
geom_boxplot(alpha=0.5, outlier.shape=NA)+
xlab(label = "Rentabilité") +
ylab(label = " Nombre ticket Trimestre 2") +
theme(axis.text.x = element_text(angle=30, hjust=1, vjust=1))+
theme(legend.position="none")+
scale_fill_brewer(palette="Blues")+
theme_classic()+
ggtitle("Boxplot avec les observations")
```
```{r echo=FALSE}
qinf<- quantile(Data_me$nbtic_2016_T3,0.01)
qsup<- quantile(Data_me$nbtic_2016_T3,0.99)
Data_tic3 <- Data_me %>% filter(nbtic_2016_T3 >= qinf & nbtic_2016_T3<=qsup)
ggplot(Data_tic3,aes(Rentabilit,nbtic_2016_T3,colour=Rentabilit))+
geom_jitter(width=0.25)+
geom_boxplot(alpha=0.5, outlier.shape=NA)+
xlab(label = "Rentabilité") +
ylab(label = " Nombre ticket Trimestre 3") +
theme(axis.text.x = element_text(angle=30, hjust=1, vjust=1))+
theme(legend.position="none")+
scale_fill_brewer(palette="Blues")+
theme_classic()+
ggtitle("Boxplot avec les observations")
```
```{r echo=FALSE}
qinf<- quantile(Data_me$nbtic_2016_T4,0.01)
qsup<- quantile(Data_me$nbtic_2016_T4,0.99)
Data_tic4 <- Data_me %>% filter(nbtic_2016_T4 >= qinf & nbtic_2016_T4<=qsup)
ggplot(Data_tic4,aes(Rentabilit,nbtic_2016_T4,colour=Rentabilit))+
geom_jitter(width=0.25)+
geom_boxplot(alpha=0.5, outlier.shape=NA)+
xlab(label = "Rentabilité") +
ylab(label = " Nombre ticket Trimestre 4") +
theme(axis.text.x = element_text(angle=30, hjust=1, vjust=1))+
theme(legend.position="none")+
scale_fill_brewer(palette="Blues")+
theme_classic()+
ggtitle("Boxplot avec les observations")
```
Premier constat après l'analyse de ces boxplots à travers ces 4 trimestres (T1, T2, T3, T4) : une importante différence à la fois au niveau de la médiane du nombre de tickets mais également de la dispersion des deux groupes.
En effet on peut constater que la répartition du nombre de tickets chez les individus dits *rentables* varie **entre 1 et plus de 4 tickets** ; à l'inverse les individus dits *non rentables* dépassent assez rarement les **1,5 tickets** par trimestre.
On pourrait donc faire l'hypothèse ici que la variable *nombre de tickets* a une importance dans la rentabilité ou non d'un individu.
On va examiner ensuite la relation des variables Civilité, Date debut d’adhesion et VIP
```{r echo=FALSE, message=FALSE, warning=FALSE, fig.width=15 , fig.height=10}
ggpairs(Data_me[, c("CIVILITE2","DATEDEBADH_mois", "CA_2016_T1","CA_2016_T2","CA_2016_T3","CA_2016_T4","Rentabilit")], aes(colour= Rentabilit ))
```
Les femmes génèrent un CA > 400 plus que les hommes dans tous les trimestres >> on va garder la variable CIVILITE2
Les adhérents durant la fin des trimestres 1 et 2 semblent concorder avec notre problématique au vu du rythme d'achat durant l'année.
A l'inverse, les profiles adherant entre les trimestres 3 et 4 réalisent une rentabilité vers la fin de l'année. On pourrait supposer que ces clients sont essentiellement des saisonniers.
Dans le cadre de notre étude on va conserver les variables liés à l'adhésion des clients.
## Relation de la variable: VIP
```{r echo=FALSE, message=FALSE, warning=FALSE, fig.width=15 , fig.height=10}
ggpairs(Data_me[, c("VIP", "CA_2016_T1","CA_2016_T2","CA_2016_T3","CA_2016_T4","Rentabilit")], aes(colour= Rentabilit ))
```
Pour cette variable *VIP*, il faut la croiser avec d'autres informations de l'entreprise pour savoir sur quel critère, ce statut à été attribué :
- Volume d'achat
- Frequence d'achat
- Valeur d'achat
- Date d'adhesion.
Ici, selon le graphe , il y a par exemple des "NON VIP" qui sont rentable selon notre condition et cette population est presque égale en terme de rentabilité à ceux considéré VIP. Par contre les VIP génèrent un max de chiffre d'affaire pendant la trimestre 4 de 2016.
```{r echo=FALSE}
qinf<- quantile(Data_me$CA_2016,0.01)
qsup<- quantile(Data_me$CA_2016,0.99)
Data_CA <- Data_me %>% filter(CA_2016 >= qinf & CA_2016<=qsup)
ggplot(Data_CA,aes(VIP,CA_2016,colour=VIP))+
geom_jitter(width=0.25)+
geom_boxplot(alpha=0.5, outlier.shape=NA)+
xlab(label = "Rentabilité") +
ylab(label = " Nombre ticket Trimestre 1") +
theme(axis.text.x = element_text(angle=30, hjust=1, vjust=1))+
theme(legend.position="none")+
scale_fill_brewer(palette="Blues")+
theme_classic()+
ggtitle("Boxplot avec les observations")
```
## Relation avec l'age :
```{r echo=FALSE, message=FALSE, warning=FALSE, fig.width=15 , fig.height=10}
ggpairs(Data_me[, c("age_QL","Rentabilit")], aes(colour= Rentabilit ))
```
La valeur du test de correlation nous confirme bien que les variables age et rentabilité sont corrélés.
De plus selon on coonstate à l'aide du graphe que la tranche d'age [50-70] ans génére plus de 400 euro de chiffre d'affaire en général.
## Relation avec localité (MAGASIN,PAYS et Région)
```{r include=FALSE}
Repart_magain <- Data_me %>% group_by(MAGASIN) %>% summarise(nb_client = n_distinct(IDCLIENT),nb_VIP = (table(VIP==1,exclude = FALSE)),nb_rentable= (table(Rentabilit==1,exclude = FALSE)),nb_rentable_vip =(table(VIP==1,Rentabilit==1,exclude = FALSE)))
Repart_magain$nb_rentable_vip <- as.numeric(Repart_magain$nb_rentable_vip)
Repart_magain$nb_VIP <- as.numeric(Repart_magain$nb_VIP)
Repart_magain <-Repart_magain %>% group_by(MAGASIN) %>% mutate(percent = round((nb_rentable_vip/nb_VIP)*100)) %>% arrange(desc(percent))
```
```{r echo=FALSE, fig.width=15 , fig.height=10}
Repart_magain[,1:6]%>%kable(escape = F,align = "r") %>%
row_spec(2:4, bold = T, color = "white", background = "#D7261E")%>% kable_styling(bootstrap_options = c("striped", "hover","condensed","responsive"),full_width = F,position = "c",font_size = 11 )%>% row_spec(0, bold = T, color = "white", background = "grey") %>% kable_styling(c("striped", "bordered")) %>% scroll_box(width = "800px", height = "300px")
```
Comme on peut le voir les MAGASIN *CLI* , *VIV* et *GEX* ont une proportion(*percent*) de VIP rentable assez élevé au alentour de 84-88%.
C'est un constat pour le moment mais on pourrai supposer que la proportion de VIP rentable joue un rôle plus ou moins important dans notre modèle.
```{r include=FALSE}
Repart_pays <- Data_me %>% group_by(PAYS) %>% summarise(nb_client = n_distinct(IDCLIENT),nb_VIP = (table(VIP==1,exclude = FALSE)),nb_rentable= (table(Rentabilit==1,exclude = FALSE)),nb_rentable_vip =sum(table(VIP==1,Rentabilit==1,exclude = FALSE)))
Repart_pays$nb_rentable_vip <- as.numeric(Repart_pays$nb_rentable_vip)
Repart_pays$nb_VIP <- as.numeric(Repart_pays$nb_VIP)
Repart_pays <-Repart_pays %>% group_by(PAYS) %>% mutate(percent = round(sum((nb_rentable_vip/nb_VIP))*100)) %>% arrange(desc(percent))
```
```{r echo=FALSE}
Repart_pays[,1:6]%>%kable(escape = F,align = "r") %>%
row_spec(2:3, bold = T, color = "white", background = "#D7261E")%>% kable_styling(bootstrap_options = c("striped", "hover","condensed","responsive"),full_width = F,position = "c",font_size = 11 )%>% row_spec(0, bold = T, color = "white", background = "grey") %>% kable_styling(c("striped", "bordered")) %>% scroll_box(width = "800px", height = "300px")
```
On fait quasiment le même constat ici concernant la variable VIP et rentabilité à travers une répartition par pays , cependant notre échantillon est accés essentiellement autour de la FRANCE. Il aurait été préférable d'avoir plus de données afin d'évaluer l'évolution et la répartion de ces metrics en Suisse(*CHE*).
```{r include=FALSE}
Repart_region <- Data_me %>% group_by(REGION=str_sub(CODEINSEE,1,2)) %>% summarise(nb_client = n_distinct(IDCLIENT),nb_VIP = (table(VIP==1,exclude = FALSE)),nb_rentable= (table(Rentabilit==1,exclude = FALSE)),nb_rentable_vip =sum(table(VIP==1,Rentabilit==1,exclude = FALSE)))
Repart_region$nb_rentable_vip <- as.numeric(Repart_region$nb_rentable_vip)
Repart_region$nb_VIP <- as.numeric(Repart_region$nb_VIP)
Repart_region <-Repart_region %>% group_by(REGION) %>% mutate(percent = round(sum((nb_rentable_vip/nb_VIP))*100)) %>% arrange(desc(nb_rentable_vip))
```
```{r echo=FALSE}
Repart_region[,1:6]%>%kable(escape = F,align = "r") %>%
row_spec(1:6, bold = T, color = "white", background = "#D7261E")%>% kable_styling(bootstrap_options = c("striped", "hover","condensed","responsive"),full_width = F,position = "c",font_size = 11) %>% row_spec(0, bold = T, color = "white", background = "grey") %>% kable_styling(c("striped", "bordered")) %>%
kable_styling()%>% scroll_box(width = "800px", height = "300px")
```
Conclusion du travail d'exploration:
Nous allons garder pour notre modèle les variables suivantes :
- CIVILITE2
- toutes les variables en relation avec l'adhesion(DATEDEBADH_mois, DATEREADH_mois DATEDEBUTADHESION,DATEREADHESION,DATEFINADHESION)
- AGE
- VIP,
- MAGASIN
- PAYS
- Moy_ticket
- nombre ticket
- Région
Afin de confirmer nos hypothèses, nous allons effectuer une régression logistique (modèle stepwise).
# Regression logistique méthode stepwise
## Construction data.set
```{r}
Test_data <- Data2 %>% group_by(IDCLIENT) %>% summarise(CIVILITE2,
DATENAISSANCE,
Age=2017-year(DATENAISSANCE),
DATEDEBUTADHESION=DATEDEBUTADHESION,
DATEREADHESION=DATEREADHESION,
DATEFINADHESION=DATEFINADHESION,
Dureé_réadhésion= DATEFINADH_an - DATEREADH_an,
Duree_adhesion=DATEREADH_an-DATEDEBADH_an,
Region=str_sub(CODEINSEE,1,2),
Ca_global= CA_2016_S1+CA_2016_S2,
Rentabilit =case_when(CA_2016_S1+CA_2016_S2>= 400 ~1,
CA_2016_S1+CA_2016_S2 < 400 ~ 0),
nb_tickets=nbtic_2016_S1+nbtic_2016_S2,
VIP=VIP,
MAGASIN=MAGASIN,
PAYS=PAYS,
Moy_ticket=round((CA_2016_S1+CA_2016_S2)/(nbtic_2016_S1+nbtic_2016_S2),2),
age_disc10=age_disc10,
nb_ticket = nbtic_2016,
nbtic_2016_T1=nbtic_2016_T1,
nbtic_2016_T2=nbtic_2016_T2,
nbtic_2016_T3=nbtic_2016_T3,
nbtic_2016_T4=nbtic_2016_T4,
DATEDEBADH_mois=DATEDEBADH_mois,
DATEREADH_mois=DATEREADH_mois)
```
```{r include=FALSE}
Test_data$age_disc10 <- as.factor(Test_data$age_disc10)
Test_data$Region <- as.factor(Test_data$Region)
Test_data$CIVILITE2 <- as.factor((Test_data$CIVILITE2))
Test_data$MAGASIN <- as.factor(Test_data$MAGASIN)
Test_data$VIP <- as.factor(Test_data$VIP)
Test_data$PAYS <- as.factor(Test_data$PAYS)
Test_data$DATEDEBADH_mois <- as.factor(Test_data$DATEDEBADH_mois)
Test_data$DATEREADH_mois <- as.factor((Test_data$DATEREADH_mois))
Test_data$Rentabilit <- as.factor(Test_data$Rentabilit)
```
## Data.set pour la régression
```{r}
Test_data2 <- Test_data %>% dplyr::select(IDCLIENT,CIVILITE2,
Age,
Dureé_réadhésion,
Duree_adhesion,
Region,MAGASIN,
Rentabilit,
VIP,
PAYS,
Moy_ticket,
nb_ticket,
age_disc10,
nb_ticket,
nbtic_2016_T1,
nbtic_2016_T2,
nbtic_2016_T3,
nbtic_2016_T4,
DATEDEBADH_mois,
DATEREADH_mois)
Test_data2 <- na.omit(Test_data)
```
Afin d'optimiser le scoring, on a fait le choix de supprimer les valeurs manquantes à ce niveau dans notre jeu de donnée.
## Construction echantillon d'apprentissage et de test
Pour la mise en place de notre dataset d'apprentissage, nous avons repartit l'echantillon en 75% train et donc 25% test.
```{r}
# Reproducible 75/25 train/test split: shuffle all rows once, then take the
# first 75% as the training sample and the remainder as the test sample.
set.seed(200)
nb_lignes <- floor((nrow(Test_data2)*0.75)) # Number of rows in the training sample: 75% of the dataset
Add_lignes <- Test_data2[sample(nrow(Test_data2)), ] # Shuffle the row order
Data.train <- Add_lignes[1:nb_lignes, ] # Training sample
Data.test <- Add_lignes[(nb_lignes+1):nrow(Add_lignes), ] # Test sample
```
```{r include=FALSE}
# modèle trivial réduit à la constante
str_constant <- "~ 1"
# modèle complet incluant toutes les explicatives potentielles
str_all <- "~CIVILITE2+Age+MAGASIN+VIP+PAYS+Region+age_disc10+nb_ticket+nbtic_2016_T1+nbtic_2016_T2+nbtic_2016_T3+nbtic_2016_T4+DATEDEBADH_mois+DATEREADH_mois"
```
## Affichage score final
```{r include=FALSE}
modele <- glm(Rentabilit~1,data=Data.train,family=binomial)
modele.stepwise <- stepAIC(modele, scope = list(lower = str_constant, upper = str_all), trace = TRUE, data = appren, direction = "both")
```
```{r echo=FALSE}
summary(modele.stepwise)
```
A la suite du modèle stepwise, on a obtenue le résultat suivant pour l'AIC le plus faible (5641.9):
**Rentabilit ~ nb_ticket + VIP + nbtic_2016_T1 + age_disc10 + MAGASIN + nbtic_2016_T4 + PAYS + CIVILITE2**
Il semble être le modéle le plus performant.
Premier constat par rapport à notre première partie exploration, les metrics:
- CIVILITE2
- AGE
- VIP,
- MAGASIN
- PAYS
- nombre ticket
Elles ont une influence dans la modélisation de notre variable y~Rentabilité.
## Ods Ratio
```{r include=FALSE}
odds.ratio(modele.stepwise)
```
```{r include=FALSE}
tmp <- tidy(modele.stepwise, conf.int = TRUE, exponentiate = TRUE)
```
```{r echo=FALSE, warning=TRUE}
knitr::kable(tmp)
```
# Construction de l’arbre de décision
```{r echo=FALSE}
# Fit a classification tree (Gini impurity split criterion) predicting the
# binary profitability outcome from VIP status, age bracket, country and the
# Q1/Q4 2016 ticket counts; minsplit = 15 and cp = 0.003 control pre-pruning.
Reg_tree <- rpart(Rentabilit~ VIP + age_disc10 + PAYS+
nbtic_2016_T1 + nbtic_2016_T4 ,data=Data.train, control = rpart.control(minsplit = 15,cp=0.003),parms = list(split = "gini"),method = "class")
```
## Choix cp
```{r echo=FALSE}
printcp(Reg_tree)
plotcp(Reg_tree)
```
## Arbre de décision
```{r echo=FALSE}
library(RColorBrewer)
library(rattle)
fancyRpartPlot(Reg_tree, caption = NULL)
```
```{r echo=FALSE}
plot(Reg_tree, uniform= TRUE, branch = 0.5, margin= 0.1 )
text(Reg_tree, all = FALSE, use.n = TRUE)
```
## Prédiction
```{r}
Reg_test_predict<-predict(Reg_tree,newdata=Data.test,type="class")
summary(Reg_test_predict)
```
## Matrice de confusion
```{r echo=FALSE}
mc<-table(Data.test$Rentabilit,Reg_test_predict)
print(mc)
```
## Erreur de classement
```{r echo=FALSE}
erreur.classement<-1.0-(mc[1,1]+mc[2,2])/sum(mc)
print(erreur.classement)
```
## Taux de prédiction
```{r echo=FALSE}
prediction=mc[2,2]/sum(mc[2,])
print(prediction)
```
## Qualité du modèle
```{r echo=FALSE}
Qualit=(mc[1,1]+mc[2,2])/sum(mc)
print(Qualit)
```
|
# ----------------------
# Author: Andreas Alfons
# KU Leuven
# ----------------------
#' Summarize cross-validation results
#'
#' Produce a summary of results from (repeated) \eqn{K}-fold cross-validation.
#'
#' @method summary cv
#'
#' @param object an object inheriting from class \code{"cv"} or
#' \code{"cvSelect"} that contains cross-validation results (note that the
#' latter includes objects of class \code{"cvTuning"}).
#' @param \dots currently ignored.
#'
#' @return
#' An object of class \code{"summary.cv"}, \code{"summary.cvSelect"} or
#' \code{"summary.cvTuning"}, depending on the class of \code{object}.
#'
#' @author Andreas Alfons
#'
#' @seealso \code{\link{cvFit}}, \code{\link{cvSelect}},
#' \code{\link{cvTuning}}, \code{\link{summary}}
#'
#' @example inst/doc/examples/example-summary.R
#'
#' @keywords utilities
#'
#' @export
summary.cv <- function(object, ...) {
  # Summarize the CV results across replications via the package's
  # aggregate() method, then bundle them with the sample size (n),
  # number of folds (K) and number of replications (R).
  aggregated <- aggregate(object, summary)
  structure(
    list(n = object$n, K = object$K, R = object$R, cv = aggregated),
    class = "summary.cv"
  )
}
#' @rdname summary.cv
#' @method summary cvSelect
#' @export
summary.cvSelect <- function(object, ...) {
  # Summarize each model's CV results and attach the index of the
  # best-performing model alongside the CV parameters.
  aggregated <- aggregate(object, summary)
  structure(
    list(n = object$n, K = object$K, R = object$R,
         best = object$best, cv = aggregated),
    class = "summary.cvSelect"
  )
}
#' @rdname summary.cv
#' @method summary cvTuning
#' @export
summary.cvTuning <- function(object, ...) {
  # Build on the "cvSelect" summary, then rebuild the list so that the
  # tuning-parameter grid appears between R and best.
  base_summary <- summary.cvSelect(object, ...)
  result <- list(n = base_summary$n, K = base_summary$K, R = base_summary$R,
                 tuning = object$tuning, best = base_summary$best,
                 cv = base_summary$cv)
  # NOTE(review): 'result' is a plain list at this point, so class(result)
  # is the implicit "list" — the returned object does NOT inherit
  # "summary.cvSelect". Kept as-is to preserve the original dispatch.
  class(result) <- c("summary.cvTuning", class(result))
  result
}
| /R/summary.R | no_license | cran/cvTools | R | false | false | 1,634 | r | # ----------------------
# Author: Andreas Alfons
# KU Leuven
# ----------------------
#' Summarize cross-validation results
#'
#' Produce a summary of results from (repeated) \eqn{K}-fold cross-validation.
#'
#' @method summary cv
#'
#' @param object an object inheriting from class \code{"cv"} or
#' \code{"cvSelect"} that contains cross-validation results (note that the
#' latter includes objects of class \code{"cvTuning"}).
#' @param \dots currently ignored.
#'
#' @return
#' An object of class \code{"summary.cv"}, \code{"summary.cvSelect"} or
#' \code{"summary.cvTuning"}, depending on the class of \code{object}.
#'
#' @author Andreas Alfons
#'
#' @seealso \code{\link{cvFit}}, \code{\link{cvSelect}},
#' \code{\link{cvTuning}}, \code{\link{summary}}
#'
#' @example inst/doc/examples/example-summary.R
#'
#' @keywords utilities
#'
#' @export
summary.cv <- function(object, ...) {
# Summarize the raw CV results across replications using summary().
cv <- aggregate(object, summary)
# Bundle with the number of observations (n), folds (K) and replications (R).
out <- list(n=object$n, K=object$K, R=object$R, cv=cv)
class(out) <- "summary.cv"
out
}
#' @rdname summary.cv
#' @method summary cvSelect
#' @export
summary.cvSelect <- function(object, ...) {
# Summarize each model's CV results across replications using summary().
cv <- aggregate(object, summary)
# Also carry along the index of the best-performing model.
out <- list(n=object$n, K=object$K, R=object$R, best=object$best, cv=cv)
class(out) <- "summary.cvSelect"
out
}
#' @rdname summary.cv
#' @method summary cvTuning
#' @export
summary.cvTuning <- function(object, ...) {
# Reuse the "cvSelect" summary, then insert the tuning grid before 'best'.
out <- summary.cvSelect(object, ...)
out <- list(n=out$n, K=out$K, R=out$R, tuning=object$tuning,
best=out$best, cv=out$cv)
# NOTE(review): 'out' was rebuilt as a plain list, so class(out) here is
# "list"; the result therefore does NOT inherit "summary.cvSelect".
class(out) <- c("summary.cvTuning", class(out))
out
}
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/coag_descript.R
\docType{data}
\name{coagulation}
\alias{coagulation}
\title{Blood coagulation times by diet}
\format{a data frame with 24 rows and two columns:
\describe{
\item{coag}{coagulation time in seconds}
\item{diet}{diet type: A, B, C, or D}
}}
\source{
"Statistics for Experimenters" by G. P. Box, W. G. Hunter and J. S. Hunter, Wiley, 1978
}
\usage{
coagulation
}
\description{
Dataset comes from a study of blood coagulation times. 24 animals were
randomly assigned to four different diets and the samples were taken
in a random order.
}
\keyword{datasets}
| /man/coagulation.Rd | no_license | raffled/oneway | R | false | false | 661 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/coag_descript.R
\docType{data}
\name{coagulation}
\alias{coagulation}
\title{Blood coagulation times by diet}
\format{a data frame with 24 rows and two columns:
\describe{
\item{coag}{coagulation time in seconds}
\item{diet}{diet type: A, B, C, or D}
}}
\source{
"Statistics for Experimenters" by G. P. Box, W. G. Hunter and J. S. Hunter, Wiley, 1978
}
\usage{
coagulation
}
\description{
Dataset comes from a study of blood coagulation times. 24 animals were
randomly assigned to four different diets and the samples were taken
in a random order.
}
\keyword{datasets}
|
library(datasets)
library(cluster)

# Cluster the US states (state.x77) on every attribute except Income.
state_raw <- state.x77
state_subset <- subset(state_raw, select = -Income)
# state_frost <- subset(state_raw, select = Frost)

# Standardize the columns so variables on large scales (e.g. Area) do not
# dominate the Euclidean distances used by k-means.
state_scaled <- scale(state_subset)

# Exploratory code kept for reference:
# distance <- dist(state_scaled)
# clust <- hclust(distance)
# plot(clust)
#
# Elbow plot to choose k (preallocated instead of growing a vector):
# wss <- numeric(10)
# for (i in seq_len(10)) {
#   wss[i] <- kmeans(state_scaled, i)$tot.withinss
# }
# plot(wss, type = "o")

# Fix the RNG seed so the random k-means initialization (and hence the
# cluster assignment) is reproducible across runs.
set.seed(42)
the_cluster <- kmeans(state_scaled, centers = 4)

# 2-D projection (first two principal components) of the 4 clusters.
clusplot(state_scaled, the_cluster$cluster,
         color = TRUE, shade = TRUE, labels = 2, lines = 0)
| /project7-clusters/cluster.R | no_license | johnnymeyer/cs450 | R | false | false | 519 | r | library(datasets)
library(cluster)
# Built-in US state statistics: 50 states x 8 numeric variables.
data_original = state.x77
# Drop the Income column; cluster on the remaining attributes.
data_temp <- subset(data_original, select = -Income)
# data_frost <- subset(data_original, select = Frost)
# Standardize so all variables contribute comparably to the distances.
data <- scale(data_temp)
# Earlier hierarchical-clustering exploration, kept for reference:
# distance <- dist(as.matrix(data))
#clust <- hclust(distance)
#plot(clust)
# Elbow-plot exploration used to pick k = 4:
#table = NULL
#for (i in 1:10) {
# the_cluster <- kmeans(data, i)
# table[i] <- the_cluster$tot.withinss
#}
#plot(table, type = 'o')
# Final model: k-means with 4 clusters.
# NOTE(review): no set.seed() precedes this call, so the assignment is
# not reproducible across runs — confirm whether that is intended.
the_cluster <- kmeans(data, 4)
# 2-D principal-component projection of the cluster assignment.
clusplot(data, the_cluster$cluster, color=TRUE, shade=TRUE, labels=2, lines=0)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/make_student_sched.R
\name{make_student_schedule}
\alias{make_student_schedule}
\title{Make student schedule}
\usage{
make_student_schedule(ranked_faculty, slots = 12, f_unavail = NULL)
}
\arguments{
\item{ranked_faculty}{dataframe returned from rank_faculty}
\item{slots}{number of slots in schedule}
\item{f_unavail}{data frame of whether faculty are available to meet (columns: Faculty, Slot)}
}
\value{
student schedule
}
\description{
Make student schedule
}
| /man/make_student_schedule.Rd | permissive | UM-OGPS/matchathon | R | false | true | 544 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/make_student_sched.R
\name{make_student_schedule}
\alias{make_student_schedule}
\title{Make student schedule}
\usage{
make_student_schedule(ranked_faculty, slots = 12, f_unavail = NULL)
}
\arguments{
\item{ranked_faculty}{dataframe returned from rank_faculty}
\item{slots}{number of slots in schedule}
\item{f_unavail}{data frame of whether faculty are available to meet (columns: Faculty, Slot)}
}
\value{
student schedule
}
\description{
Make student schedule
}
|
\name{AddEventToMigrationArray}
\alias{AddEventToMigrationArray}
\alias{AddEventToAMigrationArray}
\title{
Add an Event To a MigrationArray
}
\description{
This function integrates a non-coalescence demographic event within a model
or set of models. This can be useful if one wants to posit shifts in a parameter that
do not correspond to a splitting event (e.g., one would like migration to only occur
for a given length of time after a split, but then to cease).
}
\usage{
AddEventToMigrationArray(migrationArray, eventTime, n0multiplierVec = NULL,
growthVec = NULL, migrationMat = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{migrationArray}{
A list of models (or a single model) to which an event should be added
}
\item{eventTime}{
The relative time period at which the new event should be inserted within the collapseMatrix
}
\item{n0multiplierVec}{
A vector or single value specifying the n0multiplier parameter indices to be invoked during
the new time interval. If \code{NULL}, no new set of parameter indices are invoked at this time.
}
\item{growthVec}{
A vector or single value specifying the growth parameter indices to be invoked during
the new time interval. If \code{NULL}, no new set of parameter indices are invoked at this time.
}
\item{migrationMat}{
A matrix, vector, or single value specifying the migration parameter indices to be invoked during
the new time interval. If \code{NULL}, no new set of parameter indices are invoked at this time.
}
}
\details{
To use this function, one must specify a model (migrationIndividual) or set of models
(migrationArray). If a set of models is specified, these models must all contain the
same number of populations and the same number of collapse events (i.e., the collapseMatrix
component within each migrationIndividual must have the same dimensions).
The relative timing of the desired new event must be specified as a single number using
eventTime. An eventTime of 1 will place a new event (i.e., column) in the first column
position within the collapseMatrix (and the other columns will be shifted to the right).
An eventTime of 2 will place the new column in the second position, etc. The eventTime
cannot exceed the number of events (i.e., columns) within the original collapseMatrix.
The added column will consist entirely of NAs, indicating that no population coalescence
can occur at this time.
Finally, one can specify a new set of n0multiplier, growth, and/or migration parameter
indices to be invoked at the new specified time period using n0muliplierVec, growthVec, or
migrationMat, respectively. When the default value for these is used (which is \code{NULL}),
n0multiplier, growth, and migration matrices within each model are automatically expanded
by simply copying over parameter indices from the adjacent time period to the new time
period (i.e., no change is invoked). For n0multiplier and growth, a new column is added;
for migration, a new matrix is added.
However, one can also specify the parameter indices to be used at this new time, allowing
a shift in that parameter, without a corresponding coalescence event. These indices can be
specified in one of two ways:
First, one can simply specify a single parameter index which will be
implemented for all relevant populations at that time period.
For example, if the following collapseMatrix:
\preformatted{
$collapseMatrix
[,1] [,2]
[1,] 1 2
[2,] 1 NA
[3,] 0 2
}
is expanded to include a shift from non-zero migration among all populations to zero migration
at some point prior to the coalescence of populations 1 and 2 (meaning that migration has
occurred recently--upon secondary contact--but did not accompany initial divergence), we
should set \code{eventTime = 1} and \code{migrationMat = 1}. Assuming that the original model included
zero migration across all time periods, this will produce the following collpaseMatrix:
\preformatted{$collapseMatrix
[,1] [,2] [,3]
[1,] NA 1 2
[2,] NA 1 NA
[3,] NA 0 2
}
And the migrationArray will be expanded from two matrices:
\preformatted{$migrationArray
, , 1
[,1] [,2] [,3]
[1,] NA 0 0
[2,] 0 NA 0
[3,] 0 0 NA
, , 2
[,1] [,2] [,3]
[1,] NA NA 0
[2,] NA NA NA
[3,] 0 NA NA
}
to three matrices:
\preformatted{$migrationArray
, , 1
[,1] [,2] [,3]
[1,] NA 1 1
[2,] 1 NA 1
[3,] 1 1 NA
, , 2
[,1] [,2] [,3]
[1,] NA 0 0
[2,] 0 NA 0
[3,] 0 0 NA
, , 3
[,1] [,2] [,3]
[1,] NA NA 0
[2,] NA NA NA
[3,] 0 NA NA
}
where the new migration matrix was added in the first time period position and was filled with
zeros, as per the value specified.
This approach works similarly for n0multiplier and growth. For example, if one would like to model
population growth during the initial stages of divergence, followed by population size stability,
one should set \code{eventTime = 1} and \code{growthVec = 0} to change this history:
\preformatted{$collapseMatrix
[,1] [,2]
[1,] 1 2
[2,] 1 NA
[3,] 0 2
$growthMap
[,1] [,2]
[1,] 1 0
[2,] 1 NA
[3,] 0 0
}
into this history:
\preformatted{$collapseMatrix
[,1] [,2] [,3]
[1,] NA 1 2
[2,] NA 1 NA
[3,] NA 0 2
$growthMap
[,1] [,2] [,3]
[1,] 0 1 0
[2,] 0 1 NA
[3,] 0 0 0
}
If one wishes to specify more complex histories, in which different populations have different
indices, one can alternatively specify the entire new vector (for n0multiplier and growth)
or matrix (for migration) of parameter indices to be inserted. For example, in the previous example,
if one would rather model population size stability upon divergence of populations 1 and 2 followed
by recent growth of population 1, but not of populations 2 or 3, one can set \code{eventTime = 1} and
\code{growthVec = c(1,0,0)} to change the growth history from this:
\preformatted{$growthMap
[,1] [,2]
[1,] 0 0
[2,] 0 NA
[3,] 0 0
}
to this:
\preformatted{$growthMap
[,1] [,2] [,3]
[1,] 1 0 0
[2,] 0 0 NA
[3,] 0 0 0
}
One can also specify a migration matrix using migrationMat. For example, if, one would like to
insert a new matrix in which the migration rate differs among population pairs, one would specify
\preformatted{migrationMat = t(array(c(
NA, 3, 2,
1, NA, 1,
2, 3, NA),
dim=c(3,3)))
}
If one would rather, this can also be specified simply as a vector (with or without the NAs). For
example, the above migration matrix could be also specified as either \code{c(NA,1,2,3,NA,3,2,1,NA)}
or as \code{c(1,2,3,3,2,1)}, where the vector is read first by column, then by row. Note that the
number of values must match the number expected for that time period. For example, if one would like
to insert a new migration history at ancestral time period 2, one should remember that there are
fewer populations at that time period, and thus the number of values specified must be reduced
accordingly. For example, specifying distinct migration parameters from population 1-2 to 3 versus
from population 3 to 1-2 would be specified as either
\preformatted{migrationMat = t(array(c(
NA, NA, 2,
NA, NA, NA,
1, NA, NA),
dim=c(3,3)))
}
or as
\preformatted{migrationMat = c(1,2)
}
Specifying two distinct population size parameters for ancestral population 1-2 and population 3 at
the second time period would be done by setting \code{eventTime = 2} and \code{n0multiplierVec = c(1,2)},
to convert
\preformatted{$n0multiplierMap
[,1] [,2]
[1,] 1 1
[2,] 1 NA
[3,] 1 1
}
into
\preformatted{$n0multiplierMap
[,1] [,2] [,3]
[1,] 1 1 1
[2,] 1 NA NA
[3,] 1 2 1
}
Finally, note that although the function can only add a single time period to the collapseMatrix, one can
add multiple non-coalescence time periods by repeatedly running the function on the same migrationArray.
}
\note{
When analyzing a migrationArray using \code{\link{GridSearch}} that includes new time periods
produced by \code{AddEventToMigrationArray}, a time value must be specified using the \code{addedEventTime}
argument. \code{addedEventTime} takes either a single value or vector of values, such that there are as
many values as added events in the collapseMatrix (sorted from most recent to most ancient).
Time values can be specified as either a specific time period (in units of 4Ne) or as a relative time
period. When the latter is desired, the \code{addedEventTimeAsScalar} argument in \code{\link{GridSearch}}
must be set to \code{TRUE} (this is the default), which will cause the values specified by
\code{addedEventTime} to be treated as scalars, rather than as absolute. Scalar values must be greater than zero
and less than one, and will be multiplied by whatever the next collapse time estimate happens to be (which will
typically vary across models). For example, setting \code{addedEventTimeAsScalar = TRUE} and
\code{addedEventTime = 0.1} for the following model:
\preformatted{$collapseMatrix
[,1] [,2] [,3]
[1,] NA 1 2
[2,] NA 1 NA
[3,] NA 0 2
$growthMap
[,1] [,2] [,3]
[1,] 0 1 0
[2,] 0 0 NA
[3,] 0 0 0
}
will cause (looking backwards in time) population growth to commence in population 1 after 10\% of the
branch length leading to coalescence of populations 1 and 2 has been reached.
One final thing to highlight is that the specified timing of these events must fit the modeled chronology of
coalescence and non-coalescence events in the collapseMatrix. That is, the timing for an added non-coalescence
event given by \code{addedEventTime} must be later than the collapse event directly preceding the added event
and must be earlier than the collapse event directly following it. For this reason, it is recommended
that \code{addedEventTimes} be entered as scalars, as this guarantees that the time will always be
earlier than the collapse event that follows (but not that the time will occur after the previous collapse
event). Absolute values can also be used, but remember that in a grid search, different values
are explored, so many parameter combinations in the grid may not fit the specified event times. However,
PHRAPL filters out all parameter combinations in the parameter grid that do not adhere to the specified
chronology of events in the model. Thus, setting absolute dates may reduce the parameter space explored
for a model, but this parameter space will always fit the model.
}
\author{
Nathan Jackson
}
\references{
O'Meara, B.C., N. Jackson, A. Morales-Garcia, and B. Carstens (2014)
Phrapl in prep.
}
\examples{
# ##First, generate a migrationIndividual that models a
# ##specific coalescence history among four populations
# collapse_1<-c(0,0,1,1)
# collapse_2<-c(2,2,0,NA)
# collapse_3<-c(3,NA,3,NA)
# collapseList<-list(collapse_1,collapse_2,collapse_3)
# migrationIndividual<-GenerateMigrationIndividualsOneAtATime(collapseList=collapseList)
#
# ##Then, add a recent migration (secondary contact) event between sister populations 1 and 2
# migrationArray<-migrationIndividual
# eventTime=1
# migrationMat=c(1,0,0,1,0,0,0,0,0,0,0,0)
#
# migrationArray<-AddEventToMigrationArray(migrationArray=migrationArray,eventTime=eventTime,
# migrationMat=migrationMat)
}
| /man/AddEventToMigrationArray.Rd | no_license | bomeara/phrapl | R | false | false | 11,413 | rd | \name{AddEventToMigrationArray}
\alias{AddEventToMigrationArray}
\alias{AddEventToAMigrationArray}
\title{
Add an Event To a MigrationArray
}
\description{
This function integrates a non-coalescence demographic event within a model
or set of models. This can be useful if one wants to posit shifts in a parameter that
do not correspond to a splitting event (e.g., one would like migration to only occur
for a given length of time after a split, but then to cease).
}
\usage{
AddEventToMigrationArray(migrationArray, eventTime, n0multiplierVec = NULL,
growthVec = NULL, migrationMat = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{migrationArray}{
A list of models (or a single model) to which an event should be added
}
\item{eventTime}{
The relative time period at which the new event should be inserted within the collapseMatrix
}
\item{n0multiplierVec}{
A vector or single value specifying the n0multiplier parameter indices to be invoked during
the new time interval. If \code{NULL}, no new set of parameter indices are invoked at this time.
}
\item{growthVec}{
A vector or single value specifying the growth parameter indices to be invoked during
the new time interval. If \code{NULL}, no new set of parameter indices are invoked at this time.
}
\item{migrationMat}{
A matrix, vector, or single value specifying the migration parameter indices to be invoked during
the new time interval. If \code{NULL}, no new set of parameter indices are invoked at this time.
}
}
\details{
To use this function, one must specify a model (migrationIndividual) or set of models
(migrationArray). If a set of models is specified, these models must all contain the
same number of populations and the same number of collapse events (i.e., the collapseMatrix
compenent within each migrationIndividual must have the same dimensions).
The relative timing of the desired new event must be specified as a single number using
eventTime. An eventTime of 1 will place a new event (i.e., column) in the first column
position within the collapseMatrix (and the other columns will be shifted to the right).
An eventTime of 2 will place the new column in the second position, etc. The eventTime
cannot exceed the number of events (i.e., columns) within the original collapseMatrix.
The added column will consist entirely of NAs, indicating that no population coalescence
can occur at this time.
Finally, one can specify a new set of n0multiplier, growth, and/or migration parameter
indices to be invoked at the new specified time period using n0muliplierVec, growthVec, or
migrationMat, respectively. When the default value for these is used (which is \code{NULL}),
n0multiplier, growth, and migration matrices within each model are automatically expanded
by simply copying over parameter indices from the adjacent time period to the new time
period (i.e., no change is invoked). For n0multiplier and growth, a new column is added;
for migration, a new matrix is added.
However, one can also specify the parameter indices to be used at this new time, allowing
a shift in that parameter, without a corresponding coalescence event. These indices can be
specified in one of two ways:
First, one can simply specify a single parameter index which will be
implemented for all relevant populations at that time period.
For example, if the following collapseMatrix:
\preformatted{
$collapseMatrix
[,1] [,2]
[1,] 1 2
[2,] 1 NA
[3,] 0 2
}
is expanded to include a shift from non-zero migration among all populations to zero migration
at some point prior to the coalescence of populations 1 and 2 (meaning that migration has
occurred recently--upon secondary contact--but did not accompany initial divergence), we
should set \code{eventTime = 1} and \code{migrationMat = 1}. Assuming that the original model included
zero migration across all time periods, this will produce the following collapseMatrix:
\preformatted{$collapseMatrix
[,1] [,2] [,3]
[1,] NA 1 2
[2,] NA 1 NA
[3,] NA 0 2
}
And the migrationArray will be expanded from two matrices:
\preformatted{$migrationArray
, , 1
[,1] [,2] [,3]
[1,] NA 0 0
[2,] 0 NA 0
[3,] 0 0 NA
, , 2
[,1] [,2] [,3]
[1,] NA NA 0
[2,] NA NA NA
[3,] 0 NA NA
}
to three matrices:
\preformatted{$migrationArray
, , 1
[,1] [,2] [,3]
[1,] NA 1 1
[2,] 1 NA 1
[3,] 1 1 NA
, , 2
[,1] [,2] [,3]
[1,] NA 0 0
[2,] 0 NA 0
[3,] 0 0 NA
, , 3
[,1] [,2] [,3]
[1,] NA NA 0
[2,] NA NA NA
[3,] 0 NA NA
}
where the new migration matrix was added in the first time period position and was filled with
zeros, as per the value specified.
This approach works similarly for n0multiplier and growth. For example, if one would like to model
population growth during the initial stages of divergence, followed by population size stability,
one should set \code{eventTime = 1} and \code{growthVec = 0} to change this history:
\preformatted{$collapseMatrix
[,1] [,2]
[1,] 1 2
[2,] 1 NA
[3,] 0 2
$growthMap
[,1] [,2]
[1,] 1 0
[2,] 1 NA
[3,] 0 0
}
into this history:
\preformatted{$collapseMatrix
[,1] [,2] [,3]
[1,] NA 1 2
[2,] NA 1 NA
[3,] NA 0 2
$growthMap
[,1] [,2] [,3]
[1,] 0 1 0
[2,] 0 1 NA
[3,] 0 0 0
}
If one wishes to specify more complex histories, in which different populations have different
indices, one can alternatively specify the entire new vector (for n0multiplier and growth)
or matrix (for migration) of parameter indices to be inserted. For example, in the previous example,
if one would rather model population size stability upon divergence of populations 1 and 2 followed
by recent growth of population 1, but not of populations 2 or 3, one can set \code{eventTime = 1} and
\code{growthVec = c(1,0,0)} to change the growth history from this:
\preformatted{$growthMap
[,1] [,2]
[1,] 0 0
[2,] 0 NA
[3,] 0 0
}
to this:
\preformatted{$growthMap
[,1] [,2] [,3]
[1,] 1 0 0
[2,] 0 0 NA
[3,] 0 0 0
}
One can also specify a migration matrix using migrationMat. For example, if one would like to
insert a new matrix in which the migration rate differs among population pairs, one would specify
\preformatted{migrationMat = t(array(c(
NA, 3, 2,
1, NA, 1,
2, 3, NA),
dim=c(3,3)))
}
If one would rather, this can also be specified simply as a vector (with or without the NAs). For
example, the above migration matrix could be also specified as either \code{c(NA,1,2,3,NA,3,2,1,NA)}
or as \code{c(1,2,3,3,2,1)}, where the vector is read first by column, then by row. Note that the
number of values must match the number expected for that time period. For example, if one would like
to insert a new migration history at ancestral time period 2, one should remember that there are
fewer populations at that time period, and thus the number of values specified must be reduced
accordingly. For example, specifying distinct migration parameters from population 1-2 to 3 versus
from population 3 to 1-2 would be specified as either
\preformatted{migrationMat = t(array(c(
NA, NA, 2,
NA, NA, NA,
1, NA, NA),
dim=c(3,3)))
}
or as
\preformatted{migrationMat = c(1,2)
}
Specifying two distinct population size parameters for ancestral population 1-2 and population 3 at
the second time period would be done by setting \code{eventTime = 2} and \code{n0multiplierVec = c(1,2)},
to convert
\preformatted{$n0multiplierMap
[,1] [,2]
[1,] 1 1
[2,] 1 NA
[3,] 1 1
}
into
\preformatted{$n0multiplierMap
[,1] [,2] [,3]
[1,] 1 1 1
[2,] 1 NA NA
[3,] 1 2 1
}
Finally, note that although the function can only add a single time period to the collapseMatrix, one can
add multiple non-coalescence time periods by repeatedly running the function on the same migrationArray.
}
\note{
When analyzing a migrationArray using \code{\link{GridSearch}} that includes new time periods
produced by \code{AddEventToMigrationArray}, a time value must be specified using the \code{addedEventTime}
argument. \code{addedEventTime} takes either a single value or vector of values, such that there are as
many values as added events in the collapseMatrix (sorted from most recent to most ancient).
Time values can be specified as either a specific time period (in units of 4Ne) or as a relative time
period. When the latter is desired, the \code{addedEventTimeAsScalar} argument in \code{\link{GridSearch}}
must be set to \code{TRUE} (this is the default), which will cause the values specified by
\code{addedEventTime} to be treated as scalars, rather than as absolute. Scalar values must be greater than zero
and less than one, and will be multiplied by whatever the next collapse time estimate happens to be (which will
typically vary across models). For example, setting \code{addedEventTimeAsScalar = TRUE} and
\code{addedEventTime = 0.1} for the following model:
\preformatted{$collapseMatrix
[,1] [,2] [,3]
[1,] NA 1 2
[2,] NA 1 NA
[3,] NA 0 2
$growthMap
[,1] [,2] [,3]
[1,] 0 1 0
[2,] 0 0 NA
[3,] 0 0 0
}
will cause (looking backwards in time) population growth to commence in population 1 after 10\% of the
branch length leading to coalescence of populations 1 and 2 has been reached.
One final thing to highlight is that the specified timing of these events must fit the modeled chronology of
coalescence and non-coalescence events in the collapseMatrix. That is, the timing for an added non-coalescence
event given by \code{addedEventTime} must be later than the collapse event directly preceding the added event
and must be earlier than the collapse event directly following it. For this reason, it is recommended
that \code{addedEventTimes} be entered as scalars, as this guarantees that the time will always be
earlier than the collapse event that follows (but not that the time will occur after the previous collapse
event). Absolute values can also be used, but remember that in a grid search, different values
are explored, so many parameter combinations in the grid may not fit the specified event times. However,
PHRAPL filters out all parameter combinations in the parameter grid that do not adhere to the specified
chronology of events in the model. Thus, setting absolute dates may reduce the parameter space explored
for a model, but this parameter space will always fit the model.
}
\author{
Nathan Jackson
}
\references{
O'Meara, B.C., N. Jackson, A. Morales-Garcia, and B. Carstens (2014)
Phrapl in prep.
}
\examples{
# ##First, generate a migrationIndividual that models a
# ##specific coalescence history among four populations
# collapse_1<-c(0,0,1,1)
# collapse_2<-c(2,2,0,NA)
# collapse_3<-c(3,NA,3,NA)
# collapseList<-list(collapse_1,collapse_2,collapse_3)
# migrationIndividual<-GenerateMigrationIndividualsOneAtATime(collapseList=collapseList)
#
# ##Then, add a recent migration (secondary contact) event between sister populations 1 and 2
# migrationArray<-migrationIndividual
# eventTime=1
# migrationMat=c(1,0,0,1,0,0,0,0,0,0,0,0)
#
# migrationArray<-AddEventToMigrationArray(migrationArray=migrationArray,eventTime=eventTime,
# migrationMat=migrationMat)
}
|
#create a function
best<-function(state, outcome)
{
#state <- "MD"
#outcome <- "heart attack"
data <- read.csv("outcome-of-care-measures.csv", colClasses = "character", header=TRUE)
fd <- as.data.frame(cbind(data[, 2], # hospital
data[, 7], # state
data[, 11], # heart attack
data[, 17], # heart failure
data[, 23]), # pneumonia
stringsAsFactors = FALSE)
colnames(fd) <- c("hospital", "state", "heart attack", "heart failure", "pneumonia")
if(!state %in% fd[, "state"])
{
stop('invalid state')
}
else if(!outcome %in% c("heart attack", "heart failure", "pneumonia"))
{
stop('invalid outcome')
}
else
{
si <- which(fd[, "state"] == state) # Select the vector(row #) in the Dataset that matches the state
ts <- fd[si, ] # extracting data for the called state
oi <- as.numeric(ts[, eval(outcome)])
min_val <- min(oi, na.rm = TRUE)
result <- ts[, "hospital"][which(oi == min_val)]
output <- result[order(result)]
}
return(output)
}
#View(oi) # allows you to see the datafram in an excel format screen
#View(fd[,"heart attack"])
#View(fd[,c("heart attack", "heart failure")]) # selecting multiple rows with more than one column in dataframe
#View(fd[,"heart attack"]) # shows all the row for the column heart attack
#View(min(oi,na.rm = TRUE))
#View(ts[, "hospital"])
#View(ts[, eval(outcome)])
#View(ts[, outcome])
#View(oi) | /best.R | no_license | bilalsadiq85/ProgrammingAssignment3 | R | false | false | 1,635 | r | #create a function
best<-function(state, outcome)
{
#state <- "MD"
#outcome <- "heart attack"
data <- read.csv("outcome-of-care-measures.csv", colClasses = "character", header=TRUE)
fd <- as.data.frame(cbind(data[, 2], # hospital
data[, 7], # state
data[, 11], # heart attack
data[, 17], # heart failure
data[, 23]), # pneumonia
stringsAsFactors = FALSE)
colnames(fd) <- c("hospital", "state", "heart attack", "heart failure", "pneumonia")
if(!state %in% fd[, "state"])
{
stop('invalid state')
}
else if(!outcome %in% c("heart attack", "heart failure", "pneumonia"))
{
stop('invalid outcome')
}
else
{
si <- which(fd[, "state"] == state) # Select the vector(row #) in the Dataset that matches the state
ts <- fd[si, ] # extracting data for the called state
oi <- as.numeric(ts[, eval(outcome)])
min_val <- min(oi, na.rm = TRUE)
result <- ts[, "hospital"][which(oi == min_val)]
output <- result[order(result)]
}
return(output)
}
#View(oi) # allows you to see the datafram in an excel format screen
#View(fd[,"heart attack"])
#View(fd[,c("heart attack", "heart failure")]) # selecting multiple rows with more than one column in dataframe
#View(fd[,"heart attack"]) # shows all the row for the column heart attack
#View(min(oi,na.rm = TRUE))
#View(ts[, "hospital"])
#View(ts[, eval(outcome)])
#View(ts[, outcome])
#View(oi) |
/inst/examples/chap11/oxygenOrganism.r | no_license | cran/ecolMod | R | false | false | 3,767 | r | ||
# NamSor API v2
#
# NamSor API v2 : enpoints to process personal names (gender, cultural origin or ethnicity) in all alphabets or languages. Use GET methods for small tests, but prefer POST methods for higher throughput (batch processing of up to 100 names at a time). Need something you can't find here? We have many more features coming soon. Let us know, we'll do our best to add it!
#
# The version of the OpenAPI document: 2.0.10
# Contact: contact@namsor.com
# Generated by: https://openapi-generator.tech
#' @docType class
#' @title FirstLastNameOriginedOut
#' @description FirstLastNameOriginedOut Class
#' @format An \code{R6Class} generator object
#' @field id character [optional]
#'
#' @field firstName character [optional]
#'
#' @field lastName character [optional]
#'
#' @field countryOrigin character [optional]
#'
#' @field countryOriginAlt character [optional]
#'
#' @field countriesOriginTop list( character ) [optional]
#'
#' @field score numeric [optional]
#'
#' @field regionOrigin character [optional]
#'
#' @field topRegionOrigin character [optional]
#'
#' @field subRegionOrigin character [optional]
#'
#' @field probabilityCalibrated numeric [optional]
#'
#' @field probabilityAltCalibrated numeric [optional]
#'
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
FirstLastNameOriginedOut <- R6::R6Class(
'FirstLastNameOriginedOut',
public = list(
`id` = NULL,
`firstName` = NULL,
`lastName` = NULL,
`countryOrigin` = NULL,
`countryOriginAlt` = NULL,
`countriesOriginTop` = NULL,
`score` = NULL,
`regionOrigin` = NULL,
`topRegionOrigin` = NULL,
`subRegionOrigin` = NULL,
`probabilityCalibrated` = NULL,
`probabilityAltCalibrated` = NULL,
initialize = function(`id`=NULL, `firstName`=NULL, `lastName`=NULL, `countryOrigin`=NULL, `countryOriginAlt`=NULL, `countriesOriginTop`=NULL, `score`=NULL, `regionOrigin`=NULL, `topRegionOrigin`=NULL, `subRegionOrigin`=NULL, `probabilityCalibrated`=NULL, `probabilityAltCalibrated`=NULL, ...){
local.optional.var <- list(...)
if (!is.null(`id`)) {
stopifnot(is.character(`id`), length(`id`) == 1)
self$`id` <- `id`
}
if (!is.null(`firstName`)) {
stopifnot(is.character(`firstName`), length(`firstName`) == 1)
self$`firstName` <- `firstName`
}
if (!is.null(`lastName`)) {
stopifnot(is.character(`lastName`), length(`lastName`) == 1)
self$`lastName` <- `lastName`
}
if (!is.null(`countryOrigin`)) {
stopifnot(is.character(`countryOrigin`), length(`countryOrigin`) == 1)
self$`countryOrigin` <- `countryOrigin`
}
if (!is.null(`countryOriginAlt`)) {
stopifnot(is.character(`countryOriginAlt`), length(`countryOriginAlt`) == 1)
self$`countryOriginAlt` <- `countryOriginAlt`
}
if (!is.null(`countriesOriginTop`)) {
stopifnot(is.vector(`countriesOriginTop`), length(`countriesOriginTop`) != 0)
sapply(`countriesOriginTop`, function(x) stopifnot(is.character(x)))
self$`countriesOriginTop` <- `countriesOriginTop`
}
if (!is.null(`score`)) {
stopifnot(is.numeric(`score`), length(`score`) == 1)
self$`score` <- `score`
}
if (!is.null(`regionOrigin`)) {
stopifnot(is.character(`regionOrigin`), length(`regionOrigin`) == 1)
self$`regionOrigin` <- `regionOrigin`
}
if (!is.null(`topRegionOrigin`)) {
stopifnot(is.character(`topRegionOrigin`), length(`topRegionOrigin`) == 1)
self$`topRegionOrigin` <- `topRegionOrigin`
}
if (!is.null(`subRegionOrigin`)) {
stopifnot(is.character(`subRegionOrigin`), length(`subRegionOrigin`) == 1)
self$`subRegionOrigin` <- `subRegionOrigin`
}
if (!is.null(`probabilityCalibrated`)) {
stopifnot(is.numeric(`probabilityCalibrated`), length(`probabilityCalibrated`) == 1)
self$`probabilityCalibrated` <- `probabilityCalibrated`
}
if (!is.null(`probabilityAltCalibrated`)) {
stopifnot(is.numeric(`probabilityAltCalibrated`), length(`probabilityAltCalibrated`) == 1)
self$`probabilityAltCalibrated` <- `probabilityAltCalibrated`
}
},
toJSON = function() {
FirstLastNameOriginedOutObject <- list()
if (!is.null(self$`id`)) {
FirstLastNameOriginedOutObject[['id']] <-
self$`id`
}
if (!is.null(self$`firstName`)) {
FirstLastNameOriginedOutObject[['firstName']] <-
self$`firstName`
}
if (!is.null(self$`lastName`)) {
FirstLastNameOriginedOutObject[['lastName']] <-
self$`lastName`
}
if (!is.null(self$`countryOrigin`)) {
FirstLastNameOriginedOutObject[['countryOrigin']] <-
self$`countryOrigin`
}
if (!is.null(self$`countryOriginAlt`)) {
FirstLastNameOriginedOutObject[['countryOriginAlt']] <-
self$`countryOriginAlt`
}
if (!is.null(self$`countriesOriginTop`)) {
FirstLastNameOriginedOutObject[['countriesOriginTop']] <-
self$`countriesOriginTop`
}
if (!is.null(self$`score`)) {
FirstLastNameOriginedOutObject[['score']] <-
self$`score`
}
if (!is.null(self$`regionOrigin`)) {
FirstLastNameOriginedOutObject[['regionOrigin']] <-
self$`regionOrigin`
}
if (!is.null(self$`topRegionOrigin`)) {
FirstLastNameOriginedOutObject[['topRegionOrigin']] <-
self$`topRegionOrigin`
}
if (!is.null(self$`subRegionOrigin`)) {
FirstLastNameOriginedOutObject[['subRegionOrigin']] <-
self$`subRegionOrigin`
}
if (!is.null(self$`probabilityCalibrated`)) {
FirstLastNameOriginedOutObject[['probabilityCalibrated']] <-
self$`probabilityCalibrated`
}
if (!is.null(self$`probabilityAltCalibrated`)) {
FirstLastNameOriginedOutObject[['probabilityAltCalibrated']] <-
self$`probabilityAltCalibrated`
}
FirstLastNameOriginedOutObject
},
fromJSON = function(FirstLastNameOriginedOutJson) {
FirstLastNameOriginedOutObject <- jsonlite::fromJSON(FirstLastNameOriginedOutJson)
if (!is.null(FirstLastNameOriginedOutObject$`id`)) {
self$`id` <- FirstLastNameOriginedOutObject$`id`
}
if (!is.null(FirstLastNameOriginedOutObject$`firstName`)) {
self$`firstName` <- FirstLastNameOriginedOutObject$`firstName`
}
if (!is.null(FirstLastNameOriginedOutObject$`lastName`)) {
self$`lastName` <- FirstLastNameOriginedOutObject$`lastName`
}
if (!is.null(FirstLastNameOriginedOutObject$`countryOrigin`)) {
self$`countryOrigin` <- FirstLastNameOriginedOutObject$`countryOrigin`
}
if (!is.null(FirstLastNameOriginedOutObject$`countryOriginAlt`)) {
self$`countryOriginAlt` <- FirstLastNameOriginedOutObject$`countryOriginAlt`
}
if (!is.null(FirstLastNameOriginedOutObject$`countriesOriginTop`)) {
self$`countriesOriginTop` <- ApiClient$new()$deserializeObj(FirstLastNameOriginedOutObject$`countriesOriginTop`, "array[character]", loadNamespace("namsor"))
}
if (!is.null(FirstLastNameOriginedOutObject$`score`)) {
self$`score` <- FirstLastNameOriginedOutObject$`score`
}
if (!is.null(FirstLastNameOriginedOutObject$`regionOrigin`)) {
self$`regionOrigin` <- FirstLastNameOriginedOutObject$`regionOrigin`
}
if (!is.null(FirstLastNameOriginedOutObject$`topRegionOrigin`)) {
self$`topRegionOrigin` <- FirstLastNameOriginedOutObject$`topRegionOrigin`
}
if (!is.null(FirstLastNameOriginedOutObject$`subRegionOrigin`)) {
self$`subRegionOrigin` <- FirstLastNameOriginedOutObject$`subRegionOrigin`
}
if (!is.null(FirstLastNameOriginedOutObject$`probabilityCalibrated`)) {
self$`probabilityCalibrated` <- FirstLastNameOriginedOutObject$`probabilityCalibrated`
}
if (!is.null(FirstLastNameOriginedOutObject$`probabilityAltCalibrated`)) {
self$`probabilityAltCalibrated` <- FirstLastNameOriginedOutObject$`probabilityAltCalibrated`
}
},
toJSONString = function() {
jsoncontent <- c(
if (!is.null(self$`id`)) {
sprintf(
'"id":
"%s"
',
self$`id`
)},
if (!is.null(self$`firstName`)) {
sprintf(
'"firstName":
"%s"
',
self$`firstName`
)},
if (!is.null(self$`lastName`)) {
sprintf(
'"lastName":
"%s"
',
self$`lastName`
)},
if (!is.null(self$`countryOrigin`)) {
sprintf(
'"countryOrigin":
"%s"
',
self$`countryOrigin`
)},
if (!is.null(self$`countryOriginAlt`)) {
sprintf(
'"countryOriginAlt":
"%s"
',
self$`countryOriginAlt`
)},
if (!is.null(self$`countriesOriginTop`)) {
sprintf(
'"countriesOriginTop":
[%s]
',
paste(unlist(lapply(self$`countriesOriginTop`, function(x) paste0('"', x, '"'))), collapse=",")
)},
if (!is.null(self$`score`)) {
sprintf(
'"score":
%d
',
self$`score`
)},
if (!is.null(self$`regionOrigin`)) {
sprintf(
'"regionOrigin":
"%s"
',
self$`regionOrigin`
)},
if (!is.null(self$`topRegionOrigin`)) {
sprintf(
'"topRegionOrigin":
"%s"
',
self$`topRegionOrigin`
)},
if (!is.null(self$`subRegionOrigin`)) {
sprintf(
'"subRegionOrigin":
"%s"
',
self$`subRegionOrigin`
)},
if (!is.null(self$`probabilityCalibrated`)) {
sprintf(
'"probabilityCalibrated":
%d
',
self$`probabilityCalibrated`
)},
if (!is.null(self$`probabilityAltCalibrated`)) {
sprintf(
'"probabilityAltCalibrated":
%d
',
self$`probabilityAltCalibrated`
)}
)
jsoncontent <- paste(jsoncontent, collapse = ",")
paste('{', jsoncontent, '}', sep = "")
},
fromJSONString = function(FirstLastNameOriginedOutJson) {
FirstLastNameOriginedOutObject <- jsonlite::fromJSON(FirstLastNameOriginedOutJson)
self$`id` <- FirstLastNameOriginedOutObject$`id`
self$`firstName` <- FirstLastNameOriginedOutObject$`firstName`
self$`lastName` <- FirstLastNameOriginedOutObject$`lastName`
self$`countryOrigin` <- FirstLastNameOriginedOutObject$`countryOrigin`
self$`countryOriginAlt` <- FirstLastNameOriginedOutObject$`countryOriginAlt`
self$`countriesOriginTop` <- ApiClient$new()$deserializeObj(FirstLastNameOriginedOutObject$`countriesOriginTop`, "array[character]", loadNamespace("namsor"))
self$`score` <- FirstLastNameOriginedOutObject$`score`
self$`regionOrigin` <- FirstLastNameOriginedOutObject$`regionOrigin`
self$`topRegionOrigin` <- FirstLastNameOriginedOutObject$`topRegionOrigin`
self$`subRegionOrigin` <- FirstLastNameOriginedOutObject$`subRegionOrigin`
self$`probabilityCalibrated` <- FirstLastNameOriginedOutObject$`probabilityCalibrated`
self$`probabilityAltCalibrated` <- FirstLastNameOriginedOutObject$`probabilityAltCalibrated`
self
}
)
)
| /R/first_last_name_origined_out.R | no_license | namsor/namsor-r-sdk2 | R | false | false | 11,619 | r | # NamSor API v2
#
# NamSor API v2 : enpoints to process personal names (gender, cultural origin or ethnicity) in all alphabets or languages. Use GET methods for small tests, but prefer POST methods for higher throughput (batch processing of up to 100 names at a time). Need something you can't find here? We have many more features coming soon. Let us know, we'll do our best to add it!
#
# The version of the OpenAPI document: 2.0.10
# Contact: contact@namsor.com
# Generated by: https://openapi-generator.tech
#' @docType class
#' @title FirstLastNameOriginedOut
#' @description FirstLastNameOriginedOut Class
#' @format An \code{R6Class} generator object
#' @field id character [optional]
#'
#' @field firstName character [optional]
#'
#' @field lastName character [optional]
#'
#' @field countryOrigin character [optional]
#'
#' @field countryOriginAlt character [optional]
#'
#' @field countriesOriginTop list( character ) [optional]
#'
#' @field score numeric [optional]
#'
#' @field regionOrigin character [optional]
#'
#' @field topRegionOrigin character [optional]
#'
#' @field subRegionOrigin character [optional]
#'
#' @field probabilityCalibrated numeric [optional]
#'
#' @field probabilityAltCalibrated numeric [optional]
#'
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
# R6 model for the NamSor name-origin response (generated from the OpenAPI
# spec, then hand-fixed).
#
# Fix: toJSONString() formatted the numeric fields `score`,
# `probabilityCalibrated` and `probabilityAltCalibrated` with sprintf("%d"),
# which raises "invalid format '%d'" for R double values; they are now
# formatted with "%s".  The unused `local.optional.var` capture was removed.
FirstLastNameOriginedOut <- R6::R6Class(
  'FirstLastNameOriginedOut',
  public = list(
    `id` = NULL,
    `firstName` = NULL,
    `lastName` = NULL,
    `countryOrigin` = NULL,
    `countryOriginAlt` = NULL,
    `countriesOriginTop` = NULL,
    `score` = NULL,
    `regionOrigin` = NULL,
    `topRegionOrigin` = NULL,
    `subRegionOrigin` = NULL,
    `probabilityCalibrated` = NULL,
    `probabilityAltCalibrated` = NULL,
    # Validate (type and scalar length) and store every supplied field.
    # Extra arguments passed through `...` are ignored.
    initialize = function(`id`=NULL, `firstName`=NULL, `lastName`=NULL, `countryOrigin`=NULL, `countryOriginAlt`=NULL, `countriesOriginTop`=NULL, `score`=NULL, `regionOrigin`=NULL, `topRegionOrigin`=NULL, `subRegionOrigin`=NULL, `probabilityCalibrated`=NULL, `probabilityAltCalibrated`=NULL, ...){
      if (!is.null(`id`)) {
        stopifnot(is.character(`id`), length(`id`) == 1)
        self$`id` <- `id`
      }
      if (!is.null(`firstName`)) {
        stopifnot(is.character(`firstName`), length(`firstName`) == 1)
        self$`firstName` <- `firstName`
      }
      if (!is.null(`lastName`)) {
        stopifnot(is.character(`lastName`), length(`lastName`) == 1)
        self$`lastName` <- `lastName`
      }
      if (!is.null(`countryOrigin`)) {
        stopifnot(is.character(`countryOrigin`), length(`countryOrigin`) == 1)
        self$`countryOrigin` <- `countryOrigin`
      }
      if (!is.null(`countryOriginAlt`)) {
        stopifnot(is.character(`countryOriginAlt`), length(`countryOriginAlt`) == 1)
        self$`countryOriginAlt` <- `countryOriginAlt`
      }
      if (!is.null(`countriesOriginTop`)) {
        stopifnot(is.vector(`countriesOriginTop`), length(`countriesOriginTop`) != 0)
        sapply(`countriesOriginTop`, function(x) stopifnot(is.character(x)))
        self$`countriesOriginTop` <- `countriesOriginTop`
      }
      if (!is.null(`score`)) {
        stopifnot(is.numeric(`score`), length(`score`) == 1)
        self$`score` <- `score`
      }
      if (!is.null(`regionOrigin`)) {
        stopifnot(is.character(`regionOrigin`), length(`regionOrigin`) == 1)
        self$`regionOrigin` <- `regionOrigin`
      }
      if (!is.null(`topRegionOrigin`)) {
        stopifnot(is.character(`topRegionOrigin`), length(`topRegionOrigin`) == 1)
        self$`topRegionOrigin` <- `topRegionOrigin`
      }
      if (!is.null(`subRegionOrigin`)) {
        stopifnot(is.character(`subRegionOrigin`), length(`subRegionOrigin`) == 1)
        self$`subRegionOrigin` <- `subRegionOrigin`
      }
      if (!is.null(`probabilityCalibrated`)) {
        stopifnot(is.numeric(`probabilityCalibrated`), length(`probabilityCalibrated`) == 1)
        self$`probabilityCalibrated` <- `probabilityCalibrated`
      }
      if (!is.null(`probabilityAltCalibrated`)) {
        stopifnot(is.numeric(`probabilityAltCalibrated`), length(`probabilityAltCalibrated`) == 1)
        self$`probabilityAltCalibrated` <- `probabilityAltCalibrated`
      }
    },
    # Return a named list of the non-NULL fields, ready for jsonlite::toJSON.
    toJSON = function() {
      FirstLastNameOriginedOutObject <- list()
      if (!is.null(self$`id`)) {
        FirstLastNameOriginedOutObject[['id']] <-
          self$`id`
      }
      if (!is.null(self$`firstName`)) {
        FirstLastNameOriginedOutObject[['firstName']] <-
          self$`firstName`
      }
      if (!is.null(self$`lastName`)) {
        FirstLastNameOriginedOutObject[['lastName']] <-
          self$`lastName`
      }
      if (!is.null(self$`countryOrigin`)) {
        FirstLastNameOriginedOutObject[['countryOrigin']] <-
          self$`countryOrigin`
      }
      if (!is.null(self$`countryOriginAlt`)) {
        FirstLastNameOriginedOutObject[['countryOriginAlt']] <-
          self$`countryOriginAlt`
      }
      if (!is.null(self$`countriesOriginTop`)) {
        FirstLastNameOriginedOutObject[['countriesOriginTop']] <-
          self$`countriesOriginTop`
      }
      if (!is.null(self$`score`)) {
        FirstLastNameOriginedOutObject[['score']] <-
          self$`score`
      }
      if (!is.null(self$`regionOrigin`)) {
        FirstLastNameOriginedOutObject[['regionOrigin']] <-
          self$`regionOrigin`
      }
      if (!is.null(self$`topRegionOrigin`)) {
        FirstLastNameOriginedOutObject[['topRegionOrigin']] <-
          self$`topRegionOrigin`
      }
      if (!is.null(self$`subRegionOrigin`)) {
        FirstLastNameOriginedOutObject[['subRegionOrigin']] <-
          self$`subRegionOrigin`
      }
      if (!is.null(self$`probabilityCalibrated`)) {
        FirstLastNameOriginedOutObject[['probabilityCalibrated']] <-
          self$`probabilityCalibrated`
      }
      if (!is.null(self$`probabilityAltCalibrated`)) {
        FirstLastNameOriginedOutObject[['probabilityAltCalibrated']] <-
          self$`probabilityAltCalibrated`
      }
      FirstLastNameOriginedOutObject
    },
    # Populate the fields from a JSON string; unknown fields are ignored.
    fromJSON = function(FirstLastNameOriginedOutJson) {
      FirstLastNameOriginedOutObject <- jsonlite::fromJSON(FirstLastNameOriginedOutJson)
      if (!is.null(FirstLastNameOriginedOutObject$`id`)) {
        self$`id` <- FirstLastNameOriginedOutObject$`id`
      }
      if (!is.null(FirstLastNameOriginedOutObject$`firstName`)) {
        self$`firstName` <- FirstLastNameOriginedOutObject$`firstName`
      }
      if (!is.null(FirstLastNameOriginedOutObject$`lastName`)) {
        self$`lastName` <- FirstLastNameOriginedOutObject$`lastName`
      }
      if (!is.null(FirstLastNameOriginedOutObject$`countryOrigin`)) {
        self$`countryOrigin` <- FirstLastNameOriginedOutObject$`countryOrigin`
      }
      if (!is.null(FirstLastNameOriginedOutObject$`countryOriginAlt`)) {
        self$`countryOriginAlt` <- FirstLastNameOriginedOutObject$`countryOriginAlt`
      }
      if (!is.null(FirstLastNameOriginedOutObject$`countriesOriginTop`)) {
        self$`countriesOriginTop` <- ApiClient$new()$deserializeObj(FirstLastNameOriginedOutObject$`countriesOriginTop`, "array[character]", loadNamespace("namsor"))
      }
      if (!is.null(FirstLastNameOriginedOutObject$`score`)) {
        self$`score` <- FirstLastNameOriginedOutObject$`score`
      }
      if (!is.null(FirstLastNameOriginedOutObject$`regionOrigin`)) {
        self$`regionOrigin` <- FirstLastNameOriginedOutObject$`regionOrigin`
      }
      if (!is.null(FirstLastNameOriginedOutObject$`topRegionOrigin`)) {
        self$`topRegionOrigin` <- FirstLastNameOriginedOutObject$`topRegionOrigin`
      }
      if (!is.null(FirstLastNameOriginedOutObject$`subRegionOrigin`)) {
        self$`subRegionOrigin` <- FirstLastNameOriginedOutObject$`subRegionOrigin`
      }
      if (!is.null(FirstLastNameOriginedOutObject$`probabilityCalibrated`)) {
        self$`probabilityCalibrated` <- FirstLastNameOriginedOutObject$`probabilityCalibrated`
      }
      if (!is.null(FirstLastNameOriginedOutObject$`probabilityAltCalibrated`)) {
        self$`probabilityAltCalibrated` <- FirstLastNameOriginedOutObject$`probabilityAltCalibrated`
      }
    },
    # Serialize the non-NULL fields to a JSON string.
    # Numeric fields use "%s" (sprintf "%d" errors on R doubles).
    toJSONString = function() {
      jsoncontent <- c(
        if (!is.null(self$`id`)) {
        sprintf(
        '"id":
          "%s"
                ',
        self$`id`
        )},
        if (!is.null(self$`firstName`)) {
        sprintf(
        '"firstName":
          "%s"
                ',
        self$`firstName`
        )},
        if (!is.null(self$`lastName`)) {
        sprintf(
        '"lastName":
          "%s"
                ',
        self$`lastName`
        )},
        if (!is.null(self$`countryOrigin`)) {
        sprintf(
        '"countryOrigin":
          "%s"
                ',
        self$`countryOrigin`
        )},
        if (!is.null(self$`countryOriginAlt`)) {
        sprintf(
        '"countryOriginAlt":
          "%s"
                ',
        self$`countryOriginAlt`
        )},
        if (!is.null(self$`countriesOriginTop`)) {
        sprintf(
        '"countriesOriginTop":
           [%s]
                ',
        paste(unlist(lapply(self$`countriesOriginTop`, function(x) paste0('"', x, '"'))), collapse=",")
        )},
        if (!is.null(self$`score`)) {
        sprintf(
        '"score":
          %s
                ',
        self$`score`
        )},
        if (!is.null(self$`regionOrigin`)) {
        sprintf(
        '"regionOrigin":
          "%s"
                ',
        self$`regionOrigin`
        )},
        if (!is.null(self$`topRegionOrigin`)) {
        sprintf(
        '"topRegionOrigin":
          "%s"
                ',
        self$`topRegionOrigin`
        )},
        if (!is.null(self$`subRegionOrigin`)) {
        sprintf(
        '"subRegionOrigin":
          "%s"
                ',
        self$`subRegionOrigin`
        )},
        if (!is.null(self$`probabilityCalibrated`)) {
        sprintf(
        '"probabilityCalibrated":
          %s
                ',
        self$`probabilityCalibrated`
        )},
        if (!is.null(self$`probabilityAltCalibrated`)) {
        sprintf(
        '"probabilityAltCalibrated":
          %s
                ',
        self$`probabilityAltCalibrated`
        )}
      )
      jsoncontent <- paste(jsoncontent, collapse = ",")
      paste('{', jsoncontent, '}', sep = "")
    },
    # Populate the fields from a JSON string and return self invisibly-ish
    # (the object itself, enabling chaining).
    fromJSONString = function(FirstLastNameOriginedOutJson) {
      FirstLastNameOriginedOutObject <- jsonlite::fromJSON(FirstLastNameOriginedOutJson)
      self$`id` <- FirstLastNameOriginedOutObject$`id`
      self$`firstName` <- FirstLastNameOriginedOutObject$`firstName`
      self$`lastName` <- FirstLastNameOriginedOutObject$`lastName`
      self$`countryOrigin` <- FirstLastNameOriginedOutObject$`countryOrigin`
      self$`countryOriginAlt` <- FirstLastNameOriginedOutObject$`countryOriginAlt`
      self$`countriesOriginTop` <- ApiClient$new()$deserializeObj(FirstLastNameOriginedOutObject$`countriesOriginTop`, "array[character]", loadNamespace("namsor"))
      self$`score` <- FirstLastNameOriginedOutObject$`score`
      self$`regionOrigin` <- FirstLastNameOriginedOutObject$`regionOrigin`
      self$`topRegionOrigin` <- FirstLastNameOriginedOutObject$`topRegionOrigin`
      self$`subRegionOrigin` <- FirstLastNameOriginedOutObject$`subRegionOrigin`
      self$`probabilityCalibrated` <- FirstLastNameOriginedOutObject$`probabilityCalibrated`
      self$`probabilityAltCalibrated` <- FirstLastNameOriginedOutObject$`probabilityAltCalibrated`
      self
    }
  )
)
|
# Fit a 10-fold cross-validated elastic-net model (alpha = 0.03, gaussian
# family, MSE loss) on the liver correlation training set and append the
# fitted coefficient-path summary to the per-run report file.
library(glmnet)

# Column 1 holds the response; the predictor block starts at column 4.
liver.data <- read.table("../../../../TrainingSet/FullSet/Correlation/liver.csv", header = TRUE, sep = ",")
predictors <- as.matrix(liver.data[, 4:ncol(liver.data)])
response <- as.matrix(liver.data[, 1])

set.seed(123)  # reproducible CV fold assignment
fit <- cv.glmnet(predictors, response, nfolds = 10, type.measure = "mse",
                 alpha = 0.03, family = "gaussian", standardize = FALSE)

# Append the glmnet fit path (df, %dev, lambda) to the report file.
sink('./liver_014.txt', append = TRUE)
print(fit$glmnet.fit)
sink()
| /Model/EN/Correlation/liver/liver_014.R | no_license | esbgkannan/QSMART | R | false | false | 349 | r | library(glmnet)
# Response is column 1; the predictor matrix starts at column 4.
mydata = read.table("../../../../TrainingSet/FullSet/Correlation/liver.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
# Fixed seed so the 10-fold CV split is reproducible.
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mse",alpha=0.03,family="gaussian",standardize=FALSE)
# Append the fitted glmnet coefficient-path summary to the per-run report file.
sink('./liver_014.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
|
## WORKING DIRECTORY SHOULD BE "Ludi compounds" ##
# Library for plotting.
library(ggplot2)
# Transform compound's files data into the objects ludi.compounds,
# ludi.compounds.sig and ludi.compounds.nsig (the sourced script is also
# expected to provide `compounds` and the pipe/dplyr verbs used below).
source("./Ludi_compounds/R/format_data_ludi.R")
# Manhattan-style plot of log2 fold change per compound: non-significant
# points in blue, significant ones in red.
# Adapted code for manhattan plot.
# NOTE(review): grouping by `comp` and averaging max/min of `comp` makes
# center == comp; presumably a position variable was intended -- confirm.
axis.set <- ludi.compounds %>%
  group_by(comp) %>%
  summarize(center = (max(comp) + min(comp)) / 2)
# y-axis limits padded by 5 around the observed fold-change range.
ylim <- max(ludi.compounds$log2FoldChange) + 5
ymin <- min(ludi.compounds$log2FoldChange) - 5
# Significance thresholds for the (currently commented-out) guide lines.
sig <- 1
sig2 <- -1
zero <- 0
nComp <- nrow(ludi.compounds)
# y-axis ticks every 0.5; only even breaks get a printed label and a
# larger tick (tick.sizes is prepared here but not used below).
breaks <- seq(-15, 15, 0.5)
labels <- as.character(breaks)
labels[!(breaks%%2==0)] <- ''
tick.sizes <- rep(.5, length(breaks))
tick.sizes[(breaks%%2==0)] <- 1
manhplot <- ggplot(ludi.compounds.nsig, aes(x = comp, y = log2FoldChange)) +
  geom_point(size = 1, alpha = 0.5, color = "darkslateblue") +
  geom_point(data = ludi.compounds.sig, size = 1, color = "firebrick3", alpha = 0.5, aes(x = comp, y = log2FoldChange)) +
  #geom_hline(yintercept = sig, color = "lightgoldenrod1", linetype = "dashed", size = 0.2) +
  #geom_hline(yintercept = sig2, color = "lightgoldenrod1", linetype = "dashed", size = 0.2) +
  geom_hline(yintercept = zero, color = "black", linetype = "solid", size = 0.2) +
  scale_x_continuous(label = compounds, breaks = axis.set$center) +
  scale_y_continuous(expand = c(0,0), breaks = breaks, labels = labels, limits = c(ymin, ylim)) +
  #ggtitle("Log2 FoldChange by compound") +
  labs(y = "Efluxers           Influxers") +
  theme_bw() +
  theme(
    legend.position = "none",
    panel.grid = element_blank(),
    axis.title.x = element_blank(),
    panel.grid.major.x = element_blank(),
    panel.grid.minor.x = element_blank(),
    axis.text.x = element_text(angle = 90, size = 8, vjust = 0.5, hjust=1),
    axis.title.y = element_text(color="grey48", size=10),
    #title = element_text(hjust = 0.5, color = "grey40", size = 10),
    plot.margin = margin(t = 3, r = 5, b = 3, l = 3, unit = "pt")
  )
manhplot
# Persist the figure for the repository's plots folder.
ggsave("./Ludi_compounds/plots/manhattan_plot.png", manhplot, height = 6, width = 5)
| /Ludi_compounds/R/Analysis_Ludi.R | no_license | IriLrnr/ChemicalGenomics | R | false | false | 2,091 | r | ## WORKING DIRECTORY SHOULD BE "Ludi compounds" ##
#library for plotting
library(ggplot2)
# Transform compound's files data into object ludi.compounds and ludi.compounds.sig
source("./Ludi_compounds/R/format_data_ludi.R")
# plot manhattan
# Adapted code for manhattan plot
axis.set <- ludi.compounds %>%
group_by(comp) %>%
summarize(center = (max(comp) + min(comp)) / 2)
ylim <- max(ludi.compounds$log2FoldChange) + 5
ymin <- min(ludi.compounds$log2FoldChange) - 5
sig <- 1
sig2 <- -1
zero <- 0
nComp <- nrow(ludi.compounds)
breaks <- seq(-15, 15, 0.5)
labels <- as.character(breaks)
labels[!(breaks%%2==0)] <- ''
tick.sizes <- rep(.5, length(breaks))
tick.sizes[(breaks%%2==0)] <- 1
manhplot <- ggplot(ludi.compounds.nsig, aes(x = comp, y = log2FoldChange)) +
geom_point(size = 1, alpha = 0.5, color = "darkslateblue") +
geom_point(data = ludi.compounds.sig, size = 1, color = "firebrick3", alpha = 0.5, aes(x = comp, y = log2FoldChange)) +
#geom_hline(yintercept = sig, color = "lightgoldenrod1", linetype = "dashed", size = 0.2) +
#geom_hline(yintercept = sig2, color = "lightgoldenrod1", linetype = "dashed", size = 0.2) +
geom_hline(yintercept = zero, color = "black", linetype = "solid", size = 0.2) +
scale_x_continuous(label = compounds, breaks = axis.set$center) +
scale_y_continuous(expand = c(0,0), breaks = breaks, labels = labels, limits = c(ymin, ylim)) +
#ggtitle("Log2 FoldChange by compound") +
labs(y = "Efluxers Influxers") +
theme_bw() +
theme(
legend.position = "none",
panel.grid = element_blank(),
axis.title.x = element_blank(),
panel.grid.major.x = element_blank(),
panel.grid.minor.x = element_blank(),
axis.text.x = element_text(angle = 90, size = 8, vjust = 0.5, hjust=1),
axis.title.y = element_text(color="grey48", size=10),
#title = element_text(hjust = 0.5, color = "grey40", size = 10),
plot.margin = margin(t = 3, r = 5, b = 3, l = 3, unit = "pt")
)
manhplot
ggsave("./Ludi_compounds/plots/manhattan_plot.png", manhplot, height = 6, width = 5)
|
#' checkPatientUuidExistsOpenMRS -> check whether a person with a given
#' OpenMRS uuid exists on the OpenMRS server.
#'
#' @param jdbc.properties connection settings [urlBase, urlBaseReportingRest,
#'        location, hibernateOpenMRSConnectionUrl, hibernateOpenMRSPassword,
#'        hibernateOpenMRSUsername]; only $urlBase is used here.
#' @param patient character vector; element 4 must hold the OpenMRS uuid.
#' @return TRUE if the uuid resolves to a person, FALSE otherwise.
#' @examples
#' status <- checkPatientUuidExistsOpenMRS(jdbc.properties, c("0102010001/2006/04892","julia" ,"jaoquina", "cb90174b-81e9-43e4-9b4d-dc09d2966efa"))
#'
checkPatientUuidExistsOpenMRS <- function(jdbc.properties, patient) {
  # url da API
  base.url <- as.character(jdbc.properties$urlBase)
  url.check.patient <- paste0(base.url, 'person/', patient[4])
  # SECURITY NOTE(review): credentials are hard-coded in source; consider
  # reading them from jdbc.properties instead.
  r <- content(GET(url.check.patient, authenticate('admin', 'eSaude123')), as = "parsed")
  # The REST API returns an "error" element when the uuid does not resolve
  # (message "Object with given uuid doesn't exist").
  !("error" %in% names(r))
}
#' readJdbcProperties -> load the connection parameters from a
#' jdbc.properties file.
#' @param file path to the properties file
#' @return one-row data.frame with columns [urlBase, urlBaseReportingRest,
#'         location, hibernateOpenMRSConnectionUrl, hibernateOpenMRSPassword,
#'         hibernateOpenMRSUsername]
#' @examples
#' user_admin <- readJdbcProperties(file)
#'
readJdbcProperties <- function(file = 'jdbc.properties') {
  # read.properties() yields a named list; expose it as a data.frame so
  # callers can use the $column accessors.
  as.data.frame(read.properties(file = file))
}
#' apiGetPatientByNid -> look a patient up in OpenMRS by NID
#' (GET /openmrs/ws/rest/v1/patient?q=<nid>).
#' @param jdbc.properties connection settings; only $urlBase is used.
#' @param patientid character vector; element 2 must hold the NID to query.
#' @return parsed JSON response (list) from the OpenMRS REST API.
#' @examples
#' r <- apiGetPatientByNid(jdbc.properties, c("id", "0102010001/2006/04892"))
#'
apiGetPatientByNid <- function(jdbc.properties, patientid ) {
  # url da API (the unused reporting-rest url and openmrsuuid locals were
  # removed from the original implementation).
  base.url <- as.character(jdbc.properties$urlBase)
  url.check.patient <- paste0(base.url, 'patient?q=', patientid[2])
  content(GET(url.check.patient, authenticate('admin', 'eSaude123')), as = "parsed")
}
#' apiGetPatientByUuid -> fetch a patient from OpenMRS by uuid
#' (GET /openmrs/ws/rest/v1/patient/<uuid>).
#' @param jdbc.properties connection settings; only $urlBase is used.
#' @param patient character vector; element 4 must hold the OpenMRS uuid.
#' @return parsed JSON response (list) from the OpenMRS REST API.
#' @examples
#' r <- apiGetPatientByUuid(jdbc.properties, c("0102010001/2006/04892","julia" ,"jaoquina", "cb90174b-81e9-43e4-9b4d-dc09d2966efa"))
#'
apiGetPatientByUuid <- function(jdbc.properties, patient ) {
  # url da API (unused reporting-rest url local removed).
  base.url <- as.character(jdbc.properties$urlBase)
  url.check.patient <- paste0(base.url, 'patient/', patient[4])
  content(GET(url.check.patient, authenticate('admin', 'eSaude123')), as = "parsed")
}
#' apiGetPatientByName -> look a patient up in OpenMRS by full name
#' (GET /openmrs/ws/rest/v1/patient?q=<name>).
#' @param jdbc.properties connection settings; only $urlBase is used.
#' @param patient.full.name patient's full name; blanks are percent-encoded.
#' @return parsed JSON response (list) from the OpenMRS REST API.
#' @examples
#' r <- apiGetPatientByName(jdbc.properties, "julia jaoquina")
#'
apiGetPatientByName <- function(jdbc.properties, patient.full.name ) {
  # url da API (unused reporting-rest url local removed).
  base.url <- as.character(jdbc.properties$urlBase)
  # Percent-encode blanks only; other reserved characters are passed through
  # unchanged, preserving the original behaviour.
  query <- gsub(pattern = ' ', replacement = '%20', x = patient.full.name, ignore.case = TRUE)
  url.check.patient <- paste0(base.url, 'patient?q=', query)
  content(GET(url.check.patient, authenticate('admin', 'eSaude123')), as = "parsed")
}
#' composePatientToCheck --> build a character vector describing one patient
#' row of a duplicates table.
#'
#' @param index row number of the patient inside 'df'
#' @param df duplicates table with columns id, patientid, uuididart,
#'        uuidopenmrs and idart_full_name
#' @return character vector c(id, patientid, uuid_idart, uuid_openmrs,
#'         full_name), with the NID stripped of blanks/tabs and the name
#'         re-encoded latin1 -> UTF-8.
#' @examples pat <- composePatientToCheck(k, df.different_uuid)
#'
composePatientToCheck <- function(index,df){
  row <- df[index, ]
  # Normalise the full name: treat it as latin1, convert to UTF-8 dropping
  # unconvertible bytes.
  nome <- row$idart_full_name
  Encoding(nome) <- "latin1"
  nome <- iconv(nome, "latin1", "UTF-8", sub = '')
  # NOTE(review): pattern and replacement are both a single space here
  # (a no-op as written); presumably meant to collapse double spaces.
  nome <- gsub(pattern = ' ', replacement = ' ', x = nome)
  # The NID must not contain blanks or tabs.
  nid <- row$patientid
  nid <- gsub(pattern = ' ', replacement = '', x = nid)
  nid <- gsub(pattern = '\t', replacement = '', x = nid)
  c(row$id, nid, row$uuididart, row$uuidopenmrs, nome)
}
# apiCheckEstadoPermanencia -> query the reporting-REST endpoint for a
# person's programme state ("estado de permanencia").
#
# Args:
#   jdbc.properties: connection settings; only $urlBaseReportingRest is used.
#   patient.uuid: OpenMRS person uuid to look up.
# Returns the parsed JSON response (list).
apiCheckEstadoPermanencia <- function(jdbc.properties, patient.uuid ) {
  reporting.url <- as.character(jdbc.properties$urlBaseReportingRest)
  request.url <- paste0(reporting.url, '?personUuid=', patient.uuid)
  content(GET(request.url, authenticate('admin', 'eSaude123')), as = "parsed")
}
| /openmrs_rest_api_functions.R | no_license | crysiscore/idart_data_clean | R | false | false | 5,419 | r |
#' checkPatientUuidExistsOpenMRS -> verifica se existe um paciente no openmrs com um det. uuidopenmrs
#' @param jdbc.properties [urlBase,urlBaseReportingRest,location,hibernateOpenMRSConnectionUrl,hibernateOpenMRSPassword,hibernateOpenMRSUsername]
#' @return TRUE/FALSE
#' @examples
#' status <- checkPatientUuidExistsOpenMRS(jdbc.properties, c("0102010001/2006/04892","julia" ,"jaoquina", "cb90174b-81e9-43e4-9b4d-dc09d2966efa"))
#'
checkPatientUuidExistsOpenMRS <- function(jdbc.properties, patient) {
# url da API
base.url.rest <- as.character(jdbc.properties$urlBaseReportingRest)
base.url <- as.character(jdbc.properties$urlBase)
status <- TRUE
url.check.patient <- paste0(base.url,'person/',patient[4])
r <- content(GET(url.check.patient, authenticate('admin', 'eSaude123')), as = "parsed")
if("error" %in% names(r)){
if(r$error$message =="Object with given uuid doesn't exist" ){
}
status<-FALSE
return(FALSE)
} else{
return(status)
}
return(status)
}
#' ReadJdbcProperties -> carrega os paramentros de conexao no ficheiro jdbc.properties
#' @param file patg to file
#' @return vec [urlBase,urlBaseReportingRest,location,hibernateOpenMRSConnectionUrl,hibernateOpenMRSPassword,hibernateOpenMRSUsername]
#' @examples
#' user_admin <- ReadJdbcProperties(file)
#'
readJdbcProperties <- function(file='jdbc.properties') {
vec <- as.data.frame(read.properties(file = file ))
vec
}
#' apiGetPatientByNid -> verifica se existe um paciente no openmrs com um det. nid
#' @param jdbc.properties [urlBase,urlBaseReportingRest,location,hibernateOpenMRSConnectionUrl,hibernateOpenMRSPassword,hibernateOpenMRSUsername]
#' @return TRUE/FALSE
#' @examples /openmrs/ws/rest/v1/patient?q=
#' status <- checkPatientUuidExistsOpenMRS(jdbc.properties, c("0102010001/2006/04892","julia" ,"jaoquina", "cb90174b-81e9-43e4-9b4d-dc09d2966efa"))
#'
apiGetPatientByNid <- function(jdbc.properties, patientid ) {
# url da API
base.url.rest <- as.character(jdbc.properties$urlBaseReportingRest)
base.url <- as.character(jdbc.properties$urlBase)
url.check.patient <- paste0(base.url,'patient?q=' ,patientid[2])
openmrsuuid <- ""
r <- content(GET(url.check.patient, authenticate('admin', 'eSaude123')), as = "parsed")
return(r)
}
#' apiGetPatientByUuid -> verifica se existe um paciente no openmrs com um det. nid
#' @param jdbc.properties [urlBase,urlBaseReportingRest,location,hibernateOpenMRSConnectionUrl,hibernateOpenMRSPassword,hibernateOpenMRSUsername]
#' @return TRUE/FALSE
#' @examples /openmrs/ws/rest/v1/patient?q=
#' status <- checkPatientUuidExistsOpenMRS(jdbc.properties, c("0102010001/2006/04892","julia" ,"jaoquina", "cb90174b-81e9-43e4-9b4d-dc09d2966efa"))
#'
apiGetPatientByUuid <- function(jdbc.properties, patient ) {
# url da API
base.url.rest <- as.character(jdbc.properties$urlBaseReportingRest)
base.url <- as.character(jdbc.properties$urlBase)
url.check.patient <- paste0(base.url,'patient/' ,patient[4])
r <- content(GET(url.check.patient, authenticate('admin', 'eSaude123')), as = "parsed")
return(r)
}
#' apiGetPatientByName -> verifica se existe um paciente no openmrs com um det. nome
#' @param jdbc.properties [urlBase,urlBaseReportingRest,location,hibernateOpenMRSConnectionUrl,hibernateOpenMRSPassword,hibernateOpenMRSUsername]
#' @return TRUE/FALSE
#' @examples /openmrs/ws/rest/v1/patient?q=
#' status <- checkPatientUuidExistsOpenMRS(jdbc.properties, c("0102010001/2006/04892","julia" ,"jaoquina", "cb90174b-81e9-43e4-9b4d-dc09d2966efa"))
#'
apiGetPatientByName <- function(jdbc.properties, patient.full.name ) {
# url da API
base.url.rest <- as.character(jdbc.properties$urlBaseReportingRest)
base.url <- as.character(jdbc.properties$urlBase)
url.check.patient <- paste0(base.url,'patient?q=' ,gsub(pattern = ' ', replacement = '%20' ,x =patient.full.name,ignore.case = TRUE ))
r <- content(GET(url.check.patient, authenticate('admin', 'eSaude123')), as = "parsed")
return(r)
}
#' composePatientToCheck --> Compoe um vector com dados do paciente =
#'
#' @param df tabela de duplicados para extrair os dados do Pat
#' @param index row do paciente em causa
#' @return vector[id,uuid,patientid,openmrs_patient_id,full.name,index]
#' @examples pat <- composePatientToCheck(k, df.different_uuid)
#'
composePatientToCheck <- function(index,df){
id = df$id[index]
patientid = df$patientid[index]
uuid_idart = df$uuididart[index]
uuid_openmrs =df$uuidopenmrs[index]
full_name = df$idart_full_name[index]
Encoding(full_name) <- "latin1"
full_name <- iconv(full_name, "latin1", "UTF-8",sub='')
full_name <- gsub(pattern = ' ' ,replacement = ' ', x = full_name)
patientid <- gsub(pattern = ' ', replacement = '', x = patientid)
patientid <- gsub(pattern = '\t', replacement = '', x = patientid)
patient <- c(id,patientid,uuid_idart,uuid_openmrs,full_name)
return(patient)
}
apiCheckEstadoPermanencia <- function(jdbc.properties, patient.uuid ) {
# url da API
url.base.reporting.rest<- as.character(jdbc.properties$urlBaseReportingRest)
#base.url <- as.character(jdbc.properties$urlBase)
url.check.patient <- paste0(url.base.reporting.rest,'?personUuid=' ,patient.uuid)
r <- content(GET(url.check.patient, authenticate('admin', 'eSaude123')), as = "parsed")
return(r)
}
|
# Function name: plotAllScores
# Author: Yang Li <yang.li@rug.nl>
# Maintainer: Yang Li <yang.li@rug.nl>
# Version: 1.0.0
# Date: 10 Dec. 2007
#
# Plot the simulated-annealing trace used by the designGG main function:
# the score after every accepted move (top panel) and the cooling schedule
# (bottom panel).
#
# Args:
#   plot.obj: list with elements scores, cooling, startTemp, temperature,
#             temperature.step and nIterations.
#   fileName: optional path prefix; when non-NULL the figure is written to
#             "<fileName>SAplot.png", otherwise drawn on the current device.
# Returns invisible(NULL); called for its plotting side effect.
#
# Fixes vs. original: unused 'optimality' extraction removed; par() settings
# are restored when drawing on the caller's device; "Socres" label typo.
plotAllScores <- function(plot.obj,fileName=NULL)
{
    scores <- plot.obj$scores
    cooling <- plot.obj$cooling
    startTemp <- plot.obj$startTemp
    temperature <- plot.obj$temperature
    temperature.step <- plot.obj$temperature.step
    nIterations <- plot.obj$nIterations

    # Render a positive number as base * 10^exponent for the annotations.
    sciNotation <- function(x, digits = 1) {
        if (!x) return(0)
        exponent <- floor(log10(x))
        base <- round(x / 10^exponent, digits)
        return(as.expression(substitute(base * 10^exponent,
            list(base = base, exponent = exponent))))
    }
    if (!is.null(fileName)) {
        png(filename = paste(fileName, "SAplot.png", sep = ""), width = 580,
            height = 480, bg = " light gray")
    } else {
        # Drawing on an existing device: do not leave its layout altered.
        oldpar <- par(c("mfrow", "mai"))
        on.exit(par(oldpar), add = TRUE)
    }
    par( mfrow=c(2,1) )
    par( mai=c(0.2,0.95, 0.76, 0.39) )
    ylim <- range(scores, finite = TRUE, na.rm = TRUE)
    plot(1:length(scores), scores, type = "l", lwd = 2, xaxt = "n", xlab = NULL,
         ylab = "Scores", col = "blue", main = "Simulated Annealing")
    # Annotate the start/end temperature, the cooling step and the number of
    # iterations in the upper-right region of the score panel.
    text(x = 0.8 * length(scores),
         y = ylim[1] + 0.95 * (ylim[2] - ylim[1]),
         paste("T0=", startTemp, ", T.end=", sciNotation(temperature), sep = ""))
    text(x = 0.9 * length(scores),
         y = ylim[1] + 0.85 * (ylim[2] - ylim[1]),
         paste("T step=", round(temperature.step, digits = 3), sep = ""))
    text(x = 0.9 * length(scores),
         y = ylim[1] + 0.75 * (ylim[2] - ylim[1]),
         paste("nIterations=", nIterations, sep = ""))
    par( mai=c(0.95, 0.95, 0.2, 0.39) )
    plot(1:length(cooling), col = "blue", cooling, type = "l", lwd = 2,
         xlab = "Times of Moving", ylab = "Cooling")
    if (!is.null(fileName)) dev.off()
    invisible(NULL)
}
| /designGG/R/plotAllScores.R | no_license | ingted/R-Examples | R | false | false | 1,961 | r | # Function name: plotAllScores
# Author: Yang Li <yang.li@rug.nl>
# Maintainer: Yang Li <yang.li@rug.nl>
# Version: 1.0.0
# Date: 10 Dec. 2007
plotAllScores <- function(plot.obj,fileName=NULL)
{
#This is used in designGG main function
scores <- plot.obj$scores
cooling <- plot.obj$cooling
startTemp <- plot.obj$startTemp
temperature <- plot.obj$temperature
temperature.step <- plot.obj$temperature.step
nIterations <- plot.obj$nIterations
optimality <- plot.obj$optimality
sciNotation <- function(x, digits = 1) {
if (!x) return(0)
exponent <- floor(log10(x))
base <- round(x / 10^exponent, digits)
return(as.expression(substitute(base * 10^exponent,
list(base = base, exponent = exponent))))
}
if(!is.null(fileName)){
png(filename=paste(fileName,"SAplot.png", sep=""), width=580, height=480,
bg=" light gray")}
par( mfrow=c(2,1) )
par( mai=c(0.2,0.95, 0.76, 0.39) )
ylim <- range(scores,finite=TRUE,na.rm=TRUE)
plot( 1:length(scores), scores, type="l", lwd=2, xaxt="n", xlab=NULL,
ylab="Socres", col="blue", main="Simulated Annealing" )
text( x=0.8*length(scores),
y=ylim[1]+0.95*(ylim[2]-ylim[1]),
paste("T0=", startTemp , ", T.end=", sciNotation( temperature ), sep="") )
text( x=0.9*length(scores),
y=ylim[1]+0.85*(ylim[2]-ylim[1]),
paste("T step=", round(temperature.step,digits=3), sep="") )
text( x=0.9*length(scores),
y=ylim[1]+0.75*(ylim[2]-ylim[1]),
paste("nIterations=", nIterations, sep="") )
par( mai=c(0.95, 0.95, 0.2, 0.39) )
plot( 1:length(cooling),col="blue", cooling, type="l", lwd=2,
xlab="Times of Moving", ylab="Cooling")
if(!is.null(fileName)) dev.off()
}
|
\name{test.scoregpu}
\alias{test.scoregpu}
\title{
Conduct permutation resampling analysis using permGPU
}
\description{
This function can be used to carry out permutation
resampling inference with GPUs. Currently the function supports six test
statistics: the t and Wilcoxon tests, for two-sample problems,
the Pearson and Spearman statistics, for non-censored continuous
outcomes, and the Cox score and rank score tests (Jung et al, 2005), for
right-censored time-to-event outcomes.
}
\usage{
test.scoregpu()
}
\examples{
test.scoregpu()
}
| /man/test.scoregpu.Rd | no_license | cran/permGPU | R | false | false | 561 | rd | \name{test.scoregpu}
\alias{test.scoregpu}
\title{
Conduct permutation resampling analysis using permGPU
}
\description{
This function can be used to carry out permutation
resampling inference with GPUs. Currently the function supports six test
statistics: the t and Wilcoxon tests, for two-sample problems,
the Pearson and Spearman statistics, for non-censored continuous
outcomes, and the Cox score and rank score tests (Jung et al, 2005), for
right-censored time-to-event outcomes.
}
\usage{
test.scoregpu()
}
\examples{
test.scoregpu()
}
|
"as.dudi" <- function (df, col.w, row.w, scannf, nf, call, type, tol = 1e-07,
full = FALSE)
{
if (!is.data.frame(df))
stop("data.frame expected")
lig <- nrow(df)
col <- ncol(df)
if (length(col.w) != col)
stop("Non convenient col weights")
if (length(row.w) != lig)
stop("Non convenient row weights")
if (any(col.w < 0))
stop("col weight < 0")
if (any(row.w < 0))
stop("row weight < 0")
if (full)
scannf <- FALSE
transpose <- FALSE
if(lig<col)
transpose <- TRUE
res <- list(tab = df, cw = col.w, lw = row.w)
df <- as.matrix(df)
df.ori <- df
df <- df * sqrt(row.w)
df <- sweep(df, 2, sqrt(col.w), "*")
if(!transpose){
df <- crossprod(df,df)
}
else{
df <- tcrossprod(df,df)
}
eig1 <- eigen(df,symmetric=TRUE)
eig <- eig1$values
rank <- sum((eig/eig[1]) > tol)
if (scannf) {
if (exists("ade4TkGUIFlag")) {
nf <- ade4TkGUI::chooseaxes(eig, rank)
}
else {
barplot(eig[1:rank])
cat("Select the number of axes: ")
nf <- as.integer(readLines(n = 1))
messageScannf(call, nf)
}
}
if (nf <= 0)
nf <- 2
if (nf > rank)
nf <- rank
if (full)
nf <- rank
res$eig <- eig[1:rank]
res$rank <- rank
res$nf <- nf
col.w[which(col.w == 0)] <- 1
row.w[which(row.w == 0)] <- 1
dval <- sqrt(res$eig)[1:nf]
if(!transpose){
col.w <- 1/sqrt(col.w)
auxi <- eig1$vectors[, 1:nf] * col.w
auxi2 <- sweep(df.ori, 2, res$cw, "*")
auxi2 <- data.frame(auxi2%*%auxi)
auxi <- data.frame(auxi)
names(auxi) <- paste("CS", (1:nf), sep = "")
row.names(auxi) <- make.unique(names(res$tab))
res$c1 <- auxi
names(auxi2) <- paste("Axis", (1:nf), sep = "")
row.names(auxi2) <- row.names(res$tab)
res$li <- auxi2
res$co <- sweep(res$c1,2,dval,"*")
names(res$co) <- paste("Comp", (1:nf), sep = "")
res$l1 <- sweep(res$li,2,dval,"/")
names(res$l1) <- paste("RS", (1:nf), sep = "")
} else {
row.w <- 1/sqrt(row.w)
auxi <- eig1$vectors[, 1:nf] * row.w
auxi2 <- t(sweep(df.ori,1,res$lw,"*"))
auxi2 <- data.frame(auxi2%*%auxi)
auxi <- data.frame(auxi)
names(auxi) <- paste("RS", (1:nf), sep = "")
row.names(auxi) <- row.names(res$tab)
res$l1 <- auxi
names(auxi2) <- paste("Comp", (1:nf), sep = "")
row.names(auxi2) <- make.unique(names(res$tab))
res$co <- auxi2
res$li <- sweep(res$l1,2,dval,"*")
names(res$li) <- paste("Axis", (1:nf), sep = "")
res$c1 <- sweep(res$co,2,dval,"/")
names(res$c1) <- paste("CS", (1:nf), sep = "")
}
res$call <- call
class(res) <- c(type, "dudi")
return(res)
}
"is.dudi" <- function (x) {
inherits(x, "dudi")
}
"print.dudi" <- function (x, ...) {
cat("Duality diagramm\n")
cat("class: ")
cat(class(x))
cat("\n$call: ")
print(x$call)
cat("\n$nf:", x$nf, "axis-components saved")
cat("\n$rank: ")
cat(x$rank)
cat("\neigen values: ")
l0 <- length(x$eig)
cat(signif(x$eig, 4)[1:(min(5, l0))])
if (l0 > 5)
cat(" ...\n")
else cat("\n")
sumry <- array("", c(3, 4), list(1:3, c("vector", "length",
"mode", "content")))
sumry[1, ] <- c("$cw", length(x$cw), mode(x$cw), "column weights")
sumry[2, ] <- c("$lw", length(x$lw), mode(x$lw), "row weights")
sumry[3, ] <- c("$eig", length(x$eig), mode(x$eig), "eigen values")
print(sumry, quote = FALSE)
cat("\n")
sumry <- array("", c(5, 4), list(1:5, c("data.frame", "nrow",
"ncol", "content")))
sumry[1, ] <- c("$tab", nrow(x$tab), ncol(x$tab), "modified array")
sumry[2, ] <- c("$li", nrow(x$li), ncol(x$li), "row coordinates")
sumry[3, ] <- c("$l1", nrow(x$l1), ncol(x$l1), "row normed scores")
sumry[4, ] <- c("$co", nrow(x$co), ncol(x$co), "column coordinates")
sumry[5, ] <- c("$c1", nrow(x$c1), ncol(x$c1), "column normed scores")
print(sumry, quote = FALSE)
cat("other elements: ")
if (length(names(x)) > 11)
cat(names(x)[12:(length(x))], "\n")
else cat("NULL\n")
}
"t.dudi" <- function (x) {
if (!inherits(x, "dudi"))
stop("Object of class 'dudi' expected")
res <- list()
res$tab <- data.frame(t(x$tab))
res$cw <- x$lw
res$lw <- x$cw
res$eig <- x$eig
res$rank <- x$rank
res$nf <- x$nf
res$c1 <- x$l1
res$l1 <- x$c1
res$co <- x$li
res$li <- x$co
res$call <- match.call()
class(res) <- c("transpo", "dudi")
return(res)
}
"redo.dudi" <- function (dudi, newnf = 2) {
if (!inherits(dudi, "dudi"))
stop("Object of class 'dudi' expected")
appel <- as.list(dudi$call)
if (appel[[1]] == "t.dudi") {
dudiold <- eval.parent(appel[[2]])
appel <- as.list(dudiold$call)
appel$nf <- newnf
appel$scannf <- FALSE
dudinew <- eval.parent(as.call(appel))
return(t.dudi(dudinew))
}
appel$nf <- newnf
appel$scannf <- FALSE
eval.parent(as.call(appel))
}
#' screeplot() method for "dudi" objects: bar plot (default) or line plot of
#' the eigenvalues ("inertia") of the first npcs axes.  Kept axes are drawn
#' in black and the discarded ones in grey by default.
screeplot.dudi <- function (x, npcs = length(x$eig), type = c("barplot","lines"), main = deparse(substitute(x)), col = c(rep("black",x$nf),rep("grey",npcs-x$nf)), ...){
    type <- match.arg(type)
    pcs <- x$eig
    xp <- seq_len(npcs)
    if (type == "barplot")
        barplot(pcs[xp], names.arg = 1:npcs, main = main, ylab = "Inertia", xlab = "Axis", col = col, ...)
    else {
        # Line plot variant: draw axes manually so ticks sit on the axis
        # numbers 1..npcs.
        plot(xp, pcs[xp], type = "b", axes = FALSE, main = main, xlab = "Axis", ylab = "Inertia", col = col, ...)
        axis(2)
        axis(1, at = xp, labels = 1:npcs)
    }
    invisible()
}
biplot.dudi <- function (x, ...){
    # Biplot method for dudi objects: delegates to scatter(), which draws
    # rows and columns on the same factorial map.
    scatter(x, ...)
}
summary.dudi <- function(object, ...){
    # Print a textual summary of a duality diagram: class, call, total
    # inertia, and (for up to the first 5 axes) the eigenvalues plus the
    # per-axis and cumulative shares of projected inertia.
    cat("Class: ")
    cat(class(object))
    cat("\nCall: ")
    print(object$call)
    cat("\nTotal inertia: ")
    cat(signif(sum(object$eig), 4))
    cat("\n")
    l0 <- length(object$eig)   # total number of eigenvalues; display caps at 5
    cat("\nEigenvalues:\n")
    vec <- object$eig[1:(min(5, l0))]
    names(vec) <- paste("Ax",1:length(vec), sep = "")
    print(format(vec, digits = 4, trim = TRUE, width = 7), quote = FALSE)
    cat("\nProjected inertia (%):\n")
    vec <- (object$eig / sum(object$eig) * 100)[1:(min(5, l0))]
    names(vec) <- paste("Ax",1:length(vec), sep = "")
    print(format(vec, digits = 4, trim = TRUE, width = 7), quote = FALSE)
    cat("\nCumulative projected inertia (%):\n")
    vec <- (cumsum(object$eig) / sum(object$eig) * 100)[1:(min(5, l0))]
    # cumulative entries are labelled "Ax1", "Ax1:2", "Ax1:3", ...
    names(vec)[1] <- "Ax1"
    if(l0>1)
       names(vec)[2:length(vec)] <- paste("Ax1:",2:length(vec),sep="")
    print(format(vec, digits = 4, trim = TRUE, width = 7), quote = FALSE)
    if (l0 > 5) {
      cat("\n")
      cat(paste("(Only 5 dimensions (out of ",l0, ") are shown)\n", sep="",collapse=""))
    }
    cat("\n")
}
########### [.dudi ###########
"[.dudi" <- function (x, i, j) {
    ## Subset a duality diagram.
    ## i: index of rows (statistical units)
    ## j: index of columns (variables)
    ## The data table, coordinates, normed scores and weights are subset
    ## consistently; row weights are renormalised so they still sum to one.
    res <- unclass(x)
    if(!missing(i)){
        res$tab <- res$tab[i, , drop = FALSE]
        res$li <- res$li[i, , drop = FALSE]
        res$l1 <- res$l1[i, , drop = FALSE]
        res$lw <- res$lw[i, drop = FALSE]
        res$lw <- res$lw / sum(res$lw)
    }
    if(!missing(j)){
        res$tab <- res$tab[, j, drop = FALSE]
        res$co <- res$co[j, , drop = FALSE]
        res$c1 <- res$c1[j, , drop = FALSE]
        ## BUG FIX: column weights must be subset from the column-weight
        ## vector 'cw', not from the row weights 'lw' as before.
        res$cw <- res$cw[j, drop = FALSE]
    }
    class(res) <- class(x)
    res$call <- match.call()
    return(res)
}
| /R/dudi.R | no_license | cran/ade4 | R | false | false | 7,379 | r | "as.dudi" <- function (df, col.w, row.w, scannf, nf, call, type, tol = 1e-07,
full = FALSE)
{
if (!is.data.frame(df))
stop("data.frame expected")
lig <- nrow(df)
col <- ncol(df)
if (length(col.w) != col)
stop("Non convenient col weights")
if (length(row.w) != lig)
stop("Non convenient row weights")
if (any(col.w < 0))
stop("col weight < 0")
if (any(row.w < 0))
stop("row weight < 0")
if (full)
scannf <- FALSE
transpose <- FALSE
if(lig<col)
transpose <- TRUE
res <- list(tab = df, cw = col.w, lw = row.w)
df <- as.matrix(df)
df.ori <- df
df <- df * sqrt(row.w)
df <- sweep(df, 2, sqrt(col.w), "*")
if(!transpose){
df <- crossprod(df,df)
}
else{
df <- tcrossprod(df,df)
}
eig1 <- eigen(df,symmetric=TRUE)
eig <- eig1$values
rank <- sum((eig/eig[1]) > tol)
if (scannf) {
if (exists("ade4TkGUIFlag")) {
nf <- ade4TkGUI::chooseaxes(eig, rank)
}
else {
barplot(eig[1:rank])
cat("Select the number of axes: ")
nf <- as.integer(readLines(n = 1))
messageScannf(call, nf)
}
}
if (nf <= 0)
nf <- 2
if (nf > rank)
nf <- rank
if (full)
nf <- rank
res$eig <- eig[1:rank]
res$rank <- rank
res$nf <- nf
col.w[which(col.w == 0)] <- 1
row.w[which(row.w == 0)] <- 1
dval <- sqrt(res$eig)[1:nf]
if(!transpose){
col.w <- 1/sqrt(col.w)
auxi <- eig1$vectors[, 1:nf] * col.w
auxi2 <- sweep(df.ori, 2, res$cw, "*")
auxi2 <- data.frame(auxi2%*%auxi)
auxi <- data.frame(auxi)
names(auxi) <- paste("CS", (1:nf), sep = "")
row.names(auxi) <- make.unique(names(res$tab))
res$c1 <- auxi
names(auxi2) <- paste("Axis", (1:nf), sep = "")
row.names(auxi2) <- row.names(res$tab)
res$li <- auxi2
res$co <- sweep(res$c1,2,dval,"*")
names(res$co) <- paste("Comp", (1:nf), sep = "")
res$l1 <- sweep(res$li,2,dval,"/")
names(res$l1) <- paste("RS", (1:nf), sep = "")
} else {
row.w <- 1/sqrt(row.w)
auxi <- eig1$vectors[, 1:nf] * row.w
auxi2 <- t(sweep(df.ori,1,res$lw,"*"))
auxi2 <- data.frame(auxi2%*%auxi)
auxi <- data.frame(auxi)
names(auxi) <- paste("RS", (1:nf), sep = "")
row.names(auxi) <- row.names(res$tab)
res$l1 <- auxi
names(auxi2) <- paste("Comp", (1:nf), sep = "")
row.names(auxi2) <- make.unique(names(res$tab))
res$co <- auxi2
res$li <- sweep(res$l1,2,dval,"*")
names(res$li) <- paste("Axis", (1:nf), sep = "")
res$c1 <- sweep(res$co,2,dval,"/")
names(res$c1) <- paste("CS", (1:nf), sep = "")
}
res$call <- call
class(res) <- c(type, "dudi")
return(res)
}
"is.dudi" <- function (x) {
inherits(x, "dudi")
}
"print.dudi" <- function (x, ...) {
cat("Duality diagramm\n")
cat("class: ")
cat(class(x))
cat("\n$call: ")
print(x$call)
cat("\n$nf:", x$nf, "axis-components saved")
cat("\n$rank: ")
cat(x$rank)
cat("\neigen values: ")
l0 <- length(x$eig)
cat(signif(x$eig, 4)[1:(min(5, l0))])
if (l0 > 5)
cat(" ...\n")
else cat("\n")
sumry <- array("", c(3, 4), list(1:3, c("vector", "length",
"mode", "content")))
sumry[1, ] <- c("$cw", length(x$cw), mode(x$cw), "column weights")
sumry[2, ] <- c("$lw", length(x$lw), mode(x$lw), "row weights")
sumry[3, ] <- c("$eig", length(x$eig), mode(x$eig), "eigen values")
print(sumry, quote = FALSE)
cat("\n")
sumry <- array("", c(5, 4), list(1:5, c("data.frame", "nrow",
"ncol", "content")))
sumry[1, ] <- c("$tab", nrow(x$tab), ncol(x$tab), "modified array")
sumry[2, ] <- c("$li", nrow(x$li), ncol(x$li), "row coordinates")
sumry[3, ] <- c("$l1", nrow(x$l1), ncol(x$l1), "row normed scores")
sumry[4, ] <- c("$co", nrow(x$co), ncol(x$co), "column coordinates")
sumry[5, ] <- c("$c1", nrow(x$c1), ncol(x$c1), "column normed scores")
print(sumry, quote = FALSE)
cat("other elements: ")
if (length(names(x)) > 11)
cat(names(x)[12:(length(x))], "\n")
else cat("NULL\n")
}
"t.dudi" <- function (x) {
if (!inherits(x, "dudi"))
stop("Object of class 'dudi' expected")
res <- list()
res$tab <- data.frame(t(x$tab))
res$cw <- x$lw
res$lw <- x$cw
res$eig <- x$eig
res$rank <- x$rank
res$nf <- x$nf
res$c1 <- x$l1
res$l1 <- x$c1
res$co <- x$li
res$li <- x$co
res$call <- match.call()
class(res) <- c("transpo", "dudi")
return(res)
}
"redo.dudi" <- function (dudi, newnf = 2) {
if (!inherits(dudi, "dudi"))
stop("Object of class 'dudi' expected")
appel <- as.list(dudi$call)
if (appel[[1]] == "t.dudi") {
dudiold <- eval.parent(appel[[2]])
appel <- as.list(dudiold$call)
appel$nf <- newnf
appel$scannf <- FALSE
dudinew <- eval.parent(as.call(appel))
return(t.dudi(dudinew))
}
appel$nf <- newnf
appel$scannf <- FALSE
eval.parent(as.call(appel))
}
# Scree plot for a duality diagram: eigenvalues ("Inertia") of the first
# 'npcs' axes; retained axes are black, discarded ones grey (col default).
screeplot.dudi <- function (x, npcs = length(x$eig), type = c("barplot","lines"), main = deparse(substitute(x)), col = c(rep("black",x$nf),rep("grey",npcs-x$nf)), ...){
    type <- match.arg(type)
    pcs <- x$eig
    xp <- seq_len(npcs)
    if (type == "barplot")
        barplot(pcs[xp], names.arg = 1:npcs, main = main, ylab = "Inertia", xlab = "Axis", col = col, ...)
    else {
        # line profile drawn without default axes, then custom axes added
        plot(xp, pcs[xp], type = "b", axes = FALSE, main = main, xlab = "Axis", ylab = "Inertia", col = col, ...)
        axis(2)
        axis(1, at = xp, labels = 1:npcs)
    }
    invisible()
}
biplot.dudi <- function (x, ...){
    # Biplot method for dudi objects: delegates to scatter(), which draws
    # rows and columns on the same factorial map.
    scatter(x, ...)
}
summary.dudi <- function(object, ...){
    # Textual summary of a duality diagram: class, call, total inertia, and
    # (for up to the first 5 axes) the eigenvalues plus the per-axis and
    # cumulative shares of projected inertia.
    cat("Class: ")
    cat(class(object))
    cat("\nCall: ")
    print(object$call)
    cat("\nTotal inertia: ")
    cat(signif(sum(object$eig), 4))
    cat("\n")
    l0 <- length(object$eig)   # display below caps at the first 5 axes
    cat("\nEigenvalues:\n")
    vec <- object$eig[1:(min(5, l0))]
    names(vec) <- paste("Ax",1:length(vec), sep = "")
    print(format(vec, digits = 4, trim = TRUE, width = 7), quote = FALSE)
    cat("\nProjected inertia (%):\n")
    vec <- (object$eig / sum(object$eig) * 100)[1:(min(5, l0))]
    names(vec) <- paste("Ax",1:length(vec), sep = "")
    print(format(vec, digits = 4, trim = TRUE, width = 7), quote = FALSE)
    cat("\nCumulative projected inertia (%):\n")
    vec <- (cumsum(object$eig) / sum(object$eig) * 100)[1:(min(5, l0))]
    # cumulative entries are labelled "Ax1", "Ax1:2", "Ax1:3", ...
    names(vec)[1] <- "Ax1"
    if(l0>1)
       names(vec)[2:length(vec)] <- paste("Ax1:",2:length(vec),sep="")
    print(format(vec, digits = 4, trim = TRUE, width = 7), quote = FALSE)
    if (l0 > 5) {
      cat("\n")
      cat(paste("(Only 5 dimensions (out of ",l0, ") are shown)\n", sep="",collapse=""))
    }
    cat("\n")
}
########### [.dudi ###########
"[.dudi" <- function (x, i, j) {
    ## Subset a duality diagram.
    ## i: index of rows (statistical units)
    ## j: index of columns (variables)
    ## The data table, coordinates, normed scores and weights are subset
    ## consistently; row weights are renormalised so they still sum to one.
    res <- unclass(x)
    if(!missing(i)){
        res$tab <- res$tab[i, , drop = FALSE]
        res$li <- res$li[i, , drop = FALSE]
        res$l1 <- res$l1[i, , drop = FALSE]
        res$lw <- res$lw[i, drop = FALSE]
        res$lw <- res$lw / sum(res$lw)
    }
    if(!missing(j)){
        res$tab <- res$tab[, j, drop = FALSE]
        res$co <- res$co[j, , drop = FALSE]
        res$c1 <- res$c1[j, , drop = FALSE]
        ## BUG FIX: column weights must be subset from the column-weight
        ## vector 'cw', not from the row weights 'lw' as before.
        res$cw <- res$cw[j, drop = FALSE]
    }
    class(res) <- class(x)
    res$call <- match.call()
    return(res)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/biofitmodel.r
\name{biofitmodel}
\alias{biofitmodel}
\title{biofitmodel}
\usage{
biofitmodel(i_biometeo, i_biopopulation, i_biocontainer, i_monitoring,
range_alpha_a = seq(0.005, 0.01, 0.001), range_alpha_l = seq(0.005, 0.01,
0.001), range_density_l = 100, stocastic = TRUE, n_sampling = 10,
inibition = FALSE, plotresults = FALSE, testRMSE = FALSE,
ini_rmse = 1, end_rmse = NULL)
}
\arguments{
\item{i_biometeo}{object rAedesSim \code{biometeo} object}
\item{i_biopopulation}{object rAedesSim \code{biopopulation} object .}
\item{i_biocontainer}{object rAedesSim \code{biocontainer} object .}
\item{i_monitoring}{object rAedesSim \code{biodata} object concerning mosquito eggs field observations.}
\item{range_alpha_a}{numeric rAedesSim range of female adult competition alpha. Default values are done by seq(0.005,0.01,0.001).}
\item{range_alpha_l}{numeric rAedesSim range of intra-larval competition alpha . Default values are done by seq(0.005,0.01,0.001)}
\item{range_density_l}{numeric rAedesSim object range of maximum larval density in liter of water volume.Default value is 100.}
\item{n_sampling}{numeric number of resampling if stochastic is implemented see in \code{biomodel}. Default is 10.}
\item{inibition}{logical if larval density is considered in \code{biomodel}.Default is FALSE.}
\item{plotresults}{logical if is true a plot is done. Default is FALSE.}
\item{testRMSE}{logical; if TRUE, the root mean square error of the simulations is computed. Default is FALSE.}
\item{ini_rmse}{numeric Starting position index to calculate RMSE on the observed time series. Default is 1.}
\item{end_rmse}{numeric Ending position index to calculate RMSE on observed data.Default is NULL and means that all observed data are considered if ini_rmse = 1.}
}
\value{
biofitmodel
}
\description{
Biofitmodel is the function that finds, through a grid-search scheme, the best intra-specific competition parameters (adult alpha and larval alpha)
according to the minimal-RMSE criterion, when environmental and population data are fixed. To build the grid, the function needs the ranges of the alpha parameters.
The number of observations used to perform the validation can be specified explicitly by giving the starting and ending indices within the observed data.
}
\author{
Istituto di Biometeorologia Firenze Italy Alfonso crisci \email{a.crisci@ibimet.cnr.it} ASL LUCCA Marco Selmi \email{marco.selmi@uslnordovest.toscana.it }
}
\keyword{biofitmodel}
| /man/biofitmodel.Rd | permissive | alfcrisci/rAedesSim | R | false | true | 2,530 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/biofitmodel.r
\name{biofitmodel}
\alias{biofitmodel}
\title{biofitmodel}
\usage{
biofitmodel(i_biometeo, i_biopopulation, i_biocontainer, i_monitoring,
range_alpha_a = seq(0.005, 0.01, 0.001), range_alpha_l = seq(0.005, 0.01,
0.001), range_density_l = 100, stocastic = TRUE, n_sampling = 10,
inibition = FALSE, plotresults = FALSE, testRMSE = FALSE,
ini_rmse = 1, end_rmse = NULL)
}
\arguments{
\item{i_biometeo}{object rAedesSim \code{biometeo} object}
\item{i_biopopulation}{object rAedesSim \code{biopopulation} object .}
\item{i_biocontainer}{object rAedesSim \code{biocontainer} object .}
\item{i_monitoring}{object rAedesSim \code{biodata} object concerning mosquito eggs field observations.}
\item{range_alpha_a}{numeric rAedesSim range of female adult competition alpha. Default values are done by seq(0.005,0.01,0.001).}
\item{range_alpha_l}{numeric rAedesSim range of intra-larval competition alpha . Default values are done by seq(0.005,0.01,0.001)}
\item{range_density_l}{numeric rAedesSim object range of maximum larval density in liter of water volume.Default value is 100.}
\item{n_sampling}{numeric number of resampling if stochastic is implemented see in \code{biomodel}. Default is 10.}
\item{inibition}{logical if larval density is considered in \code{biomodel}.Default is FALSE.}
\item{plotresults}{logical if is true a plot is done. Default is FALSE.}
\item{testRMSE}{logical if test the root mean square error of simualtions. Default is FALSE.}
\item{ini_rmse}{numeric Starting position index to calculate RMSE on the observed time series.Defalut is 1.}
\item{end_rmse}{numeric Ending position index to calculate RMSE on observed data.Default is NULL and means that all observed data are considered if ini_rmse = 1.}
}
\value{
biofitmodel
}
\description{
Biofitmodel is the function that finds, through a grid-search scheme, the best intra-specific competition parameters (adult alpha and larval alpha)
according to the minimal-RMSE criterion, when environmental and population data are fixed. To build the grid, the function needs the ranges of the alpha parameters.
The number of observations used to perform the validation can be specified explicitly by giving the starting and ending indices within the observed data.
}
\author{
Istituto di Biometeorologia Firenze Italy Alfonso crisci \email{a.crisci@ibimet.cnr.it} ASL LUCCA Marco Selmi \email{marco.selmi@uslnordovest.toscana.it }
}
\keyword{biofitmodel}
|
#' @title roll.fitFfmDT
#'
#' @description roll.fitFfmDT refits the fundamental factor model over a
#' rolling (or expanding) window of dates and, for each refit, extracts the
#' regression statistics and FLAM analysis at the rebalancing date.
#'
#' @param ffMSpecObj a specFFm object
#' @param windowSize the size of the fit window (in unique dates)
#' @param refitEvery the frequency of fitting (in unique dates)
#' @param refitWindow choice of expanding or rolling
#' @param stdExposuresControl for exposure standardization; (give the Std.Type and lambda). NULL skips it.
#' @param stdReturnControl choices to standardize the returns using GARCH controls. NULL skips it.
#' @param fitControl list of options for fitting the ffm
#' @param full.resid.cov True or False toggle
#' @param analysis choice of "ISM" or "NEW"
#' @return a named list (one element per refit date) containing the
#' rebalancing date, active weights, exposures, regression statistics and
#' FLAM information
#' @export
roll.fitFfmDT <- function(ffMSpecObj, windowSize = 60, refitEvery = 1,
                          refitWindow = c("Expanding", "Rolling"),
                          stdExposuresControl = list(Std.Type = "timeSeries", lambda = 0.9),
                          stdReturnControl = list(GARCH.params = list(omega = 0.09, alpha = 0.1, beta = 0.81)),
                          fitControl = list(fit.method=c("LS","WLS","Rob","W-Rob"),
                                            resid.scaleType = c("STDDEV","EWMA","ROBEWMA", "GARCH"),
                                            lambda = 0.9, GARCH.params = list(omega = 0.09, alpha = 0.1, beta = 0.81),
                                            GARCH.MLE = FALSE),
                          full.resid.cov = TRUE, analysis = c("ISM", "NEW")
                          ){
  # Normalise the window choice; everything downstream compares against the
  # upper-cased value.
  refitWindow <- toupper(refitWindow[1])
  refitWindow <- match.arg(arg = refitWindow, choices = toupper(c("EXPANDING", "ROLLING")), several.ok = FALSE)

  d_ <- eval(ffMSpecObj$date.var) # name of the date var
  T_ <- length(unique(ffMSpecObj$dataDT[[d_]]))
  Tindx <- 1:T_
  uniqueDates <- unique(ffMSpecObj$dataDT[[d_]])
  ffMSpecObj$dataDT[ , rollIdx := idx] # this is for rolling so that it does not get confused with idx

  start = windowSize # this is the starting window
  S <- seq(start , T_, by = refitEvery)
  m = length(S)
  if(S[m] < T_){
    # ensure the final (possibly shorter) window ends exactly at the last date
    S = c(S, T_)
    m = length(S)
  }
  # BUG FIX: refitWindow was upper-cased above, so it must be compared with
  # "EXPANDING"; the former lower-case comparison never matched and silently
  # forced a rolling window for every caller.
  if(refitWindow == "EXPANDING"){
    rollind = lapply(1:m, FUN = function(i) 1:S[i])
  } else {
    rollind = lapply(1:m, FUN = function(i) (1+(i-1)*refitEvery):S[i])
  }
  # each window is named after its last (rebalancing) date
  names(rollind) <- uniqueDates[sapply(rollind, data.table::last)]

  tmp = lapply(as.list(1:m), FUN = function(i) {
    # Rebuild a spec object restricted to the dates of window i.
    rollingObject <- specFfm(data = ffMSpecObj$dataDT[ rollIdx %in% Tindx[rollind[[i]]]],
                             asset.var = ffMSpecObj$asset.var, ret.var = ffMSpecObj$ret.var,
                             date.var = ffMSpecObj$date.var, exposure.vars = ffMSpecObj$exposure.vars,
                             weight.var = ffMSpecObj$weight.var, addIntercept = ffMSpecObj$addIntercept, rob.stats = ffMSpecObj$rob.stats)
    # lagging is done once prior to roll
    if (!is.null(stdExposuresControl)) { # apply StandardizeExposures
      rollingObject = standardizeExposures(specObj = rollingObject, Std.Type = stdExposuresControl$Std.Type,
                                           lambda = stdExposuresControl$lambda)
    }
    if (!is.null(stdReturnControl)){ # standardize returns
      rollingObject = standardizeReturns(specObj = rollingObject, GARCH.params = stdReturnControl$GARCH.params)
    }
    rollingFit <- fitFfmDT(rollingObject, fit.method = fitControl$fit.method,
                           resid.scaleType = fitControl$resid.scaleType, lambda = fitControl$lambda,
                           GARCH.params = fitControl$GARCH.params, GARCH.MLE = fitControl$GARCH.MLE)
    regStats = extractRegressionStats(specObj = rollingObject, fitResults = rollingFit, full.resid.cov = full.resid.cov)
    # BUG FIX: pass the fit results computed above ('rollingFit'); the name
    # 'results' used previously was undefined in this scope.
    ans <- calcFLAM(specObj = rollingObject, modelStats = regStats, fitResults = rollingFit, analysis = analysis[1])
    print(i)  # progress indicator

    rebalDate <- uniqueDates[data.table::last(Tindx[rollind[[i]]])]
    # exposures of every asset at the rebalancing date
    rebalExposures <- rollingObject$dataDT[ get(d_) == rebalDate, c(rollingObject$asset.var, rollingObject$exposure.vars), with = FALSE]
    return(list(date = rebalDate, activeWeights = ans$activeWeights, exposures = rebalExposures,
                rebalStats = regStats, flamInfo = ans))
  })
  names(tmp) <- names(rollind)
  return(tmp)
}
| /R/fitFfM2_rolling.R | no_license | FoeinLove/FactorAnalytics | R | false | false | 4,427 | r | #' @title roll.fitFfmDT
#'
#' @description roll.fitFfmDT rolls the fundamental factor model
#'
#' @param ffMSpecObj a specFFm object
#' @param windowSize the size of the fit window
#' @param refitEvery the frequency of fitting
#' @param refitWindow choice of expanding or rolling
#' @param stdExposuresControl for exposure standardization; (give the Std.Type and lambda)
#' @param stdReturnControl choices to standardize the returns using GARCH controls
#' @param fitControl list of options for fitting the ffm
#' @param full.resid.cov True or False toggle
#' @param analysis choice of "ISM" or "NEW"
#' @export
roll.fitFfmDT <- function(ffMSpecObj, windowSize = 60, refitEvery = 1,
refitWindow = c("Expanding", "Rolling"),
stdExposuresControl = list(Std.Type = "timeSeries", lambda = 0.9),
stdReturnControl = list(GARCH.params = list(omega = 0.09, alpha = 0.1, beta = 0.81)),
fitControl = list(fit.method=c("LS","WLS","Rob","W-Rob"),
resid.scaleType = c("STDDEV","EWMA","ROBEWMA", "GARCH"),
lambda = 0.9, GARCH.params = list(omega = 0.09, alpha = 0.1, beta = 0.81),
GARCH.MLE = FALSE),
full.resid.cov = TRUE, analysis = c("ISM", "NEW")
){
refitWindow = toupper(refitWindow[1])
refitWindow <- match.arg(arg = refitWindow, choices = toupper(c("EXPANDING", "ROLLING")), several.ok = F )
d_ <- eval(ffMSpecObj$date.var) # name of the date var
T_ <- length(unique(ffMSpecObj$dataDT[[d_]]))
Tindx <- 1:T_
uniqueDates <- unique(ffMSpecObj$dataDT[[d_]])
ffMSpecObj$dataDT[ , rollIdx := idx] # this is for rolling so that it does not get confused with idx
start = windowSize # this is the starting window
S <- seq(start , T_, by = refitEvery)
m = length(S)
if(S[m]<T_){
S = c(S,T_)
m = length(S)
}
if(refitWindow == "expanding"){
rollind = lapply(1:m, FUN = function(i) 1:S[i])
} else{
# rollind = lapply(1:m, FUN = function(i) max(1, (S[i]-(windowSize-1))):S[i])
rollind = lapply(1:m, FUN = function(i) (1+(i-1)*refitEvery):S[i])
}
names(rollind) <- uniqueDates[sapply(rollind, data.table::last)]
tmp = lapply(as.list(1:m), FUN = function(i) {
rollingObject <- specFfm(data = ffMSpecObj$dataDT[ rollIdx %in% Tindx[rollind[[i]]]],
asset.var = ffMSpecObj$asset.var, ret.var = ffMSpecObj$ret.var,
date.var = ffMSpecObj$date.var, exposure.vars = ffMSpecObj$exposure.vars,
weight.var = ffMSpecObj$weight.var, addIntercept = ffMSpecObj$addIntercept, rob.stats = ffMSpecObj$rob.stats)
# we should add weight var to the spec object
# lagging is done once prior to roll
if (!is.null(stdExposuresControl)) { # apply StandardizeExposures
rollingObject = standardizeExposures(specObj = rollingObject, Std.Type = stdExposuresControl$Std.Type,
lambda = stdExposuresControl$lambda)
}
if (!is.null(stdReturnControl)){# standardize returns
rollingObject = standardizeReturns(specObj = rollingObject, GARCH.params = stdReturnControl$GARCH.params)
}
rollingFit <- fitFfmDT (rollingObject,fit.method = fitControl$fit.method,
resid.scaleType = fitControl$resid.scaleType, lambda = fitControl$lambda,
GARCH.params = fitControl$GARCH.params, GARCH.MLE = fitControl$GARCH.MLE)
regStats = extractRegressionStats(specObj = rollingObject, fitResults = rollingFit, full.resid.cov = full.resid.cov)
ans <- calcFLAM(specObj = rollingObject, modelStats = regStats, fitResults = results, analysis = analysis[1])
print(i)
rebalDate <- uniqueDates[data.table::last(Tindx[rollind[[i]]])] # names ?
rebalExposures <- rollingObject$dataDT[ get(d_) == rebalDate, c(rollingObject$asset.var, rollingObject$exposure.vars), with = FALSE]
sigmaI <- regStats$resid.var
# names(rebalExposures) <- names(sigmaI)
N_ <- length(sigmaI)
return(list(date = rebalDate, activeWeights = ans$activeWeights, exposures = rebalExposures,
rebalStats = regStats, flamInfo = ans))
})
names(tmp) <- names(rollind)
return(tmp)
}
|
#' Retrieve aggregated analysis features table.
#'
#' This service returns part or all of the so-called feature table; which
#' aggregates the most important findings across ALL pipelines in the GDAC
#' Firehose analysis workflow into a single table for simple access. For more
#' details please visit the online documentation (\url{https://confluence.broadinstitute.org/display/GDAC/Documentation/#Documentation-FeatureTable}).
#' Please note that the service is still undergoing experimental evaluation and
#' does not return JSON format.
#'
#' @param format Either \code{tsv} or \code{csv}, here \code{json} is not
#'   possible, but comming soon.
#' @param column Comma separated list of which data columns/fields to return.
#' @inheritParams Samples.mRNASeq
#'
#' @return A \code{list}, if format is \code{json}, elsewise a \code{data.frame}
#'
#' @export
Analyses.FeatureTable = function(format = "csv",
                                 cohort = "",
                                 column = "",
                                 page = 1,
                                 page_size = 250){

  # Assemble the query parameters for the Firebrowse API call.
  parameters = list(format = format,
                    cohort = cohort,
                    column = column,
                    page = page,
                    page_size = page_size)

  # This endpoint does not (yet) support JSON output.
  if(parameters[["format"]] == "json"){
    stop("Here json is not allowed, yet.")
  }

  # BUG FIX: removed the debugging leftover 'parameters <<- parameters',
  # which leaked the query parameters into the caller's global environment.

  # As of API version 1.1.5 the server-side 'column' filter is broken.
  if(any(parameters[["column"]] != "")){
    stop("For version 1.1.5 the column parameter is not working, please don't use it")
  }

  # 'cohort' is mandatory; validate before building the query URL.
  to.Validate = c("cohort")
  validet.Parameters(params = parameters, to.Validate = to.Validate)

  url = build.Query(parameters = parameters, invoker = "Analyses", method = "FeatureTable")
  ret = download.Data(url, format, page)

  return(ret)
}
| /R/Analyses.FeatureTable.R | no_license | ttriche/FirebrowseR | R | false | false | 1,795 | r | #' Retrieve aggregated analysis features table.
#'
#' This service returns part or all of the so-called feature table; which
#' aggregates the most important findings across ALL pipelines in the GDAC
#' Firehose analysis workflow into a single table for simple access. For more
#' details please visit the online documentation (\url{https://confluence.broadinstitute.org/display/GDAC/Documentation/#Documentation-FeatureTable}).
#' Please note that the service is still undergoing experimental evaluation and
#' does not return JSON format.
#'
#' @param format Either \code{tsv} or \code{csv}, here \code{json} is not
#' possible, but comming soon.
#' @param column Comma separated list of which data columns/fields to return.
#' @inheritParams Samples.mRNASeq
#'
#' @return A \code{list}, if format is \code{json}, elsewise a \code{data.frame}
#'
#' @export
Analyses.FeatureTable = function(format = "csv",
cohort = "",
column = "",
page = 1,
page_size = 250){
parameters = list(format = format,
cohort = cohort,
column = column,
page = page,
page_size = page_size)
if(parameters[["format"]] == "json"){
stop("Here json is not allowed, yet.")
}
parameters <<- parameters
if(any(parameters[["column"]] != "")){
stop("For version 1.1.5 the column parameter is not working, please don't use it")
}
to.Validate = c("cohort")
validet.Parameters(params = parameters, to.Validate = to.Validate)
url = build.Query(parameters = parameters, invoker = "Analyses", method = "FeatureTable")
ret = download.Data(url, format, page)
return(ret)
}
|
#!/usr/bin/env Rscript
# CSUITE.R: command-line driver that loads the CCI and EIU data for a given
# year/quarter and builds the combined C-suite data set.
# Usage: CSUITE.R <year> <quarter> [<year_Qquarter>]   e.g. CSUITE.R 2016 2
#
# The commented-out save()/rm() calls below are a record of the intermediate
# objects this pipeline can snapshot; they are kept for manual debugging.
#rm(list=ls())
#save(cci_eiu_csuite, file = "cci_eiu_csuite.RData")
#save(cci_eiu_Out2, file = "cci_eiu_Out2.RData")
#save(cci_eiu_smart, file = "cci_eiu_smart.RData")
#save(CCIgroupByDateCountry, file = "CCIgroupByDateCountry.RData")
#save(cciOut, file = "cciOut.RData")
#save(cciOutAP, file = "cciOutAP.RData")
#save(cciOutGLOBAL, file = "cciOutGLOBAL.RData")
#save(cciOutREGION, file = "cciOutREGION.RData")
#save(cciOutSEA, file = "cciOutSEA.RData")
#save(cciOutTOTAL, file = "cciOutTOTAL.RData")
#save(eiuFile, file = "eiuFile.RData")
#save(eiuFileQ, file = "eiuFileQ.RData")
#save(eiuFileY, file = "eiuFileY.RData")
#save(long_CCI, file = "long_CCI.RData")
#rm(cci_eiu_csuite,cci_eiu_Out2,cci_eiu_smart,
#   CCIgroupByDateCountry,
#   cciOut,cciOutAP,cciOutGLOBAL,cciOutREGION,cciOutSEA,cciOutTOTAL,
#   eiuFile,eiuFileQ,eiuFileY,
#   long_CCI)

# Parse command-line arguments; the third argument ("YYYY_Qn") is derived
# from the first two when omitted.
args = commandArgs(trailingOnly=TRUE)
if (length(args)<=1) {
stop("\nTwo arguments must be supplied including year (e.g. 2016) + quarter (e.g 2)\nThird argument is e.g. 2016_Q2\n", call.=FALSE)
} else if (length(args)==2) {
args[3] = paste0(args[1],"_Q",args[2])
}
# args[1] <- 2017; args[2] <- 1; args[3] <- paste0(args[1],"_Q",args[2])

# Credentials for the intranet share.
# SECURITY: the password below is hard-coded in source control — it should
# be moved to an environment variable or a credentials store.
userId <- "langestrst01"
# passWd <- "Fill in your enterprise password"
passWd <- "Devt0517"
fileCCIRoot <- "https://intranet.nielsen.com/company/news/newsletters/Consumer%20Confidence%20Concerns%20and%20Spending%20Library/"

# Load helper functions (paths assume the project layout on the analyst's
# home directory).
Rdir <- "~/projects/thought_leadership/R_Code"
source(file.path(Rdir,"libraries.R"))
source(file.path(Rdir,"Functions_CCI.R"))
source(file.path(Rdir,"Functions_EIU.R"))
source(file.path(Rdir,"Functions_CCI_EIU.R"))
datain <- "~/projects/thought_leadership"
shinyApps <- "~/ShinyApps/"

# Build the data sets: CCI totals, the EIU file for the requested quarter,
# and their merged C-suite table.
cciOutTOTAL <- setupCCI(download = F, year = args[1], quarter = args[2]) # CHECK FILENAMES ON ISHARE
eiuFile <- setupEIU(args[3]) # NOTE filename = "eiu_2016_Q2_Q" or "eiu_2016_Q2_Y"
cci_eiu_csuite <- setup_CCI_EIU(cciOutTOTAL,eiuFile, year=args[1], quarter=paste0("Q",args[2]))
head(cciOutTOTAL[cciOutTOTAL$year==2017,])
| /CCI - R Project/CSUITE.R | no_license | stephenl6705/thought_leadership | R | false | false | 2,060 | r | #!/usr/bin/env Rscript
#rm(list=ls())
#save(cci_eiu_csuite, file = "cci_eiu_csuite.RData")
#save(cci_eiu_Out2, file = "cci_eiu_Out2.RData")
#save(cci_eiu_smart, file = "cci_eiu_smart.RData")
#save(CCIgroupByDateCountry, file = "CCIgroupByDateCountry.RData")
#save(cciOut, file = "cciOut.RData")
#save(cciOutAP, file = "cciOutAP.RData")
#save(cciOutGLOBAL, file = "cciOutGLOBAL.RData")
#save(cciOutREGION, file = "cciOutREGION.RData")
#save(cciOutSEA, file = "cciOutSEA.RData")
#save(cciOutTOTAL, file = "cciOutTOTAL.RData")
#save(eiuFile, file = "eiuFile.RData")
#save(eiuFileQ, file = "eiuFileQ.RData")
#save(eiuFileY, file = "eiuFileY.RData")
#save(long_CCI, file = "long_CCI.RData")
#rm(cci_eiu_csuite,cci_eiu_Out2,cci_eiu_smart,
# CCIgroupByDateCountry,
# cciOut,cciOutAP,cciOutGLOBAL,cciOutREGION,cciOutSEA,cciOutTOTAL,
# eiuFile,eiuFileQ,eiuFileY,
# long_CCI)
args = commandArgs(trailingOnly=TRUE)
if (length(args)<=1) {
stop("\nTwo arguments must be supplied including year (e.g. 2016) + quarter (e.g 2)\nThird argument is e.g. 2016_Q2\n", call.=FALSE)
} else if (length(args)==2) {
args[3] = paste0(args[1],"_Q",args[2])
}
# args[1] <- 2017; args[2] <- 1; args[3] <- paste0(args[1],"_Q",args[2])
userId <- "langestrst01"
# passWd <- "Fill in your enterprise password"
passWd <- "Devt0517"
fileCCIRoot <- "https://intranet.nielsen.com/company/news/newsletters/Consumer%20Confidence%20Concerns%20and%20Spending%20Library/"
Rdir <- "~/projects/thought_leadership/R_Code"
source(file.path(Rdir,"libraries.R"))
source(file.path(Rdir,"Functions_CCI.R"))
source(file.path(Rdir,"Functions_EIU.R"))
source(file.path(Rdir,"Functions_CCI_EIU.R"))
datain <- "~/projects/thought_leadership"
shinyApps <- "~/ShinyApps/"
cciOutTOTAL <- setupCCI(download = F, year = args[1], quarter = args[2]) # CHECK FILENAMES ON ISHARE
eiuFile <- setupEIU(args[3]) # NOTE filename = "eiu_2016_Q2_Q" or "eiu_2016_Q2_Y"
cci_eiu_csuite <- setup_CCI_EIU(cciOutTOTAL,eiuFile, year=args[1], quarter=paste0("Q",args[2]))
head(cciOutTOTAL[cciOutTOTAL$year==2017,])
|
#prepare archived flows
# This script loads the current and archived gas-flow tables, harmonises the
# archived column names with the current schema, backfills facility type and
# state onto the archived rows, and binds both into a single 'flows' table.
# It expects 'update' (logical), 'current.flows.csv' and 'archived.flows.csv'
# to already exist in the calling environment.
# archived.flows.csv <- "data/archived/ActualFlows.csv"
# current.flows.csv <-"data/Actual Flow and Storage_20200302091154.csv"
# current.flows.csv <-"data/Actual Flow and Storage_20200304114556.csv"
# update=T
# Load the current flows unless already loaded and no refresh requested.
if (!exists("current.flows") | update) current.flows <- data.table::fread(current.flows.csv, skip=0 ) %>%
as_tibble() %>%
mutate(GasDate= dmy(GasDate)) %>%
rename_all(tolower) %>%
select(-lastupdated)
if (!exists("archived.flows") | update) {
# Rename the archived ("plant"/"zone") columns to the current schema.
archived.flows <- read_csv(archived.flows.csv ) %>%
dplyr::rename_all(tolower) %>%
dplyr::mutate(gasdate=lubridate::dmy(gasdate), lastchanged=lubridate::dmy_hm(lastchanged)) %>%
dplyr::rename(facilityid=plantid, facilityname=plantname, locationid=zoneid, locationname=zonename, lastupdated=lastchanged)
tail(archived.flows ,10)
# Look up facility type and state from the current table.
# NOTE(review): columns are selected by position (2,3,4,10,12) — fragile if
# the CSV layout ever changes; selecting by name would be safer.
facility.types<-current.flows[,c(2,3,4)] %>%
group_by(facilityid) %>%
dplyr::summarise(facilitytype=facilitytype[1])
archived.flows <- left_join(archived.flows, facility.types )
state.type<- current.flows[,c(10,12)]%>% group_by(locationid) %>% dplyr::summarise(state=state[1])
state.type.1<- current.flows[,c(2,10)]%>% group_by(facilityname) %>% dplyr::summarise(state=state[1])
archived.flows <- left_join(archived.flows, state.type.1 )
archived.flows <- left_join(archived.flows, state.type )
}
# Combined history: archived rows followed by current rows.
if (!exists("flows")| update) flows<-bind_rows(archived.flows, current.flows)
| /r/read_flows.R | no_license | msandifo/gasbb | R | false | false | 1,426 | r | #prepare archived flows
# archived.flows.csv <- "data/archived/ActualFlows.csv"
# current.flows.csv <-"data/Actual Flow and Storage_20200302091154.csv"
# current.flows.csv <-"data/Actual Flow and Storage_20200304114556.csv"
# update=T
if (!exists("current.flows") | update) current.flows <- data.table::fread(current.flows.csv, skip=0 ) %>%
as_tibble() %>%
mutate(GasDate= dmy(GasDate)) %>%
rename_all(tolower) %>%
select(-lastupdated)
if (!exists("archived.flows") | update) {
archived.flows <- read_csv(archived.flows.csv ) %>%
dplyr::rename_all(tolower) %>%
dplyr::mutate(gasdate=lubridate::dmy(gasdate), lastchanged=lubridate::dmy_hm(lastchanged)) %>%
dplyr::rename(facilityid=plantid, facilityname=plantname, locationid=zoneid, locationname=zonename, lastupdated=lastchanged)
tail(archived.flows ,10)
facility.types<-current.flows[,c(2,3,4)] %>%
group_by(facilityid) %>%
dplyr::summarise(facilitytype=facilitytype[1])
archived.flows <- left_join(archived.flows, facility.types )
state.type<- current.flows[,c(10,12)]%>% group_by(locationid) %>% dplyr::summarise(state=state[1])
state.type.1<- current.flows[,c(2,10)]%>% group_by(facilityname) %>% dplyr::summarise(state=state[1])
archived.flows <- left_join(archived.flows, state.type.1 )
archived.flows <- left_join(archived.flows, state.type )
}
if (!exists("flows")| update) flows<-bind_rows(archived.flows, current.flows)
|
# Extracted example for create.qqplot.fit.confidence.interval: draws a normal
# QQ plot of random data and overlays pointwise (brown) and simultaneous
# (blue) confidence bands.
library(BoutrosLab.plotting.general)

### Name: create.qqplot.fit.confidence.interval
### Title: Create the confidence bands for a one-sample qq plot
### Aliases: create.qqplot.fit.confidence.interval
### Keywords: iplot

### ** Examples

# simulate a standard-normal sample and compute its confidence bands
tmp.x <- rnorm(100);
tmp.confidence.interval <- create.qqplot.fit.confidence.interval(tmp.x);
qqnorm(tmp.x);
qqline(tmp.x);
# pointwise bands (brown, dashed)
lines(tmp.confidence.interval$z, tmp.confidence.interval$upper.pw, lty = 2, col = "brown");
lines(tmp.confidence.interval$z, tmp.confidence.interval$lower.pw, lty = 2, col = "brown");
# simultaneous bands (blue, dashed); $u/$l index the plotted quantiles
lines(tmp.confidence.interval$z[tmp.confidence.interval$u],
tmp.confidence.interval$upper.sim, lty = 2, col = "blue");
lines(tmp.confidence.interval$z[tmp.confidence.interval$l],
tmp.confidence.interval$lower.sim, lty = 2, col = "blue");
legend(1, -1.5, c("simultaneous", "pointwise"), col = c("blue", "brown"), lty = 2, bty = "n");
| /data/genthat_extracted_code/BoutrosLab.plotting.general/examples/create.qqplot.fit.confidence.interval.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 898 | r | library(BoutrosLab.plotting.general)
### Name: create.qqplot.fit.confidence.interval
### Title: Create the confidence bands for a one-sample qq plot
### Aliases: create.qqplot.fit.confidence.interval
### Keywords: iplot
### ** Examples
tmp.x <- rnorm(100);
tmp.confidence.interval <- create.qqplot.fit.confidence.interval(tmp.x);
qqnorm(tmp.x);
qqline(tmp.x);
lines(tmp.confidence.interval$z, tmp.confidence.interval$upper.pw, lty = 2, col = "brown");
lines(tmp.confidence.interval$z, tmp.confidence.interval$lower.pw, lty = 2, col = "brown");
lines(tmp.confidence.interval$z[tmp.confidence.interval$u],
tmp.confidence.interval$upper.sim, lty = 2, col = "blue");
lines(tmp.confidence.interval$z[tmp.confidence.interval$l],
tmp.confidence.interval$lower.sim, lty = 2, col = "blue");
legend(1, -1.5, c("simultaneous", "pointwise"), col = c("blue", "brown"), lty = 2, bty = "n");
|
###############################################################################################
##
## Figures of the collective rescue paper (ToyokawaGaissmaier2021)
## Wataru Toyokawa
## 07 November. 2020
##
###############################################################################################
# Plotting / image-handling dependencies for all figures in this script.
library(tidyverse)
library(extrafont)
library(cowplot)
library(metR)
library(magick)

## Load Functions
# setwd("~/analysis_repo") #<------- Set this folder as a working directory
# setwd("~/Dropbox/wataru/papers/RiskySocialLearning/draft/analysis_repo") #<------- Set this folder as a working directory
source("agentBasedSim/functions.R")

## ========================================================
#
# figure 1: the collective rescue in the stable environment
#
# =========================================================
# schematic figure (panel built from a pre-rendered PNG)
stable_2AB_scheme <- cowplot::ggdraw() + cowplot::draw_image("agentBasedSim/stable_2AB_scheme.png", scale = 1)
# figures of the simulation: per-run results of the social-learning
# parameter sweep (risk premium 1.5, Gaussian payoffs, individual differences)
socialLearningParamSearch_riskPrem150_Gaussian_indivDiff_data <- read_csv("agentBasedSim/socialLearningParamSearch_riskPrem150_Gaussian_indivDiff_data.csv")
# Aggregate the per-run simulation results over each parameter combination:
# mean / median / sd of the proportion of safe choices.
socialLearningParamSearch_riskPrem150_Gaussian_indivDiff_summary <- socialLearningParamSearch_riskPrem150_Gaussian_indivDiff_data %>%
	group_by(groupSize, learningRate, invTemperature, copyRate, conformityExp) %>%
	summarise(
		mean_proportionSafeChoice = mean(proportionSafeChoice, na.rm = TRUE),
		median_proportionSafeChoice = median(proportionSafeChoice, na.rm = TRUE),
		sd_proportionSafeChoice = sd(proportionSafeChoice, na.rm = TRUE)
	)

# Human-readable facet labels with Greek symbols (sigma, theta, beta).
socialLearningParamSearch_riskPrem150_Gaussian_indivDiff_summary$copyRate_factor = paste(rep('Copying weight\n\U03C3 = ', nrow(socialLearningParamSearch_riskPrem150_Gaussian_indivDiff_summary)), socialLearningParamSearch_riskPrem150_Gaussian_indivDiff_summary$copyRate, sep ='')
socialLearningParamSearch_riskPrem150_Gaussian_indivDiff_summary$conformityExp_factor = paste(rep('Conformity exponent\n\U03b8 = ', nrow(socialLearningParamSearch_riskPrem150_Gaussian_indivDiff_summary)), socialLearningParamSearch_riskPrem150_Gaussian_indivDiff_summary$conformityExp, sep ='')
socialLearningParamSearch_riskPrem150_Gaussian_indivDiff_summary$invTemperature_factor = paste(rep('Inv. temperature\n\U03b2 = ', nrow(socialLearningParamSearch_riskPrem150_Gaussian_indivDiff_summary)), socialLearningParamSearch_riskPrem150_Gaussian_indivDiff_summary$invTemperature, sep ='')
#socialLearningParamSearch_riskPrem150_Gaussian_indivDiff_summary$conformityExp_factor = factor(socialLearningParamSearch_riskPrem150_Gaussian_indivDiff_summary$conformityExp_factor, levels = c('θ = 1','θ = 2','θ = 4','θ = 8'))
## Adding Denrell (2007)'s analytical solution
# Denrell (2007): asymptotic probability that an asocial reinforcement learner
# chooses the risky option, given learning rate alpha, inverse temperature
# beta, and the risky option's Gaussian payoff (mean mu, s.d. sd) relative to
# the safe option. Fully vectorized over its arguments.
Denrell2007 <- function(alpha, beta, mu, sd) {
  1 / (1 + exp((alpha * (beta^2) * (sd^2)) / (2 * (2 - alpha)) - beta * mu))
}
## when mu = surePayoff + 0.5 and sd = 1, the above function can be reduced as follows:
# Inverse temperature at which risk neutrality (Pr = 0.5) holds for a given
# alpha; drawn as the dashed iso-line on the heatmaps below.
Denrell2007Solution <- function(alpha) {
  (2 - alpha) / alpha
}
# Evaluate the analytical solution on an (alpha, beta) grid.
# expand.grid() with beta listed first reproduces the original loop order
# (alpha outer, beta inner) without growing vectors with append() in a loop.
param_grid <- expand.grid(beta = seq(0, 10, 1), alpha = seq(0, 1, 0.1))
Denrell2007Simulation <- data.frame(
  alpha = param_grid$alpha,
  beta = param_grid$beta,
  riskyChoiceProb = Denrell2007(param_grid$alpha, param_grid$beta, mu = 0.5, sd = 1)
)
# (Commented-out alternative: heatmap of the fully analytical Denrell (2007)
# solution computed above; the simulation-based heatmap below is used instead.)
# (Denrell2007Simulation %>%
#    ggplot(aes(alpha, beta))+
#    geom_raster(aes(fill = riskyChoiceProb), stat = 'identity')+
#    stat_function(fun=Denrell2007Solution, color='black', linetype='dashed', size=2/3)+
#    scale_fill_gradient2(midpoint = 0.5, low = "blue", mid = "grey90", high = "red")+
#    labs(x=expression(paste('Learning rate ',alpha,sep="")), y=expression(paste('Inverse temperature ',beta,sep="")), title='', fill="Proportion of \nchoosing \nthe risky option")+
#    myTheme_Arial()+
#    theme(axis.text.x = element_text(angle = 90))+
#    theme(strip.text.y = element_text(angle = 0))+
#    theme(legend.text = element_text(angle = 0))+
#    #theme(legend.position = 'top')+
#    ylim(c(0,10))+
#    NULL -> Denrell2007_figure_analytical
# )
# Fig 1b: asocial baseline (copyRate == 0). Heatmap of the simulated
# proportion of risky choice over the (alpha, beta) grid, with the analytical
# risk-neutrality curve (Denrell2007Solution) overlaid as a dashed line.
(socialLearningParamSearch_riskPrem150_Gaussian_indivDiff_summary %>%
dplyr::filter(copyRate ==0) %>%
ggplot() +
geom_raster(mapping = aes(learningRate, invTemperature, fill = 1-mean_proportionSafeChoice), stat = 'identity') +
labs(x=expression(paste('Learning rate ',alpha,sep="")),
y=expression(paste('Inverse temperature ',beta,sep="")),
#title='Gaussian noise\n mu=0.5; sigma=1',
fill = "Proportion of \nchoosing \nthe risky option"
#title='Gaussian noise\n mu=0.5; sigma=1', fill = "Proportion of\nsafe choice"
)+
#scale_fill_viridis(limits = c(0.45, 1), option="magma")+
stat_function(fun=Denrell2007Solution, color='black', linetype='dashed', size=2/3)+
scale_fill_gradient2(midpoint = 0.5, high = "red", mid = "grey90", low = "blue")+
ylim(c(0,10))+
# myTheme_gillsansMT()+
myTheme_Arial()+
theme(axis.text.x = element_text(angle = 90))+
theme(strip.text.y = element_text(angle = 0))+
theme(legend.text = element_text(angle = 0))+
# theme(legend.position = 'top')+
#facet_grid(copyRate_factor ~ conformityExp_factor)+
NULL -> Denrell2007_figure
)
# plot with Denrell (2007) curve
# Fig 1c: as Fig 1b but for social learners, faceted by copying weight
# (rows) and conformity exponent (columns); subset of parameter values only.
(socialLearningParamSearch_riskPrem150_Gaussian_indivDiff_summary %>%
dplyr::filter(copyRate %in% c(0.25, 0.5)) %>%
dplyr::filter(conformityExp %in% c(1, 4)) %>%
ggplot() +
geom_raster(mapping = aes(learningRate, invTemperature, fill = 1-mean_proportionSafeChoice), stat = 'identity') +
labs(x=expression(paste('Learning rate ',alpha,sep="")),
y=expression(paste('Inverse temperature ',beta,sep="")),
#title='Gaussian noise\n mu=0.5; sigma=1',
fill = "Proportion of \nchoosing \nthe risky option"
#title='Gaussian noise\n mu=0.5; sigma=1', fill = "Proportion of\nsafe choice"
)+
#scale_fill_viridis(limits = c(0.45, 1), option="magma")+
stat_function(fun=Denrell2007Solution, color='black', linetype='dashed', size=2/3)+
#geom_contour(mapping = aes(learningRate, invTemperature, z = mean_proportionSafeChoice), breaks = c(0.5), colour = 'black')+
scale_fill_gradient2(midpoint = 0.5, high = "red", mid = "grey90", low = "blue", breaks=c(0.1,0.5,0.9), labels=c(0.1,0.5,0.9) )+
ylim(c(0,10))+
#myTheme_gillsansMT()+
myTheme_Arial()+
theme(axis.text.x = element_text(angle = 90))+
theme(strip.text = element_text(size=12))+
theme(legend.text = element_text(angle = 0))+
theme(legend.position = 'top')+
# theme(legend.position = NaN)+
facet_grid(copyRate_factor ~ conformityExp_factor)+
NULL -> Denrell2007_figure_social_learning
)
# Supplementary version of Fig 1c: all non-zero copying weights and all
# conformity exponents (full parameter sweep).
socialLearningParamSearch_riskPrem150_Gaussian_indivDiff_summary %>%
dplyr::filter(copyRate !=0) %>%
# dplyr::filter(conformityExp %in% c(1, 4)) %>%
ggplot() +
geom_raster(mapping = aes(learningRate, invTemperature, fill = 1-mean_proportionSafeChoice), stat = 'identity') +
labs(x=expression(paste('Learning rate ',alpha,sep="")),
y=expression(paste('Inverse temperature ',beta,sep="")),
#title='Gaussian noise\n mu=0.5; sigma=1',
fill = "Proportion of \nchoosing \nthe risky option"
#title='Gaussian noise\n mu=0.5; sigma=1', fill = "Proportion of\nsafe choice"
)+
#scale_fill_viridis(limits = c(0.45, 1), option="magma")+
stat_function(fun=Denrell2007Solution, color='black', linetype='dashed', size=2/3)+
#geom_contour(mapping = aes(learningRate, invTemperature, z = mean_proportionSafeChoice), breaks = c(0.5), colour = 'black')+
scale_fill_gradient2(midpoint = 0.5, high = "red", mid = "grey90", low = "blue")+
scale_y_continuous(limits = c(0, 10), breaks = c(1,5,10))+
#myTheme_gillsansMT()+
myTheme_Arial()+
theme(axis.text.x = element_text(angle = 90))+
theme(strip.text.y = element_text(angle = 0))+
theme(legend.text = element_text(angle = 90))+
theme(legend.position = 'top')+
# theme(legend.position = NaN)+
facet_grid(copyRate_factor ~ conformityExp_factor)+
NULL -> Denrell2007_figure_social_learning_full
## difference between social learners' performance and sole reinforcement learners
# Parameter levels used in the sweep; their lengths determine how many times
# the asocial baseline block is recycled below.
sigmaList = c(0, 0.25, 0.5, 0.75, 0.9)
thetaList = c(1, 2, 4, 8)
# Asocial baseline: the copyRate == 0 cells.
socialLearningParamSearch_riskPrem150_Gaussian_indivDiff_summary_baseline <- socialLearningParamSearch_riskPrem150_Gaussian_indivDiff_summary %>% dplyr::filter(copyRate==0)
socialLearningParamSearch_riskPrem150_Gaussian_indivDiff_summary <- socialLearningParamSearch_riskPrem150_Gaussian_indivDiff_summary %>% arrange(copyRate, conformityExp) # re-ordering the data frame
# Subtract the baseline from every (sigma, theta) cell by recycling it.
# NOTE(review): correctness relies on arrange() producing exactly
# (length(sigmaList)-1)*length(thetaList)+1 blocks, each in baseline row
# order — re-check if the parameter grid ever changes.
socialLearningParamSearch_riskPrem150_Gaussian_indivDiff_summary$mean_proportionSafeChoice_diff_from_baseline <- socialLearningParamSearch_riskPrem150_Gaussian_indivDiff_summary$mean_proportionSafeChoice - rep(socialLearningParamSearch_riskPrem150_Gaussian_indivDiff_summary_baseline$mean_proportionSafeChoice, (length(sigmaList)-1) * length(thetaList) + 1)
# plot with Denrell (2007) curve
# Fig 1d: change in risky choice caused by social learning, relative to the
# asocial baseline (orange = more risk taking, purple = less).
(socialLearningParamSearch_riskPrem150_Gaussian_indivDiff_summary %>%
dplyr::filter(copyRate %in% c(0.25, 0.5)) %>%
dplyr::filter(conformityExp %in% c(1, 4)) %>%
ggplot() +
geom_raster(mapping = aes(learningRate, invTemperature, fill = -mean_proportionSafeChoice_diff_from_baseline), stat = 'identity') +
labs(x=expression(paste('Learning rate ',alpha,sep="")),
y=expression(paste('Inverse temperature ',beta,sep="")),
#title='Gaussian noise\n mu=0.5; sigma=1',
fill = "Changes in \nthe risky choice by \nsocial learning"
#title='Gaussian noise\n mu=0.5; sigma=1', fill = "Proportion of\nsafe choice"
)+
#scale_fill_viridis(limits = c(0.45, 1), option="magma")+
stat_function(fun=Denrell2007Solution, color='black', linetype='dashed', size=2/3)+
#geom_contour(mapping = aes(learningRate, invTemperature, z = mean_proportionSafeChoice), breaks = c(0.5), colour = 'black')+
scale_fill_gradient2(midpoint = 0, high = "darkorange", mid = "grey90", low = "darkorchid3", breaks=c(-0.2,0,0.4),labels=c(-0.2,0,0.4))+
ylim(c(0,10))+
#myTheme_gillsansMT()+
myTheme_Arial()+
theme(axis.text.x = element_text(angle = 90))+
theme(strip.text = element_text(size=12))+
theme(legend.text = element_text(angle = 0))+
theme(legend.position = 'top')+
facet_grid(copyRate_factor ~ conformityExp_factor)+
NULL -> Denrell2007_figure_diff_from_baseline
)
# Supplementary version of Fig 1d: full parameter sweep (all non-zero copying
# weights and all conformity exponents).
socialLearningParamSearch_riskPrem150_Gaussian_indivDiff_summary %>%
dplyr::filter(copyRate != 0) %>%
#dplyr::filter(conformityExp %in% c(1, 4)) %>%
ggplot() +
geom_raster(mapping = aes(learningRate, invTemperature, fill = -mean_proportionSafeChoice_diff_from_baseline), stat = 'identity') +
labs(x=expression(paste('Learning rate ',alpha,sep="")),
y=expression(paste('Inverse temperature ',beta,sep="")),
#title='Gaussian noise\n mu=0.5; sigma=1',
fill = "Increases in \nthe risky choice by \nsocial learning"
#title='Gaussian noise\n mu=0.5; sigma=1', fill = "Proportion of\nsafe choice"
)+
#scale_fill_viridis(limits = c(0.45, 1), option="magma")+
stat_function(fun=Denrell2007Solution, color='black', linetype='dashed', size=2/3)+
#geom_contour(mapping = aes(learningRate, invTemperature, z = mean_proportionSafeChoice), breaks = c(0.5), colour = 'black')+
scale_fill_gradient2(midpoint = 0, high = "darkorange", mid = "grey90", low = "darkorchid3")+
scale_y_continuous(limits = c(0, 10), breaks = c(1,5,10))+
#myTheme_gillsansMT()+
myTheme_Arial()+
theme(axis.text.x = element_text(angle = 90))+
# theme(axis.text.y = element_text(size=9))+
theme(strip.text.y = element_text(angle = 0))+
theme(legend.text = element_text(angle = 90))+
theme(legend.position = 'top')+
facet_grid(copyRate_factor ~ conformityExp_factor)+
NULL -> Denrell2007_figure_diff_from_baseline_full
## Figure 1
# Assemble panels a-d into the final figure and export.
# NOTE(review): the output path is an absolute, user-specific Dropbox path;
# consider making it relative to the repository.
figure1_left <- plot_grid(stable_2AB_scheme, Denrell2007_figure, labels = c('a','b'), ncol = 1, align = 'v', label_size = 15)
figure1 <- plot_grid(figure1_left, Denrell2007_figure_social_learning, Denrell2007_figure_diff_from_baseline, labels = c('','c','d'), ncol = 3, align = 'v', label_size = 15)
ggsave(file = '~/Dropbox/wataru/papers/RiskySocialLearning/draft/submissions/eLife/Revision2/exp_reanalysis_result/figure1.png', plot = figure1, dpi = 300, width = 15, height = 6)
## ========================================================
#
# figure 2: the collective rescue in the stable environment
#
# =========================================================
# Plot the individual learning from a different angle
# According to Denrell 2007, the asymptotic equilibrium of risky choice rate is
# Pr^* = 1 / (1 + exp(Z)), where Z = -beta/(2*(alpha-2)) * (alpha*(beta+1) - 2)
#
# Hot stove susceptibility X = alpha * (beta + 1), one value per parameter
# cell. (The column name keeps the original spelling "suceptibility" because
# the plots below reference it as-is.)
socialLearningParamSearch_riskPrem150_Gaussian_indivDiff_summary$hot_stove_suceptibility <- socialLearningParamSearch_riskPrem150_Gaussian_indivDiff_summary$learningRate * (socialLearningParamSearch_riskPrem150_Gaussian_indivDiff_summary$invTemperature + 1)
# Denrell (2007)'s asymptotic risky-choice probability, re-parameterized by
# the hot stove susceptibility X = alpha * (beta + 1), for a fixed inverse
# temperature beta.
# Returns NA where X >= beta + 1 (would imply alpha >= 1) and exactly 1/2 at
# X == 0 (alpha == 0: no learning, hence indifference between options).
Pr_when_beta <- function(X, beta) {
  Z <- -beta / (2 * (X / (beta + 1)) - 2) * (X - 2)
  p <- 1 / (1 + exp(Z))
  p[which(X >= beta + 1)] <- NA   # outside the admissible alpha range
  p[which(X == 0)] <- 1 / 2       # degenerate no-learning case
  p
}
# Exploratory plot (not assigned to a variable; renders when run
# interactively): the asymptotic risky-choice curve Pr_when_beta for a family
# of beta values (colour-coded), as a function of X = alpha * (beta + 1).
ggplot(mapping = aes(x=X)) +
mapply(
function(b, co){ stat_function(data=data.frame(X=c(0,8)), fun=Pr_when_beta, args=list(beta=b), aes_q(color=co)) },
seq(2,10,1),
seq(2,10,1)
)+
geom_vline(xintercept=2, linetype='dashed')+
geom_hline(yintercept=0.5, linetype='dashed')+
myTheme_Arial()+
scale_colour_viridis_c(expression(beta))+
labs(
y='Probability of choosing\nthe risky alternative',
x=expression(alpha * (beta + 1))
)+
NULL
# Duplicate the asocial baseline rows (copyRate == 0) and relabel the copies
# as conformityExp == 4, so the baseline shows up in both conformity facets
# of Figure 2 below.
socialLearningParamSearch_riskPrem150_Gaussian_indivDiff_summary_added <- socialLearningParamSearch_riskPrem150_Gaussian_indivDiff_summary %>% rbind(socialLearningParamSearch_riskPrem150_Gaussian_indivDiff_summary %>% dplyr::filter(copyRate==0))
added_length <- socialLearningParamSearch_riskPrem150_Gaussian_indivDiff_summary %>% dplyr::filter(copyRate==0) %>% nrow()
all_length <- socialLearningParamSearch_riskPrem150_Gaussian_indivDiff_summary_added %>% nrow()
# The appended copies are exactly the last `added_length` rows.
socialLearningParamSearch_riskPrem150_Gaussian_indivDiff_summary_added$conformityExp[(all_length-added_length+1):all_length] <- 4
socialLearningParamSearch_riskPrem150_Gaussian_indivDiff_summary_added$conformityExp_factor[(all_length-added_length+1):all_length] <- 'Conformity exponent\n\U03b8 = 4'
# Figure 2
# Simulated risky-choice rates plotted against the hot stove susceptibility
# X = alpha * (beta + 1), with the analytical asocial curve (Pr_when_beta)
# per inverse-temperature facet. Colour encodes the copying weight.
(socialLearningParamSearch_riskPrem150_Gaussian_indivDiff_summary_added %>%
dplyr::filter(copyRate %in% c(0, 0.25, 0.5)) %>%
dplyr::filter(conformityExp %in% c(1, 4)) %>%
dplyr::filter(invTemperature %in% c(3,5,7)) %>%
ggplot() +
geom_point(aes(hot_stove_suceptibility, 1-mean_proportionSafeChoice, colour=as.factor(copyRate))) +
stat_function(data=data.frame(X=c(0,8),invTemperature_factor='Inv. temperature\nβ = 3'), fun=Pr_when_beta, n = 1001, args=list(beta=3)) +
stat_function(data=data.frame(X=c(0,8),invTemperature_factor='Inv. temperature\nβ = 5'), fun=Pr_when_beta, n = 1001, args=list(beta=5)) +
stat_function(data=data.frame(X=c(0,8),invTemperature_factor='Inv. temperature\nβ = 7'), fun=Pr_when_beta, n = 1001, args=list(beta=7)) +
geom_vline(xintercept=2, linetype='dashed')+
geom_hline(yintercept=0.5, linetype='dashed')+
facet_grid(invTemperature_factor ~ conformityExp_factor) +
scale_colour_viridis_d(name = 'Copying weight\nσ')+
labs(
y='Probability of choosing\nthe risky alternative',
x = 'Susceptibility to the hot stove effect\nα(β + 1)'
# x=paste0('Susceptibility to the hot stove effect\n', expression(alpha (beta + 1)))
)+
xlim(c(0,8))+
myTheme_Arial()+
NULL -> collective_rescue_simplified_view
)
# NOTE(review): output path is absolute and user-specific.
ggsave(file = '~/Dropbox/wataru/papers/RiskySocialLearning/draft/submissions/eLife/Revision2/exp_reanalysis_result/figure1_alternative.png', plot = collective_rescue_simplified_view, dpi = 300, width = 8, height = 6)
## ========================================================
#
# figure 2: Individual heterogeneity
#
# =========================================================
# Summaries of simulations where one parameter (alpha / beta / sigma / theta)
# varies across group members while the others are held fixed.
heterogeneous_groups_denrell2007task_alpha_summary <- read.csv('agentBasedSim/heterogeneous_groups_denrell2007task_alpha_summary.csv')
heterogeneous_groups_denrell2007task_beta_summary <- read.csv('agentBasedSim/heterogeneous_groups_denrell2007task_beta_summary.csv')
heterogeneous_groups_denrell2007task_sigma_summary <- read.csv('agentBasedSim/heterogeneous_groups_denrell2007task_sigma_summary.csv')
heterogeneous_groups_denrell2007task_theta_summary <- read.csv('agentBasedSim/heterogeneous_groups_denrell2007task_theta_summary.csv')
# alpha
# Condition-level means (individual vs group, per heterogeneity level) used
# for the large diamond markers in the alpha-heterogeneity panel.
heterogeneous_groups_denrell2007task_alpha_summary_global <-
heterogeneous_groups_denrell2007task_alpha_summary %>%
group_by(indivOrGroup, variation_level) %>%
summarise(
mean_hot_stove_susceptibility = mean(hot_stove_susceptibility, na.rm=TRUE),
mean_safeChoiceProb = mean(mean_proportionSafeChoice_direct, na.rm=TRUE)
# mean_safeChoiceProb = mean(median_proportionSafeChoice_raw, na.rm=TRUE) %>% convert_alphaRaw_to_alpha()
)
# Re-definition of Pr_when_beta for the heterogeneity panels: same asymptotic
# risky-choice probability as above but WITHOUT the special-case override at
# X == 0; only the inadmissible region X >= beta + 1 is masked to NA.
Pr_when_beta <- function(X, beta) {
  Z <- -beta / (2 * (X / (beta + 1)) - 2) * (X - 2)
  out <- 1 / (1 + exp(Z))
  out[which(X >= beta + 1)] <- NA
  out
}
# Alpha-heterogeneity panel: group condition coloured by heterogeneity level,
# individual condition in grey, analytical asocial curve (beta = 7) in black,
# condition means as large diamonds.
(heterogeneous_groups_denrell2007task_alpha_summary %>%
dplyr::filter(indivOrGroup=='Group') %>%
ggplot()+
geom_vline(xintercept=2, linetype='dashed', colour='grey60')+
geom_hline(yintercept=0.5, linetype='dashed', colour='grey60')+
stat_function(data=data.frame(X=c(2,9), invTemperature_factor='β = 7', indivOrGroup='Individual'), fun=Pr_when_beta, args=list(beta=7)) +
# group condition
geom_point(aes(hot_stove_susceptibility, 1- mean_proportionSafeChoice, colour=as.factor(variation_level)),alpha=3/3)+
geom_line(aes(hot_stove_susceptibility, 1-mean_proportionSafeChoice, group=variation_level, colour=as.factor(variation_level)), alpha=3/3)+
geom_point(data=heterogeneous_groups_denrell2007task_alpha_summary_global%>%filter(variation_level!=0), aes(mean_hot_stove_susceptibility, 1-mean_safeChoiceProb, colour=as.factor(variation_level)), size=4, shape=18, alpha = 2/3)+
# individual condition
geom_point(data = heterogeneous_groups_denrell2007task_alpha_summary%>%filter(variation_level==0), aes(hot_stove_susceptibility, 1- mean_proportionSafeChoice), colour='grey30',alpha=3/3)+
geom_line(data = heterogeneous_groups_denrell2007task_alpha_summary%>%filter(variation_level==0), aes(hot_stove_susceptibility, 1-mean_proportionSafeChoice, group=variation_level), colour='grey30', alpha=3/3)+
# geom_point(data=heterogeneous_groups_denrell2007task_alpha_summary_global%>%filter(variation_level==0), aes(mean_hot_stove_susceptibility, 1-mean_safeChoiceProb), colour='grey30', size=4, shape=18, alpha = 2/3)+
labs(x=expression(paste("Hot stove susceptibility ",alpha[i],"", (beta[i]+1) ,sep="")),
y="Proportion of choosing\nthe risky option",
colour = "Individual\nheterogeneity",
title=expression(paste("Heterogeneous ", alpha[i], sep=""))
)+
ylim(c(0,1))+
xlim(c(0,8))+
myTheme_Arial()+
scale_colour_viridis_d(direction=-1)+
theme(legend.position = 'NaN')+
theme(plot.title = element_text(vjust = - 10, hjust = 0.7))+
NULL -> heterogeneous_groups_denrell2007task_alphaEffect_plot)
# beta
# Condition-level means for the beta-heterogeneity panel.
# NOTE(review): unlike the alpha summary above, this averages the raw
# (transformed) medians and back-transforms via convert_alphaRaw_to_alpha()
# (project helper defined elsewhere) — confirm this difference is intended.
heterogeneous_groups_denrell2007task_beta_summary_global <-
heterogeneous_groups_denrell2007task_beta_summary %>%
group_by(indivOrGroup, variation_level) %>%
summarise(
mean_hot_stove_susceptibility = mean(hot_stove_susceptibility, na.rm=TRUE),
mean_safeChoiceProb = mean(median_proportionSafeChoice_raw, na.rm=TRUE) %>% convert_alphaRaw_to_alpha())
# Asymptotic risky-choice probability as a function of X = alpha * (beta + 1)
# for a FIXED learning rate alpha; the implied beta is recovered from X.
# Returns NA where X < alpha (would imply beta < 0).
Pr_when_alpha <- function(X, alpha) {
  implied_beta <- X / alpha - 1  # from X = alpha * (beta + 1)
  Z <- (alpha * implied_beta^2) / (2 * (2 - alpha)) - implied_beta / 2
  p <- 1 / (1 + exp(Z))
  p[which(X < alpha)] <- NA
  p
}
# Beta-heterogeneity panel: analytical curve for fixed alpha = 0.5, group
# condition coloured by heterogeneity level, individual condition in grey.
(heterogeneous_groups_denrell2007task_beta_summary %>%
dplyr::filter(indivOrGroup=='Group') %>%
ggplot()+
geom_vline(xintercept=2, linetype='dashed', colour='grey60')+
geom_hline(yintercept=0.5, linetype='dashed', colour='grey60')+
stat_function(data=data.frame(X=c(1,8), indivOrGroup='Individual'), fun=Pr_when_alpha, args=list(alpha=0.5), n=1001) +
geom_point(aes(hot_stove_susceptibility, 1- mean_proportionSafeChoice, colour=as.factor(variation_level)),alpha=3/3)+
geom_line(aes(hot_stove_susceptibility, 1-convert_alphaRaw_to_alpha(median_proportionSafeChoice_raw), group=variation_level, colour=as.factor(variation_level)), alpha=3/3)+
geom_point(data=heterogeneous_groups_denrell2007task_beta_summary_global%>%dplyr::filter(indivOrGroup=="Group"), aes(mean_hot_stove_susceptibility, 1-mean_safeChoiceProb, colour=as.factor(variation_level)), size=4, shape=18, alpha = 2/3)+
# individual condition
geom_point(data = heterogeneous_groups_denrell2007task_beta_summary%>%filter(variation_level==0), aes(hot_stove_susceptibility, 1- mean_proportionSafeChoice), colour='grey30',alpha=3/3)+
geom_line(data = heterogeneous_groups_denrell2007task_beta_summary%>%filter(variation_level==0), aes(hot_stove_susceptibility, 1-mean_proportionSafeChoice, group=variation_level), colour='grey30', alpha=3/3)+
# geom_point(data=heterogeneous_groups_denrell2007task_beta_summary_global%>%filter(variation_level==0), aes(mean_hot_stove_susceptibility, 1-mean_safeChoiceProb), colour='grey30', size=4, shape=18, alpha = 2/3)+
labs(x=expression(paste("Hot stove susceptibility ",alpha[i],"", (beta[i]+1) ,sep="")),
y="Proportion of choosing\nthe risky option",
colour = "Individual\nheterogeneity",
title=expression(paste("Heterogeneous ", beta[i], sep=""))
)+
ylim(c(0,1))+
xlim(c(0,8))+
myTheme_Arial()+
scale_colour_viridis_d(direction=-1)+
theme(legend.position = 'NaN')+
theme(plot.title = element_text(vjust = - 10, hjust = 0.7))+
NULL -> heterogeneous_groups_denrell2007task_betaEffect_plot)
# sigma
# Condition-level means for the sigma (copying weight) heterogeneity panel;
# same raw-median + back-transform averaging as the beta summary above.
heterogeneous_groups_denrell2007task_sigma_summary_global <-
heterogeneous_groups_denrell2007task_sigma_summary %>%
group_by(indivOrGroup, variation_level) %>%
summarise(
mean_sigma = mean(sigma, na.rm=TRUE),
mean_safeChoiceProb = mean(median_proportionSafeChoice_raw, na.rm=TRUE) %>% convert_alphaRaw_to_alpha())
# Another re-definition of Pr_when_beta (the reference curve that would use
# it is currently commented out in the sigma plot below). It masks X > beta
# (rather than X >= beta + 1 as in the earlier versions) to NA.
Pr_when_beta <- function(X, beta) {
  Z <- -beta / (2 * (X / (beta + 1)) - 2) * (X - 2)
  prob <- 1 / (1 + exp(Z))
  prob[which(X > beta)] <- NA
  prob
}
# Sigma-heterogeneity panel: risky choice vs copying weight; group condition
# coloured by heterogeneity level, condition means as diamonds (black diamond
# = individual condition).
(heterogeneous_groups_denrell2007task_sigma_summary %>%
dplyr::filter(indivOrGroup=="Group") %>%
ggplot()+
geom_hline(yintercept=0.5, linetype='dashed', colour='grey60')+
#stat_function(data=data.frame(X=c(2,9), invTemperature_factor='β = 7', indivOrGroup='Individual'), fun=Pr_when_beta, args=list(beta=7)) +
geom_point(aes(sigma, 1- mean_proportionSafeChoice, colour=as.factor(variation_level)),alpha=3/3)+
geom_line(aes(sigma, 1- mean_proportionSafeChoice, group=variation_level, colour=as.factor(variation_level)), alpha=3/3)+
geom_point(data=heterogeneous_groups_denrell2007task_sigma_summary_global%>%dplyr::filter(indivOrGroup=='Group'), aes(mean_sigma, 1-mean_safeChoiceProb, colour=as.factor(variation_level)), size=4, shape=18, alpha = 2/3)+
geom_point(data=heterogeneous_groups_denrell2007task_sigma_summary_global%>%dplyr::filter(indivOrGroup=='Individual'), aes(mean_sigma, 1-mean_safeChoiceProb), colour='black', size=4, shape=18, alpha = 2/3)+
labs(x=expression(paste("Social learning weight ", sigma[i], sep="")),
y="Proportion of choosing\nthe risky option",
colour = "Individual\nheterogeneity",
title=expression(paste("Heterogeneous ", sigma[i], sep=""))
)+
ylim(c(0,1))+
xlim(c(0,1))+
myTheme_Arial()+
scale_colour_viridis_d(direction=-1)+
theme(legend.position = 'NaN')+
theme(plot.title = element_text(vjust = - 10))+
NULL -> heterogeneous_groups_denrell2007task_sigmaEffect_plot)
# theta
# Condition-level means for the theta (conformity exponent) heterogeneity
# panel; same raw-median + back-transform averaging as above.
heterogeneous_groups_denrell2007task_theta_summary_global <-
heterogeneous_groups_denrell2007task_theta_summary %>%
group_by(indivOrGroup, variation_level) %>%
summarise(
mean_theta = mean(theta, na.rm=TRUE),
mean_safeChoiceProb = mean(median_proportionSafeChoice_raw, na.rm=TRUE) %>% convert_alphaRaw_to_alpha())
# Re-definition of Pr_when_beta for the theta panel (identical to the sigma
# version above; its reference curve below is also commented out). NA for
# X > beta.
Pr_when_beta <- function(X, beta) {
  denom <- 2 * (X / (beta + 1)) - 2
  res <- 1 / (1 + exp(-beta / denom * (X - 2)))
  res[which(X > beta)] <- NA
  res
}
# Theta-heterogeneity panel: risky choice vs conformity exponent; same layout
# conventions as the sigma panel above.
(heterogeneous_groups_denrell2007task_theta_summary %>%
dplyr::filter(indivOrGroup=="Group") %>%
ggplot()+
geom_hline(yintercept=0.5, linetype='dashed', colour='grey60')+
#stat_function(data=data.frame(X=c(2,9), invTemperature_factor='β = 7', indivOrGroup='Individual'), fun=Pr_when_beta, args=list(beta=7)) +
geom_point(aes(theta, 1- mean_proportionSafeChoice, colour=as.factor(variation_level)),alpha=3/3)+
geom_line(aes(theta, 1- mean_proportionSafeChoice, group=variation_level, colour=as.factor(variation_level)), alpha=3/3)+
geom_point(data=heterogeneous_groups_denrell2007task_theta_summary_global%>%dplyr::filter(indivOrGroup=='Group'), aes(mean_theta, 1-mean_safeChoiceProb, colour=as.factor(variation_level)), size=4, shape=18, alpha = 2/3)+
geom_point(data=heterogeneous_groups_denrell2007task_theta_summary_global%>%dplyr::filter(indivOrGroup=='Individual'), aes(mean_theta, 1-mean_safeChoiceProb), colour='black', size=4, shape=18, alpha = 2/3)+
labs(x=expression(paste("Conformity exponent ", theta[i], sep="")),
y="Proportion of choosing\nthe risky option",
colour = "Individual\nheterogeneity",
title=expression(paste("Heterogeneous ", theta[i], sep=""))
)+
ylim(c(0,1))+
xlim(c(-1,8))+
myTheme_Arial()+
scale_colour_viridis_d(direction=-1)+
theme(legend.position = 'NaN')+
theme(plot.title = element_text(vjust = - 10))+
NULL -> heterogeneous_groups_denrell2007task_thetaEffect_plot)
# 2x2 heterogeneity figure.
# NOTE(review): "heteroeneity_plot" looks like a typo for "heterogeneity_plot";
# left unchanged in case the name is referenced later in the file.
(
heteroeneity_plot <- plot_grid(
heterogeneous_groups_denrell2007task_alphaEffect_plot,
heterogeneous_groups_denrell2007task_betaEffect_plot,
heterogeneous_groups_denrell2007task_sigmaEffect_plot,
heterogeneous_groups_denrell2007task_thetaEffect_plot,
labels = c('','','',''), ncol = 2, align = 'v')
)
## ========================================================
#
# figure 3: The dynamical model of collective behaviour
#
# =========================================================
## Analytical solution of asocial behavioural dynamics.
# Equilibrium risk-seeking bias N_R* - N_S* for n asocial agents, given the
# population-level "risk premium" e and exploration rates pH and pL.
noSocialCurveSimplest <- function(n, e, pH, pL) {
  num <- -n * (pH - pL) * ((-1 + e) * pH + e * pL)
  den <- (pH + pL) * ((-1 + e) * pH - e * pL)
  num / den
}
# Value of the risk premium e at which the equilibrium bias is exactly zero;
# drawn as the dashed iso-cline on the heatmaps below.
zeroIsoclineSimplest <- function(pH, pL) {
  pH / (pH + pL)
}
# Fig. 3a
# schematic figure
# Pre-rendered schematic of the simplest dynamical model, drawn onto a canvas.
schematic_simplest <- cowplot::ggdraw() + cowplot::draw_image("dynamicsModel/schematic_simplest.001.Times.png", scale = 1)
### Fig. 3b
## function plot
# Identity line y = x (only referenced by a commented-out layer below).
diagonalLine <- function(x) {
  x
}
# Exploration probabilities p_l swept over in the panel.
pLs <- c(0.1, 0.2, 0.4, 0.6)
# Fig. 3b: the analytical asocial bias curve for several exploration rates
# p_l (one stat_function layer per value via mapply), with text annotations
# labelling each curve.
(noSocialCurveSimplest_plot <- ggplot(data.frame(X=c(0,1)), aes(x=X)) +
#stat_function(fun = diagonalLine, linetype = 'dashed', color = 'grey')+
geom_vline(xintercept = 0.5, linetype='dashed', color='grey60')+
geom_hline(yintercept = 0, linetype='dashed', color='grey60')+
mapply(
function(explorationrate) {
stat_function(fun = noSocialCurveSimplest, args=list(n = 20, pH=0.7, pL=explorationrate), aes_q(color=explorationrate), size = 1)
},
pLs
) +
annotate(geom="text", x=0.1, y=-13.5, label=expression(paste(italic(p[l]),' = 0.1',sep="")) , size = 5) +
annotate(geom="text", x=0.1, y=-9.5, label=expression(paste(italic(p[l]),' = 0.2',sep="")) , size = 5) +
annotate(geom="text", x=0.1, y=-3.5, label=expression(paste(italic(p[l]),' = 0.4',sep="")) , size = 5) +
annotate(geom="text", x=0.1, y=0.5, label=expression(paste(italic(p[l]),' = 0.6',sep="")) , size = 5) +
annotate(geom="text", x=0.1, y=8, label=expression(paste('(',italic(p[h]),' = 0.7)',sep="")) , size = 5) +
scale_colour_viridis_c(option="cividis", direction=-1)+
labs(color="pL",
# y = expression(paste('Risk-seeking bias: ', N[R]^'*' - N[S]^'*',sep="")),
y = expression(atop('Risk-seeking bias: ', paste(N[R]^'*' - N[S]^'*'))),
x = expression(paste('The population-level "risk premium" ', italic(e)))
# x = expression(paste('The rate of getting enchanted with R: ', italic(e),sep=""))
# x = expression(atop('The rate of', paste('getting enchanted with R: ', italic(e), sep="")))
)+
myTheme_Arial()+
theme(legend.position = 'none')+
NULL)
# Fig. 3c
# Numerical solutions of the dynamical model (headerless CSV; columns named
# explicitly below).
FigSocialLearningSimplest <- read_csv("dynamicsModel/FigSocialLearningSimplest.csv", col_names = FALSE)
names(FigSocialLearningSimplest) <- c('e','c','f','pl','maxS','minS','diffS','maxR','minR','diffR','diffRS')
# Tiny random subsample (not used downstream in this script; kept for quick
# interactive inspection of the large table).
FigSocialLearningSimplest_sample <- dplyr::sample_frac(tbl = FigSocialLearningSimplest, size = 0.00001)
# Facet labels; paste0() is vectorized over the columns.
FigSocialLearningSimplest$f_category <- paste0('Conformity exponent\nθ = ', FigSocialLearningSimplest$f)
FigSocialLearningSimplest$c_category <- paste0('Copying weight\nσ = ', FigSocialLearningSimplest$c)
FigSocialLearningSimplest$pl_category <- paste0('pl = ', FigSocialLearningSimplest$pl)
# Fix the ordering of the conformity-exponent facets.
FigSocialLearningSimplest$f_category <- factor(FigSocialLearningSimplest$f_category, levels = c('Conformity exponent\nθ = 1','Conformity exponent\nθ = 2','Conformity exponent\nθ = 10'))
# Fig. 3c: asocial case (c == 0) of the dynamical model; heatmap of the
# equilibrium bias with the analytical zero iso-cline (dashed) overlaid.
(FigSocialLearningSimplest %>%
dplyr::filter(c == 0) %>%
ggplot(aes(x=pl))+
#geom_line(mapping = aes(y = diffRS, group = pl, colour = pl)) +
geom_raster(mapping = aes(y=e, fill = diffRS), stat = 'identity') +
stat_function(fun=zeroIsoclineSimplest, args=list(pH=0.7), color='black', linetype='dashed', size=1)+
labs(
fill = expression(paste(N[R]^'*' - N[S]^'*')),
# fill = 'Differences in\nrisk seeking and\nrisk aversion',
y = expression(paste('The population-level\n"risk premium" e', '')),
x = expression(paste('Probability of exploration ', italic(p[l])))
# y = expression(atop('The rate of getting', paste('enchanted with R: ', italic(e), sep=""))),
# x = expression(paste('The rate of exploration: ', italic(p[l]), sep=""))
)+
scale_fill_gradient2(midpoint = 0, low = "blue", mid = "grey90", high = "red", breaks=c(-10,0,10))+
myTheme_Arial()+
# theme(axis.text.x = element_text(angle = 90), legend.position='top')+
theme(strip.text.y = element_text(angle = 0))+
# facet_grid(c_category ~ f_category)+
#geom_hline(yintercept = 0, linetype = 'dotted')+
NULL -> FigSocialLearningSimplest_individual)
# Fig 3d
# Social-learning cases of the dynamical model, faceted by copying weight
# (rows) and conformity exponent (columns); same iso-cline overlay as Fig 3c.
(FigSocialLearningSimplest %>%
dplyr::filter((c == 0.25 | c == 0.5) & (f == 1 | f == 2)) %>%
ggplot(aes(x=pl))+
#geom_line(mapping = aes(y = diffRS, group = pl, colour = pl)) +
geom_raster(mapping = aes(y=e, fill = diffRS), stat = 'identity') +
stat_function(fun=zeroIsoclineSimplest, args=list(pH=0.7), color='black', linetype='dashed', size=1)+
labs(
fill = expression(paste(N[R]^'*' - N[S]^'*')),
# fill = 'Differences in\nrisk seeking and\nrisk aversion',
y = expression(paste('The population-level "risk premium" ', italic(e))),
x = expression(paste('Probability of exploration ', italic(p[l])))
# y = expression(atop('The rate of', paste('getting enchanted with R: ', italic(e), sep=""))),
# x = expression(paste('The rate of exploration: ', italic(p[l]), sep=""))
)+
scale_fill_gradient2(midpoint = 0, low = "blue", mid = "grey90", high = "red", breaks=c(-10,0,10))+
myTheme_Arial()+
theme(axis.text.x = element_text(angle = 90), legend.position='top')+
theme(strip.text.y = element_text(angle = 270))+
facet_grid(c_category ~ f_category)+
#geom_hline(yintercept = 0, linetype = 'dotted')+
NULL -> FigSocialLearningSimplest_social)
## Figure 3
# set_null_device: presumably cowplot's null-device setting so that
# expression() labels render under cairo — TODO confirm.
set_null_device(cairo_pdf)
figure3_centre <- plot_grid(noSocialCurveSimplest_plot, FigSocialLearningSimplest_individual, labels = c('b','c'), ncol = 1, align = 'v',label_size = 15)
figure3 <- plot_grid(schematic_simplest, figure3_centre, FigSocialLearningSimplest_social, labels = c('a','','d'), ncol = 3, align = 'v',label_size = 15, rel_widths = c(1, 1, 1.4))
# NOTE(review): output path is absolute and user-specific.
ggsave(file = '~/Dropbox/wataru/papers/RiskySocialLearning/draft/submissions/eLife/Revision2/exp_reanalysis_result/figure3.png', plot = figure3, dpi = 300, width = 15, height = 6)
# Figure 3 - additional (the bifurcation analysis)
# Equilibrium table: conformity exponent f, initial number of safe-choosers
# S_0, copying weight c, risk premium e, equilibrium risky density R_eq.
sleqtableSimplest <- read_csv("dynamicsModel/sleqtableSimplest.csv", col_names = FALSE)
names(sleqtableSimplest) <- c('f','S_0','c','e','R_eq')
sleqtableSimplest$RPreferingInitial <- 0  # filled in below (1 = risky-majority regime)
sleqtableSimplest$direction <- NA         # filled in below ('upward' / 'downward')
sleqtableSimplest$R_0 <- 20 - sleqtableSimplest$S_0  # total population is 20 agents
# Facet labels; paste0() is vectorized, so the old rep()-based paste() calls
# were redundant.
sleqtableSimplest$conformityExponent <- paste0('Conformity exponent\nθ = ', sleqtableSimplest$f)
sleqtableSimplest$conformityExponent <- factor(sleqtableSimplest$conformityExponent, levels = c('Conformity exponent\nθ = 0','Conformity exponent\nθ = 1','Conformity exponent\nθ = 2','Conformity exponent\nθ = 10'))
sleqtableSimplest$e_factor <- paste0('Risk premium\ne = ', sleqtableSimplest$e)
# Classify each equilibrium (vectorized; replaces the old row-wise for loop):
# - RPreferingInitial: 1 if the equilibrium lies in the risky-majority regime
#   (R_eq > 10, i.e. more than half of the 20 agents), else 0.
# - direction: whether the dynamics moved towards more ('upward') or fewer
#   ('downward') risky choosers relative to the initial condition R_0.
sleqtableSimplest$RPreferingInitial <- as.numeric(sleqtableSimplest$R_eq > 10)
sleqtableSimplest$direction <- ifelse(
  sleqtableSimplest$R_eq - sleqtableSimplest$R_0 > 0,
  'upward',
  'downward'
)
# Bifurcation plot: equilibrium density R_eq (black) and initial conditions
# R_0 (coloured by regime, shaped by stream direction) against the copying
# weight, faceted by risk premium and conformity exponent. c == 1 is dropped.
sleqtableSimplest <- sleqtableSimplest %>% dplyr::filter(c != 1)
(sleqtableSimplest %>%
ggplot(aes(x=c))+
geom_point(aes(y=R_0, colour=as.factor(RPreferingInitial), shape=direction), alpha=1/2)+
geom_point(aes(y=R_eq))+
labs(
title = '',
x=expression(paste('Social influence ', sigma, sep="")),
y=expression(paste('Equilibrium density of ',N[R]^'*',sep=""))#,
#title='Social influence\n (pH = 0.5; pL = 0.1; d=0.5; l=0.25)'
)+
scale_shape_manual(values=c('upward'=2, 'downward'=6), name='Stream\'s direction')+
scale_color_manual(values=c('0'='#56B4E9','1'='#D55E00'), name='Risky choice regime')+
myTheme_Arial()+
theme(axis.text.x = element_text(angle = 90), legend.position='top')+
facet_grid(e_factor ~ conformityExponent)+
xlim(c(0,1))+
#ylim(c(0,20))+
geom_hline(yintercept=10, linetype='dashed')+
theme(legend.position = 'none')+
theme(strip.text.y = element_text(angle = 270))+
NULL -> sleqtableSimplest_plot)
# NOTE(review): output path is absolute and user-specific.
ggsave(file = '~/Dropbox/wataru/papers/RiskySocialLearning/draft/submissions/eLife/Revision2/exp_reanalysis_result/sleqtableSimplest_plot.png', plot = sleqtableSimplest_plot, dpi = 300, width = 10, height = 7)
## ========================================================
#
# Figure for the experimental results
#
# =========================================================
library(ggpubr)
# Model-validation simulation outputs for each task variant (presumably
# posterior-predictive simulations of risky-choice proportions — confirm
# against the scripts that generate these CSVs).
social_learning_model_validation_0820_data <- read.csv('experimentalAnalysis/social_learning_model_validation_0820_data.csv')
social_learning_model_validation_1022_riskID11_data <- read.csv('experimentalAnalysis/social_learning_model_validation_1022_riskID11_data.csv')
social_learning_model_validation_1022_riskID12_data <- read.csv('experimentalAnalysis/social_learning_model_validation_1022_riskID12_data.csv')
# ======================================
# 1-risky 1-safe (2-armed) task
# ======================================
# Fitted parameters: AL00 = asocial learning model (individual condition),
# SL00 = social learning model (group condition); global and per-subject files.
fit_AL00_multiVar_LKJ_indiv_0820_globalparameters <- read.csv('experimentalAnalysis/fit_AL00_multiVar_LKJ_indiv_0820_globalparameters.csv')
fit_AL00_multiVar_LKJ_indiv_0820_parameters <- read.csv('experimentalAnalysis/fit_AL00_multiVar_LKJ_indiv_0820_parameters.csv')
fit_SL00_multiVar_LKJ_0820_globalparameters <- read.csv('experimentalAnalysis/fit_SL00_multiVar_LKJ_0820_globalparameters.csv')
fit_SL00_multiVar_LKJ_0820_parameters <- read.csv('experimentalAnalysis/fit_SL00_multiVar_LKJ_0820_parameters.csv')
# Raw behavioural data: group (main) and individual sessions, combined below.
behaviour_main_0820 <- read.csv("experimentalAnalysis/behaviour_main_0820.csv")
behaviour_indiv_0820 <-read.csv("experimentalAnalysis/behaviour_indiv_0820.csv")
allBehaviour0820 <- rbind(behaviour_main_0820, behaviour_indiv_0820)
allBehaviour0820 <- allBehaviour0820 %>%
dplyr::filter(amazonID != 'INHOUSETEST2') %>% # eliminating data generated by debug tests
dplyr::filter(amazonID != '5eac70db94edd22d57fa00c4') # excluded due to a bug in the data storage process
# Make the choice data binary: 0 = safe ('sure'), 1 = risky, -1 = missed trial.
# which() indexing deliberately skips rows with NA choice values.
allBehaviour0820$choice_num = NA
allBehaviour0820$choice_num[which(allBehaviour0820$choice=='sure')] = 0
allBehaviour0820$choice_num[which(allBehaviour0820$choice=='risky')] = 1
allBehaviour0820$choice_num[which(allBehaviour0820$choice=='miss')] = -1
# Human-readable labels for the risk-distribution conditions (default: 5).
allBehaviour0820$riskDistributionId_factor = 'Condition 5'
allBehaviour0820$riskDistributionId_factor[which(allBehaviour0820$riskDistributionId==6)] = 'Condition 6'
allBehaviour0820$riskDistributionId_factor[which(allBehaviour0820$riskDistributionId==7)] = 'Condition 7'
# Individual vs social (group) condition label.
allBehaviour0820$indivOrGroup_factor = 'Individual'
allBehaviour0820$indivOrGroup_factor[allBehaviour0820$indivOrGroup == 1] = 'Social'
# Group-size category: solo play = Individual, 2-4 = Small, 5+ = Large.
allBehaviour0820$groupSize_category = 'Small'
allBehaviour0820$groupSize_category[which(allBehaviour0820$groupSize==1)] = 'Individual'
allBehaviour0820$groupSize_category[which(allBehaviour0820$groupSize>4)] = 'Large'
allBehaviour0820$groupSize_category = factor(allBehaviour0820$groupSize_category, levels = c('Individual','Small','Large'))
# individual condition
# Keep only participants who completed the task (>= 36 recorded trials),
# then restrict to the individual (asocial) condition.
completedIDs = which(table(allBehaviour0820$amazonID) >= 36) %>% names()
allBehaviour0820_indiv = allBehaviour0820 %>% dplyr::filter(amazonID %in% completedIDs) %>%
dplyr::filter(indivOrGroup == 0) # note this is only the individual condition
# Numeric subject index (used as the join key with the Stan fit output).
allBehaviour0820_indiv$sub = as.numeric(as.factor(allBehaviour0820_indiv$amazonID))
allBehaviour0820_indiv = allBehaviour0820_indiv %>% group_by(amazonID) %>% arrange(round, .by_group = TRUE)
# summarised data
# Per-subject summary of the last trials (round > 35): count and mean of
# risky choices, plus bookkeeping columns carried through for later joins.
# Note: missed trials (choice_num == -1) are included in the sum/mean.
allBehaviour0820_indiv_summarised_t35 = allBehaviour0820_indiv %>%
dplyr::filter(round>35) %>%
group_by(sub) %>%
summarise(
risky_choice_count = sum(choice_num, na.rm = TRUE),
risky_choice_mean = mean(choice_num, na.rm=TRUE),
trial_num = n(),
indivOrGroup_factor = indivOrGroup_factor[1],
room = room[1],
amazonID = amazonID[1]
)
# Group identifier: the experimental room, or the literal tag 'Individual'.
allBehaviour0820_indiv_summarised_t35$groupID = allBehaviour0820_indiv_summarised_t35$room
allBehaviour0820_indiv_summarised_t35$groupID[which(allBehaviour0820_indiv_summarised_t35$indivOrGroup_factor=='Individual')] = 'Individual'
# Individual Condition Only
# Merge fitted asocial-learning (AL00) parameters with the trial-35+ behavioural
# summary, then compute each subject's susceptibility to the hot stove effect,
# alpha * (beta + 1), from the posterior medians.
parameterfit_indiv_AL00_0820 <- left_join(fit_AL00_multiVar_LKJ_indiv_0820_parameters, allBehaviour0820_indiv_summarised_t35, by = 'sub')
parameterfit_indiv_AL00_0820$hot_stove_susceptibility <- parameterfit_indiv_AL00_0820$alpha_median_AL00_multiVar_LKJ * (parameterfit_indiv_AL00_0820$beta_median_AL00_multiVar_LKJ + 1)
# Cap extreme values at 6 for plotting. pmin is the vectorised equivalent of
# the original copy-then-which() truncation (NA values pass through unchanged).
# 'trancated' (sic) is kept: downstream plotting code references this name.
parameterfit_indiv_AL00_0820$hot_stove_susceptibility_trancated <- pmin(parameterfit_indiv_AL00_0820$hot_stove_susceptibility, 6)
# Group condition
# Same completion filter as above, now restricted to the social condition.
completedIDs = which(table(allBehaviour0820$amazonID) >= 36) %>% names()
allBehaviour0820_social = allBehaviour0820 %>% dplyr::filter(amazonID %in% completedIDs) %>%
dplyr::filter(indivOrGroup == 1) # note this is only the social condition
# Numeric subject and group indices (join keys with the Stan fit output).
allBehaviour0820_social$sub = as.numeric(as.factor(allBehaviour0820_social$amazonID))
allBehaviour0820_social$group = as.numeric(as.factor(allBehaviour0820_social$room))
# summarised data
# Per-subject summary of the last trials (round > 35), as for the individual
# condition above.
allBehaviour0820_social_summarised_t35 = allBehaviour0820_social %>%
dplyr::filter(round>35) %>%
group_by(sub) %>%
summarise(
risky_choice_count = sum(choice_num, na.rm = TRUE),
risky_choice_mean = mean(choice_num, na.rm=TRUE),
trial_num = n(),
indivOrGroup_factor = indivOrGroup_factor[1],
room = room[1],
amazonID = amazonID[1]
)
# Group identifier: room name (all rows here are social-condition subjects).
allBehaviour0820_social_summarised_t35$groupID = allBehaviour0820_social_summarised_t35$room
allBehaviour0820_social_summarised_t35$groupID[which(allBehaviour0820_social_summarised_t35$indivOrGroup_factor=='Individual')] = 'Individual'
# Group condition
# Merge fitted social-learning (SL00) parameters with the trial-35+ behavioural
# summary, then compute the hot-stove susceptibility alpha * (beta + 1)
# from the posterior medians.
fit_parameters_group_SL00_mcmc <- left_join(fit_SL00_multiVar_LKJ_0820_parameters, allBehaviour0820_social_summarised_t35, by = 'sub')
fit_parameters_group_SL00_mcmc$hot_stove_susceptibility <- fit_parameters_group_SL00_mcmc$alpha_median_SL00_multiVar_LKJ * (fit_parameters_group_SL00_mcmc$beta_median_SL00_multiVar_LKJ + 1)
# Cap extreme values at 6 for plotting (pmin replaces the copy-then-which()
# truncation; NA values pass through unchanged).
fit_parameters_group_SL00_mcmc$hot_stove_susceptibility_trancated <- pmin(fit_parameters_group_SL00_mcmc$hot_stove_susceptibility, 6)
# overall means
# Summarise the simulated risky-choice proportions per condition x rounded
# hot-stove susceptibility x copying-weight category. Means/SDs are also
# computed on the unbounded ("raw") scale via convert_alpha_to_alphaRaw()
# (presumably a logit-type transform defined elsewhere — confirm) so that the
# standard-error band can be back-transformed into (0, 1) below.
social_learning_model_validation_0820_summary <-
social_learning_model_validation_0820_data %>%
group_by(condition_dummy, hot_stove_susceptibility_rounded, soc_mean_category) %>%
summarise(
proportionRiskyChoice_b2_mean = mean(proportionRiskyChoice_b2),
proportionRiskyChoice_b2_sd = sd(proportionRiskyChoice_b2),
raw_proportionRiskyChoice_b2_mean = proportionRiskyChoice_b2 %>% convert_alpha_to_alphaRaw() %>% mean(),
raw_proportionRiskyChoice_b2_sd = proportionRiskyChoice_b2 %>% convert_alpha_to_alphaRaw() %>% sd(),
soc_mean = mean(soc_mean),
n = n()
)
# +/- 1 SE band and midpoint, computed on the raw scale and mapped back.
social_learning_model_validation_0820_summary$proportionRiskyChoice_b2_lower <-
(social_learning_model_validation_0820_summary$raw_proportionRiskyChoice_b2_mean - social_learning_model_validation_0820_summary$raw_proportionRiskyChoice_b2_sd / sqrt(social_learning_model_validation_0820_summary$n)) %>% convert_alphaRaw_to_alpha
social_learning_model_validation_0820_summary$proportionRiskyChoice_b2_upper <-
(social_learning_model_validation_0820_summary$raw_proportionRiskyChoice_b2_mean + social_learning_model_validation_0820_summary$raw_proportionRiskyChoice_b2_sd / sqrt(social_learning_model_validation_0820_summary$n)) %>% convert_alphaRaw_to_alpha
social_learning_model_validation_0820_summary$proportionRiskyChoice_b2_mid <-
social_learning_model_validation_0820_summary$raw_proportionRiskyChoice_b2_mean %>% convert_alphaRaw_to_alpha
# modest social learners' means
# Same summary restricted to "modest" copying weights (0.3 < sigma < 0.6) and
# susceptibility < 6.
# NOTE(review): raw_proportionRiskyChoice_b2_mean here actually stores the
# median() on the raw scale despite its name — confirm this is intentional
# (it differs from the mean() used in the overall summary above).
social_learning_model_validation_0820_summary_reallyHighSigma <-
social_learning_model_validation_0820_data %>%
dplyr::filter(soc_mean > 3/10 & soc_mean < 6/10 & hot_stove_susceptibility_rounded < 6) %>%
group_by(condition_dummy, hot_stove_susceptibility_rounded, soc_mean_category) %>%
summarise(
proportionRiskyChoice_b2_mean = mean(proportionRiskyChoice_b2),
proportionRiskyChoice_b2_sd = sd(proportionRiskyChoice_b2),
raw_proportionRiskyChoice_b2_mean = proportionRiskyChoice_b2 %>% convert_alpha_to_alphaRaw() %>% median(),
raw_proportionRiskyChoice_b2_sd = proportionRiskyChoice_b2 %>% convert_alpha_to_alphaRaw() %>% sd(),
soc_mean = mean(soc_mean),
n = n()
)
social_learning_model_validation_0820_summary_reallyHighSigma$proportionRiskyChoice_b2_mid <-
social_learning_model_validation_0820_summary_reallyHighSigma$raw_proportionRiskyChoice_b2_mean %>% convert_alphaRaw_to_alpha
# Alias used by the plotting code below.
fit_parameters_group_SL00_mcmc$soc_mean <- fit_parameters_group_SL00_mcmc$soc_mean_SL00_multiVar_LKJ
# ======================================
# 1-risky 3-safe (4-armed) task
# ======================================
# fit result -- global parameters
fit_SL00_multiVar_LKJ_1022_globalparameters <- read.csv('experimentalAnalysis/fit_SL00_multiVar_LKJ_1022_globalparameters.csv')
fit_AL00_multiVar_LKJ_indiv_riskID11_indiv_riskID11Condition_globalparameters <- read.csv('experimentalAnalysis/fit_AL00_multiVar_LKJ_indiv_riskID11_indiv_riskID11Condition_globalparameters.csv')
## behavioural data summary
allBehaviour1022_group <- read.csv("experimentalAnalysis/allBehaviour1022_group.csv")
allBehaviour1022_group_riskID11 <- allBehaviour1022_group %>% dplyr::filter(riskDistributionId == 11 & room != '102_session_622') #the group '102_session_622' had a weird error (3 of them played riskID11, and the rest two played riskID12)
fit_SL00_multiVar_LKJ_1022_parameters <- read.csv("experimentalAnalysis/fit_SL00_multiVar_LKJ_1022_parameters.csv")
# Per-subject summary of the last trials (round > 35) in the group condition:
# here the outcome is best_risky_choice (choosing the optimal risky option).
allBehaviour1022_group_riskID11_summarised_t35 <- allBehaviour1022_group_riskID11 %>%
dplyr::filter(round>35) %>%
group_by(amazonID, sub) %>%
summarise(
risky_choice_count = sum(best_risky_choice, na.rm = TRUE),
risky_choice_mean = mean(best_risky_choice, na.rm=TRUE),
trial_num = n(),
indivOrGroup_factor = indivOrGroup_factor[1],
room = room[1]
)
# Group identifier: the experimental room.
allBehaviour1022_group_riskID11_summarised_t35$groupID <- allBehaviour1022_group_riskID11_summarised_t35$room
allBehaviour1022_indiv <- read.csv("experimentalAnalysis/allBehaviour1022_indiv.csv")
# Individual condition, 1-risky-3-safe task only.
allBehaviour1022_indiv_riskID11 <- allBehaviour1022_indiv %>%
filter(riskDistributionId_factor=='Con: 0')
# The 1-risky-3-safe task was labeled "Con: 0" or "11" originally.
# In the analysis code, "riskID11" means the 1-risky-3-safe task,
# while "riskID12" means the 2-risky-2-safe task.
# Re-number subjects within this subset (old index kept in sub_old) so that
# 'sub' matches the index used by the corresponding Stan fit.
allBehaviour1022_indiv_riskID11$sub_old <- allBehaviour1022_indiv_riskID11$sub
allBehaviour1022_indiv_riskID11$sub <- allBehaviour1022_indiv_riskID11$amazonID %>% as.factor() %>% as.numeric()
# Per-subject summary of the last trials (round > 35).
allBehaviour1022_indiv_riskID11_summarised_t35 <- allBehaviour1022_indiv_riskID11 %>%
dplyr::filter(round>35) %>%
group_by(amazonID, sub) %>%
summarise(
risky_choice_count = sum(best_risky_choice, na.rm = TRUE),
risky_choice_mean = mean(best_risky_choice, na.rm=TRUE),
trial_num = n(),
indivOrGroup_factor = indivOrGroup_factor[1],
room = room[1]
)
# Group identifier: room, or 'Individual' for the individual condition.
allBehaviour1022_indiv_riskID11_summarised_t35$groupID = allBehaviour1022_indiv_riskID11_summarised_t35$room
allBehaviour1022_indiv_riskID11_summarised_t35$groupID[which(allBehaviour1022_indiv_riskID11_summarised_t35$indivOrGroup_factor=='Individual')] = 'Individual'
# Individual fits
fit_AL00_multiVar_LKJ_indiv_riskID11_parameters <- read.csv('experimentalAnalysis/fit_AL00_multiVar_LKJ_indiv_riskID11_parameters.csv')
# Merging the behavioural data with the fit parameters.
# Hot stove effect - individual condition: susceptibility alpha * (1 + beta)
# from the posterior medians; values are capped at 6 for plotting via pmin
# (vectorised equivalent of the original copy-then-which() truncation; NAs
# pass through unchanged).
fit_AL_indiv_riskID11_parameters <- right_join(fit_AL00_multiVar_LKJ_indiv_riskID11_parameters, allBehaviour1022_indiv_riskID11_summarised_t35, by = 'sub')
fit_AL_indiv_riskID11_parameters$hot_stove_susceptibility <- fit_AL_indiv_riskID11_parameters$alpha_median_AL00_multiVar_LKJ * (1+ fit_AL_indiv_riskID11_parameters$beta_median_AL00_multiVar_LKJ)
fit_AL_indiv_riskID11_parameters$hot_stove_susceptibility_trancated <- pmin(fit_AL_indiv_riskID11_parameters$hot_stove_susceptibility, 6)
# Hot stove effect - group condition: same computation from posterior means.
fit_SL00_riskID11_parameters <- right_join(fit_SL00_multiVar_LKJ_1022_parameters, allBehaviour1022_group_riskID11_summarised_t35, by = 'sub')
fit_SL00_riskID11_parameters$hot_stove_susceptibility <- fit_SL00_riskID11_parameters$alpha_mean_SL00_multiVar_LKJ * (1+ fit_SL00_riskID11_parameters$beta_mean_SL00_multiVar_LKJ)
fit_SL00_riskID11_parameters$hot_stove_susceptibility_trancated <- pmin(fit_SL00_riskID11_parameters$hot_stove_susceptibility, 6)
# overall means
# Same structure as the 2-armed-task summary above: per-cell mean/SD on the
# probability scale and on the unbounded "raw" scale (for the SE band).
social_learning_model_validation_1022_riskID11_summary <-
social_learning_model_validation_1022_riskID11_data %>%
group_by(condition_dummy, hot_stove_susceptibility_rounded, soc_mean_category) %>%
summarise(
proportionRiskyChoice_b2_mean = mean(proportionRiskyChoice_b2),
proportionRiskyChoice_b2_sd = sd(proportionRiskyChoice_b2),
raw_proportionRiskyChoice_b2_mean = proportionRiskyChoice_b2 %>% convert_alpha_to_alphaRaw() %>% mean(),
raw_proportionRiskyChoice_b2_sd = proportionRiskyChoice_b2 %>% convert_alpha_to_alphaRaw() %>% sd(),
soc_mean = mean(soc_mean),
n = n()
)
# +/- 1 SE band and midpoint, computed on the raw scale and mapped back.
social_learning_model_validation_1022_riskID11_summary$proportionRiskyChoice_b2_lower <-
(social_learning_model_validation_1022_riskID11_summary$raw_proportionRiskyChoice_b2_mean - social_learning_model_validation_1022_riskID11_summary$raw_proportionRiskyChoice_b2_sd / sqrt(social_learning_model_validation_1022_riskID11_summary$n)) %>% convert_alphaRaw_to_alpha
social_learning_model_validation_1022_riskID11_summary$proportionRiskyChoice_b2_upper <-
(social_learning_model_validation_1022_riskID11_summary$raw_proportionRiskyChoice_b2_mean + social_learning_model_validation_1022_riskID11_summary$raw_proportionRiskyChoice_b2_sd / sqrt(social_learning_model_validation_1022_riskID11_summary$n)) %>% convert_alphaRaw_to_alpha
social_learning_model_validation_1022_riskID11_summary$proportionRiskyChoice_b2_mid <-
social_learning_model_validation_1022_riskID11_summary$raw_proportionRiskyChoice_b2_mean %>% convert_alphaRaw_to_alpha
# modest social learners' means
# Restricted to modest copying weights (0.3 < sigma < 0.6) and
# susceptibility < 6.
# NOTE(review): raw_proportionRiskyChoice_b2_mean stores a median() here
# despite its name — confirm intentional (matches the 0820 block above).
social_learning_model_validation_1022_riskID11_summary_reallyHighSigma <-
social_learning_model_validation_1022_riskID11_data %>%
dplyr::filter(soc_mean > 3/10 & soc_mean < 6/10 & hot_stove_susceptibility_rounded < 6) %>%
group_by(condition_dummy, hot_stove_susceptibility_rounded, soc_mean_category) %>%
summarise(
proportionRiskyChoice_b2_mean = mean(proportionRiskyChoice_b2),
proportionRiskyChoice_b2_sd = sd(proportionRiskyChoice_b2),
raw_proportionRiskyChoice_b2_mean = proportionRiskyChoice_b2 %>% convert_alpha_to_alphaRaw() %>% median(),
raw_proportionRiskyChoice_b2_sd = proportionRiskyChoice_b2 %>% convert_alpha_to_alphaRaw() %>% sd(),
soc_mean = mean(soc_mean),
n = n()
)
social_learning_model_validation_1022_riskID11_summary_reallyHighSigma$proportionRiskyChoice_b2_mid <-
social_learning_model_validation_1022_riskID11_summary_reallyHighSigma$raw_proportionRiskyChoice_b2_mean %>% convert_alphaRaw_to_alpha
# ======================================
# 2-risky 2-safe (4-armed) task
# ======================================
# fit result -- global parameters (re-reads the same 1022 SL00 files as above).
fit_SL00_multiVar_LKJ_1022_globalparameters <- read.csv('experimentalAnalysis/fit_SL00_multiVar_LKJ_1022_globalparameters.csv')
fit_AL00_multiVar_LKJ_indiv_riskID12_indiv_riskID12Condition_globalparameters <- read.csv('experimentalAnalysis/fit_AL00_multiVar_LKJ_indiv_riskID12_indiv_riskID12Condition_globalparameters.csv')
## behavioural data summary
allBehaviour1022_group <- read.csv("experimentalAnalysis/allBehaviour1022_group.csv")
# Exclude the mixed-condition group '102_session_622' (see note in the
# riskID11 section above).
allBehaviour1022_group_riskID12 <- allBehaviour1022_group %>% dplyr::filter(riskDistributionId == 12 & room != '102_session_622')
fit_SL00_multiVar_LKJ_1022_parameters <- read.csv("experimentalAnalysis/fit_SL00_multiVar_LKJ_1022_parameters.csv")
# Per-subject summary of the last trials (round > 35) in the group condition.
allBehaviour1022_group_riskID12_summarised_t35 <- allBehaviour1022_group_riskID12 %>%
dplyr::filter(round>35) %>%
group_by(amazonID, sub) %>%
summarise(
risky_choice_count = sum(best_risky_choice, na.rm = TRUE),
risky_choice_mean = mean(best_risky_choice, na.rm=TRUE),
trial_num = n(),
indivOrGroup_factor = indivOrGroup_factor[1],
room = room[1]
)
allBehaviour1022_group_riskID12_summarised_t35$groupID <- allBehaviour1022_group_riskID12_summarised_t35$room
allBehaviour1022_indiv <- read.csv("experimentalAnalysis/allBehaviour1022_indiv.csv")
# Individual condition, 2-risky-2-safe task only.
allBehaviour1022_indiv_riskID12 <- allBehaviour1022_indiv %>% filter(riskDistributionId_factor=='Con: 2') #'Con: 2' means the 2-risky-2-safe task
# Re-number subjects within this subset (old index kept in sub_old) so that
# 'sub' matches the index used by the corresponding Stan fit.
allBehaviour1022_indiv_riskID12$sub_old <- allBehaviour1022_indiv_riskID12$sub
allBehaviour1022_indiv_riskID12$sub <- allBehaviour1022_indiv_riskID12$amazonID %>% as.factor() %>% as.numeric()
# Per-subject summary of the last trials (round > 35).
allBehaviour1022_indiv_riskID12_summarised_t35 <- allBehaviour1022_indiv_riskID12 %>%
dplyr::filter(round>35) %>%
group_by(amazonID, sub) %>%
summarise(
risky_choice_count = sum(best_risky_choice, na.rm = TRUE),
risky_choice_mean = mean(best_risky_choice, na.rm=TRUE),
trial_num = n(),
indivOrGroup_factor = indivOrGroup_factor[1],
room = room[1]
)
# Group identifier: room, or 'Individual' for the individual condition.
allBehaviour1022_indiv_riskID12_summarised_t35$groupID = allBehaviour1022_indiv_riskID12_summarised_t35$room
allBehaviour1022_indiv_riskID12_summarised_t35$groupID[which(allBehaviour1022_indiv_riskID12_summarised_t35$indivOrGroup_factor=='Individual')] = 'Individual'
# Individual fits
fit_AL00_multiVar_LKJ_indiv_riskID12_parameters <- read.csv('experimentalAnalysis/fit_AL00_multiVar_LKJ_indiv_riskID12_parameters.csv')
# Merging the behavioural data with the fit parameters.
# Hot stove effect - individual condition: susceptibility alpha * (1 + beta)
# from the posterior medians; values are capped at 6 for plotting via pmin
# (vectorised equivalent of the original copy-then-which() truncation; NAs
# pass through unchanged).
fit_AL_indiv_riskID12_parameters <- right_join(fit_AL00_multiVar_LKJ_indiv_riskID12_parameters, allBehaviour1022_indiv_riskID12_summarised_t35, by = 'sub')
fit_AL_indiv_riskID12_parameters$hot_stove_susceptibility <- fit_AL_indiv_riskID12_parameters$alpha_median_AL00_multiVar_LKJ * (1+ fit_AL_indiv_riskID12_parameters$beta_median_AL00_multiVar_LKJ)
fit_AL_indiv_riskID12_parameters$hot_stove_susceptibility_trancated <- pmin(fit_AL_indiv_riskID12_parameters$hot_stove_susceptibility, 6)
# Hot stove effect - group condition: same computation from posterior means.
fit_SL00_riskID12_parameters <- right_join(fit_SL00_multiVar_LKJ_1022_parameters, allBehaviour1022_group_riskID12_summarised_t35, by = 'sub')
fit_SL00_riskID12_parameters$hot_stove_susceptibility <- fit_SL00_riskID12_parameters$alpha_mean_SL00_multiVar_LKJ * (1+ fit_SL00_riskID12_parameters$beta_mean_SL00_multiVar_LKJ)
fit_SL00_riskID12_parameters$hot_stove_susceptibility_trancated <- pmin(fit_SL00_riskID12_parameters$hot_stove_susceptibility, 6)
# overall means
# Same structure as the riskID11 summary above.
social_learning_model_validation_1022_riskID12_summary <-
social_learning_model_validation_1022_riskID12_data %>%
group_by(condition_dummy, hot_stove_susceptibility_rounded, soc_mean_category) %>%
summarise(
proportionRiskyChoice_b2_mean = mean(proportionRiskyChoice_b2),
proportionRiskyChoice_b2_sd = sd(proportionRiskyChoice_b2),
raw_proportionRiskyChoice_b2_mean = proportionRiskyChoice_b2 %>% convert_alpha_to_alphaRaw() %>% mean(),
raw_proportionRiskyChoice_b2_sd = proportionRiskyChoice_b2 %>% convert_alpha_to_alphaRaw() %>% sd(),
soc_mean = mean(soc_mean),
n = n()
)
# +/- 1 SE band and midpoint, computed on the raw scale and mapped back.
social_learning_model_validation_1022_riskID12_summary$proportionRiskyChoice_b2_lower <-
(social_learning_model_validation_1022_riskID12_summary$raw_proportionRiskyChoice_b2_mean - social_learning_model_validation_1022_riskID12_summary$raw_proportionRiskyChoice_b2_sd / sqrt(social_learning_model_validation_1022_riskID12_summary$n)) %>% convert_alphaRaw_to_alpha
social_learning_model_validation_1022_riskID12_summary$proportionRiskyChoice_b2_upper <-
(social_learning_model_validation_1022_riskID12_summary$raw_proportionRiskyChoice_b2_mean + social_learning_model_validation_1022_riskID12_summary$raw_proportionRiskyChoice_b2_sd / sqrt(social_learning_model_validation_1022_riskID12_summary$n)) %>% convert_alphaRaw_to_alpha
social_learning_model_validation_1022_riskID12_summary$proportionRiskyChoice_b2_mid <-
social_learning_model_validation_1022_riskID12_summary$raw_proportionRiskyChoice_b2_mean %>% convert_alphaRaw_to_alpha
# modest social learners' means
# Restricted to modest copying weights (0.3 < sigma < 0.6) and
# susceptibility < 6.
# NOTE(review): raw_proportionRiskyChoice_b2_mean stores a median() here
# despite its name — confirm intentional (matches the other task blocks).
social_learning_model_validation_1022_riskID12_summary_reallyHighSigma <-
social_learning_model_validation_1022_riskID12_data %>%
dplyr::filter(soc_mean > 3/10 & soc_mean < 6/10 & hot_stove_susceptibility_rounded < 6) %>%
group_by(condition_dummy, hot_stove_susceptibility_rounded, soc_mean_category) %>%
summarise(
proportionRiskyChoice_b2_mean = mean(proportionRiskyChoice_b2),
proportionRiskyChoice_b2_sd = sd(proportionRiskyChoice_b2),
raw_proportionRiskyChoice_b2_mean = proportionRiskyChoice_b2 %>% convert_alpha_to_alphaRaw() %>% median(),
raw_proportionRiskyChoice_b2_sd = proportionRiskyChoice_b2 %>% convert_alpha_to_alphaRaw() %>% sd(),
soc_mean = mean(soc_mean),
n = n()
)
social_learning_model_validation_1022_riskID12_summary_reallyHighSigma$proportionRiskyChoice_b2_mid <-
social_learning_model_validation_1022_riskID12_summary_reallyHighSigma$raw_proportionRiskyChoice_b2_mean %>% convert_alphaRaw_to_alpha
# Figure 6a: 1-risky-1-safe task. Observed per-subject proportions of risky
# choice (points: grey triangles = individual condition, coloured dots = group
# condition, coloured by fitted copying weight sigma) overlaid on the
# model-validation predictions (lines and SE ribbons) as a function of
# hot-stove susceptibility.
ggplot() +
# Horizontal reference at chance level (0.5 for the two-option task).
geom_segment(aes(x=0,xend=5.8,y=0.5,yend=0.5),colour="grey30", size=0.5) +
# SE ribbons: individual-condition prediction (grey) and mild-copying group prediction (orange).
geom_ribbon(data=social_learning_model_validation_0820_summary%>%dplyr::filter(condition_dummy==0&hot_stove_susceptibility_rounded<6), mapping=aes(hot_stove_susceptibility_rounded, ymin=proportionRiskyChoice_b2_lower, ymax=proportionRiskyChoice_b2_upper), fill='grey20', alpha=1/2)+
geom_ribbon(data=social_learning_model_validation_0820_summary%>%dplyr::filter(condition_dummy==1&soc_mean_category=='mild'&hot_stove_susceptibility_rounded<6), mapping=aes(hot_stove_susceptibility_rounded, ymin=proportionRiskyChoice_b2_lower, ymax=proportionRiskyChoice_b2_upper), fill='orange', alpha=1/2)+
# Observed data points (susceptibility truncated at 6 for display).
geom_point(data = parameterfit_indiv_AL00_0820, mapping=aes(hot_stove_susceptibility_trancated, risky_choice_mean), colour='grey20', shape = 17)+ # shape=17: filled triangle
geom_point(data = fit_parameters_group_SL00_mcmc, mapping=aes(hot_stove_susceptibility_trancated,risky_choice_mean, colour=soc_mean), shape = 20) +
# Model-prediction midlines: individual, group average, and modest-sigma group.
geom_line(data=social_learning_model_validation_0820_summary%>%dplyr::filter(condition_dummy==0&hot_stove_susceptibility_rounded<6), mapping=aes(hot_stove_susceptibility_rounded, proportionRiskyChoice_b2_mid , linetype = "Ind"), size=1.0)+
geom_line(data=social_learning_model_validation_0820_summary%>%dplyr::filter(condition_dummy==1&hot_stove_susceptibility_rounded<6), mapping=aes(hot_stove_susceptibility_rounded, proportionRiskyChoice_b2_mid, group=soc_mean_category, colour=mean(soc_mean) , linetype = "g1"), size=1.0)+
geom_line(data=social_learning_model_validation_0820_summary_reallyHighSigma, mapping=aes(hot_stove_susceptibility_rounded, proportionRiskyChoice_b2_mid, group=soc_mean_category, colour=mean(soc_mean) , linetype = 'g2'), size=1.0)+
scale_colour_viridis_c(expression('Copying weight \U03C3'[i]), begin = 0.2, end = 0.9, option='plasma', direction=-1)+
# Manual linetype legend doubling as the line-colour key.
scale_linetype_manual(name = ""
, breaks = c("Ind","g1","g2")
, labels = c("Individual","Group average","Group with \U03C3 = 0.4")
, values = c('solid','solid','dashed')
, guide = guide_legend(
override.aes = list(linetype = c('solid','solid','dashed')
, size=0.6
, color = c('black','#ff751a','#ff6666'))
, order=1)
) +
myTheme_Arial()+
xlim(c(0,6.5))+
labs(
x = expression(atop('Susceptibility to the hot stove effect', paste(alpha[i], '(', beta[i], '+1)'))),
y = 'Mean proportion of choosing\nthe optimal risky option',
title = 'The 1-risky-1-safe task \n(N = 168)') +
guides(colour = "none")+
#theme(legend.position = c(0.85, 0.5))+
#theme(legend.position = NaN)+
theme(legend.position = c(0.65, 0.75))+
theme(legend.title = element_text(size=12))+
theme(legend.text = element_text(size=11))+
theme(legend.key = element_rect(fill = "transparent", colour = "transparent"))+
NULL -> fig6_a
## Figure 6b -- the 1-risky-3-safe task (N = 148).
## Posterior predictions (ribbons: grey = individual condition, orange = the
## 'mild' copying-weight category of the group condition) overlaid with each
## participant's mean risky choice (triangles = individual-learning fits,
## dots = social-learning fits coloured by the fitted copying weight), as a
## function of the susceptibility to the hot stove effect.
ggplot() +
# chance level: 1 optimal risky option among 4
geom_segment(aes(x=0,xend=6,y=0.25,yend=0.25),colour="grey30", size=0.5) +
geom_ribbon(data=social_learning_model_validation_1022_riskID11_summary%>%dplyr::filter(condition_dummy==0), mapping=aes(hot_stove_susceptibility_rounded, ymin=proportionRiskyChoice_b2_lower, ymax=proportionRiskyChoice_b2_upper), fill='grey20', alpha=1/2)+
geom_ribbon(data=social_learning_model_validation_1022_riskID11_summary%>%dplyr::filter(condition_dummy==1&soc_mean_category=='mild'), mapping=aes(hot_stove_susceptibility_rounded, ymin=proportionRiskyChoice_b2_lower, ymax=proportionRiskyChoice_b2_upper), fill='orange', alpha=1/2)+
geom_point(data = fit_AL_indiv_riskID11_parameters, mapping=aes(hot_stove_susceptibility_trancated, risky_choice_mean), colour='grey20', shape = 17)+ # shape=17: triangle
geom_point(data = fit_SL00_riskID11_parameters, mapping=aes(hot_stove_susceptibility_trancated,risky_choice_mean, colour=soc_mean_SL00_multiVar_LKJ), shape = 20) +
geom_line(data=social_learning_model_validation_1022_riskID11_summary%>%dplyr::filter(condition_dummy==0), mapping=aes(hot_stove_susceptibility_rounded, proportionRiskyChoice_b2_mid), size=1.0)+
geom_line(data=social_learning_model_validation_1022_riskID11_summary%>%dplyr::filter(condition_dummy==1), mapping=aes(hot_stove_susceptibility_rounded, proportionRiskyChoice_b2_mid, group=soc_mean_category, colour=mean(soc_mean)), size=1.0)+
geom_line(data=social_learning_model_validation_1022_riskID11_summary_reallyHighSigma, mapping=aes(hot_stove_susceptibility_rounded, proportionRiskyChoice_b2_mid, group=soc_mean_category, colour=mean(soc_mean)), linetype = 'dashed', size=1.0)+
scale_colour_viridis_c(expression('Copying weight \U03C3'[i]), begin = 0.2, end = 0.9, option='plasma', direction=-1)+
myTheme_Arial()+
xlim(c(0,6.5))+
labs(
x = expression(atop('Susceptibility to the hot stove effect', paste(alpha[i], '(', beta[i], '+1)'))),
y = 'Mean proportion of choosing\nthe optimal risky option',
title = 'The 1-risky-3-safe task \n(N = 148)') +
## FIX: was `legend.position = NaN`, which is not a valid value for
## theme(legend.position) ("none"/"left"/"right"/"bottom"/"top" or a numeric
## vector of length two). "none" hides this panel's legend as intended
## (fig6_a and fig6_c carry their own legends).
theme(legend.position = "none")+
theme(legend.title = element_text(size=12))+
theme(legend.text = element_text(size=11))+
NULL -> fig6_b
## Figure 6c -- the 2-risky-2-safe task (N = 151).
## Same structure as fig6_b: posterior-prediction ribbons (grey = individual
## condition, orange = 'mild' copying-weight category of the group condition),
## per-participant fits as points, and posterior-mean lines by condition.
## NOTE: geom order defines the layer stacking; do not reorder.
ggplot() +
# chance level: 2 risky options among 4, best risky = 1 of 4
geom_segment(aes(x=0,xend=6,y=0.25,yend=0.25),colour="grey30", size=0.5) +
geom_ribbon(data=social_learning_model_validation_1022_riskID12_summary%>%dplyr::filter(condition_dummy==0), mapping=aes(hot_stove_susceptibility_rounded, ymin=proportionRiskyChoice_b2_lower, ymax=proportionRiskyChoice_b2_upper), fill='grey20', alpha=1/2)+
geom_ribbon(data=social_learning_model_validation_1022_riskID12_summary%>%dplyr::filter(condition_dummy==1&soc_mean_category=='mild'), mapping=aes(hot_stove_susceptibility_rounded, ymin=proportionRiskyChoice_b2_lower, ymax=proportionRiskyChoice_b2_upper), fill='orange', alpha=1/2)+
geom_point(data = fit_AL_indiv_riskID12_parameters, mapping=aes(hot_stove_susceptibility_trancated, risky_choice_mean), colour='grey20', shape = 17)+ # shape=5: diamond
geom_point(data = fit_SL00_riskID12_parameters, mapping=aes(hot_stove_susceptibility_trancated,risky_choice_mean, colour=soc_mean_SL00_multiVar_LKJ), shape = 20) +
geom_line(data=social_learning_model_validation_1022_riskID12_summary%>%dplyr::filter(condition_dummy==0), mapping=aes(hot_stove_susceptibility_rounded, proportionRiskyChoice_b2_mid), size=1.0)+
geom_line(data=social_learning_model_validation_1022_riskID12_summary%>%dplyr::filter(condition_dummy==1), mapping=aes(hot_stove_susceptibility_rounded, proportionRiskyChoice_b2_mid, group=soc_mean_category, colour=mean(soc_mean)), size=1.0)+
# dashed line: counterfactual prediction with a very high copying weight
geom_line(data=social_learning_model_validation_1022_riskID12_summary_reallyHighSigma, mapping=aes(hot_stove_susceptibility_rounded, proportionRiskyChoice_b2_mid, group=soc_mean_category, colour=mean(soc_mean)), linetype = "dashed", size=1.0)+
scale_colour_viridis_c(expression('Copying weight \U03C3'[i]), begin = 0.2, end = 0.9, option='plasma', direction=-1)+
myTheme_Arial()+
xlim(c(0,6.5))+
labs(
x = expression(atop('Susceptibility to the hot stove effect', paste(alpha[i], '(', beta[i], '+1)'))),
y = 'Mean proportion of choosing\nthe optimal risky option',
title = 'The 2-risky-2-safe task \n(N = 151)') +
# this panel shows the legend inside the plotting area
theme(legend.position = c(0.75, 0.7))+
theme(legend.title = element_text(size=12))+
theme(legend.text = element_text(size=11))+
NULL -> fig6_c
## Figure 6: the three task panels side by side.
## The outer parentheses make the assignment auto-print the combined figure.
## NOTE(review): the commented-out common.legend/legend arguments suggest a
## shared legend was tried and abandoned; each panel handles its own legend.
(
figure_exp_model_pred <- ggarrange(fig6_a, fig6_b, fig6_c
# , common.legend = TRUE
# , legend = 'right'
, labels = c('','',''), ncol = 3, align = 'v'
)
)
##############################################3
## Supplementary figure
## Each group's behaviour
##############################################
## ---------------------------------------------------------------------------
## Per-group, per-round choice summaries for the supplementary figure.
## For each data set: label solo players as 'Individual', compute the number
## of demonstrators observed per trial (total_n), then summarise the mean
## choice proportion per round and per group, and each group's modal size.
## ---------------------------------------------------------------------------
library(ggpubr)
# two-ab task
## group_category = room id; solo participants are pooled as 'Individual'
allBehaviour0820$group_category <- allBehaviour0820$room
allBehaviour0820$group_category[which(allBehaviour0820$indivOrGroup==0)] <- 'Individual'
## total demonstrators observed = safe-choosers + risky-choosers; NA -> 0
allBehaviour0820$total_n <- allBehaviour0820$socialFreq_safe + allBehaviour0820$socialFreq_risky
allBehaviour0820$total_n[which(is.na(allBehaviour0820$total_n))] <- 0
allBehaviour0820 %>%
group_by(round, group_category) %>%
summarise(mean_risky_choice_prob = mean(choice_num, na.rm=TRUE)
# modal total_n: the most frequent value of table(total_n)
, total_n = table(total_n) %>% which.max() %>% names() %>% as.numeric()
) -> allBehaviour0820_each_group
allBehaviour0820_each_group$riskDistributionId_factor <- 'twoArmed'
allBehaviour0820_each_group$mean_safe_choice_prob <- 1 - allBehaviour0820_each_group$mean_risky_choice_prob
## modal group size per group (Individuals are forced to size 1 below)
allBehaviour0820_each_group %>%
group_by(group_category, riskDistributionId_factor) %>%
summarise(groupSize = table(total_n) %>% which.max() %>% names() %>% as.numeric()) -> allBehaviour0820_group_size
allBehaviour0820_group_size$groupSize[which(allBehaviour0820_group_size$group_category=='Individual')] <- 1
# 4-ab task
allBehaviour1022_indiv$group_category <- 'Individual'
allBehaviour1022_group$group_category <- allBehaviour1022_group$room
## total demonstrators across the three safe options and the risky option
allBehaviour1022_indiv$total_n <- allBehaviour1022_indiv$socialFreq_safe1 +
allBehaviour1022_indiv$socialFreq_safe2 +
allBehaviour1022_indiv$socialFreq_safe3 +
allBehaviour1022_indiv$socialFreq_risky
allBehaviour1022_group$total_n <- allBehaviour1022_group$socialFreq_safe1 +
allBehaviour1022_group$socialFreq_safe2 +
allBehaviour1022_group$socialFreq_safe3 +
allBehaviour1022_group$socialFreq_risky
allBehaviour1022_indiv %>%
group_by(round, group_category, riskDistributionId_factor) %>%
summarise(mean_risky_choice_prob = mean(best_risky_choice, na.rm=TRUE)
, mean_safe_choice_prob = mean(best_safe_choice, na.rm=TRUE)
, total_n = table(total_n) %>% which.max() %>% names() %>% as.numeric()
) -> allBehaviour1022_indiv_each_group
allBehaviour1022_indiv_each_group %>%
group_by(group_category, riskDistributionId_factor) %>%
summarise(groupSize = table(total_n) %>% which.max() %>% names() %>% as.numeric()) -> allBehaviour1022_indiv_size
## individual condition: group size is 1 by definition
allBehaviour1022_indiv_size$groupSize <- 1
allBehaviour1022_group %>%
group_by(round, group_category, riskDistributionId_factor) %>%
summarise(mean_risky_choice_prob = mean(best_risky_choice, na.rm=TRUE)
, mean_safe_choice_prob = mean(best_safe_choice, na.rm=TRUE)
, total_n = table(total_n) %>% which.max() %>% names() %>% as.numeric()
) -> allBehaviour1022_group_each_group
allBehaviour1022_group_each_group %>%
group_by(group_category, riskDistributionId_factor) %>%
summarise(groupSize = table(total_n) %>% which.max() %>% names() %>% as.numeric()) -> allBehaviour1022_group_size
## Stack the per-round summaries from all three tasks, attach each group's
## modal size, and add human-readable task labels for the facet strips.
allBehaviour_all_each_group <- allBehaviour0820_each_group %>%
rbind(allBehaviour1022_indiv_each_group) %>%
rbind(allBehaviour1022_group_each_group)
all_group_size <- allBehaviour0820_group_size %>%
rbind(allBehaviour1022_group_size) %>%
rbind(allBehaviour1022_indiv_size)
## FIX: the original call was `left_join(all_group_size, key=group_category)`.
## left_join() has no `key` argument, so it was silently swallowed by `...`
## and the join fell back to a natural join on all shared columns
## (group_category and riskDistributionId_factor). Spelling the keys out with
## `by =` keeps the same behaviour but makes the join intentional.
allBehaviour_all_each_group <- allBehaviour_all_each_group %>%
left_join(all_group_size, by = c("group_category", "riskDistributionId_factor"))
allBehaviour_all_each_group$groupSize_with_n <- paste('n =', allBehaviour_all_each_group$groupSize)
## Task labels used as facet strips in the figures below
allBehaviour_all_each_group$task_name <- allBehaviour_all_each_group$riskDistributionId_factor
allBehaviour_all_each_group$task_name[which(allBehaviour_all_each_group$riskDistributionId_factor=='twoArmed')] <- '1-risky-1-safe'
allBehaviour_all_each_group$task_name[which(allBehaviour_all_each_group$riskDistributionId_factor=='Con: 0')] <- '1-risky-3-safe'
allBehaviour_all_each_group$task_name[which(allBehaviour_all_each_group$riskDistributionId_factor=='Con: 2')] <- '2-risky-2-safe'
## Chance-level reference line per task (1/2 with two options, 1/4 with four)
horizontal_lines <- data.frame(task_name = c('1-risky-1-safe', '1-risky-3-safe', '2-risky-2-safe')
, yintercept = c(0.5, 0.25, 0.25))
## Supplementary figure, top: each group's risky-choice trajectory (grey
## lines), with the across-group mean of the best-risky (red) and best-safe
## (blue) choice proportions, faceted by group size and task.
allBehaviour_all_each_group %>%
ggplot(aes(round, mean_risky_choice_prob)) +
geom_line(aes(group=group_category), alpha = 1/2, colour='grey20') +
stat_summary(fun = mean, geom="line", colour='red')+
stat_summary(aes(round, mean_safe_choice_prob), fun = mean, geom="line", colour='blue')+
# geom_hline(yintercept=0.5, linetype='dashed') +
# dashed chance-level line, defined per task in horizontal_lines above
geom_segment(data=horizontal_lines, aes(x=1,xend=70,y=yintercept,yend=yintercept), linetype="dashed", size=0.5) +
facet_grid(groupSize_with_n ~ task_name) +
myTheme_Arial() +
scale_y_continuous(breaks=c(0,0.5,1))+
labs(x = 'Trial', y = 'Proportion of choosing\nthe best risky option')+
NULL -> each_group_behaviour
## Supplementary figure, bottom: distribution of group sizes per task.
allBehaviour_all_each_group %>%
group_by(group_category, groupSize, task_name) %>%
summarise(n = n()) %>%
ggplot() +
geom_bar(aes(groupSize), stat = "count")+
facet_grid(. ~ task_name)+
labs(x = 'Group size', y = 'Count')+
myTheme_Arial() +
scale_y_continuous(breaks=c(0,4,8,12))+
xlim(c(2,8.5))+
theme(panel.grid.major = element_line(size = 0.5
, linetype = 'solid', colour='grey40'))+
NULL -> group_size_distribution
## Combine the two panels (the outer parentheses auto-print the result).
(
exp_group_behav_plot <- ggarrange(each_group_behaviour, group_size_distribution
, heights = c(2, 0.5)
# , common.legend = TRUE
# , legend = 'right'
, labels = c('','',''), ncol = 1, align = 'v'
)
)
## cairo_pdf keeps the Unicode axis labels intact in the PDF output
ggsave(file = "exp_group_behav_plot.pdf"
, plot = exp_group_behav_plot
, dpi = 600, width = 9, height = 10
, device = cairo_pdf
)
########################################3
## relationship between group size vs. parameters?
##################################################
## Attach each group's size and the fitted social-learning parameters to the
## per-individual summaries, separately for each task, then stack them into
## one data frame for the group-size analyses below.
allBehaviour0820_social_summarised_t35_groupSize <- allBehaviour0820_social_summarised_t35 %>%
left_join(all_group_size, by = c("groupID" = "group_category")) %>%
left_join(fit_parameters_group_SL00_mcmc, by='sub')
allBehaviour1022_group_riskID11_summarised_t35_groupSize <- allBehaviour1022_group_riskID11_summarised_t35 %>%
left_join(all_group_size, by = c("groupID" = "group_category")) %>%
left_join(fit_SL00_riskID11_parameters, by='sub')
allBehaviour1022_group_riskID12_summarised_t35_groupSize <- allBehaviour1022_group_riskID12_summarised_t35 %>%
left_join(all_group_size, by = c("groupID" = "group_category")) %>%
left_join(fit_SL00_riskID12_parameters, by='sub')
## Pull out the plotting columns from one of the joined data frames.
## (The .x suffixes come from columns duplicated across the two joins above.)
## Extracting per data frame and row-binding -- instead of c()-ing every
## column across the three data frames, as before -- removes eight
## near-identical c(...) lines and keeps factor columns coerced consistently.
extract_group_size_columns <- function(df) {
data.frame(
amazonID = df$amazonID.x,
groupSize = df$groupSize,
groupID = df$groupID.x,
riskDistributionId_factor = df$riskDistributionId_factor,
alpha_mean_SL00_multiVar_LKJ = df$alpha_mean_SL00_multiVar_LKJ,
beta_mean_SL00_multiVar_LKJ = df$beta_mean_SL00_multiVar_LKJ,
soc_mean_SL00_multiVar_LKJ = df$soc_mean_SL00_multiVar_LKJ,
theta_mean_SL00_multiVar_LKJ = df$theta_mean_SL00_multiVar_LKJ
)
}
group_size_vs_parameters <- rbind(
extract_group_size_columns(allBehaviour0820_social_summarised_t35_groupSize),
extract_group_size_columns(allBehaviour1022_group_riskID11_summarised_t35_groupSize),
extract_group_size_columns(allBehaviour1022_group_riskID12_summarised_t35_groupSize)
)
## Human-readable task labels; the default covers the 'twoArmed' task
group_size_vs_parameters$task_name <- '1-risky-1-safe'
group_size_vs_parameters$task_name[which(group_size_vs_parameters$riskDistributionId_factor=='Con: 0')] <- '1-risky-3-safe'
group_size_vs_parameters$task_name[which(group_size_vs_parameters$riskDistributionId_factor=='Con: 2')] <- '2-risky-2-safe'
## Classify individuals by the sign of the fitted conformity exponent theta
group_size_vs_parameters$pos_vs_neg <- 'Positive frequency dependence'
group_size_vs_parameters$pos_vs_neg[which(group_size_vs_parameters$theta_mean_SL00_multiVar_LKJ < 0)] <- 'Negative frequency dependence'
group_size_vs_parameters$pos_vs_neg <- factor(group_size_vs_parameters$pos_vs_neg, levels=c('Positive frequency dependence', 'Negative frequency dependence'))
## just correlation: Pearson correlations between group size and the fitted
## social-learning parameters (copying weight sigma, conformity exponent
## theta), computed per task among positive-frequency-dependent individuals
## only. cor.test() results auto-print at the top level.
pos_freq_dep <- dplyr::filter(group_size_vs_parameters, pos_vs_neg == 'Positive frequency dependence')
task_1r1s <- dplyr::filter(pos_freq_dep, task_name == '1-risky-1-safe')
cor.test(task_1r1s$groupSize, task_1r1s$soc_mean_SL00_multiVar_LKJ)
cor.test(task_1r1s$groupSize, task_1r1s$theta_mean_SL00_multiVar_LKJ)
task_1r3s <- dplyr::filter(pos_freq_dep, task_name == '1-risky-3-safe')
cor.test(task_1r3s$groupSize, task_1r3s$soc_mean_SL00_multiVar_LKJ)
cor.test(task_1r3s$groupSize, task_1r3s$theta_mean_SL00_multiVar_LKJ)
task_2r2s <- dplyr::filter(pos_freq_dep, task_name == '2-risky-2-safe')
cor.test(task_2r2s$groupSize, task_2r2s$soc_mean_SL00_multiVar_LKJ)
cor.test(task_2r2s$groupSize, task_2r2s$theta_mean_SL00_multiVar_LKJ)
## Group size vs. fitted copying weight (sigma), faceted by task and split
## by the sign of the fitted conformity exponent.
## FIX: the original geom_point() repeated the plot-level aes() verbatim
## (redundant, now inherited), and geom_smooth() was given `geom='line'` --
## geom_smooth() has no such argument, so ggplot2 warned
## "Ignoring unknown parameters" and drew the same lm line regardless
## (se=FALSE already suppresses the ribbon). Both are removed; the rendered
## plot is unchanged, minus the warning.
group_size_vs_parameters %>%
ggplot(aes(groupSize, soc_mean_SL00_multiVar_LKJ, colour=pos_vs_neg, shape=pos_vs_neg)) +
geom_point()+
geom_smooth(se=FALSE, method='lm')+
scale_colour_manual(name='', values=c('red','blue'))+
scale_shape_manual(name='', values=c(2, 18))+
myTheme_Arial()+
facet_grid(. ~ task_name)+
labs(x = 'Group Size', y = 'Fit social learning weight \U03C3')+
border()+
NULL
## Group size vs. fitted conformity exponent (theta), faceted by task and
## split by the sign of theta.
## FIX: `geom='line'` removed from geom_smooth() -- geom_smooth() has no
## `geom` argument, so ggplot2 warned "Ignoring unknown parameters" and drew
## the same lm line regardless (se=FALSE already suppresses the ribbon).
group_size_vs_parameters %>%
ggplot(aes(groupSize, theta_mean_SL00_multiVar_LKJ, colour=pos_vs_neg, shape=pos_vs_neg)) +
geom_point()+
geom_smooth(se=FALSE, method='lm')+
scale_colour_manual(name='', values=c('red','blue'))+
scale_shape_manual(name='', values=c(2, 18))+
myTheme_Arial()+
facet_grid(. ~ task_name)+
labs(x = 'Group Size', y = 'Fit conformity exponent \U03B8')+
border()+
NULL
| /drawing_figures_rev2.R | no_license | WataruToyokawa/ToyokawaGaissmaier2021 | R | false | false | 75,513 | r | ###############################################################################################
##
## Figures of the collective rescue paper (ToyokawaGaissmaier2021)
## Wataru Toyokawa
## 07 November. 2020
##
###############################################################################################
library(tidyverse)
library(extrafont)
library(cowplot)
library(metR)
library(magick)
## Load Functions
# setwd("~/analysis_repo") #<------- Set this folder as a working directory
# setwd("~/Dropbox/wataru/papers/RiskySocialLearning/draft/analysis_repo") #<------- Set this folder as a working directory
source("agentBasedSim/functions.R")
## ========================================================
#
# figure 1: the collective rescue in the stable environment
#
# =========================================================
# schematic figure
## Panel (a): schematic of the stable 1-risky-1-safe task, loaded from a
## pre-rendered PNG via cowplot.
stable_2AB_scheme <- cowplot::ggdraw() + cowplot::draw_image("agentBasedSim/stable_2AB_scheme.png", scale = 1)
# figures of the simulation
## Agent-based simulation sweep: summarise the proportion of safe choices
## per parameter combination (group size, learning rate, inverse
## temperature, copying weight sigma, conformity exponent theta).
socialLearningParamSearch_riskPrem150_Gaussian_indivDiff_data <- read_csv("agentBasedSim/socialLearningParamSearch_riskPrem150_Gaussian_indivDiff_data.csv")
socialLearningParamSearch_riskPrem150_Gaussian_indivDiff_summary <- socialLearningParamSearch_riskPrem150_Gaussian_indivDiff_data %>%
group_by(groupSize, learningRate, invTemperature, copyRate, conformityExp) %>%
summarise(
mean_proportionSafeChoice = mean(proportionSafeChoice, na.rm = TRUE),
median_proportionSafeChoice = median(proportionSafeChoice, na.rm = TRUE),
sd_proportionSafeChoice = sd(proportionSafeChoice, na.rm = TRUE)
)
## Facet-strip label columns, e.g. "Copying weight\n<sigma> = 0.25"
## (\U03C3 = sigma, \U03b8 = theta, \U03b2 = beta).
socialLearningParamSearch_riskPrem150_Gaussian_indivDiff_summary$copyRate_factor = paste(rep('Copying weight\n\U03C3 = ', nrow(socialLearningParamSearch_riskPrem150_Gaussian_indivDiff_summary)), socialLearningParamSearch_riskPrem150_Gaussian_indivDiff_summary$copyRate, sep ='')
socialLearningParamSearch_riskPrem150_Gaussian_indivDiff_summary$conformityExp_factor = paste(rep('Conformity exponent\n\U03b8 = ', nrow(socialLearningParamSearch_riskPrem150_Gaussian_indivDiff_summary)), socialLearningParamSearch_riskPrem150_Gaussian_indivDiff_summary$conformityExp, sep ='')
socialLearningParamSearch_riskPrem150_Gaussian_indivDiff_summary$invTemperature_factor = paste(rep('Inv. temperature\n\U03b2 = ', nrow(socialLearningParamSearch_riskPrem150_Gaussian_indivDiff_summary)), socialLearningParamSearch_riskPrem150_Gaussian_indivDiff_summary$invTemperature, sep ='')
## Adding Denrell (2007)'s analytical solution
## Asymptotic probability of choosing the risky option under individual
## reinforcement learning (Denrell 2007), for a risky option paying
## Gaussian noise with mean `mu` and standard deviation `sd`.
## `alpha` = learning rate, `beta` = inverse temperature.
## All arguments are vectorised elementwise.
Denrell2007 <- function(alpha, beta, mu, sd) {
  z <- alpha * (beta^2) * (sd^2) / (2 * (2 - alpha)) - beta * mu
  1 / (1 + exp(z))
}
## when mu = surePayoff + 0.5 and sd = 1, the above function can be reduced as follows:
## The beta-value at which Denrell's (2007) asymptotic risky-choice
## probability equals 1/2 when mu = surePayoff + 0.5 and sd = 1 (see the
## comment above): the indifference curve beta = (2 - alpha) / alpha,
## drawn as the dashed line in the raster figures below.
Denrell2007Solution <- function(alpha) {
  (2 - alpha) / alpha
}
## Evaluate Denrell's (2007) asymptotic risky-choice probability on a grid of
## learning rates (alpha) and inverse temperatures (beta), with mu = 0.5 and
## sd = 1. expand.grid() varies its first argument fastest, which reproduces
## the original nested-loop row order (alpha outer, beta inner) without
## growing vectors one element at a time with append().
## (The intermediate alphaArray/betaArray/Denrell2007RiskyChoice vectors were
## only ever used to build this data frame and are no longer created.)
Denrell2007Simulation <- expand.grid(beta = seq(0, 10, 1), alpha = seq(0, 1, 0.1))[, c("alpha", "beta")]
## Denrell2007() is pure vectorised arithmetic, so the whole grid can be
## evaluated in one call.
Denrell2007Simulation$riskyChoiceProb <- Denrell2007(Denrell2007Simulation$alpha,
                                                     Denrell2007Simulation$beta,
                                                     mu = 0.5, sd = 1)
# (Denrell2007Simulation %>%
# ggplot(aes(alpha, beta))+
# geom_raster(aes(fill = riskyChoiceProb), stat = 'identity')+
# stat_function(fun=Denrell2007Solution, color='black', linetype='dashed', size=2/3)+
# scale_fill_gradient2(midpoint = 0.5, low = "blue", mid = "grey90", high = "red")+
# labs(x=expression(paste('Learning rate ',alpha,sep="")), y=expression(paste('Inverse temperature ',beta,sep="")), title='', fill="Proportion of \nchoosing \nthe risky option")+
# myTheme_Arial()+
# theme(axis.text.x = element_text(angle = 90))+
# theme(strip.text.y = element_text(angle = 0))+
# theme(legend.text = element_text(angle = 0))+
# #theme(legend.position = 'top')+
# ylim(c(0,10))+
# NULL -> Denrell2007_figure_analytical
# )
## Figure 1b: asocial baseline (copyRate == 0). Raster of the proportion of
## risky choices over (alpha, beta), with Denrell's (2007) analytical
## indifference curve beta = (2 - alpha)/alpha as a dashed line.
## The outer parentheses auto-print the plot after assignment.
(socialLearningParamSearch_riskPrem150_Gaussian_indivDiff_summary %>%
dplyr::filter(copyRate ==0) %>%
ggplot() +
# fill = risky-choice proportion (1 - safe-choice proportion)
geom_raster(mapping = aes(learningRate, invTemperature, fill = 1-mean_proportionSafeChoice), stat = 'identity') +
labs(x=expression(paste('Learning rate ',alpha,sep="")),
y=expression(paste('Inverse temperature ',beta,sep="")),
#title='Gaussian noise\n mu=0.5; sigma=1',
fill = "Proportion of \nchoosing \nthe risky option"
#title='Gaussian noise\n mu=0.5; sigma=1', fill = "Proportion of\nsafe choice"
)+
#scale_fill_viridis(limits = c(0.45, 1), option="magma")+
stat_function(fun=Denrell2007Solution, color='black', linetype='dashed', size=2/3)+
# diverging fill centred at chance level (0.5): red = risky, blue = safe
scale_fill_gradient2(midpoint = 0.5, high = "red", mid = "grey90", low = "blue")+
ylim(c(0,10))+
# myTheme_gillsansMT()+
myTheme_Arial()+
theme(axis.text.x = element_text(angle = 90))+
theme(strip.text.y = element_text(angle = 0))+
theme(legend.text = element_text(angle = 0))+
# theme(legend.position = 'top')+
#facet_grid(copyRate_factor ~ conformityExp_factor)+
NULL -> Denrell2007_figure
)
## Figure 1c: same raster for social learners, faceted by a subset of
## copying weights (0.25, 0.5) and conformity exponents (1, 4), with
## Denrell's (2007) asocial indifference curve overlaid for reference.
# plot with Denrell (2007) curve
(socialLearningParamSearch_riskPrem150_Gaussian_indivDiff_summary %>%
dplyr::filter(copyRate %in% c(0.25, 0.5)) %>%
dplyr::filter(conformityExp %in% c(1, 4)) %>%
ggplot() +
geom_raster(mapping = aes(learningRate, invTemperature, fill = 1-mean_proportionSafeChoice), stat = 'identity') +
labs(x=expression(paste('Learning rate ',alpha,sep="")),
y=expression(paste('Inverse temperature ',beta,sep="")),
#title='Gaussian noise\n mu=0.5; sigma=1',
fill = "Proportion of \nchoosing \nthe risky option"
#title='Gaussian noise\n mu=0.5; sigma=1', fill = "Proportion of\nsafe choice"
)+
#scale_fill_viridis(limits = c(0.45, 1), option="magma")+
stat_function(fun=Denrell2007Solution, color='black', linetype='dashed', size=2/3)+
#geom_contour(mapping = aes(learningRate, invTemperature, z = mean_proportionSafeChoice), breaks = c(0.5), colour = 'black')+
scale_fill_gradient2(midpoint = 0.5, high = "red", mid = "grey90", low = "blue", breaks=c(0.1,0.5,0.9), labels=c(0.1,0.5,0.9) )+
ylim(c(0,10))+
#myTheme_gillsansMT()+
myTheme_Arial()+
theme(axis.text.x = element_text(angle = 90))+
theme(strip.text = element_text(size=12))+
theme(legend.text = element_text(angle = 0))+
theme(legend.position = 'top')+
# theme(legend.position = NaN)+
facet_grid(copyRate_factor ~ conformityExp_factor)+
NULL -> Denrell2007_figure_social_learning
)
## Supplementary version of figure 1c: all non-zero copying weights and all
## conformity exponents (the main figure shows only a subset of facets).
socialLearningParamSearch_riskPrem150_Gaussian_indivDiff_summary %>%
dplyr::filter(copyRate !=0) %>%
# dplyr::filter(conformityExp %in% c(1, 4)) %>%
ggplot() +
geom_raster(mapping = aes(learningRate, invTemperature, fill = 1-mean_proportionSafeChoice), stat = 'identity') +
labs(x=expression(paste('Learning rate ',alpha,sep="")),
y=expression(paste('Inverse temperature ',beta,sep="")),
#title='Gaussian noise\n mu=0.5; sigma=1',
fill = "Proportion of \nchoosing \nthe risky option"
#title='Gaussian noise\n mu=0.5; sigma=1', fill = "Proportion of\nsafe choice"
)+
#scale_fill_viridis(limits = c(0.45, 1), option="magma")+
stat_function(fun=Denrell2007Solution, color='black', linetype='dashed', size=2/3)+
#geom_contour(mapping = aes(learningRate, invTemperature, z = mean_proportionSafeChoice), breaks = c(0.5), colour = 'black')+
scale_fill_gradient2(midpoint = 0.5, high = "red", mid = "grey90", low = "blue")+
scale_y_continuous(limits = c(0, 10), breaks = c(1,5,10))+
#myTheme_gillsansMT()+
myTheme_Arial()+
theme(axis.text.x = element_text(angle = 90))+
theme(strip.text.y = element_text(angle = 0))+
theme(legend.text = element_text(angle = 90))+
theme(legend.position = 'top')+
# theme(legend.position = NaN)+
facet_grid(copyRate_factor ~ conformityExp_factor)+
NULL -> Denrell2007_figure_social_learning_full
## difference between social learners' performance and sole reinforcement learners
sigmaList = c(0, 0.25, 0.5, 0.75, 0.9)
thetaList = c(1, 2, 4, 8)
socialLearningParamSearch_riskPrem150_Gaussian_indivDiff_summary_baseline <- socialLearningParamSearch_riskPrem150_Gaussian_indivDiff_summary %>% dplyr::filter(copyRate==0)
socialLearningParamSearch_riskPrem150_Gaussian_indivDiff_summary <- socialLearningParamSearch_riskPrem150_Gaussian_indivDiff_summary %>% arrange(copyRate, conformityExp) # re-ordering the data frame
socialLearningParamSearch_riskPrem150_Gaussian_indivDiff_summary$mean_proportionSafeChoice_diff_from_baseline <- socialLearningParamSearch_riskPrem150_Gaussian_indivDiff_summary$mean_proportionSafeChoice - rep(socialLearningParamSearch_riskPrem150_Gaussian_indivDiff_summary_baseline$mean_proportionSafeChoice, (length(sigmaList)-1) * length(thetaList) + 1)
## Figure 1d: change in risky choice caused by social learning (difference
## from the asocial baseline; orange = more risky, purple = less risky),
## for the subset of facets shown in the main figure.
# plot with Denrell (2007) curve
(socialLearningParamSearch_riskPrem150_Gaussian_indivDiff_summary %>%
dplyr::filter(copyRate %in% c(0.25, 0.5)) %>%
dplyr::filter(conformityExp %in% c(1, 4)) %>%
ggplot() +
# fill = increase in risky choice = -(safe-choice difference from baseline)
geom_raster(mapping = aes(learningRate, invTemperature, fill = -mean_proportionSafeChoice_diff_from_baseline), stat = 'identity') +
labs(x=expression(paste('Learning rate ',alpha,sep="")),
y=expression(paste('Inverse temperature ',beta,sep="")),
#title='Gaussian noise\n mu=0.5; sigma=1',
fill = "Changes in \nthe risky choice by \nsocial learning"
#title='Gaussian noise\n mu=0.5; sigma=1', fill = "Proportion of\nsafe choice"
)+
#scale_fill_viridis(limits = c(0.45, 1), option="magma")+
stat_function(fun=Denrell2007Solution, color='black', linetype='dashed', size=2/3)+
#geom_contour(mapping = aes(learningRate, invTemperature, z = mean_proportionSafeChoice), breaks = c(0.5), colour = 'black')+
scale_fill_gradient2(midpoint = 0, high = "darkorange", mid = "grey90", low = "darkorchid3", breaks=c(-0.2,0,0.4),labels=c(-0.2,0,0.4))+
ylim(c(0,10))+
#myTheme_gillsansMT()+
myTheme_Arial()+
theme(axis.text.x = element_text(angle = 90))+
theme(strip.text = element_text(size=12))+
theme(legend.text = element_text(angle = 0))+
theme(legend.position = 'top')+
facet_grid(copyRate_factor ~ conformityExp_factor)+
NULL -> Denrell2007_figure_diff_from_baseline
)
## Supplementary version of figure 1d: all non-zero copying weights and all
## conformity exponents.
socialLearningParamSearch_riskPrem150_Gaussian_indivDiff_summary %>%
dplyr::filter(copyRate != 0) %>%
#dplyr::filter(conformityExp %in% c(1, 4)) %>%
ggplot() +
geom_raster(mapping = aes(learningRate, invTemperature, fill = -mean_proportionSafeChoice_diff_from_baseline), stat = 'identity') +
labs(x=expression(paste('Learning rate ',alpha,sep="")),
y=expression(paste('Inverse temperature ',beta,sep="")),
#title='Gaussian noise\n mu=0.5; sigma=1',
fill = "Increases in \nthe risky choice by \nsocial learning"
#title='Gaussian noise\n mu=0.5; sigma=1', fill = "Proportion of\nsafe choice"
)+
#scale_fill_viridis(limits = c(0.45, 1), option="magma")+
stat_function(fun=Denrell2007Solution, color='black', linetype='dashed', size=2/3)+
#geom_contour(mapping = aes(learningRate, invTemperature, z = mean_proportionSafeChoice), breaks = c(0.5), colour = 'black')+
scale_fill_gradient2(midpoint = 0, high = "darkorange", mid = "grey90", low = "darkorchid3")+
scale_y_continuous(limits = c(0, 10), breaks = c(1,5,10))+
#myTheme_gillsansMT()+
myTheme_Arial()+
theme(axis.text.x = element_text(angle = 90))+
# theme(axis.text.y = element_text(size=9))+
theme(strip.text.y = element_text(angle = 0))+
theme(legend.text = element_text(angle = 90))+
theme(legend.position = 'top')+
facet_grid(copyRate_factor ~ conformityExp_factor)+
NULL -> Denrell2007_figure_diff_from_baseline_full
## Figure 1
# Assemble panels a-d (schematic, baseline, social learning, difference map)
# and write the composite figure to the manuscript's revision folder.
# NOTE(review): the output path is machine-specific (~/Dropbox/...) — it will
# fail on other machines; consider a relative output directory.
figure1_left <- plot_grid(stable_2AB_scheme, Denrell2007_figure, labels = c('a','b'), ncol = 1, align = 'v', label_size = 15)
figure1 <- plot_grid(figure1_left, Denrell2007_figure_social_learning, Denrell2007_figure_diff_from_baseline, labels = c('','c','d'), ncol = 3, align = 'v', label_size = 15)
ggsave(file = '~/Dropbox/wataru/papers/RiskySocialLearning/draft/submissions/eLife/Revision2/exp_reanalysis_result/figure1.png', plot = figure1, dpi = 300, width = 15, height = 6)
## ========================================================
#
# figure 2: the collective rescue in the stable environment
#
# =========================================================
# Plot the individual learning from a different angle
# According to Denrell 2007, the asymptotic equilibrium of risky choice rate is
# Pr^* = 1 / (1 + exp(Z)), where Z = -beta/(2*(alpha-2)) * (alpha*(beta+1) - 2)
#
# Composite "hot stove susceptibility" index: alpha * (beta + 1).
# (The column name keeps the original spelling "suceptibility" because the
# plot code below refers to it by that exact name.)
socialLearningParamSearch_riskPrem150_Gaussian_indivDiff_summary$hot_stove_suceptibility <- socialLearningParamSearch_riskPrem150_Gaussian_indivDiff_summary$learningRate * (socialLearningParamSearch_riskPrem150_Gaussian_indivDiff_summary$invTemperature + 1)
# Analytical asymptotic probability of choosing the risky option under
# asocial reinforcement learning (Denrell 2007), parameterised by the hot
# stove susceptibility X = alpha * (beta + 1) for a fixed inverse
# temperature beta.
#
# Args:
#   X:    numeric vector of susceptibility values alpha * (beta + 1).
#   beta: inverse temperature (scalar).
#
# Returns:
#   Numeric vector of risky-choice probabilities. NA where X >= beta + 1
#   (i.e. alpha >= 1, outside the model's valid range); 1/2 at X == 0
#   (no learning, hence chance level).
Pr_when_beta <- function(X, beta) {
  Z <- -beta / (2 * (X / (beta + 1)) - 2) * (X - 2)
  prob <- 1 / (1 + exp(Z))
  prob[X >= beta + 1] <- NA_real_  # alpha >= 1: formula undefined
  prob[X == 0] <- 1 / 2            # no learning: indifference
  prob
}
# Exploratory plot (not saved): the analytical risky-choice probability
# Pr_when_beta as a function of X = alpha * (beta + 1), one curve per
# beta in 2..10 (mapply builds one stat_function layer per value).
ggplot(mapping = aes(x=X)) +
mapply(
function(b, co){ stat_function(data=data.frame(X=c(0,8)), fun=Pr_when_beta, args=list(beta=b), aes_q(color=co)) },
seq(2,10,1),
seq(2,10,1)
)+
# Dashed references: X = 2 is the susceptibility threshold, y = 0.5 is chance.
geom_vline(xintercept=2, linetype='dashed')+
geom_hline(yintercept=0.5, linetype='dashed')+
myTheme_Arial()+
scale_colour_viridis_c(expression(beta))+
labs(
y='Probability of choosing\nthe risky alternative',
x=expression(alpha * (beta + 1))
)+
NULL
# Duplicate the asocial baseline rows (copyRate == 0) and relabel the copies
# as conformityExp = 4 so the baseline appears in BOTH conformity-exponent
# facet columns of Figure 2 (it has no conformity exponent of its own).
socialLearningParamSearch_riskPrem150_Gaussian_indivDiff_summary_added <- socialLearningParamSearch_riskPrem150_Gaussian_indivDiff_summary %>% rbind(socialLearningParamSearch_riskPrem150_Gaussian_indivDiff_summary %>% dplyr::filter(copyRate==0))
added_length <- socialLearningParamSearch_riskPrem150_Gaussian_indivDiff_summary %>% dplyr::filter(copyRate==0) %>% nrow()
all_length <- socialLearningParamSearch_riskPrem150_Gaussian_indivDiff_summary_added %>% nrow()
# The appended rows occupy the tail of the combined table.
socialLearningParamSearch_riskPrem150_Gaussian_indivDiff_summary_added$conformityExp[(all_length-added_length+1):all_length] <- 4
socialLearningParamSearch_riskPrem150_Gaussian_indivDiff_summary_added$conformityExp_factor[(all_length-added_length+1):all_length] <- 'Conformity exponent\n\U03b8 = 4'
# Figure 2
# Simulated risky-choice rates (points) against the hot stove susceptibility
# index, overlaid on the analytical asocial curves (black lines) for
# beta = 3, 5, 7; colour = copying weight. Points above the curve indicate
# the "collective rescue" by social learning.
(socialLearningParamSearch_riskPrem150_Gaussian_indivDiff_summary_added %>%
dplyr::filter(copyRate %in% c(0, 0.25, 0.5)) %>%
dplyr::filter(conformityExp %in% c(1, 4)) %>%
dplyr::filter(invTemperature %in% c(3,5,7)) %>%
ggplot() +
geom_point(aes(hot_stove_suceptibility, 1-mean_proportionSafeChoice, colour=as.factor(copyRate))) +
stat_function(data=data.frame(X=c(0,8),invTemperature_factor='Inv. temperature\nβ = 3'), fun=Pr_when_beta, n = 1001, args=list(beta=3)) +
stat_function(data=data.frame(X=c(0,8),invTemperature_factor='Inv. temperature\nβ = 5'), fun=Pr_when_beta, n = 1001, args=list(beta=5)) +
stat_function(data=data.frame(X=c(0,8),invTemperature_factor='Inv. temperature\nβ = 7'), fun=Pr_when_beta, n = 1001, args=list(beta=7)) +
geom_vline(xintercept=2, linetype='dashed')+
geom_hline(yintercept=0.5, linetype='dashed')+
facet_grid(invTemperature_factor ~ conformityExp_factor) +
scale_colour_viridis_d(name = 'Copying weight\nσ')+
labs(
y='Probability of choosing\nthe risky alternative',
x = 'Susceptibility to the hot stove effect\nα(β + 1)'
# x=paste0('Susceptibility to the hot stove effect\n', expression(alpha (beta + 1)))
)+
xlim(c(0,8))+
myTheme_Arial()+
NULL -> collective_rescue_simplified_view
)
ggsave(file = '~/Dropbox/wataru/papers/RiskySocialLearning/draft/submissions/eLife/Revision2/exp_reanalysis_result/figure1_alternative.png', plot = collective_rescue_simplified_view, dpi = 300, width = 8, height = 6)
## ========================================================
#
# figure 2: Individual heterogeneity
#
# =========================================================
# Agent-based simulation summaries for groups heterogeneous in one
# parameter at a time (alpha, beta, sigma, theta). Paths are relative
# to the project root.
heterogeneous_groups_denrell2007task_alpha_summary <- read.csv('agentBasedSim/heterogeneous_groups_denrell2007task_alpha_summary.csv')
heterogeneous_groups_denrell2007task_beta_summary <- read.csv('agentBasedSim/heterogeneous_groups_denrell2007task_beta_summary.csv')
heterogeneous_groups_denrell2007task_sigma_summary <- read.csv('agentBasedSim/heterogeneous_groups_denrell2007task_sigma_summary.csv')
heterogeneous_groups_denrell2007task_theta_summary <- read.csv('agentBasedSim/heterogeneous_groups_denrell2007task_theta_summary.csv')
# alpha
# Collapse the alpha-heterogeneity simulations to one global mean per
# condition (individual vs group) and heterogeneity level; used for the
# large diamond markers in the alpha-effect panel.
heterogeneous_groups_denrell2007task_alpha_summary_global <-
heterogeneous_groups_denrell2007task_alpha_summary %>%
group_by(indivOrGroup, variation_level) %>%
summarise(
mean_hot_stove_susceptibility = mean(hot_stove_susceptibility, na.rm=TRUE),
mean_safeChoiceProb = mean(mean_proportionSafeChoice_direct, na.rm=TRUE)
# mean_safeChoiceProb = mean(median_proportionSafeChoice_raw, na.rm=TRUE) %>% convert_alphaRaw_to_alpha()
)
# Redefine the analytical asymptotic risky-choice probability as a function
# of X = alpha * (beta + 1), without the X == 0 special case (this plot's
# domain starts above zero). NA where X >= beta + 1 (alpha >= 1).
Pr_when_beta <- function(X, beta) {
  log_odds <- -beta / (2 * (X / (beta + 1)) - 2) * (X - 2)
  p_risky <- 1 / (1 + exp(log_odds))
  p_risky[X >= beta + 1] <- NA
  p_risky
}
# Alpha-heterogeneity panel: group-condition simulations (coloured points
# and lines, one colour per heterogeneity level; diamonds = global means)
# against the individual condition (grey) and the analytical asocial curve.
(heterogeneous_groups_denrell2007task_alpha_summary %>%
dplyr::filter(indivOrGroup=='Group') %>%
ggplot()+
geom_vline(xintercept=2, linetype='dashed', colour='grey60')+
geom_hline(yintercept=0.5, linetype='dashed', colour='grey60')+
stat_function(data=data.frame(X=c(2,9), invTemperature_factor='β = 7', indivOrGroup='Individual'), fun=Pr_when_beta, args=list(beta=7)) +
# group condition
geom_point(aes(hot_stove_susceptibility, 1- mean_proportionSafeChoice, colour=as.factor(variation_level)),alpha=3/3)+
geom_line(aes(hot_stove_susceptibility, 1-mean_proportionSafeChoice, group=variation_level, colour=as.factor(variation_level)), alpha=3/3)+
geom_point(data=heterogeneous_groups_denrell2007task_alpha_summary_global%>%filter(variation_level!=0), aes(mean_hot_stove_susceptibility, 1-mean_safeChoiceProb, colour=as.factor(variation_level)), size=4, shape=18, alpha = 2/3)+
# individual condition
geom_point(data = heterogeneous_groups_denrell2007task_alpha_summary%>%filter(variation_level==0), aes(hot_stove_susceptibility, 1- mean_proportionSafeChoice), colour='grey30',alpha=3/3)+
geom_line(data = heterogeneous_groups_denrell2007task_alpha_summary%>%filter(variation_level==0), aes(hot_stove_susceptibility, 1-mean_proportionSafeChoice, group=variation_level), colour='grey30', alpha=3/3)+
# geom_point(data=heterogeneous_groups_denrell2007task_alpha_summary_global%>%filter(variation_level==0), aes(mean_hot_stove_susceptibility, 1-mean_safeChoiceProb), colour='grey30', size=4, shape=18, alpha = 2/3)+
labs(x=expression(paste("Hot stove susceptibility ",alpha[i],"", (beta[i]+1) ,sep="")),
y="Proportion of choosing\nthe risky option",
colour = "Individual\nheterogeneity",
title=expression(paste("Heterogeneous ", alpha[i], sep=""))
)+
ylim(c(0,1))+
xlim(c(0,8))+
myTheme_Arial()+
scale_colour_viridis_d(direction=-1)+
theme(legend.position = 'NaN')+
theme(plot.title = element_text(vjust = - 10, hjust = 0.7))+
NULL -> heterogeneous_groups_denrell2007task_alphaEffect_plot)
# beta
# Global means per condition and heterogeneity level for the beta panel.
# Safe-choice proportions are averaged on the raw (unbounded) scale and
# back-transformed via convert_alphaRaw_to_alpha().
heterogeneous_groups_denrell2007task_beta_summary_global <-
heterogeneous_groups_denrell2007task_beta_summary %>%
group_by(indivOrGroup, variation_level) %>%
summarise(
mean_hot_stove_susceptibility = mean(hot_stove_susceptibility, na.rm=TRUE),
mean_safeChoiceProb = mean(median_proportionSafeChoice_raw, na.rm=TRUE) %>% convert_alphaRaw_to_alpha())
# Analytical asymptotic risky-choice probability as a function of
# X = alpha * (beta + 1), holding the learning rate alpha fixed (the
# complement of Pr_when_beta, which holds beta fixed).
#
# Args:
#   X:     numeric vector of susceptibility values alpha * (beta + 1).
#   alpha: learning rate (scalar).
#
# Returns:
#   Numeric vector of risky-choice probabilities; NA where X < alpha
#   (which would imply beta < 0, outside the valid range).
Pr_when_alpha <- function(X, alpha) {
  # Note X / alpha - 1 recovers beta from the susceptibility index.
  Z <- (alpha * (X / alpha - 1)^2) / (2 * (2 - alpha)) - (X / alpha - 1) / 2
  prob <- 1 / (1 + exp(Z))
  prob[X < alpha] <- NA_real_  # beta would be negative: undefined
  prob
}
# Beta-heterogeneity panel: same layout as the alpha panel, with the
# analytical curve drawn for a fixed learning rate (Pr_when_alpha, alpha=0.5).
(heterogeneous_groups_denrell2007task_beta_summary %>%
dplyr::filter(indivOrGroup=='Group') %>%
ggplot()+
geom_vline(xintercept=2, linetype='dashed', colour='grey60')+
geom_hline(yintercept=0.5, linetype='dashed', colour='grey60')+
stat_function(data=data.frame(X=c(1,8), indivOrGroup='Individual'), fun=Pr_when_alpha, args=list(alpha=0.5), n=1001) +
geom_point(aes(hot_stove_susceptibility, 1- mean_proportionSafeChoice, colour=as.factor(variation_level)),alpha=3/3)+
geom_line(aes(hot_stove_susceptibility, 1-convert_alphaRaw_to_alpha(median_proportionSafeChoice_raw), group=variation_level, colour=as.factor(variation_level)), alpha=3/3)+
geom_point(data=heterogeneous_groups_denrell2007task_beta_summary_global%>%dplyr::filter(indivOrGroup=="Group"), aes(mean_hot_stove_susceptibility, 1-mean_safeChoiceProb, colour=as.factor(variation_level)), size=4, shape=18, alpha = 2/3)+
# individual condition
geom_point(data = heterogeneous_groups_denrell2007task_beta_summary%>%filter(variation_level==0), aes(hot_stove_susceptibility, 1- mean_proportionSafeChoice), colour='grey30',alpha=3/3)+
geom_line(data = heterogeneous_groups_denrell2007task_beta_summary%>%filter(variation_level==0), aes(hot_stove_susceptibility, 1-mean_proportionSafeChoice, group=variation_level), colour='grey30', alpha=3/3)+
# geom_point(data=heterogeneous_groups_denrell2007task_beta_summary_global%>%filter(variation_level==0), aes(mean_hot_stove_susceptibility, 1-mean_safeChoiceProb), colour='grey30', size=4, shape=18, alpha = 2/3)+
labs(x=expression(paste("Hot stove susceptibility ",alpha[i],"", (beta[i]+1) ,sep="")),
y="Proportion of choosing\nthe risky option",
colour = "Individual\nheterogeneity",
title=expression(paste("Heterogeneous ", beta[i], sep=""))
)+
ylim(c(0,1))+
xlim(c(0,8))+
myTheme_Arial()+
scale_colour_viridis_d(direction=-1)+
theme(legend.position = 'NaN')+
theme(plot.title = element_text(vjust = - 10, hjust = 0.7))+
NULL -> heterogeneous_groups_denrell2007task_betaEffect_plot)
# sigma
# Global means per condition and heterogeneity level for the sigma
# (copying weight) panel; safe-choice proportion averaged on the raw scale.
heterogeneous_groups_denrell2007task_sigma_summary_global <-
heterogeneous_groups_denrell2007task_sigma_summary %>%
group_by(indivOrGroup, variation_level) %>%
summarise(
mean_sigma = mean(sigma, na.rm=TRUE),
mean_safeChoiceProb = mean(median_proportionSafeChoice_raw, na.rm=TRUE) %>% convert_alphaRaw_to_alpha())
# Redefinition of the analytical risky-choice probability used as an
# (optional, currently commented-out) reference curve for the sigma panel.
# Here values are masked (NA) where X > beta, restricting the plotted domain.
Pr_when_beta <- function(X, beta) {
  exponent <- -beta / (2 * (X / (beta + 1)) - 2) * (X - 2)
  out <- 1 / (1 + exp(exponent))
  out[which(X > beta)] <- NA
  out
}
# Sigma-heterogeneity panel: risky choice as a function of the copying
# weight itself (no susceptibility axis here). Black diamond = the
# individual-condition global mean for reference.
(heterogeneous_groups_denrell2007task_sigma_summary %>%
dplyr::filter(indivOrGroup=="Group") %>%
ggplot()+
geom_hline(yintercept=0.5, linetype='dashed', colour='grey60')+
#stat_function(data=data.frame(X=c(2,9), invTemperature_factor='β = 7', indivOrGroup='Individual'), fun=Pr_when_beta, args=list(beta=7)) +
geom_point(aes(sigma, 1- mean_proportionSafeChoice, colour=as.factor(variation_level)),alpha=3/3)+
geom_line(aes(sigma, 1- mean_proportionSafeChoice, group=variation_level, colour=as.factor(variation_level)), alpha=3/3)+
geom_point(data=heterogeneous_groups_denrell2007task_sigma_summary_global%>%dplyr::filter(indivOrGroup=='Group'), aes(mean_sigma, 1-mean_safeChoiceProb, colour=as.factor(variation_level)), size=4, shape=18, alpha = 2/3)+
geom_point(data=heterogeneous_groups_denrell2007task_sigma_summary_global%>%dplyr::filter(indivOrGroup=='Individual'), aes(mean_sigma, 1-mean_safeChoiceProb), colour='black', size=4, shape=18, alpha = 2/3)+
labs(x=expression(paste("Social learning weight ", sigma[i], sep="")),
y="Proportion of choosing\nthe risky option",
colour = "Individual\nheterogeneity",
title=expression(paste("Heterogeneous ", sigma[i], sep=""))
)+
ylim(c(0,1))+
xlim(c(0,1))+
myTheme_Arial()+
scale_colour_viridis_d(direction=-1)+
theme(legend.position = 'NaN')+
theme(plot.title = element_text(vjust = - 10))+
NULL -> heterogeneous_groups_denrell2007task_sigmaEffect_plot)
# theta
# Global means per condition and heterogeneity level for the theta
# (conformity exponent) panel.
heterogeneous_groups_denrell2007task_theta_summary_global <-
heterogeneous_groups_denrell2007task_theta_summary %>%
group_by(indivOrGroup, variation_level) %>%
summarise(
mean_theta = mean(theta, na.rm=TRUE),
mean_safeChoiceProb = mean(median_proportionSafeChoice_raw, na.rm=TRUE) %>% convert_alphaRaw_to_alpha())
# Analytical risky-choice probability, redefined once more ahead of the
# theta panel (same definition as for the sigma panel: NA where X > beta).
Pr_when_beta <- function(X, beta) {
  z_value <- -beta / (2 * (X / (beta + 1)) - 2) * (X - 2)
  probabilities <- 1 / (1 + exp(z_value))
  outside_domain <- which(X > beta)
  probabilities[outside_domain] <- NA
  probabilities
}
# Theta-heterogeneity panel: risky choice as a function of the conformity
# exponent; black diamond = individual-condition global mean.
(heterogeneous_groups_denrell2007task_theta_summary %>%
dplyr::filter(indivOrGroup=="Group") %>%
ggplot()+
geom_hline(yintercept=0.5, linetype='dashed', colour='grey60')+
#stat_function(data=data.frame(X=c(2,9), invTemperature_factor='β = 7', indivOrGroup='Individual'), fun=Pr_when_beta, args=list(beta=7)) +
geom_point(aes(theta, 1- mean_proportionSafeChoice, colour=as.factor(variation_level)),alpha=3/3)+
geom_line(aes(theta, 1- mean_proportionSafeChoice, group=variation_level, colour=as.factor(variation_level)), alpha=3/3)+
geom_point(data=heterogeneous_groups_denrell2007task_theta_summary_global%>%dplyr::filter(indivOrGroup=='Group'), aes(mean_theta, 1-mean_safeChoiceProb, colour=as.factor(variation_level)), size=4, shape=18, alpha = 2/3)+
geom_point(data=heterogeneous_groups_denrell2007task_theta_summary_global%>%dplyr::filter(indivOrGroup=='Individual'), aes(mean_theta, 1-mean_safeChoiceProb), colour='black', size=4, shape=18, alpha = 2/3)+
labs(x=expression(paste("Conformity exponent ", theta[i], sep="")),
y="Proportion of choosing\nthe risky option",
colour = "Individual\nheterogeneity",
title=expression(paste("Heterogeneous ", theta[i], sep=""))
)+
ylim(c(0,1))+
xlim(c(-1,8))+
myTheme_Arial()+
scale_colour_viridis_d(direction=-1)+
theme(legend.position = 'NaN')+
theme(plot.title = element_text(vjust = - 10))+
NULL -> heterogeneous_groups_denrell2007task_thetaEffect_plot)
# Combine the four heterogeneity panels into a 2x2 grid.
# NOTE(review): "heteroeneity_plot" is a typo for "heterogeneity_plot";
# kept because later code may refer to it by this name.
(
heteroeneity_plot <- plot_grid(
heterogeneous_groups_denrell2007task_alphaEffect_plot,
heterogeneous_groups_denrell2007task_betaEffect_plot,
heterogeneous_groups_denrell2007task_sigmaEffect_plot,
heterogeneous_groups_denrell2007task_thetaEffect_plot,
labels = c('','','',''), ncol = 2, align = 'v')
)
## ========================================================
#
# figure 3: The dynamical model of collective behaviour
#
# =========================================================
## Analytical solution of asocial behavioural dynamics
# Analytical risk-seeking bias (N_R* - N_S*) of the asocial dynamical model,
# for population size n, "risk premium" e, and transition rates pH and pL.
noSocialCurveSimplest <- function(n, e, pH, pL) {
  numerator <- n * (pH - pL) * ((-1 + e) * pH + e * pL)
  denominator <- (pH + pL) * ((-1 + e) * pH - e * pL)
  -numerator / denominator
}
# Zero isocline of the risk-seeking bias: the value of e at which the
# bias N_R* - N_S* vanishes, as a function of pH and pL.
zeroIsoclineSimplest <- function(pH, pL) {
  pH / (pH + pL)
}
# Fig. 3a
# schematic figure
# Load the model schematic as an image panel via cowplot; the PNG path is
# relative to the project root.
schematic_simplest <- cowplot::ggdraw() + cowplot::draw_image("dynamicsModel/schematic_simplest.001.Times.png", scale = 1)
### Fig. 3b
## function plot
diagonalLine = function(x){return(x)}
# Fig. 3b: analytical risk-seeking bias curves, one per exploration rate pL,
# drawn with mapply-built stat_function layers (n = 20, pH = 0.7 fixed).
pLs <- c(0.1, 0.2, 0.4, 0.6)
(noSocialCurveSimplest_plot <- ggplot(data.frame(X=c(0,1)), aes(x=X)) +
#stat_function(fun = diagonalLine, linetype = 'dashed', color = 'grey')+
geom_vline(xintercept = 0.5, linetype='dashed', color='grey60')+
geom_hline(yintercept = 0, linetype='dashed', color='grey60')+
mapply(
function(explorationrate) {
stat_function(fun = noSocialCurveSimplest, args=list(n = 20, pH=0.7, pL=explorationrate), aes_q(color=explorationrate), size = 1)
},
pLs
) +
# Hand-placed labels for each pL curve.
annotate(geom="text", x=0.1, y=-13.5, label=expression(paste(italic(p[l]),' = 0.1',sep="")) , size = 5) +
annotate(geom="text", x=0.1, y=-9.5, label=expression(paste(italic(p[l]),' = 0.2',sep="")) , size = 5) +
annotate(geom="text", x=0.1, y=-3.5, label=expression(paste(italic(p[l]),' = 0.4',sep="")) , size = 5) +
annotate(geom="text", x=0.1, y=0.5, label=expression(paste(italic(p[l]),' = 0.6',sep="")) , size = 5) +
annotate(geom="text", x=0.1, y=8, label=expression(paste('(',italic(p[h]),' = 0.7)',sep="")) , size = 5) +
scale_colour_viridis_c(option="cividis", direction=-1)+
labs(color="pL",
# y = expression(paste('Risk-seeking bias: ', N[R]^'*' - N[S]^'*',sep="")),
y = expression(atop('Risk-seeking bias: ', paste(N[R]^'*' - N[S]^'*'))),
x = expression(paste('The population-level "risk premium" ', italic(e)))
# x = expression(paste('The rate of getting enchanted with R: ', italic(e),sep=""))
# x = expression(atop('The rate of', paste('getting enchanted with R: ', italic(e), sep="")))
)+
myTheme_Arial()+
theme(legend.position = 'none')+
NULL)
# Fig. 3c
# Load the numerical equilibrium table of the social-learning dynamics
# (headerless CSV; columns named below) and build facet labels. The c == 0
# subset is the asocial baseline shown in panel c.
FigSocialLearningSimplest <- read_csv("dynamicsModel/FigSocialLearningSimplest.csv", col_names = FALSE)
names(FigSocialLearningSimplest) = c('e','c','f','pl','maxS','minS','diffS','diffR','maxR','minR','diffRS')
FigSocialLearningSimplest_sample = dplyr::sample_frac(tbl = FigSocialLearningSimplest, size = 0.00001)
FigSocialLearningSimplest$f_category = paste('Conformity exponent\nθ = ', FigSocialLearningSimplest$f, sep='')
FigSocialLearningSimplest$c_category = paste('Copying weight\nσ = ', FigSocialLearningSimplest$c, sep='')
FigSocialLearningSimplest$pl_category = paste('pl = ', FigSocialLearningSimplest$pl, sep='')
FigSocialLearningSimplest$f_category = factor(FigSocialLearningSimplest$f_category, levels = c('Conformity exponent\nθ = 1','Conformity exponent\nθ = 2','Conformity exponent\nθ = 10'))
(FigSocialLearningSimplest %>%
dplyr::filter(c == 0) %>%
ggplot(aes(x=pl))+
#geom_line(mapping = aes(y = diffRS, group = pl, colour = pl)) +
geom_raster(mapping = aes(y=e, fill = diffRS), stat = 'identity') +
stat_function(fun=zeroIsoclineSimplest, args=list(pH=0.7), color='black', linetype='dashed', size=1)+
labs(
fill = expression(paste(N[R]^'*' - N[S]^'*')),
# fill = 'Differences in\nrisk seeking and\nrisk aversion',
y = expression(paste('The population-level\n"risk premium" e', '')),
x = expression(paste('Probability of exploration ', italic(p[l])))
# y = expression(atop('The rate of getting', paste('enchanted with R: ', italic(e), sep=""))),
# x = expression(paste('The rate of exploration: ', italic(p[l]), sep=""))
)+
scale_fill_gradient2(midpoint = 0, low = "blue", mid = "grey90", high = "red", breaks=c(-10,0,10))+
myTheme_Arial()+
# theme(axis.text.x = element_text(angle = 90), legend.position='top')+
theme(strip.text.y = element_text(angle = 0))+
# facet_grid(c_category ~ f_category)+
#geom_hline(yintercept = 0, linetype = 'dotted')+
NULL -> FigSocialLearningSimplest_individual)
# Fig 3d
# Heat maps of the risk-seeking bias under social learning, for a subset of
# copying weights (0.25, 0.5) and conformity exponents (1, 2); the dashed
# curve is the asocial zero isocline for comparison.
(FigSocialLearningSimplest %>%
dplyr::filter((c == 0.25 | c == 0.5) & (f == 1 | f == 2)) %>%
ggplot(aes(x=pl))+
#geom_line(mapping = aes(y = diffRS, group = pl, colour = pl)) +
geom_raster(mapping = aes(y=e, fill = diffRS), stat = 'identity') +
stat_function(fun=zeroIsoclineSimplest, args=list(pH=0.7), color='black', linetype='dashed', size=1)+
labs(
fill = expression(paste(N[R]^'*' - N[S]^'*')),
# fill = 'Differences in\nrisk seeking and\nrisk aversion',
y = expression(paste('The population-level "risk premium" ', italic(e))),
x = expression(paste('Probability of exploration ', italic(p[l])))
# y = expression(atop('The rate of', paste('getting enchanted with R: ', italic(e), sep=""))),
# x = expression(paste('The rate of exploration: ', italic(p[l]), sep=""))
)+
scale_fill_gradient2(midpoint = 0, low = "blue", mid = "grey90", high = "red", breaks=c(-10,0,10))+
myTheme_Arial()+
theme(axis.text.x = element_text(angle = 90), legend.position='top')+
theme(strip.text.y = element_text(angle = 270))+
facet_grid(c_category ~ f_category)+
#geom_hline(yintercept = 0, linetype = 'dotted')+
NULL -> FigSocialLearningSimplest_social)
## Figure 3
# Assemble panels a-d and save. set_null_device(cairo_pdf) avoids font
# issues when cowplot measures plot sizes off-screen.
set_null_device(cairo_pdf)
figure3_centre <- plot_grid(noSocialCurveSimplest_plot, FigSocialLearningSimplest_individual, labels = c('b','c'), ncol = 1, align = 'v',label_size = 15)
figure3 <- plot_grid(schematic_simplest, figure3_centre, FigSocialLearningSimplest_social, labels = c('a','','d'), ncol = 3, align = 'v',label_size = 15, rel_widths = c(1, 1, 1.4))
ggsave(file = '~/Dropbox/wataru/papers/RiskySocialLearning/draft/submissions/eLife/Revision2/exp_reanalysis_result/figure3.png', plot = figure3, dpi = 300, width = 15, height = 6)
# Figure 3 - additional (the bifurcation analysis)
# Load the numerically obtained equilibria of the dynamical model
# (headerless CSV; columns named below) and derive plotting variables.
sleqtableSimplest <- read_csv("dynamicsModel/sleqtableSimplest.csv", col_names = FALSE)
names(sleqtableSimplest) <- c('f','S_0','c','e','R_eq')
# Initial density of risky choosers, given a total population of 20.
sleqtableSimplest$R_0 <- 20 - sleqtableSimplest$S_0
sleqtableSimplest$conformityExponent <- paste(rep('Conformity exponent\nθ = ', nrow(sleqtableSimplest)), sleqtableSimplest$f, sep ='')
sleqtableSimplest$conformityExponent <- factor(sleqtableSimplest$conformityExponent, levels = c('Conformity exponent\nθ = 0','Conformity exponent\nθ = 1','Conformity exponent\nθ = 2','Conformity exponent\nθ = 10'))
sleqtableSimplest$e_factor <- paste(rep('Risk premium\ne = ', nrow(sleqtableSimplest)), sleqtableSimplest$e, sep ='')
# Vectorised replacement of the original row-wise for-loop (same results):
# a run is in the "risky-choice regime" (1) when the equilibrium density of
# risky choosers exceeds half the population (10 of 20), and `direction`
# records whether the trajectory moved up or down from its starting point.
sleqtableSimplest$RPreferingInitial <- as.numeric(sleqtableSimplest$R_eq > 10)
sleqtableSimplest$direction <- ifelse(sleqtableSimplest$R_eq - sleqtableSimplest$R_0 > 0, 'upward', 'downward')
# Exclude pure copying (c == 1) from the bifurcation plot.
sleqtableSimplest <- sleqtableSimplest %>% dplyr::filter(c != 1)
# Bifurcation plot: equilibrium density of risky choosers (black points) and
# the initial conditions (coloured by resulting regime, shaped by stream
# direction) against the copying weight, faceted by risk premium and
# conformity exponent.
(sleqtableSimplest %>%
ggplot(aes(x=c))+
geom_point(aes(y=R_0, colour=as.factor(RPreferingInitial), shape=direction), alpha=1/2)+
geom_point(aes(y=R_eq))+
labs(
title = '',
x=expression(paste('Social influence ', sigma, sep="")),
y=expression(paste('Equilibrium density of ',N[R]^'*',sep=""))#,
#title='Social influence\n (pH = 0.5; pL = 0.1; d=0.5; l=0.25)'
)+
scale_shape_manual(values=c('upward'=2, 'downward'=6), name='Stream\'s direction')+
scale_color_manual(values=c('0'='#56B4E9','1'='#D55E00'), name='Risky choice regime')+
myTheme_Arial()+
theme(axis.text.x = element_text(angle = 90), legend.position='top')+
facet_grid(e_factor ~ conformityExponent)+
xlim(c(0,1))+
#ylim(c(0,20))+
geom_hline(yintercept=10, linetype='dashed')+
theme(legend.position = 'none')+
theme(strip.text.y = element_text(angle = 270))+
NULL -> sleqtableSimplest_plot)
ggsave(file = '~/Dropbox/wataru/papers/RiskySocialLearning/draft/submissions/eLife/Revision2/exp_reanalysis_result/sleqtableSimplest_plot.png', plot = sleqtableSimplest_plot, dpi = 300, width = 10, height = 7)
## ========================================================
#
# Figure for the experimental results
#
# =========================================================
library(ggpubr)
# Posterior-predictive model-validation simulations for the two experiments.
social_learning_model_validation_0820_data <- read.csv('experimentalAnalysis/social_learning_model_validation_0820_data.csv')
social_learning_model_validation_1022_riskID11_data <- read.csv('experimentalAnalysis/social_learning_model_validation_1022_riskID11_data.csv')
social_learning_model_validation_1022_riskID12_data <- read.csv('experimentalAnalysis/social_learning_model_validation_1022_riskID12_data.csv')
# ======================================
# 1-risky 1-safe (2-armed) task
# ======================================
# Stan fits: AL00 = asocial learning model (individual condition),
# SL00 = social learning model (group condition).
fit_AL00_multiVar_LKJ_indiv_0820_globalparameters <- read.csv('experimentalAnalysis/fit_AL00_multiVar_LKJ_indiv_0820_globalparameters.csv')
fit_AL00_multiVar_LKJ_indiv_0820_parameters <- read.csv('experimentalAnalysis/fit_AL00_multiVar_LKJ_indiv_0820_parameters.csv')
fit_SL00_multiVar_LKJ_0820_globalparameters <- read.csv('experimentalAnalysis/fit_SL00_multiVar_LKJ_0820_globalparameters.csv')
fit_SL00_multiVar_LKJ_0820_parameters <- read.csv('experimentalAnalysis/fit_SL00_multiVar_LKJ_0820_parameters.csv')
behaviour_main_0820 <- read.csv("experimentalAnalysis/behaviour_main_0820.csv")
behaviour_indiv_0820 <-read.csv("experimentalAnalysis/behaviour_indiv_0820.csv")
allBehaviour0820 <- rbind(behaviour_main_0820, behaviour_indiv_0820)
allBehaviour0820 <- allBehaviour0820 %>%
dplyr::filter(amazonID != 'INHOUSETEST2') %>% # eliminating data generated by debug tests
dplyr::filter(amazonID != '5eac70db94edd22d57fa00c4') # a bug in the data storaging process
# make the choice data binary
allBehaviour0820$choice_num = NA
allBehaviour0820$choice_num[which(allBehaviour0820$choice=='sure')] = 0
allBehaviour0820$choice_num[which(allBehaviour0820$choice=='risky')] = 1
allBehaviour0820$choice_num[which(allBehaviour0820$choice=='miss')] = -1
# Recode experimental conditions into human-readable factors.
allBehaviour0820$riskDistributionId_factor = 'Condition 5'
allBehaviour0820$riskDistributionId_factor[which(allBehaviour0820$riskDistributionId==6)] = 'Condition 6'
allBehaviour0820$riskDistributionId_factor[which(allBehaviour0820$riskDistributionId==7)] = 'Condition 7'
allBehaviour0820$indivOrGroup_factor = 'Individual'
allBehaviour0820$indivOrGroup_factor[allBehaviour0820$indivOrGroup == 1] = 'Social'
# Group-size categories: 1 = Individual, 2-4 = Small, >4 = Large.
allBehaviour0820$groupSize_category = 'Small'
allBehaviour0820$groupSize_category[which(allBehaviour0820$groupSize==1)] = 'Individual'
allBehaviour0820$groupSize_category[which(allBehaviour0820$groupSize>4)] = 'Large'
allBehaviour0820$groupSize_category = factor(allBehaviour0820$groupSize_category, levels = c('Individual','Small','Large'))
# individual condition
# Keep only subjects who completed at least 36 rounds, then summarise
# risky-choice behaviour over the last rounds (round > 35) per subject.
completedIDs = which(table(allBehaviour0820$amazonID) >= 36) %>% names()
allBehaviour0820_indiv = allBehaviour0820 %>% dplyr::filter(amazonID %in% completedIDs) %>%
dplyr::filter(indivOrGroup == 0) # note this is only the individual condition
allBehaviour0820_indiv$sub = as.numeric(as.factor(allBehaviour0820_indiv$amazonID))
allBehaviour0820_indiv = allBehaviour0820_indiv %>% group_by(amazonID) %>% arrange(round, .by_group = TRUE)
# summarised data
allBehaviour0820_indiv_summarised_t35 = allBehaviour0820_indiv %>%
dplyr::filter(round>35) %>%
group_by(sub) %>%
summarise(
risky_choice_count = sum(choice_num, na.rm = TRUE),
risky_choice_mean = mean(choice_num, na.rm=TRUE),
trial_num = n(),
indivOrGroup_factor = indivOrGroup_factor[1],
room = room[1],
amazonID = amazonID[1]
)
allBehaviour0820_indiv_summarised_t35$groupID = allBehaviour0820_indiv_summarised_t35$room
allBehaviour0820_indiv_summarised_t35$groupID[which(allBehaviour0820_indiv_summarised_t35$indivOrGroup_factor=='Individual')] = 'Individual'
# Individual Condition Only
# Join the asocial-model parameter fits and derive the per-subject hot
# stove susceptibility alpha * (beta + 1), truncated at 6 for plotting.
parameterfit_indiv_AL00_0820 <- left_join(fit_AL00_multiVar_LKJ_indiv_0820_parameters, allBehaviour0820_indiv_summarised_t35, by = 'sub')
parameterfit_indiv_AL00_0820$hot_stove_susceptibility <- parameterfit_indiv_AL00_0820$alpha_median_AL00_multiVar_LKJ * (parameterfit_indiv_AL00_0820$beta_median_AL00_multiVar_LKJ + 1)
# if the hot stove effect is too large
parameterfit_indiv_AL00_0820$hot_stove_susceptibility_trancated <- parameterfit_indiv_AL00_0820$hot_stove_susceptibility
parameterfit_indiv_AL00_0820$hot_stove_susceptibility_trancated[which(parameterfit_indiv_AL00_0820$hot_stove_susceptibility > 6)] <- 6
# Group condition
# Same completion filter and last-rounds summary, this time for the social
# (group) condition subjects.
completedIDs = which(table(allBehaviour0820$amazonID) >= 36) %>% names()
allBehaviour0820_social = allBehaviour0820 %>% dplyr::filter(amazonID %in% completedIDs) %>%
dplyr::filter(indivOrGroup == 1) # note this is only the social condition
allBehaviour0820_social$sub = as.numeric(as.factor(allBehaviour0820_social$amazonID))
allBehaviour0820_social$group = as.numeric(as.factor(allBehaviour0820_social$room))
# summarised data
allBehaviour0820_social_summarised_t35 = allBehaviour0820_social %>%
dplyr::filter(round>35) %>%
group_by(sub) %>%
summarise(
risky_choice_count = sum(choice_num, na.rm = TRUE),
risky_choice_mean = mean(choice_num, na.rm=TRUE),
trial_num = n(),
indivOrGroup_factor = indivOrGroup_factor[1],
room = room[1],
amazonID = amazonID[1]
)
allBehaviour0820_social_summarised_t35$groupID = allBehaviour0820_social_summarised_t35$room
allBehaviour0820_social_summarised_t35$groupID[which(allBehaviour0820_social_summarised_t35$indivOrGroup_factor=='Individual')] = 'Individual'
# Group condition
# Join the social-learning model fits and derive the hot stove
# susceptibility, truncated at 6 for plotting.
fit_parameters_group_SL00_mcmc <- left_join(fit_SL00_multiVar_LKJ_0820_parameters, allBehaviour0820_social_summarised_t35, by = 'sub')
fit_parameters_group_SL00_mcmc$hot_stove_susceptibility <- fit_parameters_group_SL00_mcmc$alpha_median_SL00_multiVar_LKJ * (fit_parameters_group_SL00_mcmc$beta_median_SL00_multiVar_LKJ + 1)
# if the hot stove effect is too large
fit_parameters_group_SL00_mcmc$hot_stove_susceptibility_trancated <- fit_parameters_group_SL00_mcmc$hot_stove_susceptibility
fit_parameters_group_SL00_mcmc$hot_stove_susceptibility_trancated[which(fit_parameters_group_SL00_mcmc$hot_stove_susceptibility > 6)] <- 6
# overall means
# Summarise the posterior-predictive validation simulations per condition,
# susceptibility bin, and copying-weight bin; standard errors are computed
# on the raw (logit-like) scale and back-transformed.
social_learning_model_validation_0820_summary <-
social_learning_model_validation_0820_data %>%
group_by(condition_dummy, hot_stove_susceptibility_rounded, soc_mean_category) %>%
summarise(
proportionRiskyChoice_b2_mean = mean(proportionRiskyChoice_b2),
proportionRiskyChoice_b2_sd = sd(proportionRiskyChoice_b2),
raw_proportionRiskyChoice_b2_mean = proportionRiskyChoice_b2 %>% convert_alpha_to_alphaRaw() %>% mean(),
raw_proportionRiskyChoice_b2_sd = proportionRiskyChoice_b2 %>% convert_alpha_to_alphaRaw() %>% sd(),
soc_mean = mean(soc_mean),
n = n()
)
# Mean +/- 1 SE bands, back-transformed to the probability scale.
social_learning_model_validation_0820_summary$proportionRiskyChoice_b2_lower <-
(social_learning_model_validation_0820_summary$raw_proportionRiskyChoice_b2_mean - social_learning_model_validation_0820_summary$raw_proportionRiskyChoice_b2_sd / sqrt(social_learning_model_validation_0820_summary$n)) %>% convert_alphaRaw_to_alpha
social_learning_model_validation_0820_summary$proportionRiskyChoice_b2_upper <-
(social_learning_model_validation_0820_summary$raw_proportionRiskyChoice_b2_mean + social_learning_model_validation_0820_summary$raw_proportionRiskyChoice_b2_sd / sqrt(social_learning_model_validation_0820_summary$n)) %>% convert_alphaRaw_to_alpha
social_learning_model_validation_0820_summary$proportionRiskyChoice_b2_mid <-
social_learning_model_validation_0820_summary$raw_proportionRiskyChoice_b2_mean %>% convert_alphaRaw_to_alpha
# modest social learners' means
# Same summary restricted to intermediate copying weights (0.3-0.6) and
# susceptibility below 6.
social_learning_model_validation_0820_summary_reallyHighSigma <-
social_learning_model_validation_0820_data %>%
dplyr::filter(soc_mean > 3/10 & soc_mean < 6/10 & hot_stove_susceptibility_rounded < 6) %>%
group_by(condition_dummy, hot_stove_susceptibility_rounded, soc_mean_category) %>%
summarise(
proportionRiskyChoice_b2_mean = mean(proportionRiskyChoice_b2),
proportionRiskyChoice_b2_sd = sd(proportionRiskyChoice_b2),
# NOTE(review): median() is used here whereas the overall summary above
# uses mean() for the same "_mean"-named column — confirm this is intended.
raw_proportionRiskyChoice_b2_mean = proportionRiskyChoice_b2 %>% convert_alpha_to_alphaRaw() %>% median(),
raw_proportionRiskyChoice_b2_sd = proportionRiskyChoice_b2 %>% convert_alpha_to_alphaRaw() %>% sd(),
soc_mean = mean(soc_mean),
n = n()
)
social_learning_model_validation_0820_summary_reallyHighSigma$proportionRiskyChoice_b2_mid <-
social_learning_model_validation_0820_summary_reallyHighSigma$raw_proportionRiskyChoice_b2_mean %>% convert_alphaRaw_to_alpha
social_learning_model_validation_0820_summary_reallyHighSigma$proportionRiskyChoice_b2_mid <-
social_learning_model_validation_0820_summary_reallyHighSigma$raw_proportionRiskyChoice_b2_mean %>% convert_alphaRaw_to_alpha
fit_parameters_group_SL00_mcmc$soc_mean <- fit_parameters_group_SL00_mcmc$soc_mean_SL00_multiVar_LKJ
# ======================================
# 1-risky 3-safe (4-armed) task
# ======================================
# fit result -- global parameters
fit_SL00_multiVar_LKJ_1022_globalparameters <- read.csv('experimentalAnalysis/fit_SL00_multiVar_LKJ_1022_globalparameters.csv')
fit_AL00_multiVar_LKJ_indiv_riskID11_indiv_riskID11Condition_globalparameters <- read.csv('experimentalAnalysis/fit_AL00_multiVar_LKJ_indiv_riskID11_indiv_riskID11Condition_globalparameters.csv')
## behavioural data summary
allBehaviour1022_group <- read.csv("experimentalAnalysis/allBehaviour1022_group.csv")
allBehaviour1022_group_riskID11 <- allBehaviour1022_group %>% dplyr::filter(riskDistributionId == 11 & room != '102_session_622') #the group '102_session_622' had a wired error (3 of them played riskID11, and the rest two played riskID12)
fit_SL00_multiVar_LKJ_1022_parameters <- read.csv("experimentalAnalysis/fit_SL00_multiVar_LKJ_1022_parameters.csv")
# Per-subject summary of trials after round 35 (group condition, 1-risky-3-safe):
# count/proportion of optimal risky choices plus subject metadata.
allBehaviour1022_group_riskID11_summarised_t35 <- allBehaviour1022_group_riskID11 %>%
dplyr::filter(round>35) %>%
group_by(amazonID, sub) %>%
summarise(
risky_choice_count = sum(best_risky_choice, na.rm = TRUE),
risky_choice_mean = mean(best_risky_choice, na.rm=TRUE),
trial_num = n(),
indivOrGroup_factor = indivOrGroup_factor[1],
room = room[1]
)
# In the group condition the group label is simply the room id
allBehaviour1022_group_riskID11_summarised_t35$groupID <- allBehaviour1022_group_riskID11_summarised_t35$room
allBehaviour1022_indiv <- read.csv("experimentalAnalysis/allBehaviour1022_indiv.csv")
allBehaviour1022_indiv_riskID11 <- allBehaviour1022_indiv %>%
filter(riskDistributionId_factor=='Con: 0')
# The 1-risky-3-safe task was labeled "Con: 0" or "11" originally
# And in the analysis code, things like "riskID11" means the 1-risky-3-safe task
# while "riskID12" means the 2-risky-2-safe task
# Re-index subjects so that `sub` is a contiguous numeric id; it is the join
# key used against the model-fit parameter tables below.
allBehaviour1022_indiv_riskID11$sub_old <- allBehaviour1022_indiv_riskID11$sub
allBehaviour1022_indiv_riskID11$sub <- allBehaviour1022_indiv_riskID11$amazonID %>% as.factor() %>% as.numeric()
# Same per-subject summary for the individual condition
allBehaviour1022_indiv_riskID11_summarised_t35 <- allBehaviour1022_indiv_riskID11 %>%
dplyr::filter(round>35) %>%
group_by(amazonID, sub) %>%
summarise(
risky_choice_count = sum(best_risky_choice, na.rm = TRUE),
risky_choice_mean = mean(best_risky_choice, na.rm=TRUE),
trial_num = n(),
indivOrGroup_factor = indivOrGroup_factor[1],
room = room[1]
)
# Individual-condition participants are labelled 'Individual' instead of a room id
allBehaviour1022_indiv_riskID11_summarised_t35$groupID = allBehaviour1022_indiv_riskID11_summarised_t35$room
allBehaviour1022_indiv_riskID11_summarised_t35$groupID[which(allBehaviour1022_indiv_riskID11_summarised_t35$indivOrGroup_factor=='Individual')] = 'Individual'
# Individual fits
fit_AL00_multiVar_LKJ_indiv_riskID11_parameters <- read.csv('experimentalAnalysis/fit_AL00_multiVar_LKJ_indiv_riskID11_parameters.csv')
# Merging the behavioural data with the fit parameters
# Susceptibility to the hot stove effect is defined as alpha * (1 + beta),
# using posterior medians for individual fits and posterior means for group fits.
# NOTE(review): "trancated" is a typo for "truncated"; the name is kept because
# the plotting code further below refers to these columns by this exact name.
# hot stove effect - individual
fit_AL_indiv_riskID11_parameters <- right_join(fit_AL00_multiVar_LKJ_indiv_riskID11_parameters, allBehaviour1022_indiv_riskID11_summarised_t35, by = 'sub')
fit_AL_indiv_riskID11_parameters$hot_stove_susceptibility <- fit_AL_indiv_riskID11_parameters$alpha_median_AL00_multiVar_LKJ * (1+ fit_AL_indiv_riskID11_parameters$beta_median_AL00_multiVar_LKJ)
fit_AL_indiv_riskID11_parameters$hot_stove_susceptibility_trancated <- fit_AL_indiv_riskID11_parameters$hot_stove_susceptibility
# Values above 6 are capped at 6 (presumably to bound the figure x-axis -- confirm)
fit_AL_indiv_riskID11_parameters$hot_stove_susceptibility_trancated[which(fit_AL_indiv_riskID11_parameters$hot_stove_susceptibility > 6)] <- 6
# hot stove effect - group
fit_SL00_riskID11_parameters <- right_join(fit_SL00_multiVar_LKJ_1022_parameters, allBehaviour1022_group_riskID11_summarised_t35, by = 'sub')
fit_SL00_riskID11_parameters$hot_stove_susceptibility <- fit_SL00_riskID11_parameters$alpha_mean_SL00_multiVar_LKJ * (1+ fit_SL00_riskID11_parameters$beta_mean_SL00_multiVar_LKJ)
fit_SL00_riskID11_parameters$hot_stove_susceptibility_trancated <- fit_SL00_riskID11_parameters$hot_stove_susceptibility
fit_SL00_riskID11_parameters$hot_stove_susceptibility_trancated[which(fit_SL00_riskID11_parameters$hot_stove_susceptibility > 6)] <- 6
# overall means
# Model-validation predictions for the 1-risky-3-safe task, binned by
# (condition, rounded hot-stove susceptibility, copying-weight category).
social_learning_model_validation_1022_riskID11_summary <-
social_learning_model_validation_1022_riskID11_data %>%
group_by(condition_dummy, hot_stove_susceptibility_rounded, soc_mean_category) %>%
summarise(
proportionRiskyChoice_b2_mean = mean(proportionRiskyChoice_b2),
proportionRiskyChoice_b2_sd = sd(proportionRiskyChoice_b2),
# "raw" = proportions transformed to the unbounded scale before averaging,
# then back-transformed below (presumably so the SE band stays in [0, 1])
raw_proportionRiskyChoice_b2_mean = proportionRiskyChoice_b2 %>% convert_alpha_to_alphaRaw() %>% mean(),
raw_proportionRiskyChoice_b2_sd = proportionRiskyChoice_b2 %>% convert_alpha_to_alphaRaw() %>% sd(),
soc_mean = mean(soc_mean),
n = n()
)
# Lower/upper = mean -/+ 1 SE on the raw scale, back-transformed to proportions
social_learning_model_validation_1022_riskID11_summary$proportionRiskyChoice_b2_lower <-
(social_learning_model_validation_1022_riskID11_summary$raw_proportionRiskyChoice_b2_mean - social_learning_model_validation_1022_riskID11_summary$raw_proportionRiskyChoice_b2_sd / sqrt(social_learning_model_validation_1022_riskID11_summary$n)) %>% convert_alphaRaw_to_alpha
social_learning_model_validation_1022_riskID11_summary$proportionRiskyChoice_b2_upper <-
(social_learning_model_validation_1022_riskID11_summary$raw_proportionRiskyChoice_b2_mean + social_learning_model_validation_1022_riskID11_summary$raw_proportionRiskyChoice_b2_sd / sqrt(social_learning_model_validation_1022_riskID11_summary$n)) %>% convert_alphaRaw_to_alpha
social_learning_model_validation_1022_riskID11_summary$proportionRiskyChoice_b2_mid <-
social_learning_model_validation_1022_riskID11_summary$raw_proportionRiskyChoice_b2_mean %>% convert_alphaRaw_to_alpha
# modest social learners' means
# Same summary restricted to simulated "modest" social learners
# (0.3 < copying weight < 0.6) with susceptibility below the cap of 6.
social_learning_model_validation_1022_riskID11_summary_reallyHighSigma <-
social_learning_model_validation_1022_riskID11_data %>%
dplyr::filter(soc_mean > 3/10 & soc_mean < 6/10 & hot_stove_susceptibility_rounded < 6) %>%
group_by(condition_dummy, hot_stove_susceptibility_rounded, soc_mean_category) %>%
summarise(
proportionRiskyChoice_b2_mean = mean(proportionRiskyChoice_b2),
proportionRiskyChoice_b2_sd = sd(proportionRiskyChoice_b2),
# NOTE(review): median() here vs mean() in the overall summary above --
# the column is still named *_mean; looks deliberate but worth confirming
raw_proportionRiskyChoice_b2_mean = proportionRiskyChoice_b2 %>% convert_alpha_to_alphaRaw() %>% median(),
raw_proportionRiskyChoice_b2_sd = proportionRiskyChoice_b2 %>% convert_alpha_to_alphaRaw() %>% sd(),
soc_mean = mean(soc_mean),
n = n()
)
social_learning_model_validation_1022_riskID11_summary_reallyHighSigma$proportionRiskyChoice_b2_mid <-
social_learning_model_validation_1022_riskID11_summary_reallyHighSigma$raw_proportionRiskyChoice_b2_mean %>% convert_alphaRaw_to_alpha
# ======================================
# 2-risky 2-safe (4-armed) task
# ======================================
# Mirrors the riskID11 (1-risky-3-safe) pipeline above, for the riskID12 task.
# fit result -- global parameters
fit_SL00_multiVar_LKJ_1022_globalparameters <- read.csv('experimentalAnalysis/fit_SL00_multiVar_LKJ_1022_globalparameters.csv')
fit_AL00_multiVar_LKJ_indiv_riskID12_indiv_riskID12Condition_globalparameters <- read.csv('experimentalAnalysis/fit_AL00_multiVar_LKJ_indiv_riskID12_indiv_riskID12Condition_globalparameters.csv')
## behavioural data summary
allBehaviour1022_group <- read.csv("experimentalAnalysis/allBehaviour1022_group.csv")
# '102_session_622' is excluded here too (see the note in the riskID11 section)
allBehaviour1022_group_riskID12 <- allBehaviour1022_group %>% dplyr::filter(riskDistributionId == 12 & room != '102_session_622')
fit_SL00_multiVar_LKJ_1022_parameters <- read.csv("experimentalAnalysis/fit_SL00_multiVar_LKJ_1022_parameters.csv")
# Per-subject summary of trials after round 35 (group condition)
allBehaviour1022_group_riskID12_summarised_t35 <- allBehaviour1022_group_riskID12 %>%
dplyr::filter(round>35) %>%
group_by(amazonID, sub) %>%
summarise(
risky_choice_count = sum(best_risky_choice, na.rm = TRUE),
risky_choice_mean = mean(best_risky_choice, na.rm=TRUE),
trial_num = n(),
indivOrGroup_factor = indivOrGroup_factor[1],
room = room[1]
)
allBehaviour1022_group_riskID12_summarised_t35$groupID <- allBehaviour1022_group_riskID12_summarised_t35$room
allBehaviour1022_indiv <- read.csv("experimentalAnalysis/allBehaviour1022_indiv.csv")
allBehaviour1022_indiv_riskID12 <- allBehaviour1022_indiv %>% filter(riskDistributionId_factor=='Con: 2') #'Con: 2' means the 2-risky-2-safe task
# Re-index subjects so `sub` matches the join key of the fit-parameter tables
allBehaviour1022_indiv_riskID12$sub_old <- allBehaviour1022_indiv_riskID12$sub
allBehaviour1022_indiv_riskID12$sub <- allBehaviour1022_indiv_riskID12$amazonID %>% as.factor() %>% as.numeric()
allBehaviour1022_indiv_riskID12_summarised_t35 <- allBehaviour1022_indiv_riskID12 %>%
dplyr::filter(round>35) %>%
group_by(amazonID, sub) %>%
summarise(
risky_choice_count = sum(best_risky_choice, na.rm = TRUE),
risky_choice_mean = mean(best_risky_choice, na.rm=TRUE),
trial_num = n(),
indivOrGroup_factor = indivOrGroup_factor[1],
room = room[1]
)
allBehaviour1022_indiv_riskID12_summarised_t35$groupID = allBehaviour1022_indiv_riskID12_summarised_t35$room
allBehaviour1022_indiv_riskID12_summarised_t35$groupID[which(allBehaviour1022_indiv_riskID12_summarised_t35$indivOrGroup_factor=='Individual')] = 'Individual'
# Individual fits
fit_AL00_multiVar_LKJ_indiv_riskID12_parameters <- read.csv('experimentalAnalysis/fit_AL00_multiVar_LKJ_indiv_riskID12_parameters.csv')
# Merging the behavioural data with the fit parameters
# Hot-stove susceptibility = alpha * (1 + beta); values above 6 are capped
# ("trancated" is a typo for "truncated", kept for compatibility with the plots).
# hot stove effect - individual
fit_AL_indiv_riskID12_parameters <- right_join(fit_AL00_multiVar_LKJ_indiv_riskID12_parameters, allBehaviour1022_indiv_riskID12_summarised_t35, by = 'sub')
fit_AL_indiv_riskID12_parameters$hot_stove_susceptibility <- fit_AL_indiv_riskID12_parameters$alpha_median_AL00_multiVar_LKJ * (1+ fit_AL_indiv_riskID12_parameters$beta_median_AL00_multiVar_LKJ)
fit_AL_indiv_riskID12_parameters$hot_stove_susceptibility_trancated <- fit_AL_indiv_riskID12_parameters$hot_stove_susceptibility
fit_AL_indiv_riskID12_parameters$hot_stove_susceptibility_trancated[which(fit_AL_indiv_riskID12_parameters$hot_stove_susceptibility > 6)] <- 6
# hot stove effect - group
fit_SL00_riskID12_parameters <- right_join(fit_SL00_multiVar_LKJ_1022_parameters, allBehaviour1022_group_riskID12_summarised_t35, by = 'sub')
fit_SL00_riskID12_parameters$hot_stove_susceptibility <- fit_SL00_riskID12_parameters$alpha_mean_SL00_multiVar_LKJ * (1+ fit_SL00_riskID12_parameters$beta_mean_SL00_multiVar_LKJ)
fit_SL00_riskID12_parameters$hot_stove_susceptibility_trancated <- fit_SL00_riskID12_parameters$hot_stove_susceptibility
fit_SL00_riskID12_parameters$hot_stove_susceptibility_trancated[which(fit_SL00_riskID12_parameters$hot_stove_susceptibility > 6)] <- 6
# overall means
# Model-validation predictions for the 2-risky-2-safe task; see the riskID11
# section above for the raw-scale (+/- 1 SE) transformation rationale.
social_learning_model_validation_1022_riskID12_summary <-
social_learning_model_validation_1022_riskID12_data %>%
group_by(condition_dummy, hot_stove_susceptibility_rounded, soc_mean_category) %>%
summarise(
proportionRiskyChoice_b2_mean = mean(proportionRiskyChoice_b2),
proportionRiskyChoice_b2_sd = sd(proportionRiskyChoice_b2),
raw_proportionRiskyChoice_b2_mean = proportionRiskyChoice_b2 %>% convert_alpha_to_alphaRaw() %>% mean(),
raw_proportionRiskyChoice_b2_sd = proportionRiskyChoice_b2 %>% convert_alpha_to_alphaRaw() %>% sd(),
soc_mean = mean(soc_mean),
n = n()
)
social_learning_model_validation_1022_riskID12_summary$proportionRiskyChoice_b2_lower <-
(social_learning_model_validation_1022_riskID12_summary$raw_proportionRiskyChoice_b2_mean - social_learning_model_validation_1022_riskID12_summary$raw_proportionRiskyChoice_b2_sd / sqrt(social_learning_model_validation_1022_riskID12_summary$n)) %>% convert_alphaRaw_to_alpha
social_learning_model_validation_1022_riskID12_summary$proportionRiskyChoice_b2_upper <-
(social_learning_model_validation_1022_riskID12_summary$raw_proportionRiskyChoice_b2_mean + social_learning_model_validation_1022_riskID12_summary$raw_proportionRiskyChoice_b2_sd / sqrt(social_learning_model_validation_1022_riskID12_summary$n)) %>% convert_alphaRaw_to_alpha
social_learning_model_validation_1022_riskID12_summary$proportionRiskyChoice_b2_mid <-
social_learning_model_validation_1022_riskID12_summary$raw_proportionRiskyChoice_b2_mean %>% convert_alphaRaw_to_alpha
# modest social learners' means
# Restricted to modest social learners (0.3 < copying weight < 0.6) below the cap
social_learning_model_validation_1022_riskID12_summary_reallyHighSigma <-
social_learning_model_validation_1022_riskID12_data %>%
dplyr::filter(soc_mean > 3/10 & soc_mean < 6/10 & hot_stove_susceptibility_rounded < 6) %>%
group_by(condition_dummy, hot_stove_susceptibility_rounded, soc_mean_category) %>%
summarise(
proportionRiskyChoice_b2_mean = mean(proportionRiskyChoice_b2),
proportionRiskyChoice_b2_sd = sd(proportionRiskyChoice_b2),
# NOTE(review): median() feeding a *_mean column, as in the riskID11 section
raw_proportionRiskyChoice_b2_mean = proportionRiskyChoice_b2 %>% convert_alpha_to_alphaRaw() %>% median(),
raw_proportionRiskyChoice_b2_sd = proportionRiskyChoice_b2 %>% convert_alpha_to_alphaRaw() %>% sd(),
soc_mean = mean(soc_mean),
n = n()
)
social_learning_model_validation_1022_riskID12_summary_reallyHighSigma$proportionRiskyChoice_b2_mid <-
social_learning_model_validation_1022_riskID12_summary_reallyHighSigma$raw_proportionRiskyChoice_b2_mean %>% convert_alphaRaw_to_alpha
# Figure 6a: 1-risky-1-safe task. Model predictions (lines/ribbons) overlaid
# with per-subject fits (points); x = hot-stove susceptibility (capped at 6).
ggplot() +
geom_segment(aes(x=0,xend=5.8,y=0.5,yend=0.5),colour="grey30", size=0.5) +
geom_ribbon(data=social_learning_model_validation_0820_summary%>%dplyr::filter(condition_dummy==0&hot_stove_susceptibility_rounded<6), mapping=aes(hot_stove_susceptibility_rounded, ymin=proportionRiskyChoice_b2_lower, ymax=proportionRiskyChoice_b2_upper), fill='grey20', alpha=1/2)+
geom_ribbon(data=social_learning_model_validation_0820_summary%>%dplyr::filter(condition_dummy==1&soc_mean_category=='mild'&hot_stove_susceptibility_rounded<6), mapping=aes(hot_stove_susceptibility_rounded, ymin=proportionRiskyChoice_b2_lower, ymax=proportionRiskyChoice_b2_upper), fill='orange', alpha=1/2)+
geom_point(data = parameterfit_indiv_AL00_0820, mapping=aes(hot_stove_susceptibility_trancated, risky_choice_mean), colour='grey20', shape = 17)+ # shape = 17: filled triangle (individual condition)
geom_point(data = fit_parameters_group_SL00_mcmc, mapping=aes(hot_stove_susceptibility_trancated,risky_choice_mean, colour=soc_mean), shape = 20) +
geom_line(data=social_learning_model_validation_0820_summary%>%dplyr::filter(condition_dummy==0&hot_stove_susceptibility_rounded<6), mapping=aes(hot_stove_susceptibility_rounded, proportionRiskyChoice_b2_mid , linetype = "Ind"), size=1.0)+
geom_line(data=social_learning_model_validation_0820_summary%>%dplyr::filter(condition_dummy==1&hot_stove_susceptibility_rounded<6), mapping=aes(hot_stove_susceptibility_rounded, proportionRiskyChoice_b2_mid, group=soc_mean_category, colour=mean(soc_mean) , linetype = "g1"), size=1.0)+
geom_line(data=social_learning_model_validation_0820_summary_reallyHighSigma, mapping=aes(hot_stove_susceptibility_rounded, proportionRiskyChoice_b2_mid, group=soc_mean_category, colour=mean(soc_mean) , linetype = 'g2'), size=1.0)+
scale_colour_viridis_c(expression('Copying weight \U03C3'[i]), begin = 0.2, end = 0.9, option='plasma', direction=-1)+
scale_linetype_manual(name = ""
, breaks = c("Ind","g1","g2")
, labels = c("Individual","Group average","Group with \U03C3 = 0.4")
, values = c('solid','solid','dashed')
, guide = guide_legend(
override.aes = list(linetype = c('solid','solid','dashed')
, size=0.6
, color = c('black','#ff751a','#ff6666'))
, order=1)
) +
myTheme_Arial()+
xlim(c(0,6.5))+
labs(
x = expression(atop('Susceptibility to the hot stove effect', paste(alpha[i], '(', beta[i], '+1)'))),
y = 'Mean proportion of choosing\nthe optimal risky option',
title = 'The 1-risky-1-safe task \n(N = 168)') +
guides(colour = "none")+
#theme(legend.position = c(0.85, 0.5))+
#theme(legend.position = NaN)+
theme(legend.position = c(0.65, 0.75))+
theme(legend.title = element_text(size=12))+
theme(legend.text = element_text(size=11))+
theme(legend.key = element_rect(fill = "transparent", colour = "transparent"))+
NULL -> fig6_a
# Figure 6b: 1-risky-3-safe task (dashed reference at chance level 0.25)
ggplot() +
geom_segment(aes(x=0,xend=6,y=0.25,yend=0.25),colour="grey30", size=0.5) +
geom_ribbon(data=social_learning_model_validation_1022_riskID11_summary%>%dplyr::filter(condition_dummy==0), mapping=aes(hot_stove_susceptibility_rounded, ymin=proportionRiskyChoice_b2_lower, ymax=proportionRiskyChoice_b2_upper), fill='grey20', alpha=1/2)+
geom_ribbon(data=social_learning_model_validation_1022_riskID11_summary%>%dplyr::filter(condition_dummy==1&soc_mean_category=='mild'), mapping=aes(hot_stove_susceptibility_rounded, ymin=proportionRiskyChoice_b2_lower, ymax=proportionRiskyChoice_b2_upper), fill='orange', alpha=1/2)+
geom_point(data = fit_AL_indiv_riskID11_parameters, mapping=aes(hot_stove_susceptibility_trancated, risky_choice_mean), colour='grey20', shape = 17)+ # shape = 17: filled triangle (individual condition)
geom_point(data = fit_SL00_riskID11_parameters, mapping=aes(hot_stove_susceptibility_trancated,risky_choice_mean, colour=soc_mean_SL00_multiVar_LKJ), shape = 20) +
geom_line(data=social_learning_model_validation_1022_riskID11_summary%>%dplyr::filter(condition_dummy==0), mapping=aes(hot_stove_susceptibility_rounded, proportionRiskyChoice_b2_mid), size=1.0)+
geom_line(data=social_learning_model_validation_1022_riskID11_summary%>%dplyr::filter(condition_dummy==1), mapping=aes(hot_stove_susceptibility_rounded, proportionRiskyChoice_b2_mid, group=soc_mean_category, colour=mean(soc_mean)), size=1.0)+
geom_line(data=social_learning_model_validation_1022_riskID11_summary_reallyHighSigma, mapping=aes(hot_stove_susceptibility_rounded, proportionRiskyChoice_b2_mid, group=soc_mean_category, colour=mean(soc_mean)), linetype = 'dashed', size=1.0)+
scale_colour_viridis_c(expression('Copying weight \U03C3'[i]), begin = 0.2, end = 0.9, option='plasma', direction=-1)+
myTheme_Arial()+
xlim(c(0,6.5))+
labs(
x = expression(atop('Susceptibility to the hot stove effect', paste(alpha[i], '(', beta[i], '+1)'))),
y = 'Mean proportion of choosing\nthe optimal risky option',
title = 'The 1-risky-3-safe task \n(N = 148)') +
# NOTE(review): legend.position = NaN is not a documented value ("none" is the
# idiomatic way to hide a legend) -- it appears to suppress the legend here,
# but confirm before relying on it
theme(legend.position = NaN)+
theme(legend.title = element_text(size=12))+
theme(legend.text = element_text(size=11))+
NULL -> fig6_b
# Figure 6c: 2-risky-2-safe task
ggplot() +
geom_segment(aes(x=0,xend=6,y=0.25,yend=0.25),colour="grey30", size=0.5) +
geom_ribbon(data=social_learning_model_validation_1022_riskID12_summary%>%dplyr::filter(condition_dummy==0), mapping=aes(hot_stove_susceptibility_rounded, ymin=proportionRiskyChoice_b2_lower, ymax=proportionRiskyChoice_b2_upper), fill='grey20', alpha=1/2)+
geom_ribbon(data=social_learning_model_validation_1022_riskID12_summary%>%dplyr::filter(condition_dummy==1&soc_mean_category=='mild'), mapping=aes(hot_stove_susceptibility_rounded, ymin=proportionRiskyChoice_b2_lower, ymax=proportionRiskyChoice_b2_upper), fill='orange', alpha=1/2)+
geom_point(data = fit_AL_indiv_riskID12_parameters, mapping=aes(hot_stove_susceptibility_trancated, risky_choice_mean), colour='grey20', shape = 17)+ # shape = 17: filled triangle (individual condition)
geom_point(data = fit_SL00_riskID12_parameters, mapping=aes(hot_stove_susceptibility_trancated,risky_choice_mean, colour=soc_mean_SL00_multiVar_LKJ), shape = 20) +
geom_line(data=social_learning_model_validation_1022_riskID12_summary%>%dplyr::filter(condition_dummy==0), mapping=aes(hot_stove_susceptibility_rounded, proportionRiskyChoice_b2_mid), size=1.0)+
geom_line(data=social_learning_model_validation_1022_riskID12_summary%>%dplyr::filter(condition_dummy==1), mapping=aes(hot_stove_susceptibility_rounded, proportionRiskyChoice_b2_mid, group=soc_mean_category, colour=mean(soc_mean)), size=1.0)+
geom_line(data=social_learning_model_validation_1022_riskID12_summary_reallyHighSigma, mapping=aes(hot_stove_susceptibility_rounded, proportionRiskyChoice_b2_mid, group=soc_mean_category, colour=mean(soc_mean)), linetype = "dashed", size=1.0)+
scale_colour_viridis_c(expression('Copying weight \U03C3'[i]), begin = 0.2, end = 0.9, option='plasma', direction=-1)+
myTheme_Arial()+
xlim(c(0,6.5))+
labs(
x = expression(atop('Susceptibility to the hot stove effect', paste(alpha[i], '(', beta[i], '+1)'))),
y = 'Mean proportion of choosing\nthe optimal risky option',
title = 'The 2-risky-2-safe task \n(N = 151)') +
theme(legend.position = c(0.75, 0.7))+
theme(legend.title = element_text(size=12))+
theme(legend.text = element_text(size=11))+
NULL -> fig6_c
# Combine the three panels into one figure (outer parentheses auto-print it)
(
figure_exp_model_pred <- ggarrange(fig6_a, fig6_b, fig6_c
# , common.legend = TRUE
# , legend = 'right'
, labels = c('','',''), ncol = 3, align = 'v'
)
)
##############################################
## Supplementary figure
## Each group's behaviour
##############################################
library(ggpubr)
# two-ab task
# Label each participant with their room (or 'Individual'), and count the
# demonstrator choices visible on each trial (NA -> 0 when no social info).
allBehaviour0820$group_category <- allBehaviour0820$room
allBehaviour0820$group_category[which(allBehaviour0820$indivOrGroup==0)] <- 'Individual'
allBehaviour0820$total_n <- allBehaviour0820$socialFreq_safe + allBehaviour0820$socialFreq_risky
allBehaviour0820$total_n[which(is.na(allBehaviour0820$total_n))] <- 0
# Per-round group means; `table() %>% which.max() %>% names() %>% as.numeric()`
# picks the modal (most frequent) observed total_n value for the group.
allBehaviour0820 %>%
group_by(round, group_category) %>%
summarise(mean_risky_choice_prob = mean(choice_num, na.rm=TRUE)
, total_n = table(total_n) %>% which.max() %>% names() %>% as.numeric()
) -> allBehaviour0820_each_group
allBehaviour0820_each_group$riskDistributionId_factor <- 'twoArmed'
allBehaviour0820_each_group$mean_safe_choice_prob <- 1 - allBehaviour0820_each_group$mean_risky_choice_prob
# Group size = modal total_n across rounds; individuals count as size 1
allBehaviour0820_each_group %>%
group_by(group_category, riskDistributionId_factor) %>%
summarise(groupSize = table(total_n) %>% which.max() %>% names() %>% as.numeric()) -> allBehaviour0820_group_size
allBehaviour0820_group_size$groupSize[which(allBehaviour0820_group_size$group_category=='Individual')] <- 1
# 4-armed (1022) task: tag each row with its condition label and count how
# many demonstrator choices were visible on that trial.
# Individual-condition players have no room; group players are labelled by room.
allBehaviour1022_indiv$group_category <- 'Individual'
allBehaviour1022_group$group_category <- allBehaviour1022_group$room
# total_n = element-wise sum of the four social-frequency columns.
# Reduce(`+`, ...) adds the columns pairwise, so missing values propagate
# exactly as they would in the explicit a + b + c + d form.
social_freq_cols <- c("socialFreq_safe1", "socialFreq_safe2",
"socialFreq_safe3", "socialFreq_risky")
allBehaviour1022_indiv$total_n <- Reduce(`+`, allBehaviour1022_indiv[social_freq_cols])
allBehaviour1022_group$total_n <- Reduce(`+`, allBehaviour1022_group[social_freq_cols])
# Per-round means for each "group" (individuals pooled under 'Individual'),
# using the modal total_n per group as before.
allBehaviour1022_indiv %>%
group_by(round, group_category, riskDistributionId_factor) %>%
summarise(mean_risky_choice_prob = mean(best_risky_choice, na.rm=TRUE)
, mean_safe_choice_prob = mean(best_safe_choice, na.rm=TRUE)
, total_n = table(total_n) %>% which.max() %>% names() %>% as.numeric()
) -> allBehaviour1022_indiv_each_group
allBehaviour1022_indiv_each_group %>%
group_by(group_category, riskDistributionId_factor) %>%
summarise(groupSize = table(total_n) %>% which.max() %>% names() %>% as.numeric()) -> allBehaviour1022_indiv_size
# NOTE(review): the modal groupSize computed just above is immediately
# overwritten -- individuals are, by definition, size 1
allBehaviour1022_indiv_size$groupSize <- 1
allBehaviour1022_group %>%
group_by(round, group_category, riskDistributionId_factor) %>%
summarise(mean_risky_choice_prob = mean(best_risky_choice, na.rm=TRUE)
, mean_safe_choice_prob = mean(best_safe_choice, na.rm=TRUE)
, total_n = table(total_n) %>% which.max() %>% names() %>% as.numeric()
) -> allBehaviour1022_group_each_group
allBehaviour1022_group_each_group %>%
group_by(group_category, riskDistributionId_factor) %>%
summarise(groupSize = table(total_n) %>% which.max() %>% names() %>% as.numeric()) -> allBehaviour1022_group_size
# Stack the per-round summaries and the group-size tables across all tasks
allBehaviour_all_each_group <- allBehaviour0820_each_group %>%
rbind(allBehaviour1022_indiv_each_group) %>%
rbind(allBehaviour1022_group_each_group)
all_group_size <- allBehaviour0820_group_size %>%
rbind(allBehaviour1022_group_size) %>%
rbind(allBehaviour1022_indiv_size)
# Attach each group's modal size to the per-round group means.
# BUG FIX: the original call passed `key = group_category`, but left_join()
# has no `key` argument -- it went into `...` and was silently ignored, so the
# call fell back to a natural join on all shared columns (group_category and
# riskDistributionId_factor). The same keys are now spelled out with `by =`,
# preserving the original join result while making the keys explicit and
# silencing dplyr's "joining by" guesswork.
allBehaviour_all_each_group <- allBehaviour_all_each_group %>%
left_join(all_group_size, by = c("group_category", "riskDistributionId_factor"))
# Facet labels: "n = <size>" rows and human-readable task-name columns
allBehaviour_all_each_group$groupSize_with_n <- paste('n =', allBehaviour_all_each_group$groupSize)
allBehaviour_all_each_group$task_name <- allBehaviour_all_each_group$riskDistributionId_factor
allBehaviour_all_each_group$task_name[which(allBehaviour_all_each_group$riskDistributionId_factor=='twoArmed')] <- '1-risky-1-safe'
allBehaviour_all_each_group$task_name[which(allBehaviour_all_each_group$riskDistributionId_factor=='Con: 0')] <- '1-risky-3-safe'
allBehaviour_all_each_group$task_name[which(allBehaviour_all_each_group$riskDistributionId_factor=='Con: 2')] <- '2-risky-2-safe'
# Per-task horizontal reference line (presumably chance level: 1/2 vs 1/4)
horizontal_lines <- data.frame(task_name = c('1-risky-1-safe', '1-risky-3-safe', '2-risky-2-safe')
, yintercept = c(0.5, 0.25, 0.25))
# Per-group risky-choice trajectories (grey) with red/blue overall means for
# risky/safe choices, faceted by group size (rows) and task (columns)
allBehaviour_all_each_group %>%
ggplot(aes(round, mean_risky_choice_prob)) +
geom_line(aes(group=group_category), alpha = 1/2, colour='grey20') +
stat_summary(fun = mean, geom="line", colour='red')+
stat_summary(aes(round, mean_safe_choice_prob), fun = mean, geom="line", colour='blue')+
# geom_hline(yintercept=0.5, linetype='dashed') +
geom_segment(data=horizontal_lines, aes(x=1,xend=70,y=yintercept,yend=yintercept), linetype="dashed", size=0.5) +
facet_grid(groupSize_with_n ~ task_name) +
myTheme_Arial() +
scale_y_continuous(breaks=c(0,0.5,1))+
labs(x = 'Trial', y = 'Proportion of choosing\nthe best risky option')+
NULL -> each_group_behaviour
# Distribution of group sizes per task (one bar count per group)
allBehaviour_all_each_group %>%
group_by(group_category, groupSize, task_name) %>%
summarise(n = n()) %>%
ggplot() +
geom_bar(aes(groupSize), stat = "count")+
facet_grid(. ~ task_name)+
labs(x = 'Group size', y = 'Count')+
myTheme_Arial() +
scale_y_continuous(breaks=c(0,4,8,12))+
xlim(c(2,8.5))+
theme(panel.grid.major = element_line(size = 0.5
, linetype = 'solid', colour='grey40'))+
NULL -> group_size_distribution
# Stack the two panels (outer parentheses auto-print the combined figure)
(
exp_group_behav_plot <- ggarrange(each_group_behaviour, group_size_distribution
, heights = c(2, 0.5)
# , common.legend = TRUE
# , legend = 'right'
, labels = c('','',''), ncol = 1, align = 'v'
)
)
# cairo_pdf is used so the Unicode axis labels embed correctly
ggsave(file = "exp_group_behav_plot.pdf"
, plot = exp_group_behav_plot
, dpi = 600, width = 9, height = 10
, device = cairo_pdf
)
##################################################
## relationship between group size vs. parameters?
##################################################
# Join each group-condition subject's behavioural summary with their group size
# and fit parameters, per task.
allBehaviour0820_social_summarised_t35_groupSize <- allBehaviour0820_social_summarised_t35 %>% left_join(all_group_size, by = c("groupID" = "group_category"))
allBehaviour0820_social_summarised_t35_groupSize <- allBehaviour0820_social_summarised_t35_groupSize %>% left_join(fit_parameters_group_SL00_mcmc, by='sub')
allBehaviour1022_group_riskID11_summarised_t35_groupSize <- allBehaviour1022_group_riskID11_summarised_t35 %>% left_join(all_group_size, by = c("groupID" = "group_category"))
allBehaviour1022_group_riskID11_summarised_t35_groupSize <- allBehaviour1022_group_riskID11_summarised_t35_groupSize %>% left_join(fit_SL00_riskID11_parameters, by='sub')
allBehaviour1022_group_riskID12_summarised_t35_groupSize <- allBehaviour1022_group_riskID12_summarised_t35 %>% left_join(all_group_size, by = c("groupID" = "group_category"))
allBehaviour1022_group_riskID12_summarised_t35_groupSize <- allBehaviour1022_group_riskID12_summarised_t35_groupSize %>% left_join(fit_SL00_riskID12_parameters, by='sub')
# Long-format table combining the three tasks; .x suffixes come from the
# duplicated columns created by the joins above.
group_size_vs_parameters <- data.frame(
amazonID = c(allBehaviour0820_social_summarised_t35_groupSize$amazonID.x, allBehaviour1022_group_riskID11_summarised_t35_groupSize$amazonID.x, allBehaviour1022_group_riskID12_summarised_t35_groupSize$amazonID.x)
, groupSize = c(allBehaviour0820_social_summarised_t35_groupSize$groupSize, allBehaviour1022_group_riskID11_summarised_t35_groupSize$groupSize, allBehaviour1022_group_riskID12_summarised_t35_groupSize$groupSize)
, groupID = c(allBehaviour0820_social_summarised_t35_groupSize$groupID.x, allBehaviour1022_group_riskID11_summarised_t35_groupSize$groupID.x, allBehaviour1022_group_riskID12_summarised_t35_groupSize$groupID.x)
, riskDistributionId_factor = c(allBehaviour0820_social_summarised_t35_groupSize$riskDistributionId_factor, allBehaviour1022_group_riskID11_summarised_t35_groupSize$riskDistributionId_factor, allBehaviour1022_group_riskID12_summarised_t35_groupSize$riskDistributionId_factor)
, alpha_mean_SL00_multiVar_LKJ = c(allBehaviour0820_social_summarised_t35_groupSize$alpha_mean_SL00_multiVar_LKJ, allBehaviour1022_group_riskID11_summarised_t35_groupSize$alpha_mean_SL00_multiVar_LKJ, allBehaviour1022_group_riskID12_summarised_t35_groupSize$alpha_mean_SL00_multiVar_LKJ)
, beta_mean_SL00_multiVar_LKJ = c(allBehaviour0820_social_summarised_t35_groupSize$beta_mean_SL00_multiVar_LKJ, allBehaviour1022_group_riskID11_summarised_t35_groupSize$beta_mean_SL00_multiVar_LKJ, allBehaviour1022_group_riskID12_summarised_t35_groupSize$beta_mean_SL00_multiVar_LKJ)
, soc_mean_SL00_multiVar_LKJ = c(allBehaviour0820_social_summarised_t35_groupSize$soc_mean_SL00_multiVar_LKJ, allBehaviour1022_group_riskID11_summarised_t35_groupSize$soc_mean_SL00_multiVar_LKJ, allBehaviour1022_group_riskID12_summarised_t35_groupSize$soc_mean_SL00_multiVar_LKJ)
, theta_mean_SL00_multiVar_LKJ = c(allBehaviour0820_social_summarised_t35_groupSize$theta_mean_SL00_multiVar_LKJ, allBehaviour1022_group_riskID11_summarised_t35_groupSize$theta_mean_SL00_multiVar_LKJ, allBehaviour1022_group_riskID12_summarised_t35_groupSize$theta_mean_SL00_multiVar_LKJ)
)
# Human-readable task labels and positive/negative frequency-dependence split
# (sign of the fit conformity exponent theta)
group_size_vs_parameters$task_name <- '1-risky-1-safe'
group_size_vs_parameters$task_name[which(group_size_vs_parameters$riskDistributionId_factor=='Con: 0')] <- '1-risky-3-safe'
group_size_vs_parameters$task_name[which(group_size_vs_parameters$riskDistributionId_factor=='Con: 2')] <- '2-risky-2-safe'
group_size_vs_parameters$pos_vs_neg <- 'Positive frequency dependence'
group_size_vs_parameters$pos_vs_neg[which(group_size_vs_parameters$theta_mean_SL00_multiVar_LKJ < 0)] <- 'Negative frequency dependence'
group_size_vs_parameters$pos_vs_neg <- factor(group_size_vs_parameters$pos_vs_neg, levels=c('Positive frequency dependence', 'Negative frequency dependence'))
## just correlation
# Correlations between group size and the fit social-learning parameters,
# among positive-frequency-dependent subjects only, separately per task.
# Each cor.test() call sits at top level so its result auto-prints, exactly
# as in the repeated-filter form this replaces.
pos_freq <- filter(group_size_vs_parameters, pos_vs_neg=='Positive frequency dependence')
pos_11 <- filter(pos_freq, task_name=='1-risky-1-safe')
cor.test(pos_11$groupSize, pos_11$soc_mean_SL00_multiVar_LKJ)
cor.test(pos_11$groupSize, pos_11$theta_mean_SL00_multiVar_LKJ)
pos_13 <- filter(pos_freq, task_name=='1-risky-3-safe')
cor.test(pos_13$groupSize, pos_13$soc_mean_SL00_multiVar_LKJ)
cor.test(pos_13$groupSize, pos_13$theta_mean_SL00_multiVar_LKJ)
pos_22 <- filter(pos_freq, task_name=='2-risky-2-safe')
cor.test(pos_22$groupSize, pos_22$soc_mean_SL00_multiVar_LKJ)
cor.test(pos_22$groupSize, pos_22$theta_mean_SL00_multiVar_LKJ)
# Group size vs. fit copying weight, with per-subgroup linear trends
group_size_vs_parameters %>%
ggplot(aes(groupSize, soc_mean_SL00_multiVar_LKJ, colour=pos_vs_neg, shape=pos_vs_neg)) +
geom_point(aes(groupSize, soc_mean_SL00_multiVar_LKJ, colour=pos_vs_neg, shape=pos_vs_neg))+
geom_smooth(geom='line',se=FALSE, method='lm')+
scale_colour_manual(name='', values=c('red','blue'))+
scale_shape_manual(name='', values=c(2, 18))+
myTheme_Arial()+
facet_grid(. ~ task_name)+
labs(x = 'Group Size', y = 'Fit social learning weight \U03C3')+
border()+
NULL
# Group size vs. fit conformity exponent, same layout
group_size_vs_parameters %>%
ggplot(aes(groupSize, theta_mean_SL00_multiVar_LKJ, colour=pos_vs_neg, shape=pos_vs_neg)) +
geom_point()+
geom_smooth(geom='line',se=FALSE, method='lm')+
scale_colour_manual(name='', values=c('red','blue'))+
scale_shape_manual(name='', values=c(2, 18))+
myTheme_Arial()+
facet_grid(. ~ task_name)+
labs(x = 'Group Size', y = 'Fit conformity exponent \U03B8')+
border()+
NULL
|
# Render the R Markdown source into a markdown file one directory up.
rmarkdown::render('rmd/accessing_files_in_s3.Rmd', output_file = '../accessing_files_in_s3.md')
| /resources/render_script/render_all_to_md.R | no_license | moj-analytical-services/platform_user_guidance | R | false | false | 96 | r | rmarkdown::render('rmd/accessing_files_in_s3.Rmd', output_file = '../accessing_files_in_s3.md')
|
library(XML)
library(plyr)
library(dplyr)
library(stringr)
library(tidyr)
library(zoo)
library(data.table)
source("functions.R")
# Interactive prompts (disabled; values hard-coded for batch runs)
# gameid<-readline('Enter Game Code: (e.g., 2016061909) ')
# location<-readline('At Work? (Y/N) ')
gameid<-"2016042306"
location<-"N"
# Rename game to make more easily understandable
games<-fread("C:/Users/brocatoj/Documents/Basketball/Tracking/games.csv")
games<-games[stats_id==gameid,.(stats_id,id,game)]
game_name<-games$game[1]
game_code<-games$id[1]
# EXTRACT PBP
# Choose the play-by-play XML path: local copy when location == "N",
# otherwise the S: network drive. (The original used a scalar ifelse() purely
# for its assignment side effects -- an R anti-pattern; a plain if/else does
# the same thing explicitly and is the idiomatic scalar branch.)
if (location == "N") {
  pbp <- paste0("C:/Users/brocatoj/Documents/Basketball/svu/2016/xml/",
                "NBA_FINALPBP_EXP$", gameid, ".XML")
} else {
  pbp <- paste0("S:/NBA_FINALPBP_EXP$", gameid, ".XML")
}
# Parse the XML and flatten every <play> node's attributes into a data.table
# (one row per play event)
pbp<-xmlTreeParse(pbp,useInternalNodes = T)
pbp<-xmlRoot(pbp)
pbp<-as.data.table(do.call(rbind,xpathApply(pbp,"//play",xmlAttrs)))
# CLEAN PBP
# Add team codes
teams<-fread("C:/Users/brocatoj/Documents/Basketball/Tracking/teams.csv")
teams<-teams[,.(abbrev,stats_abbrev)]
setnames(teams,c("team","team-alias-1"))
setkey(teams,`team-alias-1`)
# Create columns for game, date, home team, and away team
# (home/away abbreviations are sliced out of fixed positions in game_name,
# which has the form "YYYY-MM-DD AWY@HOM" -- TODO confirm the exact format)
pbp[,game:=game_name]
pbp[,game_code:=game_code]
pbp[,date:=as.Date(substr(game_name,1,10))]
pbp[,home:=substr(game_name,16,18)]
pbp[,away:=substr(game_name,12,14)]
pbp[,season:=substr(game_name,1,4)]
setnames(pbp,"id","stats_id")
# Create team abbrev column for each event, remove starting lineup
pbp<-left_join(pbp,teams,by="team-alias-1");setDT(pbp)
pbp<-pbp[`textual-description`!="Starting Lineup"]
# Create column indicating whether a possession ended.
# Heuristics on event-id codes and the next row (shift type="lead"):
# the last event of a period ends a possession; a rebound immediately
# followed by a shooting foul on the same player does not; free throws
# only end the possession on the final attempt ("1 of 1", "2 of 2", "3 of 3").
# Event-id semantics come from the events.csv lookup joined below -- confirm
# individual codes there.
pbp[,poss:=ifelse(is.na(shift(stats_id,type="lead"))&`event-id`%in%c(1,3,5,6), 1,
ifelse(`event-id`==3&
str_count(shift(`detail-description`,type="lead"),"Shooting")
&`player-id-1`==shift(`player-id-3`,type="lead"),0,
ifelse(`event-id`%in%c(3,6,7)|
`event-id`==5&
str_count(shift(`event-description`,type="lead"),"End ")|
`event-id`==1&(
str_count(`detail-description`,"1 of 1")|
str_count(`detail-description`,"2 of 2")|
str_count(`detail-description`,"3 of 3")),1,0)))]
# Create column indicating whether the away team had the possession
# (event-id 6 flips the attribution: the event is credited to the other team)
pbp[,aposs:=ifelse(`event-id`==6,ifelse(team==home&poss==1,1,0),
ifelse(team==away&poss==1,1,0))]
# Create column indicating whether the home team had the possession
pbp[,hposs:=ifelse(`event-id`==6,ifelse(team==away&poss==1,1,0),
ifelse(team==home&poss==1,1,0))]
# Create column indicating the id of each possession for the game
pbp$stats_id<-as.numeric(pbp$stats_id)
pbp[,possession_id:=1]
pbp <- within(pbp, possession_id <- pmax(cumsum(poss),1))
pbp[,possession_id:=ifelse(poss==1,possession_id,NA)]
pbp$possession_id[nrow(pbp)]<-tail(na.omit(pbp$possession_id),1)
pbp[,possession_id:=na.locf(possession_id,fromLast = T)]
pbp[,possession_id:=possession_id-1]
pbp[,chance_id:=0]
pbp[,chance:=ifelse(shift(`event-id`)%in%c(5,8,9,10,11,12,13,16,17,18),1,0)]
pbp[,chance:=ifelse(`event-id`==10,shift(chance),chance)]
pbp[,chance_id:=cumsum(chance), by=possession_id]
pbp[,possession_id:=paste0(game_code,quarter,"_",possession_id)]
pbp[,chance_id:=paste0(possession_id,"_",chance_id)]
pbp[,chance:=NULL]
#Simplify PBP frame
# Build a per-event id (game code + quarter + 0-based row index) and a
# game clock in whole seconds remaining.
pbp[,id:=rownames(pbp)]
pbp[,id:=paste0(game_code,quarter,as.numeric(id)-1)]
pbp[,pbp_game_clock:=round(as.numeric(as.character(`time-minutes`))*60+
                             as.numeric(as.character(`time-seconds`)),0)]
# Prefer the secondary player id; fall back to the tertiary one when empty.
pbp[,player_id2:=ifelse(`global-player-id-2`=="",
                        `global-player-id-3`,`global-player-id-2`)]
# Fill possession side backwards so every event knows whose possession it
# belongs to, then derive offensive team / opponent from it.
pbp[,poss:=ifelse(hposs==1,"home",ifelse(aposs==1,"away",NA))]
pbp[,poss:=na.locf(poss,fromLast = T)]
pbp[,team:=ifelse(poss=="home",home,away)]
pbp[,opponent:=ifelse(poss=="home",away,home)]
# Keep only the analysis columns and rename to snake_case.
pbp<-pbp[,.(game,game_code,season,date,id,stats_id,quarter,pbp_game_clock,
        team,opponent,`position-id`,`global-player-id-1`,player_id2,`event-id`,
        `detail-id`,distance,`x-shot-coord`,`y-shot-coord`,fastbreak,
        `in-paint`,`second-chance`,`off-turnover`,`visitor-fouls`,
        `home-fouls`,possession_id,chance_id,`oncourt-id`,`points-type`)]
setnames(pbp,c("quarter","oncourt-id","position-id","global-player-id-1",
           "event-id","detail-id","x-shot-coord","y-shot-coord","in-paint",
           "second-chance","off-turnover","visitor-fouls","home-fouls"),
         c("period","oncourt_id","text","player_id1","event_id","detail_id",
           "shot_x","shot_y","pitp","2nd_pts","pts_off_tov","away_fouls",
           "home_fouls"))
# Map numeric event codes to names, then specialize made (3) and missed
# (4) shots into 2PM/3PM / 2PX/3PX using the points-type column.
events<-fread("C:/Users/brocatoj/Documents/Basketball/Tracking/events.csv")
pbp<-left_join(pbp,events,by="event_id");setDT(pbp)
pbp[,event:=ifelse(event_id==3,ifelse(`points-type`==3,"3PM","2PM"),event)]
pbp[,event:=ifelse(event_id==4,ifelse(`points-type`==3,"3PX","2PX"),event)]
# Reorder: drop the raw event_id position in favor of the event label.
pbp<-pbp[,c(1:13,29,15:27),with=F]
# EXTRACT TRACKING DATA
# Read one optical-tracking XML per period (Q1..Q10 / OT1..OT5), stopping
# at the first period for which no file exists locally or on the S: drive.
# Each <moment> node becomes one row; idx is a 0-based frame counter
# within the period.
svu<-data.table()
for(i in c("Q1","Q2","Q3","Q4","Q5","Q6","Q7","Q8","Q9","Q10",
           "OT1","OT2","OT3","OT4","OT5")){
  if(!file.exists(
    paste0("C:/Users/brocatoj/Documents/Basketball/svu/2016/xml/",
           "NBA_FINAL_SEQUENCE_OPTICAL$",gameid,"_",i,".XML"))&
    !file.exists(paste0("S:/NBA_FINAL_SEQUENCE_OPTICAL$",gameid,"_",i,".XML")))
    break
  # location == "N" means reading from the local mirror rather than S:.
  ifelse(location=="N",
         q<-paste0("C:/Users/brocatoj/Documents/Basketball/svu/2016/xml/",
                   "NBA_FINAL_SEQUENCE_OPTICAL$",gameid,"_",i,".XML"),
         q<-paste0("S:/NBA_FINAL_SEQUENCE_OPTICAL$",gameid,"_",i,".XML"))
  q<-xmlRoot(xmlTreeParse(q,useInternalNodes = T))
  q<-as.data.table(do.call(rbind,xpathApply(q,"//moment",xmlAttrs)))
  q[,idx:=rownames(q)][,idx:=as.numeric(idx)-1]
  svu<-rbind(svu,q)
  rm(q)
}
# CLEAN TRACKING DATA
# Each "locations" string holds semicolon-separated entities (ball + 10
# players), each entity being comma-separated fields. A frame with only
# 9 semicolons is missing the ball entity, so a placeholder ball record
# is prepended to keep the column count constant.
svu[,locations:=ifelse(str_count(locations,";")==9,
                       paste0("-1,-1,0,0,0;",locations),locations)]
# NOTE(review): result of this subset is discarded -- looks like a
# leftover interactive sanity check; consider removing.
svu[str_count(locations,";")==10]
# Split into per-entity columns (ball, home players hp1-hp5, away players
# ap1-ap5; _tm = team id, _x/_y/_z = coords), then drop the team-id and
# z columns for players and the ball's team/id fields.
svu<-svu %>%
  separate(locations,c("ball_tm","ball","ball_x","ball_y","ball_z","hp1_tm",
                       "hp1","hp1_x","hp1_y","hp1_z","hp2_tm","hp2","hp2_x",
                       "hp2_y","hp2_z","hp3_tm","hp3","hp3_x","hp3_y","hp3_z",
                       "hp4_tm","hp4","hp4_x","hp4_y","hp4_z","hp5_tm","hp5",
                       "hp5_x","hp5_y","hp5_z","ap1_tm","ap1","ap1_x","ap1_y",
                       "ap1_z","ap2_tm","ap2","ap2_x","ap2_y","ap2_z","ap3_tm",
                       "ap3","ap3_x","ap3_y","ap3_z","ap4_tm","ap4","ap4_x",
                       "ap4_y","ap4_z","ap5_tm","ap5","ap5_x","ap5_y",
                       "ap5_z"),sep="\\,|\\;") %>%
  select(-ball_tm,-ball,-hp1_tm,-hp1_z,-hp2_tm,-hp2_z,-hp3_tm,-hp3_z,-hp4_tm,
         -hp4_z,-hp5_tm,-hp5_z,-ap1_tm,-ap1_z,-ap2_tm,-ap2_z,-ap3_tm,-ap3_z,
         -ap4_tm,-ap4_z,-ap5_tm,-ap5_z)
# Move idx to the front and rename to camel/snake case used downstream.
svu<-svu[,c(38,1:37),with=F]
setnames(svu,c("time","game-clock","game-event-id","shot-clock"),
         c("utcTime","gameClock","game_event_id","shot_clock"))
# EXTRACT EXTRA INFO
# The PBP_OPTICAL file links tracking moments (utcTime) to Stats event
# ids, player ids and the shot clock.
ifelse(location=="N",
       info<-paste0("C:/Users/brocatoj/Documents/Basketball/svu/2016/xml/",
                    "NBA_FINAL_SEQUENCE_PBP_OPTICAL$",gameid, ".XML"),
       info<-paste0("S:/NBA_FINAL_SEQUENCE_PBP_OPTICAL$",gameid,".XML"))
info<-xmlTreeParse(info,useInternalNodes = T)
info<-xmlRoot(info)
info<-as.data.table(do.call(rbind,xpathApply(info,"//moment",xmlAttrs)))
setnames(info,c("event_id","gameClock","utcTime","p_id","player_id",
                "stats_id","shot_clock"))
# ATTACH EXTRA INFO TO PBP
# Shot clock at the moment of each play, matched by Stats event id.
info2<-info[stats_id!="",.(stats_id,shot_clock)]
pbp<-join(pbp,info2,by="stats_id",match="first");rm(info2)
# ATTACH EXTRA INFO TO TRACKING FRAME
info<-info[,c(1,3:6),with=F]
svu<-join(svu,info,by="utcTime",match="first")
# ATTACH PBP INFO TO TRACKING FRAME
# play = 1 flags shooting fouls (event FOUL, detail 2 -- presumably
# "shooting"; confirm against the details reference).
pbp2<-pbp[,.(stats_id,period,event,detail_id)]
pbp2[,play:=ifelse(event=="FOUL"&detail_id==2,1,0)]
pbp2<-pbp2[,.(stats_id,period,play)]
svu<-join(svu,pbp2,by="stats_id",match="first")
# Ensure the final frame has a period so the backwards fill below works.
svu$period[nrow(svu)]<-
  ifelse(is.na(svu$period[nrow(svu)]),
         as.numeric(distinct(svu,period)[nrow(distinct(svu,period)),]),
         svu$period[nrow(svu)])
setDT(svu)
svu[,period:=na.locf(period,fromLast=T)]
# Reorder columns, coerce everything numeric, and recode shooting fouls
# to event code 29 so they are distinguishable downstream.
svu<-svu[,c(3,1:2,43,4,6:40,42,44),with=F]
svu <- svu[, lapply(.SD, as.numeric)]
svu[,play:=ifelse(is.na(play),0,play)]
svu[,event_id:=ifelse(play==1,29,event_id)]
svu<-svu[,1:41,with=F]
# fwrite(svu,paste0("J:/svu/2016/svu/",game_name,"_svu.csv"))
# fwrite(pbp,paste0("J:/svu/2016/pbp/",game_name,"_pbp.csv"))
# fwrite(pbp,paste0("J:/svu/2016/pbp/",game_name,"_pbp.csv")) | /extract_data.R | no_license | cbirdman/tracking | R | false | false | 9,004 | r | library(XML)
library(plyr)
library(dplyr)
library(stringr)
library(tidyr)
library(zoo)
library(data.table)
source("functions.R")
# gameid<-readline('Enter Game Code: (e.g., 2016061909) ')
# location<-readline('At Work? (Y/N) ')
# Hard-coded game/location for batch runs; uncomment the readline()
# prompts above for interactive use. location "N" selects the local
# mirror of the XML files instead of the S: network drive.
gameid<-"2016042306"
location<-"N"
# Rename game to make more easily understandable
# games.csv maps the Stats game code to an internal id and a readable
# game name (used later to parse date and team codes).
games<-fread("C:/Users/brocatoj/Documents/Basketball/Tracking/games.csv")
games<-games[stats_id==gameid,.(stats_id,id,game)]
game_name<-games$game[1]
game_code<-games$id[1]
# EXTRACT PBP
ifelse(location=="N",
       pbp<-paste0("C:/Users/brocatoj/Documents/Basketball/svu/2016/xml/",
                   "NBA_FINALPBP_EXP$",gameid, ".XML"),
       pbp<-paste0("S:/NBA_FINALPBP_EXP$",gameid,".XML"))
pbp<-xmlTreeParse(pbp,useInternalNodes = T)
pbp<-xmlRoot(pbp)
# One row per <play> node; attributes become columns.
pbp<-as.data.table(do.call(rbind,xpathApply(pbp,"//play",xmlAttrs)))
# CLEAN PBP
# Add team codes
teams<-fread("C:/Users/brocatoj/Documents/Basketball/Tracking/teams.csv")
teams<-teams[,.(abbrev,stats_abbrev)]
setnames(teams,c("team","team-alias-1"))
setkey(teams,`team-alias-1`)
# Create columns for game, date, home team, and away team
pbp[,game:=game_name]
pbp[,game_code:=game_code]
pbp[,date:=as.Date(substr(game_name,1,10))]
pbp[,home:=substr(game_name,16,18)]
pbp[,away:=substr(game_name,12,14)]
pbp[,season:=substr(game_name,1,4)]
setnames(pbp,"id","stats_id")
# Create team abbrev column for each event, remove starting lineup
pbp<-left_join(pbp,teams,by="team-alias-1");setDT(pbp)
pbp<-pbp[`textual-description`!="Starting Lineup"]
# Create column indicating whether a possession ended
pbp[,poss:=ifelse(is.na(shift(stats_id,type="lead"))&`event-id`%in%c(1,3,5,6), 1,
ifelse(`event-id`==3&
str_count(shift(`detail-description`,type="lead"),"Shooting")
&`player-id-1`==shift(`player-id-3`,type="lead"),0,
ifelse(`event-id`%in%c(3,6,7)|
`event-id`==5&
str_count(shift(`event-description`,type="lead"),"End ")|
`event-id`==1&(
str_count(`detail-description`,"1 of 1")|
str_count(`detail-description`,"2 of 2")|
str_count(`detail-description`,"3 of 3")),1,0)))]
# Create column indicating whether the away team had the possession
pbp[,aposs:=ifelse(`event-id`==6,ifelse(team==home&poss==1,1,0),
ifelse(team==away&poss==1,1,0))]
# Create column indicating whether the home team had the possession
pbp[,hposs:=ifelse(`event-id`==6,ifelse(team==away&poss==1,1,0),
ifelse(team==home&poss==1,1,0))]
# Create column indicating the id of each possession for the game
pbp$stats_id<-as.numeric(pbp$stats_id)
pbp[,possession_id:=1]
pbp <- within(pbp, possession_id <- pmax(cumsum(poss),1))
pbp[,possession_id:=ifelse(poss==1,possession_id,NA)]
pbp$possession_id[nrow(pbp)]<-tail(na.omit(pbp$possession_id),1)
pbp[,possession_id:=na.locf(possession_id,fromLast = T)]
pbp[,possession_id:=possession_id-1]
pbp[,chance_id:=0]
pbp[,chance:=ifelse(shift(`event-id`)%in%c(5,8,9,10,11,12,13,16,17,18),1,0)]
pbp[,chance:=ifelse(`event-id`==10,shift(chance),chance)]
pbp[,chance_id:=cumsum(chance), by=possession_id]
pbp[,possession_id:=paste0(game_code,quarter,"_",possession_id)]
pbp[,chance_id:=paste0(possession_id,"_",chance_id)]
pbp[,chance:=NULL]
#Simplify PBP frame
pbp[,id:=rownames(pbp)]
pbp[,id:=paste0(game_code,quarter,as.numeric(id)-1)]
pbp[,pbp_game_clock:=round(as.numeric(as.character(`time-minutes`))*60+
as.numeric(as.character(`time-seconds`)),0)]
pbp[,player_id2:=ifelse(`global-player-id-2`=="",
`global-player-id-3`,`global-player-id-2`)]
pbp[,poss:=ifelse(hposs==1,"home",ifelse(aposs==1,"away",NA))]
pbp[,poss:=na.locf(poss,fromLast = T)]
pbp[,team:=ifelse(poss=="home",home,away)]
pbp[,opponent:=ifelse(poss=="home",away,home)]
pbp<-pbp[,.(game,game_code,season,date,id,stats_id,quarter,pbp_game_clock,
team,opponent,`position-id`,`global-player-id-1`,player_id2,`event-id`,
`detail-id`,distance,`x-shot-coord`,`y-shot-coord`,fastbreak,
`in-paint`,`second-chance`,`off-turnover`,`visitor-fouls`,
`home-fouls`,possession_id,chance_id,`oncourt-id`,`points-type`)]
setnames(pbp,c("quarter","oncourt-id","position-id","global-player-id-1",
"event-id","detail-id","x-shot-coord","y-shot-coord","in-paint",
"second-chance","off-turnover","visitor-fouls","home-fouls"),
c("period","oncourt_id","text","player_id1","event_id","detail_id",
"shot_x","shot_y","pitp","2nd_pts","pts_off_tov","away_fouls",
"home_fouls"))
events<-fread("C:/Users/brocatoj/Documents/Basketball/Tracking/events.csv")
pbp<-left_join(pbp,events,by="event_id");setDT(pbp)
pbp[,event:=ifelse(event_id==3,ifelse(`points-type`==3,"3PM","2PM"),event)]
pbp[,event:=ifelse(event_id==4,ifelse(`points-type`==3,"3PX","2PX"),event)]
pbp<-pbp[,c(1:13,29,15:27),with=F]
# EXTRACT TRACKING DATA
svu<-data.table()
for(i in c("Q1","Q2","Q3","Q4","Q5","Q6","Q7","Q8","Q9","Q10",
"OT1","OT2","OT3","OT4","OT5")){
if(!file.exists(
paste0("C:/Users/brocatoj/Documents/Basketball/svu/2016/xml/",
"NBA_FINAL_SEQUENCE_OPTICAL$",gameid,"_",i,".XML"))&
!file.exists(paste0("S:/NBA_FINAL_SEQUENCE_OPTICAL$",gameid,"_",i,".XML")))
break
ifelse(location=="N",
q<-paste0("C:/Users/brocatoj/Documents/Basketball/svu/2016/xml/",
"NBA_FINAL_SEQUENCE_OPTICAL$",gameid,"_",i,".XML"),
q<-paste0("S:/NBA_FINAL_SEQUENCE_OPTICAL$",gameid,"_",i,".XML"))
q<-xmlRoot(xmlTreeParse(q,useInternalNodes = T))
q<-as.data.table(do.call(rbind,xpathApply(q,"//moment",xmlAttrs)))
q[,idx:=rownames(q)][,idx:=as.numeric(idx)-1]
svu<-rbind(svu,q)
rm(q)
}
# CLEAN TRACKING DATA
svu[,locations:=ifelse(str_count(locations,";")==9,
paste0("-1,-1,0,0,0;",locations),locations)]
svu[str_count(locations,";")==10]
svu<-svu %>%
separate(locations,c("ball_tm","ball","ball_x","ball_y","ball_z","hp1_tm",
"hp1","hp1_x","hp1_y","hp1_z","hp2_tm","hp2","hp2_x",
"hp2_y","hp2_z","hp3_tm","hp3","hp3_x","hp3_y","hp3_z",
"hp4_tm","hp4","hp4_x","hp4_y","hp4_z","hp5_tm","hp5",
"hp5_x","hp5_y","hp5_z","ap1_tm","ap1","ap1_x","ap1_y",
"ap1_z","ap2_tm","ap2","ap2_x","ap2_y","ap2_z","ap3_tm",
"ap3","ap3_x","ap3_y","ap3_z","ap4_tm","ap4","ap4_x",
"ap4_y","ap4_z","ap5_tm","ap5","ap5_x","ap5_y",
"ap5_z"),sep="\\,|\\;") %>%
select(-ball_tm,-ball,-hp1_tm,-hp1_z,-hp2_tm,-hp2_z,-hp3_tm,-hp3_z,-hp4_tm,
-hp4_z,-hp5_tm,-hp5_z,-ap1_tm,-ap1_z,-ap2_tm,-ap2_z,-ap3_tm,-ap3_z,
-ap4_tm,-ap4_z,-ap5_tm,-ap5_z)
svu<-svu[,c(38,1:37),with=F]
setnames(svu,c("time","game-clock","game-event-id","shot-clock"),
c("utcTime","gameClock","game_event_id","shot_clock"))
# EXTRACT EXTRA INFO
ifelse(location=="N",
info<-paste0("C:/Users/brocatoj/Documents/Basketball/svu/2016/xml/",
"NBA_FINAL_SEQUENCE_PBP_OPTICAL$",gameid, ".XML"),
info<-paste0("S:/NBA_FINAL_SEQUENCE_PBP_OPTICAL$",gameid,".XML"))
info<-xmlTreeParse(info,useInternalNodes = T)
info<-xmlRoot(info)
info<-as.data.table(do.call(rbind,xpathApply(info,"//moment",xmlAttrs)))
setnames(info,c("event_id","gameClock","utcTime","p_id","player_id",
"stats_id","shot_clock"))
# ATTACH EXTRA INFO TO PBP
info2<-info[stats_id!="",.(stats_id,shot_clock)]
pbp<-join(pbp,info2,by="stats_id",match="first");rm(info2)
# ATTACH EXTRA INFO TO TRACKING FRAME
info<-info[,c(1,3:6),with=F]
svu<-join(svu,info,by="utcTime",match="first")
# ATTACH PBP INFO TO TRACKING FRAME
pbp2<-pbp[,.(stats_id,period,event,detail_id)]
pbp2[,play:=ifelse(event=="FOUL"&detail_id==2,1,0)]
pbp2<-pbp2[,.(stats_id,period,play)]
svu<-join(svu,pbp2,by="stats_id",match="first")
svu$period[nrow(svu)]<-
ifelse(is.na(svu$period[nrow(svu)]),
as.numeric(distinct(svu,period)[nrow(distinct(svu,period)),]),
svu$period[nrow(svu)])
setDT(svu)
svu[,period:=na.locf(period,fromLast=T)]
svu<-svu[,c(3,1:2,43,4,6:40,42,44),with=F]
svu <- svu[, lapply(.SD, as.numeric)]
svu[,play:=ifelse(is.na(play),0,play)]
svu[,event_id:=ifelse(play==1,29,event_id)]
svu<-svu[,1:41,with=F]
# fwrite(svu,paste0("J:/svu/2016/svu/",game_name,"_svu.csv"))
# fwrite(pbp,paste0("J:/svu/2016/pbp/",game_name,"_pbp.csv")) |
library(rvest)
library(readr)
# Scrape per-species genome stats from Ensembl for every species listed
# in organisms.tsv and write the augmented table back out.
orgs <- read_tsv("organisms.tsv")
# NOTE(review): getdetails is defined *below* this call; sourcing this
# script top-to-bottom fails with "could not find function" -- move the
# function definition above this line.
res <- lapply(orgs$species, getdetails)
details <- do.call(rbind, res)
details
# Sanity check: scraped rows must line up with the input species order.
stopifnot(orgs$species == details$org)
orgs <- cbind(orgs, details[,-1])
write_tsv(orgs, "organisms_genes.tsv")
getdetails <- function(org) {
  # Fetch the Ensembl annotation page for one species and return a
  # one-row data frame: org, genome length (bp), coding / non-coding /
  # pseudo gene counts, and transcript count.
  fetch_page <- function(species, mirror) {
    read_html(paste0("http://", mirror, ".ensembl.org/",
                     species, "/Info/Annotation"))
  }
  # Try the main vertebrate mirror first; fall back to Ensembl Plants.
  page <- tryCatch(fetch_page(org, "www"),
                   error = function(e) fetch_page(org, "plants"))
  # All summary tables on the page, parsed into data frames.
  tabs <- html_table(html_nodes(page, "table"))
  # Pull the numeric value from the first row of `tab` whose first
  # column matches `pattern`: take column 2, keep the first
  # whitespace-separated token, strip thousands separators.
  stat_from <- function(tab, pattern) {
    cell <- tab[grep(pattern, tab[, 1])[1], 2]
    first_token <- sub("(.*?) .*", "\\1", cell)
    as.numeric(gsub(",", "", first_token))
  }
  # Some species pages lead with a "Number of gene models" table, which
  # shifts the assembly and gene-count tables down by one slot.
  offset <- if (colnames(tabs[[1]])[2] == "Number of gene models") 1 else 0
  bp        <- stat_from(tabs[[1 + offset]], "Golden Path Length")
  coding    <- stat_from(tabs[[2 + offset]], "Coding genes")
  noncoding <- stat_from(tabs[[2 + offset]], "Non coding genes")
  pseudo    <- stat_from(tabs[[2 + offset]], "Pseudogenes")
  tx        <- stat_from(tabs[[2 + offset]], "Gene transcripts")
  data.frame(org, bp, coding, noncoding, pseudo, tx)
}
| /eda/scrape_ensembl.R | permissive | Frikuto/compbio_src | R | false | false | 1,157 | r | library(rvest)
library(readr)
orgs <- read_tsv("organisms.tsv")
res <- lapply(orgs$species, getdetails)
details <- do.call(rbind, res)
details
stopifnot(orgs$species == details$org)
orgs <- cbind(orgs, details[,-1])
write_tsv(orgs, "organisms_genes.tsv")
# Scrape one species' annotation page from Ensembl and return a one-row
# data frame of genome stats (length in bp, gene and transcript counts).
getdetails <- function(org) {
  readit <- function(org, prefix) {
    read_html(paste0("http://",prefix,".ensembl.org/",
                     org, "/Info/Annotation"))
  }
  # Try the main vertebrate mirror; on error fall back to Ensembl Plants.
  page <- tryCatch(readit(org, "www"),
                   error=function(e) readit(org, "plants"))
  tabs <- page %>% html_nodes("table") %>% html_table()
  # Keep only the first whitespace-separated token of a table cell.
  cutafterspace <- function(x) sub("(.*?) .*","\\1",x)
  # Numeric value from the first row of `tab` matching pattern `x`
  # (commas stripped before conversion).
  getit <- function(tab,x) {
    text <- tab[grep(x,tab[,1])[1],2]
    as.numeric(gsub(",","",cutafterspace(text)))
  }
  # Some pages lead with a "gene models" table, shifting the others by one.
  offset <- if (colnames(tabs[[1]])[2] == "Number of gene models") 1 else 0
  bp <- getit(tabs[[1+offset]], "Golden Path Length")
  coding <- getit(tabs[[2+offset]], "Coding genes")
  noncoding <- getit(tabs[[2+offset]], "Non coding genes")
  pseudo <- getit(tabs[[2+offset]], "Pseudogenes")
  tx <- getit(tabs[[2+offset]], "Gene transcripts")
  data.frame(org, bp, coding, noncoding, pseudo, tx)
}
|
neutral_community_model <- function(nspecies=8, gridsize=20, time=100, pdf=FALSE, plotpop=TRUE, sleep=0.5, density=FALSE, liveplot=TRUE){
  # Hubbell-style neutral (lottery) community model, after Duncan Golicher:
  # http://duncanjg.wordpress.com/2008/03/07/a-simple-illustration-of-an-ecological-lottery-model-in-r/
  #
  # Each time step the whole gridsize x gridsize community is resampled,
  # with recruitment probability proportional to current relative
  # abundance (neutral lottery), or to 1 - abundance when density=TRUE
  # (negative density dependence favouring rare species).
  #
  # Arguments:
  #   nspecies  number of species in the pool
  #   gridsize  side length of the square community grid
  #   time      number of time steps to simulate
  #   pdf       write one PDF snapshot per step into ./temp/
  #   plotpop   draw the species-abundance trajectories at the end
  #   sleep     pause (seconds) between live-plot frames
  #   density   use negative density dependence instead of the neutral model
  #   liveplot  animate the grid on the active graphics device
  # Returns: list(world = list of per-step grids, pop = time x nspecies
  #   abundance matrix).
  res <- vector("list", time)          # preallocate one snapshot per step
  X <- matrix(0, nspecies, time)       # abundance of each species per step
  mypalette <- rainbow(nspecies)
  # Initial community: every cell drawn uniformly from the species pool.
  mat <- sample(seq_len(nspecies), gridsize * gridsize, replace = TRUE)
  res[[1]] <- mat
  X[, 1] <- table(factor(mat, levels = seq_len(nspecies)))
  if (liveplot) {
    image(matrix(mat, gridsize, gridsize), col = mypalette[sort(unique(mat))])
  }
  # showWarnings = FALSE: re-running the model must not warn (or stop)
  # just because ./temp already exists.
  if (pdf) dir.create("temp", showWarnings = FALSE)
  # seq_len(time)[-1] is empty for time <= 1, unlike 2:time which would
  # count down (2, 1) and index out of bounds.
  for (i in seq_len(time)[-1]) {
    a <- X[, i - 1] / sum(X[, i - 1])  # relative abundances at t - 1
    if (!density) {
      mat <- sample(seq_len(nspecies), gridsize * gridsize, prob = a,
                    replace = TRUE)
    } else {
      mat <- sample(seq_len(nspecies), gridsize * gridsize, prob = 1 - a,
                    replace = TRUE)
    }
    X[, i] <- table(factor(mat, levels = seq_len(nspecies)))
    if (liveplot) {
      image(matrix(mat, gridsize, gridsize),
            col = mypalette[sort(unique(mat))], main = i)
      Sys.sleep(sleep)                 # pacing only matters when animating
    }
    res[[i]] <- mat
    if (pdf) {
      pdf(paste0("temp/mat_", i, ".pdf"))
      # Use the same palette as the live plot so colours stay consistent
      # between the animation and the saved snapshots.
      image(matrix(mat, gridsize, gridsize),
            col = mypalette[sort(unique(mat))])
      dev.off()
    }
  }
  if (plotpop) matplot(t(X), type = "l", lwd = 2, col = mypalette)
  list(world = res, pop = t(X))
}
| /R/neutral_community_model.R | no_license | KIT-IfGG/pets | R | false | false | 1,411 | r |
# Hubbell-style neutral (lottery) community model: each step resamples
# the whole grid with recruitment proportional to relative abundance
# (or to 1 - abundance when density=TRUE). Returns list(world, pop).
neutral_community_model <-function(nspecies=8, gridsize=20, time=100, pdf=FALSE, plotpop=TRUE, sleep=0.5, density=FALSE, liveplot=TRUE){
res <- list()
# Duncan Golicher’s weblog
# http://duncanjg.wordpress.com/2008/03/07/a-simple-illustration-of-an-ecological-lottery-model-in-r/
#
# A simple illustration of an ecological lottery model in R
#
# The neutral community model of Stephen Hubbell.
# X holds the abundance of every species at each time step.
X <- matrix(0, nspecies, time)
mypalette <- rainbow(nspecies)
# Initial community: each cell drawn uniformly from the species pool.
mat <- sample(1:nspecies, gridsize * gridsize, replace=T)
res[[1]] <- mat
X[,1] <- table(factor(mat, levels=1:nspecies))
if(liveplot) image(matrix(mat, gridsize, gridsize), col=mypalette[sort(unique(mat))])
# NOTE(review): dir.create warns if ./temp already exists.
if(pdf) dir.create("temp")
# NOTE(review): 2:time counts down for time == 1 (2:1), indexing out of
# bounds; callers must pass time >= 2.
for (i in 2:time){
# Relative abundances at the previous step drive recruitment.
a <- X[,i-1]/sum(X[,i-1])
if(!density){
mat <- sample(1:nspecies, gridsize * gridsize, prob=a, replace=T)
} else {
mat <- sample(1:nspecies, gridsize * gridsize, prob=1-a, replace=T)
}
X[,i] <- table(factor(mat, levels=1:nspecies))
if(liveplot) image(matrix(mat, gridsize,gridsize), col=mypalette[sort(unique(mat))], main=i)
Sys.sleep(sleep)
res[[i]] <- mat
if(pdf) {
pdf(paste0("temp/mat_", i, ".pdf"))
# NOTE(review): PDF snapshots use raw indices as colours, not mypalette,
# so they do not match the live plot's colouring.
image(matrix(mat, gridsize,gridsize), col=sort(unique(mat)))
dev.off()
}
gc()
}
if(plotpop==TRUE) matplot(t(X), type="l", lwd=2, col=mypalette)
list(world=res, pop=t(X))
}
|
#' Preferred Bean Varieties in Nicaragua
#'
#' This is a subset of data from trials of bean varieties
#' (\emph{Phaseolus vulgaris} L.)
#' in Nicaragua over five growing seasons. Farmers were asked to try three
#' varieties of bean from a total of ten varieties and to rank them in order of
#' preference. In addition, for each variety the farmers were asked to compare
#' each trial variety to the local variety and state whether they considered it
#' to be better or worse.
#'
#' There are three crop seasons in Central America:
#' \describe{
#' \item{Primera}{May - August.}
#' \item{Postrera}{September - October.}
#' \item{Apante}{November - January.}
#' }
#' Beans can be planted near the beginning of each season, though are most
#' commonly planted in the Postrera or Apante seasons.
#'
#' @format A data frame with 842 records and 14 variables:
#' \describe{
#' \item{\code{variety_a}}{The name of variety A in the comparison.}
#' \item{\code{variety_b}}{The name of variety B in the comparison.}
#' \item{\code{variety_c}}{The name of variety C in the comparison.}
#' \item{\code{best}}{The variety the farmer ranked in first place ("A",
#' "B" or "C").}
#' \item{\code{worst}}{The variety the farmer ranked in last place ("A",
#' "B" or "C").}
#' \item{\code{var_a}}{How the farmer ranked variety A compared to the local
#' variety ("Worse" or "Better").}
#' \item{\code{var_b}}{How the farmer ranked variety B compared to the local
#' variety ("Worse" or "Better").}
#' \item{\code{var_c}}{How the farmer ranked variety C compared to the local
#' variety ("Worse" or "Better").}
#' \item{\code{season}}{A factor specifying the growing season ("Po - 15",
#' "Ap - 15", "Pr - 16", "Po - 16", "Ap - 16".}
#' \item{\code{year}}{The year of planting.}
#' \item{\code{maxTN}}{The maximum temperature at night during the
#' vegetative cycle (degrees Celsius).}
#' \item{\code{lon}}{The geographic coordinate longitude (X axis) for where
#' the plot was established.}
#' \item{\code{lat}}{The geographic coordinate latitude (Y axis) for where
#' the plot was established.}
#' \item{\code{planting_date}}{A Date, specifying the start date of
#' planting the trial.}
#' }
#' @source van Etten, J. et al. (2019) \emph{PNAS}, \bold{116} (10), 4194--4199,
#' \doi{10.1073/pnas.1813720116}.
#' @examples
#'
#' # Consider the best and worst rankings. These give the variety the
#' # farmer thought was best or worst, coded as A, B or C for the
#' # first, second or third variety assigned to the farmer
#' # respectively.
#' data(beans)
#' head(beans[c("best", "worst")], 2)
#'
#' # Fill in the missing item
#' beans$middle <- complete(beans[c("best", "worst")],
#' items = c("A", "B", "C"))
#' head(beans[c("best", "middle", "worst")], 2)
#'
#' # This gives an ordering of the three varieties the farmer was
#' # given. The names of these varieties are stored in separate
#' # columns
#' varieties <- beans[c("variety_a", "variety_b", "variety_c")]
#' head(varieties, 2)
#'
#' # Use these names to decode the orderings of order 3
#' order3 <- decode(beans[c("best", "middle", "worst")],
#' items = beans[c("variety_a", "variety_b", "variety_c")],
#' code = c("A", "B", "C"))
#'
#' # Now consider the paired comparisons against the local variety
#' head(beans[c("var_a", "var_b", "var_c")], 2)
#'
#' # Convert these results to a vector and get the corresponding trial variety
#' outcome <- unlist(beans[c("var_a", "var_b", "var_c")])
#' trial_variety <- unlist(beans[c("variety_a", "variety_b", "variety_c")])
#'
#' # Create a data frame of the implied orderings of order 2
#' order2 <- data.frame(Winner = ifelse(outcome == "Worse",
#' "Local", trial_variety),
#' Loser = ifelse(outcome == "Worse",
#' trial_variety, "Local"),
#' stringsAsFactors = FALSE, row.names = NULL)
#' head(order2, 2)
#'
#' # Finally combine the rankings of order 2 and order 3
#' R <- rbind(as.rankings(order3, input = "orderings"),
#' as.rankings(order2, input = "orderings"))
#' head(R)
#' tail(R)
"beans"
| /R/beans.R | no_license | Felix660/PlackettLuce | R | false | false | 4,274 | r | #' Preferred Bean Varieties in Nicaragua
#'
#' This is a subset of data from trials of bean varieties
#' (\emph{Phaseolus vulgaris} L.)
#' in Nicaragua over five growing seasons. Farmers were asked to try three
#' varieties of bean from a total of ten varieties and to rank them in order of
#' preference. In addition, for each variety the farmers were asked to compare
#' each trial variety to the local variety and state whether they considered it
#' to be better or worse.
#'
#' There are three crop seasons in Central America:
#' \describe{
#' \item{Primera}{May - August.}
#' \item{Postrera}{September - October.}
#' \item{Apante}{November - January.}
#' }
#' Beans can be planted near the beginning of each season, though are most
#' commonly planted in the Postrera or Apante seasons.
#'
#' @format A data frame with 842 records and 14 variables:
#' \describe{
#' \item{\code{variety_a}}{The name of variety A in the comparison.}
#' \item{\code{variety_b}}{The name of variety B in the comparison.}
#' \item{\code{variety_c}}{The name of variety C in the comparison.}
#' \item{\code{best}}{The variety the farmer ranked in first place ("A",
#' "B" or "C").}
#' \item{\code{worst}}{The variety the farmer ranked in last place ("A",
#' "B" or "C").}
#' \item{\code{var_a}}{How the farmer ranked variety A compared to the local
#' variety ("Worse" or "Better").}
#' \item{\code{var_b}}{How the farmer ranked variety B compared to the local
#' variety ("Worse" or "Better").}
#' \item{\code{var_c}}{How the farmer ranked variety C compared to the local
#' variety ("Worse" or "Better").}
#' \item{\code{season}}{A factor specifying the growing season ("Po - 15",
#' "Ap - 15", "Pr - 16", "Po - 16", "Ap - 16".}
#' \item{\code{year}}{The year of planting.}
#' \item{\code{maxTN}}{The maximum temperature at night during the
#' vegetative cycle (degrees Celsius).}
#' \item{\code{lon}}{The geographic coordinate longitude (X axis) for where
#' the plot was established.}
#' \item{\code{lat}}{The geographic coordinate latitude (Y axis) for where
#' the plot was established.}
#' \item{\code{planting_date}}{A Date, specifying the start date of
#' planting the trial.}
#' }
#' @source van Etten, J. et al. (2019) \emph{PNAS}, \bold{116} (10), 4194--4199,
#' \doi{10.1073/pnas.1813720116}.
#' @examples
#'
#' # Consider the best and worst rankings. These give the variety the
#' # farmer thought was best or worst, coded as A, B or C for the
#' # first, second or third variety assigned to the farmer
#' # respectively.
#' data(beans)
#' head(beans[c("best", "worst")], 2)
#'
#' # Fill in the missing item
#' beans$middle <- complete(beans[c("best", "worst")],
#' items = c("A", "B", "C"))
#' head(beans[c("best", "middle", "worst")], 2)
#'
#' # This gives an ordering of the three varieties the farmer was
#' # given. The names of these varieties are stored in separate
#' # columns
#' varieties <- beans[c("variety_a", "variety_b", "variety_c")]
#' head(varieties, 2)
#'
#' # Use these names to decode the orderings of order 3
#' order3 <- decode(beans[c("best", "middle", "worst")],
#' items = beans[c("variety_a", "variety_b", "variety_c")],
#' code = c("A", "B", "C"))
#'
#' # Now consider the paired comparisons against the local variety
#' head(beans[c("var_a", "var_b", "var_c")], 2)
#'
#' # Convert these results to a vector and get the corresponding trial variety
#' outcome <- unlist(beans[c("var_a", "var_b", "var_c")])
#' trial_variety <- unlist(beans[c("variety_a", "variety_b", "variety_c")])
#'
#' # Create a data frame of the implied orderings of order 2
#' order2 <- data.frame(Winner = ifelse(outcome == "Worse",
#' "Local", trial_variety),
#' Loser = ifelse(outcome == "Worse",
#' trial_variety, "Local"),
#' stringsAsFactors = FALSE, row.names = NULL)
#' head(order2, 2)
#'
#' # Finally combine the rankings of order 2 and order 3
#' R <- rbind(as.rankings(order3, input = "orderings"),
#' as.rankings(order2, input = "orderings"))
#' head(R)
#' tail(R)
"beans"
|
prepare_met <- function(met_in,initdata,mo_start_end,co2_in,nplots,nmonths,years=NULL,months=NULL){
  # (signature kept as in the original: months precedes years)
  stop("placeholder")
}
| /scripts/prepare_met.R | no_license | DataFusion18/DAPPER-1 | R | false | false | 5,056 | r | prepare_met <- function(met_in,initdata,mo_start_end,co2_in,nplots,nmonths,months,years){
co2elev_year = 1996
co2elev_month = 8
plots = unique(initdata$PlotID)
tmin = array(-99,dim=c(nplots,nmonths))
tmax = array(-99,dim=c(nplots,nmonths))
precip = array(-99,dim=c(nplots,nmonths))
ra = array(-99,dim=c(nplots,nmonths))
frost = array(-99,dim=c(nplots,nmonths))
co2 = array(-99,dim=c(nplots,nmonths))
for(plotnum in 1:nplots){
tmp = met_in[which(met_in$SiteID == initdata$SiteID[plotnum]),]
tmp2 = initdata[which(initdata$PlotID == initdata$PlotID[plotnum]),]
for(mo in mo_start_end[plotnum,1]:mo_start_end[plotnum,2]){
curr_year_index = which(tmp$YEAR == years[mo])
co2[plotnum,mo]= co2_in$CO2_Concentration_RCP85[which(co2_in$Year == years[mo])]
if(months[mo] == 1){
tmin[plotnum,mo] = tmp$TMIN1[curr_year_index]
tmax[plotnum,mo] = tmp$TMAX1[curr_year_index]
precip[plotnum,mo] = tmp$PRECIP1[curr_year_index]
ra[plotnum,mo] = tmp$RA1[curr_year_index]
frost[plotnum,mo] = tmp$FROST1[curr_year_index]
}
if(months[mo] == 2){
tmin[plotnum,mo] = tmp$TMIN2[curr_year_index]
tmax[plotnum,mo] = tmp$TMAX2[curr_year_index]
precip[plotnum,mo] = tmp$PRECIP2[curr_year_index]
ra[plotnum,mo] = tmp$RA2[curr_year_index]
frost[plotnum,mo] = tmp$FROST2[curr_year_index]
}
if(months[mo] == 3){
tmin[plotnum,mo] = tmp$TMIN3[curr_year_index]
tmax[plotnum,mo] = tmp$TMAX3[curr_year_index]
precip[plotnum,mo] = tmp$PRECIP3[curr_year_index]
ra[plotnum,mo] = tmp$RA3[curr_year_index]
frost[plotnum,mo] = tmp$FROST3[curr_year_index]
}
if(months[mo] == 4){
tmin[plotnum,mo] = tmp$TMIN4[curr_year_index]
tmax[plotnum,mo] = tmp$TMAX4[curr_year_index]
precip[plotnum,mo] = tmp$PRECIP4[curr_year_index]
ra[plotnum,mo] = tmp$RA4[curr_year_index]
frost[plotnum,mo] = tmp$FROST4[curr_year_index]
}
if(months[mo] == 5){
tmin[plotnum,mo] = tmp$TMIN5[curr_year_index]
tmax[plotnum,mo] = tmp$TMAX5[curr_year_index]
precip[plotnum,mo] = tmp$PRECIP5[curr_year_index]
ra[plotnum,mo] = tmp$RA5[curr_year_index]
frost[plotnum,mo] = tmp$FROST5[curr_year_index]
}
if(months[mo] == 6){
tmin[plotnum,mo] = tmp$TMIN6[curr_year_index]
tmax[plotnum,mo] = tmp$TMAX6[curr_year_index]
precip[plotnum,mo] = tmp$PRECIP6[curr_year_index]
ra[plotnum,mo] = tmp$RA6[curr_year_index]
frost[plotnum,mo] = tmp$FROST6[curr_year_index]
}
if(months[mo] == 7){
tmin[plotnum,mo] = tmp$TMIN7[curr_year_index]
tmax[plotnum,mo] = tmp$TMAX7[curr_year_index]
precip[plotnum,mo] = tmp$PRECIP7[curr_year_index]
ra[plotnum,mo] = tmp$RA7[curr_year_index]
frost[plotnum,mo] = tmp$FROST7[curr_year_index]
}
if(months[mo] == 8){
tmin[plotnum,mo] = tmp$TMIN8[curr_year_index]
tmax[plotnum,mo] = tmp$TMAX8[curr_year_index]
precip[plotnum,mo] = tmp$PRECIP8[curr_year_index]
ra[plotnum,mo] = tmp$RA8[curr_year_index]
frost[plotnum,mo] = tmp$FROST8[curr_year_index]
}
if(months[mo] == 9){
tmin[plotnum,mo] = tmp$TMIN9[curr_year_index]
tmax[plotnum,mo] = tmp$TMAX9[curr_year_index]
precip[plotnum,mo] = tmp$PRECIP9[curr_year_index]
ra[plotnum,mo] = tmp$RA9[curr_year_index]
frost[plotnum,mo] = tmp$FROST9[curr_year_index]
}
if(months[mo] == 10){
tmin[plotnum,mo] = tmp$TMIN10[curr_year_index]
tmax[plotnum,mo] = tmp$TMAX10[curr_year_index]
precip[plotnum,mo] = tmp$PRECIP10[curr_year_index]
ra[plotnum,mo] = tmp$RA10[curr_year_index]
frost[plotnum,mo] = tmp$FROST10[curr_year_index]
}
if(months[mo] == 11){
tmin[plotnum,mo] = tmp$TMIN11[curr_year_index]
tmax[plotnum,mo] = tmp$TMAX11[curr_year_index]
precip[plotnum,mo] = tmp$PRECIP11[curr_year_index]
ra[plotnum,mo] = tmp$RA11[curr_year_index]
frost[plotnum,mo] = tmp$FROST11[curr_year_index]
}
if(months[mo] == 12){
tmin[plotnum,mo] = tmp$TMIN12[curr_year_index]
tmax[plotnum,mo] = tmp$TMAX12[curr_year_index]
precip[plotnum,mo] = tmp$PRECIP12[curr_year_index]
ra[plotnum,mo] = tmp$RA12[curr_year_index]
frost[plotnum,mo] = tmp$FROST12[curr_year_index]
}
if(years[mo] >= tmp2$DroughtStart){
precip[plotnum,mo] = precip[plotnum,mo]#*DroughtLevel
}
if(tmp2$CO2flag == 1 & years[mo] == co2elev_year & months[mo] >= co2elev_month){
co2[plotnum,mo]=tmp2$CO2elev
}else if(tmp2$CO2flag == 1 & years[mo] > co2elev_year){
co2[plotnum,mo]=tmp2$CO2elev
}else if(tmp2$CO2flag == 1 & tmp2$PlotID > 40006){
co2[plotnum,mo]=tmp2$CO2elev
}
}
}
return(list(tmin=tmin,tmax=tmax,precip=precip,ra=ra,frost=frost,co2=co2))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tdar_get_ontology_labels.R
\name{tdar_get_ontology_labels}
\alias{tdar_get_ontology_labels}
\title{Title}
\usage{
tdar_get_ontology_labels(x)
}
\arguments{
\item{x}{}
}
\description{
Title
}
| /man/tdar_get_ontology_labels.Rd | no_license | bocinsky/tdar | R | false | true | 269 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tdar_get_ontology_labels.R
\name{tdar_get_ontology_labels}
\alias{tdar_get_ontology_labels}
\title{Title}
\usage{
tdar_get_ontology_labels(x)
}
\arguments{
\item{x}{}
}
\description{
Title
}
|
# Fixture setup for the tempflow/tempnetwork basic-operation tests below.
# Naming scheme (inferred from construction): TF* = tempflow objects,
# LTF* = tempflows with loops, STF* = singleton tempflows, ETF* = example
# tempflows, GL* = graph lists, and prefixes W/E mark weighted graph lists
# and objects with an equivalence relation attached. Large commented-out
# variants are kept for reference.
context("test-tempnetwork-tempflow-basics")
# Paths to bundled .gml fixtures.
# NOTE(review): system.file("inst", ...) only resolves during development
# (e.g. devtools::load_all()); in an installed package the "inst" prefix is
# stripped -- confirm these paths resolve under R CMD check.
PATH = system.file("inst","testdata","tempflow", package = "tempnetwork")
PATH_GRAPH = system.file("inst","testdata","tempgraph", package = "tempnetwork")
# Acyclic tempflows (unweighted and weighted).
TF1 <- tempflow(igraph::read.graph(file.path(PATH, "tempflow_directed_acyclic_small_1.gml"), format = "graphml"))
TF2 <- tempflow(igraph::read.graph(file.path(PATH, "tempflow_weighted_directed_acyclic_small_1.gml"), format = "graphml"))
# TF3 <- tempflow(igraph::read.graph(file.path(PATH, "tempflow_weighted_forrest_small_1.gml"), format = "graphml"))
# TF4 <- tempflow(igraph::read.graph(file.path(PATH, "tempflow_weighted_tree_small_1.gml"), format = "graphml"))
# TF5 <- tempflow(igraph::read.graph(file.path(PATH, "tempflow_lattice_small_1.gml"), format = "graphml"), setDefaultValues = TRUE)
#
#
# Tempflows containing loops.
LTF1 <- tempflow(igraph::read.graph(file.path(PATH, "tempflow_directed_acyclic_loop_small_1.gml"), format = "graphml"))
LTF2 <- tempflow(igraph::read.graph(file.path(PATH, "tempflow_weighted_directed_acyclic_loop_small_1.gml"), format = "graphml"))
# LTF3 <- tempflow(igraph::read.graph(file.path(PATH, "tempflow_weighted_forrest_loop_small_1.gml"), format = "graphml"))
# LTF4 <- tempflow(igraph::read.graph(file.path(PATH, "tempflow_weighted_tree_loop_small_1.gml"), format = "graphml"))
# LTF5 <- tempflow(igraph::read.graph(file.path(PATH, "tempflow_lattice_loop_small_1.gml"), format = "graphml"), setDefaultValues = TRUE)
# LTF6 <- tempflow(igraph::read.graph(file.path(PATH, "tempflow_differentlyweighted_directed_acyclic_loop_small_1.gml"), format = "graphml"), setDefaultValues = TRUE)
#
# Singleton tempflows (one point; named, unnamed, and with a loop).
STF1 <- tempflow(igraph::read.graph(file.path(PATH, "tempflow_singelton_1.gml"), format = "graphml"))
STF2 <- tempflow(igraph::read.graph(file.path(PATH, "tempflow_singelton_noname_1.gml"), format = "graphml"))
STF3 <- tempflow(igraph::read.graph(file.path(PATH, "tempflow_singelton_loop_1.gml"), format = "graphml"))
# WTF1 <- tempflow(igraph::read.graph(file.path(PATH, "tempflow_partialweighted_directed_acyclic_small_1.gml"), format = "graphml"), setDefaultValues = TRUE)
#
# Example tempflow fixtures.
ETF1 <- tempflow(igraph::read.graph(file.path(PATH, "tempflow_example_2_6.gml"), format = "graphml"))
# ETF2 <- tempflow(igraph::read.graph(file.path(PATH, "tempflow_example_4_1.gml"), format = "graphml"))
# ETF3 <- tempflow(igraph::read.graph(file.path(PATH, "tempflow_example_4_2.gml"), format = "graphml"))
# Graph lists read in all variants (GL*$simple, GL*$weighted are used below).
GL1 <- read_graph_list_all_variants(PATH_GRAPH, "tempgraph_default_p5_n5_v1")
# GL2 <- read_graph_list_all_variants(PATH_GRAPH, "tempgraph_default_p5_n5_v2")
# GL3 <- read_graph_list_all_variants(PATH_GRAPH, "tempgraph_default_p5_n5_v3")
# GL4 <- read_graph_list_all_variants(PATH_GRAPH, "tempgraph_p13_n5_random50")
GL5 <- read_graph_list_all_variants(PATH_GRAPH, "tempgraph_p11_n5_random50", graphNameList = list("a","p11","p21","p22","p23","b","p31","p32","p41","p42","c"))
# GL6 <- read_graph_list_all_variants(PATH_GRAPH, "tempgraph_default_p5_n5_v1", graphNameList = list("t1", "t2", "t3", "t12", "t13"))
# Tempnetworks built from the simple graph lists (no equivalence relation).
GL1TF1 <- tempnetwork(tempFlow = TF1, equivalenceRelation = NULL, graphList = GL1$simple)
# GL2TF1 <- tempnetwork(tempFlow = TF1, equivalenceRelation = NULL, graphList = GL2$simple)
# GL3TF1 <- tempnetwork(tempFlow = TF1, equivalenceRelation = NULL, graphList = GL3$simple)
#
# GL1TF2 <- tempnetwork(tempFlow = TF2, equivalenceRelation = NULL, graphList = GL1$simple)
# GL2TF2 <- tempnetwork(tempFlow = TF2, equivalenceRelation = NULL, graphList = GL2$simple)
# GL3TF2 <- tempnetwork(tempFlow = TF2, equivalenceRelation = NULL, graphList = GL3$simple)
#
# GL4TF3 <- tempnetwork(tempFlow = TF3, equivalenceRelation = NULL, graphList = GL4$simple)
# GL4TF4 <- tempnetwork(tempFlow = TF4, equivalenceRelation = NULL, graphList = GL4$simple)
# GL4TF5 <- tempnetwork(tempFlow = TF5, equivalenceRelation = NULL, graphList = GL4$simple)
#
# GL1LTF1 <- tempnetwork(tempFlow = LTF1, equivalenceRelation = NULL, graphList = GL1$simple)
# GL2LTF1 <- tempnetwork(tempFlow = LTF1, equivalenceRelation = NULL, graphList = GL2$simple)
# GL3LTF1 <- tempnetwork(tempFlow = LTF1, equivalenceRelation = NULL, graphList = GL3$simple)
#
# GL1LTF2 <- tempnetwork(tempFlow = LTF2, equivalenceRelation = NULL, graphList = GL1$simple)
# GL2LTF2 <- tempnetwork(tempFlow = LTF2, equivalenceRelation = NULL, graphList = GL2$simple)
# GL3LTF2 <- tempnetwork(tempFlow = LTF2, equivalenceRelation = NULL, graphList = GL3$simple)
#
# GL4LTF3 <- tempnetwork(tempFlow = LTF3, equivalenceRelation = NULL, graphList = GL4$simple)
# GL4LTF4 <- tempnetwork(tempFlow = LTF4, equivalenceRelation = NULL, graphList = GL4$simple)
# GL4LTF5 <- tempnetwork(tempFlow = LTF5, equivalenceRelation = NULL, graphList = GL4$simple)
# GL1LTF6 <- tempnetwork(tempFlow = LTF6, equivalenceRelation = NULL, graphList = GL1$simple)
#
# Singleton tempnetworks: a single point paired with a single graph.
GL0STF1 <- tempnetwork(tempFlow = STF1, equivalenceRelation = NULL, graphList = list("t1"=GL1$simple$t1))
GL0STF2 <- tempnetwork(tempFlow = STF2, equivalenceRelation = NULL, graphList = list("1"=GL1$simple$t1))
GL0STF3 <- tempnetwork(tempFlow = STF3, equivalenceRelation = NULL, graphList = list("t1"=GL1$simple$t1))
#
# GL5ETF1 <- tempnetwork(tempFlow = ETF1, equivalenceRelation = NULL, graphList = GL5$simple)
# GL6ETF2 <- tempnetwork(tempFlow = ETF2, equivalenceRelation = NULL, graphList = GL6$simple)
#
#
# Tempnetworks built from the weighted graph lists.
WGL1TF1 <- tempnetwork(tempFlow = TF1, equivalenceRelation = NULL, graphList = GL1$weighted)
# WGL2TF1 <- tempnetwork(tempFlow = TF1, equivalenceRelation = NULL, graphList = GL2$weighted)
# WGL3TF1 <- tempnetwork(tempFlow = TF1, equivalenceRelation = NULL, graphList = GL3$weighted)
#
WGL1TF2 <- tempnetwork(tempFlow = TF2, equivalenceRelation = NULL, graphList = GL1$weighted)
# WGL2TF2 <- tempnetwork(tempFlow = TF2, equivalenceRelation = NULL, graphList = GL2$weighted)
# WGL3TF2 <- tempnetwork(tempFlow = TF2, equivalenceRelation = NULL, graphList = GL3$weighted)
#
# WGL4TF3 <- tempnetwork(tempFlow = TF3, equivalenceRelation = NULL, graphList = GL4$weighted)
# WGL4TF4 <- tempnetwork(tempFlow = TF4, equivalenceRelation = NULL, graphList = GL4$weighted)
# WGL4TF5 <- tempnetwork(tempFlow = TF5, equivalenceRelation = NULL, graphList = GL4$weighted)
#
WGL1LTF1 <- tempnetwork(tempFlow = LTF1, equivalenceRelation = NULL, graphList = GL1$weighted)
# WGL2LTF1 <- tempnetwork(tempFlow = LTF1, equivalenceRelation = NULL, graphList = GL2$weighted)
# WGL3LTF1 <- tempnetwork(tempFlow = LTF1, equivalenceRelation = NULL, graphList = GL3$weighted)
#
#
WGL1LTF2 <- tempnetwork(tempFlow = LTF2, equivalenceRelation = NULL, graphList = GL1$weighted)
# WGL2LTF2 <- tempnetwork(tempFlow = LTF2, equivalenceRelation = NULL, graphList = GL2$weighted)
# WGL3LTF2 <- tempnetwork(tempFlow = LTF2, equivalenceRelation = NULL, graphList = GL3$weighted)
#
# WGL4LTF3 <- tempnetwork(tempFlow = LTF3, equivalenceRelation = NULL, graphList = GL4$weighted)
# WGL4LTF4 <- tempnetwork(tempFlow = LTF4, equivalenceRelation = NULL, graphList = GL4$weighted)
# WGL4LTF5 <- tempnetwork(tempFlow = LTF5, equivalenceRelation = NULL, graphList = GL4$weighted)
# WGL1LTF6 <- tempnetwork(tempFlow = LTF6, equivalenceRelation = NULL, graphList = GL1$weighted)
#
WGL5ETF1 <- tempnetwork(tempFlow = ETF1, equivalenceRelation = NULL, graphList = GL5$weighted)
# WGL1ETF2 <- tempnetwork(tempFlow = ETF2, equivalenceRelation = NULL, graphList = GL6$weighted)
#
#
#
# EGL1TF1 <- set_equivRelation(GL1TF1, eR(GL1TF1, TRUE))
# EGL2TF1 <- set_equivRelation(GL2TF1, eR(GL2TF1, TRUE))
# EGL3TF1 <- set_equivRelation(GL3TF1, eR(GL3TF1, TRUE))
#
# EGL1TF2 <- set_equivRelation(GL1TF2, eR(GL1TF2, TRUE))
# EGL2TF2 <- set_equivRelation(GL2TF2, eR(GL2TF2, TRUE))
# EGL3TF2 <- set_equivRelation(GL3TF2, eR(GL3TF2, TRUE))
#
#
# EGL4TF3 <- set_equivRelation(GL4TF3, eR(GL4TF3, TRUE))
# EGL4TF4 <- set_equivRelation(GL4TF4, eR(GL4TF4, TRUE))
# EGL4TF5 <- set_equivRelation(GL4TF5, eR(GL4TF5, TRUE))
#
# EGL1LTF1 <- set_equivRelation(GL1LTF1, eR(GL1LTF1, TRUE))
# EGL2LTF1 <- set_equivRelation(GL2LTF1, eR(GL2LTF1, TRUE))
# EGL3LTF1 <- set_equivRelation(GL3LTF1, eR(GL3LTF1, TRUE))
#
# EGL1LTF2 <- set_equivRelation(GL1LTF2, eR(GL1LTF2, TRUE))
# EGL2LTF2 <- set_equivRelation(GL2LTF2, eR(GL2LTF2, TRUE))
# EGL3LTF2 <- set_equivRelation(GL3LTF2, eR(GL3LTF2, TRUE))
#
# EGL4LTF3 <- set_equivRelation(GL4LTF3, eR(GL4LTF3, TRUE))
# EGL4LTF4 <- set_equivRelation(GL4LTF4, eR(GL4LTF4, TRUE))
# EGL4LTF5 <- set_equivRelation(GL4LTF5, eR(GL4LTF5, TRUE))
# EGL1LTF6 <- set_equivRelation(GL1LTF6, eR(GL1LTF6, TRUE))
#
# EGL0STF1 <- set_equivRelation(GL0STF1, eR(GL0STF1, TRUE))
# EGL0STF2 <- set_equivRelation(GL0STF2, eR(GL0STF2, TRUE))
# EGL0STF3 <- set_equivRelation(GL0STF3, eR(GL0STF3, TRUE))
#
# EGL5ETF1 <- set_equivRelation(GL5ETF1, eR(GL5ETF1, TRUE))
# EGL6ETF2 <- set_equivRelation(GL6ETF2, eR(GL6ETF2, TRUE))
#
#
# Weighted tempnetworks with an equivalence relation attached (E prefix).
EWGL1TF1 <- set_equivRelation(WGL1TF1, eR(WGL1TF1, TRUE))
# EWGL2TF1 <- set_equivRelation(WGL2TF1, eR(WGL2TF1, TRUE))
# EWGL3TF1 <- set_equivRelation(WGL3TF1, eR(WGL3TF1, TRUE))
#
# EWGL1TF2 <- set_equivRelation(WGL1TF2, eR(WGL1TF2, TRUE))
# EWGL2TF2 <- set_equivRelation(WGL2TF2, eR(WGL2TF2, TRUE))
# EWGL3TF2 <- set_equivRelation(WGL3TF2, eR(WGL3TF2, TRUE))
#
# EWGL4TF3 <- set_equivRelation(WGL4TF3, eR(WGL4TF3, TRUE))
# EWGL4TF4 <- set_equivRelation(WGL4TF4, eR(WGL4TF4, TRUE))
# EWGL4TF5 <- set_equivRelation(WGL4TF5, eR(WGL4TF5, TRUE))
#
EWGL1LTF1 <- set_equivRelation(WGL1LTF1, eR(WGL1LTF1, TRUE))
# EWGL2LTF1 <- set_equivRelation(WGL2LTF1, eR(WGL2LTF1, TRUE))
# EWGL3LTF1 <- set_equivRelation(WGL3LTF1, eR(WGL3LTF1, TRUE))
#
#
# EWGL1LTF2 <- set_equivRelation(WGL1LTF2, eR(WGL1LTF2, TRUE))
# EWGL2LTF2 <- set_equivRelation(WGL2LTF2, eR(WGL2LTF2, TRUE))
# EWGL3LTF2 <- set_equivRelation(WGL3LTF2, eR(WGL3LTF2, TRUE))
#
# EWGL4LTF3 <- set_equivRelation(WGL4LTF3, eR(WGL4LTF3, TRUE))
# EWGL4LTF4 <- set_equivRelation(WGL4LTF4, eR(WGL4LTF4, TRUE))
# EWGL4LTF5 <- set_equivRelation(WGL4LTF5, eR(WGL4LTF5, TRUE))
# EWGL1LTF6 <- set_equivRelation(WGL1LTF6, eR(WGL1LTF6, TRUE))
#
# EWGL5ETF1 <- set_equivRelation(WGL5ETF1, eR(WGL5ETF1, TRUE))
# EWGL1ETF2 <- set_equivRelation(WGL1ETF2, eR(WGL1ETF2, TRUE))
# add_points(): verifies that new time points can be appended to a
# tempnetwork (with and without an attached equivalence relation), that
# names/attributes are propagated, and that every malformed graph list or
# equivalence relation is rejected with the expected classed error message.
test_that("test 'add_points(tempNetwork, newPointList, graphList, equivRel, attrList = NULL, safe = TRUE)'", {
  # Graphs and equivalence-relation extension for the two new points t6, t7.
  addGL <- list("t6" = GL1$simple$t1, "t7" = GL1$simple$t2)
  addER <- list("v1"=list("t6"= "v1","t7"= "v1"), "v2"=list("t6"= "v2","t7"= "v2"),
                "v3"=list("t6"= "v3","t7"= "v3"), "v4"=list("t6"= "v4","t7"= "v4"),
                "v5"=list("t6"= "v5","t7"= "v5"))
  # Adding two points extends the temporal points (tP) from 5 to 7.
  expect_equal(length(tP(add_points(WGL1LTF1, c("t6","t7"), addGL))), 7)
  expect_equal(tP(add_points(WGL1LTF1, c("t6","t7"), addGL))$name, c("t1", "t2", "t3", "t4", "t5", "t6", "t7"))
  # Extra attributes apply to the new points only; existing points get NA.
  expect_equal(tP(add_points(WGL1LTF1, c("t6","t7"), addGL, attrList = list("RealName" = c("a", "b"))))$name, c("t1", "t2", "t3", "t4", "t5", "t6", "t7"))
  expect_equal(tP(add_points(WGL1LTF1, c("t6","t7"), addGL, attrList = list("RealName" = c("a", "b"))))$RealName, c(NA, NA, NA, NA, NA, "a", "b"))
  # A length-1 attribute is recycled over all new points.
  expect_equal(tP(add_points(WGL1LTF1, c("t6","t7"), addGL, attrList = list("RealName" = c("a"))))$RealName, c(NA, NA, NA, NA, NA, "a", "a"))
  # Adding nothing leaves the underlying graph isomorphic to the original.
  expect_equal(igraph::is_isomorphic_to(G(WGL1LTF1), G(add_points(WGL1LTF1, c(), list()))), TRUE)
  # With an equivalence relation attached, graph list and relation grow too.
  expect_equal(length(tP(add_points(EWGL1LTF1, c("t6","t7"), addGL))), 7)
  expect_equal(length(gL(add_points(EWGL1LTF1, c("t6","t7"), addGL))), 7)
  expect_equal(all(sapply(gL(add_points(EWGL1LTF1, c("t6","t7"), addGL)), function(x) inherits(x, "igraph"))), TRUE)
  expect_equal(all(sapply(eR(add_points(EWGL1LTF1, c("t6","t7"), addGL)), function(x) length(x)==7)), TRUE)
  expect_equal(names(tP(add_points(EWGL1LTF1, c("t6","t7"), addGL))), c("t1", "t2", "t3", "t4", "t5", "t6", "t7"))
  expect_equal(names(gL(add_points(EWGL1LTF1, c("t6","t7"), addGL))), c("t1", "t2", "t3", "t4", "t5", "t6", "t7"))
  expect_equal(all(sapply(eR(add_points(EWGL1LTF1, c("t6","t7"), addGL)), function(x) names(x)==c("t1", "t2", "t3", "t4", "t5", "t6", "t7"))), TRUE)
  # Same checks with an explicit equivalence-relation extension supplied.
  expect_equal(length(tP(add_points(EWGL1LTF1, c("t6","t7"), addGL,addER ))), 7)
  expect_equal(length(gL(add_points(EWGL1LTF1, c("t6","t7"), addGL,addER))), 7)
  expect_equal(all(sapply(gL(add_points(EWGL1LTF1, c("t6","t7"), addGL,addER)), function(x) inherits(x, "igraph"))), TRUE)
  expect_equal(all(sapply(eR(add_points(EWGL1LTF1, c("t6","t7"), addGL,addER)), function(x) length(x)==7)), TRUE)
  expect_equal(names(tP(add_points(EWGL1LTF1, c("t6","t7"), addGL,addER))), c("t1", "t2", "t3", "t4", "t5", "t6", "t7"))
  expect_equal(names(gL(add_points(EWGL1LTF1, c("t6","t7"), addGL,addER))), c("t1", "t2", "t3", "t4", "t5", "t6", "t7"))
  expect_equal(all(sapply(eR(add_points(EWGL1LTF1, c("t6","t7"), addGL,addER)), function(x) names(x)==c("t1", "t2", "t3", "t4", "t5", "t6", "t7"))), TRUE)
  # Attribute vectors must have length 1 or match the number of new points.
  expect_error(tP(add_points(WGL1LTF1, c("t6","t7","t8"), addGL, attrList = list("RealName" = c("a","b"))))$RealName, error_message_vector_not_equal_length_and_not_one())
  expect_error(tP(add_points(WGL1LTF1, c("t6","t7","t8"), addGL, attrList = list("RealName" = c("a", "b", "c","d"))))$name, error_message_vector_not_equal_length_and_not_one())
  # Duplicate point names (existing or within the new batch) are rejected.
  expect_error(add_points(WGL1LTF1, c("t5","t7"), addGL), error_message_names_not_unique())
  expect_error(add_points(WGL1LTF1, c("t6","t6"), addGL), error_message_names_not_unique())
  # Graph-list names must match the new point names exactly.
  E1addGL <- list("t9" = GL1$simple$t1, "t7" = GL1$simple$t2)
  expect_error(add_points(EWGL1LTF1, c("t6","t7"), E1addGL), error_message_not_tempflow_graphlist_index_equality())
  # Equivalence classes must stay disjoint.
  E1addER <- list("v1"=list("t6"= "v2","t7"= "v1"), "v2"=list("t6"= "v2","t7"= "v2"),
                  "v3"=list("t6"= "v3","t7"= "v3"), "v4"=list("t6"= "v4","t7"= "v4"),
                  "v5"=list("t6"= "v5","t7"= "v5"))
  expect_error(add_points(EWGL1LTF1, c("t6","t7"), addGL, E1addER), error_message_equivclass_not_disjoined())
  # Equivalence classes must cover all vertices (exhaustive).
  E2addER <- list("v1"=list("t6"= "a","t7"= "v1"), "v2"=list("t6"= "v2","t7"= "v2"),
                  "v3"=list("t6"= "v3","t7"= "v3"), "v4"=list("t6"= "v4","t7"= "v4"),
                  "v5"=list("t6"= "v5","t7"= "v5"))
  expect_error(add_points(EWGL1LTF1, c("t6","t7"), addGL, E2addER), error_message_equivalenceclass_is_not_exhaustive())
  # Relation indices must match the tempflow's point names.
  E3addER <- list("v1"=list("t6"= "v1","t9"= "v1"), "v2"=list("t6"= "v2","t7"= "v2"),
                  "v3"=list("t6"= "v3","t7"= "v3"), "v4"=list("t6"= "v4","t7"= "v4"),
                  "v5"=list("t6"= "v5","t7"= "v5"))
  expect_error(add_points(EWGL1LTF1, c("t6","t7"), addGL, E3addER), error_message_not_tempflow_equivclass_index_equality())
  E4addER <- list("v1"=list("t6"= "v1","t7"= "v1"), "v1"=list("t6"= "v2","t7"= "v2"),
                  "v3"=list("t6"= "v3","t7"= "v3"), "v4"=list("t6"= "v4","t7"= "v4"),
                  "v5"=list("t6"= "v5","t7"= "v5"))
  expect_error(add_points(EWGL1LTF1, c("t6","t7"), addGL, E4addER), error_message_not_tempflow_equivclass_index_equality())
  E5addER <- list("v1"=list("t6"= "v1","t7"= "v1"), "v2"=list("t6"= "v2","t7"= "v2"),
                  "v3"=list("t6"= "v3","t7"= "v3"), "v4"=list("t6"= "v4","t7"= "v4"),
                  "v6"=list("t6"= "v6","t7"= "v6"), "v5"=list("t6"= "v5","t7"= "v5"))
  expect_error(add_points(EWGL1LTF1, c("t6","t7"), addGL, E5addER), error_message_not_tempflow_equivclass_index_equality())
  E6addER <- list("v1"=list("t6"= "v1","t7"= "v1"), "v2"=list("t6"= "v2","t7"= "v2"),
                  "v3"=list("t6"= "v3","t7"= "v3"), "v4"=list("t6"= "v4","t7"= "v4"),
                  "v5"=list("t6"= "v5","t7"= "v5","t8"= "v7"))
  expect_error(add_points(EWGL1LTF1, c("t6","t7"), addGL, E6addER), error_message_not_tempflow_equivclass_index_equality())
  E7addER <- list("v1"=list("t6"= "v1","t7"= "v1"), "v2"=list("t6"= "v2","t7"= "v2"),
                  "v3"=list("t6"= "v3","t7"= "v3"), "v4"=list("t6"= "v4","t7"= "v4"),
                  "v6"=list("t1"= "v6","t2"= "v6","t3"= "v6","t4"= "v6","t5"= "v6","t6"= "v6","t7"= "v6"),
                  "v5"=list("t6"= "v5","t7"= "v5"))
  expect_error(add_points(EWGL1LTF1, c("t6","t7"), addGL, E7addER), error_message_equivrel_not_equal_length_to_vertices())
  # A cached underlying temp graph (tG) is invalidated by add_points().
  testTempNetwork <- WGL1LTF1
  testTempNetwork$storeTempGraph <- TRUE
  testTempNetwork <- construct_underlying_graph(testTempNetwork)
  expect_s3_class(tG(testTempNetwork), "igraph")
  testTempNetwork <- add_points(WGL1LTF1, c("t6","t7"), addGL)
  expect_null(tG(testTempNetwork))
})
# delete_points(): verifies point removal by name and by numeric id,
# deduplication of the deletion list, and rejection of unknown names/ids,
# NA/NaN entries, mixed name+id input, and full-deletion (empty tempflow).
test_that("test 'delete_points(tempFlow, deletePointList, safe = TRUE) '", {
  # Deleting nothing (empty or NULL list) is a no-op.
  expect_equal(tP(delete_points(WGL1LTF1, c()))$name, c("t1", "t2", "t3","t4", "t5"))
  expect_equal(names(gL(delete_points(WGL1LTF1, c()))), c("t1", "t2", "t3","t4", "t5"))
  expect_equal(tP(delete_points(WGL1LTF1, c(NULL)))$name, c("t1", "t2", "t3", "t4", "t5"))
  expect_equal(names(gL(delete_points(WGL1LTF1, c(NULL)))), c("t1", "t2", "t3", "t4", "t5"))
  # Duplicated names in the deletion list are collapsed.
  expect_equal(tP(delete_points(WGL1LTF1, c("t3","t3")))$name, c("t1", "t2", "t4", "t5"))
  expect_equal(names(gL(delete_points(WGL1LTF1, c("t3","t3")))), c("t1", "t2", "t4", "t5"))
  # Deletion by numeric point id works as well.
  expect_equal(tP(delete_points(WGL1LTF1, c(3,4)))$name, c("t1", "t2", "t5"))
  expect_equal(names(gL(delete_points(WGL1LTF1, c(3,4)))), c("t1", "t2", "t5"))
  # Removing every point must fail rather than leave an empty tempflow.
  expect_error(delete_points(WGL1LTF1, c(1,2,3,4,5)), error_message_tempflow_empty())
  # Unknown names, NA/NaN entries, and mixed id/name input are rejected.
  expect_error(delete_points(WGL1LTF1, c("t3","t7")), error_message_name_not_in_object())
  expect_error(delete_points(WGL1LTF1, c("t3","t7")), error_message_name_not_in_object())
  expect_error(delete_points(WGL1LTF1, c(NA, "t3")), error_message_name_not_in_object())
  expect_error(delete_points(WGL1LTF1, c(NaN, "t3")), error_message_name_not_in_object())
  expect_error(delete_points(WGL1LTF1, c(3,7)), error_message_ids_not_in_object())
  expect_error(delete_points(WGL1LTF1, c(3,"t3")), error_message_name_not_in_object())
  # A cached underlying temp graph (tG) is invalidated by delete_points().
  testTempNetwork <- WGL1LTF1
  testTempNetwork$storeTempGraph <- TRUE
  testTempNetwork <- construct_underlying_graph(testTempNetwork)
  expect_s3_class(tG(testTempNetwork), "igraph")
  testTempNetwork <- delete_points(WGL1LTF1, c("t3","t3"))
  expect_null(tG(testTempNetwork))
})
# add_steps(): verifies edge insertion by id/name (including self-loops),
# default and explicit (recycled) weights, extra edge attributes, precedence
# of weightList over a "weight" entry in attrList, and the input-validation
# errors (unknown names, duplicate edges, length mismatches, bad weights).
test_that("test 'add_steps(tempFlow, srcPointList, dstPointList, weightList = NULL, attrList = NULL, safe = TRUE)'", {
  # Endpoints may be given as ids, names, or a mix of both.
  expect_equal(igraph::get.edge.ids(G(add_steps(WGL1TF1, c(1,3),c(5,4))), c("t1","t5"), error = FALSE)>0, TRUE)
  expect_equal(igraph::get.edge.ids(G(add_steps(WGL1TF1, c(1,3),c(5,4))), c("t3","t4"), error = FALSE)>0, TRUE)
  expect_equal(igraph::get.edge.ids(G(add_steps(WGL1TF1, c("t1","t3"),c("t5","t4"))), c("t1","t5"), error = FALSE)>0, TRUE)
  expect_equal(igraph::get.edge.ids(G(add_steps(WGL1TF1, c("t1","t3"),c("t5","t4"))), c("t3","t4"), error = FALSE)>0, TRUE)
  expect_equal(igraph::get.edge.ids(G(add_steps(WGL1TF1, c(1,3),c("t5","t4"))), c("t1","t5"), error = FALSE)>0, TRUE)
  expect_equal(igraph::get.edge.ids(G(add_steps(WGL1TF1, c("t1","t3"),c(5,4))), c("t3","t4"), error = FALSE)>0, TRUE)
  # Self-loops (src == dst) are permitted.
  expect_equal(igraph::get.edge.ids(G(add_steps(WGL1TF1, c("t1","t3"),c("t1","t3"))), c("t1","t1"), error = FALSE)>0, TRUE)
  expect_equal(igraph::get.edge.ids(G(add_steps(WGL1TF1, c("t1","t3"),c("t1","t3"))), c("t3","t3"), error = FALSE)>0, TRUE)
  # Default weights: 0 for a loop, 1 for a regular step.
  expect_equal(tS(add_steps(WGL1TF1, c("t1","t3"),c("t1","t4")))$weight[length(tS(WGL1TF1))+1], 0)
  expect_equal(tS(add_steps(WGL1TF1, c("t1","t3"),c("t1","t4")))$weight[length(tS(WGL1TF1))+2], 1)
  # Explicit weights are applied per edge; a length-1 weight is recycled.
  expect_equal(tS(add_steps(WGL1TF1, c("t1","t3"),c("t1","t4"), c(20,40)))$weight[length(tS(WGL1TF1))+1], 20)
  expect_equal(tS(add_steps(WGL1TF1, c("t1","t3"),c("t1","t4"), c(20,40)))$weight[length(tS(WGL1TF1))+2], 40)
  expect_equal(tS(add_steps(WGL1TF1, c("t1","t3","t1"),c("t1","t4","t5"), c(20)))$weight[length(tS(WGL1TF1))+1], 20)
  expect_equal(tS(add_steps(WGL1TF1, c("t1","t3","t1"),c("t1","t4","t5"), c(20)))$weight[length(tS(WGL1TF1))+2], 20)
  expect_equal(tS(add_steps(WGL1TF1, c("t1","t3","t1"),c("t1","t4","t5"), c(20)))$weight[length(tS(WGL1TF1))+3], 20)
  # Arbitrary edge attributes from attrList are attached to the new steps.
  expect_equal(tS(add_steps(WGL1TF1, c("t1","t3","t1"),c("t1","t4","t5"), attrList = list("name"= c("a","b","c"), "rname"= c("a","b","c"))))$name[length(tS(WGL1TF1))+1], "a")
  expect_equal(tS(add_steps(WGL1TF1, c("t1","t3","t1"),c("t1","t4","t5"), attrList = list("name"= c("a","b","c"), "rname"= c("a","b","c"))))$name[length(tS(WGL1TF1))+2], "b")
  expect_equal(tS(add_steps(WGL1TF1, c("t1","t3","t1"),c("t1","t4","t5"), attrList = list("name"= c("a","b","c"), "rname"= c("a","b","c"))))$name[length(tS(WGL1TF1))+3], "c")
  expect_equal(tS(add_steps(WGL1TF1, c("t1","t3","t1"),c("t1","t4","t5"), attrList = list("name"= c("a","b","c"), "rname"= c("a","b","c"))))$rname[length(tS(WGL1TF1))+1], "a")
  expect_equal(tS(add_steps(WGL1TF1, c("t1","t3","t1"),c("t1","t4","t5"), attrList = list("name"= c("a","b","c"), "rname"= c("a","b","c"))))$rname[length(tS(WGL1TF1))+2], "b")
  expect_equal(tS(add_steps(WGL1TF1, c("t1","t3","t1"),c("t1","t4","t5"), attrList = list("name"= c("a","b","c"), "rname"= c("a","b","c"))))$rname[length(tS(WGL1TF1))+3], "c")
  # weightList wins over a conflicting "weight" attribute in attrList.
  expect_equal(tS(add_steps(WGL1TF1, c("t1","t3","t1"),c("t1","t4","t5"), weightList = c(20,40,60), attrList = list("name"= c("a","b","c"), "weight"= c(30,50,70))))$weight[length(tS(WGL1TF1))+1], 20)
  expect_equal(tS(add_steps(WGL1TF1, c("t1","t3","t1"),c("t1","t4","t5"), weightList = c(20,40,60), attrList = list("name"= c("a","b","c"), "weight"= c(30,50,70))))$weight[length(tS(WGL1TF1))+2], 40)
  expect_equal(tS(add_steps(WGL1TF1, c("t1","t3","t1"),c("t1","t4","t5"), weightList = c(20,40,60), attrList = list("name"= c("a","b","c"), "weight"= c(30,50,70))))$weight[length(tS(WGL1TF1))+3], 60)
  # Validation errors: mixed id/name input, duplicated edges, unknown
  # names, length mismatches, NA/NaN, and negative/non-numeric weights.
  expect_error(add_steps(WGL1TF1, c("t1",3),c("t5",4)), error_message_name_not_in_object())
  expect_error(add_steps(WGL1TF1, c("t1","t2"),c("t2","t5")), error_message_not_simple())
  expect_error(add_steps(WGL1TF1, c("t1","t2"),c("t7","t5")), error_message_name_not_in_object())
  expect_error(add_steps(WGL1TF1, c("t1","t2","t3"),c("t4","t5")), error_message_vector_not_equal_length())
  expect_error(add_steps(WGL1TF1, c("t1","t2"),c("t4","t5","t3")), error_message_vector_not_equal_length())
  expect_error(add_steps(WGL1TF1, c("t1","t2", NULL),c("t4","t5","t3")), error_message_vector_not_equal_length())
  expect_error(add_steps(WGL1TF1, c("t1",NA),c("t4","t5")), error_message_name_not_in_object())
  expect_error(add_steps(WGL1TF1, c("t1","t2"),c("t4",NA)), error_message_name_not_in_object())
  expect_error(add_steps(WGL1TF1, c("t1",NaN),c("t4","t5")), error_message_name_not_in_object())
  expect_error(add_steps(WGL1TF1, c("t1","t2"),c("t4",NaN)), error_message_name_not_in_object())
  expect_error(add_steps(WGL1TF1, c("t1","t3","t1"),c("t1","t4","t5"), c(20,40)), error_message_vector_not_equal_lengths_and_not_one())
  expect_error(add_steps(WGL1TF1, c("t1","t3","t1"),c("t1","t4","t5"), c(-20,40,60)), error_message_weights_not_nonnegative())
  expect_error(add_steps(WGL1TF1, c("t1","t3","t1"),c("t1","t4","t5"), c(20,"40",60)), error_message_weights_not_numeric())
  expect_error(add_steps(WGL1TF1, c("t1","t3","t1"),c("t1","t4","t5"), c(20,NA,60)), error_message_weights_not_numeric())
  expect_error(add_steps(WGL1TF1, c("t1","t3","t1"),c("t1","t4","t5"), c(20,NaN,60)), error_message_weights_not_numeric())
  expect_error(add_steps(WGL1TF1, c("t1","t3","t1"),c("t1","t4","t5"), attrList = list("name"= c("a","b"))), error_message_vector_not_equal_lengths_and_not_one())
  expect_error(add_steps(WGL1TF1, c("t1","t3","t1"),c("t1","t4","t5"), attrList = list("name"= c("a","b","c","d"))), error_message_vector_not_equal_lengths_and_not_one())
  # A cached underlying temp graph (tG) is invalidated by add_steps().
  testTempNetwork <- WGL1LTF1
  testTempNetwork$storeTempGraph <- TRUE
  testTempNetwork <- construct_underlying_graph(testTempNetwork)
  expect_s3_class(tG(testTempNetwork), "igraph")
  testTempNetwork <- add_steps(WGL1TF1, c("t1","t3"),c("t1","t4"))
  expect_null(tG(testTempNetwork))
})
# delete_steps_from_id(): verifies edge deletion by step id (including
# duplicated ids) and that the cached underlying graph is invalidated.
test_that("test 'delete_steps_from_id(tempFlow, stepIDList, safe = TRUE)'", {
  # Deleting by id removes the corresponding edges; duplicates are tolerated.
  expect_equal(sum(igraph::get.edge.ids(G(delete_steps_from_id(WGL1LTF1,c(6,7))), c("t1","t1","t2","t2"), error = FALSE)), 0)
  expect_equal(sum(igraph::get.edge.ids(G(delete_steps_from_id(WGL1LTF1,c(6,7))), c("t1","t1","t2","t2"), error = FALSE)), 0)
  expect_equal(sum(igraph::get.edge.ids(G(delete_steps_from_id(WGL1TF1,c(1,4))), c("t1","t2","t4","t5"), error = FALSE)), 0)
  expect_equal(sum(igraph::get.edge.ids(G(delete_steps_from_id(WGL1TF1,c(1,4,1))), c("t1","t2","t4","t5"), error = FALSE)), 0)
  # NOTE(review): the expect_error() calls below exercise delete_steps(),
  # not delete_steps_from_id() -- confirm this is intentional (they look
  # copied from the delete_steps test block).
  expect_error(delete_steps(WGL1TF1,c(1,9), c(1,5)), error_message_ids_not_in_object())
  expect_error(delete_steps(WGL1TF1,c(1,4), c(1,9)), error_message_ids_not_in_object())
  expect_error(delete_steps(WGL1TF1,c(1,2,NA), c(2,3,4)), error_message_ids_not_in_object())
  expect_error(delete_steps(WGL1TF1,c(1,2,3), c(2,3,NA)), error_message_ids_not_in_object())
  expect_error(delete_steps(WGL1TF1,c(1,2,NaN), c(2,3,4)), error_message_ids_not_in_object())
  expect_error(delete_steps(WGL1TF1,c(1,2,3), c(2,3,NaN)), error_message_ids_not_in_object())
  expect_error(delete_steps(WGL1TF1,c(1,"t2"), c(2,3)), error_message_name_not_in_object())
  expect_error(delete_steps(WGL1TF1,c(1,2), c(2,"t3")), error_message_name_not_in_object())
  expect_error(delete_steps(WGL1TF1,c("t1","t2"), c("t2","t5")), error_message_edge_not_exist())
  # A cached underlying temp graph (tG) is invalidated by the deletion.
  testTempNetwork <- WGL1LTF1
  testTempNetwork$storeTempGraph <- TRUE
  testTempNetwork <- construct_underlying_graph(testTempNetwork)
  expect_s3_class(tG(testTempNetwork), "igraph")
  testTempNetwork <- delete_steps_from_id(WGL1LTF1,c(6,7))
  expect_null(tG(testTempNetwork))
})
# delete_steps(): verifies edge deletion by src/dst name, id, or a mix
# (duplicates tolerated), and the validation errors for unknown names,
# nonexistent edges, NA/NaN entries, and length-mismatched endpoint vectors.
test_that("test 'delete_steps(tempFlow, srcPointList, dstPointList, safe = TRUE)'", {
  # Endpoints given as names, ids, or mixed; repeated pairs are tolerated.
  expect_equal(sum(igraph::get.edge.ids(G(delete_steps(WGL1TF1,c("t1","t4"), c("t2","t5"))), c("t1","t2","t4","t5"), error = FALSE)), 0)
  expect_equal(sum(igraph::get.edge.ids(G(delete_steps(WGL1LTF1,c("t1","t2"), c("t1","t2"))), c("t1","t1","t2","t2"), error = FALSE)), 0)
  expect_equal(sum(igraph::get.edge.ids(G(delete_steps(WGL1TF1,c("t1","t4","t1"), c("t2","t5","t2"))), c("t1","t2","t4","t5"), error = FALSE)), 0)
  expect_equal(sum(igraph::get.edge.ids(G(delete_steps(WGL1TF1,c(1,4), c(2,5))), c("t1","t2","t4","t5"), error = FALSE)), 0)
  expect_equal(sum(igraph::get.edge.ids(G(delete_steps(WGL1LTF1,c(1,2), c("t1","t2"))), c("t1","t1","t2","t2"), error = FALSE)), 0)
  expect_equal(sum(igraph::get.edge.ids(G(delete_steps(WGL1TF1,c(1,4,1), c(2,5,2))), c("t1","t2","t4","t5"), error = FALSE)), 0)
  # Validation errors: unknown names, nonexistent edges, NA/NaN, NULL
  # entries (which shorten the vector), and length mismatches.
  expect_error(delete_steps(WGL1TF1,c("t1","t6"), c("t2","t5")), error_message_name_not_in_object())
  expect_error(delete_steps(WGL1TF1,c("t1","t4"), c("t2","t6")), error_message_name_not_in_object())
  expect_error(delete_steps(WGL1TF1,c("t1","t4"), c("t5","t5")), "One of the specified edges does not exist.")
  expect_error(delete_steps(WGL1TF1,c("t1","t4","t2"), c("t5", "t5", NA)), error_message_name_not_in_object())
  expect_error(delete_steps(WGL1TF1,c("t1","t4",NA), c("t5", "t5", "t3")), error_message_name_not_in_object())
  expect_error(delete_steps(WGL1TF1,c("t1","t4","t2"), c("t5", "t5", NaN)), error_message_name_not_in_object())
  expect_error(delete_steps(WGL1TF1,c("t1","t4",NaN), c("t5", "t5", "t3")), error_message_name_not_in_object())
  expect_error(delete_steps(WGL1TF1,c("t1","t4","t2"), c("t5", "t5", NULL)), error_message_vector_not_equal_length())
  expect_error(delete_steps(WGL1TF1,c("t1","t4",NULL), c("t5", "t5", "t3")), error_message_vector_not_equal_length())
  expect_error(delete_steps(WGL1TF1,c("t1","t4","t2"), c("t5", "t5")), error_message_vector_not_equal_length())
  expect_error(delete_steps(WGL1TF1,c("t1","t4"), c("t5", "t5", "t3")), error_message_vector_not_equal_length())
  # A cached underlying temp graph (tG) is invalidated by delete_steps().
  testTempNetwork <- WGL1LTF1
  testTempNetwork$storeTempGraph <- TRUE
  testTempNetwork <- construct_underlying_graph(testTempNetwork)
  expect_s3_class(tG(testTempNetwork), "igraph")
  testTempNetwork <- delete_steps(WGL1TF1,c("t1","t4"), c("t2","t5"))
  expect_null(tG(testTempNetwork))
})
# Tests for insert_point(): inserting a new point "tX" into a temp flow,
# optionally rerouting existing edges through it, with/without explicit
# edge weights and an explicit equivalence relation.
test_that("test 'insert_point(tempFlow, newPoint, srcPointList, dstPointList, weightList = NULL, attrList = NULL, removeEdges = TRUE, safe = TRUE)'", {
  # Expected full equivalence relation after insertion: each vertex keeps
  # its own class across all points, including the new point "tX".
  checkER <- list("v1"=list("t1"= "v1","t2"= "v1","t3"= "v1","t4"= "v1","t5"= "v1","tX"= "v1"),
                  "v2"=list("t1"= "v2","t2"= "v2","t3"= "v2","t4"= "v2","t5"= "v2","tX"= "v2"),
                  "v3"=list("t1"= "v3","t2"= "v3","t3"= "v3","t4"= "v3","t5"= "v3","tX"= "v3"),
                  "v4"=list("t1"= "v4","t2"= "v4","t3"= "v4","t4"= "v4","t5"= "v4","tX"= "v4"),
                  "v5"=list("t1"= "v5","t2"= "v5","t3"= "v5","t4"= "v5","t5"= "v5","tX"= "v5"))
  # Graph list / equivalence-relation fragments describing only the new point.
  addGL <- list("tX" = GL1$simple$t1)
  addER <- list("v1"=list("tX"= "v1"), "v2"=list("tX"= "v2"),
                "v3"=list("tX"= "v3"), "v4"=list("tX"= "v4"),
                "v5"=list("tX"= "v5"))
  # Variants for the larger example flow WGL5ETF1 (vertices named "1".."5").
  addGLeX <- list("tX" = GL5$simple$a)
  addEReX <- list("v1"=list("tX"= "1"), "v2"=list("tX"= "2"),
                  "v3"=list("tX"= "3"), "v4"=list("tX"= "4"),
                  "v5"=list("tX"= "5"))
  # insert_point(WGL5ETF1, "tX", c(), c(),addGLeX)
  # sapply(names(tP(WGL5ETF1)), function(x) gV(WGL5ETF1, x))
  # --- Equivalence relation handling: result with/without explicit equivRel ---
  # The new point's graph may be given as a one-element list or as a bare graph.
  tempNetwork <-insert_point(WGL1TF1, "tX", c("t3","t4"), c("t5"),addGL)
  gtempNetwork <-insert_point(WGL1TF1, "tX", c("t3","t4"), c("t5"), GL1$simple$t1)
  eTempNetwork <- insert_point(WGL1TF1, "tX", c("t3","t4"), c("t5"), addGL, equivRel = addER)
  eeTempNetwork <- insert_point(EWGL1TF1, "tX", c("t3","t4"), c("t5"), addGL, equivRel = addER)
  expect_equal(eR(tempNetwork), NULL)
  expect_equal(eR(eTempNetwork), checkER)
  expect_equal(eR(eeTempNetwork), checkER)
  # --- Malformed-argument errors (E1-E8) ---
  # E1: graph-list index "tY" does not match the inserted point name "tX".
  E1addGL <- list("tY" = GL1$simple$t1)
  expect_error(insert_point(WGL1TF1, "tX", c("t3","t4"), c("t5"),E1addGL), error_message_not_tempflow_graphlist_index_equality())
  # E2 (via E1addER): two vertices mapped into the same class -> not disjoint.
  E1addER <- list("v1"=list("tX"="v2"), "v2"=list("tX"="v2"),
                  "v3"=list("tX"="v3"), "v4"=list("tX"="v4"),
                  "v5"=list("tX"="v5"))
  expect_error(insert_point(WGL1TF1, "tX", c("t3","t4"), c("t5"),addGL, equivRel = E1addER), error_message_equivclass_not_disjoined())
  # Class value "v" is not a vertex of the new graph -> relation not exhaustive.
  E2addER <- list("v1"=list("tX"="v"), "v2"=list("tX"="v2"),
                  "v3"=list("tX"="v3"), "v4"=list("tX"="v4"),
                  "v5"=list("tX"="v5"))
  expect_error(insert_point(WGL1TF1, "tX", c("t3","t4"), c("t5"),addGL, equivRel = E2addER), error_message_equivalenceclass_is_not_exhaustive())
  # Wrong point index "tY" inside the equivalence classes.
  E3addER <- list("v1"=list("tY"="v"), "v2"=list("tX"="v2"),
                  "v3"=list("tX"="v3"), "v4"=list("tX"="v4"),
                  "v5"=list("tX"="v5"))
  expect_error(insert_point(WGL1TF1, "tX", c("t3","t4"), c("t5"),addGL, equivRel = E3addER), error_message_not_tempflow_equivclass_index_equality())
  # Duplicate class name "v1".
  E4addER <- list("v1"=list("tX"="v1"), "v1"=list("tX"="v2"),
                  "v3"=list("tX"="v3"), "v4"=list("tX"="v4"),
                  "v5"=list("tX"="v5"))
  expect_error(insert_point(WGL1TF1, "tX", c("t3","t4"), c("t5"),addGL, equivRel = E4addER), error_message_not_tempflow_equivclass_index_equality())
  # Extra class "v6" with no matching vertex.
  E5addER <- list("v1"=list("tX"="v1"), "v2"=list("tX"="v2"),
                  "v3"=list("tX"="v3"), "v4"=list("tX"="v4"),
                  "v5"=list("tX"="v5"), "v6"=list("tX"="v5"))
  expect_error(insert_point(WGL1TF1, "tX", c("t3","t4"), c("t5"),addGL, equivRel = E5addER), error_message_not_tempflow_equivclass_index_equality())
  # Class "v5" references an unknown point "tY".
  E6addER <- list("v1"=list("tX"="v1"), "v2"=list("tX"="v2"),
                  "v3"=list("tX"="v3"), "v4"=list("tX"="v4"),
                  "v5"=list("tX"="v5", "tY" = "v5"))
  expect_error(insert_point(WGL1TF1, "tX", c("t3","t4"), c("t5"),addGL, equivRel = E6addER), error_message_not_tempflow_equivclass_index_equality())
  # One class spans all points while the others only cover "tX" -> length mismatch.
  E7addER <- list("v1"=list("tX"="v1"), "v2"=list("tX"="v2"),
                  "v3"=list("tX"="v3"), "v4"=list("tX"="v4"),
                  "v5"=list("tX"="v5"),
                  "v6"=list("t1"= "v6","t2"= "v6","t3"= "v6","t4"= "v6","t5"= "v6","tX"= "v6"))
  expect_error(insert_point(WGL1TF1, "tX", c("t3","t4"), c("t5"),addGL, equivRel = E7addER), error_message_equivrel_not_equal_length_to_vertices())
  # Duplicate point index "tX" within one class.
  E8addER <- list("v1"=list("tX"="v1"), "v2"=list("tX"="v2"),
                  "v3"=list("tX"="v3"), "v4"=list("tX"="v4"),
                  "v5"=list("tX"="v5", "tX" = "v5"))
  expect_error(insert_point(WGL1TF1, "tX", c("t3","t4"), c("t5"),addGL, equivRel = E8addER), error_message_not_tempflow_equivclass_index_equality())
  # --- Insert an isolated point: one more point, no new steps ---
  tempNetwork <-insert_point(WGL1TF1, "tX", c(), c(),addGL)
  expect_equal(length(tS(tempNetwork)), length(tS(WGL1TF1)))
  expect_equal(length(tP(tempNetwork)), length(tP(WGL1TF1))+1)
  expect_equal(tS(tempNetwork)$weight, tS(WGL1TF1)$weight)
  # --- Self-loop via srcPointList = c("tX"): loop weight defaults to 0 ---
  tempNetwork <-insert_point(WGL1TF1, "tX", c("tX"), c(),addGL)
  wTempNetwork <- insert_point(WGL1TF1, "tX", c("tX"), c(), addGL, weightList = c(10))
  eTempNetwork <- insert_point(WGL1TF1, "tX", c("tX"), c(), addGL, equivRel = addER)
  eeTempNetwork <- insert_point(EWGL1TF1, "tX", c("tX"), c(), addGL, equivRel = addER)
  expect_equal(length(tS(tempNetwork)), length(tS(WGL1TF1))+1)
  expect_equal(all(igraph::get.edge.ids(G(tempNetwork), c("tX","tX"), error = FALSE) > 0), TRUE)
  expect_equal(tS(tempNetwork)[get_step_ids(tempNetwork, c("tX"), c("tX"))]$weight, c(0))
  expect_equal(tS(wTempNetwork)[get_step_ids(wTempNetwork, c("tX"), c("tX"))]$weight, c(10))
  # --- Self-loop via dstPointList = c("tX"): symmetric to the case above ---
  tempNetwork <-insert_point(WGL1TF1, "tX", c(), c("tX"),addGL)
  wTempNetwork <- insert_point(WGL1TF1, "tX", c(), c("tX"),addGL, weightList = c(10))
  expect_equal(length(tS(tempNetwork)), length(tS(WGL1TF1))+1)
  expect_equal(all(igraph::get.edge.ids(G(tempNetwork), c("tX","tX"), error = FALSE) > 0), TRUE)
  expect_equal(tS(tempNetwork)[get_step_ids(tempNetwork, c("tX"), c("tX"))]$weight, c(0))
  expect_equal(tS(wTempNetwork)[get_step_ids(wTempNetwork, c("tX"), c("tX"))]$weight, c(10))
  # --- srcPointList may contain the new point itself (t1,tX -> t2) ---
  # The rerouted t1->t2 edge is removed; t1->tX, tX->tX (loop), tX->t2 added.
  tempNetwork <-insert_point(WGL1TF1, "tX", c("t1","tX"), c("t2"),addGL)
  wTempNetwork <- insert_point(WGL1TF1, "tX", c("t1","tX"), c("t2"),addGL, weightList = c(10,8,20))
  expect_equal(length(tS(tempNetwork)), length(tS(WGL1TF1))+2)
  expect_equal(all(igraph::get.edge.ids(G(tempNetwork), c("t1","t2"), error = FALSE) == 0), TRUE)
  expect_equal(all(igraph::get.edge.ids(G(tempNetwork), c("t1","tX","tX","tX","tX","t2"), error = FALSE) > 0), TRUE)
  expect_equal(tS(tempNetwork)[get_step_ids(tempNetwork, c("t1","tX","tX"), c("tX","tX","t2"))]$weight, c(1,0,1))
  expect_equal(tS(wTempNetwork)[get_step_ids(wTempNetwork, c("t1","tX","tX"), c("tX","tX","t2"))]$weight, c(10,8,20))
  # --- Reroute t3->t5 and t4->t5 through tX (removeEdges = TRUE default) ---
  tempNetwork <-insert_point(WGL1TF1, "tX", c("t3","t4"), c("t5"),addGL)
  wTempNetwork <- insert_point(WGL1TF1, "tX", c("t3","t4"), c("t5"),addGL, weightList = c(10,20,30))
  expect_equal(length(tS(tempNetwork)), length(tS(WGL1TF1))+1)
  expect_equal(all(igraph::get.edge.ids(G(tempNetwork), c("t3","t5","t4","t5"), error = FALSE) == 0), TRUE)
  expect_equal(all(igraph::get.edge.ids(G(tempNetwork), c("t3","tX","t4","tX","tX","t5"), error = FALSE) > 0), TRUE)
  expect_equal(tS(tempNetwork)[get_step_ids(tempNetwork, c("t3","t4","tX"), c("tX","tX","t5"))]$weight, c(1,1,1))
  expect_equal(tS(wTempNetwork)[get_step_ids(wTempNetwork, c("t3","t4","tX"), c("tX","tX","t5"))]$weight, c(10,20,30))
  # --- Insert between non-adjacent points (t1,t2 -> t5): nothing to remove ---
  tempNetwork <-insert_point(WGL1TF1, "tX", c("t1","t2"), c("t5"),addGL)
  wTempNetwork <- insert_point(WGL1TF1, "tX", c("t1","t2"), c("t5"),addGL, weightList = c(10,20,30))
  expect_equal(length(tS(tempNetwork)), length(tS(WGL1TF1))+3)
  # NOTE(review): the vertex-pair vector below repeats "t1" — presumably the
  # second pair was meant to be ("t2","tX") to match srcPointList; confirm.
  expect_equal(all(igraph::get.edge.ids(G(tempNetwork), c("t1","tX","t1","tX","tX","t5"), error = FALSE) > 0), TRUE)
  expect_equal(tS(tempNetwork)[get_step_ids(tempNetwork, c("t1","t2","tX"), c("tX","tX","t5"))]$weight, c(1,1,1))
  expect_equal(tS(wTempNetwork)[get_step_ids(wTempNetwork, c("t1","t2","tX"), c("tX","tX","t5"))]$weight, c(10,20,30))
  # --- Multiple sources and destinations (t1,t2 -> t3,t4) ---
  tempNetwork <-insert_point(WGL1TF1, "tX", c("t1","t2"), c("t3","t4"),addGL)
  wTempNetwork <- insert_point(WGL1TF1, "tX", c("t1","t2"), c("t3","t4"),addGL, weightList = c(10,20,30,40))
  expect_equal(length(tS(tempNetwork)), length(tS(WGL1TF1))+2)
  expect_equal(all(igraph::get.edge.ids(G(tempNetwork), c("t2","t3","t2","t4"), error = FALSE) == 0), TRUE)
  expect_equal(all(igraph::get.edge.ids(G(tempNetwork), c("t1","tX","t2","tX","tX","t3","tX","t4"), error = FALSE) > 0), TRUE)
  expect_equal(tS(tempNetwork)[get_step_ids(tempNetwork, c("t1","t2","tX","tX"), c("tX","tX","t3","t4"))]$weight, c(1,1,1,1))
  expect_equal(tS(wTempNetwork)[get_step_ids(wTempNetwork, c("t1","t2","tX","tX"), c("tX","tX","t3","t4"))]$weight, c(10,20,30,40))
  # --- Repeat of the t3,t4 -> t5 case (regression guard) ---
  tempNetwork <-insert_point(WGL1TF1, "tX", c("t3","t4"), c("t5"),addGL)
  wTempNetwork <- insert_point(WGL1TF1, "tX", c("t3","t4"), c("t5"),addGL, weightList = c(10,20,30))
  expect_equal(length(tS(tempNetwork)), length(tS(WGL1TF1))+1)
  expect_equal(all(igraph::get.edge.ids(G(tempNetwork), c("t3","t5","t4","t5"), error = FALSE) == 0), TRUE)
  expect_equal(all(igraph::get.edge.ids(G(tempNetwork), c("t3","tX","t4","tX","tX","t5"), error = FALSE) > 0), TRUE)
  expect_equal(tS(tempNetwork)[get_step_ids(tempNetwork, c("t3","t4","tX"), c("tX","tX","t5"))]$weight, c(1,1,1))
  expect_equal(tS(wTempNetwork)[get_step_ids(wTempNetwork, c("t3","t4","tX"), c("tX","tX","t5"))]$weight, c(10,20,30))
  # --- Same scenario on the larger example flow WGL5ETF1 ---
  tempNetwork <-insert_point(WGL5ETF1, "tX", c("p31", "p41"), c("p32", "p42"),addGLeX)
  wTempNetwork <- insert_point(WGL5ETF1, "tX", c("p31", "p41"), c("p32", "p42"),addGLeX, weightList = c(10,20,30,40))
  expect_equal(length(tS(tempNetwork)), length(tS(WGL5ETF1))+2)
  expect_equal(all(igraph::get.edge.ids(G(tempNetwork), c("p31","p32","p41","p42"), error = FALSE) == 0), TRUE)
  expect_equal(all(igraph::get.edge.ids(G(tempNetwork), c("p31","tX","p41","tX","tX","p32","tX","p42"), error = FALSE) > 0), TRUE)
  expect_equal(tS(tempNetwork)[get_step_ids(tempNetwork, c("p31","p41","tX","tX"), c("tX","tX","p32","p42"))]$weight, c(1,1,1,1))
  expect_equal(tS(wTempNetwork)[get_step_ids(wTempNetwork, c("p31","p41","tX","tX"), c("tX","tX","p32","p42"))]$weight, c(10,20,30,40))
  # --- removeEdges = FALSE: original edges are kept alongside new ones ---
  tempNetwork <-insert_point(WGL1TF1, "tX", c(), c(),addGL, removeEdges = FALSE)
  # NOTE(review): this wTempNetwork is never checked before being reassigned
  # below, and it lacks removeEdges = FALSE — possibly a leftover; verify.
  wTempNetwork <- insert_point(WGL1TF1, "tX", c(), c("tX"),addGL, weightList = c(10))
  expect_equal(length(tS(tempNetwork)), length(tS(WGL1TF1)))
  expect_equal(length(tP(tempNetwork)), length(tP(WGL1TF1))+1)
  expect_equal(tS(tempNetwork)$weight, tS(WGL1TF1)$weight)
  tempNetwork <-insert_point(WGL1TF1, "tX", c("tX"), c(),addGL, removeEdges = FALSE)
  wTempNetwork <- insert_point(WGL1TF1, "tX", c("tX"), c(),addGL, weightList = c(10), removeEdges = FALSE)
  expect_equal(length(tS(tempNetwork)), length(tS(WGL1TF1))+1)
  expect_equal(all(igraph::get.edge.ids(G(tempNetwork), c("tX","tX"), error = FALSE) > 0), TRUE)
  expect_equal(tS(tempNetwork)[get_step_ids(tempNetwork, c("tX"), c("tX"))]$weight, c(0))
  expect_equal(tS(wTempNetwork)[get_step_ids(wTempNetwork, c("tX"), c("tX"))]$weight, c(10))
  tempNetwork <-insert_point(WGL1TF1, "tX", c(), c("tX"),addGL, removeEdges = FALSE)
  wTempNetwork <- insert_point(WGL1TF1, "tX", c(), c("tX"),addGL, weightList = c(10), removeEdges = FALSE)
  expect_equal(length(tS(tempNetwork)), length(tS(WGL1TF1))+1)
  expect_equal(all(igraph::get.edge.ids(G(tempNetwork), c("tX","tX"), error = FALSE) > 0), TRUE)
  expect_equal(tS(tempNetwork)[get_step_ids(tempNetwork, c("tX"), c("tX"))]$weight, c(0))
  expect_equal(tS(wTempNetwork)[get_step_ids(wTempNetwork, c("tX"), c("tX"))]$weight, c(10))
  # t1->t2 is preserved (edge id > 0) in addition to the rerouted path.
  tempNetwork <-insert_point(WGL1TF1, "tX", c("t1","tX"), c("t2"),addGL, removeEdges = FALSE)
  wTempNetwork <- insert_point(WGL1TF1, "tX", c("t1","tX"), c("t2"),addGL, weightList = c(10,8,20), removeEdges = FALSE)
  expect_equal(length(tS(tempNetwork)), length(tS(WGL1TF1))+3)
  expect_equal(all(igraph::get.edge.ids(G(tempNetwork), c("t1","t2"), error = FALSE) > 0), TRUE)
  expect_equal(all(igraph::get.edge.ids(G(tempNetwork), c("t1","tX","tX","tX","tX","t2"), error = FALSE) > 0), TRUE)
  expect_equal(tS(tempNetwork)[get_step_ids(tempNetwork, c("t1","tX","tX"), c("tX","tX","t2"))]$weight, c(1,0,1))
  expect_equal(tS(wTempNetwork)[get_step_ids(wTempNetwork, c("t1","tX","tX"), c("tX","tX","t2"))]$weight, c(10,8,20))
  tempNetwork <-insert_point(WGL1TF1, "tX", c("t3","t4"), c("t5"),addGL, removeEdges = FALSE)
  wTempNetwork <- insert_point(WGL1TF1, "tX", c("t3","t4"), c("t5"),addGL, weightList = c(10,20,30), removeEdges = FALSE)
  expect_equal(length(tS(tempNetwork)), length(tS(WGL1TF1))+3)
  expect_equal(all(igraph::get.edge.ids(G(tempNetwork), c("t3","t5","t4","t5"), error = FALSE) > 0), TRUE)
  expect_equal(all(igraph::get.edge.ids(G(tempNetwork), c("t3","tX","t4","tX","tX","t5"), error = FALSE) > 0), TRUE)
  expect_equal(tS(tempNetwork)[get_step_ids(tempNetwork, c("t3","t4","tX"), c("tX","tX","t5"))]$weight, c(1,1,1))
  expect_equal(tS(wTempNetwork)[get_step_ids(wTempNetwork, c("t3","t4","tX"), c("tX","tX","t5"))]$weight, c(10,20,30))
  tempNetwork <-insert_point(WGL1TF1, "tX",c("t1","t2"), c("t5"),addGL, removeEdges = FALSE)
  wTempNetwork <- insert_point(WGL1TF1, "tX",c("t1","t2"), c("t5"),addGL, weightList = c(10,20,30), removeEdges = FALSE)
  expect_equal(length(tS(tempNetwork)), length(tS(WGL1TF1))+3)
  # NOTE(review): same repeated-"t1" pair pattern as above — presumably the
  # second pair should be ("t2","tX"); confirm against srcPointList.
  expect_equal(all(igraph::get.edge.ids(G(tempNetwork), c("t1","tX","t1","tX","tX","t5"), error = FALSE) > 0), TRUE)
  expect_equal(tS(tempNetwork)[get_step_ids(tempNetwork, c("t1","t2","tX"), c("tX","tX","t5"))]$weight, c(1,1,1))
  expect_equal(tS(wTempNetwork)[get_step_ids(wTempNetwork, c("t1","t2","tX"), c("tX","tX","t5"))]$weight, c(10,20,30))
  tempNetwork <-insert_point(WGL1TF1, "tX", c("t1","t2"), c("t3","t4"),addGL, removeEdges = FALSE)
  wTempNetwork <-insert_point(WGL1TF1, "tX", c("t1","t2"), c("t3","t4"),addGL, weightList = c(10,20,30,40), removeEdges = FALSE)
  expect_equal(length(tS(tempNetwork)), length(tS(WGL1TF1))+4)
  expect_equal(all(igraph::get.edge.ids(G(tempNetwork), c("t2","t3","t2","t4"), error = FALSE) > 0), TRUE)
  expect_equal(all(igraph::get.edge.ids(G(tempNetwork), c("t1","tX","t2","tX","tX","t3","tX","t4"), error = FALSE) > 0), TRUE)
  expect_equal(tS(tempNetwork)[get_step_ids(tempNetwork, c("t1","t2","tX","tX"), c("tX","tX","t3","t4"))]$weight, c(1,1,1,1))
  expect_equal(tS(wTempNetwork)[get_step_ids(wTempNetwork, c("t1","t2","tX","tX"), c("tX","tX","t3","t4"))]$weight, c(10,20,30,40))
  tempNetwork <-insert_point(WGL5ETF1, "tX",c("p31", "p41"), c("p32", "p42"),addGLeX, removeEdges = FALSE)
  wTempNetwork <- insert_point(WGL5ETF1, "tX",c("p31", "p41"), c("p32", "p42"),addGLeX, weightList = c(10,20,30,40), removeEdges = FALSE)
  #eTempNetwork <-insert_point(WGL5ETF1, "tX",c("p31", "p41"), c("p32", "p42"),addGLeX, equivRel = addEReX, removeEdges = FALSE)
  expect_equal(length(tS(tempNetwork)), length(tS(WGL5ETF1))+4)
  expect_equal(all(igraph::get.edge.ids(G(tempNetwork), c("p31","p32","p41","p42"), error = FALSE) > 0), TRUE)
  expect_equal(all(igraph::get.edge.ids(G(tempNetwork), c("p31","tX","p41","tX","tX","p32","tX","p42"), error = FALSE) > 0), TRUE)
  expect_equal(tS(tempNetwork)[get_step_ids(tempNetwork, c("p31","p41","tX","tX"), c("tX","tX","p32","p42"))]$weight, c(1,1,1,1))
  expect_equal(tS(wTempNetwork)[get_step_ids(wTempNetwork, c("p31","p41","tX","tX"), c("tX","tX","p32","p42"))]$weight, c(10,20,30,40))
  # --- Argument-validation errors: vertices, DAG/simplicity, weights ---
  expect_error(insert_point(WGL5ETF1, "tX",c("p31", "p41"), c("p32", "p42"),addGL, weightList = c(10,20,30,40), removeEdges = FALSE), error_message_not_graphlist_vertex_equality())
  expect_error(insert_point(WGL1TF1, "tX", c("t1","t6"), c("t3","t4"),addGL), error_message_name_not_in_object())
  expect_error(insert_point(WGL1TF1, "tX", c("t1","t2"), c("t3","t6"),addGL), error_message_name_not_in_object())
  expect_error(insert_point(WGL1TF1, "tX", c("t1","t2"), c("t2","t4"),addGL), error_message_not_dag())
  expect_error(insert_point(WGL1TF1, "tX", c("t1","t1"), c("t2","t2"),addGL), error_message_not_simple())
  expect_error(insert_point(WGL1TF1, "tX", c("t1","t2"), c("t3",NA),addGL), error_message_name_not_in_object())
  expect_error(insert_point(WGL1TF1, "tX", c("t1",NA), c("t3","t4"),addGL), error_message_name_not_in_object())
  expect_error(insert_point(WGL1TF1, "tX", c("t1","t2"), c(NaN),addGL), error_message_ids_not_in_object())
  expect_error(insert_point(WGL1TF1, "tX", c("t1",NaN), c("t3","t4"),addGL), error_message_name_not_in_object())
  expect_error(insert_point(WGL1TF1, "tX", c("t1","t2"), c("t3","t4"), addGL, weightList = c(10,20)), error_message_vector_not_equal_sum_lengths_and_not_one())
  expect_error(insert_point(WGL1TF1, "tX", c("t1","t2"), c("t3","t4"), addGL, weightList = c(-10,20,30,40)), error_message_weights_not_nonnegative())
  expect_error(insert_point(WGL1TF1, "tX", c("t1","t2"), c("t3","t4"),addGL, weightList = c("-10",20,30,40)), error_message_weights_not_numeric())
  expect_error(insert_point(WGL1TF1, "tX", c("t1","t2"), c("t3","t4"),addGL, weightList = c(NA,20,30,40)), error_message_weights_not_numeric())
  expect_error(insert_point(WGL1TF1, "tX", c("t1","t2"), c("t3","t4"),addGL, weightList = c(NaN,20,30,40)), error_message_weights_not_numeric())
  # c(10,20,30,NULL) silently drops NULL, so only 3 weights arrive for 4 steps.
  expect_error(insert_point(WGL1TF1, "tX", c("t1","t2"), c("t3","t4"),addGL, weightList = c(10,20,30,NULL)), error_message_vector_not_equal_sum_lengths_and_not_one())
  expect_error(insert_point(WGL1TF1, "tX", c("tX"), c("tX"), addGL, weightList = c(10)), error_message_not_simple())
  # --- Inserting a point invalidates any stored underlying temp graph ---
  testTempNetwork <- WGL1LTF1
  testTempNetwork$storeTempGraph <- TRUE
  testTempNetwork <- construct_underlying_graph(testTempNetwork)
  expect_equal(class(tG(testTempNetwork)),"igraph")
  testTempNetwork <- insert_point(WGL1TF1, "tX", c("t3","t4"), c("t5"),addGL)
  expect_equal(tG(testTempNetwork),NULL)
})
# Tests for remove_point(): removing a point deletes its incident steps and
# bridges its predecessors to its successors (weights summed across the gap).
test_that("test 'remove_point(tempFlow, removePoint, safe = TRUE)'", {
  # --- Remove a sink point ("t5"): its two incoming steps disappear ---
  tempNetwork <-remove_point(WGL1TF1, "t5")
  expect_equal(length(tP(tempNetwork)), length(tP(WGL1TF1))-1)
  expect_equal(length(tS(tempNetwork)), length(tS(WGL1TF1))-2)
  # Querying edges of a deleted vertex raises igraph's name-lookup error.
  expect_error(all(igraph::get.edge.ids(G(tempNetwork), c("t3","t5","t4","t5"), error = FALSE) == 0), "Invalid vertex names")
  # Looped variant: the self-loop on t5 is a third removed step.
  tempNetwork <-remove_point(WGL1LTF1, "t5")
  expect_equal(length(tP(tempNetwork)), length(tP(WGL1LTF1))-1)
  expect_equal(length(tS(tempNetwork)), length(tS(WGL1LTF1))-3)
  expect_error(all(igraph::get.edge.ids(G(tempNetwork), c("t3","t5","t4","t5","t5","t5"), error = FALSE) == 0), "Invalid vertex names")
  # --- Remove a source point ("t1") ---
  tempNetwork <-remove_point(WGL1TF1, "t1")
  expect_equal(length(tP(tempNetwork)), length(tP(WGL1TF1))-1)
  expect_equal(length(tS(tempNetwork)), length(tS(WGL1TF1))-1)
  expect_error(all(igraph::get.edge.ids(G(tempNetwork), c("t1","t2"), error = FALSE) == 0), "Invalid vertex names")
  tempNetwork <-remove_point(WGL1LTF1, "t1")
  expect_equal(length(tP(tempNetwork)), length(tP(WGL1LTF1))-1)
  expect_equal(length(tS(tempNetwork)), length(tS(WGL1LTF1))-2)
  expect_error(all(igraph::get.edge.ids(G(tempNetwork), c("t1","t2","t1","t1"), error = FALSE) == 0), "Invalid vertex names")
  # --- Remove an interior point ("t2"): t1 is bridged to t3 and t4, the
  # bridging steps carrying summed weight 2 (1+1) ---
  tempNetwork <-remove_point(WGL1TF1, "t2")
  expect_equal(length(tP(tempNetwork)), length(tP(WGL1TF1))-1)
  expect_equal(length(tS(tempNetwork)), length(tS(WGL1TF1))-1)
  expect_error(all(igraph::get.edge.ids(G(tempNetwork), c("t2","t3","t2","t4"), error = FALSE) == 0), "Invalid vertex names")
  expect_equal(all(igraph::get.edge.ids(G(tempNetwork), c("t1","t3","t1","t4"), error = FALSE) > 0), TRUE)
  expect_equal(tS(tempNetwork)[get_step_ids(tempNetwork, c("t1","t1"), c("t3","t4"))]$weight, c(2,2))
  tempNetwork <-remove_point(WGL1LTF1, "t2")
  expect_equal(length(tP(tempNetwork)), length(tP(WGL1LTF1))-1)
  expect_equal(length(tS(tempNetwork)), length(tS(WGL1LTF1))-2)
  expect_error(all(igraph::get.edge.ids(G(tempNetwork), c("t2","t3","t2","t4", "t2","t2"), error = FALSE) == 0), "Invalid vertex names")
  expect_equal(all(igraph::get.edge.ids(G(tempNetwork), c("t1","t3","t1","t4"), error = FALSE) > 0), TRUE)
  expect_equal(tS(tempNetwork)[get_step_ids(tempNetwork, c("t1","t1"), c("t3","t4"))]$weight, c(2,2))
  # --- Remove "t3": t2 is bridged directly to t5 with summed weight 2 ---
  tempNetwork <-remove_point(WGL1TF1, "t3")
  expect_equal(length(tP(tempNetwork)), length(tP(WGL1TF1))-1)
  expect_equal(length(tS(tempNetwork)), length(tS(WGL1TF1))-1)
  expect_error(all(igraph::get.edge.ids(G(tempNetwork), c("t2","t3","t3","t5"), error = FALSE) == 0), "Invalid vertex names")
  expect_equal(all(igraph::get.edge.ids(G(tempNetwork), c("t2","t5"), error = FALSE) > 0), TRUE)
  expect_equal(tS(tempNetwork)[get_step_ids(tempNetwork, c("t2"), c("t5"))]$weight, c(2))
  tempNetwork <-remove_point(WGL1LTF1, "t3")
  expect_equal(length(tP(tempNetwork)), length(tP(WGL1LTF1))-1)
  expect_equal(length(tS(tempNetwork)), length(tS(WGL1LTF1))-2)
  expect_error(all(igraph::get.edge.ids(G(tempNetwork), c("t2","t3","t3","t5"), error = FALSE) == 0), "Invalid vertex names")
  expect_equal(all(igraph::get.edge.ids(G(tempNetwork), c("t2","t5"), error = FALSE) > 0), TRUE)
  expect_equal(tS(tempNetwork)[get_step_ids(tempNetwork, c("t2"), c("t5"))]$weight, c(2))
  # --- Removing the only point of a singleton flow leaves an empty
  # (invalid) temp flow; the commented expectations document prior behavior ---
  expect_error(remove_point(GL0STF1, "t1"), error_message_tempflow_empty())
  # tempNetwork <-remove_point(GL0STF1, "t1")
  # expect_equal(length(tP(tempNetwork)), length(tP(GL0STF1))-1)
  # expect_equal(length(tS(tempNetwork)), length(tS(GL0STF1)))
  expect_error(remove_point(GL0STF3, "t1"), error_message_tempflow_empty())
  # tempNetwork <-remove_point(GL0STF3, "t1")
  # expect_equal(length(tP(tempNetwork)), length(tP(GL0STF3))-1)
  # expect_equal(length(tS(tempNetwork)), length(tS(GL0STF3))-1)
  # --- Larger example: removing "b" bridges 2x2 predecessor/successor pairs,
  # so the total step count is unchanged (-4 removed, +4 bridges) ---
  tempNetwork <-remove_point(WGL5ETF1, "b")
  expect_equal(length(tP(tempNetwork)), length(tP(WGL5ETF1))-1)
  expect_equal(length(tS(tempNetwork)), length(tS(WGL5ETF1)))
  expect_error(all(igraph::get.edge.ids(G(tempNetwork), c("p11","b","p23","b","b","p31","b","p41"), error = FALSE) == 0), "Invalid vertex names")
  expect_equal(all(igraph::get.edge.ids(G(tempNetwork), c("p11","p31","p11","p41","p23","p31","p23","p41"), error = FALSE) > 0), TRUE)
  expect_equal(tS(tempNetwork)[get_step_ids(tempNetwork, c("p11","p11","p23","p23"), c("p31","p41","p31","p41"))]$weight, c(3,3,2,2))
  # Raising one incident weight to 10 shifts the bridged weights accordingly.
  tempNetwork <-remove_point(set_step_weights(WGL5ETF1, "b","p31", 10), "b")
  expect_equal(length(tP(tempNetwork)), length(tP(WGL5ETF1))-1)
  expect_equal(length(tS(tempNetwork)), length(tS(WGL5ETF1)))
  expect_error(all(igraph::get.edge.ids(G(tempNetwork), c("p11","b","p23","b","b","p31","b","p41"), error = FALSE) == 0), "Invalid vertex names")
  expect_equal(all(igraph::get.edge.ids(G(tempNetwork), c("p11","p31","p11","p41","p23","p31","p23","p41"), error = FALSE) > 0), TRUE)
  expect_equal(tS(tempNetwork)[get_step_ids(tempNetwork, c("p11","p11","p23","p23"), c("p31","p41","p31","p41"))]$weight, c(12,3,11,2))
  # NOTE(review): the following block duplicates the WGL1LTF1/"t3" case above.
  tempNetwork <-remove_point(WGL1LTF1, "t3")
  expect_equal(length(tP(tempNetwork)), length(tP(WGL1LTF1))-1)
  expect_equal(length(tS(tempNetwork)), length(tS(WGL1LTF1))-2)
  expect_error(all(igraph::get.edge.ids(G(tempNetwork), c("t2","t3","t3","t5"), error = FALSE) == 0), "Invalid vertex names")
  expect_equal(all(igraph::get.edge.ids(G(tempNetwork), c("t2","t5"), error = FALSE) > 0), TRUE)
  expect_equal(tS(tempNetwork)[get_step_ids(tempNetwork, c("t2"), c("t5"))]$weight, c(2))
  # --- Weighted variants (steps of weight 2 -> bridges of weight 4) ---
  # NOTE(review): lengths are compared against WGL1TF1 although the network is
  # built from WGL1TF2 — presumably TF1/TF2 share topology; confirm.
  tempNetwork <-remove_point(WGL1TF2, "t2")
  expect_equal(length(tP(tempNetwork)), length(tP(WGL1TF1))-1)
  expect_equal(length(tS(tempNetwork)), length(tS(WGL1TF1))-1)
  expect_error(all(igraph::get.edge.ids(G(tempNetwork), c("t2","t3","t2","t4"), error = FALSE) == 0), "Invalid vertex names")
  expect_equal(all(igraph::get.edge.ids(G(tempNetwork), c("t1","t3","t1","t4"), error = FALSE) > 0), TRUE)
  expect_equal(tS(tempNetwork)[get_step_ids(tempNetwork, c("t1","t1"), c("t3","t4"))]$weight, c(4,4))
  tempNetwork <-remove_point(WGL1LTF2, "t2")
  expect_equal(length(tP(tempNetwork)), length(tP(WGL1LTF1))-1)
  expect_equal(length(tS(tempNetwork)), length(tS(WGL1LTF1))-2)
  expect_error(all(igraph::get.edge.ids(G(tempNetwork), c("t2","t3","t2","t4", "t2","t2"), error = FALSE) == 0), "Invalid vertex names")
  expect_equal(all(igraph::get.edge.ids(G(tempNetwork), c("t1","t3","t1","t4"), error = FALSE) > 0), TRUE)
  expect_equal(tS(tempNetwork)[get_step_ids(tempNetwork, c("t1","t1"), c("t3","t4"))]$weight, c(4,4))
  # --- Unknown point name / id is rejected ---
  expect_error(remove_point(WGL1TF1, c("t6")), error_message_name_not_in_object())
  expect_error(remove_point(WGL1TF1, c(6)), error_message_ids_not_in_object())
  # --- Removing a point invalidates any stored underlying temp graph ---
  testTempNetwork <- WGL1LTF1
  testTempNetwork$storeTempGraph <- TRUE
  testTempNetwork <- construct_underlying_graph(testTempNetwork)
  expect_equal(class(tG(testTempNetwork)),"igraph")
  testTempNetwork <- remove_point(WGL1LTF1, "t3")
  expect_equal(tG(testTempNetwork),NULL)
})
# Tests for induce_flow(): selecting a subflow by point ids or names.
test_that("test 'induce_flow(tempFlow, inpPointList, safe = TRUE)'", {
  # Local helper: TRUE iff two temp flows have isomorphic underlying graphs.
  same_flow <- function(a, b) {
    igraph::is_isomorphic_to(G(a), G(b))
  }
  # Ids or names that are not in the flow are rejected.
  expect_error(induce_flow(WGL1TF1, c(1, 2, 8)))
  expect_error(induce_flow(WGL1TF2, c(1, 2, 6)), error_message_ids_not_in_object())
  expect_error(induce_flow(WGL1TF2, c("t1", "t2", "t6")), error_message_name_not_in_object())
  # An empty selection yields an empty temp flow, which is an error.
  expect_error(induce_flow(WGL1TF1, c()), error_message_tempflow_empty())
  # expect_equal(igraph::is_isomorphic_to(G(induce_flow(WGL1TF1, c())),igraph::graph.empty()), TRUE)
  # Duplicate ids collapse, and names select the same subflow as ids;
  # selecting every vertex reproduces the full flow.
  expect_equal(same_flow(induce_flow(WGL1TF1, c(1, 1, 3, 5)), induce_flow(WGL1TF1, c(1, 3, 5))), TRUE)
  expect_equal(same_flow(induce_flow(WGL1TF1, c("t1", "t3", "t5")), induce_flow(WGL1TF1, c(1, 3, 5))), TRUE)
  expect_equal(same_flow(induce_flow(WGL1TF1, igraph::V(G(WGL1TF1))), WGL1TF1), TRUE)
  # Inducing a flow invalidates any stored underlying temp graph.
  tn <- WGL1TF1
  tn$storeTempGraph <- TRUE
  tn <- construct_underlying_graph(tn)
  expect_equal(class(tG(tn)), "igraph")
  tn <- induce_flow(WGL1TF1, c(1, 3, 5))
  expect_equal(tG(tn), NULL)
})
# Tests for slice_flow(): the subflow of all points lying on a directed path
# from some start point to some end point; compared (up to isomorphism)
# against the equivalent induce_flow() selection.
test_that("test 'slice_flow(tempFlow, startPointList, endPointList, safe = TRUE)'", {
  # Slicing from t2 to t5 keeps everything between them.
  expect_equal(igraph::is_isomorphic_to(G(slice_flow(WGL1TF1,2,5)),G(induce_flow(WGL1TF1, c(2,3,4,5)))), TRUE)
  # Start/end pairs with no connecting path yield an empty (invalid) flow;
  # the commented expectations document the previous empty-graph behavior.
  expect_error(slice_flow(WGL1TF1,3,4), error_message_tempflow_empty())
  # expect_equal(igraph::is_isomorphic_to(G(slice_flow(WGL1TF1,3,4)),igraph::graph.empty()), TRUE)
  expect_error(slice_flow(WGL1TF1,5,2), error_message_tempflow_empty())
  # expect_equal(igraph::is_isomorphic_to(G(slice_flow(WGL1TF1,5,2)),igraph::graph.empty()), TRUE)
  expect_equal(igraph::is_isomorphic_to(G(slice_flow(WGL1TF1,2,5)),G(induce_flow(WGL1TF1, c(2,3,4,5)))), TRUE)
  # A single point sliced to itself is just that point.
  expect_equal(igraph::is_isomorphic_to(G(slice_flow(WGL1TF2,1,1)),G(induce_flow(WGL1TF2, c(1)))), TRUE)
  expect_equal(igraph::is_isomorphic_to(G(slice_flow(WGL1LTF2,1,1)),G(induce_flow(WGL1LTF2, c(1)))), TRUE)
  # Multiple start/end points: the union of all reachable paths is taken.
  expect_equal(igraph::is_isomorphic_to(G(slice_flow(WGL1TF1,c(2,4),c(5,3))),G(induce_flow(WGL1TF1, c(2,3,4,5)))), TRUE)
  expect_error(slice_flow(WGL1TF1,3,4), error_message_tempflow_empty())
  # expect_equal(igraph::is_isomorphic_to(G(slice_flow(WGL1TF1,3,4)),igraph::graph.empty()), TRUE)
  expect_error(slice_flow(WGL1TF1,5,2), error_message_tempflow_empty())
  # expect_equal(igraph::is_isomorphic_to(G(slice_flow(WGL1TF1,5,2)),igraph::graph.empty()), TRUE)
  expect_equal(igraph::is_isomorphic_to(G(slice_flow(WGL1TF1,2,5)),G(induce_flow(WGL1TF1, c(2,3,4,5)))), TRUE)
  expect_equal(igraph::is_isomorphic_to(G(slice_flow(WGL1LTF1,c(3,4), c(3,4))),G(induce_flow(WGL1LTF1, c(3,4)))), TRUE)
  # Duplicated start/end points behave like a single occurrence.
  expect_equal(igraph::is_isomorphic_to(G(slice_flow(WGL1TF2,1,c(1,1))),G(induce_flow(WGL1TF2, c(1)))), TRUE)
  expect_equal(igraph::is_isomorphic_to(G(slice_flow(WGL1TF2,c(1,1),1)),G(induce_flow(WGL1TF2, c(1)))), TRUE)
  expect_equal(igraph::is_isomorphic_to(G(slice_flow(WGL1LTF2,1,c(1,1))),G(induce_flow(WGL1LTF2, c(1)))), TRUE)
  expect_equal(igraph::is_isomorphic_to(G(slice_flow(WGL1LTF2,c(1,1),1)),G(induce_flow(WGL1LTF2, c(1)))), TRUE)
  # An empty start or end list makes the slice empty (an error).
  # expect_equal(igraph::is_isomorphic_to(G(slice_flow(WGL1LTF2,c(1),c())),G(induce_flow(WGL1LTF2, c()))), TRUE)
  expect_error(slice_flow(WGL1LTF2,c(1),c()), error_message_tempflow_empty())
  # expect_equal(igraph::is_isomorphic_to(G(slice_flow(WGL1LTF2,c(),c(1))),G(induce_flow(WGL1LTF2, c()))), TRUE)
  expect_error(slice_flow(WGL1LTF2,c(),c(1)), error_message_tempflow_empty())
  # Larger example flow; the extra positional argument presumably toggles a
  # slicing mode (safe flag or inclusivity) — see slice_flow's signature.
  expect_equal(igraph::is_isomorphic_to(G(slice_flow(WGL5ETF1,c("p11","p23"),c("c"),1)),G(induce_flow(WGL5ETF1, c("p11","p23","b","p31","p32","p41","p42","c")))), TRUE)
  expect_equal(igraph::is_isomorphic_to(G(slice_flow(WGL5ETF1,c("a"),c("p11","p23"),1)),G(induce_flow(WGL5ETF1, c("a","p11","p21","p22","p23")))), TRUE)
  expect_equal(igraph::is_isomorphic_to(G(slice_flow(WGL5ETF1,c("p11","p23"),c("p32","p41"),1)),G(induce_flow(WGL5ETF1, c("p11","p23","b","p31","p32","p41")))), TRUE)
  # Out-of-range id (and missing end list) is rejected.
  expect_error(slice_flow(WGL1TF1, c(1,8)))
  # Slicing invalidates any stored underlying temp graph.
  testTempNetwork <- WGL1TF1
  testTempNetwork$storeTempGraph <- TRUE
  testTempNetwork <- construct_underlying_graph(testTempNetwork)
  expect_equal(class(tG(testTempNetwork)),"igraph")
  testTempNetwork <- slice_flow(WGL1TF1,2,5)
  expect_equal(tG(testTempNetwork),NULL)
})
# Tests for compute_tempdistance(): shortest temporal distance between
# two points, addressable by name or by numeric id.
test_that("test 'compute_tempdistance'", {
  # Pairs with no directed path between them are infinitely far apart.
  expect_equal(compute_tempdistance(WGL1TF1, "t5", "t1"), Inf)
  expect_equal(compute_tempdistance(WGL1TF1, "t3", "t4"), Inf)
  # Distance from a point to itself: 0 without a self-loop, the loop
  # weight (here 1) when the flow carries loops.
  expect_equal(compute_tempdistance(WGL1TF1, "t1", "t1"), 0)
  expect_equal(compute_tempdistance(WGL1LTF1, "t1", "t1"), 0)
  expect_equal(compute_tempdistance(WGL1LTF2, "t1", "t1"), 1)
  # Endpoints accept names and numeric ids in any combination.
  for (src in list("t1", 1)) {
    for (dst in list("t5", 5)) {
      expect_equal(compute_tempdistance(WGL1TF1, src, dst), 3)
    }
  }
  # Weighted and looped fixture variants of the same flow.
  expect_equal(compute_tempdistance(WGL1TF2, "t1", "t5"), 5)
  expect_equal(compute_tempdistance(WGL1LTF1, "t1", "t5"), 3)
  expect_equal(compute_tempdistance(WGL1LTF1, "t1", "t5"), 3)
  expect_equal(compute_tempdistance(WGL1LTF2, "t1", "t5"), 5)
  expect_equal(compute_tempdistance(WGL1LTF2, "t1", "t5"), 5)
  # Unknown names or ids are rejected.
  expect_error(compute_tempdistance(WGL1TF1, "t1", "t100"))
  expect_error(compute_tempdistance(WGL1TF1, "t1", 100))
  expect_error(compute_tempdistance(WGL1TF1, 100, "t1"))
  expect_error(compute_tempdistance(WGL1TF1, 111, 222))
})
context("test-tempnetwork-tempflow-basics")
PATH = system.file("inst","testdata","tempflow", package = "tempnetwork")
PATH_GRAPH = system.file("inst","testdata","tempgraph", package = "tempnetwork")
TF1 <- tempflow(igraph::read.graph(file.path(PATH, "tempflow_directed_acyclic_small_1.gml"), format = "graphml"))
TF2 <- tempflow(igraph::read.graph(file.path(PATH, "tempflow_weighted_directed_acyclic_small_1.gml"), format = "graphml"))
# TF3 <- tempflow(igraph::read.graph(file.path(PATH, "tempflow_weighted_forrest_small_1.gml"), format = "graphml"))
# TF4 <- tempflow(igraph::read.graph(file.path(PATH, "tempflow_weighted_tree_small_1.gml"), format = "graphml"))
# TF5 <- tempflow(igraph::read.graph(file.path(PATH, "tempflow_lattice_small_1.gml"), format = "graphml"), setDefaultValues = TRUE)
#
#
LTF1 <- tempflow(igraph::read.graph(file.path(PATH, "tempflow_directed_acyclic_loop_small_1.gml"), format = "graphml"))
LTF2 <- tempflow(igraph::read.graph(file.path(PATH, "tempflow_weighted_directed_acyclic_loop_small_1.gml"), format = "graphml"))
# LTF3 <- tempflow(igraph::read.graph(file.path(PATH, "tempflow_weighted_forrest_loop_small_1.gml"), format = "graphml"))
# LTF4 <- tempflow(igraph::read.graph(file.path(PATH, "tempflow_weighted_tree_loop_small_1.gml"), format = "graphml"))
# LTF5 <- tempflow(igraph::read.graph(file.path(PATH, "tempflow_lattice_loop_small_1.gml"), format = "graphml"), setDefaultValues = TRUE)
# LTF6 <- tempflow(igraph::read.graph(file.path(PATH, "tempflow_differentlyweighted_directed_acyclic_loop_small_1.gml"), format = "graphml"), setDefaultValues = TRUE)
#
STF1 <- tempflow(igraph::read.graph(file.path(PATH, "tempflow_singelton_1.gml"), format = "graphml"))
STF2 <- tempflow(igraph::read.graph(file.path(PATH, "tempflow_singelton_noname_1.gml"), format = "graphml"))
STF3 <- tempflow(igraph::read.graph(file.path(PATH, "tempflow_singelton_loop_1.gml"), format = "graphml"))
# WTF1 <- tempflow(igraph::read.graph(file.path(PATH, "tempflow_partialweighted_directed_acyclic_small_1.gml"), format = "graphml"), setDefaultValues = TRUE)
#
ETF1 <- tempflow(igraph::read.graph(file.path(PATH, "tempflow_example_2_6.gml"), format = "graphml"))
# ETF2 <- tempflow(igraph::read.graph(file.path(PATH, "tempflow_example_4_1.gml"), format = "graphml"))
# ETF3 <- tempflow(igraph::read.graph(file.path(PATH, "tempflow_example_4_2.gml"), format = "graphml"))
GL1 <- read_graph_list_all_variants(PATH_GRAPH, "tempgraph_default_p5_n5_v1")
# GL2 <- read_graph_list_all_variants(PATH_GRAPH, "tempgraph_default_p5_n5_v2")
# GL3 <- read_graph_list_all_variants(PATH_GRAPH, "tempgraph_default_p5_n5_v3")
# GL4 <- read_graph_list_all_variants(PATH_GRAPH, "tempgraph_p13_n5_random50")
GL5 <- read_graph_list_all_variants(PATH_GRAPH, "tempgraph_p11_n5_random50", graphNameList = list("a","p11","p21","p22","p23","b","p31","p32","p41","p42","c"))
# GL6 <- read_graph_list_all_variants(PATH_GRAPH, "tempgraph_default_p5_n5_v1", graphNameList = list("t1", "t2", "t3", "t12", "t13"))
GL1TF1 <- tempnetwork(tempFlow = TF1, equivalenceRelation = NULL, graphList = GL1$simple)
# GL2TF1 <- tempnetwork(tempFlow = TF1, equivalenceRelation = NULL, graphList = GL2$simple)
# GL3TF1 <- tempnetwork(tempFlow = TF1, equivalenceRelation = NULL, graphList = GL3$simple)
#
# GL1TF2 <- tempnetwork(tempFlow = TF2, equivalenceRelation = NULL, graphList = GL1$simple)
# GL2TF2 <- tempnetwork(tempFlow = TF2, equivalenceRelation = NULL, graphList = GL2$simple)
# GL3TF2 <- tempnetwork(tempFlow = TF2, equivalenceRelation = NULL, graphList = GL3$simple)
#
# GL4TF3 <- tempnetwork(tempFlow = TF3, equivalenceRelation = NULL, graphList = GL4$simple)
# GL4TF4 <- tempnetwork(tempFlow = TF4, equivalenceRelation = NULL, graphList = GL4$simple)
# GL4TF5 <- tempnetwork(tempFlow = TF5, equivalenceRelation = NULL, graphList = GL4$simple)
#
# GL1LTF1 <- tempnetwork(tempFlow = LTF1, equivalenceRelation = NULL, graphList = GL1$simple)
# GL2LTF1 <- tempnetwork(tempFlow = LTF1, equivalenceRelation = NULL, graphList = GL2$simple)
# GL3LTF1 <- tempnetwork(tempFlow = LTF1, equivalenceRelation = NULL, graphList = GL3$simple)
#
# GL1LTF2 <- tempnetwork(tempFlow = LTF2, equivalenceRelation = NULL, graphList = GL1$simple)
# GL2LTF2 <- tempnetwork(tempFlow = LTF2, equivalenceRelation = NULL, graphList = GL2$simple)
# GL3LTF2 <- tempnetwork(tempFlow = LTF2, equivalenceRelation = NULL, graphList = GL3$simple)
#
# GL4LTF3 <- tempnetwork(tempFlow = LTF3, equivalenceRelation = NULL, graphList = GL4$simple)
# GL4LTF4 <- tempnetwork(tempFlow = LTF4, equivalenceRelation = NULL, graphList = GL4$simple)
# GL4LTF5 <- tempnetwork(tempFlow = LTF5, equivalenceRelation = NULL, graphList = GL4$simple)
# GL1LTF6 <- tempnetwork(tempFlow = LTF6, equivalenceRelation = NULL, graphList = GL1$simple)
#
GL0STF1 <- tempnetwork(tempFlow = STF1, equivalenceRelation = NULL, graphList = list("t1"=GL1$simple$t1))
GL0STF2 <- tempnetwork(tempFlow = STF2, equivalenceRelation = NULL, graphList = list("1"=GL1$simple$t1))
GL0STF3 <- tempnetwork(tempFlow = STF3, equivalenceRelation = NULL, graphList = list("t1"=GL1$simple$t1))
#
# GL5ETF1 <- tempnetwork(tempFlow = ETF1, equivalenceRelation = NULL, graphList = GL5$simple)
# GL6ETF2 <- tempnetwork(tempFlow = ETF2, equivalenceRelation = NULL, graphList = GL6$simple)
#
#
# Weighted temporal network over flow TF1; the main fixture of the step tests.
WGL1TF1 <- tempnetwork(tempFlow = TF1, equivalenceRelation = NULL, graphList = GL1$weighted)
# WGL2TF1 <- tempnetwork(tempFlow = TF1, equivalenceRelation = NULL, graphList = GL2$weighted)
# WGL3TF1 <- tempnetwork(tempFlow = TF1, equivalenceRelation = NULL, graphList = GL3$weighted)
#
# Weighted temporal network over flow TF2.
WGL1TF2 <- tempnetwork(tempFlow = TF2, equivalenceRelation = NULL, graphList = GL1$weighted)
# WGL2TF2 <- tempnetwork(tempFlow = TF2, equivalenceRelation = NULL, graphList = GL2$weighted)
# WGL3TF2 <- tempnetwork(tempFlow = TF2, equivalenceRelation = NULL, graphList = GL3$weighted)
#
# WGL4TF3 <- tempnetwork(tempFlow = TF3, equivalenceRelation = NULL, graphList = GL4$weighted)
# WGL4TF4 <- tempnetwork(tempFlow = TF4, equivalenceRelation = NULL, graphList = GL4$weighted)
# WGL4TF5 <- tempnetwork(tempFlow = TF5, equivalenceRelation = NULL, graphList = GL4$weighted)
#
# Weighted temporal network over flow LTF1; the main fixture of the point tests.
WGL1LTF1 <- tempnetwork(tempFlow = LTF1, equivalenceRelation = NULL, graphList = GL1$weighted)
# WGL2LTF1 <- tempnetwork(tempFlow = LTF1, equivalenceRelation = NULL, graphList = GL2$weighted)
# WGL3LTF1 <- tempnetwork(tempFlow = LTF1, equivalenceRelation = NULL, graphList = GL3$weighted)
#
#
# Weighted temporal network over flow LTF2.
WGL1LTF2 <- tempnetwork(tempFlow = LTF2, equivalenceRelation = NULL, graphList = GL1$weighted)
# WGL2LTF2 <- tempnetwork(tempFlow = LTF2, equivalenceRelation = NULL, graphList = GL2$weighted)
# WGL3LTF2 <- tempnetwork(tempFlow = LTF2, equivalenceRelation = NULL, graphList = GL3$weighted)
#
# WGL4LTF3 <- tempnetwork(tempFlow = LTF3, equivalenceRelation = NULL, graphList = GL4$weighted)
# WGL4LTF4 <- tempnetwork(tempFlow = LTF4, equivalenceRelation = NULL, graphList = GL4$weighted)
# WGL4LTF5 <- tempnetwork(tempFlow = LTF5, equivalenceRelation = NULL, graphList = GL4$weighted)
# WGL1LTF6 <- tempnetwork(tempFlow = LTF6, equivalenceRelation = NULL, graphList = GL1$weighted)
#
# Weighted temporal network over flow ETF1 with the GL5 graphs (vertex
# names p31/p41/... used by the insert_point test below).
WGL5ETF1 <- tempnetwork(tempFlow = ETF1, equivalenceRelation = NULL, graphList = GL5$weighted)
# WGL1ETF2 <- tempnetwork(tempFlow = ETF2, equivalenceRelation = NULL, graphList = GL6$weighted)
#
#
#
# EGL1TF1 <- set_equivRelation(GL1TF1, eR(GL1TF1, TRUE))
# EGL2TF1 <- set_equivRelation(GL2TF1, eR(GL2TF1, TRUE))
# EGL3TF1 <- set_equivRelation(GL3TF1, eR(GL3TF1, TRUE))
#
# EGL1TF2 <- set_equivRelation(GL1TF2, eR(GL1TF2, TRUE))
# EGL2TF2 <- set_equivRelation(GL2TF2, eR(GL2TF2, TRUE))
# EGL3TF2 <- set_equivRelation(GL3TF2, eR(GL3TF2, TRUE))
#
#
# EGL4TF3 <- set_equivRelation(GL4TF3, eR(GL4TF3, TRUE))
# EGL4TF4 <- set_equivRelation(GL4TF4, eR(GL4TF4, TRUE))
# EGL4TF5 <- set_equivRelation(GL4TF5, eR(GL4TF5, TRUE))
#
# EGL1LTF1 <- set_equivRelation(GL1LTF1, eR(GL1LTF1, TRUE))
# EGL2LTF1 <- set_equivRelation(GL2LTF1, eR(GL2LTF1, TRUE))
# EGL3LTF1 <- set_equivRelation(GL3LTF1, eR(GL3LTF1, TRUE))
#
# EGL1LTF2 <- set_equivRelation(GL1LTF2, eR(GL1LTF2, TRUE))
# EGL2LTF2 <- set_equivRelation(GL2LTF2, eR(GL2LTF2, TRUE))
# EGL3LTF2 <- set_equivRelation(GL3LTF2, eR(GL3LTF2, TRUE))
#
# EGL4LTF3 <- set_equivRelation(GL4LTF3, eR(GL4LTF3, TRUE))
# EGL4LTF4 <- set_equivRelation(GL4LTF4, eR(GL4LTF4, TRUE))
# EGL4LTF5 <- set_equivRelation(GL4LTF5, eR(GL4LTF5, TRUE))
# EGL1LTF6 <- set_equivRelation(GL1LTF6, eR(GL1LTF6, TRUE))
#
# EGL0STF1 <- set_equivRelation(GL0STF1, eR(GL0STF1, TRUE))
# EGL0STF2 <- set_equivRelation(GL0STF2, eR(GL0STF2, TRUE))
# EGL0STF3 <- set_equivRelation(GL0STF3, eR(GL0STF3, TRUE))
#
# EGL5ETF1 <- set_equivRelation(GL5ETF1, eR(GL5ETF1, TRUE))
# EGL6ETF2 <- set_equivRelation(GL6ETF2, eR(GL6ETF2, TRUE))
#
#
# WGL1TF1 with its derived equivalence relation attached.
EWGL1TF1 <- set_equivRelation(WGL1TF1, eR(WGL1TF1, TRUE))
# EWGL2TF1 <- set_equivRelation(WGL2TF1, eR(WGL2TF1, TRUE))
# EWGL3TF1 <- set_equivRelation(WGL3TF1, eR(WGL3TF1, TRUE))
#
# EWGL1TF2 <- set_equivRelation(WGL1TF2, eR(WGL1TF2, TRUE))
# EWGL2TF2 <- set_equivRelation(WGL2TF2, eR(WGL2TF2, TRUE))
# EWGL3TF2 <- set_equivRelation(WGL3TF2, eR(WGL3TF2, TRUE))
#
# EWGL4TF3 <- set_equivRelation(WGL4TF3, eR(WGL4TF3, TRUE))
# EWGL4TF4 <- set_equivRelation(WGL4TF4, eR(WGL4TF4, TRUE))
# EWGL4TF5 <- set_equivRelation(WGL4TF5, eR(WGL4TF5, TRUE))
#
# WGL1LTF1 with its derived equivalence relation attached.
EWGL1LTF1 <- set_equivRelation(WGL1LTF1, eR(WGL1LTF1, TRUE))
# EWGL2LTF1 <- set_equivRelation(WGL2LTF1, eR(WGL2LTF1, TRUE))
# EWGL3LTF1 <- set_equivRelation(WGL3LTF1, eR(WGL3LTF1, TRUE))
#
#
# EWGL1LTF2 <- set_equivRelation(WGL1LTF2, eR(WGL1LTF2, TRUE))
# EWGL2LTF2 <- set_equivRelation(WGL2LTF2, eR(WGL2LTF2, TRUE))
# EWGL3LTF2 <- set_equivRelation(WGL3LTF2, eR(WGL3LTF2, TRUE))
#
# EWGL4LTF3 <- set_equivRelation(WGL4LTF3, eR(WGL4LTF3, TRUE))
# EWGL4LTF4 <- set_equivRelation(WGL4LTF4, eR(WGL4LTF4, TRUE))
# EWGL4LTF5 <- set_equivRelation(WGL4LTF5, eR(WGL4LTF5, TRUE))
# EWGL1LTF6 <- set_equivRelation(WGL1LTF6, eR(WGL1LTF6, TRUE))
#
# EWGL5ETF1 <- set_equivRelation(WGL5ETF1, eR(WGL5ETF1, TRUE))
# EWGL1ETF2 <- set_equivRelation(WGL1ETF2, eR(WGL1ETF2, TRUE))
test_that("test 'add_points(tempNetwork, newPointList, graphList, equivRel, attrList = NULL, safe = TRUE)'", {
  # Graphs (and, optionally, an equivalence-relation extension) for the two
  # new temporal points.
  addGL <- list("t6" = GL1$simple$t1, "t7" = GL1$simple$t2)
  addER <- list("v1" = list("t6" = "v1", "t7" = "v1"), "v2" = list("t6" = "v2", "t7" = "v2"),
                "v3" = list("t6" = "v3", "t7" = "v3"), "v4" = list("t6" = "v4", "t7" = "v4"),
                "v5" = list("t6" = "v5", "t7" = "v5"))
  allPoints <- c("t1", "t2", "t3", "t4", "t5", "t6", "t7")
  # Plain insertion of two new points.
  grown <- add_points(WGL1LTF1, c("t6", "t7"), addGL)
  expect_equal(length(tP(grown)), 7)
  expect_equal(tP(grown)$name, allPoints)
  # Vertex attributes: one value per new point, or a single recycled value.
  grownAB <- add_points(WGL1LTF1, c("t6", "t7"), addGL, attrList = list("RealName" = c("a", "b")))
  expect_equal(tP(grownAB)$name, allPoints)
  expect_equal(tP(grownAB)$RealName, c(NA, NA, NA, NA, NA, "a", "b"))
  grownA <- add_points(WGL1LTF1, c("t6", "t7"), addGL, attrList = list("RealName" = c("a"))) 
  expect_equal(tP(grownA)$RealName, c(NA, NA, NA, NA, NA, "a", "a"))
  # Adding no points leaves the flow graph unchanged (up to isomorphism).
  expect_equal(igraph::is_isomorphic_to(G(WGL1LTF1), G(add_points(WGL1LTF1, c(), list()))), TRUE)
  # A network carrying an equivalence relation grows its relation as well,
  # both without and with an explicitly supplied extension.
  eGrown <- add_points(EWGL1LTF1, c("t6", "t7"), addGL)
  expect_equal(length(tP(eGrown)), 7)
  expect_equal(length(gL(eGrown)), 7)
  expect_equal(all(sapply(gL(eGrown), function(x) class(x) == "igraph")), TRUE)
  expect_equal(all(sapply(eR(eGrown), function(x) length(x) == 7)), TRUE)
  expect_equal(names(tP(eGrown)), allPoints)
  expect_equal(names(gL(eGrown)), allPoints)
  expect_equal(all(sapply(eR(eGrown), function(x) names(x) == allPoints)), TRUE)
  eGrownER <- add_points(EWGL1LTF1, c("t6", "t7"), addGL, addER)
  expect_equal(length(tP(eGrownER)), 7)
  expect_equal(length(gL(eGrownER)), 7)
  expect_equal(all(sapply(gL(eGrownER), function(x) class(x) == "igraph")), TRUE)
  expect_equal(all(sapply(eR(eGrownER), function(x) length(x) == 7)), TRUE)
  expect_equal(names(tP(eGrownER)), allPoints)
  expect_equal(names(gL(eGrownER)), allPoints)
  expect_equal(all(sapply(eR(eGrownER), function(x) names(x) == allPoints)), TRUE)
  # Attribute vectors must match the number of new points or have length one.
  lengthErr <- error_message_vector_not_equal_length_and_not_one()
  expect_error(tP(add_points(WGL1LTF1, c("t6", "t7", "t8"), addGL, attrList = list("RealName" = c("a", "b"))))$RealName, lengthErr)
  expect_error(tP(add_points(WGL1LTF1, c("t6", "t7", "t8"), addGL, attrList = list("RealName" = c("a", "b", "c", "d"))))$name, lengthErr)
  # New point names must not collide with existing ones or with each other.
  expect_error(add_points(WGL1LTF1, c("t5", "t7"), addGL), error_message_names_not_unique())
  expect_error(add_points(WGL1LTF1, c("t6", "t6"), addGL), error_message_names_not_unique())
  # The graphList must be indexed exactly by the new point names.
  E1addGL <- list("t9" = GL1$simple$t1, "t7" = GL1$simple$t2)
  expect_error(add_points(EWGL1LTF1, c("t6", "t7"), E1addGL), error_message_not_tempflow_graphlist_index_equality())
  # Malformed equivalence-relation extensions: rejected as not disjoint ...
  E1addER <- list("v1" = list("t6" = "v2", "t7" = "v1"), "v2" = list("t6" = "v2", "t7" = "v2"),
                  "v3" = list("t6" = "v3", "t7" = "v3"), "v4" = list("t6" = "v4", "t7" = "v4"),
                  "v5" = list("t6" = "v5", "t7" = "v5"))
  expect_error(add_points(EWGL1LTF1, c("t6", "t7"), addGL, E1addER), error_message_equivclass_not_disjoined())
  # ... rejected as not exhaustive ...
  E2addER <- list("v1" = list("t6" = "a", "t7" = "v1"), "v2" = list("t6" = "v2", "t7" = "v2"),
                  "v3" = list("t6" = "v3", "t7" = "v3"), "v4" = list("t6" = "v4", "t7" = "v4"),
                  "v5" = list("t6" = "v5", "t7" = "v5"))
  expect_error(add_points(EWGL1LTF1, c("t6", "t7"), addGL, E2addER), error_message_equivalenceclass_is_not_exhaustive())
  # ... rejected for an index mismatch with the temporal flow (unknown point,
  # duplicated class name, surplus class, surplus point entry) ...
  E3addER <- list("v1" = list("t6" = "v1", "t9" = "v1"), "v2" = list("t6" = "v2", "t7" = "v2"),
                  "v3" = list("t6" = "v3", "t7" = "v3"), "v4" = list("t6" = "v4", "t7" = "v4"),
                  "v5" = list("t6" = "v5", "t7" = "v5"))
  expect_error(add_points(EWGL1LTF1, c("t6", "t7"), addGL, E3addER), error_message_not_tempflow_equivclass_index_equality())
  E4addER <- list("v1" = list("t6" = "v1", "t7" = "v1"), "v1" = list("t6" = "v2", "t7" = "v2"),
                  "v3" = list("t6" = "v3", "t7" = "v3"), "v4" = list("t6" = "v4", "t7" = "v4"),
                  "v5" = list("t6" = "v5", "t7" = "v5"))
  expect_error(add_points(EWGL1LTF1, c("t6", "t7"), addGL, E4addER), error_message_not_tempflow_equivclass_index_equality())
  E5addER <- list("v1" = list("t6" = "v1", "t7" = "v1"), "v2" = list("t6" = "v2", "t7" = "v2"),
                  "v3" = list("t6" = "v3", "t7" = "v3"), "v4" = list("t6" = "v4", "t7" = "v4"),
                  "v6" = list("t6" = "v6", "t7" = "v6"), "v5" = list("t6" = "v5", "t7" = "v5"))
  expect_error(add_points(EWGL1LTF1, c("t6", "t7"), addGL, E5addER), error_message_not_tempflow_equivclass_index_equality())
  E6addER <- list("v1" = list("t6" = "v1", "t7" = "v1"), "v2" = list("t6" = "v2", "t7" = "v2"),
                  "v3" = list("t6" = "v3", "t7" = "v3"), "v4" = list("t6" = "v4", "t7" = "v4"),
                  "v5" = list("t6" = "v5", "t7" = "v5", "t8" = "v7"))
  expect_error(add_points(EWGL1LTF1, c("t6", "t7"), addGL, E6addER), error_message_not_tempflow_equivclass_index_equality())
  # ... and rejected when a class length does not match the vertices.
  E7addER <- list("v1" = list("t6" = "v1", "t7" = "v1"), "v2" = list("t6" = "v2", "t7" = "v2"),
                  "v3" = list("t6" = "v3", "t7" = "v3"), "v4" = list("t6" = "v4", "t7" = "v4"),
                  "v6" = list("t1" = "v6", "t2" = "v6", "t3" = "v6", "t4" = "v6", "t5" = "v6", "t6" = "v6", "t7" = "v6"),
                  "v5" = list("t6" = "v5", "t7" = "v5"))
  expect_error(add_points(EWGL1LTF1, c("t6", "t7"), addGL, E7addER), error_message_equivrel_not_equal_length_to_vertices())
  # Adding points invalidates a cached underlying temporal graph.
  cached <- WGL1LTF1
  cached$storeTempGraph <- TRUE
  cached <- construct_underlying_graph(cached)
  expect_equal(class(tG(cached)), "igraph")
  cached <- add_points(WGL1LTF1, c("t6", "t7"), addGL)
  expect_equal(tG(cached), NULL)
})
test_that("test 'delete_points(tempFlow, deletePointList, safe = TRUE) '", {
  remaining <- c("t1", "t2", "t3", "t4", "t5")
  # An empty (or NULL) selection deletes nothing.
  expect_equal(tP(delete_points(WGL1LTF1, c()))$name, remaining)
  expect_equal(names(gL(delete_points(WGL1LTF1, c()))), remaining)
  expect_equal(tP(delete_points(WGL1LTF1, c(NULL)))$name, remaining)
  expect_equal(names(gL(delete_points(WGL1LTF1, c(NULL)))), remaining)
  # Deletion by name; duplicate names in the selection are tolerated.
  lessT3 <- delete_points(WGL1LTF1, c("t3", "t3"))
  expect_equal(tP(lessT3)$name, c("t1", "t2", "t4", "t5"))
  expect_equal(names(gL(lessT3)), c("t1", "t2", "t4", "t5"))
  # Deletion by numeric id.
  lessIds <- delete_points(WGL1LTF1, c(3, 4))
  expect_equal(tP(lessIds)$name, c("t1", "t2", "t5"))
  expect_equal(names(gL(lessIds)), c("t1", "t2", "t5"))
  # A temporal flow must never become empty.
  expect_error(delete_points(WGL1LTF1, c(1, 2, 3, 4, 5)), error_message_tempflow_empty())
  # Unknown names/ids (including NA/NaN) are rejected.
  expect_error(delete_points(WGL1LTF1, c("t3", "t7")), error_message_name_not_in_object())
  expect_error(delete_points(WGL1LTF1, c("t3", "t7")), error_message_name_not_in_object())
  expect_error(delete_points(WGL1LTF1, c(NA, "t3")), error_message_name_not_in_object())
  expect_error(delete_points(WGL1LTF1, c(NaN, "t3")), error_message_name_not_in_object())
  expect_error(delete_points(WGL1LTF1, c(3, 7)), error_message_ids_not_in_object())
  expect_error(delete_points(WGL1LTF1, c(3, "t3")), error_message_name_not_in_object())
  # Deleting points invalidates a cached underlying temporal graph.
  cached <- WGL1LTF1
  cached$storeTempGraph <- TRUE
  cached <- construct_underlying_graph(cached)
  expect_equal(class(tG(cached)), "igraph")
  cached <- delete_points(WGL1LTF1, c("t3", "t3"))
  expect_equal(tG(cached), NULL)
})
test_that("test 'add_steps(tempFlow, srcPointList, dstPointList, weightList = NULL, attrList = NULL, safe = TRUE)'", {
  # New steps can be addressed by numeric ids, by names, or by a mix of both.
  byId <- add_steps(WGL1TF1, c(1, 3), c(5, 4))
  expect_equal(igraph::get.edge.ids(G(byId), c("t1", "t5"), error = FALSE) > 0, TRUE)
  expect_equal(igraph::get.edge.ids(G(byId), c("t3", "t4"), error = FALSE) > 0, TRUE)
  byName <- add_steps(WGL1TF1, c("t1", "t3"), c("t5", "t4"))
  expect_equal(igraph::get.edge.ids(G(byName), c("t1", "t5"), error = FALSE) > 0, TRUE)
  expect_equal(igraph::get.edge.ids(G(byName), c("t3", "t4"), error = FALSE) > 0, TRUE)
  expect_equal(igraph::get.edge.ids(G(add_steps(WGL1TF1, c(1, 3), c("t5", "t4"))), c("t1", "t5"), error = FALSE) > 0, TRUE)
  expect_equal(igraph::get.edge.ids(G(add_steps(WGL1TF1, c("t1", "t3"), c(5, 4))), c("t3", "t4"), error = FALSE) > 0, TRUE)
  # Self-loops can be added.
  loops <- add_steps(WGL1TF1, c("t1", "t3"), c("t1", "t3"))
  expect_equal(igraph::get.edge.ids(G(loops), c("t1", "t1"), error = FALSE) > 0, TRUE)
  expect_equal(igraph::get.edge.ids(G(loops), c("t3", "t3"), error = FALSE) > 0, TRUE)
  # Default weights: the new t1->t1 self-loop gets 0, the t3->t4 step gets 1.
  nSteps <- length(tS(WGL1TF1))
  defaultW <- add_steps(WGL1TF1, c("t1", "t3"), c("t1", "t4"))
  expect_equal(tS(defaultW)$weight[nSteps + 1], 0)
  expect_equal(tS(defaultW)$weight[nSteps + 2], 1)
  # Explicit weights are taken as given; a single weight is recycled.
  givenW <- add_steps(WGL1TF1, c("t1", "t3"), c("t1", "t4"), c(20, 40))
  expect_equal(tS(givenW)$weight[nSteps + 1], 20)
  expect_equal(tS(givenW)$weight[nSteps + 2], 40)
  recycledW <- add_steps(WGL1TF1, c("t1", "t3", "t1"), c("t1", "t4", "t5"), c(20))
  expect_equal(tS(recycledW)$weight[nSteps + 1], 20)
  expect_equal(tS(recycledW)$weight[nSteps + 2], 20)
  expect_equal(tS(recycledW)$weight[nSteps + 3], 20)
  # Arbitrary edge attributes can be supplied through attrList.
  withAttrs <- add_steps(WGL1TF1, c("t1", "t3", "t1"), c("t1", "t4", "t5"),
                         attrList = list("name" = c("a", "b", "c"), "rname" = c("a", "b", "c")))
  expect_equal(tS(withAttrs)$name[nSteps + 1], "a")
  expect_equal(tS(withAttrs)$name[nSteps + 2], "b")
  expect_equal(tS(withAttrs)$name[nSteps + 3], "c")
  expect_equal(tS(withAttrs)$rname[nSteps + 1], "a")
  expect_equal(tS(withAttrs)$rname[nSteps + 2], "b")
  expect_equal(tS(withAttrs)$rname[nSteps + 3], "c")
  # weightList takes precedence over a "weight" entry in attrList.
  precedence <- add_steps(WGL1TF1, c("t1", "t3", "t1"), c("t1", "t4", "t5"),
                          weightList = c(20, 40, 60),
                          attrList = list("name" = c("a", "b", "c"), "weight" = c(30, 50, 70)))
  expect_equal(tS(precedence)$weight[nSteps + 1], 20)
  expect_equal(tS(precedence)$weight[nSteps + 2], 40)
  expect_equal(tS(precedence)$weight[nSteps + 3], 60)
  # Invalid endpoints (unknown, NA/NaN, mixed-type coercion), duplicate
  # parallel edges and mismatched vector lengths are rejected.
  expect_error(add_steps(WGL1TF1, c("t1", 3), c("t5", 4)), error_message_name_not_in_object())
  expect_error(add_steps(WGL1TF1, c("t1", "t2"), c("t2", "t5")), error_message_not_simple())
  expect_error(add_steps(WGL1TF1, c("t1", "t2"), c("t7", "t5")), error_message_name_not_in_object())
  expect_error(add_steps(WGL1TF1, c("t1", "t2", "t3"), c("t4", "t5")), error_message_vector_not_equal_length())
  expect_error(add_steps(WGL1TF1, c("t1", "t2"), c("t4", "t5", "t3")), error_message_vector_not_equal_length())
  expect_error(add_steps(WGL1TF1, c("t1", "t2", NULL), c("t4", "t5", "t3")), error_message_vector_not_equal_length())
  expect_error(add_steps(WGL1TF1, c("t1", NA), c("t4", "t5")), error_message_name_not_in_object())
  expect_error(add_steps(WGL1TF1, c("t1", "t2"), c("t4", NA)), error_message_name_not_in_object())
  expect_error(add_steps(WGL1TF1, c("t1", NaN), c("t4", "t5")), error_message_name_not_in_object())
  expect_error(add_steps(WGL1TF1, c("t1", "t2"), c("t4", NaN)), error_message_name_not_in_object())
  # Weight/attribute vectors must be numeric, non-negative and of matching length.
  expect_error(add_steps(WGL1TF1, c("t1", "t3", "t1"), c("t1", "t4", "t5"), c(20, 40)), error_message_vector_not_equal_lengths_and_not_one())
  expect_error(add_steps(WGL1TF1, c("t1", "t3", "t1"), c("t1", "t4", "t5"), c(-20, 40, 60)), error_message_weights_not_nonnegative())
  expect_error(add_steps(WGL1TF1, c("t1", "t3", "t1"), c("t1", "t4", "t5"), c(20, "40", 60)), error_message_weights_not_numeric())
  expect_error(add_steps(WGL1TF1, c("t1", "t3", "t1"), c("t1", "t4", "t5"), c(20, NA, 60)), error_message_weights_not_numeric())
  expect_error(add_steps(WGL1TF1, c("t1", "t3", "t1"), c("t1", "t4", "t5"), c(20, NaN, 60)), error_message_weights_not_numeric())
  expect_error(add_steps(WGL1TF1, c("t1", "t3", "t1"), c("t1", "t4", "t5"), attrList = list("name" = c("a", "b"))), error_message_vector_not_equal_lengths_and_not_one())
  expect_error(add_steps(WGL1TF1, c("t1", "t3", "t1"), c("t1", "t4", "t5"), attrList = list("name" = c("a", "b", "c", "d"))), error_message_vector_not_equal_lengths_and_not_one())
  # Adding steps invalidates a cached underlying temporal graph.
  cached <- WGL1LTF1
  cached$storeTempGraph <- TRUE
  cached <- construct_underlying_graph(cached)
  expect_equal(class(tG(cached)), "igraph")
  cached <- add_steps(WGL1TF1, c("t1", "t3"), c("t1", "t4"))
  expect_equal(tG(cached), NULL)
})
test_that("test 'delete_steps_from_id(tempFlow, stepIDList, safe = TRUE)'", {
# Deleting steps by their edge id removes the corresponding edges;
# duplicate ids in the selection are tolerated.
expect_equal(sum(igraph::get.edge.ids(G(delete_steps_from_id(WGL1LTF1,c(6,7))), c("t1","t1","t2","t2"), error = FALSE)), 0)
expect_equal(sum(igraph::get.edge.ids(G(delete_steps_from_id(WGL1LTF1,c(6,7))), c("t1","t1","t2","t2"), error = FALSE)), 0)
expect_equal(sum(igraph::get.edge.ids(G(delete_steps_from_id(WGL1TF1,c(1,4))), c("t1","t2","t4","t5"), error = FALSE)), 0)
expect_equal(sum(igraph::get.edge.ids(G(delete_steps_from_id(WGL1TF1,c(1,4,1))), c("t1","t2","t4","t5"), error = FALSE)), 0)
# NOTE(review): the error-path expectations below call delete_steps() (with
# src/dst lists), not delete_steps_from_id() — this looks like a copy/paste
# from the delete_steps test; confirm whether delete_steps_from_id's own
# error handling is meant to be exercised here instead.
expect_error(delete_steps(WGL1TF1,c(1,9), c(1,5)), error_message_ids_not_in_object())
expect_error(delete_steps(WGL1TF1,c(1,4), c(1,9)), error_message_ids_not_in_object())
expect_error(delete_steps(WGL1TF1,c(1,2,NA), c(2,3,4)), error_message_ids_not_in_object())
expect_error(delete_steps(WGL1TF1,c(1,2,3), c(2,3,NA)), error_message_ids_not_in_object())
expect_error(delete_steps(WGL1TF1,c(1,2,NaN), c(2,3,4)), error_message_ids_not_in_object())
expect_error(delete_steps(WGL1TF1,c(1,2,3), c(2,3,NaN)), error_message_ids_not_in_object())
expect_error(delete_steps(WGL1TF1,c(1,"t2"), c(2,3)), error_message_name_not_in_object())
expect_error(delete_steps(WGL1TF1,c(1,2), c(2,"t3")), error_message_name_not_in_object())
expect_error(delete_steps(WGL1TF1,c("t1","t2"), c("t2","t5")), error_message_edge_not_exist())
# Deleting steps by id invalidates a cached underlying temporal graph.
testTempNetwork <- WGL1LTF1
testTempNetwork$storeTempGraph <- TRUE
testTempNetwork <- construct_underlying_graph(testTempNetwork)
expect_equal(class(tG(testTempNetwork)),"igraph")
testTempNetwork <- delete_steps_from_id(WGL1LTF1,c(6,7))
expect_equal(tG(testTempNetwork),NULL)
})
test_that("test 'delete_steps(tempFlow, srcPointList, dstPointList, safe = TRUE)'", {
  # Vertex pairs that must have no connecting edge after deletion.
  flowPairs <- c("t1", "t2", "t4", "t5")
  loopPairs <- c("t1", "t1", "t2", "t2")
  # Steps can be removed by name, by id, or by a mix; duplicate
  # specifications are tolerated.
  expect_equal(sum(igraph::get.edge.ids(G(delete_steps(WGL1TF1, c("t1", "t4"), c("t2", "t5"))), flowPairs, error = FALSE)), 0)
  expect_equal(sum(igraph::get.edge.ids(G(delete_steps(WGL1LTF1, c("t1", "t2"), c("t1", "t2"))), loopPairs, error = FALSE)), 0)
  expect_equal(sum(igraph::get.edge.ids(G(delete_steps(WGL1TF1, c("t1", "t4", "t1"), c("t2", "t5", "t2"))), flowPairs, error = FALSE)), 0)
  expect_equal(sum(igraph::get.edge.ids(G(delete_steps(WGL1TF1, c(1, 4), c(2, 5))), flowPairs, error = FALSE)), 0)
  expect_equal(sum(igraph::get.edge.ids(G(delete_steps(WGL1LTF1, c(1, 2), c("t1", "t2"))), loopPairs, error = FALSE)), 0)
  expect_equal(sum(igraph::get.edge.ids(G(delete_steps(WGL1TF1, c(1, 4, 1), c(2, 5, 2))), flowPairs, error = FALSE)), 0)
  # Unknown endpoints (including NA/NaN) and missing edges are rejected.
  badName <- error_message_name_not_in_object()
  expect_error(delete_steps(WGL1TF1, c("t1", "t6"), c("t2", "t5")), badName)
  expect_error(delete_steps(WGL1TF1, c("t1", "t4"), c("t2", "t6")), badName)
  expect_error(delete_steps(WGL1TF1, c("t1", "t4"), c("t5", "t5")), "One of the specified edges does not exist.")
  expect_error(delete_steps(WGL1TF1, c("t1", "t4", "t2"), c("t5", "t5", NA)), badName)
  expect_error(delete_steps(WGL1TF1, c("t1", "t4", NA), c("t5", "t5", "t3")), badName)
  expect_error(delete_steps(WGL1TF1, c("t1", "t4", "t2"), c("t5", "t5", NaN)), badName)
  expect_error(delete_steps(WGL1TF1, c("t1", "t4", NaN), c("t5", "t5", "t3")), badName)
  # Source and destination lists must have equal length (NULL entries are
  # dropped by c(), shortening the vector).
  badLength <- error_message_vector_not_equal_length()
  expect_error(delete_steps(WGL1TF1, c("t1", "t4", "t2"), c("t5", "t5", NULL)), badLength)
  expect_error(delete_steps(WGL1TF1, c("t1", "t4", NULL), c("t5", "t5", "t3")), badLength)
  expect_error(delete_steps(WGL1TF1, c("t1", "t4", "t2"), c("t5", "t5")), badLength)
  expect_error(delete_steps(WGL1TF1, c("t1", "t4"), c("t5", "t5", "t3")), badLength)
  # Deleting steps invalidates a cached underlying temporal graph.
  cached <- WGL1LTF1
  cached$storeTempGraph <- TRUE
  cached <- construct_underlying_graph(cached)
  expect_equal(class(tG(cached)), "igraph")
  cached <- delete_steps(WGL1TF1, c("t1", "t4"), c("t2", "t5"))
  expect_equal(tG(cached), NULL)
})
test_that("test 'insert_point(tempFlow, newPoint, srcPointList, dstPointList, weightList = NULL, attrList = NULL, removeEdges = TRUE, safe = TRUE)'", {
checkER <- list("v1"=list("t1"= "v1","t2"= "v1","t3"= "v1","t4"= "v1","t5"= "v1","tX"= "v1"),
"v2"=list("t1"= "v2","t2"= "v2","t3"= "v2","t4"= "v2","t5"= "v2","tX"= "v2"),
"v3"=list("t1"= "v3","t2"= "v3","t3"= "v3","t4"= "v3","t5"= "v3","tX"= "v3"),
"v4"=list("t1"= "v4","t2"= "v4","t3"= "v4","t4"= "v4","t5"= "v4","tX"= "v4"),
"v5"=list("t1"= "v5","t2"= "v5","t3"= "v5","t4"= "v5","t5"= "v5","tX"= "v5"))
addGL <- list("tX" = GL1$simple$t1)
addER <- list("v1"=list("tX"= "v1"), "v2"=list("tX"= "v2"),
"v3"=list("tX"= "v3"), "v4"=list("tX"= "v4"),
"v5"=list("tX"= "v5"))
addGLeX <- list("tX" = GL5$simple$a)
addEReX <- list("v1"=list("tX"= "1"), "v2"=list("tX"= "2"),
"v3"=list("tX"= "3"), "v4"=list("tX"= "4"),
"v5"=list("tX"= "5"))
# insert_point(WGL5ETF1, "tX", c(), c(),addGLeX)
# sapply(names(tP(WGL5ETF1)), function(x) gV(WGL5ETF1, x))
tempNetwork <-insert_point(WGL1TF1, "tX", c("t3","t4"), c("t5"),addGL)
gtempNetwork <-insert_point(WGL1TF1, "tX", c("t3","t4"), c("t5"), GL1$simple$t1)
eTempNetwork <- insert_point(WGL1TF1, "tX", c("t3","t4"), c("t5"), addGL, equivRel = addER)
eeTempNetwork <- insert_point(EWGL1TF1, "tX", c("t3","t4"), c("t5"), addGL, equivRel = addER)
expect_equal(eR(tempNetwork), NULL)
expect_equal(eR(eTempNetwork), checkER)
expect_equal(eR(eeTempNetwork), checkER)
E1addGL <- list("tY" = GL1$simple$t1)
expect_error(insert_point(WGL1TF1, "tX", c("t3","t4"), c("t5"),E1addGL), error_message_not_tempflow_graphlist_index_equality())
E1addER <- list("v1"=list("tX"="v2"), "v2"=list("tX"="v2"),
"v3"=list("tX"="v3"), "v4"=list("tX"="v4"),
"v5"=list("tX"="v5"))
expect_error(insert_point(WGL1TF1, "tX", c("t3","t4"), c("t5"),addGL, equivRel = E1addER), error_message_equivclass_not_disjoined())
E2addER <- list("v1"=list("tX"="v"), "v2"=list("tX"="v2"),
"v3"=list("tX"="v3"), "v4"=list("tX"="v4"),
"v5"=list("tX"="v5"))
expect_error(insert_point(WGL1TF1, "tX", c("t3","t4"), c("t5"),addGL, equivRel = E2addER), error_message_equivalenceclass_is_not_exhaustive())
E3addER <- list("v1"=list("tY"="v"), "v2"=list("tX"="v2"),
"v3"=list("tX"="v3"), "v4"=list("tX"="v4"),
"v5"=list("tX"="v5"))
expect_error(insert_point(WGL1TF1, "tX", c("t3","t4"), c("t5"),addGL, equivRel = E3addER), error_message_not_tempflow_equivclass_index_equality())
E4addER <- list("v1"=list("tX"="v1"), "v1"=list("tX"="v2"),
"v3"=list("tX"="v3"), "v4"=list("tX"="v4"),
"v5"=list("tX"="v5"))
expect_error(insert_point(WGL1TF1, "tX", c("t3","t4"), c("t5"),addGL, equivRel = E4addER), error_message_not_tempflow_equivclass_index_equality())
E5addER <- list("v1"=list("tX"="v1"), "v2"=list("tX"="v2"),
"v3"=list("tX"="v3"), "v4"=list("tX"="v4"),
"v5"=list("tX"="v5"), "v6"=list("tX"="v5"))
expect_error(insert_point(WGL1TF1, "tX", c("t3","t4"), c("t5"),addGL, equivRel = E5addER), error_message_not_tempflow_equivclass_index_equality())
E6addER <- list("v1"=list("tX"="v1"), "v2"=list("tX"="v2"),
"v3"=list("tX"="v3"), "v4"=list("tX"="v4"),
"v5"=list("tX"="v5", "tY" = "v5"))
expect_error(insert_point(WGL1TF1, "tX", c("t3","t4"), c("t5"),addGL, equivRel = E6addER), error_message_not_tempflow_equivclass_index_equality())
E7addER <- list("v1"=list("tX"="v1"), "v2"=list("tX"="v2"),
"v3"=list("tX"="v3"), "v4"=list("tX"="v4"),
"v5"=list("tX"="v5"),
"v6"=list("t1"= "v6","t2"= "v6","t3"= "v6","t4"= "v6","t5"= "v6","tX"= "v6"))
expect_error(insert_point(WGL1TF1, "tX", c("t3","t4"), c("t5"),addGL, equivRel = E7addER), error_message_equivrel_not_equal_length_to_vertices())
E8addER <- list("v1"=list("tX"="v1"), "v2"=list("tX"="v2"),
"v3"=list("tX"="v3"), "v4"=list("tX"="v4"),
"v5"=list("tX"="v5", "tX" = "v5"))
expect_error(insert_point(WGL1TF1, "tX", c("t3","t4"), c("t5"),addGL, equivRel = E8addER), error_message_not_tempflow_equivclass_index_equality())
tempNetwork <-insert_point(WGL1TF1, "tX", c(), c(),addGL)
expect_equal(length(tS(tempNetwork)), length(tS(WGL1TF1)))
expect_equal(length(tP(tempNetwork)), length(tP(WGL1TF1))+1)
expect_equal(tS(tempNetwork)$weight, tS(WGL1TF1)$weight)
tempNetwork <-insert_point(WGL1TF1, "tX", c("tX"), c(),addGL)
wTempNetwork <- insert_point(WGL1TF1, "tX", c("tX"), c(), addGL, weightList = c(10))
eTempNetwork <- insert_point(WGL1TF1, "tX", c("tX"), c(), addGL, equivRel = addER)
eeTempNetwork <- insert_point(EWGL1TF1, "tX", c("tX"), c(), addGL, equivRel = addER)
expect_equal(length(tS(tempNetwork)), length(tS(WGL1TF1))+1)
expect_equal(all(igraph::get.edge.ids(G(tempNetwork), c("tX","tX"), error = FALSE) > 0), TRUE)
expect_equal(tS(tempNetwork)[get_step_ids(tempNetwork, c("tX"), c("tX"))]$weight, c(0))
expect_equal(tS(wTempNetwork)[get_step_ids(wTempNetwork, c("tX"), c("tX"))]$weight, c(10))
tempNetwork <-insert_point(WGL1TF1, "tX", c(), c("tX"),addGL)
wTempNetwork <- insert_point(WGL1TF1, "tX", c(), c("tX"),addGL, weightList = c(10))
expect_equal(length(tS(tempNetwork)), length(tS(WGL1TF1))+1)
expect_equal(all(igraph::get.edge.ids(G(tempNetwork), c("tX","tX"), error = FALSE) > 0), TRUE)
expect_equal(tS(tempNetwork)[get_step_ids(tempNetwork, c("tX"), c("tX"))]$weight, c(0))
expect_equal(tS(wTempNetwork)[get_step_ids(wTempNetwork, c("tX"), c("tX"))]$weight, c(10))
tempNetwork <-insert_point(WGL1TF1, "tX", c("t1","tX"), c("t2"),addGL)
wTempNetwork <- insert_point(WGL1TF1, "tX", c("t1","tX"), c("t2"),addGL, weightList = c(10,8,20))
expect_equal(length(tS(tempNetwork)), length(tS(WGL1TF1))+2)
expect_equal(all(igraph::get.edge.ids(G(tempNetwork), c("t1","t2"), error = FALSE) == 0), TRUE)
expect_equal(all(igraph::get.edge.ids(G(tempNetwork), c("t1","tX","tX","tX","tX","t2"), error = FALSE) > 0), TRUE)
expect_equal(tS(tempNetwork)[get_step_ids(tempNetwork, c("t1","tX","tX"), c("tX","tX","t2"))]$weight, c(1,0,1))
expect_equal(tS(wTempNetwork)[get_step_ids(wTempNetwork, c("t1","tX","tX"), c("tX","tX","t2"))]$weight, c(10,8,20))
tempNetwork <-insert_point(WGL1TF1, "tX", c("t3","t4"), c("t5"),addGL)
wTempNetwork <- insert_point(WGL1TF1, "tX", c("t3","t4"), c("t5"),addGL, weightList = c(10,20,30))
expect_equal(length(tS(tempNetwork)), length(tS(WGL1TF1))+1)
expect_equal(all(igraph::get.edge.ids(G(tempNetwork), c("t3","t5","t4","t5"), error = FALSE) == 0), TRUE)
expect_equal(all(igraph::get.edge.ids(G(tempNetwork), c("t3","tX","t4","tX","tX","t5"), error = FALSE) > 0), TRUE)
expect_equal(tS(tempNetwork)[get_step_ids(tempNetwork, c("t3","t4","tX"), c("tX","tX","t5"))]$weight, c(1,1,1))
expect_equal(tS(wTempNetwork)[get_step_ids(wTempNetwork, c("t3","t4","tX"), c("tX","tX","t5"))]$weight, c(10,20,30))
tempNetwork <-insert_point(WGL1TF1, "tX", c("t1","t2"), c("t5"),addGL)
wTempNetwork <- insert_point(WGL1TF1, "tX", c("t1","t2"), c("t5"),addGL, weightList = c(10,20,30))
expect_equal(length(tS(tempNetwork)), length(tS(WGL1TF1))+3)
expect_equal(all(igraph::get.edge.ids(G(tempNetwork), c("t1","tX","t1","tX","tX","t5"), error = FALSE) > 0), TRUE)
expect_equal(tS(tempNetwork)[get_step_ids(tempNetwork, c("t1","t2","tX"), c("tX","tX","t5"))]$weight, c(1,1,1))
expect_equal(tS(wTempNetwork)[get_step_ids(wTempNetwork, c("t1","t2","tX"), c("tX","tX","t5"))]$weight, c(10,20,30))
tempNetwork <-insert_point(WGL1TF1, "tX", c("t1","t2"), c("t3","t4"),addGL)
wTempNetwork <- insert_point(WGL1TF1, "tX", c("t1","t2"), c("t3","t4"),addGL, weightList = c(10,20,30,40))
expect_equal(length(tS(tempNetwork)), length(tS(WGL1TF1))+2)
expect_equal(all(igraph::get.edge.ids(G(tempNetwork), c("t2","t3","t2","t4"), error = FALSE) == 0), TRUE)
expect_equal(all(igraph::get.edge.ids(G(tempNetwork), c("t1","tX","t2","tX","tX","t3","tX","t4"), error = FALSE) > 0), TRUE)
expect_equal(tS(tempNetwork)[get_step_ids(tempNetwork, c("t1","t2","tX","tX"), c("tX","tX","t3","t4"))]$weight, c(1,1,1,1))
expect_equal(tS(wTempNetwork)[get_step_ids(wTempNetwork, c("t1","t2","tX","tX"), c("tX","tX","t3","t4"))]$weight, c(10,20,30,40))
tempNetwork <-insert_point(WGL1TF1, "tX", c("t3","t4"), c("t5"),addGL)
wTempNetwork <- insert_point(WGL1TF1, "tX", c("t3","t4"), c("t5"),addGL, weightList = c(10,20,30))
expect_equal(length(tS(tempNetwork)), length(tS(WGL1TF1))+1)
expect_equal(all(igraph::get.edge.ids(G(tempNetwork), c("t3","t5","t4","t5"), error = FALSE) == 0), TRUE)
expect_equal(all(igraph::get.edge.ids(G(tempNetwork), c("t3","tX","t4","tX","tX","t5"), error = FALSE) > 0), TRUE)
expect_equal(tS(tempNetwork)[get_step_ids(tempNetwork, c("t3","t4","tX"), c("tX","tX","t5"))]$weight, c(1,1,1))
expect_equal(tS(wTempNetwork)[get_step_ids(wTempNetwork, c("t3","t4","tX"), c("tX","tX","t5"))]$weight, c(10,20,30))
tempNetwork <-insert_point(WGL5ETF1, "tX", c("p31", "p41"), c("p32", "p42"),addGLeX)
wTempNetwork <- insert_point(WGL5ETF1, "tX", c("p31", "p41"), c("p32", "p42"),addGLeX, weightList = c(10,20,30,40))
expect_equal(length(tS(tempNetwork)), length(tS(WGL5ETF1))+2)
expect_equal(all(igraph::get.edge.ids(G(tempNetwork), c("p31","p32","p41","p42"), error = FALSE) == 0), TRUE)
expect_equal(all(igraph::get.edge.ids(G(tempNetwork), c("p31","tX","p41","tX","tX","p32","tX","p42"), error = FALSE) > 0), TRUE)
expect_equal(tS(tempNetwork)[get_step_ids(tempNetwork, c("p31","p41","tX","tX"), c("tX","tX","p32","p42"))]$weight, c(1,1,1,1))
expect_equal(tS(wTempNetwork)[get_step_ids(wTempNetwork, c("p31","p41","tX","tX"), c("tX","tX","p32","p42"))]$weight, c(10,20,30,40))
tempNetwork <-insert_point(WGL1TF1, "tX", c(), c(),addGL, removeEdges = FALSE)
wTempNetwork <- insert_point(WGL1TF1, "tX", c(), c("tX"),addGL, weightList = c(10))
expect_equal(length(tS(tempNetwork)), length(tS(WGL1TF1)))
expect_equal(length(tP(tempNetwork)), length(tP(WGL1TF1))+1)
expect_equal(tS(tempNetwork)$weight, tS(WGL1TF1)$weight)
tempNetwork <-insert_point(WGL1TF1, "tX", c("tX"), c(),addGL, removeEdges = FALSE)
wTempNetwork <- insert_point(WGL1TF1, "tX", c("tX"), c(),addGL, weightList = c(10), removeEdges = FALSE)
expect_equal(length(tS(tempNetwork)), length(tS(WGL1TF1))+1)
expect_equal(all(igraph::get.edge.ids(G(tempNetwork), c("tX","tX"), error = FALSE) > 0), TRUE)
expect_equal(tS(tempNetwork)[get_step_ids(tempNetwork, c("tX"), c("tX"))]$weight, c(0))
expect_equal(tS(wTempNetwork)[get_step_ids(wTempNetwork, c("tX"), c("tX"))]$weight, c(10))
tempNetwork <-insert_point(WGL1TF1, "tX", c(), c("tX"),addGL, removeEdges = FALSE)
wTempNetwork <- insert_point(WGL1TF1, "tX", c(), c("tX"),addGL, weightList = c(10), removeEdges = FALSE)
expect_equal(length(tS(tempNetwork)), length(tS(WGL1TF1))+1)
expect_equal(all(igraph::get.edge.ids(G(tempNetwork), c("tX","tX"), error = FALSE) > 0), TRUE)
expect_equal(tS(tempNetwork)[get_step_ids(tempNetwork, c("tX"), c("tX"))]$weight, c(0))
expect_equal(tS(wTempNetwork)[get_step_ids(wTempNetwork, c("tX"), c("tX"))]$weight, c(10))
tempNetwork <-insert_point(WGL1TF1, "tX", c("t1","tX"), c("t2"),addGL, removeEdges = FALSE)
wTempNetwork <- insert_point(WGL1TF1, "tX", c("t1","tX"), c("t2"),addGL, weightList = c(10,8,20), removeEdges = FALSE)
expect_equal(length(tS(tempNetwork)), length(tS(WGL1TF1))+3)
expect_equal(all(igraph::get.edge.ids(G(tempNetwork), c("t1","t2"), error = FALSE) > 0), TRUE)
expect_equal(all(igraph::get.edge.ids(G(tempNetwork), c("t1","tX","tX","tX","tX","t2"), error = FALSE) > 0), TRUE)
expect_equal(tS(tempNetwork)[get_step_ids(tempNetwork, c("t1","tX","tX"), c("tX","tX","t2"))]$weight, c(1,0,1))
expect_equal(tS(wTempNetwork)[get_step_ids(wTempNetwork, c("t1","tX","tX"), c("tX","tX","t2"))]$weight, c(10,8,20))
tempNetwork <-insert_point(WGL1TF1, "tX", c("t3","t4"), c("t5"),addGL, removeEdges = FALSE)
wTempNetwork <- insert_point(WGL1TF1, "tX", c("t3","t4"), c("t5"),addGL, weightList = c(10,20,30), removeEdges = FALSE)
expect_equal(length(tS(tempNetwork)), length(tS(WGL1TF1))+3)
expect_equal(all(igraph::get.edge.ids(G(tempNetwork), c("t3","t5","t4","t5"), error = FALSE) > 0), TRUE)
expect_equal(all(igraph::get.edge.ids(G(tempNetwork), c("t3","tX","t4","tX","tX","t5"), error = FALSE) > 0), TRUE)
expect_equal(tS(tempNetwork)[get_step_ids(tempNetwork, c("t3","t4","tX"), c("tX","tX","t5"))]$weight, c(1,1,1))
expect_equal(tS(wTempNetwork)[get_step_ids(wTempNetwork, c("t3","t4","tX"), c("tX","tX","t5"))]$weight, c(10,20,30))
tempNetwork <-insert_point(WGL1TF1, "tX",c("t1","t2"), c("t5"),addGL, removeEdges = FALSE)
wTempNetwork <- insert_point(WGL1TF1, "tX",c("t1","t2"), c("t5"),addGL, weightList = c(10,20,30), removeEdges = FALSE)
expect_equal(length(tS(tempNetwork)), length(tS(WGL1TF1))+3)
expect_equal(all(igraph::get.edge.ids(G(tempNetwork), c("t1","tX","t1","tX","tX","t5"), error = FALSE) > 0), TRUE)
expect_equal(tS(tempNetwork)[get_step_ids(tempNetwork, c("t1","t2","tX"), c("tX","tX","t5"))]$weight, c(1,1,1))
expect_equal(tS(wTempNetwork)[get_step_ids(wTempNetwork, c("t1","t2","tX"), c("tX","tX","t5"))]$weight, c(10,20,30))
tempNetwork <-insert_point(WGL1TF1, "tX", c("t1","t2"), c("t3","t4"),addGL, removeEdges = FALSE)
wTempNetwork <-insert_point(WGL1TF1, "tX", c("t1","t2"), c("t3","t4"),addGL, weightList = c(10,20,30,40), removeEdges = FALSE)
expect_equal(length(tS(tempNetwork)), length(tS(WGL1TF1))+4)
expect_equal(all(igraph::get.edge.ids(G(tempNetwork), c("t2","t3","t2","t4"), error = FALSE) > 0), TRUE)
expect_equal(all(igraph::get.edge.ids(G(tempNetwork), c("t1","tX","t2","tX","tX","t3","tX","t4"), error = FALSE) > 0), TRUE)
expect_equal(tS(tempNetwork)[get_step_ids(tempNetwork, c("t1","t2","tX","tX"), c("tX","tX","t3","t4"))]$weight, c(1,1,1,1))
expect_equal(tS(wTempNetwork)[get_step_ids(wTempNetwork, c("t1","t2","tX","tX"), c("tX","tX","t3","t4"))]$weight, c(10,20,30,40))
tempNetwork <-insert_point(WGL5ETF1, "tX",c("p31", "p41"), c("p32", "p42"),addGLeX, removeEdges = FALSE)
wTempNetwork <- insert_point(WGL5ETF1, "tX",c("p31", "p41"), c("p32", "p42"),addGLeX, weightList = c(10,20,30,40), removeEdges = FALSE)
#eTempNetwork <-insert_point(WGL5ETF1, "tX",c("p31", "p41"), c("p32", "p42"),addGLeX, equivRel = addEReX, removeEdges = FALSE)
expect_equal(length(tS(tempNetwork)), length(tS(WGL5ETF1))+4)
expect_equal(all(igraph::get.edge.ids(G(tempNetwork), c("p31","p32","p41","p42"), error = FALSE) > 0), TRUE)
expect_equal(all(igraph::get.edge.ids(G(tempNetwork), c("p31","tX","p41","tX","tX","p32","tX","p42"), error = FALSE) > 0), TRUE)
expect_equal(tS(tempNetwork)[get_step_ids(tempNetwork, c("p31","p41","tX","tX"), c("tX","tX","p32","p42"))]$weight, c(1,1,1,1))
expect_equal(tS(wTempNetwork)[get_step_ids(wTempNetwork, c("p31","p41","tX","tX"), c("tX","tX","p32","p42"))]$weight, c(10,20,30,40))
expect_error(insert_point(WGL5ETF1, "tX",c("p31", "p41"), c("p32", "p42"),addGL, weightList = c(10,20,30,40), removeEdges = FALSE), error_message_not_graphlist_vertex_equality())
expect_error(insert_point(WGL1TF1, "tX", c("t1","t6"), c("t3","t4"),addGL), error_message_name_not_in_object())
expect_error(insert_point(WGL1TF1, "tX", c("t1","t2"), c("t3","t6"),addGL), error_message_name_not_in_object())
expect_error(insert_point(WGL1TF1, "tX", c("t1","t2"), c("t2","t4"),addGL), error_message_not_dag())
expect_error(insert_point(WGL1TF1, "tX", c("t1","t1"), c("t2","t2"),addGL), error_message_not_simple())
expect_error(insert_point(WGL1TF1, "tX", c("t1","t2"), c("t3",NA),addGL), error_message_name_not_in_object())
expect_error(insert_point(WGL1TF1, "tX", c("t1",NA), c("t3","t4"),addGL), error_message_name_not_in_object())
expect_error(insert_point(WGL1TF1, "tX", c("t1","t2"), c(NaN),addGL), error_message_ids_not_in_object())
expect_error(insert_point(WGL1TF1, "tX", c("t1",NaN), c("t3","t4"),addGL), error_message_name_not_in_object())
expect_error(insert_point(WGL1TF1, "tX", c("t1","t2"), c("t3","t4"), addGL, weightList = c(10,20)), error_message_vector_not_equal_sum_lengths_and_not_one())
expect_error(insert_point(WGL1TF1, "tX", c("t1","t2"), c("t3","t4"), addGL, weightList = c(-10,20,30,40)), error_message_weights_not_nonnegative())
expect_error(insert_point(WGL1TF1, "tX", c("t1","t2"), c("t3","t4"),addGL, weightList = c("-10",20,30,40)), error_message_weights_not_numeric())
expect_error(insert_point(WGL1TF1, "tX", c("t1","t2"), c("t3","t4"),addGL, weightList = c(NA,20,30,40)), error_message_weights_not_numeric())
expect_error(insert_point(WGL1TF1, "tX", c("t1","t2"), c("t3","t4"),addGL, weightList = c(NaN,20,30,40)), error_message_weights_not_numeric())
expect_error(insert_point(WGL1TF1, "tX", c("t1","t2"), c("t3","t4"),addGL, weightList = c(10,20,30,NULL)), error_message_vector_not_equal_sum_lengths_and_not_one())
expect_error(insert_point(WGL1TF1, "tX", c("tX"), c("tX"), addGL, weightList = c(10)), error_message_not_simple())
testTempNetwork <- WGL1LTF1
testTempNetwork$storeTempGraph <- TRUE
testTempNetwork <- construct_underlying_graph(testTempNetwork)
expect_equal(class(tG(testTempNetwork)),"igraph")
testTempNetwork <- insert_point(WGL1TF1, "tX", c("t3","t4"), c("t5"),addGL)
expect_equal(tG(testTempNetwork),NULL)
})
test_that("test 'remove_point(tempFlow, removePoint, safe = TRUE)'", {
# Removing the sink "t5" drops one point and its two incoming steps;
# querying the deleted vertex must raise igraph's "Invalid vertex names".
tempNetwork <-remove_point(WGL1TF1, "t5")
expect_equal(length(tP(tempNetwork)), length(tP(WGL1TF1))-1)
expect_equal(length(tS(tempNetwork)), length(tS(WGL1TF1))-2)
expect_error(all(igraph::get.edge.ids(G(tempNetwork), c("t3","t5","t4","t5"), error = FALSE) == 0), "Invalid vertex names")
# Same removal on the looped fixture (WGL1LTF1): the self-loop on "t5"
# accounts for the third removed step.
tempNetwork <-remove_point(WGL1LTF1, "t5")
expect_equal(length(tP(tempNetwork)), length(tP(WGL1LTF1))-1)
expect_equal(length(tS(tempNetwork)), length(tS(WGL1LTF1))-3)
expect_error(all(igraph::get.edge.ids(G(tempNetwork), c("t3","t5","t4","t5","t5","t5"), error = FALSE) == 0), "Invalid vertex names")
# Removing the source "t1" drops only its single outgoing step.
tempNetwork <-remove_point(WGL1TF1, "t1")
expect_equal(length(tP(tempNetwork)), length(tP(WGL1TF1))-1)
expect_equal(length(tS(tempNetwork)), length(tS(WGL1TF1))-1)
expect_error(all(igraph::get.edge.ids(G(tempNetwork), c("t1","t2"), error = FALSE) == 0), "Invalid vertex names")
# Looped variant: "t1" has a self-loop, so two steps go away.
tempNetwork <-remove_point(WGL1LTF1, "t1")
expect_equal(length(tP(tempNetwork)), length(tP(WGL1LTF1))-1)
expect_equal(length(tS(tempNetwork)), length(tS(WGL1LTF1))-2)
expect_error(all(igraph::get.edge.ids(G(tempNetwork), c("t1","t2","t1","t1"), error = FALSE) == 0), "Invalid vertex names")
# Removing the interior point "t2" rewires its neighbours: "t1" gets
# connected straight to "t3"/"t4", and the bridged steps carry the
# combined weight 2 (1 in + 1 out).
tempNetwork <-remove_point(WGL1TF1, "t2")
expect_equal(length(tP(tempNetwork)), length(tP(WGL1TF1))-1)
expect_equal(length(tS(tempNetwork)), length(tS(WGL1TF1))-1)
expect_error(all(igraph::get.edge.ids(G(tempNetwork), c("t2","t3","t2","t4"), error = FALSE) == 0), "Invalid vertex names")
expect_equal(all(igraph::get.edge.ids(G(tempNetwork), c("t1","t3","t1","t4"), error = FALSE) > 0), TRUE)
expect_equal(tS(tempNetwork)[get_step_ids(tempNetwork, c("t1","t1"), c("t3","t4"))]$weight, c(2,2))
tempNetwork <-remove_point(WGL1LTF1, "t2")
expect_equal(length(tP(tempNetwork)), length(tP(WGL1LTF1))-1)
expect_equal(length(tS(tempNetwork)), length(tS(WGL1LTF1))-2)
expect_error(all(igraph::get.edge.ids(G(tempNetwork), c("t2","t3","t2","t4", "t2","t2"), error = FALSE) == 0), "Invalid vertex names")
expect_equal(all(igraph::get.edge.ids(G(tempNetwork), c("t1","t3","t1","t4"), error = FALSE) > 0), TRUE)
expect_equal(tS(tempNetwork)[get_step_ids(tempNetwork, c("t1","t1"), c("t3","t4"))]$weight, c(2,2))
# Interior point "t3": "t2" bridges to "t5" with combined weight 2.
tempNetwork <-remove_point(WGL1TF1, "t3")
expect_equal(length(tP(tempNetwork)), length(tP(WGL1TF1))-1)
expect_equal(length(tS(tempNetwork)), length(tS(WGL1TF1))-1)
expect_error(all(igraph::get.edge.ids(G(tempNetwork), c("t2","t3","t3","t5"), error = FALSE) == 0), "Invalid vertex names")
expect_equal(all(igraph::get.edge.ids(G(tempNetwork), c("t2","t5"), error = FALSE) > 0), TRUE)
expect_equal(tS(tempNetwork)[get_step_ids(tempNetwork, c("t2"), c("t5"))]$weight, c(2))
tempNetwork <-remove_point(WGL1LTF1, "t3")
expect_equal(length(tP(tempNetwork)), length(tP(WGL1LTF1))-1)
expect_equal(length(tS(tempNetwork)), length(tS(WGL1LTF1))-2)
expect_error(all(igraph::get.edge.ids(G(tempNetwork), c("t2","t3","t3","t5"), error = FALSE) == 0), "Invalid vertex names")
expect_equal(all(igraph::get.edge.ids(G(tempNetwork), c("t2","t5"), error = FALSE) > 0), TRUE)
expect_equal(tS(tempNetwork)[get_step_ids(tempNetwork, c("t2"), c("t5"))]$weight, c(2))
# Empty temporal flows cannot be reduced further.
expect_error(remove_point(GL0STF1, "t1"), error_message_tempflow_empty())
# tempNetwork <-remove_point(GL0STF1, "t1")
# expect_equal(length(tP(tempNetwork)), length(tP(GL0STF1))-1)
# expect_equal(length(tS(tempNetwork)), length(tS(GL0STF1)))
expect_error(remove_point(GL0STF3, "t1"), error_message_tempflow_empty())
# tempNetwork <-remove_point(GL0STF3, "t1")
# expect_equal(length(tP(tempNetwork)), length(tP(GL0STF3))-1)
# expect_equal(length(tS(tempNetwork)), length(tS(GL0STF3)))
# Hub removal on the example network: "b" (2 in, 2 out) is bridged into
# 2x2 = 4 new steps, so the step count is unchanged overall; the bridged
# weights are the sums of the incident step weights.
tempNetwork <-remove_point(WGL5ETF1, "b")
expect_equal(length(tP(tempNetwork)), length(tP(WGL5ETF1))-1)
expect_equal(length(tS(tempNetwork)), length(tS(WGL5ETF1)))
expect_error(all(igraph::get.edge.ids(G(tempNetwork), c("p11","b","p23","b","b","p31","b","p41"), error = FALSE) == 0), "Invalid vertex names")
expect_equal(all(igraph::get.edge.ids(G(tempNetwork), c("p11","p31","p11","p41","p23","p31","p23","p41"), error = FALSE) > 0), TRUE)
expect_equal(tS(tempNetwork)[get_step_ids(tempNetwork, c("p11","p11","p23","p23"), c("p31","p41","p31","p41"))]$weight, c(3,3,2,2))
# Raising the weight of b->p31 to 10 first must flow through into the
# bridged weights of the paths ending at "p31".
tempNetwork <-remove_point(set_step_weights(WGL5ETF1, "b","p31", 10), "b")
expect_equal(length(tP(tempNetwork)), length(tP(WGL5ETF1))-1)
expect_equal(length(tS(tempNetwork)), length(tS(WGL5ETF1)))
expect_error(all(igraph::get.edge.ids(G(tempNetwork), c("p11","b","p23","b","b","p31","b","p41"), error = FALSE) == 0), "Invalid vertex names")
expect_equal(all(igraph::get.edge.ids(G(tempNetwork), c("p11","p31","p11","p41","p23","p31","p23","p41"), error = FALSE) > 0), TRUE)
expect_equal(tS(tempNetwork)[get_step_ids(tempNetwork, c("p11","p11","p23","p23"), c("p31","p41","p31","p41"))]$weight, c(12,3,11,2))
tempNetwork <-remove_point(WGL1LTF1, "t3")
expect_equal(length(tP(tempNetwork)), length(tP(WGL1LTF1))-1)
expect_equal(length(tS(tempNetwork)), length(tS(WGL1LTF1))-2)
expect_error(all(igraph::get.edge.ids(G(tempNetwork), c("t2","t3","t3","t5"), error = FALSE) == 0), "Invalid vertex names")
expect_equal(all(igraph::get.edge.ids(G(tempNetwork), c("t2","t5"), error = FALSE) > 0), TRUE)
expect_equal(tS(tempNetwork)[get_step_ids(tempNetwork, c("t2"), c("t5"))]$weight, c(2))
# Weighted fixtures (WGL1TF2 / WGL1LTF2): bridged weights come out as 4.
tempNetwork <-remove_point(WGL1TF2, "t2")
expect_equal(length(tP(tempNetwork)), length(tP(WGL1TF1))-1)
expect_equal(length(tS(tempNetwork)), length(tS(WGL1TF1))-1)
expect_error(all(igraph::get.edge.ids(G(tempNetwork), c("t2","t3","t2","t4"), error = FALSE) == 0), "Invalid vertex names")
expect_equal(all(igraph::get.edge.ids(G(tempNetwork), c("t1","t3","t1","t4"), error = FALSE) > 0), TRUE)
expect_equal(tS(tempNetwork)[get_step_ids(tempNetwork, c("t1","t1"), c("t3","t4"))]$weight, c(4,4))
tempNetwork <-remove_point(WGL1LTF2, "t2")
expect_equal(length(tP(tempNetwork)), length(tP(WGL1LTF1))-1)
expect_equal(length(tS(tempNetwork)), length(tS(WGL1LTF1))-2)
expect_error(all(igraph::get.edge.ids(G(tempNetwork), c("t2","t3","t2","t4", "t2","t2"), error = FALSE) == 0), "Invalid vertex names")
expect_equal(all(igraph::get.edge.ids(G(tempNetwork), c("t1","t3","t1","t4"), error = FALSE) > 0), TRUE)
expect_equal(tS(tempNetwork)[get_step_ids(tempNetwork, c("t1","t1"), c("t3","t4"))]$weight, c(4,4))
# Invalid identifiers: unknown names and unknown numeric ids each get
# their own error class.
expect_error(remove_point(WGL1TF1, c("t6")), error_message_name_not_in_object())
expect_error(remove_point(WGL1TF1, c(6)), error_message_ids_not_in_object())
# A cached underlying graph (storeTempGraph) must be invalidated by the
# mutation: present before remove_point, NULL afterwards.
testTempNetwork <- WGL1LTF1
testTempNetwork$storeTempGraph <- TRUE
testTempNetwork <- construct_underlying_graph(testTempNetwork)
expect_equal(class(tG(testTempNetwork)),"igraph")
testTempNetwork <- remove_point(WGL1LTF1, "t3")
expect_equal(tG(testTempNetwork),NULL)
})
test_that("test 'induce_flow(tempFlow, inpPointList, safe = TRUE)'", {
# Out-of-range ids are rejected outright.
expect_error(induce_flow(WGL1TF1, c(1,2,8)))
# Duplicate ids in the point list are deduplicated before induction.
expect_equal(igraph::is_isomorphic_to(G(induce_flow(WGL1TF1, c(1,1,3,5))),G(induce_flow(WGL1TF1, c(1,3,5)))), TRUE)
# An empty point list yields an empty temporal flow, which is an error.
expect_error(induce_flow(WGL1TF1, c()), error_message_tempflow_empty())
# expect_equal(igraph::is_isomorphic_to(G(induce_flow(WGL1TF1, c())),igraph::graph.empty()), TRUE)
# Inducing on the full vertex set reproduces the original graph.
expect_equal(igraph::is_isomorphic_to(G(induce_flow(WGL1TF1, igraph::V(G(WGL1TF1)))),G(WGL1TF1)), TRUE)
# Point names and numeric ids select the same subflow.
expect_equal(igraph::is_isomorphic_to(G(induce_flow(WGL1TF1, c("t1", "t3", "t5"))),G(induce_flow(WGL1TF1, c(1,3,5)))), TRUE)
# Unknown ids vs. unknown names raise their respective error classes.
expect_error(induce_flow(WGL1TF2, c(1,2,6)), error_message_ids_not_in_object())
expect_error(induce_flow(WGL1TF2, c("t1","t2","t6")), error_message_name_not_in_object())
# A cached underlying graph (storeTempGraph) must be invalidated:
# present before induce_flow, NULL on the derived flow.
testTempNetwork <- WGL1TF1
testTempNetwork$storeTempGraph <- TRUE
testTempNetwork <- construct_underlying_graph(testTempNetwork)
expect_equal(class(tG(testTempNetwork)),"igraph")
testTempNetwork <- induce_flow(WGL1TF1, c(1,3,5))
expect_equal(tG(testTempNetwork),NULL)
})
test_that("test 'slice_flow(tempFlow, startPointList, endPointList, safe = TRUE)'", {
# A slice between two reachable points equals the induced subflow on
# every point along the connecting paths.
expect_equal(igraph::is_isomorphic_to(G(slice_flow(WGL1TF1,2,5)),G(induce_flow(WGL1TF1, c(2,3,4,5)))), TRUE)
# Unreachable start/end pairs (siblings, or reversed direction) give an
# empty flow, which is an error.
expect_error(slice_flow(WGL1TF1,3,4), error_message_tempflow_empty())
# expect_equal(igraph::is_isomorphic_to(G(slice_flow(WGL1TF1,3,4)),igraph::graph.empty()), TRUE)
expect_error(slice_flow(WGL1TF1,5,2), error_message_tempflow_empty())
# expect_equal(igraph::is_isomorphic_to(G(slice_flow(WGL1TF1,5,2)),igraph::graph.empty()), TRUE)
expect_equal(igraph::is_isomorphic_to(G(slice_flow(WGL1TF1,2,5)),G(induce_flow(WGL1TF1, c(2,3,4,5)))), TRUE)
# Slicing from a point to itself keeps just that point (also with loops).
expect_equal(igraph::is_isomorphic_to(G(slice_flow(WGL1TF2,1,1)),G(induce_flow(WGL1TF2, c(1)))), TRUE)
expect_equal(igraph::is_isomorphic_to(G(slice_flow(WGL1LTF2,1,1)),G(induce_flow(WGL1LTF2, c(1)))), TRUE)
# Multiple start/end points: the slice is the union over all pairs.
expect_equal(igraph::is_isomorphic_to(G(slice_flow(WGL1TF1,c(2,4),c(5,3))),G(induce_flow(WGL1TF1, c(2,3,4,5)))), TRUE)
expect_error(slice_flow(WGL1TF1,3,4), error_message_tempflow_empty())
# expect_equal(igraph::is_isomorphic_to(G(slice_flow(WGL1TF1,3,4)),igraph::graph.empty()), TRUE)
expect_error(slice_flow(WGL1TF1,5,2), error_message_tempflow_empty())
# expect_equal(igraph::is_isomorphic_to(G(slice_flow(WGL1TF1,5,2)),igraph::graph.empty()), TRUE)
expect_equal(igraph::is_isomorphic_to(G(slice_flow(WGL1TF1,2,5)),G(induce_flow(WGL1TF1, c(2,3,4,5)))), TRUE)
expect_equal(igraph::is_isomorphic_to(G(slice_flow(WGL1LTF1,c(3,4), c(3,4))),G(induce_flow(WGL1LTF1, c(3,4)))), TRUE)
# Duplicated start or end points are tolerated and deduplicated.
expect_equal(igraph::is_isomorphic_to(G(slice_flow(WGL1TF2,1,c(1,1))),G(induce_flow(WGL1TF2, c(1)))), TRUE)
expect_equal(igraph::is_isomorphic_to(G(slice_flow(WGL1TF2,c(1,1),1)),G(induce_flow(WGL1TF2, c(1)))), TRUE)
expect_equal(igraph::is_isomorphic_to(G(slice_flow(WGL1LTF2,1,c(1,1))),G(induce_flow(WGL1LTF2, c(1)))), TRUE)
expect_equal(igraph::is_isomorphic_to(G(slice_flow(WGL1LTF2,c(1,1),1)),G(induce_flow(WGL1LTF2, c(1)))), TRUE)
# An empty start or end list means an empty slice, which is an error.
# expect_equal(igraph::is_isomorphic_to(G(slice_flow(WGL1LTF2,c(1),c())),G(induce_flow(WGL1LTF2, c()))), TRUE)
expect_error(slice_flow(WGL1LTF2,c(1),c()), error_message_tempflow_empty())
# expect_equal(igraph::is_isomorphic_to(G(slice_flow(WGL1LTF2,c(),c(1))),G(induce_flow(WGL1LTF2, c()))), TRUE)
expect_error(slice_flow(WGL1LTF2,c(),c(1)), error_message_tempflow_empty())
# Named-point slices on the example network (extra argument = 1; see the
# slice_flow signature for its meaning) match the expected induced flows.
expect_equal(igraph::is_isomorphic_to(G(slice_flow(WGL5ETF1,c("p11","p23"),c("c"),1)),G(induce_flow(WGL5ETF1, c("p11","p23","b","p31","p32","p41","p42","c")))), TRUE)
expect_equal(igraph::is_isomorphic_to(G(slice_flow(WGL5ETF1,c("a"),c("p11","p23"),1)),G(induce_flow(WGL5ETF1, c("a","p11","p21","p22","p23")))), TRUE)
expect_equal(igraph::is_isomorphic_to(G(slice_flow(WGL5ETF1,c("p11","p23"),c("p32","p41"),1)),G(induce_flow(WGL5ETF1, c("p11","p23","b","p31","p32","p41")))), TRUE)
# Out-of-range ids are rejected.
expect_error(slice_flow(WGL1TF1, c(1,8)))
# A cached underlying graph (storeTempGraph) must be invalidated:
# present before slice_flow, NULL on the derived flow.
testTempNetwork <- WGL1TF1
testTempNetwork$storeTempGraph <- TRUE
testTempNetwork <- construct_underlying_graph(testTempNetwork)
expect_equal(class(tG(testTempNetwork)),"igraph")
testTempNetwork <- slice_flow(WGL1TF1,2,5)
expect_equal(tG(testTempNetwork),NULL)
})
test_that("test 'compute_tempdistance'", {
# Unreachable pairs (against the flow direction, or between siblings)
# have infinite temporal distance.
expect_equal(compute_tempdistance(WGL1TF1, "t5", "t1"), Inf)
expect_equal(compute_tempdistance(WGL1TF1, "t3", "t4"), Inf)
# Distance from a point to itself is 0 in the loop-free fixture.
expect_equal(compute_tempdistance(WGL1TF1, "t1", "t1"), 0)
# Names and numeric ids are interchangeable in either argument.
expect_equal(compute_tempdistance(WGL1TF1, "t1", 5), 3)
expect_equal(compute_tempdistance(WGL1TF1, 1, "t5"), 3)
expect_equal(compute_tempdistance(WGL1TF1, 1, 5), 3)
expect_equal(compute_tempdistance(WGL1TF1, "t1", "t5"), 3)
# Unknown names or ids in either position raise an error.
expect_error(compute_tempdistance(WGL1TF1, "t1", "t100"))
expect_error(compute_tempdistance(WGL1TF1, "t1", 100))
expect_error(compute_tempdistance(WGL1TF1, 100, "t1"))
expect_error(compute_tempdistance(WGL1TF1, 111, 222))
# Weighted fixture: steps of weight 2 and 1 sum to 5 along t1 -> t5.
expect_equal(compute_tempdistance(WGL1TF2, "t1", "t5"), 5)
# Looped fixtures: self-loops do not change inter-point distances, but
# t1 -> t1 costs 1 in WGL1LTF2 (traversing its self-loop).
expect_equal(compute_tempdistance(WGL1LTF1, "t1", "t5"), 3)
expect_equal(compute_tempdistance(WGL1LTF2, "t1", "t1"), 1)
expect_equal(compute_tempdistance(WGL1LTF2, "t1", "t5"), 5)
expect_equal(compute_tempdistance(WGL1LTF1, "t1", "t5"), 3)
expect_equal(compute_tempdistance(WGL1LTF1, "t1", "t1"), 0)
expect_equal(compute_tempdistance(WGL1LTF2, "t1", "t5"), 5)
})
|
#' Expression data
#'
#' NOTE(review): the structure of this dataset is not documented in this
#' file -- confirm its format against the package's data-raw sources.
"expression_data"
#' Transcription factors
#'
#' NOTE(review): the structure of this dataset is not documented in this
#' file -- confirm its format against the package's data-raw sources.
"transcription_factors"
| /R/data.R | no_license | ddeweerd/ComhubbeR | R | false | false | 87 | r | #' Expression data
"expression_data"
#' Transcription factors
"transcription_factors"
|
#' Function to prepare a gene-set (GS) RDS file
#'
#' \code{xMakeGenesets} reads a gene-to-set association table and a set
#' annotation table, keeps the sets present in both, and saves the result
#' as a gzip-compressed RDS file.
#'
#' @param association.file a tab-delimited input file with two columns (gene, set)
#' @param set_info.file a tab-delimited input file with four columns (set ID, name, namespace, distance)
#' @param output.prefix a prefix for the output RDS file name
#' @param output.dir the output directory
#' @param stamp the stamp associated with this RDS file. By default it is the date when the file created
#' @return a GS object (a list with components \code{gs}, \code{set_info}
#' and \code{stamp}), returned invisibly; nothing is done if any of the
#' first three arguments is missing
#' @note None
#' @export
#' @include xMakeGenesets.r
#' @examples
#' \dontrun{
#' GS <- xMakeGenesets(association.file="GENE2GOMF.txt", set_info.file="GO.txt", output.prefix="org.Hs.egGOMF")
#' }
xMakeGenesets <- function(association.file=NULL, set_info.file=NULL, output.prefix=NULL, output.dir="./", stamp=as.Date(Sys.time()))
{
    ## dummy bindings so R CMD check does not flag the NSE column names
    setID <- distance <- NULL

    if(!is.null(association.file) & !is.null(set_info.file) & !is.null(output.prefix)){
        out_path <- paste0(output.dir, output.prefix, ".RDS")

        ## read the set annotation table
        set_info <- readr::read_delim(set_info.file, delim="\t")
        colnames(set_info) <- c("setID","name","namespace","distance")

        ## read the gene-to-set association table
        association <- readr::read_delim(association.file, delim="\t")
        colnames(association) <- c("GeneID","setID")

        ## keep only sets seen in both tables, ordered by distance
        set_info <- dplyr::arrange(dplyr::semi_join(set_info, association, by="setID"), distance)

        ## restrict the associations accordingly and split genes by set
        kept <- dplyr::semi_join(association, set_info, by="setID")
        gs <- split(x=kept$GeneID, f=kept$setID)
        gs <- tibble::enframe(gs, name='setID', value='GeneID')

        ## assemble, class and persist the GS object
        GS <- list(gs=gs, set_info=set_info, stamp=stamp)
        class(GS) <- "GS"
        saveRDS(GS, file=out_path, compress="gzip")

        message(sprintf("Saved into '%s' (%s)!", out_path, as.character(Sys.time())), appendLF=TRUE)

        invisible(GS)
    }
}
| /R/xMakeGenesets.r | no_license | hfang-bristol/XGR | R | false | false | 2,082 | r | #' Function to prepare a gene_info RDS file
#'
#' \code{xMakeGenesets} is supposed to prepare a gene_info RDS file.
#'
#' @param association.file an input file for association
#' @param set_info.file an input file for set_info
#' @param output.prefix a prefix for the output file
#' @param output.dir the output directory
#' @param stamp the stamp associated with this RDS file. By default it is the date when the file created
#' @return a GS object
#' @note None
#' @export
#' @seealso \code{\link{xMakeGenesets}}
#' @include xMakeGenesets.r
#' @examples
#' \dontrun{
#' GS <- xMakeGenesets(association.file="GENE2GOMF.txt", set_info.file="GO.txt", output.prefix="org.Hs.egGOMF")
#' }
xMakeGenesets <- function(association.file=NULL, set_info.file=NULL, output.prefix=NULL, output.dir="./", stamp=as.Date(Sys.time()))
{
setID <- distance <- NULL
if(!is.null(association.file) & !is.null(set_info.file) & !is.null(output.prefix)){
output.file <- paste0(output.dir, output.prefix, ".RDS")
## import set info
set_info <- readr::read_delim(set_info.file, delim="\t")
colnames(set_info) <- c("setID","name","namespace","distance")
## import genesets
#association <- readr::read_delim(association.file, col_names=F, delim="\t", skip=1)
association <- readr::read_delim(association.file, delim="\t")
colnames(association) <- c("GeneID","setID")
## focus those gene sets in common
set_info %>% dplyr::semi_join(association, by="setID") %>% dplyr::arrange(distance) -> set_info
## define genesets
association %>% dplyr::semi_join(set_info, by="setID") -> df
gs <- split(x=df$GeneID, f=df$setID)
gs %>% tibble::enframe(name='setID', value='GeneID') -> gs
## GS
GS <- list(gs=gs, set_info=set_info, stamp=stamp)
class(GS) <- "GS"
saveRDS(GS, file=output.file, compress="gzip")
#readr::write_rds(GS, path=output.file, compress="gz")
message(sprintf("Saved into '%s' (%s)!", output.file, as.character(Sys.time())), appendLF=TRUE)
invisible(GS)
}
}
|
##########################################################
# MakeTokenTable.r
# 2016.05.26
# Make fake survey token data for practice.
##########################################################

library(dplyr)

# Fixed seed so repeated runs write an identical file.
set.seed(12345)

# One hundred placeholder LimeSurvey token rows; id doubles as the token.
survey_tokens <- data.frame(
  id            = 1:100,
  firstname     = "Mentor",
  lastname      = "Teacher",
  email         = "studentteachinginitiative@gmail.com",
  emailstatus   = "OK",
  token         = 1:100,
  language      = "en",
  validfrom     = "",
  validuntil    = "",
  invited       = "N",
  reminded      = "N",
  remindercount = 0,
  completed     = "N",
  usesleft      = 1,
  attribute_1   = "Mentor Teacher",
  attribute_2   = "Dan Goldhaber",
  attribute_3   = "0001"
)

# Comma-separated output with a header row and no row names.
write.table(survey_tokens,
            file      = "tokens.csv",
            sep       = ",",
            row.names = FALSE,
            col.names = TRUE)
##########################################################
# MakeTokenTable.r
# 2016.05.26
# Make fake survey token data for practice.
##########################################################
library(dplyr)
set.seed(12345)
token.table <- data.frame(
id=1:100,
firstname="Mentor",
lastname="Teacher",
email="studentteachinginitiative@gmail.com",
emailstatus="OK",
token=1:100,
language="en",
validfrom="",
validuntil="",
invited="N",
reminded="N",
remindercount=0,
completed="N",
usesleft=1,
attribute_1="Mentor Teacher",
attribute_2="Dan Goldhaber",
attribute_3="0001"
)
write.table(token.table,
file="tokens.csv",
sep=",",
row.names=FALSE,
col.names=TRUE)
|
# Fit a cross-validated elastic-net model on the NSCLC average-rank
# training set and append the fitted model path to a log file.
library(glmnet)
# Column 1 is the response; columns 4 onward are the predictors
# (head=T / sep="," -> header row, comma-separated values).
mydata = read.table("../../../../TrainingSet/FullSet/AvgRank/NSCLC.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
# Fixed seed so the 10-fold CV split is reproducible.
set.seed(123)
# Elastic net with alpha=0.85 (mostly lasso), gaussian family, MSE loss;
# predictors are used as-is (standardize=FALSE).
glm = cv.glmnet(x,y,nfolds=10,type.measure="mse",alpha=0.85,family="gaussian",standardize=FALSE)
# Append the glmnet fit summary (df, %dev, lambda sequence) to the log.
sink('./NSCLC_086.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
| /Model/EN/AvgRank/NSCLC/NSCLC_086.R | no_license | esbgkannan/QSMART | R | false | false | 345 | r | library(glmnet)
mydata = read.table("../../../../TrainingSet/FullSet/AvgRank/NSCLC.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mse",alpha=0.85,family="gaussian",standardize=FALSE)
sink('./NSCLC_086.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
|
# Basic math
1 + 3
3 - 2
6 * 4
5 / 2
5 %/% 2
5 %% 2
5 ^ 2
# Assignment operators. = is can be used but <- is preferred
a <- 3
a
b = 4
b
# You can also use print
print(a)
# View and remove objects in your environment
objects()
rm(a)
objects()
rm(list = objects())
objects()
# concatenation
a <- c(1,3,5,7,9)
a
a[2]
# sequence
b <- c(1:50)
b
b [35]
# class
class(b)
a <- c(1, 1.0001)
a
class(a)
a <- c(1, "1", 1.0)
a
class(a)
# single or double quotes allowed, but be consistent
a <- c(1, '1', 1.0)
a
class(a)
# Looping and comparisons
if (1 == 1) {
print('1 = 1')
}
if (1 == 2) {
print('1 = 2')
} else {
print ('1 != 2')
}
if (1 == 2) {
print('1 = 2')
} else if (1 != 1){
print ('1 != 1')
} else {
print ('1 must = 1')
}
c <- 1
while (c <= 10) {
print(c)
c <- c + 1
}
for (i in 1:10) {
print(i)
}
for (i in 1:10) {
print(i)
if (i == 5){
break
}
}
# Create and work with other object types
# Lists
l <- list(1, 1.1, '1')
l
length(l)
class(l)
class(l[[1]])
class(l[[2]])
class(l[[3]])
l[[1]]
l2 <- list(1:5, 6.1:10.1, c('a', 'b', 'c', 'd', 'e'))
l2
l2[[1]]
l2[[2]][[3]]
# Matrix
m <- matrix(1:10, nrow=2)
m
class(m)
m <- matrix(1:10, ncol=2)
m
m <- matrix(1:10, ncol=2, byrow=TRUE)
m
m[1,]
m[,2]
m[3,2]
# Data Frame
df <- data.frame(1:10, 1:5)
df
class(df)
head(df, 8)
tail(df)
names(df)
names(df) <- c('Col1', 'Col2')
df
df$Col1
df[,1]
df[,-1]
df[1,]
df[-5,]
df[c(-2,-4),]
attach(df)
Col1
df$Col3 <- c(11:20)
df
Col4 <- c(30:21)
df <- cbind(df,Col4)
df <- rbind(df, c(11,21,31, 41))
dim(df)
df
# Missing values
missing <- c(1, 2,3, NA, 5, 6, NA, NA, 9, 10)
missing
is.na(missing)
mean(missing)
mean(missing, na.rm = TRUE)
# Packages
search()
View(installed.packages())
View(available.packages())
.libPaths()
install.packages('abc',
lib = 'C:/Users/johnp/Documents/R/win-library/3.3',
dependencies = TRUE)
help (package= 'abc')
library(abc, lib.loc = 'C:/Users/johnp/Documents/R/win-library/3.3')
help(getmode)
remove.packages('abc')
# Working directory
getwd()
setwd("C:/R Sessions")
# Functions
# Convert a temperature from Fahrenheit to Celsius.
#
# f: temperature in degrees Fahrenheit (numeric; vectorized).
# Returns the equivalent temperature in degrees Celsius.
celcius <- function(f) {
  # Return the expression directly: the original assigned to a throwaway
  # local named `c` (which also shadows base::c) and therefore returned
  # the value invisibly -- a common R gotcha for interactive callers.
  (f - 32) * 5 / 9
}
howhot <- celcius(75)
howhot
rm(celcius)
source('./MyFunctions.r')
howhot <- celcius(75)
howhot
howhot <- fahrenheit(28)
howhot
# Data sets
library(help = datasets)
mydata <- iris
head(mydata)
# Read data
myCSV <- read.csv('./Data/Chicago2016.csv', header = TRUE)
head(myCSV)
# Read data from a web page
library(XML)
url <- 'http://www.hockey-reference.com/leagues/NHL_2016_standings.html'
tables <- readHTMLTable(url, stringsAsFactors = FALSE)
standings <- tables$standings
View(standings)
mystandings1 <- standings[1:5]
View(mystandings1)
write.csv(mystandings1, './Data/NHL2016.csv', row.names = FALSE)
# Connect to a SQL database
library(RODBC)
myConn <- odbcDriverConnect('driver={SQL Server};
server=PERTELL04;
database=DemoDB;
Trusted Connection=true')
userData <- sqlFetch(myConn, 'Catalog')
reportData <- sqlQuery(myConn, "SELECT C.Name,
ELS.TimeStart,
ELS.TimeEnd,
ELS.TimeDataRetrieval,
ELS.TimeProcessing,
ELS.TimeRendering,
ELS.Status,
ELS.ByteCount,
ELS.[RowCount]
FROM Catalog AS C
INNER JOIN ExecutionLogStorage AS ELS ON C.ItemID = ELS.ReportID
WHERE ELS.TimeStart BETWEEN '8/1/2016' AND '8/31/2016';")
close(myConn)
# How much memory does the data set require?
object.size(reportData)
head(reportData[, c(1,5)])
reportData <- reportData[order(-reportData$TimeDataRetrieval),]
head(reportData[, c(1,5)])
summary(reportData)
topTen <- head(summary(subset(reportData$Name, reportData$Name != 'WakeUpWorld')), 10)
topTen
plot(topTen)
filteredNames <- names(topTen)
filteredReports <- (reportData$Name %in% filteredNames)
filteredData <- reportData[filteredReports, c(1,4:9)]
filteredData$Name <- factor(filteredData$Name)
filteredData$Status <- factor(filteredData$Status)
class(topTen)
topTenReports <- data.frame(ReportName = names(topTen), Count = topTen)
View(topTenReports)
d <- data.frame(tapply(filteredData$TimeDataRetrieval, filteredData$Name, mean))
d <- cbind(d, tapply(filteredData$TimeProcessing, filteredData$Name, mean))
d <- cbind(d, tapply(filteredData$TimeRendering, filteredData$Name, mean))
df <- cbind(rownames(d), d)
names(df) <- c('ReportName', 'AvgDataRetrieval', 'AvgProcessing', 'AvgRendering')
df
df <- merge(topTenReports, df, 'ReportName')
View(df)
library(ggplot2)
library(gridExtra)
plot1 <- ggplot(df, aes(x=ReportName, y=Count)) +
geom_bar(position="dodge", fill= "lightgreen", color = "black", stat="identity") +
geom_text(aes(label=Count), vjust=0.1, color="black") +
ggtitle("Number Of Report Views") +
theme(legend.title = element_text(face="italic", size = 14))
plot2 <- ggplot(df, aes(x=ReportName, y=AvgDataRetrieval)) +
geom_bar(position="dodge", fill= "lightblue", color = "black", stat="identity") +
geom_text(aes(label=format(round(df$AvgDataRetrieval / 1000, 2), nsmall = 2)), vjust=0.1, color="black",
position=position_dodge(.9), size=5) +
ggtitle("Average Data Retrieval in Seconds") +
theme(legend.title = element_text(face="italic", size = 14))
plot3 <- ggplot(df, aes(x=ReportName, y=AvgProcessing)) +
geom_bar(position="dodge", fill= "red", color = "black", stat="identity") +
geom_text(aes(label=format(round(df$AvgProcessing / 1000, 2), nsmall = 2)), vjust=0.1, color="black",
position=position_dodge(.9), size=5) +
ggtitle("Average Time Processing in Seconds") +
theme(legend.title = element_text(face="italic", size = 14))
plot4 <- ggplot(df, aes(x=ReportName, y=AvgRendering)) +
geom_bar(position="dodge", fill= "yellow",color = "black", stat="identity") +
geom_text(aes(label=format(round(df$AvgRendering / 1000, 2), nsmall = 2)), vjust=0.1, color="black",
position=position_dodge(.9), size=5) +
ggtitle("Average Time Rendering in Seconds") +
theme(legend.title = element_text(face="italic", size = 14))
plotList <- list(plot1, plot2, plot3, plot4)
do.call(grid.arrange, c(plotList, list(ncol = 1)))
?grid.arrange
| /Basic R Examples.r | no_license | jayape/WestWI | R | false | false | 6,757 | r | # Basic math
1 + 3
3 - 2
6 * 4
5 / 2
5 %/% 2
5 %% 2
5 ^ 2
# Assignment operators. = is can be used but <- is preferred
a <- 3
a
b = 4
b
# You can also use print
print(a)
# View and remove objects in your environment
objects()
rm(a)
objects()
rm(list = objects())
objects()
# concatenation
a <- c(1,3,5,7,9)
a
a[2]
# sequence
b <- c(1:50)
b
b [35]
# class
class(b)
a <- c(1, 1.0001)
a
class(a)
a <- c(1, "1", 1.0)
a
class(a)
# single or double quotes allowed, but be consistent
a <- c(1, '1', 1.0)
a
class(a)
# Looping and comparisons
if (1 == 1) {
print('1 = 1')
}
if (1 == 2) {
print('1 = 2')
} else {
print ('1 != 2')
}
if (1 == 2) {
print('1 = 2')
} else if (1 != 1){
print ('1 != 1')
} else {
print ('1 must = 1')
}
c <- 1
while (c <= 10) {
print(c)
c <- c + 1
}
for (i in 1:10) {
print(i)
}
for (i in 1:10) {
print(i)
if (i == 5){
break
}
}
# Create and work with other object types
# Lists
l <- list(1, 1.1, '1')
l
length(l)
class(l)
class(l[[1]])
class(l[[2]])
class(l[[3]])
l[[1]]
l2 <- list(1:5, 6.1:10.1, c('a', 'b', 'c', 'd', 'e'))
l2
l2[[1]]
l2[[2]][[3]]
# Matrix
m <- matrix(1:10, nrow=2)
m
class(m)
m <- matrix(1:10, ncol=2)
m
m <- matrix(1:10, ncol=2, byrow=TRUE)
m
m[1,]
m[,2]
m[3,2]
# Data Frame
df <- data.frame(1:10, 1:5)
df
class(df)
head(df, 8)
tail(df)
names(df)
names(df) <- c('Col1', 'Col2')
df
df$Col1
df[,1]
df[,-1]
df[1,]
df[-5,]
df[c(-2,-4),]
attach(df)
Col1
df$Col3 <- c(11:20)
df
Col4 <- c(30:21)
df <- cbind(df,Col4)
df <- rbind(df, c(11,21,31, 41))
dim(df)
df
# Missing values
missing <- c(1, 2,3, NA, 5, 6, NA, NA, 9, 10)
missing
is.na(missing)
mean(missing)
mean(missing, na.rm = TRUE)
# Packages
search()
View(installed.packages())
View(available.packages())
.libPaths()
install.packages('abc',
lib = 'C:/Users/johnp/Documents/R/win-library/3.3',
dependencies = TRUE)
help (package= 'abc')
library(abc, lib.loc = 'C:/Users/johnp/Documents/R/win-library/3.3')
help(getmode)
remove.packages('abc')
# Working directory
getwd()
setwd("C:/R Sessions")
# Functions
celcius <- function(f) {
c <- (f - 32) * 5/9
}
howhot <- celcius(75)
howhot
rm(celcius)
source('./MyFunctions.r')
howhot <- celcius(75)
howhot
howhot <- fahrenheit(28)
howhot
# Data sets
library(help = datasets)
mydata <- iris
head(mydata)
# Read data
myCSV <- read.csv('./Data/Chicago2016.csv', header = TRUE)
head(myCSV)
# Read data from a web page
library(XML)
url <- 'http://www.hockey-reference.com/leagues/NHL_2016_standings.html'
tables <- readHTMLTable(url, stringsAsFactors = FALSE)
standings <- tables$standings
View(standings)
mystandings1 <- standings[1:5]
View(mystandings1)
write.csv(mystandings1, './Data/NHL2016.csv', row.names = FALSE)
# Connect to a SQL database
library(RODBC)
myConn <- odbcDriverConnect('driver={SQL Server};
server=PERTELL04;
database=DemoDB;
Trusted Connection=true')
userData <- sqlFetch(myConn, 'Catalog')
reportData <- sqlQuery(myConn, "SELECT C.Name,
ELS.TimeStart,
ELS.TimeEnd,
ELS.TimeDataRetrieval,
ELS.TimeProcessing,
ELS.TimeRendering,
ELS.Status,
ELS.ByteCount,
ELS.[RowCount]
FROM Catalog AS C
INNER JOIN ExecutionLogStorage AS ELS ON C.ItemID = ELS.ReportID
WHERE ELS.TimeStart BETWEEN '8/1/2016' AND '8/31/2016';")
close(myConn)
# How much memory does the data set require?
object.size(reportData)
head(reportData[, c(1,5)])
reportData <- reportData[order(-reportData$TimeDataRetrieval),]
head(reportData[, c(1,5)])
summary(reportData)
topTen <- head(summary(subset(reportData$Name, reportData$Name != 'WakeUpWorld')), 10)
topTen
plot(topTen)
filteredNames <- names(topTen)
filteredReports <- (reportData$Name %in% filteredNames)
filteredData <- reportData[filteredReports, c(1,4:9)]
filteredData$Name <- factor(filteredData$Name)
filteredData$Status <- factor(filteredData$Status)
class(topTen)
topTenReports <- data.frame(ReportName = names(topTen), Count = topTen)
View(topTenReports)
d <- data.frame(tapply(filteredData$TimeDataRetrieval, filteredData$Name, mean))
d <- cbind(d, tapply(filteredData$TimeProcessing, filteredData$Name, mean))
d <- cbind(d, tapply(filteredData$TimeRendering, filteredData$Name, mean))
df <- cbind(rownames(d), d)
names(df) <- c('ReportName', 'AvgDataRetrieval', 'AvgProcessing', 'AvgRendering')
df
df <- merge(topTenReports, df, 'ReportName')
View(df)
library(ggplot2)
library(gridExtra)
plot1 <- ggplot(df, aes(x=ReportName, y=Count)) +
geom_bar(position="dodge", fill= "lightgreen", color = "black", stat="identity") +
geom_text(aes(label=Count), vjust=0.1, color="black") +
ggtitle("Number Of Report Views") +
theme(legend.title = element_text(face="italic", size = 14))
plot2 <- ggplot(df, aes(x=ReportName, y=AvgDataRetrieval)) +
geom_bar(position="dodge", fill= "lightblue", color = "black", stat="identity") +
geom_text(aes(label=format(round(df$AvgDataRetrieval / 1000, 2), nsmall = 2)), vjust=0.1, color="black",
position=position_dodge(.9), size=5) +
ggtitle("Average Data Retrieval in Seconds") +
theme(legend.title = element_text(face="italic", size = 14))
plot3 <- ggplot(df, aes(x=ReportName, y=AvgProcessing)) +
geom_bar(position="dodge", fill= "red", color = "black", stat="identity") +
geom_text(aes(label=format(round(df$AvgProcessing / 1000, 2), nsmall = 2)), vjust=0.1, color="black",
position=position_dodge(.9), size=5) +
ggtitle("Average Time Processing in Seconds") +
theme(legend.title = element_text(face="italic", size = 14))
plot4 <- ggplot(df, aes(x=ReportName, y=AvgRendering)) +
geom_bar(position="dodge", fill= "yellow",color = "black", stat="identity") +
geom_text(aes(label=format(round(df$AvgRendering / 1000, 2), nsmall = 2)), vjust=0.1, color="black",
position=position_dodge(.9), size=5) +
ggtitle("Average Time Rendering in Seconds") +
theme(legend.title = element_text(face="italic", size = 14))
plotList <- list(plot1, plot2, plot3, plot4)
do.call(grid.arrange, c(plotList, list(ncol = 1)))
?grid.arrange
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/keyplayer.R
\name{kpnode}
\alias{kpnode}
\title{Return the most central player in sequentially reduced networks}
\usage{
kpnode(adj.matrix, type, M = Inf, T = ncol(adj.matrix), method,
binary = FALSE, cmode, large = TRUE, geodist.precomp = NULL)
}
\arguments{
\item{adj.matrix}{Matrix indicating the adjacency matrix of the network.}
\item{type}{\code{type="betweenness"} for \code{\link[sna]{betweenness}} centrality. \cr
\code{type="closeness"} for \code{\link[sna]{closeness}} centrality. \cr
\code{type="degree"} for \code{\link[sna]{degree}} centraslity. \cr
\code{type="diffusion"} for \code{\link{diffusion}} centrality. \cr
\code{type="evcent"} for \code{\link[sna]{evcent}} (eigenvector) centrality. \cr
\code{type="fragment"} for \code{\link{fragment}} centrality. \cr
\code{type="mreach.degree"} for \code{\link{mreach.degree}} centrality. \cr
\code{type="mreach.closeness"} for \code{\link{mreach.closeness}} centrality. \cr}
\item{M}{Positive number indicating the maximum geodistance between two nodes,
above witch the two nodes are considered disconnected. The default is
\code{Inf}. The option is applicable to mreach.degree, mreach.closeness,
and fragmentation centralities.}
\item{T}{Integer indicating the maximum number of iterations
of communication process. For diffusion centrality only.
In the first iteration, the adjacency matrix
is as the input. In the nth iteration, the adjacency matrix becomes
the input adjacency matrix to the power of n. By default, T is the network size.}
\item{method}{Indication of which grouping criterion should be used. \cr
\code{"min"} indicates the "minimum" criterion and is suggested for
betweenness, closeness, fragmentation, and M-reach centralities. \cr
\code{"max"} indicates the "maximum" criterion and is suggested for
degree and eigenvector centralities.\cr
\code{"add"} indicates the "addition" criterion and is suggested for
degree and eigenvector centralities as an altenative of "max".\cr
\code{"union"} indicates the "union" criterion and is suggested for
diffusion centrality.\cr
By default, kpset uses "min".
See \code{\link{kpcent}} Details section for explanations on grouping method.}
\item{binary}{If \code{TRUE}, the adjacency matrix is binarized.
If \code{FALSE}, the edge values are considered. By default, \code{binary=FALSE}}
\item{cmode}{String indicating the type of centrality being evaluated.
The option is applicable to degree and M-reach centralities.
\code{"outdegree"}, \code{"indegree"}, and \code{"total"} refer to
indegree, outdegree, and (total) degree respectively. \code{"all"} reports
all the above measures. The default is to report the total degree.
The option also applies to closeness centrality, but with different options.
The default is to use the Gil-Schmidt power index as the closeness measure.
See \code{\link[sna]{closeness}} for complete options.}
\item{large}{Logical scalar, whether the computation method for large network is
implemented. If \code{TRUE} (the default), the method implmented in \pkg{igraph} is
used; otherwise the method implemented in \pkg{sna} is used.}
\item{geodist.precomp}{Geodistance precomputed for the graph to be
analyzed (optional).}
}
\value{
The most central player and its centrality score
}
\description{
\code{kpnode} returns the node with the highest centrality score.
}
\keyword{internal}
| /man/kpnode.Rd | no_license | ttdailytt/keyplayer | R | false | true | 3,496 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/keyplayer.R
\name{kpnode}
\alias{kpnode}
\title{Return the most central player in sequentially reduced networks}
\usage{
kpnode(adj.matrix, type, M = Inf, T = ncol(adj.matrix), method,
binary = FALSE, cmode, large = TRUE, geodist.precomp = NULL)
}
\arguments{
\item{adj.matrix}{Matrix indicating the adjacency matrix of the network.}
\item{type}{\code{type="betweenness"} for \code{\link[sna]{betweenness}} centrality. \cr
\code{type="closeness"} for \code{\link[sna]{closeness}} centrality. \cr
\code{type="degree"} for \code{\link[sna]{degree}} centraslity. \cr
\code{type="diffusion"} for \code{\link{diffusion}} centrality. \cr
\code{type="evcent"} for \code{\link[sna]{evcent}} (eigenvector) centrality. \cr
\code{type="fragment"} for \code{\link{fragment}} centrality. \cr
\code{type="mreach.degree"} for \code{\link{mreach.degree}} centrality. \cr
\code{type="mreach.closeness"} for \code{\link{mreach.closeness}} centrality. \cr}
\item{M}{Positive number indicating the maximum geodistance between two nodes,
above witch the two nodes are considered disconnected. The default is
\code{Inf}. The option is applicable to mreach.degree, mreach.closeness,
and fragmentation centralities.}
\item{T}{Integer indicating the maximum number of iterations
of communication process. For diffusion centrality only.
In the first iteration, the adjacency matrix
is as the input. In the nth iteration, the adjacency matrix becomes
the input adjacency matrix to the power of n. By default, T is the network size.}
\item{method}{Indication of which grouping criterion should be used. \cr
\code{"min"} indicates the "minimum" criterion and is suggested for
betweenness, closeness, fragmentation, and M-reach centralities. \cr
\code{"max"} indicates the "maximum" criterion and is suggested for
degree and eigenvector centralities.\cr
\code{"add"} indicates the "addition" criterion and is suggested for
degree and eigenvector centralities as an altenative of "max".\cr
\code{"union"} indicates the "union" criterion and is suggested for
diffusion centrality.\cr
By default, kpset uses "min".
See \code{\link{kpcent}} Details section for explanations on grouping method.}
\item{binary}{If \code{TRUE}, the adjacency matrix is binarized.
If \code{FALSE}, the edge values are considered. By default, \code{binary=FALSE}}
\item{cmode}{String indicating the type of centrality being evaluated.
The option is applicable to degree and M-reach centralities.
\code{"outdegree"}, \code{"indegree"}, and \code{"total"} refer to
indegree, outdegree, and (total) degree respectively. \code{"all"} reports
all the above measures. The default is to report the total degree.
The option also applies to closeness centrality, but with different options.
The default is to use the Gil-Schmidt power index as the closeness measure.
See \code{\link[sna]{closeness}} for complete options.}
\item{large}{Logical scalar, whether the computation method for large network is
implemented. If \code{TRUE} (the default), the method implmented in \pkg{igraph} is
used; otherwise the method implemented in \pkg{sna} is used.}
\item{geodist.precomp}{Geodistance precomputed for the graph to be
analyzed (optional).}
}
\value{
The most central player and its centrality score
}
\description{
\code{kpnode} returns the node with the highest centrality score.
}
\keyword{internal}
|
####################
## Load libraries ##
####################
library(MOFA2)
################
## Load model ##
################
file <- "/Users/ricard/data/peer/hdf5/with_sparsity/model_fast.hdf5"
model <- load_model(file)
plot_factor_cor(model)
####################################
## (Optional) add sample metadata ##
####################################
# Important:
# (1) the row names in the sample metadata data.frame have to match the sample names in the MOFA model
# (2) the sample name has to contain at least two columns:
# - sample: sample names
# - group: group name. Only required for multi-group inference. In your case just set the entire column to the same value
# Load metadata
# stopifnot(all(samples(model)[[1]]==metadata$sample))
# samples_metadata(model) <- metadata
###############################
## (Optional) Subset factors ##
###############################
# We can remove factors that explain little variance (in this case, we require at least 1%)
# r2 <- model@cache$variance_explained$r2_per_factor
# factors <- sapply(r2, function(x) x[,1]>0.01)
# model <- subset_factors(model, which(apply(factors,1,sum) >= 1))
#############################
## Plot variance explained ##
#############################
# Plot variance explained using individual factors
plot_variance_explained(model, factors="all")
plot_variance_explained(model, factors=c(1,2,3))
# Plot total variance explained using all factors
plot_variance_explained(model, plot_total = TRUE)[[2]]
# Plot variance explained for individual features
features <- c("Rbp4","Ttr","Spink1","Mesp1")
plot_variance_explained_per_feature(model, factors = "all", features = features) # using all factors
plot_variance_explained_per_feature(model, factors = "all", features = features) # using specific factors
########################
## Plot factor values ##
########################
plot_factor(model,
factor = 1
# color_by = "lineage" # lineage is a column in model@samples.metadata
)
# Other options...
p <- plot_factor(model,
factor = 1,
color_by = "lineage",
dot_size = 0.2, # change dot size
dodge = TRUE, # dodge points with different colors
legend = FALSE, # remove legend
add_violin = TRUE, # add violin plots
)
plot_factors(model, factor = c(1,2))
###########################
## Plot feature loadings ##
###########################
# The weights or loadings provide a score for each gene on each factor.
# Genes with no association with the factor are expected to have values close to zero
# Genes with strong association with the factor are expected to have large absolute values.
# The sign of the loading indicates the direction of the effect: a positive loading indicates that the feature is more active in the cells with positive factor values, and viceversa.
# Plot the distribution of loadings for Factor 1.
plot_weights(model,
# view = "RNA",
factor = 1,
nfeatures = 10, # Top number of features to highlight
scale = T # Scale loadings from -1 to 1
)
# If we are not interested in the directionality of the effect, we can take the absolute value of the loadings.
# We can also highlight some genes of interest using the argument `manual` to see where in the distribution they lie:
plot_weights(model,
view = "RNA",
factor = 1,
nfeatures = 5,
manual = list(c("Snai1","Mesp1","Phlda2"), c("Rhox5","Elf5")),
scale = T,
abs = T
)
# If you are not interested in the full distribution, but just on the top loadings:
plot_top_weights(model,
view = "RNA",
factor = 1,
nfeatures = 10,
scale = T,
abs = T
)
######################################
## Plot correlation between factors ##
######################################
plot_factor_cor(model)
########################
## Save updated model ##
########################
outfile <- "/Users/ricard/data/peer_wrapper/test/model_updated.rds"
saveRDS(model, outfile)
models <- list()
models[[1]] <- load_model("/Users/ricard/data/peer/hdf5/with_sparsity/model_fast.hdf5")
models[[2]] <- load_model("/Users/ricard/data/peer/hdf5/with_sparsity/model_medium.hdf5")
compare_factors(models)
library(purrr)
peer <- read.table("/Users/ricard/data/peer/data/X.named.txt.gz") %>% as.matrix %>% t
mofa <- get_factors(model)[[1]]
r <- cor(peer,mofa)
corrplot::corrplot(abs(r))
plot_factor_cor(model)
samples(model)[[1]] <- rownames(peer)
write.table(t(round(mofa,5)), file="/Users/ricard/data/peer/mofa_factors.txt", sep=",", col.names = T, row.names = T, quote=F)
| /test/analysis.R | no_license | bioFAM/PEER | R | false | false | 4,539 | r | ####################
## Load libraries ##
####################
library(MOFA2)
################
## Load model ##
################
file <- "/Users/ricard/data/peer/hdf5/with_sparsity/model_fast.hdf5"
model <- load_model(file)
plot_factor_cor(model)
####################################
## (Optional) add sample metadata ##
####################################
# Important:
# (1) the row names in the sample metadata data.frame have to match the sample names in the MOFA model
# (2) the sample name has to contain at least two columns:
# - sample: sample names
# - group: group name. Only required for multi-group inference. In your case just set the entire column to the same value
# Load metadata
# stopifnot(all(samples(model)[[1]]==metadata$sample))
# samples_metadata(model) <- metadata
###############################
## (Optional) Subset factors ##
###############################
# We can remove factors that explain little variance (in this case, we require at least 1%)
# r2 <- model@cache$variance_explained$r2_per_factor
# factors <- sapply(r2, function(x) x[,1]>0.01)
# model <- subset_factors(model, which(apply(factors,1,sum) >= 1))
#############################
## Plot variance explained ##
#############################
# Plot variance explained using individual factors
plot_variance_explained(model, factors="all")
plot_variance_explained(model, factors=c(1,2,3))
# Plot total variance explained using all factors
plot_variance_explained(model, plot_total = TRUE)[[2]]
# Plot variance explained for individual features
features <- c("Rbp4","Ttr","Spink1","Mesp1")
plot_variance_explained_per_feature(model, factors = "all", features = features) # using all factors
plot_variance_explained_per_feature(model, factors = "all", features = features) # using specific factors
########################
## Plot factor values ##
########################
plot_factor(model,
factor = 1
# color_by = "lineage" # lineage is a column in model@samples.metadata
)
# Other options...
p <- plot_factor(model,
factor = 1,
color_by = "lineage",
dot_size = 0.2, # change dot size
dodge = TRUE, # dodge points with different colors
legend = FALSE, # remove legend
add_violin = TRUE, # add violin plots
)
plot_factors(model, factor = c(1,2))
###########################
## Plot feature loadings ##
###########################
# The weights or loadings provide a score for each gene on each factor.
# Genes with no association with the factor are expected to have values close to zero
# Genes with strong association with the factor are expected to have large absolute values.
# The sign of the loading indicates the direction of the effect: a positive loading indicates that the feature is more active in the cells with positive factor values, and viceversa.
# Plot the distribution of loadings for Factor 1.
plot_weights(model,
# view = "RNA",
factor = 1,
nfeatures = 10, # Top number of features to highlight
scale = T # Scale loadings from -1 to 1
)
# If we are not interested in the directionality of the effect, we can take the absolute value of the loadings.
# We can also highlight some genes of interest using the argument `manual` to see where in the distribution they lie:
plot_weights(model,
view = "RNA",
factor = 1,
nfeatures = 5,
manual = list(c("Snai1","Mesp1","Phlda2"), c("Rhox5","Elf5")),
scale = T,
abs = T
)
# If you are not interested in the full distribution, but just on the top loadings:
plot_top_weights(model,
view = "RNA",
factor = 1,
nfeatures = 10,
scale = T,
abs = T
)
######################################
## Plot correlation between factors ##
######################################
plot_factor_cor(model)
########################
## Save updated model ##
########################
outfile <- "/Users/ricard/data/peer_wrapper/test/model_updated.rds"
saveRDS(model, outfile)
models <- list()
models[[1]] <- load_model("/Users/ricard/data/peer/hdf5/with_sparsity/model_fast.hdf5")
models[[2]] <- load_model("/Users/ricard/data/peer/hdf5/with_sparsity/model_medium.hdf5")
compare_factors(models)
library(purrr)
peer <- read.table("/Users/ricard/data/peer/data/X.named.txt.gz") %>% as.matrix %>% t
mofa <- get_factors(model)[[1]]
r <- cor(peer,mofa)
corrplot::corrplot(abs(r))
plot_factor_cor(model)
samples(model)[[1]] <- rownames(peer)
write.table(t(round(mofa,5)), file="/Users/ricard/data/peer/mofa_factors.txt", sep=",", col.names = T, row.names = T, quote=F)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Initiation.R
\name{initiation}
\alias{initiation}
\title{Initiation Function for Genetic Algorithm for Variable Selection}
\usage{
initiation(C, P)
}
\arguments{
\item{C}{The number of independent variables to be selected from}
\item{P}{Population size for each generation}
}
\description{
Generates initial parent generation to be used by select()
}
\examples{
# call initiation function
init_parents <- initiation( C = 5 , P = 30 )
}
\keyword{algorithm,}
\keyword{genetic}
\keyword{initiation}
\keyword{model}
\keyword{selection,}
| /man/initiation.Rd | no_license | WaverlyWei/GA-Package | R | false | true | 612 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Initiation.R
\name{initiation}
\alias{initiation}
\title{Initiation Function for Genetic Algorithm for Variable Selection}
\usage{
initiation(C, P)
}
\arguments{
\item{C}{The number of independent variables to be selected from}
\item{P}{Population size for each generation}
}
\description{
Generates initial parent generation to be used by select()
}
\examples{
# call initiation function
init_parents <- initiation( C = 5 , P = 30 )
}
\keyword{algorithm,}
\keyword{genetic}
\keyword{initiation}
\keyword{model}
\keyword{selection,}
|
\name{F.norm2}
\alias{F.norm2}
\title{The Squared Frobenius Norm}
\description{Calculate the squared Frobenius norm of a matrix}
\usage{
F.norm2(A)
}
\arguments{
\item{A}{ a matrix }
}
\value{
a scalar of the squared Frobenius norm.
}
\author{Binhuan Wang}
\examples{
A<-matrix(1:9,3,3);
F.norm2(A);
}
\keyword{Frobenius norm} | /man/F.norm2.rd | no_license | qizhu21/CVTuningCov | R | false | false | 344 | rd | \name{F.norm2}
\alias{F.norm2}
\title{The Squared Frobenius Norm}
\description{Calculate the squared Frobenius norm of a matrix}
\usage{
F.norm2(A)
}
\arguments{
\item{A}{ a matrix }
}
\value{
a scalar of the squared Frobenius norm.
}
\author{Binhuan Wang}
\examples{
A<-matrix(1:9,3,3);
F.norm2(A);
}
\keyword{Frobenius norm} |
## create combined_master_table
## have a table of rfid, project_name, and sex
########################
## COMBINED_MASTER_TABLE
########################
## update the following items in the following R files
#for p50
source("/home/bonnie/Dropbox (Palmer Lab)/Palmer Lab/Bonnie Lin/github/P50/after_renewal/CREATE/CREATE_P50DATABASENAMES.R")
#for u01
source("/home/bonnie/Dropbox (Palmer Lab)/Palmer Lab/Bonnie Lin/github/WFU_U01_MasterTables/CREATE/CREATE_WFUDATABASENAMES.R")
#for pcal
source("/home/bonnie/Dropbox (Palmer Lab)/Palmer Lab/Bonnie Lin/github/pcal_brian_trainor/CREATE/CREATE_SAMPLEIDS_LIBPREP.R")
#for zebrafish
source("/home/bonnie/Dropbox (Palmer Lab)/Palmer Lab/Bonnie Lin/github/Zebrafish/CREATE/CREATE_EXCEL.R") # instead of CREATE_SAMPLEIDS_LIBPREP.R
#for huda
source("/home/bonnie/Dropbox (Palmer Lab)/Palmer Lab/Bonnie Lin/github/u01_huda_akil/data_cleanup_Akil_SD.R")
## XX fix the source of this -- temporary 07/30/2020
shipments_df <- shipments_df %>%
mutate(u01 = replace(u01, grepl("scrub", comment, ignore.case=T)&grepl("Olivier", u01), "Olivier_Scrub")) %>%
mutate(cohort = replace(cohort, u01 == "Olivier_Scrub", NA)) %>%
distinct()
# this is after the id's have been processed
# zebrafish_sample_info_df <- zebrafish_sample_info_df %>% mutate(project_name = "r01_su_guo")
Zebrafish_Guo_xl <- Zebrafish_Guo_xl %>% mutate(project_name = "r01_su_guo") %>%
rename("rfid" = "fish_id") %>%
rowwise() %>%
mutate(rfid = replace(rfid, grepl("\\d+_Plate\\d+_", rfid), gsub("_", "", rfid))) %>%
ungroup()
combined <- list(p50 = shipments_p50_df[, c("p50", "rfid", "sex", "coatcolor")],
u01 = shipments_df[, c("u01", "rfid", "sex", "coatcolor")],
pcal = pcal_sample_info_df[ , c("plate", "rfid")],
zebrafish = Zebrafish_Guo_xl[, c("rfid", "project_name")],
# zebrafish = zebrafish_sample_info_df[, c("plate", "rfid", "project_name")],
huda_df = huda_df)
# run when you have the information for pcal, zebrafish, and huda
# previously named combined_df
sample_metadata <- combined %>%
rbindlist(fill = T) %>%
mutate_all(~gsub(" ", "", .)) %>% # remove all spaces for better joining (Library # -> Library#)
rowwise() %>%
mutate(project_name = replace(project_name, is.na(project_name),
case_when(
grepl("p\\.cal", rfid) ~ "pcal_brian_trainor",
grepl("Plate", rfid) ~ "r01_su_guo",
grepl("Olivier_Oxy", u01) ~ "u01_olivier_george_oxycodone",
grepl("Olivier_Co", u01) ~ "u01_olivier_george_cocaine",
grepl("Olivier_Scrub", u01) ~ "u01_olivier_george_scrub",
grepl("Mitchell", u01) ~ "u01_suzanne_mitchell",
grepl("Jhou", u01) ~ "u01_tom_jhou",
grepl("Kalivas_Italy", u01) ~ "u01_peter_kalivas_italy",
grepl("Kalivas$", u01) ~ "u01_peter_kalivas_us",
grepl("Meyer", p50) ~ "p50_paul_meyer_2020",
grepl("Richards", p50) ~ "p50_jerry_richards_2020",
grepl("Chen", p50) ~ "p50_hao_chen_2020",
TRUE ~ "NA")
)) %>%
mutate(organism = case_when(
project_name %in% c("p50_paul_meyer", "p50_jerry_richards", "p50_hao_chen",
"u01_peter_kalivas_italy", "u01_peter_kalivas_us","u01_tom_jhou",
"u01_suzanne_mitchell", "u01_olivier_george_cocaine", "u01_olivier_george_oxycodone",
"u01_oliver_george_scrub",
"u01_huda_akil") ~ "rat",
project_name == "r01_su_guo" ~ "zebrafish",
project_name == "pcal_brian_trainor" ~ "california mouse"
)) %>%
ungroup() %>%
mutate(strain = NA,
comments = NA) %>% distinct() %>%
# add_count(rfid) %>%
# mutate(comments = replace(comments, n == 2, "Scrub")) %>%
select(rfid, sex, coatcolor, project_name, organism, strain, comments)
# write.csv(sample_metadata, file = "sample_metadata.csv", row.names = F)
## exception for getting data from genotyping file
seq01_fish <- flowcell_df %>%
mutate(rfid = coalesce(rfid, sample_id)) %>%
left_join(sample_metadata[, c("rfid", "project_name")], by ="rfid") %>%
mutate(library = gsub("Riptide-", "Riptide", library)) %>% subset(is.na(project_name)&library=='UMich8_Fish') %>%
rename("comments" = "comment") %>%
mutate(comments = "Test samples with high missing rate, exclude from imputation, no phenotypes",
sex = NA,
coatcolor = NA,
project_name = "r01_su_guo",
organism = NA,
strain = "Ekkwill fish") %>%
select(rfid, sex, coatcolor, project_name, organism, strain, comments)
genotyping_fish_uniform_ids <- seq01_fish %>%
select(rfid) %>%
mutate(rfid_genotyping = gsub("[ -]", "_", rfid))
# %>%
# write.csv(file = "no_phenotype_fish_sample_metadata.csv", row.names = F)
# seq01_fish %>% write.csv(file = "no_phenotype_fish_sample_metadata_fixrfid.csv", row.names = F) # don't change the original rfid's assigned by the center
sample_metadata <- rbind(sample_metadata, seq01_fish)
## updates after 09/04/2020
# Jerry shipment 03
setwd("/home/bonnie/Desktop/Database/csv files/p50_jerry_richards_2020")
shipments_p50_df %>% subset(p50 == "Richards"&cohort == "C03") %>%
mutate_all(~gsub(" ", "", .)) %>% # remove all spaces for better joining (Library # -> Library#)
mutate(project_name = "p50_jerry_richards_2020",
organism = NA,
strain = NA,
comments = NA) %>% select(rfid, sex, coatcolor, project_name, organism, strain, comments) %>% write.csv("c03_samptrack_sampmetadata.csv", row.names = F)
## Jerry shipment 03.5??
## xx Jhou shipment 16
## XX add zebrafish breeders
read_excel("~/Dropbox (Palmer Lab)/Palmer Lab/Bonnie Lin/Zebrafish/Families_zebrafish_20200821 reformatted.xlsx") %>%
clean_names() %>%
mutate_all(as.character) %>% mutate(breeder_id = paste0(mother, ",", father)) %>% select(breeder_id) %>% separate_rows(1, sep = ",") %>% mutate(join = "in pedigree")
## give pedigree for riyan
setwd("~/Dropbox (Palmer Lab)/Palmer Lab/Bonnie Lin/Zebrafish")
read_excel("~/Dropbox (Palmer Lab)/Palmer Lab/Bonnie Lin/Zebrafish/Families_zebrafish_20200821 reformatted.xlsx") %>%
clean_names() %>%
mutate_all(as.character) %>%
mutate(fish_id = replace(fish_id, grepl("Plate", fish_id), paste0(gsub("-", "_", fish_id))),
fish_id = replace(fish_id, grepl("Plate", fish_id), paste0(gsub("-(\\D)(\\d)$", "-\\2\\1", fish_id)))) %>%
write.xlsx("Families_zebrafish_20200821_reformatted_changedids.xlsx")
## get the p50 controls
read.xlsx("Riptide_control_96.xlsx") %>% mutate_all(as.character)
| /CREATE/R/EXTRACT_SAMPLE_METADATA.R | no_license | bonnfire/PalmerLab_genotyping | R | false | false | 6,917 | r | ## create combined_master_table
## have a table of rfid, project_name, and sex
########################
## COMBINED_MASTER_TABLE
########################
## update the following items in the following R files
#for p50
source("/home/bonnie/Dropbox (Palmer Lab)/Palmer Lab/Bonnie Lin/github/P50/after_renewal/CREATE/CREATE_P50DATABASENAMES.R")
#for u01
source("/home/bonnie/Dropbox (Palmer Lab)/Palmer Lab/Bonnie Lin/github/WFU_U01_MasterTables/CREATE/CREATE_WFUDATABASENAMES.R")
#for pcal
source("/home/bonnie/Dropbox (Palmer Lab)/Palmer Lab/Bonnie Lin/github/pcal_brian_trainor/CREATE/CREATE_SAMPLEIDS_LIBPREP.R")
#for zebrafish
source("/home/bonnie/Dropbox (Palmer Lab)/Palmer Lab/Bonnie Lin/github/Zebrafish/CREATE/CREATE_EXCEL.R") # instead of CREATE_SAMPLEIDS_LIBPREP.R
#for huda
source("/home/bonnie/Dropbox (Palmer Lab)/Palmer Lab/Bonnie Lin/github/u01_huda_akil/data_cleanup_Akil_SD.R")
## XX fix the source of this -- temporary 07/30/2020
shipments_df <- shipments_df %>%
mutate(u01 = replace(u01, grepl("scrub", comment, ignore.case=T)&grepl("Olivier", u01), "Olivier_Scrub")) %>%
mutate(cohort = replace(cohort, u01 == "Olivier_Scrub", NA)) %>%
distinct()
# this is after the id's have been processed
# zebrafish_sample_info_df <- zebrafish_sample_info_df %>% mutate(project_name = "r01_su_guo")
Zebrafish_Guo_xl <- Zebrafish_Guo_xl %>% mutate(project_name = "r01_su_guo") %>%
rename("rfid" = "fish_id") %>%
rowwise() %>%
mutate(rfid = replace(rfid, grepl("\\d+_Plate\\d+_", rfid), gsub("_", "", rfid))) %>%
ungroup()
combined <- list(p50 = shipments_p50_df[, c("p50", "rfid", "sex", "coatcolor")],
u01 = shipments_df[, c("u01", "rfid", "sex", "coatcolor")],
pcal = pcal_sample_info_df[ , c("plate", "rfid")],
zebrafish = Zebrafish_Guo_xl[, c("rfid", "project_name")],
# zebrafish = zebrafish_sample_info_df[, c("plate", "rfid", "project_name")],
huda_df = huda_df)
# run when you have the information for pcal, zebrafish, and huda
# previously named combined_df
sample_metadata <- combined %>%
rbindlist(fill = T) %>%
mutate_all(~gsub(" ", "", .)) %>% # remove all spaces for better joining (Library # -> Library#)
rowwise() %>%
mutate(project_name = replace(project_name, is.na(project_name),
case_when(
grepl("p\\.cal", rfid) ~ "pcal_brian_trainor",
grepl("Plate", rfid) ~ "r01_su_guo",
grepl("Olivier_Oxy", u01) ~ "u01_olivier_george_oxycodone",
grepl("Olivier_Co", u01) ~ "u01_olivier_george_cocaine",
grepl("Olivier_Scrub", u01) ~ "u01_olivier_george_scrub",
grepl("Mitchell", u01) ~ "u01_suzanne_mitchell",
grepl("Jhou", u01) ~ "u01_tom_jhou",
grepl("Kalivas_Italy", u01) ~ "u01_peter_kalivas_italy",
grepl("Kalivas$", u01) ~ "u01_peter_kalivas_us",
grepl("Meyer", p50) ~ "p50_paul_meyer_2020",
grepl("Richards", p50) ~ "p50_jerry_richards_2020",
grepl("Chen", p50) ~ "p50_hao_chen_2020",
TRUE ~ "NA")
)) %>%
mutate(organism = case_when(
project_name %in% c("p50_paul_meyer", "p50_jerry_richards", "p50_hao_chen",
"u01_peter_kalivas_italy", "u01_peter_kalivas_us","u01_tom_jhou",
"u01_suzanne_mitchell", "u01_olivier_george_cocaine", "u01_olivier_george_oxycodone",
"u01_oliver_george_scrub",
"u01_huda_akil") ~ "rat",
project_name == "r01_su_guo" ~ "zebrafish",
project_name == "pcal_brian_trainor" ~ "california mouse"
)) %>%
ungroup() %>%
mutate(strain = NA,
comments = NA) %>% distinct() %>%
# add_count(rfid) %>%
# mutate(comments = replace(comments, n == 2, "Scrub")) %>%
select(rfid, sex, coatcolor, project_name, organism, strain, comments)
# write.csv(sample_metadata, file = "sample_metadata.csv", row.names = F)
## exception for getting data from genotyping file
seq01_fish <- flowcell_df %>%
mutate(rfid = coalesce(rfid, sample_id)) %>%
left_join(sample_metadata[, c("rfid", "project_name")], by ="rfid") %>%
mutate(library = gsub("Riptide-", "Riptide", library)) %>% subset(is.na(project_name)&library=='UMich8_Fish') %>%
rename("comments" = "comment") %>%
mutate(comments = "Test samples with high missing rate, exclude from imputation, no phenotypes",
sex = NA,
coatcolor = NA,
project_name = "r01_su_guo",
organism = NA,
strain = "Ekkwill fish") %>%
select(rfid, sex, coatcolor, project_name, organism, strain, comments)
genotyping_fish_uniform_ids <- seq01_fish %>%
select(rfid) %>%
mutate(rfid_genotyping = gsub("[ -]", "_", rfid))
# %>%
# write.csv(file = "no_phenotype_fish_sample_metadata.csv", row.names = F)
# seq01_fish %>% write.csv(file = "no_phenotype_fish_sample_metadata_fixrfid.csv", row.names = F) # don't change the original rfid's assigned by the center
sample_metadata <- rbind(sample_metadata, seq01_fish)
## updates after 09/04/2020
# Jerry shipment 03
setwd("/home/bonnie/Desktop/Database/csv files/p50_jerry_richards_2020")
shipments_p50_df %>% subset(p50 == "Richards"&cohort == "C03") %>%
mutate_all(~gsub(" ", "", .)) %>% # remove all spaces for better joining (Library # -> Library#)
mutate(project_name = "p50_jerry_richards_2020",
organism = NA,
strain = NA,
comments = NA) %>% select(rfid, sex, coatcolor, project_name, organism, strain, comments) %>% write.csv("c03_samptrack_sampmetadata.csv", row.names = F)
## Jerry shipment 03.5??
## xx Jhou shipment 16
## XX add zebrafish breeders
read_excel("~/Dropbox (Palmer Lab)/Palmer Lab/Bonnie Lin/Zebrafish/Families_zebrafish_20200821 reformatted.xlsx") %>%
clean_names() %>%
mutate_all(as.character) %>% mutate(breeder_id = paste0(mother, ",", father)) %>% select(breeder_id) %>% separate_rows(1, sep = ",") %>% mutate(join = "in pedigree")
## give pedigree for riyan
setwd("~/Dropbox (Palmer Lab)/Palmer Lab/Bonnie Lin/Zebrafish")
read_excel("~/Dropbox (Palmer Lab)/Palmer Lab/Bonnie Lin/Zebrafish/Families_zebrafish_20200821 reformatted.xlsx") %>%
clean_names() %>%
mutate_all(as.character) %>%
mutate(fish_id = replace(fish_id, grepl("Plate", fish_id), paste0(gsub("-", "_", fish_id))),
fish_id = replace(fish_id, grepl("Plate", fish_id), paste0(gsub("-(\\D)(\\d)$", "-\\2\\1", fish_id)))) %>%
write.xlsx("Families_zebrafish_20200821_reformatted_changedids.xlsx")
## get the p50 controls
read.xlsx("Riptide_control_96.xlsx") %>% mutate_all(as.character)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/123.ConfidenceIntervals_CC_n_x.R
\name{ciCSCx}
\alias{ciCSCx}
\title{Continuity corrected Score method of CI estimation}
\usage{
ciCSCx(x, n, alp, c)
}
\arguments{
\item{x}{- Number of successes}
\item{n}{- Number of trials}
\item{alp}{- Alpha value (significance level required)}
\item{c}{- Continuity correction}
}
\value{
A dataframe with
\item{x}{ Number of successes (positive samples)}
\item{LCSx }{ Score Lower limit}
\item{UCSx }{ Score Upper Limit}
\item{LABB }{ Score Lower Abberation}
\item{UABB }{ Score Upper Abberation}
\item{ZWI }{ Zero Width Interval}
}
\description{
Continuity corrected Score method of CI estimation
}
\details{
A score test approach using the
test statistic \eqn{(abs(phat-p)-c)/SE}
where \eqn{c > 0} is a constant for continuity correction for
all \eqn{x = 0, 1, 2 ..n}
}
\examples{
x=5; n=5; alp=0.05; c=1/(2*n)
ciCSCx(x,n,alp,c)
}
\references{
[1] 1998 Agresti A and Coull BA.
Approximate is better than "Exact" for interval estimation of binomial proportions.
The American Statistician: 52; 119 - 126.
[2] 1998 Newcombe RG.
Two-sided confidence intervals for the single proportion: Comparison of seven methods.
Statistics in Medicine: 17; 857 - 872.
[3] 2008 Pires, A.M., Amado, C.
Interval Estimators for a Binomial Proportion: Comparison of Twenty Methods.
REVSTAT - Statistical Journal, 6, 165-197.
}
\seealso{
\code{\link{prop.test} and \link{binom.test}} for equivalent base Stats R functionality,
\code{\link[binom]{binom.confint}} provides similar functionality for 11 methods,
\code{\link[PropCIs]{wald2ci}} which provides multiple functions for CI calculation ,
\code{\link[BlakerCI]{binom.blaker.limits}} which calculates Blaker CI which is not covered here and
\code{\link[prevalence]{propCI}} which provides similar functionality.
Other Continuity correction methods of CI estimation given x and n: \code{\link{PlotciCAllxg}},
\code{\link{PlotciCAllx}}, \code{\link{ciCAllx}},
\code{\link{ciCLTx}}, \code{\link{ciCTWx}},
\code{\link{ciCWDx}}
}
| /man/ciCSCx.Rd | no_license | cran/proportion | R | false | true | 2,181 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/123.ConfidenceIntervals_CC_n_x.R
\name{ciCSCx}
\alias{ciCSCx}
\title{Continuity corrected Score method of CI estimation}
\usage{
ciCSCx(x, n, alp, c)
}
\arguments{
\item{x}{- Number of successes}
\item{n}{- Number of trials}
\item{alp}{- Alpha value (significance level required)}
\item{c}{- Continuity correction}
}
\value{
A dataframe with
\item{x}{ Number of successes (positive samples)}
\item{LCSx }{ Score Lower limit}
\item{UCSx }{ Score Upper Limit}
\item{LABB }{ Score Lower Abberation}
\item{UABB }{ Score Upper Abberation}
\item{ZWI }{ Zero Width Interval}
}
\description{
Continuity corrected Score method of CI estimation
}
\details{
A score test approach using the
test statistic \eqn{(abs(phat-p)-c)/SE}
where \eqn{c > 0} is a constant for continuity correction for
all \eqn{x = 0, 1, 2 ..n}
}
\examples{
x=5; n=5; alp=0.05; c=1/(2*n)
ciCSCx(x,n,alp,c)
}
\references{
[1] 1998 Agresti A and Coull BA.
Approximate is better than "Exact" for interval estimation of binomial proportions.
The American Statistician: 52; 119 - 126.
[2] 1998 Newcombe RG.
Two-sided confidence intervals for the single proportion: Comparison of seven methods.
Statistics in Medicine: 17; 857 - 872.
[3] 2008 Pires, A.M., Amado, C.
Interval Estimators for a Binomial Proportion: Comparison of Twenty Methods.
REVSTAT - Statistical Journal, 6, 165-197.
}
\seealso{
\code{\link{prop.test} and \link{binom.test}} for equivalent base Stats R functionality,
\code{\link[binom]{binom.confint}} provides similar functionality for 11 methods,
\code{\link[PropCIs]{wald2ci}} which provides multiple functions for CI calculation ,
\code{\link[BlakerCI]{binom.blaker.limits}} which calculates Blaker CI which is not covered here and
\code{\link[prevalence]{propCI}} which provides similar functionality.
Other Continuity correction methods of CI estimation given x and n: \code{\link{PlotciCAllxg}},
\code{\link{PlotciCAllx}}, \code{\link{ciCAllx}},
\code{\link{ciCLTx}}, \code{\link{ciCTWx}},
\code{\link{ciCWDx}}
}
|
#options(echo=FALSE)
options(stringsAsFactors = FALSE)
args <- commandArgs(trailingOnly = TRUE)

## args[1]: table of candidate gene pairs (no header; pair in column V4 as "geneA:geneB")
candidates <- read.delim(args[1], header = FALSE)
## args[2]: reference annotation mapping transcript IDs ("name") to gene symbols ("name2")
ref <- read.delim(args[2], stringsAsFactors = FALSE)[, c("name", "name2")]
## args[3]: contig IDs exactly as stored in the .bam file (one per line)
ref_contigs <- read.delim(args[3], header = FALSE, stringsAsFactors = FALSE)[, 1]

## Index the bam contig IDs by their 3rd "_"-separated field, then replace the
## annotation's transcript IDs with the matching bam contig IDs.
names(ref_contigs) <- sapply(strsplit(ref_contigs, "_"), function(x) { x[3] })
ref$name <- ref_contigs[ref$name]

## Collapse all contigs belonging to each gene symbol into one
## space-separated string, indexed by symbol.
sref <- split(ref$name, ref$name2)
sref <- sapply(sref, function(x) { paste(x, collapse = " ") })

## Build the table of unique (sorted) gene pairs with the contig lists of each
## partner; this table is what gets run against the .bam file.
genes <- unique(candidates$V4)
sgenes <- lapply(strsplit(genes, ":"), function(x) { sort(x) })
sgenes <- unique(sgenes)
g1 <- sapply(sgenes, function(x) { sref[[x[1]]] })
g2 <- sapply(sgenes, function(x) { sref[[x[2]]] })
genes_new <- sapply(sgenes, function(x) { paste(x[1], x[2], sep = ":") })
## args[4]: output path; "?" is the field separator expected downstream.
write.table(data.frame(genes_new, g1, g2), args[4], sep = "?",
            row.names = FALSE, col.names = FALSE, quote = FALSE)
| /get_spanning_reads_for_direct_1.R | no_license | jchenpku/JAFFA | R | false | false | 1,061 | r | #options(echo=FALSE)
options(stringsAsFactors = FALSE)
args <- commandArgs(trailingOnly = TRUE)

## args[1]: table of candidate gene pairs (no header; pair in column V4 as "geneA:geneB")
candidates <- read.delim(args[1], header = FALSE)
## args[2]: reference annotation mapping transcript IDs ("name") to gene symbols ("name2")
ref <- read.delim(args[2], stringsAsFactors = FALSE)[, c("name", "name2")]
## args[3]: contig IDs exactly as stored in the .bam file (one per line)
ref_contigs <- read.delim(args[3], header = FALSE, stringsAsFactors = FALSE)[, 1]

## Index the bam contig IDs by their 3rd "_"-separated field, then replace the
## annotation's transcript IDs with the matching bam contig IDs.
names(ref_contigs) <- sapply(strsplit(ref_contigs, "_"), function(x) { x[3] })
ref$name <- ref_contigs[ref$name]

## Collapse all contigs belonging to each gene symbol into one
## space-separated string, indexed by symbol.
sref <- split(ref$name, ref$name2)
sref <- sapply(sref, function(x) { paste(x, collapse = " ") })

## Build the table of unique (sorted) gene pairs with the contig lists of each
## partner; this table is what gets run against the .bam file.
genes <- unique(candidates$V4)
sgenes <- lapply(strsplit(genes, ":"), function(x) { sort(x) })
sgenes <- unique(sgenes)
g1 <- sapply(sgenes, function(x) { sref[[x[1]]] })
g2 <- sapply(sgenes, function(x) { sref[[x[2]]] })
genes_new <- sapply(sgenes, function(x) { paste(x[1], x[2], sep = ":") })
## args[4]: output path; "?" is the field separator expected downstream.
write.table(data.frame(genes_new, g1, g2), args[4], sep = "?",
            row.names = FALSE, col.names = FALSE, quote = FALSE)
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 49946
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 49946
c
c Input Parameter (command line, file):
c input filename QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query49_query06_1344.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 8200
c no.of clauses 49946
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 49946
c
c QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query49_query06_1344.qdimacs 8200 49946 E1 [] 0 16 8184 49946 NONE
| /code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query49_query06_1344/query49_query06_1344.R | no_license | arey0pushpa/dcnf-autarky | R | false | false | 717 | r | c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 49946
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 49946
c
c Input Parameter (command line, file):
c input filename QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query49_query06_1344.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 8200
c no.of clauses 49946
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 49946
c
c QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query49_query06_1344.qdimacs 8200 49946 E1 [] 0 16 8184 49946 NONE
|
##############################################
# apply logistic model get from clinical data on TCGA samples
##############################################
library(magrittr)
library(tidyverse)
library(ConsensusClusterPlus)
library(ComplexHeatmap)
theme_set(theme_classic())

# data path ---------------------------------------------------------------
# NOTE: the repeated basic_path / TCGA_path assignments below select the host
# machine; only the LAST assignment of each takes effect.
# server 1 basic path
basic_path <- file.path("/home/huff/project")
# E zhou basi path
basic_path <- file.path("F:/我的坚果云")
# Hust basi path
basic_path <- file.path("S:/坚果云/我的坚果云")
immune_res_path <- file.path(basic_path, "immune_checkpoint/result_20171025")
TCGA_path <- file.path("/data/TCGA/TCGA_data")
TCGA_path <- file.path("H:/data/TCGA/TCGA_data")
gene_list_path <- file.path(basic_path, "immune_checkpoint/checkpoint/20171021_checkpoint")
res_path <- file.path(immune_res_path, "ICP_score/4.logistics-model-on-TCGA")

# load data ---------------------------------------------------------------
## TCGA expression data from stad and skcm
exp_data <- readr::read_rds(file.path(TCGA_path, "pancan33_expr.rds.gz")) %>%
  dplyr::filter(cancer_types %in% c("SKCM", "STAD"))

## Immune-checkpoint gene list; symbol column is used to subset expression.
gene_list <- read.table(file.path(gene_list_path, "gene_list_type"), header = TRUE)
gene_list$symbol <- as.character(gene_list$symbol)

## clinical info of TCGA samples
survival_data <- readr::read_rds(file.path(TCGA_path, "TCGA_pancan_cancer_cell_survival_time.rds.gz")) %>%
  dplyr::rename("cancer_types" = "type") %>%
  dplyr::rename("survival" = "data")

## clinical expression and response data
clinical_data_for_logistic <- readr::read_rds(file.path(immune_res_path, "ICP_score/3.logistic-regression-clinical/class_metastic_type", "genelist_exp_for_logistic.rds.gz"))

## features filtered by logistic regression
final_feature.5 <- readr::read_tsv(file.path(immune_res_path, "ICP_score/3.logistic-regression-clinical/class_metastic_type", "by_cancer_targets_30test.6", "final_feature.6.tsv")) %>%
  tidyr::nest(features, .key = "features")

## stepAIC-selected features, one file per cancer / blockade combination.
final_feature.5.1 <- readr::read_tsv(file.path(immune_res_path, "ICP_score/3.logistic-regression-clinical", "by_cancer_targets_30test.5.stepAIC", "gastric cancer_ALL30_by-feature-from_anti–PD-1_features.tsv")) %>%
  dplyr::mutate(Cancer.y = "gastric cancer", blockade = "anti-PD-1")
final_feature.5.2 <- readr::read_tsv(file.path(immune_res_path, "ICP_score/3.logistic-regression-clinical", "by_cancer_targets_30test.5.stepAIC", "melanoma_anti-CTLA-4_by-feature-from_anti-CTLA-4_features.tsv")) %>%
  dplyr::mutate(Cancer.y = "melanoma", blockade = "anti-CTLA-4")
final_feature.5.3 <- readr::read_tsv(file.path(immune_res_path, "ICP_score/3.logistic-regression-clinical", "by_cancer_targets_30test.5.stepAIC", "melanoma_anti-CTLA-4_by-feature-from_anti–PD-1_features.tsv")) %>%
  dplyr::mutate(Cancer.y = "melanoma", blockade = "anti-PD-1")
final_feature.5.4 <- readr::read_tsv(file.path(immune_res_path, "ICP_score/3.logistic-regression-clinical", "by_cancer_targets_30test.5.stepAIC", "melanoma_anti–PD-1、CTLA-4_by-feature-from_anti-CTLA-4_features.tsv")) %>%
  dplyr::mutate(Cancer.y = "melanoma", blockade = "anti-CTLA-4")
final_feature.5.5 <- readr::read_tsv(file.path(immune_res_path, "ICP_score/3.logistic-regression-clinical", "by_cancer_targets_30test.5.stepAIC", "melanoma_anti–PD-1、CTLA-4_by-feature-from_anti–PD-1_features.tsv")) %>%
  dplyr::mutate(Cancer.y = "melanoma", blockade = "anti-PD-1")
final_feature.5.6 <- readr::read_tsv(file.path(immune_res_path, "ICP_score/3.logistic-regression-clinical", "by_cancer_targets_30test.5.stepAIC", "melanoma_anti–PD-1_by-feature-from_anti-CTLA-4_features.tsv")) %>%
  dplyr::mutate(Cancer.y = "melanoma", blockade = "anti-CTLA-4")
final_feature.5.7 <- readr::read_tsv(file.path(immune_res_path, "ICP_score/3.logistic-regression-clinical", "by_cancer_targets_30test.5.stepAIC", "melanoma_anti–PD-1_by-feature-from_anti–PD-1_features.tsv")) %>%
  dplyr::mutate(Cancer.y = "melanoma", blockade = "anti-PD-1")

## Combine all stepAIC feature sets into one nested table (kept for alternative
## runs; the active pwalk call below uses final_feature.5 instead).
final_feature.5.1 %>%
  rbind(final_feature.5.2) %>%
  rbind(final_feature.5.3) %>%
  rbind(final_feature.5.4) %>%
  rbind(final_feature.5.5) %>%
  rbind(final_feature.5.6) %>%
  rbind(final_feature.5.7) %>%
  # dplyr::select(-blockade) %>%
  unique() %>%
  tidyr::nest(features, .key = "features") -> final_feature.5.x

## timer TIL data (tumor samples only: barcode position 14 == 0)
immunity_path_2 <- file.path(immune_res_path, "ICP_score/2.1.GSVA-ICPs_exp_site_5_feature")
TIMER_immunity_onlyTumor <- readr::read_tsv(file.path(immunity_path_2, "immuneEstimation.txt")) %>%
  dplyr::filter(substr(barcode, 14, 14) == 0) %>%
  dplyr::mutate(barcode = substr(barcode, 1, 12)) %>%
  tidyr::gather(-barcode, key = "Cell_type", value = "TIL")

# mutation burden data
mutation_burden_class <- readr::read_rds(file.path(TCGA_path, "classfication_of_26_cancers_by_mutation_burden192.rds.gz")) %>%
  tidyr::unnest() %>%
  dplyr::rename("cancer_types" = "Cancer_Types") %>%
  dplyr::filter(cancer_types %in% c("SKCM", "STAD"))

# filter genelist expression ----------------------------------------------
exp_data %>%
  dplyr::mutate(filter_exp = purrr::map(expr, .f = function(.x) {
    .x %>%
      dplyr::filter(symbol %in% gene_list$symbol)
  })) %>%
  dplyr::select(-expr) -> genelist_exp

# combine data for analysis------------------------------------------------
final_feature.5 %>%
  dplyr::mutate(cancer_types = ifelse(Cancer.y == "gastric cancer", "STAD", "SKCM")) %>%
  dplyr::inner_join(clinical_data_for_logistic, by = c("Cancer.y", "Cancer_type", "blockade")) %>%
  dplyr::inner_join(genelist_exp, by = "cancer_types") %>%
  dplyr::inner_join(survival_data, by = "cancer_types") %>%
  purrr::pwalk(.f = fn_run, analysis = "cluster")
# (Dead commented-out alternative runs over final_feature.5.x were removed;
# recover them from version control if needed.)

# functions ---------------------------------------------------------------
color_list <- c("1" = "pink1",
                "2" = "skyblue1",
                "3" = "darkseagreen1",
                "4" = "darkgreen",
                "5" = "dodgerblue4",
                "6" = "tan2")
## Run one cancer-type / blockade combination: group TCGA samples either with
## the logistic model trained on the clinical cohort (analysis = "lm_model")
## or by hierarchical clustering on the feature genes (any other value), then
## run survival, TIL and mutation-burden comparisons between the groups.
## Results are written under res_path/by_model or res_path/by_cluster.
## The signature matches the columns supplied by purrr::pwalk at the top of
## the script; sample_group and response are unused in the cluster branch.
fn_run <- function(Cancer.y, Cancer_type, blockade, features, cancer_types, response, data_spread, sample_group, filter_exp, survival, analysis = "lm_model") {
  print(paste(Cancer.y, Cancer_type, blockade, cancer_types))

  # Progression-free survival: status = PFS event indicator, time = PFS.time.
  survival %>%
    dplyr::rename("barcode" = "bcr_patient_barcode") %>%
    dplyr::select(barcode, PFS, PFS.time) %>%
    dplyr::rename("status" = "PFS", "time" = "PFS.time") -> survival

  ## Feature-gene expression matrix (symbol x patient barcode). Metastatic
  ## cohorts prefer metastatic TCGA samples (type code "06") and fall back to
  ## primary tumors ("01") when none exist; other cohorts use "01" directly.
  if (strsplit(Cancer_type, " ")[[1]][1] == "metastatic") {
    filter_exp.ready <- fn_feature_exp(filter_exp, features, "06")
    if (nrow(filter_exp.ready) == 0) {
      filter_exp.ready <- fn_feature_exp(filter_exp, features, "01")
    }
  } else {
    filter_exp.ready <- fn_feature_exp(filter_exp, features, "01")
  }

  # Number of sample groups. Hoisted out of the cluster branch: the original
  # only defined C there, so the lm_model branch failed later with
  # "object 'C' not found" when building the output file names.
  C <- 2
  if (analysis == "lm_model") {
    res_path.1 <- file.path(res_path, "by_model")
    ## Train the logistic model on the clinical cohort ...
    model <- fn_logistic_model(data_spread, response, features)
    filter_exp.ready %>%
      tidyr::gather(-symbol, key = "barcode", value = "exp") %>%
      tidyr::spread(key = "symbol", value = "exp") -> filter_exp.ready.forlogistic
    # ... and predict response probabilities for the TCGA samples; samples
    # with probability > 0.5 form group "1", the rest group "2".
    probabilities <- model %>% predict(filter_exp.ready.forlogistic[, -1], type = "response")
    probabilities %>%
      as.data.frame() %>%
      dplyr::mutate(barcode = filter_exp.ready.forlogistic$barcode) %>%
      dplyr::mutate(barcode = substr(barcode, 1, 12)) %>%
      dplyr::inner_join(survival, by = "barcode") %>%
      dplyr::rename("probabilities" = ".") %>%
      dplyr::mutate(group = ifelse(probabilities > 0.5, "1", "2")) %>%
      dplyr::arrange(group) %>%
      dplyr::mutate(group = as.character(group)) -> sample_info
  } else {
    res_path.1 <- file.path(res_path, "by_cluster")
    ## Hierarchical clustering (euclidean distance, complete linkage) on the
    ## feature-gene expression matrix, cut into C clusters.
    filter_exp.ready.m <- as.matrix(as.data.frame(filter_exp.ready[, -1]))
    rownames(filter_exp.ready.m) <- filter_exp.ready$symbol
    dist <- dist(t(filter_exp.ready.m), method = "euclidean")
    hc <- hclust(d = dist, method = "complete")
    group <- cutree(hc, k = C)
    data.frame(barcode = names(group), group = group) %>%
      dplyr::as.tbl() -> group
    survival %>%
      dplyr::inner_join(group, by = "barcode") %>%
      dplyr::mutate(group = as.character(group)) %>%
      as.data.frame() -> sample_info
  }

  #### survival analysis #####
  # Extra levels ("no"/"yes","5","6") kept so the palette also covers other
  # grouping schemes used elsewhere in the project.
  color <- data.frame(group = c("1", "2", "no", "yes", "5", "6"),
                      color = c("pink1", "skyblue1", "darkseagreen1", "darkgreen", "dodgerblue4", "tan2"))
  fn_survival(data = sample_info, title = paste(cancer_types, blockade, sep = "_"), color = color,
              "group", sur_name = paste(C, cancer_types, "from_features", Cancer_type, blockade, sep = "_"),
              xlab = "Time (days)", result_path = file.path(res_path.1, "survival"), h = 3, w = 4, lx = 0.8, ly = 0.6)
  fn_compare(group = sample_info %>% dplyr::mutate(group = as.integer(group)),
             cancer_types = paste(cancer_types, Cancer_type, blockade), data = TIMER_immunity_onlyTumor,
             value = "TIL", facet = "Cell_type", ylab = "TIL", C = C,
             title = cancer_types, data_type = "TIMER", result_path = file.path(res_path.1, "TIL"), h = 6, w = 8)
  fn_mutation_burden_all(data = sample_info %>%
                           dplyr::mutate(group = as.integer(group)) %>%
                           dplyr::inner_join(mutation_burden_class, by = "barcode") %>%
                           dplyr::mutate(sm_count = log2(sm_count)),
                         group = "group", value = "sm_count", xlab = "log2(Mutation burden)",
                         m_a_name = paste(C, cancer_types, Cancer_type, blockade, sep = "_"),
                         result_path = file.path(res_path.1, "MB"), h = 6, w = 8)
}

## Subset `filter_exp` to the model feature genes and to TCGA samples whose
## barcode sample-type code (positions 14-15) equals `sample_code`; returns a
## tibble with one row per gene symbol and one column per patient barcode
## (barcodes shortened to the 12-character patient ID). Replaces three
## near-identical inline copies of this pipeline in the original fn_run.
fn_feature_exp <- function(filter_exp, features, sample_code) {
  filter_exp %>%
    dplyr::select(-entrez_id) %>%
    dplyr::mutate(symbol = gsub("-", ".", symbol)) %>%
    dplyr::filter(symbol %in% features$features) %>%
    tidyr::gather(-symbol, key = "barcode", value = "exp") %>%
    tidyr::spread(key = "symbol", value = "exp") %>%
    dplyr::filter(substr(barcode, 14, 15) == sample_code) %>%
    dplyr::mutate(barcode = substr(barcode, 1, 12)) %>%
    tidyr::gather(-barcode, key = "symbol", value = "exp") %>%
    tidyr::spread(key = "barcode", value = "exp")
}
## do survival analysis
# color <- tibble::tibble(group = c("no","yes"),
# color = c("red","blue"))
# fn_survival(data=data.for.survival,title=paste(cancer_types,"from_features",Cancer.y,blockade,sep="_"),color=color,
# "group",sur_name=paste(cancer_types,"from_features",Cancer.y,blockade,sep="_"),
# xlab = "Time (days)",result_path = file.path(res_path,"survival"),h=3,w=4,lx=0.8,ly=0.6)
## Construct the logistic model: fit a binomial GLM of clinical Response on
## the selected feature genes.
##
## data_spread: samples x gene-expression table with a "Run" sample-ID column.
## response:    table with columns Run and Response (joined on Run).
## features:    data frame with a "features" column of predictor gene symbols.
## Returns the fitted glm object.
fn_logistic_model <- function(data_spread, response, features) {
  data_spread %>%
    dplyr::inner_join(response, by = "Run") %>%
    dplyr::select(-Run) %>%
    dplyr::mutate(Response = as.factor(Response)) -> data.ready
  data.ready <- na.omit(data.ready)
  # Gene symbols may contain "-", which is not a valid name inside a formula.
  colnames(data.ready) <- gsub("-", ".", colnames(data.ready))
  # Build "Response ~ f1 + f2 + ..." in one step instead of growing a string
  # term-by-term in a loop.
  formula <- paste("Response", paste(features$features, collapse = "+"), sep = "~")
  ## do logistic regression
  model <- glm(as.formula(formula), data = data.ready, family = binomial)
  model
}
## Function to draw survival: Kaplan-Meier plot with log-rank p-value for a
## grouped cohort, saved as png + pdf.
##
## data:   data frame with columns time, status and group.
## title:  plot title prefix; the log-rank p-value is appended to it.
## color:  data frame mapping group levels to colors (columns group, color).
## group:  NOTE(review): this argument is not actually used -- the
##         survfit/survdiff formulas reference the literal column `group`;
##         confirm callers always pass data with that column name.
## sur_name / result_path: output file name stem and output directory.
## h, w:   figure size; lx, ly: legend position in normalized coordinates.
fn_survival <- function(data,title,color,group,sur_name,xlab,result_path,h,w,lx=0.8,ly=0.6){
  print("survival")
  library(survival)
  library(survminer)
  # Kaplan-Meier fit per group plus log-rank test across groups.
  fit <- survfit(survival::Surv(time, status) ~ group, data = data, na.action = na.exclude)
  diff <- survdiff(survival::Surv(time, status) ~ group, data = data, na.action = na.exclude)
  # Log-rank p-value: chi-squared statistic on (#groups - 1) degrees of freedom.
  kmp <- 1 - pchisq(diff$chisq, df = length(levels(as.factor(data$group))) - 1)
  # Pair each group's color with a "group, n=<count>" legend label.
  color %>%
    dplyr::inner_join(data,by="group") %>%
    dplyr::group_by(group) %>%
    dplyr::mutate(n = n()) %>%
    dplyr::ungroup() %>%
    dplyr::mutate(group = paste(group,", n=",n,sep="")) %>%
    dplyr::select(group,color) %>%
    unique() -> color_paired
  # ggsurvplot returns a list; [[1]] is the ggplot object, to which a manual
  # color scale with the per-group labels is added.
  survminer::ggsurvplot(fit,pval=F, #pval.method = T,
                        data = data,
                        surv.median.line = "hv",
                        title = paste(title,", p =", signif(kmp, 2)), # change it when doing diff data
                        xlab = xlab,
                        ylab = 'Probability of survival',
                        # legend.title = "Methyla group:",
                        legend= c(lx,ly),
                        # ggtheme = theme_survminer(),
                        ggtheme = theme(
                          panel.border = element_blank(), panel.grid.major = element_blank(),
                          panel.grid.minor = element_blank(), axis.line = element_line(colour = "black",
                                                                                      size = 0.5),
                          panel.background = element_rect(fill = "white"),
                          legend.key = element_blank(),
                          legend.background = element_blank(),
                          legend.text = element_text(size = 8),
                          axis.text = element_text(size = 12),
                          legend.title = element_blank(),
                          axis.title = element_text(size = 12,color = "black"),
                          text = element_text(color = "black" )
                        )
  )[[1]] +
    scale_color_manual(
      values = color_paired$color,
      labels = color_paired$group
    ) -> p
  # File names embed the p-value; saved in both png and pdf.
  ggsave(filename = paste(sur_name,signif(kmp, 2),"png",sep = "."), plot = p, path = result_path,device = "png",height = h,width = w)
  ggsave(filename = paste(sur_name,signif(kmp, 2),"pdf",sep = "."), plot = p, path = result_path,device = "pdf",height = h,width = w)
}
## fn TIL compare: violin/box comparison of a value (e.g. TIL estimates)
## between sample groups, one panel per level of `facet`, saved as tiff + pdf.
##
## group:        data frame with columns barcode and group (integer IDs 1..5).
## cancer_types: label used in the output file name.
## data:         data frame with columns barcode, the facet column and `value`.
## value/facet:  column names (strings) for the y value and the facet variable.
##               NOTE(review): the count annotation below groups by the literal
##               column Cell_type, so it assumes facet == "Cell_type"; confirm.
## C:            number of clusters, embedded in the file name.
## Draws nothing when fewer than 2 groups remain after joining.
fn_compare <- function(group, cancer_types, data, value, facet, ylab, title, data_type, result_path, C, h, w) {
  print("Timer")
  group %>%
    dplyr::inner_join(data, by = "barcode") %>%
    dplyr::rename("value" = value) -> plot_ready

  # Fixed palette for up to 5 groups; keep only the colors of groups present.
  color_paired <- tibble::tibble(group = c(1:5),
                                 color = c("pink1", "skyblue1", "darkseagreen1", "darkgreen", "dodgerblue4")) %>%
    dplyr::inner_join(plot_ready, by = "group") %>%
    dplyr::select(group, color) %>%
    unique()

  # Per-group sample counts, drawn just below each group's minimum value.
  anno_text <- plot_ready %>%
    dplyr::group_by(group, Cell_type) %>%
    dplyr::mutate(n = n(), y = min(value) - max(value) * 0.05) %>%
    dplyr::select(group, Cell_type, n, y) %>%
    unique() %>%
    dplyr::ungroup()

  if (nrow(color_paired) >= 2) {
    # Capture the plot explicitly instead of relying on ggplot2's last_plot()
    # when saving.
    plot_ready %>%
      dplyr::arrange(group) %>%
      ggpubr::ggboxplot(x = "group", y = "value", fill = "white", alpha = 0, width = 0.1,
                        color = "group") +
      geom_violin(aes(fill = group), alpha = 0.5) +
      geom_boxplot(fill = "white", alpha = 0, width = 0.1) +
      geom_text(aes(x = group, y = y, label = paste("n=", n)), data = anno_text, size = 2) +
      facet_wrap(as.formula(paste("~", facet)), strip.position = "bottom", scales = "free") +
      scale_color_manual(values = color_paired$color) +
      scale_fill_manual(values = color_paired$color) +
      labs(y = ylab, x = "Cluster", title = title) +
      theme(legend.position = "none",
            axis.title = element_text(colour = "black"),
            strip.background = element_rect(fill = "white", colour = "white"),
            text = element_text(size = 12, colour = "black")) +
      ggpubr::stat_compare_means(method = "anova") -> p
    fig_name <- paste("C", C, cancer_types, sep = "_")
    ggsave(filename = paste(fig_name, "tiff", sep = "."), plot = p, path = result_path, device = "tiff", width = w, height = h)
    ggsave(filename = paste(fig_name, "pdf", sep = "."), plot = p, path = result_path, device = "pdf", width = w, height = h)
  }
}
## Box/violin plot of (log2) mutation burden across groups, saved as png + pdf.
##
## data:        data frame containing the grouping column `group` and the
##              value column `value`.
## group/value: column names (strings) for the x grouping and the y value.
## color:       accepted for interface compatibility but currently unused
##              (the corresponding manual scales are disabled).
## xlab:        axis label; note it is applied to the Y axis via ylab().
## m_a_name / result_path: output file name stem and output directory.
fn_mutation_burden_all <- function(data, group, value, color, xlab, m_a_name, result_path, w = 4, h = 3) {
  print("mutation burden")
  # Capture the plot explicitly instead of relying on ggplot2's last_plot()
  # when saving.
  data %>%
    ggpubr::ggboxplot(x = group, y = value, fill = "white", alpha = 0, width = 0.1,
                      color = group) +
    geom_violin(aes(fill = group), alpha = 0.5) +
    geom_jitter(aes(color = group), alpha = 0.2, width = 0.1, size = 0.1) +
    geom_boxplot(fill = "white", alpha = 0, width = 0.1) +
    scale_x_discrete(labels = unique(data$group)) +
    ylab(xlab) +
    xlab("Group") +
    theme(legend.position = "none",
          strip.background = element_rect(fill = "white", colour = "white"),
          text = element_text(size = 12, colour = "black"),
          strip.text = element_text(size = 12)) +
    ggpubr::stat_compare_means(method = "anova") -> p
  ggsave(filename = paste(m_a_name, "png", sep = "."), plot = p, path = result_path, device = "png", width = w, height = h)
  ggsave(filename = paste(m_a_name, "pdf", sep = "."), plot = p, path = result_path, device = "pdf", width = w, height = h)
}
| /ICP_score/4.8.logistic-regression-model_perform_on_TCGA.R | no_license | flower1996/immune-cp | R | false | false | 23,026 | r | ##############################################
# apply logistic model get from clinical data on TCGA samples
##############################################
library(magrittr)
library(tidyverse)
library(ConsensusClusterPlus)
library(ComplexHeatmap)
theme_set(theme_classic())

# data path ---------------------------------------------------------------
# NOTE: the repeated basic_path / TCGA_path assignments below select the host
# machine; only the LAST assignment of each takes effect.
# server 1 basic path
basic_path <- file.path("/home/huff/project")
# E zhou basi path
basic_path <- file.path("F:/我的坚果云")
# Hust basi path
basic_path <- file.path("S:/坚果云/我的坚果云")
immune_res_path <- file.path(basic_path, "immune_checkpoint/result_20171025")
TCGA_path <- file.path("/data/TCGA/TCGA_data")
TCGA_path <- file.path("H:/data/TCGA/TCGA_data")
gene_list_path <- file.path(basic_path, "immune_checkpoint/checkpoint/20171021_checkpoint")
res_path <- file.path(immune_res_path, "ICP_score/4.logistics-model-on-TCGA")

# load data ---------------------------------------------------------------
## TCGA expression data from stad and skcm
exp_data <- readr::read_rds(file.path(TCGA_path, "pancan33_expr.rds.gz")) %>%
  dplyr::filter(cancer_types %in% c("SKCM", "STAD"))

## Immune-checkpoint gene list; symbol column is used to subset expression.
gene_list <- read.table(file.path(gene_list_path, "gene_list_type"), header = TRUE)
gene_list$symbol <- as.character(gene_list$symbol)

## clinical info of TCGA samples
survival_data <- readr::read_rds(file.path(TCGA_path, "TCGA_pancan_cancer_cell_survival_time.rds.gz")) %>%
  dplyr::rename("cancer_types" = "type") %>%
  dplyr::rename("survival" = "data")

## clinical expression and response data
clinical_data_for_logistic <- readr::read_rds(file.path(immune_res_path, "ICP_score/3.logistic-regression-clinical/class_metastic_type", "genelist_exp_for_logistic.rds.gz"))

## features filtered by logistic regression
final_feature.5 <- readr::read_tsv(file.path(immune_res_path, "ICP_score/3.logistic-regression-clinical/class_metastic_type", "by_cancer_targets_30test.6", "final_feature.6.tsv")) %>%
  tidyr::nest(features, .key = "features")

## stepAIC-selected features, one file per cancer / blockade combination.
final_feature.5.1 <- readr::read_tsv(file.path(immune_res_path, "ICP_score/3.logistic-regression-clinical", "by_cancer_targets_30test.5.stepAIC", "gastric cancer_ALL30_by-feature-from_anti–PD-1_features.tsv")) %>%
  dplyr::mutate(Cancer.y = "gastric cancer", blockade = "anti-PD-1")
final_feature.5.2 <- readr::read_tsv(file.path(immune_res_path, "ICP_score/3.logistic-regression-clinical", "by_cancer_targets_30test.5.stepAIC", "melanoma_anti-CTLA-4_by-feature-from_anti-CTLA-4_features.tsv")) %>%
  dplyr::mutate(Cancer.y = "melanoma", blockade = "anti-CTLA-4")
final_feature.5.3 <- readr::read_tsv(file.path(immune_res_path, "ICP_score/3.logistic-regression-clinical", "by_cancer_targets_30test.5.stepAIC", "melanoma_anti-CTLA-4_by-feature-from_anti–PD-1_features.tsv")) %>%
  dplyr::mutate(Cancer.y = "melanoma", blockade = "anti-PD-1")
final_feature.5.4 <- readr::read_tsv(file.path(immune_res_path, "ICP_score/3.logistic-regression-clinical", "by_cancer_targets_30test.5.stepAIC", "melanoma_anti–PD-1、CTLA-4_by-feature-from_anti-CTLA-4_features.tsv")) %>%
  dplyr::mutate(Cancer.y = "melanoma", blockade = "anti-CTLA-4")
final_feature.5.5 <- readr::read_tsv(file.path(immune_res_path, "ICP_score/3.logistic-regression-clinical", "by_cancer_targets_30test.5.stepAIC", "melanoma_anti–PD-1、CTLA-4_by-feature-from_anti–PD-1_features.tsv")) %>%
  dplyr::mutate(Cancer.y = "melanoma", blockade = "anti-PD-1")
final_feature.5.6 <- readr::read_tsv(file.path(immune_res_path, "ICP_score/3.logistic-regression-clinical", "by_cancer_targets_30test.5.stepAIC", "melanoma_anti–PD-1_by-feature-from_anti-CTLA-4_features.tsv")) %>%
  dplyr::mutate(Cancer.y = "melanoma", blockade = "anti-CTLA-4")
final_feature.5.7 <- readr::read_tsv(file.path(immune_res_path, "ICP_score/3.logistic-regression-clinical", "by_cancer_targets_30test.5.stepAIC", "melanoma_anti–PD-1_by-feature-from_anti–PD-1_features.tsv")) %>%
  dplyr::mutate(Cancer.y = "melanoma", blockade = "anti-PD-1")

## Combine all stepAIC feature sets into one nested table (kept for alternative
## runs; the active pwalk call below uses final_feature.5 instead).
final_feature.5.1 %>%
  rbind(final_feature.5.2) %>%
  rbind(final_feature.5.3) %>%
  rbind(final_feature.5.4) %>%
  rbind(final_feature.5.5) %>%
  rbind(final_feature.5.6) %>%
  rbind(final_feature.5.7) %>%
  # dplyr::select(-blockade) %>%
  unique() %>%
  tidyr::nest(features, .key = "features") -> final_feature.5.x

## timer TIL data (tumor samples only: barcode position 14 == 0)
immunity_path_2 <- file.path(immune_res_path, "ICP_score/2.1.GSVA-ICPs_exp_site_5_feature")
TIMER_immunity_onlyTumor <- readr::read_tsv(file.path(immunity_path_2, "immuneEstimation.txt")) %>%
  dplyr::filter(substr(barcode, 14, 14) == 0) %>%
  dplyr::mutate(barcode = substr(barcode, 1, 12)) %>%
  tidyr::gather(-barcode, key = "Cell_type", value = "TIL")

# mutation burden data
mutation_burden_class <- readr::read_rds(file.path(TCGA_path, "classfication_of_26_cancers_by_mutation_burden192.rds.gz")) %>%
  tidyr::unnest() %>%
  dplyr::rename("cancer_types" = "Cancer_Types") %>%
  dplyr::filter(cancer_types %in% c("SKCM", "STAD"))

# filter genelist expression ----------------------------------------------
exp_data %>%
  dplyr::mutate(filter_exp = purrr::map(expr, .f = function(.x) {
    .x %>%
      dplyr::filter(symbol %in% gene_list$symbol)
  })) %>%
  dplyr::select(-expr) -> genelist_exp

# combine data for analysis------------------------------------------------
final_feature.5 %>%
  dplyr::mutate(cancer_types = ifelse(Cancer.y == "gastric cancer", "STAD", "SKCM")) %>%
  dplyr::inner_join(clinical_data_for_logistic, by = c("Cancer.y", "Cancer_type", "blockade")) %>%
  dplyr::inner_join(genelist_exp, by = "cancer_types") %>%
  dplyr::inner_join(survival_data, by = "cancer_types") %>%
  purrr::pwalk(.f = fn_run, analysis = "cluster")
# (Dead commented-out alternative runs over final_feature.5.x were removed;
# recover them from version control if needed.)

# functions ---------------------------------------------------------------
color_list <- c("1" = "pink1",
                "2" = "skyblue1",
                "3" = "darkseagreen1",
                "4" = "darkgreen",
                "5" = "dodgerblue4",
                "6" = "tan2")
## Run the downstream analysis for one ICB feature set against one TCGA
## cancer type: split TCGA samples into two groups -- either by a logistic
## model trained on the ICB cohort (analysis = "lm_model") or by hierarchical
## clustering on the feature genes -- then draw survival curves, TIL
## comparisons and mutation-burden comparisons for the groups.
## NOTE(review): depends on globals `res_path`, `TIMER_immunity_onlyTumor`
## and `mutation_burden_class`; the `sample_group` parameter is unused; in the
## "lm_model" branch `C` is never defined locally and falls back to a global
## `C` if one exists -- confirm this is intended.
fn_run <- function(Cancer.y,Cancer_type,blockade,features,cancer_types,response,data_spread,sample_group,filter_exp,survival,analysis="lm_model"){
  print(paste(Cancer.y,Cancer_type,blockade,cancer_types))
  # Standardize the survival table to barcode/status/time (PFS endpoint).
  survival %>%
    dplyr::rename("barcode" = "bcr_patient_barcode") %>%
    dplyr::select(barcode,PFS,PFS.time) %>%
    dplyr::rename("status" = "PFS", "time" = "PFS.time") -> survival
  ## use features to class samples
  # Keep only feature genes; pick metastatic samples (barcode sample code
  # "06") for metastatic cohorts, otherwise primary tumour samples ("01").
  if(strsplit(Cancer_type," ")[[1]][1]=="metastatic"){
    filter_exp %>%
      dplyr::select(-entrez_id) %>%
      dplyr::mutate(symbol = gsub("-",".",symbol)) %>%
      dplyr::filter(symbol %in% features$features) %>%
      tidyr::gather(-symbol,key="barcode",value="exp") %>%
      tidyr::spread(key="symbol",value="exp") %>%
      dplyr::filter(substr(barcode,14,15)=="06") %>%
      dplyr::mutate(barcode = substr(barcode,1,12)) %>%
      tidyr::gather(-barcode,key="symbol",value="exp") %>%
      tidyr::spread(key="barcode",value="exp") -> filter_exp.ready
    # Fall back to primary-tumour samples ("01") when no metastatic
    # samples exist for this cancer type.
    if(nrow(filter_exp.ready)==0){
      filter_exp %>%
        dplyr::select(-entrez_id) %>%
        dplyr::mutate(symbol = gsub("-",".",symbol)) %>%
        dplyr::filter(symbol %in% features$features) %>%
        tidyr::gather(-symbol,key="barcode",value="exp") %>%
        tidyr::spread(key="symbol",value="exp") %>%
        dplyr::filter(substr(barcode,14,15)=="01") %>%
        dplyr::mutate(barcode = substr(barcode,1,12)) %>%
        tidyr::gather(-barcode,key="symbol",value="exp") %>%
        tidyr::spread(key="barcode",value="exp") -> filter_exp.ready
    }
  }else{
    filter_exp %>%
      dplyr::select(-entrez_id) %>%
      dplyr::mutate(symbol = gsub("-",".",symbol)) %>%
      dplyr::filter(symbol %in% features$features) %>%
      tidyr::gather(-symbol,key="barcode",value="exp") %>%
      tidyr::spread(key="symbol",value="exp") %>%
      dplyr::filter(substr(barcode,14,15)=="01") %>%
      dplyr::mutate(barcode = substr(barcode,1,12)) %>%
      tidyr::gather(-barcode,key="symbol",value="exp") %>%
      tidyr::spread(key="barcode",value="exp") -> filter_exp.ready
  }
  if(analysis == "lm_model"){
    res_path.1 <- file.path(res_path,"by_model")
    # ## get the logistic model
    model <- fn_logistic_model(data_spread,response,features)
    # Transpose back to samples x genes so columns match the model terms.
    filter_exp.ready %>%
      tidyr::gather(-symbol,key="barcode",value="exp") %>%
      tidyr::spread(key="symbol",value="exp") -> filter_exp.ready.forlogistic
    # use model to predict TCGA data
    probabilities <- model %>% predict(filter_exp.ready.forlogistic[,-1],type = "response")
    # Group "1" = predicted responders (prob > 0.5), group "2" = the rest.
    probabilities %>%
      as.data.frame() %>%
      dplyr::mutate(barcode = filter_exp.ready.forlogistic$barcode) %>%
      dplyr::mutate(barcode = substr(barcode,1,12)) %>%
      dplyr::inner_join(survival,by="barcode") %>%
      dplyr::rename("probabilities" = ".") %>%
      dplyr::mutate(group = ifelse(probabilities> 0.5, "1", "2")) %>%
      dplyr::arrange(group) %>%
      dplyr::mutate(group = as.character(group)) -> sample_info
  } else {
    res_path.1 <- file.path(res_path,"by_cluster")
    ## cluster analysis
    filter_exp.ready.m <- as.matrix(as.data.frame(filter_exp.ready[,-1]))
    rownames(filter_exp.ready.m) <- filter_exp.ready$symbol
    # results = ConsensusClusterPlus(filter_exp.ready.m,maxK = 5,reps = 100,pItem = 0.8,
    #                                pFeature = 1,
    #                                title = file.path(res_path, cancer_types),
    #                                distance = "binary", plot = "pdf",
    #                                clusterAlg = "hc",seed = 1262118388.71279)
    # Compute the dissimilarity matrix
    # Samples are in columns, hence the t(); cut the dendrogram into C = 2
    # groups.
    dist <- dist(t(filter_exp.ready.m), method = "euclidean")
    hc <- hclust(d = dist, method = "complete")
    C = 2
    group <- cutree(hc, k = C)
    # group <- results[[i]]$consensusClass
    # data.frame(Run = names(group),group = group) %>%
    #   readr::write_tsv(file.path(res_path, pubmed_ID, paste(C,"clusters_group_info.tsv",sep = "_")))
    data.frame(barcode = names(group),group = group) %>%
      dplyr::as.tbl() -> group
    ###### complex heatmap #####
    survival %>%
      dplyr::inner_join(group, by = "barcode") %>%
      # dplyr::select(Response, group) %>%
      dplyr::mutate(group = as.character(group)) %>%
      as.data.frame() -> sample_info
  }
  ## draw pic
  # for (i in 2:5) {
  #   C <- i
  # rownames(sample_info) <- sample_info$barcode
  # sample_anno <- HeatmapAnnotation(df = sample_info,
  #                                  col = list(group=color_list),
  #                                  width = unit(0.5, "cm"),
  #                                  name = c("CC group"))
  # draw(sample_anno,1:45)
  # heatmap
  # png(file.path(res_path,"complex_heatmap",paste(C, pubmed_ID, "GSVAscore", "heatmap.png", sep = "_")),
  #     height = 300, width = 600)
  # Heatmap(filter_exp.ready.m,
  #         show_row_names = T,
  #         show_column_names = FALSE,
  #         cluster_columns = T,
  #         clustering_distance_columns = "euclidean",
  #         clustering_method_columns = "complete",
  #         top_annotation = sample_anno,
  #         show_row_dend = FALSE, # whether show row clusters.
  #         heatmap_legend_param = list(title = c("GSVA score")))
  # dev.off()
  #### PCA analysis #####
  # fn_pca_scatter(clinical=clinical,group=group,gsva.dist,cancer_types,pubmed_ID=pubmed_ID,C=C)
  # fn_compare(group = group, cancer_types = cancer_types,
  #            data = mutation_burden_class %>%
  #              dplyr::rename("Cell_type" = "cancer_types") %>%
  #              dplyr::mutate(sm_count = ifelse(sm_count==0,0.1,sm_count)) %>%
  #              dplyr::mutate(sm_count = log2(sm_count)),
  #            value = "sm_count", facet = "Cell_type", ylab = "log2 (Mutation burden)",
  #            title = "", data_type = "MutationBurden", C = C,
  #            result_path = file.path(res_path, cancer_types), h = 3, w = 4)
  # fn_compare(group = group, cancer_types = cancer_types, data = TIMER_immunity_onlyTumor,
  #            value = "TIL", facet = "Cell_type", ylab = "TIL", C = C,
  #            title = cancer_types, data_type = "TIMER",result_path = file.path(res_path, cancer_types), h = 6, w = 8)
  #### survival analysis #####
  # Group-to-colour mapping consumed by fn_survival() via an inner join.
  color <- data.frame(group = c("1","2","no","yes","5","6"),
                      color = c("pink1","skyblue1","darkseagreen1","darkgreen","dodgerblue4","tan2"))
  # KM survival curves for the two sample groups.
  fn_survival(data=sample_info,title=paste(cancer_types,blockade,sep="_"),color=color,
              "group",sur_name=paste(C,cancer_types,"from_features",Cancer_type,blockade,sep="_"),
              xlab = "Time (days)",result_path = file.path(res_path.1,"survival"),h=3,w=4,lx=0.8,ly=0.6)
  # TIL (tumour-infiltrating lymphocyte) comparison between groups.
  fn_compare(group = sample_info %>% dplyr::mutate(group = as.integer(group)),
             cancer_types = paste(cancer_types,Cancer_type,blockade), data = TIMER_immunity_onlyTumor,
             value = "TIL", facet = "Cell_type", ylab = "TIL", C = C,
             title = cancer_types, data_type = "TIMER",result_path = file.path(res_path.1, "TIL"), h = 6, w = 8)
  # Mutation-burden comparison (log2-transformed) between groups.
  fn_mutation_burden_all(data = sample_info %>%
                           dplyr::mutate(group = as.integer(group)) %>%
                           dplyr::inner_join(mutation_burden_class,by="barcode") %>%
                           dplyr::mutate(sm_count = log2(sm_count)),
                         group = "group",value = "sm_count", xlab = "log2(Mutation burden)",
                         m_a_name = paste(C,cancer_types,Cancer_type,blockade,sep="_"),
                         result_path = file.path(res_path.1, "MB"), h = 6, w = 8)
  # apply(filter_exp.ready.m,1,scale)
  # tiff(file.path(res_path,paste(pubmed_ID, cancer_types,"GSVAscore", "heatmap.tiff", sep = "_")),
  #      height = 300, width = 600,compression = c("lzw"))
  # pheatmap(res.gsva, #scale = "row",
  #          clustering_distance_rows = "correlation",
  #          color = colorRampPalette(c("#00B2EE","white","red"))(50),
  #          border_color = NA,cutree_rows = 2,
  #          show_colnames = F, treeheight_col = 30, treeheight_row = 20,
  #          cutree_cols = 2,main = paste(paste("PMID:",pubmed_ID),cancer_types," GSVA score", sep = ","))
  # dev.off()
  # fn_survival.calculate(group, clinical, pubmed_ID, C,cancer_types, result_path = res_path, h = 3, w = 4)
  # }
}
## do survival analysis
# color <- tibble::tibble(group = c("no","yes"),
# color = c("red","blue"))
# fn_survival(data=data.for.survival,title=paste(cancer_types,"from_features",Cancer.y,blockade,sep="_"),color=color,
# "group",sur_name=paste(cancer_types,"from_features",Cancer.y,blockade,sep="_"),
# xlab = "Time (days)",result_path = file.path(res_path,"survival"),h=3,w=4,lx=0.8,ly=0.6)
## construct logistic model
## Fit a logistic regression of ICB response on the selected feature genes.
##
## data_spread: samples x gene-expression table with a "Run" id column
## response:    data frame with Run and Response columns
## features:    data frame whose `features` column lists the predictor genes
##              (gene symbols; "-" is normalised to "." to match column names)
##
## Returns the fitted `glm` object (family = binomial).
fn_logistic_model <- function(data_spread,response,features){
  data_spread %>%
    dplyr::inner_join(response, by = "Run") %>%
    dplyr::select(-Run) %>%
    dplyr::mutate(Response=as.factor(Response)) -> data.ready
  data.ready <- na.omit(data.ready)
  # Make gene symbols syntactically valid R names so they work in a formula.
  colnames(data.ready) <- gsub("-", ".", colnames(data.ready), fixed = TRUE)
  if (nrow(features) == 0) {
    stop("fn_logistic_model: 'features' contains no predictor genes", call. = FALSE)
  }
  # Build "Response ~ f1 + f2 + ..." in one step. The original loop over
  # 1:nrow(features) would also iterate for a 0-row input (1:0 == c(1, 0)).
  formula <- paste("Response", paste(features$features, collapse = " + "), sep = " ~ ")
  ## do logistic regression
  model <- glm(as.formula(formula), data = data.ready, family = binomial)
  model
}
## function to draw survival
## Kaplan-Meier survival plot for grouped samples, saved as PNG and PDF.
##
## data:        data frame with columns time, status and group
## title:       plot title prefix; the log-rank p-value is appended
## color:       data frame mapping group levels to colours (columns group, color)
## group:       name of the grouping column -- NOTE(review): currently unused;
##              the survival formula hardcodes the `group` column of `data`
## sur_name:    base name of the output files (the p-value is appended)
## result_path: output directory; files are h x w inches
## lx, ly:      legend position inside the plot panel (npc coordinates)
fn_survival <- function(data,title,color,group,sur_name,xlab,result_path,h,w,lx=0.8,ly=0.6){
  print("survival")
  library(survival)
  library(survminer)
  # Per-group KM fit plus a log-rank test for a group difference.
  fit <- survfit(survival::Surv(time, status) ~ group, data = data, na.action = na.exclude)
  diff <- survdiff(survival::Surv(time, status) ~ group, data = data, na.action = na.exclude)
  # Log-rank p-value from the chi-square statistic (df = #groups - 1).
  kmp <- 1 - pchisq(diff$chisq, df = length(levels(as.factor(data$group))) - 1)
  # legend <- data.frame(group=paste("C",sort(unique(data$group)),sep=""),n=fit$n)
  # legend %>%
  #   dplyr::mutate(
  #     label = purrr::map2(
  #       .x = group,
  #       .y = n,
  #       .f = function(.x,.y){
  #         latex2exp::TeX(glue::glue("<<.x>>, n = <<.y>>", .open = "<<", .close = ">>"))
  #       }
  #     )
  #   ) -> legend
  # Build "group, n=<count>" legend labels paired with their colours.
  color %>%
    dplyr::inner_join(data,by="group") %>%
    dplyr::group_by(group) %>%
    dplyr::mutate(n = n()) %>%
    dplyr::ungroup() %>%
    dplyr::mutate(group = paste(group,", n=",n,sep="")) %>%
    dplyr::select(group,color) %>%
    unique() -> color_paired
  # ggsurvplot() returns a list; [[1]] is the underlying ggplot, which we
  # restyle with the manual colour scale before saving.
  survminer::ggsurvplot(fit,pval=F, #pval.method = T,
                        data = data,
                        surv.median.line = "hv",
                        title = paste(title,", p =", signif(kmp, 2)), # change it when doing diff data
                        xlab = xlab,
                        ylab = 'Probability of survival',
                        # legend.title = "Methyla group:",
                        legend= c(lx,ly),
                        # ggtheme = theme_survminer(),
                        ggtheme = theme(
                          panel.border = element_blank(), panel.grid.major = element_blank(),
                          panel.grid.minor = element_blank(), axis.line = element_line(colour = "black",
                                                                                      size = 0.5),
                          panel.background = element_rect(fill = "white"),
                          legend.key = element_blank(),
                          legend.background = element_blank(),
                          legend.text = element_text(size = 8),
                          axis.text = element_text(size = 12),
                          legend.title = element_blank(),
                          axis.title = element_text(size = 12,color = "black"),
                          text = element_text(color = "black" )
                        )
  )[[1]] +
    scale_color_manual(
      values = color_paired$color,
      labels = color_paired$group
    ) -> p
  ggsave(filename = paste(sur_name,signif(kmp, 2),"png",sep = "."), plot = p, path = result_path,device = "png",height = h,width = w)
  ggsave(filename = paste(sur_name,signif(kmp, 2),"pdf",sep = "."), plot = p, path = result_path,device = "pdf",height = h,width = w)
}
## fn TIL compare
## Compare a continuous immune measure (e.g. TIMER TIL score) between sample
## groups with violin/box plots, faceted by `facet`, saved as TIFF + PDF.
##
## group:       data frame with barcode and an integer `group` column
## data:        measure table with barcode, the `value` column and the facet column
## value:       name of the measure column in `data`
## facet:       name of the faceting column -- NOTE(review): the per-group count
##              labels hardcode "Cell_type"; confirm for other facets
## C:           number of clusters, used only in the output file name
##
## Does nothing when fewer than two groups are present.
fn_compare <- function(group, cancer_types, data, value, facet, ylab, title, data_type, result_path,C, h, w){
  print("Timer")
  group %>%
    dplyr::inner_join(data, by = "barcode") %>%
    dplyr::rename("value" = value) -> plot_ready
  # Fixed palette for up to five groups; keep only the groups present.
  color_paired <- tibble::tibble(group = c(1:5),
                                 color = c("pink1", "skyblue1", "darkseagreen1", "darkgreen", "dodgerblue4")) %>%
    dplyr::inner_join(plot_ready, by = "group") %>%
    dplyr::select(group, color) %>%
    unique()
  # Per-group/facet sample sizes, drawn just below the lowest value.
  anno_text <- plot_ready %>%
    dplyr::group_by(group,Cell_type) %>%
    dplyr::mutate(n = n(), y = min(value) - max(value)*0.05) %>%
    dplyr::select(group,Cell_type,n, y) %>%
    unique() %>%
    dplyr::ungroup()
  if (nrow(color_paired) >= 2) {
    # Keep an explicit handle on the plot: inside a function the plot is
    # never printed, so ggsave() without `plot =` would save ggplot2's last
    # *displayed* plot, not this one.
    p <- plot_ready %>%
      dplyr::arrange(group) %>%
      ggpubr::ggboxplot(x = "group", y = "value", fill = "white",alpha = 0,width = 0.1,
                        color = "group"
      ) +
      geom_violin(aes(fill = group),alpha = 0.5) +
      geom_boxplot(fill = "white",alpha = 0,width = 0.1) +
      geom_text(aes(x = group,y = y,label = paste("n=",n)), data = anno_text, size = 2) +
      facet_wrap(as.formula(paste("~",facet)), strip.position = "bottom", scales = "free") +
      scale_color_manual(
        values = color_paired$color
      ) +
      scale_fill_manual(
        values = color_paired$color
      ) +
      labs(y = ylab, x = "Cluster", title = title) +
      theme(legend.position = "none",
            axis.title = element_text(colour = "black"),
            strip.background = element_rect(fill = "white",colour = "white"),
            text = element_text(size = 12, colour = "black")) +
      ggpubr::stat_compare_means(method = "anova")
    fig_name <- paste("C",C,cancer_types, sep = "_")
    ggsave(filename = paste(fig_name,"tiff",sep = "."), plot = p, path = result_path,device = "tiff",width = w,height = h)
    ggsave(filename = paste(fig_name,"pdf",sep = "."), plot = p, path = result_path,device = "pdf",width = w,height = h)
  }
}
## Violin/box plot of (log2) mutation burden across sample groups, saved as
## PNG and PDF under `result_path`.
##
## data:   data frame containing the grouping and value columns
## group:  name of the grouping column (string, forwarded to ggboxplot)
## value:  name of the value column (string)
## color:  unused; kept for backward compatibility with existing callers
## xlab:   label for the value axis (groups are on x, values on y)
fn_mutation_burden_all <- function(data,group,value,color,xlab,m_a_name,result_path,w=4,h=3){
  print("mutation burden")
  # Keep an explicit handle on the plot: inside a function nothing is
  # auto-printed, so ggsave() without `plot =` would save ggplot2's last
  # *displayed* plot instead of this one.
  p <- data %>%
    ggpubr::ggboxplot(x = group, y = value,fill="white",alpha = 0,width = 0.1,
                      color = group
    ) +
    geom_violin(aes(fill = group),alpha = 0.5) +
    geom_jitter(aes(color = group),alpha = 0.2,width = 0.1,size=0.1) +
    geom_boxplot(fill="white",alpha = 0,width = 0.1) +
    scale_x_discrete(labels = unique(data$group)) +
    ylab(xlab) +
    xlab("Group") +
    theme(legend.position = "none",
          strip.background = element_rect(fill = "white",colour = "white"),
          text = element_text(size = 12, colour = "black"),
          strip.text = element_text(size = 12))+
    ggpubr::stat_compare_means(method = "anova")
  ggsave(filename =paste(m_a_name,"png",sep="."), plot = p, path = result_path,device = "png",width = w,height = h)
  ggsave(filename =paste(m_a_name,"pdf",sep="."), plot = p, path = result_path,device = "pdf",width = w,height = h)
}
|
#shinyServer(
# Shiny server: wires up the module sub-servers and the app-close handler.
function(input, output) {
  ##########----------##########----------##########
  options(digits=6) # print numeric results with 6 significant digits
  # Each module file is evaluated in this function's environment
  # (local=TRUE) so it can see input/output and register its own outputs;
  # $value returns the last expression of the sourced file.
  source("server_N.R", local=TRUE)$value
  source("server_E.R", local=TRUE)$value
  source("server_G.R", local=TRUE)$value
  source("server_B.R", local=TRUE)$value
  source("server_T.R", local=TRUE)$value
  source("server_Chi.R", local=TRUE)$value
  source("server_F.R", local=TRUE)$value
  source("server_data.R", local=TRUE)$value
  ##########----------##########----------##########
  # Close button: terminate the app once it has been pressed at least once.
  observe({
    if (input$close > 0) stopApp() # stop shiny
  })
}
#)
| /1_1MFScondist-new/server.R | permissive | ricciardi/mephas_web | R | false | false | 595 | r |
#shinyServer(
# Shiny server: wires up the module sub-servers and the app-close handler.
function(input, output) {
  ##########----------##########----------##########
  options(digits=6) # print numeric results with 6 significant digits
  # Each module file is evaluated in this function's environment
  # (local=TRUE) so it can see input/output and register its own outputs;
  # $value returns the last expression of the sourced file.
  source("server_N.R", local=TRUE)$value
  source("server_E.R", local=TRUE)$value
  source("server_G.R", local=TRUE)$value
  source("server_B.R", local=TRUE)$value
  source("server_T.R", local=TRUE)$value
  source("server_Chi.R", local=TRUE)$value
  source("server_F.R", local=TRUE)$value
  source("server_data.R", local=TRUE)$value
  ##########----------##########----------##########
  # Close button: terminate the app once it has been pressed at least once.
  observe({
    if (input$close > 0) stopApp() # stop shiny
  })
}
#)
|
# Exploratory analysis of forecast simulations (interactive script).
# NOTE(review): relies on objects and functions from the wider project
# session -- `simulacoes`, `vetor.maturidade`, `vetor.intervalos`,
# TabelaEQM(), PlotarSeriesRw(), Tabelao(), CombinarPrevisoes(), etc. --
# none of which are defined in this file.
# Build a tiny toy simulation list with two replicates for quick checks.
tmp.simulacoes <- vector("list",0)
tmp.simulacoes[["1"]] <- list(valores.reais=rep(0,50),rw=rep(c(0,1),25),t1=rnorm(50,0,0.3),t2=rchisq(50,1,0),t3=rep(0.4,100))
tmp.simulacoes[["2"]] <- list(valores.reais=rep(0,50),rw=rep(c(0,1),25),t1=rnorm(50,0,0.3),t2=rchisq(50,1,0),t3=rep(0.4,100))
# NOTE(review): `tabela` is referenced here before it is created below --
# this only works if a `tabela` object already exists in the session.
tab.1 <- tabela
# Overall results ---------------------------------------------------------
intervalo <- c(1,50)
tabela <- TabelaEQM(tmp.simulacoes,vetor.maturidade=c(1,2),horizonte=1,subconjunto=intervalo)
PlotarSeriesRw(tmp.simulacoes,maturidade=6,horizonte=1,subconjunto=intervalo)
View(tabela)
intervalo <- c(1,50)
tabela <- TabelaEQM(simulacoes,vetor.maturidade,horizonte=1,subconjunto=intervalo)
PlotarSeriesRw(simulacoes,maturidade=6,horizonte=1,subconjunto=intervalo)
View(tabela)
PlotarSeries(simulacoes,maturidade=3,horizonte=3,subconjunto=c(1,20))
heatmap(tabela,scale="row", Colv=NA, Rowv=NA)
tabela <- TabelaEQM(simulacoes,vetor.maturidade,horizonte=12)
heatmap(tabela,scale="row", Colv=NA, Rowv=NA)
View(tabela)
PlotarSeries(simulacoes,maturidade=1,horizonte=3,subconjunto=c(1,20))
# Grouped tables at increasing horizons, normalised by column 3.
tabelao.agrupado <- TabelaoAgrupado(simulacoes,vetor.maturidade,horizonte=5,vetor.intervalos)
View(tabelao.agrupado); rowSums(tabelao.agrupado)
tabelao.agrupado <- tabelao.agrupado/tabelao.agrupado[, 3]
heatmap(tabelao.agrupado[c(-8,-9), ], scale="column", Colv=NA, Rowv=NA) # drop the estimators that remove nothing
tabelao.agrupado <- TabelaoAgrupado(simulacoes,vetor.maturidade,horizonte=25,vetor.intervalos)
View(tabelao.agrupado); rowSums(tabelao.agrupado)
heatmap(tabelao.agrupado[c(-8,-9), ], scale="column", Colv=NA, Rowv=NA) # drop the estimators that remove nothing
tabelao.agrupado <- TabelaoAgrupado(simulacoes,vetor.maturidade,horizonte=50,vetor.intervalos)
View(tabelao.agrupado); rowSums(tabelao.agrupado)
heatmap(tabelao.agrupado[c(-8,-9), ], scale="column", Colv=NA, Rowv=NA) # drop the estimators that remove nothing
GiacominiWhite(simulacoes,benchmarking=3,valor.real=12,tamanho=800,horizonte=25,vetor.maturidade,vetor.intervalos)
# Detailed analysis ----------------------------------------------------
# inspect any item (heatmap and table)
tabelao <- Tabelao(simulacoes,vetor.maturidade,vetor.horizontes=c(5),vetor.intervalos)
tabelao <- tabelao[, c(-8,-9)] # drop the estimators that remove nothing, since their results are very poor
require(stringr)
par.ordenar <- 1; tabelao <- tabelao[order(str_split_fixed(rownames(tabelao), "[|]",3)[, par.ordenar]), ] # par.ordenar selects which of the 3 fields the estimations are sorted by
tabelao <- CombinarPrevisoes(tabelao,"crt.p5.rmat6","rw")
# tabelao <- CombinarPrevisoes(tabelao,"corte.deriv.1","rw")
heatmap(tabelao, scale="row",Rowv=NA) # scale="row",Rowv=NA
require(lattice)
levelplot(tabelao, scale=list(x=list(rot=45)))
summary(tabelao)
View(tabelao)
| /Analise_dados.r | no_license | mcruas/npfda | R | false | false | 2,860 | r | tmp.simulacoes <- vector("list",0)
# (Duplicate copy of the exploratory analysis script; same session
# dependencies as above: `simulacoes`, `vetor.maturidade`,
# `vetor.intervalos`, TabelaEQM(), Tabelao(), etc.)
tmp.simulacoes[["1"]] <- list(valores.reais=rep(0,50),rw=rep(c(0,1),25),t1=rnorm(50,0,0.3),t2=rchisq(50,1,0),t3=rep(0.4,100))
tmp.simulacoes[["2"]] <- list(valores.reais=rep(0,50),rw=rep(c(0,1),25),t1=rnorm(50,0,0.3),t2=rchisq(50,1,0),t3=rep(0.4,100))
# NOTE(review): `tabela` is referenced here before it is created below --
# this only works if a `tabela` object already exists in the session.
tab.1 <- tabela
# Overall results ---------------------------------------------------------
intervalo <- c(1,50)
tabela <- TabelaEQM(tmp.simulacoes,vetor.maturidade=c(1,2),horizonte=1,subconjunto=intervalo)
PlotarSeriesRw(tmp.simulacoes,maturidade=6,horizonte=1,subconjunto=intervalo)
View(tabela)
intervalo <- c(1,50)
tabela <- TabelaEQM(simulacoes,vetor.maturidade,horizonte=1,subconjunto=intervalo)
PlotarSeriesRw(simulacoes,maturidade=6,horizonte=1,subconjunto=intervalo)
View(tabela)
PlotarSeries(simulacoes,maturidade=3,horizonte=3,subconjunto=c(1,20))
heatmap(tabela,scale="row", Colv=NA, Rowv=NA)
tabela <- TabelaEQM(simulacoes,vetor.maturidade,horizonte=12)
heatmap(tabela,scale="row", Colv=NA, Rowv=NA)
View(tabela)
PlotarSeries(simulacoes,maturidade=1,horizonte=3,subconjunto=c(1,20))
# Grouped tables at increasing horizons, normalised by column 3.
tabelao.agrupado <- TabelaoAgrupado(simulacoes,vetor.maturidade,horizonte=5,vetor.intervalos)
View(tabelao.agrupado); rowSums(tabelao.agrupado)
tabelao.agrupado <- tabelao.agrupado/tabelao.agrupado[, 3]
heatmap(tabelao.agrupado[c(-8,-9), ], scale="column", Colv=NA, Rowv=NA) # drop the estimators that remove nothing
tabelao.agrupado <- TabelaoAgrupado(simulacoes,vetor.maturidade,horizonte=25,vetor.intervalos)
View(tabelao.agrupado); rowSums(tabelao.agrupado)
heatmap(tabelao.agrupado[c(-8,-9), ], scale="column", Colv=NA, Rowv=NA) # drop the estimators that remove nothing
tabelao.agrupado <- TabelaoAgrupado(simulacoes,vetor.maturidade,horizonte=50,vetor.intervalos)
View(tabelao.agrupado); rowSums(tabelao.agrupado)
heatmap(tabelao.agrupado[c(-8,-9), ], scale="column", Colv=NA, Rowv=NA) # drop the estimators that remove nothing
GiacominiWhite(simulacoes,benchmarking=3,valor.real=12,tamanho=800,horizonte=25,vetor.maturidade,vetor.intervalos)
# Detailed analysis ----------------------------------------------------
# inspect any item (heatmap and table)
tabelao <- Tabelao(simulacoes,vetor.maturidade,vetor.horizontes=c(5),vetor.intervalos)
tabelao <- tabelao[, c(-8,-9)] # drop the estimators that remove nothing, since their results are very poor
require(stringr)
par.ordenar <- 1; tabelao <- tabelao[order(str_split_fixed(rownames(tabelao), "[|]",3)[, par.ordenar]), ] # par.ordenar selects which of the 3 fields the estimations are sorted by
tabelao <- CombinarPrevisoes(tabelao,"crt.p5.rmat6","rw")
# tabelao <- CombinarPrevisoes(tabelao,"corte.deriv.1","rw")
heatmap(tabelao, scale="row",Rowv=NA) # scale="row",Rowv=NA
require(lattice)
levelplot(tabelao, scale=list(x=list(rot=45)))
summary(tabelao)
View(tabelao)
|
readInHead = function(name, picDir){
pic = paste(picDir,name,sep = "/")
img = readImage(pic)
return(img)
} | /lib/readInHead.R | no_license | TZstatsADS/Spr2016-Proj3-Grp2 | R | false | false | 112 | r | readInHead = function(name, picDir){
pic = paste(picDir,name,sep = "/")
img = readImage(pic)
return(img)
} |
library("ggplot2")
library("dplyr")
library("reshape2")
## Figure S2 - Distribution of intervening genes separating interacting pairs.
# Input tables, one per figure panel; each has two columns
# (data source, number of intervening genes).
incomp1 <- "operon_assembly/figures/data/figs2a.txt"
therm_comp1 <- "operon_assembly/figures/data/figs2c.txt"
ex_therm_comp1 <- "operon_assembly/figures/data/figs2b.txt"
y2h <- "operon_assembly/figures/data/figs2d.txt"
df.x.ic1 <- read.table(incomp1)
df.x.tc1 <- read.table(therm_comp1)
df.x.etc1 <- read.table(ex_therm_comp1)
df.x.y2h <- read.table(y2h)
# Lots of data reshaping
# Reshape a raw two-column (data, intervening) table into long format with
# one row per (intervening-gene count, data source), ready for a dodged
# bar chart.
reshape.figxdata <- function(df){
  colnames(df) <- c("data", "intervening")
  df <- dcast(df, intervening~data)
  # presumably averaging the expected counts over 1000 randomizations --
  # TODO confirm against the data-generation code
  df$expected <- df$expected/1000
  df <- melt(df, id.var = "intervening")
  # Reverse the factor level order so the bar/legend ordering matches the
  # intended figure layout.
  df$variable <- factor(df$variable,
                        levels = rev(levels(factor(df$variable))))
  df <- rename(df, data = variable)
  return(df)
}
# Reshape each panel's raw table into long plotting format.
df.x.ic1 <- reshape.figxdata(df.x.ic1)
df.x.tc1 <- reshape.figxdata(df.x.tc1)
df.x.etc1 <- reshape.figxdata(df.x.etc1)
df.x.y2h <- reshape.figxdata(df.x.y2h)
# Generate plots
# Base bar chart shared by all four panels: number of physically
# interacting pairs per count of intervening genes, with the data sources
# (e.g. observed vs expected) dodged side by side.
figx.plot <- function(df){
  figx <- ggplot(df, aes(x = intervening, y=value, fill=data)) +
    geom_bar(stat = "identity", position = "dodge") +
    xlab("Number of intervening genes between interacting pairs") +
    ylab("Number of physically interacting pairs") +
    scale_x_continuous(breaks = seq(0, 14, 2)) +
    theme(text = element_text(size = 6),
          legend.title = element_blank(),
          legend.key.size = unit(0.4, "cm"),
          legend.justification = 'right',
          legend.position=c(1, 0.9))
  return(figx)
}
# Build the four panels, each with its own y-axis breaks and an annotated
# p-value (parse = TRUE renders the italic P via plotmath).
figx.ic1 <- figx.plot(df.x.ic1) +
  scale_y_continuous(breaks = seq(0, 200, 25)) +
  annotate("text", x = 7, y = 190,
           label = paste(expression(italic("P")), "< 10^-5"),
           size = 4, parse = TRUE)
figx.tc1 <- figx.plot(df.x.tc1) +
  scale_y_continuous(breaks = seq(0, 12, 2)) +
  annotate("text", x = 4.9, y = 9.5,
           label = paste(expression(italic("P"))),
           size = 4, parse = TRUE) +
  annotate("text", x = 7.3, y = 9.5,
           label = "< 0.0004",
           size = 4) # Ugly hack to prevent ggplot converting to e-4
figx.etc1 <- figx.plot(df.x.etc1) +
  scale_y_continuous(breaks = seq(0, 200, 25)) +
  annotate("text", x = 7, y = 175,
           label = paste(expression(italic("P")), "< 10^-5"),
           size = 4, parse = TRUE)
figx.y2h <- figx.plot(df.x.y2h) +
  scale_y_continuous(breaks = seq(0, 50, 5)) +
  annotate("text", x = 7, y = 42,
           label = paste(expression(italic("P")), "< 0.003"),
           size = 4, parse = TRUE)
# Display plots - save at 3.5 x 3.5 inches
figx.ic1
figx.tc1
figx.etc1
figx.y2h
| /figures/src/figS2.R | permissive | jonathan-wells/operon-complexes | R | false | false | 2,660 | r | library("ggplot2")
library("dplyr")
library("reshape2")
## Figure S2 - Distribution of intervening genes separating interacting pairs.
incomp1 <- "operon_assembly/figures/data/figs2a.txt"
therm_comp1 <- "operon_assembly/figures/data/figs2c.txt"
ex_therm_comp1 <- "operon_assembly/figures/data/figs2b.txt"
y2h <- "operon_assembly/figures/data/figs2d.txt"
df.x.ic1 <- read.table(incomp1)
df.x.tc1 <- read.table(therm_comp1)
df.x.etc1 <- read.table(ex_therm_comp1)
df.x.y2h <- read.table(y2h)
# Lots of data reshaping
# Reshape a raw two-column (data, intervening) table into long format with
# one row per (intervening-gene count, data source), ready for a dodged
# bar chart.
reshape.figxdata <- function(df){
  colnames(df) <- c("data", "intervening")
  df <- dcast(df, intervening~data)
  # presumably averaging the expected counts over 1000 randomizations --
  # TODO confirm against the data-generation code
  df$expected <- df$expected/1000
  df <- melt(df, id.var = "intervening")
  # Reverse the factor level order so the bar/legend ordering matches the
  # intended figure layout.
  df$variable <- factor(df$variable,
                        levels = rev(levels(factor(df$variable))))
  df <- rename(df, data = variable)
  return(df)
}
df.x.ic1 <- reshape.figxdata(df.x.ic1)
df.x.tc1 <- reshape.figxdata(df.x.tc1)
df.x.etc1 <- reshape.figxdata(df.x.etc1)
df.x.y2h <- reshape.figxdata(df.x.y2h)
# Generate plots
# Base bar chart shared by all four panels: number of physically
# interacting pairs per count of intervening genes, with the data sources
# (e.g. observed vs expected) dodged side by side.
figx.plot <- function(df){
  figx <- ggplot(df, aes(x = intervening, y=value, fill=data)) +
    geom_bar(stat = "identity", position = "dodge") +
    xlab("Number of intervening genes between interacting pairs") +
    ylab("Number of physically interacting pairs") +
    scale_x_continuous(breaks = seq(0, 14, 2)) +
    theme(text = element_text(size = 6),
          legend.title = element_blank(),
          legend.key.size = unit(0.4, "cm"),
          legend.justification = 'right',
          legend.position=c(1, 0.9))
  return(figx)
}
figx.ic1 <- figx.plot(df.x.ic1) +
scale_y_continuous(breaks = seq(0, 200, 25)) +
annotate("text", x = 7, y = 190,
label = paste(expression(italic("P")), "< 10^-5"),
size = 4, parse = TRUE)
figx.tc1 <- figx.plot(df.x.tc1) +
scale_y_continuous(breaks = seq(0, 12, 2)) +
annotate("text", x = 4.9, y = 9.5,
label = paste(expression(italic("P"))),
size = 4, parse = TRUE) +
annotate("text", x = 7.3, y = 9.5,
label = "< 0.0004",
size = 4) # Ugly hack to prevent ggplot converting to e-4
figx.etc1 <- figx.plot(df.x.etc1) +
scale_y_continuous(breaks = seq(0, 200, 25)) +
annotate("text", x = 7, y = 175,
label = paste(expression(italic("P")), "< 10^-5"),
size = 4, parse = TRUE)
figx.y2h <- figx.plot(df.x.y2h) +
scale_y_continuous(breaks = seq(0, 50, 5)) +
annotate("text", x = 7, y = 42,
label = paste(expression(italic("P")), "< 0.003"),
size = 4, parse = TRUE)
# Display plots - save at 3.5 x 3.5 inches
figx.ic1
figx.tc1
figx.etc1
figx.y2h
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Attribute.R
\name{cell_val_num,tiledb_attr-method}
\alias{cell_val_num,tiledb_attr-method}
\title{Return the number of scalar values per attribute cell}
\usage{
\S4method{cell_val_num}{tiledb_attr}(object)
}
\arguments{
\item{object}{\code{tiledb_attr} object}
}
\value{
integer number of cells
}
\description{
Return the number of scalar values per attribute cell
}
\examples{
a1 <- tiledb_attr("a1", type = "FLOAT64", ncells = 1)
cell_val_num(a1)
}
| /man/cell_val_num-tiledb_attr-method.Rd | permissive | aaronwolen/TileDB-R | R | false | true | 530 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Attribute.R
\name{cell_val_num,tiledb_attr-method}
\alias{cell_val_num,tiledb_attr-method}
\title{Return the number of scalar values per attribute cell}
\usage{
\S4method{cell_val_num}{tiledb_attr}(object)
}
\arguments{
\item{object}{\code{tiledb_attr} object}
}
\value{
integer number of cells
}
\description{
Return the number of scalar values per attribute cell
}
\examples{
a1 <- tiledb_attr("a1", type = "FLOAT64", ncells = 1)
cell_val_num(a1)
}
|
test_completeness <- function(dataset,
tax = "Taxon.ID",
age = "Age..Ma..Gradstein.et.al..2012",
bin.width = 0.1){
dataset$age <- dataset[, age]
dataset <- dataset[dataset$age <= 66, ]
dataset <- droplevels(dataset[!is.na(dataset$site), ])
bins<-seq(0,66,bin.width)
# for each bin width
dataset$age.bin <- cut(dataset$age, breaks = bins)
age.bins <- levels(dataset$age.bin)
mid.points <- cbind(age.bins,
as.numeric(substr(age.bins, regexpr(",", age.bins)+1,
nchar(age.bins)-1)) - 0.5*(bins[2]-bins[1]))
dataset$mid.point <- mid.points[match(dataset$age.bin, age.bins), 2]
dataset$mid.point <- as.numeric(dataset$mid.point)
# by site
dd.list <- lapply(split(dataset, f=dataset$site), function(x){
print(as.character(x$site[1]))
if(length(unique(x$mid.point)) < 3){
return(data.frame(site = x$site[1],
samp.prob = NA))
}
temp.dd <- divDyn(x, tax = tax, bin = "mid.point", revtime = TRUE)
agg.completeness <- data.frame(site = x$site[1],
samp.prob = sum(temp.dd$t3, na.rm=T) / (sum(temp.dd$t3, na.rm=T) +
sum(temp.dd$tPart, na.rm=T)))
return(list(temp.dd, agg.completeness))
})
site.dd <- do.call("rbind", lapply(dd.list, function(x){x[[2]]}))
raw.dd <- lapply(dd.list, function(x){x[[1]]})
return(list(site.dd = site.dd,
raw.dd = raw.dd))
} | /functions/test_completeness.R | no_license | TimothyStaples/novelty-cenozoic-microplankton | R | false | false | 1,618 | r | test_completeness <- function(dataset,
tax = "Taxon.ID",
age = "Age..Ma..Gradstein.et.al..2012",
bin.width = 0.1){
dataset$age <- dataset[, age]
dataset <- dataset[dataset$age <= 66, ]
dataset <- droplevels(dataset[!is.na(dataset$site), ])
bins<-seq(0,66,bin.width)
# for each bin width
dataset$age.bin <- cut(dataset$age, breaks = bins)
age.bins <- levels(dataset$age.bin)
mid.points <- cbind(age.bins,
as.numeric(substr(age.bins, regexpr(",", age.bins)+1,
nchar(age.bins)-1)) - 0.5*(bins[2]-bins[1]))
dataset$mid.point <- mid.points[match(dataset$age.bin, age.bins), 2]
dataset$mid.point <- as.numeric(dataset$mid.point)
# by site
dd.list <- lapply(split(dataset, f=dataset$site), function(x){
print(as.character(x$site[1]))
if(length(unique(x$mid.point)) < 3){
return(data.frame(site = x$site[1],
samp.prob = NA))
}
temp.dd <- divDyn(x, tax = tax, bin = "mid.point", revtime = TRUE)
agg.completeness <- data.frame(site = x$site[1],
samp.prob = sum(temp.dd$t3, na.rm=T) / (sum(temp.dd$t3, na.rm=T) +
sum(temp.dd$tPart, na.rm=T)))
return(list(temp.dd, agg.completeness))
})
site.dd <- do.call("rbind", lapply(dd.list, function(x){x[[2]]}))
raw.dd <- lapply(dd.list, function(x){x[[1]]})
return(list(site.dd = site.dd,
raw.dd = raw.dd))
} |
library(R2jags)
setwd("~/Documents/JAGS")
# Logit link: map a probability in (0, 1) onto the whole real line.
logit <- function(x) {
  log(x / (1 - x))
}
# Inverse logit: map a real number back to a probability in (0, 1).
revlogit <- function(x) {
  exp(x) / (1 + exp(x))
}
#################################################################
## Kery & Schaub (2012), Ch 7 mark-recap, CJS with time covariate
#################################################################
##########################simulate data where survival is a (logit) linear function of precipitation & p is a function of sex
# Define parameter values
n.occasions=20 #number of capture occasions
marked=rep(20,n.occasions-1) #annual number of newly marked indiv
Prcp=rnorm(n.occasions-1,0,1) #simulate normalized precipitation data
alpha0=0.8 #intercept coefficent for how Prcp affects phi
alpha1=1.2 #slope coefficient for how prcp affects phi
phi=revlogit(alpha0+alpha1*Prcp) #<------ phi is a function of Prcp
sex=sample(c(rep(1,sum(marked)/2),rep(0,sum(marked)/2))) #randomly assign sex to each indiv with 50/50 prob , 1=male, 0=female
p.male=rep(0.6,n.occasions-1) # prob of capture for males (not time varying)
p.female=rep(0.4,n.occasions-1) # prob of capture for females
#gamma0=-0.63
#gamma1=1.02
#p=revlogit(gamma0+gamma1*sex) # then can change the specification below - get rid of loop
#define matrices with survival and recap probs
PHI=matrix(phi,ncol=n.occasions-1,nrow=sum(marked),byrow=TRUE)
P=matrix(NA,ncol=n.occasions-1,nrow=sum(marked))
for(i in 1:sum(marked)){
if(sex[i]==1){
P[i,]=p.male}
if(sex[i]==0){
P[i,]=p.female
}
}
#define function to simulate a catpure history matrix (CH)
# Simulate 0/1 capture histories under a Cormack-Jolly-Seber model.
#
# PHI:    survival probabilities, individuals x (occasions - 1)
# P:      recapture probabilities, individuals x (occasions - 1)
# marked: number of newly marked individuals per release occasion
#
# Returns a sum(marked) x n.occasions capture-history matrix.
simul.cjs <- function(PHI, P, marked){
  # Bug fix: the original assigned the misspelled local `n.ocassions` and
  # then read `n.occasions`, silently resolving to a *global* of that name.
  # Derive the value locally from PHI instead.
  n.occasions <- dim(PHI)[2] + 1
  CH <- matrix(0, ncol = n.occasions, nrow = sum(marked))
  # occasion at which each individual is marked and released
  mark.occ <- rep(seq_along(marked), marked)
  # fill the CH matrix
  for(i in seq_len(sum(marked))){
    CH[i, mark.occ[i]] <- 1 # put a 1 at the release occasion
    if(mark.occ[i] == n.occasions) next # marked on the final occasion: nothing to simulate
    for(t in (mark.occ[i] + 1):n.occasions){
      # Bernoulli trial: does the individual survive the interval?
      sur <- rbinom(1, 1, PHI[i, t - 1])
      if(sur == 0) break # dead; move on to the next individual
      # Bernoulli trial: is the surviving individual recaptured?
      rp <- rbinom(1, 1, P[i, t - 1])
      if(rp == 1) CH[i, t] <- 1
    } #t
  } #i
  return(CH)
}
# Simulate one dataset under the parameters defined above.
CH=simul.cjs(PHI,P,marked)
########################## code up model
# create vector with occasion of marking (first seen)
get.first=function(x) min(which(x!=0))
f=apply(CH,1,get.first)
#specify model in BUGS language
# Write the BUGS/JAGS model definition to "cjs-Prcp-sex.bug".
# Everything between cat(" and ") below is a single string literal that
# becomes the model file verbatim -- do not edit its text casually.
sink("cjs-Prcp-sex.bug")
cat(" ######<--------------------- uncomment
model{
###############Priors and constraints (specifiying the model)
for(i in 1:nind){
for(t in f[i]:(n.occasions-1)){
logit(phi[i,t])<-alpha0 + alpha1 * Prcp[t]  ### do i want this to be t-1?
logit(p[i,t])<-gamma0 + gamma1 * sex[i]  ### need to set up indicator sex?
} #t
} #i
alpha0~dunif(-5,5) #prior for alpha0 (intercept coeff for effect of Prcp on phi)
alpha1~dunif(-5,5) #prior for alpha1 (slope)
gamma0~dunif(-5,5) # prior for gamma0 (intercept for p) - revlogit(p male?)
gamma1~dunif(-5,5) # prior for gamma1 (slope for p)
#############Likelihood   ## shouldn't have to change this to run diff models
for(i in 1:nind){
#define latent state at first capture
z[i,f[i]]=1  # z is true (latent) state alive or dead, know alive at first capture
for(t in (f[i]+1):n.occasions){
#state process  # alive or dead
z[i,t]~dbern(mu1[i,t])   #mu1 is probability alive
mu1[i,t]=phi[i,t-1]*z[i,t-1]  # prob alive = survival prob * if it was alive last time (if wasn't alive then multiplying by zero, so this assures that animals that are dead stay dead)
#observation process  # caught or not
y[i,t]~dbern(mu2[i,t])  # mu2 is prob of capture
mu2[i,t]=p[i,t-1]*z[i,t]  # capture prob= p * if it was alive
} #t
} #i
}
",fill=TRUE)  #####<----------------uncomment this
sink()
# Known values of the latent alive state z from a capture history:
# an individual is certainly alive between its first and last capture.
# The first capture itself and every uncertain occasion stay NA.
known.state.cjs <- function(ch){
  state <- ch
  for(ind in 1:dim(ch)[1]){
    seen <- which(ch[ind, ] == 1)
    first.cap <- min(seen)
    last.cap <- max(seen)
    state[ind, first.cap:last.cap] <- 1
    # only filling in those that were 0s but we know were alive because
    # caught before and after; the first capture is conditioned on, so NA
    state[ind, first.cap] <- NA
  }
  state[state == 0] <- NA
  return(state)
}
##### Bundle data
# Everything JAGS needs: observations y, first-capture occasions f, sizes,
# the known latent states z, and the two covariates (Prcp, sex).
bugs.data=list(y=CH,f=f,nind=dim(CH)[1],n.occasions=dim(CH)[2],z=known.state.cjs(CH),Prcp=Prcp,sex=sex)
### we shouldn't give initial values for those elements of z whose value is specified in the data, they get an NA
#function to create matrix of initial values for latent state z
cjs.init.z=function(ch,f){
for(i in 1:dim(ch)[1]){
if(sum(ch[i,])==1) next
n2=max(which(ch[i,]==1))
ch[i,f[i]:n2]=NA
}
for(i in 1:dim(ch)[1]){
ch[i,1:f[i]]=NA
}
return(ch)
}
#initial values
inits=function(){list(z=cjs.init.z(CH,f),alpha0=runif(1,-5,5),alpha1=runif(1,-5,5),gamma0=runif(1,-5,5),gamma1=runif(1,-5,5))}
#parameters monitored
parameters=c("alpha0","alpha1","gamma0","gamma1")
#MCMCsettings
ni=10000
nt=6
nb=5000
nc=3
date()
## Call JAGS from R
cjs.Prcp.sex=jags(bugs.data,inits,parameters,"cjs-Prcp-sex.bug",n.chains=nc,n.thin=nt,n.iter=ni,n.burnin=nb)
date() #tell how long it ran
#sumarize posteriors
print(cjs.Prcp.sex,digits=3) #does ok
traceplot(cjs.Prcp.sex)
#test test test
| /SimulatedData/CJSModelwPrcpSex.R | no_license | angieluis/BayesianMarkRecapSNV | R | false | false | 5,244 | r | library(R2jags)
setwd("~/Documents/JAGS")
logit=function(x){
log(x/(1-x))}
revlogit=function(x){
exp(x)/(1+exp(x))}
#################################################################
## Kery & Schaub (2012), Ch 7 mark-recap, CJS with time covariate
#################################################################
##########################simulate data where survival is a (logit) linear function of precipitation & p is a function of sex
# Define parameter values
n.occasions=20 #number of capture occasions
marked=rep(20,n.occasions-1) #annual number of newly marked indiv
Prcp=rnorm(n.occasions-1,0,1) #simulate normalized precipitation data
alpha0=0.8 #intercept coefficent for how Prcp affects phi
alpha1=1.2 #slope coefficient for how prcp affects phi
phi=revlogit(alpha0+alpha1*Prcp) #<------ phi is a function of Prcp
sex=sample(c(rep(1,sum(marked)/2),rep(0,sum(marked)/2))) #randomly assign sex to each indiv with 50/50 prob , 1=male, 0=female
p.male=rep(0.6,n.occasions-1) # prob of capture for males (not time varying)
p.female=rep(0.4,n.occasions-1) # prob of capture for females
#gamma0=-0.63
#gamma1=1.02
#p=revlogit(gamma0+gamma1*sex) # then can change the specification below - get rid of loop
#define matrices with survival and recap probs
PHI=matrix(phi,ncol=n.occasions-1,nrow=sum(marked),byrow=TRUE)
P=matrix(NA,ncol=n.occasions-1,nrow=sum(marked))
for(i in 1:sum(marked)){
if(sex[i]==1){
P[i,]=p.male}
if(sex[i]==0){
P[i,]=p.female
}
}
#define function to simulate a catpure history matrix (CH)
simul.cjs=function(PHI,P,marked){
n.ocassions=dim(PHI)[2]+1
CH=matrix(0,ncol=n.occasions,nrow=sum(marked))
#define a vactor with the occasion of marking
mark.occ=rep(1:length(marked),marked[1:length(marked)])
#fill the CH Matrix
for(i in 1:sum(marked)){
CH[i,mark.occ[i]]=1 #put a 1 at the release occasion
if(mark.occ[i]==n.occasions) next #starts next iter of loop if only caught once
for(t in (mark.occ[i]+1):n.occasions){
#Bernouli trial: does indiv survive?
sur=rbinom(1,1,PHI[i,t-1])
if(sur==0) break # if dead, move to next indiv
#Bernouli trial: is indiv recaptured?
rp=rbinom(1,1,P[i,t-1])
if(rp==1) CH[i,t]=1
} #t
} #i
return(CH)
}
CH=simul.cjs(PHI,P,marked)
########################## code up model
# create vector with occasion of marking (first seen)
get.first=function(x) min(which(x!=0))
f=apply(CH,1,get.first)
#specify model in BUGS language
sink("cjs-Prcp-sex.bug")
cat(" ######<--------------------- uncomment
model{
###############Priors and constraints (specifiying the model)
for(i in 1:nind){
for(t in f[i]:(n.occasions-1)){
logit(phi[i,t])<-alpha0 + alpha1 * Prcp[t] ### do i want this to be t-1?
logit(p[i,t])<-gamma0 + gamma1 * sex[i] ### need to set up indicator sex?
} #t
} #i
alpha0~dunif(-5,5) #prior for alpha0 (intercept coeff for effect of Prcp on phi)
alpha1~dunif(-5,5) #prior for alpha1 (slope)
gamma0~dunif(-5,5) # prior for gamma0 (intercept for p) - revlogit(p male?)
gamma1~dunif(-5,5) # prior for gamma1 (slope for p)
#############Likelihood ## shouldn't have to change this to run diff models
for(i in 1:nind){
#define latent state at first capture
z[i,f[i]]=1 # z is true (latent) state alive or dead, know alive at first capture
for(t in (f[i]+1):n.occasions){
#state process # alive or dead
z[i,t]~dbern(mu1[i,t]) #mu1 is probability alive
mu1[i,t]=phi[i,t-1]*z[i,t-1] # prob alive = survival prob * if it was alive last time (if wasn't alive then multiplying by zero, so this assures that animals that are dead stay dead)
#observation process # caught or not
y[i,t]~dbern(mu2[i,t]) # mu2 is prob of capture
mu2[i,t]=p[i,t-1]*z[i,t] # capture prob= p * if it was alive
} #t
} #i
}
",fill=TRUE) #####<----------------uncomment this
sink()
#function to create matrix with info about known latent state z
known.state.cjs=function(ch){
state=ch
for(i in 1:dim(ch)[1]){
n1=min(which(ch[i,]==1))
n2=max(which(ch[i,]==1))
state[i,n1:n2]=1
state[i,n1]=NA #only filling in those that were 0s but we know were alive because caught before and after
}
state[state==0]=NA
return(state)
}
##### Bundle data
bugs.data=list(y=CH,f=f,nind=dim(CH)[1],n.occasions=dim(CH)[2],z=known.state.cjs(CH),Prcp=Prcp,sex=sex)
### we shouldn't give initial values for those elements of z whose value is specified in the data, they get an NA
#function to create matrix of initial values for latent state z
cjs.init.z=function(ch,f){
for(i in 1:dim(ch)[1]){
if(sum(ch[i,])==1) next
n2=max(which(ch[i,]==1))
ch[i,f[i]:n2]=NA
}
for(i in 1:dim(ch)[1]){
ch[i,1:f[i]]=NA
}
return(ch)
}
#initial values
inits=function(){list(z=cjs.init.z(CH,f),alpha0=runif(1,-5,5),alpha1=runif(1,-5,5),gamma0=runif(1,-5,5),gamma1=runif(1,-5,5))}
#parameters monitored
parameters=c("alpha0","alpha1","gamma0","gamma1")
#MCMCsettings
ni=10000
nt=6
nb=5000
nc=3
date()
## Call JAGS from R
cjs.Prcp.sex=jags(bugs.data,inits,parameters,"cjs-Prcp-sex.bug",n.chains=nc,n.thin=nt,n.iter=ni,n.burnin=nb)
date() #tell how long it ran
#sumarize posteriors
print(cjs.Prcp.sex,digits=3) #does ok
traceplot(cjs.Prcp.sex)
#test test test
|
#### Preparation of RNAseq data ####
# This script includes some manipulations of the data that need to occur to have everything ready for analysis.
# i made a comment
# install packages
if (!requireNamespace("BiocManager", quietly = TRUE))
install.packages("BiocManager")
BiocManager::install("DESeq2")
install.packages("rprojroot")
# load packages
library(DESeq2)
library(rprojroot)
# set top level directory
proj <- find_root_file(criterion = has_file(".git/index"))
#### Prepare metadata ####
# copy original data files to project directory
file.copy(file.path(proj, "DataSets", "RNASeqData", "phenotypeTable.csv"), file.path(proj, "RNASeqProject", "Data", "phenotypeTable.csv"))
file.copy(file.path(proj, "DataSets", "RNASeqData", "molecularDataSets.csv"), file.path(proj, "RNASeqProject", "data", "molecularDataSets.csv"))
# import phenotype data
pheno_df <- read.csv(file.path(proj, "DataSets", "RNASeqData", "phenotypeTable.csv"),
header =TRUE)
# import information about molecular data
mol_df <- read.csv(file.path(proj, "DataSets", "RNASeqData", "molecularDataSets.csv"),
header =TRUE, sep=",")
# combine phenotype and molecular data into metadata
metadata_df <- dplyr::full_join(pheno_df, mol_df,
by = "assay_material_id")
# inspect output
head(metadata_df, n = 3)
# write metadata to file
write.csv(metadata_df, file = file.path(proj, "RNASeqProject", "Data", "metadata.csv"), row.names = FALSE)
#### Combine data files ####
# import metadata (if neceassary)
metadata_df <- read.csv(file.path(proj, "RNASeqProject", "Data", "metadata.csv"))
str(metadata_df)
# make list of directories containing data
RNADirectoryList <- list.dirs(path = file.path(proj, "DataSets", "RNASeqData"),
recursive = FALSE)
# find all files for htseq
filelist <- sapply(RNADirectoryList,
function(x){list.files(path = x,
full.names = TRUE,
pattern = "htseq.txt") })
# create data frame of file names
file_id_df <- as.data.frame(cbind(filelist,
gsub("^.*-","", RNADirectoryList)),
stringsAsFactors = F)
colnames(file_id_df) <- c("Path", "molecular_id")
head(file_id_df, n = 3)
# create data frame of all data frames
all_df <- lapply(seq(1:nrow(file_id_df)),
function(i){
X <- read.delim(file = file_id_df$Path[i],
header = FALSE);
colnames(X) <- c("Gene", file_id_df$molecular_id[i]);
return(X)
} )
# create single data frame of counts
genecounts_df <- plyr::join_all(all_df, by = NULL,
type = "full", match = "all")
# inspect output
head(genecounts_df, n = 1)
# save to file
write.csv(genecounts_df, file.path(proj, "RNASeqProject", "Data", "RNAseqGeneCounts.csv"))
| /Tutorials/IntroToCompBio/RNAseqDataPrep.R | no_license | melikamohammed/bioDS-bootcamp | R | false | false | 3,080 | r | #### Preparation of RNAseq data ####
# This script includes some manipulations of the data that need to occur to have everything ready for analysis.
# i made a comment
# install packages
if (!requireNamespace("BiocManager", quietly = TRUE))
install.packages("BiocManager")
BiocManager::install("DESeq2")
install.packages("rprojroot")
# load packages
library(DESeq2)
library(rprojroot)
# set top level directory
proj <- find_root_file(criterion = has_file(".git/index"))
#### Prepare metadata ####
# copy original data files to project directory
file.copy(file.path(proj, "DataSets", "RNASeqData", "phenotypeTable.csv"), file.path(proj, "RNASeqProject", "Data", "phenotypeTable.csv"))
file.copy(file.path(proj, "DataSets", "RNASeqData", "molecularDataSets.csv"), file.path(proj, "RNASeqProject", "data", "molecularDataSets.csv"))
# import phenotype data
pheno_df <- read.csv(file.path(proj, "DataSets", "RNASeqData", "phenotypeTable.csv"),
header =TRUE)
# import information about molecular data
mol_df <- read.csv(file.path(proj, "DataSets", "RNASeqData", "molecularDataSets.csv"),
header =TRUE, sep=",")
# combine phenotype and molecular data into metadata
metadata_df <- dplyr::full_join(pheno_df, mol_df,
by = "assay_material_id")
# inspect output
head(metadata_df, n = 3)
# write metadata to file
write.csv(metadata_df, file = file.path(proj, "RNASeqProject", "Data", "metadata.csv"), row.names = FALSE)
#### Combine data files ####
# import metadata (if neceassary)
metadata_df <- read.csv(file.path(proj, "RNASeqProject", "Data", "metadata.csv"))
str(metadata_df)
# make list of directories containing data
RNADirectoryList <- list.dirs(path = file.path(proj, "DataSets", "RNASeqData"),
recursive = FALSE)
# find all files for htseq
filelist <- sapply(RNADirectoryList,
function(x){list.files(path = x,
full.names = TRUE,
pattern = "htseq.txt") })
# create data frame of file names
file_id_df <- as.data.frame(cbind(filelist,
gsub("^.*-","", RNADirectoryList)),
stringsAsFactors = F)
colnames(file_id_df) <- c("Path", "molecular_id")
head(file_id_df, n = 3)
# create data frame of all data frames
all_df <- lapply(seq(1:nrow(file_id_df)),
function(i){
X <- read.delim(file = file_id_df$Path[i],
header = FALSE);
colnames(X) <- c("Gene", file_id_df$molecular_id[i]);
return(X)
} )
# create single data frame of counts
genecounts_df <- plyr::join_all(all_df, by = NULL,
type = "full", match = "all")
# inspect output
head(genecounts_df, n = 1)
# save to file
write.csv(genecounts_df, file.path(proj, "RNASeqProject", "Data", "RNAseqGeneCounts.csv"))
|
#' General Data Import from NWIS
#'
#' Returns data from the NWIS web service.
#' Arguments to the function should be based on \url{http://waterservices.usgs.gov} service calls.
#'
#' @param service string. Possible values are "iv" (for instantaneous), "dv" (for daily values), "gwlevels"
#' (for groundwater levels)
#' @param \dots see \url{http://waterservices.usgs.gov/rest/Site-Service.html#Service} for a complete list of options
#' @keywords data import NWIS web service
#' @return A data frame with the following columns:
#' \tabular{lll}{
#' Name \tab Type \tab Description \cr
#' agency \tab character \tab The NWIS code for the agency reporting the data\cr
#' site \tab character \tab The USGS site number \cr
#' datetime \tab POSIXct \tab The date and time of the value converted to UTC (for unit value data), \cr
#' \tab character \tab or raw character string \cr
#' tz_cd \tab character \tab The time zone code for datetime \cr
#' code \tab character \tab Any codes that qualify the corresponding value\cr
#' value \tab numeric \tab The numeric value for the parameter \cr
#' }
#' Note that code and value are repeated for the parameters requested. The names are of the form
#' X_D_P_S, where X is literal,
#' D is an option description of the parameter,
#' P is the parameter code,
#' and S is the statistic code (if applicable).
#'
#' There are also several useful attributes attached to the data frame:
#' \tabular{lll}{
#' Name \tab Type \tab Description \cr
#' url \tab character \tab The url used to generate the data \cr
#' siteInfo \tab data.frame \tab A data frame containing information on the requested sites \cr
#' variableInfo \tab data.frame \tab A data frame containing information on the requested parameters \cr
#' statisticInfo \tab data.frame \tab A data frame containing information on the requested statistics on the data \cr
#' queryTime \tab POSIXct \tab The time the data was returned \cr
#' }
#'
#' @seealso \code{\link{renameNWISColumns}}, \code{\link{importWaterML1}}, \code{\link{importRDB1}}
#' @export
#' @examples
#' \dontrun{
#' # Examples not run for time considerations
#' dataTemp <- readNWISdata(stateCd="OH",parameterCd="00010")
#' dataTempUnit <- readNWISdata(sites="03086500", service="iv", parameterCd="00010")
#' #Empty:
#' multiSite <- readNWISdata(sites=c("04025000","04072150"), service="iv", parameterCd="00010")
#' #Not empty:
#' multiSite <- readNWISdata(sites=c("04025500","040263491"), service="iv", parameterCd="00060")
#' }
readNWISdata <- function(service="dv", ...){
matchReturn <- list(...)
match.arg(service, c("dv","iv","gwlevels"))
if(length(service) > 1){
stop("Only one service call allowed.")
}
values <- sapply(matchReturn, function(x) URLencode(as.character(paste(eval(x),collapse=",",sep=""))))
names(values)[names(values) == "startDate"] <- "startDT"
names(values)[names(values) == "endDate"] <- "endDT"
urlCall <- paste(paste(names(values),values,sep="="),collapse="&")
if(service %in% c("dv","iv","gwlevels")){
format <- "waterml"
} else {
format <- "rdb1,1"
}
baseURL <- paste0("http://waterservices.usgs.gov/nwis/",service,"/?format=",format,"&")
urlCall <- paste0(baseURL,urlCall)
if(service=="qwdata"){
urlCall <- paste0(urlCall,"&siteOutput=expanded")
retval <- importRDB1(urlCall)
} else {
retval <- importWaterML1(urlCall, asDateTime = ("iv" == service))
}
return(retval)
}
| /R/readNWISdata.r | permissive | lawinslow/dataRetrieval | R | false | false | 3,459 | r | #' General Data Import from NWIS
#'
#' Returns data from the NWIS web service.
#' Arguments to the function should be based on \url{http://waterservices.usgs.gov} service calls.
#'
#' @param service string. Possible values are "iv" (for instantaneous), "dv" (for daily values), "gwlevels"
#' (for groundwater levels)
#' @param \dots see \url{http://waterservices.usgs.gov/rest/Site-Service.html#Service} for a complete list of options
#' @keywords data import NWIS web service
#' @return A data frame with the following columns:
#' \tabular{lll}{
#' Name \tab Type \tab Description \cr
#' agency \tab character \tab The NWIS code for the agency reporting the data\cr
#' site \tab character \tab The USGS site number \cr
#' datetime \tab POSIXct \tab The date and time of the value converted to UTC (for unit value data), \cr
#' \tab character \tab or raw character string \cr
#' tz_cd \tab character \tab The time zone code for datetime \cr
#' code \tab character \tab Any codes that qualify the corresponding value\cr
#' value \tab numeric \tab The numeric value for the parameter \cr
#' }
#' Note that code and value are repeated for the parameters requested. The names are of the form
#' X_D_P_S, where X is literal,
#' D is an option description of the parameter,
#' P is the parameter code,
#' and S is the statistic code (if applicable).
#'
#' There are also several useful attributes attached to the data frame:
#' \tabular{lll}{
#' Name \tab Type \tab Description \cr
#' url \tab character \tab The url used to generate the data \cr
#' siteInfo \tab data.frame \tab A data frame containing information on the requested sites \cr
#' variableInfo \tab data.frame \tab A data frame containing information on the requested parameters \cr
#' statisticInfo \tab data.frame \tab A data frame containing information on the requested statistics on the data \cr
#' queryTime \tab POSIXct \tab The time the data was returned \cr
#' }
#'
#' @seealso \code{\link{renameNWISColumns}}, \code{\link{importWaterML1}}, \code{\link{importRDB1}}
#' @export
#' @examples
#' \dontrun{
#' # Examples not run for time considerations
#' dataTemp <- readNWISdata(stateCd="OH",parameterCd="00010")
#' dataTempUnit <- readNWISdata(sites="03086500", service="iv", parameterCd="00010")
#' #Empty:
#' multiSite <- readNWISdata(sites=c("04025000","04072150"), service="iv", parameterCd="00010")
#' #Not empty:
#' multiSite <- readNWISdata(sites=c("04025500","040263491"), service="iv", parameterCd="00060")
#' }
readNWISdata <- function(service="dv", ...){
matchReturn <- list(...)
match.arg(service, c("dv","iv","gwlevels"))
if(length(service) > 1){
stop("Only one service call allowed.")
}
values <- sapply(matchReturn, function(x) URLencode(as.character(paste(eval(x),collapse=",",sep=""))))
names(values)[names(values) == "startDate"] <- "startDT"
names(values)[names(values) == "endDate"] <- "endDT"
urlCall <- paste(paste(names(values),values,sep="="),collapse="&")
if(service %in% c("dv","iv","gwlevels")){
format <- "waterml"
} else {
format <- "rdb1,1"
}
baseURL <- paste0("http://waterservices.usgs.gov/nwis/",service,"/?format=",format,"&")
urlCall <- paste0(baseURL,urlCall)
if(service=="qwdata"){
urlCall <- paste0(urlCall,"&siteOutput=expanded")
retval <- importRDB1(urlCall)
} else {
retval <- importWaterML1(urlCall, asDateTime = ("iv" == service))
}
return(retval)
}
|
library(dplyr)
library(tibble)
library(ggplot2)
library(ggfortify)
library(OpenImageR)
# Molecuar data and labels
molec_desc <- read.delim('molecular_descriptors.txt', stringsAsFactors = FALSE)
labels <- read.delim("training_set.txt", stringsAsFactors = FALSE)
head(as.tibble(molec_desc))
head(as.tibble(labels))
# Remove rows with NA values
molec_desc <- na.omit(molec_desc)
# Remove columns with no variance
molec_desc <- molec_desc[ ,(apply(molec_desc,2,var)!=0)]
# Subset
olfaction_data <- subset(labels, Intensity == "high ", select = c("Compound.Identifier", "Odor", "VALENCE.PLEASANTNESS", "INTENSITY.STRENGTH"))
head(as.tibble(olfaction_data))
# Averaging the Intensity and Valence values
average_olf <- data.frame(aggregate(cbind(VALENCE.PLEASANTNESS, INTENSITY.STRENGTH) ~ Compound.Identifier, data=olfaction_data, FUN = "mean"))
average_olf$VALENCE.PLEASANTNESS <- as.integer(average_olf$VALENCE.PLEASANTNESS)
average_olf$INTENSITY.STRENGTH <- as.integer(average_olf$INTENSITY.STRENGTH)
# Get odors col w/o the duplicates
unique_odor <- unique(olfaction_data[, 1:2])
str(unique_odor)
# Make factors
average_olf$VALENCE.PLEASANTNESS <- factor(average_olf$VALENCE.PLEASANTNESS)
levels(average_olf$VALENCE.PLEASANTNESS) <- list("Unpleasant" = c(0:33), "Neutral" = c(34:66), "Pleasant" = c(67:100))
average_olf$INTENSITY.STRENGTH <- factor(average_olf$INTENSITY.STRENGTH)
levels(average_olf$INTENSITY.STRENGTH) <- list("Low" = c(0:33), "Medium" = c(34:66), "High" = c(67:100))
# Merge
average_olf <- merge(x= average_olf, y=unique_odor, by = 'Compound.Identifier', all.x=TRUE)
str(average_olf)
# Rename Compound Identifier column
colnames(average_olf)[1] <- "CID"
# Merge data and labels
total_olfac <- merge(x = average_olf, y = molec_desc, by= "CID", all.x = TRUE)
# Examine data
head(as.tibble(total_olfac))
dim(total_olfac)
# Write out the file
write.csv(total_olfac, file="Average_Merged.csv")
| /averaged_merge.R | no_license | EdenF94/DREAM_olfaction_proj | R | false | false | 1,920 | r | library(dplyr)
library(tibble)
library(ggplot2)
library(ggfortify)
library(OpenImageR)
# Molecuar data and labels
molec_desc <- read.delim('molecular_descriptors.txt', stringsAsFactors = FALSE)
labels <- read.delim("training_set.txt", stringsAsFactors = FALSE)
head(as.tibble(molec_desc))
head(as.tibble(labels))
# Remove rows with NA values
molec_desc <- na.omit(molec_desc)
# Remove columns with no variance
molec_desc <- molec_desc[ ,(apply(molec_desc,2,var)!=0)]
# Subset
olfaction_data <- subset(labels, Intensity == "high ", select = c("Compound.Identifier", "Odor", "VALENCE.PLEASANTNESS", "INTENSITY.STRENGTH"))
head(as.tibble(olfaction_data))
# Averaging the Intensity and Valence values
average_olf <- data.frame(aggregate(cbind(VALENCE.PLEASANTNESS, INTENSITY.STRENGTH) ~ Compound.Identifier, data=olfaction_data, FUN = "mean"))
average_olf$VALENCE.PLEASANTNESS <- as.integer(average_olf$VALENCE.PLEASANTNESS)
average_olf$INTENSITY.STRENGTH <- as.integer(average_olf$INTENSITY.STRENGTH)
# Get odors col w/o the duplicates
unique_odor <- unique(olfaction_data[, 1:2])
str(unique_odor)
# Make factors
average_olf$VALENCE.PLEASANTNESS <- factor(average_olf$VALENCE.PLEASANTNESS)
levels(average_olf$VALENCE.PLEASANTNESS) <- list("Unpleasant" = c(0:33), "Neutral" = c(34:66), "Pleasant" = c(67:100))
average_olf$INTENSITY.STRENGTH <- factor(average_olf$INTENSITY.STRENGTH)
levels(average_olf$INTENSITY.STRENGTH) <- list("Low" = c(0:33), "Medium" = c(34:66), "High" = c(67:100))
# Merge
average_olf <- merge(x= average_olf, y=unique_odor, by = 'Compound.Identifier', all.x=TRUE)
str(average_olf)
# Rename Compound Identifier column
colnames(average_olf)[1] <- "CID"
# Merge data and labels
total_olfac <- merge(x = average_olf, y = molec_desc, by= "CID", all.x = TRUE)
# Examine data
head(as.tibble(total_olfac))
dim(total_olfac)
# Write out the file
write.csv(total_olfac, file="Average_Merged.csv")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/model-options.R
\name{get_calibration_option_labels}
\alias{get_calibration_option_labels}
\title{Map calibration option ID to JSON calibration option labels}
\usage{
get_calibration_option_labels(options)
}
\arguments{
\item{options}{Key-value (calibration option name - calibration option ID)
list of model options to be mapped.}
}
\value{
Mapped key-value (calibration option name - calibration option label)
list of model options
}
\description{
Map calibration option ID to JSON calibration option labels
}
| /naomi/man/get_calibration_option_labels.Rd | permissive | jeffeaton/naomi-model-paper | R | false | true | 590 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/model-options.R
\name{get_calibration_option_labels}
\alias{get_calibration_option_labels}
\title{Map calibration option ID to JSON calibration option labels}
\usage{
get_calibration_option_labels(options)
}
\arguments{
\item{options}{Key-value (calibration option name - calibration option ID)
list of model options to be mapped.}
}
\value{
Mapped key-value (calibration option name - calibration option label)
list of model options
}
\description{
Map calibration option ID to JSON calibration option labels
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/default_arguments.R
\name{default_cluster_features}
\alias{default_cluster_features}
\title{Default cluster features}
\usage{
default_cluster_features()
}
\value{
default value for cluster_features
}
\description{
Default cluster features
}
| /autonomics/man/default_cluster_features.Rd | no_license | bhagwataditya/autonomics0 | R | false | true | 319 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/default_arguments.R
\name{default_cluster_features}
\alias{default_cluster_features}
\title{Default cluster features}
\usage{
default_cluster_features()
}
\value{
default value for cluster_features
}
\description{
Default cluster features
}
|
####======================================================================================================
## clean up processed series html --------------------------------
#╔═╗╔═╦╗╔═╦═╦╦╦╦╗╔═╗╔╗═╦╗╔═╦╗╗╔╦╗╔═╗╔═╦╗╔═╦═╦╦╦╦╗╔═╗╔╗═╦╗╔═╦╗╗╔╦╗╔═╗╔═╦╗╔═╦═╦╦╦╦╗╔═╗╔╗═╦╗╔═╦╗╔╦╦╗
options(stringsAsFactors=F);library(colorout);rm(list=ls());ls()#╚═╣║ ╚╣║¯\_(•_•)_/¯║╚╣╔╣╔╣║║║║╚╣
options(menu.graphics=FALSE);library(R.helper)#╣═╩╚╣║╔╔╣╦═║║╔╚║╔╚╔╣╩╚╚╦╣║╩╔╦║║ ╚╩╣╚╚╣║╣╚╩╔╦╩╚╦╚╩╣
#╚═╝╩═╩╝╚═╩══╩═╩═╩═╩╝╩═╩╝╚═╩═╩═╩╝╚═╝╩═╩╝╚═╩══╩═╩═╩═╩╝╩═╩╝╚═╩═╩═╩╝╚═╝╩═╩╝╚═╩══╩═╩═╩═╩╝╩═╩╝╚═╩═╩╩═╝
Load('/Data/geod/dtb/ref/geo.series.info.refined.proj.171020.Rdata')
#query=c(' glioma. ',' astrocytoma. ','diffuse.astrocytoma','anaplastic.astrocytoma','low.grade.glioma') ## adding space avoid partial matches to things like 'oligodendro"glioma"'
#
#Drop-Seq
query=tolower(c('Amyotrophic Lateral Sclerosis','Lou Gehrig',' ALS ','Amyotrophic','Sclerosis'))
#qubq1=tolower(c('brain ','neuron','nervous','neural','motor.neuro','neurodegenerat','cerebellum','frontal cortex','spinal cord','ventral horns','Central nervous system'))
qubq1=tolower(c('spinal cord'))
qubq1=tolower(c('spinal cord'))
# qubq2=c('fetal','esc','ips')
qspec=c('homo sapiens')#,'mus musculus'
#qmthd=c('expression profiling by high throughput sequencing')
qmthd=c('expression profiling by high throughput sequencing','expression profiling by array')
#
stuffs=serin[
(
(grepl(paste(query,collapse='|'),serin$title) | grepl(paste(query,collapse='|'),serin$main) | grepl(paste(query,collapse='|'),serin$design))
&(grepl(paste(qubq1,collapse='|'),serin$title) | grepl(paste(qubq1,collapse='|'),serin$main) | grepl(paste(qubq1,collapse='|'),serin$design))
# &!(grepl(paste(qubq2,collapse='|'),serin$title) | grepl(paste(qubq2,collapse='|'),serin$main) | grepl(paste(qubq2,collapse='|'),serin$design))
),]
stuffs=stuffs[grepl(paste(qspec,collapse='|'),stuffs$species),]
str(stuffs)
write.delim(stuffs,file='/Data/geod/out/table/als.spinal_cord.matches.txt')
#Head(stuffs[grepl(tolower(paste(sset,collapse='|')),stuffs$id),])
overlap(sset,serin$id)
oda=overlap(sset,stuffs$id)
dummy=serin[serin$id%in%oda$ina,]
dummy[,c('id','title','main','design')]
grepl(paste(qubq1,collapse='|'),dummy$title) | grepl(paste(qubq1,collapse='|'),dummy$main) | grepl(paste(qubq1,collapse='|'),dummy$design)
grepl(paste(query,collapse='|'),dummy$title) | grepl(paste(query,collapse='|'),dummy$main) | grepl(paste(query,collapse='|'),dummy$design)
#write.delim(dummy,file='/Data/geod/dtb/working/serin.match_refine.txt')
#write.delim(stuffs,file='/Data/geod/dtb/working/serin.matches.txt')
#write.delim(stuffs,file='/Data/geod/dtb/working/serin.matches.adult.txt')
#system('cp /Data/geod/dtb/raw/geo_GSE55114_curl.txt /Data/ks/tester/')
query='fenfluramine'
stuffs=serin[
(grepl(paste(query,collapse='|'),serin$title) | grepl(paste(query,collapse='|'),serin$main) | grepl(paste(query,collapse='|'),serin$design))
# |(grepl(paste(query,collapse='|'),serin$title) | grepl(paste(qubq1,collapse='|'),serin$main) | grepl(paste(qubq1,collapse='|'),serin$design))
,]
str(stuffs)
#### pointless but reassuring confirmation of what should be obviously the case by construction.. (which ofc assumes the author is competent..)
# che1=matst(serin[grepl(paste(query,collapse='|'),serin$title),'id'])
# che2=matst(serin[grepl(paste(qubq1,collapse='|'),serin$title),'id'])
# che3=matst(serin[(grepl(paste(query,collapse='|'),serin$title) &grepl(paste(qubq1,collapse='|'),serin$title)),'id'])
# che4=overlap(che1$entry,che2$entry)
# overlap(che3$entry,che4$inter)
#stuffs=serin[grepl(paste(query,collapse='|'),serin$title)|grepl(paste(query,collapse='|'),serin$main),]
# str(stuffs)
head(matst(stuffs$type))
stuffs=stuffs[stuffs$type%in%qmthd,]
str(stuffs)
matst(stuffs$species)
stuffs=stuffs[stuffs$species%in%qspec,]
str(stuffs)
#write.file(stuffs,file='/Data/geod/dtb/working/geo_datasets_series.query_single_cell_nucleus.txt',row.names=F,col.names=T)
write.file(stuffs,file='/Data/geod/dtb/working/geo_datasets_series.query_single_nucleus.txt',row.names=F,col.names=T)
| /bin/geo_series_query.R | no_license | ks4471/geod | R | false | false | 4,684 | r |
####======================================================================================================
## clean up processed series html --------------------------------
#╔═╗╔═╦╗╔═╦═╦╦╦╦╗╔═╗╔╗═╦╗╔═╦╗╗╔╦╗╔═╗╔═╦╗╔═╦═╦╦╦╦╗╔═╗╔╗═╦╗╔═╦╗╗╔╦╗╔═╗╔═╦╗╔═╦═╦╦╦╦╗╔═╗╔╗═╦╗╔═╦╗╔╦╦╗
options(stringsAsFactors=F);library(colorout);rm(list=ls());ls()#╚═╣║ ╚╣║¯\_(•_•)_/¯║╚╣╔╣╔╣║║║║╚╣
options(menu.graphics=FALSE);library(R.helper)#╣═╩╚╣║╔╔╣╦═║║╔╚║╔╚╔╣╩╚╚╦╣║╩╔╦║║ ╚╩╣╚╚╣║╣╚╩╔╦╩╚╦╚╩╣
#╚═╝╩═╩╝╚═╩══╩═╩═╩═╩╝╩═╩╝╚═╩═╩═╩╝╚═╝╩═╩╝╚═╩══╩═╩═╩═╩╝╩═╩╝╚═╩═╩═╩╝╚═╝╩═╩╝╚═╩══╩═╩═╩═╩╝╩═╩╝╚═╩═╩╩═╝
Load('/Data/geod/dtb/ref/geo.series.info.refined.proj.171020.Rdata')
#query=c(' glioma. ',' astrocytoma. ','diffuse.astrocytoma','anaplastic.astrocytoma','low.grade.glioma') ## adding space avoid partial matches to things like 'oligodendro"glioma"'
#
#Drop-Seq
query=tolower(c('Amyotrophic Lateral Sclerosis','Lou Gehrig',' ALS ','Amyotrophic','Sclerosis'))
#qubq1=tolower(c('brain ','neuron','nervous','neural','motor.neuro','neurodegenerat','cerebellum','frontal cortex','spinal cord','ventral horns','Central nervous system'))
qubq1=tolower(c('spinal cord'))
qubq1=tolower(c('spinal cord'))
# qubq2=c('fetal','esc','ips')
qspec=c('homo sapiens')#,'mus musculus'
#qmthd=c('expression profiling by high throughput sequencing')
qmthd=c('expression profiling by high throughput sequencing','expression profiling by array')
#
stuffs=serin[
(
(grepl(paste(query,collapse='|'),serin$title) | grepl(paste(query,collapse='|'),serin$main) | grepl(paste(query,collapse='|'),serin$design))
&(grepl(paste(qubq1,collapse='|'),serin$title) | grepl(paste(qubq1,collapse='|'),serin$main) | grepl(paste(qubq1,collapse='|'),serin$design))
# &!(grepl(paste(qubq2,collapse='|'),serin$title) | grepl(paste(qubq2,collapse='|'),serin$main) | grepl(paste(qubq2,collapse='|'),serin$design))
),]
stuffs=stuffs[grepl(paste(qspec,collapse='|'),stuffs$species),]
str(stuffs)
write.delim(stuffs,file='/Data/geod/out/table/als.spinal_cord.matches.txt')
#Head(stuffs[grepl(tolower(paste(sset,collapse='|')),stuffs$id),])
overlap(sset,serin$id)
oda=overlap(sset,stuffs$id)
dummy=serin[serin$id%in%oda$ina,]
dummy[,c('id','title','main','design')]
grepl(paste(qubq1,collapse='|'),dummy$title) | grepl(paste(qubq1,collapse='|'),dummy$main) | grepl(paste(qubq1,collapse='|'),dummy$design)
grepl(paste(query,collapse='|'),dummy$title) | grepl(paste(query,collapse='|'),dummy$main) | grepl(paste(query,collapse='|'),dummy$design)
#write.delim(dummy,file='/Data/geod/dtb/working/serin.match_refine.txt')
#write.delim(stuffs,file='/Data/geod/dtb/working/serin.matches.txt')
#write.delim(stuffs,file='/Data/geod/dtb/working/serin.matches.adult.txt')
#system('cp /Data/geod/dtb/raw/geo_GSE55114_curl.txt /Data/ks/tester/')
query='fenfluramine'
stuffs=serin[
(grepl(paste(query,collapse='|'),serin$title) | grepl(paste(query,collapse='|'),serin$main) | grepl(paste(query,collapse='|'),serin$design))
# |(grepl(paste(query,collapse='|'),serin$title) | grepl(paste(qubq1,collapse='|'),serin$main) | grepl(paste(qubq1,collapse='|'),serin$design))
,]
str(stuffs)
#### pointless but reassuring confirmation of what should be obviously the case by construction.. (which ofc assumes the author is competent..)
# che1=matst(serin[grepl(paste(query,collapse='|'),serin$title),'id'])
# che2=matst(serin[grepl(paste(qubq1,collapse='|'),serin$title),'id'])
# che3=matst(serin[(grepl(paste(query,collapse='|'),serin$title) &grepl(paste(qubq1,collapse='|'),serin$title)),'id'])
# che4=overlap(che1$entry,che2$entry)
# overlap(che3$entry,che4$inter)
#stuffs=serin[grepl(paste(query,collapse='|'),serin$title)|grepl(paste(query,collapse='|'),serin$main),]
# str(stuffs)
head(matst(stuffs$type))
stuffs=stuffs[stuffs$type%in%qmthd,]
str(stuffs)
matst(stuffs$species)
stuffs=stuffs[stuffs$species%in%qspec,]
str(stuffs)
#write.file(stuffs,file='/Data/geod/dtb/working/geo_datasets_series.query_single_cell_nucleus.txt',row.names=F,col.names=T)
write.file(stuffs,file='/Data/geod/dtb/working/geo_datasets_series.query_single_nucleus.txt',row.names=F,col.names=T)
|
#Today we're going to be proceeding with the samples that we processed from the QIIME tutorial. We're going to bring them into R, to use a package called "phyloseq" to analyze and visualize them.
# https://joey711.github.io/phyloseq/
# This is an R script. You can type directly into the R console, but it is best to execute commands from scripts. You can execute a line by typing [CMD+enter]. You can execute groups of lines by selecting them and doing the same.
# Comments are lines that start with a # sign. Even if they are executed, they are not run as code.
# Use comments throughout your scripts to remind yourself and future users of what you are doing at each step.
# The first thing you want to do when you open R is make sure you are in the right working directory. R will use whatever directory you set as its home base, from which it will look for any files you may have. Make sure this script and the final .biom file from our last week are in the same folder. Set the working directory under Misc in R, or under Session in RStudio, to be the folder where you have this script and the .biom folder.
# You can download the .biom file here: https://github.com/TheaWhitman/Soil_Micro_523/raw/master/feature-table-metaD-tax.biom
library("phyloseq")
library("ggplot2")
# This is the first line of code we'll run. This is used to "attach" the packages we will use in R.
# Phyloseq is a package of functions that helps us analyze phylogenetic data.
?phyloseq
# In R, this is the command you run to get help or information on a function
ps = import_biom("feature-table-metaD-tax.biom", parseFunction=parse_taxonomy_greengenes)
# We are importing our .biom file and telling it the format the taxonomy is written in so it processes it correctly
# If you saved the file somewhere else, you'll need to direct it to the correct filepath
# (Don't worry about the warning message.)
ps
# This tells us a bit about our dataset
# We might want to check out the data a bit to see what it looks like.
# A key command for this is head()
# head() allows us to see only the top of something. That's great if it's, for example, a 380-row OTU table.
head(otu_table(ps))
# In the space below, try to look at the head of the sample data and our taxonomy table
# Maybe we're wondering how many sequences are in each sample. We can calculate and plot this:
d = colSums(otu_table(ps))
# defining variable d as the column sums of our otu table.
d = data.frame(names(d),d)
# Creating a dataframe of our sample names
colnames(d)=c("Sample","Total")
# Naming the columns
head(d)
# displaying the top few values of d.
p = qplot(d$Total, geom="histogram", binwidth=60)
p
# This is our first use of ggplot, a figure-making package.
# You can make many different types of flexible, customized plots with ggplot.
# Plotting the Total values we calculated above, to see the total reads from each sample
#Some of our samples had very few sequences - some even had 0! (This is because we only used 1% of the total data, to #make analysis quicker.) For the purposes of this tutorial, we're going to want to get rid of the least abundant ones. #There's not a clear cutoff in the distribution we see above, so let's just take only samples with >200 sequences.
# phyloseq has a function to do this, called prune_samples
ps.cutoff = prune_samples(sample_sums(ps)>=200, ps)
ps.cutoff
# You can see we now have fewer samples (42)
# In the space below, see what happens if you change the cutoff from 200 to something else.
# Be sure the last command you run sets the cutoff to something appropriate.
# Let's look at the taxonomic identity of the OTUs in our samples.
# We can use the plot_bar function from phyloseq, which actually draws on ggplot2.
plot_bar(ps.cutoff, fill="Phylum")
# Can you run the same bar graph command, but use a different taxonomic classification than Phylum?
# What do you get?
# Okay, those plots are interesting, but they're the absolute abundances of each OTU
# We know that sequencing idiosyncracies, not real ecology, are likely driving differences.
# We might rather look at the relative abundance (fraction of total community)
# Phyloseq has a function to transform sample counts:
ps.norm = transform_sample_counts(ps.cutoff, function(x) x / sum(x) )
# And then we can make the same plot
plot_bar(ps.norm, fill="Phylum")
# We might also be interested in what information we have about the samples.
# You saw some of the categories when you ran head(sample_data(ps)) earlier.
# We can use the following command to look at just the column names of our sample data:
colnames(sample_data(ps.cutoff))
# Now we can group the bar charts we made above by different categories
# To do this, we add the facet_grid command to our plot_bar command:
plot_bar(ps.norm, fill="Phylum") + facet_grid(~Vegetation, scale="free")
# Runs the plot_bar command, but grouped by Vegetation
# You can group by different variables.
# Try grouping by phylum by changing the facet_grid command
# You might need to change the plot size to visualize it
# Explore the data however you like!
# Use the ?plot_bar command to find out more options.
# Can you figure out how to give your graph a title?
# Look at the phyloseq stacked bar tutorial - can you figure out how to get rid of the black lines on the figure? (hint: look at the end of the tutorial)
# https://joey711.github.io/phyloseq/plot_bar-examples.html
| /Community_Composition_Tutorial.R | permissive | TheaWhitman/Soil_Micro_523 | R | false | false | 5,442 | r | #Today we're going to be proceding with the samples that we processed from the QIIME tutorial. We're going to bring them into R, to use a package called "phyloseq" to analyze and visualize them.
# https://joey711.github.io/phyloseq/
# This is an R script. You can type directly into the R console, but it is best to execute commands from scripts. You can execute a line by typing [CMD+enter]. You can execute groups of lines by selecting them and doing the same.
# Comments are lines that start with a # sign. Even if they are executed, they are not run as code.
# Use comments throughout your scripts to remind yourself and future users of what you are doing at each step.
# The first thing you want to do when you open R is make sure you are in the right working directory. R will use whatever directory you set as its home base, from which it will look for any files you may have. Make sure this script and the final .biom file from our last week are in the same folder. Set the working directory under Misc in R, or under Session in RStudio, to be the folder where you have this script and the .biom folder.
# You can download the .biom file here: https://github.com/TheaWhitman/Soil_Micro_523/raw/master/feature-table-metaD-tax.biom
library("phyloseq")
library("ggplot2")
# This is the first line of code we'll run. This is used to "attach" the packages we will use in R.
# Phyloseq is a package of functions that helps us analyze phylogenetic data.
?phyloseq
# In R, this is the command you run to get help or information on a function
ps = import_biom("feature-table-metaD-tax.biom", parseFunction=parse_taxonomy_greengenes)
# We are importing our .biom file and telling it the format the taxonomy is written in so it processes it correctly
# If you saved the file somewhere else, you'll need to direct it to the correct filepath
# (Don't worry about the warning message.)
ps
# This tells us a bit about our dataset
# We might want to check out the data a bit to see what it looks like.
# A key command for this is head()
# head() allows us to see only the top of something. That's great if it's, for example, a 380-row OTU table.
head(otu_table(ps))
# In the space below, try to look at the head of the sample data and our taxonomy table
# Maybe we're wondering how many sequences are in each sample. We can calculate and plot this:
d = colSums(otu_table(ps))
# defining variable d as the column sums of our otu table.
d = data.frame(names(d),d)
# Creating a dataframe of our sample names
colnames(d)=c("Sample","Total")
# Naming the columns
head(d)
# displaying the top few values of d.
p = qplot(d$Total, geom="histogram", binwidth=60)
p
# This is our first use of ggplot, a figure-making package.
# You can make many different types of flexible, customized plots with ggplot.
# Plotting the Total values we calculated above, to see the total reads from each sample
#Some of our samples had very few sequences - some even had 0! (This is because we only used 1% of the total data, to #make analysis quicker.) For the purposes of this tutorial, we're going to want to get rid of the least abundant ones. #There's not a clear cutoff in the distribution we see above, so let's just take only samples with >200 sequences.
# phyloseq has a function to do this, called prune_samples
ps.cutoff = prune_samples(sample_sums(ps)>=200, ps)
ps.cutoff
# You can see we now have fewer samples (42)
# In the space below, see what happens if you change the cutoff from 200 to something else.
# Be sure the last command you run sets the cutoff to something appropriate.
# Let's look at the taxonomic identity of the OTUs in our samples.
# We can use the plot_bar function from phyloseq, which actually draws on ggplot2.
plot_bar(ps.cutoff, fill="Phylum")
# Can you run the same bar graph command, but use a different taxonomic classification than Phylum?
# What do you get?
# Okay, those plots are interesting, but they're the absolute abundances of each OTU
# We know that sequencing idiosyncracies, not real ecology, are likely driving differences.
# We might rather look at the relative abundance (fraction of total community)
# Phyloseq has a function to transform sample counts:
ps.norm = transform_sample_counts(ps.cutoff, function(x) x / sum(x) )
# And then we can make the same plot
plot_bar(ps.norm, fill="Phylum")
# We might also be interested in what information we have about the samples.
# You saw some of the categories when you ran head(sample_data(ps)) earlier.
# We can use the following command to look at just the column names of our sample data:
colnames(sample_data(ps.cutoff))
# Now we can group the bar charts we made above by different categories
# To do this, we add the facet_grid command to our plot_bar command:
plot_bar(ps.norm, fill="Phylum") + facet_grid(~Vegetation, scale="free")
# Runs the plot_bar command, but grouped by Vegetation
# You can group by different variables.
# Try grouping by phylum by changing the facet_grid command
# You might need to change the plot size to visualize it
# Explore the data however you like!
# Use the ?plot_bar command to find out more options.
# Can you figure out how to give your graph a title?
# Look at the phyloseq stacked bar tutorial - can you figure out how to get rid of the black lines on the figure? (hint: look at the end of the tutorial)
# https://joey711.github.io/phyloseq/plot_bar-examples.html
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plots.R
\name{barplot_ursus}
\alias{barplot_ursus}
\title{Get barplot of the number of ecoregions per species}
\usage{
barplot_ursus(data_ursus_eco_reg)
}
\arguments{
\item{data_ursus_eco_reg}{dataset with both bears and eco_region data}
}
\value{
A barplot (presumably a ggplot object) of the number of ecoregions per species.
}
\description{
Get barplot of the number of ecoregions per species
}
| /man/barplot_ursus.Rd | permissive | aissamorin/datatoolboxexos | R | false | true | 397 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plots.R
\name{barplot_ursus}
\alias{barplot_ursus}
\title{Get barplot of the number of ecoregions per species}
\usage{
barplot_ursus(data_ursus_eco_reg)
}
\arguments{
\item{data_ursus_eco_reg}{dataset with both bears and eco_region data}
}
\value{
}
\description{
Get barplot of the number of ecoregions per species
}
|
#' Table 21_1
#'
#' Macroeconomic data
#' Source: All the data are collected from FRED, the economic website of the Federal Reserve Bank of St. Louis. GDP, DPI, and PCE are in constant dollars, here 2000 dollars. CP and Dividend are in nominal dollars
#'
#' @docType data
#' @usage data('Table21_1')
#' @format
#'
#' \itemize{
#' \item \strong{Year}
#' \item \strong{Quarter}
#' \item \strong{RPD: }real disposable personal income (billions of dollars)
#' \item \strong{PIB: }gross domestic product (billions of dollars)
#' \item \strong{DCP: }real personal consumption expenditure (billions of dollars)
#' \item \strong{LC: }corporate profits (billions of dollars)
#' \item \strong{DIVIDEND: } dividends, (billions of dollars)
#' }
'Table21_1'
| /R/table21_1.R | no_license | brunoruas2/gujarati | R | false | false | 745 | r | #' Table 21_1
#'
#' Macroeconomic data
#' Source: All the data are collected from FRED, the economic website of the Federal Reserve Bank of St. Louis. GDP, DPI, and PCE are in constant dollars, here 2000 dollars. CP and Dividend are in nominal dollars
#'
#' @docType data
#' @usage data('Table21_1')
#' @format
#'
#' \itemize{
#' \item \strong{Year}
#' \item \strong{Quarter}
#' \item \strong{RPD: }real disposable personal income (billions of dollars)
#' \item \strong{PIB: }gross domestic product (billions of dollars)
#' \item \strong{DCP: }real personal consumption expenditure (billions of dollars)
#' \item \strong{LC: }corporate profits (billions of dollars)
#' \item \strong{DIVIDEND: } dividends, (billions of dollars)
#' }
'Table21_1'
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/validate_input.R
\name{validate_input_model}
\alias{validate_input_model}
\title{Validate input of sBG model}
\usage{
validate_input_model(model)
}
\arguments{
\item{model}{sBG model.}
}
\description{
\code{validate_input_model} receives a model and validates if its structure is correct (i.e. if it contains the right components).
}
| /man/validate_input_model.Rd | permissive | runtastic/sBGmodel | R | false | true | 412 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/validate_input.R
\name{validate_input_model}
\alias{validate_input_model}
\title{Validate input of sBG model}
\usage{
validate_input_model(model)
}
\arguments{
\item{model}{sBG model.}
}
\description{
\code{validate_input_model} receives a model and validates if its structure is correct (i.e. if it contains the right components).
}
|
## plot3.R -- reproduce the course-project figure 3: the three
## sub-metering series for 2007-02-01 and 2007-02-02, saved to plot3.png.

## Read the full data set from the working directory.
## NOTE(review): na.strings = "?" is not set, so columns containing "?"
## are read as character and are coerced below with as.numeric()
## (the "?" entries become NA with a warning) -- confirm this is intended.
mydata <- read.table("household_power_consumption.txt", header = TRUE, sep = ";",
                     stringsAsFactors = FALSE, dec = ".")

## Keep only the two days of interest (dates are d/m/Y text at this point).
## Bug fix: the subset previously indexed an undefined object `data`
## instead of the data frame `mydata` read above.
mydata3 <- mydata[mydata$Date %in% c("1/2/2007", "2/2/2007"), ]

## Combine the Date and Time columns into a single POSIXlt timestamp
## for the x axis.
Combineddatetime <- strptime(paste(mydata3$Date, mydata3$Time, sep = " "),
                             "%d/%m/%Y %H:%M:%S")

## Coerce the measurement columns to numeric.
globalActivePower <- as.numeric(mydata3$Global_active_power)
Sub_metering_1 <- as.numeric(mydata3$Sub_metering_1)
Sub_metering_2 <- as.numeric(mydata3$Sub_metering_2)
Sub_metering_3 <- as.numeric(mydata3$Sub_metering_3)

## Draw the three series into a 480x480 PNG and add a legend.
png("plot3.png", width = 480, height = 480)
plot(Combineddatetime, Sub_metering_1, type = "l", ylab = "Energy Submetering", xlab = "")
lines(Combineddatetime, Sub_metering_2, type = "l", col = "red")
lines(Combineddatetime, Sub_metering_3, type = "l", col = "blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lty = 1, lwd = 2.5, col = c("black", "red", "blue"))
dev.off()
| /Plot3.R | no_license | datascience2017/ExData_Plotting1 | R | false | false | 1,021 | r |
##Read the text file from working directory and then print the data
mydata<- read.table("household_power_consumption.txt",header=TRUE,sep=";",stringsAsFactors=FALSE, dec=".")
#subsetting data
mydata3 <- data[data$Date %in% c("1/2/2007","2/2/2007") ,]
#formatting data
Combineddatetime <- strptime(paste(mydata3$Date, mydata3$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
#converting from factor to numeric
globalActivePower <- as.numeric(mydata3$Global_active_power)
Sub_metering_1 <- as.numeric(mydata3$Sub_metering_1)
Sub_metering_2 <- as.numeric(mydata3$Sub_metering_2)
Sub_metering_3 <- as.numeric(mydata3$Sub_metering_3)
png("plot3.png", width=480, height=480)
plot(Combineddatetime, Sub_metering_1, type="l", ylab="Energy Submetering", xlab="")
lines(Combineddatetime, Sub_metering_2, type="l", col="red")
lines(Combineddatetime, Sub_metering_3, type="l", col="blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1, lwd=2.5, col=c("black", "red", "blue"))
dev.off()
|
## plot2.R -- plot Global Active Power over 2007-02-01/02 and save it
## as plot2.png.

## Load the measurements; "?" marks missing values in the raw file.
power <- read.csv("household_power_consumption.txt", header = TRUE, sep = ";",
                  na.strings = "?", nrows = 2075259, check.names = FALSE,
                  stringsAsFactors = FALSE, comment.char = "", quote = '\"')

## Parse the Date column, then keep only the two target days.
power$Date <- as.Date(power$Date, format = "%d/%m/%Y")
keep <- power$Date >= "2007-02-01" & power$Date <= "2007-02-02"
power <- power[which(keep), ]

## Build a POSIXct timestamp from the date and time columns.
power$Datetime <- as.POSIXct(paste(as.Date(power$Date), power$Time))

## Line plot of global active power against time.
plot(power$Global_active_power ~ power$Datetime, type = "l",
     ylab = "Global Active Power (kilowatts)", xlab = "")

## Copy the screen device to a 600x600 PNG file.
dev.copy(png, file = "plot2.png", height = 600, width = 600)
dev.off()
## Read all the data from the file
data <- read.csv("household_power_consumption.txt", header=T, sep=';', na.strings="?",
nrows=2075259, check.names=F, stringsAsFactors=F, comment.char="", quote='\"')
## convert Date variable to data class by using as.Date function
data$Date <- as.Date(data$Date, format="%d/%m/%Y")
## Subsetting the data
data <- subset(data, subset=(Date >= "2007-02-01" & Date <= "2007-02-02"))
## Converting dates
datetime <- paste(as.Date(data$Date), data$Time)
data$Datetime <- as.POSIXct(datetime)
## Plot 2
plot(data$Global_active_power~data$Datetime, type="l",
ylab="Global Active Power (kilowatts)", xlab="")
## Save to file plot2.png
dev.copy(png, file="plot2.png", height=600, width=600)
dev.off() |
# Matrix inversion is usually a costly computation, so there can be a
# benefit to caching the inverse of a matrix rather than recomputing it.
# makeCacheMatrix builds a wrapper object (a list of closures) around a
# matrix that can cache its inverse:
#   1. set(y)          -- replace the stored matrix and drop any cached inverse
#   2. get()           -- return the stored matrix
#   3. setinverse(inv) -- store a computed inverse in the cache
#   4. getinverse()    -- return the cached inverse, or NULL if not computed
makeCacheMatrix <- function(x = matrix()) {
  i <- NULL  # cached inverse; NULL marks "not computed yet"
  set <- function(y = matrix()) {
    x <<- y
    i <<- NULL  # invalidate the cache whenever the matrix changes
  }
  get <- function() x
  # Parameter renamed from `solve` to `inverse`: the old name shadowed
  # base::solve() inside this closure. cacheSolve passes the value
  # positionally, so the rename is backward compatible.
  setinverse <- function(inverse) i <<- inverse
  getinverse <- function() i
  list(set = set, get = get,
       getinverse = getinverse,
       setinverse = setinverse)
}
# cacheSolve returns the inverse of a "cache matrix" built by
# makeCacheMatrix. It first checks whether the inverse has already been
# computed; if so it returns the cached result and skips the computation.
# Otherwise it computes the inverse with solve(), stores it in the cache
# via setinverse(), and returns it.
#
# x   : list of accessors produced by makeCacheMatrix (get, getinverse
#       and setinverse are the ones used here).
# ... : extra arguments forwarded to solve().
# Returns the inverse matrix, or NULL (with a message) when the matrix
# is singular. The stored matrix must be square, since det() is used to
# test invertibility.
cacheSolve <- function(x = matrix(), ...) {
  ## Return a matrix that is the inverse of 'x'
  i <- x$getinverse()
  if (!is.null(i)) {
    message("getting cached data")
    return(i)
  }
  data <- x$get()
  # NOTE(review): det(data) != 0 is an exact floating-point test; a
  # nearly-singular matrix still passes and solve() may be inaccurate.
  if (det(data) != 0) {
    i <- solve(data, ...)  # bug fix: '...' was previously accepted but ignored
    x$setinverse(i)
    i
  } else {
    message("this matrix is not invertible")
    invisible(NULL)  # message() also returns invisible NULL, so behavior matches
  }
}
| /cachematrix.R | no_license | anupamagalwankar/ProgrammingAssignment2 | R | false | false | 1,388 | r | # Matrix inversion is usually a costly computation and there may be some benefit
# to caching the inverse of a matrix rather than compute it repeatedly. The
# following two functions are used to cache the inverse of a matrix.
# makeCacheMatrix creates a list containing a function to
# 1. set the value of the matrix
# 2. get the value of the matrix
# 3. set the value of inverse of the matrix
# 4. get the value of inverse of the matrix
makeCacheMatrix <- function(x = matrix()) {
i <- NULL
set <- function(y=matrix()) {
x <<- y
i <<- NULL
}
get <- function() x
setinverse <- function(solve) i <<- solve
getinverse <- function() i
list(set=set, get=get,
getinverse=getinverse,
setinverse=setinverse)
}
# The following function returns the inverse of the matrix. It first checks if
# the inverse has already been computed. If so, it gets the result and skips the
# computation. If not, it computes the inverse, sets the value in the cache via
# setinverse function.
##make sure the matrix is a square matrix
cacheSolve <- function(x=matrix(), ...) {
## Return a matrix that is the inverse of 'x'
i <- x$getinverse()
if(!is.null(i)) {
message("getting cached data")
return(i)
}
data <- x$get()
if (det(data)!=0) {
i <- solve(data)
x$setinverse(i)
i}
else {message("this matrix is not invertible")}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bootstrap_lm_null.R
\name{bootstrap_residual_null}
\alias{bootstrap_residual_null}
\title{Compare nested linear models via (optionally wild) residual bootstrap.}
\usage{
bootstrap_residual_null(fit1, fit0, reps, wild, construct)
}
\arguments{
\item{fit1}{\code{lm} object defining the full model.}
\item{fit0}{\code{lm} object defining the reduced model.}
\item{reps}{scalar; number of bootstrap replications to perform.}
\item{wild}{boolean; if \code{TRUE}, a wild bootstrap is performed. If
\code{FALSE} a traditional residual bootstrap is performed.}
\item{construct}{string defining the type of construct to use when generating
from the distribution for the wild bootstrap (see \code{\link{rmammen}}). If
\code{wild = FALSE}, this is ignored.}
}
\value{
vector of length \code{reps} containing the test statistic from each
bootstrap replication. It also has an attribute containing an ANOVA table
comparing the two models.
}
\description{
This is the workhorse for the \code{compare_models()} function when a linear
model is specified. Given two linear models, one nested within the other, the
null distribution of the test statistic comparing the two models is estimated
via a residual bootstrap (or wild bootstrap). This is not meant to be called
by the user directly.
}
\examples{
\dontrun{
fit1 <- lm(mpg ~ hp, data = mtcars)
fit0 <- lm(mpg ~ 1, data = mtcars)
bootstrap_residual_null(fit1, fit0, reps = 4999, wild = FALSE)
}
}
| /man/bootstrap_residual_null.Rd | no_license | reyesem/IntroAnalysis | R | false | true | 1,514 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bootstrap_lm_null.R
\name{bootstrap_residual_null}
\alias{bootstrap_residual_null}
\title{Compare nested linear models via (optionally wild) residual bootstrap.}
\usage{
bootstrap_residual_null(fit1, fit0, reps, wild, construct)
}
\arguments{
\item{fit1}{\code{lm} object defining the full model.}
\item{fit0}{\code{lm} object defining the reduced model.}
\item{reps}{scalar; number of bootstrap replications to perform.}
\item{wild}{boolean; if \code{TRUE}, a wild bootstrap is performed. If
\code{FALSE} a traditional residual bootstrap is performed.}
\item{construct}{string defining the type of construct to use when generating
from the distribution for the wild bootrap (see \code{\link{rmammen}}). If
\code{wild = FALSE}, this is ignored.}
}
\value{
vector of length \code{reps} containing the test statistic from each
bootstrap replication. It also has an attribute containing an ANOVA table
comparing the two models.
}
\description{
This is the workhorse for the \code{compare_models()} function when a linear
model is specified. Given two linear models, one nested within the other, the
null distribution of the test statistic comparing the two models is estimated
via a residual bootstrap (or wild bootstrap). This is not meant to be called
by the user directly.
}
\examples{
\dontrun{
fit1 <- lm(mpg ~ hp, data = mtcars)
fit0 <- lm(mpg ~ 1, data = mtcars)
boot_residual_null(fit1, fit0, reps = 4999, wild = FALSE)
}
}
|
# Experimental reader for a .gen genotype file: for each line (one SNP),
# triples of values starting at field 6 are scanned -- presumably
# per-sample genotype probabilities (TODO confirm the .gen layout) --
# and converted to a 0/1/2 call when one value exceeds 0.9, NA otherwise.
#
# NOTE(review): this script looks unfinished/broken as written:
#   * `gen` is used inside the loop but never defined in this file;
#     presumably a pre-parsed matrix of lines -- confirm before running.
#   * the foreach result is never assigned, yet `vec` is used below in
#     `out[, myVector[3] := vec]`; `vec` is undefined at that point.
#   * `myVector <- gen[row,]` clobbers the line just parsed from `con`.
#   * mixing the `%:%` nesting operator with `%do%` needs verification.
inputFile <- "../inst/extdata/toy.gen"
system.time({
con <- file(inputFile, open = "r")
out <- data.table(ID = 1:1000)  # assumes exactly 1000 samples -- TODO confirm
# Read the file one line (one SNP) at a time.
while (length(oneLine <- readLines(con, n = 1, warn = FALSE)) > 0) {
  myVector <- (strsplit(oneLine, " "))
  myVector <- as.vector(as.factor(unlist(myVector)))
  # Fields 6, 9, 12, ... start each sample's triple of values; emit a
  # hard call (0/1/2) when one of the three values is > 0.9, else NA.
  foreach(row = 1:nrow(gen)) %:% foreach(i = seq(6,((length(myVector)-2)),by=3), .combine = c) %do% {
    myVector <- gen[row,]
    j <- i + 1
    h <- i + 2
    one <- myVector[i]
    two <- myVector[j]
    three <- myVector[h]
    final <- NA
    if (one > 0.9) {
      final <- 0
    } else if (two > 0.9) {
      final <- 1
    } else if (three > 0.9) {
      final <- 2
    } else {
      final <- NA
    }
    final
  }
  # Append the calls as a new column named after field 3 (the SNP id?).
  out[, myVector[3] := vec, with = FALSE] -> out
  message(paste0(ncol(out)))  # progress: number of columns so far
  }
close(con)
})
#---------------------------
#---------------------------
| /R/.new.read.R | permissive | Chris1221/coRge | R | false | false | 849 | r | inputFile <- "../inst/extdata/toy.gen"
system.time({
con <- file(inputFile, open = "r")
out <- data.table(ID = 1:1000)
while (length(oneLine <- readLines(con, n = 1, warn = FALSE)) > 0) {
myVector <- (strsplit(oneLine, " "))
myVector <- as.vector(as.factor(unlist(myVector)))
foreach(row = 1:nrow(gen)) %:% foreach(i = seq(6,((length(myVector)-2)),by=3), .combine = c) %do% {
myVector <- gen[row,]
j <- i + 1
h <- i + 2
one <- myVector[i]
two <- myVector[j]
three <- myVector[h]
final <- NA
if (one > 0.9) {
final <- 0
} else if (two > 0.9) {
final <- 1
} else if (three > 0.9) {
final <- 2
} else {
final <- NA
}
final
}
out[, myVector[3] := vec, with = FALSE] -> out
message(paste0(ncol(out)))
}
close(con)
})
#---------------------------
|
/代码.R | no_license | liuxiaoxuan97/geographical-data-analysis | R | false | false | 3,019 | r | ||
library(tidyverse)
library(ggimage)

# TidyTuesday 2020 week 37: lines of dialogue from the TV show Friends.
friends <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2020/2020-09-08/friends.csv')

# The six main characters; only their lines are analysed below.
main_characters <- c("Rachel Green", "Monica Geller", "Phoebe Buffay", "Joey Tribbiani", "Chandler Bing", "Ross Geller")

# Median number of words per line of dialogue, by speaker and season.
# str_count(text, "\\w+") counts word tokens in each line; word(speaker)
# keeps only the first name for labelling.
words <- friends %>%
  filter(speaker %in% main_characters) %>%
  mutate(
    words_n = str_count(text, "\\w+"),
    speaker = word(speaker)
  ) %>%
  group_by(speaker, season) %>%
  summarise(words_median = median(words_n))

# One portrait image per character, matched by lower-cased first name.
# NOTE(review): assumes the PNG files exist under 2020-week37/images/.
images <- words %>%
  distinct(speaker) %>%
  mutate(img = paste0("2020-week37/images/", tolower(speaker), ".png"))

# One colour per character.
pal <- c("#EC5244", "#FADC4A", "#5CA1D1", "#9A6324", "#FEF590", "#4363d8")

ggplot(words) +
  # Vertical guide line per character, spanning all ten seasons.
  geom_segment(data = images, aes(x = speaker, xend = speaker, y = -0.5, yend = 10, color = speaker), size = 1) +
  # One bubble per season, sized by the median word count, with the value printed inside.
  geom_point(aes(x = speaker, y = season, size = words_median, color = speaker)) +
  geom_text(aes(x = speaker, y = season, label = words_median), color = "grey10", family = "IBM Plex Mono Bold", size = 5) +
  # Coloured disc behind each portrait at the top of the column.
  geom_point(data = images, aes(x = speaker, y = -0.4, color = speaker), size = 25) +
  geom_image(data = images, aes(x = speaker, y = -0.4, image = img), size = 0.095, asp = 0.95) +
  labs(title = "Median number of words\nin a line of dialogue by season") +
  # Rotated source credit on the right edge of the plot.
  annotate("text", x = 6.7, y = 9, hjust = 0, label = "Source: friends R package / Graphic: Georgios Karamanis", angle = 90, color = "grey97", family = "Friends", size = 3) +
  scale_x_discrete(position = "top") +
  # Reversed y axis puts season 1 at the top, season 10 at the bottom.
  scale_y_reverse(breaks = 1:10) +
  scale_size_continuous(range = c(10, 15)) +
  scale_color_manual(values = pal) +
  coord_cartesian(clip = "off") +
  theme_void() +
  theme(
    legend.position = "none",
    plot.background = element_rect(fill = "grey15", color = NA),
    axis.text.y = element_text(family = "Friends", color = "grey97"),
    plot.margin = margin(20, 20, 20, 20),
    plot.title = element_text(family = "Friends", color = "grey97", size = 20, margin = margin(0, 0, 20, 0), hjust = 0.5)
  )

# Save a timestamped 8x9-inch PNG into temp/.
ggsave(here::here("temp", paste0("friends-", format(Sys.time(), "%Y%m%d_%H%M%S"), ".png")), dpi = 320, width = 8, height = 9)
| /2020/2020-week37/friends.R | permissive | gkaramanis/tidytuesday | R | false | false | 2,168 | r | library(tidyverse)
library(ggimage)
friends <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2020/2020-09-08/friends.csv')
main_characters <- c("Rachel Green", "Monica Geller", "Phoebe Buffay", "Joey Tribbiani", "Chandler Bing", "Ross Geller")
words <- friends %>%
filter(speaker %in% main_characters) %>%
mutate(
words_n = str_count(text, "\\w+"),
speaker = word(speaker)
) %>%
group_by(speaker, season) %>%
summarise(words_median = median(words_n))
images <- words %>%
distinct(speaker) %>%
mutate(img = paste0("2020-week37/images/", tolower(speaker), ".png"))
pal <- c("#EC5244", "#FADC4A", "#5CA1D1", "#9A6324", "#FEF590", "#4363d8")
ggplot(words) +
geom_segment(data = images, aes(x = speaker, xend = speaker, y = -0.5, yend = 10, color = speaker), size = 1) +
geom_point(aes(x = speaker, y = season, size = words_median, color = speaker)) +
geom_text(aes(x = speaker, y = season, label = words_median), color = "grey10", family = "IBM Plex Mono Bold", size = 5) +
geom_point(data = images, aes(x = speaker, y = -0.4, color = speaker), size = 25) +
geom_image(data = images, aes(x = speaker, y = -0.4, image = img), size = 0.095, asp = 0.95) +
labs(title = "Median number of words\nin a line of dialogue by season") +
annotate("text", x = 6.7, y = 9, hjust = 0, label = "Source: friends R package / Graphic: Georgios Karamanis", angle = 90, color = "grey97", family = "Friends", size = 3) +
scale_x_discrete(position = "top") +
scale_y_reverse(breaks = 1:10) +
scale_size_continuous(range = c(10, 15)) +
scale_color_manual(values = pal) +
coord_cartesian(clip = "off") +
theme_void() +
theme(
legend.position = "none",
plot.background = element_rect(fill = "grey15", color = NA),
axis.text.y = element_text(family = "Friends", color = "grey97"),
plot.margin = margin(20, 20, 20, 20),
plot.title = element_text(family = "Friends", color = "grey97", size = 20, margin = margin(0, 0, 20, 0), hjust = 0.5)
)
ggsave(here::here("temp", paste0("friends-", format(Sys.time(), "%Y%m%d_%H%M%S"), ".png")), dpi = 320, width = 8, height = 9)
|
# RBinom: gWidgets dialog for drawing a Binomial random sample.
# Prompts for sample size ("Jumlah"), number of trials ("Banyaknya
# Percobaan") and success probability ("Peluang Sukses"), then generates
# the sample with rbinom(), prints it to the shared `Output` widget and
# plots its probability distribution with the parameters in the legend.
#
# NOTE(review): depends on objects defined elsewhere in the app --
# `Output` (a text widget), `main` (the main window) and `plotDistr()`
# -- TODO confirm where these are created.
RBinom <- function(h,...){
  # Dialog window with a vertical layout container.
  kua <- gwindow("Bangkitan Binomial")
  Big <- ggroup(container = kua,horizontal = F)
  # One framed numeric entry per Binomial parameter.
  g1 <- gframe("Jumlah:",container = Big)
  g2 <- gframe("Banyaknya Percobaan:",container = Big,expand=F)
  g3 <- gframe("Peluang Sukses:",container = Big,expand=F)
  nilai <- gedit(container = g1,coerce.with = as.numeric)   # sample size n
  coba <- gedit(container = g2,coerce.with = as.numeric)    # trials per draw
  sukses <- gedit(container = g3,coerce.with = as.numeric)  # success probability
  # Button row: Cancel hides the dialog and refocuses the main window.
  Big1 <- ggroup(container = Big)
  canc=gbutton("Cancel",cont=Big1,expand=T)
  addHandlerChanged(canc, handler= function(h,...){
    visible(kua)= FALSE
    focus (main)
  })
  # OK: draw the sample, echo it to Output, plot the pmf in a new
  # graphics device, then hide the dialog.
  ok=gbutton("OK",cont=Big1,expand=T)
  addHandlerChanged(ok,handler=function(h,...){
    pelu <- rbinom(svalue(nilai),svalue(coba),svalue(sukses))
    insert(Output,"")
    insert(Output,capture.output(cbind(X=pelu)))
    win.graph()  # opens a new graphics window
    plotDistr(pelu, dbinom(pelu,size=svalue(coba),prob=svalue(sukses)),
              xlab="Banyaknya Sukses",ylab="Peluang",main="Distribusi Binomial",discrete = T)
    legend('topright',legend =c(paste('Sampel =',svalue(nilai)),
                                paste('Banyak Percobaan =',svalue(coba)),
                                paste('Peluang Sukses =',svalue(sukses))),bty = 'n')
    visible(kua)= FALSE
    focus(main)
  })
}
| /GUI for Statistical Distribution/Code R/RBinom.R | no_license | rauzansumara/myproject | R | false | false | 1,241 | r | RBinom <- function(h,...){
kua <- gwindow("Bangkitan Binomial")
Big <- ggroup(container = kua,horizontal = F)
g1 <- gframe("Jumlah:",container = Big)
g2 <- gframe("Banyaknya Percobaan:",container = Big,expand=F)
g3 <- gframe("Peluang Sukses:",container = Big,expand=F)
nilai <- gedit(container = g1,coerce.with = as.numeric)
coba <- gedit(container = g2,coerce.with = as.numeric)
sukses <- gedit(container = g3,coerce.with = as.numeric)
Big1 <- ggroup(container = Big)
canc=gbutton("Cancel",cont=Big1,expand=T)
addHandlerChanged(canc, handler= function(h,...){
visible(kua)= FALSE
focus (main)
})
ok=gbutton("OK",cont=Big1,expand=T)
addHandlerChanged(ok,handler=function(h,...){
pelu <- rbinom(svalue(nilai),svalue(coba),svalue(sukses))
insert(Output,"")
insert(Output,capture.output(cbind(X=pelu)))
win.graph()
plotDistr(pelu, dbinom(pelu,size=svalue(coba),prob=svalue(sukses)),
xlab="Banyaknya Sukses",ylab="Peluang",main="Distribusi Binomial",discrete = T)
legend('topright',legend =c(paste('Sampel =',svalue(nilai)),
paste('Banyak Percobaan =',svalue(coba)),
paste('Peluang Sukses =',svalue(sukses))),bty = 'n')
visible(kua)= FALSE
focus(main)
})
}
|
## Read Data.R -- load the NYT covid-19 US national time series.

# Store current working directory
projdir <- getwd()
projdir

# Read the national-level file directly by path instead of changing the
# working directory twice. The previous setwd() dance was fragile: if
# read.csv() errored, the session was left inside the data folder.
US <- read.csv(file.path(projdir, "covid-19-data", "us.csv"))
| /Read Data.R | no_license | katrinafett/Covid_19_Project | R | false | false | 173 | r | # Store current working directory
projdir <- getwd()
projdir
# Change working directory to covid-19 Folder
setwd("./covid-19-data/")
US <- read.csv("us.csv")
setwd(projdir)
|
rm(list = ls())
setwd("C:/Users/HP/Desktop/project2")
#Loading required libraries
#Help for exploring missing data dependencies with minimal deviation.
library(naniar)
#Calculates correlation of variables and displays the results graphically.
library(corrgram)
#Offers a powerful graphics language for creating elegant and complex plots.
library(ggplot2)
#Contains functions to streamline the model training process for complex regressions.
library(caret)
#This package includes functions and data accompanying the book "Data Mining with R.
library(DMwR)
#Recursive partitioning for classification, regression and survival trees.
library(rpart)
library("rpart.plot")
#For random forest
library(randomForest)
#For impact of uncertainities
library(usdm)
#For easily combining clean datasets
library(DataCombine)
#For DecisionTree
library(inTrees)
#collection of R packages designed for data science
library('tidyverse')
#For better visualizations
library('hrbrthemes')
#For better visualizations
library('viridis')
#################################################################################################
# Loading Data
training_data = read.csv('day.csv',header = T,na.strings = c(""," ","NA"))
bckup = training_data
#################################################################################################
# Exploratory Data Analysis
################################################################################################
#Getting the view and dimension of data
head(training_data,5)
dim(training_data)
# Structure of our dataset
str(training_data)
# Summary of the data
summary(training_data)
# Extracting datetime
training_data$dteday <- format(as.Date(training_data$dteday,format="%Y-%m-%d"), "%d")
# Removing instant as it's just an indexing id
training_data$instant <- NULL
# Frequency of each unique value
apply(training_data, 2,function(x) length(table(x)))
# Distribution of cnt variable that is also our target variable
hist((training_data$cnt))
# Relevant type conversion
cat_var = c('dteday','season', 'yr', 'mnth', 'holiday', 'weekday', 'workingday','weathersit')
num_var = c('temp', 'atemp', 'hum', 'windspeed','casual','registered','cnt')
# Convert the columns of data frame `df` named in `var` using the coercion
# function `type` (e.g. factor, as.numeric) and return the modified frame.
typ_conv <- function(df, var, type) {
  for (column_name in var) {
    df[[column_name]] <- type(df[[column_name]])
  }
  df
}
training_data = typ_conv(training_data,cat_var, factor)
############################################################################################
# Checking for missing values
############################################################################################
apply(training_data, 2, function(x) {sum(is.na(x))})
# Data has no missing values
############################################################################################
# Data Visualization
############################################################################################
# Draw a boxplot of one numeric column grouped (and coloured) by one
# categorical column, with the raw observations overlaid as jittered points.
# Both column arguments are COLUMN NAMES given as strings, because the mapping
# goes through ggplot2::aes_string(). `dataframe` defaults to the global
# `training_data` object defined earlier in this script.
# NOTE(review): aes_string() is deprecated in current ggplot2 -- consider
# aes(.data[[...]]) if this code is ever modernised.
box_plot = function(numerical_variables, categorical_variables, dataframe=training_data){
dataframe %>% #Chaining
ggplot(aes_string(x = categorical_variables, y = numerical_variables, fill = categorical_variables)) +
geom_boxplot() +
# Jittered raw points show the underlying distribution behind each box.
geom_jitter(color='black', size=0.4, alpha=0.9) +
theme_ipsum() +
theme(
legend.position = 'top',
plot.title = element_text(size = 9)
) +
ggtitle(paste("BoxPlot with", categorical_variables, " & ",numerical_variables))
}
box_plot('temp','season',training_data)
box_plot('hum','season')
box_plot('windspeed','weathersit')
# Combined univariate view of one numeric column: a slim horizontal boxplot
# on top of a histogram sharing the same x-range. Uses base graphics
# (layout/par), so it draws on the current device as a side effect.
# `numerical_variables` is a column name string; `dataframe` defaults to the
# global `training_data`.
box_hist_plot = function(numerical_variables, dataframe=training_data){
numerical_col = dataframe[,numerical_variables]
#For splitting the screen
# 1:8 height ratio -- thin boxplot strip above, large histogram below.
layout(mat = matrix(c(1,2),nrow = 2, ncol = 1, byrow = TRUE), heights = c(1,8))
#Boxplot formation
par(mar=c(0, 3.1, 1.1, 2.1)) #Margins
# xaxt="n" suppresses the x axis: the histogram below provides it.
boxplot(numerical_col , horizontal=TRUE , ylim=c(min(numerical_col),max(numerical_col)), xaxt="n" , col=rgb(0.8,0.8,0,0.5) , frame=F)
par(mar=c(4, 3.1, 1.1, 2.1))
hist(numerical_col , breaks=40 , col=rgb(0.2,0.8,0.5,0.5) , border=F , main="" , xlab=paste("Variable value : ",numerical_variables), xlim=c(min(numerical_col),max(numerical_col)))
}
box_hist_plot('temp')
box_hist_plot('atemp')
box_hist_plot('hum')
box_hist_plot('windspeed')
box_hist_plot('casual')
box_hist_plot('registered')
box_hist_plot('cnt')
# Stacked bar chart of `y_col` by `x_col`, with stacks coloured by `fill_col`
# (viridis palette). All three arguments are column name strings mapped via
# ggplot2::aes_string(). Unlike box_plot(), this function always reads the
# global `training_data` -- it takes no dataframe argument.
bar_plot = function(x_col, y_col, fill_col){
training_data %>%
ggplot(aes_string(x = x_col, y = y_col, fill = fill_col))+
# stat='identity' plots the y values as-is instead of counting rows.
geom_bar(position='stack', stat = 'identity')+
scale_fill_viridis(discrete = T)+
ggtitle(paste("Bar Plot of",x_col,"on X-Axis,",y_col,"on Y-Axis &", fill_col,"stacked bars."))+
theme_dark()+
xlab("")
}
bar_plot('season','hum','weathersit')
bar_plot('mnth','windspeed','weathersit')
bar_plot('season','cnt','weathersit')
bar_plot('weathersit','temp','season')
bar_plot('season','temp','weathersit')
########################################################################################
# Outlier Analysis
########################################################################################
# Imputing outliers
for(i in c('temp', 'atemp', 'hum', 'windspeed')){
print(i)
outv = training_data[,i][training_data[,i] %in% boxplot.stats(training_data[,i])$out]
print(length(outv))
training_data[,i][training_data[,i] %in% outv] = NA
}
sum(is.na(training_data))
training_data$hum[is.na(training_data$hum)] = mean(training_data$hum,na.rm = T)
training_data$windspeed[is.na(training_data$windspeed)] = mean(training_data$windspeed, na.rm = T)
# Cross Verifying
sum(is.na(training_data))
##########################################################################################
# Feature Selection
##########################################################################################
num_var = c('temp', 'atemp', 'hum', 'windspeed','casual','registered','cnt')
corrgram(training_data[,num_var],
order = F,
#As we don't want to reorder
upper.panel=panel.pie,
lower.panel=panel.shade,
text.panel=panel.txt,
main = 'CORRELATION PLOT')
#Positive correlations are displayed in blue and negative correlations in red color.
#Color intensity and the size of the circle are proportional to the correlation coefficients.
#training_data = subset(training_data, select=-c(atemp,casual,registered))
#Chi-Square test
chi_cat_var = c('dteday','season', 'yr', 'mnth', 'holiday', 'weekday', 'workingday','weathersit')
chi_cat_df = training_data[,cat_var]
for (i in chi_cat_var){
for (j in chi_cat_var){
print(i)
print(j)
print(chisq.test(table(chi_cat_df[,i], chi_cat_df[,j]))$p.value)
}
}
########################################################################################
# check multicollearity
########################################################################################
vif(training_data)
training_data = subset(training_data, select=-c(atemp,casual,registered))
training_data = subset(training_data, select=-c(holiday, workingday,dteday))
#Getting final columns
colnames(training_data)
########################################################################################
# Sampling of Data
########################################################################################
#To produce the same result for different instances.
set.seed(17)
t_index = sample(1:nrow(training_data), 0.8*nrow(training_data))
train = training_data[t_index,]
test = training_data[-t_index,]
# MAPE
# Mean Absolute Percentage Error, in percent, between actual and predicted
# values. Both arguments are numeric vectors of equal length; `actual` must
# not contain zeros (division by actual).
mape <- function(actual, predict) {
  abs_pct_error <- abs((actual - predict) / actual)
  mean(abs_pct_error) * 100
}
########################################################################################
# Linear Regression
########################################################################################
dummy = dummyVars(~., training_data)
dummy_df = data.frame(predict(dummy, training_data))
set.seed(100)
dum_index = sample(1:nrow(dummy_df), 0.8*nrow(dummy_df))
dum_train_df = dummy_df[dum_index,]
dum_test_df = dummy_df[-dum_index,]
lr_model = lm(cnt ~. , data = dum_train_df)
summary(lr_model)
# Forecasting
LR_predict_train = predict(lr_model, dum_train_df[,-32])
plot(dum_train_df$cnt, LR_predict_train,
xlab = 'Actual values',
ylab = 'Predicted values',
main = 'Linear Regression Model')
# Evaluation
postResample(LR_predict_train, dum_train_df$cnt)
mape(dum_train_df$cnt, LR_predict_train)
# Forecasting for test
LR_predict_test = predict(lr_model, dum_test_df[,-32])
plot(dum_test_df$cnt, LR_predict_test,
xlab = 'Actual values',
ylab = 'Predicted values',
main = 'Linear Regression Model')
# Evaluation
postResample(LR_predict_test, dum_test_df$cnt)
mape(dum_test_df$cnt, LR_predict_test)
#########################################################################################
# Decision Tree
#########################################################################################
set.seed(101)
# Model Development
dt_model = rpart(cnt~. , data = train, method = "anova")
summary(dt_model)
plt = rpart.plot(dt_model, type = 5, digits = 2, fallen.leaves = TRUE)
# Forecasting on Train data
DT_Predict_train = predict(dt_model, train[,-9])
plot(train$cnt, DT_Predict_train,
xlab = 'Actual values',
ylab = 'Predicted values',
main = 'Decision Tree Model')
# Evaluation
postResample(DT_Predict_train, train$cnt)
mape(train$cnt, DT_Predict_train)
# Forecasting on Test data
DT_Predict_test = predict(dt_model, test[,-9])
plot(test$cnt, DT_Predict_test,
xlab = 'Actual values',
ylab = 'Predicted values',
main = 'Decision Tree Model')
# Evaluation
postResample(DT_Predict_test, test$cnt)
mape(test$cnt, DT_Predict_test)
#########################################################################################
# Random Forest
#########################################################################################
set.seed(102)
rf_model = randomForest(cnt ~. , train, importance = TRUE, ntree = 500)
rf_model
# Plotting of error
plot(rf_model)
# Importance of variables
varImpPlot(rf_model)
#Plotting using Random Forest model
RF_predict_train = predict(rf_model, train[,-9])
plot(train$cnt, RF_predict_train,
xlab = 'Actual values',
ylab = 'Predicted values',
main = 'Random Forest model')
# Train result
postResample(RF_predict_train, train$cnt)
mape(train$cnt, RF_predict_train)
#Plotting predict test data using RF model
RF_predict_test = predict(rf_model, test[,-9])
plot(test$cnt, RF_predict_test,
xlab = 'Actual values',
ylab = 'Predicted values',
main = 'Random Forest model')
#Test Result
postResample(RF_predict_test, test$cnt)
mape(test$cnt, RF_predict_test)
| /Bike Rentals R.R | no_license | Kushagra1718/Bike-Rentals | R | false | false | 10,981 | r | rm(list = ls())
setwd("C:/Users/HP/Desktop/project2")
#Loading required libraries
#Help for exploring missing data dependencies with minimal deviation.
library(naniar)
#Calculates correlation of variables and displays the results graphically.
library(corrgram)
#Offers a powerful graphics language for creating elegant and complex plots.
library(ggplot2)
#Contains functions to streamline the model training process for complex regressions.
library(caret)
#This package includes functions and data accompanying the book "Data Mining with R.
library(DMwR)
#Recursive partitioning for classification, regression and survival trees.
library(rpart)
library("rpart.plot")
#For random forest
library(randomForest)
#For impact of uncertainities
library(usdm)
#For easily combining clean datasets
library(DataCombine)
#For DecisionTree
library(inTrees)
#collection of R packages designed for data science
library('tidyverse')
#For better visualizations
library('hrbrthemes')
#For better visualizations
library('viridis')
#################################################################################################
# Loading Data
training_data = read.csv('day.csv',header = T,na.strings = c(""," ","NA"))
bckup = training_data
#################################################################################################
# Exploratory Data Analysis
################################################################################################
#Getting the view and dimension of data
head(training_data,5)
dim(training_data)
# Structure of our dataset
str(training_data)
# Summary of the data
summary(training_data)
# Extracting datetime
training_data$dteday <- format(as.Date(training_data$dteday,format="%Y-%m-%d"), "%d")
# Removing instant as it's just an indexing id
training_data$instant <- NULL
# Frequency of each unique value
apply(training_data, 2,function(x) length(table(x)))
# Distribution of cnt variable that is also our target variable
hist((training_data$cnt))
# Releant type conversion
cat_var = c('dteday','season', 'yr', 'mnth', 'holiday', 'weekday', 'workingday','weathersit')
num_var = c('temp', 'atemp', 'hum', 'windspeed','casual','registered','cnt')
# Data type conversion function
typ_conv = function(df,var,type){
df[var] = lapply(df[var], type)
return(df)
}
training_data = typ_conv(training_data,cat_var, factor)
############################################################################################
# Checking for missing values
############################################################################################
apply(training_data, 2, function(x) {sum(is.na(x))})
# Data has no missing values
############################################################################################
# Data Visualization
############################################################################################
box_plot = function(numerical_variables, categorical_variables, dataframe=training_data){
dataframe %>% #Chaining
ggplot(aes_string(x = categorical_variables, y = numerical_variables, fill = categorical_variables)) +
geom_boxplot() +
geom_jitter(color='black', size=0.4, alpha=0.9) +
theme_ipsum() +
theme(
legend.position = 'top',
plot.title = element_text(size = 9)
) +
ggtitle(paste("BoxPlot with", categorical_variables, " & ",numerical_variables))
}
box_plot('temp','season',training_data)
box_plot('hum','season')
box_plot('windspeed','weathersit')
box_hist_plot = function(numerical_variables, dataframe=training_data){
numerical_col = dataframe[,numerical_variables]
#For splitting the screen
layout(mat = matrix(c(1,2),nrow = 2, ncol = 1, byrow = TRUE), heights = c(1,8))
#Boxplot formation
par(mar=c(0, 3.1, 1.1, 2.1)) #Margins
boxplot(numerical_col , horizontal=TRUE , ylim=c(min(numerical_col),max(numerical_col)), xaxt="n" , col=rgb(0.8,0.8,0,0.5) , frame=F)
par(mar=c(4, 3.1, 1.1, 2.1))
hist(numerical_col , breaks=40 , col=rgb(0.2,0.8,0.5,0.5) , border=F , main="" , xlab=paste("Variable value : ",numerical_variables), xlim=c(min(numerical_col),max(numerical_col)))
}
box_hist_plot('temp')
box_hist_plot('atemp')
box_hist_plot('hum')
box_hist_plot('windspeed')
box_hist_plot('casual')
box_hist_plot('registered')
box_hist_plot('cnt')
bar_plot = function(x_col, y_col, fill_col){
training_data %>%
ggplot(aes_string(x = x_col, y = y_col, fill = fill_col))+
geom_bar(position='stack', stat = 'identity')+
scale_fill_viridis(discrete = T)+
ggtitle(paste("Bar Plot of",x_col,"on X-Axis,",y_col,"on Y-Axis &", fill_col,"stacked bars."))+
theme_dark()+
xlab("")
}
bar_plot('season','hum','weathersit')
bar_plot('mnth','windspeed','weathersit')
bar_plot('season','cnt','weathersit')
bar_plot('weathersit','temp','season')
bar_plot('season','temp','weathersit')
########################################################################################
# Outlier Analysis
########################################################################################
# Imputing outliers
for(i in c('temp', 'atemp', 'hum', 'windspeed')){
print(i)
outv = training_data[,i][training_data[,i] %in% boxplot.stats(training_data[,i])$out]
print(length(outv))
training_data[,i][training_data[,i] %in% outv] = NA
}
sum(is.na(training_data))
training_data$hum[is.na(training_data$hum)] = mean(training_data$hum,na.rm = T)
training_data$windspeed[is.na(training_data$windspeed)] = mean(training_data$windspeed, na.rm = T)
# Cross Verifying
sum(is.na(training_data))
##########################################################################################
# Feacture Selection
##########################################################################################
num_var = c('temp', 'atemp', 'hum', 'windspeed','casual','registered','cnt')
corrgram(training_data[,num_var],
order = F,
#As we don't want to reorder
upper.panel=panel.pie,
lower.panel=panel.shade,
text.panel=panel.txt,
main = 'CORRELATION PLOT')
#Positive correlations are displayed in blue and negative correlations in red color.
#Color intensity and the size of the circle are proportional to the correlation coefficients.
#training_data = subset(training_data, select=-c(atemp,casual,registered))
#Chi-Square test
chi_cat_var = c('dteday','season', 'yr', 'mnth', 'holiday', 'weekday', 'workingday','weathersit')
chi_cat_df = training_data[,cat_var]
for (i in chi_cat_var){
for (j in chi_cat_var){
print(i)
print(j)
print(chisq.test(table(chi_cat_df[,i], chi_cat_df[,j]))$p.value)
}
}
########################################################################################
# check multicollearity
########################################################################################
vif(training_data)
training_data = subset(training_data, select=-c(atemp,casual,registered))
training_data = subset(training_data, select=-c(holiday, workingday,dteday))
#Getting final columns
colnames(training_data)
########################################################################################
# Sampling of Data
########################################################################################
#To produce the same result for different instances.
set.seed(17)
t_index = sample(1:nrow(training_data), 0.8*nrow(training_data))
train = training_data[t_index,]
test = training_data[-t_index,]
# MAPE
mape = function(actual, predict){
mean(abs((actual-predict)/actual))*100
}
########################################################################################
# Linear Regression
########################################################################################
dummy = dummyVars(~., training_data)
dummy_df = data.frame(predict(dummy, training_data))
set.seed(100)
dum_index = sample(1:nrow(dummy_df), 0.8*nrow(dummy_df))
dum_train_df = dummy_df[dum_index,]
dum_test_df = dummy_df[-dum_index,]
lr_model = lm(cnt ~. , data = dum_train_df)
summary(lr_model)
# Forecasting
LR_predict_train = predict(lr_model, dum_train_df[,-32])
plot(dum_train_df$cnt, LR_predict_train,
xlab = 'Actual values',
ylab = 'Predicted values',
main = 'Linear Regression Model')
# Evaluation
postResample(LR_predict_train, dum_train_df$cnt)
mape(dum_train_df$cnt, LR_predict_train)
# Forecasting for test
LR_predict_test = predict(lr_model, dum_test_df[,-32])
plot(dum_test_df$cnt, LR_predict_test,
xlab = 'Actual values',
ylab = 'Predicted values',
main = 'Linear Regression Model')
# Evaluation
postResample(LR_predict_test, dum_test_df$cnt)
mape(dum_test_df$cnt, LR_predict_test)
#########################################################################################
# Decision Tree
#########################################################################################
set.seed(101)
# Model Development
dt_model = rpart(cnt~. , data = train, method = "anova")
summary(dt_model)
plt = rpart.plot(dt_model, type = 5, digits = 2, fallen.leaves = TRUE)
# Forecasting on Train data
DT_Predict_train = predict(dt_model, train[,-9])
plot(train$cnt, DT_Predict_train,
xlab = 'Actual values',
ylab = 'Predicted values',
main = 'Decision Tree Model')
# Evaluation
postResample(DT_Predict_train, train$cnt)
mape(train$cnt, DT_Predict_train)
# Forecasing on Test data
DT_Predict_test = predict(dt_model, test[,-9])
plot(test$cnt, DT_Predict_test,
xlab = 'Actual values',
ylab = 'Predicted values',
main = 'Decision Tree Model')
# Evaluation
postResample(DT_Predict_test, test$cnt)
mape(test$cnt, DT_Predict_test)
#########################################################################################
# Random Forest
#########################################################################################
set.seed(102)
rf_model = randomForest(cnt ~. , train, importance = TRUE, ntree = 500)
rf_model
# Plotting of error
plot(rf_model)
# Importance of variables
varImpPlot(rf_model)
#Plotting using Random Forest model
RF_predict_train = predict(rf_model, train[,-9])
plot(train$cnt, RF_predict_train,
xlab = 'Actual values',
ylab = 'Predicted values',
main = 'Random Forest model')
# Train result
postResample(RF_predict_train, train$cnt)
mape(train$cnt, RF_predict_train)
#Plotting predict test data using RF model
RF_predict_test = predict(rf_model, test[,-9])
plot(test$cnt, RF_predict_test,
xlab = 'Actual values',
ylab = 'Predicted values',
main = 'Random Forest model')
#Test Result
postResample(RF_predict_test, test$cnt)
mape(test$cnt, RF_predict_test)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cube.R
\name{print.Importances}
\alias{print.Importances}
\title{Prints an Importances object.}
\usage{
\method{print}{Importances}(x, ...)
}
\arguments{
\item{x}{The \code{Importances} object that will be printed.}
\item{...}{Ignored parameters.}
}
\value{
Sparsity value
}
\description{
Prints an \code{Importances} object.
}
\examples{
data("sales")
cube = generateCube(sales, columns = list(time = c("month", "year"),
location = c("state"), product = "product"), valueColumn = "amount")
importances = importance(cube)
print(importances)
}
\seealso{
\code{\link{importance}}
}
\author{
Michael Scholz \email{michael.scholz@th-deg.de}
}
| /man/print.Importances.Rd | no_license | cran/hypercube | R | false | true | 726 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cube.R
\name{print.Importances}
\alias{print.Importances}
\title{Prints an Importances object.}
\usage{
\method{print}{Importances}(x, ...)
}
\arguments{
\item{x}{The \code{Importances} object that will be printed.}
\item{...}{Ignored parameters.}
}
\value{
Sparsity value
}
\description{
Prints an \code{Importances} object.
}
\examples{
data("sales")
cube = generateCube(sales, columns = list(time = c("month", "year"),
location = c("state"), product = "product"), valueColumn = "amount")
importances = importance(cube)
print(importances)
}
\seealso{
\code{\link{importance}}
}
\author{
Michael Scholz \email{michael.scholz@th-deg.de}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.R
\name{get_color_scheme}
\alias{get_color_scheme}
\title{Get colors according to the current colorScheme of the IOHanalyzer}
\usage{
get_color_scheme(algnames_in)
}
\arguments{
\item{algnames_in}{List of algorithms for which to get colors}
}
\description{
Get colors according to the current colorScheme of the IOHanalyzer
}
\examples{
get_color_scheme(get_algId(dsl))
}
| /man/get_color_scheme.Rd | permissive | nojhan/IOHanalyzer | R | false | true | 455 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.R
\name{get_color_scheme}
\alias{get_color_scheme}
\title{Get colors according to the current colorScheme of the IOHanalyzer}
\usage{
get_color_scheme(algnames_in)
}
\arguments{
\item{algnames_in}{List of algorithms for which to get colors}
}
\description{
Get colors according to the current colorScheme of the IOHanalyzer
}
\examples{
get_color_scheme(get_algId(dsl))
}
|
library(RFOC)
### Name: JMAT
### Title: Vertical Rotation matrix
### Aliases: JMAT
### Keywords: misc
### ** Examples
phi = 18
MAT = JMAT(phi)
v1 = c(1,1,0)
v2 = MAT
| /data/genthat_extracted_code/RFOC/examples/JMAT.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 181 | r | library(RFOC)
### Name: JMAT
### Title: Vertical Rotation matrix
### Aliases: JMAT
### Keywords: misc
### ** Examples
phi = 18
MAT = JMAT(phi)
v1 = c(1,1,0)
v2 = MAT
|
library(testthat)
library(syntaxr)
test_check("syntaxr")
| /tests/testthat.R | permissive | cran/syntaxr | R | false | false | 62 | r | library(testthat)
library(syntaxr)
test_check("syntaxr")
|
#' @title resid
#'
#' @keywords internal
#'
resid <- function(dat, lab, useMean = T) {
if (is.null(dim(lab))) {
mod <- model.matrix(~ 1 + lab)
} else {
mod <- lab
}
ne <- nonEstimable(mod)
if (!is.null(ne)) {
message("Coefficients not estimable:", paste(ne, collapse = " "), "\n")
mod <- mod[, -match(ne, colnames(mod))]
}
n <- dim(dat)[2]
Id <- diag(n)
out <- dat %*% (Id - mod %*% solve(t(mod) %*% mod) %*% t(mod))
colnames(out) <- colnames(dat)
if (useMean) {
out <- sweep(out, 1, apply(dat, 1, mean), "+")
}
out
} | /R/resid.R | no_license | milescsmith/PLIER | R | false | false | 565 | r | #' @title resid
#'
#' @keywords internal
#'
resid <- function(dat, lab, useMean = T) {
# Residualize each row of `dat` against the design given by `lab`.
# When `lab` has no dim() it is treated as a label/grouping vector and
# expanded to an intercept-plus-label design matrix; otherwise it is used
# directly as the design matrix.
if (is.null(dim(lab))) {
mod <- model.matrix(~ 1 + lab)
} else {
mod <- lab
}
# Drop non-estimable design columns before inverting t(mod) %*% mod.
# NOTE(review): nonEstimable() is not defined in this file -- presumably
# limma's nonEstimable(); confirm the import.
ne <- nonEstimable(mod)
if (!is.null(ne)) {
message("Coefficients not estimable:", paste(ne, collapse = " "), "\n")
mod <- mod[, -match(ne, colnames(mod))]
}
# Project out the fitted part: out = dat %*% (I - X (X'X)^{-1} X'),
# i.e. least-squares residuals computed across the columns of `dat`
# (n = number of columns/samples; mod has one row per column of dat).
n <- dim(dat)[2]
Id <- diag(n)
out <- dat %*% (Id - mod %*% solve(t(mod) %*% mod) %*% t(mod))
colnames(out) <- colnames(dat)
# Optionally add each row's mean back so the result stays on the original
# scale instead of being centered at zero.
if (useMean) {
out <- sweep(out, 1, apply(dat, 1, mean), "+")
}
out
} |
# Run the PhantomJS binary with the given command-line arguments.
# Returns NULL when PhantomJS cannot be located (find_phantom() has already
# messaged the user in that case); otherwise returns system2()'s result.
phantom_run <- function(args, wait = TRUE) {
  executable <- find_phantom()
  if (is.null(executable)) {
    return(NULL)
  }
  # Coerce args to character so numeric/logical values are passed verbatim.
  system2(executable, args = as.character(args), wait = wait)
}
# Find PhantomJS from PATH, APPDATA, system.file('webshot'), ~/bin, etc
# Locate the PhantomJS executable: first via PATH (Sys.which), then in the
# per-user/package candidate directories from phantom_paths(). Returns the
# (possibly tilde-expanded) path, or NULL with an informative message when
# nothing is found.
find_phantom <- function() {
  path <- Sys.which("phantomjs")
  if (path != "") {
    return(path)
  }
  binary_name <- if (is_windows()) "phantomjs.exe" else "phantomjs"
  path <- ""
  for (candidate_dir in phantom_paths()) {
    candidate <- file.path(candidate_dir, binary_name)
    # "-x" checks that the file exists and is executable.
    if (utils::file_test("-x", candidate)) {
      path <- candidate
      break
    }
  }
  if (path == "") {
    # Deliberately message() rather than stop(): CRAN check machines may lack
    # PhantomJS (or be unable to install it, e.g. Solaris), and erroring here
    # would turn dependent packages' R CMD check runs into ERRORs.
    message(
      "PhantomJS not found. You can install it with webshot::install_phantomjs(). ",
      "If it is installed, please make sure the phantomjs executable ",
      "can be found via the PATH variable."
    )
    return(NULL)
  }
  path.expand(path)
}
#' Install PhantomJS
#'
#' Download the zip package, unzip it, and copy the executable to a system
#' directory in which \pkg{webshot} can look for the PhantomJS executable.
#'
#' This function was designed primarily to help Windows users since it is
#' cumbersome to modify the \code{PATH} variable. Mac OS X users may install
#' PhantomJS via Homebrew. If you download the package from the PhantomJS
#' website instead, please make sure the executable can be found via the
#' \code{PATH} variable.
#'
#' On Windows, the directory specified by the environment variable
#' \code{APPDATA} is used to store \file{phantomjs.exe}. On OS X, the directory
#' \file{~/Library/Application Support} is used. On other platforms (such as
#' Linux), the directory \file{~/bin} is used. If these directories are not
#' writable, the directory \file{PhantomJS} under the installation directory of
#' the \pkg{webshot} package will be tried. If this directory still fails, you
#' will have to install PhantomJS by yourself.
#' @param version The version number of PhantomJS.
#' @param baseURL The base URL for the location of PhantomJS binaries for
#' download. If the default download site is unavailable, you may specify an
#' alternative mirror, such as
#' \code{"https://bitbucket.org/ariya/phantomjs/downloads/"}.
#' @return \code{NULL} (the executable is written to a system directory).
#' @export
install_phantomjs <- function(version = '2.1.1',
baseURL = 'https://github.com/wch/webshot/releases/download/v0.3.1/') {
# Normalise the base URL so paste0(baseURL, zipfile) forms a valid URL below.
if (!grepl("/$", baseURL))
baseURL <- paste0(baseURL, "/")
# Download and extract inside tempdir(); on.exit() restores the caller's
# working directory even if the download or extraction fails part-way.
owd <- setwd(tempdir())
on.exit(setwd(owd), add = TRUE)
# Pick the platform-specific archive name, fetch it, extract it, and record
# the path of the extracted executable in `exec`.
if (is_windows()) {
zipfile <- sprintf('phantomjs-%s-windows.zip', version)
download(paste0(baseURL, zipfile), zipfile, mode = 'wb')
utils::unzip(zipfile)
zipdir <- sub('.zip$', '', zipfile)
exec <- file.path(zipdir, 'bin', 'phantomjs.exe')
} else if (is_osx()) {
zipfile <- sprintf('phantomjs-%s-macosx.zip', version)
download(paste0(baseURL, zipfile), zipfile, mode = 'wb')
utils::unzip(zipfile)
zipdir <- sub('.zip$', '', zipfile)
exec <- file.path(zipdir, 'bin', 'phantomjs')
Sys.chmod(exec, '0755') # chmod +x
} else if (is_linux()) {
# Linux archives are split by architecture (x86_64 vs i686).
zipfile <- sprintf(
'phantomjs-%s-linux-%s.tar.bz2', version,
if (grepl('64', Sys.info()[['machine']])) 'x86_64' else 'i686'
)
download(paste0(baseURL, zipfile), zipfile, mode = 'wb')
utils::untar(zipfile)
zipdir <- sub('.tar.bz2$', '', zipfile)
exec <- file.path(zipdir, 'bin', 'phantomjs')
Sys.chmod(exec, '0755') # chmod +x
} else {
# Unsupported platform, like Solaris
message("Sorry, this platform is not supported.")
return(invisible())
}
# Try each candidate install directory in order until one copy succeeds.
success <- FALSE
dirs <- phantom_paths()
for (destdir in dirs) {
dir.create(destdir, showWarnings = FALSE)
success <- file.copy(exec, destdir, overwrite = TRUE)
if (success) break
}
# Remove the downloaded archive and the extracted tree regardless of outcome.
unlink(c(zipdir, zipfile), recursive = TRUE)
if (!success) stop(
'Unable to install PhantomJS to any of these dirs: ',
paste(dirs, collapse = ', ')
)
message('phantomjs has been installed to ', normalizePath(destdir))
invisible()
}
# Possible locations of the PhantomJS executable
# Candidate directories where the PhantomJS executable may live: a per-user
# application directory (platform dependent) plus the webshot package's own
# 'PhantomJS' folder.
phantom_paths <- function() {
  if (is_windows()) {
    user_dir <- Sys.getenv("APPDATA", "")
    # If APPDATA is unset or missing, this `if` with no `else` yields NULL,
    # which c() silently drops below.
    user_dir <- if (dir_exists(user_dir)) file.path(user_dir, "PhantomJS")
  } else if (is_osx()) {
    user_dir <- "~/Library/Application Support"
    user_dir <- if (dir_exists(user_dir)) file.path(user_dir, "PhantomJS")
  } else {
    user_dir <- "~/bin"
  }
  c(user_dir, system.file("PhantomJS", package = "webshot"))
}
dir_exists <- function(path) utils::file_test('-d', path)
# Remove all NULL elements from a list (or vector), preserving names of the
# remaining elements.
dropNulls <- function(x) {
  Filter(function(element) !is.null(element), x)
}
# Platform-detection helpers used to choose binaries and install locations.
is_windows <- function() {
  .Platform$OS.type == "windows"
}
is_osx <- function() {
  Sys.info()[["sysname"]] == "Darwin"
}
is_linux <- function() {
  Sys.info()[["sysname"]] == "Linux"
}
is_solaris <- function() {
  Sys.info()[["sysname"]] == "SunOS"
}
# Find an available TCP port (to launch Shiny apps). If `port` is supplied it
# is returned as-is; otherwise up to 20 random ports in 3000..8000 are probed
# by briefly starting and stopping an httpuv server on each.
available_port <- function(port) {
  if (!is.null(port)) {
    return(port)
  }
  for (candidate in sample(3000:8000, 20)) {
    probe <- try(httpuv::startServer("127.0.0.1", candidate, list()), silent = TRUE)
    if (!inherits(probe, "try-error")) {
      # The port was free: release it immediately and hand it back.
      httpuv::stopServer(probe)
      return(candidate)
    }
  }
  stop("Cannot find an available port")
}
# Wrapper for utils::download.file which works around a problem with R 3.3.0 and
# 3.3.1. In these versions, download.file(method="libcurl") issues a HEAD
# request to check if a file is available, before sending the GET request. This
# causes problems when downloading attached files from GitHub binary releases
# (like the PhantomJS binaries), because the url for the GET request returns a
# 403 for HEAD requests. See
# https://stat.ethz.ch/pipermail/r-devel/2016-June/072852.html
# Wrapper around utils::download.file() that routes around known platform /
# version quirks: the libcurl HEAD-request bug in R 3.3.0/3.3.1 (see the
# comment block above), and missing https support on Windows before R 3.2.
download <- function(url, destfile, mode = "w") {
  r_version <- getRversion()
  if (r_version == "3.3.0" || r_version == "3.3.1") {
    download_no_libcurl(url, destfile, mode = mode)
  } else if (is_windows() && r_version < "3.2") {
    # Older versions of R on Windows need setInternet2 to download https.
    download_old_win(url, destfile, mode = mode)
  } else {
    utils::download.file(url, destfile, mode = mode)
  }
}
# Adapted from downloader::download, but avoids using libcurl.
# Download `url` without R's libcurl method (whose HEAD request is rejected
# with 403 by GitHub release assets on R 3.3.0/3.3.1 -- see comment above).
# Extra arguments (destfile, mode, ...) are forwarded to utils::download.file().
download_no_libcurl <- function(url, ...) {
# Windows
if (is_windows()) {
method <- "wininet"
utils::download.file(url, method = method, ...)
} else {
# If non-Windows, check for libcurl/curl/wget/lynx, then call download.file with
# appropriate method.
if (nzchar(Sys.which("wget")[1])) {
method <- "wget"
} else if (nzchar(Sys.which("curl")[1])) {
method <- "curl"
# curl needs to add a -L option to follow redirects.
# Save the original options and restore when we exit.
orig_extra_options <- getOption("download.file.extra")
on.exit(options(download.file.extra = orig_extra_options))
options(download.file.extra = paste("-L", orig_extra_options))
} else if (nzchar(Sys.which("lynx")[1])) {
method <- "lynx"
} else {
stop("no download method found")
}
utils::download.file(url, method = method, ...)
}
}
# Adapted from downloader::download, for R<3.2 on Windows
# Download `url` on Windows with R < 3.2, temporarily enabling the
# "internet2" routines (required for https). `...` is forwarded to
# utils::download.file(). Adapted from downloader::download.
download_old_win <- function(url, ...) {
# If we directly use setInternet2, R CMD CHECK gives a Note on Mac/Linux,
# so look the function up indirectly via `::`.
seti2 <- `::`(utils, 'setInternet2')
# Check whether we are already using internet2 for internal
internet2_start <- seti2(NA)
# If not then temporarily set it
if (!internet2_start) {
# Store initial settings, and restore on exit
on.exit(suppressWarnings(seti2(internet2_start)))
# Needed for https. Will get warning if setInternet2(FALSE) already run
# and internet routines are used. But the warnings don't seem to matter.
suppressWarnings(seti2(TRUE))
}
method <- "internal"
# download.file will complain about file size with something like:
# Warning message:
# In download.file(url, ...) : downloaded length 19457 != reported length 200
# because apparently it compares the length with the status code returned (?)
# so we suppress that
utils::download.file(url, method = method, ...)
}
# Fix local filenames like "c:/path/file.html" to "file:///c:/path/file.html"
# because that's the format used by casperjs and the webshot.js script.
# Convert a local Windows path such as "c:/path/file.html" into the
# "file:///c:/path/file.html" form used by casperjs and the webshot.js
# script. Non-local URLs (and everything on non-Windows platforms) are
# returned unchanged.
fix_windows_url <- function(url) {
  if (!is_windows()) {
    return(url)
  }
  # A drive-letter prefix ("c:/...") or any backslash ("c:\path",
  # "\\path\\file.html", "/path\\file.html") marks a local path to fix up.
  looks_local <- grepl("^[a-zA-Z]:/", url) || grepl("\\", url, fixed = TRUE)
  if (looks_local) {
    paste0("file:///", normalizePath(url, winslash = "/"))
  } else {
    url
  }
}
# Borrowed from animation package, with some adaptations.
# Borrowed from animation package, with some adaptations.
# Locate ImageMagick's convert.exe by checking, in order: the ImageMagick
# key in the Windows Registry hive, the Program Files directory, and a
# LyX installation (which bundles ImageMagick). Returns the normalized
# path to convert.exe, or "" (with a warning) when it cannot be found.
find_magic <- function() {
  # Start from "" so the final checks work even when no search branch
  # assigns a candidate (registry key present but BinPath empty used to
  # raise "object 'convert' not found" here).
  convert <- ""
  if (!inherits(try({
    magick.path <- utils::readRegistry('SOFTWARE\\ImageMagick\\Current')$BinPath
  }, silent = TRUE), 'try-error')) {
    if (nzchar(magick.path)) {
      convert <- normalizePath(file.path(magick.path, 'convert.exe'), "/", mustWork = FALSE)
    }
  } else if (
    nzchar(prog <- Sys.getenv('ProgramFiles')) &&
    length(magick.dir <- list.files(prog, '^ImageMagick.*')) &&
    length(magick.path <- list.files(file.path(prog, magick.dir), pattern = '^convert\\.exe$',
                                     full.names = TRUE, recursive = TRUE))
  ) {
    convert <- normalizePath(magick.path[1], "/", mustWork = FALSE)
  } else if (!inherits(try({
    magick.path <- utils::readRegistry('LyX.Document\\Shell\\open\\command', 'HCR')
  }, silent = TRUE), 'try-error')) {
    # LyX's registry value looks like '"C:/path/lyx.exe" "%1"'; strip the
    # quoting and look for a bundled ImageMagick next to the LyX binary.
    candidates <- file.path(dirname(gsub('(^\"|\" \"%1\"$)', '', magick.path[[1]])), c('..', '../etc'),
                            'imagemagick', 'convert.exe')
    candidates <- candidates[file.exists(candidates)]
    if (length(candidates)) {
      # Take the first hit; with both candidate paths present the original
      # code later passed a length-2 logical to if(), an error in R >= 4.2.
      convert <- normalizePath(candidates[1], "/", mustWork = FALSE)
    } else {
      warning('No way to find ImageMagick!')
      return("")
    }
  } else {
    warning('ImageMagick not installed yet!')
    return("")
  }
  if (!nzchar(convert)) {
    # Registry branch matched but provided no usable BinPath.
    warning('ImageMagick not installed yet!')
    return("")
  }
  if (!file.exists(convert)) {
    # Found an ImageMagick installation, but not the convert.exe binary.
    warning("ImageMagick's convert.exe not found at ", convert)
    return("")
  }
  convert
}
| /R/utils.R | no_license | jimhester/webshot | R | false | false | 10,865 | r | phantom_run <- function(args, wait = TRUE) {
phantom_bin <- find_phantom()
# Handle missing phantomjs
if (is.null(phantom_bin)) return(NULL)
# Make sure args is a char vector
args <- as.character(args)
system2(phantom_bin, args = args, wait = wait)
}
# Find PhantomJS from PATH, APPDATA, system.file('webshot'), ~/bin, etc
# Locate the PhantomJS executable: first on the PATH, then in the known
# install directories returned by phantom_paths(). Returns the (expanded)
# path, or NULL -- after a console message -- when PhantomJS is not found.
find_phantom <- function() {
  path <- Sys.which("phantomjs")
  if (nzchar(path)) {
    return(path)
  }
  exec <- if (is_windows()) "phantomjs.exe" else "phantomjs"
  for (dir in phantom_paths()) {
    candidate <- file.path(dir, exec)
    if (utils::file_test("-x", candidate)) {
      # Found an executable copy in one of the fallback locations.
      return(path.expand(candidate))
    }
  }
  # It would make the most sense to throw an error here. However, that would
  # cause problems with CRAN. The CRAN checking systems may not have phantomjs
  # and may not be capable of installing phantomjs (like on Solaris), and any
  # packages which use webshot in their R CMD check (in examples or vignettes)
  # will get an ERROR. We'll issue a message and return NULL instead.
  message(
    "PhantomJS not found. You can install it with webshot::install_phantomjs(). ",
    "If it is installed, please make sure the phantomjs executable ",
    "can be found via the PATH variable."
  )
  NULL
}
#' Install PhantomJS
#'
#' Download the zip package, unzip it, and copy the executable to a system
#' directory in which \pkg{webshot} can look for the PhantomJS executable.
#'
#' This function was designed primarily to help Windows users since it is
#' cumbersome to modify the \code{PATH} variable. Mac OS X users may install
#' PhantomJS via Homebrew. If you download the package from the PhantomJS
#' website instead, please make sure the executable can be found via the
#' \code{PATH} variable.
#'
#' On Windows, the directory specified by the environment variable
#' \code{APPDATA} is used to store \file{phantomjs.exe}. On OS X, the directory
#' \file{~/Library/Application Support} is used. On other platforms (such as
#' Linux), the directory \file{~/bin} is used. If these directories are not
#' writable, the directory \file{PhantomJS} under the installation directory of
#' the \pkg{webshot} package will be tried. If this directory still fails, you
#' will have to install PhantomJS by yourself.
#' @param version The version number of PhantomJS.
#' @param baseURL The base URL for the location of PhantomJS binaries for
#' download. If the default download site is unavailable, you may specify an
#' alternative mirror, such as
#' \code{"https://bitbucket.org/ariya/phantomjs/downloads/"}.
#' @return \code{NULL} (the executable is written to a system directory).
#' @export
install_phantomjs <- function(version = '2.1.1',
baseURL = 'https://github.com/wch/webshot/releases/download/v0.3.1/') {
# Normalize the mirror URL so paste0(baseURL, zipfile) forms a valid path.
if (!grepl("/$", baseURL))
baseURL <- paste0(baseURL, "/")
# Download and unpack in a temp dir; restore the working directory on exit.
owd <- setwd(tempdir())
on.exit(setwd(owd), add = TRUE)
# Per-platform: fetch the release archive, extract it, and locate the
# phantomjs binary inside the extracted tree.
if (is_windows()) {
zipfile <- sprintf('phantomjs-%s-windows.zip', version)
download(paste0(baseURL, zipfile), zipfile, mode = 'wb')
utils::unzip(zipfile)
zipdir <- sub('.zip$', '', zipfile)
exec <- file.path(zipdir, 'bin', 'phantomjs.exe')
} else if (is_osx()) {
zipfile <- sprintf('phantomjs-%s-macosx.zip', version)
download(paste0(baseURL, zipfile), zipfile, mode = 'wb')
utils::unzip(zipfile)
zipdir <- sub('.zip$', '', zipfile)
exec <- file.path(zipdir, 'bin', 'phantomjs')
Sys.chmod(exec, '0755') # chmod +x
} else if (is_linux()) {
# Linux releases are tarballs and come in 32- and 64-bit flavours.
zipfile <- sprintf(
'phantomjs-%s-linux-%s.tar.bz2', version,
if (grepl('64', Sys.info()[['machine']])) 'x86_64' else 'i686'
)
download(paste0(baseURL, zipfile), zipfile, mode = 'wb')
utils::untar(zipfile)
zipdir <- sub('.tar.bz2$', '', zipfile)
exec <- file.path(zipdir, 'bin', 'phantomjs')
Sys.chmod(exec, '0755') # chmod +x
} else {
# Unsupported platform, like Solaris
message("Sorry, this platform is not supported.")
return(invisible())
}
# Copy the binary into the first writable candidate directory from
# phantom_paths() (per-user app dir, ~/bin, or the webshot package dir).
success <- FALSE
dirs <- phantom_paths()
for (destdir in dirs) {
dir.create(destdir, showWarnings = FALSE)
success <- file.copy(exec, destdir, overwrite = TRUE)
if (success) break
}
# Clean up the downloaded archive and its extracted tree regardless of
# whether the copy succeeded.
unlink(c(zipdir, zipfile), recursive = TRUE)
if (!success) stop(
'Unable to install PhantomJS to any of these dirs: ',
paste(dirs, collapse = ', ')
)
message('phantomjs has been installed to ', normalizePath(destdir))
invisible()
}
# Possible locations of the PhantomJS executable
# Candidate directories where the PhantomJS executable may live: a
# platform-specific per-user directory (only when its parent exists),
# followed by the 'PhantomJS' directory under the installed webshot
# package.
phantom_paths <- function() {
  if (is_windows()) {
    base <- Sys.getenv('APPDATA', '')
    # NULL when %APPDATA% is unset or missing, so it drops out of c() below.
    user_dir <- if (dir_exists(base)) file.path(base, 'PhantomJS')
  } else if (is_osx()) {
    base <- '~/Library/Application Support'
    user_dir <- if (dir_exists(base)) file.path(base, 'PhantomJS')
  } else {
    user_dir <- '~/bin'
  }
  c(user_dir, system.file('PhantomJS', package = 'webshot'))
}
# TRUE when `path` names an existing directory (vectorized over `path`).
dir_exists <- function(path) utils::file_test('-d', path)

# Given a vector or list, drop all the NULL items in it (names are kept).
dropNulls <- function(x) {
  keep <- !vapply(x, is.null, FUN.VALUE = logical(1))
  x[keep]
}

# Simple platform predicates.
is_windows <- function() identical(.Platform$OS.type, "windows")
is_osx     <- function() identical(Sys.info()[["sysname"]], "Darwin")
is_linux   <- function() identical(Sys.info()[["sysname"]], "Linux")
is_solaris <- function() identical(Sys.info()[["sysname"]], "SunOS")
# Find an available TCP port (to launch Shiny apps)
# Find an available TCP port (to launch Shiny apps). A non-NULL `port` is
# returned unchanged; otherwise up to 20 random ports in 3000:8000 are
# probed by briefly binding (and immediately releasing) an httpuv server.
available_port <- function(port) {
  if (!is.null(port)) return(port)
  for (candidate in sample(3000:8000, 20)) {
    # If we can bind the port it is free.
    server <- try(httpuv::startServer('127.0.0.1', candidate, list()), silent = TRUE)
    if (!inherits(server, 'try-error')) {
      httpuv::stopServer(server)
      port <- candidate
      break
    }
  }
  if (is.null(port)) stop("Cannot find an available port")
  port
}
# Wrapper for utils::download.file which works around a problem with R 3.3.0 and
# 3.3.1. In these versions, download.file(method="libcurl") issues a HEAD
# request to check if a file is available, before sending the GET request. This
# causes problems when downloading attached files from GitHub binary releases
# (like the PhantomJS binaries), because the url for the GET request returns a
# 403 for HEAD requests. See
# https://stat.ethz.ch/pipermail/r-devel/2016-June/072852.html
# Dispatch to a workaround downloader on problematic R versions (see the
# comment block above); otherwise use the stock utils::download.file().
download <- function(url, destfile, mode = "w") {
if (getRversion() == "3.3.0" || getRversion() == "3.3.1") {
# These versions have the libcurl HEAD-request problem described above.
download_no_libcurl(url, destfile, mode = mode)
} else if (is_windows() && getRversion() < "3.2") {
# Older versions of R on Windows need setInternet2 to download https.
download_old_win(url, destfile, mode = mode)
} else {
utils::download.file(url, destfile, mode = mode)
}
}
# Adapted from downloader::download, but avoids using libcurl.
# Adapted from downloader::download, but avoids using libcurl (whose
# preliminary HEAD request trips up some servers; see download()).
# `...` is forwarded to utils::download.file().
download_no_libcurl <- function(url, ...) {
  if (is_windows()) {
    # Windows has a built-in method that handles https.
    method <- "wininet"
  } else if (nzchar(Sys.which("wget")[1])) {
    method <- "wget"
  } else if (nzchar(Sys.which("curl")[1])) {
    method <- "curl"
    # curl needs to add a -L option to follow redirects.
    # Save the original options and restore when we exit.
    orig_extra_options <- getOption("download.file.extra")
    on.exit(options(download.file.extra = orig_extra_options))
    options(download.file.extra = paste("-L", orig_extra_options))
  } else if (nzchar(Sys.which("lynx")[1])) {
    method <- "lynx"
  } else {
    stop("no download method found")
  }
  utils::download.file(url, method = method, ...)
}
# Adapted from downloader::download, for R<3.2 on Windows
# Download `url` on Windows with R < 3.2, temporarily enabling the
# "internet2" routines (required for https). `...` is forwarded to
# utils::download.file(). Adapted from downloader::download.
download_old_win <- function(url, ...) {
# If we directly use setInternet2, R CMD CHECK gives a Note on Mac/Linux,
# so look the function up indirectly via `::`.
seti2 <- `::`(utils, 'setInternet2')
# Check whether we are already using internet2 for internal
internet2_start <- seti2(NA)
# If not then temporarily set it
if (!internet2_start) {
# Store initial settings, and restore on exit
on.exit(suppressWarnings(seti2(internet2_start)))
# Needed for https. Will get warning if setInternet2(FALSE) already run
# and internet routines are used. But the warnings don't seem to matter.
suppressWarnings(seti2(TRUE))
}
method <- "internal"
# download.file will complain about file size with something like:
# Warning message:
# In download.file(url, ...) : downloaded length 19457 != reported length 200
# because apparently it compares the length with the status code returned (?)
# so we suppress that
utils::download.file(url, method = method, ...)
}
# Fix local filenames like "c:/path/file.html" to "file:///c:/path/file.html"
# because that's the format used by casperjs and the webshot.js script.
# Convert a local Windows path into the "file:///..." form expected by
# casperjs/webshot.js; other inputs pass through unchanged.
fix_windows_url <- function(url) {
# Non-Windows platforms never need the rewrite.
if (!is_windows()) return(url)
# If it's a "c:/path/file.html" path, or contains any backslashes, like
# "c:\path", "\\path\\file.html", or "/path\\file.html", we need to fix it up.
if (grepl("^[a-zA-Z]:/", url) || grepl("\\", url, fixed = TRUE)) {
paste0("file:///", normalizePath(url, winslash = "/"))
} else {
url
}
}
# Borrowed from animation package, with some adaptations.
# Borrowed from animation package, with some adaptations.
# Locate ImageMagick's convert.exe by checking, in order: the ImageMagick
# key in the Windows Registry hive, the Program Files directory, and a
# LyX installation (which bundles ImageMagick). Returns the normalized
# path to convert.exe, or "" (with a warning) when it cannot be found.
find_magic <- function() {
  # Start from "" so the final checks work even when no search branch
  # assigns a candidate (registry key present but BinPath empty used to
  # raise "object 'convert' not found" here).
  convert <- ""
  if (!inherits(try({
    magick.path <- utils::readRegistry('SOFTWARE\\ImageMagick\\Current')$BinPath
  }, silent = TRUE), 'try-error')) {
    if (nzchar(magick.path)) {
      convert <- normalizePath(file.path(magick.path, 'convert.exe'), "/", mustWork = FALSE)
    }
  } else if (
    nzchar(prog <- Sys.getenv('ProgramFiles')) &&
    length(magick.dir <- list.files(prog, '^ImageMagick.*')) &&
    length(magick.path <- list.files(file.path(prog, magick.dir), pattern = '^convert\\.exe$',
                                     full.names = TRUE, recursive = TRUE))
  ) {
    convert <- normalizePath(magick.path[1], "/", mustWork = FALSE)
  } else if (!inherits(try({
    magick.path <- utils::readRegistry('LyX.Document\\Shell\\open\\command', 'HCR')
  }, silent = TRUE), 'try-error')) {
    # LyX's registry value looks like '"C:/path/lyx.exe" "%1"'; strip the
    # quoting and look for a bundled ImageMagick next to the LyX binary.
    candidates <- file.path(dirname(gsub('(^\"|\" \"%1\"$)', '', magick.path[[1]])), c('..', '../etc'),
                            'imagemagick', 'convert.exe')
    candidates <- candidates[file.exists(candidates)]
    if (length(candidates)) {
      # Take the first hit; with both candidate paths present the original
      # code later passed a length-2 logical to if(), an error in R >= 4.2.
      convert <- normalizePath(candidates[1], "/", mustWork = FALSE)
    } else {
      warning('No way to find ImageMagick!')
      return("")
    }
  } else {
    warning('ImageMagick not installed yet!')
    return("")
  }
  if (!nzchar(convert)) {
    # Registry branch matched but provided no usable BinPath.
    warning('ImageMagick not installed yet!')
    return("")
  }
  if (!file.exists(convert)) {
    # Found an ImageMagick installation, but not the convert.exe binary.
    warning("ImageMagick's convert.exe not found at ", convert)
    return("")
  }
  convert
}
|
#' Bold your text in markdown
#'
#' This function helps users to paste bold text in markdown
#' @return Character.
#' @author Jiaxiang Li
#'
#' @import clipr
#' @import glue
#' @import cli
#' @export
#'
#' @examples
#' m3()
# Wrap the text currently selected in the RStudio source editor in
# markdown bold ("**text**"), insert it back into the editor via
# rstudioapi::insertText(), and echo the result plus tips() to the console.
# NOTE(review): `str_split` is used without a stringr:: prefix and `%>%`
# without magrittr being attached -- presumably both are imported via the
# package NAMESPACE; verify. `tips()` is defined elsewhere in the package.
m3 <- function(){
line <-
# clipr::read_clip() %>%
rstudioapi::getSourceEditorContext() %>%
rstudioapi::primary_selection() %>%
.[["text"]] %>%
# Skip the clipboard-copy step (same idea as in reprex).
str_split(n=2,pattern=' ') %>%
.[[1]]
# Inner helper (shadows the outer m3): bolds `name` and inserts it at the
# cursor position.
m3 <-
function(name="Type something"){
text <- glue::glue('**{name}**')
# clipr::write_clip(text)
rstudioapi::insertText(text)
# Skip the clipboard-paste step.
cat(
sep="\n"
,text
,tips()
)
}
m3(line)
}
| /R/m3.R | permissive | slsongge/add2md | R | false | false | 860 | r | #' Bold your text in markdown
#'
#' This function helps users to paste bold text in markdown
#' @return Character.
#' @author Jiaxiang Li
#'
#' @import clipr
#' @import glue
#' @import cli
#' @export
#'
#' @examples
#' m3()
# Wrap the text currently selected in the RStudio source editor in
# markdown bold ("**text**"), insert it back into the editor via
# rstudioapi::insertText(), and echo the result plus tips() to the console.
# NOTE(review): `str_split` is used without a stringr:: prefix and `%>%`
# without magrittr being attached -- presumably both are imported via the
# package NAMESPACE; verify. `tips()` is defined elsewhere in the package.
m3 <- function(){
line <-
# clipr::read_clip() %>%
rstudioapi::getSourceEditorContext() %>%
rstudioapi::primary_selection() %>%
.[["text"]] %>%
# Skip the clipboard-copy step (same idea as in reprex).
str_split(n=2,pattern=' ') %>%
.[[1]]
# Inner helper (shadows the outer m3): bolds `name` and inserts it at the
# cursor position.
m3 <-
function(name="Type something"){
text <- glue::glue('**{name}**')
# clipr::write_clip(text)
rstudioapi::insertText(text)
# Skip the clipboard-paste step.
cat(
sep="\n"
,text
,tips()
)
}
m3(line)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.