content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
process_data <- function(show_messages = FALSE) {
# CREATING FILE NAME VARIABLES
f_activity_lables <- ".//UCI HAR Dataset//activity_labels.txt"
f_features <- ".//UCI HAR Dataset//features.txt"
f_X_train <- ".//UCI HAR Dataset//train//X_train.txt"
f_y_train <- ".//UCI HAR Dataset//train//y_train.txt"
f_subj_train <- ".//UCI HAR Dataset//train//subject_train.txt"
f_X_test <- ".//UCI HAR Dataset//test//X_test.txt"
f_y_test <- ".//UCI HAR Dataset//test//y_test.txt"
f_subj_test <- ".//UCI HAR Dataset//test//subject_test.txt"
f_X_merged <- ".//UCI HAR Dataset//merged//X_merged.txt"
f_y_merged <- ".//UCI HAR Dataset//merged//y_merged.txt"
f_subj_merged <- ".//UCI HAR Dataset//merged//subject_merged.txt"
f_inertial_train <- c(".//UCI HAR Dataset//train//Inertial Signals//body_acc_x_train.txt",
".//UCI HAR Dataset//train//Inertial Signals//body_acc_y_train.txt",
".//UCI HAR Dataset//train//Inertial Signals//body_acc_z_train.txt",
".//UCI HAR Dataset//train//Inertial Signals//body_gyro_x_train.txt",
".//UCI HAR Dataset//train//Inertial Signals//body_gyro_y_train.txt",
".//UCI HAR Dataset//train//Inertial Signals//body_gyro_z_train.txt",
".//UCI HAR Dataset//train//Inertial Signals//total_acc_x_train.txt",
".//UCI HAR Dataset//train//Inertial Signals//total_acc_y_train.txt",
".//UCI HAR Dataset//train//Inertial Signals//total_acc_z_train.txt"
)
f_inertial_test <- c(".//UCI HAR Dataset//test//Inertial Signals//body_acc_x_test.txt",
".//UCI HAR Dataset//test//Inertial Signals//body_acc_y_test.txt",
".//UCI HAR Dataset//test//Inertial Signals//body_acc_z_test.txt",
".//UCI HAR Dataset//test//Inertial Signals//body_gyro_x_test.txt",
".//UCI HAR Dataset//test//Inertial Signals//body_gyro_y_test.txt",
".//UCI HAR Dataset//test//Inertial Signals//body_gyro_z_test.txt",
".//UCI HAR Dataset//test//Inertial Signals//total_acc_x_test.txt",
".//UCI HAR Dataset//test//Inertial Signals//total_acc_y_test.txt",
".//UCI HAR Dataset//test//Inertial Signals//total_acc_z_test.txt"
)
f_inertial_merged <- c(".//UCI HAR Dataset//merged//Inertial Signals//body_acc_x_merged.txt",
".//UCI HAR Dataset//merged//Inertial Signals//body_acc_y_merged.txt",
".//UCI HAR Dataset//merged//Inertial Signals//body_acc_z_merged.txt",
".//UCI HAR Dataset//merged//Inertial Signals//body_gyro_x_merged.txt",
".//UCI HAR Dataset//merged//Inertial Signals//body_gyro_y_merged.txt",
".//UCI HAR Dataset//merged//Inertial Signals//body_gyro_z_merged.txt",
".//UCI HAR Dataset//merged//Inertial Signals//total_acc_x_merged.txt",
".//UCI HAR Dataset//merged//Inertial Signals//total_acc_y_merged.txt",
".//UCI HAR Dataset//merged//Inertial Signals//total_acc_z_merged.txt"
)
#CHECKING IF THE DATA ARE AVILABLE
data_found <- TRUE
data_found <- file.exists(f_activity_lables) &&
file.exists(f_features) &&
file.exists(f_X_train) &&
file.exists(f_y_train) &&
file.exists(f_subj_train) &&
file.exists(f_X_test) &&
file.exists(f_y_test) &&
file.exists(f_subj_test) &&
as.logical(prod(file.exists(f_inertial_train))) &&
as.logical(prod(file.exists(f_inertial_test)))
if (!data_found)
{
stop("Data files were not found. Please, inzip 'getdata-projectfiles-UCI HAR Dataset.zip'
into your working directory without changing the archive structure.")
} else {
print("Data found. Processing ...")
}
remove(data_found)
#CREATING DIRECTORIES FOR THE MERGED DATA SETS
if (!file.exists(".//UCI HAR Dataset//merged")) {
dir.create(".//UCI HAR Dataset//merged")
}
if (!file.exists(".//UCI HAR Dataset//merged//Inertial Signals")) {
dir.create(".//UCI HAR Dataset//merged//Inertial Signals")
}
#LOADING DATA AND PROCESSING
if (show_messages) {print("Loading activities lables ...")}
activity_labels <- read.table(file = f_activity_lables, sep = " ")
activity_labels <- as.vector(activity_labels[[2]])
remove(f_activity_lables)
if (show_messages) {print("Loading features lables ...")}
features <- read.table(file = f_features, sep = " ")
features <- as.vector(features[[2]])
remove(f_features)
if (show_messages) {print("Loading trainig data set persons IDs ...")}
training_set_person <- read.table(f_subj_train, sep = "")
training_set_person <- training_set_person[[1]]
if (show_messages) {print("Loading test data set persons IDs ...")}
test_set_person <- read.table(f_subj_test, sep = "")
test_set_person <- test_set_person[[1]]
if (show_messages) {print("Merging persons IDs ...")}
merged_person <- c(training_set_person, test_set_person)
if (show_messages) {print("Loading trainig data set activities IDs ...")}
training_set_activity <- read.table(f_y_train, sep = "")
training_set_activity <- training_set_activity[[1]]
if (show_messages) {print("Loading test data set activities IDs ...")}
test_set_activity <- read.table(f_y_test, sep = "")
test_set_activity <- test_set_activity[[1]]
if (show_messages) {print("Merging activities IDs ...")}
merged_activity <- c(training_set_activity, test_set_activity)
if (show_messages) {print("Sorting persons and activities IDs ...")}
sorted_row_indexes <- order(merged_activity,merged_person)
merged_person <- merged_person[sorted_row_indexes]
merged_activity <- merged_activity[sorted_row_indexes]
if (show_messages) {print("Saving persons and activities IDs ...")}
write.table(merged_person, file = f_subj_merged, col.names = FALSE, row.names = FALSE)
write.table(merged_activity, file = f_y_merged, col.names = FALSE, row.names = FALSE)
#clearing RAM
remove(training_set_person)
remove(test_set_person)
remove(f_subj_test)
remove(f_subj_train)
remove(f_subj_merged)
remove(training_set_activity)
remove(test_set_activity)
remove(f_y_train)
remove(f_y_test)
remove(f_y_merged)
if (show_messages) {print("Merging and saving inertial signals. Please, wait ...")}
for (i in 1: length(f_inertial_merged)) {
temporary_train_table <- read.table(f_inertial_train[i], sep = "")
temporary_test_table <- read.table(f_inertial_test[i], sep = "")
temporary_merged_table <- rbind(temporary_train_table, temporary_test_table)
temporary_merged_table <- temporary_merged_table[sorted_row_indexes,]
write.table(temporary_merged_table, file = f_inertial_merged[i], col.names = FALSE, row.names = FALSE)
}
#clearing RAM
remove(i)
remove(temporary_train_table)
remove(temporary_test_table)
remove(temporary_merged_table)
remove(f_inertial_train)
remove(f_inertial_test)
remove(f_inertial_merged)
if (show_messages) {print("Loading training data set. Please, wait ...")}
training_set <- read.table(file = f_X_train, sep = "")
if (show_messages) {print("Loading test data set. Please, wait ...")}
test_set <- read.table(file = f_X_test, sep = "")
if (show_messages) {print("Merging and saving features data sets signals. Please, wait ...")}
merged_set <- rbind(training_set, test_set)
merged_set <- merged_set[sorted_row_indexes, ]
write.table(merged_set, file = f_X_merged, col.names = FALSE, row.names = FALSE)
#clearing RAM
remove(training_set)
remove(test_set)
remove(f_X_train)
remove(f_X_test)
remove(f_X_merged)
remove(sorted_row_indexes)
if (show_messages) {print("Extracting only the measurements on the mean and standard deviation for each measurement.
Naming new data set and saving it to .//merged_mean_std_data.txt")}
#finding all column indexes with pattern "mean" in the label
means_indexes <- grep(features, pattern = "mean", value = FALSE)
#finding all column indexes with pattern "std" in the label
stds_indexes <- grep(features, pattern = "std", value = FALSE)
#union columns and sort them to get the original order
needed_columns <- union(means_indexes, stds_indexes)
needed_columns <- sort(needed_columns)
#clearing RAM
remove(means_indexes)
remove(stds_indexes)
#naming the merged data set columns
names(merged_set) <- features
#replacing activity IDs with their discriptive names
merged_activity <- activity_labels[merged_activity]
#creating and naming new data frame which consists of activity and person_id
new_merged_data_set <- data.frame(merged_activity)
new_merged_data_set <- cbind(new_merged_data_set, merged_person)
#naming the new data frame columns ...
names(new_merged_data_set) <- c("activity", "person_id")
#... and binding new columns from merged_set. The result is stored in merged_set
merged_set <- cbind(new_merged_data_set, merged_set)
#saving new data set, extracting only needed columns
write.table(merged_set[c(1,2,needed_columns + 2)], "merged_mean_std_data.txt", row.names = FALSE)
#clearing RAM
remove(activity_labels)
remove(features)
remove(merged_activity)
remove(merged_person)
remove(needed_columns)
remove(new_merged_data_set)
#Now we're going to create a separate data set with averages of each variable
#for each activity and person
if (show_messages) {print("Going to create a separate data set with averages of each variable for each activity and person.
Naming new data set and saving it to .//independent_mean_data.txt")}
#aggregating data by person_id and activity and applying "mean()" function for each column
aggdata <- aggregate(merged_set[,3:dim(merged_set)[2]], by = list(merged_set$person_id, merged_set$activity), FUN = mean, na.rm = TRUE)
#swapping activities and person IDs
aggdata[c(1,2)] <- aggdata[c(2,1)]
#properly naming columns
names(aggdata)[1:2] <- c("activity", "person_id")
names(aggdata)[3:dim(aggdata)[2]] <- paste("mean(", names(aggdata)[3:dim(aggdata)[2]], ")")
#saving data
write.table(aggdata, "independent_mean_data.txt", row.names = FALSE)
#clearing RAM
remove(merged_set)
remove(aggdata)
remove(show_messages)
print("Data processing is finished.")
print("Merged data set in the 'UCI HAR Dataset/merged' directory.")
print("The data subset extrated from merged data set in the 'merged_mean_std_data.txt'")
print("The second indpendent data subset is in 'independent_mean_data.txt'")
} | /run_analysis_main_code.R | no_license | MGN-Roman/getting_data_course_project | R | false | false | 10,552 | r | process_data <- function(show_messages = FALSE) {
# CREATING FILE NAME VARIABLES
f_activity_lables <- ".//UCI HAR Dataset//activity_labels.txt"
f_features <- ".//UCI HAR Dataset//features.txt"
f_X_train <- ".//UCI HAR Dataset//train//X_train.txt"
f_y_train <- ".//UCI HAR Dataset//train//y_train.txt"
f_subj_train <- ".//UCI HAR Dataset//train//subject_train.txt"
f_X_test <- ".//UCI HAR Dataset//test//X_test.txt"
f_y_test <- ".//UCI HAR Dataset//test//y_test.txt"
f_subj_test <- ".//UCI HAR Dataset//test//subject_test.txt"
f_X_merged <- ".//UCI HAR Dataset//merged//X_merged.txt"
f_y_merged <- ".//UCI HAR Dataset//merged//y_merged.txt"
f_subj_merged <- ".//UCI HAR Dataset//merged//subject_merged.txt"
f_inertial_train <- c(".//UCI HAR Dataset//train//Inertial Signals//body_acc_x_train.txt",
".//UCI HAR Dataset//train//Inertial Signals//body_acc_y_train.txt",
".//UCI HAR Dataset//train//Inertial Signals//body_acc_z_train.txt",
".//UCI HAR Dataset//train//Inertial Signals//body_gyro_x_train.txt",
".//UCI HAR Dataset//train//Inertial Signals//body_gyro_y_train.txt",
".//UCI HAR Dataset//train//Inertial Signals//body_gyro_z_train.txt",
".//UCI HAR Dataset//train//Inertial Signals//total_acc_x_train.txt",
".//UCI HAR Dataset//train//Inertial Signals//total_acc_y_train.txt",
".//UCI HAR Dataset//train//Inertial Signals//total_acc_z_train.txt"
)
f_inertial_test <- c(".//UCI HAR Dataset//test//Inertial Signals//body_acc_x_test.txt",
".//UCI HAR Dataset//test//Inertial Signals//body_acc_y_test.txt",
".//UCI HAR Dataset//test//Inertial Signals//body_acc_z_test.txt",
".//UCI HAR Dataset//test//Inertial Signals//body_gyro_x_test.txt",
".//UCI HAR Dataset//test//Inertial Signals//body_gyro_y_test.txt",
".//UCI HAR Dataset//test//Inertial Signals//body_gyro_z_test.txt",
".//UCI HAR Dataset//test//Inertial Signals//total_acc_x_test.txt",
".//UCI HAR Dataset//test//Inertial Signals//total_acc_y_test.txt",
".//UCI HAR Dataset//test//Inertial Signals//total_acc_z_test.txt"
)
f_inertial_merged <- c(".//UCI HAR Dataset//merged//Inertial Signals//body_acc_x_merged.txt",
".//UCI HAR Dataset//merged//Inertial Signals//body_acc_y_merged.txt",
".//UCI HAR Dataset//merged//Inertial Signals//body_acc_z_merged.txt",
".//UCI HAR Dataset//merged//Inertial Signals//body_gyro_x_merged.txt",
".//UCI HAR Dataset//merged//Inertial Signals//body_gyro_y_merged.txt",
".//UCI HAR Dataset//merged//Inertial Signals//body_gyro_z_merged.txt",
".//UCI HAR Dataset//merged//Inertial Signals//total_acc_x_merged.txt",
".//UCI HAR Dataset//merged//Inertial Signals//total_acc_y_merged.txt",
".//UCI HAR Dataset//merged//Inertial Signals//total_acc_z_merged.txt"
)
#CHECKING IF THE DATA ARE AVILABLE
data_found <- TRUE
data_found <- file.exists(f_activity_lables) &&
file.exists(f_features) &&
file.exists(f_X_train) &&
file.exists(f_y_train) &&
file.exists(f_subj_train) &&
file.exists(f_X_test) &&
file.exists(f_y_test) &&
file.exists(f_subj_test) &&
as.logical(prod(file.exists(f_inertial_train))) &&
as.logical(prod(file.exists(f_inertial_test)))
if (!data_found)
{
stop("Data files were not found. Please, inzip 'getdata-projectfiles-UCI HAR Dataset.zip'
into your working directory without changing the archive structure.")
} else {
print("Data found. Processing ...")
}
remove(data_found)
#CREATING DIRECTORIES FOR THE MERGED DATA SETS
if (!file.exists(".//UCI HAR Dataset//merged")) {
dir.create(".//UCI HAR Dataset//merged")
}
if (!file.exists(".//UCI HAR Dataset//merged//Inertial Signals")) {
dir.create(".//UCI HAR Dataset//merged//Inertial Signals")
}
#LOADING DATA AND PROCESSING
if (show_messages) {print("Loading activities lables ...")}
activity_labels <- read.table(file = f_activity_lables, sep = " ")
activity_labels <- as.vector(activity_labels[[2]])
remove(f_activity_lables)
if (show_messages) {print("Loading features lables ...")}
features <- read.table(file = f_features, sep = " ")
features <- as.vector(features[[2]])
remove(f_features)
if (show_messages) {print("Loading trainig data set persons IDs ...")}
training_set_person <- read.table(f_subj_train, sep = "")
training_set_person <- training_set_person[[1]]
if (show_messages) {print("Loading test data set persons IDs ...")}
test_set_person <- read.table(f_subj_test, sep = "")
test_set_person <- test_set_person[[1]]
if (show_messages) {print("Merging persons IDs ...")}
merged_person <- c(training_set_person, test_set_person)
if (show_messages) {print("Loading trainig data set activities IDs ...")}
training_set_activity <- read.table(f_y_train, sep = "")
training_set_activity <- training_set_activity[[1]]
if (show_messages) {print("Loading test data set activities IDs ...")}
test_set_activity <- read.table(f_y_test, sep = "")
test_set_activity <- test_set_activity[[1]]
if (show_messages) {print("Merging activities IDs ...")}
merged_activity <- c(training_set_activity, test_set_activity)
if (show_messages) {print("Sorting persons and activities IDs ...")}
sorted_row_indexes <- order(merged_activity,merged_person)
merged_person <- merged_person[sorted_row_indexes]
merged_activity <- merged_activity[sorted_row_indexes]
if (show_messages) {print("Saving persons and activities IDs ...")}
write.table(merged_person, file = f_subj_merged, col.names = FALSE, row.names = FALSE)
write.table(merged_activity, file = f_y_merged, col.names = FALSE, row.names = FALSE)
#clearing RAM
remove(training_set_person)
remove(test_set_person)
remove(f_subj_test)
remove(f_subj_train)
remove(f_subj_merged)
remove(training_set_activity)
remove(test_set_activity)
remove(f_y_train)
remove(f_y_test)
remove(f_y_merged)
if (show_messages) {print("Merging and saving inertial signals. Please, wait ...")}
for (i in 1: length(f_inertial_merged)) {
temporary_train_table <- read.table(f_inertial_train[i], sep = "")
temporary_test_table <- read.table(f_inertial_test[i], sep = "")
temporary_merged_table <- rbind(temporary_train_table, temporary_test_table)
temporary_merged_table <- temporary_merged_table[sorted_row_indexes,]
write.table(temporary_merged_table, file = f_inertial_merged[i], col.names = FALSE, row.names = FALSE)
}
#clearing RAM
remove(i)
remove(temporary_train_table)
remove(temporary_test_table)
remove(temporary_merged_table)
remove(f_inertial_train)
remove(f_inertial_test)
remove(f_inertial_merged)
if (show_messages) {print("Loading training data set. Please, wait ...")}
training_set <- read.table(file = f_X_train, sep = "")
if (show_messages) {print("Loading test data set. Please, wait ...")}
test_set <- read.table(file = f_X_test, sep = "")
if (show_messages) {print("Merging and saving features data sets signals. Please, wait ...")}
merged_set <- rbind(training_set, test_set)
merged_set <- merged_set[sorted_row_indexes, ]
write.table(merged_set, file = f_X_merged, col.names = FALSE, row.names = FALSE)
#clearing RAM
remove(training_set)
remove(test_set)
remove(f_X_train)
remove(f_X_test)
remove(f_X_merged)
remove(sorted_row_indexes)
if (show_messages) {print("Extracting only the measurements on the mean and standard deviation for each measurement.
Naming new data set and saving it to .//merged_mean_std_data.txt")}
#finding all column indexes with pattern "mean" in the label
means_indexes <- grep(features, pattern = "mean", value = FALSE)
#finding all column indexes with pattern "std" in the label
stds_indexes <- grep(features, pattern = "std", value = FALSE)
#union columns and sort them to get the original order
needed_columns <- union(means_indexes, stds_indexes)
needed_columns <- sort(needed_columns)
#clearing RAM
remove(means_indexes)
remove(stds_indexes)
#naming the merged data set columns
names(merged_set) <- features
#replacing activity IDs with their discriptive names
merged_activity <- activity_labels[merged_activity]
#creating and naming new data frame which consists of activity and person_id
new_merged_data_set <- data.frame(merged_activity)
new_merged_data_set <- cbind(new_merged_data_set, merged_person)
#naming the new data frame columns ...
names(new_merged_data_set) <- c("activity", "person_id")
#... and binding new columns from merged_set. The result is stored in merged_set
merged_set <- cbind(new_merged_data_set, merged_set)
#saving new data set, extracting only needed columns
write.table(merged_set[c(1,2,needed_columns + 2)], "merged_mean_std_data.txt", row.names = FALSE)
#clearing RAM
remove(activity_labels)
remove(features)
remove(merged_activity)
remove(merged_person)
remove(needed_columns)
remove(new_merged_data_set)
#Now we're going to create a separate data set with averages of each variable
#for each activity and person
if (show_messages) {print("Going to create a separate data set with averages of each variable for each activity and person.
Naming new data set and saving it to .//independent_mean_data.txt")}
#aggregating data by person_id and activity and applying "mean()" function for each column
aggdata <- aggregate(merged_set[,3:dim(merged_set)[2]], by = list(merged_set$person_id, merged_set$activity), FUN = mean, na.rm = TRUE)
#swapping activities and person IDs
aggdata[c(1,2)] <- aggdata[c(2,1)]
#properly naming columns
names(aggdata)[1:2] <- c("activity", "person_id")
names(aggdata)[3:dim(aggdata)[2]] <- paste("mean(", names(aggdata)[3:dim(aggdata)[2]], ")")
#saving data
write.table(aggdata, "independent_mean_data.txt", row.names = FALSE)
#clearing RAM
remove(merged_set)
remove(aggdata)
remove(show_messages)
print("Data processing is finished.")
print("Merged data set in the 'UCI HAR Dataset/merged' directory.")
print("The data subset extrated from merged data set in the 'merged_mean_std_data.txt'")
print("The second indpendent data subset is in 'independent_mean_data.txt'")
} |
#'@templateVar aqs_endpoint rawDataNotify
#'
#'@description
#' Returns raw data that match your query criteria. This is an asynchronous
#' query that returns a Transaction ID number. The query is run on our servers
#' and when it is complete, an email will be sent you with a link to your data
#' (tied to the Transaction ID).
#'
#'@template dots
#'@template endpoint_title
#'@export
get_rawDataNotify <- function(...) {
  ## Stub: the body is intentionally empty, so calling this currently
  ## returns NULL and performs no query.  The roxygen block above
  ## documents the intended behaviour (asynchronous AQS "rawDataNotify"
  ## request returning a Transaction ID by email) — implementation TBD.
}
| /R/get_rawDataNotify.R | no_license | FluentData/raqdm2 | R | false | false | 426 | r | #'@templateVar aqs_endpoint rawDataNotify
#'
#'@description
#' Returns raw data that match your query criteria. This is an asynchronous
#' query that returns a Transaction ID number. The query is run on our servers
#' and when it is complete, an email will be sent you with a link to your data
#' (tied to the Transaction ID).
#'
#'@template dots
#'@template endpoint_title
#'@export
get_rawDataNotify <- function(...) {
}
|
## FIXME: going crazy trying to figure out the _right_ way to store
## cookies in the package environment.
## For now, just leave them in options(); change get/setMWCookie()
## if/when I figure out how
## JUNK:
## when properly installed (not just loaded via devtools) I get
## Error in assign("cookie", cookie, pos = "package:wwiki") :
## cannot add bindings to a locked environment
## if (!exists("cookie",where="package:wwiki") ||
## is.null(cookie <- get("cookie",pos="package:wwiki")))
## .onLoad <- function(libname,pkgname) {
## message("in onLoad")
## assign("cookie",NULL,parent.frame())
## }
getMWCookie <- function() { getOption("MWcookie") }
setMWCookie <- function(cookie) options(MWcookie=cookie)
##' Log in to a MediaWiki/WorkingWiki server
##' @title Log in to a wiki
##' @param username (character) user name for wiki; if not specified, the function will try to retrieve it via \code{getOption("MWUser")}
##' @param password (character) wiki password; will be prompted for if not specified
##' @param api.url (character) URL for connecting to the wiki
##' @param api.opts options for connecting: list including elements \code{verbose} (logical), \code{useragent} (character), \code{cookie} (?)
##' @param setCookie (logical) should the function cache a cookie in the search path after successful login?
##' @param verbose (logical) verbose output?
##' @return a cookie allowing further MW actions
##' @keywords misc
##' @export
##' @importFrom rjson fromJSON
##' @importFrom RCurl postForm
loginWiki <- function(username=NULL, password=NULL, api.url, api.opts, setCookie=TRUE,
                      verbose=FALSE)
{
  ## Perform the two-step MediaWiki API login handshake and return the
  ## cookie string needed for authenticated follow-up requests.
  ## FIXME: need a closeConnection() somewhere?
  ## FIXME: allow fail.option (i.e. return NULL cookie or fail here)?
  if (is.null(username) && is.null(username <- getOption("MWUser"))) {
    stop("Must specify a user name to log in to the wiki")
  }
  if (is.null(password)) password <- getPassword()

  ## Stop with the server-reported result code unless login succeeded.
  ## (Encapsulates the check that was previously duplicated twice below.)
  stop_unless_success <- function(login.data) {
    if (login.data[['login']][['result']] != 'Success') {
      with(login.data[['login']],
           stop(paste("Error logging in to wiki:", result)))
    }
  }

  login.postparams <- list(
    action = 'login',
    lgname = username,
    lgpassword = password,
    format = 'json' )
  ## First attempt: the API normally answers 'NeedToken' plus a login
  ## token and session id that must be echoed back in a second request.
  login.result <- postForm( api.url,
                            .params = login.postparams,
                            .opts = api.opts,
                            style = 'post' )
  login.data <- fromJSON(login.result)
  prefix <- login.data[['login']][['cookieprefix']]
  cookie <- ''
  if (login.data[['login']][['result']] == 'NeedToken')
  { # add the token and a cookie to the POST fields, and then log in
    token <- login.data[['login']][['token']]
    login.postparams[['lgtoken']] <- token
    ## RCurl should handle the cookies automatically (api.php returns
    ## suitable Set-Cookie: headers), but setting the session cookie by
    ## hand is what has worked in practice.
    cookie <- paste0(prefix, '_session=',
                     login.data[['login']][['sessionid']])
    api.opts[['cookie']] <- cookie
    login.result <- postForm( api.url,
                              .params = login.postparams,
                              .opts = api.opts,
                              style = 'post' )
    ## should be successful this time.
    login.data <- fromJSON(login.result)
    stop_unless_success(login.data)
  }
  ## covers the case where the first reply was neither NeedToken nor Success
  stop_unless_success(login.data)
  ## we're supposed to use these cookies as well once we're logged in
  if (cookie != '') { cookie <- paste0(cookie, ';') }
  cookie <- paste0(cookie,
                   prefix, 'UserName=',
                   login.data[['login']][['lgusername']],';',
                   prefix, 'UserID=', login.data[['login']][['lguserid']],';',
                   prefix, 'Token=', login.data[['login']][['lgtoken']])
  if (setCookie) {
    setMWCookie(cookie)
    if (verbose) cat("set cookie\n")
  }
  return(cookie)
}
## from Barry Rowlingson
## https://stat.ethz.ch/pipermail/r-help/2008-August/170662.html
## FIXME: Is there a good fallback for getting the password (e.g. from the console) without
## printing? Is tcltk OK with RStudio?
##' @importFrom tcltk tktoplevel tclVar tklabel tkentry tkbind tkdestroy tkbutton tkpack tkwait.window tclvalue tktitle<-
## Prompt for a password in a modal tcltk dialog with a masked entry
## field; the dialog closes on <Return> or the OK button, and the typed
## value is returned as a character string.
getPassword <- function() {
  dlg <- tktoplevel()
  pwd_var <- tclVar("")
  prompt <- tklabel(dlg, text="Enter password")
  tktitle(dlg) <- "" ## no title really necessary
  entry <- tkentry(dlg, show="*", textvariable=pwd_var)
  ## quit on Return ...
  tkbind(entry, "<Return>", function() tkdestroy(dlg))
  ## or on button-press
  ok_btn <- tkbutton(dlg, text="OK", default="active",
                     command=function() tkdestroy(dlg))
  tkpack(prompt, entry, ok_btn)
  tkwait.window(dlg)  # block until the dialog is destroyed
  tclvalue(pwd_var)
}
| /R/loginWiki.R | no_license | bbolker/wwiki | R | false | false | 5,576 | r | ## FIXME: going crazy trying to figure out the _right_ way to store
## cookies in the package environment.
## For now, just leave them in options(); change get/setMWCookie()
## if/when I figure out how
## JUNK:
## when properly installed (not just loaded via devtools) I get
## Error in assign("cookie", cookie, pos = "package:wwiki") :
## cannot add bindings to a locked environment
## if (!exists("cookie",where="package:wwiki") ||
## is.null(cookie <- get("cookie",pos="package:wwiki")))
## .onLoad <- function(libname,pkgname) {
## message("in onLoad")
## assign("cookie",NULL,parent.frame())
## }
getMWCookie <- function() { getOption("MWcookie") }
setMWCookie <- function(cookie) options(MWcookie=cookie)
##' Log in to a MediaWiki/WorkingWiki server
##' @title Log in to a wiki
##' @param username (character) user name for wiki; if not specified, the function will try to retrieve it via \code{getOption("MWUser")}
##' @param password (character) wiki password; will be prompted for if not specified
##' @param api.url (character) URL for connecting to the wiki
##' @param api.opts options for connecting: list including elements \code{verbose} (logical), \code{useragent} (character), \code{cookie} (?)
##' @param setCookie (logical) should the function cache a cookie in the search path after successful login?
##' @param verbose (logical) verbose output?
##' @return a cookie allowing further MW actions
##' @keywords misc
##' @export
##' @importFrom rjson fromJSON
##' @importFrom RCurl postForm
loginWiki <- function(username=NULL, password=NULL, api.url, api.opts, setCookie=TRUE,
                      verbose=FALSE)
{
  ## Perform the two-step MediaWiki API login handshake and return the
  ## cookie string needed for authenticated follow-up requests.
  ## FIXME: need a closeConnection() somewhere?
  ## FIXME: allow fail.option (i.e. return NULL cookie or fail here)?
  if (is.null(username) && is.null(username <- getOption("MWUser"))) {
    stop("Must specify a user name to log in to the wiki")
  }
  if (is.null(password)) password <- getPassword()

  ## Stop with the server-reported result code unless login succeeded.
  ## (Encapsulates the check that was previously duplicated twice below.)
  stop_unless_success <- function(login.data) {
    if (login.data[['login']][['result']] != 'Success') {
      with(login.data[['login']],
           stop(paste("Error logging in to wiki:", result)))
    }
  }

  login.postparams <- list(
    action = 'login',
    lgname = username,
    lgpassword = password,
    format = 'json' )
  ## First attempt: the API normally answers 'NeedToken' plus a login
  ## token and session id that must be echoed back in a second request.
  login.result <- postForm( api.url,
                            .params = login.postparams,
                            .opts = api.opts,
                            style = 'post' )
  login.data <- fromJSON(login.result)
  prefix <- login.data[['login']][['cookieprefix']]
  cookie <- ''
  if (login.data[['login']][['result']] == 'NeedToken')
  { # add the token and a cookie to the POST fields, and then log in
    token <- login.data[['login']][['token']]
    login.postparams[['lgtoken']] <- token
    ## RCurl should handle the cookies automatically (api.php returns
    ## suitable Set-Cookie: headers), but setting the session cookie by
    ## hand is what has worked in practice.
    cookie <- paste0(prefix, '_session=',
                     login.data[['login']][['sessionid']])
    api.opts[['cookie']] <- cookie
    login.result <- postForm( api.url,
                              .params = login.postparams,
                              .opts = api.opts,
                              style = 'post' )
    ## should be successful this time.
    login.data <- fromJSON(login.result)
    stop_unless_success(login.data)
  }
  ## covers the case where the first reply was neither NeedToken nor Success
  stop_unless_success(login.data)
  ## we're supposed to use these cookies as well once we're logged in
  if (cookie != '') { cookie <- paste0(cookie, ';') }
  cookie <- paste0(cookie,
                   prefix, 'UserName=',
                   login.data[['login']][['lgusername']],';',
                   prefix, 'UserID=', login.data[['login']][['lguserid']],';',
                   prefix, 'Token=', login.data[['login']][['lgtoken']])
  if (setCookie) {
    setMWCookie(cookie)
    if (verbose) cat("set cookie\n")
  }
  return(cookie)
}
## from Barry Rowlingson
## https://stat.ethz.ch/pipermail/r-help/2008-August/170662.html
## FIXME: Is there a good fallback for getting the password (e.g. from the console) without
## printing? Is tcltk OK with RStudio?
##' @importFrom tcltk tktoplevel tclVar tklabel tkentry tkbind tkdestroy tkbutton tkpack tkwait.window tclvalue tktitle<-
## Prompt for a password in a modal tcltk dialog with a masked entry
## field; the dialog closes on <Return> or the OK button, and the typed
## value is returned as a character string.
getPassword <- function() {
  dlg <- tktoplevel()
  pwd_var <- tclVar("")
  prompt <- tklabel(dlg, text="Enter password")
  tktitle(dlg) <- "" ## no title really necessary
  entry <- tkentry(dlg, show="*", textvariable=pwd_var)
  ## quit on Return ...
  tkbind(entry, "<Return>", function() tkdestroy(dlg))
  ## or on button-press
  ok_btn <- tkbutton(dlg, text="OK", default="active",
                     command=function() tkdestroy(dlg))
  tkpack(prompt, entry, ok_btn)
  tkwait.window(dlg)  # block until the dialog is destroyed
  tclvalue(pwd_var)
}
|
## NOTE(review): hard-coded absolute path — this script only runs on the
## author's machine; consider a relative path or here::here().
setwd("C:/cloud/Dropbox/lupine")
library(dplyr)
library(tidyr)
library(testthat)
library(rstan)
library(rstanarm)
library(brms)
library(lme4)
options(stringsAsFactors = F)
## project helpers — month_clim_form(), used below, is presumably
## defined in one of these sourced files (not visible here; confirm)
source("analysis/format_data/format_scripts.R")
source("analysis/format_data/format_functions.R")
# set rstan options to parallel cores
rstan_options( auto_write = TRUE )
options( mc.cores = parallel::detectCores() )
# data
lupine_08 <- read.csv("data/lupine_05_08.csv")
lupine_18 <- read.csv("data/lupine_08_18.csv")
clim <- read.csv("data/lupine_fc_vars.csv")
enso <- read.csv("data/enso_data.csv")
## stack the two census periods into a single demographic data frame
lupine_df <- bind_rows(lupine_08,lupine_18)
# data format --------------------------------------------------------------
# survival data: keep records with a known fate, drop seedlings ('SL')
# and zero-area records (log(0) would be -Inf); add log size at t0
surv <- subset(lupine_df, !is.na(surv_t1) ) %>%
  subset( stage_t0 != 'SL' ) %>%
  subset( area_t0 != 0) %>%
  mutate( log_area_t0 = log(area_t0) )
# climate format ----------------------------------------------------------------
years <- unique(surv$year)
m_obs <- 7     # observation month passed to month_clim_form() — TODO confirm meaning
m_back <- 36   # months of climate history kept per transition (3 years; see V1:V36 below)
expp_beta <- 20 # this for the   <- original comment truncated; passed to Stan as expp_beta — confirm role
# format climate - need to select climate predictor first
# NOTE(review): `clim_var` is never defined in this script — it is
# presumably set by a wrapper before sourcing this file; verify.
# Also note that inside subset() the name `clim_var` refers to the
# *column* of the data frame, shadowing the global of the same name.
if(clim_var == 'prec'){
  clim_mat <- subset(clim, clim_var == "prate") %>%
    mutate( value = replace(value, value < 0, 0) ) %>%  # clamp negative precipitation to 0
    month_clim_form("precip", years, m_back, m_obs)
} else{
  clim_var_input <- clim_var
  if( clim_var == 'airt') clim_input <- clim else clim_input <- enso
  clim_mat <- subset(clim_input, clim_var == clim_var_input) %>%
    month_clim_form(clim_var_input, years, m_back, m_obs)
}
# join climate onto the survival data; drop rows with unknown location
# (NOTE(review): the old "seedling data" label was misleading — seedlings
# were excluded above, these are older plants)
surv_clim <- left_join(surv, clim_mat) %>%
  subset( !is.na(location) )
# indices for STAN models
# year_i / site_i: integer codes used as random-effect indices.
# NOTE(review): avgt0/avgtm1/avgtm2 are rowSums() over the 12 monthly
# values of years t, t-1, t-2 — i.e. yearly *totals*, not averages,
# despite the "avg" names; confirm that is intended (rowMeans otherwise).
surv_df <- surv_clim %>%
  mutate( year_i = year %>% as.factor %>% as.numeric,
          site_i = location %>% as.factor %>% as.numeric,
          avgt0 = surv_clim %>% select(V1:V12) %>% rowSums,
          avgtm1 = surv_clim %>% select(V13:V24) %>% rowSums,
          avgtm2 = surv_clim %>% select(V25:V36) %>% rowSums )
# climate data: one row of 36 monthly values per year, sorted by year so
# that row order matches the year_i coding above
clim_pred <- dplyr::select(surv_df, year) %>%
  inner_join( clim_mat ) %>%
  unique %>%
  arrange( year ) %>%
  dplyr::select(-year)
# fit GLMM models --------------------------------------------------------
# Survival ~ size + climate with a year random intercept.
# NOTE(review): mod1 uses a size-by-climate *interaction*
# (log_area_t0 * avgt0) while mod2/mod3 are additive — confirm the
# asymmetry between the year-t model and the lagged models is intended.
mod1 <- glmer(surv_t1 ~ log_area_t0 * avgt0 + (1 | year), data= surv_df,
              family = binomial )
mod2 <- glmer(surv_t1 ~ log_area_t0 + avgtm1 + (1 | year), data= surv_df,
              family = binomial )
mod3 <- glmer(surv_t1 ~ log_area_t0 + avgtm2 + (1 | year), data= surv_df,
              family = binomial )
# write it out
# NOTE(review): the three fixef() vectors have different element names
# (mod1 carries an interaction term; each model has a different climate
# term), so bind_rows() fills the mismatched columns with NA.
bind_rows( fixef(mod1),fixef(mod2), fixef(mod3) ) %>%
  write.csv(paste0('results/lme4/','surv_',clim_var,'.csv'),row.names=F)
# mod1 <- glmer(surv_t1 ~ log_area_t0 * avgt0 + (1 | year), data= surv_df,
# family = binomial )
# mod2 <- glmer(surv_t1 ~ avgt0 + (log_area_t0 | year), data= surv_df,
# family = binomial )
# mod3 <- glmer(surv_t1 ~ avgt0 + (1 | year) + (log_area_t0|location), data= surv_df,
# family = binomial )
# mod4 <- glmer(surv_t1 ~ avgt0 + (log_area_t0 | year) + (log_area_t0|location), data= surv_df,
# family = binomial )
# AIC(mod1,mod2,mod3,mod4)
# fit stan models ----------------------------------------------------------------
# organize data into list to pass to stan
# (element names must match the data blocks of the .stan files fit below)
dat_stan <- list(
  n = nrow(surv_df),                          # number of individual transitions
  n_year = surv_df$year_i %>% unique %>% length,
  yr_bck = m_back / 12,                       # years of climate history (36/12 = 3)
  n_site = surv_df$site_i %>% unique %>% length,
  n_lag = ncol(clim_pred),                    # number of monthly lags (36)
  y = surv_df$surv_t1,                        # 0/1 survival response
  x_size = surv_df$log_area_t0,               # size covariate
  clim = clim_pred,                           # year-by-lag climate matrix
  clim_means = rowMeans(clim_pred),           # mean over all 36 lags, per year
  year_i = surv_df$year_i,
  site_i = surv_df$site_i,
  expp_beta = expp_beta,
  # climate variables split by year back (rows = 12 months, cols = years)
  clim1 = t(clim_pred)[1:12 ,],               # lags 1-12  (year t)
  clim2 = t(clim_pred)[13:24,],               # lags 13-24 (year t-1)
  clim3 = t(clim_pred)[25:36,],               # lags 25-36 (year t-2)
  clim1_means = rowMeans( clim_pred[,1:12] ),
  clim2_means = rowMeans( clim_pred[,13:24] ),
  clim3_means = rowMeans( clim_pred[,25:36] ),
  K = ncol(clim_pred) / 12,                   # number of years back (3)
  M = 12                                      # months per year
)
# simulation parameters
# MCMC settings: (4000 - 1000) / 2 = 1500 post-warmup draws per chain
sim_pars <- list(
  warmup = 1000,
  iter = 4000,
  thin = 2,
  chains = 4
)
# # Average of previous 3 years
# fit_avg <- stan(
# file = paste0("analysis/stan/surv/bernoulli_avg.stan"),
# data = dat_stan,
# pars = c('b0', 'b_size', 'b_c'),
# warmup = sim_pars$warmup,
# iter = sim_pars$iter,
# thin = sim_pars$thin,
# chains = sim_pars$chains#,
# #control = list(adapt_delta = 0.999, stepsize = 0.001, max_treedepth = 20)
# )
#
# # store results
# out_summ <- summary(fit_avg)$summary
# out_post <- extract(fit_avg) %>% as.data.frame
#
# write.csv(out_summ, paste0('results/surv_a_summ_',clim_var,'.csv'))
# write.csv(out_post, paste0("results/surv_a_post_",clim_var,".csv"),row.names=F)
#
#
# # Multiple years, weighted with simplex
# fit_3yr <- stan(
# file = paste0("analysis/stan/surv/bernoulli_mYears_simplex.stan"),
# data = dat_stan,
# pars = c('theta_k', 'b0', 'b_size', 'b_c'),
# warmup = sim_pars$warmup,
# iter = sim_pars$iter,
# thin = sim_pars$thin,
# chains = sim_pars$chains#,
# #control = list(adapt_delta = 0.999, stepsize = 0.001, max_treedepth = 20)
# )
#
# # store results
# out_summ <- summary(fit_3yr)$summary
# out_post <- extract(fit_3yr) %>% as.data.frame
#
# write.csv(out_summ, paste0("results/surv_sam_3y_summ_",clim_var,".csv"))
# write.csv(out_post, paste0("results/surv_sam_3y_post_",clim_var,".csv"),row.names=F)
# Average of previous 3 years: Bernoulli survival model with a random year
# intercept ("_re"); presumably uses dat_stan$clim_means (the 36-month row
# mean) as the climate predictor -- Stan file not visible here, confirm.
fit_avg_re <- stan(
  file   = "analysis/stan/surv/bernoulli_avg_re.stan",  # plain string: paste0() on a single literal was redundant
  data   = dat_stan,
  pars   = c('b0', 's_yr', 'b_yr', 'b_size', 'b_c'),
  warmup = sim_pars$warmup,
  iter   = sim_pars$iter,
  thin   = sim_pars$thin,
  chains = sim_pars$chains#,
  #control = list(adapt_delta = 0.999, stepsize = 0.001, max_treedepth = 20)
)
# store results: parameter summary table plus the full posterior draws.
# rstan::extract is qualified because tidyr (attached above) also exports extract().
out_summ <- summary(fit_avg_re)$summary
out_post <- rstan::extract(fit_avg_re) %>% as.data.frame
write.csv(out_summ, paste0("results/surv_a_summ_re_", clim_var, ".csv"))
write.csv(out_post, paste0("results/surv_a_post_re_", clim_var, ".csv"), row.names = FALSE)
# Multiple years, weighted with simplex: the three yearly climate means are
# combined via simplex weights theta_k (presumably estimated in the Stan
# model -- file not visible here), with a random year intercept ("_re").
fit_3yr_re <- stan(
  file   = "analysis/stan/surv/bernoulli_mYears_simplex_re.stan",  # plain string: paste0() on a single literal was redundant
  data   = dat_stan,
  pars   = c('theta_k', 'b0', 's_yr', 'b_yr', 'b_size', 'b_c'),
  warmup = sim_pars$warmup,
  iter   = sim_pars$iter,
  thin   = sim_pars$thin,
  chains = sim_pars$chains#,
  #control = list(adapt_delta = 0.999, stepsize = 0.001, max_treedepth = 20)
)
# store results: parameter summary table plus the full posterior draws.
# rstan::extract is qualified because tidyr (attached above) also exports extract().
out_summ <- summary(fit_3yr_re)$summary
out_post <- rstan::extract(fit_3yr_re) %>% as.data.frame
write.csv(out_summ, paste0("results/surv_sam_3y_summ_re_", clim_var, ".csv"))
write.csv(out_post, paste0("results/surv_sam_3y_post_re_", clim_var, ".csv"), row.names = FALSE)
# # 12 month dirichlet
# fit_12_nest <- stan(
# file = paste0("analysis/stan/surv/bernoulli_dirichlet_nest_12.stan"),
# data = dat_stan,
# pars = c('theta_m', 'b0', 'b_size', 'b_c'),
# warmup = sim_pars$warmup,
# iter = sim_pars$iter,
# thin = sim_pars$thin,
# chains = sim_pars$chains#,
# #control = list(adapt_delta = 0.8, stepsize = 0.001, max_treedepth = 10)
# )
#
# # store results
# out_summ <- summary(fit_12_nest)$summary
# out_post <- extract(fit_12_nest) %>% as.data.frame
#
# write.csv(out_summ, paste0("results/surv_sam_12m_summ_",clim_var,".csv"))
# write.csv(out_post, paste0("results/surv_sam_12m_post_",clim_var,".csv"),row.names=F)
# fit1_m <- brm(area_t1 ~ area_t0, data = surv_df )
# fit1 <- brm(log_area_t1 ~ log_area_t0 + (1|year),
# data = surv_df %>% mutate(year=as.factor(year)) )
# fit_f <- brm(surv_t1 ~ year,
# data = mutate(sl_df, year = as.factor(year) ),
# family = bernoulli(link = "logit") )
#
#
# # null model
# fit_mean <- stan(
# file = paste0("analysis/stan/surv/bernoulli_null.stan"),
# data = dat_stan,
# pars = c('b0', 'b_size'),
# warmup = sim_pars$warmup,
# iter = sim_pars$iter,
# thin = sim_pars$thin,
# chains = sim_pars$chains#,
# #control = list(adapt_delta = 0.999, stepsize = 0.001, max_treedepth = 20)
# )
#
#
# # random year effect only
# fit_year <- stan(
# file = paste0("analysis/stan/surv/bernoulli_ran_yr.stan"),
# data = dat_stan,
# pars = c('b0', 's_yr', 'b_yr', 'b_size'),
# warmup = sim_pars$warmup,
# iter = sim_pars$iter,
# thin = sim_pars$thin,
# chains = sim_pars$chains#,
# #control = list(adapt_delta = 0.999, stepsize = 0.001, max_treedepth = 20)
# )
#
# # update data list
# dat_stan$clim1 <- t(clim_pred)[1:12 ,]
# dat_stan$clim2 <- t(clim_pred)[13:24,]
# dat_stan$clim3 <- t(clim_pred)[25:36,]
# dat_stan$K <- ncol(dat_stan$clim) / 12
# dat_stan$M <- 12
#
# # power exponential moving window
# fit_36_nest <- stan(
# file = paste0("analysis/stan/surv/bernoulli_dirichlet_nest.stan"),
# data = dat_stan,
# pars = c('theta_y', 'theta_m', 'b0', 'b_size', 'b_c'),
# warmup = sim_pars$warmup,
# iter = sim_pars$iter,
# thin = sim_pars$thin,
# chains = sim_pars$chains#,
# #control = list(adapt_delta = 0.999, stepsize = 0.001, max_treedepth = 20)
# )
#
#
# # mutiple years, different betas
# dat_stan$clim1_means <- rowMeans( clim_pred[,1:12] )
# dat_stan$clim2_means <- rowMeans( clim_pred[,13:24] )
# dat_stan$clim3_means <- rowMeans( clim_pred[,25:36] )
#
# # Multiple years, weighted with simplex
# fit_year_simpl <- stan(
# file = paste0("analysis/stan/grow/gaussian_mYears_simplex.stan"),
# data = dat_stan,
# pars = c('theta_k', 'b0', 'b_size', 'b_c', 's'),
# warmup = sim_pars$warmup,
# iter = sim_pars$iter,
# thin = sim_pars$thin,
# chains = sim_pars$chains#,
# #control = list(adapt_delta = 0.999, stepsize = 0.001, max_treedepth = 20)
# )
#
# # Multiple years, weighted with simplex
# fit_year_simpl_re <- stan(
# file = paste0("analysis/stan/grow/gaussian_mYears_simplex_re.stan"),
# data = dat_stan,
# pars = c('theta_k', 'b0', 's_yr', 'b_yr', 'b_size', 's', 'b_c'),
# warmup = sim_pars$warmup,
# iter = sim_pars$iter,
# thin = sim_pars$thin,
# chains = sim_pars$chains#,
# #control = list(adapt_delta = 0.999, stepsize = 0.001, max_treedepth = 20)
# )
#
#
# # update data list
# dat_stan$K <- 2
# dat_stan$M <- 12
#
# # power exponential moving window
# fit_24_nest <- stan(
# file = paste0("analysis/stan/bernoulli_dirichlet_nest_24.stan"),
# data = dat_stan,
# pars = c('theta_y', 'theta_m', 'b0', 'b_c'),
# warmup = sim_pars$warmup,
# iter = sim_pars$iter,
# thin = sim_pars$thin,
# chains = sim_pars$chains#,
# #control = list(adapt_delta = 0.999, stepsize = 0.001, max_treedepth = 20)
# )
#
#
# # average climate
# dat_stan$clim_means <- rowMeans( clim_pred )
# fit_avg <- stan(
# file = paste0("analysis/bernoulli_avg_site.stan"),
# data = dat_stan,
# pars = c('beta', 'beta_site'),
# warmup = sim_pars$warmup,
# iter = sim_pars$iter,
# thin = sim_pars$thin,
# chains = sim_pars$chains#,
# #control = list(adapt_delta = 0.999, stepsize = 0.001, max_treedepth = 20)
# )
#
#
# # Multiple years, weighted with simplex
# fit_mYear_simpl <- stan(
# file = paste0("analysis/bernoulli_mYears_simplex_site.stan"),
# data = dat_stan,
# pars = c('theta', 'beta', 'beta_site'),
# warmup = sim_pars$warmup,
# iter = sim_pars$iter,
# thin = sim_pars$thin,
# chains = sim_pars$chains#,
# #control = list(adapt_delta = 0.999, stepsize = 0.001, max_treedepth = 20)
# )
#
#
# # average climate of first year
# dat_stan$clim_means <- rowMeans( clim_pred[,1:12] )
# fit_avg_yr1 <- stan(
# file = paste0("analysis/stan/bernoulli_avg_site.stan"),
# data = dat_stan,
# pars = c('beta', 'beta_site', 'log_lik'),
# warmup = sim_pars$warmup,
# iter = sim_pars$iter,
# thin = sim_pars$thin,
# chains = sim_pars$chains#,
# #control = list(adapt_delta = 0.999, stepsize = 0.001, max_treedepth = 20)
# )
#
# # average climate of first year
# dat_stan$clim_means <- rowMeans( clim_pred[,13:24] )
# fit_avg_yr2 <- stan(
# file = paste0("analysis/stan/bernoulli_avg_site.stan"),
# data = dat_stan,
# pars = c('beta', 'beta_site', 'log_lik'),
# warmup = sim_pars$warmup,
# iter = sim_pars$iter,
# thin = sim_pars$thin,
# chains = sim_pars$chains#,
# #control = list(adapt_delta = 0.999, stepsize = 0.001, max_treedepth = 20)
# )
#
# # average climate of second year
# dat_stan$clim_means <- rowMeans( clim_pred[,25:36] )
# fit_avg_yr3 <- stan(
# file = paste0("analysis/stan/bernoulli_avg_site.stan"),
# data = dat_stan,
# pars = c('beta', 'beta_site', 'log_lik'),
# warmup = sim_pars$warmup,
# iter = sim_pars$iter,
# thin = sim_pars$thin,
# chains = sim_pars$chains#,
# #control = list(adapt_delta = 0.999, stepsize = 0.001, max_treedepth = 20)
# )
#
#
#
#
# # Multiple years, weighted with simplex
# fit_mYear_simpl <- stan(
# file = paste0("analysis/bernoulli_mYears_simplex_site.stan"),
# data = dat_stan,
# pars = c('theta', 'beta', 'beta_site'),
# warmup = sim_pars$warmup,
# iter = sim_pars$iter,
# thin = sim_pars$thin,
# chains = sim_pars$chains#,
# #control = list(adapt_delta = 0.999, stepsize = 0.001, max_treedepth = 20)
# )
#
# # power exponential moving window
# fit_gev <- stan(
# file = paste0("analysis/bernoulli_gev_site.stan"),
# data = dat_stan,
# pars = c('loc', 'scale', 'shape', 'beta_site', 'beta'),
# warmup = sim_pars$warmup,
# iter = sim_pars$iter,
# thin = sim_pars$thin,
# chains = sim_pars$chains#,
# #control = list(adapt_delta = 0.999, stepsize = 0.001, max_treedepth = 20)
# )
#
#
# dat_stan$clim <- t(clim_pred)
# # power exponential moving window
# fit_24 <- stan(
# file = paste0("analysis/bernoulli_dirichlet_site.stan"),
# data = dat_stan,
# pars = c('theta', 'beta', 'beta_site'),
# warmup = sim_pars$warmup,
# iter = sim_pars$iter,
# thin = sim_pars$thin,
# chains = sim_pars$chains#,
# #control = list(adapt_delta = 0.999, stepsize = 0.001, max_treedepth = 20)
# )
#
# # power exponential moving window
# dat_stan$clim <- clim_pred
# fit_expp <- stan(
# file = paste0("analysis/bernoulli_expp.stan"),
# data = dat_stan,
# pars = c('sens_mu', 'sens_sd', 'beta_site', 'beta'),
# warmup = sim_pars$warmup,
# iter = sim_pars$iter,
# thin = sim_pars$thin,
# chains = sim_pars$chains#,
# #control = list(adapt_delta = 0.999, stepsize = 0.001, max_treedepth = 20)
# )
#
# # gaussian moving window
# fit_gaus <- stan(
# file = paste0("analysis/bernoulli_gaus.stan"),
# data = dat_stan,
# pars = c('sens_mu', 'sens_sd', 'alpha', 'beta'),
# warmup = sim_pars$warmup,
# iter = sim_pars$iter,
# thin = sim_pars$thin,
# chains = sim_pars$chains#,
# #control = list(adapt_delta = 0.999, stepsize = 0.001, max_treedepth = 20)
# )
#
# # # gaussian moving window
# # fit_null <- stan(
# # file = paste0("analysis/bernoulli_null.stan"),
# # data = dat_stan,
# # pars = c('alpha'),
# # warmup = sim_pars$warmup,
# # iter = sim_pars$iter,
# # thin = sim_pars$thin,
# # chains = sim_pars$chains#,
# # #control = list(adapt_delta = 0.999, stepsize = 0.001, max_treedepth = 20)
# # )
#
#
# # plot it out -------------------------------------------------------------------------------------
# theta <- extract(fit_24)[['theta']] %>%
# as.data.frame %>%
# stack %>%
# mutate( ind = as.character(ind) ) %>%
# mutate( ind = gsub("V","", ind) ) %>%
# mutate( ind = as.numeric(ind) )
# theta_mean <- summary(fit_24)$summary[,'mean'][paste0('theta[',1:24,']')]
# beta0_mean <- summary(fit_24)$summary[,'mean'][paste0('beta_site[',1:7,']')] %>% mean
#
#
# xs <- data.frame( year = c(2008:2016),
# xs = rowSums( sweep(clim_pred, 2, theta_mean, '*') ) )
#
# ys <- seedl %>%
# group_by(year, location) %>%
# summarise( surv_sum = sum(surv_t1),
# rep = n() ) %>%
# mutate( prop_surv = surv_sum / rep )
# xs <- data.frame( year = c(2008:2016), x1 = rowMeans(clim_pred[,1:12]),
# x2 = rowMeans(clim_pred[,13:24]) )
# plot_d <- full_join(ys, xs)
# x_seq <- seq(min(plot_d$xs), max(plot_d$xs), length.out=100 )
# pred <- boot::inv.logit( beta0_mean + x_seq * beta0_mean )
#
#
#
# tiff("results/Seedling_survival_prec_dirichlet.tiff", unit="in", width=4.5, height=6.3,
# res=600,compression="lzw")
#
# par(mfrow = c(2,1), mar = c(3,3,0.1,0.1), mgp = c(1.6,0.7,0) )
# boxplot(values ~ ind, data = theta, outline = F,
# ylab = "Month weights", xlab = "Month before demographic observation",
# cex.lab = 1)
# abline(v = 12.5, lty = 2)
# plot(prop_surv ~ xs, data = plot_d, col = as.factor(ys$location), pch = 16,
# ylab = "Average seedling survival (year t)",
# xlab = "Average climate predictor", cex.lab = 1)
# lines(x_seq, pred, lwd = 2)
#
# dev.off()
#
#
#
#
# x_seq1 <- seq(min(plot_d$x1), max(plot_d$x1), length.out=100)
# x_seq2 <- seq(min(plot_d$x2), max(plot_d$x2), length.out=100)
# beta0 <- summary(fit_mYear_beta)$summary[,'mean'][paste0('beta_site[',1:7,']')] %>% mean
# beta1 <- summary(fit_mYear_beta)$summary[,'mean']['beta[1]']
# beta2 <- summary(fit_mYear_beta)$summary[,'mean']['beta[2]']
# pred1 <- boot::inv.logit(beta0 + x_seq1 * beta1)
# pred2 <- boot::inv.logit(beta0 + x_seq2 * beta2)
#
# # get posterior
# fit_extract <- extract(fit_mYear_beta)
# beta1_p <- fit_extract$beta[,1]
# beta2_p <- fit_extract$beta[,2]
# beta0_p <- rowMeans(fit_extract$beta_site)
# pred1_p <- lapply(1:6000, function(ii) boot::inv.logit(beta0_p[ii] + x_seq1 * beta1_p[ii])) %>%
# rbind_l
# pred2_p <- lapply(1:6000, function(ii) boot::inv.logit(beta0_p[ii] + x_seq2 * beta2_p[ii])) %>%
# rbind_l
#
# plot_post <- function(ii, x_vals, pred_vec){
# lines(x_vals, pred_vec[ii,], col="grey")
# }
#
#
# tiff("results/Seedling_survival_precipitation.tiff", unit="in", width=4.5, height=6.3,
# res=600,compression="lzw")
#
# par(mfrow = c(2,1), mar = c(3,3,0.1,5), mgp = c(1.7,0.7,0) )
# plot(prop_surv ~ x1, data=plot_d, ylim = c(0,1), pch = 16, col = as.factor(ys$location),
# ylab = "Average seedling survival (year t)", xlab = "Precipitation anomaly (year t)", type = "n")
# lapply(1:6000, plot_post, x_seq1, pred1_p)
# points(prop_surv ~ x1, data=plot_d, ylim = c(0,1), pch = 16, col = as.factor(ys$location),
# ylab = "Average seedling survival (year t)", xlab = "Precipitation anomaly (year t)")
# lines( x_seq1, pred1, lwd=2 )
#
#
# legend(0.75,1, unique(ys$location), pch =16, col = unique(as.factor(ys$location)), bty = 'n',
# xpd=T)
# plot(prop_surv ~ x2, data=plot_d, ylim = c(0,1), pch = 16, col = as.factor(ys$location),
# ylab = "Average seedling survival (year t)", xlab = "Precipitation anomaly (year t-1)", type = "n")
# lapply(1:6000, plot_post, x_seq2, pred2_p)
# points(prop_surv ~ x2, data=plot_d, ylim = c(0,1), pch = 16, col = as.factor(ys$location),
# ylab = "Average seedling survival (year t)", xlab = "Precipitation anomaly (year t-1)")
# lines( x_seq2, pred2, lwd=2 )
# legend(-0.2,0.2, c("posterior", "mean"), lwd = c(1,2), col = c("grey","black"), bty = 'n',
# xpd=T)
#
# dev.off()
#
#
#
# # parameter values and diagnostics ----------------------------------------------------------------
#
# # list of model fits
# mod_fit <- list(fit_1 = fit_site,
# fit_2 = fit_mYear_beta,
# fit_2 = fit_avg_yr1,
# fit_4 = fit_avg_yr2)
#
# # parameter values
# pars <- c('beta', 'beta_site', 'log_lik')
#
# # get central tendencies
# pars_diag_extract <- function(x){
#
# # central tendencies
# tmp <- rstan::extract(x)
# par_means <- sapply(tmp, function(x) mean(x)) %>%
# setNames( paste0(names(tmp),"_mean") )
# par_medians <- sapply(tmp, function(x) median(x)) %>%
# setNames( paste0(names(tmp),"_median") )
# central_tend<- c(par_means, par_medians)
#
# # diagnostics
# diverg <- do.call(rbind, args = get_sampler_params(x, inc_warmup = F))[,5]
# n_diverg <- length(which(diverg == 1))
# df_summ <- as.data.frame(summary(x)$summary)
# rhat_high <- length(which(df_summ$Rhat > 1.1))
# n_eff <- df_summ$n_eff / length(diverg)
# n_eff_low <- length(which(n_eff < 0.1))
# mcse_high <- length(which(df_summ$se_mean / df_summ$sd > 0.1))
# diagnostics <- c(n_diverg = n_diverg, rhat_high = rhat_high,
# n_eff_low = n_eff_low, mcse_high = mcse_high)
# out <- c( central_tend, diagnostics ) %>% t %>% as.data.frame
#
# rm(tmp) ; return(out)
#
# }
#
# # store posteriors
# posterior_extract <- function(model_fit, model_name){
#
# # central tendencies
# tmp <- rstan::extract(model_fit)
# post_df <- do.call(cbind, tmp) %>% as.data.frame
# ll_id <- grep("V", colnames(post_df) )
# new_names <- paste0("log_lik_", 1:length(ll_id) )
# names(post_df)[ll_id] <- new_names # no way to do this in dplyr
# post_df <- tibble::add_column(post_df,
# model = model_name, .before=1)
#
# rm(tmp) ; return(post_df)
#
# }
#
# # calculate central tendencies
# pars_diag_l <- lapply(mod_fit, pars_diag_extract)
# mod_pars_diag <- Reduce(function(...) bind_rows(...), pars_diag_l) %>%
# tibble::add_column(model = names(mod_fit), .before = 1)
#
# # store posteriors
# posts_l <- Map(posterior_extract, mod_fit, names(mod_fit) )
# posteriors <- Reduce(function(...) bind_rows(...), posts_l)
#
#
# # WAIC model comparison --------------------------------------------------------------------
#
# # wAIC model selection using loo approximation (from library 'loo')
# log_liks <- lapply(mod_fit, extract_log_lik)
#
# # leave-one-out estimates
# loo_l <- lapply(log_liks, loo) %>%
# setNames( c('loo_fit1', 'loo_fit2', 'loo_fit3', 'loo_fit4') )
# loo_df <- loo::compare(loo_l$loo_fit1, loo_l$loo_fit2, loo_l$loo_fit3, loo_l$loo_fit4) %>%
# as.data.frame %>%
# tibble::add_column(model = gsub("loo_","",names(loo_l) ), .before = 1)
#
# # WAIC estimates
# waic_l <- lapply(log_liks, waic) %>%
# setNames( c('waic_fit1', 'waic_fit2', 'waic_fit3', 'waic_fit4') )
# waic_df <- loo::compare(waic_l$waic_fit1, waic_l$waic_fit2, waic_l$waic_fit3, waic_l$waic_fit4) %>%
# as.data.frame %>%
# tibble::add_column(model = gsub("waic_","",names(waic_l) ), .before = 1)
#
| /analysis/vital_rates/surv.R | no_license | AldoCompagnoni/lupine | R | false | false | 23,007 | r | setwd("C:/cloud/Dropbox/lupine")
library(dplyr)
library(tidyr)
library(testthat)
library(rstan)
library(rstanarm)
library(brms)
library(lme4)
# keep character columns as characters; spelled out as FALSE because T/F are
# ordinary (reassignable) bindings and unsafe as logical literals
options(stringsAsFactors = FALSE)
# project helpers for reshaping demographic and climate data
source("analysis/format_data/format_scripts.R")
source("analysis/format_data/format_functions.R")
# set rstan options to parallel cores
rstan_options(auto_write = TRUE)              # cache compiled Stan models on disk
options(mc.cores = parallel::detectCores())   # run MCMC chains in parallel
# data ----------------------------------------------------------------------
# two demographic census files, stacked into one individual-level table
lupine_08 <- read.csv("data/lupine_05_08.csv")
lupine_18 <- read.csv("data/lupine_08_18.csv")
lupine_df <- bind_rows(lupine_08, lupine_18)
# climate predictors and ENSO indices (used by the clim_var branch below)
clim <- read.csv("data/lupine_fc_vars.csv")
enso <- read.csv("data/enso_data.csv")
# data format ----------------------------------------------------------------
# survival data: keep individuals with a known fate, excluding seedlings
# ('SL') and zero-sized plants, then add log size as the model covariate
surv <- lupine_df %>%
  subset( !is.na(surv_t1) & stage_t0 != 'SL' & area_t0 != 0 ) %>%
  mutate( log_area_t0 = log(area_t0) )
# climate format --------------------------------------------------------------
years     <- unique(surv$year)  # transition years with survival records
m_obs     <- 7   # month of demographic observation (as used by month_clim_form) -- confirm
m_back    <- 36  # months of climate lag to retain (3 years)
expp_beta <- 20  # passed to Stan as expp_beta; original comment was truncated -- confirm meaning
# format climate - need to select climate predictor first
# NOTE(review): `clim_var` is a free variable expected to be set before this
# script runs (e.g. 'prec', 'airt', or an ENSO index) -- confirm with caller.
if(clim_var == 'prec'){
  # precipitation is stored under "prate"; negative rates are clipped to zero
  # before the monthly reshaping. Inside subset(), `clim_var` refers to the
  # COLUMN of `clim`, which masks the global variable of the same name.
  clim_mat <- subset(clim, clim_var == "prate") %>%
    mutate( value = replace(value, value < 0, 0) ) %>%
    month_clim_form("precip", years, m_back, m_obs)
} else{
  # air temperature comes from `clim`; any other predictor from `enso`
  clim_var_input <- clim_var
  if( clim_var == 'airt') clim_input <- clim else clim_input <- enso
  clim_mat <- subset(clim_input, clim_var == clim_var_input) %>%
    month_clim_form(clim_var_input, years, m_back, m_obs)
}
# seedling data: attach the lagged monthly climate (clim_mat) to each
# individual record, dropping rows with unknown site
surv_clim <- left_join(surv, clim_mat) %>%
  subset( !is.na(location) )
# indices for STAN models
surv_df <- surv_clim %>%
  mutate( year_i = year %>% as.factor %>% as.numeric,     # 1..n_year index
          site_i = location %>% as.factor %>% as.numeric, # 1..n_site index
          # V1:V36 are the 36 lagged monthly values from month_clim_form.
          # NOTE(review): despite the "avg" names these are yearly SUMS
          # (rowSums), not means -- confirm this is intended.
          avgt0 = surv_clim %>% select(V1:V12) %>% rowSums,
          avgtm1 = surv_clim %>% select(V13:V24) %>% rowSums,
          avgtm2 = surv_clim %>% select(V25:V36) %>% rowSums )
# climate data: one row of 36 monthly values per observed year, sorted by
# year so rows line up with the year index (as.numeric(as.factor(year)))
clim_pred <- dplyr::select(surv_df, year) %>%
  inner_join( clim_mat ) %>%
  unique %>%
  arrange( year ) %>%
  dplyr::select(-year)
# fit GLMM models --------------------------------------------------------
# survival ~ size + one lagged year of climate, random year intercept.
# NOTE(review): mod1 uses an interaction (`*`) with current-year climate
# while mod2/mod3 are additive (`+`) -- confirm this asymmetry is intended.
mod1 <- glmer(surv_t1 ~ log_area_t0 * avgt0 + (1 | year), data= surv_df,
              family = binomial )
mod2 <- glmer(surv_t1 ~ log_area_t0 + avgtm1 + (1 | year), data= surv_df,
              family = binomial )
mod3 <- glmer(surv_t1 ~ log_area_t0 + avgtm2 + (1 | year), data= surv_df,
              family = binomial )
# write it out
# one row of fixed effects per model: row i corresponds to mod<i>
bind_rows( fixef(mod1),fixef(mod2), fixef(mod3) ) %>%
  write.csv(paste0('results/lme4/','surv_',clim_var,'.csv'),row.names=F)
# mod1 <- glmer(surv_t1 ~ log_area_t0 * avgt0 + (1 | year), data= surv_df,
# family = binomial )
# mod2 <- glmer(surv_t1 ~ avgt0 + (log_area_t0 | year), data= surv_df,
# family = binomial )
# mod3 <- glmer(surv_t1 ~ avgt0 + (1 | year) + (log_area_t0|location), data= surv_df,
# family = binomial )
# mod4 <- glmer(surv_t1 ~ avgt0 + (log_area_t0 | year) + (log_area_t0|location), data= surv_df,
# family = binomial )
# AIC(mod1,mod2,mod3,mod4)
# fit stan models ----------------------------------------------------------------
# organize data into list to pass to stan; several models share this list,
# each reading only the elements it needs
dat_stan <- list(
  n = nrow(surv_df),                               # number of survival transitions
  n_year = surv_df$year_i %>% unique %>% length,   # number of transition years
  yr_bck = m_back / 12,                            # years of climate lag (36/12 = 3)
  n_site = surv_df$site_i %>% unique %>% length,   # number of sites
  n_lag = ncol(clim_pred),                         # monthly lags (36)
  y = surv_df$surv_t1,                             # 0/1 survival response
  x_size = surv_df$log_area_t0,                    # log plant size covariate
  clim = clim_pred,                                # year x 36-month climate matrix
  clim_means = rowMeans(clim_pred),                # mean over the full 36-month window
  year_i = surv_df$year_i,                         # year index per observation
  site_i = surv_df$site_i,                         # site index per observation
  expp_beta = expp_beta,                           # power-exponential parameter (fixed at 20)
  # climate variables: each climX is a 12 x n_year matrix for one lag-year
  # (1:12 = year t, 13:24 = t-1, 25:36 = t-2)
  clim1 = t(clim_pred)[1:12 ,],
  clim2 = t(clim_pred)[13:24,],
  clim3 = t(clim_pred)[25:36,],
  clim1_means = rowMeans( clim_pred[,1:12] ),      # yearly means of the same slices
  clim2_means = rowMeans( clim_pred[,13:24] ),
  clim3_means = rowMeans( clim_pred[,25:36] ),
  K = ncol(clim_pred) / 12,                        # number of lag-years (3)
  M = 12                                           # months per year
)
# MCMC settings shared by every Stan fit below
sim_pars <- list(
  warmup = 1000,  # burn-in iterations per chain (discarded)
  iter   = 4000,  # total iterations per chain (warmup included)
  thin   = 2,     # keep every 2nd post-warmup draw
  chains = 4      # number of independent chains
)
# # Average of previous 3 years
# fit_avg <- stan(
# file = paste0("analysis/stan/surv/bernoulli_avg.stan"),
# data = dat_stan,
# pars = c('b0', 'b_size', 'b_c'),
# warmup = sim_pars$warmup,
# iter = sim_pars$iter,
# thin = sim_pars$thin,
# chains = sim_pars$chains#,
# #control = list(adapt_delta = 0.999, stepsize = 0.001, max_treedepth = 20)
# )
#
# # store results
# out_summ <- summary(fit_avg)$summary
# out_post <- extract(fit_avg) %>% as.data.frame
#
# write.csv(out_summ, paste0('results/surv_a_summ_',clim_var,'.csv'))
# write.csv(out_post, paste0("results/surv_a_post_",clim_var,".csv"),row.names=F)
#
#
# # Multiple years, weighted with simplex
# fit_3yr <- stan(
# file = paste0("analysis/stan/surv/bernoulli_mYears_simplex.stan"),
# data = dat_stan,
# pars = c('theta_k', 'b0', 'b_size', 'b_c'),
# warmup = sim_pars$warmup,
# iter = sim_pars$iter,
# thin = sim_pars$thin,
# chains = sim_pars$chains#,
# #control = list(adapt_delta = 0.999, stepsize = 0.001, max_treedepth = 20)
# )
#
# # store results
# out_summ <- summary(fit_3yr)$summary
# out_post <- extract(fit_3yr) %>% as.data.frame
#
# write.csv(out_summ, paste0("results/surv_sam_3y_summ_",clim_var,".csv"))
# write.csv(out_post, paste0("results/surv_sam_3y_post_",clim_var,".csv"),row.names=F)
# Average of previous 3 years: Bernoulli survival model with a random year
# intercept ("_re"); presumably uses dat_stan$clim_means (the 36-month row
# mean) as the climate predictor -- Stan file not visible here, confirm.
fit_avg_re <- stan(
  file   = "analysis/stan/surv/bernoulli_avg_re.stan",  # plain string: paste0() on a single literal was redundant
  data   = dat_stan,
  pars   = c('b0', 's_yr', 'b_yr', 'b_size', 'b_c'),
  warmup = sim_pars$warmup,
  iter   = sim_pars$iter,
  thin   = sim_pars$thin,
  chains = sim_pars$chains#,
  #control = list(adapt_delta = 0.999, stepsize = 0.001, max_treedepth = 20)
)
# store results: parameter summary table plus the full posterior draws.
# rstan::extract is qualified because tidyr (attached above) also exports extract().
out_summ <- summary(fit_avg_re)$summary
out_post <- rstan::extract(fit_avg_re) %>% as.data.frame
write.csv(out_summ, paste0("results/surv_a_summ_re_", clim_var, ".csv"))
write.csv(out_post, paste0("results/surv_a_post_re_", clim_var, ".csv"), row.names = FALSE)
# Multiple years, weighted with simplex: the three yearly climate means are
# combined via simplex weights theta_k (presumably estimated in the Stan
# model -- file not visible here), with a random year intercept ("_re").
fit_3yr_re <- stan(
  file   = "analysis/stan/surv/bernoulli_mYears_simplex_re.stan",  # plain string: paste0() on a single literal was redundant
  data   = dat_stan,
  pars   = c('theta_k', 'b0', 's_yr', 'b_yr', 'b_size', 'b_c'),
  warmup = sim_pars$warmup,
  iter   = sim_pars$iter,
  thin   = sim_pars$thin,
  chains = sim_pars$chains#,
  #control = list(adapt_delta = 0.999, stepsize = 0.001, max_treedepth = 20)
)
# store results: parameter summary table plus the full posterior draws.
# rstan::extract is qualified because tidyr (attached above) also exports extract().
out_summ <- summary(fit_3yr_re)$summary
out_post <- rstan::extract(fit_3yr_re) %>% as.data.frame
write.csv(out_summ, paste0("results/surv_sam_3y_summ_re_", clim_var, ".csv"))
write.csv(out_post, paste0("results/surv_sam_3y_post_re_", clim_var, ".csv"), row.names = FALSE)
# # 12 month dirichlet
# fit_12_nest <- stan(
# file = paste0("analysis/stan/surv/bernoulli_dirichlet_nest_12.stan"),
# data = dat_stan,
# pars = c('theta_m', 'b0', 'b_size', 'b_c'),
# warmup = sim_pars$warmup,
# iter = sim_pars$iter,
# thin = sim_pars$thin,
# chains = sim_pars$chains#,
# #control = list(adapt_delta = 0.8, stepsize = 0.001, max_treedepth = 10)
# )
#
# # store results
# out_summ <- summary(fit_12_nest)$summary
# out_post <- extract(fit_12_nest) %>% as.data.frame
#
# write.csv(out_summ, paste0("results/surv_sam_12m_summ_",clim_var,".csv"))
# write.csv(out_post, paste0("results/surv_sam_12m_post_",clim_var,".csv"),row.names=F)
# fit1_m <- brm(area_t1 ~ area_t0, data = surv_df )
# fit1 <- brm(log_area_t1 ~ log_area_t0 + (1|year),
# data = surv_df %>% mutate(year=as.factor(year)) )
# fit_f <- brm(surv_t1 ~ year,
# data = mutate(sl_df, year = as.factor(year) ),
# family = bernoulli(link = "logit") )
#
#
# # null model
# fit_mean <- stan(
# file = paste0("analysis/stan/surv/bernoulli_null.stan"),
# data = dat_stan,
# pars = c('b0', 'b_size'),
# warmup = sim_pars$warmup,
# iter = sim_pars$iter,
# thin = sim_pars$thin,
# chains = sim_pars$chains#,
# #control = list(adapt_delta = 0.999, stepsize = 0.001, max_treedepth = 20)
# )
#
#
# # random year effect only
# fit_year <- stan(
# file = paste0("analysis/stan/surv/bernoulli_ran_yr.stan"),
# data = dat_stan,
# pars = c('b0', 's_yr', 'b_yr', 'b_size'),
# warmup = sim_pars$warmup,
# iter = sim_pars$iter,
# thin = sim_pars$thin,
# chains = sim_pars$chains#,
# #control = list(adapt_delta = 0.999, stepsize = 0.001, max_treedepth = 20)
# )
#
# # update data list
# dat_stan$clim1 <- t(clim_pred)[1:12 ,]
# dat_stan$clim2 <- t(clim_pred)[13:24,]
# dat_stan$clim3 <- t(clim_pred)[25:36,]
# dat_stan$K <- ncol(dat_stan$clim) / 12
# dat_stan$M <- 12
#
# # power exponential moving window
# fit_36_nest <- stan(
# file = paste0("analysis/stan/surv/bernoulli_dirichlet_nest.stan"),
# data = dat_stan,
# pars = c('theta_y', 'theta_m', 'b0', 'b_size', 'b_c'),
# warmup = sim_pars$warmup,
# iter = sim_pars$iter,
# thin = sim_pars$thin,
# chains = sim_pars$chains#,
# #control = list(adapt_delta = 0.999, stepsize = 0.001, max_treedepth = 20)
# )
#
#
# # mutiple years, different betas
# dat_stan$clim1_means <- rowMeans( clim_pred[,1:12] )
# dat_stan$clim2_means <- rowMeans( clim_pred[,13:24] )
# dat_stan$clim3_means <- rowMeans( clim_pred[,25:36] )
#
# # Multiple years, weighted with simplex
# fit_year_simpl <- stan(
# file = paste0("analysis/stan/grow/gaussian_mYears_simplex.stan"),
# data = dat_stan,
# pars = c('theta_k', 'b0', 'b_size', 'b_c', 's'),
# warmup = sim_pars$warmup,
# iter = sim_pars$iter,
# thin = sim_pars$thin,
# chains = sim_pars$chains#,
# #control = list(adapt_delta = 0.999, stepsize = 0.001, max_treedepth = 20)
# )
#
# # Multiple years, weighted with simplex
# fit_year_simpl_re <- stan(
# file = paste0("analysis/stan/grow/gaussian_mYears_simplex_re.stan"),
# data = dat_stan,
# pars = c('theta_k', 'b0', 's_yr', 'b_yr', 'b_size', 's', 'b_c'),
# warmup = sim_pars$warmup,
# iter = sim_pars$iter,
# thin = sim_pars$thin,
# chains = sim_pars$chains#,
# #control = list(adapt_delta = 0.999, stepsize = 0.001, max_treedepth = 20)
# )
#
#
# # update data list
# dat_stan$K <- 2
# dat_stan$M <- 12
#
# # power exponential moving window
# fit_24_nest <- stan(
# file = paste0("analysis/stan/bernoulli_dirichlet_nest_24.stan"),
# data = dat_stan,
# pars = c('theta_y', 'theta_m', 'b0', 'b_c'),
# warmup = sim_pars$warmup,
# iter = sim_pars$iter,
# thin = sim_pars$thin,
# chains = sim_pars$chains#,
# #control = list(adapt_delta = 0.999, stepsize = 0.001, max_treedepth = 20)
# )
#
#
# # average climate
# dat_stan$clim_means <- rowMeans( clim_pred )
# fit_avg <- stan(
# file = paste0("analysis/bernoulli_avg_site.stan"),
# data = dat_stan,
# pars = c('beta', 'beta_site'),
# warmup = sim_pars$warmup,
# iter = sim_pars$iter,
# thin = sim_pars$thin,
# chains = sim_pars$chains#,
# #control = list(adapt_delta = 0.999, stepsize = 0.001, max_treedepth = 20)
# )
#
#
# # Multiple years, weighted with simplex
# fit_mYear_simpl <- stan(
# file = paste0("analysis/bernoulli_mYears_simplex_site.stan"),
# data = dat_stan,
# pars = c('theta', 'beta', 'beta_site'),
# warmup = sim_pars$warmup,
# iter = sim_pars$iter,
# thin = sim_pars$thin,
# chains = sim_pars$chains#,
# #control = list(adapt_delta = 0.999, stepsize = 0.001, max_treedepth = 20)
# )
#
#
# # average climate of first year
# dat_stan$clim_means <- rowMeans( clim_pred[,1:12] )
# fit_avg_yr1 <- stan(
# file = paste0("analysis/stan/bernoulli_avg_site.stan"),
# data = dat_stan,
# pars = c('beta', 'beta_site', 'log_lik'),
# warmup = sim_pars$warmup,
# iter = sim_pars$iter,
# thin = sim_pars$thin,
# chains = sim_pars$chains#,
# #control = list(adapt_delta = 0.999, stepsize = 0.001, max_treedepth = 20)
# )
#
# # average climate of first year
# dat_stan$clim_means <- rowMeans( clim_pred[,13:24] )
# fit_avg_yr2 <- stan(
# file = paste0("analysis/stan/bernoulli_avg_site.stan"),
# data = dat_stan,
# pars = c('beta', 'beta_site', 'log_lik'),
# warmup = sim_pars$warmup,
# iter = sim_pars$iter,
# thin = sim_pars$thin,
# chains = sim_pars$chains#,
# #control = list(adapt_delta = 0.999, stepsize = 0.001, max_treedepth = 20)
# )
#
# # average climate of second year
# dat_stan$clim_means <- rowMeans( clim_pred[,25:36] )
# fit_avg_yr3 <- stan(
# file = paste0("analysis/stan/bernoulli_avg_site.stan"),
# data = dat_stan,
# pars = c('beta', 'beta_site', 'log_lik'),
# warmup = sim_pars$warmup,
# iter = sim_pars$iter,
# thin = sim_pars$thin,
# chains = sim_pars$chains#,
# #control = list(adapt_delta = 0.999, stepsize = 0.001, max_treedepth = 20)
# )
#
#
#
#
# # Multiple years, weighted with simplex
# fit_mYear_simpl <- stan(
# file = paste0("analysis/bernoulli_mYears_simplex_site.stan"),
# data = dat_stan,
# pars = c('theta', 'beta', 'beta_site'),
# warmup = sim_pars$warmup,
# iter = sim_pars$iter,
# thin = sim_pars$thin,
# chains = sim_pars$chains#,
# #control = list(adapt_delta = 0.999, stepsize = 0.001, max_treedepth = 20)
# )
#
# # power exponential moving window
# fit_gev <- stan(
# file = paste0("analysis/bernoulli_gev_site.stan"),
# data = dat_stan,
# pars = c('loc', 'scale', 'shape', 'beta_site', 'beta'),
# warmup = sim_pars$warmup,
# iter = sim_pars$iter,
# thin = sim_pars$thin,
# chains = sim_pars$chains#,
# #control = list(adapt_delta = 0.999, stepsize = 0.001, max_treedepth = 20)
# )
#
#
# dat_stan$clim <- t(clim_pred)
# # power exponential moving window
# fit_24 <- stan(
# file = paste0("analysis/bernoulli_dirichlet_site.stan"),
# data = dat_stan,
# pars = c('theta', 'beta', 'beta_site'),
# warmup = sim_pars$warmup,
# iter = sim_pars$iter,
# thin = sim_pars$thin,
# chains = sim_pars$chains#,
# #control = list(adapt_delta = 0.999, stepsize = 0.001, max_treedepth = 20)
# )
#
# # power exponential moving window
# dat_stan$clim <- clim_pred
# fit_expp <- stan(
# file = paste0("analysis/bernoulli_expp.stan"),
# data = dat_stan,
# pars = c('sens_mu', 'sens_sd', 'beta_site', 'beta'),
# warmup = sim_pars$warmup,
# iter = sim_pars$iter,
# thin = sim_pars$thin,
# chains = sim_pars$chains#,
# #control = list(adapt_delta = 0.999, stepsize = 0.001, max_treedepth = 20)
# )
#
# # gaussian moving window
# fit_gaus <- stan(
# file = paste0("analysis/bernoulli_gaus.stan"),
# data = dat_stan,
# pars = c('sens_mu', 'sens_sd', 'alpha', 'beta'),
# warmup = sim_pars$warmup,
# iter = sim_pars$iter,
# thin = sim_pars$thin,
# chains = sim_pars$chains#,
# #control = list(adapt_delta = 0.999, stepsize = 0.001, max_treedepth = 20)
# )
#
# # # gaussian moving window
# # fit_null <- stan(
# # file = paste0("analysis/bernoulli_null.stan"),
# # data = dat_stan,
# # pars = c('alpha'),
# # warmup = sim_pars$warmup,
# # iter = sim_pars$iter,
# # thin = sim_pars$thin,
# # chains = sim_pars$chains#,
# # #control = list(adapt_delta = 0.999, stepsize = 0.001, max_treedepth = 20)
# # )
#
#
# # plot it out -------------------------------------------------------------------------------------
# theta <- extract(fit_24)[['theta']] %>%
# as.data.frame %>%
# stack %>%
# mutate( ind = as.character(ind) ) %>%
# mutate( ind = gsub("V","", ind) ) %>%
# mutate( ind = as.numeric(ind) )
# theta_mean <- summary(fit_24)$summary[,'mean'][paste0('theta[',1:24,']')]
# beta0_mean <- summary(fit_24)$summary[,'mean'][paste0('beta_site[',1:7,']')] %>% mean
#
#
# xs <- data.frame( year = c(2008:2016),
# xs = rowSums( sweep(clim_pred, 2, theta_mean, '*') ) )
#
# ys <- seedl %>%
# group_by(year, location) %>%
# summarise( surv_sum = sum(surv_t1),
# rep = n() ) %>%
# mutate( prop_surv = surv_sum / rep )
# xs <- data.frame( year = c(2008:2016), x1 = rowMeans(clim_pred[,1:12]),
# x2 = rowMeans(clim_pred[,13:24]) )
# plot_d <- full_join(ys, xs)
# x_seq <- seq(min(plot_d$xs), max(plot_d$xs), length.out=100 )
# pred <- boot::inv.logit( beta0_mean + x_seq * beta0_mean )
#
#
#
# tiff("results/Seedling_survival_prec_dirichlet.tiff", unit="in", width=4.5, height=6.3,
# res=600,compression="lzw")
#
# par(mfrow = c(2,1), mar = c(3,3,0.1,0.1), mgp = c(1.6,0.7,0) )
# boxplot(values ~ ind, data = theta, outline = F,
# ylab = "Month weights", xlab = "Month before demographic observation",
# cex.lab = 1)
# abline(v = 12.5, lty = 2)
# plot(prop_surv ~ xs, data = plot_d, col = as.factor(ys$location), pch = 16,
# ylab = "Average seedling survival (year t)",
# xlab = "Average climate predictor", cex.lab = 1)
# lines(x_seq, pred, lwd = 2)
#
# dev.off()
#
#
#
#
# x_seq1 <- seq(min(plot_d$x1), max(plot_d$x1), length.out=100)
# x_seq2 <- seq(min(plot_d$x2), max(plot_d$x2), length.out=100)
# beta0 <- summary(fit_mYear_beta)$summary[,'mean'][paste0('beta_site[',1:7,']')] %>% mean
# beta1 <- summary(fit_mYear_beta)$summary[,'mean']['beta[1]']
# beta2 <- summary(fit_mYear_beta)$summary[,'mean']['beta[2]']
# pred1 <- boot::inv.logit(beta0 + x_seq1 * beta1)
# pred2 <- boot::inv.logit(beta0 + x_seq2 * beta2)
#
# # get posterior
# fit_extract <- extract(fit_mYear_beta)
# beta1_p <- fit_extract$beta[,1]
# beta2_p <- fit_extract$beta[,2]
# beta0_p <- rowMeans(fit_extract$beta_site)
# pred1_p <- lapply(1:6000, function(ii) boot::inv.logit(beta0_p[ii] + x_seq1 * beta1_p[ii])) %>%
# rbind_l
# pred2_p <- lapply(1:6000, function(ii) boot::inv.logit(beta0_p[ii] + x_seq2 * beta2_p[ii])) %>%
# rbind_l
#
# plot_post <- function(ii, x_vals, pred_vec){
# lines(x_vals, pred_vec[ii,], col="grey")
# }
#
#
# tiff("results/Seedling_survival_precipitation.tiff", unit="in", width=4.5, height=6.3,
# res=600,compression="lzw")
#
# par(mfrow = c(2,1), mar = c(3,3,0.1,5), mgp = c(1.7,0.7,0) )
# plot(prop_surv ~ x1, data=plot_d, ylim = c(0,1), pch = 16, col = as.factor(ys$location),
# ylab = "Average seedling survival (year t)", xlab = "Precipitation anomaly (year t)", type = "n")
# lapply(1:6000, plot_post, x_seq1, pred1_p)
# points(prop_surv ~ x1, data=plot_d, ylim = c(0,1), pch = 16, col = as.factor(ys$location),
# ylab = "Average seedling survival (year t)", xlab = "Precipitation anomaly (year t)")
# lines( x_seq1, pred1, lwd=2 )
#
#
# legend(0.75,1, unique(ys$location), pch =16, col = unique(as.factor(ys$location)), bty = 'n',
# xpd=T)
# plot(prop_surv ~ x2, data=plot_d, ylim = c(0,1), pch = 16, col = as.factor(ys$location),
# ylab = "Average seedling survival (year t)", xlab = "Precipitation anomaly (year t-1)", type = "n")
# lapply(1:6000, plot_post, x_seq2, pred2_p)
# points(prop_surv ~ x2, data=plot_d, ylim = c(0,1), pch = 16, col = as.factor(ys$location),
# ylab = "Average seedling survival (year t)", xlab = "Precipitation anomaly (year t-1)")
# lines( x_seq2, pred2, lwd=2 )
# legend(-0.2,0.2, c("posterior", "mean"), lwd = c(1,2), col = c("grey","black"), bty = 'n',
# xpd=T)
#
# dev.off()
#
#
#
# # parameter values and diagnostics ----------------------------------------------------------------
#
# # list of model fits
# mod_fit <- list(fit_1 = fit_site,
# fit_2 = fit_mYear_beta,
#                 fit_3 = fit_avg_yr1,
# fit_4 = fit_avg_yr2)
#
# # parameter values
# pars <- c('beta', 'beta_site', 'log_lik')
#
# # get central tendencies
# pars_diag_extract <- function(x){
#
# # central tendencies
# tmp <- rstan::extract(x)
# par_means <- sapply(tmp, function(x) mean(x)) %>%
# setNames( paste0(names(tmp),"_mean") )
# par_medians <- sapply(tmp, function(x) median(x)) %>%
# setNames( paste0(names(tmp),"_median") )
# central_tend<- c(par_means, par_medians)
#
# # diagnostics
# diverg <- do.call(rbind, args = get_sampler_params(x, inc_warmup = F))[,5]
# n_diverg <- length(which(diverg == 1))
# df_summ <- as.data.frame(summary(x)$summary)
# rhat_high <- length(which(df_summ$Rhat > 1.1))
# n_eff <- df_summ$n_eff / length(diverg)
# n_eff_low <- length(which(n_eff < 0.1))
# mcse_high <- length(which(df_summ$se_mean / df_summ$sd > 0.1))
# diagnostics <- c(n_diverg = n_diverg, rhat_high = rhat_high,
# n_eff_low = n_eff_low, mcse_high = mcse_high)
# out <- c( central_tend, diagnostics ) %>% t %>% as.data.frame
#
# rm(tmp) ; return(out)
#
# }
#
# # store posteriors
# posterior_extract <- function(model_fit, model_name){
#
# # central tendencies
# tmp <- rstan::extract(model_fit)
# post_df <- do.call(cbind, tmp) %>% as.data.frame
# ll_id <- grep("V", colnames(post_df) )
# new_names <- paste0("log_lik_", 1:length(ll_id) )
# names(post_df)[ll_id] <- new_names # no way to do this in dplyr
# post_df <- tibble::add_column(post_df,
# model = model_name, .before=1)
#
# rm(tmp) ; return(post_df)
#
# }
#
# # calculate central tendencies
# pars_diag_l <- lapply(mod_fit, pars_diag_extract)
# mod_pars_diag <- Reduce(function(...) bind_rows(...), pars_diag_l) %>%
# tibble::add_column(model = names(mod_fit), .before = 1)
#
# # store posteriors
# posts_l <- Map(posterior_extract, mod_fit, names(mod_fit) )
# posteriors <- Reduce(function(...) bind_rows(...), posts_l)
#
#
# # WAIC model comparison --------------------------------------------------------------------
#
# # wAIC model selection using loo approximation (from library 'loo')
# log_liks <- lapply(mod_fit, extract_log_lik)
#
# # leave-one-out estimates
# loo_l <- lapply(log_liks, loo) %>%
# setNames( c('loo_fit1', 'loo_fit2', 'loo_fit3', 'loo_fit4') )
# loo_df <- loo::compare(loo_l$loo_fit1, loo_l$loo_fit2, loo_l$loo_fit3, loo_l$loo_fit4) %>%
# as.data.frame %>%
# tibble::add_column(model = gsub("loo_","",names(loo_l) ), .before = 1)
#
# # WAIC estimates
# waic_l <- lapply(log_liks, waic) %>%
# setNames( c('waic_fit1', 'waic_fit2', 'waic_fit3', 'waic_fit4') )
# waic_df <- loo::compare(waic_l$waic_fit1, waic_l$waic_fit2, waic_l$waic_fit3, waic_l$waic_fit4) %>%
# as.data.frame %>%
# tibble::add_column(model = gsub("waic_","",names(waic_l) ), .before = 1)
#
|
#' B-Spline Basis Functions
#'
#' `step_bs` creates a *specification* of a recipe step
#' that will create new columns that are basis expansions of
#' variables using B-splines.
#'
#' @inheritParams step_center
#' @param ... One or more selector functions to choose which
#' variables are affected by the step. See [selections()]
#' for more details. For the `tidy` method, these are not
#' currently used.
#' @param role For model terms created by this step, what analysis
#' role should they be assigned?. By default, the function assumes
#' that the new columns created from the original variables will be
#' used as predictors in a model.
#' @param objects A list of [splines::bs()] objects
#' created once the step has been trained.
#' @param deg_free The degrees of freedom.
#' @param degree The degree of the piecewise polynomial.
#' @param options A list of options for [splines::bs()]
#' which should not include `x`, `degree`, or `df`.
#' @return An updated version of `recipe` with the new step
#' added to the sequence of existing steps (if any). For the
#' `tidy` method, a tibble with columns `terms` which is
#' the columns that will be affected and `id`.
#' @keywords datagen
#' @concept preprocessing
#' @concept basis_expansion
#' @export
#' @details `step_bs` can create new features from a single variable
#' that enable fitting routines to model this variable in a
#' nonlinear manner. The extent of the possible nonlinearity is
#' determined by the `df`, `degree`, or `knots` arguments of
#' [splines::bs()]. The original variables are removed
#' from the data and new columns are added. The naming convention
#' for the new variables is `varname_bs_1` and so on.
#' @examples
#' data(biomass)
#'
#' biomass_tr <- biomass[biomass$dataset == "Training",]
#' biomass_te <- biomass[biomass$dataset == "Testing",]
#'
#' rec <- recipe(HHV ~ carbon + hydrogen + oxygen + nitrogen + sulfur,
#' data = biomass_tr)
#'
#' with_splines <- rec %>%
#' step_bs(carbon, hydrogen)
#' with_splines <- prep(with_splines, training = biomass_tr)
#'
#' expanded <- bake(with_splines, biomass_te)
#' expanded
#' @seealso [step_poly()] [recipe()] [step_ns()]
#' [prep.recipe()] [bake.recipe()]
step_bs <-
  function(recipe,
           ...,
           role = "predictor",
           trained = FALSE,
           deg_free = NULL,
           degree = 3,
           objects = NULL,
           options = list(),
           skip = FALSE,
           id = rand_id("bs")) {
    # Capture the selector expressions, bundle all settings into an
    # (untrained) "bs" step specification, then append that specification
    # to the recipe's sequence of steps.
    bs_step <- step_bs_new(
      terms = ellipse_check(...),
      trained = trained,
      deg_free = deg_free,
      degree = degree,
      role = role,
      objects = objects,
      options = options,
      skip = skip,
      id = id
    )
    add_step(recipe, bs_step)
  }
step_bs_new <-
  function(terms, role, trained, deg_free, degree, objects, options, skip, id) {
    # Internal constructor: forward every field unchanged to the generic
    # step() constructor under the "bs" subclass.  No validation here.
    fields <- list(
      subclass = "bs",
      terms = terms,
      role = role,
      trained = trained,
      deg_free = deg_free,
      degree = degree,
      objects = objects,
      options = options,
      skip = skip,
      id = id
    )
    do.call(step, fields)
  }
#' @importFrom splines bs
# Fit a B-spline basis to `x` using the options in `args`, then keep only
# the spline's defining attributes (knots, degree, boundary knots,
# intercept) on a 1-row placeholder matrix.  predict() on a "bs" object
# only needs these attributes, so the trained step stays small.
bs_wrapper <- function(x, args) {
  # Default the boundary knots to the observed range when not supplied.
  if (!("Boundary.knots" %in% names(args))) {
    args$Boundary.knots <- range(x)
  }
  args$x <- x
  fitted_basis <- do.call("bs", args)
  # 1-row placeholder: the training rows themselves are never needed again.
  skeleton <- matrix(NA, nrow = 1, ncol = ncol(fitted_basis))
  class(skeleton) <- c("bs", "basis", "matrix")
  for (a in c("knots", "degree", "Boundary.knots", "intercept")) {
    attr(skeleton, a) <- attr(fitted_basis, a)
  }
  skeleton
}
#' @export
prep.step_bs <- function(x, training, info = NULL, ...) {
  # Resolve the selectors to concrete column names and check that the
  # selected columns are of an acceptable (numeric) type.
  col_names <- terms_select(x$terms, info = info)
  check_type(training[, col_names])
  # splines::bs() options: the user-supplied list plus this step's
  # df/degree settings (which take precedence).
  opt <- x$options
  opt$df <- x$deg_free
  opt$degree <- x$degree
  # Fit one basis per selected column, keeping only the spline attributes.
  obj <- lapply(training[, col_names], bs_wrapper, opt)
  # Record the originating variable on each basis object so bake() can name
  # the new columns and drop the original ones.
  # (Was `seq(along = col_names)`, which relies on partial matching of the
  # `along.with` argument; `seq_along()` is the idiomatic, safe form.)
  for (i in seq_along(col_names)) {
    attr(obj[[i]], "var") <- col_names[i]
  }
  step_bs_new(
    terms = x$terms,
    role = x$role,
    trained = TRUE,
    deg_free = x$deg_free,
    degree = x$degree,
    objects = obj,
    options = x$options,
    skip = x$skip,
    id = x$id
  )
}
#' @importFrom tibble as_tibble is_tibble
#' @importFrom stats predict
#' @export
bake.step_bs <- function(object, new_data, ...) {
  # Number of basis columns produced by each trained spline object.
  n_basis <- vapply(object$objects, ncol, c(int = 1L))
  # Pre-allocate one wide matrix holding every variable's basis expansion.
  basis_mat <- matrix(NA, nrow = nrow(new_data), ncol = sum(n_basis))
  colnames(basis_mat) <- rep("", sum(n_basis))
  first_col <- 1
  for (var_nm in names(object$objects)) {
    col_idx <- (first_col):(first_col + n_basis[var_nm] - 1)
    orig_var <- attr(object$objects[[var_nm]], "var")
    # Evaluate the stored spline at the new values of this variable.
    basis_mat[, col_idx] <-
      predict(object$objects[[var_nm]], getElement(new_data, var_nm))
    # New columns follow the `varname_bs_1`, `varname_bs_2`, ... convention.
    colnames(basis_mat)[col_idx] <-
      paste(orig_var, "bs", names0(n_basis[var_nm], ""), sep = "_")
    first_col <- max(col_idx) + 1
    # The basis expansion replaces the original column.
    new_data[, orig_var] <- NULL
  }
  new_data <- bind_cols(new_data, as_tibble(basis_mat))
  if (!is_tibble(new_data)) {
    new_data <- as_tibble(new_data)
  }
  new_data
}
print.step_bs <-
  function(x, width = max(20, options()$width - 28), ...) {
    # Header line followed by the affected terms; printer() shows resolved
    # column names once trained, the raw selectors otherwise.
    cat("B-Splines on ")
    trained_cols <- names(x$objects)
    printer(trained_cols, x$terms, x$trained, width = width)
    invisible(x)
  }
#' @rdname step_bs
#' @param x A `step_bs` object.
#' @export
tidy.step_bs <- function(x, ...) {
  # After prep(), report the columns that were actually expanded; before
  # prep(), echo the (unresolved) selectors as character.
  if (is_trained(x)) {
    cols <- names(x$objects)
  } else {
    cols <- sel2char(x$terms)
  }
  # Build the tibble directly.  The previous code wrapped the trained names
  # in a tibble and passed that to expand.grid(), which does not yield a
  # plain character `terms` column.
  res <- tibble(terms = cols)
  res$id <- x$id
  res
}
| /R/bs.R | no_license | statist-bhfz/recipes | R | false | false | 5,664 | r | #' B-Spline Basis Functions
#'
#' `step_bs` creates a *specification* of a recipe step
#' that will create new columns that are basis expansions of
#' variables using B-splines.
#'
#' @inheritParams step_center
#' @param ... One or more selector functions to choose which
#' variables are affected by the step. See [selections()]
#' for more details. For the `tidy` method, these are not
#' currently used.
#' @param role For model terms created by this step, what analysis
#' role should they be assigned?. By default, the function assumes
#' that the new columns created from the original variables will be
#' used as predictors in a model.
#' @param objects A list of [splines::bs()] objects
#' created once the step has been trained.
#' @param deg_free The degrees of freedom.
#' @param degree The degree of the piecewise polynomial.
#' @param options A list of options for [splines::bs()]
#' which should not include `x`, `degree`, or `df`.
#' @return An updated version of `recipe` with the new step
#' added to the sequence of existing steps (if any). For the
#' `tidy` method, a tibble with columns `terms` which is
#' the columns that will be affected and `id`.
#' @keywords datagen
#' @concept preprocessing
#' @concept basis_expansion
#' @export
#' @details `step_bs` can create new features from a single variable
#' that enable fitting routines to model this variable in a
#' nonlinear manner. The extent of the possible nonlinearity is
#' determined by the `df`, `degree`, or `knots` arguments of
#' [splines::bs()]. The original variables are removed
#' from the data and new columns are added. The naming convention
#' for the new variables is `varname_bs_1` and so on.
#' @examples
#' data(biomass)
#'
#' biomass_tr <- biomass[biomass$dataset == "Training",]
#' biomass_te <- biomass[biomass$dataset == "Testing",]
#'
#' rec <- recipe(HHV ~ carbon + hydrogen + oxygen + nitrogen + sulfur,
#' data = biomass_tr)
#'
#' with_splines <- rec %>%
#' step_bs(carbon, hydrogen)
#' with_splines <- prep(with_splines, training = biomass_tr)
#'
#' expanded <- bake(with_splines, biomass_te)
#' expanded
#' @seealso [step_poly()] [recipe()] [step_ns()]
#' [prep.recipe()] [bake.recipe()]
step_bs <-
  function(recipe,
           ...,
           role = "predictor",
           trained = FALSE,
           deg_free = NULL,
           degree = 3,
           objects = NULL,
           options = list(),
           skip = FALSE,
           id = rand_id("bs")) {
    # Capture the selector expressions, bundle all settings into an
    # (untrained) "bs" step specification, then append that specification
    # to the recipe's sequence of steps.
    bs_step <- step_bs_new(
      terms = ellipse_check(...),
      trained = trained,
      deg_free = deg_free,
      degree = degree,
      role = role,
      objects = objects,
      options = options,
      skip = skip,
      id = id
    )
    add_step(recipe, bs_step)
  }
step_bs_new <-
  function(terms, role, trained, deg_free, degree, objects, options, skip, id) {
    # Internal constructor: forward every field unchanged to the generic
    # step() constructor under the "bs" subclass.  No validation here.
    fields <- list(
      subclass = "bs",
      terms = terms,
      role = role,
      trained = trained,
      deg_free = deg_free,
      degree = degree,
      objects = objects,
      options = options,
      skip = skip,
      id = id
    )
    do.call(step, fields)
  }
#' @importFrom splines bs
# Fit a B-spline basis to `x` using the options in `args`, then keep only
# the spline's defining attributes (knots, degree, boundary knots,
# intercept) on a 1-row placeholder matrix.  predict() on a "bs" object
# only needs these attributes, so the trained step stays small.
bs_wrapper <- function(x, args) {
  # Default the boundary knots to the observed range when not supplied.
  if (!("Boundary.knots" %in% names(args))) {
    args$Boundary.knots <- range(x)
  }
  args$x <- x
  fitted_basis <- do.call("bs", args)
  # 1-row placeholder: the training rows themselves are never needed again.
  skeleton <- matrix(NA, nrow = 1, ncol = ncol(fitted_basis))
  class(skeleton) <- c("bs", "basis", "matrix")
  for (a in c("knots", "degree", "Boundary.knots", "intercept")) {
    attr(skeleton, a) <- attr(fitted_basis, a)
  }
  skeleton
}
#' @export
prep.step_bs <- function(x, training, info = NULL, ...) {
  # Resolve the selectors to concrete column names and check that the
  # selected columns are of an acceptable (numeric) type.
  col_names <- terms_select(x$terms, info = info)
  check_type(training[, col_names])
  # splines::bs() options: the user-supplied list plus this step's
  # df/degree settings (which take precedence).
  opt <- x$options
  opt$df <- x$deg_free
  opt$degree <- x$degree
  # Fit one basis per selected column, keeping only the spline attributes.
  obj <- lapply(training[, col_names], bs_wrapper, opt)
  # Record the originating variable on each basis object so bake() can name
  # the new columns and drop the original ones.
  # (Was `seq(along = col_names)`, which relies on partial matching of the
  # `along.with` argument; `seq_along()` is the idiomatic, safe form.)
  for (i in seq_along(col_names)) {
    attr(obj[[i]], "var") <- col_names[i]
  }
  step_bs_new(
    terms = x$terms,
    role = x$role,
    trained = TRUE,
    deg_free = x$deg_free,
    degree = x$degree,
    objects = obj,
    options = x$options,
    skip = x$skip,
    id = x$id
  )
}
#' @importFrom tibble as_tibble is_tibble
#' @importFrom stats predict
#' @export
bake.step_bs <- function(object, new_data, ...) {
  # Number of basis columns produced by each trained spline object.
  n_basis <- vapply(object$objects, ncol, c(int = 1L))
  # Pre-allocate one wide matrix holding every variable's basis expansion.
  basis_mat <- matrix(NA, nrow = nrow(new_data), ncol = sum(n_basis))
  colnames(basis_mat) <- rep("", sum(n_basis))
  first_col <- 1
  for (var_nm in names(object$objects)) {
    col_idx <- (first_col):(first_col + n_basis[var_nm] - 1)
    orig_var <- attr(object$objects[[var_nm]], "var")
    # Evaluate the stored spline at the new values of this variable.
    basis_mat[, col_idx] <-
      predict(object$objects[[var_nm]], getElement(new_data, var_nm))
    # New columns follow the `varname_bs_1`, `varname_bs_2`, ... convention.
    colnames(basis_mat)[col_idx] <-
      paste(orig_var, "bs", names0(n_basis[var_nm], ""), sep = "_")
    first_col <- max(col_idx) + 1
    # The basis expansion replaces the original column.
    new_data[, orig_var] <- NULL
  }
  new_data <- bind_cols(new_data, as_tibble(basis_mat))
  if (!is_tibble(new_data)) {
    new_data <- as_tibble(new_data)
  }
  new_data
}
print.step_bs <-
  function(x, width = max(20, options()$width - 28), ...) {
    # Header line followed by the affected terms; printer() shows resolved
    # column names once trained, the raw selectors otherwise.
    cat("B-Splines on ")
    trained_cols <- names(x$objects)
    printer(trained_cols, x$terms, x$trained, width = width)
    invisible(x)
  }
#' @rdname step_bs
#' @param x A `step_bs` object.
#' @export
tidy.step_bs <- function(x, ...) {
  # After prep(), report the columns that were actually expanded; before
  # prep(), echo the (unresolved) selectors as character.
  if (is_trained(x)) {
    cols <- names(x$objects)
  } else {
    cols <- sel2char(x$terms)
  }
  # Build the tibble directly.  The previous code wrapped the trained names
  # in a tibble and passed that to expand.grid(), which does not yield a
  # plain character `terms` column.
  res <- tibble(terms = cols)
  res$id <- x$id
  res
}
|
# clean_data.R: import the raw wetland survey tables, recode the damage
# indicator, make duplicated form IDs unique, and save the cleaned tables.
# packages.R presumably loads readr, readxl and dplyr (read_csv, read_excel,
# %>%, mutate, filter, select are used below) -- confirm before batch runs.
source("code/packages.R")
# Read Data
INH_ConHum <- read_csv("data_raw/INH_ConHum.csv")
INH_FactInf <- read_csv("data_raw/INH_Fact_Infl.csv")
# NOTE(review): this CSV filename contains ".xls - " and spaces; it looks like
# an exported sheet name -- verify the file exists under data_raw/.
vars <- read_csv("data_raw/INH_General.xls - INH_General.csv")
index <- read_excel("data_raw/WetlandValues_Index.xlsx")
ConHum_complete <- read_excel("data_raw/ConHum.xlsx")
## Wetland condition (ConHum)
## correct 0s [others = ]
# Interactive check of the distinct free-text "Otros" (other damage) values.
unique(ConHum_complete$Otros)
# damage = 1 when Otros is non-missing (some "other" damage noted) or the
# Fuego_Quem flag equals 1; otherwise 0.
ConHum <- ConHum_complete %>%
mutate(damage = case_when(is.na(Otros)!=TRUE | Fuego_Quem == 1 ~ 1,
TRUE ~ 0))
ConHum <- ConHum %>%
select(c(Formulario:Cultivado,damage))
# View() is interactive-only; consider removing for non-interactive runs.
View(ConHum)
# Three form IDs occur twice; rename the first duplicate row by appending "1"
# to the original ID so every Formulario becomes unique.
ConHum[ConHum$Formulario == 3289,][1,"Formulario"]<-32891
ConHum[ConHum$Formulario == 7700,][1,"Formulario"]<-77001
ConHum[ConHum$Formulario == 8585,][1,"Formulario"]<-85851
# reps keeps the rows still carrying the old IDs (the second of each pair).
reps<- ConHum %>% filter(Formulario == 3289|
Formulario == 7700 |
Formulario == 8585)
## Check zeroes
# Sanity check: summarize rows where every condition indicator is zero.
ConHum %>%
filter(BuenEstado==0 & Drenado ==0 & Gan_Presen ==0 & Plantas_In == 0 &
Seco==0 & Artificial==0 & Sediment ==0 & Restaur == 0 & Proc_Resta==0 &
Contamin == 0 & Colmat == 0 & Cultivado ==0 )%>%
summary()
### Influence Factors (FactInf)
# Drop rows without a form ID, then apply the same duplicate-ID fix.
FactInf <- INH_FactInf %>%
filter(!is.na(Formulario))
View(FactInf)
FactInf[FactInf$Formulario == 3289,][1,"Formulario"]<-32891
FactInf[FactInf$Formulario == 7700,][1,"Formulario"]<-77001
FactInf[FactInf$Formulario == 8585,][1,"Formulario"]<-85851
reps1 <- FactInf %>% filter(Formulario == 3289|
Formulario == 7700 |
Formulario == 8585)
# Same duplicate-ID fix for the general variables table.
vars
vars[vars$Formulario == 3289,][1,"Formulario"]<-32891
vars[vars$Formulario == 7700,][1,"Formulario"]<-77001
vars[vars$Formulario == 8585,][1,"Formulario"]<-85851
reps2 <- vars %>% filter(Formulario == 3289|
Formulario == 7700 |
Formulario == 8585)
index_all <- index %>%
filter(!is.na(ID))
View(index)
# NOTE(review): unlike the three tables above, reps3 is computed BEFORE the
# IDs are reassigned, so it captures both rows of each duplicate pair --
# confirm this ordering difference is intentional.
reps3 <- index_all %>% filter(ID == 3289|
ID == 7700 |
ID == 8585)
index_all[index_all$ID == 3289,][1,"ID"]<-32891
index_all[index_all$ID == 7700,][1,"ID"]<-77001
index_all[index_all$ID == 8585,][1,"ID"]<-85851
# Quick dimension check of the four cleaned tables, then persist them.
dim(index_all);dim(ConHum); dim(FactInf); dim(vars)
save(ConHum, FactInf, index_all, vars, file= "data_clean/data_clean.Rdata")
| /code/clean_data.R | no_license | malfaro2/humedales | R | false | false | 2,418 | r | source("code/packages.R")
# clean_data.R (continued): import the raw wetland survey tables, recode the
# damage indicator, make duplicated form IDs unique, and save the results.
# The sourced packages.R presumably loads readr, readxl and dplyr (read_csv,
# read_excel, %>%, mutate, filter, select are used below) -- confirm.
# Read Data
INH_ConHum <- read_csv("data_raw/INH_ConHum.csv")
INH_FactInf <- read_csv("data_raw/INH_Fact_Infl.csv")
# NOTE(review): this CSV filename contains ".xls - " and spaces; it looks like
# an exported sheet name -- verify the file exists under data_raw/.
vars <- read_csv("data_raw/INH_General.xls - INH_General.csv")
index <- read_excel("data_raw/WetlandValues_Index.xlsx")
ConHum_complete <- read_excel("data_raw/ConHum.xlsx")
## Wetland condition (ConHum)
## correct 0s [others = ]
# Interactive check of the distinct free-text "Otros" (other damage) values.
unique(ConHum_complete$Otros)
# damage = 1 when Otros is non-missing (some "other" damage noted) or the
# Fuego_Quem flag equals 1; otherwise 0.
ConHum <- ConHum_complete %>%
mutate(damage = case_when(is.na(Otros)!=TRUE | Fuego_Quem == 1 ~ 1,
TRUE ~ 0))
ConHum <- ConHum %>%
select(c(Formulario:Cultivado,damage))
# View() is interactive-only; consider removing for non-interactive runs.
View(ConHum)
# Three form IDs occur twice; rename the first duplicate row by appending "1"
# to the original ID so every Formulario becomes unique.
ConHum[ConHum$Formulario == 3289,][1,"Formulario"]<-32891
ConHum[ConHum$Formulario == 7700,][1,"Formulario"]<-77001
ConHum[ConHum$Formulario == 8585,][1,"Formulario"]<-85851
# reps keeps the rows still carrying the old IDs (the second of each pair).
reps<- ConHum %>% filter(Formulario == 3289|
Formulario == 7700 |
Formulario == 8585)
## Check zeroes
# Sanity check: summarize rows where every condition indicator is zero.
ConHum %>%
filter(BuenEstado==0 & Drenado ==0 & Gan_Presen ==0 & Plantas_In == 0 &
Seco==0 & Artificial==0 & Sediment ==0 & Restaur == 0 & Proc_Resta==0 &
Contamin == 0 & Colmat == 0 & Cultivado ==0 )%>%
summary()
### Influence Factors (FactInf)
# Drop rows without a form ID, then apply the same duplicate-ID fix.
FactInf <- INH_FactInf %>%
filter(!is.na(Formulario))
View(FactInf)
FactInf[FactInf$Formulario == 3289,][1,"Formulario"]<-32891
FactInf[FactInf$Formulario == 7700,][1,"Formulario"]<-77001
FactInf[FactInf$Formulario == 8585,][1,"Formulario"]<-85851
reps1 <- FactInf %>% filter(Formulario == 3289|
Formulario == 7700 |
Formulario == 8585)
# Same duplicate-ID fix for the general variables table.
vars
vars[vars$Formulario == 3289,][1,"Formulario"]<-32891
vars[vars$Formulario == 7700,][1,"Formulario"]<-77001
vars[vars$Formulario == 8585,][1,"Formulario"]<-85851
reps2 <- vars %>% filter(Formulario == 3289|
Formulario == 7700 |
Formulario == 8585)
index_all <- index %>%
filter(!is.na(ID))
View(index)
# NOTE(review): unlike the three tables above, reps3 is computed BEFORE the
# IDs are reassigned, so it captures both rows of each duplicate pair --
# confirm this ordering difference is intentional.
reps3 <- index_all %>% filter(ID == 3289|
ID == 7700 |
ID == 8585)
index_all[index_all$ID == 3289,][1,"ID"]<-32891
index_all[index_all$ID == 7700,][1,"ID"]<-77001
index_all[index_all$ID == 8585,][1,"ID"]<-85851
# Quick dimension check of the four cleaned tables, then persist them.
dim(index_all);dim(ConHum); dim(FactInf); dim(vars)
save(ConHum, FactInf, index_all, vars, file= "data_clean/data_clean.Rdata")
|
# Support vector classifier lab on simulated two-class data using e1071::svm
# (ISLR-style walkthrough).  Statement order matters: every rnorm()/sample()
# call consumes RNG state after set.seed(1), so do not reorder.
library(e1071)
set.seed(1)
# We now use the svm() function to fit the support vector classifier for a given value of the cost parameter.
# Here we demonstrate the use of this function on a two-dimensional example so that we can plot the resulting
# decision boundary.
# We begin by generating the observations, which belong to two classes.
x=matrix(rnorm(20*2), ncol=2)
y=c(rep(-1,10), rep(1,10))
x[y==1,]=x[y==1,] + 1
x
y
# We begin by checking whether the classes are linearly separable.
plot(x, col=(3-y))
# They are not. Next, we fit the support vector classifier.
# We now create a data frame with the response coded as a factor.
dat <- data.frame(x = x,y = as.factor(y))
svmfit <- svm(y ~., data=dat, kernel="linear", cost=10,scale=FALSE)
# The argument scale=FALSE tells the svm() function not to scale each feature to
# have mean zero or standard deviation one;
# depending on the application, one might prefer to use scale=TRUE.
# We can now plot the support vector classifier obtained:
plot(svmfit , dat)
# Note that the two arguments to the plot.svm() function are the output of the call to svm(),
# as well as the data used in the call to svm().
# The region of feature space that will be assigned to the -1 class is shown in light blue,
# and the region that will be assigned to the +1 class is shown in purple.
# Indices of the support vectors.
svmfit$index
# Obtain basic information of the support classifier fit
summary(svmfit)
# Try a cost parameter of .1 instead
svmfit <- svm(y ~., data=dat, kernel="linear", cost = 0.1, scale=FALSE)
plot(svmfit , dat)
svmfit$index
######################## Utilizing the Tune function ###################
# Cross-validate over a grid of cost values to pick the best model.
set.seed (1)
tune.out <- tune(svm, y ~.,data=dat,kernel="linear", ranges=list(cost=c(0.001, 0.01, 0.1, 1,5,10,100)))
# We can easily access the cross-validation errors for each of these models using the summary() command:
summary(tune.out)
bestmod <- tune.out$best.model
summary(bestmod)
# The predict() function can be used to predict the class label on a set of test observations,
# at any given value of the cost parameter. We begin by generating a test data set.
xtest <- matrix(rnorm(20*2), ncol=2)
ytest=sample(c(-1,1), 20, rep=TRUE)
xtest[ytest==1,]=xtest[ytest==1,] + 1
testdat <- data.frame(x=xtest, y=as.factor(ytest))
# Predict the class labels of these observations
ypred <-predict(bestmod ,testdat)
table(predict=ypred, truth=testdat$y)
# Compare against a deliberately small cost value.
svmfit <- svm(y~., data=dat, kernel="linear", cost=.01, scale=FALSE)
ypred=predict(svmfit ,testdat)
table(predict=ypred, truth=testdat$y)
# We first further separate the two classes in our simulated data so that they are linearly separable:
x[y==1,]=x[y==1,]+0.5
plot(x, col=(y+5)/2, pch=19)
# Now the observations are just barely linearly separable.
# We fit the support vector classifier and plot the resulting hyperplane,
# using a very large value of cost so that no observations are misclassified.
dat=data.frame(x=x,y=as.factor(y))
svmfit <-svm(y~., data=dat, kernel="linear", cost=1e5)
summary(svmfit)
plot(svmfit,dat)
# We now try a smaller value of cost:
svmfit <- svm(y~., data=dat, kernel="linear", cost=1)
summary(svmfit)
plot(svmfit ,dat)
# NOTE(review): the block below repeats the cost=1e5 and cost=1 fits from
# just above verbatim (lab-transcript duplication); with unchanged data the
# results should be identical.
# Now the observations are just barely linearly separable.
# We fit the support vector classifier and plot the resulting hyperplane,
# using a very large value of cost so that no observations are misclassified.
dat=data.frame(x=x,y=as.factor(y))
svmfit <-svm(y~., data=dat, kernel="linear", cost=1e5)
summary(svmfit)
plot(svmfit,dat)
# No training errors were made and only three support vectors were used.
# However, we can see from the figure that the margin is
# very narrow (because the observations that are not support vectors, indicated as circles, are very
# close to the decision boundary). It seems likely that this model will perform poorly on test data.
# We now try a smaller value of cost:
svmfit <- svm(y~., data=dat, kernel="linear", cost=1)
summary(svmfit)
plot(svmfit ,dat)
# Using cost=1, we misclassify a training observation, but we also obtain a much wider margin and make
# use of seven support vectors.
# It seems likely that this model will perform better on test data than the model with cost=1e5
# It seems likely that this model will perform better on test data than the model with cost=1e5 | /Labs/Lab 2/Group4_Lab2.R | no_license | Dtrain27/DataAnalytics2021_Dominic_Schroeder | R | false | false | 4,186 | r | library(e1071)
# --- Support vector classifier lab (e1071) ---------------------------------
# Flat analysis script: fits linear-kernel SVMs on simulated two-class data,
# tunes the cost parameter by cross-validation, and evaluates on a test set.
# NOTE(review): assumes library(e1071) is attached above (svm(), tune()).
set.seed(1)
# We now use the svm() function to fit the support vector classifier for a given value of the cost parameter.
# Here we demonstrate the use of this function on a two-dimensional example so that we can plot the resulting
# decision boundary.
# We begin by generating the observations, which belong to two classes.
x=matrix(rnorm(20*2), ncol=2)
y=c(rep(-1,10), rep(1,10))
x[y==1,]=x[y==1,] + 1
x
y
# We begin by checking whether the classes are linearly separable.
plot(x, col=(3-y))
# They are not. Next, we fit the support vector classifier.
# We now create a data frame with the response coded as a factor.
dat <- data.frame(x = x,y = as.factor(y))
svmfit <- svm(y ~., data=dat, kernel="linear", cost=10,scale=FALSE)
# The argument scale=FALSE tells the svm() function not to scale each feature to
# have mean zero or standard deviation one;
# depending on the application, one might prefer to use scale=TRUE.
# We can now plot the support vector classifier obtained:
plot(svmfit , dat)
# Note that the two arguments to the plot.svm() function are the output of the call to svm(),
# as well as the data used in the call to svm().
# The region of feature space that will be assigned to the -1 class is shown in light blue,
# and the region that will be assigned to the +1 class is shown in purple.
svmfit$index
# Obtain basic information of the support classifier fit
summary(svmfit)
# Try a cost parameter of .1 instead
svmfit <- svm(y ~., data=dat, kernel="linear", cost = 0.1, scale=FALSE)
plot(svmfit , dat)
svmfit$index
######################## Utilizing the Tune function ###################
set.seed (1)
tune.out <- tune(svm, y ~.,data=dat,kernel="linear", ranges=list(cost=c(0.001, 0.01, 0.1, 1,5,10,100)))
# We can easily access the cross-validation errors for each of these models using the summary() command:
summary(tune.out)
bestmod <- tune.out$best.model
summary(bestmod)
# The predict() function can be used to predict the class label on a set of test observations,
# at any given value of the cost parameter. We begin by generating a test data set.
xtest <- matrix(rnorm(20*2), ncol=2)
ytest=sample(c(-1,1), 20, rep=TRUE)
xtest[ytest==1,]=xtest[ytest==1,] + 1
testdat <- data.frame(x=xtest, y=as.factor(ytest))
# Predict the class labels of these observations
ypred <-predict(bestmod ,testdat)
table(predict=ypred, truth=testdat$y)
svmfit <- svm(y~., data=dat, kernel="linear", cost=.01, scale=FALSE)
ypred=predict(svmfit ,testdat)
table(predict=ypred, truth=testdat$y)
# We first further separate the two classes in our simulated data so that they are linearly separable:
x[y==1,]=x[y==1,]+0.5
plot(x, col=(y+5)/2, pch=19)
# Now the observations are just barely linearly separable.
# We fit the support vector classifier and plot the resulting hyperplane,
# using a very large value of cost so that no observations are misclassified.
dat=data.frame(x=x,y=as.factor(y))
svmfit <-svm(y~., data=dat, kernel="linear", cost=1e5)
summary(svmfit)
plot(svmfit,dat)
# We now try a smaller value of cost:
svmfit <- svm(y~., data=dat, kernel="linear", cost=1)
summary(svmfit)
plot(svmfit ,dat)
# (Note: the following block repeats the previous cost=1e5 / cost=1 fits.)
# Now the observations are just barely linearly separable.
# We fit the support vector classifier and plot the resulting hyperplane,
# using a very large value of cost so that no observations are misclassified.
dat=data.frame(x=x,y=as.factor(y))
svmfit <-svm(y~., data=dat, kernel="linear", cost=1e5)
summary(svmfit)
plot(svmfit,dat)
# No training errors were made and only three support vectors were used.
# However, we can see from the figure that the margin is
# very narrow (because the observations that are not support vectors, indicated as circles, are very
# close to the decision boundary). It seems likely that this model will perform poorly on test data.
# We now try a smaller value of cost:
svmfit <- svm(y~., data=dat, kernel="linear", cost=1)
summary(svmfit)
plot(svmfit ,dat)
# Using cost=1, we misclassify a training observation, but we also obtain a much wider margin and make
# use of seven support vectors.
# It seems likely that this model will perform better on test data than the model with cost=1e5 |
# Copyright (C) 2014 Open Data ("Open Data" refers to
# one or more of the following companies: Open Data Partners LLC,
# Open Data Research LLC, or Open Data Capital LLC.)
#
# This file is part of Hadrian.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#' extract_params.kcca
#'
#' Extract K-centroids model parameters from a kcca object created by
#' the flexclust library. Objects of class "kccasimple" and base
#' \code{stats::kmeans} fits are handled as well.
#'
#' @param object an object of class "kcca", "kccasimple" or "kmeans"
#' @param ... further arguments passed to or from other methods
#' @return a \code{list} with elements \code{inputs} (input variable names,
#'   dots replaced by underscores), \code{centroids} (matrix of cluster
#'   centers), \code{family} (distance family name) and \code{k} (number of
#'   clusters) that can be inserted into a PFA cell or pool
#' @examples
#' model <- flexclust::kcca(iris[,1:4], k = 3, family=flexclust::kccaFamily("kmeans"))
#' extracted_params <- extract_params(model)
#' @export
extract_params.kcca <- function(object, ...) {

  if (inherits(object, c("kcca", "kccasimple"))) {
    # flexclust stores its parameters in S4 slots
    this_centroids <- object@centers
    this_family <- object@family@name
    this_k <- object@k
  } else if (inherits(object, "kmeans")) {
    # base stats::kmeans stores its parameters in list components
    this_centroids <- object$centers
    this_family <- 'kmeans'
    this_k <- as.integer(nrow(object$centers))
  } else {
    stop(sprintf("Currently not supporting cluster models with class %s", class(object)[1]))
  }

  # PFA field names cannot contain dots, so map them to underscores
  # (hoisted out of the branches above; it was duplicated per branch)
  this_input_vars <- gsub('\\.', '_', colnames(this_centroids))
  colnames(this_centroids) <- this_input_vars

  list(inputs = this_input_vars,
       centroids = this_centroids,
       family = this_family,
       k = this_k)
}
#' PFA Formatting of Fitted K-Centroid Models
#'
#' This function takes a K-centroids model fit using kcca
#' and returns a list-of-lists representing a valid PFA document
#' that could be used for scoring
#'
#' @source pfa_config.R avro_typemap.R avro.R pfa_cellpool.R pfa_expr.R pfa_utils.R
#' @param object an object of class "kcca"
#' @param cluster_names a character vector of length k to name the values relating
#' to each cluster instead of just an integer. If not specified, then the predicted
#' cluster will be the string representation of the cluster index.
#' @param name a character which is an optional name for the scoring engine
#' @param version an integer which is sequential version number for the model
#' @param doc a character which is documentation string for archival purposes
#' @param metadata a \code{list} of strings that is computer-readable documentation for
#' archival purposes
#' @param randseed a integer which is a global seed used to generate all random
#' numbers. Multiple scoring engines derived from the same PFA file have
#' different seeds generated from the global one
#' @param options a \code{list} with value types depending on option name
#' Initialization or runtime options to customize implementation
#' (e.g. optimization switches). May be overridden or ignored by PFA consumer
#' @param ... additional arguments affecting the PFA produced
#' @return a \code{list} of lists that compose valid PFA document
#' @seealso \code{\link[flexclust]{kcca}} \code{\link{extract_params.kcca}}
#' @examples
#' model <- flexclust::kcca(iris[,1:4], k = 3, family=flexclust::kccaFamily("kmeans"))
#' model_as_pfa <- pfa(model)
#' @export
pfa.kcca <- function(object, name=NULL, version=NULL, doc=NULL, metadata=NULL, randseed=NULL, options=NULL,
                     cluster_names=NULL, ...){

  # extract model parameters
  extracted_params <- extract_params(object)

  if(!(extracted_params$family %in% c('kmeans', 'kmedians', 'angle', 'jaccard'))){
    stop(sprintf("Currently not supporting cluster models with distance metric %s", extracted_params$family))
  }

  # jaccard centroids are binary, so their inputs must be booleans;
  # the other supported families take doubles
  if(extracted_params$family %in% c('kmeans', 'kmedians', 'angle')){
    input_avro_type <- avro_double
  } else {
    input_avro_type <- avro_boolean
  }

  # define the input schema (refined to a map type per family below)
  field_names <- extracted_params$inputs
  field_types <- rep(input_avro_type, length(field_names))
  names(field_types) <- field_names
  input_type <- avro_record(field_types, "Input")

  # create list defining the first action of constructing input
  kcca_input_list <- list(type = avro_array(input_avro_type),
                          new = lapply(field_names, function(n) {
                            paste("input.", n, sep = "")
                          }))

  cast_input_string <- 'kcca_input <- kcca_input_list'

  # default cluster labels are the string form of the centroid index
  if(is.null(cluster_names)){
    cluster_names <- as.character(seq.int(extracted_params$k))
  } else {
    if(length(cluster_names) != extracted_params$k){
      stop(sprintf(paste('Length of provided cluster names (%s) does not match',
                         'the number of centroids (%s). Please specify a list of names for each centroid.'),
                   length(cluster_names), extracted_params$k))
    }
  }

  centroids_init <- list()
  if(extracted_params$family %in% c('kmeans', 'kmedians')){
    input_type <- avro_map(avro_double)
    input_name <- 'kcca_input'
    centroids_type <- avro_array(avro_record(fields = list(id = avro_string, center = avro_array(avro_double))))
    for(i in seq_len(nrow(extracted_params$centroids))){
      centroids_init[[length(centroids_init) + 1]] <- list(id = cluster_names[i],
                                                           center = as.list(unname(extracted_params$centroids[i, , drop = FALSE])))
    }
  } else if(extracted_params$family %in% c('jaccard')){
    input_type <- avro_map(avro_boolean)
    input_name <- 'kcca_input'
    centroids_type <- avro_array(avro_record(fields = list(id = avro_string, center = avro_array(avro_boolean))))
    for(i in seq_len(nrow(extracted_params$centroids))){
      if(!(all(extracted_params$centroids[i,] %in% c(TRUE, FALSE)) |
           all(extracted_params$centroids[i,] %in% c(0,1)))){
        stop('Models of family jaccard or ejaccard must have their inputs coded as binary TRUE/FALSE or 0/1')
      }
      centroids_init[[length(centroids_init) + 1]] <- list(id = cluster_names[i],
                                                           center = as.list(as.logical(unname(extracted_params$centroids[i, , drop = FALSE]))))
    }
  } else if(extracted_params$family %in% c('angle')){
    input_type <- avro_map(avro_double)
    input_name <- 'kcca_input2'
    # the angle distance function works on an array-of-arrays input,
    # so an extra cast action is prepended
    special_angle_string <- 'kcca_input2 <- new(avro_array(avro_array(avro_double)), kcca_input)'
    cast_input_string <- paste(cast_input_string, special_angle_string, sep='\n')
    centroids_type <- avro_array(avro_record(fields = list(id = avro_string, center = avro_array(avro_array(avro_double)))))
    for(i in seq_len(nrow(extracted_params$centroids))){
      centroids_init[[length(centroids_init) + 1]] <- list(id = cluster_names[i],
                                                           center = list(as.list(unname(extracted_params$centroids[i, , drop = FALSE]))))
    }
  } else {
    # bug fix: sprintf() was missing here, so the "%s" was never formatted
    stop(sprintf('Centroids cannot be created for kcca models of family type %s', extracted_params$family))
  }

  # determine the output based on pred_type
  this_cells <- list(centroids = pfa_cell(type = centroids_type,
                                          init = centroids_init))

  this_action <- parse(text=paste(cast_input_string,
                                  kcca_func_mapper(family = extracted_params$family,
                                                   input_name = input_name,
                                                   centroids_name = 'centroids'),
                                  sep='\n'))

  this_fcns <- NULL

  # construct the pfa_document
  doc <- pfa_document(input = input_type,
                      output = avro_string,
                      cells = this_cells,
                      action = this_action,
                      fcns = this_fcns,
                      name=name,
                      version=version,
                      doc=doc,
                      metadata=metadata,
                      randseed=randseed,
                      options=options,
                      ...
                      )

  return(doc)
}
#' extract_params.kccasimple
#'
#' Extract K-centroids model parameters from a kccasimple object created by
#' the flexclust library
#'
#' @param object an object of class "kccasimple"
#' @param ... further arguments passed to or from other methods
#' @return PFA as a list-of-lists that can be inserted into a cell or pool
#' @examples
#' model <- flexclust::kcca(iris[,1:4], k = 3,
#'                          family=flexclust::kccaFamily("kmeans"), simple=TRUE)
#' extracted_params <- extract_params(model)
#' @export
# Reuse the kcca method: extract_params.kcca already branches on the
# "kccasimple" class explicitly, so a plain alias is sufficient.
extract_params.kccasimple <- extract_params.kcca
#' PFA Formatting of Fitted K-Centroid Models
#'
#' This function takes a K-centroids model fit using kccasimple
#' and returns a list-of-lists representing in valid PFA document
#' that could be used for scoring
#'
#' @source pfa_config.R avro_typemap.R avro.R pfa_cellpool.R pfa_expr.R pfa_utils.R
#' @param object an object of class "kccasimple"
#' @param cluster_names a character vector of length k to name the values relating
#' to each cluster instead of just an integer. If not specified, then the predicted
#' cluster will be the string representation of the cluster index.
#' @param name a character which is an optional name for the scoring engine
#' @param version an integer which is sequential version number for the model
#' @param doc a character which is documentation string for archival purposes
#' @param metadata a \code{list} of strings that is computer-readable documentation for
#' archival purposes
#' @param randseed a integer which is a global seed used to generate all random
#' numbers. Multiple scoring engines derived from the same PFA file have
#' different seeds generated from the global one
#' @param options a \code{list} with value types depending on option name
#' Initialization or runtime options to customize implementation
#' (e.g. optimization switches). May be overridden or ignored by PFA consumer
#' @param ... additional arguments affecting the PFA produced
#' @return a \code{list} of lists that compose valid PFA document
#' @seealso \code{\link[flexclust]{kcca}} \code{\link{extract_params.kccasimple}}
#' @examples
#' model <- flexclust::kcca(iris[,1:4], k = 3,
#'                          family=flexclust::kccaFamily("kmeans"), simple=TRUE)
#' model_as_pfa <- pfa(model)
#' @export
# Reuse the kcca method: extract_params (called inside pfa.kcca) already
# handles the "kccasimple" class, so the same PFA builder applies.
pfa.kccasimple <- pfa.kcca
#' Map a kcca distance family to the PFA scoring expression for that family.
#' Returns a string of PFA pseudo-code selecting the closest centroid's id.
#' @keywords internal
kcca_func_mapper <- function(family, input_name, centroids_name) {
  # distance function name per supported family; NA means the PFA default
  # (no third argument is emitted for kmeans)
  dist_funs <- c(kmeans = NA_character_,
                 kmedians = "manhattan_dist_fun",
                 angle = "angle_dist_fun",
                 jaccard = "jaccard_dist_fun",
                 ejaccard = "jaccard_dist_fun")
  if (!family %in% names(dist_funs)) {
    stop(sprintf('supplied link function not supported: %s', family))
  }
  dfun <- dist_funs[[family]]
  if (is.na(dfun)) {
    sprintf('model.cluster.closest(%s,%s)["id"]', input_name, centroids_name)
  } else {
    sprintf('model.cluster.closest(%s,%s,%s)["id"]', input_name, centroids_name, dfun)
  }
}
# one or more of the following companies: Open Data Partners LLC,
# Open Data Research LLC, or Open Data Capital LLC.)
#
# This file is part of Hadrian.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#' extract_params.kcca
#'
#' Extract K-centroids model parameters from a kcca object created by
#' the flexclust library. Objects of class "kccasimple" and base
#' \code{stats::kmeans} fits are handled as well.
#'
#' @param object an object of class "kcca", "kccasimple" or "kmeans"
#' @param ... further arguments passed to or from other methods
#' @return a \code{list} with elements \code{inputs} (input variable names,
#'   dots replaced by underscores), \code{centroids} (matrix of cluster
#'   centers), \code{family} (distance family name) and \code{k} (number of
#'   clusters) that can be inserted into a PFA cell or pool
#' @examples
#' model <- flexclust::kcca(iris[,1:4], k = 3, family=flexclust::kccaFamily("kmeans"))
#' extracted_params <- extract_params(model)
#' @export
extract_params.kcca <- function(object, ...) {

  if (inherits(object, c("kcca", "kccasimple"))) {
    # flexclust stores its parameters in S4 slots
    this_centroids <- object@centers
    this_family <- object@family@name
    this_k <- object@k
  } else if (inherits(object, "kmeans")) {
    # base stats::kmeans stores its parameters in list components
    this_centroids <- object$centers
    this_family <- 'kmeans'
    this_k <- as.integer(nrow(object$centers))
  } else {
    stop(sprintf("Currently not supporting cluster models with class %s", class(object)[1]))
  }

  # PFA field names cannot contain dots, so map them to underscores
  # (hoisted out of the branches above; it was duplicated per branch)
  this_input_vars <- gsub('\\.', '_', colnames(this_centroids))
  colnames(this_centroids) <- this_input_vars

  list(inputs = this_input_vars,
       centroids = this_centroids,
       family = this_family,
       k = this_k)
}
#' PFA Formatting of Fitted K-Centroid Models
#'
#' This function takes a K-centroids model fit using kcca
#' and returns a list-of-lists representing a valid PFA document
#' that could be used for scoring
#'
#' @source pfa_config.R avro_typemap.R avro.R pfa_cellpool.R pfa_expr.R pfa_utils.R
#' @param object an object of class "kcca"
#' @param cluster_names a character vector of length k to name the values relating
#' to each cluster instead of just an integer. If not specified, then the predicted
#' cluster will be the string representation of the cluster index.
#' @param name a character which is an optional name for the scoring engine
#' @param version an integer which is sequential version number for the model
#' @param doc a character which is documentation string for archival purposes
#' @param metadata a \code{list} of strings that is computer-readable documentation for
#' archival purposes
#' @param randseed a integer which is a global seed used to generate all random
#' numbers. Multiple scoring engines derived from the same PFA file have
#' different seeds generated from the global one
#' @param options a \code{list} with value types depending on option name
#' Initialization or runtime options to customize implementation
#' (e.g. optimization switches). May be overridden or ignored by PFA consumer
#' @param ... additional arguments affecting the PFA produced
#' @return a \code{list} of lists that compose valid PFA document
#' @seealso \code{\link[flexclust]{kcca}} \code{\link{extract_params.kcca}}
#' @examples
#' model <- flexclust::kcca(iris[,1:4], k = 3, family=flexclust::kccaFamily("kmeans"))
#' model_as_pfa <- pfa(model)
#' @export
pfa.kcca <- function(object, name=NULL, version=NULL, doc=NULL, metadata=NULL, randseed=NULL, options=NULL,
                     cluster_names=NULL, ...){

  # extract model parameters
  extracted_params <- extract_params(object)

  if(!(extracted_params$family %in% c('kmeans', 'kmedians', 'angle', 'jaccard'))){
    stop(sprintf("Currently not supporting cluster models with distance metric %s", extracted_params$family))
  }

  # jaccard centroids are binary, so their inputs must be booleans;
  # the other supported families take doubles
  if(extracted_params$family %in% c('kmeans', 'kmedians', 'angle')){
    input_avro_type <- avro_double
  } else {
    input_avro_type <- avro_boolean
  }

  # define the input schema (refined to a map type per family below)
  field_names <- extracted_params$inputs
  field_types <- rep(input_avro_type, length(field_names))
  names(field_types) <- field_names
  input_type <- avro_record(field_types, "Input")

  # create list defining the first action of constructing input
  kcca_input_list <- list(type = avro_array(input_avro_type),
                          new = lapply(field_names, function(n) {
                            paste("input.", n, sep = "")
                          }))

  cast_input_string <- 'kcca_input <- kcca_input_list'

  # default cluster labels are the string form of the centroid index
  if(is.null(cluster_names)){
    cluster_names <- as.character(seq.int(extracted_params$k))
  } else {
    if(length(cluster_names) != extracted_params$k){
      stop(sprintf(paste('Length of provided cluster names (%s) does not match',
                         'the number of centroids (%s). Please specify a list of names for each centroid.'),
                   length(cluster_names), extracted_params$k))
    }
  }

  centroids_init <- list()
  if(extracted_params$family %in% c('kmeans', 'kmedians')){
    input_type <- avro_map(avro_double)
    input_name <- 'kcca_input'
    centroids_type <- avro_array(avro_record(fields = list(id = avro_string, center = avro_array(avro_double))))
    for(i in seq_len(nrow(extracted_params$centroids))){
      centroids_init[[length(centroids_init) + 1]] <- list(id = cluster_names[i],
                                                           center = as.list(unname(extracted_params$centroids[i, , drop = FALSE])))
    }
  } else if(extracted_params$family %in% c('jaccard')){
    input_type <- avro_map(avro_boolean)
    input_name <- 'kcca_input'
    centroids_type <- avro_array(avro_record(fields = list(id = avro_string, center = avro_array(avro_boolean))))
    for(i in seq_len(nrow(extracted_params$centroids))){
      if(!(all(extracted_params$centroids[i,] %in% c(TRUE, FALSE)) |
           all(extracted_params$centroids[i,] %in% c(0,1)))){
        stop('Models of family jaccard or ejaccard must have their inputs coded as binary TRUE/FALSE or 0/1')
      }
      centroids_init[[length(centroids_init) + 1]] <- list(id = cluster_names[i],
                                                           center = as.list(as.logical(unname(extracted_params$centroids[i, , drop = FALSE]))))
    }
  } else if(extracted_params$family %in% c('angle')){
    input_type <- avro_map(avro_double)
    input_name <- 'kcca_input2'
    # the angle distance function works on an array-of-arrays input,
    # so an extra cast action is prepended
    special_angle_string <- 'kcca_input2 <- new(avro_array(avro_array(avro_double)), kcca_input)'
    cast_input_string <- paste(cast_input_string, special_angle_string, sep='\n')
    centroids_type <- avro_array(avro_record(fields = list(id = avro_string, center = avro_array(avro_array(avro_double)))))
    for(i in seq_len(nrow(extracted_params$centroids))){
      centroids_init[[length(centroids_init) + 1]] <- list(id = cluster_names[i],
                                                           center = list(as.list(unname(extracted_params$centroids[i, , drop = FALSE]))))
    }
  } else {
    # bug fix: sprintf() was missing here, so the "%s" was never formatted
    stop(sprintf('Centroids cannot be created for kcca models of family type %s', extracted_params$family))
  }

  # determine the output based on pred_type
  this_cells <- list(centroids = pfa_cell(type = centroids_type,
                                          init = centroids_init))

  this_action <- parse(text=paste(cast_input_string,
                                  kcca_func_mapper(family = extracted_params$family,
                                                   input_name = input_name,
                                                   centroids_name = 'centroids'),
                                  sep='\n'))

  this_fcns <- NULL

  # construct the pfa_document
  doc <- pfa_document(input = input_type,
                      output = avro_string,
                      cells = this_cells,
                      action = this_action,
                      fcns = this_fcns,
                      name=name,
                      version=version,
                      doc=doc,
                      metadata=metadata,
                      randseed=randseed,
                      options=options,
                      ...
                      )

  return(doc)
}
#' extract_params.kccasimple
#'
#' Extract K-centroids model parameters from a kccasimple object created by
#' the flexclust library
#'
#' @param object an object of class "kccasimple"
#' @param ... further arguments passed to or from other methods
#' @return PFA as a list-of-lists that can be inserted into a cell or pool
#' @examples
#' model <- flexclust::kcca(iris[,1:4], k = 3,
#'                          family=flexclust::kccaFamily("kmeans"), simple=TRUE)
#' extracted_params <- extract_params(model)
#' @export
# Reuse the kcca method: extract_params.kcca already branches on the
# "kccasimple" class explicitly, so a plain alias is sufficient.
extract_params.kccasimple <- extract_params.kcca
#' PFA Formatting of Fitted K-Centroid Models
#'
#' This function takes a K-centroids model fit using kccasimple
#' and returns a list-of-lists representing in valid PFA document
#' that could be used for scoring
#'
#' @source pfa_config.R avro_typemap.R avro.R pfa_cellpool.R pfa_expr.R pfa_utils.R
#' @param object an object of class "kccasimple"
#' @param cluster_names a character vector of length k to name the values relating
#' to each cluster instead of just an integer. If not specified, then the predicted
#' cluster will be the string representation of the cluster index.
#' @param name a character which is an optional name for the scoring engine
#' @param version an integer which is sequential version number for the model
#' @param doc a character which is documentation string for archival purposes
#' @param metadata a \code{list} of strings that is computer-readable documentation for
#' archival purposes
#' @param randseed a integer which is a global seed used to generate all random
#' numbers. Multiple scoring engines derived from the same PFA file have
#' different seeds generated from the global one
#' @param options a \code{list} with value types depending on option name
#' Initialization or runtime options to customize implementation
#' (e.g. optimization switches). May be overridden or ignored by PFA consumer
#' @param ... additional arguments affecting the PFA produced
#' @return a \code{list} of lists that compose valid PFA document
#' @seealso \code{\link[flexclust]{kcca}} \code{\link{extract_params.kccasimple}}
#' @examples
#' model <- flexclust::kcca(iris[,1:4], k = 3,
#'                          family=flexclust::kccaFamily("kmeans"), simple=TRUE)
#' model_as_pfa <- pfa(model)
#' @export
# Reuse the kcca method: extract_params (called inside pfa.kcca) already
# handles the "kccasimple" class, so the same PFA builder applies.
pfa.kccasimple <- pfa.kcca
#' Map a kcca distance family to the PFA scoring expression for that family.
#' Returns a string of PFA pseudo-code selecting the closest centroid's id.
#' @keywords internal
kcca_func_mapper <- function(family, input_name, centroids_name) {
  # distance function name per supported family; NA means the PFA default
  # (no third argument is emitted for kmeans)
  dist_funs <- c(kmeans = NA_character_,
                 kmedians = "manhattan_dist_fun",
                 angle = "angle_dist_fun",
                 jaccard = "jaccard_dist_fun",
                 ejaccard = "jaccard_dist_fun")
  if (!family %in% names(dist_funs)) {
    stop(sprintf('supplied link function not supported: %s', family))
  }
  dfun <- dist_funs[[family]]
  if (is.na(dfun)) {
    sprintf('model.cluster.closest(%s,%s)["id"]', input_name, centroids_name)
  } else {
    sprintf('model.cluster.closest(%s,%s,%s)["id"]', input_name, centroids_name, dfun)
  }
}
# testthat unit tests for bundle()/unbundle() and their column-relocation
# helpers (package-internal functions).
context("test-bundle")
# Short local alias so the tests below can construct tibbles tersely.
tb <- tibble::tibble
# relocate_bundled_cols(): the bundle name replaces its member columns at
# the position of the first member.
test_that("relocate_bundled_cols() works", {
  expect_equal(relocate_bundled_cols(c("x", "a1", "y", "a2"), a = c("a1", "a2")), c("x", "a", "y"))
  expect_equal(relocate_bundled_cols(c("x", "a1", "y", "a2"), !!!list(a = c("a1", "a2"))), c("x", "a", "y"))
  expect_equal(relocate_bundled_cols(c("x", "a1", "b1", "y", "b2", "a2"), a = c("a1", "a2"), b = c("b1", "b2")), c("x", "a", "b", "y"))
})
test_that("bundle() for simple cases works", {
  d <- data.frame(x = 1:3, a1 = 11:13, y = 1:3, a2 = 21:23)
  expected <- d[, c("x", "y"), drop = FALSE]
  expected$data <- d[, c("a1", "a2")]
  expected <- expected[, c("x", "data", "y")]
  # unnamed cases
  expect_equal(bundle(d, a1, a2), expected)
  expect_equal(bundle(d, c("a1", "a2")), expected)
  expect_equal(bundle(d, -x, -y), expected)
  expect_equal(bundle(d, starts_with("a")), expected)
  # named cases
  names(expected) <- c("x", "foo", "y")
  expect_equal(bundle(d, foo = c(a1, a2)), expected)
  expect_equal(bundle(d, foo = c(-x, -y)), expected)
  expect_equal(bundle(d, foo = starts_with("a")), expected)
  # unnamed, but specify .key
  expect_equal(bundle(d, a1, a2, .key = "foo"), expected)
})
# relocate_unbundled_cols(): the inverse mapping — a bundle name expands to
# its member column names in place.
test_that("relocate_unbundled_cols() works", {
  expect_equal(relocate_unbundled_cols(c("x", "a", "y"), a = c("a1", "a2")), c("x", "a1", "a2", "y"))
  expect_equal(relocate_unbundled_cols(c("x", "a", "y"), !!!list(a = c("a1", "a2"))), c("x", "a1", "a2", "y"))
  expect_equal(relocate_unbundled_cols(c("x", "b", "y", "a"), a = c("a1", "a2"), b = c("b1", "b2")), c("x", "b1", "b2", "y", "a1", "a2"))
})
test_that("unbundle() works", {
  d <- tb(id = 1:3, a = tb(value = 1:3, type = c("a", "b", "c")))
  expect_equal(unbundle(d, a), tb(id = 1:3, a_value = 1:3, a_type = c("a", "b", "c")))
  expect_equal(unbundle(d, a, sep = "."), tb(id = 1:3, a.value = 1:3, a.type = c("a", "b", "c")))
  expect_equal(unbundle(d, a, sep = NULL), tb(id = 1:3, value = 1:3, type = c("a", "b", "c")))
  d <- tb(id = 1:3,
          A = tb(a = tb(value = 1:3, type = c("a", "b", "c")), b = tb(value = 4:6, type = c("a", "b", "c"))),
          B = tb(a = tb(value = 1:3, type = c("x", "y", "z"))))
  d1 <- unbundle(d, A:B)
  expect_equal(d1$id, 1:3)
  expect_equal(d1$A_a, tb(value = 1:3, type = c("a", "b", "c")))
  expect_equal(d1$A_b, tb(value = 4:6, type = c("a", "b", "c")))
  expect_equal(d1$B_a, tb(value = 1:3, type = c("x", "y", "z")))
})
| /tests/testthat/test-bundle.R | permissive | yutannihilation/tiedr | R | false | false | 2,433 | r | context("test-bundle")
# Short local alias so the tests below can construct tibbles tersely.
tb <- tibble::tibble
# relocate_bundled_cols(): the bundle name replaces its member columns at
# the position of the first member.
test_that("relocate_bundled_cols() works", {
  expect_equal(relocate_bundled_cols(c("x", "a1", "y", "a2"), a = c("a1", "a2")), c("x", "a", "y"))
  expect_equal(relocate_bundled_cols(c("x", "a1", "y", "a2"), !!!list(a = c("a1", "a2"))), c("x", "a", "y"))
  expect_equal(relocate_bundled_cols(c("x", "a1", "b1", "y", "b2", "a2"), a = c("a1", "a2"), b = c("b1", "b2")), c("x", "a", "b", "y"))
})
test_that("bundle() for simple cases works", {
  d <- data.frame(x = 1:3, a1 = 11:13, y = 1:3, a2 = 21:23)
  expected <- d[, c("x", "y"), drop = FALSE]
  expected$data <- d[, c("a1", "a2")]
  expected <- expected[, c("x", "data", "y")]
  # unnamed cases
  expect_equal(bundle(d, a1, a2), expected)
  expect_equal(bundle(d, c("a1", "a2")), expected)
  expect_equal(bundle(d, -x, -y), expected)
  expect_equal(bundle(d, starts_with("a")), expected)
  # named cases
  names(expected) <- c("x", "foo", "y")
  expect_equal(bundle(d, foo = c(a1, a2)), expected)
  expect_equal(bundle(d, foo = c(-x, -y)), expected)
  expect_equal(bundle(d, foo = starts_with("a")), expected)
  # unnamed, but specify .key
  expect_equal(bundle(d, a1, a2, .key = "foo"), expected)
})
# relocate_unbundled_cols(): the inverse mapping — a bundle name expands to
# its member column names in place.
test_that("relocate_unbundled_cols() works", {
  expect_equal(relocate_unbundled_cols(c("x", "a", "y"), a = c("a1", "a2")), c("x", "a1", "a2", "y"))
  expect_equal(relocate_unbundled_cols(c("x", "a", "y"), !!!list(a = c("a1", "a2"))), c("x", "a1", "a2", "y"))
  expect_equal(relocate_unbundled_cols(c("x", "b", "y", "a"), a = c("a1", "a2"), b = c("b1", "b2")), c("x", "b1", "b2", "y", "a1", "a2"))
})
test_that("unbundle() works", {
  d <- tb(id = 1:3, a = tb(value = 1:3, type = c("a", "b", "c")))
  expect_equal(unbundle(d, a), tb(id = 1:3, a_value = 1:3, a_type = c("a", "b", "c")))
  expect_equal(unbundle(d, a, sep = "."), tb(id = 1:3, a.value = 1:3, a.type = c("a", "b", "c")))
  expect_equal(unbundle(d, a, sep = NULL), tb(id = 1:3, value = 1:3, type = c("a", "b", "c")))
  d <- tb(id = 1:3,
          A = tb(a = tb(value = 1:3, type = c("a", "b", "c")), b = tb(value = 4:6, type = c("a", "b", "c"))),
          B = tb(a = tb(value = 1:3, type = c("x", "y", "z"))))
  d1 <- unbundle(d, A:B)
  expect_equal(d1$id, 1:3)
  expect_equal(d1$A_a, tb(value = 1:3, type = c("a", "b", "c")))
  expect_equal(d1$A_b, tb(value = 4:6, type = c("a", "b", "c")))
  expect_equal(d1$B_a, tb(value = 1:3, type = c("x", "y", "z")))
})
|
#' @title plot Hyper-prior function
#' @description This function plots a hyper-prior density function.
#' Currently supported density function are Uniform, Gamma, Normal, Loggamma and Lognormal.
#' The resulting function is used during MCMC \code{\link{mcmc_bite}}
#' to estimate parameters of priors.
#'
#' @details There are three currently implemented density function:
#' Uniform, Gamma and Normal. Each of these densities requires two input parameters and hp.pars
#' must be a vector of two values and cannot be left empty.
#'
#' @param hpf name of a density function. Supported density functions are: Uniform, Gamma and Normal
#' @param col color of the density area. Can be of size 2 (hpriors for the means, hpriors for the logvars) if a jive object is plotted
#' @param border color of the density curve. Can be of size 2 (hpriors for the means, hpriors for the logvars) if a jive object is plotted
#' @param bty,... additional parameters that can be passed to a density function and \code{\link[graphics]{par}}
#' @export
#' @author Theo Gaboriau
#' @encoding UTF-8
#' @examples
#'
#' ## Load test data
#' data(Anolis_traits)
#' data(Anolis_tree)
#'
#' my.hp <- hpfun(hpf="Uniform", hp.pars=c(1,2))
#' plot_hp(my.hp)
#'
#' my.jive <- make_jive(Anolis_tree, Anolis_traits[,-3], model.priors = list(mean="BM", logvar="OU"))
#' plot_hp(my.jive, cex.main = .8)
plot_hp <- function(hpf, col = c("#bfdbf7", "#f49e4c"), border = c("#2e86ab", "#a31621"), bty = "n", ...){
  if(inherits(hpf, "JIVE")){
    # One panel per hyper-prior density, arranged on a grid guaranteed to
    # hold all n panels.  Bug fix: the previous nrow = floor(sqrt(n)) could
    # be too small (e.g. n = 3 gave a 1 x 2 grid and produced invalid
    # fig coordinates for the third panel).
    n <- sum(sapply(hpf$priors, function(x) length(x$hprior)))
    ncol <- ceiling(sqrt(n))
    nrow <- ceiling(n/ncol)
    ro <- 1
    co <- 1
    # restore the caller's graphical parameters when done
    oldpar <- par(no.readonly = TRUE)
    on.exit(par(oldpar))
    for(p in seq_along(hpf$priors)){
      for(i in seq_along(hpf$priors[[p]]$hprior)){
        # place the next panel; only the very first panel starts a new page
        par(fig = c((co-1)/ncol, co/ncol, 1-ro/nrow, 1-(ro-1)/nrow), new = !(ro == 1 && co == 1))
        # color index p distinguishes the prior group (e.g. mean vs logvar)
        plot_hyper(hpf$priors[[p]]$hprior[[i]], col = col[p], border = border[p],
                   xlab = paste(names(hpf$priors[[p]]$hprior)[i], "[", names(hpf$priors)[p], "]", sep = ""),
                   bty = bty, ...)
        if(co == ncol){
          co <- 1
          ro <- ro + 1
        } else {
          co <- co + 1
        }
      }
    }
  } else {
    # single hyper-prior function: one plot using the first color pair
    plot_hyper(hpf, col = col[1], border = border[1], bty = bty, ...)
  }
}
| /R/plot_hp.R | no_license | theogab/bite | R | false | false | 2,379 | r | #' @title plot Hyper-prior function
#' @description This function plots a hyper-prior density function.
#' Currently supported density function are Uniform, Gamma, Normal, Loggamma and Lognormal.
#' The resulting function is used during MCMC \code{\link{mcmc_bite}}
#' to estimate parameters of priors.
#'
#' @details There are three currently implemented density function:
#' Uniform, Gamma and Normal. Each of these densities requires two input parameters and hp.pars
#' must be a vector of two values and cannot be left empty.
#'
#' @param hpf name of a density function. Supported density functions are: Uniform, Gamma and Normal
#' @param col color of the density area. Can be of size 2 (hpriors for the means, hpriors for the logvars) if a jive object is plotted
#' @param border color of the density curve. Can be of size 2 (hpriors for the means, hpriors for the logvars) if a jive object is plotted
#' @param bty,... additional parameters that can be passed to a density function and \code{\link[graphics]{par}}
#' @export
#' @author Theo Gaboriau
#' @encoding UTF-8
#' @examples
#'
#' ## Load test data
#' data(Anolis_traits)
#' data(Anolis_tree)
#'
#' my.hp <- hpfun(hpf="Uniform", hp.pars=c(1,2))
#' plot_hp(my.hp)
#'
#' my.jive <- make_jive(Anolis_tree, Anolis_traits[,-3], model.priors = list(mean="BM", logvar="OU"))
#' plot_hp(my.jive, cex.main = .8)
plot_hp <- function(hpf, col = c("#bfdbf7", "#f8766d"), border = c("#2e86ab", "#a31621"), bty = "n", ...){
  # Plot hyper-prior density functions.
  #
  # For a JIVE object, draw one panel per hyper-prior on an
  # (n_row x n_col) grid via par(fig = ...); otherwise plot the single
  # hyper-prior directly. `col`/`border` are recycled per prior group.
  if (inherits(hpf, "JIVE")) {
    n <- sum(sapply(hpf$priors, function(x) length(x$hprior)))
    n_col <- ceiling(sqrt(n))
    # BUG FIX: floor(sqrt(n)) rows can leave too few panels
    # (e.g. n = 7 gave a 2 x 3 grid); size rows to hold all n panels.
    n_row <- ceiling(n / n_col)
    ro <- 1
    co <- 1
    oldpar <- par(no.readonly = TRUE)  # TRUE, not T (T is reassignable)
    on.exit(par(oldpar))
    for (p in seq_along(hpf$priors)) {
      for (i in seq_along(hpf$priors[[p]]$hprior)) {
        # Open a fresh figure region; only the very first panel starts
        # a new page (new = FALSE).
        par(fig = c((co - 1)/n_col, co/n_col, 1 - ro/n_row, 1 - (ro - 1)/n_row),
            new = !(ro == 1 && co == 1))
        plot_hyper(hpf$priors[[p]]$hprior[[i]], col = col[p], border = border[p],
                   xlab = paste(names(hpf$priors[[p]]$hprior)[i], "[", names(hpf$priors)[p], "]", sep = ""),
                   bty = bty, ...)
        # Advance panel cursor, wrapping to the next row.
        if (co == n_col) {
          co <- 1
          ro <- ro + 1
        } else {
          co <- co + 1
        }
      }
    }
  } else {
    plot_hyper(hpf, col = col[1], border = border[1], bty = bty, ...)
  }
}
|
# this is gradient descent
gradient_value = function(beta = NULL, df, formula,
                          family = binomial(), iteration_number = 0,
                          shuffle_rows = TRUE) {
  # One gradient evaluation for a GLM log-likelihood.
  #
  # Args:
  #   beta: current coefficient vector; NULL starts from all zeros.
  #   df: data.frame holding the model variables.
  #   formula: model formula (response ~ terms).
  #   family: a stats family object (default binomial()).
  #   iteration_number: echoed back so callers can tag the result.
  #   shuffle_rows: permute rows first; the summed gradient is
  #     permutation-invariant, so the result is unchanged.
  #
  # Returns a list: gradient, sample_size (complete cases used) and
  # iteration_number.
  if (shuffle_rows) {
    df = df[sample(nrow(df)), ]
  }
  y = model.frame(formula, data = df)[, 1]
  X = model.matrix(formula, data = df)
  # Keep only rows with complete predictors; drop = FALSE keeps X a
  # matrix even when a single row survives.
  cc = complete.cases(X)
  X = X[cc, , drop = FALSE]
  y = y[cc]
  if (is.null(beta)) {
    beta = rep(0, ncol(X))
  }
  linkinv = family$linkinv
  variance <- family$variance
  mu.eta <- family$mu.eta
  eta = drop(X %*% beta)
  mu = c(linkinv(eta))
  # Per-observation weights as in the original code.
  # NOTE(review): the textbook score uses mu.eta/variance; this keeps
  # the original mu.eta^2 * variance ("without variance components").
  W = (mu.eta(eta)^2) * variance(mu)
  # PERF FIX: equivalent to t(X) %*% diag(W) %*% (y - mu) but without
  # materialising the n x n diagonal matrix (O(n) instead of O(n^2)
  # memory; also works for n == 1, where diag(W) misbehaved).
  gradient = drop(crossprod(X, W * (y - mu)))
  stopifnot(length(gradient) == length(beta))
  result = list(
    gradient = gradient,
    sample_size = nrow(X),
    iteration_number = iteration_number
  )
  return(result)
}
use_glm_gradient_value = function(
  beta = NULL, df, formula,
  family = binomial(), iteration_number = 0,
  shuffle_rows = TRUE) {
  # Take a single IRLS step by fitting a glm capped at one iteration,
  # starting from the supplied coefficients (zeros when beta is NULL).
  # iteration_number is accepted for interface parity with
  # gradient_value() but is not used here.
  if (shuffle_rows) {
    permuted = sample(nrow(df))
    df = df[permuted, ]
  }
  # The design matrix is built only to learn how many coefficients the
  # model has, so the zero start vector has the right length.
  design = model.matrix(formula, data = df)
  if (is.null(beta)) {
    beta = rep(0, ncol(design))
  }
  start = beta
  print(start)  # echo the starting coefficients (kept from the original)
  one_step_fit = glm(
    formula = formula,
    data = df,
    family = family,
    start = start,
    control = list(maxit = 1))
  one_step_fit
}
folder_names = function(synced_folder) {
  # Map a synced root directory to the four sub-folders used by the
  # distributed-fitting protocol; this layout is the same on all sites.
  subdirs = c(
    model_folder = "formulas",
    gradients_folder = "gradients",
    beta_folder = "betas",
    converged_folder = "models")
  lapply(subdirs, function(sub) file.path(synced_folder, sub))
}
master_beta_file = function(model_name, synced_folder) {
  # Path of the NEXT beta file to write for `model_name`.
  #
  # With no beta file yet, the next iteration is 1; otherwise it is the
  # iteration_number_next stored in the highest-numbered existing beta
  # file, keeping the file name in sync with the protocol state.
  file_list = folder_names(synced_folder)
  beta_folder = file_list$beta_folder
  all_beta_files = list.files(
    beta_folder,
    pattern = paste0("^", model_name, "-iteration.*.rds"),
    full.names = TRUE)
  if (length(all_beta_files) == 0) {
    iteration_number = 1
  } else {
    # Extract the numeric iteration suffix from each file name.
    beta_number = sub(".*iteration(.*)[.]rds", "\\1",
                      basename(all_beta_files))
    beta_number = as.numeric(beta_number)
    # BUG FIX: read_rds was called unqualified, so this failed unless
    # readr was attached; qualify it to match the readr:: usage
    # elsewhere in this file. (The unused local `beta` was removed.)
    beta_list = readr::read_rds(all_beta_files[which.max(beta_number)])
    iteration_number = beta_list$iteration_number_next
  }
  out_beta_file = file.path(
    beta_folder,
    paste0(
      model_name,
      sprintf("-iteration%04.0f", iteration_number),
      ".rds")
  )
  return(out_beta_file)
}
aggregate_gradients = function(
  all_gradient_files,
  iteration_number) {
  # Combine per-site gradient files into one sample-size-weighted
  # gradient.
  #
  # Each file (written by estimate_site_gradient) holds a list with
  # gradient, sample_size and iteration_number. Every file must belong
  # to `iteration_number`; the result is sum_i(gradient_i * n_i) / n
  # with n the pooled sample size.
  gradient_list = lapply(all_gradient_files, readr::read_rds)
  names(gradient_list) = all_gradient_files
  sum_grads = sapply(gradient_list, function(x) x$gradient)
  # BUG FIX: the original tested is.null(beta) on an undefined free
  # variable, erroring unless a global `beta` happened to exist; the
  # value was never used, so the block is removed. The unweighted
  # rowSums/grad-by-n computation was dead code and is removed too.
  stopifnot(is.matrix(sum_grads))
  # All sites must report the same iteration.
  iter_nums = sapply(gradient_list, function(x) x$iteration_number)
  stopifnot(is.vector(iter_nums))
  stopifnot(all(iter_nums == iteration_number))
  ss = sapply(gradient_list, function(x) x$sample_size)
  stopifnot(is.vector(ss))
  n = sum(ss)
  # Weight each site's gradient by its share of the pooled sample.
  weighted_grads = sapply(gradient_list, function(x) {
    x$gradient * x$sample_size / n
  })
  grad = rowSums(weighted_grads)
  result = list(
    gradient = grad,
    total_sample_size = n)
  return(result)
}
get_current_beta = function(model_name, synced_folder) {
  # Latest beta vector and the iteration number it belongs to.
  #
  # With no beta files yet, beta is NULL and the iteration is 1;
  # otherwise the highest-numbered beta file is loaded and its stored
  # iteration_number_next reported.
  file_list = folder_names(synced_folder)
  beta_folder = file_list$beta_folder
  all_beta_files = list.files(
    beta_folder,
    pattern = paste0("^", model_name, "-iteration.*.rds"),
    full.names = TRUE)
  if (length(all_beta_files) == 0) {
    beta = NULL
    iteration_number = 1
  } else {
    beta_number = sub(".*iteration(.*)[.]rds", "\\1",
                      basename(all_beta_files))
    beta_number = as.numeric(beta_number)
    # BUG FIX: qualify read_rds with readr:: (it was unqualified, so
    # the call failed unless readr was attached; the rest of this file
    # uses readr:: consistently).
    beta_list = readr::read_rds(all_beta_files[which.max(beta_number)])
    beta = beta_list$beta
    iteration_number = beta_list$iteration_number_next
  }
  L = list(
    iteration_number = iteration_number
  )
  # `$<-` with a NULL value leaves L without a beta element, which is
  # the intended "no beta yet" representation (L$beta still reads NULL).
  L$beta = beta
  return(L)
}
estimate_site_gradient = function(
  model_name, synced_folder,
  site_name = "site1", dataset,
  all_site_names = paste0("site", 1:3),
  shuffle_rows = TRUE) {
  # Data-site step of the distributed-GLM protocol: if this site has
  # not yet written a gradient for the current iteration, compute one
  # from `dataset` and write it to the shared gradients folder;
  # otherwise only print what the site is waiting on. Returns the
  # (existing or newly written) gradient file path.
  #
  # model_name/synced_folder identify the shared model and folder tree,
  # site_name must be one of all_site_names, dataset is the local
  # data.frame, shuffle_rows is forwarded to gradient_value().
  site_name = match.arg(site_name, choices = all_site_names)
  file_list = folder_names(synced_folder)
  gradients_folder = file_list$gradients_folder
  model_folder = file_list$model_folder
  # which model are we running: the processing site publishes
  # <model_name>.rds (formula + family) in the formulas folder
  formula_file = file.path(model_folder,
                           paste0(model_name, ".rds"))
  if (!file.exists(formula_file)) {
    stop(paste0("Formula file: ", formula_file, " doesn't exist!",
                " You may need to contact processing site or check your ",
                "synced_folder"))
  } else {
    formula_list = readr::read_rds(formula_file)
    formula = formula_list$formula
    family = formula_list$family
    # The stored family may be a name ("binomial"), a generator
    # function, or a family object; normalise to a family object.
    if (is.character(family)) {
      # NOTE(review): get() searches enclosing scopes of the base
      # namespace, which is how stats families are found when stats is
      # attached (the default) - confirm.
      family = get(family, envir = .BaseNamespaceEnv)
    }
    if (is.function(family)) {
      family = family()
    }
    if (!inherits(family, "family")) {
      stop("family specified is not a family object - see setup_model")
    }
  }
  # Current coefficients and iteration counter from the betas folder.
  res = get_current_beta(model_name, synced_folder)
  beta = res$beta
  iteration_number = res$iteration_number
  # This site's gradient file for the iteration ...
  gradient_file = file.path(
    gradients_folder,
    paste0(model_name, "-",
           site_name,
           sprintf("-iteration%04.0f", iteration_number),
           ".rds"))
  # ... and the files expected from every participating site.
  all_gradient_files = file.path(
    gradients_folder,
    paste0(model_name, "-",
           all_site_names,
           sprintf("-iteration%04.0f", iteration_number),
           ".rds"))
  # here we would simply wait
  # should check if converged
  if (file.exists(gradient_file)) {
    if (!all(file.exists(all_gradient_files))) {
      print("Waiting for other sites to create gradients")
    } else {
      print("Waiting for compute site to create new betas")
    }
  } else {
    print(paste0("Creating Gradient, iteration ",
                 iteration_number))
    # use_glm_gradient_value(beta = beta,
    #                        df = dataset,
    #                        formula = formula,
    #                        family = family,
    #                        iteration_number = iteration_number,
    #                        shuffle_rows = shuffle_rows)
    grad = gradient_value(beta = beta,
                          df = dataset,
                          formula = formula,
                          family = family,
                          iteration_number = iteration_number,
                          shuffle_rows = shuffle_rows)
    readr::write_rds(grad, gradient_file)
    rm(grad)
  }
  return(gradient_file)
}
estimate_new_beta = function(
  model_name, synced_folder,
  all_site_names = paste0("site", 1:3),
  tolerance = 1e-8) {
  # Compute-site step of the distributed-GLM protocol.
  #
  # Once every site has written its gradient for the current iteration,
  # aggregate them, test convergence, and write either the final model
  # file (models/<model_name>.rds) or the next beta file; the written
  # path is returned. While gradient files are missing, or the beta for
  # this iteration already exists, only status messages are printed and
  # nothing is written.
  file_list = folder_names(synced_folder)
  gradients_folder = file_list$gradients_folder
  beta_folder = file_list$beta_folder
  converged_folder = file_list$converged_folder
  final_file = file.path(converged_folder,
                         paste0(model_name, ".rds"))
  # A file in the models folder marks the run as finished.
  if (file.exists(final_file)) {
    stop("Model already converged, delete iterations to run again")
  }
  res = get_current_beta(model_name, synced_folder)
  beta = res$beta
  iteration_number = res$iteration_number
  out_beta_file = file.path(
    beta_folder,
    paste0(
      model_name,
      sprintf("-iteration%04.0f", iteration_number),
      ".rds")
  )
  # list_gradient_files = list.files(
  #   gradients_folder,
  #   pattern = paste0("^", model_name, ".*",
  #                    sprintf("-iteration%04.0f", iteration_number),
  #                    ".rds"),
  #   full.names = TRUE)
  # Gradient files expected from every site for this iteration.
  all_gradient_files = file.path(
    gradients_folder,
    paste0(model_name, "-",
           all_site_names,
           sprintf("-iteration%04.0f", iteration_number),
           ".rds"))
  fe = file.exists(all_gradient_files)
  # should check if converged
  if (!file.exists(out_beta_file)) {
    if (!all(fe)) {
      print("Waiting for other sites to create gradients")
      print("Missing files:")
      print(all_gradient_files[!fe])
    } else {
      print(paste0(
        "Reading in gradients, iteration ", iteration_number))
      result = aggregate_gradients(
        all_gradient_files, iteration_number)
      gradient = result$gradient
      total_sample_size = result$total_sample_size
      if (is.null(beta)) {
        # First iteration: start from zero and force a non-converged
        # epsilon so at least one update is taken.
        beta = rep(0, length(gradient))
        epsilon = 10
      } else {
        # see glm.control: relative-change criterion, with a 0.1 fuzz
        # term guarding against near-zero coefficients
        epsilon = max(abs(gradient)/(abs(beta) + 0.1))
      }
      if (epsilon < tolerance) {
        print("Model has converged!")
        final_beta_list = list(
          beta = beta,
          num_iterations = iteration_number,
          gradient = gradient,
          tolerance = tolerance,
          epsilon = epsilon,
          total_sample_size = total_sample_size,
          max_gradient = max(abs(gradient)))
        readr::write_rds(final_beta_list, final_file)
        return(final_file)
      }
      # NOTE(review): plain gradient step with an implicit step size of
      # 1 (no learning rate) - confirm this is intended.
      beta = beta + gradient
      beta_list = list(
        beta = beta,
        previous_gradient = gradient,
        total_sample_size = total_sample_size,
        iteration_number_next = iteration_number + 1,
        tolerance = tolerance,
        epsilon = epsilon
      )
      readr::write_rds(beta_list, out_beta_file)
      rm(beta_list)
      return(out_beta_file)
    }
  } else {
    if (!all(fe)) {
      print("Waiting for other sites to create gradients")
      print("Missing files:")
      print(all_gradient_files[!fe])
    }
  }
}
clear_model = function(
  model_name, synced_folder
) {
  # Delete every .rds artefact belonging to `model_name` from all four
  # protocol folders (formulas, gradients, betas, models), so the model
  # can be fit again from scratch.
  rds_pattern = paste0("^", model_name, ".*.rds")
  matching = lapply(folder_names(synced_folder), function(folder) {
    list.files(path = folder,
               pattern = rds_pattern,
               full.names = TRUE)
  })
  file.remove(unlist(matching))
}
| /package_contents.R | no_license | muschellij2/distributed_model | R | false | false | 10,357 | r |
# this is gradient descent
gradient_value = function(beta = NULL, df, formula,
                          family = binomial(), iteration_number = 0,
                          shuffle_rows = TRUE) {
  # One gradient evaluation for a GLM log-likelihood.
  #
  # Args:
  #   beta: current coefficient vector; NULL starts from all zeros.
  #   df: data.frame holding the model variables.
  #   formula: model formula (response ~ terms).
  #   family: a stats family object (default binomial()).
  #   iteration_number: echoed back so callers can tag the result.
  #   shuffle_rows: permute rows first; the summed gradient is
  #     permutation-invariant, so the result is unchanged.
  #
  # Returns a list: gradient, sample_size (complete cases used) and
  # iteration_number.
  if (shuffle_rows) {
    df = df[sample(nrow(df)), ]
  }
  y = model.frame(formula, data = df)[, 1]
  X = model.matrix(formula, data = df)
  # Keep only rows with complete predictors; drop = FALSE keeps X a
  # matrix even when a single row survives.
  cc = complete.cases(X)
  X = X[cc, , drop = FALSE]
  y = y[cc]
  if (is.null(beta)) {
    beta = rep(0, ncol(X))
  }
  linkinv = family$linkinv
  variance <- family$variance
  mu.eta <- family$mu.eta
  eta = drop(X %*% beta)
  mu = c(linkinv(eta))
  # Per-observation weights as in the original code.
  # NOTE(review): the textbook score uses mu.eta/variance; this keeps
  # the original mu.eta^2 * variance ("without variance components").
  W = (mu.eta(eta)^2) * variance(mu)
  # PERF FIX: equivalent to t(X) %*% diag(W) %*% (y - mu) but without
  # materialising the n x n diagonal matrix (O(n) instead of O(n^2)
  # memory; also works for n == 1, where diag(W) misbehaved).
  gradient = drop(crossprod(X, W * (y - mu)))
  stopifnot(length(gradient) == length(beta))
  result = list(
    gradient = gradient,
    sample_size = nrow(X),
    iteration_number = iteration_number
  )
  return(result)
}
use_glm_gradient_value = function(
  beta = NULL, df, formula,
  family = binomial(), iteration_number = 0,
  shuffle_rows = TRUE) {
  # Take a single IRLS step by fitting a glm capped at one iteration,
  # starting from the supplied coefficients (zeros when beta is NULL).
  # iteration_number is accepted for interface parity with
  # gradient_value() but is not used here.
  if (shuffle_rows) {
    permuted = sample(nrow(df))
    df = df[permuted, ]
  }
  # The design matrix is built only to learn how many coefficients the
  # model has, so the zero start vector has the right length.
  design = model.matrix(formula, data = df)
  if (is.null(beta)) {
    beta = rep(0, ncol(design))
  }
  start = beta
  print(start)  # echo the starting coefficients (kept from the original)
  one_step_fit = glm(
    formula = formula,
    data = df,
    family = family,
    start = start,
    control = list(maxit = 1))
  one_step_fit
}
folder_names = function(synced_folder) {
  # Map a synced root directory to the four sub-folders used by the
  # distributed-fitting protocol; this layout is the same on all sites.
  subdirs = c(
    model_folder = "formulas",
    gradients_folder = "gradients",
    beta_folder = "betas",
    converged_folder = "models")
  lapply(subdirs, function(sub) file.path(synced_folder, sub))
}
master_beta_file = function(model_name, synced_folder) {
  # Path of the NEXT beta file to write for `model_name`.
  #
  # With no beta file yet, the next iteration is 1; otherwise it is the
  # iteration_number_next stored in the highest-numbered existing beta
  # file, keeping the file name in sync with the protocol state.
  file_list = folder_names(synced_folder)
  beta_folder = file_list$beta_folder
  all_beta_files = list.files(
    beta_folder,
    pattern = paste0("^", model_name, "-iteration.*.rds"),
    full.names = TRUE)
  if (length(all_beta_files) == 0) {
    iteration_number = 1
  } else {
    # Extract the numeric iteration suffix from each file name.
    beta_number = sub(".*iteration(.*)[.]rds", "\\1",
                      basename(all_beta_files))
    beta_number = as.numeric(beta_number)
    # BUG FIX: read_rds was called unqualified, so this failed unless
    # readr was attached; qualify it to match the readr:: usage
    # elsewhere in this file. (The unused local `beta` was removed.)
    beta_list = readr::read_rds(all_beta_files[which.max(beta_number)])
    iteration_number = beta_list$iteration_number_next
  }
  out_beta_file = file.path(
    beta_folder,
    paste0(
      model_name,
      sprintf("-iteration%04.0f", iteration_number),
      ".rds")
  )
  return(out_beta_file)
}
aggregate_gradients = function(
  all_gradient_files,
  iteration_number) {
  # Combine per-site gradient files into one sample-size-weighted
  # gradient.
  #
  # Each file (written by estimate_site_gradient) holds a list with
  # gradient, sample_size and iteration_number. Every file must belong
  # to `iteration_number`; the result is sum_i(gradient_i * n_i) / n
  # with n the pooled sample size.
  gradient_list = lapply(all_gradient_files, readr::read_rds)
  names(gradient_list) = all_gradient_files
  sum_grads = sapply(gradient_list, function(x) x$gradient)
  # BUG FIX: the original tested is.null(beta) on an undefined free
  # variable, erroring unless a global `beta` happened to exist; the
  # value was never used, so the block is removed. The unweighted
  # rowSums/grad-by-n computation was dead code and is removed too.
  stopifnot(is.matrix(sum_grads))
  # All sites must report the same iteration.
  iter_nums = sapply(gradient_list, function(x) x$iteration_number)
  stopifnot(is.vector(iter_nums))
  stopifnot(all(iter_nums == iteration_number))
  ss = sapply(gradient_list, function(x) x$sample_size)
  stopifnot(is.vector(ss))
  n = sum(ss)
  # Weight each site's gradient by its share of the pooled sample.
  weighted_grads = sapply(gradient_list, function(x) {
    x$gradient * x$sample_size / n
  })
  grad = rowSums(weighted_grads)
  result = list(
    gradient = grad,
    total_sample_size = n)
  return(result)
}
get_current_beta = function(model_name, synced_folder) {
  # Latest beta vector and the iteration number it belongs to.
  #
  # With no beta files yet, beta is NULL and the iteration is 1;
  # otherwise the highest-numbered beta file is loaded and its stored
  # iteration_number_next reported.
  file_list = folder_names(synced_folder)
  beta_folder = file_list$beta_folder
  all_beta_files = list.files(
    beta_folder,
    pattern = paste0("^", model_name, "-iteration.*.rds"),
    full.names = TRUE)
  if (length(all_beta_files) == 0) {
    beta = NULL
    iteration_number = 1
  } else {
    beta_number = sub(".*iteration(.*)[.]rds", "\\1",
                      basename(all_beta_files))
    beta_number = as.numeric(beta_number)
    # BUG FIX: qualify read_rds with readr:: (it was unqualified, so
    # the call failed unless readr was attached; the rest of this file
    # uses readr:: consistently).
    beta_list = readr::read_rds(all_beta_files[which.max(beta_number)])
    beta = beta_list$beta
    iteration_number = beta_list$iteration_number_next
  }
  L = list(
    iteration_number = iteration_number
  )
  # `$<-` with a NULL value leaves L without a beta element, which is
  # the intended "no beta yet" representation (L$beta still reads NULL).
  L$beta = beta
  return(L)
}
estimate_site_gradient = function(
  model_name, synced_folder,
  site_name = "site1", dataset,
  all_site_names = paste0("site", 1:3),
  shuffle_rows = TRUE) {
  # Data-site step of the distributed-GLM protocol: if this site has
  # not yet written a gradient for the current iteration, compute one
  # from `dataset` and write it to the shared gradients folder;
  # otherwise only print what the site is waiting on. Returns the
  # (existing or newly written) gradient file path.
  #
  # model_name/synced_folder identify the shared model and folder tree,
  # site_name must be one of all_site_names, dataset is the local
  # data.frame, shuffle_rows is forwarded to gradient_value().
  site_name = match.arg(site_name, choices = all_site_names)
  file_list = folder_names(synced_folder)
  gradients_folder = file_list$gradients_folder
  model_folder = file_list$model_folder
  # which model are we running: the processing site publishes
  # <model_name>.rds (formula + family) in the formulas folder
  formula_file = file.path(model_folder,
                           paste0(model_name, ".rds"))
  if (!file.exists(formula_file)) {
    stop(paste0("Formula file: ", formula_file, " doesn't exist!",
                " You may need to contact processing site or check your ",
                "synced_folder"))
  } else {
    formula_list = readr::read_rds(formula_file)
    formula = formula_list$formula
    family = formula_list$family
    # The stored family may be a name ("binomial"), a generator
    # function, or a family object; normalise to a family object.
    if (is.character(family)) {
      # NOTE(review): get() searches enclosing scopes of the base
      # namespace, which is how stats families are found when stats is
      # attached (the default) - confirm.
      family = get(family, envir = .BaseNamespaceEnv)
    }
    if (is.function(family)) {
      family = family()
    }
    if (!inherits(family, "family")) {
      stop("family specified is not a family object - see setup_model")
    }
  }
  # Current coefficients and iteration counter from the betas folder.
  res = get_current_beta(model_name, synced_folder)
  beta = res$beta
  iteration_number = res$iteration_number
  # This site's gradient file for the iteration ...
  gradient_file = file.path(
    gradients_folder,
    paste0(model_name, "-",
           site_name,
           sprintf("-iteration%04.0f", iteration_number),
           ".rds"))
  # ... and the files expected from every participating site.
  all_gradient_files = file.path(
    gradients_folder,
    paste0(model_name, "-",
           all_site_names,
           sprintf("-iteration%04.0f", iteration_number),
           ".rds"))
  # here we would simply wait
  # should check if converged
  if (file.exists(gradient_file)) {
    if (!all(file.exists(all_gradient_files))) {
      print("Waiting for other sites to create gradients")
    } else {
      print("Waiting for compute site to create new betas")
    }
  } else {
    print(paste0("Creating Gradient, iteration ",
                 iteration_number))
    # use_glm_gradient_value(beta = beta,
    #                        df = dataset,
    #                        formula = formula,
    #                        family = family,
    #                        iteration_number = iteration_number,
    #                        shuffle_rows = shuffle_rows)
    grad = gradient_value(beta = beta,
                          df = dataset,
                          formula = formula,
                          family = family,
                          iteration_number = iteration_number,
                          shuffle_rows = shuffle_rows)
    readr::write_rds(grad, gradient_file)
    rm(grad)
  }
  return(gradient_file)
}
estimate_new_beta = function(
  model_name, synced_folder,
  all_site_names = paste0("site", 1:3),
  tolerance = 1e-8) {
  # Compute-site step of the distributed-GLM protocol.
  #
  # Once every site has written its gradient for the current iteration,
  # aggregate them, test convergence, and write either the final model
  # file (models/<model_name>.rds) or the next beta file; the written
  # path is returned. While gradient files are missing, or the beta for
  # this iteration already exists, only status messages are printed and
  # nothing is written.
  file_list = folder_names(synced_folder)
  gradients_folder = file_list$gradients_folder
  beta_folder = file_list$beta_folder
  converged_folder = file_list$converged_folder
  final_file = file.path(converged_folder,
                         paste0(model_name, ".rds"))
  # A file in the models folder marks the run as finished.
  if (file.exists(final_file)) {
    stop("Model already converged, delete iterations to run again")
  }
  res = get_current_beta(model_name, synced_folder)
  beta = res$beta
  iteration_number = res$iteration_number
  out_beta_file = file.path(
    beta_folder,
    paste0(
      model_name,
      sprintf("-iteration%04.0f", iteration_number),
      ".rds")
  )
  # list_gradient_files = list.files(
  #   gradients_folder,
  #   pattern = paste0("^", model_name, ".*",
  #                    sprintf("-iteration%04.0f", iteration_number),
  #                    ".rds"),
  #   full.names = TRUE)
  # Gradient files expected from every site for this iteration.
  all_gradient_files = file.path(
    gradients_folder,
    paste0(model_name, "-",
           all_site_names,
           sprintf("-iteration%04.0f", iteration_number),
           ".rds"))
  fe = file.exists(all_gradient_files)
  # should check if converged
  if (!file.exists(out_beta_file)) {
    if (!all(fe)) {
      print("Waiting for other sites to create gradients")
      print("Missing files:")
      print(all_gradient_files[!fe])
    } else {
      print(paste0(
        "Reading in gradients, iteration ", iteration_number))
      result = aggregate_gradients(
        all_gradient_files, iteration_number)
      gradient = result$gradient
      total_sample_size = result$total_sample_size
      if (is.null(beta)) {
        # First iteration: start from zero and force a non-converged
        # epsilon so at least one update is taken.
        beta = rep(0, length(gradient))
        epsilon = 10
      } else {
        # see glm.control: relative-change criterion, with a 0.1 fuzz
        # term guarding against near-zero coefficients
        epsilon = max(abs(gradient)/(abs(beta) + 0.1))
      }
      if (epsilon < tolerance) {
        print("Model has converged!")
        final_beta_list = list(
          beta = beta,
          num_iterations = iteration_number,
          gradient = gradient,
          tolerance = tolerance,
          epsilon = epsilon,
          total_sample_size = total_sample_size,
          max_gradient = max(abs(gradient)))
        readr::write_rds(final_beta_list, final_file)
        return(final_file)
      }
      # NOTE(review): plain gradient step with an implicit step size of
      # 1 (no learning rate) - confirm this is intended.
      beta = beta + gradient
      beta_list = list(
        beta = beta,
        previous_gradient = gradient,
        total_sample_size = total_sample_size,
        iteration_number_next = iteration_number + 1,
        tolerance = tolerance,
        epsilon = epsilon
      )
      readr::write_rds(beta_list, out_beta_file)
      rm(beta_list)
      return(out_beta_file)
    }
  } else {
    if (!all(fe)) {
      print("Waiting for other sites to create gradients")
      print("Missing files:")
      print(all_gradient_files[!fe])
    }
  }
}
clear_model = function(
  model_name, synced_folder
) {
  # Delete every .rds artefact belonging to `model_name` from all four
  # protocol folders (formulas, gradients, betas, models), so the model
  # can be fit again from scratch.
  rds_pattern = paste0("^", model_name, ".*.rds")
  matching = lapply(folder_names(synced_folder), function(folder) {
    list.files(path = folder,
               pattern = rds_pattern,
               full.names = TRUE)
  })
  file.remove(unlist(matching))
}
|
# Phase 4: data analysis (government debt vs. deficit for Europe).
# Europe: scatter of 2014 debt vs. deficit with reference lines.
enajsti_graf <- ggplot(podatki3 %>% filter(Cas == 2014),
                       aes(x = Dolg, y = Deficit)) +
  guides(color = guide_legend(ncol = 2)) +
  geom_point(aes(color = Drzava, size = Dolg-10*Deficit)) +
  geom_hline(yintercept=crta) +
  geom_hline(yintercept=crta1, colour="red")
#plot(enajsti_graf+ geom_smooth(method = "lm"))
# Explicit deficit-from-debt predictions by year (linear models).
napoved <- lm(data = podatki3 %>% filter(Cas == 2006), Deficit ~ Dolg)
predict(napoved, data.frame(Dolg=seq(0, 250, 25)))
napoved2 <- lm(data = podatki3 %>% filter(Cas == 2014), Deficit ~ Dolg)
predict(napoved2, data.frame(Dolg=seq(0, 250, 25)))
napoved.tabela <- data.frame(Dolg=seq(0, 250, 25))
napoved.tabela$Deficit <- predict(napoved, napoved.tabela)
#View(napoved.tabela)
napoved.tabela2 <- data.frame(Dolg=seq(0, 250, 25))
napoved.tabela2$Deficit <- predict(napoved2, napoved.tabela2)
#View(napoved.tabela2)
# Group countries by debt and deficit (2014 values, scaled).
podatki3_2014 <- podatki3 %>% filter(Cas == 2014)
rownames(podatki3_2014) <- podatki3_2014$Drzava
podatki3_2014 <- podatki3_2014[c("Dolg", "Deficit")]
podatki3.norm <- scale(podatki3_2014)
# First map: two k-means clusters ("north" vs. "south").
k <- kmeans(podatki3.norm, 2)
table(k$cluster)
k <- kmeans(podatki3.norm, 2, nstart = 10000)
podatki3.skupine <- data.frame(Drzava = names(k$cluster),
                               skupina = factor(k$cluster))
skupina <- podatki3.skupine
sever.jug <- skupina[c("SWE", "GRC"), "skupina"]
m3 <- match(svet$adm0_a3, skupina$Drzava)
svet$skupina <- factor(skupina$skupina[m3], levels = sever.jug, ordered = TRUE)
evropa <- pretvori.zemljevid(svet, svet$continent == "Europe")
zem3 <- ggplot() + geom_polygon(data = evropa, aes(x=long, y=lat,
                                                   group = group, fill = skupina),
                                color = "grey") + xlim(-10, 50) +
  ylim(34, 72) + xlab("") + ylab("") +
  scale_fill_manual(values = setNames(c("#00bfc4", "#f8766d"), sever.jug),
                    labels = setNames(c("Sever", "Jug"), sever.jug),
                    na.value = "#7f7f7f")
#plot(zem3)
# Second map: four k-means clusters.
k2 <- kmeans(podatki3.norm, 4, nstart = 10000)
podatki3.skupine2 <- data.frame(Drzava = names(k2$cluster),
                                skupina2 = factor(k2$cluster))
skupina2 <- podatki3.skupine2
m4 <- match(svet$adm0_a3, skupina2$Drzava)
imena <- skupina2[c("DEU","SWE","SVN","GRC"),"skupina2"]
# BUG FIX: the column is named skupina2; the original `skupina2$skupina`
# only worked through `$` partial matching.
svet$skupina2 <- factor(skupina2$skupina2[m4], levels = imena, ordered = TRUE)
evropa <- pretvori.zemljevid(svet, svet$continent == "Europe")
zem4 <- ggplot() + geom_polygon(data = evropa, aes(x=long, y=lat,
                                                   group = group, fill = skupina2),
                                color = "grey") + xlim(-10, 50) +
  ylim(34, 72) + xlab("") + ylab("") +
  scale_fill_manual(values = setNames(c("cyan3","chartreuse2","yellow1",
                                        "firebrick1"), imena),
                    labels = setNames(c("Najboljši","Dobri",
                                        "Zadostni","Slabi"), imena),
                    na.value = "#7f7f7f")
#plot(zem4)
# Dendrogram of the 4-group split, Ward linkage.
razporeditev <- dist(as.matrix(podatki3.norm))
hc <- hclust(razporeditev, method = "ward.D")
n <- 4 # number of groups
dend <- as.dendrogram(hc, main = "Razporeditev držav", sub = "", hang = -1)
sk <- cutree(hc, k = n)
labels_colors(dend) <- rainbow(n)[sk][order.dendrogram(dend)]
#plot(dend)
# Same dendrogram with centroid linkage.
razporeditev2 <- dist(as.matrix(podatki3.norm))
hc2 <- hclust(razporeditev2, method = "centroid")
n <- 4 # number of groups
# BUG FIX: dend2 was built from hc (Ward) instead of hc2 (centroid).
dend2 <- as.dendrogram(hc2, main = "Razporeditev držav", sub = "", hang = -1)
sk2 <- cutree(hc2, k = n)
labels_colors(dend2) <- rainbow(n)[sk2][order.dendrogram(dend2)]
#plot(dend2)
| /analiza/analiza.r | permissive | aleksandrov2/APPR-2015-16 | R | false | false | 4,025 | r | # 4. faza: Analiza podatkov
# Europe: scatter of 2014 debt vs. deficit with reference lines.
enajsti_graf <- ggplot(podatki3 %>% filter(Cas == 2014),
                       aes(x = Dolg, y = Deficit)) +
  guides(color = guide_legend(ncol = 2)) +
  geom_point(aes(color = Drzava, size = Dolg-10*Deficit)) +
  geom_hline(yintercept=crta) +
  geom_hline(yintercept=crta1, colour="red")
#plot(enajsti_graf+ geom_smooth(method = "lm"))
# Explicit deficit-from-debt predictions by year (linear models).
napoved <- lm(data = podatki3 %>% filter(Cas == 2006), Deficit ~ Dolg)
predict(napoved, data.frame(Dolg=seq(0, 250, 25)))
napoved2 <- lm(data = podatki3 %>% filter(Cas == 2014), Deficit ~ Dolg)
predict(napoved2, data.frame(Dolg=seq(0, 250, 25)))
napoved.tabela <- data.frame(Dolg=seq(0, 250, 25))
napoved.tabela$Deficit <- predict(napoved, napoved.tabela)
#View(napoved.tabela)
napoved.tabela2 <- data.frame(Dolg=seq(0, 250, 25))
napoved.tabela2$Deficit <- predict(napoved2, napoved.tabela2)
#View(napoved.tabela2)
# Group countries by debt and deficit (2014 values, scaled).
podatki3_2014 <- podatki3 %>% filter(Cas == 2014)
rownames(podatki3_2014) <- podatki3_2014$Drzava
podatki3_2014 <- podatki3_2014[c("Dolg", "Deficit")]
podatki3.norm <- scale(podatki3_2014)
# First map: two k-means clusters ("north" vs. "south").
k <- kmeans(podatki3.norm, 2)
table(k$cluster)
k <- kmeans(podatki3.norm, 2, nstart = 10000)
podatki3.skupine <- data.frame(Drzava = names(k$cluster),
                               skupina = factor(k$cluster))
skupina <- podatki3.skupine
sever.jug <- skupina[c("SWE", "GRC"), "skupina"]
m3 <- match(svet$adm0_a3, skupina$Drzava)
svet$skupina <- factor(skupina$skupina[m3], levels = sever.jug, ordered = TRUE)
evropa <- pretvori.zemljevid(svet, svet$continent == "Europe")
zem3 <- ggplot() + geom_polygon(data = evropa, aes(x=long, y=lat,
                                                   group = group, fill = skupina),
                                color = "grey") + xlim(-10, 50) +
  ylim(34, 72) + xlab("") + ylab("") +
  scale_fill_manual(values = setNames(c("#00bfc4", "#f8766d"), sever.jug),
                    labels = setNames(c("Sever", "Jug"), sever.jug),
                    na.value = "#7f7f7f")
#plot(zem3)
# Second map: four k-means clusters.
k2 <- kmeans(podatki3.norm, 4, nstart = 10000)
podatki3.skupine2 <- data.frame(Drzava = names(k2$cluster),
                                skupina2 = factor(k2$cluster))
skupina2 <- podatki3.skupine2
m4 <- match(svet$adm0_a3, skupina2$Drzava)
imena <- skupina2[c("DEU","SWE","SVN","GRC"),"skupina2"]
# NOTE(review): skupina2$skupina relies on `$` partial matching (the
# column is skupina2) - works today, but confirm and use the full name.
svet$skupina2 <- factor(skupina2$skupina[m4],levels = imena, ordered = TRUE)
evropa <- pretvori.zemljevid(svet, svet$continent == "Europe")
zem4 <- ggplot() + geom_polygon(data = evropa, aes(x=long, y=lat,
                                                   group = group, fill = skupina2),
                                color = "grey") + xlim(-10, 50) +
  ylim(34, 72) + xlab("") + ylab("") +
  scale_fill_manual(values = setNames(c("cyan3","chartreuse2","yellow1",
                                        "firebrick1"), imena),
                    labels = setNames(c("Najboljši","Dobri",
                                        "Zadostni","Slabi"), imena),
                    na.value = "#7f7f7f")
#plot(zem4)
# Dendrogram of the 4-group split, Ward linkage.
razporeditev <- dist(as.matrix(podatki3.norm))
hc <- hclust(razporeditev, method = "ward.D")
n <- 4 # number of groups
dend <- as.dendrogram(hc, main = "Razporeditev držav", sub = "", hang = -1)
sk <- cutree(hc, k = n)
labels_colors(dend) <- rainbow(n)[sk][order.dendrogram(dend)]
#plot(dend)
# Same dendrogram with centroid linkage.
razporeditev2 <- dist(as.matrix(podatki3.norm))
hc2 <- hclust(razporeditev2, method = "centroid")
n <- 4 # number of groups
# NOTE(review): dend2 is built from hc (Ward), not hc2 (centroid) -
# looks like a copy-paste bug; confirm.
dend2 <- as.dendrogram(hc, main = "Razporeditev držav", sub = "", hang = -1)
sk2 <- cutree(hc2, k = n)
labels_colors(dend2) <- rainbow(n)[sk2][order.dendrogram(dend2)]
#plot(dend2)
|
args = commandArgs()
# Derive the output base name from the script path when run via
# "R --file=..."; interactive sessions yield a length-0 basename and
# the pdf device is then not opened.
basename = sub(".R$", "", sub("^--file=(.*/)?", "", args[grep("^--file=", args)]))
if (length(basename) != 0)
  pdf(file=paste0(basename, "_tmp.pdf"), colormodel="gray", width=7, height=3)
par(family="Palatino")
par(mgp=c(2,0.8,0)) # title and axis margins. default: c(3,1,0)
par(mar=c(3,3,2,2)+0.1) # bottom, left, top, right margins. default: c(5,4,4,2)+0.1
# CI = sapply(0:10, function(x) binom.test(x,10)$conf.int)
binomCI = function(n, y) {
  # Binomial interval for y successes out of n trials: quantiles of the
  # Beta(y + 1/2, n - y + 1/2) posterior (Jeffreys prior), one-sided at
  # the boundaries (y == 0 or y == n), central 95% otherwise.
  shape1 = y + 0.5
  shape2 = n - y + 0.5
  probs = if (y == 0) {
    c(0, 0.95)
  } else if (y == n) {
    c(0.05, 1)
  } else {
    c(0.025, 0.975)
  }
  qbeta(probs, shape1, shape2)
}
CCI = sapply(0:10, function(y) binomCI(10,y))  # 2 x 11 matrix of interval bounds for y = 0..10
f = function(x) {
  # Coverage probability at true success probability x: total binomial
  # probability of the y values whose interval contains x.
  p = dbinom(0:10, 10, x)
  sum(p * (CCI[1,] <= x & x <= CCI[2,]))
}
vf = Vectorize(f)  # vectorised coverage function for curve()
# Arcsine-square-root transform and its inverse, used to rescale both axes.
pb = function(x) (2/pi)*asin(sqrt(x))
qb = function(z) (sin(pi*z/2))^2
# Coverage vs. true p on the transformed scale; dotted line marks 0.95.
curve(pb(vf(qb(x))), n=10001, xlab="", ylab="", xaxt="n", yaxt="n", ylim=c(0.72,1))
abline(h=pb(0.95), lty=3)
axis(1, at=pb((0:10)/10), labels=(0:10)/10)
axis(2, at=pb(seq(0.85,1,0.05)), labels=seq(0.85,1,0.05), las=1)
dev.off()
# Re-embed fonts via Ghostscript so the final PDF is self-contained.
embedFonts(paste0(basename, "_tmp.pdf"), outfile=paste0(basename, ".pdf"),
           options="-c \"<</NeverEmbed []>> setdistillerparams\" -f ")
| /fig/CIcover10c.R | no_license | JDC-Shimada/bayesbook | R | false | false | 1,270 | r | args = commandArgs()
# Derive the output base name from the script path when run via
# "R --file=..."; interactive sessions yield a length-0 basename and
# the pdf device is then not opened.
basename = sub(".R$", "", sub("^--file=(.*/)?", "", args[grep("^--file=", args)]))
if (length(basename) != 0)
  pdf(file=paste0(basename, "_tmp.pdf"), colormodel="gray", width=7, height=3)
par(family="Palatino")
par(mgp=c(2,0.8,0)) # title and axis margins. default: c(3,1,0)
par(mar=c(3,3,2,2)+0.1) # bottom, left, top, right margins. default: c(5,4,4,2)+0.1
# CI = sapply(0:10, function(x) binom.test(x,10)$conf.int)
binomCI = function(n, y) {
  # Binomial interval for y successes out of n trials: quantiles of the
  # Beta(y + 1/2, n - y + 1/2) posterior (Jeffreys prior), one-sided at
  # the boundaries (y == 0 or y == n), central 95% otherwise.
  shape1 = y + 0.5
  shape2 = n - y + 0.5
  probs = if (y == 0) {
    c(0, 0.95)
  } else if (y == n) {
    c(0.05, 1)
  } else {
    c(0.025, 0.975)
  }
  qbeta(probs, shape1, shape2)
}
CCI = sapply(0:10, function(y) binomCI(10,y))  # 2 x 11 matrix of interval bounds for y = 0..10
f = function(x) {
  # Coverage probability at true success probability x: total binomial
  # probability of the y values whose interval contains x.
  p = dbinom(0:10, 10, x)
  sum(p * (CCI[1,] <= x & x <= CCI[2,]))
}
vf = Vectorize(f)  # vectorised coverage function for curve()
# Arcsine-square-root transform and its inverse, used to rescale both axes.
pb = function(x) (2/pi)*asin(sqrt(x))
qb = function(z) (sin(pi*z/2))^2
# Coverage vs. true p on the transformed scale; dotted line marks 0.95.
curve(pb(vf(qb(x))), n=10001, xlab="", ylab="", xaxt="n", yaxt="n", ylim=c(0.72,1))
abline(h=pb(0.95), lty=3)
axis(1, at=pb((0:10)/10), labels=(0:10)/10)
axis(2, at=pb(seq(0.85,1,0.05)), labels=seq(0.85,1,0.05), las=1)
dev.off()
# Re-embed fonts via Ghostscript so the final PDF is self-contained.
embedFonts(paste0(basename, "_tmp.pdf"), outfile=paste0(basename, ".pdf"),
           options="-c \"<</NeverEmbed []>> setdistillerparams\" -f ")
|
run_analysis <- function() {
    # Produce a tidy summary of the UCI HAR dataset:
    #   1. merge the training and test sets,
    #   2. keep only mean/std measurements,
    #   3. attach descriptive activity names,
    #   4. label the variables,
    #   5. average each variable per subject and activity -> ./hm5.txt.
    # All paths are relative to the unzipped "UCI HAR Dataset" folder.

    #1. Merges the training and the test sets to create one data set.
    #read training and test data from file
    # (fixed: read.table's argument is colClasses; the original
    # "colClass" only worked through partial argument matching)
    trainingX <- read.table("./train/X_train.txt", header = FALSE, colClasses = "character")
    trainingY <- read.table("./train/y_train.txt", header = FALSE, colClasses = "character")
    sTrain <- read.table("./train/subject_train.txt", header = FALSE, colClasses = "character")
    testX <- read.table("./test/X_test.txt", header = FALSE, colClasses = "character")
    testY <- read.table("./test/y_test.txt", header = FALSE, colClasses = "character")
    sTest <- read.table("./test/subject_test.txt", header = FALSE, colClasses = "character")

    #merge all training related data
    training <- cbind(sTrain, trainingX, trainingY)
    #merge all testing related data
    test <- cbind(sTest, testX, testY)
    #merge testing and training data
    mg <- rbind(training, test)

    #apply feature names from file to table heading names while
    #retaining subject identifier and results
    features <- read.table("./features.txt", header = FALSE, colClasses = "character")
    headings <- c("subject", features[,2], "resultY")
    names(mg) <- headings

    #2. Extracts only the measurements on the mean and standard
    #deviation for each measurement (keep subject/resultY too).
    booVect <- grepl("(mean|std|subject|resultY)", headings, ignore.case = TRUE)
    mg2 <- mg[,booVect]

    #3. Uses descriptive activity names to name the activities.
    # (fixed: was a machine-specific absolute path; use the same
    # relative layout as every other input file)
    activities <- read.table("./activity_labels.txt", header = FALSE, colClasses = "character")
    names(activities) <- c("resultY", "activity")

    #4. Merge the activity names into the data table.
    # (fixed: `by` listed "resultY" twice)
    mg3 <- merge(x = mg2, y = activities, by = "resultY")

    #5. Average of each variable for each activity and each subject.
    # NOTE(review): 2:87 is hard-coded to the mean/std selection above;
    # confirm it still matches if the feature filter changes.
    mg4 <- aggregate(mg3[, 2:87], by = list(subject = mg3$subject, result = mg3$activity), mean)
    #write table to file
    write.table(mg4, file = "./hm5.txt" , row.names = FALSE)
}
| /run_analysis.R | no_license | drewclyde/BodySensorsData | R | false | false | 2,545 | r | run_analysis <- function() {
    #1. Merges the training and the test sets to create one data set.
    #read training and test data from file
    # NOTE(review): read.table's argument is colClasses; "colClass"
    # works only through partial argument matching - confirm and fix.
    trainingX <- read.table("./train/X_train.txt", header = FALSE, colClass = "character")
    trainingY <- read.table("./train/y_train.txt", header = FALSE, colClass = "character")
    sTrain <- read.table("./train/subject_train.txt", header = FALSE, colClass = "character")
    testX <- read.table("./test/X_test.txt", header = FALSE, colClass = "character")
    testY <- read.table("./test/y_test.txt", header = FALSE, colClass = "character")
    sTest <- read.table("./test/subject_test.txt", header = FALSE, colClass = "character")
    #merge all training related data
    training <- cbind(sTrain, trainingX, trainingY)
    #merge all testing related data
    test <- cbind(sTest, testX, testY)
    #merge testing and training data
    mg <- rbind(training, test)
    #apply feature names from file to table heading names while
    #retaining subject identifier and results
    features <- read.table("./features.txt", header = FALSE, colClass = "character")
    headings <- c("subject", features[,2], "resultY")
    names(mg) <- headings
    #2. Extracts only the measurements on the mean and standard
    #deviation for each measurement.
    #create a boolean vector of all feature headings containing 'mean'
    #and 'std' without discarding 'subject' and 'resultY' columns
    booVect <- grepl("(mean|std|subject|resultY)", headings, ignore.case = TRUE)
    #create new table with only these columns
    mg2 <- mg[,booVect]
    #3. Uses descriptive activity names to name the activities
    #in the data set
    #read activity names from file
    # NOTE(review): absolute machine-specific path - every other input
    # uses a relative path; confirm and change to ./activity_labels.txt.
    activities <- read.table("/Users/drewt/documents/DataSci/3.CleaningData/UCI HAR Dataset/activity_labels.txt", header = FALSE, colClass = "character")
    #rename column headings to be consistent with other tables
    names(activities) <- c("resultY", "activity")
    #4. Appropriately labels the data set with descriptive
    # variable names.
    #merge the activity names with data table (by = "resultY" is listed
    #twice; merge still joins on resultY)
    mg3 <- merge(x = mg2, y = activities, by = c("resultY", "resultY"))
    #5. From the data set in step 4, creates a second, independent
    #tidy data set with the average of each variable for each activity
    #and each subject.
    #calculate mean based on subject id and activity name using aggregate
    mg4 <- aggregate(mg3[, 2:87], by = list(subject = mg3$subject, result = mg3$activity), mean)
    #write table to file
    write.table(mg4, file = "./hm5.txt" , row.names = FALSE)
}
|
#' Simulate a simple experiement
#' <means stop being title>
#' Stimulate potential outcomes and adds a variable
#' assumes SUTVA, defines the observed outcomes, deletes
#' potential otucomes and returns the result
#' @param n number of observations
#' @param seed RNG seed
#' @return data.table
#' @export
#' @import data.table
simulate_potential_outcomes<-function(n,
seed = sample.int(.Machine$integer.max, 1))
{
set.seed(seed)
prob_y0<-.7
prob_y1<-.3
DATA<-data.table(
y0 = 1 * (runif(n) < prob_y0),
y1 = 1 * (runif(n) < prob_y1))
attr(DATA, "seed") <- seed
DATA
}
simulate_simple_experiment <- function(n, prob_treatment,
seed = sample.int(.Machine$integer.max, 1))
{
set.seed(seed)
DATA<-simulate_potential_outcomes(n)
#assign some subjects to treatment
#names var inside the dataset := is defined to be
#no causality yet, just assigning
DATA[, prob := prob_treatment]
DATA[, d := 1 * (runif(n) < prob_treatment)]
DATA[, y:= 1 * d * y1 + (1-d) * y0]
DATA[, `:=`(y1 = NULL, y0 = NULL)]
attr(DATA, "seed")<-seed
DATA
}
simulate_observational_study <- function(n,
seed = sample.int(.Machine$integer.max, 1))
{
set.seed(seed)
DATA<-simulate_potential_outcomes(n)
#assign some subjects to treatment
#names var inside the dataset := is defined to be
#no causality yet, just assigning
DATA[, prob_d_equals_1 := plogis(-2 + 4 * ( y1 - y0))]
DATA[, d := 1 * (runif(n) < prob_d_equals_1)]
DATA[, y:= 1 * d * y1 + (1-d) * y0]
DATA[, `:=`(y1 = NULL, y0 = NULL,
prob_d_equals_1 = NULL)]
attr(DATA, "seed")<-seed
DATA
}
#' Title
#'
#' details
#' @param DATA
#' @return number
#' @import data.table
#' @export
#'
calc_naive_difference_in_means <- function(DATA){
DATA[, mean(y[d==1]) - mean(y[d==0])]
##logical test, is d equal to one, returns true or false... hence ==
##serves to filter...section
}
| /causalA16/R/potential_outcomes.R | no_license | simmt/quant3 | R | false | false | 2,004 | r | #' Simulate a simple experiement
#' <means stop being title>
#' Stimulate potential outcomes and adds a variable
#' assumes SUTVA, defines the observed outcomes, deletes
#' potential otucomes and returns the result
#' @param n number of observations
#' @param seed RNG seed
#' @return data.table
#' @export
#' @import data.table
simulate_potential_outcomes<-function(n,
seed = sample.int(.Machine$integer.max, 1))
{
set.seed(seed)
prob_y0<-.7
prob_y1<-.3
DATA<-data.table(
y0 = 1 * (runif(n) < prob_y0),
y1 = 1 * (runif(n) < prob_y1))
attr(DATA, "seed") <- seed
DATA
}
simulate_simple_experiment <- function(n, prob_treatment,
seed = sample.int(.Machine$integer.max, 1))
{
set.seed(seed)
DATA<-simulate_potential_outcomes(n)
#assign some subjects to treatment
#names var inside the dataset := is defined to be
#no causality yet, just assigning
DATA[, prob := prob_treatment]
DATA[, d := 1 * (runif(n) < prob_treatment)]
DATA[, y:= 1 * d * y1 + (1-d) * y0]
DATA[, `:=`(y1 = NULL, y0 = NULL)]
attr(DATA, "seed")<-seed
DATA
}
simulate_observational_study <- function(n,
seed = sample.int(.Machine$integer.max, 1))
{
set.seed(seed)
DATA<-simulate_potential_outcomes(n)
#assign some subjects to treatment
#names var inside the dataset := is defined to be
#no causality yet, just assigning
DATA[, prob_d_equals_1 := plogis(-2 + 4 * ( y1 - y0))]
DATA[, d := 1 * (runif(n) < prob_d_equals_1)]
DATA[, y:= 1 * d * y1 + (1-d) * y0]
DATA[, `:=`(y1 = NULL, y0 = NULL,
prob_d_equals_1 = NULL)]
attr(DATA, "seed")<-seed
DATA
}
#' Title
#'
#' details
#' @param DATA
#' @return number
#' @import data.table
#' @export
#'
calc_naive_difference_in_means <- function(DATA){
DATA[, mean(y[d==1]) - mean(y[d==0])]
##logical test, is d equal to one, returns true or false... hence ==
##serves to filter...section
}
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/github-friendly-readmes.R
\name{knit_rmds_to_rdme}
\alias{knit_rmds_to_rdme}
\title{knit all rmarkdown files into .md files in their own directories and name each
README.md so as to take advantage of the github previewing.}
\usage{
knit_rmds_to_rdme()
}
\description{
knit all rmarkdown files into .md files in their own directories and name each
README.md so as to take advantage of the github previewing.
}
\keyword{int}
| /man/knit_rmds_to_rdme.Rd | no_license | npjc/dames | R | false | false | 510 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/github-friendly-readmes.R
\name{knit_rmds_to_rdme}
\alias{knit_rmds_to_rdme}
\title{knit all rmarkdown files into .md files in their own directories and name each
README.md so as to take advantage of the github previewing.}
\usage{
knit_rmds_to_rdme()
}
\description{
knit all rmarkdown files into .md files in their own directories and name each
README.md so as to take advantage of the github previewing.
}
\keyword{int}
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 1777
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 1777
c
c Input Parameter (command line, file):
c input filename QBFLIB/Pan/k_grz_p/k_grz_p-10.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 503
c no.of clauses 1777
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 1777
c
c QBFLIB/Pan/k_grz_p/k_grz_p-10.qdimacs 503 1777 E1 [] 0 20 483 1777 NONE
| /code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Pan/k_grz_p/k_grz_p-10/k_grz_p-10.R | no_license | arey0pushpa/dcnf-autarky | R | false | false | 602 | r | c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 1777
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 1777
c
c Input Parameter (command line, file):
c input filename QBFLIB/Pan/k_grz_p/k_grz_p-10.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 503
c no.of clauses 1777
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 1777
c
c QBFLIB/Pan/k_grz_p/k_grz_p-10.qdimacs 503 1777 E1 [] 0 20 483 1777 NONE
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helper_funcs.R
\name{calc_coordinates}
\alias{calc_coordinates}
\title{A function that calculates the coordinates of parliamentary seats in incomplete circular parliaments
E.g. The US (semicircle) and Australian (horseshoe) shaped parliaments}
\usage{
calc_coordinates(N, M, limits, segment = 0.5)
}
\arguments{
\item{N}{the total of number of seats}
\item{M}{the number of rows in parliament}
\item{limits}{the limits to seq the radii between- controls the 'shape' of the parliament}
\item{segment}{the percentage of a full circle for the final plot- defaults to 0.5 (a semicircle)}
}
\description{
A function that calculates the coordinates of parliamentary seats in incomplete circular parliaments
E.g. The US (semicircle) and Australian (horseshoe) shaped parliaments
}
\author{
Zoe Meers, Rob Hickman
}
| /man/calc_coordinates.Rd | no_license | cran/ggparliament | R | false | true | 914 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helper_funcs.R
\name{calc_coordinates}
\alias{calc_coordinates}
\title{A function that calculates the coordinates of parliamentary seats in incomplete circular parliaments
E.g. The US (semicircle) and Australian (horseshoe) shaped parliaments}
\usage{
calc_coordinates(N, M, limits, segment = 0.5)
}
\arguments{
\item{N}{the total of number of seats}
\item{M}{the number of rows in parliament}
\item{limits}{the limits to seq the radii between- controls the 'shape' of the parliament}
\item{segment}{the percentage of a full circle for the final plot- defaults to 0.5 (a semicircle)}
}
\description{
A function that calculates the coordinates of parliamentary seats in incomplete circular parliaments
E.g. The US (semicircle) and Australian (horseshoe) shaped parliaments
}
\author{
Zoe Meers, Rob Hickman
}
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 3874
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 3874
c
c Input Parameter (command line, file):
c input filename QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query60_query05_1344n.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 1757
c no.of clauses 3874
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 3874
c
c QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query60_query05_1344n.qdimacs 1757 3874 E1 [] 0 108 1649 3874 NONE
| /code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query60_query05_1344n/query60_query05_1344n.R | no_license | arey0pushpa/dcnf-autarky | R | false | false | 714 | r | c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 3874
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 3874
c
c Input Parameter (command line, file):
c input filename QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query60_query05_1344n.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 1757
c no.of clauses 3874
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 3874
c
c QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query60_query05_1344n.qdimacs 1757 3874 E1 [] 0 108 1649 3874 NONE
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/analytics-report.R
\name{sf_delete_report_instance}
\alias{sf_delete_report_instance}
\title{Delete a report instance}
\usage{
sf_delete_report_instance(report_id, report_instance_id, verbose = FALSE)
}
\arguments{
\item{report_id}{\code{character}; the Salesforce Id assigned to a created
analytics report. It will start with \code{"00O"}.}
\item{report_instance_id}{\code{character}; the Salesforce Id assigned to a
created analytics report instance (an asynchronous run). It will start with
\code{"0LG"}.}
\item{verbose}{\code{logical}; an indicator of whether to print additional
detail for each API call, which is useful for debugging. More specifically, when
set to \code{TRUE} the URL, header, and body will be printed for each request,
along with additional diagnostic information where available.}
}
\value{
\code{logical} indicating whether the report instance was deleted. This function
will return \code{TRUE} if successful in deleting the report instance.
}
\description{
\ifelse{html}{\out{<a href='https://www.tidyverse.org/lifecycle/#experimental'><img src='figures/lifecycle-experimental.svg' alt='Experimental lifecycle'></a>}}{\strong{Experimental}}
If the given report instance has a status of \code{Success} or \code{Error},
delete the report instance.
}
\section{Salesforce Documentation}{
\itemize{
\item \href{https://developer.salesforce.com/docs/atlas.en-us.api_analytics.meta/api_analytics/sforce_analytics_rest_api_instance_resource_results.htm}{Documentation}
}
}
\examples{
\dontrun{
# first, get the Id of a report in your Org
all_reports <- sf_query("SELECT Id, Name FROM Report")
this_report_id <- all_reports$Id[1]
# second, ensure that report has been executed at least once asynchronously
results <- sf_execute_report(this_report_id, async=TRUE)
# check if that report has succeeded, if so (or if it errored), then delete
instance_list <- sf_list_report_instances(this_report_id)
instance_status <- instance_list[[which(instance_list$id == results$id), "status"]]
}
}
\seealso{
Other Report Instance functions:
\code{\link{sf_get_report_instance_results}()},
\code{\link{sf_list_report_instances}()}
}
\concept{Report Instance functions}
| /man/sf_delete_report_instance.Rd | permissive | carlganz/salesforcer | R | false | true | 2,260 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/analytics-report.R
\name{sf_delete_report_instance}
\alias{sf_delete_report_instance}
\title{Delete a report instance}
\usage{
sf_delete_report_instance(report_id, report_instance_id, verbose = FALSE)
}
\arguments{
\item{report_id}{\code{character}; the Salesforce Id assigned to a created
analytics report. It will start with \code{"00O"}.}
\item{report_instance_id}{\code{character}; the Salesforce Id assigned to a
created analytics report instance (an asynchronous run). It will start with
\code{"0LG"}.}
\item{verbose}{\code{logical}; an indicator of whether to print additional
detail for each API call, which is useful for debugging. More specifically, when
set to \code{TRUE} the URL, header, and body will be printed for each request,
along with additional diagnostic information where available.}
}
\value{
\code{logical} indicating whether the report instance was deleted. This function
will return \code{TRUE} if successful in deleting the report instance.
}
\description{
\ifelse{html}{\out{<a href='https://www.tidyverse.org/lifecycle/#experimental'><img src='figures/lifecycle-experimental.svg' alt='Experimental lifecycle'></a>}}{\strong{Experimental}}
If the given report instance has a status of \code{Success} or \code{Error},
delete the report instance.
}
\section{Salesforce Documentation}{
\itemize{
\item \href{https://developer.salesforce.com/docs/atlas.en-us.api_analytics.meta/api_analytics/sforce_analytics_rest_api_instance_resource_results.htm}{Documentation}
}
}
\examples{
\dontrun{
# first, get the Id of a report in your Org
all_reports <- sf_query("SELECT Id, Name FROM Report")
this_report_id <- all_reports$Id[1]
# second, ensure that report has been executed at least once asynchronously
results <- sf_execute_report(this_report_id, async=TRUE)
# check if that report has succeeded, if so (or if it errored), then delete
instance_list <- sf_list_report_instances(this_report_id)
instance_status <- instance_list[[which(instance_list$id == results$id), "status"]]
}
}
\seealso{
Other Report Instance functions:
\code{\link{sf_get_report_instance_results}()},
\code{\link{sf_list_report_instances}()}
}
\concept{Report Instance functions}
|
#' Chao-Bunge species richness estimator
#'
#' This function implements the species richness estimation procedure outlined
#' in Chao & Bunge (2002).
#'
#'
#' @param input_data An input type that can be processed by \code{convert()} or a \code{phyloseq} object
#' @param cutoff The maximum frequency to use in fitting.
#' @param output Deprecated; only for backwards compatibility
#' @param answers Deprecated; only for backwards compatibility
#'
#' @return An object of class \code{alpha_estimate}, or \code{alpha_estimates} for \code{phyloseq} objects
#'
#' @author Amy Willis
#' @examples
#'
#' chao_bunge(apples)
#'
#' @export
chao_bunge <- function(input_data,
cutoff=10,
output=NULL, answers=NULL) {
my_warnings <- NULL
if (class(input_data) == "phyloseq") {
if (input_data %>% otu_table %>% taxa_are_rows) {
return(input_data %>%
get_taxa %>%
apply(2, function(x) chao_bunge(make_frequency_count_table(x))) %>%
alpha_estimates)
} else {
return(input_data %>%
otu_table %>%
apply(1, function(x) chao_bunge(make_frequency_count_table(x))) %>%
alpha_estimates)
}
}
my_data <- convert(input_data)
input_data <- my_data
cc <- sum(input_data[,2])
index <- 1:max(my_data[,1])
frequency_index <- rep(0, length(index))
frequency_index[my_data[,1]] <- my_data[,2]
f1 <- frequency_index[1]
n <- sum(frequency_index)
if (min(my_data[, 1]) > cutoff) {
my_warnings <- c(my_warnings, "cutoff exceeds minimum frequency count index")
cutoff <- max(my_data[, 1])
}
my_data <- my_data[ my_data[,1] <= cutoff, ]
cutoff <- max(my_data[,1])
d_a <- sum(input_data[input_data[,1] > cutoff, 2])
k <- 2:cutoff
m <- 1:cutoff
numerator <- frequency_index[k]
denominator <- 1 - f1*sum(m^2*frequency_index[m])/(sum(m*frequency_index[m]))^2 #
if (denominator == 0) {
diversity <- NA
diversity_se <- NA
f0 <- NA
interval_tmp <- NA
my_warnings <- c(my_warnings, "zero denominator in estimate for f0")
return(alpha_estimate(estimate = diversity,
error = diversity_se,
estimand = "richness",
name = "Chao-Bunge",
interval = interval_tmp,
type = "???",
model = "Negative Binomial",
frequentist = TRUE,
parametric = TRUE,
reasonable = TRUE,
interval_type = "Approximate: log-normal",
warnings = my_warnings,
other = list(cutoff = cutoff)))
}
diversity <- d_a + sum(numerator/denominator)
f0 <- diversity - cc
if (diversity >= 0) {
fs_up_to_cut_off <- frequency_index[m]
n_tau <- sum(m * fs_up_to_cut_off)
s_tau <- sum(fs_up_to_cut_off)
H <- sum(m^2 * fs_up_to_cut_off)
derivatives <- n_tau * (n_tau^3 + f1 * n_tau * m^2 *
s_tau - n_tau * f1 * H - f1^2 * n_tau * m^2 - 2 *
f1 * H * m * s_tau + 2 * f1^2 * H * m)/(n_tau^2 -
f1 * H)^2
derivatives[1] <- n_tau * (s_tau - f1) * (f1 * n_tau -
2 * f1 * H + n_tau * H)/(n_tau^2 - f1 * H)^2
covariance <- diag(rep(0, cutoff))
for (i in 1:(cutoff - 1)) {
covariance[i, (i + 1):cutoff] <- -fs_up_to_cut_off[i] * fs_up_to_cut_off[(i +
1):cutoff]/diversity
}
covariance <- t(covariance) + covariance
diag(covariance) <- fs_up_to_cut_off * (1 - fs_up_to_cut_off/diversity)
diversity_se <- c(sqrt(derivatives %*% covariance %*% derivatives))
} else {
# wlrm <- wlrm_untransformed(input_data, print = F, answers = T)
# if (is.null(wlrm$est)) {
# wlrm <- wlrm_transformed(input_data, print = F, answers = T)
# }
diversity <- NA
diversity_se <- NA
f0 <- NA
my_warnings <- c(my_warnings, "negative richness estimate")
}
# if(output) {
# cat("################## Chao-Bunge ##################\n")
# cat("\tThe estimate of total diversity is", round(diversity),
# "\n \t with std error",round(diversity_se),"\n")
# }
# if(answers) {
# result <- list()
# result$name <- "Chao-Bunge"
# result$est <- diversity
# result$seest <- as.vector(diversity_se)
d <- exp(1.96*sqrt(log(1+diversity_se^2/f0)))
# result$ci <- c(n+f0/d,n+f0*d)
# return(result)
# }
alpha_estimate(estimate = diversity,
error = diversity_se,
estimand = "richness",
name = "Chao-Bunge",
interval = c(n + f0/d, n + f0*d),
type = "???",
model = "Negative Binomial",
frequentist = TRUE,
parametric = TRUE,
reasonable = TRUE,
interval_type = "Approximate: log-normal",
warnings = my_warnings,
other = list(cutoff = cutoff))
}
| /R/richness_chao_bunge.R | no_license | bryandmartin/breakaway | R | false | false | 5,209 | r | #' Chao-Bunge species richness estimator
#'
#' This function implements the species richness estimation procedure outlined
#' in Chao & Bunge (2002).
#'
#'
#' @param input_data An input type that can be processed by \code{convert()} or a \code{phyloseq} object
#' @param cutoff The maximum frequency to use in fitting.
#' @param output Deprecated; only for backwards compatibility
#' @param answers Deprecated; only for backwards compatibility
#'
#' @return An object of class \code{alpha_estimate}, or \code{alpha_estimates} for \code{phyloseq} objects
#'
#' @author Amy Willis
#' @examples
#'
#' chao_bunge(apples)
#'
#' @export
chao_bunge <- function(input_data,
cutoff=10,
output=NULL, answers=NULL) {
my_warnings <- NULL
if (class(input_data) == "phyloseq") {
if (input_data %>% otu_table %>% taxa_are_rows) {
return(input_data %>%
get_taxa %>%
apply(2, function(x) chao_bunge(make_frequency_count_table(x))) %>%
alpha_estimates)
} else {
return(input_data %>%
otu_table %>%
apply(1, function(x) chao_bunge(make_frequency_count_table(x))) %>%
alpha_estimates)
}
}
my_data <- convert(input_data)
input_data <- my_data
cc <- sum(input_data[,2])
index <- 1:max(my_data[,1])
frequency_index <- rep(0, length(index))
frequency_index[my_data[,1]] <- my_data[,2]
f1 <- frequency_index[1]
n <- sum(frequency_index)
if (min(my_data[, 1]) > cutoff) {
my_warnings <- c(my_warnings, "cutoff exceeds minimum frequency count index")
cutoff <- max(my_data[, 1])
}
my_data <- my_data[ my_data[,1] <= cutoff, ]
cutoff <- max(my_data[,1])
d_a <- sum(input_data[input_data[,1] > cutoff, 2])
k <- 2:cutoff
m <- 1:cutoff
numerator <- frequency_index[k]
denominator <- 1 - f1*sum(m^2*frequency_index[m])/(sum(m*frequency_index[m]))^2 #
if (denominator == 0) {
diversity <- NA
diversity_se <- NA
f0 <- NA
interval_tmp <- NA
my_warnings <- c(my_warnings, "zero denominator in estimate for f0")
return(alpha_estimate(estimate = diversity,
error = diversity_se,
estimand = "richness",
name = "Chao-Bunge",
interval = interval_tmp,
type = "???",
model = "Negative Binomial",
frequentist = TRUE,
parametric = TRUE,
reasonable = TRUE,
interval_type = "Approximate: log-normal",
warnings = my_warnings,
other = list(cutoff = cutoff)))
}
diversity <- d_a + sum(numerator/denominator)
f0 <- diversity - cc
if (diversity >= 0) {
fs_up_to_cut_off <- frequency_index[m]
n_tau <- sum(m * fs_up_to_cut_off)
s_tau <- sum(fs_up_to_cut_off)
H <- sum(m^2 * fs_up_to_cut_off)
derivatives <- n_tau * (n_tau^3 + f1 * n_tau * m^2 *
s_tau - n_tau * f1 * H - f1^2 * n_tau * m^2 - 2 *
f1 * H * m * s_tau + 2 * f1^2 * H * m)/(n_tau^2 -
f1 * H)^2
derivatives[1] <- n_tau * (s_tau - f1) * (f1 * n_tau -
2 * f1 * H + n_tau * H)/(n_tau^2 - f1 * H)^2
covariance <- diag(rep(0, cutoff))
for (i in 1:(cutoff - 1)) {
covariance[i, (i + 1):cutoff] <- -fs_up_to_cut_off[i] * fs_up_to_cut_off[(i +
1):cutoff]/diversity
}
covariance <- t(covariance) + covariance
diag(covariance) <- fs_up_to_cut_off * (1 - fs_up_to_cut_off/diversity)
diversity_se <- c(sqrt(derivatives %*% covariance %*% derivatives))
} else {
# wlrm <- wlrm_untransformed(input_data, print = F, answers = T)
# if (is.null(wlrm$est)) {
# wlrm <- wlrm_transformed(input_data, print = F, answers = T)
# }
diversity <- NA
diversity_se <- NA
f0 <- NA
my_warnings <- c(my_warnings, "negative richness estimate")
}
# if(output) {
# cat("################## Chao-Bunge ##################\n")
# cat("\tThe estimate of total diversity is", round(diversity),
# "\n \t with std error",round(diversity_se),"\n")
# }
# if(answers) {
# result <- list()
# result$name <- "Chao-Bunge"
# result$est <- diversity
# result$seest <- as.vector(diversity_se)
d <- exp(1.96*sqrt(log(1+diversity_se^2/f0)))
# result$ci <- c(n+f0/d,n+f0*d)
# return(result)
# }
alpha_estimate(estimate = diversity,
error = diversity_se,
estimand = "richness",
name = "Chao-Bunge",
interval = c(n + f0/d, n + f0*d),
type = "???",
model = "Negative Binomial",
frequentist = TRUE,
parametric = TRUE,
reasonable = TRUE,
interval_type = "Approximate: log-normal",
warnings = my_warnings,
other = list(cutoff = cutoff))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{digamma_inv}
\alias{digamma_inv}
\title{Inverse digamma function.}
\usage{
digamma_inv(y, precision = 1e-08)
}
\arguments{
\item{y}{value to evaluate the inverse digamma function at.}
\item{precision}{default = 1e-08.}
}
\value{
Numeric inverse digamma value.
}
\description{
Evaluate the inverse digamma function.
}
| /man/digamma_inv.Rd | no_license | cran/gigg | R | false | true | 435 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{digamma_inv}
\alias{digamma_inv}
\title{Inverse digamma function.}
\usage{
digamma_inv(y, precision = 1e-08)
}
\arguments{
\item{y}{value to evaluate the inverse digamma function at.}
\item{precision}{default = 1e-08.}
}
\value{
Numeric inverse digamma value.
}
\description{
Evaluate the inverse digamma function.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/eda_re.R
\name{eda_re}
\alias{eda_re}
\title{Re-expression function}
\usage{
eda_re(x, p = 0, tukey = TRUE)
}
\arguments{
\item{x}{Vector}
\item{p}{Power transformation}
\item{tukey}{If set to TRUE then adopt Tukey's power transformation, if FALSE,
adopt Box-Cox transformation technique}
}
\description{
\code{eda_re} re-expresses a vector following the Tukey or box-cox transformation.
}
\details{
The `eda_re` function is used to re-express data using one of two
transformation techniques: Box-Cox transformation (tukey=FALSE)or
Tukey's power transformation (tukey=TRUE).
}
\examples{
x <- c(15, 28, 17, 73, 8, 83, 2)
eda_re(x, p=-1/3)
}
| /man/eda_re.Rd | no_license | ckingdon/FFR_mgimond-tukeyedar | R | false | true | 725 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/eda_re.R
\name{eda_re}
\alias{eda_re}
\title{Re-expression function}
\usage{
eda_re(x, p = 0, tukey = TRUE)
}
\arguments{
\item{x}{Vector}
\item{p}{Power transformation}
\item{tukey}{If set to TRUE then adopt Tukey's power transformation, if FALSE,
adopt Box-Cox transformation technique}
}
\description{
\code{eda_re} re-expresses a vector following the Tukey or box-cox transformation.
}
\details{
The `eda_re` function is used to re-express data using one of two
transformation techniques: Box-Cox transformation (tukey=FALSE)or
Tukey's power transformation (tukey=TRUE).
}
\examples{
x <- c(15, 28, 17, 73, 8, 83, 2)
eda_re(x, p=-1/3)
}
|
### To get the column names of the data set to be built
features <- as.list(read.table("C:/Users/KRISHNAKUMAR/Documents/UCI HAR Dataset/features.txt"))
column_names <- c("activity", "subject", as.character(features$V2))
### To build the training data set
labl_train <- read.table("C:/Users/KRISHNAKUMAR/Documents/UCI HAR Dataset/train/y_train.txt")
subj_train <- read.table("C:/Users/KRISHNAKUMAR/Documents/UCI HAR Dataset/train/subject_train.txt")
data_train <- read.table("C:/Users/KRISHNAKUMAR/Documents/UCI HAR Dataset/train/X_train.txt")
train_data <- cbind(labl_train, subj_train, data_train)
colnames(train_data) <- column_names
### To build the test data set
labl_test <- read.table("C:/Users/KRISHNAKUMAR/Documents/UCI HAR Dataset/test/y_test.txt")
subj_test <- read.table("C:/Users/KRISHNAKUMAR/Documents/UCI HAR Dataset/test/subject_test.txt")
data_test <- read.table("C:/Users/KRISHNAKUMAR/Documents/UCI HAR Dataset/test/X_test.txt")
test_data <- cbind(labl_test, subj_test, data_test)
colnames(test_data) <- column_names
### 1) Merge test and training data as one data set
merged_data <- rbind(train_data, test_data)
### 2) Extracts only the measurements on the mean and standard deviation for each measurement
req_char <- merged_data[,grep("Mean|mean|std",colnames(merged_data))]
### 3) Uses descriptive activity names to name the activities in the data set
activity_lables <- read.table("C:/Users/KRISHNAKUMAR/Documents/UCI HAR Dataset/activity_labels.txt")
merged_data$activity <- factor(merged_data$activity, labels = as.character(activity_lables$V2))
### 4) Appropriately labels the data set with descriptive variable names
### The column names are already added while building the individual data set (in previous steps)
### 5) From the data set in step 4, creates a second, independent tidy data set with the average
### of each variable for each activity and each subject.
merged_data <- merged_data[,grep("Mean|mean|std|activity|subject",colnames(merged_data))]
second_data <- merged_data %>% group_by(activity,subject) %>% summarise_all(funs(mean))
write.table(second_data, file = "./UCI HAR Dataset/tidydata.txt", row.names = FALSE, col.names = TRUE)
### i have used the grep for mean and std again in this step because if i have all the original variables
### (variables not having mean and std) i get error for non-unique column names for summarise_all function
| /UCI HAR Dataset/run_analysis.R | no_license | krish-94/Getting-and-cleaning-data-project-coursera | R | false | false | 2,416 | r | ### To get the column names of the data set to be built
features <- as.list(read.table("C:/Users/KRISHNAKUMAR/Documents/UCI HAR Dataset/features.txt"))
column_names <- c("activity", "subject", as.character(features$V2))
### To build the training data set
labl_train <- read.table("C:/Users/KRISHNAKUMAR/Documents/UCI HAR Dataset/train/y_train.txt")
subj_train <- read.table("C:/Users/KRISHNAKUMAR/Documents/UCI HAR Dataset/train/subject_train.txt")
data_train <- read.table("C:/Users/KRISHNAKUMAR/Documents/UCI HAR Dataset/train/X_train.txt")
train_data <- cbind(labl_train, subj_train, data_train)
colnames(train_data) <- column_names
### To build the test data set
labl_test <- read.table("C:/Users/KRISHNAKUMAR/Documents/UCI HAR Dataset/test/y_test.txt")
subj_test <- read.table("C:/Users/KRISHNAKUMAR/Documents/UCI HAR Dataset/test/subject_test.txt")
data_test <- read.table("C:/Users/KRISHNAKUMAR/Documents/UCI HAR Dataset/test/X_test.txt")
test_data <- cbind(labl_test, subj_test, data_test)
colnames(test_data) <- column_names
### 1) Merge test and training data as one data set
merged_data <- rbind(train_data, test_data)
### 2) Extracts only the measurements on the mean and standard deviation for each measurement
req_char <- merged_data[,grep("Mean|mean|std",colnames(merged_data))]
### 3) Uses descriptive activity names to name the activities in the data set
activity_lables <- read.table("C:/Users/KRISHNAKUMAR/Documents/UCI HAR Dataset/activity_labels.txt")
merged_data$activity <- factor(merged_data$activity, labels = as.character(activity_lables$V2))
### 4) Appropriately labels the data set with descriptive variable names
### The column names are already added while building the individual data set (in previous steps)
### 5) From the data set in step 4, creates a second, independent tidy data set with the average
### of each variable for each activity and each subject.
merged_data <- merged_data[,grep("Mean|mean|std|activity|subject",colnames(merged_data))]
second_data <- merged_data %>% group_by(activity,subject) %>% summarise_all(funs(mean))
write.table(second_data, file = "./UCI HAR Dataset/tidydata.txt", row.names = FALSE, col.names = TRUE)
### i have used the grep for mean and std again in this step because if i have all the original variables
### (variables not having mean and std) i get error for non-unique column names for summarise_all function
|
#1.6
pnorm(85,90,sqrt(18))
pnorm(90,90,sqrt(18))
pnorm(100,90,sqrt(18))
#6
57-qt(.975,24)*10/sqrt(25)
57+qt(.975,24)*10/sqrt(25) | /hw1.R | no_license | mpbrennan7/Nonparametric | R | false | false | 142 | r |
#1.6
# P(X < 85), P(X < 90), P(X < 100) for X ~ Normal(mean = 90, variance = 18).
pnorm(85,90,sqrt(18))
pnorm(90,90,sqrt(18))
pnorm(100,90,sqrt(18))
#6
# Lower bound of a 95% t confidence interval: xbar = 57, s = 10, n = 25 (df = 24).
57-qt(.975,24)*10/sqrt(25)
57+qt(.975,24)*10/sqrt(25) |
\name{dispersion}
\alias{dispersion,SeqCountSet-method}
\alias{dispersion<-,SeqCountSet,numeric-method}
\alias{dispersion}
\alias{dispersion<-}
\title{
Accessor functions for the 'dispersion' slot in a
SeqCountSet object.
}
\description{
Dispersion parameter for a gene represents its coefficient of
variation of expressions. It characterizes the biological
variations.
}
\usage{
\S4method{dispersion}{SeqCountSet}(object)
\S4method{dispersion}{SeqCountSet,numeric}(object) <- value
}
\arguments{
\item{object}{A SeqCountSet object.}
\item{value}{A numeric vector with the same length as number of genes.}
}
\details{
If the counts from biological replicates are modeled as negative
binomial distribution, the variance (v) and mean (m) should hold
following relationship: v=m+m^2*phi, where phi is the
dispersion. Another interpretation is that phi represents the
biological variations among replicates when underlying expressions are
modeled as a Gamma distribution.
}
\author{
Hao Wu <hao.wu@emory.edu>
}
\seealso{
normalizationFactor
}
\examples{
data(seqData)
## obtain
seqData=estNormFactors(seqData, "quantile")
seqData=estDispersion(seqData)
dispersion(seqData)
## assign
dispersion(seqData)=rep(0.1, nrow(exprs(seqData)))
}
| /man/dispersion.Rd | no_license | hmyh1202/DSS | R | false | false | 1,267 | rd | \name{dispersion}
\alias{dispersion,SeqCountSet-method}
\alias{dispersion<-,SeqCountSet,numeric-method}
\alias{dispersion}
\alias{dispersion<-}
\title{
Accessor functions for the 'dispersion' slot in a
SeqCountSet object.
}
\description{
Dispersion parameter for a gene represents its coefficient of
variation of expressions. It characterizes the biological
variations.
}
\usage{
\S4method{dispersion}{SeqCountSet}(object)
\S4method{dispersion}{SeqCountSet,numeric}(object) <- value
}
\arguments{
\item{object}{A SeqCountSet object.}
\item{value}{A numeric vector with the same length as number of genes.}
}
\details{
If the counts from biological replicates are modeled as negative
binomial distribution, the variance (v) and mean (m) should hold
following relationship: v=m+m^2*phi, where phi is the
dispersion. Another interpretation is that phi represents the
biological variations among replicates when underlying expressions are
modeled as a Gamma distribution.
}
\author{
Hao Wu <hao.wu@emory.edu>
}
\seealso{
normalizationFactor
}
\examples{
data(seqData)
## obtain
seqData=estNormFactors(seqData, "quantile")
seqData=estDispersion(seqData)
dispersion(seqData)
## assign
dispersion(seqData)=rep(0.1, nrow(exprs(seqData)))
}
|
# Data generator for a streaming-service database project: samples random data
# and writes SQL INSERT statements (and a few CSV/TXT files) to disk.
require("OpenRepGrid")
require(dplyr)
require(randomNames)
require("RPostgres")
require(data.table)
# Connection to the local "projekt" PostgreSQL database (password redacted).
con <- dbConnect(RPostgres::Postgres(), dbname = "projekt",
host = "localhost", port = 5432,
user = "postgres", pass = "**********")
#Kraje======================================================================================================================================
# Weighted pool of production countries (repetition encodes rough weights).
kraje <- c("Stany Zjednoczone","Stany Zjednoczone","Stany Zjednoczone","Stany Zjednoczone","Stany Zjednoczone","Stany Zjednoczone",
           "Wielka Brytania","Wielka Brytania","Wielka Brytania","Wielka Brytania",
           "Hiszpania","Hiszpania", "Francja","Francja","Francja", "Wlochy","Wlochy",
           "Niemcy","Niemcy", "Dania", "Hong Kong", "Japonia", "Polska", "Szwecja", "Szwecja")
# NOTE(review): il_seriali is only defined further down in this script, so this
# line relies on a value left over from an earlier interactive run -- confirm.
kraje_produkcji <- sample(kraje, il_seriali, replace = TRUE)
write.table(kraje_produkcji, file = "kraje_produkcji.csv", quote = FALSE, row.names = TRUE, col.names = FALSE, sep = ",")
#======================================================================================================================================
#Konta=====================================================================================================================================
# Generate 1000 unique accounts: random e-mail, 9-17 character password,
# signup date and plan id, emitted as INSERT statements into konta.txt.
email_platforms <- c("@gmail.com", "@onet.pl", "@wp.pl", "@o2.pl", "@yahoo.com")
# Password character pool: a-z, A-Z and the digits 1-9 (same set the original
# built via an integer vector that was overwritten piecewise).
znaki <- c(letters, LETTERS, as.character(1:9))
##
sink(file = "konta.txt")
mails <- character(1000)  # addresses used so far, to enforce uniqueness
for(i in 1:1000){
  email <- paste(randomWords(1), randomWords(1), sample(1:9, 1),
                 sample(email_platforms, 1, prob = c(0.6, 0.13, 0.13, 0.13, 0.01)),
                 sep = "")
  # Re-draw on a collision, with the same provider weights as the first draw
  # (the original dropped the prob argument here, skewing re-draws uniform).
  while(email %in% mails)
    email <- paste(randomWords(1), randomWords(1), sample(1:9, 1),
                   sample(email_platforms, 1, prob = c(0.6, 0.13, 0.13, 0.13, 0.01)),
                   sep = "")
  mails[i] <- email
  # BUG FIX: sample(8:16) returned a length-9 permutation, which is not a
  # valid 'size' argument for the next sample() call; draw one length instead.
  il_haslo <- sample(8:16, 1)
  # Password: il_haslo random characters plus a final digit.
  haslo <- paste(c(sample(znaki, il_haslo, replace = TRUE), sample(1:9, 1)), collapse = "")
  data <- sample(seq(as.Date('2013/01/01'), as.Date('2021/01/10'), by="day"), 1)
  id_planu <- sample(1:5, 1)
  cat(paste0("INSERT INTO konta(email, haslo, data_zalozenia, id_planu) VALUES ('", email, "','",
             haslo, "','", data, "','", id_planu, "');", "\n"))
}
sink()
##
#=====================================================================================================================================
## Draw the czy_dla_dzieci ("suitable for children") flag for every movie.
filmy.sql <- dbGetQuery(con, "SELECT * FROM produkcje WHERE czy_serial=FALSE;")
liczba_filmow <- nrow(filmy.sql)
# 0 with prob 0.2, 1 with prob 0.8 (80% of movies are marked child-friendly).
czy_dla_dzieci <- sample(c(0,1), liczba_filmow, replace = TRUE, prob=c(0.2, 0.8))
View(czy_dla_dzieci)
write.table(czy_dla_dzieci, file = "czy_dla_dzieci.txt", quote = FALSE, col.names = FALSE)
## Generate the odcinki (episodes) table: for each series draw a number of
## seasons (1-12, skewed low) and per season a number of episodes (1-24,
## skewed low); each episode gets a random running time and title.
seriale.sql <- dbGetQuery(con, "SELECT * FROM produkcje WHERE czy_serial = TRUE;")
id_ser <- seriale.sql$id_produkcji
il_seriali <- nrow(seriale.sql)
sink(file = "odcinki.txt")
for(i in 1:(il_seriali)){
  liczba_sezon <- sample(1:12, 1, prob = c(30, 20, 10:1), replace = TRUE)
  for(j in 1:liczba_sezon){
    liczba_odc <- sample(1:24, 1, prob = 24:1, replace = TRUE)
    for(k in 1:liczba_odc){
      # Running time "H:M:S" with hours weighted towards 1.
      czas_trwania <- paste(sample(0:2, 1, prob = c(0.4, 0.5, 0.1)), sample(0:59, 1, replace = TRUE), sample(0:59, 1, replace = TRUE), sep = ":")
      # Episode title: 2-4 random words joined by spaces.
      tytul <- paste0(as.character(randomWords(sample(2:4, 1, replace = TRUE))), collapse = " ")
      cat(paste0("INSERT INTO odcinki(id_produkcji, nr_sezonu, nr_odcinka, dlugosc_odcinka, tytul_odcinka) VALUES (", id_ser[i], ",",
                 j, ",", k, ",'", czas_trwania, "','",tytul, "');", "\n"))
    }
  }
}
sink()
## Populate w_kategorii: assign each production to 3 distinct random categories.
produkcje.sql <- dbGetQuery(con, "SELECT * FROM produkcje;")
kategorie.sql <- dbGetQuery(con, "SELECT * FROM kategorie;")
liczba_produkcji <- nrow(produkcje.sql)
liczba_kategorii <- nrow(kategorie.sql)
sink(file = "w_kategorii.txt")
for(i in 1:(liczba_produkcji)){
  # Three distinct category ids per production (replace = FALSE).
  a <- sample(1:liczba_kategorii, 3, replace = FALSE)
  cat(paste0("INSERT INTO w_kategorii(id_produkcji, id_kategorii) VALUES (", produkcje.sql$id_produkcji[i], ",", a[1], "); \n"))
  cat(paste0("INSERT INTO w_kategorii(id_produkcji, id_kategorii) VALUES (", produkcje.sql$id_produkcji[i], ",", a[2], "); \n"))
  cat(paste0("INSERT INTO w_kategorii(id_produkcji, id_kategorii) VALUES (", produkcje.sql$id_produkcji[i], ",", a[3], "); \n"))
}
sink()
## Generate users: each account gets 1..max_osob users; the first user on an
## account is always an adult (czy_dziecko = FALSE).
konta.sql <- dbGetQuery(con, "SELECT * FROM konta JOIN plany USING(id_planu);")
# View(konta.sql)
liczba_kont <- nrow(konta.sql)
sink(file = "uzytkownicy.txt")
for(i in 1:liczba_kont){
  max_uz <- konta.sql$max_osob[i]
  liczba <- sample(1:max_uz, 1) # how many users to create for this account
  # User name = random word + a digit; sample(9, liczba) draws without
  # replacement, so this assumes max_osob <= 9 -- confirm against plany.
  nazwa <- paste(randomWords(liczba), sample(9, liczba), sep="")
  # Length liczba + 1; only the first liczba entries are used below, and the
  # leading FALSE guarantees at least one adult per account.
  czy_dziecko <- c(FALSE, sample(c(TRUE, FALSE), liczba, replace = TRUE))
  for(j in 1:liczba){
    cat(paste0("INSERT INTO uzytkownicy(id_konta, nazwa, czy_dziecko) VALUES (", konta.sql$id_konta[i] , ",'", nazwa[j] , "', ", czy_dziecko[j] ,"); \n"))
  }
}
sink()
## Ratings: draw 100k (user, production) pairs, dedupe so each user rates a
## given production at most once, then draw a 1-10 rating peaked in the middle.
uzytkownicy.sql <- dbGetQuery(con, "SELECT * FROM uzytkownicy;")
produkcje.sql <- dbGetQuery(con, "SELECT * FROM produkcje;")
sink(file = "oceny.txt")
id_uz <- sample(uzytkownicy.sql$id_uzytkownika, 100000, replace = TRUE)
id_prod <- sample(produkcje.sql$id_produkcji, 100000, replace = TRUE)
a <- unique(cbind(id_uz, id_prod)) # a given user may rate a production only once
# View(nrow(a))
ocena <- sample(1:10, nrow(a), replace = TRUE, prob = c(1:5, 5:1))
# View(ocena)
# paste0() is vectorized over the rows of a, so one cat() emits every INSERT.
cat(paste0("INSERT INTO oceny(id_produkcji, id_uzytkownika, ocena) VALUES (", a[,2] , ",", a[,1] , ", ", ocena ,"); \n"))
sink()
##tabela komentarze
zdanie <- function(n) {
  # Build one n-word sentence: capitalize the first letter and append a
  # randomly chosen terminal punctuation mark, weighted 5:4:1 for . ! ?.
  sentence <- randomSentence(n)
  substr(sentence, 1, 1) <- toupper(substr(sentence, 1, 1))
  mark <- sample(c(".", "!", "?"), 1, prob = c(5, 4, 1))
  paste0(sentence, mark)
}
komentarz <- function(n) {
  # Build a comment of n sentences (3-8 words each); assumes n >= 1.
  # The original accumulated from "" with sep = " ", so the result keeps a
  # leading space before the first sentence -- preserved here.
  sentences <- vapply(
    seq_len(n),
    function(idx) zdanie(sample(3:8, 1)),
    character(1)
  )
  paste0(" ", paste(sentences, collapse = " "))
}
# Pull lookup tables once: productions, users (with account-creation date),
# and the episode -> production mapping.
produkcje.sql <- dbGetQuery(con, "SELECT * FROM produkcje;")
uzytkownicy.sql <- dbGetQuery(con, "SELECT u.id_uzytkownika, k.data_zalozenia
                              FROM uzytkownicy u
                              JOIN konta k USING(id_konta);")
odcinki_serialu.sql <- dbGetQuery(con, "SELECT id_odcinka, id_produkcji FROM odcinki JOIN produkcje USING(id_produkcji);")
# First-level comments -- the ones with no parent comment.
sink(file = "komentarze_pierwsze.txt")
for(i in 1:5000){
  ilosc_zdan <- sample(6, 1)   # 1-6 sentences
  tresc <- komentarz(ilosc_zdan)
  id_uz <- sample(uzytkownicy.sql$id_uzytkownika, 1)
  data_zalozenia <- uzytkownicy.sql[uzytkownicy.sql$id_uzytkownika==id_uz,]$data_zalozenia
  # A comment cannot predate the commenting user's account.
  data <- sample(seq(data_zalozenia, as.Date('2021/01/10'), by = "day"), 1)
  id_prod <- sample(produkcje.sql$id_produkcji, 1)
  if(produkcje.sql[produkcje.sql$id_produkcji==id_prod,]$czy_serial){
    # BUG FIX: sample(x, 1) on a length-1 numeric x samples from 1:x, which
    # fabricated episode ids for single-episode series; index explicitly.
    ep_ids <- odcinki_serialu.sql[odcinki_serialu.sql$id_produkcji == id_prod,]$id_odcinka
    id_odcinka <- ep_ids[sample(length(ep_ids), 1)]
  }
  else{
    id_odcinka <- "NULL"   # movies have no episode
  }
  cat(paste0("INSERT INTO komentarze(tresc, id_uzytkownika, data, id_produkcji, id_odcinka) VALUES
             ('", tresc , "',", id_uz , ", '", data ,"',", id_prod, ",", id_odcinka, "); \n"))
}
sink()
## Reply comments (level 2): each answers a randomly chosen existing comment
## and inherits its production/episode.
komentarze.sql <- dbGetQuery(con, "SELECT * FROM komentarze;")
sink(file="komentarze_drugie.txt")
for(i in 1:3000){
  id_pop_kom <- sample(komentarze.sql$id_komentarza, 1)
  # Look the parent row up once instead of filtering three times.
  parent <- komentarze.sql[komentarze.sql$id_komentarza == id_pop_kom, ]
  ilosc_zdan <- sample(6, 1)
  tresc <- komentarz(ilosc_zdan)
  id_uz <- sample(uzytkownicy.sql$id_uzytkownika, 1)
  data_zalozenia <- uzytkownicy.sql[uzytkownicy.sql$id_uzytkownika==id_uz,]$data_zalozenia
  data_pop_kom <- parent$data
  # BUG FIX: a reply must be dated no earlier than BOTH the parent comment and
  # the replying user's signup, so take the later of the two dates (the
  # original min() allowed replies dated before their parent comment).
  d <- max(data_pop_kom, data_zalozenia)
  data <- sample(seq(as.Date(d), as.Date('2021/01/10'), by = "day"), 1)
  id_prod <- parent$id_produkcji
  id_odcinka <- parent$id_odcinka
  if(is.na(id_odcinka)){
    id_odcinka <- "NULL"   # parent was a movie comment
  }
  cat(paste0("INSERT INTO komentarze(id_pop_kom, tresc, id_uzytkownika, data, id_produkcji, id_odcinka) VALUES
             (",id_pop_kom , ",'", tresc , "',", id_uz , ", '", data ,"',", id_prod, ",", id_odcinka, "); \n"))
}
sink()
## Reply comments, level 3 (same logic as level 2, re-run after the level-2
## rows were loaded so replies can target replies).
komentarze.sql <- dbGetQuery(con, "SELECT * FROM komentarze;")
sink(file="komentarze_trzecie.txt")
for(i in 1:2000){
  id_pop_kom <- sample(komentarze.sql$id_komentarza, 1)
  ilosc_zdan <- sample(6, 1)
  tresc <- komentarz(ilosc_zdan)
  id_uz <- sample(uzytkownicy.sql$id_uzytkownika, 1)
  data_zalozenia <- uzytkownicy.sql[uzytkownicy.sql$id_uzytkownika==id_uz,]$data_zalozenia
  data_pop_kom <- komentarze.sql[komentarze.sql$id_komentarza==id_pop_kom,]$data
  # NOTE(review): min() lets a reply be dated before its parent comment or
  # before the user's signup; max() looks like the intended bound -- confirm.
  d <- min(data_pop_kom, data_zalozenia)
  data <- sample(seq(as.Date(d), as.Date('2021/01/10'), by = "day"), 1)
  id_prod <- komentarze.sql[komentarze.sql$id_komentarza==id_pop_kom,]$id_produkcji
  id_odcinka <- komentarze.sql[komentarze.sql$id_komentarza==id_pop_kom,]$id_odcinka
  if(is.na(id_odcinka)){
    id_odcinka <- "NULL"
  }
  cat(paste0("INSERT INTO komentarze(id_pop_kom, tresc, id_uzytkownika, data, id_produkcji, id_odcinka) VALUES
             (",id_pop_kom , ",'", tresc , "',", id_uz , ", '", data ,"',", id_prod, ",", id_odcinka, "); \n"))
}
sink()
## Reply comments, level 4 (same logic again, re-run after the level-3 rows
## were loaded).
komentarze.sql <- dbGetQuery(con, "SELECT * FROM komentarze;")
sink(file="komentarze_czwarte.txt")
for(i in 1:2000){
  id_pop_kom <- sample(komentarze.sql$id_komentarza, 1)
  ilosc_zdan <- sample(6, 1)
  tresc <- komentarz(ilosc_zdan)
  id_uz <- sample(uzytkownicy.sql$id_uzytkownika, 1)
  data_zalozenia <- uzytkownicy.sql[uzytkownicy.sql$id_uzytkownika==id_uz,]$data_zalozenia
  data_pop_kom <- komentarze.sql[komentarze.sql$id_komentarza==id_pop_kom,]$data
  # NOTE(review): min() lets a reply be dated before its parent comment or
  # before the user's signup; max() looks like the intended bound -- confirm.
  d <- min(data_pop_kom, data_zalozenia)
  data <- sample(seq(as.Date(d), as.Date('2021/01/10'), by = "day"), 1)
  id_prod <- komentarze.sql[komentarze.sql$id_komentarza==id_pop_kom,]$id_produkcji
  id_odcinka <- komentarze.sql[komentarze.sql$id_komentarza==id_pop_kom,]$id_odcinka
  if(is.na(id_odcinka)){
    id_odcinka <- "NULL"
  }
  cat(paste0("INSERT INTO komentarze(id_pop_kom, tresc, id_uzytkownika, data, id_produkcji, id_odcinka) VALUES
             (",id_pop_kom , ",'", tresc , "',", id_uz , ", '", data ,"',", id_prod, ",", id_odcinka, "); \n"))
}
sink()
## Playbacks (odtworzenia): every user gets 1-20 random viewings, each stopped
## at a random moment within the item's running time. Uses data.table syntax.
id_uz.sql <- as.data.table(dbGetQuery(con, "SELECT id_uzytkownika FROM uzytkownicy;"))
liczba_uz <- nrow(id_uz.sql)
produkcje.sql <- as.data.table(dbGetQuery(con, "SELECT * FROM produkcje;"))
nrow(produkcje.sql)
odcinki_serialu.sql <- as.data.table(dbGetQuery(con, "SELECT id_odcinka, id_produkcji, dlugosc_odcinka FROM odcinki JOIN produkcje USING(id_produkcji);"))
start<-as.ITime("00:00:00")
sink(file="odtworzenia.txt")
for (i in 1:liczba_uz){
  id_uz <- id_uz.sql[i, id_uzytkownika]
  liczba_odtworzen <- sample(1:20, 1)
  for(j in 1:liczba_odtworzen){
    id_prod <- sample(produkcje.sql$id_produkcji, 1)
    if(produkcje.sql[id_produkcji == id_prod, czy_serial]){
      # NOTE(review): sample(x, 1) on a length-1 id vector samples from 1:x,
      # so single-episode series could get a fabricated episode id -- confirm.
      id_odc <- sample(odcinki_serialu.sql[id_produkcji == id_prod, id_odcinka], 1)
      # Stop moment: random second between 00:00:00 and the episode length.
      moment <- sample(seq((start), odcinki_serialu.sql[id_odcinka == id_odc ,dlugosc_odcinka]), 1)
    }
    else{
      moment <- sample(seq((start), produkcje.sql[id_produkcji == id_prod ,dlugosc_filmu]), 1)
      id_odc <- "NULL"
    }
    cat(paste0("INSERT INTO odtworzenia(id_uzytkownika, id_produkcji, moment_zatrzymania, id_odcinka) VALUES
               (", id_uz , ",", id_prod , ",'", moment , "', ", id_odc, "); \n"))
  }
}
sink()
## Payments: one row per account per month since signup.
konta.sql <- as.data.table(dbGetQuery(con, "SELECT * FROM konta JOIN plany USING(id_planu);"))
## the first month is free, hence the month loop starts at index 2 below
liczba_kont <- nrow(konta.sql)
liczba_kont
View(sum(konta.sql$data_zalozenia==Inf))
library(lubridate)
sink(file = "platnosci.txt")
for (i in 1:liczba_kont){
  id_k <- konta.sql[i, id_konta]
  kwota <- konta.sql[i, cena]
  dzien <- sample(28, 1)   # billing day-of-month; 1-28 so it exists every month
  data_zalozenia <- as.Date(konta.sql[i, data_zalozenia], "%Y-%m-%d")
  wektor_miesiecy <- seq.Date(data_zalozenia, as.Date('2021-01-10'), by="month")
  if(length(wektor_miesiecy) != 1){
    # NOTE(review): the inner loop reuses the index name i; harmless in R
    # (the outer for reassigns i each iteration) but confusing -- rename.
    for(i in 2:length(wektor_miesiecy)){
      data <- wektor_miesiecy[i]
      day(data) <- dzien   # lubridate: move the date onto the billing day
      cat(paste0("INSERT INTO platnosci(id_konta, data, kwota) VALUES(", id_k, ",'", data, "',", kwota , "); \n"))
    }
  }
}
sink()
| /Pliki_R'owe/los_proj.R | no_license | dmika1234/BazyDanychProjekt | R | false | false | 13,491 | r | require("OpenRepGrid")
require(dplyr)
require(randomNames)
require("RPostgres")
require(data.table)
con <- dbConnect(RPostgres::Postgres(), dbname = "projekt",
host = "localhost", port = 5432,
user = "postgres", pass = "**********")
#Kraje======================================================================================================================================
kraje <- c("Stany Zjednoczone","Stany Zjednoczone","Stany Zjednoczone","Stany Zjednoczone","Stany Zjednoczone","Stany Zjednoczone",
"Wielka Brytania","Wielka Brytania","Wielka Brytania","Wielka Brytania",
"Hiszpania","Hiszpania", "Francja","Francja","Francja", "Wlochy","Wlochy",
"Niemcy","Niemcy", "Dania", "Hong Kong", "Japonia", "Polska", "Szwecja", "Szwecja")
kraje_produkcji <- sample(kraje, il_seriali, replace = TRUE)
write.table(kraje_produkcji, file = "kraje_produkcji.csv", quote = FALSE, row.names = TRUE, col.names = FALSE, sep = ",")
#======================================================================================================================================
#Konta=====================================================================================================================================
# Generate 1000 unique accounts: random e-mail, 9-17 character password,
# signup date and plan id, emitted as INSERT statements into konta.txt.
email_platforms <- c("@gmail.com", "@onet.pl", "@wp.pl", "@o2.pl", "@yahoo.com")
# Password character pool: a-z, A-Z and the digits 1-9 (same set the original
# built via an integer vector that was overwritten piecewise).
znaki <- c(letters, LETTERS, as.character(1:9))
##
sink(file = "konta.txt")
mails <- character(1000)  # addresses used so far, to enforce uniqueness
for(i in 1:1000){
  email <- paste(randomWords(1), randomWords(1), sample(1:9, 1),
                 sample(email_platforms, 1, prob = c(0.6, 0.13, 0.13, 0.13, 0.01)),
                 sep = "")
  # Re-draw on a collision, with the same provider weights as the first draw
  # (the original dropped the prob argument here, skewing re-draws uniform).
  while(email %in% mails)
    email <- paste(randomWords(1), randomWords(1), sample(1:9, 1),
                   sample(email_platforms, 1, prob = c(0.6, 0.13, 0.13, 0.13, 0.01)),
                   sep = "")
  mails[i] <- email
  # BUG FIX: sample(8:16) returned a length-9 permutation, which is not a
  # valid 'size' argument for the next sample() call; draw one length instead.
  il_haslo <- sample(8:16, 1)
  # Password: il_haslo random characters plus a final digit.
  haslo <- paste(c(sample(znaki, il_haslo, replace = TRUE), sample(1:9, 1)), collapse = "")
  data <- sample(seq(as.Date('2013/01/01'), as.Date('2021/01/10'), by="day"), 1)
  id_planu <- sample(1:5, 1)
  cat(paste0("INSERT INTO konta(email, haslo, data_zalozenia, id_planu) VALUES ('", email, "','",
             haslo, "','", data, "','", id_planu, "');", "\n"))
}
sink()
##
#=====================================================================================================================================
##losowanie czy_dla_dzieci do tabeli produkcje dla filmów
filmy.sql <- dbGetQuery(con, "SELECT * FROM produkcje WHERE czy_serial=FALSE;")
liczba_filmow <- nrow(filmy.sql)
czy_dla_dzieci <- sample(c(0,1), liczba_filmow, replace = TRUE, prob=c(0.2, 0.8))
View(czy_dla_dzieci)
write.table(czy_dla_dzieci, file = "czy_dla_dzieci.txt", quote = FALSE, col.names = FALSE)
##losowanie tabeli odcinki
seriale.sql <- dbGetQuery(con, "SELECT * FROM produkcje WHERE czy_serial = TRUE;")
id_ser <- seriale.sql$id_produkcji
il_seriali <- nrow(seriale.sql)
sink(file = "odcinki.txt")
for(i in 1:(il_seriali)){
liczba_sezon <- sample(1:12, 1, prob = c(30, 20, 10:1), replace = TRUE)
for(j in 1:liczba_sezon){
liczba_odc <- sample(1:24, 1, prob = 24:1, replace = TRUE)
for(k in 1:liczba_odc){
czas_trwania <- paste(sample(0:2, 1, prob = c(0.4, 0.5, 0.1)), sample(0:59, 1, replace = TRUE), sample(0:59, 1, replace = TRUE), sep = ":")
tytul <- paste0(as.character(randomWords(sample(2:4, 1, replace = TRUE))), collapse = " ")
cat(paste0("INSERT INTO odcinki(id_produkcji, nr_sezonu, nr_odcinka, dlugosc_odcinka, tytul_odcinka) VALUES (", id_ser[i], ",",
j, ",", k, ",'", czas_trwania, "','",tytul, "');", "\n"))
}
}
}
sink()
##losowanie tabeli w_kategorii
produkcje.sql <- dbGetQuery(con, "SELECT * FROM produkcje;")
kategorie.sql <- dbGetQuery(con, "SELECT * FROM kategorie;")
liczba_produkcji <- nrow(produkcje.sql)
liczba_kategorii <- nrow(kategorie.sql)
sink(file = "w_kategorii.txt")
for(i in 1:(liczba_produkcji)){
a <- sample(1:liczba_kategorii, 3, replace = FALSE)
cat(paste0("INSERT INTO w_kategorii(id_produkcji, id_kategorii) VALUES (", produkcje.sql$id_produkcji[i], ",", a[1], "); \n"))
cat(paste0("INSERT INTO w_kategorii(id_produkcji, id_kategorii) VALUES (", produkcje.sql$id_produkcji[i], ",", a[2], "); \n"))
cat(paste0("INSERT INTO w_kategorii(id_produkcji, id_kategorii) VALUES (", produkcje.sql$id_produkcji[i], ",", a[3], "); \n"))
}
sink()
##losowanie użytkowników
konta.sql <- dbGetQuery(con, "SELECT * FROM konta JOIN plany USING(id_planu);")
# View(konta.sql)
liczba_kont <- nrow(konta.sql)
sink(file = "uzytkownicy.txt")
for(i in 1:liczba_kont){
max_uz <- konta.sql$max_osob[i]
liczba <- sample(1:max_uz, 1) #losujemy ilu użytkowników wylosować
nazwa <- paste(randomWords(liczba), sample(9, liczba), sep="")
czy_dziecko <- c(FALSE, sample(c(TRUE, FALSE), liczba, replace = TRUE))
#bo chcemy choć jednego dorosłego
for(j in 1:liczba){
cat(paste0("INSERT INTO uzytkownicy(id_konta, nazwa, czy_dziecko) VALUES (", konta.sql$id_konta[i] , ",'", nazwa[j] , "', ", czy_dziecko[j] ,"); \n"))
}
}
sink()
##tabela oceny
uzytkownicy.sql <- dbGetQuery(con, "SELECT * FROM uzytkownicy;")
produkcje.sql <- dbGetQuery(con, "SELECT * FROM produkcje;")
sink(file = "oceny.txt")
id_uz <- sample(uzytkownicy.sql$id_uzytkownika, 100000, replace = TRUE)
id_prod <- sample(produkcje.sql$id_produkcji, 100000, replace = TRUE)
a <- unique(cbind(id_uz, id_prod)) #dany użytkownik może ocenić daną produkcję tylko raz
# View(nrow(a))
ocena <- sample(1:10, nrow(a), replace = TRUE, prob = c(1:5, 5:1))
# View(ocena)
cat(paste0("INSERT INTO oceny(id_produkcji, id_uzytkownika, ocena) VALUES (", a[,2] , ",", a[,1] , ", ", ocena ,"); \n"))
sink()
##tabela komentarze
zdanie <- function(n) {
  # Build one n-word sentence: capitalize the first letter and append a
  # randomly chosen terminal punctuation mark, weighted 5:4:1 for . ! ?.
  sentence <- randomSentence(n)
  substr(sentence, 1, 1) <- toupper(substr(sentence, 1, 1))
  mark <- sample(c(".", "!", "?"), 1, prob = c(5, 4, 1))
  paste0(sentence, mark)
}
komentarz <- function(n) {
  # Build a comment of n sentences (3-8 words each); assumes n >= 1.
  # The original accumulated from "" with sep = " ", so the result keeps a
  # leading space before the first sentence -- preserved here.
  sentences <- vapply(
    seq_len(n),
    function(idx) zdanie(sample(3:8, 1)),
    character(1)
  )
  paste0(" ", paste(sentences, collapse = " "))
}
# Pull lookup tables once: productions, users (with account-creation date),
# and the episode -> production mapping.
produkcje.sql <- dbGetQuery(con, "SELECT * FROM produkcje;")
uzytkownicy.sql <- dbGetQuery(con, "SELECT u.id_uzytkownika, k.data_zalozenia
                              FROM uzytkownicy u
                              JOIN konta k USING(id_konta);")
odcinki_serialu.sql <- dbGetQuery(con, "SELECT id_odcinka, id_produkcji FROM odcinki JOIN produkcje USING(id_produkcji);")
# First-level comments -- the ones with no parent comment.
sink(file = "komentarze_pierwsze.txt")
for(i in 1:5000){
  ilosc_zdan <- sample(6, 1)   # 1-6 sentences
  tresc <- komentarz(ilosc_zdan)
  id_uz <- sample(uzytkownicy.sql$id_uzytkownika, 1)
  data_zalozenia <- uzytkownicy.sql[uzytkownicy.sql$id_uzytkownika==id_uz,]$data_zalozenia
  # A comment cannot predate the commenting user's account.
  data <- sample(seq(data_zalozenia, as.Date('2021/01/10'), by = "day"), 1)
  id_prod <- sample(produkcje.sql$id_produkcji, 1)
  if(produkcje.sql[produkcje.sql$id_produkcji==id_prod,]$czy_serial){
    # BUG FIX: sample(x, 1) on a length-1 numeric x samples from 1:x, which
    # fabricated episode ids for single-episode series; index explicitly.
    ep_ids <- odcinki_serialu.sql[odcinki_serialu.sql$id_produkcji == id_prod,]$id_odcinka
    id_odcinka <- ep_ids[sample(length(ep_ids), 1)]
  }
  else{
    id_odcinka <- "NULL"   # movies have no episode
  }
  cat(paste0("INSERT INTO komentarze(tresc, id_uzytkownika, data, id_produkcji, id_odcinka) VALUES
             ('", tresc , "',", id_uz , ", '", data ,"',", id_prod, ",", id_odcinka, "); \n"))
}
sink()
## Reply comments (level 2): each answers a randomly chosen existing comment
## and inherits its production/episode.
komentarze.sql <- dbGetQuery(con, "SELECT * FROM komentarze;")
sink(file="komentarze_drugie.txt")
for(i in 1:3000){
  id_pop_kom <- sample(komentarze.sql$id_komentarza, 1)
  # Look the parent row up once instead of filtering three times.
  parent <- komentarze.sql[komentarze.sql$id_komentarza == id_pop_kom, ]
  ilosc_zdan <- sample(6, 1)
  tresc <- komentarz(ilosc_zdan)
  id_uz <- sample(uzytkownicy.sql$id_uzytkownika, 1)
  data_zalozenia <- uzytkownicy.sql[uzytkownicy.sql$id_uzytkownika==id_uz,]$data_zalozenia
  data_pop_kom <- parent$data
  # BUG FIX: a reply must be dated no earlier than BOTH the parent comment and
  # the replying user's signup, so take the later of the two dates (the
  # original min() allowed replies dated before their parent comment).
  d <- max(data_pop_kom, data_zalozenia)
  data <- sample(seq(as.Date(d), as.Date('2021/01/10'), by = "day"), 1)
  id_prod <- parent$id_produkcji
  id_odcinka <- parent$id_odcinka
  if(is.na(id_odcinka)){
    id_odcinka <- "NULL"   # parent was a movie comment
  }
  cat(paste0("INSERT INTO komentarze(id_pop_kom, tresc, id_uzytkownika, data, id_produkcji, id_odcinka) VALUES
             (",id_pop_kom , ",'", tresc , "',", id_uz , ", '", data ,"',", id_prod, ",", id_odcinka, "); \n"))
}
sink()
##komenatarze odpowiadające
komentarze.sql <- dbGetQuery(con, "SELECT * FROM komentarze;")
sink(file="komentarze_trzecie.txt")
for(i in 1:2000){
id_pop_kom <- sample(komentarze.sql$id_komentarza, 1)
ilosc_zdan <- sample(6, 1)
tresc <- komentarz(ilosc_zdan)
id_uz <- sample(uzytkownicy.sql$id_uzytkownika, 1)
data_zalozenia <- uzytkownicy.sql[uzytkownicy.sql$id_uzytkownika==id_uz,]$data_zalozenia
data_pop_kom <- komentarze.sql[komentarze.sql$id_komentarza==id_pop_kom,]$data
d <- min(data_pop_kom, data_zalozenia)
data <- sample(seq(as.Date(d), as.Date('2021/01/10'), by = "day"), 1)
id_prod <- komentarze.sql[komentarze.sql$id_komentarza==id_pop_kom,]$id_produkcji
id_odcinka <- komentarze.sql[komentarze.sql$id_komentarza==id_pop_kom,]$id_odcinka
if(is.na(id_odcinka)){
id_odcinka <- "NULL"
}
cat(paste0("INSERT INTO komentarze(id_pop_kom, tresc, id_uzytkownika, data, id_produkcji, id_odcinka) VALUES
(",id_pop_kom , ",'", tresc , "',", id_uz , ", '", data ,"',", id_prod, ",", id_odcinka, "); \n"))
}
sink()
##komenatarze odpowiadające
komentarze.sql <- dbGetQuery(con, "SELECT * FROM komentarze;")
sink(file="komentarze_czwarte.txt")
for(i in 1:2000){
id_pop_kom <- sample(komentarze.sql$id_komentarza, 1)
ilosc_zdan <- sample(6, 1)
tresc <- komentarz(ilosc_zdan)
id_uz <- sample(uzytkownicy.sql$id_uzytkownika, 1)
data_zalozenia <- uzytkownicy.sql[uzytkownicy.sql$id_uzytkownika==id_uz,]$data_zalozenia
data_pop_kom <- komentarze.sql[komentarze.sql$id_komentarza==id_pop_kom,]$data
d <- min(data_pop_kom, data_zalozenia)
data <- sample(seq(as.Date(d), as.Date('2021/01/10'), by = "day"), 1)
id_prod <- komentarze.sql[komentarze.sql$id_komentarza==id_pop_kom,]$id_produkcji
id_odcinka <- komentarze.sql[komentarze.sql$id_komentarza==id_pop_kom,]$id_odcinka
if(is.na(id_odcinka)){
id_odcinka <- "NULL"
}
cat(paste0("INSERT INTO komentarze(id_pop_kom, tresc, id_uzytkownika, data, id_produkcji, id_odcinka) VALUES
(",id_pop_kom , ",'", tresc , "',", id_uz , ", '", data ,"',", id_prod, ",", id_odcinka, "); \n"))
}
sink()
## odtworzenia
id_uz.sql <- as.data.table(dbGetQuery(con, "SELECT id_uzytkownika FROM uzytkownicy;"))
liczba_uz <- nrow(id_uz.sql)
produkcje.sql <- as.data.table(dbGetQuery(con, "SELECT * FROM produkcje;"))
nrow(produkcje.sql)
odcinki_serialu.sql <- as.data.table(dbGetQuery(con, "SELECT id_odcinka, id_produkcji, dlugosc_odcinka FROM odcinki JOIN produkcje USING(id_produkcji);"))
start<-as.ITime("00:00:00")
sink(file="odtworzenia.txt")
for (i in 1:liczba_uz){
id_uz <- id_uz.sql[i, id_uzytkownika]
liczba_odtworzen <- sample(1:20, 1)
for(j in 1:liczba_odtworzen){
id_prod <- sample(produkcje.sql$id_produkcji, 1)
if(produkcje.sql[id_produkcji == id_prod, czy_serial]){
id_odc <- sample(odcinki_serialu.sql[id_produkcji == id_prod, id_odcinka], 1)
moment <- sample(seq((start), odcinki_serialu.sql[id_odcinka == id_odc ,dlugosc_odcinka]), 1)
}
else{
moment <- sample(seq((start), produkcje.sql[id_produkcji == id_prod ,dlugosc_filmu]), 1)
id_odc <- "NULL"
}
cat(paste0("INSERT INTO odtworzenia(id_uzytkownika, id_produkcji, moment_zatrzymania, id_odcinka) VALUES
(", id_uz , ",", id_prod , ",'", moment , "', ", id_odc, "); \n"))
}
}
sink()
##losowanie płatności
konta.sql <- as.data.table(dbGetQuery(con, "SELECT * FROM konta JOIN plany USING(id_planu);"))
## pierwszy miesiąc nie płaci
liczba_kont <- nrow(konta.sql)
liczba_kont
View(sum(konta.sql$data_zalozenia==Inf))
library(lubridate)
sink(file = "platnosci.txt")
for (i in 1:liczba_kont){
id_k <- konta.sql[i, id_konta]
kwota <- konta.sql[i, cena]
dzien <- sample(28, 1)
data_zalozenia <- as.Date(konta.sql[i, data_zalozenia], "%Y-%m-%d")
wektor_miesiecy <- seq.Date(data_zalozenia, as.Date('2021-01-10'), by="month")
if(length(wektor_miesiecy) != 1){
for(i in 2:length(wektor_miesiecy)){
data <- wektor_miesiecy[i]
day(data) <- dzien
cat(paste0("INSERT INTO platnosci(id_konta, data, kwota) VALUES(", id_k, ",'", data, "',", kwota , "); \n"))
}
}
}
sink()
|
# Compute the mean pairwise Manhattan distance of the row vectors in a CSV
# file (first column dropped as an id) and print
# "<tool>,<cluster>,<distance>,<size>" to stdout.
args<-commandArgs(TRUE)
vectr<-read.csv(args[1], header = FALSE, sep = ",")[,-1]
size<-nrow(vectr)
# A single row has no pairwise distance; substitute a fixed reference pair
# (all 3s vs all 0s) so dist() still yields a value.
if (size == 1) {
  three <- rep(3,ncol(vectr))
  zero <- rep(0,ncol(vectr))
  vectr <- rbind(three, zero)
}
# File name is expected to look like "<tool>_<cluster>[_...]".
tool<-basename(args[1])
split<-strsplit(tool, "_")
# NOTE(review): the commented-out line below says "euclidian", but the metric
# actually used here is Manhattan distance.
distance<-mean(dist(vectr, method = "manhattan"))
#write(paste0(split[[1]][1], " cluster ",split[[1]][2] ," euclidian middle distance ",rating),file=args[1],append=TRUE)
cat(paste0(split[[1]][1],",",split[[1]][2],",",distance,",",size,'\n'))
| /Cluster/Scripts/rating.R | no_license | ahenoch/Masterproject | R | false | false | 503 | r | args<-commandArgs(TRUE)
# Mean pairwise Manhattan distance of the CSV's row vectors; prints
# "<tool>,<cluster>,<distance>,<size>". (args <- commandArgs(TRUE) is set on
# the preceding line, which is truncated in this view.)
vectr<-read.csv(args[1], header = FALSE, sep = ",")[,-1]
size<-nrow(vectr)
# A single row has no pairwise distance; substitute a fixed reference pair
# (all 3s vs all 0s) so dist() still yields a value.
if (size == 1) {
  three <- rep(3,ncol(vectr))
  zero <- rep(0,ncol(vectr))
  vectr <- rbind(three, zero)
}
# File name is expected to look like "<tool>_<cluster>[_...]".
tool<-basename(args[1])
split<-strsplit(tool, "_")
# NOTE(review): the commented-out line below says "euclidian", but the metric
# actually used here is Manhattan distance.
distance<-mean(dist(vectr, method = "manhattan"))
#write(paste0(split[[1]][1], " cluster ",split[[1]][2] ," euclidian middle distance ",rating),file=args[1],append=TRUE)
cat(paste0(split[[1]][1],",",split[[1]][2],",",distance,",",size,'\n'))
|
# Unit 6 - Introduction to Clustering
# Video 6
# After following the steps in the video, load the data into R
# MovieLens 100k item file: pipe-separated, one row per movie.
movies = read.table("http://files.grouplens.org/datasets/movielens/ml-100k/u.item", header=FALSE, sep="|",quote="\"")
str(movies)
# Add column names (id, title, metadata, then 19 binary genre flags)
colnames(movies) = c("ID", "Title", "ReleaseDate", "VideoReleaseDate", "IMDB", "Unknown", "Action", "Adventure", "Animation", "Childrens", "Comedy", "Crime", "Documentary", "Drama", "Fantasy", "FilmNoir", "Horror", "Musical", "Mystery", "Romance", "SciFi", "Thriller", "War", "Western")
str(movies)
# Remove unnecessary variables
movies$ID = NULL
movies$ReleaseDate = NULL
movies$VideoReleaseDate = NULL
movies$IMDB = NULL
# Remove duplicates
movies = unique(movies)
# Take a look at our data again:
str(movies)
# Video 7
# Compute all pairwise distances between movies on the 19 genre dummies.
distances = dist(movies[2:20], method = "euclidean")
# Hierarchical clustering with Ward's minimum-variance criterion.
# FIX: method = "ward" was renamed "ward.D" in R 3.1.0; the old name emits a
# deprecation warning and is mapped to "ward.D" anyway, so results are identical.
clusterMovies = hclust(distances, method = "ward.D")
# Plot the dendrogram
plot(clusterMovies)
# Cut the tree into 10 clusters and assign each movie to one.
clusterGroups = cutree(clusterMovies, k = 10)
#Now let's figure out what the clusters are like.
# Let's use the tapply function to compute the percentage of movies in each genre and cluster
tapply(movies$Action, clusterGroups, mean)
tapply(movies$Romance, clusterGroups, mean)
# We can repeat this for each genre. If you do, you get the results in ClusterMeans.ods
# Find which cluster Men in Black is in.
subset(movies, Title=="Men in Black (1997)")
# Row 257 is Men in Black; look up its cluster assignment.
clusterGroups[257]
# Create a new data set with just the movies from cluster 2
cluster2 = subset(movies, clusterGroups==2)
# Look at the first 10 titles in this cluster:
cluster2$Title[1:10]
#
#AN ADVANCED APPROACH TO FINDING CLUSTER CENTROIDS
#
#In this video, we explain how you can find the cluster centroids by using the function "tapply" for each variable in the dataset. While this approach works and is familiar to us, it can be a little tedious when there are a lot of variables. An alternative approach is to use the colMeans function. With this approach, you only have one command for each cluster instead of one command for each variable. If you run the following command in your R console, you can get all of the column (variable) means for cluster 1:
#
#colMeans(subset(movies[2:20], clusterGroups == 1))
#
#You can repeat this for each cluster by changing the clusterGroups number. However, if you also have a lot of clusters, this approach is not that much more efficient than just using the tapply function.
#
#A more advanced approach uses the "split" and "lapply" functions. The following command will split the data into subsets based on the clusters:
#
#spl = split(movies[2:20], clusterGroups)
#
#Then you can use spl to access the different clusters, because
#
#spl[[1]]
#
#is the same as
#
#subset(movies[2:20], clusterGroups == 1)
#
#so colMeans(spl[[1]]) will output the centroid of cluster 1. But an even easier approach uses the lapply function. The following command will output the cluster centroids for all clusters:
#
#lapply(spl, colMeans)
#
#The lapply function runs the second argument (colMeans) on each element of the first argument (each cluster subset in spl). So instead of using 19 tapply commands, or 10 colMeans commands, we can output our centroids with just two commands: one to define spl, and then the lapply command.
#
#Note that if you have a variable called "split" in your current R session, you will need to remove it with rm(split) so that you can use the split function.
| /Unit6 Clustering/Unit6_Netflix.R | no_license | jpalbino/-AnalyticsEdgeMITx | R | false | false | 3,504 | r | # Unit 6 - Introduction to Clustering
# Video 6
# After following the steps in the video, load the data into R
movies = read.table("http://files.grouplens.org/datasets/movielens/ml-100k/u.item", header=FALSE, sep="|",quote="\"")
str(movies)
# Add column names
colnames(movies) = c("ID", "Title", "ReleaseDate", "VideoReleaseDate", "IMDB", "Unknown", "Action", "Adventure", "Animation", "Childrens", "Comedy", "Crime", "Documentary", "Drama", "Fantasy", "FilmNoir", "Horror", "Musical", "Mystery", "Romance", "SciFi", "Thriller", "War", "Western")
str(movies)
# Remove unnecessary variables
movies$ID = NULL
movies$ReleaseDate = NULL
movies$VideoReleaseDate = NULL
movies$IMDB = NULL
# Remove duplicates
movies = unique(movies)
# Take a look at our data again:
str(movies)
# Video 7
# Compute distances
distances = dist(movies[2:20], method = "euclidean")
# Hierarchical clustering
clusterMovies = hclust(distances, method = "ward")
# Plot the dendrogram
plot(clusterMovies)
# Assign points to clusters
clusterGroups = cutree(clusterMovies, k = 10)
#Now let's figure out what the clusters are like.
# Let's use the tapply function to compute the percentage of movies in each genre and cluster
tapply(movies$Action, clusterGroups, mean)
tapply(movies$Romance, clusterGroups, mean)
# We can repeat this for each genre. If you do, you get the results in ClusterMeans.ods
# Find which cluster Men in Black is in.
subset(movies, Title=="Men in Black (1997)")
clusterGroups[257]
# Create a new data set with just the movies from cluster 2
cluster2 = subset(movies, clusterGroups==2)
# Look at the first 10 titles in this cluster:
cluster2$Title[1:10]
#
#AN ADVANCED APPROACH TO FINDING CLUSTER CENTROIDS
#
#In this video, we explain how you can find the cluster centroids by using the function "tapply" for each variable in the dataset. While this approach works and is familiar to us, it can be a little tedious when there are a lot of variables. An alternative approach is to use the colMeans function. With this approach, you only have one command for each cluster instead of one command for each variable. If you run the following command in your R console, you can get all of the column (variable) means for cluster 1:
#
#colMeans(subset(movies[2:20], clusterGroups == 1))
#
#You can repeat this for each cluster by changing the clusterGroups number. However, if you also have a lot of clusters, this approach is not that much more efficient than just using the tapply function.
#
#A more advanced approach uses the "split" and "lapply" functions. The following command will split the data into subsets based on the clusters:
#
#spl = split(movies[2:20], clusterGroups)
#
#Then you can use spl to access the different clusters, because
#
#spl[[1]]
#
#is the same as
#
#subset(movies[2:20], clusterGroups == 1)
#
#so colMeans(spl[[1]]) will output the centroid of cluster 1. But an even easier approach uses the lapply function. The following command will output the cluster centroids for all clusters:
#
#lapply(spl, colMeans)
#
#The lapply function runs the second argument (colMeans) on each element of the first argument (each cluster subset in spl). So instead of using 19 tapply commands, or 10 colMeans commands, we can output our centroids with just two commands: one to define spl, and then the lapply command.
#
#Note that if you have a variable called "split" in your current R session, you will need to remove it with rm(split) so that you can use the split function.
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/track_methods.R
\name{helper}
\alias{helper}
\alias{coords}
\alias{coords.track_xy}
\alias{make_trast}
\alias{make_trast.track_xy}
\alias{extent_x}
\alias{extent_x.track_xy}
\alias{extent_y}
\alias{extent_y.track_xy}
\alias{extent_both}
\alias{extent_both.track_xy}
\alias{extent_max}
\alias{extent_max.track_xy}
\alias{range_x}
\alias{range_x.track_xy}
\alias{range_y}
\alias{range_y.track_xy}
\alias{range_both}
\alias{range_both.track_xy}
\title{Coordinates of a track.}
\usage{
coords(x, ...)
\method{coords}{track_xy}(x, ...)
make_trast(x, ...)
\method{make_trast}{track_xy}(x, factor = 1.5,
res = max(c(extent_max(x)/100, 1e-09)), ...)
extent_x(x, ...)
\method{extent_x}{track_xy}(x, ...)
extent_y(x, ...)
\method{extent_y}{track_xy}(x, ...)
extent_both(x, ...)
\method{extent_both}{track_xy}(x, ...)
extent_max(x, ...)
\method{extent_max}{track_xy}(x, ...)
range_x(x, ...)
\method{range_x}{track_xy}(x, ...)
range_y(x, ...)
\method{range_y}{track_xy}(x, ...)
range_both(x, ...)
\method{range_both}{track_xy}(x, ...)
}
\arguments{
\item{x}{\code{[track_xy, track_xyt]} \cr A track created with \code{make_track}.}
\item{...}{Further arguments, none implemented.}
\item{factor}{\code{[numeric(1)=1.5]{>= 1}}\cr Factor by which the extent of the relocations is extended.}
\item{res}{\code{[numeric(1)]}\cr Resolution of the output raster.}
}
\value{
\code{[tibble]} \cr The coordinates.
}
\description{
Coordinates of a track.
}
\examples{
data(deer)
coords(deer)
}
| /man/helper.Rd | no_license | nmasto/amt | R | false | true | 1,569 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/track_methods.R
\name{helper}
\alias{helper}
\alias{coords}
\alias{coords.track_xy}
\alias{make_trast}
\alias{make_trast.track_xy}
\alias{extent_x}
\alias{extent_x.track_xy}
\alias{extent_y}
\alias{extent_y.track_xy}
\alias{extent_both}
\alias{extent_both.track_xy}
\alias{extent_max}
\alias{extent_max.track_xy}
\alias{range_x}
\alias{range_x.track_xy}
\alias{range_y}
\alias{range_y.track_xy}
\alias{range_both}
\alias{range_both.track_xy}
\title{Coordinates of a track.}
\usage{
coords(x, ...)
\method{coords}{track_xy}(x, ...)
make_trast(x, ...)
\method{make_trast}{track_xy}(x, factor = 1.5,
res = max(c(extent_max(x)/100, 1e-09)), ...)
extent_x(x, ...)
\method{extent_x}{track_xy}(x, ...)
extent_y(x, ...)
\method{extent_y}{track_xy}(x, ...)
extent_both(x, ...)
\method{extent_both}{track_xy}(x, ...)
extent_max(x, ...)
\method{extent_max}{track_xy}(x, ...)
range_x(x, ...)
\method{range_x}{track_xy}(x, ...)
range_y(x, ...)
\method{range_y}{track_xy}(x, ...)
range_both(x, ...)
\method{range_both}{track_xy}(x, ...)
}
\arguments{
\item{x}{\code{[track_xy, track_xyt]} \cr A track created with \code{make_track}.}
\item{...}{Further arguments, none implemented.}
\item{factor}{\code{[numeric(1)=1.5]{>= 1}}\cr Factor by which the extent of the relocations is extended.}
\item{res}{\code{[numeric(1)]}\cr Resolution of the output raster.}
}
\value{
\code{[tibble]} \cr The coordinates.
}
\description{
Coordinates of a track.
}
\examples{
data(deer)
coords(deer)
}
|
/final/3loop.R | no_license | Magellen/-gene-project | R | false | false | 2,463 | r | ||
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/expand_inputs.R
\name{add_years}
\alias{add_years}
\title{Add a specified number of years to a year}
\usage{
add_years(yyyy, n)
}
\arguments{
\item{yyyy}{year in YYYY format}
\item{n}{number of years to add}
}
\value{
yyyy + n
}
\description{
Add a specified number of years to a year
}
| /wsim.io/man/add_years.Rd | permissive | isciences/wsim | R | false | true | 366 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/expand_inputs.R
\name{add_years}
\alias{add_years}
\title{Add a specified number of years to a year}
\usage{
add_years(yyyy, n)
}
\arguments{
\item{yyyy}{year in YYYY format}
\item{n}{number of years to add}
}
\value{
yyyy + n
}
\description{
Add a specified number of years to a year
}
|
#TP2 SGD | /COURS ET TP STAT EN GRDE DIMENSION/TP2 SGD.R | no_license | komiagblodoe/M2-SSD | R | false | false | 10 | r |
#TP2 SGD |
## this script will take the data for every year for a single category and place it in a
## a new folder as a tar.gz file
root.source = "K:/Academic Dataset External/"
root.target = "D:/temp/IRI_DATA/sales/"
root.target.win = "D:\\temp\\IRI_DATA\\sales\\beer"
setwd(root.source)
category.files = dir("./parsed stub files 2007")
categories = gsub("prod_|.xlsx","",category.files)
for (category in categories[1])
{
cat.files = character(0)
for (yr in 1:7)
{
pth = paste(getwd(),"/Year", yr, "/External/", category, sep = "")
cat.files = c(cat.files,(dir(pth, pattern = "drug|groc", full.names = TRUE)))
}
print(cat.files)
tar("D:/Temp/cat.tar.gz",cat.files)
}
yr.files
category = "beer"
dir.create(file.path(root.target, category))
?gzfile
# Compress one raw IRI data file into the target folder via the external
# 7-Zip command.
#
# src.file: file name relative to the current working directory.
# yr:       year index, used only when building out.path below.
# channel:  channel label ("groc"/"drug"), used only for out.path below.
#
# Relies on globals defined elsewhere in this script: root.target,
# root.target.win and category.
f_file.tar.gz = function(src.file, yr, channel) {
src.file = paste(getwd(),src.file,sep="/")
if (file.exists(src.file)) {
# NOTE(review): out.path is computed but never used -- the system() call
# below writes into root.target.win instead. Presumably the archive was
# meant to be named yr<N>_<channel>.tgz; confirm the intended output path.
out.path <- paste(root.target, category, "/", "yr", yr, "_", channel, ".tgz", sep="")
# Echo the source path with Windows-style backslash separators.
print(gsub("/","\\\\",src.file))
#tar(tarfile, src.file, compression='gzip')
system(paste("7z a -tgzip ", gsub("/","\\\\",src.file), " -o", root.target.win))
}
}
for (yr in 1:1) {
pth.raw = paste(root.source, "Year", yr, "/External/", category, "/", sep="")
setwd(pth.raw)
# grocery file
pat1 = paste(category,"groc", sep = "_")
fil = dir(pattern = c(pat1))
f_file.tar.gz(fil, yr, "groc")
# drug file
pat2 = paste(category,"drug", sep = "_")
fil = dir(pattern = c(pat2))
f_file.tar.gz(fil, yr, "drug")
}
?channel
#
#
getwd()
| /DataPrep/Archived/00_Zip_Raw.R | no_license | wellermatt/exp1.1 | R | false | false | 1,512 | r |
## this script will take the data for every year for a single category and place it in a
## a new folder as a tar.gz file
root.source = "K:/Academic Dataset External/"
root.target = "D:/temp/IRI_DATA/sales/"
root.target.win = "D:\\temp\\IRI_DATA\\sales\\beer"
setwd(root.source)
category.files = dir("./parsed stub files 2007")
categories = gsub("prod_|.xlsx","",category.files)
for (category in categories[1])
{
cat.files = character(0)
for (yr in 1:7)
{
pth = paste(getwd(),"/Year", yr, "/External/", category, sep = "")
cat.files = c(cat.files,(dir(pth, pattern = "drug|groc", full.names = TRUE)))
}
print(cat.files)
tar("D:/Temp/cat.tar.gz",cat.files)
}
yr.files
category = "beer"
dir.create(file.path(root.target, category))
?gzfile
# Compress one raw IRI data file into the target folder via the external
# 7-Zip command.
#
# src.file: file name relative to the current working directory.
# yr:       year index, used only when building out.path below.
# channel:  channel label ("groc"/"drug"), used only for out.path below.
#
# Relies on globals defined elsewhere in this script: root.target,
# root.target.win and category.
f_file.tar.gz = function(src.file, yr, channel) {
src.file = paste(getwd(),src.file,sep="/")
if (file.exists(src.file)) {
# NOTE(review): out.path is computed but never used -- the system() call
# below writes into root.target.win instead. Presumably the archive was
# meant to be named yr<N>_<channel>.tgz; confirm the intended output path.
out.path <- paste(root.target, category, "/", "yr", yr, "_", channel, ".tgz", sep="")
# Echo the source path with Windows-style backslash separators.
print(gsub("/","\\\\",src.file))
#tar(tarfile, src.file, compression='gzip')
system(paste("7z a -tgzip ", gsub("/","\\\\",src.file), " -o", root.target.win))
}
}
for (yr in 1:1) {
pth.raw = paste(root.source, "Year", yr, "/External/", category, "/", sep="")
setwd(pth.raw)
# grocery file
pat1 = paste(category,"groc", sep = "_")
fil = dir(pattern = c(pat1))
f_file.tar.gz(fil, yr, "groc")
# drug file
pat2 = paste(category,"drug", sep = "_")
fil = dir(pattern = c(pat2))
f_file.tar.gz(fil, yr, "drug")
}
?channel
#
#
getwd()
|
# Defunct: the edits this existed to create are now part of the main package
# phewas_manhattan <-
# function(d, add.phewas.descriptions=T, ...) {
# if(sum(c("phenotype","p") %in% names(d))<2 ) stop("Data input must contain columns phenotype and p.")
# if(class(d$phenotype)!="character") {
# if(class(d$phenotype)=="factor") {
# warning("Factor phenotype input mapped to characters")
# d$phenotype=as.character(d$phenotype)
# } else {
# stop("Non-character or non-factor phenotypes passed in, so an accurate phewas code mapping is not possible.")
# }
# }
# #Check to see if it looks 0-padded
# if(min(nchar(d$phenotype))<3) warning("Phenotypes with length <3 observed, ensure they are are 0-padded (e.g., \"008\")")
# #Add the groups
# #d=addPhewasGroups(d)
# d=addPhecodeInfo(d)
# d["phenotype"]=d$phecode
#
# #Call phenotype plot as normal.
# if(add.phewas.descriptions) {
# #d=addPhewasDescription(d,for.plots=T)
# d=addPhecodeInfo(d, groupnums=T, groupcolors=T)
# d["phenotype"]=d$phecode
# d["description"]=d$description.x
# d["group"]=d$group.x
# phenotype_manhattan(d, annotate.phenotype.description=T,...)
# } else {
# phenotype_manhattan(d,...)
# }
# }
| /R/defunct_phewas_manhattan.R | no_license | ekawaler/simplePheWAS | R | false | false | 1,310 | r | # Defunct: the edits this existed to create are now part of the main package
# phewas_manhattan <-
# function(d, add.phewas.descriptions=T, ...) {
# if(sum(c("phenotype","p") %in% names(d))<2 ) stop("Data input must contain columns phenotype and p.")
# if(class(d$phenotype)!="character") {
# if(class(d$phenotype)=="factor") {
# warning("Factor phenotype input mapped to characters")
# d$phenotype=as.character(d$phenotype)
# } else {
# stop("Non-character or non-factor phenotypes passed in, so an accurate phewas code mapping is not possible.")
# }
# }
# #Check to see if it looks 0-padded
# if(min(nchar(d$phenotype))<3) warning("Phenotypes with length <3 observed, ensure they are are 0-padded (e.g., \"008\")")
# #Add the groups
# #d=addPhewasGroups(d)
# d=addPhecodeInfo(d)
# d["phenotype"]=d$phecode
#
# #Call phenotype plot as normal.
# if(add.phewas.descriptions) {
# #d=addPhewasDescription(d,for.plots=T)
# d=addPhecodeInfo(d, groupnums=T, groupcolors=T)
# d["phenotype"]=d$phecode
# d["description"]=d$description.x
# d["group"]=d$group.x
# phenotype_manhattan(d, annotate.phenotype.description=T,...)
# } else {
# phenotype_manhattan(d,...)
# }
# }
|
## Assume dataset is in current working directory
## Read in entire dataset; missing values are coded "?" in the raw file.
powerData <- read.table("household_power_consumption.txt",
                        header=TRUE,
                        sep=";",
                        colClasses=c("character", "character", rep("numeric",7)),
                        na="?")
# Convert date and time variables to Date/Time class
powerData$Time <- strptime(paste(powerData$Date, powerData$Time), "%d/%m/%Y %H:%M:%S")
powerData$Date <- as.Date(powerData$Date, "%d/%m/%Y")
# Subset to only use data from the dates 2007-02-01 and 2007-02-02
dates <- as.Date(c("2007-02-01", "2007-02-02"), "%Y-%m-%d")
powerData <- subset(powerData, Date %in% dates)
## Plot 3: the three energy sub-metering series over time.
## FIX: ylab previously said "Global Active Power (kilowatts)", which was
## copied from another plot; this plot draws Sub_metering_1/2/3.
with(powerData, {
  plot(Time, Sub_metering_1, type="l",
       ylab="Energy sub metering", xlab="")
  lines(Time, Sub_metering_2, col='red')
  lines(Time, Sub_metering_3, col='blue')
})
legend("topright", col=c("black", "red", "blue"), lty=1, lwd=2,
       legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
## Saving to file
dev.copy(png, file="plot3.png", height=480, width=480)
dev.off()
| /plot3.R | no_license | terrisage/ExData_Plotting1 | R | false | false | 1,118 | r | ## Assume dataset is in current working directory
## Read in entire dataset
powerData <- read.table("household_power_consumption.txt",
header=TRUE,
sep=";",
colClasses=c("character", "character", rep("numeric",7)),
na="?")
# convert date and time variables to Date/Time class
powerData$Time <- strptime(paste(powerData$Date, powerData$Time), "%d/%m/%Y %H:%M:%S")
powerData$Date <- as.Date(powerData$Date, "%d/%m/%Y")
# Subset to only use data from the dates 2007-02-01 and 2007-02-02
dates <- as.Date(c("2007-02-01", "2007-02-02"), "%Y-%m-%d")
powerData <- subset(powerData, Date %in% dates)
## Plot 3
with(powerData, {
plot(Time, Sub_metering_1, type="l",
ylab="Global Active Power (kilowatts)", xlab="")
lines(Time, Sub_metering_2,col='Red')
lines(Time, Sub_metering_3,col='Blue')
})
legend("topright", col=c("black", "red", "blue"), lty=1, lwd=2,
legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
## Saving to file
dev.copy(png, file="plot3.png", height=480, width=480)
dev.off()
|
# Name: performance.r
# Author: Will Landau (landau@iastate.edu)
# Created: June 2012
#
# This program calculates the runtime of
# a user-specified function, gpu_function,
# and compares it to that of some cpu
# analog, cpu_function.
#
# The script creates three plots, each
# comparing the runtimes of gpu_function
# to those of cpu_function based on either
# user time, system time, or total scheduled time.
library(multcomp)
library(gputools)
#############
## GLOBALS ##
#############
# functions to compare
cpu_function = solve
gpu_function = gpuSolve
# global runtime parameters. MUST HAVE length(nrows) == length(ncols) !!!
nrows = floor(10^(c(seq(from = .5, to = 3.5, by = .5), 3.75)))
ncols = nrows # use square matrices here
sizes = nrows * ncols
xs = log(nrows, base = 10) # plotted on horizontal axis
ys = list() # plotted on vertical axis
xlab = "Base 10 Log of Number of Rows (all matrices are square)"
title = "solve() vs gpuSolve()"
plot.name = "performance_gpuSolve"
cols = list(cpu = "blue", gpu = "green", outlier.gpu = "black")
# list of arguments
nargs = length(sizes)
args = list()
print("calculating arguments...")
for(i in 1:nargs){
progress = paste("calculating arg ", i, " of ", nargs, sep = "")
print(progress)
args[[i]] = matrix(rpois(nrows[i]^2, lambda = 5), nrow = nrows[i])
}
print("done.")
####################
## MAIN FUNCTIONS ##
####################
# iter.time() is a wrapper for one iteration of either
# gpu_function or cpu_function. The purpose of the wrapper
# is to create the input data, pass the appropriate
# argument (entry arg of the list, args), to gpu_function
# (or cpu_function, if appropriate), and return the run time.
# Time a single run of cpu_function or gpu_function on `arg`.
#
# arg:  argument passed to the timed function (a matrix in this script).
# type: "cpu" runs cpu_function; any other value runs gpu_function.
# Returns a list with user, system and elapsed (wall-clock) seconds,
# as reported by proc.time().
iter.time = function(arg, type = "cpu"){
  # Select the function once; the original duplicated the identical
  # timing code in both branches of the if/else.
  timed_fun <- if (type == "cpu") cpu_function else gpu_function
  ptm <- proc.time()
  timed_fun(arg)
  ptm <- proc.time() - ptm
  list(user = ptm[1], syst = ptm[2], elap = ptm[3])
}
# loop.time executes iter.time (i.e., calculates the run time
# of either gpu_function or cpu_function) for each entry in
# params (one run per entry in params, each entry defining
# the magnitude of the computational load on gpu_function or
# cpu_function).
# Run iter.time() once per element of `args`, printing progress.
#
# args: list of arguments, one per timed run.
# type: "cpu" or "gpu"; forwarded to iter.time().
# Returns a list of numeric vectors (user, syst, total), one entry per
# element of `args`.
loop.time = function(args, type = "cpu"){
  n <- length(args)
  # Preallocate result vectors instead of growing them with c() inside
  # the loop (O(n^2) copying in the original).
  user <- numeric(n)
  syst <- numeric(n)
  total <- numeric(n)
  for (i in seq_along(args)) {
    times <- iter.time(arg = args[[i]], type = type)
    user[i] <- times$user
    syst[i] <- times$syst
    total[i] <- times$user + times$syst
    # FIX: report length(args), not the global `nargs`, so the "i of n"
    # message is correct when a subset of the full argument list is
    # passed (as the main routine does for the gpu warm-up run).
    print(paste(type,
                " iteration ",
                i,
                " of ",
                n,
                ": elapsed time = ",
                times$elap,
                sep = ""))
  }
  list(user = user, syst = syst, total = total)
}
##################
## MAIN ROUTINE ##
##################
# Main routine: actually run gpu_function and cpu_function
# for various data loads and return the run times. Note:
# outlier.gpu.times measures the computational overhead
# associated with using the gpu for the first time in this
# R script
cpu.times = loop.time(args, type = "cpu")
outlier.gpu.times = loop.time(args[1], type = "gpu")
gpu.times = loop.time(args, type = "gpu")
#########################################
## FORMAT RUNTIME DATA OF MAIN ROUTINE ##
#########################################
# organize runtime data into a convenient list
times = list(cpu = cpu.times,
outlier.gpu = outlier.gpu.times,
gpu = gpu.times)
# format data to plot without confidence
# regions.
for(time in c("user", "syst", "total")){
ys[[time]]$outlier.gpu = times$outlier.gpu[[time]]
for(dev in c("cpu", "gpu")){
ys[[time]][[dev]] = times[[dev]][[time]]
}
}
# Format data for plotting: WITH family-wise
# confidence regions for each run time type
# (time = "user", "syst", or "total") and
# each device (dev = "cpu", "gpu", or
# "outlier.gpu"). The data, ready for plotting,
# are available for each device in ys[[time]][[dev]].
#
# for(dev in c("cpu","gpu")){
# for(time in c("user","syst","total")){
#
# fit = aov(times[[dev]][[time]] ~ as.factor(sizes) - 1)
# glht.fit = glht(fit)
#
# print(glht.fit)
#
# if(!all(glht.fit$coef == 0)){
# famint = confint(glht.fit)
# } else{
# zeroes = rep(0,length(glht.fit$coef))
# famint = list(confint = list(Estimate = zeroes,
# lwr = zeroes,
# upr = zeroes))
# }
#
# ys[[time]][[dev]] = data.frame(famint$confint)
# ys[[time]]$outlier.gpu = times$outlier.gpu[[time]]
# }
#}
#######################
## PLOT RUNTIME DATA ##
#######################
# For each kind of run time, make a plot comparing
# the run times of gpu_function to the run times
# of cpu_function.
for(time in c("user", "syst", "total")){
filename = paste(c(plot.name,"_",time,".pdf"), collapse = "")
pdf(filename)
xbounds = c(min(xs), max(xs))
ybounds = c(min(unlist(ys[[time]])),
1.3 * max(unlist(ys[[time]])))
plot(xbounds,
ybounds,
pch= ".",
col="white",
xlab = xlab,
ylab = paste(c(time, "scheduled runtime (seconds)", collapse = " ")),
main = paste(c(time, "scheduled runtime (seconds):", title, collapse = " ")))
for(dev in c("cpu", "gpu")){
points(xs[1], ys[[time]]$outlier.gpu, col=cols$outlier.gpu)
points(xs, ys[[time]][[dev]], col = cols[[dev]])
lines(xs, ys[[time]][[dev]], col = cols[[dev]], lty=1)
# lines(xs, ys[[time]][[dev]]$upr, col = cols[[dev]], lty=1)
}
legend("topleft",
legend = c("mean cpu runtime",
"mean gpu runtime",
"first gpu run (overhead, discarded from conf. region calculations)"),
col = c(cols$cpu,
cols$gpu,
"black"),
pch = c("o"))
dev.off()
} | /Rcode/R_code/gpuSolve.r | no_license | maoneil/gpuIntroduction | R | false | false | 6,028 | r | # Name: performance.r
# Author: Will Landau (landau@iastate.edu)
# Created: June 2012
#
# This program calculates the runtime of
# a user-specified function, gpu_function,
# and compares it to that of some cpu
# analog, cpu_function.
#
# The script creates three plots, each
# comparing the runtimes of gpu_function
# to those of cpu_function based on either
# user time, system time, or total scheduled time.
library(multcomp)
library(gputools)
#############
## GLOBALS ##
#############
# functions to compare
cpu_function = solve
gpu_function = gpuSolve
# global runtime parameters. MUST HAVE length(nrows) == length(ncols) !!!
nrows = floor(10^(c(seq(from = .5, to = 3.5, by = .5), 3.75)))
ncols = nrows # use square matrices here
sizes = nrows * ncols
xs = log(nrows, base = 10) # plotted on horizontal axis
ys = list() # plotted on vertical axis
xlab = "Base 10 Log of Number of Rows (all matrices are square)"
title = "solve() vs gpuSolve()"
plot.name = "performance_gpuSolve"
cols = list(cpu = "blue", gpu = "green", outlier.gpu = "black")
# list of arguments
nargs = length(sizes)
args = list()
print("calculating arguments...")
for(i in 1:nargs){
progress = paste("calculating arg ", i, " of ", nargs, sep = "")
print(progress)
args[[i]] = matrix(rpois(nrows[i]^2, lambda = 5), nrow = nrows[i])
}
print("done.")
####################
## MAIN FUNCTIONS ##
####################
# iter.time() is a wrapper for one iteration of either
# gpu_function or cpu_function. The purpose of the wrapper
# is to create the input data, pass the appropriate
# argument (entry arg of the list, args), to gpu_function
# (or cpu_function, if appropriate), and return the run time.
# Time a single run of cpu_function or gpu_function on `arg`.
#
# arg:  argument passed to the timed function (a matrix in this script).
# type: "cpu" runs cpu_function; any other value runs gpu_function.
# Returns a list with user, system and elapsed (wall-clock) seconds,
# as reported by proc.time().
iter.time = function(arg, type = "cpu"){
  # Select the function once; the original duplicated the identical
  # timing code in both branches of the if/else.
  timed_fun <- if (type == "cpu") cpu_function else gpu_function
  ptm <- proc.time()
  timed_fun(arg)
  ptm <- proc.time() - ptm
  list(user = ptm[1], syst = ptm[2], elap = ptm[3])
}
# loop.time executes iter.time (i.e., calculates the run time
# of either gpu_function or cpu_function) for each entry in
# params (one run per entry in params, each entry defining
# the magnitude of the computational load on gpu_function or
# cpu_function).
# Run iter.time() once per element of `args`, printing progress.
#
# args: list of arguments, one per timed run.
# type: "cpu" or "gpu"; forwarded to iter.time().
# Returns a list of numeric vectors (user, syst, total), one entry per
# element of `args`.
loop.time = function(args, type = "cpu"){
  n <- length(args)
  # Preallocate result vectors instead of growing them with c() inside
  # the loop (O(n^2) copying in the original).
  user <- numeric(n)
  syst <- numeric(n)
  total <- numeric(n)
  for (i in seq_along(args)) {
    times <- iter.time(arg = args[[i]], type = type)
    user[i] <- times$user
    syst[i] <- times$syst
    total[i] <- times$user + times$syst
    # FIX: report length(args), not the global `nargs`, so the "i of n"
    # message is correct when a subset of the full argument list is
    # passed (as the main routine does for the gpu warm-up run).
    print(paste(type,
                " iteration ",
                i,
                " of ",
                n,
                ": elapsed time = ",
                times$elap,
                sep = ""))
  }
  list(user = user, syst = syst, total = total)
}
##################
## MAIN ROUTINE ##
##################
# Main routine: actually run gpu_function and cpu_function
# for various data loads and return the run times. Note:
# outlier.gpu.times measures the computational overhead
# associated with using the gpu for the first time in this
# R script
cpu.times = loop.time(args, type = "cpu")
outlier.gpu.times = loop.time(args[1], type = "gpu")
gpu.times = loop.time(args, type = "gpu")
#########################################
## FORMAT RUNTIME DATA OF MAIN ROUTINE ##
#########################################
# organize runtime data into a convenient list
times = list(cpu = cpu.times,
outlier.gpu = outlier.gpu.times,
gpu = gpu.times)
# format data to plot without confidence
# regions.
for(time in c("user", "syst", "total")){
ys[[time]]$outlier.gpu = times$outlier.gpu[[time]]
for(dev in c("cpu", "gpu")){
ys[[time]][[dev]] = times[[dev]][[time]]
}
}
# Format data for plotting: WITH family-wise
# confidence regions for each run time type
# (time = "user", "syst", or "total") and
# each device (dev = "cpu", "gpu", or
# "outlier.gpu"). The data, ready for plotting,
# are available for each device in ys[[time]][[dev]].
#
# for(dev in c("cpu","gpu")){
# for(time in c("user","syst","total")){
#
# fit = aov(times[[dev]][[time]] ~ as.factor(sizes) - 1)
# glht.fit = glht(fit)
#
# print(glht.fit)
#
# if(!all(glht.fit$coef == 0)){
# famint = confint(glht.fit)
# } else{
# zeroes = rep(0,length(glht.fit$coef))
# famint = list(confint = list(Estimate = zeroes,
# lwr = zeroes,
# upr = zeroes))
# }
#
# ys[[time]][[dev]] = data.frame(famint$confint)
# ys[[time]]$outlier.gpu = times$outlier.gpu[[time]]
# }
#}
#######################
## PLOT RUNTIME DATA ##
#######################
# For each kind of run time, make a plot comparing
# the run times of gpu_function to the run times
# of cpu_function.
# For each run-time kind (user, syst, total), draw one PDF comparing the
# cpu and gpu run times. Uses script globals: plot.name, xs, ys, xlab,
# title and cols.
for(time in c("user", "syst", "total")){
  filename = paste(c(plot.name, "_", time, ".pdf"), collapse = "")
  pdf(filename)
  xbounds = c(min(xs), max(xs))
  # Leave 30% headroom above the largest value so the legend fits.
  ybounds = c(min(unlist(ys[[time]])),
              1.3 * max(unlist(ys[[time]])))
  # Empty frame; the series are added with points()/lines() below.
  # FIX: the original wrote paste(c(time, "...", collapse = " ")), which
  # puts `collapse` INSIDE c(), so ylab/main received a 3-element
  # character vector instead of one collapsed string.
  plot(xbounds,
       ybounds,
       pch = ".",
       col = "white",
       xlab = xlab,
       ylab = paste(time, "scheduled runtime (seconds)"),
       main = paste(time, "scheduled runtime (seconds):", title))
  for(dev in c("cpu", "gpu")){
    points(xs[1], ys[[time]]$outlier.gpu, col = cols$outlier.gpu)
    points(xs, ys[[time]][[dev]], col = cols[[dev]])
    lines(xs, ys[[time]][[dev]], col = cols[[dev]], lty = 1)
    # lines(xs, ys[[time]][[dev]]$upr, col = cols[[dev]], lty=1)
  }
  legend("topleft",
         legend = c("mean cpu runtime",
                    "mean gpu runtime",
                    "first gpu run (overhead, discarded from conf. region calculations)"),
         col = c(cols$cpu,
                 cols$gpu,
                 "black"),
         pch = c("o"))
  dev.off()
}
# Missing Values ----
# Scratch exploration of missing-data handling, using the VIM `sleep` data.

# Basic NA handling on a small vector; NA propagates through aggregates
# unless na.rm = TRUE is given explicitly.
v1 = c(1,2,NA,NA,5)
is.na(v1)
mean(v1)
mean(v1, na.rm=TRUE)
v1a = na.omit(v1)
sum(v1a)
?na.omit
anyNA(v1)
#all
v1[is.na(v1)] = mean(v1, na.rm=TRUE)   # simple mean imputation
v1
#denoted by NA
library(VIM)
data(sleep, package='VIM')
head(sleep)
dim(sleep)
complete.cases(sleep)
sum(complete.cases(sleep))
sleep[complete.cases(sleep),]          # rows with no missing values
sleep[!complete.cases(sleep),]         # rows with at least one NA
sum(is.na(sleep$Dream))
mean(is.na(sleep$Dream))
42/62
mean(!complete.cases(sleep))
sum(is.na(sleep))
colSums(is.na(sleep))
rowSums(is.na(sleep))
#Tabulate
library(mice)
mice::md.pattern(sleep)
#42 rows without any missing values # 2 rows with 1 NA NonD
#Visualisation
VIM::aggr(sleep, prop=FALSE, numbers=TRUE)
#NonD max NA values
VIM::aggr(sleep, prop=TRUE)
VIM::matrixplot(sleep)
VIM::marginmatrix(sleep)
VIM::marginplot(sleep[c('Gest','Dream')], pch=c(20), col=c("darkgray", "red", "blue"))
#Exploration
# Indicator matrix: 1 where a value is missing, 0 otherwise.
x = as.data.frame(abs(is.na(sleep)))
head(sleep, n=5)   # FIX: was head(sleep, na=5); 'na' is not an argument of head()
head(x, n=5)
y = x[which(apply(x, 2, sum) > 0)]    # keep only columns containing NAs
cor(y) #Dream - NonD 0.9
cor(sleep, y, use="pairwise.complete.obs")
#ignore NA,
#
#Listwise Deletion
options(digits=3)
na.omit(sleep)
cor(na.omit(sleep))
cor(sleep, use="complete.obs")
fit = lm(Dream ~ Span + Gest, data= na.omit(sleep))
summary(fit)
#imputations Packages - mice, Amelia, mi : mice, with, pool
imp = mice::mice(sleep, seed=1234)
?mice | /missingvalue.R | no_license | dhanapalaprudhviraj/analyticsproject1 | R | false | false | 1,317 | r | # Missing Values
# Scratch exploration of missing-data handling, using the VIM `sleep` data.
# Basic NA handling on a small vector; NA propagates through aggregates
# unless na.rm = TRUE is given explicitly.
v1 = c(1,2,NA,NA,5)
is.na(v1)
mean(v1)
mean(v1, na.rm=TRUE)
v1a = na.omit(v1)
sum(v1a)
?na.omit
anyNA(v1)
#all
v1[is.na(v1)] = mean(v1, na.rm=TRUE)   # simple mean imputation
v1
#denoted by NA
library(VIM)
data(sleep, package='VIM')
head(sleep)
dim(sleep)
complete.cases(sleep)
sum(complete.cases(sleep))
sleep[complete.cases(sleep),]          # rows with no missing values
sleep[!complete.cases(sleep),]         # rows with at least one NA
sum(is.na(sleep$Dream))
mean(is.na(sleep$Dream))
42/62
mean(!complete.cases(sleep))
sum(is.na(sleep))
colSums(is.na(sleep))
rowSums(is.na(sleep))
#Tabulate
library(mice)
mice::md.pattern(sleep)
#42 rows without any missing values # 2 rows with 1 NA NonD
#Visualisation
VIM::aggr(sleep, prop=FALSE, numbers=TRUE)
#NonD max NA values
VIM::aggr(sleep, prop=TRUE)
VIM::matrixplot(sleep)
VIM::marginmatrix(sleep)
VIM::marginplot(sleep[c('Gest','Dream')], pch=c(20), col=c("darkgray", "red", "blue"))
#Exploration
# Indicator matrix: 1 where a value is missing, 0 otherwise.
x = as.data.frame(abs(is.na(sleep)))
head(sleep, n=5)   # FIX: was head(sleep, na=5); 'na' is not an argument of head()
head(x, n=5)
y = x[which(apply(x, 2, sum) > 0)]    # keep only columns containing NAs
cor(y) #Dream - NonD 0.9
cor(sleep, y, use="pairwise.complete.obs")
#ignore NA,
#
#Listwise Deletion
options(digits=3)
na.omit(sleep)
cor(na.omit(sleep))
cor(sleep, use="complete.obs")
fit = lm(Dream ~ Span + Gest, data= na.omit(sleep))
summary(fit)
#imputations Packages - mice, Amelia, mi : mice, with, pool
imp = mice::mice(sleep, seed=1234)
?mice |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/StyleBypasses.R
\name{unhideAll}
\alias{unhideAll}
\title{Unhide All}
\usage{
unhideAll(network = NULL, base.url = .defaultBaseUrl)
}
\arguments{
\item{network}{(optional) Name or SUID of the network. Default is the
"current" network active in Cytoscape.}
\item{base.url}{(optional) Ignore unless you need to specify a custom domain,
port or version to connect to the CyREST API. Default is http://localhost:1234
and the latest version of the CyREST API supported by this version of RCy3.}
}
\value{
None
}
\description{
Unhide all previously hidden nodes and edges, by
clearing the Visible property bypass value.
}
\details{
This method sets node and edge visibility bypass to true, overriding
any defaults or mappings. Pending CyREST updates, this method will
ultimately call the generic function, \link{clearEdgePropertyBypass}, which
can be used to clear any visual property.
}
\examples{
\donttest{
unhideAll()
}
}
\seealso{
{
\link{clearEdgePropertyBypass},
\link{unhideNodes}
\link{unhideEdges}
}
}
| /man/unhideAll.Rd | permissive | cytoscape/RCy3 | R | false | true | 1,088 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/StyleBypasses.R
\name{unhideAll}
\alias{unhideAll}
\title{Unhide All}
\usage{
unhideAll(network = NULL, base.url = .defaultBaseUrl)
}
\arguments{
\item{network}{(optional) Name or SUID of the network. Default is the
"current" network active in Cytoscape.}
\item{base.url}{(optional) Ignore unless you need to specify a custom domain,
port or version to connect to the CyREST API. Default is http://localhost:1234
and the latest version of the CyREST API supported by this version of RCy3.}
}
\value{
None
}
\description{
Unhide all previously hidden nodes and edges, by
clearing the Visible property bypass value.
}
\details{
This method sets node and edge visibility bypass to true, overriding
any defaults or mappings. Pending CyREST updates, this method will
ultimately call the generic function, \link{clearEdgePropertyBypass}, which
can be used to clear any visual property.
}
\examples{
\donttest{
unhideAll()
}
}
\seealso{
{
\link{clearEdgePropertyBypass},
\link{unhideNodes}
\link{unhideEdges}
}
}
|
#' ISOAbstractCatalogue
#'
#' @docType class
#' @importFrom R6 R6Class
#' @export
#' @keywords ISO abstract catalogue
#' @return Object of \code{\link{R6Class}} for modelling an ISOAbstracCatalogue
#' @format \code{\link{R6Class}} object.
#'
#' @field name
#' @field scope
#' @field fieldOfApplication
#' @field versionNumber
#' @field versionDate
#'
#' @section Methods:
#' \describe{
#' \item{\code{new(xml)}}{
#' This method is used to instantiate an ISOAbstractCatalogue
#' }
#' \item{\code{setName(name)}}{
#' Sets the name
#' }
#' \item{\code{addScope(scope)}}{
#' Adds scope (object of class \code{character})
#' }
#' \item{\code{delScope(scope)}}{
#' Deletes scope
#' }
#' \item{\code{addFieldOfApplication(fieldOfApplication)}}{
#' Adds a field of application (object of class \code{character})
#' }
#' \item{\code{delFieldOfApplication(fieldOfApplication)}}{
#' Deletes fieldOfApplication
#' }
#' \item{\code{setVersionNumber(versionNumber)}}{
#' Sets version number (object of class \code{character})
#' }
#' \item{\code{setVersionDate(versionDate)}}{
#' Sets version date
#' }
#' }
#'
#' @references
#' ISO 19139:2007 Metadata - XML schema implementation
#'
#' @author Emmanuel Blondel <emmanuel.blondel1@@gmail.com>
#'
ISOAbstractCatalogue <- R6Class("ISOAbstractCatalogue",
inherit = ISOAbstractObject,
private = list(
document = TRUE,
# XML element name and namespace prefix for this type (ISO 19139 / GMX).
xmlElement = "AbstractCT_Catalogue",
xmlNamespacePrefix = "GMX"
),
public = list(
#+ name [1..1]: character
name = NULL,
#+ scope [1..*]: character
scope = list(),
#+ fieldOfApplication [0.*]: character
fieldOfApplication = list(),
#+ versionNumber [1..1]: character
versionNumber = NULL,
#+ versionDate [1..1]: character
versionDate = NULL,
# Build the catalogue, optionally decoding from an XML node
# (decoding is delegated to the parent ISOAbstractObject initializer).
initialize = function(xml = NULL){
super$initialize(xml = xml)
},
# Set the catalogue name, coercing the value to character if needed.
setName = function(name){
if(!is(name,"character")) name <- as(name, "character")
self$name <- name
},
# Add a scope entry (via the addListElement helper on self).
addScope = function(scope){
return(self$addListElement("scope", scope))
},
# Remove a previously added scope entry.
delScope = function(scope){
return(self$delListElement("scope", scope))
},
# Add a field-of-application entry.
addFieldOfApplication = function(fieldOfApplication){
return(self$addListElement("fieldOfApplication", fieldOfApplication))
},
# Remove a previously added field-of-application entry.
delFieldOfApplication = function(fieldOfApplication){
return(self$delListElement("fieldOfApplication", fieldOfApplication))
},
# Set the version number, coercing the value to character if needed.
setVersionNumber = function(versionNumber){
if(!is(versionNumber,"character")) versionNumber <- as(versionNumber, "character")
self$versionNumber <- versionNumber
},
# Set the version date.
# NOTE(review): unlike setVersionNumber, no coercion to character is
# applied here -- presumably Date or character input is intended; confirm.
setVersionDate = function(versionDate){
self$versionDate <- versionDate
}
)
)
#'
#' @docType class
#' @importFrom R6 R6Class
#' @export
#' @keywords ISO abstract catalogue
#' @return Object of \code{\link{R6Class}} for modelling an ISOAbstracCatalogue
#' @format \code{\link{R6Class}} object.
#'
#' @field name
#' @field scope
#' @field fieldOfApplication
#' @field versionNumber
#' @field versionDate
#'
#' @section Methods:
#' \describe{
#' \item{\code{new(xml)}}{
#' This method is used to instantiate an ISOAbstractCatalogue
#' }
#' \item{\code{setName(name)}}{
#' Sets the name
#' }
#' \item{\code{addScope(scope)}}{
#' Adds scope (object of class \code{character})
#' }
#' \item{\code{delScope(scope)}}{
#' Deletes scope
#' }
#' \item{\code{addFieldOfApplication(fieldOfApplication)}}{
#' Adds a field of application (object of class \code{character})
#' }
#' \item{\code{delFieldOfApplication(fieldOfApplication)}}{
#' Deletes fieldOfApplication
#' }
#' \item{\code{setVersionNumber(versionNumber)}}{
#' Sets version number (object of class \code{character})
#' }
#' \item{\code{setVersionDate(versionDate)}}{
#' Sets version date
#' }
#' }
#'
#' @references
#' ISO 19139:2007 Metadata - XML schema implementation
#'
#' @author Emmanuel Blondel <emmanuel.blondel1@@gmail.com>
#'
ISOAbstractCatalogue <- R6Class("ISOAbstractCatalogue",
inherit = ISOAbstractObject,
private = list(
document = TRUE,
xmlElement = "AbstractCT_Catalogue",
xmlNamespacePrefix = "GMX"
),
public = list(
#+ name [1..1]: character
name = NULL,
#+ scope [1..*]: character
scope = list(),
#+ fieldOfApplication [0.*]: character
fieldOfApplication = list(),
#+ versionNumber [1..1]: character
versionNumber = NULL,
#+ versionDate [1..1]: character
versionDate = NULL,
initialize = function(xml = NULL){
super$initialize(xml = xml)
},
#setName
setName = function(name){
if(!is(name,"character")) name <- as(name, "character")
self$name <- name
},
#addScope
addScope = function(scope){
return(self$addListElement("scope", scope))
},
#delScope
delScope = function(scope){
return(self$delListElement("scope", scope))
},
#addFieldOfApplication
addFieldOfApplication = function(fieldOfApplication){
return(self$addListElement("fieldOfApplication", fieldOfApplication))
},
#delFieldOfApplication
delFieldOfApplication = function(fieldOfApplication){
return(self$delListElement("fieldOfApplication", fieldOfApplication))
},
#setVersionNumber
setVersionNumber = function(versionNumber){
if(!is(versionNumber,"character")) versionNumber <- as(versionNumber, "character")
self$versionNumber <- versionNumber
},
#setVersionDate
setVersionDate = function(versionDate){
self$versionDate <- versionDate
}
)
) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{rPVI}
\alias{rPVI}
\title{Raw pairwise variability index.}
\arguments{
\item{x}{A vector of durations in arbitrary unit.}
\item{omit}{Boolean indicating whether NA values should be removed before calculating rPVI.}
}
\value{
A single value representing the rPVI for the vector of durations
}
\description{
Computes the raw Pairwise Variability Index (rPVI) on a supplied vector of durations.
}
\references{
Nolan, F., & Asu, E. L. (2009). The Pairwise Variability Index and Coexisting Rhythms in Language. Phonetica, 66(1-2), 64–77. doi:10.1159/000208931
}
\author{
Fredrik Karlsson
}
| /man/rPVI.Rd | no_license | FredrikKarlssonSpeech/articulated | R | false | true | 686 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{rPVI}
\alias{rPVI}
\title{Raw pairwise variability index.}
\arguments{
\item{x}{A vector of durations in arbitrary unit.}
\item{omit}{Boolean indicating whether NA values should be removed before calculating rPVI.}
}
\value{
A single value representing the rPVI for the vector of durations
}
\description{
Computes the raw Pairwire Variability Index (rPVI) on a supplied vector of durations.
}
\references{
Nolan, F., & Asu, E. L. (2009). The Pairwise Variability Index and Coexisting Rhythms in Language. Phonetica, 66(1-2), 64–77. doi:10.1159/000208931
}
\author{
Fredrik Karlsson
}
|
library(tgp)
# sh1.daily: mean daily specific humidity
# mean.temp: mean daily temperature
# parms(colnames):'qmin','qmax','qmid','R0max','R0diff','D','L','S0','I0','p'
# discrete=T
# calculate R0
# Compute the basic reproductive number R0 as a piecewise-quadratic function
# of specific humidity, for each ensemble member.
#
# Args:
#   in.parms: matrix of ensemble parameters with one row per member; must
#             carry columns 'qmin', 'qmax', 'qmid', 'R0max', 'R0diff'.
#   num.ens:  number of ensemble members (rows of in.parms to use).
#   sh1:      vector of daily specific humidity values.
#
# Returns a list of two length(sh1) x num.ens matrices:
#   [[1]] "full" model: the parabolas evaluated at the raw humidity values;
#   [[2]] "reduced" model: humidity clamped to [qmin, qmax] first.
#
# Two parabolic branches share a common minimum (qmid, R0.min); by symmetry
# the left branch peaks (R0.max) at qmin and the right branch peaks at qmax.
calc_R0_AH <- function(in.parms, num.ens, sh1) {
  # Matrices for storing/returning results (one column per ensemble member)
  res.temp <- res.temp.red <- matrix(0, length(sh1), num.ens)
  for (ix in seq_len(num.ens)) {
    # Assign parameters for this ensemble member:
    q.mn <- in.parms[ix, 'qmin']; q.mx <- in.parms[ix, 'qmax']; q.md <- in.parms[ix, 'qmid']
    R0.max <- in.parms[ix, 'R0max']; R0.diff <- in.parms[ix, 'R0diff']
    q.mn.cut <- q.mn  # lower clamp of the reduced model (same as qmin)
    # R0 at the humidity midpoint; floored at 0.1 if the difference would
    # make it negative
    R0.min <- R0.max - R0.diff
    if (R0.min < 0) {
      R0.min <- 0.1
    }
    # Left branch: mirror qmin about qmid to get its implied upper endpoint
    q.mx.left <- 2 * q.md - q.mn
    b.left <- ((R0.max - R0.min) * (q.mx.left + q.mn)) / ((q.mx.left - q.md) * (q.mn - q.md))
    a.left <- (-1 * b.left) / (q.mx.left + q.mn)
    c.left <- R0.min - a.left * q.md ** 2 - b.left * q.md
    # Right branch: mirror qmax about qmid to get its implied lower endpoint
    q.mn.right <- 2 * q.md - q.mx
    b.right <- ((R0.max - R0.min) * (q.mx + q.mn.right)) / ((q.mx - q.md) * (q.mn.right - q.md))
    a.right <- (-1 * b.right) / (q.mx + q.mn.right)
    c.right <- R0.min - a.right * q.md ** 2 - b.right * q.md
    fit1 <- fit2 <- numeric(length(sh1))
    # Split observations by which side of the midpoint they fall on
    # (branch membership is decided on the RAW humidity in both models)
    idx.left <- which(sh1 < q.md); idx.right <- which(sh1 >= q.md)
    # Full model: evaluate the branch parabolas at the raw humidity
    q1.left <- sh1[idx.left]; q1.right <- sh1[idx.right]
    fit1[idx.left] <- a.left * q1.left ** 2 + b.left * q1.left + c.left
    fit1[idx.right] <- a.right * q1.right ** 2 + b.right * q1.right + c.right
    # Reduced model: clamp humidity to [q.mn.cut, q.mx] before evaluating,
    # so R0 never exceeds R0.max outside the calibrated range
    q1 <- sh1
    q1[q1 < q.mn.cut] <- q.mn.cut
    q1[q1 > q.mx] <- q.mx
    q1.left <- q1[idx.left]; q1.right <- q1[idx.right]
    fit2[idx.left] <- a.left * q1.left ** 2 + b.left * q1.left + c.left
    fit2[idx.right] <- a.right * q1.right ** 2 + b.right * q1.right + c.right
    # Store results for this member
    res.temp[, ix] <- fit1
    res.temp.red[, ix] <- fit2
  }
  list(res.temp, res.temp.red)
}
num_ens=dim(parms)[1]  # one ensemble member per parameter row
# Reduced-model R0 (humidity clamped to [qmin, qmax]) -- element [[2]] of calc_R0_AH
R0 <- calc_R0_AH(parms, num_ens, sh1.daily)[[2]]
# Get other params:
# D: infectious period; S0/I0: initial states; expoI: mixing exponent; L: immunity duration
D <- parms[, 'D']; S0 <- parms[, 'S0']; I0 <- parms[, 'I0']; expoI <- parms[, 'p']; L <- parms[,"L"]
tm_strt <- 1; tm_end <- length(sh1.daily)  # integrate over the full humidity record
tm_step <- 1; tm.range <- tm_strt:tm_end   # daily time step
# Calculate betas:
# transmission rate beta = R0 / D per ensemble member (time x ensemble matrix)
beta <- sapply(1:num_ens, function(ix) {
  R0[, ix] / D[ix]
})
rm(R0)  # only beta is needed downstream
# SIRS model
# Stochastic / deterministic SIRS integrator with time-varying transmission.
#   tm_strt, tm_end, tm_step: integration window and step (indices into tm.range)
#   S0, I0: initial susceptible/infected counts (one value per particle)
#   N: population size; D: infectious period (days); L: immunity duration (days)
#   beta: time-by-particle matrix of transmission rates
#   expoI: exponent applied to I in the force of infection (discrete mode only)
#   realdata: if TRUE, also return the cumulative new-infection matrix newI
# NOTE(review): depends on globals tm.range, discrete, num_ens, pdmSinit,
# birth.rate.HK, and on tgp::lhs -- confirm these are set by the caller.
SIRS_AH <- function(tm_strt, tm_end, tm_step, S0, I0, N, D, L, beta, expoI, realdata=FALSE){
  # function to integrate to the next time step
  # use SIR model, integrate discretely with Poisson distributions
  # input: tm_strt: starting time; tm_end: ending time; tm_step: time step
  # S0, I0: initial states; N: population size
  # D: infection period, day; L: immune period, day;
  # alpha: rate from exposed to infectious; beta: transmission matrix
  # (NOTE(review): no 'alpha' argument actually exists in this signature)
  # output: S, I for all time steps
  cnt=1;
  # beta stores only data during the time used for the truth
  tm_strt=tm_strt-tm.range[1]+1; # adjust the index to match beta
  tm_end=tm_end-tm.range[1]+1;
  tm_vec=seq(tm_strt,tm_end,by=tm_step)
  tm_sz=length(tm_vec)+1; # including the initial conditions and results of length(tm_vec) integration steps
  Np=length(S0); # number of particles
  S=I=newI=matrix(0,Np,tm_sz)
  S[,1]=S0; I[,1]=I0;
  newI[,1]=0;
  # fall back to deterministic mode if the global flag is undefined
  if(! exists("discrete")) discrete=FALSE;
  print(discrete)
  if (discrete){
    # Stochastic mode: integer states, flows drawn as Poisson variates
    S[,1]=round(S0,0); I[,1]=round(I0,0);
    for (t in tm_vec){
      cnt=cnt+1;
      # toggle pandemic start:
      # (at this hard-coded time step, resample S from a Latin hypercube)
      if (t == 4240) {
        S[, cnt - 1] <- round(as.vector(lhs(num_ens, pdmSinit*N)), 0)
      }
      #########################
      # --- RK4 stage 1: flows evaluated at the current state ---
      Eimmloss=tm_step*(1/L*(N-S[,cnt-1]-I[,cnt-1]))
      Einf=tm_step*(beta[t,]*pmin(I[,cnt-1], I[,cnt-1]^expoI)*S[,cnt-1]/N)
      Erecov=tm_step*(1/D*I[,cnt-1])
      Eimmloss[Eimmloss<0]=0 # adjust, set <0 to 0
      Einf[Einf<0 | is.na(Einf)]=0
      Erecov[Erecov<0]=0
      smcl=rpois(Np,Eimmloss)
      smci=rpois(Np,Einf)
      smcr=rpois(Np,Erecov)
      sk1=smcl-smci
      ik1=smci-smcr
      ik1a=smci
      # --- RK4 stage 2: flows evaluated at the half-step state ---
      Ts1=S[,cnt-1]+round(sk1/2,0)
      Ti1=I[,cnt-1]+round(ik1/2,0)
      Eimmloss=tm_step*(1/L*(N-Ts1-Ti1))
      Einf=tm_step*(beta[t,]*pmin(Ti1, Ti1^expoI)*Ts1/N)
      Erecov=tm_step*(1/D*Ti1)
      Eimmloss[Eimmloss<0]=0
      Einf[Einf<0 | is.na(Einf)]=0
      Erecov[Erecov<0]=0
      smcl=rpois(Np,Eimmloss)
      smci=rpois(Np,Einf)
      smcr=rpois(Np,Erecov)
      sk2=smcl-smci
      ik2=smci-smcr
      ik2a=smci;
      # --- RK4 stage 3: flows at the second half-step state ---
      Ts2=S[,cnt-1]+round(sk2/2,0)
      Ti2=I[,cnt-1]+round(ik2/2,0)
      Eimmloss=tm_step*(1/L*(N-Ts2-Ti2))
      Einf=tm_step*(beta[t,]*pmin(Ti2, Ti2^expoI)*Ts2/N)
      Erecov=tm_step*(1/D*Ti2)
      Eimmloss[Eimmloss<0]=0
      Einf[Einf<0 | is.na(Einf)]=0
      Erecov[Erecov<0]=0
      smcl=rpois(Np,Eimmloss)
      smci=rpois(Np,Einf)
      smcr=rpois(Np,Erecov)
      sk3=smcl-smci
      ik3=smci-smcr
      ik3a=smci;
      # --- RK4 stage 4: flows at the full-step state ---
      Ts3=S[,cnt-1]+round(sk3,0)
      Ti3=I[,cnt-1]+round(ik3,0)
      Eimmloss=tm_step*(1/L*(N-Ts3-Ti3))
      Einf=tm_step*(beta[t,]*pmin(Ti3, Ti3^expoI)*Ts3/N)
      Erecov=tm_step*(1/D*Ti3)
      Eimmloss[Eimmloss<0]=0
      Einf[Einf<0 | is.na(Einf)]=0
      Erecov[Erecov<0]=0
      smcl=rpois(Np,Eimmloss)
      smci=rpois(Np,Einf)
      smcr=rpois(Np,Erecov)
      sk4=smcl-smci
      ik4=smci-smcr
      ik4a=smci;
      # random external seeding of infections (mean 0.1 per step)
      seed=rpois(Np,.1)
      # per-capita birth/death rate
      mu = birth.rate.HK
      # RK4 combination plus demography; seeded cases move from S to I
      S[,cnt]=S[,cnt-1]+round(sk1/6+sk2/3+sk3/3+sk4/6 + mu * N - mu * S[, cnt-1],0)-seed
      I[,cnt]=I[,cnt-1]+round(ik1/6+ik2/3+ik3/3+ik4/6 - mu * I[, cnt-1],0)+seed
      newI[,cnt]=round(newI[,cnt-1]+ik1a/6+ik2a/3+ik3a/3+ik4a/6+seed,0);
    }
  } else {
    # run continuously
    # Deterministic mode: the expected flows are used directly (no Poisson
    # draws, no exponent on I, constant seeding of 1 case per step)
    for (t in tm_vec){
      cnt=cnt+1;
      # toggle pandemic start:
      # (at this hard-coded time step, resample S uniformly in [0.6N, 0.8N])
      if (t == 4240) {
        S[, cnt - 1] <- as.vector(lhs(num_ens, c(0.6 * N, 0.8 * N)))
      }
      #########################
      # --- RK4 stage 1 ---
      Eimmloss=tm_step*(1/L*(N-S[,cnt-1]-I[,cnt-1]))
      Einf=tm_step*(beta[t,]*I[,cnt-1]*S[,cnt-1]/N)
      Erecov=tm_step*(1/D*I[,cnt-1])
      Eimmloss[Eimmloss<0]=0
      Einf[Einf<0 | is.na(Einf)]=0
      Erecov[Erecov<0]=0
      smcl=Eimmloss
      smci=Einf
      smcr=Erecov
      sk1=smcl-smci
      ik1=smci-smcr
      ik1a=smci
      # --- RK4 stage 2 ---
      Ts1=S[,cnt-1]+sk1/2
      Ti1=I[,cnt-1]+ik1/2
      Eimmloss=tm_step*(1/L*(N-Ts1-Ti1))
      Einf=tm_step*(beta[t,]*Ti1*Ts1/N)
      Erecov=tm_step*(1/D*Ti1)
      Eimmloss[Eimmloss<0]=0
      Einf[Einf<0 | is.na(Einf)]=0
      Erecov[Erecov<0]=0
      smcl=Eimmloss
      smci=Einf
      smcr=Erecov
      sk2=smcl-smci
      ik2=smci-smcr
      ik2a=smci;
      # --- RK4 stage 3 ---
      Ts2=S[,cnt-1]+sk2/2
      Ti2=I[,cnt-1]+ik2/2
      Eimmloss=tm_step*(1/L*(N-Ts2-Ti2))
      Einf=tm_step*(beta[t,]*Ti2*Ts2/N)
      Erecov=tm_step*(1/D*Ti2)
      Eimmloss[Eimmloss<0]=0
      Einf[Einf<0 | is.na(Einf)]=0
      Erecov[Erecov<0]=0
      smcl=Eimmloss
      smci=Einf
      smcr=Erecov
      sk3=smcl-smci
      ik3=smci-smcr
      ik3a=smci;
      # --- RK4 stage 4 ---
      Ts3=S[,cnt-1]+sk3
      Ti3=I[,cnt-1]+ik3
      Eimmloss=tm_step*(1/L*(N-Ts3-Ti3))
      Einf=tm_step*(beta[t,]*Ti3*Ts3/N)
      Erecov=tm_step*(1/D*Ti3)
      Eimmloss[Eimmloss<0]=0
      Einf[Einf<0 | is.na(Einf)]=0
      Erecov[Erecov<0]=0
      smcl=Eimmloss
      smci=Einf
      smcr=Erecov
      sk4=smcl-smci
      ik4=smci-smcr
      ik4a=smci;
      # constant seeding of one case per step
      seed = 1
      mu = birth.rate.HK
      S[,cnt]=S[,cnt-1]+sk1/6+sk2/3+sk3/3+sk4/6-seed + mu * N - mu * S[,cnt-1]
      I[,cnt]=I[,cnt-1]+ik1/6+ik2/3+ik3/3+ik4/6+seed - mu * I[,cnt-1] # natural mortality
      newI[,cnt]=newI[,cnt-1]+ik1a/6+ik2a/3+ik3a/3+ik4a/6+seed;
    }
  }
  # transpose so rows are time steps and columns are particles
  S=t(S); I=t(I); newI=t(newI);
  if (realdata==FALSE){
    rec=list(S=S,I=I);
  } else {
    rec=list(S=S,I=I,newI=newI);
  }
  rec;
}
sim_AH <- SIRS_AH(tm_strt, tm_end, tm_step, S0, I0, N, D, L, beta, expoI, realdata=T)
| /model_code/AH.R | no_license | wan-yang/flu-subtropic-climate-models | R | false | false | 8,620 | r | library(tgp)
# sh1.daily: mean daily specific humidity
# mean.temp: mean daily temperature
# parms(colnames):'qmin','qmax','qmid','R0max','R0diff','D','L','S0','I0','p'
# discrete=T
# calculate R0
calc_R0_AH <- function(in.parms, num.ens, sh1) {
# Create matrices for storing/returning results:
res.temp = res.temp.red = matrix(0, length(sh1), num.ens)
# Loop through all ensemble members:
for (ix in 1:num.ens) {
# Assign parameters:
q.mn <- in.parms[ix, 'qmin']; q.mx <- in.parms[ix, 'qmax']; q.md <- in.parms[ix, 'qmid']
R0.max <- in.parms[ix, 'R0max']; R0.diff <- in.parms[ix, 'R0diff']
if(F){
if (dim(in.parms)[2] == 9) {
q.mn.cut <- in.parms[ix, 9]
} else {
q.mn.cut <- q.mn
}
}
q.mn.cut <- q.mn
# Calculate and correct R0.min
R0.min <- R0.max - R0.diff
if (R0.min < 0) {
R0.min <- 0.1
}
# Calculate parabola params:
if(F){
b <- ((R0.max - R0.min) * (q.mx + q.mn)) / ((q.mx - q.md) * (q.mn - q.md))
a <- (-1 * b) / (q.mx + q.mn)
c <- R0.min - a * q.md ** 2 - b * q.md
}
# given the symmetry:
q.mx.left = 2 * q.md - q.mn;
b.left <- ((R0.max - R0.min) * (q.mx.left + q.mn)) / ((q.mx.left - q.md) * (q.mn - q.md))
a.left <- (-1 * b.left) / (q.mx.left + q.mn)
c.left <- R0.min - a.left * q.md ** 2 - b.left * q.md
q.mn.right = 2 * q.md - q.mx
b.right <- ((R0.max - R0.min) * (q.mx + q.mn.right)) / ((q.mx - q.md) * (q.mn.right - q.md))
a.right <- (-1 * b.right) / (q.mx + q.mn.right)
c.right <- R0.min - a.right * q.md ** 2 - b.right * q.md
fit1 = fit2 =numeric(length(sh1))
# split the data into two sets (those >=q.md, and those <q.md)
idx.left = which(sh1 < q.md); idx.right = which(sh1 >= q.md)
# Full model:
q1.left <- sh1[idx.left]; q1.right = sh1[idx.right]
fit1[idx.left] <- (a.left * q1.left ** 2 + b.left * q1.left + c.left)
fit1[idx.right] <- (a.right * q1.right ** 2 + b.right * q1.right + c.right)
# Reduced model:
q1 <- sh1
q1[q1 < q.mn.cut] <- q.mn.cut; q1[q1 > q.mx] <- q.mx
# t1 <- mean.temp; t1[t1 < Tmin] <- Tmin
q1.left <- q1[idx.left]; q1.right = q1[idx.right]
# t1.left <- t1[idx.left]; t1.right = t1[idx.right]
fit2[idx.left] <- (a.left * q1.left ** 2 + b.left * q1.left + c.left)
fit2[idx.right] <- (a.right * q1.right ** 2 + b.right * q1.right + c.right)
# Store results in matrices:
res.temp[, ix] <- fit1; res.temp.red[, ix] <- fit2
}
# Return results:
return(list(res.temp, res.temp.red))
}
num_ens=dim(parms)[1]
R0 <- calc_R0_AH(parms, num_ens, sh1.daily)[[2]]
# Get other params:
D <- parms[, 'D']; S0 <- parms[, 'S0']; I0 <- parms[, 'I0']; expoI <- parms[, 'p']; L <- parms[,"L"]
tm_strt <- 1; tm_end <- length(sh1.daily)
tm_step <- 1; tm.range <- tm_strt:tm_end
# Calculate betas:
beta <- sapply(1:num_ens, function(ix) {
R0[, ix] / D[ix]
})
rm(R0)
# SIRS model
SIRS_AH <- function(tm_strt, tm_end, tm_step, S0, I0, N, D, L, beta, expoI, realdata=FALSE){
# function to integrate to the next time step
# use SIR model, integrate dicretely with Poisson distributions
# input: tm_strt: starting time; tm_end: ending time; tm_step: time step
# S0, I0: initial states; N: population size
# D: infection period, day; L: immune period, day;
# alpha: rate from exposed to infectious; beta: transmission matrix
# output: S, I for all time steps
cnt=1;
# beta stores only data during the time used for the truth
tm_strt=tm_strt-tm.range[1]+1; # adjust the index to match beta
tm_end=tm_end-tm.range[1]+1;
tm_vec=seq(tm_strt,tm_end,by=tm_step)
tm_sz=length(tm_vec)+1; # including the initial conditions and results of length(tm_vec) integration steps
Np=length(S0); # number of particles
S=I=newI=matrix(0,Np,tm_sz)
S[,1]=S0; I[,1]=I0;
newI[,1]=0;
if(! exists("discrete")) discrete=FALSE;
print(discrete)
if (discrete){
S[,1]=round(S0,0); I[,1]=round(I0,0);
for (t in tm_vec){
cnt=cnt+1;
# toggle pandemic start:
if (t == 4240) {
S[, cnt - 1] <- round(as.vector(lhs(num_ens, pdmSinit*N)), 0)
}
#########################
Eimmloss=tm_step*(1/L*(N-S[,cnt-1]-I[,cnt-1]))
Einf=tm_step*(beta[t,]*pmin(I[,cnt-1], I[,cnt-1]^expoI)*S[,cnt-1]/N)
Erecov=tm_step*(1/D*I[,cnt-1])
Eimmloss[Eimmloss<0]=0 # adjust, set <0 to 0
Einf[Einf<0 | is.na(Einf)]=0
Erecov[Erecov<0]=0
smcl=rpois(Np,Eimmloss)
smci=rpois(Np,Einf)
smcr=rpois(Np,Erecov)
sk1=smcl-smci
ik1=smci-smcr
ik1a=smci
Ts1=S[,cnt-1]+round(sk1/2,0)
Ti1=I[,cnt-1]+round(ik1/2,0)
Eimmloss=tm_step*(1/L*(N-Ts1-Ti1))
Einf=tm_step*(beta[t,]*pmin(Ti1, Ti1^expoI)*Ts1/N)
Erecov=tm_step*(1/D*Ti1)
Eimmloss[Eimmloss<0]=0
Einf[Einf<0 | is.na(Einf)]=0
Erecov[Erecov<0]=0
smcl=rpois(Np,Eimmloss)
smci=rpois(Np,Einf)
smcr=rpois(Np,Erecov)
sk2=smcl-smci
ik2=smci-smcr
ik2a=smci;
Ts2=S[,cnt-1]+round(sk2/2,0)
Ti2=I[,cnt-1]+round(ik2/2,0)
Eimmloss=tm_step*(1/L*(N-Ts2-Ti2))
Einf=tm_step*(beta[t,]*pmin(Ti2, Ti2^expoI)*Ts2/N)
Erecov=tm_step*(1/D*Ti2)
Eimmloss[Eimmloss<0]=0
Einf[Einf<0 | is.na(Einf)]=0
Erecov[Erecov<0]=0
smcl=rpois(Np,Eimmloss)
smci=rpois(Np,Einf)
smcr=rpois(Np,Erecov)
sk3=smcl-smci
ik3=smci-smcr
ik3a=smci;
Ts3=S[,cnt-1]+round(sk3,0)
Ti3=I[,cnt-1]+round(ik3,0)
Eimmloss=tm_step*(1/L*(N-Ts3-Ti3))
Einf=tm_step*(beta[t,]*pmin(Ti3, Ti3^expoI)*Ts3/N)
Erecov=tm_step*(1/D*Ti3)
Eimmloss[Eimmloss<0]=0
Einf[Einf<0 | is.na(Einf)]=0
Erecov[Erecov<0]=0
smcl=rpois(Np,Eimmloss)
smci=rpois(Np,Einf)
smcr=rpois(Np,Erecov)
sk4=smcl-smci
ik4=smci-smcr
ik4a=smci;
seed=rpois(Np,.1)
mu = birth.rate.HK
S[,cnt]=S[,cnt-1]+round(sk1/6+sk2/3+sk3/3+sk4/6 + mu * N - mu * S[, cnt-1],0)-seed
I[,cnt]=I[,cnt-1]+round(ik1/6+ik2/3+ik3/3+ik4/6 - mu * I[, cnt-1],0)+seed
newI[,cnt]=round(newI[,cnt-1]+ik1a/6+ik2a/3+ik3a/3+ik4a/6+seed,0);
}
} else {
# run continuously
for (t in tm_vec){
cnt=cnt+1;
# toggle pandemic start:
if (t == 4240) {
S[, cnt - 1] <- as.vector(lhs(num_ens, c(0.6 * N, 0.8 * N)))
}
#########################
Eimmloss=tm_step*(1/L*(N-S[,cnt-1]-I[,cnt-1]))
Einf=tm_step*(beta[t,]*I[,cnt-1]*S[,cnt-1]/N)
Erecov=tm_step*(1/D*I[,cnt-1])
Eimmloss[Eimmloss<0]=0
Einf[Einf<0 | is.na(Einf)]=0
Erecov[Erecov<0]=0
smcl=Eimmloss
smci=Einf
smcr=Erecov
sk1=smcl-smci
ik1=smci-smcr
ik1a=smci
Ts1=S[,cnt-1]+sk1/2
Ti1=I[,cnt-1]+ik1/2
Eimmloss=tm_step*(1/L*(N-Ts1-Ti1))
Einf=tm_step*(beta[t,]*Ti1*Ts1/N)
Erecov=tm_step*(1/D*Ti1)
Eimmloss[Eimmloss<0]=0
Einf[Einf<0 | is.na(Einf)]=0
Erecov[Erecov<0]=0
smcl=Eimmloss
smci=Einf
smcr=Erecov
sk2=smcl-smci
ik2=smci-smcr
ik2a=smci;
Ts2=S[,cnt-1]+sk2/2
Ti2=I[,cnt-1]+ik2/2
Eimmloss=tm_step*(1/L*(N-Ts2-Ti2))
Einf=tm_step*(beta[t,]*Ti2*Ts2/N)
Erecov=tm_step*(1/D*Ti2)
Eimmloss[Eimmloss<0]=0
Einf[Einf<0 | is.na(Einf)]=0
Erecov[Erecov<0]=0
smcl=Eimmloss
smci=Einf
smcr=Erecov
sk3=smcl-smci
ik3=smci-smcr
ik3a=smci;
Ts3=S[,cnt-1]+sk3
Ti3=I[,cnt-1]+ik3
Eimmloss=tm_step*(1/L*(N-Ts3-Ti3))
Einf=tm_step*(beta[t,]*Ti3*Ts3/N)
Erecov=tm_step*(1/D*Ti3)
Eimmloss[Eimmloss<0]=0
Einf[Einf<0 | is.na(Einf)]=0
Erecov[Erecov<0]=0
smcl=Eimmloss
smci=Einf
smcr=Erecov
sk4=smcl-smci
ik4=smci-smcr
ik4a=smci;
seed = 1
mu = birth.rate.HK
S[,cnt]=S[,cnt-1]+sk1/6+sk2/3+sk3/3+sk4/6-seed + mu * N - mu * S[,cnt-1]
I[,cnt]=I[,cnt-1]+ik1/6+ik2/3+ik3/3+ik4/6+seed - mu * I[,cnt-1] # natural mortality
newI[,cnt]=newI[,cnt-1]+ik1a/6+ik2a/3+ik3a/3+ik4a/6+seed;
}
}
S=t(S); I=t(I); newI=t(newI);
if (realdata==FALSE){
rec=list(S=S,I=I);
} else {
rec=list(S=S,I=I,newI=newI);
}
rec;
}
sim_AH <- SIRS_AH(tm_strt, tm_end, tm_step, S0, I0, N, D, L, beta, expoI, realdata=T)
|
#
# This test file has been generated by kwb.test::create_test_files()
#
test_that("add_site_metadata() works", {
  # Calling the (non-exported) function with no arguments is expected to fail
  expect_error(kwb.pilot:::add_site_metadata())
})
| /tests/testthat/test-function-add_site_metadata.R | permissive | KWB-R/kwb.pilot | R | false | false | 166 | r | #
# This test file has been generated by kwb.test::create_test_files()
#
test_that("add_site_metadata() works", {
expect_error(kwb.pilot:::add_site_metadata())
})
|
# web scrape --------------------------------------------------------------
library(tidyverse)
library(rvest)
# Grab every <p> node from the TBEP technical-publications page
pdftab <- read_html("https://tbeptech.org/data/tech-pubs") %>%
  html_nodes('p')
# Paragraph text of the publication entries
# NOTE(review): the 1:263 cutoff is hard-coded to the page layout at scrape
# time -- re-verify if the page changes
txt <- pdftab %>%
  html_text() %>%
  .[1:263]
# All hyperlink targets found inside the paragraph nodes
ref <- pdftab %>%
  html_nodes('a') %>%
  html_attr('href')
# Authors: extract either the parenthesized chunk or a "Prepared by ..." clause
auths <- gsub('^.*\\((.*)\\).*$|^.*(Prepared by.*\\.).*$', '\\1\\2', txt)
# Strip the author information from the entry text
txt <- gsub('\\(.*\\)|Prepared by.*', '', txt)
# Report number of the form "#NN-NN" (with or without a space after '#')
num <- gsub('^.*\\#([0-9]*\\-[0-9]*).*$|^.*\\#\\s([0-9]*\\-[0-9]*).*$', '\\1\\2', txt)
# Dump the raw pieces to local files for manual cleaning
writeLines(txt, 'C:/Users/Marcus.SCCWRP2K/Desktop/unclean.txt')
writeLines(ref, 'C:/Users/Marcus.SCCWRP2K/Desktop/ref.txt')
writeLines(auths, 'C:/Users/Marcus.SCCWRP2K/Desktop/authunclean.txt')
writeLines(num, "C:/Users/Marcus.SCCWRP2K/Desktop/numunclean.txt")
| /R/01_webscrape.R | no_license | tbep-tech/tbep-refs | R | false | false | 776 | r | # web scrape --------------------------------------------------------------
library(tidyverse)
library(rvest)
pdftab <- read_html("https://tbeptech.org/data/tech-pubs") %>%
html_nodes('p')
txt <- pdftab %>%
html_text() %>%
.[1:263]
ref <- pdftab %>%
html_nodes('a') %>%
html_attr('href')
auths <- gsub('^.*\\((.*)\\).*$|^.*(Prepared by.*\\.).*$', '\\1\\2', txt)
txt <- gsub('\\(.*\\)|Prepared by.*', '', txt)
num <- gsub('^.*\\#([0-9]*\\-[0-9]*).*$|^.*\\#\\s([0-9]*\\-[0-9]*).*$', '\\1\\2', txt)
writeLines(txt, 'C:/Users/Marcus.SCCWRP2K/Desktop/unclean.txt')
writeLines(ref, 'C:/Users/Marcus.SCCWRP2K/Desktop/ref.txt')
writeLines(auths, 'C:/Users/Marcus.SCCWRP2K/Desktop/authunclean.txt')
writeLines(num, "C:/Users/Marcus.SCCWRP2K/Desktop/numunclean.txt")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pathways.R
\name{writePathways}
\alias{writePathways}
\title{Writes a set of pathways (list of vectors) to a GMT file.}
\usage{
writePathways(pathways, gmtFile)
}
\arguments{
\item{pathways}{(list) named list of vectors}
\item{gmtFile}{(char) name of output GMT file}
}
\value{
GMT-formatted file. Rows represent pathways. Columns represent:
\itemize{
\item pathway ID;
\item description;
\item a list of tab-delimited genes
}
}
\description{
Writes a set of pathways (list of vectors) to a GMT file.
}
\examples{
data(pathwaysXLSX)
writePathways(pathwaysXLSX, tempfile("pathwaysXLSX", fileext = ".gmt"))
}
| /man/writePathways.Rd | permissive | rosscm/fedup | R | false | true | 698 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pathways.R
\name{writePathways}
\alias{writePathways}
\title{Writes a set of pathways (list of vectors) to a GMT file.}
\usage{
writePathways(pathways, gmtFile)
}
\arguments{
\item{pathways}{(list) named list of vectors}
\item{gmtFile}{(char) name of output GMT file}
}
\value{
GMT-formatted file. Rows represent pathways. Columns represent:
\itemize{
\item pathway ID;
\item description;
\item a list of tab-delimited genes
}
}
\description{
Writes a set of pathways (list of vectors) to a GMT file.
}
\examples{
data(pathwaysXLSX)
writePathways(pathwaysXLSX, tempfile("pathwaysXLSX", fileext = ".gmt"))
}
|
PISCR.fn = function(
capture_1, capture_2, gender, activity, traploc,
ndraws = 20, burnin = 10, M = 400, scale = 10^4,
nloopL = 50, n.update = 20, batchsize = 1000, mindelta = 0.01,
sigmaOfProposal.logitphi = 0.05, sigmaOfProposal.logitp0 = 0.08,
sigmaOfProposal.logsigmam = 0.02, sigmaOfProposal.logsigmaf = 0.02,
sigmaOfProposal.L = 4, sigmaOfProposal.s = rep(3, 400), R = 5)
{
library(mvtnorm)
library(MCMCpack)
library(abind)
library(ggplot2)
library(raster)
library(maptools)
library(coda)
library(spatstat)
library(gtools)
options(digits = 8)
source("utility.functions.R")
################################################
area <- activity@polygons[[1]]@Polygons[[1]]@area/1000000
activity1<-as.owin(activity)
trap = traploc[,'LOC_ID']
traploc.xx = traploc[,'X_COORD']
traploc.yy = traploc[,'Y_COORD']
K = dim(traploc)[1]
J = dim(traploc)[2] - 3
dimnames(traploc)[[2]][4:dim(traploc)[2]] = c(1:J)
# convert units of locations (from meters to km) and adjust locations to have x- and y- minima at zero
traploc.x = traploc.xx/scale
traploc.y = traploc.yy/scale
trap.locations=cbind(traploc.x, traploc.y)
left.obs = makedata3d(capture_1,traploc)
numl = dim(left.obs)[1]
left = abind(left.obs, array(0, dim = c( M - numl, K, J)), along = 1)
right.obs = makedata3d(capture_2,traploc)
numr = dim(right.obs)[1]
right = abind(right.obs, array(0, dim = c( M - numr, K, J)), along = 1)
active_trapso=as.matrix(traploc[,4:dim(traploc)[2]])
Jvec = apply(active_trapso, 1, sum)
msk = active_trapso
msk3d = array(NA, c(M, K, J))
for (i in 1:M) msk3d[i, 1:K, 1:J] = msk[1:K, 1:J]
left = left * msk3d
right = right * msk3d
#==========================================================
# Initializing the parameters and processing the variables
logitphi = logit(0.8); logitp0 = logit(0.1); logsigmam = log(0.2); logsigmaf = log(0.15); psi = 0.5; theta = 0.5
## HANDLING THE SEX INFORMATION ##
sexl.obs = gender[1:numl, 2] # length numl
sexr.obs = gender[1:numr, 4] # length numr
sexl = sexr = rep(NA, M)
sexl[c(1:length(sexl.obs))[sexl.obs == 'Male']] = 1
sexl[c(1:length(sexl.obs))[sexl.obs == 'Female']] = 0
sexr[c(1:length(sexr.obs))[sexr.obs == 'Male']] = 1
sexr[c(1:length(sexr.obs))[sexr.obs == 'Female']] = 0
l.guys = levels(gender[,1])
IDfixed = suppressWarnings(sum(!is.na(as.numeric(l.guys))))
if(IDfixed == 0) known = 'none'
if(IDfixed != numl | IDfixed != numr) known = 'some'
if(IDfixed == numl & IDfixed == numr) known = 'ALL'
# Also re-label data sets if needed so that the left side always has more individuals
if (known!="ALL" & numl < numr){
a = left; left = right; right = a
b = numl; numl = numr; numr = b
cc = sexl; sexl = sexr; sexr = cc
}
##################################################
numlm = sum(sexl == 1, na.rm = T); numlf = sum(sexl == 0, na.rm = T)
numrm = sum(sexr == 1, na.rm = T); numrf = sum(sexr == 0, na.rm = T)
sexl1 = c(); sexr1 = c()
if(numlm < numrm) sexl1 = rep(1, numrm - numlm)
if(numrm < numlm) sexr1 = rep(1, numlm - numrm)
if(numlf < numrf) sexl1 = c(sexl1, rep(0, numrf - numlf))
if(numrf < numlf) sexr1 = c(sexr1, rep(0, numlf - numrf))
missing.sex.guys = is.na(sexl)
nnn = max(numlm, numrm) + max(numlf, numrf)
if(numl < nnn){ sexl[(numl+1) : nnn] = sample(sexl1, nnn - numl, replace = F)}
if(numr < nnn){ sexr[(numr+1) : nnn] = sample(sexr1, nnn - numr, replace = F)}
sexl[(nnn+1) : M] = sexr[(nnn+1) : M] = rbinom(M - nnn, 1, theta)
if(sum(is.na(sexl)) > 0) {sexl[is.na(sexl)] = sexr[is.na(sexr)] = rbinom(sum(is.na(sexl)), 1, theta)}
# Latent variable vector for gender
sex = sexl # M x 1
#######################################################
if (known != 'ALL'){ L = Lvec(left, right, numl, numr, sexl, sexr, trap.locations, IDfixed, nloopL)}
s.left <- cbind((runifpoint(n = M , win = activity1))[[3]]/scale,
(runifpoint(n = M , win = activity1))[[4]]/scale)
# =============================
ncapl = rowSums(left)
ncapr = rowSums(right)
if (known != 'ALL') {ncapr.star = ncapr[order(L)]; right.star = right[order(L),,]}
if (known == 'ALL') {ncapr.star = ncapr; right.star = right}
ncap = ncapl + ncapr.star
n0mat = apply(left + right.star, c(1,2), function(a){sum(a > 0)})
n0vec = rowSums(n0mat)
zero.guys = (ncapl + ncapr.star) == 0
numcap = sum((ncapl + ncapr.star) > 0)
z = c(rep(1, numcap), rbinom(M - numcap, 1, psi))
pimat = pimatfn(logitp0, s.left, trap.locations, sex, logsigmam, logsigmaf)
llik = logLfn.da(ncap, n0mat, n0vec, logitphi, pimat, z, J)
# .... assign values for adaptive Metropolis - Hastings sampling
batch = 0
delta = mindelta
continueMCMC = TRUE
draw = drawphi = drawp0 = drawsigmam = drawsigmaf = drawL = drawsex = drawact = 0
naccept.logitphi = naccept.logitp0 = naccept.logsigmam = naccept.logsigmaf = naccept.L = 0
naccept.s = rep(0,M)
ts = format(Sys.time(), "%d%m%y_%H%M%S")
folderName = paste("partiaID.da_", ts, sep = "")
dir.create(path = folderName)
start.time = Sys.time()
cat('Begin MCMC sampling:', '\n', '\n')
cat(c('N', 'psi', 'N.Male', 'theta', 'phi', 'p0', 'sigmam', 'sigmaf'), sep = ',', file = paste(folderName, '/markovchain.txt', sep = ""), append = TRUE)
cat('\n', file = paste(folderName, '/markovchain.txt', sep = ""), append = TRUE)
cat(c(paste('sex', 1:M, sep = '')), sep = ',', file = paste(folderName, '/markovchain.sex.txt', sep = ''), append = TRUE)
cat('\n', file = paste(folderName, '/markovchain.sex.txt', sep = ''), append = TRUE)
cat(c(paste('z', 1:M, sep = '')), sep = ',', file = paste(folderName, '/markovchain.z.txt', sep = ''), append = TRUE)
cat('\n', file = paste(folderName, '/markovchain.z.txt', sep = ''), append = TRUE)
cat(c(paste('sx', 1:M, sep = '')), sep = ',', file = paste(folderName, '/markovchain.sx.txt', sep = ''), append = TRUE)
cat('\n', file = paste(folderName, '/markovchain.sx.txt', sep = ''), append = TRUE)
cat(c(paste('sy', 1:M, sep = '')), sep = ',', file = paste(folderName, '/markovchain.sy.txt', sep = ''), append = TRUE)
cat('\n', file = paste(folderName, '/markovchain.sy.txt', sep = ''), append = TRUE)
cat(c(paste('loglik', 1:M, sep = '')), sep = ',', file = paste(folderName, '/markovchain.loglikelihood.txt', sep = ''), append = TRUE)
cat('\n', file = paste(folderName, '/markovchain.loglikelihood.txt', sep = ''), append = TRUE)
if (known != 'ALL')
{
cat(c(paste("L", 1:M, sep = '')), sep = ',', file = paste(folderName, '/markovchainL.txt', sep = ""), append = TRUE)
cat('\n', file = paste(folderName, '/markovchainL.txt', sep = ""), append = TRUE)
}
while (continueMCMC) {
draw = draw + 1
drawinterval = 500
if (draw == round(draw/drawinterval)*drawinterval) cat('..... drawing sample #', draw, '\n')
# update the increment/decrement for adaptive Metropolis - Hastings samplers
if (floor(draw/batchsize)*batchsize == draw){
batch = batch + 1
if (1/sqrt(batch) < mindelta) delta = 1/sqrt(batch)
}
# update phi
drawphi = drawphi +1
loglik.curr.phi = llik
logitphi.cand = rnorm(1, logitphi, sigmaOfProposal.logitphi)
loglik.cand.phi = logLfn.da(ncap, n0mat, n0vec, logitphi.cand, pimat, z, J)
lognum = loglik.cand.phi + log(exp(logitphi.cand)/((1+exp(logitphi.cand))^2))
logden = loglik.curr.phi + log(exp(logitphi)/((1+exp(logitphi))^2))
if (logden == -Inf){
logitphi = logitphi.cand
loglik.curr.phi = loglik.cand.phi
naccept.logitphi = naccept.logitphi + 1
}
if (logden != -Inf){
logR = lognum - logden
if (runif(1,0,1) <= exp(logR)){
logitphi = logitphi.cand
loglik.curr.phi = loglik.cand.phi
naccept.logitphi = naccept.logitphi + 1
}
}
if (floor(draw/batchsize)*batchsize == draw){
SigmaDiff = ifelse(naccept.logitphi > 0.44*batchsize, exp(2*delta), exp(-2*delta))
if(draw <= burnin){ sigmaOfProposal.logitphi = sigmaOfProposal.logitphi * SigmaDiff}
cat("proposal sd of logitphi = ", sigmaOfProposal.logitphi, ' ')
cat("naccept.logitphi = ", naccept.logitphi, '\n')
naccept.logitphi = 0
}
# update p0
drawp0 = drawp0 +1
loglik.curr.p0 = loglik.curr.phi
logitp0.cand = rnorm(1, logitp0, sigmaOfProposal.logitp0)
pimat.cand = pimatfn(logitp0.cand, s.left, trap.locations, sex, logsigmam, logsigmaf)
loglik.cand.p0 = logLfn.da(ncapl + ncapr.star, n0mat, n0vec, logitphi, pimat.cand, z, J)
lognum = loglik.cand.p0 + log(exp(logitp0.cand)/((1+exp(logitp0.cand))^2))
logden = loglik.curr.p0 + log(exp(logitp0)/((1+exp(logitp0))^2))
if (logden == -Inf){
logitp0 = logitp0.cand
pimat = pimat.cand
loglik.curr.p0 = loglik.cand.p0
naccept.logitp0 = naccept.logitp0 + 1
}
if (logden != -Inf){
logR = lognum - logden
if (runif(1,0,1) <= exp(logR)){
logitp0 = logitp0.cand
pimat = pimat.cand
loglik.curr.p0 = loglik.cand.p0
naccept.logitp0 = naccept.logitp0 + 1
}
}
if (floor(draw/batchsize)*batchsize == draw){
SigmaDiff = ifelse(naccept.logitp0 > 0.44*batchsize, exp(2*delta), exp(-2*delta))
if(draw <= burnin){ sigmaOfProposal.logitp0 = sigmaOfProposal.logitp0 * SigmaDiff}
cat("proposal sd of logitp0 = ", sigmaOfProposal.logitp0, ' ')
cat("naccept.logitp0 = ", naccept.logitp0, '\n')
naccept.logitp0 = 0
}
# update sigmam -- male detection/movement scale, sampled as log(sigma)
drawsigmam = drawsigmam +1
loglik.curr.sigmam = loglik.curr.p0
logsigmam.cand = rnorm(1, logsigmam, sigmaOfProposal.logsigmam)
pimat.cand = pimatfn(logitp0, s.left, trap.locations, sex, logsigmam.cand, logsigmaf)
loglik.cand.sigmam = logLfn.da(ncap, n0mat, n0vec, logitphi, pimat.cand, z, J)
# exp(logsigma)/R: Jacobian of the log transform combined with what is
# presumably a Uniform(0, R) prior density on sigma -- confirm prior spec.
lognum = loglik.cand.sigmam + log(exp(logsigmam.cand)/R)
logden = loglik.curr.sigmam + log(exp(logsigmam)/R)
# Degenerate current state: accept the candidate outright.
if (logden == -Inf){
logsigmam = logsigmam.cand
pimat = pimat.cand
loglik.curr.sigmam = loglik.cand.sigmam
naccept.logsigmam = naccept.logsigmam + 1
}
# Metropolis-Hastings accept/reject.
if (logden != -Inf){
logR = lognum - logden
if (runif(1,0,1) <= exp(logR)){
logsigmam = logsigmam.cand
pimat = pimat.cand
loglik.curr.sigmam = loglik.cand.sigmam
naccept.logsigmam = naccept.logsigmam + 1
}
}
# Batch-wise adaptive tuning of the proposal sd (burn-in only).
if (floor(draw/batchsize)*batchsize == draw){
SigmaDiff = ifelse(naccept.logsigmam > 0.44*batchsize, exp(2*delta), exp(-2*delta))
if(draw <= burnin){ sigmaOfProposal.logsigmam = sigmaOfProposal.logsigmam * SigmaDiff}
cat("proposal sd of logsigmam = ", sigmaOfProposal.logsigmam, ' ')
cat("naccept.logsigmam = ", naccept.logsigmam, '\n')
naccept.logsigmam = 0
}
# update sigmaf -- female detection/movement scale; mirrors the sigmam step.
drawsigmaf = drawsigmaf +1
loglik.curr.sigmaf = loglik.curr.sigmam
logsigmaf.cand = rnorm(1, logsigmaf, sigmaOfProposal.logsigmaf)
pimat.cand = pimatfn(logitp0, s.left, trap.locations, sex, logsigmam, logsigmaf.cand)
loglik.cand.sigmaf = logLfn.da(ncap, n0mat, n0vec, logitphi, pimat.cand, z, J)
# exp(logsigma)/R: Jacobian of the log transform combined with what is
# presumably a Uniform(0, R) prior density on sigma -- confirm prior spec.
lognum = loglik.cand.sigmaf + log(exp(logsigmaf.cand)/R)
logden = loglik.curr.sigmaf + log(exp(logsigmaf)/R)
# Degenerate current state: accept the candidate outright.
if (logden == -Inf){
logsigmaf = logsigmaf.cand
pimat = pimat.cand
loglik.curr.sigmaf = loglik.cand.sigmaf
naccept.logsigmaf = naccept.logsigmaf + 1
}
# Metropolis-Hastings accept/reject.
if (logden != -Inf){
logR = lognum - logden
if (runif(1,0,1) <= exp(logR)){
logsigmaf = logsigmaf.cand
pimat = pimat.cand
loglik.curr.sigmaf = loglik.cand.sigmaf
naccept.logsigmaf = naccept.logsigmaf + 1
}
}
# Batch-wise adaptive tuning of the proposal sd (burn-in only).
if (floor(draw/batchsize)*batchsize == draw){
SigmaDiff = ifelse(naccept.logsigmaf > 0.44*batchsize, exp(2*delta), exp(-2*delta))
if(draw <= burnin){ sigmaOfProposal.logsigmaf = sigmaOfProposal.logsigmaf * SigmaDiff}
cat("proposal sd of logsigmaf = ", sigmaOfProposal.logsigmaf, ' ')
cat("naccept.logsigmaf = ", naccept.logsigmaf, '\n')
naccept.logsigmaf = 0
}
# update L -- latent left/right identity mapping; only sampled when some
# identities are unknown (known != "ALL").
if (known != "ALL"){
loglik.curr.L = loglik.curr.sigmaf
drawL = drawL +1
# NOTE(review): indx1 appears unused in this section.
indx1 = (IDfixed + 1):M
# Build the set of right-side indices whose link may be re-proposed this
# iteration: rightset1 = included, captured right histories beyond the
# fixed-identity block; rightset2 = included but uncaptured; rightset3 =
# right partners currently linked to captured left individuals.
if( IDfixed < numr) rightset1 = order(L)[z == 1 & order(L) >= (IDfixed+1) & order(L) <= numr]
if( IDfixed >= numr) rightset1 = c()
rightset2 = order(L)[z == 1 & order(L) > numr] # real right individuals who went uncaptured
# Subsample the uncaptured set to at most n.update swaps per iteration.
if(length(rightset2) >1) rightset2 = sample(rightset2, min(length(rightset2), n.update), replace = F)
rightset3 = c()
if(IDfixed < numl){
leftset3 = c(1:M)[z == 1 & c(1:M)>IDfixed & c(1:M) <= numl]
if(length(leftset3) > 0) for(ii in leftset3) rightset3 = c(rightset3, c(1:M)[L == ii])
# right individuals to whom left captured individuals are linked with
}
rightset = sort(unique(c(rightset1, rightset2, rightset3)))
# Propose swapping the left partner of each selected right individual.
for (r.guy1 in rightset){
l.swap.out = L[r.guy1]
# Candidate left partners: included, non-fixed, same sex as the current
# partner; weighted by a Gaussian kernel of the distance between activity
# centers so nearby swaps are favored.
possible.L = c(1:M)[z == 1 & c(1:M) > IDfixed & sex == sex[l.swap.out]]
dv = sqrt( (s.left[l.swap.out,1] - s.left[,1]) ^ 2 + (s.left[l.swap.out,2] - s.left[,2]) ^ 2 )
wt.possible.L = exp( - (dv ^ 2) / sigmaOfProposal.L ^ 2)[z == 1 & c(1:M) > IDfixed & sex == sex[l.swap.out]]
if (length(possible.L) > 1) l.swap.in = sample( possible.L, 1, replace = F, prob = wt.possible.L)
if (length(possible.L) == 1) next
if (length(possible.L) == 0) next # this case will never happen since l.swap.out is present there at the centre of the circle dv < 5
if (l.swap.in == l.swap.out) next # saves computation time in for loop
# Forward and reverse proposal probabilities for the Hastings ratio.
jump.prob.L = wt.possible.L[which(possible.L == l.swap.in, arr.ind = T)] / sum(wt.possible.L) # q(state.curr, state.cand)
dv.back = sqrt( (s.left[l.swap.in,1] - s.left[,1]) ^ 2 + (s.left[l.swap.in,2] - s.left[,2]) ^ 2 )
wt.possible.back.L = exp( - (dv.back ^ 2) / sigmaOfProposal.L ^ 2)[z == 1 & c(1:M) > IDfixed & sex == sex[l.swap.in]]
# Note that, sex[l.swap.out] = sex[l.swap.in], and z[l.swap.out] may be != z[l.swap.in]
jump.prob.back.L = wt.possible.back.L[which(possible.L == l.swap.out, arr.ind = T)] / sum(wt.possible.back.L) # q(state.cand, state.curr)
## Which right encounter history is currently associated with left guy s.swap.in?
r.guy2 = c(1:M)[L == l.swap.in] # which(L == l.swap.in, arr.ind = T)
# if (l.swap.in <=IDfixed) browser()
# Swap the two links and recompute every link-dependent summary.
L.cand = L
L.cand[r.guy1] = l.swap.in
L.cand[r.guy2] = l.swap.out
right.star.cand = right[order(L.cand),,]
ncapr.star.cand = ncapr[order(L.cand)]
ncap.cand = ncapl + ncapr.star.cand
n0mat.cand = apply(left + right.star.cand, c(1,2), function(a){sum(a > 0)})
n0vec.cand = rowSums(n0mat.cand) # apply(n0mat.cand, 1, sum)
loglik.cand.L = logLfn.da(ncap.cand, n0mat.cand, n0vec.cand, logitphi, pimat, z, J)
# Metropolis-Hastings ratio including the asymmetric proposal terms.
lognum = loglik.cand.L + log(jump.prob.back.L)
logden = loglik.curr.L + log(jump.prob.L)
# Accept outright when the current state has zero density.
if (logden == -Inf){
L = L.cand
loglik.curr.L = loglik.cand.L
right.star = right.star.cand
ncapr.star = ncapr.star.cand
ncap = ncap.cand
n0mat = n0mat.cand
n0vec = n0vec.cand
naccept.L = naccept.L + 1
}
if (logden != -Inf){
logR = lognum - logden
if (runif(1,0,1) <= exp(logR)){
L = L.cand
loglik.curr.L = loglik.cand.L
right.star = right.star.cand
ncapr.star = ncapr.star.cand
ncap = ncap.cand
n0mat = n0mat.cand
n0vec = n0vec.cand
naccept.L = naccept.L + 1
}
}
} # end of for (r.guy1 in rightset)
# Batch-wise adaptive tuning of the swap-kernel scale (burn-in only).
if (floor(draw/batchsize)*batchsize == draw){
SigmaDiff = ifelse(naccept.L > 0.44*batchsize, exp(2*delta), exp(-2*delta))
if(draw <= burnin){ sigmaOfProposal.L= sigmaOfProposal.L * SigmaDiff}
cat(paste("proposal sd of L = ", sep = ""), sigmaOfProposal.L, ' ')
cat(paste("naccept.L = ", sep = ""), naccept.L, '\n')
naccept.L = 0
}
} # end of if (known!='ALL')
# update z -- data-augmentation inclusion indicators. Only never-captured
# individuals have uncertain z; anyone with a capture is included for sure.
zero.guys = (ncapl + ncapr.star) == 0
# Per-trap probability of no detection over all J occasions; prob0 is then
# the product over traps, i.e. the probability of an empty capture history.
ncprob = (1 - expit(logitphi) * (2 - expit(logitphi)) * pimat) ^ J
prob0 = exp(rowSums(log(ncprob)))
# Full-conditional P(z = 1 | empty history), then Bernoulli draws.
fc = prob0*psi / (prob0*psi + 1 - psi)
z[zero.guys] = rbinom(sum(zero.guys), 1, fc[zero.guys])
z[!zero.guys] = 1
# update psi -- conjugate Beta full-conditional draw for the inclusion prob.
psi = rbeta(1, 1 + sum(z), 1 + (M - sum(z) ) )
# update sex -- latent gender for individuals whose sex was not observed.
drawsex = drawsex +1
D1 = e2dist(s.left, trap.locations)
# Detection-probability matrices under the male and the female scale.
pimat.m = expit(logitp0) * exp(- D1 * D1 / (2 * (exp(logsigmam) ^ 2))) # M x K
Am1 = rowSums(log(pimat.m ^ n0mat), na.rm = T) # M x 1
Am2 = rowSums(log((1 - expit(logitphi) * (2 - expit(logitphi)) * pimat.m) ^ (J-n0mat)), na.rm = T) # M x 1
# Clamp -Inf log terms to a huge negative finite value so exp() yields 0
# rather than NaN in the mixture weights below.
Am1[Am1 == -Inf] = -10^200; Am2[Am2 == -Inf] = -10^200; # .Machine$double.xmax
AAm = exp(Am1 + Am2) # M x 1
pimat.f = expit(logitp0) * exp(- D1 * D1 / (2 * (exp(logsigmaf) ^ 2))) # M x K
Af1 = rowSums(log(pimat.f ^ n0mat), na.rm = T) # M x 1
Af2 = rowSums(log((1 - expit(logitphi) * (2 - expit(logitphi)) * pimat.f) ^ (J-n0mat)), na.rm = T) # M x 1
Af1[Af1 == -Inf] = -10^200; Af2[Af2 == -Inf] = -10^200; # .Machine$double.xmax
AAf = exp(Af1 + Af2) # M x 1
# theta0 = full-conditional probability of male sex: two-component mixture
# with weight theta on the male-scale likelihood.
AA = theta * AAm + (1-theta) * AAf # M x 1
theta0 = theta * AAm / AA
# Draw sex only for included (z == 1), missing-sex individuals with AA > 0.
mis = (z == 1) & missing.sex.guys & AA > 0
sex[mis] = rbinom(sum(mis), 1, theta0[mis])
# Refresh the detection matrix under the (possibly updated) sex vector.
pimat = pimatfn(logitp0, s.left, trap.locations, sex, logsigmam, logsigmaf)
# update theta -- conjugate Beta full-conditional draw for P(male).
theta = rbeta( 1, 1 + sum(z*sex), 1 + sum(z*(1 - sex)) )
# update the activity centers -- one Metropolis step per included individual.
drawact = drawact +1
# Bivariate normal random-walk candidates for all M centers at once.
s.left.cand = as.matrix(cbind(rnorm(M , s.left[, 1], sigmaOfProposal.s), rnorm(M , s.left[, 2], sigmaOfProposal.s)))
# Map candidates back to original units and test habitat-polygon membership;
# candidates outside the polygon are implicitly rejected because the loop
# below only visits rows with a non-NA overlay result.
Scoord = SpatialPoints(s.left.cand*scale, CRS("+proj=utm +zone=43 +datum=WGS84"))
SinPoly = over(Scoord, activity) # dim M x 1
# Sinpoly[i] equals the index among the mask points if the point i is inside the state space mask polygon, if it is not then Sinpoly = NA
for (i in c(1:M)[z == 1 & !is.na(SinPoly[,1])]) {
# Use the movement scale matching this individual's current sex.
logsigma_i = ifelse(sex[i] == 1, logsigmam, logsigmaf)
DD.cand = sqrt((s.left.cand[i,1] - trap.locations[,1]) ^ 2 + (s.left.cand[i,2] - trap.locations[,2]) ^ 2) # a vector Kx1
pi.cand = expit(logitp0) * exp(-DD.cand * DD.cand / (2 * (exp(logsigma_i) ^ 2 )))
# A3 reduces algebraically to log(pi.cand^n0mat[i,]) and A4 to the log of
# the non-detection term, written through log(exp(log(1 + x)) - 1) = log(x).
yyy3 = log(1 + pi.cand^n0mat[i,])
A3 = log(exp(yyy3) - 1)
yyy4 = log(1 + (1 - expit(logitphi)* (2 - expit(logitphi)) * pi.cand)^(J - n0mat[i,]))
A4 = log(exp(yyy4) - 1)
# Clamp -Inf to a huge negative finite value to keep the sums finite.
A3[A3 == -Inf] = -10^200
A4[A4 == -Inf] = -10^200
loglik.cand.s = z[i] * (sum(A3+A4))
# Same computation for the current center, using the cached pimat row.
yyy3 = log(1 + pimat[i,]^n0mat[i,])
A3 = log(exp(yyy3) - 1)
yyy4 = log(1 + (1 - expit(logitphi)* (2 - expit(logitphi)) * pimat[i,])^(J - n0mat[i,]))
A4 = log(exp(yyy4) - 1)
A3[A3 == -Inf] = -10^200
A4[A4 == -Inf] = -10^200
loglik.curr.s = z[i] * (sum(A3+A4))
lognum = loglik.cand.s
logden = loglik.curr.s
# Accept outright when the current state has zero density.
if (logden == -Inf){
s.left[i, ] = s.left.cand[i, ]
naccept.s[i] = naccept.s[i] +1
}
# Metropolis accept/reject (symmetric random-walk proposal).
if (logden != -Inf){
logR = lognum - logden
if (runif(1,0,1) <= exp(logR)){
s.left[i, ] = s.left.cand[i, ]
naccept.s[i] = naccept.s[i] +1
}
}
# Per-individual batch-wise adaptive tuning of the proposal sd (burn-in only).
if (floor(draw/batchsize)*batchsize == draw){
SigmaDiff = ifelse(naccept.s[i] > 0.44*batchsize, exp(2*delta), exp(-2*delta))
if(draw <= burnin){ sigmaOfProposal.s[i] = sigmaOfProposal.s[i] * SigmaDiff}
naccept.s[i] = 0
}
}
# Refresh the detection matrix and compute this iteration's log-likelihood.
pimat = pimatfn(logitp0, s.left, trap.locations, sex, logsigmam, logsigmaf)
llik = logLfn.da(ncap, n0mat, n0vec, logitphi, pimat, z, J)
# Append one row of monitored scalars to markovchain.txt: N = sum(z), psi,
# N.Male = sum(sex*z), theta, phi, p0, and the sex-specific sigmas in km.
cat(c(sum(z), psi, sum(sex*z), theta,
expit(logitphi), expit(logitp0),
exp(logsigmam)*(scale/1000), # output in kilometers
exp(logsigmaf)*(scale/1000) # output in kilometers
), sep = ',', file = paste(folderName, '/markovchain.txt', sep = ""), append = TRUE)
cat('\n', file = paste(folderName, '/markovchain.txt', sep = ""), append = TRUE)
# Append the full latent-state vectors (sex, z, center coordinates) and the
# log-likelihood to their own files, one row per draw.
cat(c(sex), sep = ',', file = paste(folderName, '/markovchain.sex.txt', sep = ''), append = TRUE)
cat('\n', file = paste(folderName, '/markovchain.sex.txt', sep = ''), append = TRUE)
cat(c(z), sep = ',', file = paste(folderName, '/markovchain.z.txt', sep = ''), append = TRUE)
cat('\n', file = paste(folderName, '/markovchain.z.txt', sep = ''), append = TRUE)
cat(c(s.left[,1]), sep = ',', file = paste(folderName, '/markovchain.sx.txt', sep = ''), append = TRUE)
cat('\n', file = paste(folderName, '/markovchain.sx.txt', sep = ''), append = TRUE)
cat(c(s.left[,2]), sep = ',', file = paste(folderName, '/markovchain.sy.txt', sep = ''), append = TRUE)
cat('\n', file = paste(folderName, '/markovchain.sy.txt', sep = ''), append = TRUE)
cat(llik, sep = ',', file = paste(folderName, '/markovchain.loglikelihood.txt', sep = ''), append = TRUE)
cat('\n', file = paste(folderName, '/markovchain.loglikelihood.txt', sep = ''), append = TRUE)
# The identity mapping L is only a sampled latent quantity when some
# identities are unknown.
if (known != 'ALL')
{
cat(c(L), sep = ',', file = paste(folderName, '/markovchainL.txt', sep = ""), append = TRUE)
cat('\n', file = paste(folderName, '/markovchainL.txt', sep = ""), append = TRUE)
}
# Terminate the sampler after the requested number of draws.
if (draw == ndraws){
cat('Completed ', ndraws, ' draws of MCMC algorithm', '\n')
numOfDraws = 0
if (numOfDraws == 0) continueMCMC = FALSE
}
} # end of MCMC loop
cat('MCMC sampling is completed!', '\n', '\n')
end.time = Sys.time()
(time.taken = end.time - start.time); print(time.taken)
#==================================================================================
# Post-processing: read the chain back from disk, drop burn-in, and compute
# a Geweke convergence diagnostic plus posterior summaries.
post = read.csv(paste(folderName, '/markovchain.txt', sep = ""), sep = ",", header = T)
post = post[(burnin + 1):ndraws,]
geweke_diag = geweke.diag(post, frac1 = 0.2, frac2 = 0.4)
suppressWarnings(write.table(round(unlist(geweke_diag), 3), sep = ' ',
col.names = F, append = F,
file = paste(folderName, '/geweke.diag.txt', sep = "")))
# Empirical posterior pmf of the population size N over its support 0..M.
N.chain = post[, 'N']
Nvalues = 0:M
probN = rep(0, (M + 1))
for (i in 1:(M + 1)){ probN[i] = length(N.chain[N.chain == (i - 1)])/(ndraws - burnin)}
post.mode.N = Nvalues[probN == max(probN)][1]
Bayes.Nmean = mean(N.chain, na.rm = T)
Bayes.Nvar = mean(N.chain ^ 2, na.rm = T) - (mean(N.chain, na.rm = T)) ^ 2
# NOTE(review): 'ind' is computed but never used below.
ind = cumsum(probN) >= 0.025 & cumsum(probN) <= 0.975
# Equal-tailed 95% credible limits for N (type = 1: inverse-ecdf quantiles).
Bayes.Nlower = quantile(N.chain, 0.025, na.rm = T, type = 1)
Bayes.Nupper = quantile(N.chain, 0.975, na.rm = T, type = 1)
# Posterior pmf plot for N.
fname5 = paste(folderName, "/mcmc plots of N.jpeg", sep = "")
ylimits = c(0, max(probN))
jpeg(fname5, width = 1000, height = 1000, units = "px", pointsize = 12, quality = 100)
plot(Nvalues[1:M], probN[1:M], type = 'h', ylim = ylimits, xlab = 'N', ylab = 'probability' )
dev.off()
# Tabulate posterior summaries: N summary row plus per-column chain stats.
out = as.matrix(c(Bayes.Nmean, sqrt(Bayes.Nvar), Bayes.Nlower, post.mode.N, Bayes.Nupper))
dimnames(out) = list(c('Mean.N', 'SE.N', '2.5%', 'post.mode', '97.5%'),c('HB Estimates of N'))
prob.quantiles = c(0.025, 0.5, 0.975)
post.stats = cbind(apply(post,2,mean, na.rm = T), apply(post,2,sd, na.rm = T), t(apply(post, 2, quantile, probs = prob.quantiles, na.rm = T,type = 1)) )
prob.names = paste(as.character(100*prob.quantiles), '%', sep = '')
dimnames(post.stats)[2] = list(c('Mean.Chain', 'SD.Chain', prob.names))
# Monte Carlo standard errors of the mean, sd, and the three quantiles for
# every column (medQuantile/lowerQuantile/upperQuantile are helpers defined
# elsewhere -- presumably in utility.functions.R; confirm).
mcse.mean.vec = mcse.sd.vec = rep(0, dim(post)[2])
mcse.lq.vec = mcse.uq.vec = mcse.med.vec = rep(0, dim(post)[2])
for (i in 1:dim(post)[2]){
postvec = post[,i]
mcse.mean.vec[i] = mcse(postvec[!is.na(postvec)], mean)$se
mcse.sd.vec[i] = mcse(postvec[!is.na(postvec)], sd)$se
mcse.med.vec[i] = mcse(postvec[!is.na(postvec)], medQuantile)$se
mcse.lq.vec[i] = mcse(postvec[!is.na(postvec)], lowerQuantile)$se
mcse.uq.vec[i] = mcse(postvec[!is.na(postvec)], upperQuantile)$se
}
mcse.mean.vec = unlist(mcse.mean.vec)
mcse.sd.vec = unlist(mcse.sd.vec)
mcse.med.vec = unlist(mcse.med.vec)
mcse.lq.vec = unlist(mcse.lq.vec)
mcse.uq.vec = unlist(mcse.uq.vec)
mcse.mat = cbind(mcse.mean.vec, mcse.sd.vec, mcse.lq.vec, mcse.med.vec, mcse.uq.vec)
# Assemble one CSV with the N estimates, chain summaries, MCSEs, and the
# run configuration (M, numl, numr, IDfixed, known, K, J, ndraws, burnin).
HB_estimates = MCSE_estimates = c('', '', '', '', '')
dim.names = c('Mean.Chain', 'SD.Chain', prob.names)
dimnames(mcse.mat) = dimnames(post.stats)
information = as.data.frame(c(M, numl , numr, IDfixed, known, K, J, ndraws, burnin))
dimnames(information) = list(c('M', 'numl', 'numr', 'IDfixed', 'known', 'K', 'J', 'ndraws', 'burnin'), c('info'))
out.final = rbind(t(out), HB_estimates, dim.names, post.stats, MCSE_estimates, dim.names, mcse.mat)
write.csv(rbind(cbind(out.final, matrix('',nrow(out.final), nrow(information) - ncol(out.final))),
dimnames(information)[[1]], t(information)),
file = paste(folderName, '/EstimatesOfDerivedParam.csv', sep = ""), quote = F,row.names = T)
#============================================================================================================
# Trace plots for each monitored parameter, drawn from the full chain
# (re-read from disk, burn-in included).
post = read.csv(paste(folderName, '/markovchain.txt', sep = ""), sep = ",", header = T)
post.mcmc = as.mcmc(post)
fname6 = paste(folderName, "/traceplots_N.jpeg", sep = "")
jpeg(fname6, width = 1000, height = 1000, units = "px", pointsize = 12, quality = 100)
traceplot(post.mcmc[,'N'], xlab = "Iterations", ylab = "N", main = "Traceplot of N",
cex.main = 2, cex.lab = 2, cex.axis = 2)
dev.off()
fname6 = paste(folderName, "/traceplots_psi.jpeg", sep = "")
jpeg(fname6, width = 1000, height = 1000, units = "px", pointsize = 12, quality = 100)
traceplot(post.mcmc[,'psi'], xlab = "Iterations", ylab = "psi", main = "Traceplot of psi",
cex.main = 2, cex.lab = 2, cex.axis = 2)
dev.off()
fname6 = paste(folderName, "/traceplots_N.Male.jpeg", sep = "")
jpeg(fname6, width = 1000, height = 1000, units = "px", pointsize = 12, quality = 100)
traceplot(post.mcmc[,'N.Male'], xlab = "Iterations", ylab = "N.Males", main = "Traceplot of N.Males",
cex.main = 2, cex.lab = 2, cex.axis = 2)
dev.off()
fname6 = paste(folderName, "/traceplots_theta.jpeg", sep = "")
jpeg(fname6, width = 1000, height = 1000, units = "px", pointsize = 12, quality = 100)
traceplot(post.mcmc[,'theta'], xlab = "Iterations", ylab = "theta", main = "Traceplot of theta",
cex.main = 2, cex.lab = 2, cex.axis = 2)
dev.off()
fname6 = paste(folderName, "/traceplots_phi.jpeg", sep = "")
jpeg(fname6, width = 1000, height = 1000, units = "px", pointsize = 12, quality = 100)
traceplot(post.mcmc[,'phi'], xlab = "Iterations", ylab = "phi", main = "Traceplot of phi",
cex.main = 2, cex.lab = 2, cex.axis = 2)
dev.off()
fname6 = paste(folderName, "/traceplots_p0.jpeg", sep = "")
jpeg(fname6, width = 1000, height = 1000, units = "px", pointsize = 12, quality = 100)
traceplot(post.mcmc[,'p0'], xlab = "Iterations", ylab = "p0", main = "Traceplot of p0",
cex.main = 2, cex.lab = 2, cex.axis = 2)
dev.off()
fname6 = paste(folderName, "/traceplots_sigmam.jpeg", sep = "")
jpeg(fname6, width = 1000, height = 1000, units = "px", pointsize = 12, quality = 100)
traceplot(post.mcmc[,'sigmam'], xlab = "Iterations", ylab = "sigma.male", main = "Traceplot of sigma.male",
cex.main = 2, cex.lab = 2, cex.axis = 2)
dev.off()
fname6 = paste(folderName, "/traceplots_sigmaf.jpeg", sep = "")
jpeg(fname6, width = 1000, height = 1000, units = "px", pointsize = 12, quality = 100)
traceplot(post.mcmc[,'sigmaf'], xlab = "Iterations", ylab = "sigma.female", main = "Traceplot of sigma.female",
cex.main = 2, cex.lab = 2, cex.axis = 2)
dev.off()
#======================================================
# Pairwise scatter plots of the post-burn-in chains, thinned by 10; any NA
# rows are dropped before plotting.
post = read.csv(paste(folderName, '/markovchain.txt', sep = ""), sep = ",", header = T)
post = post[(burnin + 1):ndraws,]
seq1 = seq(1, ndraws - burnin, by = 10)
N1 = post[seq1,'N']; N1 = N1[!is.na(N1)]
psi1 = post[seq1,'psi']; psi1 = psi1[!is.na(psi1)]
N.Male1 = post[seq1,'N.Male']; N.Male1 = N.Male1[!is.na(N.Male1)]
theta1 = post[seq1,'theta']; theta1 = theta1[!is.na(theta1)]
phi1 = post[seq1,'phi']; phi1 = phi1[!is.na(phi1)]
p01 = post[seq1,'p0']; p01 = p01[!is.na(p01)]
sigmam1 = post[seq1,'sigmam']; sigmam1 = sigmam1[!is.na(sigmam1)]
sigmaf1 = post[seq1,'sigmaf']; sigmaf1 = sigmaf1[!is.na(sigmaf1)]
df = data.frame(N1, psi1, N.Male1, theta1, phi1, p01, sigmam1, sigmaf1)
fname6 = paste(folderName, "/Scatter Plots_", IDfixed,known, ".jpeg", sep = "")
jpeg(fname6, width = 1000, height = 1000, units = "px", pointsize = 12, quality = 100)
pairs(~N1 + psi1 + N.Male1 + theta1 + phi1 + p01 + sigmam1 + sigmaf1, data = df, main = "")
dev.off()
#========================================
# Persist the entire workspace for later inspection or restart.
save.image(paste(folderName, '/savingRimage.RData', sep = ""))
} # end of function PISCR.fn
| /partialID.da.R | no_license | soumenstat89/bilateral | R | false | false | 28,415 | r |
PISCR.fn = function(
capture_1, capture_2, gender, activity, traploc,
ndraws = 20, burnin = 10, M = 400, scale = 10^4,
nloopL = 50, n.update = 20, batchsize = 1000, mindelta = 0.01,
sigmaOfProposal.logitphi = 0.05, sigmaOfProposal.logitp0 = 0.08,
sigmaOfProposal.logsigmam = 0.02, sigmaOfProposal.logsigmaf = 0.02,
sigmaOfProposal.L = 4, sigmaOfProposal.s = rep(3, 400), R = 5)
{
library(mvtnorm)
library(MCMCpack)
library(abind)
library(ggplot2)
library(raster)
library(maptools)
library(coda)
library(spatstat)
library(gtools)
options(digits = 8)
source("utility.functions.R")
################################################
area <- activity@polygons[[1]]@Polygons[[1]]@area/1000000
activity1<-as.owin(activity)
trap = traploc[,'LOC_ID']
traploc.xx = traploc[,'X_COORD']
traploc.yy = traploc[,'Y_COORD']
K = dim(traploc)[1]
J = dim(traploc)[2] - 3
dimnames(traploc)[[2]][4:dim(traploc)[2]] = c(1:J)
# convert units of locations (from meters to km) and adjust locations to have x- and y- minima at zero
traploc.x = traploc.xx/scale
traploc.y = traploc.yy/scale
trap.locations=cbind(traploc.x, traploc.y)
left.obs = makedata3d(capture_1,traploc)
numl = dim(left.obs)[1]
left = abind(left.obs, array(0, dim = c( M - numl, K, J)), along = 1)
right.obs = makedata3d(capture_2,traploc)
numr = dim(right.obs)[1]
right = abind(right.obs, array(0, dim = c( M - numr, K, J)), along = 1)
active_trapso=as.matrix(traploc[,4:dim(traploc)[2]])
Jvec = apply(active_trapso, 1, sum)
msk = active_trapso
msk3d = array(NA, c(M, K, J))
for (i in 1:M) msk3d[i, 1:K, 1:J] = msk[1:K, 1:J]
left = left * msk3d
right = right * msk3d
#==========================================================
# Initializing the parameters and processing the variables
logitphi = logit(0.8); logitp0 = logit(0.1); logsigmam = log(0.2); logsigmaf = log(0.15); psi = 0.5; theta = 0.5
## HANDLING THE SEX INFORMATION ##
sexl.obs = gender[1:numl, 2] # length numl
sexr.obs = gender[1:numr, 4] # length numr
sexl = sexr = rep(NA, M)
sexl[c(1:length(sexl.obs))[sexl.obs == 'Male']] = 1
sexl[c(1:length(sexl.obs))[sexl.obs == 'Female']] = 0
sexr[c(1:length(sexr.obs))[sexr.obs == 'Male']] = 1
sexr[c(1:length(sexr.obs))[sexr.obs == 'Female']] = 0
l.guys = levels(gender[,1])
IDfixed = suppressWarnings(sum(!is.na(as.numeric(l.guys))))
if(IDfixed == 0) known = 'none'
if(IDfixed != numl | IDfixed != numr) known = 'some'
if(IDfixed == numl & IDfixed == numr) known = 'ALL'
# Also re-label data sets if needed so that the left side always has more individuals
if (known!="ALL" & numl < numr){
a = left; left = right; right = a
b = numl; numl = numr; numr = b
cc = sexl; sexl = sexr; sexr = cc
}
##################################################
numlm = sum(sexl == 1, na.rm = T); numlf = sum(sexl == 0, na.rm = T)
numrm = sum(sexr == 1, na.rm = T); numrf = sum(sexr == 0, na.rm = T)
sexl1 = c(); sexr1 = c()
if(numlm < numrm) sexl1 = rep(1, numrm - numlm)
if(numrm < numlm) sexr1 = rep(1, numlm - numrm)
if(numlf < numrf) sexl1 = c(sexl1, rep(0, numrf - numlf))
if(numrf < numlf) sexr1 = c(sexr1, rep(0, numlf - numrf))
missing.sex.guys = is.na(sexl)
nnn = max(numlm, numrm) + max(numlf, numrf)
if(numl < nnn){ sexl[(numl+1) : nnn] = sample(sexl1, nnn - numl, replace = F)}
if(numr < nnn){ sexr[(numr+1) : nnn] = sample(sexr1, nnn - numr, replace = F)}
sexl[(nnn+1) : M] = sexr[(nnn+1) : M] = rbinom(M - nnn, 1, theta)
if(sum(is.na(sexl)) > 0) {sexl[is.na(sexl)] = sexr[is.na(sexr)] = rbinom(sum(is.na(sexl)), 1, theta)}
# Latent variable vector for gender
sex = sexl # M x 1
#######################################################
if (known != 'ALL'){ L = Lvec(left, right, numl, numr, sexl, sexr, trap.locations, IDfixed, nloopL)}
s.left <- cbind((runifpoint(n = M , win = activity1))[[3]]/scale,
(runifpoint(n = M , win = activity1))[[4]]/scale)
# =============================
ncapl = rowSums(left)
ncapr = rowSums(right)
if (known != 'ALL') {ncapr.star = ncapr[order(L)]; right.star = right[order(L),,]}
if (known == 'ALL') {ncapr.star = ncapr; right.star = right}
ncap = ncapl + ncapr.star
n0mat = apply(left + right.star, c(1,2), function(a){sum(a > 0)})
n0vec = rowSums(n0mat)
zero.guys = (ncapl + ncapr.star) == 0
numcap = sum((ncapl + ncapr.star) > 0)
z = c(rep(1, numcap), rbinom(M - numcap, 1, psi))
pimat = pimatfn(logitp0, s.left, trap.locations, sex, logsigmam, logsigmaf)
llik = logLfn.da(ncap, n0mat, n0vec, logitphi, pimat, z, J)
# .... assign values for adaptive Metropolis - Hastings sampling
batch = 0
delta = mindelta
continueMCMC = TRUE
draw = drawphi = drawp0 = drawsigmam = drawsigmaf = drawL = drawsex = drawact = 0
naccept.logitphi = naccept.logitp0 = naccept.logsigmam = naccept.logsigmaf = naccept.L = 0
naccept.s = rep(0,M)
ts = format(Sys.time(), "%d%m%y_%H%M%S")
folderName = paste("partiaID.da_", ts, sep = "")
dir.create(path = folderName)
start.time = Sys.time()
cat('Begin MCMC sampling:', '\n', '\n')
cat(c('N', 'psi', 'N.Male', 'theta', 'phi', 'p0', 'sigmam', 'sigmaf'), sep = ',', file = paste(folderName, '/markovchain.txt', sep = ""), append = TRUE)
cat('\n', file = paste(folderName, '/markovchain.txt', sep = ""), append = TRUE)
cat(c(paste('sex', 1:M, sep = '')), sep = ',', file = paste(folderName, '/markovchain.sex.txt', sep = ''), append = TRUE)
cat('\n', file = paste(folderName, '/markovchain.sex.txt', sep = ''), append = TRUE)
cat(c(paste('z', 1:M, sep = '')), sep = ',', file = paste(folderName, '/markovchain.z.txt', sep = ''), append = TRUE)
cat('\n', file = paste(folderName, '/markovchain.z.txt', sep = ''), append = TRUE)
cat(c(paste('sx', 1:M, sep = '')), sep = ',', file = paste(folderName, '/markovchain.sx.txt', sep = ''), append = TRUE)
cat('\n', file = paste(folderName, '/markovchain.sx.txt', sep = ''), append = TRUE)
cat(c(paste('sy', 1:M, sep = '')), sep = ',', file = paste(folderName, '/markovchain.sy.txt', sep = ''), append = TRUE)
cat('\n', file = paste(folderName, '/markovchain.sy.txt', sep = ''), append = TRUE)
cat(c(paste('loglik', 1:M, sep = '')), sep = ',', file = paste(folderName, '/markovchain.loglikelihood.txt', sep = ''), append = TRUE)
cat('\n', file = paste(folderName, '/markovchain.loglikelihood.txt', sep = ''), append = TRUE)
if (known != 'ALL')
{
cat(c(paste("L", 1:M, sep = '')), sep = ',', file = paste(folderName, '/markovchainL.txt', sep = ""), append = TRUE)
cat('\n', file = paste(folderName, '/markovchainL.txt', sep = ""), append = TRUE)
}
while (continueMCMC) {
draw = draw + 1
drawinterval = 500
if (draw == round(draw/drawinterval)*drawinterval) cat('..... drawing sample #', draw, '\n')
# update the increment/decrement for adaptive Metropolis - Hastings samplers
if (floor(draw/batchsize)*batchsize == draw){
batch = batch + 1
if (1/sqrt(batch) < mindelta) delta = 1/sqrt(batch)
}
# update phi
drawphi = drawphi +1
loglik.curr.phi = llik
logitphi.cand = rnorm(1, logitphi, sigmaOfProposal.logitphi)
loglik.cand.phi = logLfn.da(ncap, n0mat, n0vec, logitphi.cand, pimat, z, J)
lognum = loglik.cand.phi + log(exp(logitphi.cand)/((1+exp(logitphi.cand))^2))
logden = loglik.curr.phi + log(exp(logitphi)/((1+exp(logitphi))^2))
if (logden == -Inf){
logitphi = logitphi.cand
loglik.curr.phi = loglik.cand.phi
naccept.logitphi = naccept.logitphi + 1
}
if (logden != -Inf){
logR = lognum - logden
if (runif(1,0,1) <= exp(logR)){
logitphi = logitphi.cand
loglik.curr.phi = loglik.cand.phi
naccept.logitphi = naccept.logitphi + 1
}
}
if (floor(draw/batchsize)*batchsize == draw){
SigmaDiff = ifelse(naccept.logitphi > 0.44*batchsize, exp(2*delta), exp(-2*delta))
if(draw <= burnin){ sigmaOfProposal.logitphi = sigmaOfProposal.logitphi * SigmaDiff}
cat("proposal sd of logitphi = ", sigmaOfProposal.logitphi, ' ')
cat("naccept.logitphi = ", naccept.logitphi, '\n')
naccept.logitphi = 0
}
# update p0
drawp0 = drawp0 +1
loglik.curr.p0 = loglik.curr.phi
logitp0.cand = rnorm(1, logitp0, sigmaOfProposal.logitp0)
pimat.cand = pimatfn(logitp0.cand, s.left, trap.locations, sex, logsigmam, logsigmaf)
loglik.cand.p0 = logLfn.da(ncapl + ncapr.star, n0mat, n0vec, logitphi, pimat.cand, z, J)
lognum = loglik.cand.p0 + log(exp(logitp0.cand)/((1+exp(logitp0.cand))^2))
logden = loglik.curr.p0 + log(exp(logitp0)/((1+exp(logitp0))^2))
if (logden == -Inf){
logitp0 = logitp0.cand
pimat = pimat.cand
loglik.curr.p0 = loglik.cand.p0
naccept.logitp0 = naccept.logitp0 + 1
}
if (logden != -Inf){
logR = lognum - logden
if (runif(1,0,1) <= exp(logR)){
logitp0 = logitp0.cand
pimat = pimat.cand
loglik.curr.p0 = loglik.cand.p0
naccept.logitp0 = naccept.logitp0 + 1
}
}
if (floor(draw/batchsize)*batchsize == draw){
SigmaDiff = ifelse(naccept.logitp0 > 0.44*batchsize, exp(2*delta), exp(-2*delta))
if(draw <= burnin){ sigmaOfProposal.logitp0 = sigmaOfProposal.logitp0 * SigmaDiff}
cat("proposal sd of logitp0 = ", sigmaOfProposal.logitp0, ' ')
cat("naccept.logitp0 = ", naccept.logitp0, '\n')
naccept.logitp0 = 0
}
# update sigmam
drawsigmam = drawsigmam +1
loglik.curr.sigmam = loglik.curr.p0
logsigmam.cand = rnorm(1, logsigmam, sigmaOfProposal.logsigmam)
pimat.cand = pimatfn(logitp0, s.left, trap.locations, sex, logsigmam.cand, logsigmaf)
loglik.cand.sigmam = logLfn.da(ncap, n0mat, n0vec, logitphi, pimat.cand, z, J)
lognum = loglik.cand.sigmam + log(exp(logsigmam.cand)/R)
logden = loglik.curr.sigmam + log(exp(logsigmam)/R)
if (logden == -Inf){
logsigmam = logsigmam.cand
pimat = pimat.cand
loglik.curr.sigmam = loglik.cand.sigmam
naccept.logsigmam = naccept.logsigmam + 1
}
if (logden != -Inf){
logR = lognum - logden
if (runif(1,0,1) <= exp(logR)){
logsigmam = logsigmam.cand
pimat = pimat.cand
loglik.curr.sigmam = loglik.cand.sigmam
naccept.logsigmam = naccept.logsigmam + 1
}
}
if (floor(draw/batchsize)*batchsize == draw){
SigmaDiff = ifelse(naccept.logsigmam > 0.44*batchsize, exp(2*delta), exp(-2*delta))
if(draw <= burnin){ sigmaOfProposal.logsigmam = sigmaOfProposal.logsigmam * SigmaDiff}
cat("proposal sd of logsigmam = ", sigmaOfProposal.logsigmam, ' ')
cat("naccept.logsigmam = ", naccept.logsigmam, '\n')
naccept.logsigmam = 0
}
# update sigmaf
drawsigmaf = drawsigmaf +1
loglik.curr.sigmaf = loglik.curr.sigmam
logsigmaf.cand = rnorm(1, logsigmaf, sigmaOfProposal.logsigmaf)
pimat.cand = pimatfn(logitp0, s.left, trap.locations, sex, logsigmam, logsigmaf.cand)
loglik.cand.sigmaf = logLfn.da(ncap, n0mat, n0vec, logitphi, pimat.cand, z, J)
lognum = loglik.cand.sigmaf + log(exp(logsigmaf.cand)/R)
logden = loglik.curr.sigmaf + log(exp(logsigmaf)/R)
if (logden == -Inf){
logsigmaf = logsigmaf.cand
pimat = pimat.cand
loglik.curr.sigmaf = loglik.cand.sigmaf
naccept.logsigmaf = naccept.logsigmaf + 1
}
if (logden != -Inf){
logR = lognum - logden
if (runif(1,0,1) <= exp(logR)){
logsigmaf = logsigmaf.cand
pimat = pimat.cand
loglik.curr.sigmaf = loglik.cand.sigmaf
naccept.logsigmaf = naccept.logsigmaf + 1
}
}
if (floor(draw/batchsize)*batchsize == draw){
SigmaDiff = ifelse(naccept.logsigmaf > 0.44*batchsize, exp(2*delta), exp(-2*delta))
if(draw <= burnin){ sigmaOfProposal.logsigmaf = sigmaOfProposal.logsigmaf * SigmaDiff}
cat("proposal sd of logsigmaf = ", sigmaOfProposal.logsigmaf, ' ')
cat("naccept.logsigmaf = ", naccept.logsigmaf, '\n')
naccept.logsigmaf = 0
}
# update L
if (known != "ALL"){
loglik.curr.L = loglik.curr.sigmaf
drawL = drawL +1
indx1 = (IDfixed + 1):M
if( IDfixed < numr) rightset1 = order(L)[z == 1 & order(L) >= (IDfixed+1) & order(L) <= numr]
if( IDfixed >= numr) rightset1 = c()
rightset2 = order(L)[z == 1 & order(L) > numr] # real right individuals who went uncaptured
if(length(rightset2) >1) rightset2 = sample(rightset2, min(length(rightset2), n.update), replace = F)
rightset3 = c()
if(IDfixed < numl){
leftset3 = c(1:M)[z == 1 & c(1:M)>IDfixed & c(1:M) <= numl]
if(length(leftset3) > 0) for(ii in leftset3) rightset3 = c(rightset3, c(1:M)[L == ii])
# right individuals to whom left captured individuals are linked with
}
rightset = sort(unique(c(rightset1, rightset2, rightset3)))
for (r.guy1 in rightset){
l.swap.out = L[r.guy1]
possible.L = c(1:M)[z == 1 & c(1:M) > IDfixed & sex == sex[l.swap.out]]
dv = sqrt( (s.left[l.swap.out,1] - s.left[,1]) ^ 2 + (s.left[l.swap.out,2] - s.left[,2]) ^ 2 )
wt.possible.L = exp( - (dv ^ 2) / sigmaOfProposal.L ^ 2)[z == 1 & c(1:M) > IDfixed & sex == sex[l.swap.out]]
if (length(possible.L) > 1) l.swap.in = sample( possible.L, 1, replace = F, prob = wt.possible.L)
if (length(possible.L) == 1) next
if (length(possible.L) == 0) next # this case will never happen since l.swap.out is present there at the centre of the circle dv < 5
if (l.swap.in == l.swap.out) next # saves computation time in for loop
jump.prob.L = wt.possible.L[which(possible.L == l.swap.in, arr.ind = T)] / sum(wt.possible.L) # q(state.curr, state.cand)
dv.back = sqrt( (s.left[l.swap.in,1] - s.left[,1]) ^ 2 + (s.left[l.swap.in,2] - s.left[,2]) ^ 2 )
wt.possible.back.L = exp( - (dv.back ^ 2) / sigmaOfProposal.L ^ 2)[z == 1 & c(1:M) > IDfixed & sex == sex[l.swap.in]]
# Note that, sex[l.swap.out] = sex[l.swap.in], and z[l.swap.out] may be != z[l.swap.in]
jump.prob.back.L = wt.possible.back.L[which(possible.L == l.swap.out, arr.ind = T)] / sum(wt.possible.back.L) # q(state.cand, state.curr)
## Which right encounter history is currently associated with left guy s.swap.in?
r.guy2 = c(1:M)[L == l.swap.in] # which(L == l.swap.in, arr.ind = T)
# if (l.swap.in <=IDfixed) browser()
L.cand = L
L.cand[r.guy1] = l.swap.in
L.cand[r.guy2] = l.swap.out
right.star.cand = right[order(L.cand),,]
ncapr.star.cand = ncapr[order(L.cand)]
ncap.cand = ncapl + ncapr.star.cand
n0mat.cand = apply(left + right.star.cand, c(1,2), function(a){sum(a > 0)})
n0vec.cand = rowSums(n0mat.cand) # apply(n0mat.cand, 1, sum)
loglik.cand.L = logLfn.da(ncap.cand, n0mat.cand, n0vec.cand, logitphi, pimat, z, J)
lognum = loglik.cand.L + log(jump.prob.back.L)
logden = loglik.curr.L + log(jump.prob.L)
if (logden == -Inf){
L = L.cand
loglik.curr.L = loglik.cand.L
right.star = right.star.cand
ncapr.star = ncapr.star.cand
ncap = ncap.cand
n0mat = n0mat.cand
n0vec = n0vec.cand
naccept.L = naccept.L + 1
}
if (logden != -Inf){
logR = lognum - logden
if (runif(1,0,1) <= exp(logR)){
L = L.cand
loglik.curr.L = loglik.cand.L
right.star = right.star.cand
ncapr.star = ncapr.star.cand
ncap = ncap.cand
n0mat = n0mat.cand
n0vec = n0vec.cand
naccept.L = naccept.L + 1
}
}
} # end of for (r.guy1 in rightset)
if (floor(draw/batchsize)*batchsize == draw){
SigmaDiff = ifelse(naccept.L > 0.44*batchsize, exp(2*delta), exp(-2*delta))
if(draw <= burnin){ sigmaOfProposal.L= sigmaOfProposal.L * SigmaDiff}
cat(paste("proposal sd of L = ", sep = ""), sigmaOfProposal.L, ' ')
cat(paste("naccept.L = ", sep = ""), naccept.L, '\n')
naccept.L = 0
}
} # end of if (known!='ALL')
# update z
zero.guys = (ncapl + ncapr.star) == 0
ncprob = (1 - expit(logitphi) * (2 - expit(logitphi)) * pimat) ^ J
prob0 = exp(rowSums(log(ncprob)))
fc = prob0*psi / (prob0*psi + 1 - psi)
z[zero.guys] = rbinom(sum(zero.guys), 1, fc[zero.guys])
z[!zero.guys] = 1
# update psi
psi = rbeta(1, 1 + sum(z), 1 + (M - sum(z) ) )
# update sex
drawsex = drawsex +1
D1 = e2dist(s.left, trap.locations)
pimat.m = expit(logitp0) * exp(- D1 * D1 / (2 * (exp(logsigmam) ^ 2))) # M x K
Am1 = rowSums(log(pimat.m ^ n0mat), na.rm = T) # M x 1
Am2 = rowSums(log((1 - expit(logitphi) * (2 - expit(logitphi)) * pimat.m) ^ (J-n0mat)), na.rm = T) # M x 1
Am1[Am1 == -Inf] = -10^200; Am2[Am2 == -Inf] = -10^200; # .Machine$double.xmax
AAm = exp(Am1 + Am2) # M x 1
pimat.f = expit(logitp0) * exp(- D1 * D1 / (2 * (exp(logsigmaf) ^ 2))) # M x K
Af1 = rowSums(log(pimat.f ^ n0mat), na.rm = T) # M x 1
Af2 = rowSums(log((1 - expit(logitphi) * (2 - expit(logitphi)) * pimat.f) ^ (J-n0mat)), na.rm = T) # M x 1
Af1[Af1 == -Inf] = -10^200; Af2[Af2 == -Inf] = -10^200; # .Machine$double.xmax
AAf = exp(Af1 + Af2) # M x 1
AA = theta * AAm + (1-theta) * AAf # M x 1
theta0 = theta * AAm / AA
mis = (z == 1) & missing.sex.guys & AA > 0
sex[mis] = rbinom(sum(mis), 1, theta0[mis])
pimat = pimatfn(logitp0, s.left, trap.locations, sex, logsigmam, logsigmaf)
# update theta
theta = rbeta( 1, 1 + sum(z*sex), 1 + sum(z*(1 - sex)) )
# update the activity centers
drawact = drawact +1
s.left.cand = as.matrix(cbind(rnorm(M , s.left[, 1], sigmaOfProposal.s), rnorm(M , s.left[, 2], sigmaOfProposal.s)))
Scoord = SpatialPoints(s.left.cand*scale, CRS("+proj=utm +zone=43 +datum=WGS84"))
SinPoly = over(Scoord, activity) # dim M x 1
# Sinpoly[i] equals the index among the mask points if the point i is inside the state space mask polygon, if it is not then Sinpoly = NA
for (i in c(1:M)[z == 1 & !is.na(SinPoly[,1])]) {
logsigma_i = ifelse(sex[i] == 1, logsigmam, logsigmaf)
DD.cand = sqrt((s.left.cand[i,1] - trap.locations[,1]) ^ 2 + (s.left.cand[i,2] - trap.locations[,2]) ^ 2) # a vector Kx1
pi.cand = expit(logitp0) * exp(-DD.cand * DD.cand / (2 * (exp(logsigma_i) ^ 2 )))
yyy3 = log(1 + pi.cand^n0mat[i,])
A3 = log(exp(yyy3) - 1)
yyy4 = log(1 + (1 - expit(logitphi)* (2 - expit(logitphi)) * pi.cand)^(J - n0mat[i,]))
A4 = log(exp(yyy4) - 1)
A3[A3 == -Inf] = -10^200
A4[A4 == -Inf] = -10^200
loglik.cand.s = z[i] * (sum(A3+A4))
yyy3 = log(1 + pimat[i,]^n0mat[i,])
A3 = log(exp(yyy3) - 1)
yyy4 = log(1 + (1 - expit(logitphi)* (2 - expit(logitphi)) * pimat[i,])^(J - n0mat[i,]))
A4 = log(exp(yyy4) - 1)
A3[A3 == -Inf] = -10^200
A4[A4 == -Inf] = -10^200
loglik.curr.s = z[i] * (sum(A3+A4))
lognum = loglik.cand.s
logden = loglik.curr.s
if (logden == -Inf){
s.left[i, ] = s.left.cand[i, ]
naccept.s[i] = naccept.s[i] +1
}
if (logden != -Inf){
logR = lognum - logden
if (runif(1,0,1) <= exp(logR)){
s.left[i, ] = s.left.cand[i, ]
naccept.s[i] = naccept.s[i] +1
}
}
if (floor(draw/batchsize)*batchsize == draw){
SigmaDiff = ifelse(naccept.s[i] > 0.44*batchsize, exp(2*delta), exp(-2*delta))
if(draw <= burnin){ sigmaOfProposal.s[i] = sigmaOfProposal.s[i] * SigmaDiff}
naccept.s[i] = 0
}
}
pimat = pimatfn(logitp0, s.left, trap.locations, sex, logsigmam, logsigmaf)
llik = logLfn.da(ncap, n0mat, n0vec, logitphi, pimat, z, J)
cat(c(sum(z), psi, sum(sex*z), theta,
expit(logitphi), expit(logitp0),
exp(logsigmam)*(scale/1000), # output in kilometers
exp(logsigmaf)*(scale/1000) # output in kilometers
), sep = ',', file = paste(folderName, '/markovchain.txt', sep = ""), append = TRUE)
cat('\n', file = paste(folderName, '/markovchain.txt', sep = ""), append = TRUE)
cat(c(sex), sep = ',', file = paste(folderName, '/markovchain.sex.txt', sep = ''), append = TRUE)
cat('\n', file = paste(folderName, '/markovchain.sex.txt', sep = ''), append = TRUE)
cat(c(z), sep = ',', file = paste(folderName, '/markovchain.z.txt', sep = ''), append = TRUE)
cat('\n', file = paste(folderName, '/markovchain.z.txt', sep = ''), append = TRUE)
cat(c(s.left[,1]), sep = ',', file = paste(folderName, '/markovchain.sx.txt', sep = ''), append = TRUE)
cat('\n', file = paste(folderName, '/markovchain.sx.txt', sep = ''), append = TRUE)
cat(c(s.left[,2]), sep = ',', file = paste(folderName, '/markovchain.sy.txt', sep = ''), append = TRUE)
cat('\n', file = paste(folderName, '/markovchain.sy.txt', sep = ''), append = TRUE)
cat(llik, sep = ',', file = paste(folderName, '/markovchain.loglikelihood.txt', sep = ''), append = TRUE)
cat('\n', file = paste(folderName, '/markovchain.loglikelihood.txt', sep = ''), append = TRUE)
if (known != 'ALL')
{
cat(c(L), sep = ',', file = paste(folderName, '/markovchainL.txt', sep = ""), append = TRUE)
cat('\n', file = paste(folderName, '/markovchainL.txt', sep = ""), append = TRUE)
}
if (draw == ndraws){
cat('Completed ', ndraws, ' draws of MCMC algorithm', '\n')
numOfDraws = 0
if (numOfDraws == 0) continueMCMC = FALSE
}
} # end of MCMC loop
cat('MCMC sampling is completed!', '\n', '\n')
end.time = Sys.time()
(time.taken = end.time - start.time); print(time.taken)
#==================================================================================
post = read.csv(paste(folderName, '/markovchain.txt', sep = ""), sep = ",", header = T)
post = post[(burnin + 1):ndraws,]
geweke_diag = geweke.diag(post, frac1 = 0.2, frac2 = 0.4)
suppressWarnings(write.table(round(unlist(geweke_diag), 3), sep = ' ',
col.names = F, append = F,
file = paste(folderName, '/geweke.diag.txt', sep = "")))
N.chain = post[, 'N']
Nvalues = 0:M
probN = rep(0, (M + 1))
for (i in 1:(M + 1)){ probN[i] = length(N.chain[N.chain == (i - 1)])/(ndraws - burnin)}
post.mode.N = Nvalues[probN == max(probN)][1]
Bayes.Nmean = mean(N.chain, na.rm = T)
Bayes.Nvar = mean(N.chain ^ 2, na.rm = T) - (mean(N.chain, na.rm = T)) ^ 2
ind = cumsum(probN) >= 0.025 & cumsum(probN) <= 0.975
Bayes.Nlower = quantile(N.chain, 0.025, na.rm = T, type = 1)
Bayes.Nupper = quantile(N.chain, 0.975, na.rm = T, type = 1)
fname5 = paste(folderName, "/mcmc plots of N.jpeg", sep = "")
ylimits = c(0, max(probN))
jpeg(fname5, width = 1000, height = 1000, units = "px", pointsize = 12, quality = 100)
plot(Nvalues[1:M], probN[1:M], type = 'h', ylim = ylimits, xlab = 'N', ylab = 'probability' )
dev.off()
out = as.matrix(c(Bayes.Nmean, sqrt(Bayes.Nvar), Bayes.Nlower, post.mode.N, Bayes.Nupper))
dimnames(out) = list(c('Mean.N', 'SE.N', '2.5%', 'post.mode', '97.5%'),c('HB Estimates of N'))
prob.quantiles = c(0.025, 0.5, 0.975)
post.stats = cbind(apply(post,2,mean, na.rm = T), apply(post,2,sd, na.rm = T), t(apply(post, 2, quantile, probs = prob.quantiles, na.rm = T,type = 1)) )
prob.names = paste(as.character(100*prob.quantiles), '%', sep = '')
dimnames(post.stats)[2] = list(c('Mean.Chain', 'SD.Chain', prob.names))
mcse.mean.vec = mcse.sd.vec = rep(0, dim(post)[2])
mcse.lq.vec = mcse.uq.vec = mcse.med.vec = rep(0, dim(post)[2])
for (i in 1:dim(post)[2]){
postvec = post[,i]
mcse.mean.vec[i] = mcse(postvec[!is.na(postvec)], mean)$se
mcse.sd.vec[i] = mcse(postvec[!is.na(postvec)], sd)$se
mcse.med.vec[i] = mcse(postvec[!is.na(postvec)], medQuantile)$se
mcse.lq.vec[i] = mcse(postvec[!is.na(postvec)], lowerQuantile)$se
mcse.uq.vec[i] = mcse(postvec[!is.na(postvec)], upperQuantile)$se
}
mcse.mean.vec = unlist(mcse.mean.vec)
mcse.sd.vec = unlist(mcse.sd.vec)
mcse.med.vec = unlist(mcse.med.vec)
mcse.lq.vec = unlist(mcse.lq.vec)
mcse.uq.vec = unlist(mcse.uq.vec)
mcse.mat = cbind(mcse.mean.vec, mcse.sd.vec, mcse.lq.vec, mcse.med.vec, mcse.uq.vec)
HB_estimates = MCSE_estimates = c('', '', '', '', '')
dim.names = c('Mean.Chain', 'SD.Chain', prob.names)
dimnames(mcse.mat) = dimnames(post.stats)
information = as.data.frame(c(M, numl , numr, IDfixed, known, K, J, ndraws, burnin))
dimnames(information) = list(c('M', 'numl', 'numr', 'IDfixed', 'known', 'K', 'J', 'ndraws', 'burnin'), c('info'))
out.final = rbind(t(out), HB_estimates, dim.names, post.stats, MCSE_estimates, dim.names, mcse.mat)
write.csv(rbind(cbind(out.final, matrix('',nrow(out.final), nrow(information) - ncol(out.final))),
dimnames(information)[[1]], t(information)),
file = paste(folderName, '/EstimatesOfDerivedParam.csv', sep = ""), quote = F,row.names = T)
#============================================================================================================
post = read.csv(paste(folderName, '/markovchain.txt', sep = ""), sep = ",", header = T)
post.mcmc = as.mcmc(post)
fname6 = paste(folderName, "/traceplots_N.jpeg", sep = "")
jpeg(fname6, width = 1000, height = 1000, units = "px", pointsize = 12, quality = 100)
traceplot(post.mcmc[,'N'], xlab = "Iterations", ylab = "N", main = "Traceplot of N",
cex.main = 2, cex.lab = 2, cex.axis = 2)
dev.off()
fname6 = paste(folderName, "/traceplots_psi.jpeg", sep = "")
jpeg(fname6, width = 1000, height = 1000, units = "px", pointsize = 12, quality = 100)
traceplot(post.mcmc[,'psi'], xlab = "Iterations", ylab = "psi", main = "Traceplot of psi",
cex.main = 2, cex.lab = 2, cex.axis = 2)
dev.off()
fname6 = paste(folderName, "/traceplots_N.Male.jpeg", sep = "")
jpeg(fname6, width = 1000, height = 1000, units = "px", pointsize = 12, quality = 100)
traceplot(post.mcmc[,'N.Male'], xlab = "Iterations", ylab = "N.Males", main = "Traceplot of N.Males",
cex.main = 2, cex.lab = 2, cex.axis = 2)
dev.off()
fname6 = paste(folderName, "/traceplots_theta.jpeg", sep = "")
jpeg(fname6, width = 1000, height = 1000, units = "px", pointsize = 12, quality = 100)
traceplot(post.mcmc[,'theta'], xlab = "Iterations", ylab = "theta", main = "Traceplot of theta",
cex.main = 2, cex.lab = 2, cex.axis = 2)
dev.off()
fname6 = paste(folderName, "/traceplots_phi.jpeg", sep = "")
jpeg(fname6, width = 1000, height = 1000, units = "px", pointsize = 12, quality = 100)
traceplot(post.mcmc[,'phi'], xlab = "Iterations", ylab = "phi", main = "Traceplot of phi",
cex.main = 2, cex.lab = 2, cex.axis = 2)
dev.off()
fname6 = paste(folderName, "/traceplots_p0.jpeg", sep = "")
jpeg(fname6, width = 1000, height = 1000, units = "px", pointsize = 12, quality = 100)
traceplot(post.mcmc[,'p0'], xlab = "Iterations", ylab = "p0", main = "Traceplot of p0",
cex.main = 2, cex.lab = 2, cex.axis = 2)
dev.off()
fname6 = paste(folderName, "/traceplots_sigmam.jpeg", sep = "")
jpeg(fname6, width = 1000, height = 1000, units = "px", pointsize = 12, quality = 100)
traceplot(post.mcmc[,'sigmam'], xlab = "Iterations", ylab = "sigma.male", main = "Traceplot of sigma.male",
cex.main = 2, cex.lab = 2, cex.axis = 2)
dev.off()
fname6 = paste(folderName, "/traceplots_sigmaf.jpeg", sep = "")
jpeg(fname6, width = 1000, height = 1000, units = "px", pointsize = 12, quality = 100)
traceplot(post.mcmc[,'sigmaf'], xlab = "Iterations", ylab = "sigma.female", main = "Traceplot of sigma.female",
cex.main = 2, cex.lab = 2, cex.axis = 2)
dev.off()
#======================================================
post = read.csv(paste(folderName, '/markovchain.txt', sep = ""), sep = ",", header = T)
post = post[(burnin + 1):ndraws,]
seq1 = seq(1, ndraws - burnin, by = 10)
N1 = post[seq1,'N']; N1 = N1[!is.na(N1)]
psi1 = post[seq1,'psi']; psi1 = psi1[!is.na(psi1)]
N.Male1 = post[seq1,'N.Male']; N.Male1 = N.Male1[!is.na(N.Male1)]
theta1 = post[seq1,'theta']; theta1 = theta1[!is.na(theta1)]
phi1 = post[seq1,'phi']; phi1 = phi1[!is.na(phi1)]
p01 = post[seq1,'p0']; p01 = p01[!is.na(p01)]
sigmam1 = post[seq1,'sigmam']; sigmam1 = sigmam1[!is.na(sigmam1)]
sigmaf1 = post[seq1,'sigmaf']; sigmaf1 = sigmaf1[!is.na(sigmaf1)]
df = data.frame(N1, psi1, N.Male1, theta1, phi1, p01, sigmam1, sigmaf1)
fname6 = paste(folderName, "/Scatter Plots_", IDfixed,known, ".jpeg", sep = "")
jpeg(fname6, width = 1000, height = 1000, units = "px", pointsize = 12, quality = 100)
pairs(~N1 + psi1 + N.Male1 + theta1 + phi1 + p01 + sigmam1 + sigmaf1, data = df, main = "")
dev.off()
#========================================
save.image(paste(folderName, '/savingRimage.RData', sep = ""))
} # end of function PISCR.fn
|
### nfl teams (in 2018) by conference, division, or league as a whole ----
# as: parameter to specify how you want teams returned. Three options described below
# --> a length 32 vector of all teams (as="league")
# --> a two element list, where each list is a length 16 vector of teams in each conference (as="conference")
# --> an eight element list, where each list element is a length 4 vector of teams in each division (as="division")
# This function returns all NFL teams in 2018 as a vector, or
# as a list organized by division or conference
teams_2018 = function(as){
  # Return the 32 NFL teams of the 2018 season, grouped per "as":
  #   "league"     -> one character vector of all 32 teams (AFC first, then NFC)
  #   "conference" -> list of 2 vectors (AFC, NFC), 16 teams each
  #   "division"   -> list of 8 vectors (AFC E/S/N/W, then NFC E/S/N/W), 4 teams each
  # Stops with an error for any other value of "as".
  if (!(as %in% c("league", "division", "conference"))) {
    stop('Invalid "as" parameter, please enter "league", "division", or "conference"')
  }
  # All team names by division
  afceast <- c("New England Patriots", "New York Jets", "Miami Dolphins", "Buffalo Bills")
  afcsouth <- c("Indianapolis Colts", "Houston Texans", "Tennessee Titans", "Jacksonville Jaguars")
  afcnorth <- c("Baltimore Ravens", "Pittsburgh Steelers", "Cincinnati Bengals", "Cleveland Browns")
  afcwest <- c("Los Angeles Chargers", "Denver Broncos", "Oakland Raiders", "Kansas City Chiefs")
  afc <- c(afceast, afcsouth, afcnorth, afcwest)
  nfceast <- c("Philadelphia Eagles", "New York Giants", "Washington Redskins", "Dallas Cowboys")
  nfcsouth <- c("Carolina Panthers", "Atlanta Falcons", "Tampa Bay Buccaneers", "New Orleans Saints")
  nfcnorth <- c("Chicago Bears", "Green Bay Packers", "Minnesota Vikings", "Detroit Lions")
  nfcwest <- c("Arizona Cardinals", "Seattle Seahawks", "San Francisco 49ers", "Los Angeles Rams")
  nfc <- c(nfceast, nfcsouth, nfcnorth, nfcwest)
  # switch() replaces the original chain of if/return statements; the input was
  # validated above, so exactly one branch always matches.
  switch(as,
         league = c(afc, nfc),
         division = list(afceast, afcsouth, afcnorth, afcwest,
                         nfceast, nfcsouth, nfcnorth, nfcwest),
         conference = list(afc, nfc))
}
### ratings object of Glicko ratings over the initialization period (up until current year) ----
# start_season: what year in the past is your start date for the initialization period?
# current_season: the current season, i.e. the season that will be simulated. Initialization will run from start to current-1
# current_teams: a vector of current NFL teams, for subsetting the final results (use teams_2018 to obtain)
# glicko_cval: what c-value is desired for the Glicko algorithm? "c" controls how fast the Glicko RD changes
# --> for details on default value, see 02_Selecting_Default_C.pdf
# This function will obtain the end season Glicko ratings for the season "current_season - 1",
# based off of all games between start_season and current_season - 1
# This function scrapes data online from Pro-Football Reference,
# formats it for the use of "glicko" function from package "PlayerRatings",
# and calculates the Glicko ratings over the desired time period
initialization_pd_glicko_setup = function(start_season, current_season, current_teams, glicko_cval){
  # Build end-of-season Glicko ratings for season "current_season - 1" from all
  # games played between start_season and current_season - 1.
  #
  # Args:
  #   start_season:   first season of the initialization period (must be >= 1922)
  #   current_season: season to be simulated; ratings run through current_season - 1
  #   current_teams:  character vector of current NFL team names used to subset the output
  #   glicko_cval:    c-value for the Glicko algorithm (controls how fast the RD changes)
  #
  # Returns: the $ratings data frame from glicko(), restricted to rows whose
  #   Player is in current_teams.
  #
  # NOTE(review): scrapes Pro-Football-Reference over the network via htmltab(),
  # so this function needs internet access and depends on PFR's table layout.

  # PFR only has game data from 1922 onward (the NFL started in 1920, so an
  # earlier start season is conceivable but unsupported).
  if (start_season < 1922) {
    stop("Start year must be 1922 or after due to data availability on PFR")
  }
  # Guard against an empty/backwards period: start_season:(current_season - 1)
  # would otherwise silently count downwards.
  if (current_season <= start_season) {
    stop("current_season must be greater than start_season")
  }

  # Canonical-name lookup: maps historical / relocated franchise names onto a
  # single name per franchise (the name used in 2018, or the final name for
  # defunct teams).  Applied identically to winner and loser columns.
  name_map <- c(
    "St. Louis Rams"         = "Los Angeles Rams",
    "Cleveland Rams"         = "Los Angeles Rams",
    "Tennessee Oilers"       = "Tennessee Titans",
    "Houston Oilers"         = "Tennessee Titans",
    "Los Angeles Raiders"    = "Oakland Raiders",
    "Phoenix Cardinals"      = "Arizona Cardinals",
    "St. Louis Cardinals"    = "Arizona Cardinals",
    "Chicago Cardinals"      = "Arizona Cardinals",
    "Chi/Pit Cards/Steelers" = "Arizona Cardinals",
    "Baltimore Colts"        = "Indianapolis Colts",
    "Boston Patriots"        = "New England Patriots",
    "San Diego Chargers"     = "Los Angeles Chargers",
    "Phi/Pit Eagles/Steelers" = "Philadelphia Eagles",
    "Pittsburgh Pirates"     = "Pittsburgh Steelers",
    "Boston Redskins"        = "Washington Redskins",
    "Boston Braves"          = "Washington Redskins",
    "Portsmouth Spartans"    = "Detroit Lions",
    "Racine Legion"          = "Racine Tornadoes",
    "Louisville Brecks"      = "Louisville Cardinals",
    "Akron Pros"             = "Akron Indians",
    "Buffalo All-Americans"  = "Buffalo Rangers",
    "Buffalo Bisons"         = "Buffalo Rangers",
    "Dayton Triangles"       = "Brooklyn Tigers",
    "Brooklyn Dodgers"       = "Brooklyn Tigers",
    "Toledo Maroons"         = "Kenosha Maroons",
    "Columbus Tigers"        = "Columbus Panhandles",
    "Cleveland Bulldogs"     = "Detroit Wolverines",
    "Duluth Kelleys"         = "Duluth Eskimos",
    "Pottsville Maroons"     = "Boston Bulldogs",
    "Kansas City Cowboys"    = "Kansas City Blues",
    "Orange Tornadoes"       = "Newark Tornadoes",
    "Boston Yanks"           = "Dallas Texans",
    "Bos/Bkn Yanks/Tigers"   = "Dallas Texans",
    "New York Bulldogs"      = "Dallas Texans",
    "New York Yanks"         = "Dallas Texans"
  )
  # Replace NA with the literal string "NA" (those rows are filtered out below)
  # and apply the canonical-name map.  A single pass is equivalent to the old
  # one-which()-per-name sequence because no mapped-to name is itself a map key.
  fix_team_names <- function(team) {
    team[is.na(team)] <- "NA"
    hit <- team %in% names(name_map)
    team[hit] <- unname(name_map[team[hit]])
    team
  }

  # Scrape one season at a time from Pro-Football-Reference and stack the
  # results (list + do.call(rbind) avoids growing a data frame in the loop).
  seasons <- start_season:(current_season - 1)
  season_frames <- vector("list", length(seasons))
  for (idx in seq_along(seasons)) {
    i <- seasons[idx]
    url <- paste("https://www.pro-football-reference.com/years/", i, "/games.htm", sep = "")
    yr <- htmltab(url, which = 1, rm_nodata_cols = FALSE)
    yr <- yr[, c("Week", "Winner/tie", "V2", "Loser/tie", "PtsW", "PtsL")]
    names(yr)[c(2, 3, 4)] <- c("Winner.tie", "Home", "Loser.tie")
    yr$Year <- i
    season_frames[[idx]] <- yr
  }
  scores_df <- do.call(rbind, season_frames)

  # Normalize franchise names in both the winner and loser columns.
  scores_df$Winner.tie <- fix_team_names(scores_df$Winner.tie)
  scores_df$Loser.tie <- fix_team_names(scores_df$Loser.tie)

  # Drop repeated header rows and NA placeholder rows left by the scrape.
  scores_df <- scores_df[scores_df$Winner.tie != "Winner/tie", ]
  scores_df <- scores_df[scores_df$Loser.tie != "Loser/tie", ]
  scores_df <- scores_df[scores_df$Winner.tie != "NA", ]
  scores_df <- scores_df[scores_df$Loser.tie != "NA", ]

  # Assign a rating period to every game: regular-season weeks keep their week
  # number, each distinct playoff-round label becomes one additional period,
  # and periods accumulate across seasons so that year y's championship and
  # year y+1's week 1 are consecutive rating periods.
  ratingpd <- c(0)
  for (y in seasons) {
    weeks <- scores_df$Week[scores_df$Year == y]
    reg <- suppressWarnings(weeks[!is.na(as.numeric(weeks))])
    reg <- as.numeric(reg)
    post <- suppressWarnings(weeks[is.na(as.numeric(weeks))])  # non-numeric week labels = playoff rounds
    for (g in seq_along(unique(post))) {
      reg <- append(reg, rep(max(reg) + 1, sum(post == unique(post)[g])))
    }
    pd <- reg + max(ratingpd)
    ratingpd <- append(ratingpd, pd)
  }
  scores_df$RatingPd <- ratingpd[-1]  # drop the 0 used to seed the accumulator

  # Result from the perspective of the Winner.tie column: 1 = win, 0.5 = tie.
  scores_df$Result <- 1
  scores_df$Result[which(scores_df$PtsL == scores_df$PtsW)] <- 0.5

  # Run Glicko over the whole period and keep only the requested teams.
  glickodata <- scores_df[, c("RatingPd", "Winner.tie", "Loser.tie", "Result")]
  full <- glicko(glickodata, status = NULL,
                 init = c(1500, 350), gamma = 0, cval = glicko_cval,
                 history = TRUE, sort = TRUE)
  gr <- full$ratings
  gr[gr$Player %in% current_teams, ]
}
| /R/01_Initialize_Glicko.R | no_license | aaronweinstock/nfl-playoff-simulation | R | false | false | 9,538 | r | ### nfl teams (in 2018) by conference, division, or league as a whole ----
# as: parameter to specify how you want teams returned. Three options described below
# --> a length 32 vector of all teams (as="league")
# --> a two element list, where each list is a length 16 vector of teams in each conference (as="conference")
# --> an eight element list, where each list element is a length 4 vector of teams in each division (as="division")
# This function returns all NFL teams in 2018 as a vector, or
# as a list organized by division or conference
teams_2018 = function(as){
  # All 2018 NFL teams, returned according to "as":
  #   "league"     -> single character vector of all 32 teams (AFC, then NFC)
  #   "conference" -> list of 2 vectors: AFC teams, NFC teams (16 each)
  #   "division"   -> list of 8 vectors of 4 teams: AFC E/S/N/W, NFC E/S/N/W
  # Any other value of "as" raises an error.
  if (!(as %in% c("league", "division", "conference"))) {
    stop('Invalid "as" parameter, please enter "league", "division", or "conference"')
  }
  # Divisions in the fixed order the callers expect:
  # AFC East/South/North/West followed by NFC East/South/North/West.
  div_list <- list(
    c("New England Patriots", "New York Jets", "Miami Dolphins", "Buffalo Bills"),
    c("Indianapolis Colts", "Houston Texans", "Tennessee Titans", "Jacksonville Jaguars"),
    c("Baltimore Ravens", "Pittsburgh Steelers", "Cincinnati Bengals", "Cleveland Browns"),
    c("Los Angeles Chargers", "Denver Broncos", "Oakland Raiders", "Kansas City Chiefs"),
    c("Philadelphia Eagles", "New York Giants", "Washington Redskins", "Dallas Cowboys"),
    c("Carolina Panthers", "Atlanta Falcons", "Tampa Bay Buccaneers", "New Orleans Saints"),
    c("Chicago Bears", "Green Bay Packers", "Minnesota Vikings", "Detroit Lions"),
    c("Arizona Cardinals", "Seattle Seahawks", "San Francisco 49ers", "Los Angeles Rams")
  )
  if (as == "division") {
    return(div_list)
  }
  # Conferences are just the first and second halves of the division list.
  afc_teams <- unlist(div_list[1:4])
  nfc_teams <- unlist(div_list[5:8])
  if (as == "conference") {
    return(list(afc_teams, nfc_teams))
  }
  # as == "league": one flat vector.
  c(afc_teams, nfc_teams)
}
### ratings object of Glicko ratings over the initialization period (up until current year) ----
# start_season: what year in the past is your start date for the initialization period?
# current_season: the current season, i.e. the season that will be simulated. Initialization will run from start to current-1
# current_teams: a vector of current NFL teams, for subsetting the final results (use teams_2018 to obtain)
# glicko_cval: what c-value is desired for the Glicko algorithm? "c" controls how fast the Glicko RD changes
# --> for details on default value, see 02_Selecting_Default_C.pdf
# This function will obtain the end season Glicko ratings for the season "current_season - 1",
# based off of all games between start_season and current_season - 1
# This function scrapes data online from Pro-Football Reference,
# formats it for the use of "glicko" function from package "PlayerRatings",
# and calculates the Glicko ratings over the desired time period
initialization_pd_glicko_setup = function(start_season, current_season, current_teams, glicko_cval){
# Computes end-of-season Glicko ratings for season "current_season - 1" from all
# games played between start_season and current_season - 1.
# Args:
#   start_season   - first season of the initialization period (must be >= 1922)
#   current_season - season to be simulated; ratings run through current_season - 1
#   current_teams  - character vector of team names used to subset the output
#   glicko_cval    - c-value for the Glicko algorithm (controls how fast the RD changes)
# Returns: the $ratings data frame produced by glicko(), restricted to rows
# whose Player is in current_teams.
# NOTE(review): data is scraped live from Pro-Football-Reference via htmltab(),
# so this function requires network access and depends on PFR's table layout.
# PFR only has data 1922 and after, so check to make sure "start_season" is >= 1922
# (The NFL started in 1920, so it's possible that someone might want an earlier start season than 1922)
if(start_season < 1922){
stop("Start year must 1922 or after due to data availability on PFR")
}
# Initialize a data frame to store the results for all years in the specified period
# (starts with one all-NA seed row, removed after the loop below)
scores_df = data.frame("Week"=NA, "Winner.tie"=NA, "Home"=NA, "Loser.tie"=NA, "PtsW"=NA, "PtsL"=NA, "Year"=NA)
# Iteratively add the results from the seasons between "start_season" and "current_season - 1" to the frame
# Data scraped year by year from Pro-Football-Reference
for(i in start_season:(current_season - 1)){
url = paste("https://www.pro-football-reference.com/years/", i, "/games.htm", sep="")
yr = htmltab(url, which = 1, rm_nodata_cols = FALSE)
# Keep week, both teams, the home-indicator column ("V2") and the points columns
yr = yr[,c("Week", "Winner/tie", "V2", "Loser/tie", "PtsW", "PtsL")]
names(yr)[c(2,3,4)] = c("Winner.tie", "Home", "Loser.tie")
yr$Year = i
scores_df = rbind(scores_df, yr)
}
# Drop the all-NA seed row used to initialize the frame
scores_df = scores_df[-1,]
# Fixing team names for winners column (accounting for name changes over NFL history)
w = scores_df$Winner.tie
# Mark missing winners with the literal string "NA"; those rows are removed below
w[which(is.na(w))] = "NA"
w[which(w == "St. Louis Rams" | w == "Cleveland Rams")] = "Los Angeles Rams"
w[which(w == "Tennessee Oilers" | w == "Houston Oilers")] = "Tennessee Titans"
w[which(w == "Los Angeles Raiders")] = "Oakland Raiders"
w[which(w == "Phoenix Cardinals" | w == "St. Louis Cardinals" | w == "Chicago Cardinals" | w == "Chi/Pit Cards/Steelers")] = "Arizona Cardinals"
w[which(w == "Baltimore Colts")] = "Indianapolis Colts"
w[which(w == "Boston Patriots")] = "New England Patriots"
w[which(w == "San Diego Chargers")] = "Los Angeles Chargers"
w[which(w == "Phi/Pit Eagles/Steelers")] = "Philadelphia Eagles"
w[which(w == "Pittsburgh Pirates")] = "Pittsburgh Steelers"
w[which(w == "Boston Redskins" | w == "Boston Braves")] = "Washington Redskins"
w[which(w == "Portsmouth Spartans")] = "Detroit Lions"
w[which(w == "Racine Legion")] = "Racine Tornadoes"
w[which(w == "Louisville Brecks")] = "Louisville Cardinals"
w[which(w == "Akron Pros")] = "Akron Indians"
w[which(w == "Buffalo All-Americans" | w == "Buffalo Bisons")] = "Buffalo Rangers"
w[which(w == "Dayton Triangles" | w == "Brooklyn Dodgers")] = "Brooklyn Tigers"
w[which(w == "Toledo Maroons")] = "Kenosha Maroons"
w[which(w == "Columbus Tigers")] = "Columbus Panhandles"
w[which(w == "Cleveland Bulldogs")] = "Detroit Wolverines"
w[which(w == "Duluth Kelleys")] = "Duluth Eskimos"
w[which(w == "Pottsville Maroons")] = "Boston Bulldogs"
w[which(w == "Kansas City Cowboys")] = "Kansas City Blues"
w[which(w == "Orange Tornadoes")] = "Newark Tornadoes"
w[which(w == "Boston Yanks" | w == "Bos/Bkn Yanks/Tigers" | w == "New York Bulldogs" | w == "New York Yanks")] = "Dallas Texans"
scores_df$Winner.tie = w
# Fixing team names for losers column (accounting for name changes over NFL history)
# Same replacement rules as the winner column above
l = scores_df$Loser.tie
l[which(is.na(l))] = "NA"
l[which(l == "St. Louis Rams" | l == "Cleveland Rams")] = "Los Angeles Rams"
l[which(l == "Tennessee Oilers" | l == "Houston Oilers")] = "Tennessee Titans"
l[which(l == "Los Angeles Raiders")] = "Oakland Raiders"
l[which(l == "Phoenix Cardinals" | l == "St. Louis Cardinals" | l == "Chicago Cardinals" | l == "Chi/Pit Cards/Steelers")] = "Arizona Cardinals"
l[which(l == "Baltimore Colts")] = "Indianapolis Colts"
l[which(l == "Boston Patriots")] = "New England Patriots"
l[which(l == "San Diego Chargers")] = "Los Angeles Chargers"
l[which(l == "Phi/Pit Eagles/Steelers")] = "Philadelphia Eagles"
l[which(l == "Pittsburgh Pirates")] = "Pittsburgh Steelers"
l[which(l == "Boston Redskins" | l == "Boston Braves")] = "Washington Redskins"
l[which(l == "Portsmouth Spartans")] = "Detroit Lions"
l[which(l == "Racine Legion")] = "Racine Tornadoes"
l[which(l == "Louisville Brecks")] = "Louisville Cardinals"
l[which(l == "Akron Pros")] = "Akron Indians"
l[which(l == "Buffalo All-Americans" | l == "Buffalo Bisons")] = "Buffalo Rangers"
l[which(l == "Dayton Triangles" | l == "Brooklyn Dodgers")] = "Brooklyn Tigers"
l[which(l == "Toledo Maroons")] = "Kenosha Maroons"
l[which(l == "Columbus Tigers")] = "Columbus Panhandles"
l[which(l == "Cleveland Bulldogs")] = "Detroit Wolverines"
l[which(l == "Duluth Kelleys")] = "Duluth Eskimos"
l[which(l == "Pottsville Maroons")] = "Boston Bulldogs"
l[which(l == "Kansas City Cowboys")] = "Kansas City Blues"
l[which(l == "Orange Tornadoes")] = "Newark Tornadoes"
l[which(l == "Boston Yanks" | l == "Bos/Bkn Yanks/Tigers" | l == "New York Bulldogs" | l == "New York Yanks")] = "Dallas Texans"
scores_df$Loser.tie = l
# Edit out null rows left by the scrape (repeated header rows and NA placeholders)
scores_df = scores_df[scores_df$Winner.tie != "Winner/tie",]
scores_df = scores_df[scores_df$Loser.tie != "Loser/tie",]
scores_df = scores_df[scores_df$Winner.tie != "NA",]
scores_df = scores_df[scores_df$Loser.tie != "NA",]
# Adding a rating period variable based off of season and week
# Playoffs will be included as rating periods
# Organize such that the championship in year y and week 1 of year y+1 are consecutive rating periods
ratingpd = c(0)
for(y in start_season:(current_season - 1)){
weeks = scores_df$Week[scores_df$Year == y]
# Numeric week labels are regular-season weeks; non-numeric labels are playoff rounds
reg = suppressWarnings(weeks[!is.na(as.numeric(weeks))])
reg = as.numeric(reg)
post = suppressWarnings(weeks[is.na(as.numeric(weeks))])
# Each distinct playoff-round label becomes one additional period after the last regular week
for(g in 1:length(unique(post))){
reg = append(reg, rep(max(reg)+1, sum(post == unique(post)[g])))
}
# Shift this season's periods so they continue after the previous season's last period
pd = reg + max(ratingpd)
ratingpd = append(ratingpd, pd)
}
# Drop the 0 used to seed the accumulator
ratingpd = ratingpd[-1]
scores_df$RatingPd = ratingpd
# Add a result variable (win/loss) for team in leftmost column for each game
# 1 = win for Winner.tie, 0.5 = tie (equal points)
scores_df$Result = 1
scores_df$Result[which(scores_df$PtsL == scores_df$PtsW)] = 0.5
# Obtaining Glicko ratings at end year "current_season - 1"
# (Based on period beginning at the beginning of "start_year")
glickodata = scores_df[,c("RatingPd","Winner.tie","Loser.tie","Result")]
full = glicko(glickodata, status = NULL,
init = c(1500, 350), gamma = 0, cval = glicko_cval,
history = TRUE, sort = TRUE)
# Subset the ratings table to the currently active franchises
gr = full$ratings
gr = gr[gr$Player %in% current_teams,]
return(gr)
}
|
\name{GraphClust}
\alias{GraphClust}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
GraphClust
}
\description{
Finds mutational clusters after reordering the protein using the traveling salesman approach.
}
\usage{
GraphClust(mutation.data, position.data, insertion.type = "cheapest_insertion", alpha = 0.05,
MultComp = "Bonferroni", fix.start.pos = "Y", Include.Culled = "Y",
Include.Full = "Y")
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{mutation.data}{
A matrix of 0's (no mutation) and 1's (mutation) where each column represents an amino acid in the protein and each row represents an individual sample (test subject, cell line, etc). Thus if column i in row j had a 1, that would mean that the ith amino acid for person j had a nonsynonymous mutation.
}
\item{position.data}{
A dataframe consisting of six columns: 1) Residue Name, 2) Amino Acid number in the protein, 3) Side Chain, 4) X-coordinate, 5) Y-coordinate and 6) Z-coordinate. Please see \emph{get.Positions} and \emph{get.AlignedPositions} in the \emph{iPAC} package for further information on how to construct this matrix.
}
\item{insertion.type}{
Specifies the type of insertion method used. Please see the \emph{TSP} package for more details.
}
\item{alpha}{
The significance level required for a mutational cluster to be declared significant. Please see the NMC package for further information.
}
\item{MultComp}{
The multiple comparison adjustment required as all pairwise mutations are considered. Options are: ``Bonferroni", "BH", or "None".
}
\item{fix.start.pos}{
The TSP package starts the path at a random amino acid. Such that the results are easily reproducible, the default starts the path on the first amino acid in the protein.
}
\item{Include.Culled}{
If "Y", the standard NMC algorithm will be run on the protein after removing the amino acids for which there is no positional data.
}
\item{Include.Full}{
If "Y", the standard NMC algorithm will be run on the full protein sequence.
}
}
\details{
The protein reordering is done using the \emph{TSP} package available on CRAN. This hamiltonian path then serves as the new protein ordering.
The position data can be created via the ``get.AlignedPositions" or the ``get.Positions" functions available via the imported \emph{iPAC} package.
The mutation matrix must have the default R column headings ``V1", ``V2",...,``VN", where N is the last amino acid in the protein. No positions should be skipped in the mutation matrix.
When unmapping back to the original space, the end points of the cluster in the mapped space are used as the endpoints of the cluster in the unmapped space.
}
\value{
\item{Remapped}{This shows the clusters found while taking the 3D structure into account and remapping the protein using a traveling salesman approach.}
\item{OriginalCulled}{This shows the clusters found if you run the NMC algorithm on the canonical linear protein, but with the amino acids for which we don't have 3D positional data removed.}
\item{Original}{This shows the clusters found if you run the NMC algorithm on the canonical linear protein with all the amino acids.}
\item{candidate.path}{This shows the path found by the TSP package that heuristically minimizes the total distance through the protein.}
\item{path.distance}{The length of the candidate path if traveled from start to finish.}
\item{linear.path.distance}{The length of the sequential path 1,2,3...,N (where N is the total number of amino acids in the protein).}
\item{protein.graph}{A graph object created by the \emph{igraph} package that has edges between amino acids on the candidate.path. This can be passed to plotting functions to create visual representations.}
\item{missing.positions}{This shows which amino acids are present in the mutation matrix but for which we do not have positions. These amino acids are cut from the protein when calculating the \emph{Remapped} and \emph{OriginalCulled} results.}
}
\references{
Ye et. al., Statistical method on nonrandom clustering with application to somatic mutations in cancer. \emph{BMC Bioinformatics}. 2010. doi:10.1186/1471-2105-11-11.
Michael Hahsler and Kurt Hornik (2011). Traveling Salesperson Problem (TSP) R package version 1.0-7. \url{http://CRAN.R-project.org/}.
Csardi G, Nepusz T: The igraph software package for complex network research, InterJournal, Complex Systems 1695. 2006. \url{http://igraph.sf.net}
Gregory Ryslik and Hongyu Zhao (2012). iPAC: Identification of Protein Amino acid Clustering. R package version 1.1.3. \url{http://www.bioconductor.org/}.
}
\examples{
\dontrun{
#Load the positional and mutational data
CIF<-"https://files.rcsb.org/view/3GFT.cif"
Fasta<-"https://www.uniprot.org/uniprot/P01116-2.fasta"
KRAS.Positions<-get.Positions(CIF,Fasta, "A")
data(KRAS.Mutations)
#Calculate the required clusters
GraphClust(KRAS.Mutations,KRAS.Positions$Positions,insertion.type = "cheapest_insertion",
alpha = 0.05, MultComp = "Bonferroni")
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ Clusters }
\keyword{ Mutations }% __ONLY ONE__ keyword per line
| /man/GraphClust.Rd | no_license | gryslik/GraphPAC | R | false | false | 5,199 | rd | \name{GraphClust}
\alias{GraphClust}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
GraphClust
}
\description{
Finds mutational clusters after reordering the protein using the traveling salesman approach.
}
\usage{
GraphClust(mutation.data, position.data, insertion.type = "cheapest_insertion", alpha = 0.05,
MultComp = "Bonferroni", fix.start.pos = "Y", Include.Culled = "Y",
Include.Full = "Y")
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{mutation.data}{
A matrix of 0's (no mutation) and 1's (mutation) where each column represents an amino acid in the protein and each row represents an individual sample (test subject, cell line, etc). Thus if column i in row j had a 1, that would mean that the ith amino acid for person j had a nonsynonomous mutation.
}
\item{position.data}{
A dataframe consisting of six columns: 1) Residue Name, 2) Amino Acid number in the protein, 3) Side Chain, 4) X-coordinate, 5) Y-coordinate and 6) Z-coordinate. Please see \emph{get.Positions} and \emph{get.AlignedPositions} in the \emph{iPAC} package for further information on how to construct this matrix.
}
\item{insertion.type}{
Specifies the type of insertion method used. Please see the \emph{TSP} package for more details.
}
\item{alpha}{
The significance level required in order to find a mutational cluster significance. Please see the NMC package for further information.
}
\item{MultComp}{
The multiple comparison adjustment required as all pairwise mutations are considered. Options are: ``Bonferroni", "BH", or "None".
}
\item{fix.start.pos}{
The TSP package starts the path at a random amino acid. Such that the results are easily reproducible, the default starts the path on the first amino acid in the protein.
}
\item{Include.Culled}{
If "Y", the standard NMC algorithm will be run on the protein after removing the amino acids for which there is no positional data.
}
\item{Include.Full}{
If "Y", the standard NMC algorithm will be run on the full protein sequence.
}
}
\details{
The protein reordering is done using the \emph{TSP} package available on CRAN. This hamiltonian path then serves as the new protein ordering.
The position data can be created via the ``get.AlignedPositions" or the ``get.Positions" functions available via the imported \emph{iPAC} package.
The mutation matrix must have the default R column headings ``V1", ``V2",...,``VN", where N is the last amino acid in the protein. No positions should be skipped in the mutation matrix.
When unmapping back to the original space, the end points of the cluster in the mapped space are used as the endpoints of the cluster in the unmapped space.
}
\value{
\item{Remapped}{This shows the clusters found while taking the 3D structure into account and remapping the protein using a traveling salesman approach.}
\item{OriginalCulled}{This shows the clusters found if you run the NMC algorithm on the canonical linear protein, but with the amino acids for which we don't have 3D positional data removed.}
\item{Original}{This shows the clusters found if you run the NMC algorithm on the canonical linear protein with all the amino acids.}
\item{candidate.path}{This shows the path found by the TSP package that heuristically minimizes the total distance through the protein.}
\item{path.distance}{The length of the candidate path if traveled from start to finish.}
\item{linear.path.distance}{The length of the sequential path 1,2,3...,N (where N is the total number of amino acids in the protein).}
\item{protein.graph}{A graph object created by the \emph{igraph} package that has edges between amino acids on the candidate.path. This can be passed to plotting functions to create visual representations.}
\item{missing.positions}{This shows which amino acids are present in the mutation matrix but for which we do not have positions. These amino acids are cut from the protein when calculating the \emph{Remapped} and \emph{OriginalCulled} results.}
}
\references{
Ye et. al., Statistical method on nonrandom clustering with application to somatic mutations in cancer. \emph{BMC Bioinformatics}. 2010. doi:10.1186/1471-2105-11-11.
Michael Hahsler and Kurt Hornik (2011). Traveling Salesperson Problem (TSP) R package version 1.0-7. \url{http://CRAN.R-project.org/}.
Csardi G, Nepusz T: The igraph software package for complex network research, InterJournal, Complex Systems 1695. 2006. \url{http://igraph.sf.net}
Gregory Ryslik and Hongyu Zhao (2012). iPAC: Identification of Protein Amino acid Clustering. R package version 1.1.3. \url{http://www.bioconductor.org/}.
}
\examples{
\dontrun{
#Load the positional and mutational data
CIF<-"https://files.rcsb.org/view/3GFT.cif"
Fasta<-"https://www.uniprot.org/uniprot/P01116-2.fasta"
KRAS.Positions<-get.Positions(CIF,Fasta, "A")
data(KRAS.Mutations)
#Calculate the required clusters
GraphClust(KRAS.Mutations,KRAS.Positions$Positions,insertion.type = "cheapest_insertion",
alpha = 0.05, MultComp = "Bonferroni")
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ Clusters }
\keyword{ Mutations }% __ONLY ONE__ keyword per line
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/formatPvalue.R
\name{formatPvalue}
\alias{formatPvalue}
\title{Pretty formatting of \emph{p} values}
\usage{
formatPvalue(values, digits = 3, spaces = TRUE, includeP = TRUE)
}
\arguments{
\item{values}{The p-values to format.}
\item{digits}{The number of digits to round to. Numbers smaller
than this number will be shown as <.001 or <.0001 etc.}
\item{spaces}{Whether to include spaces between symbols,
operators, and digits.}
\item{includeP}{Whether to include the 'p' and '='-symbol in the
results (the '<' symbol is always included).}
}
\value{
A formatted P value, roughly according to APA style
guidelines. This means that the \link{noZero} function is used to
remove the zero preceding the decimal point, and p values
that would round to zero given the requested number of digits
are shown as e.g. p<.001.
}
\description{
Pretty formatting of \emph{p} values
}
\examples{
formatPvalue(cor.test(mtcars$mpg,
mtcars$disp)$p.value);
formatPvalue(cor.test(mtcars$drat,
mtcars$qsec)$p.value);
}
\seealso{
\code{\link[=formatCI]{formatCI()}}, \code{\link[=formatR]{formatR()}}, \code{\link[=noZero]{noZero()}}
}
| /man/formatPvalue.Rd | no_license | cran/rosetta | R | false | true | 1,276 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/formatPvalue.R
\name{formatPvalue}
\alias{formatPvalue}
\title{Pretty formatting of \emph{p} values}
\usage{
formatPvalue(values, digits = 3, spaces = TRUE, includeP = TRUE)
}
\arguments{
\item{values}{The p-values to format.}
\item{digits}{The number of digits to round to. Numbers smaller
than this number will be shown as <.001 or <.0001 etc.}
\item{spaces}{Whether to include spaces between symbols,
operators, and digits.}
\item{includeP}{Whether to include the 'p' and '='-symbol in the
results (the '<' symbol is always included).}
}
\value{
A formatted P value, roughly according to APA style
guidelines. This means that the \link{noZero} function is used to
remove the zero preceding the decimal point, and p values
that would round to zero given the requested number of digits
are shown as e.g. p<.001.
}
\description{
Pretty formatting of \emph{p} values
}
\examples{
formatPvalue(cor.test(mtcars$mpg,
mtcars$disp)$p.value);
formatPvalue(cor.test(mtcars$drat,
mtcars$qsec)$p.value);
}
\seealso{
\code{\link[=formatCI]{formatCI()}}, \code{\link[=formatR]{formatR()}}, \code{\link[=noZero]{noZero()}}
}
|
####### Checks h2o GBM with a gamma distribution and observation weights against reference values from R's gbm package ######
test <- function() {
  # Load the data set and mark the three predictor columns as categorical.
  moppe <- h2o.uploadFile(locate("smalldata/gbm_test/moppe.csv"))
  moppe$premiekl <- as.factor(moppe$premiekl)
  moppe$moptva <- as.factor(moppe$moptva)
  moppe$zon <- as.factor(moppe$zon)
  # Reference fit in R that produced the hard-coded expectations below:
  #   gg <- gbm(formula = medskad ~ premiekl + moptva + zon, data = table.1.2,
  #             distribution = "gamma", weights = table.1.2$antskad,
  #             n.trees = 20, interaction.depth = 1, n.minobsinnode = 1,
  #             shrinkage = 1, bag.fraction = 1, train.fraction = 1)
  #   pr <- predict(gg, newdata = table.1.2, type = "response")
  # Fit the equivalent single-split GBM in h2o, weighting by antskad.
  model <- h2o.gbm(x = 1:3, y = "medskad", training_frame = moppe,
                   distribution = "gamma", weights_column = "antskad",
                   ntrees = 20, max_depth = 1, min_rows = 1, learn_rate = 1)
  preds <- as.vector(as.data.frame(h2o.predict(model, newdata = moppe)))
  # Compare the model intercept and the prediction distribution against the
  # reference values obtained from R's gbm (see commented code above).
  expect_equal(8.804447, model@model$init_f, tolerance = 1e-6)
  expect_equal(3751.01, min(preds[, 1]), tolerance = 1e-4)
  expect_equal(15298.87, max(preds[, 1]), tolerance = 1e-4)
  expect_equal(8121.98, mean(preds[, 1]), tolerance = 1e-4)
}
doTest("GBM weight Test: GBM w/ weight for gamma distribution", test)
| /h2o-r/tests/testdir_algos/gbm/runit_GBM_weight_gamma.R | permissive | StephRoark/h2o-3 | R | false | false | 1,418 | r | ####### This tests gamma distribution w weights in gbm by comparing results with R ######
# Validates h2o.gbm with a gamma distribution and observation weights by
# comparing against reference values obtained from R's gbm package.
test <- function() {
  # Upload the data set and mark the three predictors as categorical.
  htable = h2o.uploadFile(locate("smalldata/gbm_test/moppe.csv"))
  htable$premiekl = as.factor(htable$premiekl)
  htable$moptva = as.factor(htable$moptva)
  htable$zon = as.factor(htable$zon)
  # Reference R code that produced the hard-coded expected values below:
  #gg = gbm(formula = medskad ~ premiekl + moptva + zon,data = table.1.2,distribution = "gamma", weights = table.1.2$antskad ,
  # n.trees = 20,interaction.depth = 1,n.minobsinnode = 1,shrinkage = 1,bag.fraction = 1,train.fraction = 1)
  #pr = predict(gg,newdata = table.1.2,type = "response")
  #htable= as.h2o(table.1.2,destination_frame = "htable")
  # Fit the equivalent single-split GBM in h2o with antskad as the weight column.
  hh = h2o.gbm(x = 1:3,y = "medskad",training_frame = htable,distribution = "gamma",weights_column = "antskad",
               ntrees = 20,max_depth = 1,min_rows = 1,learn_rate = 1)
  ph = as.vector(as.data.frame(h2o.predict(hh,newdata = htable)))
  # Original live comparisons against the gbm fit (kept for reference):
  #expect_equal(gg$initF,hh@model$init_f,tolerance = 1e-6)
  #expect_equal(min(pr),min(ph[,1]),tolerance = 1e-6)
  #expect_equal(max(pr),max(ph[,1]),tolerance = 1e-6)
  #expect_equal(mean(pr),mean(ph[,1]),tolerance = 1e-6)
  # Hard-coded expectations for the intercept and the prediction distribution.
  expect_equal(8.804447,hh@model$init_f,tolerance = 1e-6)
  expect_equal(3751.01,min(ph[,1]),tolerance = 1e-4)
  expect_equal(15298.87,max(ph[,1]),tolerance = 1e-4)
  expect_equal( 8121.98,mean(ph[,1]),tolerance = 1e-4)
}
doTest("GBM weight Test: GBM w/ weight for gamma distribution", test)
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/enrichment_depletion_test.R
\name{enrichment_depletion_test}
\alias{enrichment_depletion_test}
\title{Test for enrichment or depletion of mutations in genomic regions}
\usage{
enrichment_depletion_test(x, by = c())
}
\arguments{
\item{x}{data.frame result from genomic_distribution()}
\item{by}{Optional grouping variable, e.g. tissue type}
}
\value{
data.frame with the observed and expected number of mutations per
genomic region per group (by) or sample
}
\description{
This function aggregates mutations per group (optional) and performs an
enrichment depletion test.
}
\examples{
## See the 'genomic_distribution()' example for how we obtained the
## following data:
distr <- readRDS(system.file("states/distr_data.rds",
package="MutationalPatterns"))
tissue <- c(rep("colon", 3), rep("intestine", 3), rep("liver", 3))
## Perform the enrichment/depletion test by tissue type.
distr_test <- enrichment_depletion_test(distr, by = tissue)
## Or without specifying the 'by' parameter.
distr_test2 <- enrichment_depletion_test(distr)
}
\seealso{
\code{\link{genomic_distribution}},
\code{\link{plot_enrichment_depletion}}
}
| /man/enrichment_depletion_test.Rd | permissive | zongchangli/MutationalPatterns | R | false | true | 1,227 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/enrichment_depletion_test.R
\name{enrichment_depletion_test}
\alias{enrichment_depletion_test}
\title{Test for enrichment or depletion of mutations in genomic regions}
\usage{
enrichment_depletion_test(x, by = c())
}
\arguments{
\item{x}{data.frame result from genomic_distribution()}
\item{by}{Optional grouping variable, e.g. tissue type}
}
\value{
data.frame with the observed and expected number of mutations per
genomic region per group (by) or sample
}
\description{
This function aggregates mutations per group (optional) and performs an
enrichment depletion test.
}
\examples{
## See the 'genomic_distribution()' example for how we obtained the
## following data:
distr <- readRDS(system.file("states/distr_data.rds",
package="MutationalPatterns"))
tissue <- c(rep("colon", 3), rep("intestine", 3), rep("liver", 3))
## Perform the enrichment/depletion test by tissue type.
distr_test <- enrichment_depletion_test(distr, by = tissue)
## Or without specifying the 'by' parameter.
distr_test2 <- enrichment_depletion_test(distr)
}
\seealso{
\code{\link{genomic_distribution}},
\code{\link{plot_enrichment_depletion}}
}
|
# Read a phylogenetic tree and remove its root (the unrooted tree is
# written out on the following line, which also names the output file).
library(ape)
testtree <- read.tree("13711_0.txt")   # Newick-format input tree
unrooted_tr <- unroot(testtree)        # collapse the root node
write.tree(unrooted_tr, file="13711_0_unrooted.txt") | /codeml_files/newick_trees_processed/13711_0/rinput.R | no_license | DaniBoo/cyanobacteria_project | R | false | false | 137 | r | library(ape)
# Read a phylogenetic tree, remove its root, and write the unrooted tree
# back out in Newick format (requires the ape package, loaded above).
testtree <- read.tree("13711_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="13711_0_unrooted.txt")
# Call this file after defining the relevant variables (country, study dates,
# file paths, analysis settings, etc.) to generate the HTML report.
source('_scripts/paper_6/paper_6_otitis_media/paper_6_otitis_media_synthetic_control_functions.R', local = TRUE)
# Packages required to knit and render the R Markdown report.
packages <-
  c(
    'curl',
    'evaluate',
    'digest',
    'formatR',
    'highr',
    'markdown',
    'stringr',
    'yaml',
    'Rcpp',
    'htmltools',
    'caTools',
    'bitops',
    'knitr',
    'jsonlite',
    'base64enc',
    'rprojroot',
    'rmarkdown'
  )
# packageHandler() comes from the sourced functions file; it installs and/or
# updates the packages according to the caller-supplied flags.
packageHandler(packages, update_packages, install_packages)
# Optionally install Pandoc. Programmatic installation is only available on
# Windows (via installr); on other platforms we can only warn.
if (install_pandoc) {
  if (Sys.info()['sysname'] == 'Windows') {
    packageHandler(c('installr', 'rmarkdown'),
                   update_packages,
                   install_packages)
    if (!rmarkdown::pandoc_available()) {
      installr::install.pandoc()
    }
  } else {
    if (!(rmarkdown::pandoc_available())) {
      warning(
        'This system cannot programmatically install/update Pandoc. To install/update Pandoc, visit "https://pandoc.org/installing.html".'
      )
    }
  }
}
# Default: exclude no groups unless the caller defined exclude_group.
if (!exists('exclude_group')) {
  exclude_group <- c()
}
# Attach every required package (sapply used here purely for its side effect).
sapply(packages,
       library,
       quietly = TRUE,
       character.only = TRUE)
# Parameters forwarded to the R Markdown document. All of these values are
# assumed to have been defined by the caller before sourcing this script.
param_list <- list(
  update_packages = update_packages,
  install_packages = install_packages,
  country = country,
  n_seasons = n_seasons,
  exclude_covar = exclude_covar,
  exclude_group = exclude_group,
  code_change = code_change,
  input_directory = input_directory,
  output_directory = output_directory,
  file_name = file_name,
  group_name = group_name,
  date_name = date_name,
  outcome_name = outcome_name,
  denom_name = denom_name,
  start_date = start_date,
  intervention_date = intervention_date,
  end_date = end_date,
  pre_period = pre_period,
  post_period = post_period,
  eval_period = eval_period
)
# Render with rmarkdown when Pandoc is available; otherwise fall back to a
# plain knitr + markdown HTML conversion and clean up the intermediate files.
run_pandoc <- rmarkdown::pandoc_available()
if (run_pandoc) {
  rmarkdown::render(
    'paper_6_otitis_media_synthetic_control_report.Rmd',
    output_file = 'Paper 6 Otitis Media Synthetic Control Report.html',
    output_dir = output_directory,
    params = param_list,
    envir = environment()
  )
} else {
  knitr::knit('paper_6_otitis_media_synthetic_control_report.Rmd', envir = environment())
  markdown::markdownToHTML(
    'paper_6_otitis_media_synthetic_control_report.md',
    output = paste(output_directory, 'Paper 6 Otitis Media Synthetic Control Report.html', sep = '')
  )
  file.remove('paper_6_otitis_media_synthetic_control_report.md')
  unlink('figure/', recursive = TRUE)
}
# Finally, write the analysis results to disk.
source('_scripts/paper_6/paper_6_otitis_media/paper_6_otitis_media_synthetic_control_write_results.R', local = TRUE)
source('_scripts/paper_6/paper_6_otitis_media/paper_6_otitis_media_synthetic_control_write_results.R', local = TRUE) | /_scripts/paper_6/paper_6_otitis_media_sense_analy_pre/paper_6_otitis_media_synthetic_control_report_sa_pre.R | permissive | eliaseythorsson/phd_thesis | R | false | false | 2,902 | r | #Call this file after defining the relevant variables to generate the HTML report.
source('_scripts/paper_6/paper_6_otitis_media/paper_6_otitis_media_synthetic_control_functions.R', local = TRUE)
packages <-
c(
'curl',
'evaluate',
'digest',
'formatR',
'highr',
'markdown',
'stringr',
'yaml',
'Rcpp',
'htmltools',
'caTools',
'bitops',
'knitr',
'jsonlite',
'base64enc',
'rprojroot',
'rmarkdown'
)
packageHandler(packages, update_packages, install_packages)
if (install_pandoc) {
if (Sys.info()['sysname'] == 'Windows') {
packageHandler(c('installr', 'rmarkdown'),
update_packages,
install_packages)
if (!rmarkdown::pandoc_available()) {
installr::install.pandoc()
}
} else {
if (!(rmarkdown::pandoc_available())) {
warning(
'This system cannot programmatically install/update Pandoc. To install/update Pandoc, visit "https://pandoc.org/installing.html".'
)
}
}
}
if (!exists('exclude_group')) {
exclude_group <- c()
}
sapply(packages,
library,
quietly = TRUE,
character.only = TRUE)
param_list <- list(
update_packages = update_packages,
install_packages = install_packages,
country = country,
n_seasons = n_seasons,
exclude_covar = exclude_covar,
exclude_group = exclude_group,
code_change = code_change,
input_directory = input_directory,
output_directory = output_directory,
file_name = file_name,
group_name = group_name,
date_name = date_name,
outcome_name = outcome_name,
denom_name = denom_name,
start_date = start_date,
intervention_date = intervention_date,
end_date = end_date,
pre_period = pre_period,
post_period = post_period,
eval_period = eval_period
)
run_pandoc <- rmarkdown::pandoc_available()
if (run_pandoc) {
rmarkdown::render(
'paper_6_otitis_media_synthetic_control_report.Rmd',
output_file = 'Paper 6 Otitis Media Synthetic Control Report.html',
output_dir = output_directory,
params = param_list,
envir = environment()
)
} else {
knitr::knit('paper_6_otitis_media_synthetic_control_report.Rmd', envir = environment())
markdown::markdownToHTML(
'paper_6_otitis_media_synthetic_control_report.md',
output = paste(output_directory, 'Paper 6 Otitis Media Synthetic Control Report.html', sep = '')
)
file.remove('paper_6_otitis_media_synthetic_control_report.md')
unlink('figure/', recursive = TRUE)
}
source('_scripts/paper_6/paper_6_otitis_media/paper_6_otitis_media_synthetic_control_write_results.R', local = TRUE) |
# Example: extract the components of a German compound word using the
# Rosette API morphology endpoint with the compound-components feature.
source("../R/Api.R")
library(rjson)
library("optparse")

# Command-line options: the API key and an optional alternate service URL.
option_list <- list(
  make_option(c("-k", "--key"), action = "store", default = NA,
              type = 'character', help = "Rosette API key"),
  make_option(c("-u", "--url"), action = "store", default = NA,
              type = 'character', help = "Rosette API url"))
opt <- parse_args(OptionParser(option_list = option_list))

# Build the JSON request body: the compound word plus the feature to run.
compound_word <- "Rechtsschutzversicherungsgesellschaften"
request_body <- toJSON(list(content = compound_word,
                            morphology = "compound-components"))

# Call the endpoint, passing the custom URL only when one was supplied.
result <- if (is.na(opt$url)) {
  api(opt$key, "morphology", request_body)
} else {
  api(opt$key, "morphology", request_body, NULL, opt$url)
}
print(result)
| /examples/morphology_compound-components.R | permissive | kbailey-basistech/R-Binding | R | false | false | 865 | r | source("../R/Api.R")
# Example: extract the components of a German compound word using the
# Rosette API morphology endpoint (the api() helper is sourced above).
library(rjson)
library("optparse")
# Command-line options: the API key and an optional alternate service URL.
option_list = list( make_option(c("-k", "--key"), action="store", default=NA, type='character',
help="Rosette API key"), make_option(c("-u", "--url"), action="store", default=NA, type='character',
help="Rosette API url"))
opt_parser = OptionParser(option_list=option_list)
opt = parse_args(opt_parser)
# The compound word to analyze and the request parameters ("content" plus the
# "morphology" feature selector), serialized to JSON for the request body.
morphology_compound_components_data <- "Rechtsschutzversicherungsgesellschaften"
key <- "content"
value <- morphology_compound_components_data
key1 <- "morphology"
value1 <- "compound-components"
parameters <- list()
parameters[[ key ]] <- value
parameters[[ key1 ]] <- value1
parameters <- toJSON(parameters)
# Call the endpoint, passing the custom URL only when one was supplied.
if(is.na(opt$url)){
  result <- api(opt$key, "morphology", parameters)
} else {
  result <- api(opt$key, "morphology", parameters, NULL, opt$url)
}
print(result)
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{context}
\alias{context}
\title{Describe the context of a set of tests.}
\usage{
context(desc)
}
\arguments{
\item{desc}{description of context. Should start with a capital letter.}
}
\description{
A context defines a set of tests that test related functionality. Usually
you will have one context per file, but you may have multiple contexts
in a single file if you so choose.
}
\examples{
context("String processing")
context("Remote procedure calls")
}
| /testthat/man/context.Rd | no_license | radfordneal/R-package-mods | R | false | false | 518 | rd | % Generated by roxygen2 (4.0.2): do not edit by hand
\name{context}
\alias{context}
\title{Describe the context of a set of tests.}
\usage{
context(desc)
}
\arguments{
\item{desc}{description of context. Should start with a capital letter.}
}
\description{
A context defines a set of tests that test related functionality. Usually
you will have one context per file, but you may have multiple contexts
in a single file if you so choose.
}
\examples{
context("String processing")
context("Remote procedure calls")
}
|
# hot.deck: multiply impute missing data via affinity-based hot deck imputation.
#   data             - data frame containing the missing values to impute
#   m                - number of imputed data sets to produce
#   method           - "best.cell": draw donors from the observations with maximal
#                      affinity; "p.draw": draw values with probability proportional
#                      to mean affinity per observed value
#   cutoff           - max number of unique values for a column to count as discrete
#   sdCutoff         - scaling applied to continuous variables before affinity calculation
#   optimizeSD       - if TRUE, step sdCutoff upward until every imputation has >= m donors
#   optimStep        - increment for the sdCutoff search
#   optimStop        - upper bound for the sdCutoff search
#   weightedAffinity - if TRUE, weight affinities by absolute pairwise correlations
#   impContinuous    - "HD": hot-deck continuous values too; "mice": delegate
#                      continuous imputations to mice()
#   IDvars           - identifier columns excluded from imputation but kept in output
#   ...              - forwarded to mice() when impContinuous == "mice"
# Returns a list: data (class "mi" list of m imputed data frames), affinity
# matrix, donors, draws, and the empirical/theoretical maximum affinities.
#
# DA 9/10/14: Added argument for IDvars (ID variables that are not to be used in imputation, but should remain in data)
hot.deck <-
function(data, m = 5, method=c("best.cell", "p.draw"), cutoff=10, sdCutoff=1, optimizeSD = FALSE,
    optimStep = 0.1, optimStop = 5, weightedAffinity = FALSE, impContinuous = c("HD", "mice"),
    IDvars = NULL, ...){
  method <- match.arg(method)
  impContinuous <- match.arg(impContinuous)
  # DA 9/15/14 Added warning about weighted affinity calculations and correlations among or with categorical variables.
  if(weightedAffinity){
    warning("Affinity calculations made as a function of pearson correlations among variables coerced to class 'numeric'\ntake care when using this on categorical, especially nominal variables")
  }
  # DA 9/10/14: If IDvars is specified, remove them from the data and save in a different file.
  if(!is.null(IDvars)){
    IDdata <- data[, which(names(data) %in% IDvars), drop=FALSE]
    data <- data[,-which(names(data) %in% IDvars), drop=FALSE]
    # DA 9/10/14: Added code to remove any observations that are always missing
    allNA <- apply(data, 1, function(x)all(is.na(x)))
    if(any(allNA)){
      IDdata <- IDdata[-which(allNA), , drop=FALSE]
      data <- data[-which(allNA), , drop=FALSE]
    }
  }
  else{
    allNA <- apply(data, 1, function(x)all(is.na(x)))
    if(any(allNA)){
      data <- data[-which(allNA), , drop=FALSE]
    }
  }
  if(any(allNA)){
    warning(paste(sum(allNA), " observations with no observed data. These observations were removed\n", sep="") )
  }
  # Classify columns: discrete vs. continuous, and which have any missingness.
  facs <- sapply(1:ncol(data), function(x)is.factor(data[,x]))
  disc.miss <- which(is.discrete(data, cutoff) & apply(data, 2, function(x)any(is.na(x))))
  alldisc <- is.discrete(data, cutoff)
  allmiss <- which(is.na(data), arr.ind=TRUE)
  # NOTE(review): if disc.miss is empty, -which(...) indexes with integer(0) and
  # yields a zero-row result, so all-continuous missingness would look like none — verify.
  cont.miss <- allmiss[-which(allmiss[,2] %in% disc.miss), ]
  # DA 10/2/14: moved the warning here in response to Toby's problem where the error was getting tripped
  # even though there was no continuous data. This goes a setp further and doesn't trip the warning unless
  # there is any continuous data with missing observations.
  if(impContinuous == "HD" & method == "p.draw" & length(cont.miss) > 0){
    stop("Hot Deck imputation of continuous values can only be used with the best cell method\n")
  }
  whichna <- which(is.na(data), arr.ind=TRUE)
  if(impContinuous == "mice"){
    whichna <- whichna[which(whichna[,2] %in% disc.miss), ]
  }
  # DA 9/5/14: added condition any(!alldisc) so that optimization only happens when there are continuous variables on which to optimize
  if(optimizeSD & any(!alldisc)){
    mm <- 0
    # Increase sdCutoff until every imputation has at least m donors, or optimStop is hit.
    while(sdCutoff <= optimStop & mm < m){
      tmp <- scaleContinuous(data, alldisc, sdx=1/sdCutoff)
      numdata <- sapply(1:ncol(tmp), function(i)as.numeric(tmp[,i]))
      R <- abs(cor(numdata, use="pairwise"))
      diag(R) <- 0
      # DA 9/5/14: changed the way affinity is calculated so no duplicates are calculated. This should speed
      # up computation particularly when there are many observations missing on many variables. This also makes
      # the weighted measure potentially much slower
      unnaobs <- unique(whichna[,1])
      if(!weightedAffinity){
        aff <- t(sapply(unnaobs, function(x)affinity(numdata, x, weighted=FALSE)))
        aff <- aff[match(whichna[,1], unnaobs), ]
      }
      if(weightedAffinity){
        aff <- t(apply(whichna, 1, function(x)affinity(numdata, x[1], x[2], R, weightedAffinity)))
      }
      if(any(!is.finite(aff))){aff[which(!is.finite(aff), arr.ind=TRUE)] <- 0}
      # DA 9/5/14: added the following 4 lines to ensure that only valid donors (i.e., those with observed values) have
      # non-zero affinity scores.
      wnadat <- matrix(1, nrow=nrow(data), ncol=ncol(data))
      wnadat[which(is.na(data), arr.ind=TRUE)] <- 0
      wnadat <- t(wnadat[, whichna[,2]])
      aff <- aff*wnadat
      # Donor pool per missing cell: observed values from the max-affinity rows.
      w <- apply(aff, 1, function(x)which(x == max(x)))
      donors <- lapply(1:nrow(whichna), function(x)na.omit(data[w[[x]], whichna[x,2]]))
      matches <- sapply(donors, length)
      mm <- min(matches)
      cat("SD Cutoff = ", sprintf("%.2f", sdCutoff), ", # Thin Cells = ", sum(matches < m), "\n", sep="")
      if(mm < m & sdCutoff == optimStop){warning(paste("Optimization unsuccessful, ", sum(matches < m), " thin cells remain with SD cutoff of ", sdCutoff, "\n", sep=""))}
      if(sdCutoff < optimStop){sdCutoff <- sdCutoff + optimStep}
    }
  }
  # Final affinity computation using the (possibly optimized) sdCutoff.
  # DA 9/10/14: changed result of scaleContinuous here to tmp from data so that the draws for the donors will not come from the scaled, but from the unscaled data.
  tmp <- scaleContinuous(data, alldisc, sdx=1/sdCutoff)
  numdata <- sapply(1:ncol(tmp), function(i)as.numeric(tmp[,i]))
  R <- abs(cor(numdata, use="pairwise"))
  diag(R) <- 0
  max.emp.aff <- apply(R, 2, sum)[whichna[,2]] # new
  max.the.aff <- rep(dim(R)[2] - 1, nrow(whichna)) # new
  # DA 9/5/14: changed the way affinity is calculated so no duplicates are calculated. This should speed
  # up computation particularly when there are many observations missing on many variables. This also makes
  # the weighted measure potentially much slower
  unnaobs <- unique(whichna[,1])
  if(!weightedAffinity){
    aff <- t(sapply(unnaobs, function(x)affinity(numdata, x, weighted=FALSE)))
    aff <- aff[match(whichna[,1], unnaobs), ]
  }
  if(weightedAffinity){
    aff <- t(apply(whichna, 1, function(x)affinity(numdata, x[1], x[2], R, weightedAffinity)))
  }
  if(any(!is.finite(aff))){aff[which(!is.finite(aff), arr.ind=TRUE)] <- 0}
  # DA 9/5/14: added the following 4 lines to ensure that only valid donors (i.e., those with observed values) have
  # non-zero affinity scores.
  wnadat <- matrix(1, nrow=nrow(data), ncol=ncol(data))
  wnadat[which(is.na(data), arr.ind=TRUE)] <- 0
  wnadat <- t(wnadat[, whichna[,2]])
  aff <- aff*wnadat
  if(method == "best.cell"){
    # Sample m donor values per missing cell; sample with replacement when the pool is thin.
    w <- apply(aff, 1, function(x)which(x == max(x)))
    donors <- lapply(1:nrow(whichna), function(x)na.omit(data[w[[x]], whichna[x,2]]))
    matches <- sapply(donors, length)
    if(any(matches < m)){warning(paste(sum(matches < m ), " of ", length(matches), " imputations with # donors < ", m, ", consider increasing sdCutoff or using method='p.draw'\n", sep=""))}
    repl <- ifelse(matches < m, TRUE, FALSE)
    draws <- lapply(1:length(donors), function(x)sample(donors[[x]], m, replace=repl[x]))
  }
  if(method == "p.draw"){
    # Draw values with probability proportional to the mean affinity of each observed value.
    donors <- lapply(1:nrow(whichna), function(x)aggregate(aff[x, ], list(data[, whichna[x,2]]), mean, na.rm=TRUE))
    draws <- lapply(1:length(donors), function(x)sample(donors[[x]][,1], m, replace=TRUE, prob=donors[[x]][,2]))
  }
  # Fill in the m imputed data sets; optionally finish continuous cells with mice().
  res <- vector(mode="list", length=m)
  inp.D <- lapply(1:m, function(x)data)
  for(md in 1:m){
    for(i in 1:nrow(whichna)){
      inp.D[[md]][whichna[i,1], whichna[i,2]] <- draws[[i]][md]
    }
    if(length(cont.miss) > 0 & impContinuous == "mice"){
      mice.D <- mice(inp.D[[md]], m = 1, ...)
      res[[md]] <- complete(mice.D)
    }
    else{
      res[[md]] <- inp.D[[md]]
    }
    # DA 9/10/14: added three lines to put ID variables back in dataset
    if(!is.null(IDvars)){
      res[[md]] <- cbind(IDdata, res[[md]])
    }
  }
  class(res) <- c("mi","list")
  return(list(data = res, affinity = aff, donors = donors, draws = draws, max.emp.aff = max.emp.aff, max.the.aff = max.the.aff))
}
| /hot.deck/R/hot.deck.R | no_license | ingted/R-Examples | R | false | false | 8,075 | r | # DA 9/10/14: Added argument for IDvars (ID variables that are not to be used in imputation, but should remain in data)
hot.deck <-
function(data, m = 5, method=c("best.cell", "p.draw"), cutoff=10, sdCutoff=1, optimizeSD = FALSE,
optimStep = 0.1, optimStop = 5, weightedAffinity = FALSE, impContinuous = c("HD", "mice"),
IDvars = NULL, ...){
method <- match.arg(method)
impContinuous <- match.arg(impContinuous)
# DA 9/15/14 Added warning about weighted affinity calculations and correlations among or with categorical variables.
if(weightedAffinity){
warning("Affinity calculations made as a function of pearson correlations among variables coerced to class 'numeric'\ntake care when using this on categorical, especially nominal variables")
}
# DA 9/10/14: If IDvars is specified, remove them from the data and save in a different file.
if(!is.null(IDvars)){
IDdata <- data[, which(names(data) %in% IDvars), drop=FALSE]
data <- data[,-which(names(data) %in% IDvars), drop=FALSE]
# DA 9/10/14: Added code to remove any observations that are always missing
allNA <- apply(data, 1, function(x)all(is.na(x)))
if(any(allNA)){
IDdata <- IDdata[-which(allNA), , drop=FALSE]
data <- data[-which(allNA), , drop=FALSE]
}
}
else{
allNA <- apply(data, 1, function(x)all(is.na(x)))
if(any(allNA)){
data <- data[-which(allNA), , drop=FALSE]
}
}
if(any(allNA)){
warning(paste(sum(allNA), " observations with no observed data. These observations were removed\n", sep="") )
}
facs <- sapply(1:ncol(data), function(x)is.factor(data[,x]))
disc.miss <- which(is.discrete(data, cutoff) & apply(data, 2, function(x)any(is.na(x))))
alldisc <- is.discrete(data, cutoff)
allmiss <- which(is.na(data), arr.ind=TRUE)
cont.miss <- allmiss[-which(allmiss[,2] %in% disc.miss), ]
# DA 10/2/14: moved the warning here in response to Toby's problem where the error was getting tripped
# even though there was no continuous data. This goes a setp further and doesn't trip the warning unless
# there is any continuous data with missing observations.
if(impContinuous == "HD" & method == "p.draw" & length(cont.miss) > 0){
stop("Hot Deck imputation of continuous values can only be used with the best cell method\n")
}
whichna <- which(is.na(data), arr.ind=TRUE)
if(impContinuous == "mice"){
whichna <- whichna[which(whichna[,2] %in% disc.miss), ]
}
# DA 9/5/14: added condition any(!alldisc) so that optimization only happens when there are continuous variables on which to optimize
if(optimizeSD & any(!alldisc)){
mm <- 0
while(sdCutoff <= optimStop & mm < m){
tmp <- scaleContinuous(data, alldisc, sdx=1/sdCutoff)
numdata <- sapply(1:ncol(tmp), function(i)as.numeric(tmp[,i]))
R <- abs(cor(numdata, use="pairwise"))
diag(R) <- 0
# DA 9/5/14: Commented out 2 lines below because I changed the looping mechanism for the affinity calculation
# aff <- t(apply(whichna, 1, function(x)affinity(numdata, x[1], x[2], R, weightedAffinity)))
# aff[which(!is.finite(aff), arr.ind=TRUE)] <- 0
# DA 9/5/14: changed the way affinity is calculated so no duplicates are calculated. This should speed
# up computation particularly when there are many observations missing on many variables. This also makes
# the weighted measure potentially much slower
unnaobs <- unique(whichna[,1])
if(!weightedAffinity){
aff <- t(sapply(unnaobs, function(x)affinity(numdata, x, weighted=FALSE)))
aff <- aff[match(whichna[,1], unnaobs), ]
}
if(weightedAffinity){
aff <- t(apply(whichna, 1, function(x)affinity(numdata, x[1], x[2], R, weightedAffinity)))
}
if(any(!is.finite(aff))){aff[which(!is.finite(aff), arr.ind=TRUE)] <- 0}
# DA 9/5/14: added the following 4 lines to ensure that only valid donors (i.e., those with observed values) have
# non-zero affinity scores.
wnadat <- matrix(1, nrow=nrow(data), ncol=ncol(data))
wnadat[which(is.na(data), arr.ind=TRUE)] <- 0
wnadat <- t(wnadat[, whichna[,2]])
aff <- aff*wnadat
w <- apply(aff, 1, function(x)which(x == max(x)))
donors <- lapply(1:nrow(whichna), function(x)na.omit(data[w[[x]], whichna[x,2]]))
matches <- sapply(donors, length)
mm <- min(matches)
cat("SD Cutoff = ", sprintf("%.2f", sdCutoff), ", # Thin Cells = ", sum(matches < m), "\n", sep="")
if(mm < m & sdCutoff == optimStop){warning(paste("Optimization unsuccessful, ", sum(matches < m), " thin cells remain with SD cutoff of ", sdCutoff, "\n", sep=""))}
if(sdCutoff < optimStop){sdCutoff <- sdCutoff + optimStep}
}
}
# DA 9/10/14: changed result of scaleContinuous here to tmp from data so that the draws for the donors will not come from the scaled, but from the unscaled data.
tmp <- scaleContinuous(data, alldisc, sdx=1/sdCutoff)
numdata <- sapply(1:ncol(tmp), function(i)as.numeric(tmp[,i]))
R <- abs(cor(numdata, use="pairwise"))
diag(R) <- 0
max.emp.aff <- apply(R, 2, sum)[whichna[,2]] # new
max.the.aff <- rep(dim(R)[2] - 1, nrow(whichna)) # new
# DA 9/5/14: Commented out 2 lines below because I changed the looping mechanism for the affinity calculation
# aff <- t(apply(whichna, 1, function(x)affinity(numdata, x[1], x[2], R, weightedAffinity)))
# aff[which(!is.finite(aff), arr.ind=TRUE)] <- 0
# DA 9/5/14: changed the way affinity is calculated so no duplicates are calculated. This should speed
# up computation particularly when there are many observations missing on many variables. This also makes
# the weighted measure potentially much slower
unnaobs <- unique(whichna[,1])
if(!weightedAffinity){
aff <- t(sapply(unnaobs, function(x)affinity(numdata, x, weighted=FALSE)))
aff <- aff[match(whichna[,1], unnaobs), ]
}
if(weightedAffinity){
aff <- t(apply(whichna, 1, function(x)affinity(numdata, x[1], x[2], R, weightedAffinity)))
}
if(any(!is.finite(aff))){aff[which(!is.finite(aff), arr.ind=TRUE)] <- 0}
# DA 9/5/14: added the following 4 lines to ensure that only valid donors (i.e., those with observed values) have
# non-zero affinity scores.
wnadat <- matrix(1, nrow=nrow(data), ncol=ncol(data))
wnadat[which(is.na(data), arr.ind=TRUE)] <- 0
wnadat <- t(wnadat[, whichna[,2]])
aff <- aff*wnadat
if(method == "best.cell"){
w <- apply(aff, 1, function(x)which(x == max(x)))
donors <- lapply(1:nrow(whichna), function(x)na.omit(data[w[[x]], whichna[x,2]]))
matches <- sapply(donors, length)
if(any(matches < m)){warning(paste(sum(matches < m ), " of ", length(matches), " imputations with # donors < ", m, ", consider increasing sdCutoff or using method='p.draw'\n", sep=""))}
repl <- ifelse(matches < m, TRUE, FALSE)
draws <- lapply(1:length(donors), function(x)sample(donors[[x]], m, replace=repl[x]))
}
if(method == "p.draw"){
donors <- lapply(1:nrow(whichna), function(x)aggregate(aff[x, ], list(data[, whichna[x,2]]), mean, na.rm=TRUE))
draws <- lapply(1:length(donors), function(x)sample(donors[[x]][,1], m, replace=TRUE, prob=donors[[x]][,2]))
}
res <- vector(mode="list", length=m)
inp.D <- lapply(1:m, function(x)data)
for(md in 1:m){
for(i in 1:nrow(whichna)){
inp.D[[md]][whichna[i,1], whichna[i,2]] <- draws[[i]][md]
}
if(length(cont.miss) > 0 & impContinuous == "mice"){
mice.D <- mice(inp.D[[md]], m = 1, ...)
res[[md]] <- complete(mice.D)
}
else{
res[[md]] <- inp.D[[md]]
}
# DA 9/10/14: added three lines to put ID variables back in dataset
if(!is.null(IDvars)){
res[[md]] <- cbind(IDdata, res[[md]])
}
}
class(res) <- c("mi","list")
return(list(data = res, affinity = aff, donors = donors, draws = draws, max.emp.aff = max.emp.aff, max.the.aff = max.the.aff))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get.data.frame.R
\name{get.data.frame}
\alias{get.data.frame}
\title{Get a BETS series as a data.frame.}
\usage{
get.data.frame(code, ts = NULL)
}
\arguments{
\item{code}{An \code{integer}. The unique identifier of the series within the BETS database.}
\item{ts}{A \code{ts} object. A time series to be formatted as a data.frame.}
}
\value{
A \code{data.frame}. The first column contains the dates. The second, its values.
}
\description{
By default, \code{\link{BETS.get}} returns a \code{\link[stats]{ts}} object. However, there are many situations in which it is more convenient to work with a data.frame. So, \code{get.data.frame} receives the code of a BETS series and returns a \code{\link[base]{data.frame}} containing the data of the corresponding series. Alternatively, a \code{ts} can be supplied, in which case the BETS databases will not be searched.
}
\author{
Talitha Speranza \email{talitha.speranza@fgv.br}
}
| /man/get.data.frame.Rd | no_license | nmecsys/BETSlite | R | false | true | 1,024 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get.data.frame.R
\name{get.data.frame}
\alias{get.data.frame}
\title{Get a BETS series as a data.frame.}
\usage{
get.data.frame(code, ts = NULL)
}
\arguments{
\item{code}{An \code{integer}. The unique identifier of the series within the BETS database.}
\item{ts}{A \code{ts} object. A time series to be formatted as a data.frame.}
}
\value{
A \code{data.frame}. The first column contains the dates. The second, its values.
}
\description{
By default, \code{\link{BETS.get}} returns a \code{\link[stats]{ts}} object. However, there are many situations in which it is more convenient to work with a data.frame. So, \code{get.data.frame} receives the code of a BETS series and returns a \code{\link[base]{data.frame}} containing the data of the corresponding series. Alternatively, a \code{ts} can be supplied, in which case the BETS databases will not be searched.
}
\author{
Talitha Speranza \email{talitha.speranza@fgv.br}
}
|
## ----opts, eval = TRUE, echo = FALSE, message = FALSE--------------------
options(width = 60)
## ----simulating_data-----------------------------------------------------
# set a seed for reproducibility
set.seed(212)
n <- 5000
A0 <- rbinom(n, size = 1, p = 0.5)
L1 <- rnorm(n, mean = A0 + 1, sd = 1)
A1 <- rbinom(n, size = 1, p = plogis(-1 + L1 + A0))
Y <- rnorm(n, mean = 1 + A1 + 2 * L1, 1)
## ----fit_wrong_Reg-------------------------------------------------------
# fit a regression of Y ~ A1 + L1 + A0
fit <- glm(Y ~ A1 + L1 + A0)
# show results
fit
## ----get_coef, echo = FALSE, eval = TRUE---------------------------------
betas <- round(as.numeric(fit$coef),2)
## ----estimate_reg--------------------------------------------------------
# full data.frame
full_data <- data.frame(A0 = A0, L1 = L1, A1 = A1, Y = Y)
# subset data to observations with A0 = 1 & A1 = 1
data_11 <- subset(full_data, A0 == 1 & A1 == 1)
# fit regression of Y ~ L1
fit_11 <- glm(Y ~ L1, data = data_11)
fit_11
## ----get_coef11, echo = FALSE, eval = TRUE-------------------------------
betas11 <- round(as.numeric(fit_11$coef),2)
## ----estimate_reg2-------------------------------------------------------
# get predicted value for everyone
full_data$Q2n_11 <- predict(fit_11, newdata = full_data)
# subset data to observations with A0 = 1
data_1 <- subset(full_data, A0 == 1)
# fit regression
fit_1 <- glm(Q2n_11 ~ 1, data = data_1)
# intercept is estimate of E[Y(1,1)]
fit_1
## ----gcomp_ex------------------------------------------------------------
# subset data to observations with A0 = a0 & A1 = a1
# fit regression of Y ~ L1 in A0/A1 subset data
# get predicted value for everyone
# subset data to observations with A0 = a0
# fit intercept-only regression in A0 subset data
# intercept is estimate of E[Y(a0,a1)]
## ----gcomp_sol-----------------------------------------------------------
cfmean_gcomp <- function(a0, a1, full_data){
  # G-computation (iterated regression) estimator of the counterfactual
  # mean E[Y(a0, a1)] for a two-decision-point treatment regime.
  #
  # Args:
  #   a0, a1: treatment levels (0/1) fixed at the first and second decision.
  #   full_data: data.frame with columns A0, L1, A1 and outcome Y.
  #
  # Returns: a single numeric, the estimated counterfactual mean.

  # Step 1: outcome regression Y ~ L1 among subjects who followed (a0, a1).
  followers <- subset(full_data, A0 == a0 & A1 == a1)
  outcome_fit <- glm(Y ~ L1, data = followers)

  # Step 2: predicted pseudo-outcome for *every* subject.
  full_data$Q2n_a0a1 <- predict(outcome_fit, newdata = full_data)

  # Step 3: average the pseudo-outcome among subjects with A0 = a0; the
  # intercept of an intercept-only regression equals exactly that average.
  stage1 <- subset(full_data, A0 == a0)
  stage1_fit <- glm(Q2n_a0a1 ~ 1, data = stage1)
  as.numeric(stage1_fit$coefficients)
}
# evaluate the function
EY11_gcomp <- cfmean_gcomp(a0 = 1, a1 = 1, full_data)
EY10_gcomp <- cfmean_gcomp(a0 = 1, a1 = 0, full_data)
EY01_gcomp <- cfmean_gcomp(a0 = 0, a1 = 1, full_data)
EY00_gcomp <- cfmean_gcomp(a0 = 0, a1 = 0, full_data)
## ----gcomp_sol2----------------------------------------------------------
# should be ~ 6, 5, 4, 3
round(c(EY11_gcomp, EY10_gcomp, EY01_gcomp, EY00_gcomp), 2)
## ----iptw_comp_fn--------------------------------------------------------
cfmean_iptw <- function(a0, a1, full_data){
  # Inverse-probability-of-treatment-weighted (IPTW) estimator of the
  # counterfactual mean E[Y(a0, a1)].
  #
  # Args:
  #   a0, a1: treatment levels (0/1) at the first and second decision point.
  #   full_data: data.frame with columns A0, L1, A1 and outcome Y.
  #
  # Returns: a single numeric, the estimated counterfactual mean.

  # Propensity for the second decision, fit among subjects with A0 = a0.
  stage_a0 <- subset(full_data, A0 == a0)
  fit_g1 <- glm(I(A1 == a1) ~ L1, data = stage_a0, family = binomial())
  full_data$phat_a1 <- predict(fit_g1, newdata = full_data,
                               type = 'response')

  # Propensity for the first decision (randomized, so intercept only).
  fit_g0 <- glm(I(A0 == a0) ~ 1, data = full_data, family = binomial())
  full_data$phat_a0 <- predict(fit_g0, newdata = full_data,
                               type = 'response')

  # Weighted mean of Y over subjects who actually followed the regime.
  followed_a0 <- as.numeric(full_data$A0 == a0)
  followed_a1 <- as.numeric(full_data$A1 == a1)
  mean(followed_a0 * followed_a1 / (full_data$phat_a0 * full_data$phat_a1) * full_data$Y)
}
## ----iptw_comp-----------------------------------------------------------
# evaluate the function
EY11_iptw <- cfmean_iptw(a0 = 1, a1 = 1, full_data)
EY10_iptw <- cfmean_iptw(a0 = 1, a1 = 0, full_data)
EY01_iptw <- cfmean_iptw(a0 = 0, a1 = 1, full_data)
EY00_iptw <- cfmean_iptw(a0 = 0, a1 = 0, full_data)
# should be ~ 6,5,4,3
round(c(EY11_iptw, EY10_iptw, EY01_iptw, EY00_iptw),2)
## ----ltmle_data----------------------------------------------------------
# set seed for reproducibility & set sample size of 500
set.seed(212); n <- 500
# baseline variables
L0 <- data.frame(L01 = rnorm(n), L02 = rbinom(n, 1, 0.5))
# first treatment
gA0 <- plogis(0.2 * L0$L01 - 0.2 * L0$L02)
A0 <- rbinom(n = n, size = 1, prob = gA0)
# intermediate variable at time 1
L1 <- rnorm(n = n, mean = -A0 + L0$L01 - L0$L02, sd = 1)
# second treatment decision
gA1 <- plogis(0.2 * A0 - L1 + L0$L01)
A1 <- rbinom(n = n, size = 1, prob = gA1)
# intermediate variable at time 2
L2 <- rnorm(n = n, mean = -A0*A1 + 2*A1 - L0$L01 + L1, sd = 2)
# third treatment decision
gA2 <- plogis(A0 - A1 + 2*A0*A1 - L0$L01 + 0.2 * L1*L0$L02)
A2 <- rbinom(n = n, size = 1, prob = gA2)
# outcome
Y <- rnorm(n = n, mean = L0$L01 * L0$L02 * L2 - A0 - A1 - A2*A0*L2, sd = 2)
# put into a data frame
full_data <- data.frame(L0, A0 = A0, L1 = L1,
A1 = A1, L2 = L2, A2 = A2, Y = Y)
## ----head_data-----------------------------------------------------------
head(full_data)
## ----echo = FALSE, eval = TRUE-------------------------------------------
compute_truth <- function(n = 1e5, a0 = 1, a1 = 1, a2 = 1){
  # Monte Carlo approximation of the true counterfactual mean E[Y(a0, a1, a2)]
  # under this lab's simulation design: draw n subjects with the three
  # treatment nodes held fixed at (a0, a1, a2) and average the outcome.
  #
  # NOTE: the RNG calls below occur in exactly the same order as the
  # observed-data simulation so the draws line up with the fixed seed.
  set.seed(212)  # fixed seed so the "truth" is reproducible
  baseline <- data.frame(L01 = rnorm(n), L02 = rbinom(n, 1, 0.5))
  trt0 <- rep(a0, n)
  covar1 <- rnorm(n = n, mean = -trt0 + baseline$L01 - baseline$L02, sd = 1)
  trt1 <- rep(a1, n)
  covar2 <- rnorm(n = n, mean = -trt0*trt1 + 2*trt1 - baseline$L01 + covar1, sd = 2)
  trt2 <- rep(a2, n)
  # outcome under the fixed regime
  outcome <- rnorm(n = n, mean = baseline$L01 * baseline$L02 * covar2 - trt0 - trt1 - trt2*trt0*covar2, sd = 2)
  # return the Monte Carlo mean (not a data frame)
  mean(outcome)
}
## ----load_drtmle, eval = TRUE, echo = FALSE, message = FALSE-------------
library(ltmle); library(SuperLearner)
## ----simple_call_to_ltmle, echo = TRUE, eval = TRUE, message = FALSE, warning = FALSE----
set.seed(123)
ltmle_fit1 <- ltmle(
data = full_data,
Anodes = c("A0", "A1", "A2"),
Lnodes = c("L01","L02","L1","L2"),
Ynodes = "Y",
SL.library = list(Q = c("SL.earth", "SL.glm", "SL.mean"),
g = c("SL.earth", "SL.glm", "SL.mean")),
stratify = FALSE, abar = list(treatment = c(1,1,1),
control = c(0,0,0))
)
## ----ltmle_sum-----------------------------------------------------------
summary(ltmle_fit1)
## ----look_at_sl_weights--------------------------------------------------
# weights for outcome regressions, because we set stratify = FALSE, the output in
# ltmle_fit1$fit$Q[[1]] is the same as in ltmle_fit1$fit$Q[[2]]
ltmle_fit1$fit$Q[[1]]
## ----look_at_sl_weights2-------------------------------------------------
# weights for propensity scores, because we set stratify = FALSE, the output in
# ltmle_fit1$fit$g[[1]] is the same as in ltmle_fit1$fit$g[[2]]
ltmle_fit1$fit$g[[1]]
## ----echo = FALSE--------------------------------------------------------
tmp <- summary(ltmle_fit1)
EY1 <- tmp$effect.measures$treatment$estimate
EY1_ci <- tmp$effect.measures$treatment$CI
EY0 <- tmp$effect.measures$control$estimate
EY0_ci <- tmp$effect.measures$control$CI
## ----echo = FALSE--------------------------------------------------------
w1 <- formatC(ltmle_fit1$fit$Q[[1]][[1]][,2], digits = 2, format = "f")
w2 <- formatC(ltmle_fit1$fit$Q[[1]][[2]][,2], digits = 2, format = "f")
w3 <- formatC(ltmle_fit1$fit$Q[[1]][[3]][,2], digits = 2, format = "f")
## ----ltmle_cens_data-----------------------------------------------------
set.seed(12)
# censoring prior to time 1 (1 = censored)
gC1 <- plogis(-2 + 0.05 * L0$L01)
C1 <- rbinom(n = n, size = 1, prob = gC1)
# censoring prior to time 2 (1 = censored)
gC2 <- plogis(-3 + 0.05 * A0 + 0.025 * L1 - 0.025 * L0$L02)
C2 <- rbinom(n = n, size = 1, prob = gC2)
# censoring prior to time 3 (1 = censored)
gC3 <- plogis(-3.5 + 0.05*A0*A1 - 0.025*L2 + 0.025 * L1)
C3 <- rbinom(n = n, size = 1, prob = gC3)
# make a cumulative indicator of censoring
anyC1 <- C1 == 1; anyC2 <- C1 == 1 | C2 == 1
anyC3 <- C1 == 1 | C2 == 1 | C3 == 1
# censored data set
cens_data <- data.frame(L0, A0 = A0,
C1 = BinaryToCensoring(is.censored = C1),
L1 = ifelse(anyC1, NA, L1), A1 = ifelse(anyC1, NA, A1),
C2 = BinaryToCensoring(is.censored = ifelse(anyC1, NA, C2)),
L2 = ifelse(anyC2, NA, L2), A2 = ifelse(anyC2, NA, A2),
C3 = BinaryToCensoring(is.censored = ifelse(anyC2, NA, C3)),
Y = ifelse(anyC3, NA, Y))
## ----look_ltmle_cens_data------------------------------------------------
head(cens_data, 9)
## ----simple_call_to_ltmle2, echo=TRUE, eval=TRUE, results='hide', message=FALSE, warning=FALSE----
set.seed(123)
ltmle_fit2 <- ltmle(
data = cens_data,
Anodes = c("A0", "A1", "A2"),
Lnodes = c("L01","L02","L1","L2"),
Cnodes = c("C1","C2","C3"),
Ynodes = "Y",
SL.library = list(Q = c("SL.earth", "SL.glm", "SL.mean"),
g = c("SL.earth", "SL.glm", "SL.mean")),
stratify = FALSE, abar = list(treatment = c(1,1,1),
control = c(0,0,0))
)
## ----ltmle_sum2----------------------------------------------------------
summary(ltmle_fit2)
## ----define_rule---------------------------------------------------------
rule1 <- function(pt_data){
  # Dynamic treatment rule: everyone starts on control at time 0, then is
  # treated at time t (t = 1, 2) whenever the current covariate L_t > -1.
  # A treatment node is NA whenever the patient is censored (covariate
  # missing) by that time.
  #
  # Args:
  #   pt_data: a single patient's row of the data, with columns L1 and L2.
  #
  # Returns: numeric vector c(A0, A1, A2); NA entries mark censored nodes.
  # all patients start on control
  A0 <- 0
  # patients get treatment at time 1 if L1 > -1
  # set patients with missing L1 to NA
  if(!is.na(pt_data$L1)){
    A1 <- ifelse(pt_data$L1 > -1, 1, 0)
  }else{
    A1 <- NA
  }
  # patients get treatment at time 2 if L2 > -1
  # set patients with missing L2 to NA
  # (bug fix: this condition previously tested pt_data$L1 instead of L2)
  if(!is.na(pt_data$L2)){
    A2 <- ifelse(pt_data$L2 > -1, 1, 0)
  }else{
    A2 <- NA
  }
  return(c(A0,A1,A2))
}
## ----define_rule2--------------------------------------------------------
rule2 <- function(pt_data){
  # Static "never treat" rule: control (0) at every decision point, with NA
  # at a node whenever the patient is censored (covariate missing) by then.
  #
  # Args:
  #   pt_data: a single patient's row of the data, with columns L1 and L2.
  #
  # Returns: numeric vector c(A0, A1, A2); NA entries mark censored nodes.
  trt_time1 <- if (is.na(pt_data$L1)) NA else 0
  trt_time2 <- if (is.na(pt_data$L2)) NA else 0
  c(0, trt_time1, trt_time2)
}
## ----simple_call_to_ltmle3, echo=TRUE, eval=TRUE, results='hide', message=FALSE, warning=FALSE----
set.seed(123)
ltmle_fit3 <- ltmle(
data = cens_data,
Anodes = c("A0", "A1", "A2"),
Lnodes = c("L01","L02","L1","L2"),
Cnodes = c("C1","C2","C3"),
Ynodes = "Y", stratify = FALSE,
SL.library = list(Q = c("SL.earth", "SL.glm", "SL.mean"),
g = c("SL.earth", "SL.glm", "SL.mean")),
rule = list(treatment = rule1, control = rule2)
)
## ----summary_dr_ltmle----------------------------------------------------
summary(ltmle_fit3)
| /Lab3/Lab3_codeonly.R | permissive | benkeser/siscer2020 | R | false | false | 10,787 | r | ## ----opts, eval = TRUE, echo = FALSE, message = FALSE--------------------
options(width = 60)
## ----simulating_data-----------------------------------------------------
# set a seed for reproducibility
set.seed(212)
n <- 5000
A0 <- rbinom(n, size = 1, p = 0.5)
L1 <- rnorm(n, mean = A0 + 1, sd = 1)
A1 <- rbinom(n, size = 1, p = plogis(-1 + L1 + A0))
Y <- rnorm(n, mean = 1 + A1 + 2 * L1, 1)
## ----fit_wrong_Reg-------------------------------------------------------
# fit a regression of Y ~ A1 + L1 + A0
fit <- glm(Y ~ A1 + L1 + A0)
# show results
fit
## ----get_coef, echo = FALSE, eval = TRUE---------------------------------
betas <- round(as.numeric(fit$coef),2)
## ----estimate_reg--------------------------------------------------------
# full data.frame
full_data <- data.frame(A0 = A0, L1 = L1, A1 = A1, Y = Y)
# subset data to observations with A0 = 1 & A1 = 1
data_11 <- subset(full_data, A0 == 1 & A1 == 1)
# fit regression of Y ~ L1
fit_11 <- glm(Y ~ L1, data = data_11)
fit_11
## ----get_coef11, echo = FALSE, eval = TRUE-------------------------------
betas11 <- round(as.numeric(fit_11$coef),2)
## ----estimate_reg2-------------------------------------------------------
# get predicted value for everyone
full_data$Q2n_11 <- predict(fit_11, newdata = full_data)
# subset data to observations with A0 = 1
data_1 <- subset(full_data, A0 == 1)
# fit regression
fit_1 <- glm(Q2n_11 ~ 1, data = data_1)
# intercept is estimate of E[Y(1,1)]
fit_1
## ----gcomp_ex------------------------------------------------------------
# subset data to observations with A0 = a0 & A1 = a1
# fit regression of Y ~ L1 in A0/A1 subset data
# get predicted value for everyone
# subset data to observations with A0 = a0
# fit intercept-only regression in A0 subset data
# intercept is estimate of E[Y(a0,a1)]
## ----gcomp_sol-----------------------------------------------------------
cfmean_gcomp <- function(a0, a1, full_data){
  # G-computation (iterated regression) estimator of the counterfactual
  # mean E[Y(a0, a1)] for a two-decision-point treatment regime.
  #
  # Args:
  #   a0, a1: treatment levels (0/1) fixed at the first and second decision.
  #   full_data: data.frame with columns A0, L1, A1 and outcome Y.
  #
  # Returns: a single numeric, the estimated counterfactual mean.

  # Step 1: outcome regression Y ~ L1 among subjects who followed (a0, a1).
  followers <- subset(full_data, A0 == a0 & A1 == a1)
  outcome_fit <- glm(Y ~ L1, data = followers)

  # Step 2: predicted pseudo-outcome for *every* subject.
  full_data$Q2n_a0a1 <- predict(outcome_fit, newdata = full_data)

  # Step 3: average the pseudo-outcome among subjects with A0 = a0; the
  # intercept of an intercept-only regression equals exactly that average.
  stage1 <- subset(full_data, A0 == a0)
  stage1_fit <- glm(Q2n_a0a1 ~ 1, data = stage1)
  as.numeric(stage1_fit$coefficients)
}
# evaluate the function
EY11_gcomp <- cfmean_gcomp(a0 = 1, a1 = 1, full_data)
EY10_gcomp <- cfmean_gcomp(a0 = 1, a1 = 0, full_data)
EY01_gcomp <- cfmean_gcomp(a0 = 0, a1 = 1, full_data)
EY00_gcomp <- cfmean_gcomp(a0 = 0, a1 = 0, full_data)
## ----gcomp_sol2----------------------------------------------------------
# should be ~ 6, 5, 4, 3
round(c(EY11_gcomp, EY10_gcomp, EY01_gcomp, EY00_gcomp), 2)
## ----iptw_comp_fn--------------------------------------------------------
cfmean_iptw <- function(a0, a1, full_data){
  # Inverse-probability-of-treatment-weighted (IPTW) estimator of the
  # counterfactual mean E[Y(a0, a1)].
  #
  # Args:
  #   a0, a1: treatment levels (0/1) at the first and second decision point.
  #   full_data: data.frame with columns A0, L1, A1 and outcome Y.
  #
  # Returns: a single numeric, the estimated counterfactual mean.

  # Propensity for the second decision, fit among subjects with A0 = a0.
  stage_a0 <- subset(full_data, A0 == a0)
  fit_g1 <- glm(I(A1 == a1) ~ L1, data = stage_a0, family = binomial())
  full_data$phat_a1 <- predict(fit_g1, newdata = full_data,
                               type = 'response')

  # Propensity for the first decision (randomized, so intercept only).
  fit_g0 <- glm(I(A0 == a0) ~ 1, data = full_data, family = binomial())
  full_data$phat_a0 <- predict(fit_g0, newdata = full_data,
                               type = 'response')

  # Weighted mean of Y over subjects who actually followed the regime.
  followed_a0 <- as.numeric(full_data$A0 == a0)
  followed_a1 <- as.numeric(full_data$A1 == a1)
  mean(followed_a0 * followed_a1 / (full_data$phat_a0 * full_data$phat_a1) * full_data$Y)
}
## ----iptw_comp-----------------------------------------------------------
# evaluate the function
EY11_iptw <- cfmean_iptw(a0 = 1, a1 = 1, full_data)
EY10_iptw <- cfmean_iptw(a0 = 1, a1 = 0, full_data)
EY01_iptw <- cfmean_iptw(a0 = 0, a1 = 1, full_data)
EY00_iptw <- cfmean_iptw(a0 = 0, a1 = 0, full_data)
# should be ~ 6,5,4,3
round(c(EY11_iptw, EY10_iptw, EY01_iptw, EY00_iptw),2)
## ----ltmle_data----------------------------------------------------------
# set seed for reproducibility & set sample size of 500
set.seed(212); n <- 500
# baseline variables
L0 <- data.frame(L01 = rnorm(n), L02 = rbinom(n, 1, 0.5))
# first treatment
gA0 <- plogis(0.2 * L0$L01 - 0.2 * L0$L02)
A0 <- rbinom(n = n, size = 1, prob = gA0)
# intermediate variable at time 1
L1 <- rnorm(n = n, mean = -A0 + L0$L01 - L0$L02, sd = 1)
# second treatment decision
gA1 <- plogis(0.2 * A0 - L1 + L0$L01)
A1 <- rbinom(n = n, size = 1, prob = gA1)
# intermediate variable at time 2
L2 <- rnorm(n = n, mean = -A0*A1 + 2*A1 - L0$L01 + L1, sd = 2)
# third treatment decision
gA2 <- plogis(A0 - A1 + 2*A0*A1 - L0$L01 + 0.2 * L1*L0$L02)
A2 <- rbinom(n = n, size = 1, prob = gA2)
# outcome
Y <- rnorm(n = n, mean = L0$L01 * L0$L02 * L2 - A0 - A1 - A2*A0*L2, sd = 2)
# put into a data frame
full_data <- data.frame(L0, A0 = A0, L1 = L1,
A1 = A1, L2 = L2, A2 = A2, Y = Y)
## ----head_data-----------------------------------------------------------
head(full_data)
## ----echo = FALSE, eval = TRUE-------------------------------------------
compute_truth <- function(n = 1e5, a0 = 1, a1 = 1, a2 = 1){
  # Monte Carlo approximation of the true counterfactual mean E[Y(a0, a1, a2)]
  # under this lab's simulation design: draw n subjects with the three
  # treatment nodes held fixed at (a0, a1, a2) and average the outcome.
  #
  # NOTE: the RNG calls below occur in exactly the same order as the
  # observed-data simulation so the draws line up with the fixed seed.
  set.seed(212)  # fixed seed so the "truth" is reproducible
  baseline <- data.frame(L01 = rnorm(n), L02 = rbinom(n, 1, 0.5))
  trt0 <- rep(a0, n)
  covar1 <- rnorm(n = n, mean = -trt0 + baseline$L01 - baseline$L02, sd = 1)
  trt1 <- rep(a1, n)
  covar2 <- rnorm(n = n, mean = -trt0*trt1 + 2*trt1 - baseline$L01 + covar1, sd = 2)
  trt2 <- rep(a2, n)
  # outcome under the fixed regime
  outcome <- rnorm(n = n, mean = baseline$L01 * baseline$L02 * covar2 - trt0 - trt1 - trt2*trt0*covar2, sd = 2)
  # return the Monte Carlo mean (not a data frame)
  mean(outcome)
}
## ----load_drtmle, eval = TRUE, echo = FALSE, message = FALSE-------------
library(ltmle); library(SuperLearner)
## ----simple_call_to_ltmle, echo = TRUE, eval = TRUE, message = FALSE, warning = FALSE----
set.seed(123)
ltmle_fit1 <- ltmle(
data = full_data,
Anodes = c("A0", "A1", "A2"),
Lnodes = c("L01","L02","L1","L2"),
Ynodes = "Y",
SL.library = list(Q = c("SL.earth", "SL.glm", "SL.mean"),
g = c("SL.earth", "SL.glm", "SL.mean")),
stratify = FALSE, abar = list(treatment = c(1,1,1),
control = c(0,0,0))
)
## ----ltmle_sum-----------------------------------------------------------
summary(ltmle_fit1)
## ----look_at_sl_weights--------------------------------------------------
# weights for outcome regressions, because we set stratify = FALSE, the output in
# ltmle_fit1$fit$Q[[1]] is the same as in ltmle_fit1$fit$Q[[2]]
ltmle_fit1$fit$Q[[1]]
## ----look_at_sl_weights2-------------------------------------------------
# weights for propensity scores, because we set stratify = FALSE, the output in
# ltmle_fit1$fit$g[[1]] is the same as in ltmle_fit1$fit$g[[2]]
ltmle_fit1$fit$g[[1]]
## ----echo = FALSE--------------------------------------------------------
tmp <- summary(ltmle_fit1)
EY1 <- tmp$effect.measures$treatment$estimate
EY1_ci <- tmp$effect.measures$treatment$CI
EY0 <- tmp$effect.measures$control$estimate
EY0_ci <- tmp$effect.measures$control$CI
## ----echo = FALSE--------------------------------------------------------
w1 <- formatC(ltmle_fit1$fit$Q[[1]][[1]][,2], digits = 2, format = "f")
w2 <- formatC(ltmle_fit1$fit$Q[[1]][[2]][,2], digits = 2, format = "f")
w3 <- formatC(ltmle_fit1$fit$Q[[1]][[3]][,2], digits = 2, format = "f")
## ----ltmle_cens_data-----------------------------------------------------
set.seed(12)
# censoring prior to time 1 (1 = censored)
gC1 <- plogis(-2 + 0.05 * L0$L01)
C1 <- rbinom(n = n, size = 1, prob = gC1)
# censoring prior to time 2 (1 = censored)
gC2 <- plogis(-3 + 0.05 * A0 + 0.025 * L1 - 0.025 * L0$L02)
C2 <- rbinom(n = n, size = 1, prob = gC2)
# censoring prior to time 3 (1 = censored)
gC3 <- plogis(-3.5 + 0.05*A0*A1 - 0.025*L2 + 0.025 * L1)
C3 <- rbinom(n = n, size = 1, prob = gC3)
# make a cumulative indicator of censoring
anyC1 <- C1 == 1; anyC2 <- C1 == 1 | C2 == 1
anyC3 <- C1 == 1 | C2 == 1 | C3 == 1
# censored data set
cens_data <- data.frame(L0, A0 = A0,
C1 = BinaryToCensoring(is.censored = C1),
L1 = ifelse(anyC1, NA, L1), A1 = ifelse(anyC1, NA, A1),
C2 = BinaryToCensoring(is.censored = ifelse(anyC1, NA, C2)),
L2 = ifelse(anyC2, NA, L2), A2 = ifelse(anyC2, NA, A2),
C3 = BinaryToCensoring(is.censored = ifelse(anyC2, NA, C3)),
Y = ifelse(anyC3, NA, Y))
## ----look_ltmle_cens_data------------------------------------------------
head(cens_data, 9)
## ----simple_call_to_ltmle2, echo=TRUE, eval=TRUE, results='hide', message=FALSE, warning=FALSE----
set.seed(123)
ltmle_fit2 <- ltmle(
data = cens_data,
Anodes = c("A0", "A1", "A2"),
Lnodes = c("L01","L02","L1","L2"),
Cnodes = c("C1","C2","C3"),
Ynodes = "Y",
SL.library = list(Q = c("SL.earth", "SL.glm", "SL.mean"),
g = c("SL.earth", "SL.glm", "SL.mean")),
stratify = FALSE, abar = list(treatment = c(1,1,1),
control = c(0,0,0))
)
## ----ltmle_sum2----------------------------------------------------------
summary(ltmle_fit2)
## ----define_rule---------------------------------------------------------
rule1 <- function(pt_data){
  # Dynamic treatment rule: everyone starts on control at time 0, then is
  # treated at time t (t = 1, 2) whenever the current covariate L_t > -1.
  # A treatment node is NA whenever the patient is censored (covariate
  # missing) by that time.
  #
  # Args:
  #   pt_data: a single patient's row of the data, with columns L1 and L2.
  #
  # Returns: numeric vector c(A0, A1, A2); NA entries mark censored nodes.
  # all patients start on control
  A0 <- 0
  # patients get treatment at time 1 if L1 > -1
  # set patients with missing L1 to NA
  if(!is.na(pt_data$L1)){
    A1 <- ifelse(pt_data$L1 > -1, 1, 0)
  }else{
    A1 <- NA
  }
  # patients get treatment at time 2 if L2 > -1
  # set patients with missing L2 to NA
  # (bug fix: this condition previously tested pt_data$L1 instead of L2)
  if(!is.na(pt_data$L2)){
    A2 <- ifelse(pt_data$L2 > -1, 1, 0)
  }else{
    A2 <- NA
  }
  return(c(A0,A1,A2))
}
## ----define_rule2--------------------------------------------------------
rule2 <- function(pt_data){
  # Static "never treat" rule: control (0) at every decision point, with NA
  # at a node whenever the patient is censored (covariate missing) by then.
  #
  # Args:
  #   pt_data: a single patient's row of the data, with columns L1 and L2.
  #
  # Returns: numeric vector c(A0, A1, A2); NA entries mark censored nodes.
  trt_time1 <- if (is.na(pt_data$L1)) NA else 0
  trt_time2 <- if (is.na(pt_data$L2)) NA else 0
  c(0, trt_time1, trt_time2)
}
## ----simple_call_to_ltmle3, echo=TRUE, eval=TRUE, results='hide', message=FALSE, warning=FALSE----
set.seed(123)
ltmle_fit3 <- ltmle(
data = cens_data,
Anodes = c("A0", "A1", "A2"),
Lnodes = c("L01","L02","L1","L2"),
Cnodes = c("C1","C2","C3"),
Ynodes = "Y", stratify = FALSE,
SL.library = list(Q = c("SL.earth", "SL.glm", "SL.mean"),
g = c("SL.earth", "SL.glm", "SL.mean")),
rule = list(treatment = rule1, control = rule2)
)
## ----summary_dr_ltmle----------------------------------------------------
summary(ltmle_fit3)
|
# Layer-1 stacking model "xgb48" for the Kaggle BNP Paribas competition:
# cross-validated xgboost fit (for out-of-fold stacking predictions) followed
# by a full-data refit (for test-set stacking predictions).
library(xgboost)
library(data.table)
library(readr)
library(Matrix)
library(Rtsne)
library(ggplot2)
setwd("/media/branden/SSHD1/kaggle/bnp")
# Pre-transformed train+test data; `filter` appears to mark the split
# (0 = train, 2 = test below) -- confirm against the data-prep script.
ts1Trans <-fread("./data_trans/ts2Trans_v29.csv")
# Feature-importance table from an earlier model (xgb42), used for selection.
xgbImpVars <- data.table(read_csv("./stack_models/xgb42Imp.csv"))
# Loads `cvFoldsList`, the shared CV fold assignments used by all layer-1 models.
load("./data_trans/cvFoldsList.rda")
# varnames <- c(names(ts1Trans[filter==0, !colnames(ts1Trans) %in% c("ID","target","filter","dummy","pred0"), with=FALSE]))
# Keep only the top 3000 features by xgb42 importance.
varnames <- xgbImpVars$Feature[1:3000]
dtrain <- xgb.DMatrix(data=data.matrix(ts1Trans[filter==0, c(varnames),with=FALSE]),label=data.matrix(ts1Trans$target[ts1Trans$filter==0]))
# Low learning rate + heavy column subsampling; nrounds is chosen by CV below.
param <- list(objective="binary:logistic",
eval_metric="logloss",
eta = .003,
max_depth=7,
min_child_weight=1,
subsample=.8,
colsample_bytree=.4,
nthread=13
)
set.seed(201513)
(tme <- Sys.time())
# Cross-validation with prediction=TRUE so out-of-fold predictions are kept
# for stacking; early stopping guards against overfitting the round count.
xgb48cv <- xgb.cv(data = dtrain,
params = param,
nrounds = 8000,
folds=cvFoldsList,
maximize=FALSE,
prediction=TRUE,
print.every.n = 50,
early.stop.round=400)
Sys.time() - tme
save(xgb48cv, file="./stack_models/xgb48cv.rda")
# Out-of-fold predictions become layer-2 training features.
write.csv(data.frame(ID=ts1Trans[filter==0,"ID",with=FALSE], PredictedProb=xgb48cv$pred), "./stack_models/cvPreds/cvPreds_xgb48.csv", row.names=FALSE)
minLossRound <- which.min(xgb48cv$dt$test.logloss.mean)
# Multiplier of 1.0 kept as a tunable knob (some pipelines inflate the CV-optimal
# round count when refitting on the full data).
rounds <- floor(minLossRound * 1.0)
## Create a model using the full dataset -- make predictions on test set for use in future stacking
set.seed(201512)
(tme <- Sys.time())
xgb48full <- xgb.train(data = dtrain,
params = param,
nrounds = rounds,
maximize=FALSE,
print.every.n = 20)
Sys.time() - tme
save(xgb48full, file="./stack_models/xgb48full.rda")
# Test-set (filter==2) predictions become layer-2 test features.
preds <- predict(xgb48full, data.matrix(ts1Trans[filter==2, c(varnames), with=FALSE]))
submission <- data.frame(ID=ts1Trans$ID[ts1Trans$filter==2], PredictedProb=preds)
write.csv(submission, "./stack_models/testPreds/testPreds_xgb48.csv", row.names=FALSE)
# Persist this model's feature importances for the next round of selection.
xgb48Imp <- xgb.importance(feature_names = colnames(ts1Trans[filter==0, c(varnames), with=FALSE]), model=xgb48full)
write.csv(xgb48Imp, "./stack_models/xgb48Imp.csv", row.names=FALSE)
| /bnp/stack_models/layer1_xgb48.R | no_license | brandenkmurray/kaggle | R | false | false | 2,367 | r | library(xgboost)
library(data.table)
library(readr)
library(Matrix)
library(Rtsne)
library(ggplot2)
setwd("/media/branden/SSHD1/kaggle/bnp")
ts1Trans <-fread("./data_trans/ts2Trans_v29.csv")
xgbImpVars <- data.table(read_csv("./stack_models/xgb42Imp.csv"))
load("./data_trans/cvFoldsList.rda")
# varnames <- c(names(ts1Trans[filter==0, !colnames(ts1Trans) %in% c("ID","target","filter","dummy","pred0"), with=FALSE]))
varnames <- xgbImpVars$Feature[1:3000]
dtrain <- xgb.DMatrix(data=data.matrix(ts1Trans[filter==0, c(varnames),with=FALSE]),label=data.matrix(ts1Trans$target[ts1Trans$filter==0]))
param <- list(objective="binary:logistic",
eval_metric="logloss",
eta = .003,
max_depth=7,
min_child_weight=1,
subsample=.8,
colsample_bytree=.4,
nthread=13
)
set.seed(201513)
(tme <- Sys.time())
xgb48cv <- xgb.cv(data = dtrain,
params = param,
nrounds = 8000,
folds=cvFoldsList,
maximize=FALSE,
prediction=TRUE,
print.every.n = 50,
early.stop.round=400)
Sys.time() - tme
save(xgb48cv, file="./stack_models/xgb48cv.rda")
write.csv(data.frame(ID=ts1Trans[filter==0,"ID",with=FALSE], PredictedProb=xgb48cv$pred), "./stack_models/cvPreds/cvPreds_xgb48.csv", row.names=FALSE)
minLossRound <- which.min(xgb48cv$dt$test.logloss.mean)
rounds <- floor(minLossRound * 1.0)
## Create a model using the full dataset -- make predictions on test set for use in future stacking
set.seed(201512)
(tme <- Sys.time())
xgb48full <- xgb.train(data = dtrain,
params = param,
nrounds = rounds,
maximize=FALSE,
print.every.n = 20)
Sys.time() - tme
save(xgb48full, file="./stack_models/xgb48full.rda")
preds <- predict(xgb48full, data.matrix(ts1Trans[filter==2, c(varnames), with=FALSE]))
submission <- data.frame(ID=ts1Trans$ID[ts1Trans$filter==2], PredictedProb=preds)
write.csv(submission, "./stack_models/testPreds/testPreds_xgb48.csv", row.names=FALSE)
xgb48Imp <- xgb.importance(feature_names = colnames(ts1Trans[filter==0, c(varnames), with=FALSE]), model=xgb48full)
write.csv(xgb48Imp, "./stack_models/xgb48Imp.csv", row.names=FALSE)
|
/Programs/Sources/Libraries/masp/Basics/Ft/draw.R | no_license | in-die-nibelungen/SelfLearning | R | false | false | 415 | r | ||
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tar_envvars.R
\name{tar_envvars}
\alias{tar_envvars}
\title{Show environment variables to customize \code{targets}}
\usage{
tar_envvars(unset = "")
}
\arguments{
\item{unset}{Character of length 1, value to return
for any environment variable that is not set.}
}
\value{
A data frame with one row per environment variable
and columns with the name and current value of each.
An unset environment variable will have a value of \code{""}
by default. (Customize with the \code{unset} argument).
}
\description{
You can customize the behavior of \code{targets}
with special environment variables. The sections in this help file
describe each environment variable, and the \code{tar_envvars()} function
lists their current values.
}
\details{
If you modify environment variables, please set them
in project-level \code{.Renviron} file so you do not lose your
configuration when you restart your R session.
Modify the project-level \code{.Renviron} file with
\code{usethis::edit_r_environ(scope = "project")}. Restart
your R session after you are done editing.
}
\section{TAR_ASK}{
The \code{TAR_ASK} environment variable accepts values \code{"true"} and \code{"false"}.
If \code{TAR_ASK} is not set, or if it is set to \code{"true"},
then \code{targets} asks permission in a menu
before overwriting certain files, such as the target script file
(default: \verb{_targets.R}) in \code{\link[=tar_script]{tar_script()}}.
If \code{TAR_ASK} is \code{"false"}, then \code{targets} overwrites the old files
with the new ones without asking. Once you are comfortable with
\code{\link[=tar_script]{tar_script()}}, \code{\link[=tar_github_actions]{tar_github_actions()}}, and similar functions,
you can safely set \code{TAR_ASK} to \code{"false"} in either a project-level
or user-level \code{.Renviron} file.
}
\section{TAR_CONFIG}{
The \code{TAR_CONFIG} environment variable controls the file path to the
optional YAML configuration file with project settings.
See the help file of \code{\link[=tar_config_set]{tar_config_set()}} for details.
}
\section{TAR_PROJECT}{
The \code{TAR_PROJECT} environment variable sets the name of project
to set and get settings when working with the YAML configuration file.
See the help file of \code{\link[=tar_config_set]{tar_config_set()}} for details.
}
\section{TAR_WARN}{
The \code{TAR_WARN} environment variable accepts values \code{"true"} and \code{"false"}.
If \code{TAR_WARN} is not set, or if it is set to \code{"true"},
then \code{targets} throws warnings in certain edge cases,
such as target/global name conflicts and dangerous use of
\code{devtools::load_all()}. If \code{TAR_WARN} is \code{"false"}, then \code{targets}
does not throw warnings in these cases.
These warnings can detect potentially serious
issues with your pipeline, so please do not set \code{TAR_WARN} to
\code{"false"} unless your use case absolutely requires suppressing them.
}
\examples{
tar_envvars()
}
\seealso{
Other configuration:
\code{\link{tar_config_get}()},
\code{\link{tar_config_set}()},
\code{\link{tar_config_unset}()},
\code{\link{tar_option_get}()},
\code{\link{tar_option_reset}()},
\code{\link{tar_option_set}()}
}
\concept{configuration}
| /man/tar_envvars.Rd | permissive | billdenney/targets | R | false | true | 3,221 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tar_envvars.R
\name{tar_envvars}
\alias{tar_envvars}
\title{Show environment variables to customize \code{targets}}
\usage{
tar_envvars(unset = "")
}
\arguments{
\item{unset}{Character of length 1, value to return
for any environment variable that is not set.}
}
\value{
A data frame with one row per environment variable
and columns with the name and current value of each.
An unset environment variable will have a value of \code{""}
by default. (Customize with the \code{unset} argument).
}
\description{
You can customize the behavior of \code{targets}
with special environment variables. The sections in this help file
describe each environment variable, and the \code{tar_envvars()} function
lists their current values.
}
\details{
If you modify environment variables, please set them
in project-level \code{.Renviron} file so you do not lose your
configuration when you restart your R session.
Modify the project-level \code{.Renviron} file with
\code{usethis::edit_r_environ(scope = "project")}. Restart
your R session after you are done editing.
}
\section{TAR_ASK}{
The \code{TAR_ASK} environment variable accepts values \code{"true"} and \code{"false"}.
If \code{TAR_ASK} is not set, or if it is set to \code{"true"},
then \code{targets} asks permission in a menu
before overwriting certain files, such as the target script file
(default: \verb{_targets.R}) in \code{\link[=tar_script]{tar_script()}}.
If \code{TAR_ASK} is \code{"false"}, then \code{targets} overwrites the old files
with the new ones without asking. Once you are comfortable with
\code{\link[=tar_script]{tar_script()}}, \code{\link[=tar_github_actions]{tar_github_actions()}}, and similar functions,
you can safely set \code{TAR_ASK} to \code{"false"} in either a project-level
or user-level \code{.Renviron} file.
}
\section{TAR_CONFIG}{
The \code{TAR_CONFIG} environment variable controls the file path to the
optional YAML configuration file with project settings.
See the help file of \code{\link[=tar_config_set]{tar_config_set()}} for details.
}
\section{TAR_PROJECT}{
The \code{TAR_PROJECT} environment variable sets the name of project
to set and get settings when working with the YAML configuration file.
See the help file of \code{\link[=tar_config_set]{tar_config_set()}} for details.
}
\section{TAR_WARN}{
The \code{TAR_WARN} environment variable accepts values \code{"true"} and \code{"false"}.
If \code{TAR_WARN} is not set, or if it is set to \code{"true"},
then \code{targets} throws warnings in certain edge cases,
such as target/global name conflicts and dangerous use of
\code{devtools::load_all()}. If \code{TAR_WARN} is \code{"false"}, then \code{targets}
does not throw warnings in these cases.
These warnings can detect potentially serious
issues with your pipeline, so please do not set \code{TAR_WARN} to
\code{"false"} unless your use case absolutely requires suppressing them.
}
\examples{
tar_envvars()
}
\seealso{
Other configuration:
\code{\link{tar_config_get}()},
\code{\link{tar_config_set}()},
\code{\link{tar_config_unset}()},
\code{\link{tar_option_get}()},
\code{\link{tar_option_reset}()},
\code{\link{tar_option_set}()}
}
\concept{configuration}
|
# NANCY: The remaining changes that need to be made: 3 graphs and the download section.
#An array_list called reactions_soc, similar to reactions_pt, needs to be created in elasticsearch.
#all of the functions that interact with the api are within utilities.R
#I have already written the code that uses it.
#finally, you might encounter a problem with escaping characters, lucene query strings don't seem to like a long list of these (they are documented in the function remove_spaces within utilities.R)
#Thank you and good luck with the rest of your time here! - James
library(dbplyr)
library(Hmisc)
library(magrittr)
library(utils)
#library(zoo)
library(pool)
# data visualizations
library(plotly)
library(ggplot2)
library(googleVis)
# Shiny libraries
library(shiny)
library(shinydashboard)
library(shinycssloaders)
library(shinyBS)
library(shinyWidgets)
library(DT)
library(dplyr)
library(tidyr)
library(lubridate)
library(RPostgreSQL)
library(httr)
library(jsonlite)
library(elastic)
#library(rCharts)
source("common_ui.R")
source("linechart.R")
source("pieTableUtil.R")
source("barTableUtil.R")
source("utilities.R")
options(shiny.trace=FALSE)
# -----------------------------------------------------------------------------
# Elasticsearch setup: build the autocomplete choice lists and max dates used
# by the UI.  Each query below is an aggregation-only search (size = 0) whose
# bucket keys become a sorted character vector of choices.
# The API key isn't necessary right now, but if in the future it is created
# place it here (include full syntax, for example: '&key=a78asdfkad78')
# connect to elastic gate for meddra data:
connect(es_host = "elastic-gate.hc.local", es_port = 80,errors = "complete")
# autocomplete list for brands: distinct drug names nested in report_drug_detail
topbrands<-'{
"aggs": {"brandname": {"nested": {"path": "report_drug_detail"},
"aggs": {"brand": {"terms": {
"field": "report_drug_detail.drugname.keyword",
"size": 1000000
}}}}}}'
topbrands <- Search(index='drug_event',body=topbrands,size=0,raw=T)%>%fromJSON()
topbrands<-topbrands$aggregations$brandname$brand$buckets$key%>%sort()
# autocomplete list for ingredients
topings_cv<-'{
"aggs": {"ing": {"nested": {"path": "report_drug_detail"},
"aggs": {"ing": {"terms": {
"field": "report_drug_detail.ingredients.keyword",
"size": 1000000
}}}}}}'
topings_cv <- Search(index='drug_event',body=topings_cv,size=0,raw=T) %>% fromJSON()
topings_cv<-topings_cv$aggregations$ing$ing$buckets$key%>%sort()
# auto lists for both soc and pt (right now there is no soc in elastic - Dan needs to add before soc_choices works)
soc_choices<-'{
"aggs": {"soc": {"terms": {
"field": "reaction_soc.keyword",
"size": 100000
}}}}'
soc_choices<-Search(index='drug_event',body=soc_choices,size=0,raw=T) %>% fromJSON()
soc_choices<-soc_choices$aggregations$soc$buckets$key%>%sort()
# SMQ (standardised MedDRA query) names from the meddra_pt index
smq_body<-'{ "aggs" : {"smq_term" : {
"terms" : {
"field" : "smq_name.keyword",
"size":10000
}}}}'
smq_list<-Search(index='meddra_pt',body=smq_body,size=0)$aggregations$smq_term$buckets
smq<-sapply(smq_list,'[[',1)%>%sort()
pt<-'{
"aggs": {"pt": {"terms": {
"field": "reaction_pt.keyword",
"size": 100000
}}}}'
pt<-Search(index='drug_event',body=pt,size=0,raw=T) %>% fromJSON()
pt<-pt$aggregations$pt$buckets$key%>%sort()
# preferred-term choices are the union of raw PTs and SMQ names
pt_choices<-c(pt,smq)
# from elasticsearch, take the maximum receivedate from all reports:
body_date<-'{
"aggs" : {
"max_date" : { "max" : { "field" : "datreceived" } }
}
}'
max_date_res<-Search(index='drug_event',body=body_date,size=0)$aggregations$max_date[[2]]
max_date<-as.Date(max_date_res,format='%Y-%m-%d')
# latest MedDRA dictionary version, shown in the UI as "v.<n>"
body_med<-'{
"aggs" : {
"max_version" : { "max" : { "field" : "meddra_version" } }
}
}'
max_version<-Search(index='meddra_pt',body=body_med,size=0)$aggregations$max_version[[1]]
max_meddra<-paste0('v.',max_version)
| /apps/CVShiny_elastic/global.R | no_license | hres/cvapps | R | false | false | 3,754 | r | # NANCY: The remaining changes that need to be made: 3 graphs and the download section.
#An array_list called reactions_soc, similar to reactions_pt, needs to be created in elasticsearch.
#all of the functions that interact with the api are within utilities.R
#I have already written the code that uses it.
#finally, you might encounter a problem with escaping characters, lucene query strings don't seem to like a long list of these (they are documented in the function remove_spaces within utilities.R)
#Thank you and good luck with the rest of your time here! - James
library(dbplyr)
library(Hmisc)
library(magrittr)
library(utils)
#library(zoo)
library(pool)
# data visualizations
library(plotly)
library(ggplot2)
library(googleVis)
# Shiny libraries
library(shiny)
library(shinydashboard)
library(shinycssloaders)
library(shinyBS)
library(shinyWidgets)
library(DT)
library(dplyr)
library(tidyr)
library(lubridate)
library(RPostgreSQL)
library(httr)
library(jsonlite)
library(elastic)
#library(rCharts)
source("common_ui.R")
source("linechart.R")
source("pieTableUtil.R")
source("barTableUtil.R")
source("utilities.R")
options(shiny.trace=FALSE)
# -----------------------------------------------------------------------------
# Elasticsearch setup: build the autocomplete choice lists and max dates used
# by the UI.  Each query below is an aggregation-only search (size = 0) whose
# bucket keys become a sorted character vector of choices.
# The API key isn't necessary right now, but if in the future it is created
# place it here (include full syntax, for example: '&key=a78asdfkad78')
# connect to elastic gate for meddra data:
connect(es_host = "elastic-gate.hc.local", es_port = 80,errors = "complete")
# autocomplete list for brands: distinct drug names nested in report_drug_detail
topbrands<-'{
"aggs": {"brandname": {"nested": {"path": "report_drug_detail"},
"aggs": {"brand": {"terms": {
"field": "report_drug_detail.drugname.keyword",
"size": 1000000
}}}}}}'
topbrands <- Search(index='drug_event',body=topbrands,size=0,raw=T)%>%fromJSON()
topbrands<-topbrands$aggregations$brandname$brand$buckets$key%>%sort()
# autocomplete list for ingredients
topings_cv<-'{
"aggs": {"ing": {"nested": {"path": "report_drug_detail"},
"aggs": {"ing": {"terms": {
"field": "report_drug_detail.ingredients.keyword",
"size": 1000000
}}}}}}'
topings_cv <- Search(index='drug_event',body=topings_cv,size=0,raw=T) %>% fromJSON()
topings_cv<-topings_cv$aggregations$ing$ing$buckets$key%>%sort()
# auto lists for both soc and pt (right now there is no soc in elastic - Dan needs to add before soc_choices works)
soc_choices<-'{
"aggs": {"soc": {"terms": {
"field": "reaction_soc.keyword",
"size": 100000
}}}}'
soc_choices<-Search(index='drug_event',body=soc_choices,size=0,raw=T) %>% fromJSON()
soc_choices<-soc_choices$aggregations$soc$buckets$key%>%sort()
# SMQ (standardised MedDRA query) names from the meddra_pt index
smq_body<-'{ "aggs" : {"smq_term" : {
"terms" : {
"field" : "smq_name.keyword",
"size":10000
}}}}'
smq_list<-Search(index='meddra_pt',body=smq_body,size=0)$aggregations$smq_term$buckets
smq<-sapply(smq_list,'[[',1)%>%sort()
pt<-'{
"aggs": {"pt": {"terms": {
"field": "reaction_pt.keyword",
"size": 100000
}}}}'
pt<-Search(index='drug_event',body=pt,size=0,raw=T) %>% fromJSON()
pt<-pt$aggregations$pt$buckets$key%>%sort()
# preferred-term choices are the union of raw PTs and SMQ names
pt_choices<-c(pt,smq)
# from elasticsearch, take the maximum receivedate from all reports:
body_date<-'{
"aggs" : {
"max_date" : { "max" : { "field" : "datreceived" } }
}
}'
max_date_res<-Search(index='drug_event',body=body_date,size=0)$aggregations$max_date[[2]]
max_date<-as.Date(max_date_res,format='%Y-%m-%d')
# latest MedDRA dictionary version, shown in the UI as "v.<n>"
body_med<-'{
"aggs" : {
"max_version" : { "max" : { "field" : "meddra_version" } }
}
}'
max_version<-Search(index='meddra_pt',body=body_med,size=0)$aggregations$max_version[[1]]
max_meddra<-paste0('v.',max_version)
|
source("RandomForestRegressionUtils.R")
library(ggplot2)
library(caret)
# load the Position_Salaries data via the project's import helper
randomForestRegressionDataset = importRandomForestRegressionDataset("RandomForestRegression_Position_Salaries.csv")
# reading the previously trained random forest regression model (saved RDS)
randomForestRegressionModel = readRDS("randomForestRegressionModel.RDS")
# Visualize the random forest regression fit at high resolution: observed
# Level/Salary points (red) plus model predictions over a fine Level grid
# (blue), saved to "RandomForestRegressionResultForHigherResolution.png".
# Returns the ggplot object invisibly.
visualisingRandomForestRegressionResultForHigherResolution <- function(randomForestRegressionDataset, randomForestRegressionModel){
  # fine grid of Level values for a smooth prediction curve
  x_grid = seq(min(randomForestRegressionDataset$Level), max(randomForestRegressionDataset$Level), 0.01)
  plot_object <- ggplot() +
    geom_point(aes(x = randomForestRegressionDataset$Level, y = randomForestRegressionDataset$Salary),
               colour = 'red') +
    geom_line(aes(x = x_grid, y = predict(randomForestRegressionModel, newdata = data.frame(Level = x_grid))),
              colour = 'blue') +
    ggtitle('Truth or Bluff (Random Forest Regression)') +
    xlab('Level') +
    ylab('Salary')
  # BUG FIX: the plot was never printed inside the function, so ggsave()'s
  # default `plot = last_plot()` could save a stale plot; pass it explicitly.
  # Also use generic predict() so S3 dispatch picks the right method.
  ggsave("RandomForestRegressionResultForHigherResolution.png", plot = plot_object)
  invisible(plot_object)
}
visualisingRandomForestRegressionResultForHigherResolution(randomForestRegressionDataset, randomForestRegressionModel) | /RandomForestRegressionVisualization.R | no_license | Santosh-Sah/Random_Forest_Regression_R | R | false | false | 1,217 | r | source("RandomForestRegressionUtils.R")
library(ggplot2)
library(caret)
# load the Position_Salaries data via the project's import helper
randomForestRegressionDataset = importRandomForestRegressionDataset("RandomForestRegression_Position_Salaries.csv")
# reading the previously trained random forest regression model (saved RDS)
randomForestRegressionModel = readRDS("randomForestRegressionModel.RDS")
# Visualize the random forest regression fit at high resolution: observed
# Level/Salary points (red) plus model predictions over a fine Level grid
# (blue), saved to "RandomForestRegressionResultForHigherResolution.png".
# Returns the ggplot object invisibly.
visualisingRandomForestRegressionResultForHigherResolution <- function(randomForestRegressionDataset, randomForestRegressionModel){
  # fine grid of Level values for a smooth prediction curve
  x_grid = seq(min(randomForestRegressionDataset$Level), max(randomForestRegressionDataset$Level), 0.01)
  plot_object <- ggplot() +
    geom_point(aes(x = randomForestRegressionDataset$Level, y = randomForestRegressionDataset$Salary),
               colour = 'red') +
    geom_line(aes(x = x_grid, y = predict(randomForestRegressionModel, newdata = data.frame(Level = x_grid))),
              colour = 'blue') +
    ggtitle('Truth or Bluff (Random Forest Regression)') +
    xlab('Level') +
    ylab('Salary')
  # BUG FIX: the plot was never printed inside the function, so ggsave()'s
  # default `plot = last_plot()` could save a stale plot; pass it explicitly.
  # Also use generic predict() so S3 dispatch picks the right method.
  ggsave("RandomForestRegressionResultForHigherResolution.png", plot = plot_object)
  invisible(plot_object)
}
visualisingRandomForestRegressionResultForHigherResolution(randomForestRegressionDataset, randomForestRegressionModel) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_types.R
\name{count_distinct_value}
\alias{count_distinct_value}
\title{get a variables' distinct values matrix}
\usage{
count_distinct_value(df, var.list = names(df))
}
\arguments{
\item{df}{the dataframe}
\item{var.list}{the variables to be show,default: all variables}
}
\description{
Get a matrix of distinct-value counts for variables. This function is typically
used to identify discrete variables whose number of distinct values exceeds a threshold.
}
\examples{
count_distinct_value(p2p)
}
| /my packages/s.PP/man/count_distinct_value.Rd | no_license | fangju2013/RProj | R | false | true | 581 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_types.R
\name{count_distinct_value}
\alias{count_distinct_value}
\title{get a variables' distinct values matrix}
\usage{
count_distinct_value(df, var.list = names(df))
}
\arguments{
\item{df}{the dataframe}
\item{var.list}{the variables to be show,default: all variables}
}
\description{
Get a matrix of distinct-value counts for variables. This function is typically
used to identify discrete variables whose number of distinct values exceeds a threshold.
}
\examples{
count_distinct_value(p2p)
}
|
# add_p.tbl_summary ------------------------------------------------------------

# Two-sample t-test for tbl_summary's add_p().  `test.args` (a named list)
# and `conf.level` are spliced into the `stats::t.test()` call with rlang
# injection; returns a one-row tidy tibble (estimate, statistic, p.value,
# conf.low/high, method).
add_p_test_t.test <- function(data, variable, by, test.args, conf.level = 0.95, ...) {
  .superfluous_args(variable, ...)
  expr(stats::t.test(!!rlang::sym(variable) ~ as.factor(!!rlang::sym(by)),
    data = !!data, conf.level = !!conf.level, !!!test.args
  )) %>%
    eval() %>%
    broom::tidy()
}
# One-way ANOVA across the levels of `by`; returns a one-row tibble with the
# overall F-test p-value.
add_p_test_aov <- function(data, variable, by, ...) {
  .superfluous_args(variable, ...)
  aov_call <-
    rlang::expr(stats::aov(!!rlang::sym(variable) ~ as.factor(!!rlang::sym(by)), data = !!data))
  fit_summary <- summary(eval(aov_call))
  # summary.aov() returns a list; the F-test p-value is the first entry of
  # the "Pr(>F)" column of the first table
  p.value <- fit_summary[[1]][["Pr(>F)"]][[1]]
  tibble::tibble(p.value = p.value, method = "One-way ANOVA")
}
# Kruskal-Wallis rank-sum test of `variable` across the levels of `by`.
add_p_test_kruskal.test <- function(data, variable, by, ...) {
  .superfluous_args(variable, ...)
  test_result <- stats::kruskal.test(data[[variable]], as.factor(data[[by]]))
  broom::tidy(test_result)
}
# Wilcoxon rank-sum (Mann-Whitney) test for a 2-level `by` variable.
# The outcome is coerced with as.numeric() before testing; `test.args` are
# spliced into `stats::wilcox.test()` with rlang injection.  The method label
# is normalized so the continuity-corrected variant reports the same display
# name as the exact test.
add_p_test_wilcox.test <- function(data, variable, by, test.args, ...) {
  .superfluous_args(variable, ...)
  expr(stats::wilcox.test(as.numeric(!!rlang::sym(variable)) ~ as.factor(!!rlang::sym(by)),
    data = !!data, !!!test.args
  )) %>%
    eval() %>%
    broom::tidy() %>%
    mutate(
      method = case_when(
        .data$method == "Wilcoxon rank sum test with continuity correction" ~ "Wilcoxon rank sum test",
        TRUE ~ .data$method
      )
    )
}
# Pearson's chi-squared test of independence between `variable` and `by`.
# `test.args` (e.g. `correct = FALSE`) are spliced into `stats::chisq.test()`;
# the method label is normalized so the Yates-corrected variant reports the
# same display name as the uncorrected test.
add_p_test_chisq.test <- function(data, variable, by, test.args, ...) {
  .superfluous_args(variable, ...)
  expr(stats::chisq.test(x = !!data[[variable]], y = as.factor(!!data[[by]]), !!!test.args)) %>%
    eval() %>%
    broom::tidy() %>%
    mutate(
      method = case_when(
        .data$method == "Pearson's Chi-squared test with Yates' continuity correction" ~ "Pearson's Chi-squared test",
        TRUE ~ .data$method
      )
    )
}
# Pearson's chi-squared test without Yates' continuity correction:
# delegates to add_p_test_chisq.test() with `correct = FALSE`.
add_p_test_chisq.test.no.correct <- function(data, variable, by, ...) {
  .superfluous_args(variable, ...)
  add_p_test_chisq.test(
    data = data,
    variable = variable,
    by = by,
    test.args = list(correct = FALSE)
  )
}
# Fisher's exact test for count data.  `conf.level` and `test.args` are
# spliced into `stats::fisher.test()` with rlang injection; the verbose
# method label is shortened for display.
add_p_test_fisher.test <- function(data, variable, by, test.args, conf.level = 0.95, ...) {
  .superfluous_args(variable, ...)
  expr(stats::fisher.test(!!data[[variable]], as.factor(!!data[[by]]), conf.level = !!conf.level, !!!test.args)) %>%
    eval() %>%
    broom::tidy() %>%
    mutate(
      method = case_when(
        .data$method == "Fisher's Exact Test for Count Data" ~ "Fisher's exact test",
        TRUE ~ .data$method
      )
    )
}
# McNemar's test for paired nominal data; `variable` and `by` are tabulated
# jointly by stats::mcnemar.test(), so observations are paired by row order.
# The continuity-corrected variant's method label is normalized for display.
add_p_test_mcnemar.test <- function(data, variable, by, test.args = NULL, ...) {
  .superfluous_args(variable, ...)
  rlang::expr(stats::mcnemar.test(data[[variable]], data[[by]], !!!test.args)) %>%
    eval() %>%
    broom::tidy() %>%
    mutate(
      method = case_when(
        .data$method == "McNemar's Chi-squared test with continuity correction" ~ "McNemar's Chi-squared test",
        TRUE ~ .data$method
      )
    )
}
# Random-intercept logistic regression test for clustered/repeated data.
# Fits two mixed models with lme4::glmer() -- a null model (intercept plus a
# random intercept per `group`) and a full model adding `variable` -- and
# returns the likelihood-ratio p-value comparing them.
# `type` is the gtsummary summary type: continuous variables enter the model
# linearly, everything else as a factor.
add_p_test_lme4 <- function(data, variable, by, group, type, ...) {
  .superfluous_args(variable, ...)
  assert_package("lme4", "add_p(test = variable ~ 'lme4')")
  if (is.null(group)) {
    # BUG FIX: corrected typo in error message ("cannot by NULL")
    glue(
      "Error in 'lme4' test for variable '{variable}'. ",
      "`add_p(group=)` cannot be NULL"
    ) %>%
      stop(call. = FALSE)
  }
  # keep complete cases only, so both models are fit on the same rows
  data <-
    select(data, variable, by, group) %>%
    filter(stats::complete.cases(.))
  # creating formulas for base model (without variable) and full model;
  # names are backticked to survive non-syntactic column names
  formula0 <- paste0("as.factor(`", by, "`) ~ 1 + (1 | `", group, "`)")
  if (type %in% c("continuous", "continuous2")) {
    formula1 <- paste0("as.factor(`", by, "`) ~ `", variable, "` + (1 | `", group, "`)")
  } else {
    formula1 <- paste0("as.factor(`", by, "`) ~ as.factor(`", variable, "`) + (1 | `", group, "`)")
  }
  # building base and full models
  mod0 <- lme4::glmer(stats::as.formula(formula0),
    data = data, family = stats::binomial
  )
  mod1 <- lme4::glmer(stats::as.formula(formula1),
    data = data, family = stats::binomial
  )
  # likelihood-ratio test comparing the two fits
  p.value <- stats::anova(mod0, mod1)$"Pr(>Chisq)"[2]
  tibble::tibble(p.value = p.value, method = "random intercept logistic regression")
}
# Paired t-test for tbl_summary's add_p().  `group` identifies the pair id;
# each id may appear at most once per `by` level.  Data are reshaped wide
# (one column per `by` level) and passed to stats::t.test(paired = TRUE).
# Rows with a value in only one `by` level are dropped by the test; a note is
# emitted unless `quiet = TRUE`.
add_p_tbl_summary_paired.t.test <- function(data, variable, by, group,
                                            test.args = NULL, conf.level = 0.95,
                                            quiet = FALSE, ...) {
  .superfluous_args(variable, ...)
  # checking inputs
  if (length(data[[by]] %>% stats::na.omit() %>% unique()) != 2) {
    stop("`by=` must have exactly 2 levels", call. = FALSE)
  }
  if (dplyr::group_by_at(data, c(by, group)) %>% dplyr::count(name = "..n..") %>%
    pull(.data$..n..) %>% max(na.rm = TRUE) > 1) {
    # BUG FIX: message was passed to stop() without glue(), so "{variable}"
    # printed literally
    glue("'{variable}': There may only be one observation per `group=` per `by=` level.") %>%
      stop(call. = FALSE)
  }
  # reshaping data
  data_wide <-
    tidyr::pivot_wider(data,
      id_cols = all_of(group),
      names_from = all_of(by),
      values_from = all_of(variable)
    )
  # message about missing data
  # BUG FIX: condition was `quiet && ...`, which suppressed the note in the
  # default case; `quiet` is now a real argument (matching the paired
  # Wilcoxon test) and `!quiet` gates the note
  if (!quiet && any(is.na(data_wide[[2]]) + is.na(data_wide[[3]]) == 1)) {
    glue(
      "Note for variable '{variable}': Some observations included in the ",
      "calculation of summary statistics ",
      "were omitted from the p-value calculation due to unbalanced missingness ",
      "within group."
    ) %>%
      rlang::inform()
  }
  # calculate p-value
  expr(stats::t.test(data_wide[[2]], data_wide[[3]],
    paired = TRUE,
    conf.level = !!conf.level, !!!test.args
  )) %>%
    eval() %>%
    broom::tidy()
}
# Paired Wilcoxon signed-rank test for tbl_summary's add_p().  Same shape as
# the paired t-test: `group` is the pair id (at most one observation per id
# per `by` level); data are reshaped wide and passed to
# stats::wilcox.test(paired = TRUE).  A note about unbalanced missingness is
# emitted unless `quiet = TRUE`.
add_p_tbl_summary_paired.wilcox.test <- function(data, variable, by, group,
                                                 test.args = NULL, conf.level = 0.95,
                                                 quiet = FALSE, ...) {
  .superfluous_args(variable, ...)
  # checking inputs
  if (length(data[[by]] %>% stats::na.omit() %>% unique()) != 2) {
    stop("`by=` must have exactly 2 levels", call. = FALSE)
  }
  if (dplyr::group_by_at(data, c(by, group)) %>% dplyr::count(name = "..n..") %>%
    pull(.data$..n..) %>% max(na.rm = TRUE) > 1) {
    # BUG FIX: message was passed to stop() without glue(), so "{variable}"
    # printed literally
    glue("'{variable}': There may only be one observation per `group=` per `by=` level.") %>%
      stop(call. = FALSE)
  }
  # reshaping data
  data_wide <-
    tidyr::pivot_wider(data,
      id_cols = all_of(group),
      names_from = all_of(by),
      values_from = all_of(variable)
    )
  # message about missing data
  # BUG FIX: condition was `quiet && ...` (note shown only when quiet = TRUE);
  # inverted so that `quiet = TRUE` suppresses the note
  if (!quiet && any(is.na(data_wide[[2]]) + is.na(data_wide[[3]]) == 1)) {
    glue(
      "Note for variable '{variable}': Some observations included in the ",
      "calculation of summary statistics ",
      "were omitted from the p-value calculation due to unbalanced missingness ",
      "within group."
    ) %>%
      rlang::inform()
  }
  # calculate p-value
  expr(stats::wilcox.test(as.numeric(data_wide[[2]]), as.numeric(data_wide[[3]]),
    paired = TRUE, !!!test.args)) %>%
    eval() %>%
    broom::tidy()
}
# Two-sample test for equality of proportions (stats::prop.test).  Unlike
# most tests in this file, the first argument is the gtsummary table itself:
# event counts (n) and denominators (N) come from the variable's pre-computed
# `df_stats` in `tbl$meta_data`.  `estimate` is recoded as the difference in
# proportions (group 1 minus group 2), and the verbose method label is
# shortened for display.
add_p_test_prop.test <- function(tbl, variable, test.args = NULL, conf.level = 0.95, ...) {
  .superfluous_args(variable, ...)
  df_counts <-
    tbl$meta_data %>%
    filter(variable == .env$variable) %>%
    purrr::pluck("df_stats", 1)
  expr(stats::prop.test(df_counts$n, df_counts$N, conf.level = !!conf.level, !!!test.args)) %>%
    eval() %>%
    broom::tidy() %>%
    mutate(estimate = .data$estimate1 - .data$estimate2) %>%
    mutate(
      method = case_when(
        .data$method == "2-sample test for equality of proportions with continuity correction" ~
          "Two sample test for equality of proportions",
        TRUE ~ .data$method
      )
    )
}
# ANCOVA (or one-way ANOVA when `adj.vars` is NULL): linear model of the
# continuous outcome on `by` plus optional adjustment covariates.  Returns
# estimate, SE, statistic, CI, and p-value for the `by` term only.
add_p_test_ancova <- function(data, variable, by, conf.level = 0.95, adj.vars = NULL, ...) {
  .superfluous_args(variable, ...)
  # reverse coding the 'by' variable so the reported estimate is
  # group 1 minus group 2; switch() returns NULL when its condition is FALSE,
  # and %||% then falls through to the already-a-factor branch
  data[[by]] <-
    switch(!is.factor(data[[by]]),
      forcats::fct_rev(factor(data[[by]]))
    ) %||%
    forcats::fct_rev(data[[by]])
  # assembling formula; names are backticked to survive non-syntactic columns
  rhs <- c(by, adj.vars) %>%
    chr_w_backtick() %>%
    paste(collapse = " + ")
  f <- stringr::str_glue("{chr_w_backtick(variable)} ~ {rhs}") %>% as.formula()
  # building model and keeping only the `by` term's row
  stats::lm(formula = f, data = data) %>%
    broom.helpers::tidy_and_attach(conf.int = TRUE, conf.level = conf.level) %>%
    broom.helpers::tidy_remove_intercept() %>%
    dplyr::filter(.data$variable %in% .env$by) %>%
    select(
      .data$estimate, .data$std.error, .data$statistic,
      .data$conf.low, .data$conf.high, .data$p.value
    ) %>%
    dplyr::mutate(
      method = case_when(
        is.null(adj.vars) ~ "One-way ANOVA",
        TRUE ~ "ANCOVA"
      )
    )
}
# Mixed-effects ANCOVA: like add_p_test_ancova() but with a random intercept
# per `group` (lme4::lmer), for clustered/repeated measurements.  Returns
# estimate/CI/p-value for the `by` term only.
add_p_test_ancova_lme4 <- function(data, variable, by, group, conf.level = 0.95, adj.vars = NULL, ...) {
  assert_package("lme4")
  assert_package("broom.mixed")
  .superfluous_args(variable, ...)
  # reverse coding the 'by' variable so the reported estimate is
  # group 1 minus group 2; switch() returns NULL when its condition is FALSE,
  # and %||% then falls through to the already-a-factor branch
  data[[by]] <-
    switch(!is.factor(data[[by]]),
      forcats::fct_rev(factor(data[[by]]))
    ) %||%
    forcats::fct_rev(data[[by]])
  # assembling formula with a random intercept for `group`
  rhs <- c(by, adj.vars) %>%
    chr_w_backtick() %>%
    paste(collapse = " + ")
  f <- stringr::str_glue("{chr_w_backtick(variable)} ~ {rhs} + (1|{chr_w_backtick(group)})") %>% as.formula()
  # building model; mixed models are tidied with broom.mixed
  lme4::lmer(formula = f, data = data) %>%
    broom.helpers::tidy_and_attach(
      conf.int = TRUE,
      conf.level = conf.level,
      tidy_fun = broom.mixed::tidy
    ) %>%
    broom.helpers::tidy_remove_intercept() %>%
    dplyr::filter(.data$variable %in% .env$by) %>%
    select(any_of(c("estimate", "std.error", "statistic", "conf.low", "conf.high", "p.value"))) %>%
    dplyr::mutate(
      method = case_when(
        is.null(adj.vars) ~ "One-way ANOVA with random intercept",
        TRUE ~ "ANCOVA with random intercept"
      )
    )
}
# Cohen's d standardized mean difference between the two `by` groups with a
# `conf.level` CI (effectsize::cohens_d); `test.args` are spliced into the
# call.  Returns estimate/conf.low/conf.high only -- no p-value.
add_p_test_cohens_d <- function(data, variable, by, conf.level = 0.95, test.args = NULL, ...) {
  assert_package("effectsize")
  .superfluous_args(variable, ...)
  f <- stringr::str_glue("{chr_w_backtick(variable)} ~ {chr_w_backtick(by)}") %>% as.formula()
  rlang::expr(effectsize::cohens_d(x = !!f, data = !!data, ci = !!conf.level, !!!test.args)) %>%
    eval() %>%
    tibble::as_tibble() %>%
    select(estimate = .data$Cohens_d, conf.low = .data$CI_low, conf.high = .data$CI_high) %>%
    dplyr::mutate(method = "Cohen's D")
}
# add_p.tbl_svysummary ---------------------------------------------------------

# Chi-squared test with Rao & Scott's second-order correction for complex
# survey designs (survey::svychisq, statistic = "F").
add_p_test_svy.chisq.test <- function(data, variable, by, ...) {
  .superfluous_args(variable, ...)
  fit <- survey::svychisq(c_form(right = c(variable, by)), data, statistic = "F")
  tidied <- suppressMessages(broom::tidy(fit))
  mutate(tidied, method = "chi-squared test with Rao & Scott's second-order correction")
}
# Chi-squared test adjusted by a design-effect estimate
# (survey::svychisq, statistic = "Chisq").
add_p_test_svy.adj.chisq.test <- function(data, variable, by, ...) {
  .superfluous_args(variable, ...)
  fit <- survey::svychisq(c_form(right = c(variable, by)), data, statistic = "Chisq")
  tidied <- suppressMessages(broom::tidy(fit))
  mutate(tidied, method = "chi-squared test adjusted by a design effect estimate")
}
# Wald test of independence for complex survey samples
# (survey::svychisq, statistic = "Wald").
add_p_test_svy.wald.test <- function(data, variable, by, ...) {
  .superfluous_args(variable, ...)
  fit <- survey::svychisq(c_form(right = c(variable, by)), data, statistic = "Wald")
  tidied <- suppressMessages(broom::tidy(fit))
  mutate(tidied, method = "Wald test of independence for complex survey samples")
}
# Adjusted Wald test of independence for complex survey samples
# (survey::svychisq, statistic = "adjWald").
add_p_test_svy.adj.wald.test <- function(data, variable, by, ...) {
  .superfluous_args(variable, ...)
  fit <- survey::svychisq(c_form(right = c(variable, by)), data, statistic = "adjWald")
  tidied <- suppressMessages(broom::tidy(fit))
  # this variant returns statistic/p.value as 1x1 matrices; flatten to numerics
  tidied <- dplyr::mutate_at(tidied, vars(.data$statistic, .data$p.value), as.numeric)
  mutate(tidied, method = "adjusted Wald test of independence for complex survey samples")
}
# Test of independence using the exact asymptotic distribution
# (survey::svychisq, statistic = "lincom").
add_p_test_svy.lincom.test <- function(data, variable, by, ...) {
  .superfluous_args(variable, ...)
  fit <- survey::svychisq(c_form(right = c(variable, by)), data, statistic = "lincom")
  tidied <- suppressMessages(broom::tidy(fit))
  mutate(tidied, method = "test of independence using the exact asymptotic distribution for complex survey samples")
}
# Test of independence using a saddlepoint approximation
# (survey::svychisq, statistic = "saddlepoint").
add_p_test_svy.saddlepoint.test <- function(data, variable, by, ...) {
  .superfluous_args(variable, ...)
  fit <- survey::svychisq(c_form(right = c(variable, by)), data, statistic = "saddlepoint")
  tidied <- suppressMessages(broom::tidy(fit))
  mutate(tidied, method = "test of independence using a saddlepoint approximation for complex survey samples")
}
# Design-based t-test (survey::svyttest) of `variable` across `by`.
add_p_test_svy.t.test <- function(data, variable, by, ...) {
  .superfluous_args(variable, ...)
  fit <- survey::svyttest(c_form(variable, by), data)
  tidied <- suppressMessages(broom::tidy(fit))
  mutate(tidied, method = "t-test adapted to complex survey samples")
}
# Design-based Wilcoxon rank-sum test (survey::svyranktest, test = "wilcoxon").
add_p_test_svy.wilcox.test <- function(data, variable, by, ...) {
  .superfluous_args(variable, ...)
  fit <- survey::svyranktest(c_form(variable, by), data, test = "wilcoxon")
  tidied <- suppressMessages(broom::tidy(fit))
  mutate(tidied, method = "Wilcoxon rank-sum test for complex survey samples")
}
# Design-based Kruskal-Wallis rank-sum test
# (survey::svyranktest, test = "KruskalWallis").
add_p_test_svy.kruskal.test <- function(data, variable, by, ...) {
  .superfluous_args(variable, ...)
  fit <- survey::svyranktest(c_form(variable, by), data, test = "KruskalWallis")
  tidied <- suppressMessages(broom::tidy(fit))
  mutate(tidied, method = "Kruskal-Wallis rank-sum test for complex survey samples")
}
# Design-based van der Waerden normal-scores test
# (survey::svyranktest, test = "vanderWaerden").
add_p_test_svy.vanderwaerden.test <- function(data, variable, by, ...) {
  .superfluous_args(variable, ...)
  fit <- survey::svyranktest(c_form(variable, by), data, test = "vanderWaerden")
  tidied <- suppressMessages(broom::tidy(fit))
  mutate(tidied, method = "van der Waerden's normal-scores test for complex survey samples")
}
# Design-based Mood's test for the median (survey::svyranktest, test = "median").
add_p_test_svy.median.test <- function(data, variable, by, ...) {
  .superfluous_args(variable, ...)
  fit <- survey::svyranktest(c_form(variable, by), data, test = "median")
  tidied <- suppressMessages(broom::tidy(fit))
  mutate(tidied, method = "Mood's test for the median for complex survey samples")
}
# add_p.tbl_survfit ------------------------------------------------------------

# Pull the `formula` and `data` arguments (as unevaluated language objects)
# out of a survfit object's original call, so they can be re-used to build
# survdiff()/coxph() calls on the same inputs.
extract_formula_data_call <- function(x) {
  call_components <- as.list(x$call)
  keep <- which(names(call_components) %in% c("formula", "data"))
  call_components[keep]
}
# Run survival::survdiff() on a tbl_survfit object's original formula/data.
# `test.args` (e.g. rho =) are spliced into the call; the method label is
# derived from rho: 0 or NULL -> log-rank, 1 -> Peto & Peto, otherwise G-rho.
add_p_tbl_survfit_survdiff <- function(data, variable, test.args, ...) {
  .superfluous_args(variable, ...)
  # formula and data calls recovered from the survfit object
  formula_data_call <- extract_formula_data_call(data)
  # converting call into a survdiff call
  survdiff_call <- rlang::call2(rlang::expr(survival::survdiff), !!!formula_data_call, !!!test.args)
  # evaluating `survdiff()`
  survdiff_result <- rlang::eval_tidy(survdiff_call)
  # returning p-value; switch() yields NULL on FALSE, so %||% walks down the
  # rho cases until one matches
  broom::glance(survdiff_result) %>%
    dplyr::mutate(
      method =
        switch(is.null(test.args$rho) || test.args$rho == 0,
          "Log-rank test"
        ) %||%
        switch(test.args$rho == 1,
          "Peto & Peto modification of Gehan-Wilcoxon test"
        ) %||%
        stringr::str_glue("G-rho (\U03C1 = {test.args$rho}) test")
    )
}
# Log-rank test: survdiff with rho = 0.
add_p_tbl_survfit_logrank <- function(data, variable, ...) {
  .superfluous_args(variable, ...)
  # ROBUSTNESS: forward `variable` explicitly -- previously it was omitted and
  # the delegate only worked because its missing `variable` arg was never forced
  add_p_tbl_survfit_survdiff(data = data, variable = variable, test.args = list(rho = 0))
}
# Peto & Peto modification of the Gehan-Wilcoxon test: survdiff with rho = 1.
add_p_tbl_survfit_petopeto_gehanwilcoxon <- function(data, variable, ...) {
  .superfluous_args(variable, ...)
  # ROBUSTNESS: forward `variable` explicitly -- previously it was omitted and
  # the delegate only worked because its missing `variable` arg was never forced
  add_p_tbl_survfit_survdiff(data = data, variable = variable, test.args = list(rho = 1))
}
# Cox proportional-hazards global test on a tbl_survfit object's original
# formula/data.  `test_type` selects the reported global statistic:
# "log" (likelihood ratio), "wald", or "sc" (score); the matching
# statistic.*/p.value.* columns are pulled from broom::glance() and renamed.
add_p_tbl_survfit_coxph <- function(data, variable, test_type, test.args, ...) {
  .superfluous_args(variable, ...)
  # formula and data calls recovered from the survfit object
  formula_data_call <- extract_formula_data_call(data)
  # converting call into a coxph call
  coxph_call <- rlang::call2(rlang::expr(survival::coxph), !!!formula_data_call, !!!test.args)
  # evaluating `coxph()`
  coxph_result <- rlang::eval_tidy(coxph_call)
  # returning p-value for the requested test type
  method <- switch(test_type,
    "log" = "Cox regression (LRT)",
    "wald" = "Cox regression (Wald)",
    "sc" = "Cox regression (Score)"
  )
  broom::glance(coxph_result) %>%
    select(all_of(paste0(c("statistic.", "p.value."), test_type))) %>%
    set_names(c("statistic", "p.value")) %>%
    mutate(method = method)
}
# Warn (via message) when a caller supplies arguments this particular test
# function does not use -- currently only `test.args` is checked.
.superfluous_args <- function(variable, ...) {
  passed_args <- purrr::discard(list(...), is.null)
  superfluous_args <- intersect(names(passed_args), "test.args")
  if (!rlang::is_empty(superfluous_args)) {
    glue::glue(
      "Note for variable '{variable}': Argument(s) {quoted_list(superfluous_args)} ",
      "do not apply and were ignored. ",
      "See `?tests` for details."
    ) %>%
      stringr::str_wrap() %>%
      rlang::inform()
  }
}
| /R/utils-add_p_tests.R | permissive | IbrahimHE/gtsummary | R | false | false | 16,741 | r | # add_p.tbl_summary ------------------------------------------------------------
# t-test of `variable` between the `by` groups (stats::t.test requires
# exactly two levels). Returns the one-row broom::tidy() data frame.
add_p_test_t.test <- function(data, variable, by, test.args, conf.level = 0.95, ...) {
  .superfluous_args(variable, ...)
  # Build the call unevaluated so user-supplied `test.args` can be spliced
  # in with !!!, then evaluate and tidy the htest result.
  expr(stats::t.test(!!rlang::sym(variable) ~ as.factor(!!rlang::sym(by)),
    data = !!data, conf.level = !!conf.level, !!!test.args
  )) %>%
    eval() %>%
    broom::tidy()
}
# One-way ANOVA of `variable` across `by` levels; returns a one-row tibble
# with the p-value and a fixed method label.
add_p_test_aov <- function(data, variable, by, ...) {
  .superfluous_args(variable, ...)
  p.value <-
    rlang::expr(stats::aov(!!rlang::sym(variable) ~ as.factor(!!rlang::sym(by)), data = !!data)) %>%
    eval() %>%
    summary() %>%
    # summary.aov() returns a list of ANOVA tables; pull the Pr(>F) entry
    # for the first (only) model term
    pluck(1, "Pr(>F)", 1)
  tibble::tibble(p.value = p.value, method = "One-way ANOVA")
}
# Kruskal-Wallis rank-sum test of `variable` across the `by` groups;
# returns the one-row broom::tidy() data frame.
add_p_test_kruskal.test <- function(data, variable, by, ...) {
  .superfluous_args(variable, ...)
  test_result <- stats::kruskal.test(data[[variable]], as.factor(data[[by]]))
  broom::tidy(test_result)
}
# Wilcoxon rank-sum (Mann-Whitney) test of `variable` between `by` groups.
add_p_test_wilcox.test <- function(data, variable, by, test.args, ...) {
  .superfluous_args(variable, ...)
  # Unevaluated call so `test.args` can be spliced in with !!!
  expr(stats::wilcox.test(as.numeric(!!rlang::sym(variable)) ~ as.factor(!!rlang::sym(by)),
    data = !!data, !!!test.args
  )) %>%
    eval() %>%
    broom::tidy() %>%
    mutate(
      # Drop the "continuity correction" suffix so the method label is
      # stable whether or not the correction was applied
      method = case_when(
        .data$method == "Wilcoxon rank sum test with continuity correction" ~ "Wilcoxon rank sum test",
        TRUE ~ .data$method
      )
    )
}
# Pearson chi-squared test of independence between `variable` and `by`.
add_p_test_chisq.test <- function(data, variable, by, test.args, ...) {
  .superfluous_args(variable, ...)
  expr(stats::chisq.test(x = !!data[[variable]], y = as.factor(!!data[[by]]), !!!test.args)) %>%
    eval() %>%
    broom::tidy() %>%
    mutate(
      # Normalize the method label whether or not Yates' correction was used
      method = case_when(
        .data$method == "Pearson's Chi-squared test with Yates' continuity correction" ~ "Pearson's Chi-squared test",
        TRUE ~ .data$method
      )
    )
}
# Pearson chi-squared test with Yates' continuity correction disabled;
# delegates to add_p_test_chisq.test() with correct = FALSE.
add_p_test_chisq.test.no.correct <- function(data, variable, by, ...) {
  .superfluous_args(variable, ...)
  add_p_test_chisq.test(
    data = data, variable = variable, by = by,
    test.args = list(correct = FALSE)
  )
}
# Fisher's exact test of independence between `variable` and `by`.
add_p_test_fisher.test <- function(data, variable, by, test.args, conf.level = 0.95, ...) {
  .superfluous_args(variable, ...)
  expr(stats::fisher.test(!!data[[variable]], as.factor(!!data[[by]]), conf.level = !!conf.level, !!!test.args)) %>%
    eval() %>%
    broom::tidy() %>%
    mutate(
      # Shorten the verbose htest label for table footnotes
      method = case_when(
        .data$method == "Fisher's Exact Test for Count Data" ~ "Fisher's exact test",
        TRUE ~ .data$method
      )
    )
}
# McNemar's test for paired categorical data; `test.args` defaults to NULL
# (unlike most siblings) because this test is also called directly.
add_p_test_mcnemar.test <- function(data, variable, by, test.args = NULL, ...) {
  .superfluous_args(variable, ...)
  rlang::expr(stats::mcnemar.test(data[[variable]], data[[by]], !!!test.args)) %>%
    eval() %>%
    broom::tidy() %>%
    mutate(
      # Normalize the label whether or not the continuity correction applied
      method = case_when(
        .data$method == "McNemar's Chi-squared test with continuity correction" ~ "McNemar's Chi-squared test",
        TRUE ~ .data$method
      )
    )
}
# Compares `by` groups via random-intercept logistic regression: fits the
# mixed model with and without `variable` and reports the likelihood-ratio
# p-value. `group` identifies the random-intercept clustering variable.
add_p_test_lme4 <- function(data, variable, by, group, type, ...) {
  .superfluous_args(variable, ...)
  assert_package("lme4", "add_p(test = variable ~ 'lme4')")
  if (is.null(group)) {
    glue(
      "Error in 'lme4' test for variable '{variable}'. ",
      # message typo fixed: "cannot by NULL" -> "cannot be NULL"
      "`add_p(group=)` cannot be NULL"
    ) %>%
      stop(call. = FALSE)
  }
  # keep only the needed columns and complete cases; all_of() because the
  # column names are held in character scalars (bare external vectors in
  # tidyselect are deprecated)
  data <-
    select(data, all_of(c(variable, by, group))) %>%
    filter(stats::complete.cases(.))
  # creating formulas for base model (without variable) and full model;
  # continuous variables enter linearly, everything else as a factor
  formula0 <- paste0("as.factor(`", by, "`) ~ 1 + (1 | `", group, "`)")
  if (type %in% c("continuous", "continuous2")) {
    formula1 <- paste0("as.factor(`", by, "`) ~ `", variable, "` + (1 | `", group, "`)")
  } else {
    formula1 <- paste0("as.factor(`", by, "`) ~ as.factor(`", variable, "`) + (1 | `", group, "`)")
  }
  # building base and full models
  mod0 <- lme4::glmer(stats::as.formula(formula0),
    data = data, family = stats::binomial
  )
  mod1 <- lme4::glmer(stats::as.formula(formula1),
    data = data, family = stats::binomial
  )
  # returning p-value from the likelihood-ratio comparison of the two fits
  p.value <- stats::anova(mod0, mod1)$"Pr(>Chisq)"[2]
  tibble::tibble(p.value = p.value, method = "random intercept logistic regression")
}
# Paired t-test: reshapes to one row per `group` with one column per `by`
# level, then tests the two paired columns. Requires exactly two `by`
# levels and at most one observation per group/by combination.
add_p_tbl_summary_paired.t.test <- function(data, variable, by, group,
                                            test.args = NULL, conf.level = 0.95, ...) {
  quiet <- FALSE # need to add support for quiet later
  .superfluous_args(variable, ...)
  # checking inputs
  if (length(data[[by]] %>% stats::na.omit() %>% unique()) != 2) {
    stop("`by=` must have exactly 2 levels", call. = FALSE)
  }
  if (dplyr::group_by_at(data, c(by, group)) %>% dplyr::count(name = "..n..") %>%
    pull(.data$..n..) %>% max(na.rm = TRUE) > 1) {
    # wrapped in glue() so '{variable}' is interpolated rather than
    # printed literally (stop() does not interpolate braces)
    stop(glue("'{variable}': There may only be one observation per `group=` per `by=` level."), call. = FALSE)
  }
  # reshaping data: one row per `group`, one column per `by` level
  data_wide <-
    tidyr::pivot_wider(data,
      id_cols = all_of(group),
      names_from = all_of(by),
      values_from = all_of(variable)
    )
  # message about missing data unless suppressed; the original tested
  # `quiet && ...`, which could never fire with quiet hard-coded to FALSE
  if (!quiet && any(is.na(data_wide[[2]]) + is.na(data_wide[[3]]) == 1)) {
    glue(
      "Note for variable '{variable}': Some observations included in the ",
      "calculation of summary statistics ",
      "were omitted from the p-value calculation due to unbalanced missingness ",
      "within group."
    ) %>%
      rlang::inform()
  }
  # calculate p-value on the paired columns
  expr(stats::t.test(data_wide[[2]], data_wide[[3]],
    paired = TRUE,
    conf.level = !!conf.level, !!!test.args
  )) %>%
    eval() %>%
    broom::tidy()
}
# Paired Wilcoxon signed-rank test; same reshaping and input checks as the
# paired t-test. `quiet = TRUE` suppresses the unbalanced-missingness note.
add_p_tbl_summary_paired.wilcox.test <- function(data, variable, by, group,
                                                 test.args = NULL, conf.level = 0.95,
                                                 quiet = FALSE, ...) {
  .superfluous_args(variable, ...)
  # checking inputs
  if (length(data[[by]] %>% stats::na.omit() %>% unique()) != 2) {
    stop("`by=` must have exactly 2 levels", call. = FALSE)
  }
  if (dplyr::group_by_at(data, c(by, group)) %>% dplyr::count(name = "..n..") %>%
    pull(.data$..n..) %>% max(na.rm = TRUE) > 1) {
    # wrapped in glue() so '{variable}' is interpolated rather than
    # printed literally (stop() does not interpolate braces)
    stop(glue("'{variable}': There may only be one observation per `group=` per `by=` level."), call. = FALSE)
  }
  # reshaping data: one row per `group`, one column per `by` level
  data_wide <-
    tidyr::pivot_wider(data,
      id_cols = all_of(group),
      names_from = all_of(by),
      values_from = all_of(variable)
    )
  # message about missing data unless suppressed; the original tested
  # `quiet && ...`, which silenced the note exactly when quiet was FALSE
  if (!quiet && any(is.na(data_wide[[2]]) + is.na(data_wide[[3]]) == 1)) {
    glue(
      "Note for variable '{variable}': Some observations included in the ",
      "calculation of summary statistics ",
      "were omitted from the p-value calculation due to unbalanced missingness ",
      "within group."
    ) %>%
      rlang::inform()
  }
  # calculate p-value on the paired columns
  expr(stats::wilcox.test(as.numeric(data_wide[[2]]), as.numeric(data_wide[[3]]),
    paired = TRUE, !!!test.args
  )) %>%
    eval() %>%
    broom::tidy()
}
# Two-sample test of equal proportions using the event counts (n) and group
# totals (N) pre-computed in the table's meta_data.
add_p_test_prop.test <- function(tbl, variable, test.args = NULL, conf.level = 0.95, ...) {
  .superfluous_args(variable, ...)
  # .env$variable disambiguates the function argument from the meta_data
  # column of the same name
  df_counts <-
    tbl$meta_data %>%
    filter(variable == .env$variable) %>%
    purrr::pluck("df_stats", 1)
  expr(stats::prop.test(df_counts$n, df_counts$N, conf.level = !!conf.level, !!!test.args)) %>%
    eval() %>%
    broom::tidy() %>%
    # report the difference in proportions as the estimate
    mutate(estimate = .data$estimate1 - .data$estimate2) %>%
    mutate(
      method = case_when(
        .data$method == "2-sample test for equality of proportions with continuity correction" ~
          "Two sample test for equality of proportions",
        TRUE ~ .data$method
      )
    )
}
# ANCOVA (or one-way ANOVA when `adj.vars` is NULL): linear model of
# `variable` on `by`, optionally adjusted for `adj.vars`. Returns one row
# with estimate, std.error, statistic, conf.low/high, p.value, and method.
add_p_test_ancova <- function(data, variable, by, conf.level = 0.95, adj.vars = NULL, ...) {
  .superfluous_args(variable, ...)
  # reverse coding the 'by' variable; switch() on a logical yields the
  # value when TRUE and NULL when FALSE, so %||% supplies the factor branch
  data[[by]] <-
    switch(!is.factor(data[[by]]),
      forcats::fct_rev(factor(data[[by]]))
    ) %||%
    forcats::fct_rev(data[[by]])
  # assembling formula with backticked names so non-syntactic columns work
  rhs <- c(by, adj.vars) %>%
    chr_w_backtick() %>%
    paste(collapse = " + ")
  f <- stringr::str_glue("{chr_w_backtick(variable)} ~ {rhs}") %>% as.formula()
  # building model
  stats::lm(formula = f, data = data) %>%
    broom.helpers::tidy_and_attach(conf.int = TRUE, conf.level = conf.level) %>%
    broom.helpers::tidy_remove_intercept() %>%
    dplyr::filter(.data$variable %in% .env$by) %>%
    # string-based selection, matching add_p_test_ancova_lme4(); selecting
    # with `.data$` inside select() is deprecated in tidyselect
    select(any_of(c("estimate", "std.error", "statistic", "conf.low", "conf.high", "p.value"))) %>%
    dplyr::mutate(
      method = case_when(
        is.null(adj.vars) ~ "One-way ANOVA",
        TRUE ~ "ANCOVA"
      )
    )
}
# Mixed-effects ANCOVA: linear model of `variable` on `by` (plus optional
# `adj.vars`) with a random intercept per `group`, fit with lme4::lmer().
add_p_test_ancova_lme4 <- function(data, variable, by, group, conf.level = 0.95, adj.vars = NULL, ...) {
  assert_package("lme4")
  assert_package("broom.mixed")
  .superfluous_args(variable, ...)
  # reverse coding the 'by' variable; switch() on a logical yields the
  # value when TRUE and NULL when FALSE, so %||% supplies the factor branch
  data[[by]] <-
    switch(!is.factor(data[[by]]),
      forcats::fct_rev(factor(data[[by]]))
    ) %||%
    forcats::fct_rev(data[[by]])
  # assembling formula with backticked names and the random-intercept term
  rhs <- c(by, adj.vars) %>%
    chr_w_backtick() %>%
    paste(collapse = " + ")
  f <- stringr::str_glue("{chr_w_backtick(variable)} ~ {rhs} + (1|{chr_w_backtick(group)})") %>% as.formula()
  # building model
  lme4::lmer(formula = f, data = data) %>%
    broom.helpers::tidy_and_attach(
      conf.int = TRUE,
      conf.level = conf.level,
      tidy_fun = broom.mixed::tidy
    ) %>%
    broom.helpers::tidy_remove_intercept() %>%
    dplyr::filter(.data$variable %in% .env$by) %>%
    select(any_of(c("estimate", "std.error", "statistic", "conf.low", "conf.high", "p.value"))) %>%
    dplyr::mutate(
      method = case_when(
        is.null(adj.vars) ~ "One-way ANOVA with random intercept",
        TRUE ~ "ANCOVA with random intercept"
      )
    )
}
# Cohen's d effect size (with CI) for `variable` between the `by` groups.
add_p_test_cohens_d <- function(data, variable, by, conf.level = 0.95, test.args = NULL, ...) {
  assert_package("effectsize")
  .superfluous_args(variable, ...)
  f <- stringr::str_glue("{chr_w_backtick(variable)} ~ {chr_w_backtick(by)}") %>% as.formula()
  # splice user test.args into the effectsize::cohens_d() call
  rlang::expr(effectsize::cohens_d(x = !!f, data = !!data, ci = !!conf.level, !!!test.args)) %>%
    eval() %>%
    tibble::as_tibble() %>%
    # rename effectsize's columns to the add_p() convention
    select(estimate = .data$Cohens_d, conf.low = .data$CI_low, conf.high = .data$CI_high) %>%
    dplyr::mutate(method = "Cohen's D")
}
# add_p.tbl_svysummary ---------------------------------------------------------
# Chi-squared test with Rao & Scott's second-order correction (survey design).
add_p_test_svy.chisq.test <- function(data, variable, by, ...) {
  .superfluous_args(variable, ...)
  test_result <- survey::svychisq(c_form(right = c(variable, by)), data, statistic = "F")
  # tidy() emits messages for survey objects; keep the output quiet
  suppressMessages(broom::tidy(test_result)) %>%
    mutate(method = "chi-squared test with Rao & Scott's second-order correction")
}
# Chi-squared test adjusted by a design effect estimate (survey design).
add_p_test_svy.adj.chisq.test <- function(data, variable, by, ...) {
  .superfluous_args(variable, ...)
  test_result <- survey::svychisq(c_form(right = c(variable, by)), data, statistic = "Chisq")
  # tidy() emits messages for survey objects; keep the output quiet
  suppressMessages(broom::tidy(test_result)) %>%
    mutate(method = "chi-squared test adjusted by a design effect estimate")
}
# Wald test of independence for complex survey samples.
add_p_test_svy.wald.test <- function(data, variable, by, ...) {
  .superfluous_args(variable, ...)
  test_result <- survey::svychisq(c_form(right = c(variable, by)), data, statistic = "Wald")
  # tidy() emits messages for survey objects; keep the output quiet
  suppressMessages(broom::tidy(test_result)) %>%
    mutate(method = "Wald test of independence for complex survey samples")
}
# Design-effect adjusted Wald test of independence for complex survey samples.
add_p_test_svy.adj.wald.test <- function(data, variable, by, ...) {
  .superfluous_args(variable, ...)
  survey::svychisq(c_form(right = c(variable, by)), data, statistic = "adjWald") %>%
    {
      suppressMessages(broom::tidy(.))
    } %>%
    # tidy() saves these cols as 1x1 matrices by default; flatten to plain
    # numerics. across() replaces the superseded mutate_at()/vars() pair.
    dplyr::mutate(dplyr::across(c("statistic", "p.value"), as.numeric)) %>%
    mutate(method = "adjusted Wald test of independence for complex survey samples")
}
# Test of independence using the exact asymptotic distribution (survey design).
add_p_test_svy.lincom.test <- function(data, variable, by, ...) {
  .superfluous_args(variable, ...)
  test_result <- survey::svychisq(c_form(right = c(variable, by)), data, statistic = "lincom")
  # tidy() emits messages for survey objects; keep the output quiet
  suppressMessages(broom::tidy(test_result)) %>%
    mutate(method = "test of independence using the exact asymptotic distribution for complex survey samples")
}
# Test of independence using a saddlepoint approximation (survey design).
add_p_test_svy.saddlepoint.test <- function(data, variable, by, ...) {
  .superfluous_args(variable, ...)
  test_result <- survey::svychisq(c_form(right = c(variable, by)), data, statistic = "saddlepoint")
  # tidy() emits messages for survey objects; keep the output quiet
  suppressMessages(broom::tidy(test_result)) %>%
    mutate(method = "test of independence using a saddlepoint approximation for complex survey samples")
}
# t-test adapted to complex survey samples.
add_p_test_svy.t.test <- function(data, variable, by, ...) {
  .superfluous_args(variable, ...)
  test_result <- survey::svyttest(c_form(variable, by), data)
  # tidy() emits messages for survey objects; keep the output quiet
  suppressMessages(broom::tidy(test_result)) %>%
    mutate(method = "t-test adapted to complex survey samples")
}
# Wilcoxon rank-sum test for complex survey samples.
add_p_test_svy.wilcox.test <- function(data, variable, by, ...) {
  .superfluous_args(variable, ...)
  test_result <- survey::svyranktest(c_form(variable, by), data, test = "wilcoxon")
  # tidy() emits messages for survey objects; keep the output quiet
  suppressMessages(broom::tidy(test_result)) %>%
    mutate(method = "Wilcoxon rank-sum test for complex survey samples")
}
# Kruskal-Wallis rank-sum test for complex survey samples.
add_p_test_svy.kruskal.test <- function(data, variable, by, ...) {
  .superfluous_args(variable, ...)
  test_result <- survey::svyranktest(c_form(variable, by), data, test = "KruskalWallis")
  # tidy() emits messages for survey objects; keep the output quiet
  suppressMessages(broom::tidy(test_result)) %>%
    mutate(method = "Kruskal-Wallis rank-sum test for complex survey samples")
}
# van der Waerden's normal-scores test for complex survey samples.
add_p_test_svy.vanderwaerden.test <- function(data, variable, by, ...) {
  .superfluous_args(variable, ...)
  test_result <- survey::svyranktest(c_form(variable, by), data, test = "vanderWaerden")
  # tidy() emits messages for survey objects; keep the output quiet
  suppressMessages(broom::tidy(test_result)) %>%
    mutate(method = "van der Waerden's normal-scores test for complex survey samples")
}
# Mood's test for the median for complex survey samples.
add_p_test_svy.median.test <- function(data, variable, by, ...) {
  .superfluous_args(variable, ...)
  test_result <- survey::svyranktest(c_form(variable, by), data, test = "median")
  # tidy() emits messages for survey objects; keep the output quiet
  suppressMessages(broom::tidy(test_result)) %>%
    mutate(method = "Mood's test for the median for complex survey samples")
}
# add_p.tbl_survfit ------------------------------------------------------------
# Returns a named list holding the (unevaluated) `formula` and `data`
# arguments from the survfit() call stored on the object.
extract_formula_data_call <- function(x) {
  call_args <- as.list(x$call)
  call_args[names(call_args) %in% c("formula", "data")]
}
# Runs survival::survdiff() on the formula/data stored in the survfit
# object and returns broom::glance() output with a readable method label.
add_p_tbl_survfit_survdiff <- function(data, variable, test.args, ...) {
  .superfluous_args(variable, ...)
  # formula and data calls from the original survfit() call
  formula_data_call <- extract_formula_data_call(data)
  # converting call into a survdiff call, splicing in user test.args
  survdiff_call <- rlang::call2(rlang::expr(survival::survdiff), !!!formula_data_call, !!!test.args)
  # evaluating `survdiff()`
  survdiff_result <- rlang::eval_tidy(survdiff_call)
  # returning p-value
  broom::glance(survdiff_result) %>%
    dplyr::mutate(
      # switch() on a logical: TRUE coerces to 1 and selects the label;
      # FALSE coerces to 0 and yields NULL, so %||% falls through to the
      # next candidate label, ending at the generic G-rho description.
      method =
        switch(is.null(test.args$rho) || test.args$rho == 0,
          "Log-rank test"
        ) %||%
          switch(test.args$rho == 1,
            "Peto & Peto modification of Gehan-Wilcoxon test"
          ) %||%
          stringr::str_glue("G-rho (\U03C1 = {test.args$rho}) test")
    )
}
# Log-rank test for a tbl_survfit stratification variable.
# rho = 0 selects the standard log-rank statistic in survival::survdiff().
add_p_tbl_survfit_logrank <- function(data, variable, ...) {
  .superfluous_args(variable, ...)
  add_p_tbl_survfit_survdiff(data = data, test.args = list(rho = 0))
}
# Peto & Peto modification of the Gehan-Wilcoxon test for tbl_survfit.
# rho = 1 selects this variant in survival::survdiff().
add_p_tbl_survfit_petopeto_gehanwilcoxon <- function(data, variable, ...) {
  .superfluous_args(variable, ...)
  add_p_tbl_survfit_survdiff(data = data, test.args = list(rho = 1))
}
# Cox proportional-hazards test for a tbl_survfit variable.
# `test_type` picks which of the three coxph global tests to report:
# "log" (likelihood ratio), "wald", or "sc" (score).
add_p_tbl_survfit_coxph <- function(data, variable, test_type, test.args, ...) {
  .superfluous_args(variable, ...)
  # Rebuild the saved survfit() call as a survival::coxph() call, re-using
  # its formula/data arguments plus any user-supplied test arguments.
  formula_data_call <- extract_formula_data_call(data)
  coxph_call <- rlang::call2(rlang::expr(survival::coxph), !!!formula_data_call, !!!test.args)
  fit <- rlang::eval_tidy(coxph_call)
  # Human-readable label for the requested test flavour.
  method_label <- switch(test_type,
    "log" = "Cox regression (LRT)",
    "wald" = "Cox regression (Wald)",
    "sc" = "Cox regression (Score)"
  )
  # glance() suffixes each statistic/p-value pair with the test type;
  # extract the requested pair and standardize the column names.
  stat_cols <- paste0(c("statistic.", "p.value."), test_type)
  broom::glance(fit) %>%
    select(all_of(stat_cols)) %>%
    set_names(c("statistic", "p.value")) %>%
    mutate(method = method_label)
}
# Informs the user when `test.args` was supplied to a test that does not
# accept it; the argument is dropped, and a wrapped note is emitted.
.superfluous_args <- function(variable, ...) {
  # Keep only non-NULL dots, then check whether `test.args` is among them.
  dots <- purrr::discard(list(...), is.null)
  superfluous_args <- intersect(names(dots), "test.args")
  if (rlang::is_empty(superfluous_args)) {
    return(invisible(NULL))
  }
  glue::glue(
    "Note for variable '{variable}': Argument(s) {quoted_list(superfluous_args)} ",
    "do not apply and were ignored. ",
    "See `?tests` for details."
  ) %>%
    stringr::str_wrap() %>%
    rlang::inform()
}
|
### Global Variables to incorporate from YML
# VERBOSE_REPORTING
# SILENT_REPORTING
# DATA_EXTRACT_RDAT_LOCATION
# XML_DSD_LOCATION
# FLAG.Flextable = this should be set by the script
# FLAG.Flextable = compareVersion( as.character(pandoc_version()),"2.0" ) >= 0
# parameter.vocabulary
# TRIAL.dose_scale = c( 2, 3, 5, 7, 10 )
# TRIAL.starting_dose = 5
# glossary.dictionary
# RECIST.COMPLETE_value = 10
# data_queries.list = NULL
# missing_data_limit = 10
# RECIST.COMPLETE_value = 10
### Acronyms to define
# SSR
# DSD
# CRF
### Functionality to develop
# - Accommodate different units for BMI (calculate_BMI)
| /R/variables_to_incorporate.R | no_license | LisaHopcroft/CTutils | R | false | false | 617 | r | ### Global Variables to incorporate from YML
# VERBOSE_REPORTING
# SILENT_REPORTING
# DATA_EXTRACT_RDAT_LOCATION
# XML_DSD_LOCATION
# FLAG.Flextable = this should be set by the script
# FLAG.Flextable = compareVersion( as.character(pandoc_version()),"2.0" ) >= 0
# parameter.vocabulary
# TRIAL.dose_scale = c( 2, 3, 5, 7, 10 )
# TRIAL.starting_dose = 5
# glossary.dictionary
# RECIST.COMPLETE_value = 10
# data_queries.list = NULL
# missing_data_limit = 10
# RECIST.COMPLETE_value = 10
### Acronyms to define
# SSR
# DSD
# CRF
### Functionality to develop
# - Accommodate different units for BMI (calculate_BMI)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/deploy.R
\name{get_image}
\alias{get_image}
\title{Get the Image}
\usage{
get_image(content, path)
}
\arguments{
\item{content}{A content object}
\item{path}{The path to the image on disk}
}
\description{
Get the Image
}
\seealso{
Other content: \code{\link{content_item}},
\code{\link{get_vanity_url}},
\code{\link{set_image_path}},
\code{\link{set_vanity_url}}
}
\concept{content}
| /man/get_image.Rd | no_license | conceptslearningmachine-FEIN-85-1759293/connectapi | R | false | true | 468 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/deploy.R
\name{get_image}
\alias{get_image}
\title{Get the Image}
\usage{
get_image(content, path)
}
\arguments{
\item{content}{A content object}
\item{path}{The path to the image on disk}
}
\description{
Get the Image
}
\seealso{
Other content: \code{\link{content_item}},
\code{\link{get_vanity_url}},
\code{\link{set_image_path}},
\code{\link{set_vanity_url}}
}
\concept{content}
|
corr <- function(directory, threshold = 0) {
  # Pearson correlation between sulfate (column 2) and nitrate (column 3)
  # for every monitor (files 001.csv .. 332.csv in `directory`) whose count
  # of complete observations is strictly greater than `threshold`.
  #
  # directory: path (relative or absolute) containing the monitor files.
  # threshold: minimum number of completely observed cases required.
  # Returns a numeric vector of correlations (length 0 when none qualify;
  # the original returned NULL in that case, which is not a numeric vector).
  correlation_vector <- numeric(0)
  for (i in 1:332) {
    # sprintf() zero-pads the monitor id (7 -> "007.csv"), replacing the
    # original three-way paste() branching; file.path() avoids the
    # setwd()/setwd("..") dance, which failed to restore the working
    # directory for nested paths and broke with absolute paths.
    monitor_file <- file.path(directory, sprintf("%03d.csv", i))
    data <- read.csv(monitor_file, header = TRUE, na.strings = c("NA", "NaN", " "))
    # keep only complete observations before counting and correlating
    data <- na.omit(data)
    if (nrow(data) > threshold) {
      correlation_vector <- c(correlation_vector, cor(data[, 2], data[, 3]))
    }
  }
  correlation_vector
}
} | /corr.R | no_license | riccardocaneve/datasciencecoursera | R | false | false | 2,270 | r | corr <- function(directory, threshold = 0) {
setwd(file.path(getwd(), directory)) ## setting the directory
correlationVector = NULL ## initializing the correlation matrix
#Looping thru ALL the directory's files
for (i in 1:332)
{
## Due to the format of the filename, i.e 001, 010 instead of 1, 10. I became aware that the following method works but not efficient,
## but at the time of the completion of this assignment, it was the only way I knew how to do it.
if (i <10) {
data <- read.csv(
paste("0","0", as.character(i), ".csv", sep=""), ## for example, if 'id' =7, we get 007.csv
header = T,
na.strings=c("NA","NaN", " ")
)
}
else if (i>=10 & i<100) {
data <- read.csv(
paste("0", as.character(i), ".csv", sep=""), ## for example, if 'id' = 17, we get 017.csv
header = T,
na.strings=c("NA","NaN", " ")
)
}
else {
data <- read.csv(
paste(as.character(i), ".csv", sep=""), ## Normal
header = T,
na.strings=c("NA","NaN", " ")
)
}
## getting rid of all the "NA" values and, consequently, all the non-complete ovservations (the ones with at least one NA in row)
data = na.omit(data)
## if the number of complete observed cases meets the quota, find the correlation between the pollutants for the given monitor AND
## store the results in the correlation matrix
if (nrow(data) > threshold) {
correlationVector = c(correlationVector, cor(data[,2], data[,3]))
}
}
setwd("..") # reseting working directory path
return (correlationVector)
} |
#' @name pbapply
#' @title \code{apply} family functions with progress bars
#' @aliases pbsapply
#' @aliases pblapply
#' @aliases pbreplicate
#'
#' @param X either an array for \code{pbapply} or a vector (or list) for
#' \code{pblapply}, \code{pbsapply}, and \code{pbreplicate}
#' @param MARGIN a vector giving the subscripts which the function will be
#' applied over. E.g., for a matrix 1 indicates rows, 2 indicates columns,
#' c(1, 2) indicates rows and columns. Where X has named dimnames, it can be a
#' character vector selecting dimension names.
#' @param FUN the function to be applied: see 'Details'. In the case of
#' functions like +, \%*\%, etc., the function name must be backquoted or quoted.
#' @param ... optional arguments to \code{FUN}
#' @param n integer: the number of replications.
#' @param expr the expression to evaluate repeatedly.
#' @param simplify logical or character string; should the result be simplified
#' to a vector, matrix or higher dimensional array if possible? For sapply it
#' must be named and not abbreviated. The default value, TRUE, returns a vector
#' or matrix if appropriate, whereas if simplify = "array" the result may be an
#' \code{\link[base]{array}} of "rank" (=length(dim(.))) one higher than the
#' result of FUN(X[[i]]).
#' @param USE.NAMES logical; if TRUE and if X is character, use X as
#' \code{\link[base]{names}} for the result unless it had names already. Since
#' this argument follows ... its name cannot be abbreviated.
#'
#' @examples
#' x <- sapply(1:10, function(x) runif(10))
#' y <- lapply(1:10, function(x) runif(10))
#'
#' pbapply(x, 1, mean)
#' pbsapply(y, mean)
#' pblapply(y, mean)
#'
#' @description Wrap \code{\link[base]{apply}} family with progress bars.
#' @seealso \code{\link[base]{apply}}, \code{\link[base]{sapply}},
#' \code{\link[base]{lapply}}, \code{\link[base]{replicate}}
#' @rdname pbapply
#' @export
pbapply <-
  function (X, MARGIN, FUN, ...)
  {
    # Drop-in replacement for base::apply() that reports progress through
    # startpb()/setpb()/closepb(); the algorithm mirrors base::apply().
    FUN <- match.fun(FUN)
    dl <- length(dim(X))
    if (!dl)
      stop("dim(X) must have a positive length")
    if (is.object(X))
      X <- if (dl == 2L)
        as.matrix(X)
      else as.array(X)
    # d/dn: dimensions and dimnames of X; ds: dimension indices 1..dl
    d <- dim(X)
    dn <- dimnames(X)
    ds <- seq_len(dl)
    # Allow MARGIN given as dimension names rather than indices
    if (is.character(MARGIN)) {
      if (is.null(dnn <- names(dn)))
        stop("'X' must have named dimnames")
      MARGIN <- match(MARGIN, dnn)
      if (anyNA(MARGIN))
        stop("not all elements of 'MARGIN' are names of dimensions")
    }
    # Split dimensions into those collapsed by FUN (*.call) and those
    # retained in the answer (*.ans)
    s.call <- ds[-MARGIN]
    s.ans <- ds[MARGIN]
    d.call <- d[-MARGIN]
    d.ans <- d[MARGIN]
    dn.call <- dn[-MARGIN]
    dn.ans <- dn[MARGIN]
    d2 <- prod(d.ans)
    # Degenerate zero-extent answer: call FUN once on a placeholder slice
    # to learn the result type, exactly as base::apply() does
    if (d2 == 0L) {
      newX <- array(vector(typeof(X), 1L), dim = c(prod(d.call),
        1L))
      ans <- forceAndCall(1, FUN, if (length(d.call) < 2L) newX[,
        1] else array(newX[, 1L], d.call, dn.call), ...)
      return(if (is.null(ans)) ans else if (length(d.ans) <
        2L) ans[1L][-1L] else array(ans, d.ans, dn.ans))
    }
    # Permute X so each answer slice becomes one column of newX
    newX <- aperm(X, c(s.call, s.ans))
    dim(newX) <- c(prod(d.call), d2)
    ans <- vector("list", d2)
    pb <- startpb(0, d2) # pb_specific_code
    if (length(d.call) < 2L) {
      if (length(dn.call))
        dimnames(newX) <- c(dn.call, list(NULL))
      for (i in 1L:d2) {
        tmp <- forceAndCall(1, FUN, newX[, i], ...)
        if (!is.null(tmp))
          ans[[i]] <- tmp
        setpb(pb, i) # pb_specific_code
      }
    }
    else for (i in 1L:d2) {
      tmp <- forceAndCall(1, FUN, array(newX[, i], d.call,
        dn.call), ...)
      if (!is.null(tmp))
        ans[[i]] <- tmp
      setpb(pb, i) # pb_specific_code
    }
    closepb(pb) # pb_specific_code
    # Reassemble the per-slice results, simplifying like base::apply()
    ans.list <- is.recursive(ans[[1L]])
    l.ans <- length(ans[[1L]])
    ans.names <- names(ans[[1L]])
    if (!ans.list)
      ans.list <- any(unlist(lapply(ans, length)) != l.ans)
    if (!ans.list && length(ans.names)) {
      all.same <- vapply(ans, function(x) identical(names(x),
        ans.names), NA)
      if (!all(all.same))
        ans.names <- NULL
    }
    len.a <- if (ans.list)
      d2
    else length(ans <- unlist(ans, recursive = FALSE))
    if (length(MARGIN) == 1L && len.a == d2) {
      names(ans) <- if (length(dn.ans[[1L]]))
        dn.ans[[1L]]
      return(ans)
    }
    if (len.a == d2)
      return(array(ans, d.ans, dn.ans))
    if (len.a && len.a%%d2 == 0L) {
      if (is.null(dn.ans))
        dn.ans <- vector(mode = "list", length(d.ans))
      dn1 <- if (length(dn.call) && length(ans.names) == length(dn.call[[1L]]))
        dn.call[1L]
      else list(ans.names)
      dn.ans <- c(dn1, dn.ans)
      return(array(ans, c(len.a%/%d2, d.ans), if (!is.null(names(dn.ans)) ||
        !all(vapply(dn.ans, is.null, NA))) dn.ans))
    }
    return(ans)
  }
#' @rdname pbapply
#' @export
pbsapply <-
  function (X, FUN, ..., simplify = TRUE, USE.NAMES = TRUE)
  {
    # sapply() clone whose lapply step is the progress-bar aware pblapply().
    FUN <- match.fun(FUN)
    answer <- pblapply(X = X, FUN = FUN, ...) # pb_specific_code
    # Mirror base::sapply(): name results from a character X, then simplify.
    if (USE.NAMES && is.character(X) && is.null(names(answer)))
      names(answer) <- X
    if (identical(simplify, FALSE) || !length(answer)) {
      return(answer)
    }
    simplify2array(answer, higher = (simplify == "array"))
  }
#' @rdname pbapply
#' @export
pblapply <-
  function (X, FUN, ...)
  {
    # lapply() with a progress bar. Falls back to plain lapply() when no
    # bar should be shown (non-interactive session, dopb() FALSE, empty X).
    FUN <- match.fun(FUN)
    if (!is.vector(X) || is.object(X))
      X <- as.list(X)
    B <- length(X)
    if (!(interactive() && dopb() && B >= 1))
      return(lapply(X, FUN, ...))
    pb <- startpb(0, B)
    rval <- vector("list", B)
    for (i in 1:B) {
      rval[i] <- list(FUN(X[[i]], ...))
      setpb(pb, i)
    }
    # was close(pb): closepb() is the package helper used by pbapply() and
    # handles every progress-bar type, not just those with a close() method
    closepb(pb)
    names(rval) <- names(X)
    rval
  }
#' @rdname pbapply
#' @export
pbreplicate <-
  function (n, expr, simplify = "array")
    # Capture `expr` unevaluated (like base::replicate) and re-evaluate it
    # n times through pbsapply(), so each repetition advances the bar.
    pbsapply(integer(n), eval.parent(substitute(function(...) expr)),
      simplify = simplify)
| /R/pbapply.R | no_license | sboysel/pbapply | R | false | false | 6,026 | r | #' @name pbapply
#' @title \code{apply} family functions with progress bars
#' @aliases pbsapply
#' @aliases pblapply
#' @aliases pbreplicate
#'
#' @param X either an array for \code{pbapply} or a vector (or list) for
#' \code{pblapply}, \code{pbsapply}, and \code{pbreplicate}
#' @param MARGIN a vector giving the subscripts which the function will be
#' applied over. E.g., for a matrix 1 indicates rows, 2 indicates columns,
#' c(1, 2) indicates rows and columns. Where X has named dimnames, it can be a
#' character vector selecting dimension names.
#' @param FUN the function to be applied: see 'Details'. In the case of
#' functions like +, \%*\%, etc., the function name must be backquoted or quoted.
#' @param ... optional arguments to \code{FUN}
#' @param n integer: the number of replications.
#' @param expr the expression to evaluate repeatedly.
#' @param simplify logical or character string; should the result be simplified
#' to a vector, matrix or higher dimensional array if possible? For sapply it
#' must be named and not abbreviated. The default value, TRUE, returns a vector
#' or matrix if appropriate, whereas if simplify = "array" the result may be an
#' \code{\link[base]{array}} of "rank" (=length(dim(.))) one higher than the
#' result of FUN(X[[i]]).
#' @param USE.NAMES logical; if TRUE and if X is character, use X as
#' \code{\link[base]{names}} for the result unless it had names already. Since
#' this argument follows ... its name cannot be abbreviated.
#'
#' @examples
#' x <- sapply(1:10, function(x) runif(10))
#' y <- lapply(1:10, function(x) runif(10))
#'
#' pbapply(x, 1, mean)
#' pbsapply(y, mean)
#' pblapply(y, mean)
#'
#' @description Wrap \code{\link[base]{apply}} family with progress bars.
#' @seealso \code{\link[base]{apply}}, \code{\link[base]{sapply}},
#' \code{\link[base]{lapply}}, \code{\link[base]{replicate}}
#' @rdname pbapply
#' @export
pbapply <-
  function (X, MARGIN, FUN, ...)
  {
    # Drop-in replacement for base::apply() that reports progress through
    # startpb()/setpb()/closepb(); the algorithm mirrors base::apply().
    FUN <- match.fun(FUN)
    dl <- length(dim(X))
    if (!dl)
      stop("dim(X) must have a positive length")
    if (is.object(X))
      X <- if (dl == 2L)
        as.matrix(X)
      else as.array(X)
    # d/dn: dimensions and dimnames of X; ds: dimension indices 1..dl
    d <- dim(X)
    dn <- dimnames(X)
    ds <- seq_len(dl)
    # Allow MARGIN given as dimension names rather than indices
    if (is.character(MARGIN)) {
      if (is.null(dnn <- names(dn)))
        stop("'X' must have named dimnames")
      MARGIN <- match(MARGIN, dnn)
      if (anyNA(MARGIN))
        stop("not all elements of 'MARGIN' are names of dimensions")
    }
    # Split dimensions into those collapsed by FUN (*.call) and those
    # retained in the answer (*.ans)
    s.call <- ds[-MARGIN]
    s.ans <- ds[MARGIN]
    d.call <- d[-MARGIN]
    d.ans <- d[MARGIN]
    dn.call <- dn[-MARGIN]
    dn.ans <- dn[MARGIN]
    d2 <- prod(d.ans)
    # Degenerate zero-extent answer: call FUN once on a placeholder slice
    # to learn the result type, exactly as base::apply() does
    if (d2 == 0L) {
      newX <- array(vector(typeof(X), 1L), dim = c(prod(d.call),
        1L))
      ans <- forceAndCall(1, FUN, if (length(d.call) < 2L) newX[,
        1] else array(newX[, 1L], d.call, dn.call), ...)
      return(if (is.null(ans)) ans else if (length(d.ans) <
        2L) ans[1L][-1L] else array(ans, d.ans, dn.ans))
    }
    # Permute X so each answer slice becomes one column of newX
    newX <- aperm(X, c(s.call, s.ans))
    dim(newX) <- c(prod(d.call), d2)
    ans <- vector("list", d2)
    pb <- startpb(0, d2) # pb_specific_code
    if (length(d.call) < 2L) {
      if (length(dn.call))
        dimnames(newX) <- c(dn.call, list(NULL))
      for (i in 1L:d2) {
        tmp <- forceAndCall(1, FUN, newX[, i], ...)
        if (!is.null(tmp))
          ans[[i]] <- tmp
        setpb(pb, i) # pb_specific_code
      }
    }
    else for (i in 1L:d2) {
      tmp <- forceAndCall(1, FUN, array(newX[, i], d.call,
        dn.call), ...)
      if (!is.null(tmp))
        ans[[i]] <- tmp
      setpb(pb, i) # pb_specific_code
    }
    closepb(pb) # pb_specific_code
    # Reassemble the per-slice results, simplifying like base::apply()
    ans.list <- is.recursive(ans[[1L]])
    l.ans <- length(ans[[1L]])
    ans.names <- names(ans[[1L]])
    if (!ans.list)
      ans.list <- any(unlist(lapply(ans, length)) != l.ans)
    if (!ans.list && length(ans.names)) {
      all.same <- vapply(ans, function(x) identical(names(x),
        ans.names), NA)
      if (!all(all.same))
        ans.names <- NULL
    }
    len.a <- if (ans.list)
      d2
    else length(ans <- unlist(ans, recursive = FALSE))
    if (length(MARGIN) == 1L && len.a == d2) {
      names(ans) <- if (length(dn.ans[[1L]]))
        dn.ans[[1L]]
      return(ans)
    }
    if (len.a == d2)
      return(array(ans, d.ans, dn.ans))
    if (len.a && len.a%%d2 == 0L) {
      if (is.null(dn.ans))
        dn.ans <- vector(mode = "list", length(d.ans))
      dn1 <- if (length(dn.call) && length(ans.names) == length(dn.call[[1L]]))
        dn.call[1L]
      else list(ans.names)
      dn.ans <- c(dn1, dn.ans)
      return(array(ans, c(len.a%/%d2, d.ans), if (!is.null(names(dn.ans)) ||
        !all(vapply(dn.ans, is.null, NA))) dn.ans))
    }
    return(ans)
  }
#' @rdname pbapply
#' @export
pbsapply <-
  function (X, FUN, ..., simplify = TRUE, USE.NAMES = TRUE)
  {
    # sapply() clone whose lapply step is the progress-bar aware pblapply().
    FUN <- match.fun(FUN)
    answer <- pblapply(X = X, FUN = FUN, ...) # pb_specific_code
    # Mirror base::sapply(): name results from a character X, then simplify.
    if (USE.NAMES && is.character(X) && is.null(names(answer)))
      names(answer) <- X
    if (identical(simplify, FALSE) || !length(answer)) {
      return(answer)
    }
    simplify2array(answer, higher = (simplify == "array"))
  }
#' @rdname pbapply
#' @export
pblapply <-
  function (X, FUN, ...)
  {
    # lapply() with a progress bar. Falls back to plain lapply() when no
    # bar should be shown (non-interactive session, dopb() FALSE, empty X).
    FUN <- match.fun(FUN)
    if (!is.vector(X) || is.object(X))
      X <- as.list(X)
    B <- length(X)
    if (!(interactive() && dopb() && B >= 1))
      return(lapply(X, FUN, ...))
    pb <- startpb(0, B)
    rval <- vector("list", B)
    for (i in 1:B) {
      rval[i] <- list(FUN(X[[i]], ...))
      setpb(pb, i)
    }
    # was close(pb): closepb() is the package helper used by pbapply() and
    # handles every progress-bar type, not just those with a close() method
    closepb(pb)
    names(rval) <- names(X)
    rval
  }
#' @rdname pbapply
#' @export
pbreplicate <-
  function (n, expr, simplify = "array")
    # Capture `expr` unevaluated (like base::replicate) and re-evaluate it
    # n times through pbsapply(), so each repetition advances the bar.
    pbsapply(integer(n), eval.parent(substitute(function(...) expr)),
      simplify = simplify)
|
#### iBeta ---------------------------------------------------------------------
Beta<-function(Y, X='^GSPC', Since=NULL, Until=NULL, Interval=2, Frequency='Weekly', Theme='BB'){
# ----------------------------------------------------------------------------
#### iBeta 5.3 (An Online Regression Beta Calculator) ------------------------
require(quantmod)
require(tidyquant)
require(ggplot2)
require(grid)
require(gridExtra)
require(lattice)
#### Fix non-standard names --------------------------------------------------
# Maps a Yahoo Finance index ticker (e.g. "^GSPC") to its conventional short
# name (e.g. "SPX"), reassigning the caller's variable in place and
# returning the (possibly translated) value. Non-character input, or a
# ticker with no mapping, passes through unchanged.
Fix <- function(x) {
  # Capture the caller's variable name for every input, not only character
  # input: the original set `name` inside the character branch only, so a
  # non-character argument errored with "object 'name' not found".
  name <- deparse(substitute(x))
  if (is.character(x)) {
    # Lookup table replacing the original 42-branch if() chain.
    index_names <- c(
      "^DJI" = "DJIA", "^GSPC" = "SPX", "^IXIC" = "IXIC", "^RUT" = "RUT",
      "^VIX" = "VIX", "^GSPTSE" = "GSPTSE", "^BVSP" = "BVSP", "^MXX" = "MXX",
      "^GDAXI" = "GDAXI", "^FTSE" = "FTSE", "^FCHI" = "FCHI",
      "^STOXX50E" = "STOXX50E", "^AEX" = "AEX", "^IBEX" = "IBEX",
      "FTSEMIB.MI" = "FTSEMIB", "^SSMI" = "SSMI", "PSI20.LS" = "PSI20",
      "^BFX" = "BFX", "^ATX" = "ATX", "^OMX" = "OMX", "^OMXC25" = "OMXC25",
      "IMOEX.ME" = "MOEX", "RTSI.ME" = "RTSI", "XU100.IS" = "XU100",
      "TA35.TA" = "TA35", "^TASI.SR" = "SASEIDX", "^N225" = "NKY",
      "^AXJO" = "AS51", "^SSEC" = "SHCOMP", "399001.SZ" = "SICOM",
      "XINA50N.FGI" = "TXIN9", "^DJSH" = "DJSH", "^HSI" = "HSI",
      "^TWII" = "TWSE", "^SET.BK" = "SET", "^KS11" = "KOSPI",
      "^JKSE" = "JCI", "^NSEI" = "NIFTY", "^BSESN" = "SENSEX",
      "PSEI.PS" = "PCOMP", "^STI" = "FSSTI", "^KSE" = "KSE100"
    )
    if (x %in% names(index_names)) {
      x <- unname(index_names[[x]])
    }
  }
  # Write the translated value back into the caller's frame and return it.
  # `envir` is spelled out; the original partially matched it as `env=`.
  return(
    assign(name, x, envir = parent.frame())
  )
}
# ----------------------------------------------------------------------------
# Record display names: when a ticker string was passed, keep it verbatim
# (Xname/Yname); when an xts object was passed, derive the name from its
# first column header (e.g. 'SPX.Open' -> 'SPX').
# NOTE(review): `is.xts(X)=='FALSE'` relies on logical-to-character
# coercion (as.character(FALSE) == "FALSE"); it works, but `!is.xts(X)`
# would say the same thing directly.
Xname<-NULL
Yname<-NULL
if(is.xts(X)=='FALSE'){
Xname<-as.character(X)
}else{
x.name<-sub('\\..*', '', (names(X[,1])))
}
if(is.xts(Y)=='FALSE'){
Yname<-as.character(Y)
}else{
y.name<-sub('\\..*', '', (names(Y[,1])))
}
# Fetch OHLCV history for a ticker via tidyquant::tq_get() and assign it,
# as an xts object, onto the caller's variable (name recovered with
# deparse(substitute())). If `x` is already an xts object the function
# does nothing (no else branch), leaving the caller's variable untouched.
#   x      - ticker symbol (character), or an xts object (no-op).
#   get    - tq_get() dataset selector (default 'stock.prices').
#   rename - optional character prefix overriding the ticker in the
#            output column names.
#   from   - optional start date forwarded to tq_get().
# NOTE(review): needs network access; behavior of tq_get() is assumed,
# not visible here — confirm against the tidyquant documentation.
price<-function(x, get = 'stock.prices', rename = NULL, from = NULL){
if(is.xts(x)=='FALSE'){
#Name the data
name<-x
output<-deparse(substitute(x))
#Collect the data
if(is.null(from)==FALSE){
x<-tq_get(x, get = get, from = from)
}else{
x<-tq_get(x, get = get)
}
#Rename if rename is required
if(class(rename)=='character'){
name<-rename
}
#Extract date series
dates<-x$date
#Extract price data
Open<-x$open
High<-x$high
Low<-x$low
Close<-x$close
Volume<-x$volume
Adjusted<-x$adjusted
# Define the data frame.
data<-data.frame(cbind(Open,High,Low,Close,Volume,Adjusted))
# Columns are renamed '<name>.Open', '<name>.High', ... to mimic the
# getSymbols()/xts column-naming convention used elsewhere in Beta().
colnames(data)[1]<-paste(name,'Open', sep='.')
colnames(data)[2]<-paste(name,'High', sep='.')
colnames(data)[3]<-paste(name,'Low', sep='.')
colnames(data)[4]<-paste(name,'Close', sep='.')
colnames(data)[5]<-paste(name,'Volume', sep='.')
colnames(data)[6]<-paste(name,'Adjusted', sep='.')
row.names(data)<-dates
# Assigning the data to the environment.
# `output` is the caller-side variable name, so price(Y, ...) overwrites
# Y in the enclosing Beta() frame; `env` partially matches assign()'s
# `envir` argument.
return(
assign(output, (data<-as.xts(data)), env=parent.frame())
)
}
}
price(Y, get = 'stock.prices', rename = NULL, from = '1900-01-01')
price(X, get = 'stock.prices', rename = NULL, from = '1900-01-01')
Y<-to.daily(Y)
X<-to.daily(X)
# ----------------------------------------------------------------------------
#### Converting X- and Y-Series to a Common Length ---------------------------
#### If the length of the Y-time series is shorter than that of the X- -------
# Default to the later of the two start dates; the length checks below
# overwrite this when one series is strictly shorter. (Fix: originally this
# was only set when both series started on the same day or differed in
# length, so equal-length series with different start dates left it
# undefined and the Since/Until scenarios below errored.)
Earliest.Date.Available<-(as.Date((max(start(X),start(Y)))))
if(length(Y)<length(X)){
Earliest.Date.Available<-start(Y)
minlength<-as.double(length(Y [,1]))
#### Reacquiring price data from a common date (Start of the Y-series) -----
Y<-as.xts(tail(Y, n=minlength))
X<-as.xts(tail(X, n=minlength))
}
#### If the length of the X-time series is shorter than that of the Y- -------
if(length(Y)>length(X)){
Earliest.Date.Available<-start(X)
minlength<-as.double(length(X [,1]))
#### Reacquiring price data from a common date (Start of the X-series) -----
Y<-as.xts(tail(Y, n=minlength))
X<-as.xts(tail(X, n=minlength))
}
#### Removable Control Module (3.0) ------------------------------------------
# ----------------------------------------------------------------------------
if(class(Since)=='character'){
Since<-as.Date(Since)
}
if(class(Until)=='character'){
Until<-as.Date(Until)
}
if(class(Interval)=='numeric'){
Interval<-as.double(Interval)
}
### Scenario 1: No Parameters are specified (Implied) -----------------------
### Scenario 2B: 'Since and 'Until' Parameters are specified -----------------
if(class(Since)=='Date'){
if(class(Until)=='Date'){
Since=as.Date(max(Since,Earliest.Date.Available))
X<-X[paste(Since,Until,sep='::')]
Y<-Y[paste(Since,Until,sep='::')]
}
}
### Scenario 2C: Only the 'Until' Parameter is specified ---------------------
if(class(Until)=='Date'){
X<-X[paste('',Until,sep='::')]
Y<-Y[paste('',Until,sep='::')]
}else{
### Scenario 2A: The 'Since' Parameter is specified ------------------------
if(class(Since)=='Date'){
Since=as.Date(max(Since,Earliest.Date.Available))
X<-X[paste(Since,'',sep='::')]
Y<-Y[paste(Since,'',sep='::')]
}else{
### Scenario 3: Interval Mode (Takes precedence) ------------------------
if(class(Interval)=='numeric'){
if((Interval)>0){
Since=as.Date((max(end(X),end(Y))))-(Interval*365)
Until=as.Date((min(end(X),end(Y))))
Since=as.Date(max((max(start(X),start(Y))),Since))
X<-X[paste(Since,Until,sep='::')] # This might work
Y<-Y[paste(Since,Until,sep='::')] # This might work
}
}
}
}
#### Reconvert to Common Length (Failsafe) -----------------------------------
#### Converting X- and Y-Series to a Common Length. ####
#### If the length of the Y-time series is shorter than that of the X- -------
if(length(Y)<length(X)){
Earliest.Date.Available<-start(Y)
minlength<-as.double(length(Y [,1]))
#### Reacquiring price data from a common date (Start of the Y-series) -----
Y<-as.xts(tail(Y, n=minlength))
X<-as.xts(tail(X, n=minlength))
}
#### If the length of the X-time series is shorter than that of the Y- -------
if(length(Y)>length(X)){
Earliest.Date.Available<-start(X)
minlength<-as.double(length(X [,1]))
#### Reacquiring price data from a common date (Start of the X-series) -----
Y<-as.xts(tail(Y, n=minlength))
X<-as.xts(tail(X, n=minlength))
}
#### End of Removable Control Module -----------------------------------------
# ----------------------------------------------------------------------------
#### Determining Frequency: If Daily, Convert to Daily -----------------------
if(Frequency=='Daily'){
#### Converting data to Daily, if Frequency = Daily ------------------------
adj.Y<-to.daily(Y)
adj.X<-to.daily(X)
pardiff<-5
}
#### Determining Frequency: If Weekly, Convert to Weekly ---------------------
if(Frequency=='Weekly'){
#### Converting data to Weekly, if Frequency = Weekly ----------------------
adj.Y<-to.weekly(Y)
adj.X<-to.weekly(X)
pardiff<-5
}
#### Determining Frequency: If Monthly, Convert to Monthly -------------------
if(Frequency=='Monthly'){
#### Converting data to Monthly, if Frequency = Monthly --------------------
adj.Y<-to.monthly(Y)
adj.X<-to.monthly(X)
pardiff<-25
}
#### Closing Price Extraction ------------------------------------------------
#### Extracting the columns containing closing price data --------------------
YClose<-as.xts(adj.Y[,4])
XClose<-as.xts(adj.X[,4])
Latest.Date.Used<-as.Date(max(end(XClose),end(YClose)))
Latest.to.Today<-as.double(difftime(as.Date(Sys.Date()),Latest.Date.Used))
#### Returns Calculation -----------------------------------------------------
#### Converting Price data to Returns data -----------------------------------
YReturns<-(diff(YClose)/YClose[-length(YClose)])
XReturns<-(diff(XClose)/XClose[-length(XClose)])
#### Aggregation into Data Frame ---------------------------------------------
#### Creating a data frame to make the plot ----------------------------------
GGFRAME<-data.frame(as.xts(merge(XReturns, YReturns)))
GGFRAME<-data.frame(na.omit(GGFRAME))
if(Frequency=='Daily'){
Since<-as.Date(rownames(GGFRAME)[1]) #Error here if Frequency = 'Monthly'. The 'if' statements obviate the error.
Until<-as.Date(rownames(GGFRAME)[(length(rownames(GGFRAME)))]) #Error here if Frequency = 'Monthly'
Observations<-(length(rownames(GGFRAME)))
}
if(Frequency=='Weekly'){
Since<-as.Date(rownames(GGFRAME)[1]) #Error here if Frequency = 'Monthly'. The 'if' statements obviate the error.
Until<-as.Date(rownames(GGFRAME)[(length(rownames(GGFRAME)))]) #Error here if Frequency = 'Monthly'
Observations<-(length(rownames(GGFRAME)))
}
Observations<-(length(rownames(GGFRAME)))
#### Three-step process for making the x- and y- labels ----------------------
#### Setting up dates --------------------------------------------------------
xdate<-max((start(XClose)),(start(YClose)))
ydate<-max((start(XClose)),(start(YClose)))
#### Here are the beginning month and year -----------------------------------
xmonth<-paste(format(xdate, '%b'), format(xdate, '%Y'))
ymonth<-paste(format(ydate, '%b'), format(ydate, '%Y'))
#### Names of the variables --------------------------------------------------
if(is.null(Xname)=='FALSE'){
x.name<-Xname
}
if(is.null(Yname)=='FALSE'){
y.name<-Yname
}
x.name<-Fix(x.name)
y.name<-Fix(y.name)
#### If Beta is taken over an interval, here are the ending month and year ---
if((Latest.to.Today)>=pardiff){
xend<-end(XClose)
yend<-end(YClose)
xmonth.end<-paste(format(xend, '%b'), format(xend, '%Y'))
# Fix: the y-label's end year previously read format(xend, '%Y') — a
# copy-paste slip that stamped the x-series' end year onto the y-label.
ymonth.end<-paste(format(yend, '%b'), format(yend, '%Y'))
### If Beta is taken over an interval, here are the x- and y- labels -------
xlabel<-paste0((Frequency),' Returns on ', x.name, ' (From ', (xmonth),',',' To ',(xmonth.end),') [Interval-Bound]')
ylabel<-paste0((Frequency),' Returns on ', y.name, ' (From ', (ymonth),',',' To ',(ymonth.end),')')
}
#### Setting up regular x-, and y-labels -------------------------------------
if((Latest.to.Today)<pardiff){
xlabel<-paste0((Frequency),' Returns on ', x.name, ' (Since ', (xmonth),')')
ylabel<-paste0((Frequency),' Returns on ', y.name, ' (Since ', (ymonth),')')
}
#### Generating a linear model to find Beta ----------------------------------
Results<-lm(Y.Close ~ X.Close, GGFRAME)
#### Extracting coefficients -------------------------------------------------
cf <- round(coef(Results),6)
#### Restate the 'Interval' variable -----------------------------------------
Interval<-signif(as.double((difftime(max(end(adj.Y),end(adj.X)),min(start(adj.Y),start(adj.X)), units='days')/360)), digits=4)
#### Calculating Time Interval and Unit for Reporting ------------------------
if(difftime(max(end(adj.Y),end(adj.X)),min(start(adj.Y),start(adj.X)), units='days')>300) {
ReportingInterval<-round(as.double((difftime(max(end(adj.Y),end(adj.X)),min(start(adj.Y),start(adj.X)), units='days')/360)), digits=0)
Unit<-'-Year, '
t.name<-paste(ReportingInterval, 'Y', sep='')
}
#### Calculating Time Interval and Unit for Reporting ------------------------
if(difftime(max(end(adj.Y),end(adj.X)),min(start(adj.Y),start(adj.X)), units='days')<300) {
ReportingInterval<-round(as.double((difftime(max(end(adj.Y),end(adj.X)),min(start(adj.Y),start(adj.X)), units='days')/30)), digits=0)
Unit<-'-Month, '
t.name<-paste(ReportingInterval, 'M', sep='')
}
#### Reporting ---------------------------------------------------------------
GraphTitle1<-paste0('Beta Calculation: ', y.name,' (', ReportingInterval, Unit, Frequency,'): ')
### Beta ---------------------------------------------------------------------
GraphTitle2<-paste0('Beta = ')
BetaValue<-(as.double(sprintf('%.4f', (cf[2]))))
GraphTitle3<-paste0(', ')
### Alpha --------------------------------------------------------------------
GraphTitle4<-paste0('Alpha = ')
Alpha.percent<-(as.double(cf[1])*100)
GraphTitle5<-paste0(', ')
### R-Squared ----------------------------------------------------------------
GraphTitle6<-paste0(' = ')
R.Squared<-as.double((format(summary(Results)$r.squared, digits=4)))
# ----------------------------------------------------------------------------
#### Setting up base plot, color, linear regression, and theme ---------------
#### Setting up common limits ------------------------------------------------
if(Theme=='GV'){
plotcolor=rgb(0.4,0,1) # --------------------------------------------------- 1
linecolor='blue' # --------------------------------------------------------- 2
pointsize=3 # -------------------------------------------------------------- 3
textcolor=rgb(0.15,0.15,0.15) # -------------------------------------------- 4
background=rgb(1,1,1) # ---------------------------------------------------- 5
axislines=rgb(0.75,0.75,0.75) # -------------------------------------------- 6
gridlines=rgb(0.9,0.9,0.9) # ----------------------------------------------- 7
shade=rgb(0.8,0.8,0.8) # --------------------------------------------------- 8
}
if(Theme=='BB'){
plotcolor='orange' # ------------------------------------------------------- 1
linecolor='red' # ---------------------------------------------------------- 2
pointsize=2 # -------------------------------------------------------------- 3
textcolor=rgb(1,1,1) # ----------------------------------------------------- 4
background=rgb(0,0,0) # ---------------------------------------------------- 5
axislines=rgb(0.25,0.25,0.25) # -------------------------------------------- 6
gridlines=rgb(0.15,0.15,0.15) # -------------------------------------------- 7
shade=rgb(0.5,0.5,0.5) # --------------------------------------------------- 8
}
xrange <- range(pretty((GGFRAME[,1])))
yrange <- range(pretty((GGFRAME[,2])))
#### Setting up y-margin histogram -------------------------------------------
p.left <- ggplot(GGFRAME, aes(Y.Close)) +
geom_vline(aes(xintercept=0), colour=axislines, linetype='solid')+
geom_density(color=linecolor, size=0.65, fill=shade, alpha=0.45)+
geom_histogram(fill=plotcolor,
color='black', alpha=0.7) +
#geom_vline(data=GGFRAME, aes(xintercept=mean(as.numeric(Y.Close)), colour='red', linetype='solid'))+
#geom_vline(data=GGFRAME, aes(xintercept=quantile(as.numeric(Y.Close), 0.25), colour='red', linetype='solid'))+
#geom_vline(data=GGFRAME, aes(xintercept=quantile(as.numeric(Y.Close), 0.75), colour='red', linetype='solid'))+
lims(x = yrange) +
coord_flip() +
theme_light() +
xlab(ylabel)+
ylab('')+
theme(
text = element_text(colour=textcolor),
panel.background = element_rect(fill=background),
rect = element_rect(fill =background, color=background),
axis.text.y = element_text(colour=textcolor),
axis.title.x = element_blank(),
axis.text.x = element_blank(),
axis.ticks.x = element_blank(),
panel.grid.major = element_line(colour =gridlines),
panel.grid.minor = element_line(colour =gridlines),
panel.grid.major.x = element_blank(),
panel.grid.minor.x = element_blank(),
plot.margin = unit(c(1, 0.05, 0.05, 1), 'lines'),
legend.position='none'
)
#### Setting up empty plot ---------------------------------------------------
p.blank <- ggplot() +
theme_void() +
theme(plot.margin = unit(rep(0, 4), 'lines'))
#### Setting up main plot ----------------------------------------------------
p.main <- ggplot(GGFRAME, aes(X.Close, Y.Close))+
geom_vline(aes(xintercept=0), colour=axislines, linetype='solid')+
geom_hline(aes(yintercept=0), colour=axislines, linetype='solid')+
geom_smooth(method='lm', se=TRUE, color=linecolor, fill=shade, size=0.70)+
geom_point(color=plotcolor, size=pointsize, alpha=0.7)+
geom_abline(aes(slope=cf[2],intercept=cf[1]), color=linecolor)+
annotate('text', x=-Inf, y=Inf, label='hat(y) == beta(x) + alpha', hjust=-.2, vjust=2, size=(5), parse=TRUE, col=textcolor) +
lims(x=xrange, y=yrange)+
theme_light() +
theme(
panel.background = element_rect(fill=background),
rect = element_rect(fill =background, color=background),
panel.grid.major = element_line(colour =gridlines),
panel.grid.minor = element_line(colour =gridlines),
axis.title = element_blank(),
axis.text = element_blank(),
axis.ticks = element_blank(),
plot.margin = unit(c(1, 1, 0.05, 0.05), 'lines'),
legend.position='none'
)
#### Setting up x-margin histogram -------------------------------------------
p.bottom <- ggplot(GGFRAME, aes(X.Close)) +
geom_vline(aes(xintercept=0), colour=axislines, linetype='solid')+
geom_density(color=linecolor, size=0.65, fill=shade, alpha=0.45)+
geom_histogram(fill=plotcolor,
color='black', alpha=0.7) +
#geom_vline(data=GGFRAME, aes(xintercept=mean(as.numeric(X.Close)), colour='red', linetype='solid'))+
#geom_vline(data=GGFRAME, aes(xintercept=quantile(as.numeric(X.Close), 0.25), colour='red', linetype='solid'))+
#geom_vline(data=GGFRAME, aes(xintercept=quantile(as.numeric(X.Close), 0.75), colour='red', linetype='solid'))+
lims(x = xrange) +
theme_light() +
xlab(xlabel)+
ylab('')+
theme(
text = element_text(colour=textcolor),
panel.background = element_rect(fill=background),
rect = element_rect(fill =background, color=background),
axis.text.x = element_text(colour=textcolor),
axis.title.y = element_blank(),
axis.text.y = element_blank(),
axis.ticks.y = element_blank(),
panel.grid.major = element_line(colour =gridlines),
panel.grid.minor = element_line(colour =gridlines),
panel.grid.major.y = element_blank(),
panel.grid.minor.y = element_blank(),
plot.margin = unit(c(0.05, 1, 1, 0.05), 'lines'),
legend.position='none'
)
# 2x2 layout: left histogram | main scatter / blank | bottom histogram.
# Renamed from `lm` so the layout matrix no longer shadows stats::lm().
layout_mat <- matrix(1:4, nrow = 2)
GGGRID<-grid.arrange(
p.left, p.blank, p.main, p.bottom,
top = textGrob((paste0('\n',(GraphTitle1), (GraphTitle2), (BetaValue), (GraphTitle3), (GraphTitle4), (Alpha.percent), '%', (GraphTitle5), 'R-Squared', (GraphTitle6), (R.Squared), '\n ')), gp=gpar(col=textcolor, cex=1.1, fontface='bold')),
#(paste0('\n',(GraphTitle1), (GraphTitle2), (BetaValue), (GraphTitle3), (GraphTitle4), (Alpha.percent), '%', (GraphTitle5), 'R-Squared', (GraphTitle6), (R.Squared))),
layout_matrix = layout_mat,
widths = c(1, 5),
heights = c(5, 1),
padding = unit(0.05, 'line')
)
grid.draw(grobTree(rectGrob(gp=gpar(fill=background, col=background, lwd=0.05)), GGGRID))
# ----------------------------------------------------------------------------
#### Generating the Beta, and labelling it by its parameters -----------------
if(Frequency=='Daily'){
freq.name<-'d'
}
if(Frequency=='Weekly'){
freq.name<-'w'
}
if(Frequency=='Monthly'){
freq.name<-'m'
}
if((Latest.to.Today)>=pardiff){
freq.name<-paste(freq.name, 'p', sep='.')
}
BetaName <- paste('Beta', x.name, t.name, freq.name, y.name, sep='.')
Beta <- as.double(sprintf('%.4f', (cf[2])))
Adj.Beta <- as.double(sprintf('%.4f', ((cf[2]))*(2/3)+(1/3)))
# ----------------------------------------------------------------------------
return(
#### Beta ------------------------------------------------------------------
# `envir` spelled out (was `env=`, which relied on partial matching).
assign(BetaName, data.frame(Beta,Adj.Beta,Alpha.percent,R.Squared,Interval,Frequency,Observations,Since,Until,row.names = BetaName), envir=parent.frame())
)
} | /iBeta.R | no_license | FinanceStudyGroup/iBeta.R | R | false | false | 21,461 | r | #### iBeta ---------------------------------------------------------------------
Beta<-function(Y, X='^GSPC', Since=NULL, Until=NULL, Interval=2, Frequency='Weekly', Theme='BB'){
# ----------------------------------------------------------------------------
#### iBeta 5.3 (An Online Regression Beta Calculator) ------------------------
require(quantmod)
require(tidyquant)
require(ggplot2)
require(grid)
require(gridExtra)
require(lattice)
#### Fix non-standard names --------------------------------------------------
Fix<-function(x){
if(class(x)=='character'){
#Collect the name of the variable
name<-deparse(substitute(x))
#Rename indicies to standard names
if(x=='^DJI'){x<-'DJIA'}
if(x=='^GSPC'){x<-'SPX'}
if(x=='^IXIC'){x<-'IXIC'}
if(x=='^RUT'){x<-'RUT'}
if(x=='^VIX'){x<-'VIX'}
if(x=='^GSPTSE'){x<-'GSPTSE'}
if(x=='^BVSP'){x<-'BVSP'}
if(x=='^MXX'){x<-'MXX'}
if(x=='^GDAXI'){x<-'GDAXI'}
if(x=='^FTSE'){x<-'FTSE'}
if(x=='^FCHI'){x<-'FCHI'}
if(x=='^STOXX50E'){x<-'STOXX50E'}
if(x=='^AEX'){x<-'AEX'}
if(x=='^IBEX'){x<-'IBEX'}
if(x=='FTSEMIB.MI'){x<-'FTSEMIB'}
if(x=='^SSMI'){x<-'SSMI'}
if(x=='PSI20.LS'){x<-'PSI20'}
if(x=='^BFX'){x<-'BFX'}
if(x=='^ATX'){x<-'ATX'}
if(x=='^OMX'){x<-'OMX'}
if(x=='^OMXC25'){x<-'OMXC25'}
if(x=='IMOEX.ME'){x<-'MOEX'}
if(x=='RTSI.ME'){x<-'RTSI'}
if(x=='XU100.IS'){x<-'XU100'}
if(x=='TA35.TA'){x<-'TA35'}
if(x=='^TASI.SR'){x<-'SASEIDX'}
if(x=='^N225'){x<-'NKY'}
if(x=='^AXJO'){x<-'AS51'}
if(x=='^SSEC'){x<-'SHCOMP'}
if(x=='399001.SZ'){x<-'SICOM'}
if(x=='XINA50N.FGI'){x<-'TXIN9'}
if(x=='^DJSH'){x<-'DJSH'}
if(x=='^HSI'){x<-'HSI'}
if(x=='^TWII'){x<-'TWSE'}
if(x=='^SET.BK'){x<-'SET'}
if(x=='^KS11'){x<-'KOSPI'}
if(x=='^JKSE'){x<-'JCI'}
if(x=='^NSEI'){x<-'NIFTY'}
if(x=='^BSESN'){x<-'SENSEX'}
if(x=='PSEI.PS'){x<-'PCOMP'}
if(x=='^STI'){x<-'FSSTI'}
if(x=='^KSE'){x<-'KSE100'}
}
return(
assign(name, x, env=parent.frame())
)
}
# ----------------------------------------------------------------------------
Xname<-NULL
Yname<-NULL
if(is.xts(X)=='FALSE'){
Xname<-as.character(X)
}else{
x.name<-sub('\\..*', '', (names(X[,1])))
}
if(is.xts(Y)=='FALSE'){
Yname<-as.character(Y)
}else{
y.name<-sub('\\..*', '', (names(Y[,1])))
}
price<-function(x, get = 'stock.prices', rename = NULL, from = NULL){
if(is.xts(x)=='FALSE'){
#Name the data
name<-x
output<-deparse(substitute(x))
#Collect the data
if(is.null(from)==FALSE){
x<-tq_get(x, get = get, from = from)
}else{
x<-tq_get(x, get = get)
}
#Rename if rename is required
if(class(rename)=='character'){
name<-rename
}
#Extract date series
dates<-x$date
#Extract price data
Open<-x$open
High<-x$high
Low<-x$low
Close<-x$close
Volume<-x$volume
Adjusted<-x$adjusted
# Define the data frame.
data<-data.frame(cbind(Open,High,Low,Close,Volume,Adjusted))
colnames(data)[1]<-paste(name,'Open', sep='.')
colnames(data)[2]<-paste(name,'High', sep='.')
colnames(data)[3]<-paste(name,'Low', sep='.')
colnames(data)[4]<-paste(name,'Close', sep='.')
colnames(data)[5]<-paste(name,'Volume', sep='.')
colnames(data)[6]<-paste(name,'Adjusted', sep='.')
row.names(data)<-dates
# Assigning the data to the environment.
return(
assign(output, (data<-as.xts(data)), env=parent.frame())
)
}
}
price(Y, get = 'stock.prices', rename = NULL, from = '1900-01-01')
price(X, get = 'stock.prices', rename = NULL, from = '1900-01-01')
Y<-to.daily(Y)
X<-to.daily(X)
# ----------------------------------------------------------------------------
#### Converting X- and Y-Series to a Common Length ---------------------------
#### If the length of the Y-time series is shorter than that of the X- -------
if((as.numeric((as.numeric(start(X)))-(as.numeric(start(Y)))))==0){
Earliest.Date.Available<-(as.Date((max(start(X),start(Y)))))
}
if(length(Y)<length(X)){
Earliest.Date.Available<-start(Y)
minlength<-as.double(length(Y [,1]))
#### Reacquiring price data from a common date (Start of the Y-series) -----
Y<-as.xts(tail(Y, n=minlength))
X<-as.xts(tail(X, n=minlength))
}
#### If the length of the X-time series is shorter than that of the Y- -------
if(length(Y)>length(X)){
Earliest.Date.Available<-start(X)
minlength<-as.double(length(X [,1]))
#### Reacquiring price data from a common date (Start of the X-series) -----
Y<-as.xts(tail(Y, n=minlength))
X<-as.xts(tail(X, n=minlength))
}
#### Removable Control Module (3.0) ------------------------------------------
# ----------------------------------------------------------------------------
if(class(Since)=='character'){
Since<-as.Date(Since)
}
if(class(Until)=='character'){
Until<-as.Date(Until)
}
if(class(Interval)=='numeric'){
Interval<-as.double(Interval)
}
### Scenario 1: No Parameters are specified (Implied) -----------------------
### Scenario 2B: 'Since and 'Until' Parameters are specified -----------------
if(class(Since)=='Date'){
if(class(Until)=='Date'){
Since=as.Date(max(Since,Earliest.Date.Available))
X<-X[paste(Since,Until,sep='::')]
Y<-Y[paste(Since,Until,sep='::')]
}
}
### Scenario 2C: Only the 'Until' Parameter is specified ---------------------
if(class(Until)=='Date'){
X<-X[paste('',Until,sep='::')]
Y<-Y[paste('',Until,sep='::')]
}else{
### Scenario 2A: The 'Since' Parameter is specified ------------------------
if(class(Since)=='Date'){
Since=as.Date(max(Since,Earliest.Date.Available))
X<-X[paste(Since,'',sep='::')]
Y<-Y[paste(Since,'',sep='::')]
}else{
### Scenario 3: Interval Mode (Takes precedence) ------------------------
if(class(Interval)=='numeric'){
if((Interval)>0){
Since=as.Date((max(end(X),end(Y))))-(Interval*365)
Until=as.Date((min(end(X),end(Y))))
Since=as.Date(max((max(start(X),start(Y))),Since))
X<-X[paste(Since,Until,sep='::')] # This might work
Y<-Y[paste(Since,Until,sep='::')] # This might work
}
}
}
}
#### Reconvert to Common Length (Failsafe) -----------------------------------
#### Converting X- and Y-Series to a Common Length. ####
#### If the length of the Y-time series is shorter than that of the X- -------
if(length(Y)<length(X)){
Earliest.Date.Available<-start(Y)
minlength<-as.double(length(Y [,1]))
#### Reacquiring price data from a common date (Start of the Y-series) -----
Y<-as.xts(tail(Y, n=minlength))
X<-as.xts(tail(X, n=minlength))
}
#### If the length of the X-time series is shorter than that of the Y- -------
if(length(Y)>length(X)){
Earliest.Date.Available<-start(X)
minlength<-as.double(length(X [,1]))
#### Reacquiring price data from a common date (Start of the X-series) -----
Y<-as.xts(tail(Y, n=minlength))
X<-as.xts(tail(X, n=minlength))
}
#### End of Removable Control Module -----------------------------------------
# ----------------------------------------------------------------------------
#### Determining Frequency: If Daily, Convert to Daily -----------------------
if(Frequency=='Daily'){
#### Converting data to Daily, if Frequency = Daily ------------------------
adj.Y<-to.daily(Y)
adj.X<-to.daily(X)
pardiff<-5
}
#### Determining Frequency: If Weekly, Convert to Weekly ---------------------
if(Frequency=='Weekly'){
#### Converting data to Weekly, if Frequency = Weekly ----------------------
adj.Y<-to.weekly(Y)
adj.X<-to.weekly(X)
pardiff<-5
}
#### Determining Frequency: If Monthly, Convert to Monthly -------------------
if(Frequency=='Monthly'){
#### Converting data to Monthly, if Frequency = Monthly --------------------
adj.Y<-to.monthly(Y)
adj.X<-to.monthly(X)
pardiff<-25
}
#### Closing Price Extraction ------------------------------------------------
#### Extracting the columns containing closing price data --------------------
YClose<-as.xts(adj.Y[,4])
XClose<-as.xts(adj.X[,4])
Latest.Date.Used<-as.Date(max(end(XClose),end(YClose)))
Latest.to.Today<-as.double(difftime(as.Date(Sys.Date()),Latest.Date.Used))
#### Returns Calculation -----------------------------------------------------
#### Converting Price data to Returns data -----------------------------------
YReturns<-(diff(YClose)/YClose[-length(YClose)])
XReturns<-(diff(XClose)/XClose[-length(XClose)])
#### Aggregation into Data Frame ---------------------------------------------
#### Creating a data frame to make the plot ----------------------------------
GGFRAME<-data.frame(as.xts(merge(XReturns, YReturns)))
GGFRAME<-data.frame(na.omit(GGFRAME))
if(Frequency=='Daily'){
Since<-as.Date(rownames(GGFRAME)[1]) #Error here if Frequency = 'Monthly'. The 'if' statements obviate the error.
Until<-as.Date(rownames(GGFRAME)[(length(rownames(GGFRAME)))]) #Error here if Frequency = 'Monthly'
Observations<-(length(rownames(GGFRAME)))
}
if(Frequency=='Weekly'){
Since<-as.Date(rownames(GGFRAME)[1]) #Error here if Frequency = 'Monthly'. The 'if' statements obviate the error.
Until<-as.Date(rownames(GGFRAME)[(length(rownames(GGFRAME)))]) #Error here if Frequency = 'Monthly'
Observations<-(length(rownames(GGFRAME)))
}
Observations<-(length(rownames(GGFRAME)))
#### Three-step process for making the x- and y- labels ----------------------
#### Setting up dates --------------------------------------------------------
xdate<-max((start(XClose)),(start(YClose)))
ydate<-max((start(XClose)),(start(YClose)))
#### Here are the beginning month and year -----------------------------------
xmonth<-paste(format(xdate, '%b'), format(xdate, '%Y'))
ymonth<-paste(format(ydate, '%b'), format(ydate, '%Y'))
#### Names of the variables --------------------------------------------------
if(is.null(Xname)=='FALSE'){
x.name<-Xname
}
if(is.null(Yname)=='FALSE'){
y.name<-Yname
}
x.name<-Fix(x.name)
y.name<-Fix(y.name)
#### If Beta is taken over an interval, here are the ending month and year ---
if((Latest.to.Today)>=pardiff){
xend<-end(XClose)
yend<-end(YClose)
xmonth.end<-paste(format(xend, '%b'), format(xend, '%Y'))
ymonth.end<-paste(format(yend, '%b'), format(xend, '%Y'))
### If Beta is taken over an interval, here are the x- and y- labels -------
xlabel<-paste0((Frequency),' Returns on ', x.name, ' (From ', (xmonth),',',' To ',(xmonth.end),') [Interval-Bound]')
ylabel<-paste0((Frequency),' Returns on ', y.name, ' (From ', (ymonth),',',' To ',(ymonth.end),')')
}
#### Setting up regular x-, and y-labels -------------------------------------
if((Latest.to.Today)<pardiff){
xlabel<-paste0((Frequency),' Returns on ', x.name, ' (Since ', (xmonth),')')
ylabel<-paste0((Frequency),' Returns on ', y.name, ' (Since ', (ymonth),')')
}
#### Generating a linear model to find Beta ----------------------------------
Results<-lm(Y.Close ~ X.Close, GGFRAME)
#### Extracting coefficients -------------------------------------------------
cf <- round(coef(Results),6)
#### Restate the 'Interval' variable -----------------------------------------
Interval<-signif(as.double((difftime(max(end(adj.Y),end(adj.X)),min(start(adj.Y),start(adj.X)), units='days')/360)), digits=4)
#### Calculating Time Interval and Unit for Reporting ------------------------
if(difftime(max(end(adj.Y),end(adj.X)),min(start(adj.Y),start(adj.X)), units='days')>300) {
ReportingInterval<-round(as.double((difftime(max(end(adj.Y),end(adj.X)),min(start(adj.Y),start(adj.X)), units='days')/360)), digits=0)
Unit<-'-Year, '
t.name<-paste(ReportingInterval, 'Y', sep='')
}
#### Calculating Time Interval and Unit for Reporting ------------------------
if(difftime(max(end(adj.Y),end(adj.X)),min(start(adj.Y),start(adj.X)), units='days')<300) {
ReportingInterval<-round(as.double((difftime(max(end(adj.Y),end(adj.X)),min(start(adj.Y),start(adj.X)), units='days')/30)), digits=0)
Unit<-'-Month, '
t.name<-paste(ReportingInterval, 'M', sep='')
}
#### Reporting ---------------------------------------------------------------
GraphTitle1<-paste0('Beta Calculation: ', y.name,' (', ReportingInterval, Unit, Frequency,'): ')
### Beta ---------------------------------------------------------------------
GraphTitle2<-paste0('Beta = ')
BetaValue<-(as.double(sprintf('%.4f', (cf[2]))))
GraphTitle3<-paste0(', ')
### Alpha --------------------------------------------------------------------
GraphTitle4<-paste0('Alpha = ')
Alpha.percent<-(as.double(cf[1])*100)
GraphTitle5<-paste0(', ')
### R-Squared ----------------------------------------------------------------
GraphTitle6<-paste0(' = ')
R.Squared<-as.double((format(summary(Results)$r.squared, digits=4)))
# ----------------------------------------------------------------------------
#### Setting up base plot, color, linear regression, and theme ---------------
#### Setting up common limits ------------------------------------------------
if(Theme=='GV'){
plotcolor=rgb(0.4,0,1) # --------------------------------------------------- 1
linecolor='blue' # --------------------------------------------------------- 2
pointsize=3 # -------------------------------------------------------------- 3
textcolor=rgb(0.15,0.15,0.15) # -------------------------------------------- 4
background=rgb(1,1,1) # ---------------------------------------------------- 5
axislines=rgb(0.75,0.75,0.75) # -------------------------------------------- 6
gridlines=rgb(0.9,0.9,0.9) # ----------------------------------------------- 7
shade=rgb(0.8,0.8,0.8) # --------------------------------------------------- 8
}
if(Theme=='BB'){
plotcolor='orange' # ------------------------------------------------------- 1
linecolor='red' # ---------------------------------------------------------- 2
pointsize=2 # -------------------------------------------------------------- 3
textcolor=rgb(1,1,1) # ----------------------------------------------------- 4
background=rgb(0,0,0) # ---------------------------------------------------- 5
axislines=rgb(0.25,0.25,0.25) # -------------------------------------------- 6
gridlines=rgb(0.15,0.15,0.15) # -------------------------------------------- 7
shade=rgb(0.5,0.5,0.5) # --------------------------------------------------- 8
}
xrange <- range(pretty((GGFRAME[,1])))
yrange <- range(pretty((GGFRAME[,2])))
#### Setting up y-margin histogram -------------------------------------------
p.left <- ggplot(GGFRAME, aes(Y.Close)) +
geom_vline(aes(xintercept=0), colour=axislines, linetype='solid')+
geom_density(color=linecolor, size=0.65, fill=shade, alpha=0.45)+
geom_histogram(fill=plotcolor,
color='black', alpha=0.7) +
#geom_vline(data=GGFRAME, aes(xintercept=mean(as.numeric(Y.Close)), colour='red', linetype='solid'))+
#geom_vline(data=GGFRAME, aes(xintercept=quantile(as.numeric(Y.Close), 0.25), colour='red', linetype='solid'))+
#geom_vline(data=GGFRAME, aes(xintercept=quantile(as.numeric(Y.Close), 0.75), colour='red', linetype='solid'))+
lims(x = yrange) +
coord_flip() +
theme_light() +
xlab(ylabel)+
ylab('')+
theme(
text = element_text(colour=textcolor),
panel.background = element_rect(fill=background),
rect = element_rect(fill =background, color=background),
axis.text.y = element_text(colour=textcolor),
axis.title.x = element_blank(),
axis.text.x = element_blank(),
axis.ticks.x = element_blank(),
panel.grid.major = element_line(colour =gridlines),
panel.grid.minor = element_line(colour =gridlines),
panel.grid.major.x = element_blank(),
panel.grid.minor.x = element_blank(),
plot.margin = unit(c(1, 0.05, 0.05, 1), 'lines'),
legend.position='none'
)
#### Setting up empty plot ---------------------------------------------------
p.blank <- ggplot() +
theme_void() +
theme(plot.margin = unit(rep(0, 4), 'lines'))
#### Setting up main plot ----------------------------------------------------
p.main <- ggplot(GGFRAME, aes(X.Close, Y.Close))+
geom_vline(aes(xintercept=0), colour=axislines, linetype='solid')+
geom_hline(aes(yintercept=0), colour=axislines, linetype='solid')+
geom_smooth(method='lm', se=TRUE, color=linecolor, fill=shade, size=0.70)+
geom_point(color=plotcolor, size=pointsize, alpha=0.7)+
geom_abline(aes(slope=cf[2],intercept=cf[1]), color=linecolor)+
annotate('text', x=-Inf, y=Inf, label='hat(y) == beta(x) + alpha', hjust=-.2, vjust=2, size=(5), parse=TRUE, col=textcolor) +
lims(x=xrange, y=yrange)+
theme_light() +
theme(
panel.background = element_rect(fill=background),
rect = element_rect(fill =background, color=background),
panel.grid.major = element_line(colour =gridlines),
panel.grid.minor = element_line(colour =gridlines),
axis.title = element_blank(),
axis.text = element_blank(),
axis.ticks = element_blank(),
plot.margin = unit(c(1, 1, 0.05, 0.05), 'lines'),
legend.position='none'
)
#### Setting up x-margin histogram -------------------------------------------
p.bottom <- ggplot(GGFRAME, aes(X.Close)) +
geom_vline(aes(xintercept=0), colour=axislines, linetype='solid')+
geom_density(color=linecolor, size=0.65, fill=shade, alpha=0.45)+
geom_histogram(fill=plotcolor,
color='black', alpha=0.7) +
#geom_vline(data=GGFRAME, aes(xintercept=mean(as.numeric(X.Close)), colour='red', linetype='solid'))+
#geom_vline(data=GGFRAME, aes(xintercept=quantile(as.numeric(X.Close), 0.25), colour='red', linetype='solid'))+
#geom_vline(data=GGFRAME, aes(xintercept=quantile(as.numeric(X.Close), 0.75), colour='red', linetype='solid'))+
lims(x = xrange) +
theme_light() +
xlab(xlabel)+
ylab('')+
theme(
text = element_text(colour=textcolor),
panel.background = element_rect(fill=background),
rect = element_rect(fill =background, color=background),
axis.text.x = element_text(colour=textcolor),
axis.title.y = element_blank(),
axis.text.y = element_blank(),
axis.ticks.y = element_blank(),
panel.grid.major = element_line(colour =gridlines),
panel.grid.minor = element_line(colour =gridlines),
panel.grid.major.y = element_blank(),
panel.grid.minor.y = element_blank(),
plot.margin = unit(c(0.05, 1, 1, 0.05), 'lines'),
legend.position='none'
)
lm <- matrix(1:4, nrow = 2)
GGGRID<-grid.arrange(
p.left, p.blank, p.main, p.bottom,
top = textGrob((paste0('\n',(GraphTitle1), (GraphTitle2), (BetaValue), (GraphTitle3), (GraphTitle4), (Alpha.percent), '%', (GraphTitle5), 'R-Squared', (GraphTitle6), (R.Squared), '\n ')), gp=gpar(col=textcolor, cex=1.1, fontface='bold')),
#(paste0('\n',(GraphTitle1), (GraphTitle2), (BetaValue), (GraphTitle3), (GraphTitle4), (Alpha.percent), '%', (GraphTitle5), 'R-Squared', (GraphTitle6), (R.Squared))),
layout_matrix = lm,
widths = c(1, 5),
heights = c(5, 1),
padding = unit(0.05, 'line')
)
grid.draw(grobTree(rectGrob(gp=gpar(fill=background, col=background, lwd=0.05)), GGGRID))
# ----------------------------------------------------------------------------
#### Generating the Beta, and labelling it by its parameters -----------------
if(Frequency=='Daily'){
freq.name<-'d'
}
if(Frequency=='Weekly'){
freq.name<-'w'
}
if(Frequency=='Monthly'){
freq.name<-'m'
}
if((Latest.to.Today)>=pardiff){
freq.name<-paste(freq.name, 'p', sep='.')
}
BetaName <- paste('Beta', x.name, t.name, freq.name, y.name, sep='.')
Beta <- as.double(sprintf('%.4f', (cf[2])))
Adj.Beta <- as.double(sprintf('%.4f', ((cf[2]))*(2/3)+(1/3)))
# ----------------------------------------------------------------------------
return(
#### Beta ------------------------------------------------------------------
assign(BetaName, data.frame(Beta,Adj.Beta,Alpha.percent,R.Squared,Interval,Frequency,Observations,Since,Until,row.names = BetaName), env=parent.frame())
)
} |
\name{genesbyehistolyticaexpressiontiming}
\alias{GenesByEHistolyticaExpressionTiming}
\title{Find <i>E. histolytica</i> genes based on microarray expression fold change calculations between different strains in specified stage and media.}
\description{<br><br>(1) Gene expression in <i>Entamoeba histolytica</i> cysts and trophozoites<br>RNA was isolated from <i>E. histolytica</i> parasites recently derived from human patients and grown in xenic culture (Robinson's media) for 1-8 weeks. These recently isolated cultures contained a mixture of trophozoites and cysts (as assayed by staining with calcofluor). Two different strains, isolated from different patients, were used. Gene expression in these strains was compared to gene expression in multiple E. histolytica strains (HMI:IMSS, Rahman and 200:NIH) that contained only trophozoites, which had been grown in axenic culture (TYI-S-33) for > 10 years, using a full genome Affymetrix-platform microarray. Multiple lines of evidence suggest that many genes differentially expressed between these two conditions (recent isolates vs. long term lab growth) are developmentally regulated.<br><br>All samples were isolated using Trizol and samples other than the cyst containing clinical isolate samples were further purified with the Qiagen RNAeasy kit (clinical isolates produced insufficient RNA for cleanup). Samples GSM154008 (HM1replicate 3) and GSM154011 (Rahman replicate 3) were amplified using the SPIA protocol, and hybridized and scanned at VBI. All other samples were processed entirely by Stanford PAN facility. Samples GSM154022, GSM154023, and GSM154024 (the cyst containing clinical samples) were amplified 2 rounds using the standard affy T7 procedure. All others had sufficient RNA and only underwent the standard affy in vitro transcription labeling protocol.<br><a href="http://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=GSE6648">GEO Dataset: Gene expression in <i>Entamoeba histolytica</i> cysts and trophozoites</a><br><br>
(2) An Affymetrix platform gene expression array was designed for this analysis that included probe sets for 9435 open reading frames (ORFs) and 9066 5' and 3' flanking regions. Transcripts were detected for > 80\% of all ORFs. A total of 523 transcripts (5.2\% of all <i>E. histolytica</i> genes) were significantly changed in amebae isolated from the intestine on Days 1 and 29 after infection: 326 and 109 solely on Days 1 and 29, and 88 on both days. Quantitative real-time reverse transcriptase PCR confirmed these changes in 11/12 genes tested using mRNA isolated from an additional six mice.<br><a href="http://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=GSE8484">GEO Dataset: Impact of intestinal colonization and invasion on the <i>Entamoeba histolytica</i> transcriptome.</a><br><br><br>For further details, please refer,<br>
Gilchrist CA, Houpt E, Trapaidze N, Fei Z et al.
Impact of intestinal colonization and invasion on the <i>Entamoeba histolytica</i> transcriptome.<br>
Mol Biochem Parasitol 2006 Jun;147(2):163-76. PMID: 16569449<br><br>
Ehrenkaufer GM, Haque R, Hackney JA, Eichinger DJ et al.
Identification of developmentally regulated genes in <i>Entamoeba histolytica</i>: insights into mechanisms of stage conversion in a protozoan parasite.<br>
Cell Microbiol 2007 Jun;9(6):1426-44. PMID: 17250591}
\arguments{
\item{profileSetEHisto}{Choose a microarray dataset from which you would like to identify genes}
\item{eh_fc_one_fl}{Choose one or more strain/stage/media. NOTE: if more than one is chosen the fold change will be calculated using the average of all samples within the group
Provide one or more values. Use comma as a delimiter.}
\item{eh_fc_two_fl}{Choose one or more strain/stage/media. NOTE: if more than one is chosen the fold change will be calculated using the average of all samples within the group
Provide one or more values. Use comma as a delimiter.}
\item{fold_change}{Enter a non-negative number. NOTE: Fold change is reported in the summary as positive numbers for up-regulated genes and negative numbers for down-regulated genes}
\item{protein_coding_only}{Should only protein coding genes be returned?}
\item{regulated_dir}{For ConditionA vs. ConditionB, select up-regulated for genes where ConditionA > ConditionB and select down-regulated for genes where ConditionB > ConditionA.}
\item{o-fields}{Single valued attributes of the feature.
Provide one or more values. Use comma as a delimiter.}
\item{o-tables}{Multi-valued attributes of the feature.
Provide one or more values. Use comma as a delimiter.}
\item{.convert}{a logical value or a function that controls how the result of the method is returned. If this is a function, the character string or raw vector is passed to this function and it converts it appropriately. If this is a logical value and \code{TRUE}, then we attempt to convert the result based on its Content-Type returned by the Web server. If this is \code{FALSE}, the value from the Web server is returned as is.}
\item{.url}{the URL for the Web request. This defaults to the correct value, but can be specified by the caller if the method is available at a different URL, e.g. locally or in a mirror server.}
\item{.json}{a logical value controlling whether to use the JSON or the XML version of the method}}
\value{text/xml
text/plain}
\author{}
| /man/genesbyehistolyticaexpressiontiming.Rd | no_license | duncantl/REuPathDB | R | false | false | 5,406 | rd | \name{genesbyehistolyticaexpressiontiming}
\alias{GenesByEHistolyticaExpressionTiming}
\title{Find <i>E. histolytica</i> genes based on microarray expression fold change calculations between different strains in specified stage and media.}
\description{<br><br>(1) Gene expression in <i>Entamoeba histolytica</i> cysts and trophozoites<br>RNA was isolated from <i>E. histolytica</i> parasites recently derived from human patients and grown in xenic culture (Robinson's media) for 1-8 weeks. These recently isolated cultures contained a mixture of trophozoites and cysts (as assayed by staining with calcofluor). Two different strains, isolated from different patients, were used. Gene expression in these strains was compared to gene expression in multiple E. histolytica strains (HMI:IMSS, Rahman and 200:NIH) that contained only trophozoites, which had been grown in axenic culture (TYI-S-33) for > 10 years, using a full genome Affymetrix-platform microarray. Multiple lines of evidence suggest that many genes differentially expressed between these two conditions (recent isolates vs. long term lab growth) are developmentally regulated.<br><br>All samples were isolated using Trizol and samples other than the cyst containing clinical isolate samples were further purified with the Qiagen RNAeasy kit (clinical isolates produced insufficient RNA for cleanup). Samples GSM154008 (HM1replicate 3) and GSM154011 (Rahman replicate 3) were amplified using the SPIA protocol, and hybridized and scanned at VBI. All other samples were processed entirely by Stanford PAN facility. Samples GSM154022, GSM154023, and GSM154024 (the cyst containing clinical samples) were amplified 2 rounds using the standard affy T7 procedure. All others had sufficient RNA and only underwent the standard affy in vitro transcription labeling protocol.<br><a href="http://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=GSE6648">GEO Dataset: Gene expression in <i>Entamoeba histolytica</i> cysts and trophozoites</a><br><br>
(2) An Affymetrix platform gene expression array was designed for this analysis that included probe sets for 9435 open reading frames (ORFs) and 9066 5' and 3' flanking regions. Transcripts were detected for > 80\% of all ORFs. A total of 523 transcripts (5.2\% of all <i>E. histolytica</i> genes) were significantly changed in amebae isolated from the intestine on Days 1 and 29 after infection: 326 and 109 solely on Days 1 and 29, and 88 on both days. Quantitative real-time reverse transcriptase PCR confirmed these changes in 11/12 genes tested using mRNA isolated from an additional six mice.<br><a href="http://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=GSE8484">GEO Dataset: Impact of intestinal colonization and invasion on the <i>Entamoeba histolytica</i> transcriptome.</a><br><br><br>For further details, please refer,<br>
Gilchrist CA, Houpt E, Trapaidze N, Fei Z et al.
Impact of intestinal colonization and invasion on the <i>Entamoeba histolytica</i> transcriptome.<br>
Mol Biochem Parasitol 2006 Jun;147(2):163-76. PMID: 16569449<br><br>
Ehrenkaufer GM, Haque R, Hackney JA, Eichinger DJ et al.
Identification of developmentally regulated genes in <i>Entamoeba histolytica</i>: insights into mechanisms of stage conversion in a protozoan parasite.<br>
Cell Microbiol 2007 Jun;9(6):1426-44. PMID: 17250591}
\arguments{
\item{profileSetEHisto}{Choose a microarray dataset from which you would like to identify genes}
\item{eh_fc_one_fl}{Choose one or more strain/stage/media. NOTE: if more than one is chosen the fold change will be calculated using the average of all samples within the group
Provide one or more values. Use comma as a delimiter.}
\item{eh_fc_two_fl}{Choose one or more strain/stage/media. NOTE: if more than one is chosen the fold change will be calculated using the average of all samples within the group
Provide one or more values. Use comma as a delimiter.}
\item{fold_change}{Enter a non-negative number. NOTE: Fold change is reported in the summary as positive numbers for up-regulated genes and negative numbers for down-regulated genes}
\item{protein_coding_only}{Should only protein coding genes be returned?}
\item{regulated_dir}{For ConditionA vs. ConditionB, select up-regulated for genes where ConditionA > ConditionB and select down-regulated for genes where ConditionB > ConditionA.}
\item{o-fields}{Single valued attributes of the feature.
Provide one or more values. Use comma as a delimiter.}
\item{o-tables}{Multi-valued attributes of the feature.
Provide one or more values. Use comma as a delimiter.}
\item{.convert}{a logical value or a function that controls how the result of the method is returned. If this is a function, the character string or raw vector is passed to this function and it converts it appropriately. If this is a logical value and \code{TRUE}, then we attempt to convert the result based on its Content-Type returned by the Web server. If this is \code{FALSE}, the value from the Web server is returned as is.}
\item{.url}{the URL for the Web request. This defaults to the correct value, but can be specified by the caller if the method is available at a different URL, e.g. locally or in a mirror server.}
\item{.json}{a logical value controlling whether to use the JSON or the XML version of the method}}
\value{text/xml
text/plain}
\author{}
|
# Across the United States, how have emissions from coal combustion-related sources changed from 1999–2008?
library(ggplot2)
# pre-processing
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
# Subset coal combustion related NEI data
coal_comb <- grepl("coal", SCC$EI.Sector, ignore.case=TRUE) & grepl("comb", SCC$EI.Sector, ignore.case=TRUE)
SCC_coal_comb <- SCC[coal_comb,]$SCC
NEI_coal_comb <- NEI[NEI$SCC %in% SCC_coal_comb,]
png(file = "plot4.png", width = 480, height = 480, bg = "transparent")
ggp <- ggplot(NEI_coal_comb, aes(factor(year), Emissions/10^6))
ggp <- ggp + geom_bar(stat="identity") + guides(fill = FALSE) +
labs(x="Year", y="Total PM2.5 Emission (Million Tons)") +
labs(title="Total PM2.5 Emissions from coal combustion-related sources")
print(ggp)
dev.off() | /plot4.R | no_license | kaipinliao/ExData_Plotting2 | R | false | false | 831 | r | # Across the United States, how have emissions from coal combustion-related sources changed from 1999–2008?
library(ggplot2)
# Plot 4 of the Exploratory Data Analysis course project: total PM2.5
# emissions from coal combustion-related sources across the US, 1999-2008.
# Expects summarySCC_PM25.rds and Source_Classification_Code.rds in the
# working directory; writes plot4.png there.
# pre-processing
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
# Subset coal combustion related NEI data: keep SCC rows whose EI.Sector
# mentions both "coal" and "comb" (case-insensitive), then keep the NEI
# records whose SCC code is in that set.
coal_comb <- grepl("coal", SCC$EI.Sector, ignore.case=TRUE) & grepl("comb", SCC$EI.Sector, ignore.case=TRUE)
SCC_coal_comb <- SCC[coal_comb,]$SCC
NEI_coal_comb <- NEI[NEI$SCC %in% SCC_coal_comb,]
png(file = "plot4.png", width = 480, height = 480, bg = "transparent")
# Emissions are divided by 10^6 so the y axis reads in millions of tons;
# stat="identity" stacks the raw per-record values by year.
ggp <- ggplot(NEI_coal_comb, aes(factor(year), Emissions/10^6))
ggp <- ggp + geom_bar(stat="identity") + guides(fill = FALSE) +
labs(x="Year", y="Total PM2.5 Emission (Million Tons)") +
labs(title="Total PM2.5 Emissions from coal combustion-related sources")
print(ggp)
dev.off() |
source('rankhospital.R')
RankAll <- function(outcome, num = "best") {
# Returns a 2-column data frame containing the hospital in each state that
# has the ranking specified in num.
#
# Args:
# outcome: outcome name. Can be one of “heart attack”, “heart failure”,
# or “pneumonia”.
# num: can take values “best”, “worst”, or an integer indicating the ranking.
#
# Returns:
# a 2-column data frame containing the hospital in each state that
# has the ranking specified in num.
outcome.data <- read.csv('outcome-of-care-measures.csv', colClasses = 'character')
states <- unique(outcome.data$State)
df <- data.frame(hospital = character(), state = character())
rankall.names <- c('hospital', 'state')
names(df) <- c('')
for (st in states) {
de <- data.frame(RankHospital(st, outcome, num), st)
names(de) <- rankall.names
df <- rbind(df, de)
}
return(df)
} | /programming_assignment_3/rankall.R | no_license | marcusmonteiro/r_programming_for_data_science | R | false | false | 944 | r | source('rankhospital.R')
RankAll <- function(outcome, num = "best") {
# Rank hospitals in every state for a given outcome.
#
# Args:
#   outcome: outcome name; one of "heart attack", "heart failure",
#            or "pneumonia".
#   num: ranking to report per state; "best", "worst", or an integer rank.
#
# Returns:
#   A 2-column data frame (hospital, state) with one row per state,
#   giving the hospital that holds ranking `num` in that state.
#
# NOTE(review): depends on RankHospital() from rankhospital.R and on
# 'outcome-of-care-measures.csv' being present in the working directory.
outcome.data <- read.csv('outcome-of-care-measures.csv', colClasses = 'character')
# Every distinct state code appearing in the data, in first-seen order.
states <- unique(outcome.data$State)
df <- data.frame(hospital = character(), state = character())
rankall.names <- c('hospital', 'state')
# NOTE(review): assigning a single '' name to a 2-column frame leaves the
# second column named NA; this looks wrong and may clash with rbind()'s
# column-name matching below — confirm and consider removing this line.
names(df) <- c('')
for (st in states) {
# One-row frame: the ranked hospital for this state plus the state code.
de <- data.frame(RankHospital(st, outcome, num), st)
names(de) <- rankall.names
df <- rbind(df, de)
}
return(df)
} |
pdf(file='P4_46_day7.pdf',width=4.5,height=4.5);
gstable=read.table('P4_46_day7.gene_summary.txt',header=T)
#
#
# parameters
# Do not modify the variables beginning with "__"
# gstablename='__GENE_SUMMARY_FILE__'
startindex=3
# outputfile='__OUTPUT_FILE__'
targetgenelist=c("SOX9","AAVS1","PROM1","ROSA26","CCR5","CTRL","LRIG1","mKate2","KRT20","EGFP")
# samplelabel=sub('.\\w+.\\w+$','',colnames(gstable)[startindex]);
samplelabel='6_vs_4 neg.'
# You need to write some codes in front of this code:
# gstable=read.table(gstablename,header=T)
# pdf(file=outputfile,width=6,height=6)
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
######
# function definition
# Plot per-gene scores in ranked order as a line on a log-scaled y axis
# (best/smallest score at the top), highlighting each gene in tglist with
# a colored point and listing the highlighted genes in a legend.
#
# Args:
#   val:    named numeric vector of per-gene scores (names are gene ids);
#           assumed already sorted by the caller.
#   tglist: character vector of gene ids to highlight (may be empty).
#   ...:    further arguments passed on to plot() (xlab/ylab/main, ...).
#
# NOTE(review): relies on the script-level 'colors' palette being defined.
plotrankedvalues <- function(val, tglist, ...) {
  plot(val, log = 'y', ylim = c(max(val), min(val)), type = 'l', lwd = 2, ...)
  if (length(tglist) > 0) {
    for (i in seq_along(tglist)) {
      targetgene <- tglist[i]
      tx <- which(names(val) == targetgene)
      ty <- val[targetgene]
      # ((i - 1) %% n) + 1 keeps the palette index in 1..n; the original
      # 'i %% n' produced an invalid 0 index whenever i was an exact
      # multiple of the palette length.
      points(tx, ty, col = colors[((i - 1) %% length(colors)) + 1],
             cex = 2, pch = 20)
    }
    legend('topright', tglist, pch = 20, pt.cex = 2, cex = 1, col = colors)
  }
}
# Scatter-plot scores in a randomized gene order (grey points, log y axis),
# then highlight and label each gene in targetgenelist. Up to 20 random
# shuffles are tried and the one spreading the target genes farthest apart
# (largest minimum gap between consecutive target positions) is kept, so
# the text labels are less likely to overlap.
#
# Args:
#   val:            named numeric vector of per-gene scores.
#   targetgenelist: character vector of gene ids to highlight.
#   ...:            further arguments passed on to plot().
#
# NOTE(review): relies on the script-level 'colors' palette; uses sample(),
# so the layout depends on RNG state unless the caller sets a seed.
plotrandvalues <- function(val, targetgenelist, ...) {
  # choose the shuffle with the best distance distribution
  mindiffvalue <- 0
  randval <- val
  for (i in 1:20) {
    randval0 <- sample(val)
    vindex <- sort(which(names(randval0) %in% targetgenelist))
    # Reject shuffles that push a target gene into the right-most 10% of
    # the axis, where its text label would run off the plot.
    if (max(vindex) > 0.9 * length(val)) {
      next
    }
    mindiffind <- min(diff(vindex))
    if (mindiffind > mindiffvalue) {
      mindiffvalue <- mindiffind
      randval <- randval0
    }
  }
  plot(randval, log = 'y', ylim = c(max(randval), min(randval)),
       pch = 20, col = 'grey', ...)
  if (length(targetgenelist) > 0) {
    for (i in seq_along(targetgenelist)) {
      targetgene <- targetgenelist[i]
      tx <- which(names(randval) == targetgene)
      ty <- randval[targetgene]
      # ((i - 1) %% n) + 1 keeps the palette index in 1..n; the original
      # point color used 'i %% n' (invalid 0 index at multiples of n) and
      # the label used colors[i] (NA beyond the palette length).
      ci <- ((i - 1) %% length(colors)) + 1
      points(tx, ty, col = colors[ci], cex = 2, pch = 20)
      text(tx + 50, ty, targetgene, col = colors[ci])
    }
  }
}
# set.seed(1235)
pvec=gstable[,startindex]
names(pvec)=gstable[,'id']
pvec=sort(pvec);
plotrankedvalues(pvec,targetgenelist,xlab='Genes',ylab='RRA score',main=paste('Distribution of RRA scores in \\n',samplelabel))
# plotrandvalues(pvec,targetgenelist,xlab='Genes',ylab='RRA score',main=paste('Distribution of RRA scores in \\n',samplelabel))
pvec=gstable[,startindex+1]
names(pvec)=gstable[,'id']
pvec=sort(pvec);
plotrankedvalues(pvec,targetgenelist,xlab='Genes',ylab='p value',main=paste('Distribution of p values in \\n',samplelabel))
# plotrandvalues(pvec,targetgenelist,xlab='Genes',ylab='p value',main=paste('Distribution of p values in \\n',samplelabel))
# you need to write after this code:
# dev.off()
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(7121.442555638859,1948.1722943930413),c(6088.079012827491,902.1819962678186),c(6364.5416898602225,1424.562579937877),c(3441.4546046482965,830.8924107316694),c(8018.260507964547,1822.8009543122273),c(12232.63058468301,4861.212255094309))
targetgene="SOX9"
collabel=c("P4_F1.fq","P4_F3.fq")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(7149.257398145201,6239.067865198156),c(5926.247201881502,4391.684295183809),c(5578.140233544557,4477.72345014123),c(3147.291573293348,2415.241992733329),c(5473.623855641939,4823.109200756022),c(2152.700235187791,2026.8366646398267),c(4420.8742104776675,3414.5253210245232),c(7887.615035586275,6798.322372421396),c(2309.474802041718,2252.9967290993345),c(2572.4514948289498,2346.4106687673916))
targetgene="AAVS1"
collabel=c("P4_F1.fq","P4_F3.fq")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(5281.448580143578,6082.968255489692),c(7992.974287504237,7068.73114514472),c(5835.216808224383,5228.722359841008),c(8622.601176965974,7726.316115176441),c(6282.782910371884,5508.964178845181),c(12758.583970257474,11328.898446322184))
targetgene="PROM1"
collabel=c("P4_F1.fq","P4_F3.fq")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(16374.513496081916,14254.229714874511),c(6462.315075640091,5615.898557149405),c(8910.864090213518,7417.804288114829),c(5917.818461728066,5919.493861070591),c(5486.266965872095,4882.10747844111),c(9012.00897205476,7360.035141214847),c(8867.034641415645,6413.604436683211),c(4174.754997997309,3720.578886515922),c(2333.9181484866845,1971.5257793100557),c(5428.951532828723,5764.623382147233))
targetgene="ROSA26"
collabel=c("P4_F1.fq","P4_F3.fq")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(4532.976454518379,4544.096512536955),c(7012.711807659522,6096.488694125858),c(3428.811494418141,3425.5874980904773),c(3503.82728178373,3180.9904718543794),c(4770.6669268453,4058.5898524200775),c(5021.84338341772,4005.7372286605187),c(11868.509010054535,9386.871805854673),c(7946.6162166603335,6219.40177263646),c(3151.5059433700662,3055.619131773565),c(5365.735981677947,5213.972790419736))
targetgene="CCR5"
collabel=c("P4_F1.fq","P4_F3.fq")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(4771.509800860644,3654.2058241201967),c(4431.831572677135,4321.623840432766),c(16230.382039458143,13789.618278104435),c(2535.3650381538273,2641.4020571928368),c(4707.4513756945225,4631.364798279483),c(10174.332239213712,9003.383000901595),c(6085.55039078146,4991.500118315546),c(5584.040351651963,5414.321108392017),c(4715.037241832616,4316.707317292342),c(7826.928106481529,8145.449712897594))
targetgene="CTRL"
collabel=c("P4_F1.fq","P4_F3.fq")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# parameters
# Do not modify the variables beginning with "__"
# Normalized read counts for each sgRNA targeting LRIG1: one c(sample1, sample2)
# vector per sgRNA, samples ordered as in `collabel`.
targetmat=list(c(6886.280705357968,6010.449539168437),c(3776.0755887397427,3382.5679206117666),c(3469.2694471546383,3381.3387898266606),c(7381.890626380059,7223.601624068078),c(11194.20979777958,9924.001958946004),c(3973.3081083301668,3833.6589187456757))
targetgene="LRIG1"
collabel=c("P4_F1.fq","P4_F3.fq")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
# Add a pseudocount of 1 so zero counts stay finite on the log-scale y axis.
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
# One line per sgRNA: the first iteration draws the frame, later ones overlay.
for(i in seq_along(targetmat)){
  vali=targetmat[[i]]+1;
  # 1-based cyclic palette index; the original `i %% length(colors)` yields the
  # invalid subscript 0 whenever i is a multiple of length(colors).
  coli=colors[((i-1) %% length(colors))+1]
  if(i==1){
    plot(seq_along(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=coli,xaxt='n',log='y')
    axis(1,at=seq_along(vali),labels=(collabel),las=2)
    # lines(0:100,rep(1,101),col='black');
  }else{
    lines(seq_along(vali),vali,type='b',pch=20,col=coli)
  }
}
# parameters
# Do not modify the variables beginning with "__"
# Normalized read counts for each sgRNA targeting mKate2: one c(sample1, sample2)
# vector per sgRNA, samples ordered as in `collabel`.
targetmat=list(c(3335.2524787149914,3187.1361257799094),c(4323.100824697799,4101.609429898788),c(4586.077517485031,3924.614596843521),c(4184.869486181434,3882.8241501499165),c(3847.7198800439564,3547.271445815973),c(7451.006295638243,6631.160585646977))
targetgene="mKate2"
collabel=c("P4_F1.fq","P4_F3.fq")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
# Add a pseudocount of 1 so zero counts stay finite on the log-scale y axis.
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
# One line per sgRNA: the first iteration draws the frame, later ones overlay.
for(i in seq_along(targetmat)){
  vali=targetmat[[i]]+1;
  # 1-based cyclic palette index; the original `i %% length(colors)` yields the
  # invalid subscript 0 whenever i is a multiple of length(colors).
  coli=colors[((i-1) %% length(colors))+1]
  if(i==1){
    plot(seq_along(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=coli,xaxt='n',log='y')
    axis(1,at=seq_along(vali),labels=(collabel),las=2)
    # lines(0:100,rep(1,101),col='black');
  }else{
    lines(seq_along(vali),vali,type='b',pch=20,col=coli)
  }
}
# parameters
# Do not modify the variables beginning with "__"
# Normalized read counts for each sgRNA targeting KRT20: one c(sample1, sample2)
# vector per sgRNA, samples ordered as in `collabel`.
targetmat=list(c(1045.1637790261789,5896.140376153577),c(1405.0709835779355,8201.98972901247),c(1041.792282964804,7168.2907387383075),c(526.7962595898078,2724.982950580046),c(1627.5897236286703,9117.692163916456),c(1274.425511199663,4678.071768113511))
targetgene="KRT20"
collabel=c("P4_F1.fq","P4_F3.fq")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
# Add a pseudocount of 1 so zero counts stay finite on the log-scale y axis.
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
# One line per sgRNA: the first iteration draws the frame, later ones overlay.
for(i in seq_along(targetmat)){
  vali=targetmat[[i]]+1;
  # 1-based cyclic palette index; the original `i %% length(colors)` yields the
  # invalid subscript 0 whenever i is a multiple of length(colors).
  coli=colors[((i-1) %% length(colors))+1]
  if(i==1){
    plot(seq_along(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=coli,xaxt='n',log='y')
    axis(1,at=seq_along(vali),labels=(collabel),las=2)
    # lines(0:100,rep(1,101),col='black');
  }else{
    lines(seq_along(vali),vali,type='b',pch=20,col=coli)
  }
}
# parameters
# Do not modify the variables beginning with "__"
# Normalized read counts for each sgRNA targeting EGFP: one c(sample1, sample2)
# vector per sgRNA, samples ordered as in `collabel`.
targetmat=list(c(559.6683461882119,7871.353547818951),c(810.0019287452885,9524.534453786548),c(345.57834629091394,5925.639514996122),c(448.40897616284445,5269.283675749507),c(659.9703540141113,8780.910328797405),c(2903.7009828590208,8822.70077549101))
targetgene="EGFP"
collabel=c("P4_F1.fq","P4_F3.fq")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
# Add a pseudocount of 1 so zero counts stay finite on the log-scale y axis.
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
# One line per sgRNA: the first iteration draws the frame, later ones overlay.
for(i in seq_along(targetmat)){
  vali=targetmat[[i]]+1;
  # 1-based cyclic palette index; the original `i %% length(colors)` yields the
  # invalid subscript 0 whenever i is a multiple of length(colors).
  coli=colors[((i-1) %% length(colors))+1]
  if(i==1){
    plot(seq_along(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=coli,xaxt='n',log='y')
    axis(1,at=seq_along(vali),labels=(collabel),las=2)
    # lines(0:100,rep(1,101),col='black');
  }else{
    lines(seq_along(vali),vali,type='b',pch=20,col=coli)
  }
}
#
#
# parameters
# Do not modify the variables beginning with "__"
# gstablename='__GENE_SUMMARY_FILE__'
# Column of the gene summary table where this comparison's scores begin
# (presumably the positive-selection RRA score column; the p value is read
# from startindex+1 below -- TODO confirm against the gene_summary.txt header).
startindex=9
# outputfile='__OUTPUT_FILE__'
# Genes highlighted in the rank plots drawn by plotrankedvalues().
targetgenelist=c("EGFP","KRT20","PROM1","ROSA26","LRIG1","CTRL","CCR5","mKate2","AAVS1","SOX9")
# samplelabel=sub('.\\w+.\\w+$','',colnames(gstable)[startindex]);
# Human-readable comparison label used in plot titles.
samplelabel='6_vs_4 pos.'
# You need to write some codes in front of this code:
# gstable=read.table(gstablename,header=T)
# pdf(file=outputfile,width=6,height=6)
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
# Fixed 28-color palette (hard-coded in place of the RColorBrewer call above)
# shared by all plotting code in this script.
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
######
# function definition
plotrankedvalues<-function(val, tglist, ...){
  # Draw a sorted, named numeric vector as a rank curve on a reversed log-scale
  # y axis and highlight the entries named in `tglist` with colored points.
  #
  # val    : named numeric vector, already sorted by the caller.
  # tglist : character vector of names in `val` to highlight.
  # ...    : forwarded to plot() (xlab, ylab, main, ...).
  # Uses the file-level `colors` palette. Called for its plotting side effect.
  plot(val,log='y',ylim=c(max(val),min(val)),type='l',lwd=2, ...)
  if(length(tglist)>0){
    # 1-based cyclic palette indices; the original `i %% length(colors)`
    # produced the invalid subscript 0 when i was a multiple of length(colors).
    cidx=((seq_along(tglist)-1) %% length(colors))+1
    for(i in seq_along(tglist)){
      targetgene=tglist[i];
      tx=which(names(val)==targetgene);ty=val[targetgene];
      points(tx,ty,col=colors[cidx[i]],cex=2,pch=20)
      # text(tx+50,ty,targetgene,col=colors[i])
    }
    # Reuse the same cyclic indices so legend colors always match the points
    # (the original passed the whole palette and relied on recycling).
    legend('topright',tglist,pch=20,pt.cex = 2,cex=1,col=colors[cidx])
  }
}
plotrandvalues<-function(val,targetgenelist, ...){
  # Scatter-plot a randomly permuted copy of `val` (reversed log-scale y axis)
  # and highlight+label the genes in `targetgenelist`. Among 20 random
  # permutations, keep the one where the highlighted genes are most evenly
  # spread (largest minimum gap between their positions), rejecting layouts
  # that push a highlighted gene into the last 10% of the axis.
  #
  # val            : named numeric vector.
  # targetgenelist : character vector of names in `val` to highlight.
  # ...            : forwarded to plot().
  # Uses the file-level `colors` palette. Called for its plotting side effect.
  mindiffvalue=0;
  randval=val;
  for(i in 1:20){
    randval0=sample(val)
    vindex=sort(which(names(randval0) %in% targetgenelist))
    if(max(vindex)>0.9*length(val)){
      # print('pass...')
      next;
    }
    mindiffind=min(diff(vindex));
    if (mindiffind > mindiffvalue){
      mindiffvalue=mindiffind;
      randval=randval0;
      # print(paste('Diff: ',mindiffvalue))
    }
  }
  plot(randval,log='y',ylim=c(max(randval),min(randval)),pch=20,col='grey', ...)
  if(length(targetgenelist)>0){
    # 1-based cyclic palette indices; the original `i %% length(colors)`
    # produced the invalid subscript 0 when i was a multiple of length(colors),
    # and the text() call indexed colors[i] directly, diverging from the point
    # color past the palette length. Use one shared index for both.
    cidx=((seq_along(targetgenelist)-1) %% length(colors))+1
    for(i in seq_along(targetgenelist)){
      targetgene=targetgenelist[i];
      tx=which(names(randval)==targetgene);ty=randval[targetgene];
      points(tx,ty,col=colors[cidx[i]],cex=2,pch=20)
      text(tx+50,ty,targetgene,col=colors[cidx[i]])
    }
  }
}
# set.seed(1235)
# Rank plot of the per-gene scores in column `startindex` of the gene summary
# table (presumably the RRA score for this comparison -- confirm against the
# gene_summary.txt header), highlighting the genes in `targetgenelist`.
pvec=gstable[,startindex]
names(pvec)=gstable[,'id']
pvec=sort(pvec);
plotrankedvalues(pvec,targetgenelist,xlab='Genes',ylab='RRA score',main=paste('Distribution of RRA scores in \\n',samplelabel))
# plotrandvalues(pvec,targetgenelist,xlab='Genes',ylab='RRA score',main=paste('Distribution of RRA scores in \\n',samplelabel))
# Same rank plot for the adjacent column (presumably the p value).
pvec=gstable[,startindex+1]
names(pvec)=gstable[,'id']
pvec=sort(pvec);
plotrankedvalues(pvec,targetgenelist,xlab='Genes',ylab='p value',main=paste('Distribution of p values in \\n',samplelabel))
# plotrandvalues(pvec,targetgenelist,xlab='Genes',ylab='p value',main=paste('Distribution of p values in \\n',samplelabel))
# you need to write after this code:
# dev.off()
# parameters
# Do not modify the variables beginning with "__"
# Normalized read counts for each sgRNA targeting EGFP: one c(sample1, sample2)
# vector per sgRNA, samples ordered as in `collabel`.
targetmat=list(c(559.6683461882119,7871.353547818951),c(810.0019287452885,9524.534453786548),c(345.57834629091394,5925.639514996122),c(448.40897616284445,5269.283675749507),c(659.9703540141113,8780.910328797405),c(2903.7009828590208,8822.70077549101))
targetgene="EGFP"
collabel=c("P4_F1.fq","P4_F3.fq")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
# Add a pseudocount of 1 so zero counts stay finite on the log-scale y axis.
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
# One line per sgRNA: the first iteration draws the frame, later ones overlay.
for(i in seq_along(targetmat)){
  vali=targetmat[[i]]+1;
  # 1-based cyclic palette index; the original `i %% length(colors)` yields the
  # invalid subscript 0 whenever i is a multiple of length(colors).
  coli=colors[((i-1) %% length(colors))+1]
  if(i==1){
    plot(seq_along(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=coli,xaxt='n',log='y')
    axis(1,at=seq_along(vali),labels=(collabel),las=2)
    # lines(0:100,rep(1,101),col='black');
  }else{
    lines(seq_along(vali),vali,type='b',pch=20,col=coli)
  }
}
# parameters
# Do not modify the variables beginning with "__"
# Normalized read counts for each sgRNA targeting KRT20: one c(sample1, sample2)
# vector per sgRNA, samples ordered as in `collabel`.
targetmat=list(c(1045.1637790261789,5896.140376153577),c(1405.0709835779355,8201.98972901247),c(1041.792282964804,7168.2907387383075),c(526.7962595898078,2724.982950580046),c(1627.5897236286703,9117.692163916456),c(1274.425511199663,4678.071768113511))
targetgene="KRT20"
collabel=c("P4_F1.fq","P4_F3.fq")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
# Add a pseudocount of 1 so zero counts stay finite on the log-scale y axis.
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
# One line per sgRNA: the first iteration draws the frame, later ones overlay.
for(i in seq_along(targetmat)){
  vali=targetmat[[i]]+1;
  # 1-based cyclic palette index; the original `i %% length(colors)` yields the
  # invalid subscript 0 whenever i is a multiple of length(colors).
  coli=colors[((i-1) %% length(colors))+1]
  if(i==1){
    plot(seq_along(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=coli,xaxt='n',log='y')
    axis(1,at=seq_along(vali),labels=(collabel),las=2)
    # lines(0:100,rep(1,101),col='black');
  }else{
    lines(seq_along(vali),vali,type='b',pch=20,col=coli)
  }
}
# parameters
# Do not modify the variables beginning with "__"
# Normalized read counts for each sgRNA targeting PROM1: one c(sample1, sample2)
# vector per sgRNA, samples ordered as in `collabel`.
targetmat=list(c(5281.448580143578,6082.968255489692),c(7992.974287504237,7068.73114514472),c(5835.216808224383,5228.722359841008),c(8622.601176965974,7726.316115176441),c(6282.782910371884,5508.964178845181),c(12758.583970257474,11328.898446322184))
targetgene="PROM1"
collabel=c("P4_F1.fq","P4_F3.fq")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
# Add a pseudocount of 1 so zero counts stay finite on the log-scale y axis.
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
# One line per sgRNA: the first iteration draws the frame, later ones overlay.
for(i in seq_along(targetmat)){
  vali=targetmat[[i]]+1;
  # 1-based cyclic palette index; the original `i %% length(colors)` yields the
  # invalid subscript 0 whenever i is a multiple of length(colors).
  coli=colors[((i-1) %% length(colors))+1]
  if(i==1){
    plot(seq_along(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=coli,xaxt='n',log='y')
    axis(1,at=seq_along(vali),labels=(collabel),las=2)
    # lines(0:100,rep(1,101),col='black');
  }else{
    lines(seq_along(vali),vali,type='b',pch=20,col=coli)
  }
}
# parameters
# Do not modify the variables beginning with "__"
# Normalized read counts for each sgRNA targeting ROSA26: one c(sample1, sample2)
# vector per sgRNA, samples ordered as in `collabel`.
targetmat=list(c(16374.513496081916,14254.229714874511),c(6462.315075640091,5615.898557149405),c(8910.864090213518,7417.804288114829),c(5917.818461728066,5919.493861070591),c(5486.266965872095,4882.10747844111),c(9012.00897205476,7360.035141214847),c(8867.034641415645,6413.604436683211),c(4174.754997997309,3720.578886515922),c(2333.9181484866845,1971.5257793100557),c(5428.951532828723,5764.623382147233))
targetgene="ROSA26"
collabel=c("P4_F1.fq","P4_F3.fq")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
# Add a pseudocount of 1 so zero counts stay finite on the log-scale y axis.
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
# One line per sgRNA: the first iteration draws the frame, later ones overlay.
for(i in seq_along(targetmat)){
  vali=targetmat[[i]]+1;
  # 1-based cyclic palette index; the original `i %% length(colors)` yields the
  # invalid subscript 0 whenever i is a multiple of length(colors).
  coli=colors[((i-1) %% length(colors))+1]
  if(i==1){
    plot(seq_along(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=coli,xaxt='n',log='y')
    axis(1,at=seq_along(vali),labels=(collabel),las=2)
    # lines(0:100,rep(1,101),col='black');
  }else{
    lines(seq_along(vali),vali,type='b',pch=20,col=coli)
  }
}
# parameters
# Do not modify the variables beginning with "__"
# Normalized read counts for each sgRNA targeting LRIG1: one c(sample1, sample2)
# vector per sgRNA, samples ordered as in `collabel`.
targetmat=list(c(6886.280705357968,6010.449539168437),c(3776.0755887397427,3382.5679206117666),c(3469.2694471546383,3381.3387898266606),c(7381.890626380059,7223.601624068078),c(11194.20979777958,9924.001958946004),c(3973.3081083301668,3833.6589187456757))
targetgene="LRIG1"
collabel=c("P4_F1.fq","P4_F3.fq")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
# Add a pseudocount of 1 so zero counts stay finite on the log-scale y axis.
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
# One line per sgRNA: the first iteration draws the frame, later ones overlay.
for(i in seq_along(targetmat)){
  vali=targetmat[[i]]+1;
  # 1-based cyclic palette index; the original `i %% length(colors)` yields the
  # invalid subscript 0 whenever i is a multiple of length(colors).
  coli=colors[((i-1) %% length(colors))+1]
  if(i==1){
    plot(seq_along(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=coli,xaxt='n',log='y')
    axis(1,at=seq_along(vali),labels=(collabel),las=2)
    # lines(0:100,rep(1,101),col='black');
  }else{
    lines(seq_along(vali),vali,type='b',pch=20,col=coli)
  }
}
# parameters
# Do not modify the variables beginning with "__"
# Normalized read counts for each sgRNA targeting CTRL: one c(sample1, sample2)
# vector per sgRNA, samples ordered as in `collabel`.
targetmat=list(c(4771.509800860644,3654.2058241201967),c(4431.831572677135,4321.623840432766),c(16230.382039458143,13789.618278104435),c(2535.3650381538273,2641.4020571928368),c(4707.4513756945225,4631.364798279483),c(10174.332239213712,9003.383000901595),c(6085.55039078146,4991.500118315546),c(5584.040351651963,5414.321108392017),c(4715.037241832616,4316.707317292342),c(7826.928106481529,8145.449712897594))
targetgene="CTRL"
collabel=c("P4_F1.fq","P4_F3.fq")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
# Add a pseudocount of 1 so zero counts stay finite on the log-scale y axis.
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
# One line per sgRNA: the first iteration draws the frame, later ones overlay.
for(i in seq_along(targetmat)){
  vali=targetmat[[i]]+1;
  # 1-based cyclic palette index; the original `i %% length(colors)` yields the
  # invalid subscript 0 whenever i is a multiple of length(colors).
  coli=colors[((i-1) %% length(colors))+1]
  if(i==1){
    plot(seq_along(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=coli,xaxt='n',log='y')
    axis(1,at=seq_along(vali),labels=(collabel),las=2)
    # lines(0:100,rep(1,101),col='black');
  }else{
    lines(seq_along(vali),vali,type='b',pch=20,col=coli)
  }
}
# parameters
# Do not modify the variables beginning with "__"
# Normalized read counts for each sgRNA targeting CCR5: one c(sample1, sample2)
# vector per sgRNA, samples ordered as in `collabel`.
targetmat=list(c(4532.976454518379,4544.096512536955),c(7012.711807659522,6096.488694125858),c(3428.811494418141,3425.5874980904773),c(3503.82728178373,3180.9904718543794),c(4770.6669268453,4058.5898524200775),c(5021.84338341772,4005.7372286605187),c(11868.509010054535,9386.871805854673),c(7946.6162166603335,6219.40177263646),c(3151.5059433700662,3055.619131773565),c(5365.735981677947,5213.972790419736))
targetgene="CCR5"
collabel=c("P4_F1.fq","P4_F3.fq")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
# Add a pseudocount of 1 so zero counts stay finite on the log-scale y axis.
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
# One line per sgRNA: the first iteration draws the frame, later ones overlay.
for(i in seq_along(targetmat)){
  vali=targetmat[[i]]+1;
  # 1-based cyclic palette index; the original `i %% length(colors)` yields the
  # invalid subscript 0 whenever i is a multiple of length(colors).
  coli=colors[((i-1) %% length(colors))+1]
  if(i==1){
    plot(seq_along(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=coli,xaxt='n',log='y')
    axis(1,at=seq_along(vali),labels=(collabel),las=2)
    # lines(0:100,rep(1,101),col='black');
  }else{
    lines(seq_along(vali),vali,type='b',pch=20,col=coli)
  }
}
# parameters
# Do not modify the variables beginning with "__"
# Normalized read counts for each sgRNA targeting mKate2: one c(sample1, sample2)
# vector per sgRNA, samples ordered as in `collabel`.
targetmat=list(c(3335.2524787149914,3187.1361257799094),c(4323.100824697799,4101.609429898788),c(4586.077517485031,3924.614596843521),c(4184.869486181434,3882.8241501499165),c(3847.7198800439564,3547.271445815973),c(7451.006295638243,6631.160585646977))
targetgene="mKate2"
collabel=c("P4_F1.fq","P4_F3.fq")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
# Add a pseudocount of 1 so zero counts stay finite on the log-scale y axis.
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
# One line per sgRNA: the first iteration draws the frame, later ones overlay.
for(i in seq_along(targetmat)){
  vali=targetmat[[i]]+1;
  # 1-based cyclic palette index; the original `i %% length(colors)` yields the
  # invalid subscript 0 whenever i is a multiple of length(colors).
  coli=colors[((i-1) %% length(colors))+1]
  if(i==1){
    plot(seq_along(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=coli,xaxt='n',log='y')
    axis(1,at=seq_along(vali),labels=(collabel),las=2)
    # lines(0:100,rep(1,101),col='black');
  }else{
    lines(seq_along(vali),vali,type='b',pch=20,col=coli)
  }
}
# parameters
# Do not modify the variables beginning with "__"
# Normalized read counts for each sgRNA targeting AAVS1: one c(sample1, sample2)
# vector per sgRNA, samples ordered as in `collabel`.
targetmat=list(c(7149.257398145201,6239.067865198156),c(5926.247201881502,4391.684295183809),c(5578.140233544557,4477.72345014123),c(3147.291573293348,2415.241992733329),c(5473.623855641939,4823.109200756022),c(2152.700235187791,2026.8366646398267),c(4420.8742104776675,3414.5253210245232),c(7887.615035586275,6798.322372421396),c(2309.474802041718,2252.9967290993345),c(2572.4514948289498,2346.4106687673916))
targetgene="AAVS1"
collabel=c("P4_F1.fq","P4_F3.fq")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
# Add a pseudocount of 1 so zero counts stay finite on the log-scale y axis.
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
# One line per sgRNA: the first iteration draws the frame, later ones overlay.
for(i in seq_along(targetmat)){
  vali=targetmat[[i]]+1;
  # 1-based cyclic palette index; the original `i %% length(colors)` yields the
  # invalid subscript 0 whenever i is a multiple of length(colors).
  coli=colors[((i-1) %% length(colors))+1]
  if(i==1){
    plot(seq_along(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=coli,xaxt='n',log='y')
    axis(1,at=seq_along(vali),labels=(collabel),las=2)
    # lines(0:100,rep(1,101),col='black');
  }else{
    lines(seq_along(vali),vali,type='b',pch=20,col=coli)
  }
}
# parameters
# Do not modify the variables beginning with "__"
# Normalized read counts for each sgRNA targeting SOX9: one c(sample1, sample2)
# vector per sgRNA, samples ordered as in `collabel`.
targetmat=list(c(7121.442555638859,1948.1722943930413),c(6088.079012827491,902.1819962678186),c(6364.5416898602225,1424.562579937877),c(3441.4546046482965,830.8924107316694),c(8018.260507964547,1822.8009543122273),c(12232.63058468301,4861.212255094309))
targetgene="SOX9"
collabel=c("P4_F1.fq","P4_F3.fq")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
# Add a pseudocount of 1 so zero counts stay finite on the log-scale y axis.
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
# One line per sgRNA: the first iteration draws the frame, later ones overlay.
for(i in seq_along(targetmat)){
  vali=targetmat[[i]]+1;
  # 1-based cyclic palette index; the original `i %% length(colors)` yields the
  # invalid subscript 0 whenever i is a multiple of length(colors).
  coli=colors[((i-1) %% length(colors))+1]
  if(i==1){
    plot(seq_along(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=coli,xaxt='n',log='y')
    axis(1,at=seq_along(vali),labels=(collabel),las=2)
    # lines(0:100,rep(1,101),col='black');
  }else{
    lines(seq_along(vali),vali,type='b',pch=20,col=coli)
  }
}
# Close the PDF graphics device receiving all of the plots above.
dev.off()
# Knit the companion Sweave report and compile the resulting .tex to PDF.
Sweave("P4_46_day7_summary.Rnw");
library(tools);
texi2dvi("P4_46_day7_summary.tex",pdf=TRUE);
| /P4_46_day7/P4_46_day7.R | no_license | davidchen0420/Miniscreen | R | false | false | 36,455 | r | pdf(file='P4_46_day7.pdf',width=4.5,height=4.5);
gstable=read.table('P4_46_day7.gene_summary.txt',header=T)
#
#
#
# parameters
# Do not modify the variables beginning with "__"
# gstablename='__GENE_SUMMARY_FILE__'
# Column of the gene summary table where this comparison's scores begin
# (presumably the negative-selection RRA score column; the p value is read
# from startindex+1 below -- TODO confirm against the gene_summary.txt header).
startindex=3
# outputfile='__OUTPUT_FILE__'
# Genes highlighted in the rank plots drawn by plotrankedvalues().
targetgenelist=c("SOX9","AAVS1","PROM1","ROSA26","CCR5","CTRL","LRIG1","mKate2","KRT20","EGFP")
# samplelabel=sub('.\\w+.\\w+$','',colnames(gstable)[startindex]);
# Human-readable comparison label used in plot titles.
samplelabel='6_vs_4 neg.'
# You need to write some codes in front of this code:
# gstable=read.table(gstablename,header=T)
# pdf(file=outputfile,width=6,height=6)
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
# Fixed 28-color palette (hard-coded in place of the RColorBrewer call above)
# shared by all plotting code in this script.
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
######
# function definition
plotrankedvalues<-function(val, tglist, ...){
  # Draw a sorted, named numeric vector as a rank curve on a reversed log-scale
  # y axis and highlight the entries named in `tglist` with colored points.
  #
  # val    : named numeric vector, already sorted by the caller.
  # tglist : character vector of names in `val` to highlight.
  # ...    : forwarded to plot() (xlab, ylab, main, ...).
  # Uses the file-level `colors` palette. Called for its plotting side effect.
  plot(val,log='y',ylim=c(max(val),min(val)),type='l',lwd=2, ...)
  if(length(tglist)>0){
    # 1-based cyclic palette indices; the original `i %% length(colors)`
    # produced the invalid subscript 0 when i was a multiple of length(colors).
    cidx=((seq_along(tglist)-1) %% length(colors))+1
    for(i in seq_along(tglist)){
      targetgene=tglist[i];
      tx=which(names(val)==targetgene);ty=val[targetgene];
      points(tx,ty,col=colors[cidx[i]],cex=2,pch=20)
      # text(tx+50,ty,targetgene,col=colors[i])
    }
    # Reuse the same cyclic indices so legend colors always match the points
    # (the original passed the whole palette and relied on recycling).
    legend('topright',tglist,pch=20,pt.cex = 2,cex=1,col=colors[cidx])
  }
}
plotrandvalues<-function(val,targetgenelist, ...){
  # Scatter-plot a randomly permuted copy of `val` (reversed log-scale y axis)
  # and highlight+label the genes in `targetgenelist`. Among 20 random
  # permutations, keep the one where the highlighted genes are most evenly
  # spread (largest minimum gap between their positions), rejecting layouts
  # that push a highlighted gene into the last 10% of the axis.
  #
  # val            : named numeric vector.
  # targetgenelist : character vector of names in `val` to highlight.
  # ...            : forwarded to plot().
  # Uses the file-level `colors` palette. Called for its plotting side effect.
  mindiffvalue=0;
  randval=val;
  for(i in 1:20){
    randval0=sample(val)
    vindex=sort(which(names(randval0) %in% targetgenelist))
    if(max(vindex)>0.9*length(val)){
      # print('pass...')
      next;
    }
    mindiffind=min(diff(vindex));
    if (mindiffind > mindiffvalue){
      mindiffvalue=mindiffind;
      randval=randval0;
      # print(paste('Diff: ',mindiffvalue))
    }
  }
  plot(randval,log='y',ylim=c(max(randval),min(randval)),pch=20,col='grey', ...)
  if(length(targetgenelist)>0){
    # 1-based cyclic palette indices; the original `i %% length(colors)`
    # produced the invalid subscript 0 when i was a multiple of length(colors),
    # and the text() call indexed colors[i] directly, diverging from the point
    # color past the palette length. Use one shared index for both.
    cidx=((seq_along(targetgenelist)-1) %% length(colors))+1
    for(i in seq_along(targetgenelist)){
      targetgene=targetgenelist[i];
      tx=which(names(randval)==targetgene);ty=randval[targetgene];
      points(tx,ty,col=colors[cidx[i]],cex=2,pch=20)
      text(tx+50,ty,targetgene,col=colors[cidx[i]])
    }
  }
}
# set.seed(1235)
# Rank plot of the per-gene scores in column `startindex` of the gene summary
# table (presumably the RRA score for this comparison -- confirm against the
# gene_summary.txt header), highlighting the genes in `targetgenelist`.
pvec=gstable[,startindex]
names(pvec)=gstable[,'id']
pvec=sort(pvec);
plotrankedvalues(pvec,targetgenelist,xlab='Genes',ylab='RRA score',main=paste('Distribution of RRA scores in \\n',samplelabel))
# plotrandvalues(pvec,targetgenelist,xlab='Genes',ylab='RRA score',main=paste('Distribution of RRA scores in \\n',samplelabel))
# Same rank plot for the adjacent column (presumably the p value).
pvec=gstable[,startindex+1]
names(pvec)=gstable[,'id']
pvec=sort(pvec);
plotrankedvalues(pvec,targetgenelist,xlab='Genes',ylab='p value',main=paste('Distribution of p values in \\n',samplelabel))
# plotrandvalues(pvec,targetgenelist,xlab='Genes',ylab='p value',main=paste('Distribution of p values in \\n',samplelabel))
# you need to write after this code:
# dev.off()
# parameters
# Do not modify the variables beginning with "__"
# Normalized read counts for each sgRNA targeting SOX9: one c(sample1, sample2)
# vector per sgRNA, samples ordered as in `collabel`.
targetmat=list(c(7121.442555638859,1948.1722943930413),c(6088.079012827491,902.1819962678186),c(6364.5416898602225,1424.562579937877),c(3441.4546046482965,830.8924107316694),c(8018.260507964547,1822.8009543122273),c(12232.63058468301,4861.212255094309))
targetgene="SOX9"
collabel=c("P4_F1.fq","P4_F3.fq")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
# Add a pseudocount of 1 so zero counts stay finite on the log-scale y axis.
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
# One line per sgRNA: the first iteration draws the frame, later ones overlay.
for(i in seq_along(targetmat)){
  vali=targetmat[[i]]+1;
  # 1-based cyclic palette index; the original `i %% length(colors)` yields the
  # invalid subscript 0 whenever i is a multiple of length(colors).
  coli=colors[((i-1) %% length(colors))+1]
  if(i==1){
    plot(seq_along(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=coli,xaxt='n',log='y')
    axis(1,at=seq_along(vali),labels=(collabel),las=2)
    # lines(0:100,rep(1,101),col='black');
  }else{
    lines(seq_along(vali),vali,type='b',pch=20,col=coli)
  }
}
# parameters
# Do not modify the variables beginning with "__"
# Normalized read counts for each sgRNA targeting AAVS1: one c(sample1, sample2)
# vector per sgRNA, samples ordered as in `collabel`.
targetmat=list(c(7149.257398145201,6239.067865198156),c(5926.247201881502,4391.684295183809),c(5578.140233544557,4477.72345014123),c(3147.291573293348,2415.241992733329),c(5473.623855641939,4823.109200756022),c(2152.700235187791,2026.8366646398267),c(4420.8742104776675,3414.5253210245232),c(7887.615035586275,6798.322372421396),c(2309.474802041718,2252.9967290993345),c(2572.4514948289498,2346.4106687673916))
targetgene="AAVS1"
collabel=c("P4_F1.fq","P4_F3.fq")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
# Add a pseudocount of 1 so zero counts stay finite on the log-scale y axis.
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
# One line per sgRNA: the first iteration draws the frame, later ones overlay.
for(i in seq_along(targetmat)){
  vali=targetmat[[i]]+1;
  # 1-based cyclic palette index; the original `i %% length(colors)` yields the
  # invalid subscript 0 whenever i is a multiple of length(colors).
  coli=colors[((i-1) %% length(colors))+1]
  if(i==1){
    plot(seq_along(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=coli,xaxt='n',log='y')
    axis(1,at=seq_along(vali),labels=(collabel),las=2)
    # lines(0:100,rep(1,101),col='black');
  }else{
    lines(seq_along(vali),vali,type='b',pch=20,col=coli)
  }
}
# parameters
# Do not modify the variables beginning with "__"
# Normalized read counts for each sgRNA targeting PROM1: one c(sample1, sample2)
# vector per sgRNA, samples ordered as in `collabel`.
targetmat=list(c(5281.448580143578,6082.968255489692),c(7992.974287504237,7068.73114514472),c(5835.216808224383,5228.722359841008),c(8622.601176965974,7726.316115176441),c(6282.782910371884,5508.964178845181),c(12758.583970257474,11328.898446322184))
targetgene="PROM1"
collabel=c("P4_F1.fq","P4_F3.fq")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
# Add a pseudocount of 1 so zero counts stay finite on the log-scale y axis.
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
# One line per sgRNA: the first iteration draws the frame, later ones overlay.
for(i in seq_along(targetmat)){
  vali=targetmat[[i]]+1;
  # 1-based cyclic palette index; the original `i %% length(colors)` yields the
  # invalid subscript 0 whenever i is a multiple of length(colors).
  coli=colors[((i-1) %% length(colors))+1]
  if(i==1){
    plot(seq_along(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=coli,xaxt='n',log='y')
    axis(1,at=seq_along(vali),labels=(collabel),las=2)
    # lines(0:100,rep(1,101),col='black');
  }else{
    lines(seq_along(vali),vali,type='b',pch=20,col=coli)
  }
}
# parameters
# Do not modify the variables beginning with "__"
# Normalized read counts for each sgRNA targeting ROSA26: one c(sample1, sample2)
# vector per sgRNA, samples ordered as in `collabel`.
targetmat=list(c(16374.513496081916,14254.229714874511),c(6462.315075640091,5615.898557149405),c(8910.864090213518,7417.804288114829),c(5917.818461728066,5919.493861070591),c(5486.266965872095,4882.10747844111),c(9012.00897205476,7360.035141214847),c(8867.034641415645,6413.604436683211),c(4174.754997997309,3720.578886515922),c(2333.9181484866845,1971.5257793100557),c(5428.951532828723,5764.623382147233))
targetgene="ROSA26"
collabel=c("P4_F1.fq","P4_F3.fq")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
# Add a pseudocount of 1 so zero counts stay finite on the log-scale y axis.
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
# One line per sgRNA: the first iteration draws the frame, later ones overlay.
for(i in seq_along(targetmat)){
  vali=targetmat[[i]]+1;
  # 1-based cyclic palette index; the original `i %% length(colors)` yields the
  # invalid subscript 0 whenever i is a multiple of length(colors).
  coli=colors[((i-1) %% length(colors))+1]
  if(i==1){
    plot(seq_along(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=coli,xaxt='n',log='y')
    axis(1,at=seq_along(vali),labels=(collabel),las=2)
    # lines(0:100,rep(1,101),col='black');
  }else{
    lines(seq_along(vali),vali,type='b',pch=20,col=coli)
  }
}
# parameters
# Do not modify the variables beginning with "__"
# Normalized read counts for each sgRNA targeting CCR5: one c(sample1, sample2)
# vector per sgRNA, samples ordered as in `collabel`.
targetmat=list(c(4532.976454518379,4544.096512536955),c(7012.711807659522,6096.488694125858),c(3428.811494418141,3425.5874980904773),c(3503.82728178373,3180.9904718543794),c(4770.6669268453,4058.5898524200775),c(5021.84338341772,4005.7372286605187),c(11868.509010054535,9386.871805854673),c(7946.6162166603335,6219.40177263646),c(3151.5059433700662,3055.619131773565),c(5365.735981677947,5213.972790419736))
targetgene="CCR5"
collabel=c("P4_F1.fq","P4_F3.fq")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
# Add a pseudocount of 1 so zero counts stay finite on the log-scale y axis.
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
# One line per sgRNA: the first iteration draws the frame, later ones overlay.
for(i in seq_along(targetmat)){
  vali=targetmat[[i]]+1;
  # 1-based cyclic palette index; the original `i %% length(colors)` yields the
  # invalid subscript 0 whenever i is a multiple of length(colors).
  coli=colors[((i-1) %% length(colors))+1]
  if(i==1){
    plot(seq_along(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=coli,xaxt='n',log='y')
    axis(1,at=seq_along(vali),labels=(collabel),las=2)
    # lines(0:100,rep(1,101),col='black');
  }else{
    lines(seq_along(vali),vali,type='b',pch=20,col=coli)
  }
}
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(4771.509800860644,3654.2058241201967),c(4431.831572677135,4321.623840432766),c(16230.382039458143,13789.618278104435),c(2535.3650381538273,2641.4020571928368),c(4707.4513756945225,4631.364798279483),c(10174.332239213712,9003.383000901595),c(6085.55039078146,4991.500118315546),c(5584.040351651963,5414.321108392017),c(4715.037241832616,4316.707317292342),c(7826.928106481529,8145.449712897594))
targetgene="CTRL"
collabel=c("P4_F1.fq","P4_F3.fq")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(6886.280705357968,6010.449539168437),c(3776.0755887397427,3382.5679206117666),c(3469.2694471546383,3381.3387898266606),c(7381.890626380059,7223.601624068078),c(11194.20979777958,9924.001958946004),c(3973.3081083301668,3833.6589187456757))
targetgene="LRIG1"
collabel=c("P4_F1.fq","P4_F3.fq")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(3335.2524787149914,3187.1361257799094),c(4323.100824697799,4101.609429898788),c(4586.077517485031,3924.614596843521),c(4184.869486181434,3882.8241501499165),c(3847.7198800439564,3547.271445815973),c(7451.006295638243,6631.160585646977))
targetgene="mKate2"
collabel=c("P4_F1.fq","P4_F3.fq")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(1045.1637790261789,5896.140376153577),c(1405.0709835779355,8201.98972901247),c(1041.792282964804,7168.2907387383075),c(526.7962595898078,2724.982950580046),c(1627.5897236286703,9117.692163916456),c(1274.425511199663,4678.071768113511))
targetgene="KRT20"
collabel=c("P4_F1.fq","P4_F3.fq")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(559.6683461882119,7871.353547818951),c(810.0019287452885,9524.534453786548),c(345.57834629091394,5925.639514996122),c(448.40897616284445,5269.283675749507),c(659.9703540141113,8780.910328797405),c(2903.7009828590208,8822.70077549101))
targetgene="EGFP"
collabel=c("P4_F1.fq","P4_F3.fq")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
#
#
# parameters
# Do not modify the variables beginning with "__"
# gstablename='__GENE_SUMMARY_FILE__'
# column index of the first score column (RRA score) in the gene summary table;
# startindex+1 is expected to hold the corresponding p value column
startindex=9
# outputfile='__OUTPUT_FILE__'
# genes to highlight on the ranked-score plots below
targetgenelist=c("EGFP","KRT20","PROM1","ROSA26","LRIG1","CTRL","CCR5","mKate2","AAVS1","SOX9")
# samplelabel=sub('.\\w+.\\w+$','',colnames(gstable)[startindex]);
# comparison label used in plot titles
samplelabel='6_vs_4 pos.'
# You need to write some codes in front of this code:
# gstable=read.table(gstablename,header=T)
# pdf(file=outputfile,width=6,height=6)
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
######
# function definition
# Plot ranked gene-level values (e.g. RRA scores or p values) on a reversed
# log-scale y axis and highlight the genes in tglist with colored points
# plus a legend.
#
# val    : named numeric vector (names are gene ids), pre-sorted by the caller
# tglist : character vector of gene ids to highlight (may be empty)
# ...    : forwarded to plot() (xlab, ylab, main, ...)
# Relies on the script-level 'colors' palette.
# Called for its plotting side effect; returns nothing useful.
plotrankedvalues<-function(val, tglist, ...){
  # ylim is reversed (max first) so the smallest -- most significant -- values
  # appear at the top of the plot
  plot(val,log='y',ylim=c(max(val),min(val)),type='l',lwd=2, ...)
  if(length(tglist)>0){
    for(i in seq_along(tglist)){
      targetgene=tglist[i];
      tx=which(names(val)==targetgene);ty=val[targetgene];
      # 1-based cyclic palette index: the original (i %% length(colors))
      # evaluates to 0 -- selecting an empty color vector and erroring --
      # whenever i is a multiple of length(colors)
      points(tx,ty,col=colors[((i-1) %% length(colors))+1],cex=2,pch=20)
      # text(tx+50,ty,targetgene,col=colors[i])
    }
    # legend colors must cycle exactly like the point colors above
    legend('topright',tglist,pch=20,pt.cex = 2,cex=1,
           col=colors[((seq_along(tglist)-1) %% length(colors))+1])
  }
}
# Scatter-plot the values in random order, choosing (out of 20 shuffles) the
# permutation that spreads the target genes furthest apart, then label the
# target genes. Used as an alternative visualization to plotrankedvalues().
#
# val            : named numeric vector (names are gene ids)
# targetgenelist : character vector of gene ids to highlight (may be empty)
# ...            : forwarded to plot()
# Relies on the script-level 'colors' palette.
# Called for its plotting side effect; returns nothing useful.
plotrandvalues<-function(val,targetgenelist, ...){
  # choose the shuffle with the best (largest minimum) gap between targets
  mindiffvalue=0;
  randval=val;
  for(i in 1:20){
    randval0=sample(val)
    vindex=sort(which(names(randval0) %in% targetgenelist))
    # reject shuffles that push a target into the right-most 10% of the plot;
    # guard on length avoids max(numeric(0)) == -Inf plus its warning
    if(length(vindex)>0 && max(vindex)>0.9*length(val)){
      # print('pass...')
      next;
    }
    # minimum gap between consecutive targets; with fewer than two targets
    # diff() is empty, so treat the gap as Inf (same value the original
    # min(numeric(0)) produced, but without the warning)
    mindiffind = if (length(vindex)>1) min(diff(vindex)) else Inf;
    if (mindiffind > mindiffvalue){
      mindiffvalue=mindiffind;
      randval=randval0;
      # print(paste('Diff: ',mindiffvalue))
    }
  }
  # reversed ylim: smallest values at the top, matching plotrankedvalues()
  plot(randval,log='y',ylim=c(max(randval),min(randval)),pch=20,col='grey', ...)
  if(length(targetgenelist)>0){
    for(i in seq_along(targetgenelist)){
      targetgene=targetgenelist[i];
      tx=which(names(randval)==targetgene);ty=randval[targetgene];
      # 1-based cyclic palette index: the original (i %% length(colors))
      # selects colors[0] (empty) when i is a multiple of length(colors)
      points(tx,ty,col=colors[((i-1) %% length(colors))+1],cex=2,pch=20)
      # use the same cyclic index for the label; the original colors[i]
      # returns NA once i exceeds length(colors)
      text(tx+50,ty,targetgene,col=colors[((i-1) %% length(colors))+1])
    }
  }
}
# set.seed(1235)
# plot 1: ranked RRA scores (column startindex of the gene summary table)
pvec=gstable[,startindex]
names(pvec)=gstable[,'id']
pvec=sort(pvec);
plotrankedvalues(pvec,targetgenelist,xlab='Genes',ylab='RRA score',main=paste('Distribution of RRA scores in \\n',samplelabel))
# plotrandvalues(pvec,targetgenelist,xlab='Genes',ylab='RRA score',main=paste('Distribution of RRA scores in \\n',samplelabel))
# plot 2: ranked p values (column startindex+1 of the gene summary table)
pvec=gstable[,startindex+1]
names(pvec)=gstable[,'id']
pvec=sort(pvec);
plotrankedvalues(pvec,targetgenelist,xlab='Genes',ylab='p value',main=paste('Distribution of p values in \\n',samplelabel))
# plotrandvalues(pvec,targetgenelist,xlab='Genes',ylab='p value',main=paste('Distribution of p values in \\n',samplelabel))
# you need to write after this code:
# dev.off()
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(559.6683461882119,7871.353547818951),c(810.0019287452885,9524.534453786548),c(345.57834629091394,5925.639514996122),c(448.40897616284445,5269.283675749507),c(659.9703540141113,8780.910328797405),c(2903.7009828590208,8822.70077549101))
targetgene="EGFP"
collabel=c("P4_F1.fq","P4_F3.fq")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(1045.1637790261789,5896.140376153577),c(1405.0709835779355,8201.98972901247),c(1041.792282964804,7168.2907387383075),c(526.7962595898078,2724.982950580046),c(1627.5897236286703,9117.692163916456),c(1274.425511199663,4678.071768113511))
targetgene="KRT20"
collabel=c("P4_F1.fq","P4_F3.fq")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(5281.448580143578,6082.968255489692),c(7992.974287504237,7068.73114514472),c(5835.216808224383,5228.722359841008),c(8622.601176965974,7726.316115176441),c(6282.782910371884,5508.964178845181),c(12758.583970257474,11328.898446322184))
targetgene="PROM1"
collabel=c("P4_F1.fq","P4_F3.fq")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(16374.513496081916,14254.229714874511),c(6462.315075640091,5615.898557149405),c(8910.864090213518,7417.804288114829),c(5917.818461728066,5919.493861070591),c(5486.266965872095,4882.10747844111),c(9012.00897205476,7360.035141214847),c(8867.034641415645,6413.604436683211),c(4174.754997997309,3720.578886515922),c(2333.9181484866845,1971.5257793100557),c(5428.951532828723,5764.623382147233))
targetgene="ROSA26"
collabel=c("P4_F1.fq","P4_F3.fq")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(6886.280705357968,6010.449539168437),c(3776.0755887397427,3382.5679206117666),c(3469.2694471546383,3381.3387898266606),c(7381.890626380059,7223.601624068078),c(11194.20979777958,9924.001958946004),c(3973.3081083301668,3833.6589187456757))
targetgene="LRIG1"
collabel=c("P4_F1.fq","P4_F3.fq")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(4771.509800860644,3654.2058241201967),c(4431.831572677135,4321.623840432766),c(16230.382039458143,13789.618278104435),c(2535.3650381538273,2641.4020571928368),c(4707.4513756945225,4631.364798279483),c(10174.332239213712,9003.383000901595),c(6085.55039078146,4991.500118315546),c(5584.040351651963,5414.321108392017),c(4715.037241832616,4316.707317292342),c(7826.928106481529,8145.449712897594))
targetgene="CTRL"
collabel=c("P4_F1.fq","P4_F3.fq")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(4532.976454518379,4544.096512536955),c(7012.711807659522,6096.488694125858),c(3428.811494418141,3425.5874980904773),c(3503.82728178373,3180.9904718543794),c(4770.6669268453,4058.5898524200775),c(5021.84338341772,4005.7372286605187),c(11868.509010054535,9386.871805854673),c(7946.6162166603335,6219.40177263646),c(3151.5059433700662,3055.619131773565),c(5365.735981677947,5213.972790419736))
targetgene="CCR5"
collabel=c("P4_F1.fq","P4_F3.fq")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(3335.2524787149914,3187.1361257799094),c(4323.100824697799,4101.609429898788),c(4586.077517485031,3924.614596843521),c(4184.869486181434,3882.8241501499165),c(3847.7198800439564,3547.271445815973),c(7451.006295638243,6631.160585646977))
targetgene="mKate2"
collabel=c("P4_F1.fq","P4_F3.fq")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(7149.257398145201,6239.067865198156),c(5926.247201881502,4391.684295183809),c(5578.140233544557,4477.72345014123),c(3147.291573293348,2415.241992733329),c(5473.623855641939,4823.109200756022),c(2152.700235187791,2026.8366646398267),c(4420.8742104776675,3414.5253210245232),c(7887.615035586275,6798.322372421396),c(2309.474802041718,2252.9967290993345),c(2572.4514948289498,2346.4106687673916))
targetgene="AAVS1"
collabel=c("P4_F1.fq","P4_F3.fq")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(7121.442555638859,1948.1722943930413),c(6088.079012827491,902.1819962678186),c(6364.5416898602225,1424.562579937877),c(3441.4546046482965,830.8924107316694),c(8018.260507964547,1822.8009543122273),c(12232.63058468301,4861.212255094309))
targetgene="SOX9"
collabel=c("P4_F1.fq","P4_F3.fq")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# close the PDF graphics device opened earlier, then build the summary report
dev.off()
# weave the Rnw report into a .tex file
Sweave("P4_46_day7_summary.Rnw");
library(tools);
# compile the woven .tex into the final PDF report
texi2dvi("P4_46_day7_summary.tex",pdf=TRUE);
|
\name{ultraFastAnc}
\alias{ultraFastAnc}
\title{
Ultra-fast maximum likelihood ancestral state reconstruction
}
\description{
This function performs ancestral state reconstruction using a fast algorithm based on phylogenetically independent contrasts (Felsenstein 1985). Contrary to \link[phytools]{fastAnc}, which reroots the tree at each internal node and recalculates the root state for each tree, this algorithm only requires a single tree traversal (Goolsby, In Review). This function is several hundred to several thousand times faster than the rerooting method used in \link[phytools]{fastAnc}, which is the next-fastest maximum likelihood ancestral state reconstruction method currently described (Revell 2012). As in the \link[phytools]{fastAnc} function, the variance and 95\% intervals of estimates are optionally returned.
NOTE: trees with polytomies are supported but (currently) slow down the algorithm considerably.
}
\usage{
ultraFastAnc(phy, x, vars = FALSE, CI = FALSE)
}
\arguments{
\item{phy}{
A tree of class 'phylo'
}
\item{x}{
A named vector of trait values
}
\item{vars}{
Whether to return the variances of the (restricted) maximum likelihood estimates
}
\item{CI}{
Whether to return 95\% confidence intervals of the (restricted) maximum likelihood estimates
}
}
\value{
A named vector of maximum likelihood ancestral states (with names corresponding to node number). If vars or CI is set to TRUE, a list is returned with these values included.
}
\references{
Goolsby E.W. (In Review). "Ultra-fast ancestral state reconstruction of continuous characters: a rerooting-free maximum likelihood approach."
Felsenstein, J. (1985) Phylogenies and the comparative method. American Naturalist, 125, 1-15.
Revell, L. J. (2012) phytools: An R package for phylogenetic comparative biology (and other things). Methods Ecol. Evol., 3, 217-223.
}
\author{
Eric W. Goolsby
}
\seealso{
\link[phytools]{fastAnc}, \link[ape]{ace}, \link[ape]{pic}
}
\examples{
require(ape)
tree <- rtree(1e4) # random tree with 10,000 taxa
x <- setNames(rnorm(1e4),tree$tip.label) # random trait data
recon <- ultraFastAnc(phy=tree,x=x,CI=TRUE)
} | /man/ultraFastAnc.Rd | no_license | EddiePerochon/phylocurve | R | false | false | 2,146 | rd | \name{ultraFastAnc}
\alias{ultraFastAnc}
\title{
Ultra-fast maximum likelihood ancestral state reconstruction
}
\description{
This function performs ancestral state reconstruction using a fast algorithm based on phylogenetically independent contrasts (Felsenstein 1985). Contrary to \link[phytools]{fastAnc}, which reroots the tree at each internal node and recalculates the root state for each tree, this algorithm only requires a single tree traversal (Goolsby, In Review). This function is several hundred to several thousand times faster than the rerooting method used in \link[phytools]{fastAnc}, which is the next-fastest maximum likelihood ancestral state reconstruction method currently described (Revell 2012). As in the \link[phytools]{fastAnc} function, the variance and 95\% intervals of estimates are optionally returned.
NOTE: trees with polytomies are supported but (currently) slow down the algorithm considerably.
}
\usage{
ultraFastAnc(phy, x, vars = FALSE, CI = FALSE)
}
\arguments{
\item{phy}{
A tree of class 'phylo'
}
\item{x}{
A named vector of trait values
}
\item{vars}{
Whether to return the variances of the (restricted) maximum likelihood estimates
}
\item{CI}{
Whether to return 95\% confidence intervals of the (restricted) maximum likelihood estimates
}
}
\value{
A named vector of maximum likelihood ancestral states (with names corresponding to node number). If vars or CI is set to TRUE, a list is returned with these values included.
}
\references{
Goolsby E.W. (In Review). "Ultra-fast ancestral state reconstruction of continuous characters: a rerooting-free maximum likelihood approach."
Felsenstein, J. (1985) Phylogenies and the comparative method. American Naturalist, 125, 1-15.
Revell, L. J. (2012) phytools: An R package for phylogenetic comparative biology (and other things). Methods Ecol. Evol., 3, 217-223.
}
\author{
Eric W. Goolsby
}
\seealso{
\link[phytools]{fastAnc}, \link[ape]{ace}, \link[ape]{pic}
}
\examples{
require(ape)
tree <- rtree(1e4) # random tree with 10,000 taxa
x <- setNames(rnorm(1e4),tree$tip.label) # random trait data
recon <- ultraFastAnc(phy=tree,x=x,CI=TRUE)
} |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/meeting.R
\name{extract_property}
\alias{extract_property}
\title{Extract properties from an object}
\usage{
extract_property(obj, arr, propName)
}
\arguments{
\item{obj}{Object to traverse}
\item{arr}{Array of property values to look at e.g. meetings$12029, meetings$12030}
\item{propName}{Property to extract from each traversed object}
}
\description{
Extract properties from an object
}
\examples{
extract_property( all_meetings, meetingIds, 'name' )
# pseudo-code: extract_property( { '12345': { 'name': 'x' } }, [ 12345 ], 'name' ) -> [ x ]
}
\keyword{json}
\keyword{utils}
| /man/extract_property.Rd | no_license | jkadcav/meeting | R | false | true | 650 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/meeting.R
\name{extract_property}
\alias{extract_property}
\title{Extract properties from an object}
\usage{
extract_property(obj, arr, propName)
}
\arguments{
\item{obj}{Object to traverse}
\item{arr}{Array of property values to look at e.g. meetings$12029, meetings$12030}
\item{propertyName}{Property to extract from each traversed object}
}
\description{
Extract properties from an object
}
\examples{
extract_property( all_meetings, meetingIds, 'name' )
extract_property( { '12345': { 'name': 'x' } }, [ 12345 ], 'name' ) > [ x ]
}
\keyword{json,}
\keyword{utils}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pkglog.R
\name{has_pkglog}
\alias{has_pkglog}
\title{Does the folder contain a package log file?}
\usage{
has_pkglog(path = ".")
}
\arguments{
\item{path}{folder to check for a package log}
}
\description{
Does the folder contain a package log file?
}
| /man/has_pkglog.Rd | permissive | mikmart/pkglog | R | false | true | 330 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pkglog.R
\name{has_pkglog}
\alias{has_pkglog}
\title{Does the folder contain a package log file?}
\usage{
has_pkglog(path = ".")
}
\arguments{
\item{path}{folder to check for a package log}
}
\description{
Does the folder contain a package log file?
}
|
#' @title Update a WordPress Post
#'
#' @description Updates an existing WordPress post with new values.
#'
#' @param root_url The domain on which you wish to create the post.
#' @param user The username to be passed into the API call to create the post.
#' @param pass The password to be used in the API call to create the post.
#' To get this value, you must have the Application Passwords plugin
#' installed, and must create an application using that plugin via your WordPress admin panel;
#' there you will get the password needed.
#' @param title_val The title of the page you are creating.
#' @param excerpt_val The excerpt to be shown where your WordPress features excerpts of post.
#' @param content_val The content of the post.
#' @param fifu_val If the Featured Image From URL plugin is installed, users can specify a
#' remotely hosted image file to use as the featured image for the post.
#' This field defaults to a value of NULL.
#' @param status_val The status of the post. Can be one of 'draft','publish','pending','future',
#' 'private'.
#' @param post_id The ID of the post to be updated.
#' @param author_val The user ID of the author creating the post.
#' @param format_val The WordPress format to use. Defaults to 'standard'.
#' @param categories_val The category IDs the post is to be associated with;
#' comma separate in a character string if more than one.
#' @param tag_val The tag IDs the post is to be associated with; comma separate in a character
#' string if more than one.
#'
#' @return A list containing the status code of the API call. A status code of 200 indicates
#' the call was a success.
#'
#' @examples
#' \dontrun{
#'update_wp_post(root_url = 'https://domain.com',user = Sys.getenv('username'),
#'pass = Sys.getenv('password'),
#'title_val = 'post title',excerpt_val = 'post excerpt',
#'content_val = 'the post content as a string, with wordpress-accepted
#'<strong>html</strong> (or bbcode!)',
#'fifu_val = 'https://domain.com/image.png',
#'status_val = 'draft',post_id = 123,format_val = 'standard',categories_val = 1, tag_val = 1)
#'}
#'
#' @export update_wp_post
#' @import tibble
#' @import httr
#' @import dplyr
update_wp_post <- function(root_url,user,pass,title_val,excerpt_val ='',content_val,
                           fifu_val = NULL,status_val,post_id,author_val,
                           format_val = 'standard',categories_val, tag_val = '') {
  # Assemble the JSON body for the WordPress REST API update call.
  pb <- list(title = title_val,
             excerpt = excerpt_val,
             content = content_val,
             fifu = fifu_val,
             status = status_val,
             author = author_val,
             format = format_val,
             categories = categories_val,
             tags = tag_val)
  # Drop the featured-image field when no URL was supplied, so the API does
  # not receive an explicit null.  NOTE: the previous code used
  # ifelse(is.null(fifu_val), within(pb, rm(fifu)), pb), which is wrong for
  # lists -- ifelse() shapes its result like the (length-1) test, so pb was
  # silently truncated to its first element.  A plain `if` is the correct form.
  if (is.null(fifu_val)) {
    pb$fifu <- NULL
  }
  # POST to /wp-json/wp/v2/posts/<id> with basic auth (Application Passwords).
  ch <- httr::POST(paste0(root_url, "/wp-json/wp/v2/posts/", post_id),
                   httr::authenticate(user, pass),
                   body = pb,
                   encode = "json")
  # Return the full httr response; callers inspect the status code.
  return(ch)
}
| /R/update-wp-post.R | no_license | cran/wordpressr | R | false | false | 2,941 | r | #' @title Update a WordPress Post
#'
#' @description Updates an existing WordPress post with new values.
#'
#' @param root_url The domain on which you wish to create the post.
#' @param user The username to be passed into the API call to create the post.
#' @param pass The password to be used in the API call to create the post.
#' To get this value, you must have the Application Passwords plugin
#' installed, and must create an application using that plugin via your WordPress admin panel;
#' there you will get the password needed.
#' @param title_val The title of the page you are creating.
#' @param excerpt_val The excerpt to be shown where your WordPress features excerpts of post.
#' @param content_val The content of the post.
#' @param fifu_val If the Featured Image From URL plugin is installed, users can specify a
#' remotely hosted image file to use as the featured image for the post.
#' This field defaults to a value of NULL.
#' @param status_val The status of the post. Can be one of 'draft','publish','pending','future',
#' 'private'.
#' @param post_id The ID of the post to be updated.
#' @param author_val The user ID of the author creating the post.
#' @param format_val The WordPress format to use. Defaults to 'standard'.
#' @param categories_val The category IDs the post is to be associated with;
#' comma separate in a character string if more than one.
#' @param tag_val The tag IDs the post is to be associated with; comma separate in a character
#' string if more than one.
#'
#' @return A list containing the status code of the API call. A status code of 200 indicates
#' the call was a success.
#'
#' @examples
#' \dontrun{
#'update_wp_post(root_url = 'https://domain.com',user = Sys.getenv('username'),
#'pass = Sys.getenv('password'),
#'title_val = 'post title',excerpt_val = 'post excerpt',
#'content_val = 'the post content as a string, with wordpress-accepted
#'<strong>html</strong> (or bbcode!)',
#'fifu_val = 'https://domain.com/image.png',
#'status_val = 'draft',post_id = 123,format_val = 'standard',categories_val = 1, tag_val = 1)
#'}
#'
#' @export update_wp_post
#' @import tibble
#' @import httr
#' @import dplyr
update_wp_post <- function(root_url,user,pass,title_val,excerpt_val ='',content_val,
                           fifu_val = NULL,status_val,post_id,author_val,
                           format_val = 'standard',categories_val, tag_val = '') {
  # Assemble the JSON body for the WordPress REST API update call.
  pb <- list(title = title_val,
             excerpt = excerpt_val,
             content = content_val,
             fifu = fifu_val,
             status = status_val,
             author = author_val,
             format = format_val,
             categories = categories_val,
             tags = tag_val)
  # Drop the featured-image field when no URL was supplied, so the API does
  # not receive an explicit null.  NOTE: the previous code used
  # ifelse(is.null(fifu_val), within(pb, rm(fifu)), pb), which is wrong for
  # lists -- ifelse() shapes its result like the (length-1) test, so pb was
  # silently truncated to its first element.  A plain `if` is the correct form.
  if (is.null(fifu_val)) {
    pb$fifu <- NULL
  }
  # POST to /wp-json/wp/v2/posts/<id> with basic auth (Application Passwords).
  ch <- httr::POST(paste0(root_url, "/wp-json/wp/v2/posts/", post_id),
                   httr::authenticate(user, pass),
                   body = pb,
                   encode = "json")
  # Return the full httr response; callers inspect the status code.
  return(ch)
}
|
#' @name rdpTrain
#' @title Training the RDP classifier
#'
#' @description Training the RDP presence/absence K-mer method on sequence data.
#'
#' @param sequence Character vector of 16S sequences.
#' @param taxon Character vector of taxon labels for each sequence.
#' @param K Word length (integer).
#' @param cnames Logical indicating if column names should be added to the trained model matrix.
#'
#' @details The training step of the RDP method means looking for K-mers on all sequences,
#' and computing the probability of each K-mer being present for each unique taxon. This is an
#' attempt to re-implement the method described by Wang et al. (2007), but without the bootstrapping.
#' See that publication for all details.
#'
#' The word-length \code{K} is by default 8, since this is the value used by Wang et al. Larger values
#' may lead to memory-problems since the trained model is a matrix with 4^K columns. Adding the K-mers
#' as column names will slow down all computations.
#'
#' The relative taxon sizes are also computed, and returned as an attribute to the model matrix. They may
#' be used as empirical priors in the classification step.
#'
#' @return A list with two elements. The first element is \code{Method}, which is the text
#' \code{"RDPclassifier"} in this case. The second element is \code{Fitted}, which is a
#' matrix with one row for each unique \code{taxon} and one column for
#' each possible word of length \code{K}. The value in row i and column j is the probability that
#' word j is present in taxon i.
#'
#' @references Wang, Q, Garrity, GM, Tiedje, JM, Cole, JR (2007). Naive Bayesian Classifier for
#' Rapid Assignment of rRNA Sequences into the New Bacterial Taxonomy. Applied and Environmental
#' Microbiology, 73: 5261-5267.
#'
#' @author Kristian Hovde Liland and Lars Snipen.
#'
#' @seealso \code{\link{rdpClassify}}.
#'
#' @examples
#' # See examples for rdpClassify.
#'
#' @export rdpTrain
#'
rdpTrain <- function(sequence, taxon, K = 8, cnames = FALSE){
  # Train the RDP presence/absence K-mer classifier.
  # `sequence`: character vector of 16S sequences; `taxon`: taxon label per
  # sequence; `K`: word length; `cnames`: add K-mer column names to the model.
  # Returns list(Method = "RDPclassifier", Fitted = <taxa x 4^K prob matrix>)
  # with the empirical priors attached as attribute "prior".
  #
  # Taxon sizes (counts per unique taxon, in sorted-unique order, matching
  # the level order of factor(taxon) below).
  sizes <- as.numeric(table(taxon))
  # NOTE: the original code had a dead assignment `taxInt <- taxon` here,
  # immediately overwritten; it has been removed.
  taxFac <- factor(taxon)
  taxLevels <- levels(taxFac)
  taxInt <- as.integer(taxFac)
  # Empirical prior: relative taxon sizes (may be used in classification).
  prior <- sizes / length(taxon)
  # For each taxon index, the positions of its sequences; seq_len() is safe
  # where 1:max(...) would misbehave on degenerate input.
  classesIn <- lapply(seq_len(max(taxInt)), function(i) which(taxInt == i))
  # Compiled helpers (defined in the package's C++ code) do the counting and
  # the conversion from counts to presence probabilities.
  presence.prob <- rdpTrainCpp(charToInt(sequence), K, cnames, classesIn, -1)
  presence.prob <- CountsToRDP(presence.prob, sizes)
  if(is.character(taxon)){
    dimnames(presence.prob) <- list(taxLevels, NULL) # Avoids copying
  }
  attr(presence.prob, "prior") <- prior
  trained.model <- list(Method = "RDPclassifier", Fitted = presence.prob)
  return(trained.model)
}
#' @name rdpClassify
#' @title Classifying with the RDP classifier
#'
#' @description Classifying sequences by a trained presence/absence K-mer model.
#'
#' @param sequence Character vector of sequences to classify.
#' @param trained.model A list with a trained model, see \code{\link{rdpTrain}}.
#' @param post.prob Logical indicating if posterior log-probabilities should be returned.
#' @param prior Logical indicating if classification should be done by flat priors (default)
#' or with empirical priors (prior=TRUE).
#'
#' @details The classification step of the presence/absence method known as the RDP classifier
#' (Wang et al 2007) means looking for K-mers on all sequences, and computing the posterior
#' probabilities for each taxon using a trained model and a naive Bayes assumption. The predicted
#' taxon is the one producing the maximum posterior probability, for each \code{sequence}.
#'
#' The classification is parallelized through RcppParallel
#' employing Intel TBB and TinyThread. By default all available
#' processing cores are used. This can be changed using the
#' function \code{\link{setParallel}}.
#'
#' @return A character vector with the predicted taxa, one for each \code{sequence}.
#'
#' @references Wang, Q, Garrity, GM, Tiedje, JM, Cole, JR (2007). Naive Bayesian Classifier for
#' Rapid Assignment of rRNA Sequences into the New Bacterial Taxonomy. Applied and Environmental
#' Microbiology, 73: 5261-5267.
#'
#' @author Kristian Hovde Liland and Lars Snipen.
#'
#' @seealso \code{\link{rdpTrain}}.
#'
#' @examples
#' data("small.16S")
#' seq <- small.16S$Sequence
#' tax <- sapply(strsplit(small.16S$Header,split=" "),function(x){x[2]})
#' \dontrun{
#' trn <- rdpTrain(seq,tax)
#' primer.515f <- "GTGYCAGCMGCCGCGGTAA"
#' primer.806rB <- "GGACTACNVGGGTWTCTAAT"
#' reads <- amplicon(seq, primer.515f, primer.806rB)
#' predicted <- rdpClassify(unlist(reads[nchar(reads)>0]),trn)
#' print(predicted)
#' }
#'
#' @export rdpClassify
#'
rdpClassify <- function(sequence, trained.model, post.prob = FALSE, prior = FALSE){
  # Classify sequences with a model trained by rdpTrain().
  # Returns either a character vector of predicted taxa, or (post.prob = TRUE)
  # a data.frame with the top-2 taxa and their posterior log-probabilities.
  if(trained.model$Method != "RDPclassifier") stop("Trained model is not an RDPclassifier!")
  fitted.mat <- trained.model$Fitted
  # Recover the word length K from the model width (4^K = 2^(2K) columns).
  word.length <- as.integer(log2(ncol(fitted.mat)) / 2)
  # Flat priors (zeros) by default; empirical log2-priors on request.
  log.priors <- if (prior) {
    log2(attr(fitted.mat, "prior"))
  } else {
    rep(0, nrow(fitted.mat))
  }
  # Compiled classifier (package C++ code) returns indices and posteriors of
  # the best and second-best taxon per sequence.
  res <- rdpClassifyCpp(charToInt(sequence), word.length, fitted.mat, log.priors, TRUE)
  taxa <- rownames(fitted.mat)
  if (!post.prob) {
    return(taxa[res$first_ind])
  }
  data.frame(Taxon.1 = taxa[res$first_ind], Post.prob.1 = res$first,
             Taxon.2 = taxa[res$second_ind], Post.prob.2 = res$second,
             stringsAsFactors = FALSE)
}
# Older, slower versions
# rdpClassify <- function( sequence, trained.model ){
# if( trained.model$Method != "RDPclassifier" ) stop( "Trained model is not an RDPclassifier!" )
# presence.prob <- trained.model$Fitted
# K <- as.integer( log2( dim( presence.prob )[2] )/2 )
# ret <- rdpClassifyCpp( presence.prob, charToInt( sequence ), K )
# C <- as.integer( ret$C )
# taxon.hat <- rep( "unclassified", length(C) )
# is.tie <- (C<0)
# taxon.hat[!is.tie] <- rownames( presence.prob )[C[!is.tie]]
# return( taxon.hat )
# }
# rdpTrain <- function( sequence, taxon, K=8, cnames=FALSE ){
# taxInt <- taxon
# if( is.character( taxon ) ){
# taxInt <- factor( taxon )
# taxLevels <- levels( taxInt )
# taxInt <- as.integer( taxInt )
# prior <- as.numeric( table( taxon ) /length( taxon ) )
# }
# presence.prob <- rdpTrainCpp( charToInt( sequence ), K,
# cnames, taxInt-1, max(taxInt), as.numeric( table( taxon ) ) )
# if( is.character( taxon ) ){
# dimnames( presence.prob ) <- list( taxLevels, NULL ) # Avoids copying
# }
# attr( presence.prob, "prior" ) <- prior
# trained.model <- list( Method="RDPclassifier", Fitted=presence.prob )
# return( trained.model )
# }
| /R/rdpClassifier.R | no_license | cran/microclass | R | false | false | 6,960 | r | #' @name rdpTrain
#' @title Training the RDP classifier
#'
#' @description Training the RDP presence/absence K-mer method on sequence data.
#'
#' @param sequence Character vector of 16S sequences.
#' @param taxon Character vector of taxon labels for each sequence.
#' @param K Word length (integer).
#' @param cnames Logical indicating if column names should be added to the trained model matrix.
#'
#' @details The training step of the RDP method means looking for K-mers on all sequences,
#' and computing the probability of each K-mer being present for each unique taxon. This is an
#' attempt to re-implement the method described by Wang et al. (2007), but without the bootstrapping.
#' See that publication for all details.
#'
#' The word-length \code{K} is by default 8, since this is the value used by Wang et al. Larger values
#' may lead to memory-problems since the trained model is a matrix with 4^K columns. Adding the K-mers
#' as column names will slow down all computations.
#'
#' The relative taxon sizes are also computed, and returned as an attribute to the model matrix. They may
#' be used as empirical priors in the classification step.
#'
#' @return A list with two elements. The first element is \code{Method}, which is the text
#' \code{"RDPclassifier"} in this case. The second element is \code{Fitted}, which is a
#' matrix with one row for each unique \code{taxon} and one column for
#' each possible word of length \code{K}. The value in row i and column j is the probability that
#' word j is present in taxon i.
#'
#' @references Wang, Q, Garrity, GM, Tiedje, JM, Cole, JR (2007). Naive Bayesian Classifier for
#' Rapid Assignment of rRNA Sequences into the New Bacterial Taxonomy. Applied and Environmental
#' Microbiology, 73: 5261-5267.
#'
#' @author Kristian Hovde Liland and Lars Snipen.
#'
#' @seealso \code{\link{rdpClassify}}.
#'
#' @examples
#' # See examples for rdpClassify.
#'
#' @export rdpTrain
#'
rdpTrain <- function(sequence, taxon, K = 8, cnames = FALSE){
  # Train the RDP presence/absence K-mer classifier.
  # `sequence`: character vector of 16S sequences; `taxon`: taxon label per
  # sequence; `K`: word length; `cnames`: add K-mer column names to the model.
  # Returns list(Method = "RDPclassifier", Fitted = <taxa x 4^K prob matrix>)
  # with the empirical priors attached as attribute "prior".
  #
  # Taxon sizes (counts per unique taxon, in sorted-unique order, matching
  # the level order of factor(taxon) below).
  sizes <- as.numeric(table(taxon))
  # NOTE: the original code had a dead assignment `taxInt <- taxon` here,
  # immediately overwritten; it has been removed.
  taxFac <- factor(taxon)
  taxLevels <- levels(taxFac)
  taxInt <- as.integer(taxFac)
  # Empirical prior: relative taxon sizes (may be used in classification).
  prior <- sizes / length(taxon)
  # For each taxon index, the positions of its sequences; seq_len() is safe
  # where 1:max(...) would misbehave on degenerate input.
  classesIn <- lapply(seq_len(max(taxInt)), function(i) which(taxInt == i))
  # Compiled helpers (defined in the package's C++ code) do the counting and
  # the conversion from counts to presence probabilities.
  presence.prob <- rdpTrainCpp(charToInt(sequence), K, cnames, classesIn, -1)
  presence.prob <- CountsToRDP(presence.prob, sizes)
  if(is.character(taxon)){
    dimnames(presence.prob) <- list(taxLevels, NULL) # Avoids copying
  }
  attr(presence.prob, "prior") <- prior
  trained.model <- list(Method = "RDPclassifier", Fitted = presence.prob)
  return(trained.model)
}
#' @name rdpClassify
#' @title Classifying with the RDP classifier
#'
#' @description Classifying sequences by a trained presence/absence K-mer model.
#'
#' @param sequence Character vector of sequences to classify.
#' @param trained.model A list with a trained model, see \code{\link{rdpTrain}}.
#' @param post.prob Logical indicating if posterior log-probabilities should be returned.
#' @param prior Logical indicating if classification should be done by flat priors (default)
#' or with empirical priors (prior=TRUE).
#'
#' @details The classification step of the presence/absence method known as the RDP classifier
#' (Wang et al 2007) means looking for K-mers on all sequences, and computing the posterior
#' probabilities for each taxon using a trained model and a naive Bayes assumption. The predicted
#' taxon is the one producing the maximum posterior probability, for each \code{sequence}.
#'
#' The classification is parallelized through RcppParallel
#' employing Intel TBB and TinyThread. By default all available
#' processing cores are used. This can be changed using the
#' function \code{\link{setParallel}}.
#'
#' @return A character vector with the predicted taxa, one for each \code{sequence}.
#'
#' @references Wang, Q, Garrity, GM, Tiedje, JM, Cole, JR (2007). Naive Bayesian Classifier for
#' Rapid Assignment of rRNA Sequences into the New Bacterial Taxonomy. Applied and Environmental
#' Microbiology, 73: 5261-5267.
#'
#' @author Kristian Hovde Liland and Lars Snipen.
#'
#' @seealso \code{\link{rdpTrain}}.
#'
#' @examples
#' data("small.16S")
#' seq <- small.16S$Sequence
#' tax <- sapply(strsplit(small.16S$Header,split=" "),function(x){x[2]})
#' \dontrun{
#' trn <- rdpTrain(seq,tax)
#' primer.515f <- "GTGYCAGCMGCCGCGGTAA"
#' primer.806rB <- "GGACTACNVGGGTWTCTAAT"
#' reads <- amplicon(seq, primer.515f, primer.806rB)
#' predicted <- rdpClassify(unlist(reads[nchar(reads)>0]),trn)
#' print(predicted)
#' }
#'
#' @export rdpClassify
#'
rdpClassify <- function(sequence, trained.model, post.prob = FALSE, prior = FALSE){
  # Classify sequences with a model trained by rdpTrain().
  # Returns either a character vector of predicted taxa, or (post.prob = TRUE)
  # a data.frame with the top-2 taxa and their posterior log-probabilities.
  if(trained.model$Method != "RDPclassifier") stop("Trained model is not an RDPclassifier!")
  fitted.mat <- trained.model$Fitted
  # Recover the word length K from the model width (4^K = 2^(2K) columns).
  word.length <- as.integer(log2(ncol(fitted.mat)) / 2)
  # Flat priors (zeros) by default; empirical log2-priors on request.
  log.priors <- if (prior) {
    log2(attr(fitted.mat, "prior"))
  } else {
    rep(0, nrow(fitted.mat))
  }
  # Compiled classifier (package C++ code) returns indices and posteriors of
  # the best and second-best taxon per sequence.
  res <- rdpClassifyCpp(charToInt(sequence), word.length, fitted.mat, log.priors, TRUE)
  taxa <- rownames(fitted.mat)
  if (!post.prob) {
    return(taxa[res$first_ind])
  }
  data.frame(Taxon.1 = taxa[res$first_ind], Post.prob.1 = res$first,
             Taxon.2 = taxa[res$second_ind], Post.prob.2 = res$second,
             stringsAsFactors = FALSE)
}
# Older, slower versions
# rdpClassify <- function( sequence, trained.model ){
# if( trained.model$Method != "RDPclassifier" ) stop( "Trained model is not an RDPclassifier!" )
# presence.prob <- trained.model$Fitted
# K <- as.integer( log2( dim( presence.prob )[2] )/2 )
# ret <- rdpClassifyCpp( presence.prob, charToInt( sequence ), K )
# C <- as.integer( ret$C )
# taxon.hat <- rep( "unclassified", length(C) )
# is.tie <- (C<0)
# taxon.hat[!is.tie] <- rownames( presence.prob )[C[!is.tie]]
# return( taxon.hat )
# }
# rdpTrain <- function( sequence, taxon, K=8, cnames=FALSE ){
# taxInt <- taxon
# if( is.character( taxon ) ){
# taxInt <- factor( taxon )
# taxLevels <- levels( taxInt )
# taxInt <- as.integer( taxInt )
# prior <- as.numeric( table( taxon ) /length( taxon ) )
# }
# presence.prob <- rdpTrainCpp( charToInt( sequence ), K,
# cnames, taxInt-1, max(taxInt), as.numeric( table( taxon ) ) )
# if( is.character( taxon ) ){
# dimnames( presence.prob ) <- list( taxLevels, NULL ) # Avoids copying
# }
# attr( presence.prob, "prior" ) <- prior
# trained.model <- list( Method="RDPclassifier", Fitted=presence.prob )
# return( trained.model )
# }
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SimulateMultipleMethods.R
\name{SimulateMultipleMethods}
\alias{SimulateMultipleMethods}
\title{Compare various strategies for Multi-Armed Bandit in stationary
and non-stationary scenarios}
\usage{
SimulateMultipleMethods(method = "Thompson-Sampling",
method.par = list(ndraws.TS = 1000), iter, nburnin, nperiod,
reward.mean.family, reward.family, narms.family, npulls.family,
stationary = TRUE, nonstationary.type = NULL, data.par,
regret.plot = FALSE)
}
\arguments{
\item{method}{A vector of character strings choosing from "Epsilon-Greedy",
"Epsilon-Decreasing", "Thompson-Sampling",
"EXP3", "UCB", "Bayes-Poisson-TS", "Greedy-Thompson-Sampling",
"EXP3-Thompson-Sampling",
"Greedy-Bayes-Poisson-TS", "EXP3-Bayes-Poisson-TS" and "HyperTS".
See \code{\link{SimulateMultiplePeriods}} for more details.
Default is "Thompson-Sampling".}
\item{method.par}{A list of parameters needed for different methods:
\code{epsilon}: A real number between 0 and 1; needed for "Epsilon-Greedy",
"Epsilon-Decreasing", "Greedy-Thompson-Sampling" and
"Greedy-Bayes-Poisson-TS".
\code{ndraws.TS}: A positive integer specifying
the number of random draws from the posterior;
needed for "Thompson-Sampling", "Greedy-Thompson-Sampling"
and "EXP3-Thompson-Sampling". Default is 1000.
\code{EXP3}: A list consisting of two real numbers \code{eta} and
\code{gamma};
\eqn{eta > 0} and \eqn{0 <= gamma < 1}; needed for "EXP3",
"EXP3-Thompson-Sampling" and "EXP3-Bayes-Poisson-TS".
\code{BP}: A list consisting of three positive integers \code{iter.BP},
\code{ndraws.BP} and \code{interval.BP};
needed for "Bayes-Poisson-TS", "Greedy-Bayes-Poisson-TS"
and "EXP3-Bayes-Poisson-TS"; \code{iter.BP} specifies the number
of iterations to compute posterior;
\code{ndraws.BP} specifies the number of posterior samples drawn
from posterior distribution; \code{interval.BP} is specified to
draw each posterior sample from
a sample sequence of length \code{interval.BP}.
\code{HyperTS}: A list consisting of a vector \code{method.list},
needed for "HyperTS". \code{method.list} is a vector of character strings
choosing from "Epsilon-Greedy", "Epsilon-Decreasing", "Thompson-Sampling",
"EXP3", "UCB", "Bayes-Poisson-TS", "Greedy-Thompson-Sampling",
"EXP3-Thompson-Sampling",
"Greedy-Bayes-Poisson-TS" and "EXP3-Bayes-Poisson-TS".
"HyperTS" will construct an ensemble consisting all the methods
in \code{method.list}.}
\item{iter}{A positive integer specifying the number of iterations.}
\item{nburnin}{A positive integer specifying the number of periods
to allocate each arm equal traffic before applying any strategy.}
\item{nperiod}{A positive integer specifying the number of periods
to apply various strategies.}
\item{reward.mean.family}{A character string specifying
the distribution family to generate mean reward of each arm.
Available distribution includes "Uniform", "Beta" and "Gaussian".}
\item{reward.family}{A character string specifying the distribution family
of reward. Available distribution includes
"Bernoulli", "Poisson" and "Gaussian".
If "Gaussian" is chosen to be the reward distribution,
a vector of standard deviation should be provided in
\code{sd.reward} in \code{data.par}.}
\item{narms.family}{A character string specifying the distribution family
of the number of arms. Available distribution includes "Poisson" and
"Binomial".}
\item{npulls.family}{A character string specifying the distribution family
of the number of pulls per period.
For continuous distribution, the number of pulls will be rounded up.
Available distribution includes "Log-Normal" and "Poisson".}
\item{stationary}{A logical value indicating whether a stationary
Multi-Armed Bandit is considered (corresponding to the case that
the reward mean is unchanged). Default to be TRUE.}
\item{nonstationary.type}{A character string indicating
how the mean reward varies. Available types include "Random Walk" and
"Geometric Random Walk"
(reward mean follows random walk in the log scale). Default to be NULL.}
\item{data.par}{A list of data generating parameters:
\code{reward.mean}: A list of parameters of \code{reward.mean.family}:
\code{min} and \code{max} are two real numbers specifying
the bounds when \eqn{reward.mean.family = "Uniform"}; \code{shape1} and
\code{shape2} are two shape parameters when
\eqn{reward.mean.family = "Beta"};
\code{mean} and \code{sd} specify mean and standard deviation
when \eqn{reward.mean.family = "Gaussian"}.
\code{reward.family}: A list of parameters of \code{reward.family}:
\code{sd} is a vector of non-negative numbers specifying standard deviation
of each arm's reward distribution
if "Gaussian" is chosen to be the reward distribution.
\code{narms.family}: A list of parameters of \code{narms.family}:
\code{lambda} is a positive parameter specifying the mean when
\eqn{narms.family = "Poisson"}; \code{size} and \code{prob}
are 2 parameters needed when \eqn{narms.family = "Binomial"}.
\code{npulls.family}: A list of parameters of \code{npulls.family}:
\code{meanlog} and \code{sdlog} are 2 positive parameters specifying the mean
and standard deviation in the log scale
when \eqn{npulls.family = "Log-Normal"};
\code{lambda} is a positive parameter
specifying the mean when \eqn{npulls.family = "Poisson"}.
\code{nonstationary.family}:
A list of parameters of \code{nonstationary.type}:
\code{sd} is a positive parameter specifying the standard deviation
of white noise
when \eqn{nonstationary.type = "Random Walk"}; \code{sdlog} is
a positive parameter specifying the log standard deviation of white noise
when \eqn{nonstationary.type = "Geometric Random Walk"}.}
\item{regret.plot}{A logical value indicating whether an average regret plot
is returned. Default to be FALSE.}
}
\value{
a list consisting of:
\item{regret.matrix}{A three-dimensional array with each dimension corresponding to the period, iteration and method.}
\item{regret.plot.object}{If regret.plot = TRUE, a ggplot object is returned.}
}
\description{
This function is aimed to simulate data in different scenarios to
compare various strategies in Multi-Armed Bandit.
Users can specify the distribution of the number of arms,
the distribution of mean reward, the distribution of the number of pulls
in one period and the stationariness to simulate different scenarios.
Relative regret is returned and average relative regret plot is returned
if needed.
See \code{\link{SimulateMultiplePeriods}} for more details.
}
\examples{
### Compare Epsilon-Greedy and Thompson Sampling in the stationary case.
set.seed(100)
res <- SimulateMultipleMethods(
method = c("Epsilon-Greedy", "Thompson-Sampling"),
method.par = list(epsilon = 0.1, ndraws.TS = 1000),
iter = 100,
nburnin = 30,
nperiod = 180,
reward.mean.family = "Uniform",
reward.family = "Bernoulli",
narms.family = "Poisson",
npulls.family = "Log-Normal",
data.par = list(reward.mean = list(min = 0, max = 0.1),
npulls.family = list(meanlog = 3, sdlog = 1.5),
narms.family = list(lambda = 5)),
regret.plot = TRUE)
res$regret.plot.object
### Compare Epsilon-Greedy, Thompson Sampling and EXP3 in the non-stationary case.
set.seed(100)
res <- SimulateMultipleMethods(
method = c("Epsilon-Greedy", "Thompson-Sampling", "EXP3"),
method.par = list(epsilon = 0.1,
ndraws.TS = 1000,
EXP3 = list(gamma = 0, eta = 0.1)),
iter = 100,
nburnin = 30,
nperiod = 90,
reward.mean.family = "Beta",
reward.family = "Bernoulli",
narms.family = "Binomial",
npulls.family = "Log-Normal",
stationary = FALSE,
nonstationary.type = "Geometric Random Walk",
data.par = list(reward.mean = list(shape1 = 2, shape2 = 5),
npulls.family = list(meanlog = 3, sdlog = 1),
narms.family = list(size = 10, prob = 0.5),
nonstationary.family = list(sdlog = 0.05)),
regret.plot = TRUE)
res$regret.plot.object
}
| /man/SimulateMultipleMethods.Rd | permissive | datasciences/MAB | R | false | true | 8,271 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SimulateMultipleMethods.R
\name{SimulateMultipleMethods}
\alias{SimulateMultipleMethods}
\title{Compare various strategies for Multi-Armed Bandit in stationary
and non-stationary scenarios}
\usage{
SimulateMultipleMethods(method = "Thompson-Sampling",
method.par = list(ndraws.TS = 1000), iter, nburnin, nperiod,
reward.mean.family, reward.family, narms.family, npulls.family,
stationary = TRUE, nonstationary.type = NULL, data.par,
regret.plot = FALSE)
}
\arguments{
\item{method}{A vector of character strings choosing from "Epsilon-Greedy",
"Epsilon-Decreasing", "Thompson-Sampling",
"EXP3", "UCB", "Bayes-Poisson-TS", "Greedy-Thompson-Sampling",
"EXP3-Thompson-Sampling",
"Greedy-Bayes-Poisson-TS", "EXP3-Bayes-Poisson-TS" and "HyperTS".
See \code{\link{SimulateMultiplePeriods}} for more details.
Default is "Thompson-Sampling".}
\item{method.par}{A list of parameters needed for different methods:
\code{epsilon}: A real number between 0 and 1; needed for "Epsilon-Greedy",
"Epsilon-Decreasing", "Greedy-Thompson-Sampling" and
"Greedy-Bayes-Poisson-TS".
\code{ndraws.TS}: A positive integer specifying
the number of random draws from the posterior;
needed for "Thompson-Sampling", "Greedy-Thompson-Sampling"
and "EXP3-Thompson-Sampling". Default is 1000.
\code{EXP3}: A list consisting of two real numbers \code{eta} and
\code{gamma};
\eqn{eta > 0} and \eqn{0 <= gamma < 1}; needed for "EXP3",
"EXP3-Thompson-Sampling" and "EXP3-Bayes-Poisson-TS".
\code{BP}: A list consisting of three positive integers \code{iter.BP},
\code{ndraws.BP} and \code{interval.BP};
needed for "Bayes-Poisson-TS", "Greedy-Bayes-Poisson-TS"
and "EXP3-Bayes-Poisson-TS"; \code{iter.BP} specifies the number
of iterations to compute posterior;
\code{ndraws.BP} specifies the number of posterior samples drawn
from posterior distribution; \code{interval.BP} is specified to
draw each posterior sample from
a sample sequence of length \code{interval.BP}.
\code{HyperTS}: A list consisting of a vector \code{method.list},
needed for "HyperTS". \code{method.list} is a vector of character strings
choosing from "Epsilon-Greedy", "Epsilon-Decreasing", "Thompson-Sampling",
"EXP3", "UCB", "Bayes-Poisson-TS", "Greedy-Thompson-Sampling",
"EXP3-Thompson-Sampling",
"Greedy-Bayes-Poisson-TS" and "EXP3-Bayes-Poisson-TS".
"HyperTS" will construct an ensemble consisting all the methods
in \code{method.list}.}
\item{iter}{A positive integer specifying the number of iterations.}
\item{nburnin}{A positive integer specifying the number of periods
to allocate each arm equal traffic before applying any strategy.}
\item{nperiod}{A positive integer specifying the number of periods
to apply various strategies.}
\item{reward.mean.family}{A character string specifying
the distribution family to generate mean reward of each arm.
Available distribution includes "Uniform", "Beta" and "Gaussian".}
\item{reward.family}{A character string specifying the distribution family
of reward. Available distribution includes
"Bernoulli", "Poisson" and "Gaussian".
If "Gaussian" is chosen to be the reward distribution,
a vector of standard deviation should be provided in
\code{sd.reward} in \code{data.par}.}
\item{narms.family}{A character string specifying the distribution family
of the number of arms. Available distribution includes "Poisson" and
"Binomial".}
\item{npulls.family}{A character string specifying the distribution family
of the number of pulls per period.
For continuous distribution, the number of pulls will be rounded up.
Available distribution includes "Log-Normal" and "Poisson".}
\item{stationary}{A logic value indicating whether a stationary
Multi-Armed Bandit is considered (corresponding to the case that
the reward mean is unchanged). Default to be TRUE.}
\item{nonstationary.type}{A character string indicating
how the mean reward varies. Available types include "Random Walk" and
"Geometric Random Walk"
(reward mean follows random walk in the log scale). Default to be NULL.}
\item{data.par}{A list of data generating parameters:
\code{reward.mean}: A list of parameters of \code{reward.mean.family}:
\code{min} and \code{max} are two real numbers specifying
the bounds when \eqn{reward.mean.family = "Uniform"}; \code{shape1} and
\code{shape2} are two shape parameters when
\eqn{reward.mean.family = "Beta"};
\code{mean} and \code{sd} specify mean and standard deviation
when \eqn{reward.mean.family = "Gaussian"}.
\code{reward.family}: A list of parameters of \code{reward.family}:
\code{sd} is a vector of non-negative numbers specifying standard deviation
of each arm's reward distribution
if "Gaussian" is chosen to be the reward distribution.
\code{narms.family}: A list of parameters of \code{narms.family}:
\code{lambda} is a positive parameter specifying the mean when
\eqn{narms.family = "Poisson"}; \code{size} and \code{prob}
are 2 parameters needed when \eqn{narms.family = "Binomial"}.
\code{npulls.family}: A list of parameters of \code{npulls.family}:
\code{meanlog} and \code{sdlog} are 2 positive parameters specifying the mean
and standard deviation in the log scale
when \eqn{npulls.family = "Log-Normal"};
\code{lambda} is a positive parameter
specifying the mean when \eqn{npulls.family = "Poisson"}.
\code{nonstationary.family}:
A list of parameters of \code{nonstationary.type}:
\code{sd} is a positive parameter specifying the standard deviation
of white noise
when \eqn{nonstationary.type = "Random Walk"}; \code{sdlog} is
a positive parameter specifying the log standard deviation of white noise
when \eqn{nonstationary.type = "Geometric Random Walk"}.}
\item{regret.plot}{A logic value indicating whether an average regret plot
is returned. Default to be FALSE.}
}
\value{
a list consisting of:
\item{regret.matrix}{A three-dimensional array with each dimension corresponding to the period, iteration and method.}
\item{regret.plot.object}{If regret.plot = TRUE, a ggplot object is returned.}
}
\description{
This function is aimed to simulate data in different scenarios to
compare various strategies in Multi-Armed Bandit.
Users can specify the distribution of the number of arms,
the distribution of mean reward, the distribution of the number of pulls
in one period and the stationariness to simulate different scenarios.
Relative regret is returned and average relative regret plot is returned
if needed.
See \code{\link{SimulateMultiplePeriods}} for more details.
}
\examples{
### Compare Epsilon-Greedy and Thompson Sampling in the stationary case.
set.seed(100)
res <- SimulateMultipleMethods(
method = c("Epsilon-Greedy", "Thompson-Sampling"),
method.par = list(epsilon = 0.1, ndraws.TS = 1000),
iter = 100,
nburnin = 30,
nperiod = 180,
reward.mean.family = "Uniform",
reward.family = "Bernoulli",
narms.family = "Poisson",
npulls.family = "Log-Normal",
data.par = list(reward.mean = list(min = 0, max = 0.1),
npulls.family = list(meanlog = 3, sdlog = 1.5),
narms.family = list(lambda = 5)),
regret.plot = TRUE)
res$regret.plot.object
### Compare Epsilon-Greedy, Thompson Sampling and EXP3 in the non-stationary case.
set.seed(100)
res <- SimulateMultipleMethods(
method = c("Epsilon-Greedy", "Thompson-Sampling", "EXP3"),
method.par = list(epsilon = 0.1,
ndraws.TS = 1000,
EXP3 = list(gamma = 0, eta = 0.1)),
iter = 100,
nburnin = 30,
nperiod = 90,
reward.mean.family = "Beta",
reward.family = "Bernoulli",
narms.family = "Binomial",
npulls.family = "Log-Normal",
stationary = FALSE,
nonstationary.type = "Geometric Random Walk",
data.par = list(reward.mean = list(shape1 = 2, shape2 = 5),
npulls.family = list(meanlog = 3, sdlog = 1),
narms.family = list(size = 10, prob = 0.5),
nonstationary.family = list(sdlog = 0.05)),
regret.plot = TRUE)
res$regret.plot.object
}
|
#Load the libraries needed in this code
library(dplyr)
library(DT)
library(shiny)
library(leaflet)
#UI-------------------------------------------------------------------------------------------------------------------------------------
# UI: navbar app with two top-level tabs -- an interactive "Map" and a "Table".
# The "Map" tab layers a draggable control panel (Controls / Graphs / About)
# on top of a full-screen leaflet map.
shinyUI(navbarPage("Accident tracker", id="nav", collapsible=T,
tabPanel("Map",
div(class="outer",
# Static head assets: analytics script, slider skin, spin.js and custom CSS.
tags$head(
includeScript("analytics.js"),
tags$link(rel = "stylesheet", type = "text/css",
href = "ion.rangeSlider.skinFlat.css"),
includeScript("spin.min.js"),
includeCSS("styles.css")
),
leafletOutput("mymap", width="100%", height="100%"),
# Attach a spin.js spinner while the map loads; it is stopped by the
# "map_done" custom message handler registered further below.
tags$script("
var spinner = new Spinner().spin();
$( 'div#mymap' ).append(spinner.el);"),
# Draggable overlay panel hosting the Controls / Graphs / About tabs.
absolutePanel(id = "controls", class = "panel panel-default", fixed = TRUE,
draggable = TRUE, top = 100, left = "auto", right = 20, bottom = "auto",
width = 360, height = "auto",
h2(),
# NOTE(review): "vechicle" below is a typo ("vehicle") in user-facing text.
p(class="intro",
strong("Accident tracker"), " shows vechicle collisions in",
"the city of Edinburgh, UK. Data from",
a("Edinburgh Open Data.",
href="http://www.edinburghopendata.info/dataset/vehicle-collisions",
target="_blank")),
tabsetPanel(
# Inputs driving the map rendering (IDs: dates, color, alpha, base, scale).
tabPanel("Controls",
dateRangeInput('dates',
label = 'Occurred between:',
start = as.Date("2010-01-01"), end = as.Date("2013-07-01")),
selectInput("color", "Marker:",
choices=c("None", "Severity")),
sliderInput("alpha", label="Opacity:",
min=0, max=1, value=0.4, step=.025, ticks=T),
fluidRow(
column(6,
sliderInput("base", label="Point size:",
min=1, max=5, value=1)
),
column(6,
selectInput("scale", label="Scale by:", width=120,
selected="Vehicles",
choices=c("Casualties", "Vehicles"))#)
)
),
hr(class="thin"),
p("Modified by",
a("Andreas, Thompson, Jerry", href="github.com/Andreas001 or Tom112151",
target="_blank"),
HTML("&bull;"), "See the code on ",
a("github", href="http://github.com/Andreas001/UPH_Land-Transport-Accident-Tracker",
target="_blank"),
class="foot")
),
# Static summary plots rendered server-side.
tabPanel("Graphs",
#p("Static plots"),
plotOutput("monthTotals", height = "200px"),
plotOutput("month_waffle", height = "120px"),
#hr(),
plotOutput("involving", height = "120px", width="100%"),
hr(class="thin")
),
# Background information and source links.
tabPanel("About",
p(class="topp", "Explore vehicle collisions recorded in Edinburgh",
"between 2010 and 2013 in this interactive data visualisation."
),
p("This is written in ",
a("Shiny,", href="http://shiny.rstudio.com/", target="_blank"),
"a web application framework for the R language.",
"Maps are built with ",
a("leaflet.js", href="http://leafletjs.com/", target="_blank"),
"via the",
a("R language bindings,", href="https://rstudio.github.io/leaflet/",
target="_blank"),
"and using map data from",
a("Open Street Map.", href="http://www.openstreetmap.org/copyright",
target="_blank")
),
p("Project modified by ",
a("@Andreas, Thompson, Jerry", href="http://twitter.com/benjaminlmoore",
target="_blank"),
HTML("&mdash;"),
"see the full code on ",
a("github", href="http://github.com/Andreas001/UPH_Land-Transport-Accident-Tracker",
target="_blank"),
"or run locally with:"
),
pre("shiny::runGitHub('Andreas001/UPH_Land-Transport-Accident-Tracker')"),
hr(class="thin")
)
# end about panel
),
# When the server signals "map_done", stop and detach the loading spinner.
tags$script('
Shiny.addCustomMessageHandler("map_done",
function(s) {
spinner.stop();
$( "div#mymap" ).remove(spinner);
});')
),
#Control / setting panel
# Compact panel variant (class "mobile-panel" -- presumably shown on small
# screens via styles.css; confirm against the stylesheet).
div(class="mobile-panel",
p(strong("Blackspot"), " shows vechicle collisions in",
"the city of Edinburgh, UK. Original & modified by",
a("github.com/blmmoore", href="/Andreas001 & /Tom112151"),
"see the code on ",
a("github.", href="http://github.com/Andreas001/UPH_Land-Accident-Tracker"),
"Data: ",
a("Edinburgh Open Data.",
href="http://www.edinburghopendata.info/dataset/vehicle-collisions")),
hr(class="thin"),
radioButtons("color_mob", "Colour by:", inline=T,
choices=c("None", "Severity", "Casualties", "Time", "Vehicles", "Speed limit"))
)
)
# Second top-level tab: the raw data table.
), tabPanel("Table", DT::dataTableOutput("table"))
)
)
| /ui.R | permissive | Andreas001/UPH_Land-Transport-Accident-Tracker | R | false | false | 5,213 | r | #Load the libraries needed in this code
library(dplyr)
library(DT)
library(shiny)
library(leaflet)
#UI-------------------------------------------------------------------------------------------------------------------------------------
# UI: navbar app with two top-level tabs -- an interactive "Map" and a "Table".
# The "Map" tab layers a draggable control panel (Controls / Graphs / About)
# on top of a full-screen leaflet map.
shinyUI(navbarPage("Accident tracker", id="nav", collapsible=T,
tabPanel("Map",
div(class="outer",
# Static head assets: analytics script, slider skin, spin.js and custom CSS.
tags$head(
includeScript("analytics.js"),
tags$link(rel = "stylesheet", type = "text/css",
href = "ion.rangeSlider.skinFlat.css"),
includeScript("spin.min.js"),
includeCSS("styles.css")
),
leafletOutput("mymap", width="100%", height="100%"),
# Attach a spin.js spinner while the map loads; it is stopped by the
# "map_done" custom message handler registered further below.
tags$script("
var spinner = new Spinner().spin();
$( 'div#mymap' ).append(spinner.el);"),
# Draggable overlay panel hosting the Controls / Graphs / About tabs.
absolutePanel(id = "controls", class = "panel panel-default", fixed = TRUE,
draggable = TRUE, top = 100, left = "auto", right = 20, bottom = "auto",
width = 360, height = "auto",
h2(),
# NOTE(review): "vechicle" below is a typo ("vehicle") in user-facing text.
p(class="intro",
strong("Accident tracker"), " shows vechicle collisions in",
"the city of Edinburgh, UK. Data from",
a("Edinburgh Open Data.",
href="http://www.edinburghopendata.info/dataset/vehicle-collisions",
target="_blank")),
tabsetPanel(
# Inputs driving the map rendering (IDs: dates, color, alpha, base, scale).
tabPanel("Controls",
dateRangeInput('dates',
label = 'Occurred between:',
start = as.Date("2010-01-01"), end = as.Date("2013-07-01")),
selectInput("color", "Marker:",
choices=c("None", "Severity")),
sliderInput("alpha", label="Opacity:",
min=0, max=1, value=0.4, step=.025, ticks=T),
fluidRow(
column(6,
sliderInput("base", label="Point size:",
min=1, max=5, value=1)
),
column(6,
selectInput("scale", label="Scale by:", width=120,
selected="Vehicles",
choices=c("Casualties", "Vehicles"))#)
)
),
hr(class="thin"),
p("Modified by",
a("Andreas, Thompson, Jerry", href="github.com/Andreas001 or Tom112151",
target="_blank"),
HTML("&bull;"), "See the code on ",
a("github", href="http://github.com/Andreas001/UPH_Land-Transport-Accident-Tracker",
target="_blank"),
class="foot")
),
# Static summary plots rendered server-side.
tabPanel("Graphs",
#p("Static plots"),
plotOutput("monthTotals", height = "200px"),
plotOutput("month_waffle", height = "120px"),
#hr(),
plotOutput("involving", height = "120px", width="100%"),
hr(class="thin")
),
# Background information and source links.
tabPanel("About",
p(class="topp", "Explore vehicle collisions recorded in Edinburgh",
"between 2010 and 2013 in this interactive data visualisation."
),
p("This is written in ",
a("Shiny,", href="http://shiny.rstudio.com/", target="_blank"),
"a web application framework for the R language.",
"Maps are built with ",
a("leaflet.js", href="http://leafletjs.com/", target="_blank"),
"via the",
a("R language bindings,", href="https://rstudio.github.io/leaflet/",
target="_blank"),
"and using map data from",
a("Open Street Map.", href="http://www.openstreetmap.org/copyright",
target="_blank")
),
p("Project modified by ",
a("@Andreas, Thompson, Jerry", href="http://twitter.com/benjaminlmoore",
target="_blank"),
HTML("&mdash;"),
"see the full code on ",
a("github", href="http://github.com/Andreas001/UPH_Land-Transport-Accident-Tracker",
target="_blank"),
"or run locally with:"
),
pre("shiny::runGitHub('Andreas001/UPH_Land-Transport-Accident-Tracker')"),
hr(class="thin")
)
# end about panel
),
# When the server signals "map_done", stop and detach the loading spinner.
tags$script('
Shiny.addCustomMessageHandler("map_done",
function(s) {
spinner.stop();
$( "div#mymap" ).remove(spinner);
});')
),
#Control / setting panel
# Compact panel variant (class "mobile-panel" -- presumably shown on small
# screens via styles.css; confirm against the stylesheet).
div(class="mobile-panel",
p(strong("Blackspot"), " shows vechicle collisions in",
"the city of Edinburgh, UK. Original & modified by",
a("github.com/blmmoore", href="/Andreas001 & /Tom112151"),
"see the code on ",
a("github.", href="http://github.com/Andreas001/UPH_Land-Accident-Tracker"),
"Data: ",
a("Edinburgh Open Data.",
href="http://www.edinburghopendata.info/dataset/vehicle-collisions")),
hr(class="thin"),
radioButtons("color_mob", "Colour by:", inline=T,
choices=c("None", "Severity", "Casualties", "Time", "Vehicles", "Speed limit"))
)
)
# Second top-level tab: the raw data table.
), tabPanel("Table", DT::dataTableOutput("table"))
)
)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/derivatives.r
\name{d_bernoulli_itemblock_proxy}
\alias{d_bernoulli_itemblock_proxy}
\title{Partial derivatives for binary items by item-blocks using observed score proxy.}
\usage{
d_bernoulli_itemblock_proxy(
p_item,
pred_data,
item_data_current,
prox_data,
samp_size,
num_items,
num_predictors,
num_quad
)
}
\arguments{
\item{p_item}{Vector of item parameters.}
\item{pred_data}{Matrix or dataframe of DIF and/or impact predictors.}
\item{item_data_current}{Vector of current item responses.}
\item{prox_data}{Vector of observed proxy scores.}
\item{samp_size}{Sample size in dataset.}
\item{num_items}{Number of items in dataset.}
\item{num_predictors}{Number of predictors in dataset.}

\item{num_quad}{Number of quadrature points.}
}
\value{
a \code{"list"} of first and second partial derivatives for Bernoulli item likelihood (to
use with multivariate Newton-Raphson and observed proxy scores)
}
\description{
Partial derivatives for binary items by item-blocks using observed score proxy.
}
\keyword{internal}
| /man/d_bernoulli_itemblock_proxy.Rd | permissive | wbelzak/regDIF | R | false | true | 1,071 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/derivatives.r
\name{d_bernoulli_itemblock_proxy}
\alias{d_bernoulli_itemblock_proxy}
\title{Partial derivatives for binary items by item-blocks using observed score proxy.}
\usage{
d_bernoulli_itemblock_proxy(
p_item,
pred_data,
item_data_current,
prox_data,
samp_size,
num_items,
num_predictors,
num_quad
)
}
\arguments{
\item{p_item}{Vector of item parameters.}
\item{pred_data}{Matrix or dataframe of DIF and/or impact predictors.}
\item{item_data_current}{Vector of current item responses.}
\item{prox_data}{Vector of observed proxy scores.}
\item{samp_size}{Sample size in dataset.}
\item{num_items}{Number of items in dataset.}
\item{num_predictors}{Number of predictors in dataset.}

\item{num_quad}{Number of quadrature points.}
}
\value{
a \code{"list"} of first and second partial derivatives for Bernoulli item likelihood (to
use with multivariate Newton-Raphson and observed proxy scores)
}
\description{
Partial derivatives for binary items by item-blocks using observed score proxy.
}
\keyword{internal}
|
library(RKEEL)
### Name: CHC_C
### Title: CHC_C KEEL Classification Algorithm
### Aliases: CHC_C R6_CHC_C
### Keywords: classification
### ** Examples
#data_train <- RKEEL::loadKeelDataset("iris_train")
#data_test <- RKEEL::loadKeelDataset("iris_test")
#Create algorithm
#algorithm <- RKEEL::CHC_C(data_train, data_test)
#Run algorithm
#algorithm$run()
#See results
#algorithm$testPredictions
| /data/genthat_extracted_code/RKEEL/examples/CHC-C.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 403 | r | library(RKEEL)
### Name: CHC_C
### Title: CHC_C KEEL Classification Algorithm
### Aliases: CHC_C R6_CHC_C
### Keywords: classification
### ** Examples
#data_train <- RKEEL::loadKeelDataset("iris_train")
#data_test <- RKEEL::loadKeelDataset("iris_test")
#Create algorithm
#algorithm <- RKEEL::CHC_C(data_train, data_test)
#Run algorithm
#algorithm$run()
#See results
#algorithm$testPredictions
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/File.R
\name{HSDSFile}
\alias{HSDSFile}
\title{Construct an object of type HSDSFile}
\usage{
HSDSFile(src, domain)
}
\arguments{
\item{src}{an object of type HSDSSource, the server which exposes the file}
\item{domain}{the domain string; the file's location on the server's
file system.}
}
\value{
an initialized object of type HSDSFile
}
\description{
A HSDSFile is a representation of an HDF5 file whose contents are
exposed by an HDF5 server.
}
\details{
This function is deprecated and will be defunct in the next release.
}
\examples{
if (check_hsds()) {
src <- HSDSSource('http://hsdshdflab.hdfgroup.org')
f10x <- HSDSFile(src, '/shared/bioconductor/tenx_full.h5')
}
}
| /man/HSDSFile.Rd | no_license | vjcitn/rhdf5client | R | false | true | 774 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/File.R
\name{HSDSFile}
\alias{HSDSFile}
\title{Construct an object of type HSDSFile}
\usage{
HSDSFile(src, domain)
}
\arguments{
\item{src}{an object of type HSDSSource, the server which exposes the file}
\item{domain}{the domain string; the file's location on the server's
file system.}
}
\value{
an initialized object of type HSDSFile
}
\description{
A HSDSFile is a representation of an HDF5 file whose contents are
exposed by an HDF5 server.
}
\details{
This function is deprecated and will be defunct in the next release.
}
\examples{
if (check_hsds()) {
src <- HSDSSource('http://hsdshdflab.hdfgroup.org')
f10x <- HSDSFile(src, '/shared/bioconductor/tenx_full.h5')
}
}
|
#' format_priors
#'
#' @param dataset_list
#' list of datasets to be analyzed of length S
#' @param config
#' list of vectors of indices 1:S of length R
#' @param p1
#' vector of probability of length S or 1
#' @param p12
#' vector of probability of length R or 1
#' @return
#' list with elements
#' p1 vector of probability of length S
#' p12 vector of probability of length R
format_priors <- function(dataset_list, config, p1, p12) {
  # Each prior must either be a scalar (recycled below) or already match the
  # length of its target (one p1 per dataset, one p12 per configuration).
  if (length(p1) != 1 && length(p1) != length(dataset_list)) {
    stop("p1 should be either the length of dataset_list or 1 if the same for all datasets")
  }
  # BUG FIX: the original message was garbled ("length of config 1").
  if (length(p12) != 1 && length(p12) != length(config)) {
    stop("p12 should be either the length of config or 1 if the same for all configurations")
  }
  # Recycle scalar priors to the required lengths.
  if (length(p1) == 1) {
    p1 <- rep(p1, length(dataset_list))
  }
  if (length(p12) == 1) {
    p12 <- rep(p12, length(config))
  }
  list(p1 = p1, p12 = p12)
}
| /R/format_priors.R | permissive | raonyguimaraes/POEMColoc | R | false | false | 906 | r | #' format_priors
#'
#' @param dataset_list
#' list of datasets to be analyzed of length S
#' @param config
#' list of vectors of indices 1:S of length R
#' @param p1
#' vector of probability of length S or 1
#' @param p12
#' vector of probability of length R or 1
#' @return
#' list with elements
#' p1 vector of probability of length S
#' p12 vector of probability of length R
format_priors <- function(dataset_list, config, p1, p12) {
  # Each prior must either be a scalar (recycled below) or already match the
  # length of its target (one p1 per dataset, one p12 per configuration).
  if (length(p1) != 1 && length(p1) != length(dataset_list)) {
    stop("p1 should be either the length of dataset_list or 1 if the same for all datasets")
  }
  # BUG FIX: the original message was garbled ("length of config 1").
  if (length(p12) != 1 && length(p12) != length(config)) {
    stop("p12 should be either the length of config or 1 if the same for all configurations")
  }
  # Recycle scalar priors to the required lengths.
  if (length(p1) == 1) {
    p1 <- rep(p1, length(dataset_list))
  }
  if (length(p12) == 1) {
    p12 <- rep(p12, length(config))
  }
  list(p1 = p1, p12 = p12)
}
|
# Query an Oracle database through a REST/native bridge: for every county,
# the cumulative distribution (SQL cume_dist) of housing units within its
# state; the JSON response is parsed into a data frame and shown as a tibble.
# NOTE(review): database credentials (USER/PASS) are hard-coded in plain text
# here -- they should be moved to environment variables or a config file kept
# out of version control.
df <- data.frame(fromJSON(getURL(URLencode(gsub("\n", " ", '129.152.144.84:5001/rest/native/?query=
"select COUNTY, STATE, HOUSINGUNITS, cume_dist()
OVER (PARTITION BY STATE order by HOUSINGUNITS) AS cume_dist
from COUNTIES2
order by 2,3 desc"
')),httpheader=c(DB='jdbc:oracle:thin:@129.152.144.84:1521:ORCL', USER='C##cs329e_mks2426', PASS='orcl_mks2426', MODE='native_mode', MODEL='model', returnDimensions = 'False', returnFor = 'JSON'), verbose = TRUE))); tbl_df(df)
| /01 SQL Crosstabs/Cume_Dist.R | no_license | mitalisathaye/DV_TableauProject2 | R | false | false | 474 | r | df <- data.frame(fromJSON(getURL(URLencode(gsub("\n", " ", '129.152.144.84:5001/rest/native/?query=
"select COUNTY, STATE, HOUSINGUNITS, cume_dist()
OVER (PARTITION BY STATE order by HOUSINGUNITS) AS cume_dist
from COUNTIES2
order by 2,3 desc"
')),httpheader=c(DB='jdbc:oracle:thin:@129.152.144.84:1521:ORCL', USER='C##cs329e_mks2426', PASS='orcl_mks2426', MODE='native_mode', MODEL='model', returnDimensions = 'False', returnFor = 'JSON'), verbose = TRUE))); tbl_df(df)
|
testlist <- list(Rs = numeric(0), atmp = 0, relh = -1.72131968218895e+83, temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.61605817623304e+76, -1.18078903777423e-90, 1.86807199752012e+112, -5.58551357556946e+160, 2.00994342527714e-162, 1.81541591304061e-79, 7.89363005545926e+139, 2.3317908961407e-93, 2.16562581831091e+161 ))
result <- do.call(meteor:::ET0_Makkink,testlist)
str(result) | /meteor/inst/testfiles/ET0_Makkink/AFL_ET0_Makkink/ET0_Makkink_valgrind_files/1615854126-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 659 | r | testlist <- list(Rs = numeric(0), atmp = 0, relh = -1.72131968218895e+83, temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.61605817623304e+76, -1.18078903777423e-90, 1.86807199752012e+112, -5.58551357556946e+160, 2.00994342527714e-162, 1.81541591304061e-79, 7.89363005545926e+139, 2.3317908961407e-93, 2.16562581831091e+161 ))
result <- do.call(meteor:::ET0_Makkink,testlist)
str(result) |
#' Plugin update of Constantine's C, using ynew
#'
#' Updates the matrix stored in \code{C$mat} to account for the candidate
#' design point \code{xnew}, reusing quantities from the already fitted model
#' in \code{C$model} (only the nugget is re-predicted; no refit).
#' @param C A const_C object, the result of a call to C_GP
#' @param xnew The new design point
#' @return The updated symmetric matrix, same dimension as \code{C$mat}.
#' @noRd
expectCup <- function(C, xnew){
# Promote a bare vector to a one-row design matrix.
if(is.null(nrow(xnew))) xnew <- matrix(xnew, nrow = 1)
nvar <- ncol(xnew)
Cup <- C$mat
# Cross-covariances between xnew and the existing design X0.
kn1 <- cov_gen(xnew, C$model$X0, theta = C$model$theta, type = C$model$covtype)
# for shorter expressions
theta <- sqrt(C$model$theta/2)
# Scaled nugget at xnew (nugs.only avoids computing a full prediction).
new_lambda <- predict(object = C$model, x = xnew, nugs.only = TRUE)$nugs/C$model$nu_hat
# vn: predictive-variance term at xnew, 1 - k' Ki k, plus nugget and jitter.
vn <- drop(1 - kn1 %*% tcrossprod(C$model$Ki, kn1)) + new_lambda + C$model$eps
# precomputations
Kikn <- tcrossprod(C$model$Ki, kn1)
# Update every (i, j) entry from the upper triangle; symmetry is preserved
# by writing both [i, j] and [j, i].
for(i in 1:nvar) {
for(j in i:nvar){
wa <- drop(W_kappa_ij2(C$model$X0, xnew, theta = theta, i - 1, j - 1, ct = C$ct)) # w(X, xnew)
wb <- drop(W_kappa_ij2(xnew, rbind(C$model$X0, xnew), theta = theta, i - 1, j - 1, ct = C$ct)) # c(w(xnew, X), w(xnew, xnew))
w <- wb[length(wb)]# w(xnew, xnew)
wb <- wb[-length(wb)]
Cup[i, j] <- Cup[j, i] <- C$mat[i, j] + crossprod(wa + wb, Kikn/vn) - crossprod(Kikn, C$Wij[[i]][[j]]) %*% Kikn/vn - w/vn
}
}
return(Cup)
}
| /R/deprecated.R | no_license | cran/activegp | R | false | false | 1,177 | r | #' Plugin update of Constantine's C, using ynew
#' @param C A const_C object, the result of a call to C_GP
#' @param xnew The new design point
#' @noRd
expectCup <- function(C, xnew){
# Promote a bare vector to a one-row design matrix.
if(is.null(nrow(xnew))) xnew <- matrix(xnew, nrow = 1)
nvar <- ncol(xnew)
Cup <- C$mat
# Cross-covariances between xnew and the existing design X0.
kn1 <- cov_gen(xnew, C$model$X0, theta = C$model$theta, type = C$model$covtype)
# for shorter expressions
theta <- sqrt(C$model$theta/2)
# Scaled nugget at xnew (nugs.only avoids computing a full prediction).
new_lambda <- predict(object = C$model, x = xnew, nugs.only = TRUE)$nugs/C$model$nu_hat
# vn: predictive-variance term at xnew, 1 - k' Ki k, plus nugget and jitter.
vn <- drop(1 - kn1 %*% tcrossprod(C$model$Ki, kn1)) + new_lambda + C$model$eps
# precomputations
Kikn <- tcrossprod(C$model$Ki, kn1)
# Update every (i, j) entry from the upper triangle; symmetry is preserved
# by writing both [i, j] and [j, i].
for(i in 1:nvar) {
for(j in i:nvar){
wa <- drop(W_kappa_ij2(C$model$X0, xnew, theta = theta, i - 1, j - 1, ct = C$ct)) # w(X, xnew)
wb <- drop(W_kappa_ij2(xnew, rbind(C$model$X0, xnew), theta = theta, i - 1, j - 1, ct = C$ct)) # c(w(xnew, X), w(xnew, xnew))
w <- wb[length(wb)]# w(xnew, xnew)
wb <- wb[-length(wb)]
Cup[i, j] <- Cup[j, i] <- C$mat[i, j] + crossprod(wa + wb, Kikn/vn) - crossprod(Kikn, C$Wij[[i]][[j]]) %*% Kikn/vn - w/vn
}
}
return(Cup)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Lab8.R
\name{clt_pois}
\alias{clt_pois}
\title{Central Limit Theorem on Poisson Distribution
Produce plots pertaining the distribution of the sample mean from a population distributed as
Poisson distribution}
\usage{
clt_pois(n, iter, lambda = 10, ...)
}
\arguments{
\item{n}{number of sample}
\item{iter}{number of iteration}
\item{lambda}{the rate of poisson distribution}
\item{...}{additional arguments passed on to the underlying plotting functions}
}
\value{
plots simulating CLT outcomes of Poisson distribution
}
\description{
Central Limit Theorem on Poisson Distribution
Produce plots pertaining the distribution of the sample mean from a population distributed as
Poisson distribution
}
\examples{
clt_pois(n = 50, iter = 100000)
}
| /man/clt_pois.Rd | no_license | fraclad/math4753 | R | false | true | 762 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Lab8.R
\name{clt_pois}
\alias{clt_pois}
\title{Central Limit Theorem on Poisson Distribution
Produce plots pertaining the distribution of the sample mean from a population distributed as
Poisson distribution}
\usage{
clt_pois(n, iter, lambda = 10, ...)
}
\arguments{
\item{n}{number of sample}
\item{iter}{number of iteration}
\item{lambda}{the rate of poisson distribution}
\item{...}{additional arguments passed on to the underlying plotting functions}
}
\value{
plots simulating CLT outcomes of Poisson distribution
}
\description{
Central Limit Theorem on Poisson Distribution
Produce plots pertaining the distribution of the sample mean from a population distributed as
Poisson distribution
}
\examples{
clt_pois(n = 50, iter = 100000)
}
|
library(fTrading)
library(zoo)
library(forecast)
library(tseries)
library(ggplot2)
# Simulated series: linear trend + sinusoidal seasonality + Gaussian noise.
x <- 1:30
t <- 2*x
s <- 12*sin(2*x/pi)
e <- 4*rnorm(30)   # NOTE(review): no set.seed(), so runs are not reproducible
y <- t + s + e
# Collect the components in a data frame (Spanish column names:
# fenomeno = observed series, tendencia = trend, estacionalidad = seasonality).
datos <- data.frame(fenomeno = y,
tendencia = t,
estacionalidad = s,
error = e,
tiempo = x)
# Smoothing parameters for the level/trend (Holt) recursions below.
alpha = 0.6
beta = 0.6
# Manual worked example of a single update step at time 3:
n2 <- y[2]            # initial level
t2 <- y[2]-y[1]       # initial trend
n3 <- alpha*y[3]+(1-alpha)*(n2+t2)
t3 <- beta*(n3-n2) + (1-beta)*t2
y3_e <- n3 + t3       # one-step-ahead forecast for time 3
error = y3_e - y[3]   # forecast error
# Holt's linear (level + trend) exponential smoothing applied over the whole
# series, printing the one-step-ahead forecast error at each step.
predicciones <- numeric(length(y))  # preallocate: one forecast per observation
nia <- y[2]          # previous level (same initialisation as the worked example)
tia <- y[2] - y[1]   # previous trend
for (i in seq_along(y)) {
  ni <- alpha * y[i] + (1 - alpha) * (nia + tia)  # level update
  # BUG FIX: the trend update must use the *previous* trend `tia`; the
  # original used the fixed initial trend `t2`, so the `tia <- ti` update
  # below never fed back into the recursion.
  ti <- beta * (ni - nia) + (1 - beta) * tia
  yi_e <- ni + ti                                 # one-step-ahead forecast
  print(yi_e - y[i])                              # forecast error
  nia <- ni
  tia <- ti
  predicciones[i] <- yi_e
}
plot(y, type = "l")
lines(predicciones, col = "blue")
# Level-only algorithm (exponential moving average) --------------------
activos <- MSFT # fTrading sample data: Microsoft daily prices/volume
activos_train <- activos[1:(nrow(activos)-20), "Close"] # closing prices (train)
activos_test <- activos[(nrow(activos)-20+1):nrow(activos), "Close"] # closing prices (held-out test)
activos_ajustados <- emaTA(activos_train, lambda = 0.189)  # exponential moving average
seriesPlot(activos_train)
lines(activos_ajustados, col="red")
predicciones <- predict(activos_ajustados, 20)   # 20-step-ahead forecast
sum((predicciones$mean - activos_test)^2)        # sum of squared forecast errors
plot(predicciones)
# Level-and-trend / seasonal algorithms (Holt-Winters) -----------------
serie <- co2   # built-in monthly CO2 series
plot(serie)
fit <- stl(serie, s.window="period")   # seasonal-trend decomposition
plot(fit)
monthplot(serie)
seasonplot(serie)
# Fitting the forecasting models
# Level only (simple exponential smoothing)
m <- HoltWinters(serie, beta = FALSE, gamma = FALSE)
plot(m)
# Level and trend (Holt)
m1 <- HoltWinters(serie, gamma = FALSE)
plot(m1)
# Level, trend and seasonality (full Holt-Winters)
m2 <- HoltWinters(serie)
plot(m2)
# Forecast future values
pred <- forecast(m, 12)
plot(pred)
accuracy(pred)
| /M7_series_tiempo/suavizamiento_exponencial.R | no_license | macc-urosario/dcd2019 | R | false | false | 1,956 | r | library(fTrading)
library(zoo)
library(forecast)
library(tseries)
library(ggplot2)
x <- 1:30
t <- 2*x
s <- 12*sin(2*x/pi)
e <- 4*rnorm(30)
y <- t + s + e
datos <- data.frame(fenomeno = y,
tendencia = t,
estacionalidad = s,
error = e,
tiempo = x)
alpha = 0.6
beta = 0.6
n2 <- y[2]
t2 <- y[2]-y[1]
n3 <- alpha*y[3]+(1-alpha)*(n2+t2)
t3 <- beta*(n3-n2) + (1-beta)*t2
y3_e <- n3 + t3
error = y3_e - y[3]
# Holt's linear (level + trend) exponential smoothing applied over the whole
# series, printing the one-step-ahead forecast error at each step.
predicciones <- numeric(length(y))  # preallocate: one forecast per observation
nia <- y[2]          # previous level (same initialisation as the worked example)
tia <- y[2] - y[1]   # previous trend
for (i in seq_along(y)) {
  ni <- alpha * y[i] + (1 - alpha) * (nia + tia)  # level update
  # BUG FIX: the trend update must use the *previous* trend `tia`; the
  # original used the fixed initial trend `t2`, so the `tia <- ti` update
  # below never fed back into the recursion.
  ti <- beta * (ni - nia) + (1 - beta) * tia
  yi_e <- ni + ti                                 # one-step-ahead forecast
  print(yi_e - y[i])                              # forecast error
  nia <- ni
  tia <- ti
  predicciones[i] <- yi_e
}
plot(y, type = "l")
lines(predicciones, col = "blue")
# Algortimo basado en el nivel ----------------------------------------
activos <- MSFT #Activos de fondos suizos, Precio y volumen diaro de microsoft y tazas de cambio
activos_train <- activos[1:(nrow(activos)-20), "Close"] # PRecios de cierre
activos_test <- activos[(nrow(activos)-20+1):nrow(activos), "Close"] # PRecios de cierre
activos_ajustados <- emaTA(activos_train, lambda = 0.189)
seriesPlot(activos_train)
lines(activos_ajustados, col="red")
predicciones <- predict(activos_ajustados, 20)
sum((predicciones$mean - activos_test)^2)
plot(predicciones)
# Algoritmo basado en nivel y tendencia -----------------------------------
serie <- co2
plot(serie)
fit <- stl(serie, s.window="period")
plot(fit)
monthplot(serie)
seasonplot(serie)
# Ajuste de los modelos de prediccion
# Ajustando sólamente el nivel de la serie
m <- HoltWinters(serie, beta = FALSE, gamma = FALSE)
plot(m)
# Ajustando el nivel y la tendencia
m1 <- HoltWinters(serie, gamma = FALSE)
plot(m1)
# Ajustando el nivel, la tendencia y la estacionalidad
m2 <- HoltWinters(serie)
plot(m2)
# Predecir valores futuros
pred <- forecast(m, 12)
plot(pred)
accuracy(pred)
|
require(tidyverse)
require(lubridate)
require(leaflet)
require(lubridate)
require(sp)
require(shinydashboard)
require(DT)
require(shinycssloaders)
require(sf)
require(plotly)
# Top navigation bar of the dashboard.
header <- dashboardHeader(title = "NYS COVID-19 TRACKER")
# Dashboard body: shared CSS plus one row holding the input controls, the
# leaflet map, and the two plot outputs.
body <- dashboardBody(
includeCSS("https://raw.githubusercontent.com/willoutcault/608_final/master/styles.css"),
fluidRow(
# Left column: date-range and variable selectors.
column(width = 3,
box(width = NULL, status = "warning",
selectInput("daterange", "Date Range: ", c("Week", "Month", "Overall"), selected = "Week")
),
box(width = NULL, status = "warning",
selectInput("variable", "Variable: ", c("Daily Cases", "Overall Cases", "Daily Tests", "Overall Tests"), selected = "Daily Cases")
)
),
# Centre column: leaflet map with a loading spinner.
column(width = 6,
box(width = NULL, solidHeader = TRUE,
leafletOutput("map") %>% withSpinner(color="#3c8dbc")
)
),
# Right column: period-comparison bar chart.
column(width = 3,
box(width = NULL, status = "warning",
# FIX: removed the stray trailing comma after "plot1", which passed an
# empty (missing) second argument to plotOutput.
plotOutput("plot1")
)
),
# Bottom: interactive time-series plot.
column(width = 9,
box(width = NULL, status = "warning",
plotlyOutput("plot2")
)
)
)
)
# Assemble the page; the sidebar is created but disabled (unused).
ui <- dashboardPage(header, dashboardSidebar(disable = TRUE), body)
# Server: loads the NYS testing data and county polygons once per session,
# then renders the leaflet map, a period-comparison bar chart and a
# time-series plot, all restricted to counties in the current map viewport.
server <- function(input,output){
# Read Data
covid.data <- read.csv("https://health.data.ny.gov/resource/xdss-u53e.csv?$limit=100000", sep=",", header = T)
county.data <- rgdal::readOGR("https://raw.githubusercontent.com/willoutcault/covid/main/cugir-007865-geojson.json")
polys_sf<-st_read("https://raw.githubusercontent.com/willoutcault/covid/main/cugir-007865-geojson.json") %>%
st_transform(crs="+init=epsg:4326")
# Save test dates as dates
covid.data$test_date <- as.Date(covid.data$test_date)
# Aggregate by date function: per-county totals over [daterange1, daterange2].
# NOTE(review): this local `aggregate` shadows base::aggregate inside the
# server -- consider renaming.
aggregate <- function(daterange1, daterange2, covid.data){
covid.data %>%
filter(test_date >= daterange1 & test_date <= daterange2) %>%
group_by(county) %>%
summarize("new_positives" = sum(new_positives),
"cumulative_number_of_positives" = sum(cumulative_number_of_positives),
"total_number_of_tests" = sum(total_number_of_tests),
"cumulative_number_of_tests" = sum(cumulative_number_of_tests))
}
# Data filter based on leaflet coordinates
# Choropleth of new positives, with a popup comparing the current period
# against the immediately preceding period of equal length.
output$map <- renderLeaflet({
# Resolve the selected date range. This if-chain is duplicated in the two
# plot renderers below; it could be factored into a single helper.
if (input$daterange=="Week"){
daterange <- c(Sys.Date()-7, Sys.Date())
}
if (input$daterange=="Month"){
daterange <- c(Sys.Date() %m-% months(1), Sys.Date())
}
if (input$daterange=="Overall"){
daterange <- c(as.Date("2020-03-01", format="%Y-%m-%d"), Sys.Date())
}
date.range <- daterange[2] - daterange[1]
covid.data.filtered <- aggregate(daterange[1], daterange[2], covid.data)
covid.data.filtered.old <- aggregate(daterange[1]-date.range, daterange[2]-date.range, covid.data)
# NOTE(review): current and previous summaries are paired by row position;
# this assumes both contain the same counties in the same order. A join on
# `county` would be safer -- TODO confirm the assumption holds here.
covid.data.filtered$pct_change <- round(((covid.data.filtered$new_positives/
covid.data.filtered.old$new_positives) - 1)*100, 2)
covid.data.filtered$old_positives <- covid.data.filtered.old$new_positives
merged.spdf <- sp::merge(county.data, covid.data.filtered, by.x="name", by.y="county")
# HTML popup: previous-period count, current-period count, percent change.
popup <- paste0("<h1>",merged.spdf@data$name," County</h1>",
"<strong>",format(daterange[1]-date.range, format="%B %d")," - ",
format(daterange[2]-date.range, format="%B %d")," :</strong>",
merged.spdf@data$old_positives, "<br>",
"<strong>",format(daterange[1], format="%B %d")," - ",
format(daterange[2], format="%B %d")," :</strong>",
merged.spdf@data$new_positives, "<br>",
"<strong>Percent Change: </strong>", merged.spdf@data$pct_change,"%")
pal <- colorNumeric(
palette = "Oranges",
domain = merged.spdf@data$new_positives)
leaflet(merged.spdf) %>%
setView(-73.8, 40.9, 8.3) %>%
addPolygons(weight = 2, popup=popup, stroke = FALSE, smoothFactor = 0.2,
fillOpacity = 1, color = ~pal(new_positives))
})
# Reactive: subset of county.data whose polygons lie within the current
# leaflet viewport (defaults to the whole world while map bounds are NA).
data_map <- reactive({
xmin <- input$map_bounds$west
xmax <- input$map_bounds$east
ymax <- input$map_bounds$north
ymin <- input$map_bounds$south
filt_bbox <- sf::st_bbox(c(xmin = ifelse(is.na(xmin), -180, xmin),
ymin = ifelse(is.na(ymin), -90, ymin),
xmax = ifelse(is.na(xmax), +180, xmax),
ymax = ifelse(is.na(ymax), +90, ymax)),
crs = st_crs(4326)) %>%
sf::st_as_sfc(.)
find_data <- sf::st_within(polys_sf, filt_bbox)
filt_data <- polys_sf[which(lengths(find_data) != 0), ]
# The last expression is the reactive's value (the assignment is local).
county.data <- county.data[county.data$county %in% filt_data$county,]
})
# Bar chart: change in new positives vs the previous period for each visible
# county; decreases are coloured green ("good"), increases red ("bad").
output$plot1<- renderPlot({
if (input$daterange=="Week"){
daterange <- c(Sys.Date()-7, Sys.Date())
}
if (input$daterange=="Month"){
daterange <- c(Sys.Date() %m-% months(1), Sys.Date())
}
if (input$daterange=="Overall"){
daterange <- c(as.Date("2020-03-01", format="%Y-%m-%d"), Sys.Date())
}
date.range <- daterange[2] - daterange[1]
covid.data.filtered <- aggregate(daterange[1], daterange[2], covid.data)
covid.data.filtered.old <- aggregate(daterange[1]-date.range, daterange[2]-date.range, covid.data)
covid.data.filtered$old_positives <- covid.data.filtered.old$new_positives
covid.data.filtered$case_count_diff <- covid.data.filtered$new_positives - covid.data.filtered$old_positives
covid.data.filtered$case_count_diff_type <- ifelse(covid.data.filtered$case_count_diff < 0, "good", "bad")
# Keep only counties currently visible on the map.
covid.data.filtered <- covid.data.filtered[covid.data.filtered$county %in% data_map()$name,]
ggplot(covid.data.filtered, aes(x=reorder(county, case_count_diff), y=case_count_diff, label=case_count_diff)) +
geom_bar(stat='identity', aes(fill=case_count_diff_type)) +
scale_fill_manual(name="Mileage",
labels = c("More Cases", "Less Cases"),
values = c("good"="#00ba38", "bad"="#f8766d")) +
labs(title= paste("This ",input$daterange," Versus Last ", input$daterange, sep="")) +
coord_flip()
})
# Time series of the selected variable, one line per visible county.
output$plot2<- renderPlotly({
if (input$daterange=="Week"){
daterange <- c(Sys.Date()-7, Sys.Date())
}
if (input$daterange=="Month"){
daterange <- c(Sys.Date() %m-% months(1), Sys.Date())
}
if (input$daterange=="Overall"){
daterange <- c(as.Date("2020-03-01", format="%Y-%m-%d"), Sys.Date())
}
covid.data.filtered <- covid.data %>%
filter(test_date >= daterange[1] & test_date <= daterange[2]) %>%
group_by(county, test_date) %>%
summarize("new_positives" = sum(new_positives),
"cumulative_number_of_positives" = sum(cumulative_number_of_positives),
"total_number_of_tests" = sum(total_number_of_tests),
"cumulative_number_of_tests" = sum(cumulative_number_of_tests))
covid.data.filtered <- covid.data.filtered[covid.data.filtered$county %in% data_map()$name,]
# Copy the selected input variable into a common `y` column for plotting.
if(input$variable == "Daily Cases"){
covid.data.filtered$y <- covid.data.filtered$new_positives
}
if(input$variable == "Overall Cases"){
covid.data.filtered$y <- covid.data.filtered$cumulative_number_of_positives
}
if(input$variable == "Daily Tests"){
covid.data.filtered$y <- covid.data.filtered$total_number_of_tests
}
if(input$variable == "Overall Tests"){
covid.data.filtered$y <- covid.data.filtered$cumulative_number_of_tests
}
# NOTE(review): setNames() with the object's own names is a no-op here.
d <- setNames(covid.data.filtered, names(covid.data.filtered))
plot_ly(d) %>%
add_lines(x = ~test_date, y = ~y, color = ~county)
})
}
shinyApp(ui = ui, server = server)
| /appV2.R | no_license | willoutcault/covid | R | false | false | 9,081 | r | require(tidyverse)
require(lubridate)
require(leaflet)
require(lubridate)
require(sp)
require(shinydashboard)
require(DT)
require(shinycssloaders)
require(sf)
require(plotly)
# Dashboard chrome ----
header <- dashboardHeader(
  title = "NYS COVID-19 TRACKER"
)

# Body: a control column (date-range and variable pickers), the leaflet
# choropleth, the per-county change bar chart, and a full-width time series.
body <- dashboardBody(
  includeCSS("https://raw.githubusercontent.com/willoutcault/608_final/master/styles.css"),
  fluidRow(
    column(width = 3,
           box(width = NULL, status = "warning",
               selectInput("daterange", "Date Range: ", c("Week", "Month", "Overall"), selected = "Week")
           ),
           box(width = NULL, status = "warning",
               selectInput("variable", "Variable: ", c("Daily Cases", "Overall Cases", "Daily Tests", "Overall Tests"), selected = "Daily Cases")
           )
    ),
    column(width = 6,
           box(width = NULL, solidHeader = TRUE,
               leafletOutput("map") %>% withSpinner(color = "#3c8dbc")
           )
    ),
    column(width = 3,
           box(width = NULL, status = "warning",
               # Dropped the stray trailing comma from plotOutput("plot1",).
               plotOutput("plot1")
           )
    ),
    column(width = 9,
           box(width = NULL, status = "warning",
               plotlyOutput("plot2")
           )
    )
  )
)

# Assemble the page; the sidebar is intentionally disabled.
ui <- dashboardPage(
  header,
  dashboardSidebar(disable = TRUE),
  body
)
# Shiny server: loads the NYS testing data once per session, then renders
# the choropleth map, the per-county change bar chart, and the time series.
server <- function(input, output){
  # Read data ----
  # Statewide daily testing results: one row per county per test date.
  covid.data <- read.csv("https://health.data.ny.gov/resource/xdss-u53e.csv?$limit=100000",
                         sep = ",", header = TRUE)
  # County polygons, as sp (for leaflet/merge) and sf (for bbox filtering).
  county.data <- rgdal::readOGR("https://raw.githubusercontent.com/willoutcault/covid/main/cugir-007865-geojson.json")
  polys_sf <- st_read("https://raw.githubusercontent.com/willoutcault/covid/main/cugir-007865-geojson.json") %>%
    st_transform(crs = "+init=epsg:4326")
  covid.data$test_date <- as.Date(covid.data$test_date)

  # Map the date-range selector to a c(start, end) Date vector.
  # (Previously this if-chain was duplicated in all three render blocks.)
  resolve_daterange <- function(choice) {
    start <- switch(choice,
                    "Week"    = Sys.Date() - 7,
                    "Month"   = Sys.Date() %m-% months(1),
                    "Overall" = as.Date("2020-03-01", format = "%Y-%m-%d"))
    c(start, Sys.Date())
  }

  # Sum the four count columns per county over [daterange1, daterange2].
  # Renamed from `aggregate` so it no longer shadows stats::aggregate().
  aggregate_range <- function(daterange1, daterange2, covid.data){
    covid.data %>%
      filter(test_date >= daterange1 & test_date <= daterange2) %>%
      group_by(county) %>%
      summarize("new_positives" = sum(new_positives),
                "cumulative_number_of_positives" = sum(cumulative_number_of_positives),
                "total_number_of_tests" = sum(total_number_of_tests),
                "cumulative_number_of_tests" = sum(cumulative_number_of_tests))
  }

  # Choropleth of new positives over the selected range, with a popup
  # comparing the current window against the preceding window.
  output$map <- renderLeaflet({
    daterange <- resolve_daterange(input$daterange)
    date.range <- daterange[2] - daterange[1]
    covid.data.filtered <- aggregate_range(daterange[1], daterange[2], covid.data)
    covid.data.filtered.old <- aggregate_range(daterange[1] - date.range, daterange[2] - date.range, covid.data)
    # NOTE(review): this assumes both aggregates return the same counties in
    # the same order; a join on county would be safer -- TODO confirm.
    covid.data.filtered$pct_change <- round(((covid.data.filtered$new_positives /
                                                covid.data.filtered.old$new_positives) - 1) * 100, 2)
    covid.data.filtered$old_positives <- covid.data.filtered.old$new_positives
    merged.spdf <- sp::merge(county.data, covid.data.filtered, by.x = "name", by.y = "county")
    popup <- paste0("<h1>", merged.spdf@data$name, " County</h1>",
                    "<strong>", format(daterange[1] - date.range, format = "%B %d"), " - ",
                    format(daterange[2] - date.range, format = "%B %d"), " :</strong>",
                    merged.spdf@data$old_positives, "<br>",
                    "<strong>", format(daterange[1], format = "%B %d"), " - ",
                    format(daterange[2], format = "%B %d"), " :</strong>",
                    merged.spdf@data$new_positives, "<br>",
                    "<strong>Percent Change: </strong>", merged.spdf@data$pct_change, "%")
    pal <- colorNumeric(
      palette = "Oranges",
      domain = merged.spdf@data$new_positives)
    leaflet(merged.spdf) %>%
      setView(-73.8, 40.9, 8.3) %>%
      addPolygons(weight = 2, popup = popup, stroke = FALSE, smoothFactor = 0.2,
                  fillOpacity = 1, color = ~pal(new_positives))
  })

  # Counties currently visible in the leaflet viewport (drives both plots).
  data_map <- reactive({
    xmin <- input$map_bounds$west
    xmax <- input$map_bounds$east
    ymax <- input$map_bounds$north
    ymin <- input$map_bounds$south
    filt_bbox <- sf::st_bbox(c(xmin = ifelse(is.na(xmin), -180, xmin),
                               ymin = ifelse(is.na(ymin), -90, ymin),
                               xmax = ifelse(is.na(xmax), +180, xmax),
                               ymax = ifelse(is.na(ymax), +90, ymax)),
                             crs = st_crs(4326)) %>%
      sf::st_as_sfc(.)
    find_data <- sf::st_within(polys_sf, filt_bbox)
    filt_data <- polys_sf[which(lengths(find_data) != 0), ]
    county.data <- county.data[county.data$county %in% filt_data$county, ]
  })

  # Diverging bar chart: change in case counts vs the preceding window.
  output$plot1 <- renderPlot({
    daterange <- resolve_daterange(input$daterange)
    date.range <- daterange[2] - daterange[1]
    covid.data.filtered <- aggregate_range(daterange[1], daterange[2], covid.data)
    covid.data.filtered.old <- aggregate_range(daterange[1] - date.range, daterange[2] - date.range, covid.data)
    covid.data.filtered$old_positives <- covid.data.filtered.old$new_positives
    covid.data.filtered$case_count_diff <- covid.data.filtered$new_positives - covid.data.filtered$old_positives
    covid.data.filtered$case_count_diff_type <- ifelse(covid.data.filtered$case_count_diff < 0, "good", "bad")
    covid.data.filtered <- covid.data.filtered[covid.data.filtered$county %in% data_map()$name, ]
    ggplot(covid.data.filtered, aes(x = reorder(county, case_count_diff), y = case_count_diff, label = case_count_diff)) +
      geom_bar(stat = 'identity', aes(fill = case_count_diff_type)) +
      # Legend title was "Mileage" -- a leftover from an unrelated example.
      scale_fill_manual(name = "Change",
                        labels = c("More Cases", "Less Cases"),
                        values = c("good" = "#00ba38", "bad" = "#f8766d")) +
      labs(title = paste("This ", input$daterange, " Versus Last ", input$daterange, sep = "")) +
      coord_flip()
  })

  # Time series of the selected variable for the visible counties.
  output$plot2 <- renderPlotly({
    daterange <- resolve_daterange(input$daterange)
    covid.data.filtered <- covid.data %>%
      filter(test_date >= daterange[1] & test_date <= daterange[2]) %>%
      group_by(county, test_date) %>%
      summarize("new_positives" = sum(new_positives),
                "cumulative_number_of_positives" = sum(cumulative_number_of_positives),
                "total_number_of_tests" = sum(total_number_of_tests),
                "cumulative_number_of_tests" = sum(cumulative_number_of_tests))
    covid.data.filtered <- covid.data.filtered[covid.data.filtered$county %in% data_map()$name, ]
    # Pick the y column for the chosen variable (the previous
    # setNames(x, names(x)) copy was a no-op and was removed).
    covid.data.filtered$y <- switch(input$variable,
                                    "Daily Cases"   = covid.data.filtered$new_positives,
                                    "Overall Cases" = covid.data.filtered$cumulative_number_of_positives,
                                    "Daily Tests"   = covid.data.filtered$total_number_of_tests,
                                    "Overall Tests" = covid.data.filtered$cumulative_number_of_tests)
    plot_ly(covid.data.filtered) %>%
      add_lines(x = ~test_date, y = ~y, color = ~county)
  })
}
shinyApp(ui = ui, server = server)
|
# convert all pdf to png ------------------------------------------
# Convert a PDF to PNG by shelling out to an external converter on PATH.
# NOTE(review): the command name "2png" looks truncated (pdf2png?) -- confirm
# the tool name on the target machine before relying on this.
pdf2png <- function(fname, dpi = 600) {
system(sprintf("2png -ag=4 -at=4 -dpi=%i %s", dpi, fname))
}
# Convert the two named figures, then every PDF found under figures/.
pdf2png("figures/reliable_model_plots.pdf")
pdf2png("figures/indicator_plots.pdf")
pdfs <- paste0("figures/", dir("figures/", pattern = "*.pdf"))
lapply(pdfs, pdf2png)
# Ghostscript (Windows 64-bit binary "gswin64c"): report the bounding box of
# page 1 of the reliable-model figure ...
system("gswin64c -q -dBATCH -dNOPAUSE -sDEVICE=bbox -dLastPage=1 figures/reliable_model_plots.pdf")
# ... then rasterise page 1 at the cropped pixel size with a translate ...
system(sprintf('gswin64c -o figures/out.png -sDEVICE=pngalpha -g%ix%i -dLastPage=1 -c "%i %i translate" -f figures/reliable_model_plots.pdf',
550-45+1, 673-170+1, 0, 0))
# ... and again at 720 dpi with the pixel dimensions scaled up tenfold.
system(sprintf("gswin64c -o figures/out.png -sDEVICE=pngalpha -r720 -g%ix%i -dLastPage=1 -f figures/reliable_model_plots.pdf",
10*(550-45+1), 10*(673-170+1)))
| /scripts/OxygenDebt/zz_convert_plots_to_png.R | no_license | susanii/HEAT | R | false | false | 807 | r |
# convert all pdf to png ------------------------------------------
pdf2png <- function(fname, dpi = 600) {
system(sprintf("2png -ag=4 -at=4 -dpi=%i %s", dpi, fname))
}
pdf2png("figures/reliable_model_plots.pdf")
pdf2png("figures/indicator_plots.pdf")
pdfs <- paste0("figures/", dir("figures/", pattern = "*.pdf"))
lapply(pdfs, pdf2png)
system("gswin64c -q -dBATCH -dNOPAUSE -sDEVICE=bbox -dLastPage=1 figures/reliable_model_plots.pdf")
system(sprintf('gswin64c -o figures/out.png -sDEVICE=pngalpha -g%ix%i -dLastPage=1 -c "%i %i translate" -f figures/reliable_model_plots.pdf',
550-45+1, 673-170+1, 0, 0))
system(sprintf("gswin64c -o figures/out.png -sDEVICE=pngalpha -r720 -g%ix%i -dLastPage=1 -f figures/reliable_model_plots.pdf",
10*(550-45+1), 10*(673-170+1)))
|
# Standard R CMD check entry point for the MLAKdane package test suite.
library(testthat)
library(MLAKdane)
# The suite runner is currently disabled; uncomment to execute the tests.
#test_check("MLAKdane")
| /tests/testthat.R | no_license | zozlak/MLAKdane | R | false | false | 61 | r | library(testthat)
library(MLAKdane)
#test_check("MLAKdane")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/FDRSU.R
\name{GTBY.p.adjust}
\alias{GTBY.p.adjust}
\title{The adjusted p-values for Gilbert-Tarone-BY step-up FDR controlling procedure.}
\usage{
GTBY.p.adjust(p, p.set, alpha, make.decision)
}
\arguments{
\item{p}{numeric vector of p-values (possibly with \code{\link[base]{NA}}s). Any other R object is coerced by \code{\link[base]{as.numeric}}. Same as in \code{\link[stats]{p.adjust}}.}
\item{p.set}{a list of numeric vectors, where each vector is the vector of all attainable p-values containing the available p-value for the corresponding hypothesis.}
\item{alpha}{significant level used to compare with adjusted p-values to make decisions, the default value is 0.05.}
\item{make.decision}{logical; if \code{TRUE}, then the output include the decision rules compared adjusted p-values with significant level \eqn{\alpha}}
}
\value{
A numeric vector of the adjusted p-values (of the same length as \code{p}).
}
\description{
The function for calculating the adjusted p-values based on original available p-values and all attainable p-values.
}
\examples{
p <- c(pbinom(1,8,0.5),pbinom(1,5,0.75),pbinom(1,6,0.6))
p.set <-list(pbinom(0:8,8,0.5),pbinom(0:5,5,0.75),pbinom(0:6,6,0.6))
GTBY.p.adjust(p,p.set)
}
\references{
Gilbert, P. B. (2005).
A modified false discovery rate multiple-comparisons procedure for discrete data, applied to human immunodeficiency virus genetics.
\emph{Journal of the Royal Statistical Society: Series C (Applied Statistics)}, \strong{54}: 143-158.
Benjamini, Y., and Yekutieli, D. (2001).
The control of the false discovery rate in multiple testing under dependency.
\emph{Annals of Statistics}, \strong{29}: 1165-1188.
}
\seealso{
\code{\link{GTBH.p.adjust}}, \code{\link{MBH.p.adjust}}, \code{\link{MBY.p.adjust}}
}
\author{
Yalin Zhu
}
| /man/GTBY.p.adjust.Rd | no_license | allenzhuaz/MHTdiscrete | R | false | true | 1,850 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/FDRSU.R
\name{GTBY.p.adjust}
\alias{GTBY.p.adjust}
\title{The adjusted p-values for Gilbert-Tarone-BY step-up FDR controlling procedure.}
\usage{
GTBY.p.adjust(p, p.set, alpha, make.decision)
}
\arguments{
\item{p}{numeric vector of p-values (possibly with \code{\link[base]{NA}}s). Any other R object is coerced by \code{\link[base]{as.numeric}}. Same as in \code{\link[stats]{p.adjust}}.}
\item{p.set}{a list of numeric vectors, where each vector is the vector of all attainable p-values containing the available p-value for the corresponding hypothesis.}
\item{alpha}{significant level used to compare with adjusted p-values to make decisions, the default value is 0.05.}
\item{make.decision}{logical; if \code{TRUE}, then the output include the decision rules compared adjusted p-values with significant level \eqn{\alpha}}
}
\value{
A numeric vector of the adjusted p-values (of the same length as \code{p}).
}
\description{
The function for calculating the adjusted p-values based on original available p-values and all attainable p-values.
}
\examples{
p <- c(pbinom(1,8,0.5),pbinom(1,5,0.75),pbinom(1,6,0.6))
p.set <-list(pbinom(0:8,8,0.5),pbinom(0:5,5,0.75),pbinom(0:6,6,0.6))
GTBY.p.adjust(p,p.set)
}
\references{
Gilbert, P. B. (2005).
A modified false discovery rate multiple-comparisons procedure for discrete data, applied to human immunodeficiency virus genetics.
\emph{Journal of the Royal Statistical Society: Series C (Applied Statistics)}, \strong{54}: 143-158.
Benjamini, Y., and Yekutieli, D. (2001).
The control of the false discovery rate in multiple testing under dependency.
\emph{Annals of Statistics}, \strong{29}: 1165-1188.
}
\seealso{
\code{\link{GTBH.p.adjust}}, \code{\link{MBH.p.adjust}}, \code{\link{MBY.p.adjust}}
}
\author{
Yalin Zhu
}
|
## Get Data
# Read the full household power dataset and keep only 1-2 Feb 2007.
# The original wrapped the frame in data.table() without loading the
# data.table package, and tested the two dates with xor(); since a row can
# match at most one date, a plain %in% expresses the intent correctly.
path <- getwd()
file <- "household_power_consumption.txt"
filepath <- paste(path, file, sep = "/")
data <- read.table(filepath, header = TRUE, sep = ";", na.strings = "?")
data <- subset(data, Date %in% c("1/2/2007", "2/2/2007"))

## create Date & Time field
# Combine the date and time columns into a single POSIX timestamp.
DateTime <- paste(data$Date, data$Time, sep = " ")
DateTime <- strptime(DateTime, "%d/%m/%Y %H:%M:%S")
data <- cbind(data, DateTime)
data$Date <- as.Date(data$Date, "%d/%m/%Y")

## Plot 3 and export to png format
# Overlay the three sub-metering series and write plot3.png (480x480 px).
png(filename = paste(path, "plot3.png", sep = "/"), width = 480, height = 480, units = "px")
par(mfrow = c(1, 1))
plot(data$DateTime, data$Sub_metering_1, type = "n", ylab = "Energy sub metering", xlab = "")
lines(data$DateTime, data$Sub_metering_1, type = "l", col = "Black")
lines(data$DateTime, data$Sub_metering_2, type = "l", col = "Red")
lines(data$DateTime, data$Sub_metering_3, type = "l", col = "Blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lty = c(1, 1, 1), col = c("Black", "Red", "Blue"))
dev.off()
| /plot3.R | no_license | SwissRef/ExData_Plotting1 | R | false | false | 1,075 | r |
## Get Data
path <- getwd()
file <- "household_power_consumption.txt"
filepath <- paste(path, file, sep="/")
data <- subset(data.table(read.table(filepath,header=TRUE,sep=";",na.strings="?")),
xor(Date=="1/2/2007",Date== "2/2/2007"))
## create Date & Time field
DateTime <- paste(data$Date, data$Time, sep=" ")
DateTime <- strptime(DateTime, "%d/%m/%Y %H:%M:%S")
data <- as.data.frame(data)
data <- cbind(data, DateTime)
data$Date <- as.Date(data$Date, "%d/%m/%Y")
## Plot 3 and export to png format
png(filename = paste(path,"plot3.png",sep="/"), width = 480, height = 480, units = "px")
par(mfrow=c(1,1))
plot(data$DateTime, data$Sub_metering_1,type="n", ylab="Energy sub metering", xlab="")
lines(data$DateTime, data$Sub_metering_1,type="l", col ="Black")
lines(data$DateTime, data$Sub_metering_2,type="l", col ="Red")
lines(data$DateTime, data$Sub_metering_3,type="l", col ="Blue")
legend("topright", c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),
lty=c(1,1,1), col = c("Black", "Red", "Blue"))
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.configservice_operations.R
\name{delete_retention_configuration}
\alias{delete_retention_configuration}
\title{Deletes the retention configuration}
\usage{
delete_retention_configuration(RetentionConfigurationName)
}
\arguments{
\item{RetentionConfigurationName}{[required] The name of the retention configuration to delete.}
}
\description{
Deletes the retention configuration.
}
\section{Accepted Parameters}{
\preformatted{delete_retention_configuration(
RetentionConfigurationName = "string"
)
}
}
| /service/paws.configservice/man/delete_retention_configuration.Rd | permissive | CR-Mercado/paws | R | false | true | 588 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.configservice_operations.R
\name{delete_retention_configuration}
\alias{delete_retention_configuration}
\title{Deletes the retention configuration}
\usage{
delete_retention_configuration(RetentionConfigurationName)
}
\arguments{
\item{RetentionConfigurationName}{[required] The name of the retention configuration to delete.}
}
\description{
Deletes the retention configuration.
}
\section{Accepted Parameters}{
\preformatted{delete_retention_configuration(
RetentionConfigurationName = "string"
)
}
}
|
\name{eNchange-package}
\alias{eNchange-package}
\alias{eNchange}
\docType{package}
\title{
Ensemble Methods for Multiple Change-Point Detection
}
\description{
Implements a segmentation algorithm for multiple change-point detection in univariate time series using the Ensemble Binary Segmentation of Korkas (2020) <arXiv:2003.03649>.
}
\details{
We propose a new technique for
consistent estimation of the number and locations of the
change-points in the structure of an irregularly spaced time series. The
core of the segmentation procedure is the Ensemble Binary Segmentation
method (EBS), a technique in which a
large number of multiple change-point detection tasks using the Binary Segmentation (BS) method are applied on sub-samples of the
data of differing lengths, and then the results are combined to create
an overall answer. This methodology is applied to irregularly spaced time series models such as the time-varying Autoregressive Conditional Duration model or the time-varying Hawkes process.
}
\author{
Karolos K. Korkas <kkorkas@yahoo.co.uk>.
Maintainer: Karolos K. Korkas <kkorkas@yahoo.co.uk>
}
\references{
Korkas Karolos. "Ensemble Binary Segmentation for irregularly spaced data with change-points" <arXiv:2003.03649>.
}
\keyword{ eNchange }
\examples{
\dontrun{
pw.acd.obj <- new("simACD")
pw.acd.obj@cp.loc <- seq(0.1,0.95,by=0.025)
pw.acd.obj@lambda_0 <- rep(c(0.5,2),1+length(pw.acd.obj@cp.loc)/2)
pw.acd.obj@alpha <- rep(0.2,1+length(pw.acd.obj@cp.loc))
pw.acd.obj@beta <- rep(0.4,1+length(pw.acd.obj@cp.loc))
pw.acd.obj@N <- 5000
pw.acd.obj <- pc_acdsim(pw.acd.obj)
ts.plot(pw.acd.obj@x,main="Ensemble BS");abline(v=EnBinSeg(pw.acd.obj@x)[[1]],col="red")
#real change-points in grey
abline(v=floor(pw.acd.obj@cp.loc*pw.acd.obj@N),col="grey",lty=2)
ts.plot(pw.acd.obj@x,main="Standard BS");abline(v=BinSeg(pw.acd.obj@x)[[1]],col="blue")
#real change-points in grey
abline(v=floor(pw.acd.obj@cp.loc*pw.acd.obj@N),col="grey",lty=2)
}
}
| /man/eNchange-package.Rd | no_license | cran/eNchange | R | false | false | 2,026 | rd | \name{eNchange-package}
\alias{eNchange-package}
\alias{eNchange}
\docType{package}
\title{
Ensemble Methods for Multiple Change-Point Detection
}
\description{
Implements a segmentation algorithm for multiple change-point detection in univariate time series using the Ensemble Binary Segmentation of Korkas (2020) <arXiv:2003.03649>.
}
\details{
We propose a new technique for
consistent estimation of the number and locations of the
change-points in the structure of an irregularly spaced time series. The
core of the segmentation procedure is the Ensemble Binary Segmentation
method (EBS), a technique in which a
large number of multiple change-point detection tasks using the Binary Segmentation (BS) method are applied on sub-samples of the
data of differing lengths, and then the results are combined to create
an overall answer. This methodology is applied to irregularly spaced time series models such as the time-varying Autoregressive Conditional Duration model or the time-varying Hawkes process.
}
\author{
Karolos K. Korkas <kkorkas@yahoo.co.uk>.
Maintainer: Karolos K. Korkas <kkorkas@yahoo.co.uk>
}
\references{
Korkas Karolos. "Ensemble Binary Segmentation for irregularly spaced data with change-points" <arXiv:2003.03649>.
}
\keyword{ eNchange }
\examples{
\dontrun{
pw.acd.obj <- new("simACD")
pw.acd.obj@cp.loc <- seq(0.1,0.95,by=0.025)
pw.acd.obj@lambda_0 <- rep(c(0.5,2),1+length(pw.acd.obj@cp.loc)/2)
pw.acd.obj@alpha <- rep(0.2,1+length(pw.acd.obj@cp.loc))
pw.acd.obj@beta <- rep(0.4,1+length(pw.acd.obj@cp.loc))
pw.acd.obj@N <- 5000
pw.acd.obj <- pc_acdsim(pw.acd.obj)
ts.plot(pw.acd.obj@x,main="Ensemble BS");abline(v=EnBinSeg(pw.acd.obj@x)[[1]],col="red")
#real change-points in grey
abline(v=floor(pw.acd.obj@cp.loc*pw.acd.obj@N),col="grey",lty=2)
ts.plot(pw.acd.obj@x,main="Standard BS");abline(v=BinSeg(pw.acd.obj@x)[[1]],col="blue")
#real change-points in grey
abline(v=floor(pw.acd.obj@cp.loc*pw.acd.obj@N),col="grey",lty=2)
}
}
|
# Unroot the Newick tree used as codeml input (written back out below).
library(ape)
testtree <- read.tree("3712_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="3712_0_unrooted.txt") | /codeml_files/newick_trees_processed/3712_0/rinput.R | no_license | DaniBoo/cyanobacteria_project | R | false | false | 135 | r | library(ape)
testtree <- read.tree("3712_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="3712_0_unrooted.txt") |
/Postwork/Sesión 4/Postwork 4.R | no_license | CarolinaBer/BEDU_R | R | false | false | 8,654 | r | ||
## functions to write x- file
## functions to write x- file

# Open `out_file` for writing and return the connection. Sets the global
# encoding option, which the other write_* helpers rely on (note this is a
# process-wide side effect that is not restored on exit). When the file
# already exists and overwrite is FALSE, a sibling "copy-<rand>_<name>" is
# opened instead so the original is never clobbered.
make_archive <- function(out_file, overwrite = FALSE, encoding){
  options(encoding = encoding)
  if (file.exists(out_file) && !overwrite) {
    # Build the sibling path with dirname()/basename(); the previous manual
    # strsplit() indexing (tmpvar[1:0]) misbehaved for paths with no "/".
    rnum <- round(runif(1, 10000, 20000), 0)
    out_file <- file.path(dirname(out_file),
                          paste0("copy-", rnum, "_", basename(out_file)))
  }
  file(out_file, open = "w")
}
# Write the experiment header and the *GENERAL / @PEOPLE / @ADDRESS / @SITE
# section of a DSSAT X-file to the open connection `name_exp`.
# `information` must carry DETAILS, PEOPLE, ADDRESS and SITE entries (see
# the make_details() call in the commented example below this function);
# each value is left-justified in a 12-character field.
write_details <- function(name_exp, information){
# General stuff
# NOTE(review): changes the session-wide encoding option without restoring it.
options(encoding = "UTF-8")
cat(paste0(information$DETAILS, "\n"), file = name_exp)
cat("\n",file = name_exp)
cat("*GENERAL\n@PEOPLE\n", file = name_exp)
cat(paste(sprintf("%-12s", as.character(information$PEOPLE)), "\n", sep = ""), file = name_exp)
cat("@ADDRESS\n", file = name_exp)
cat(paste(sprintf("%-12s", as.character(information$ADDRESS)), "\n", sep = ""), file = name_exp)
cat("@SITE\n", file = name_exp)
cat(paste(sprintf("%-12s", as.character(information$SITE)), "\n", sep = ""), file = name_exp)
}
# information <- make_details()
# write_details(proof, information)
# close(proof)
# Write the *TREATMENTS section: one fixed-width row per treatment holding
# the rotation flags (N, R, O, C), the treatment name, and the section
# pointers (CU, FL, SA, IC, MP, MI, MF, MR, MC, MT, ME, MH, SM).
# `information` is a data frame with exactly those columns.
write_treatments <- function(name_exp, information){
cat("*TREATMENTS -------------FACTOR LEVELS------------\n", file = name_exp)
cat("@N R O C TNAME.................... CU FL SA IC MP MI MF MR MC MT ME MH SM\n", file = name_exp)
# NOTE(review): 1:nrow() misbehaves on a zero-row frame; seq_len() is safer.
for (i in 1:nrow(information)) {
# Positional sprintf (%1$..., %2$...) pins each value to its DSSAT column.
cat(paste(sprintf("%1$2d%2$2d%3$2d%4$2d",as.integer(information$N[i]),as.integer(information$R[i]),
as.integer(information$O[i]),as.integer(information$C[i])),
" ",sprintf("%1$-25s%2$3d%3$3d%4$3d%5$3d%6$3d%7$3d%8$3d%9$3d%10$3d%11$3d%12$3d%13$3d%14$3d",information$TNAME[i],
as.integer(information$CU[i]),as.integer(information$FL[i]),as.integer(information$SA[i]),
as.integer(information$IC[i]),as.integer(information$MP[i]),as.integer(information$MI[i]),
as.integer(information$MF[i]),as.integer(information$MR[i]),as.integer(information$MC[i]),
as.integer(information$MT[i]),as.integer(information$ME[i]),as.integer(information$MH[i]),
as.integer(information$SM[i])),
"\n", sep = ""), file = name_exp)
}
cat("\n", file = name_exp)
}
# write_details(proof, make_treatments(IC, MI, MF, MH))
# close(proof)
# Write the *CULTIVARS section: one row per cultivar with the treatment
# level (C), crop code (CR), cultivar id (INGENO) and name (CNAME).
# `information` is a data frame with those four columns.
write_cultivars <- function(name_exp, information){
cat("*CULTIVARS\n", file = name_exp)
cat("@C CR INGENO CNAME\n", file = name_exp)
for (i in 1:nrow(information)) {
cat(paste(sprintf("%2d",as.integer(information$C[i]))," ",sprintf("%2s", information$CR[i]),
" ", sprintf("%6s",information$INGENO[i])," ",sprintf("%-12s",information$CNAME[i]),
"\n", sep = ""), file = name_exp)
}
cat("\n", file = name_exp)
}
# write_cultivars(proof, make_cultivars(CR, INGENO, CNAME))
# close(proof)
# Write the *FIELDS section of a DSSAT X-file: the field/station/soil row,
# then the coordinates/geometry row, both in DSSAT's fixed-width layout.
# `information` is a data frame with one row per field and the columns named
# in the two header lines below.
write_fields <- function(name_exp, information){
  cat("*FIELDS\n", file = name_exp)
  cat("@L ID_FIELD WSTA.... FLSA FLOB FLDT FLDD FLDS FLST SLTX SLDP ID_SOIL FLNAME\n", file = name_exp)
  for (i in seq_len(nrow(information))) {
    # NOTE(review): SLTX (soil texture) is coerced to integer here; confirm
    # upstream supplies a numeric code rather than a texture string.
    cat(paste(sprintf("%2d", as.integer(information$L[i])), " ", sprintf("%-8s", information$ID_FIELD[i]),
              " ", sprintf("%-8s", information$WSTA[i]), sprintf("%6d", as.integer(information$FLSA[i])),
              sprintf("%6d", as.integer(information$FLOB[i])), sprintf("%6s", information$FLDT[i]),
              sprintf("%6d", as.integer(information$FLDD[i])), sprintf("%6s", as.integer(information$FLDS[i])),
              sprintf("%6d", as.integer(information$FLST[i])), " ", sprintf("%-4d", as.integer(information$SLTX[i])),
              sprintf("%6d", as.integer(information$SLDP[i])), " ", sprintf("%-10s", information$ID_SOIL[i]), " ",
              sprintf("%-12s", information$FLNAME[i]), "\n", sep = ""), file = name_exp)
  }
  cat("\n", file = name_exp)
  cat("@L ..........XCRD ...........YCRD .....ELEV .............AREA .SLEN .FLWR .SLAS FLHST FHDUR\n", file = name_exp)
  for (i in seq_len(nrow(information))) {
    # XCRD/YCRD are decimal coordinates and need a float conversion: the
    # previous "%14.d"/"%15.d" is not valid for non-integer values and made
    # sprintf() fail at run time. %14.3f / %15.3f match the header widths
    # (and the intent shown by the dead code this replaces).
    cat(paste(sprintf("%2d", as.integer(information$L[i])), " ", sprintf("%14.3f", information$XCRD[i]), " ",
              sprintf("%15.3f", information$YCRD[i]), " ", sprintf("%9d", as.integer(information$ELEV[i])), " ",
              sprintf("%17d", as.integer(information$AREA[i])), " ", sprintf("%5d", as.integer(information$SLEN[i])), " ",
              sprintf("%5d", as.integer(information$FLWR[i])), " ", sprintf("%5d", as.integer(information$SLAS[i])), " ",
              sprintf("%5d", as.integer(information$FLHST[i])), " ", sprintf("%5d", as.integer(information$FHDUR[i])),
              "\n", sep = ""), file = name_exp)
  }
  cat("\n", file = name_exp)
}
# write_fields(proof, make_fields(WSTA, ID_SOIL))
# close(proof)
# Write the *INITIAL CONDITIONS section. `information` is a list with:
#   $field  -- profile-level values (PCR, ICDAT, ICRT, ..., ICNAME)
#   $values -- per-layer table with columns ICBL, SH20, SNH4, SNO3
write_IC <- function(name_exp, information){
#initial conditions
cat("*INITIAL CONDITIONS\n",file=name_exp)
cat("@C PCR ICDAT ICRT ICND ICRN ICRE ICWD ICRES ICREN ICREP ICRIP ICRID ICNAME\n",file=name_exp)
cat(paste(sprintf("%2d",as.integer(information$field$C))," ",sprintf("%5s",information$field$PCR),
" ",sprintf("%5s",information$field$ICDAT)," ",sprintf("%5d",as.integer(information$field$ICRT)),
" ",sprintf("%5d",as.integer(information$field$ICND))," ",sprintf("%5d",as.integer(information$field$ICRN)),
" ",sprintf("%5d",as.integer(information$field$ICRE))," ",sprintf("%5d",as.integer(information$field$ICWD)),
" ",sprintf("%5d",as.integer(information$field$ICRES))," ",sprintf("%5d",as.integer(information$field$ICREN)),
" ",sprintf("%5d",as.integer(information$field$ICREP))," ",sprintf("%5d",as.integer(information$field$ICRIP)),
" ",sprintf("%5d",as.integer(information$field$ICRID))," ",sprintf("%-12s",information$field$ICNAME),
"\n",sep=""),file=name_exp)
cat("@C ICBL SH2O SNH4 SNO3\n",file=name_exp)
# NOTE(review): the header says SH2O but the column accessed is 'SH20'
# (digit zero) -- confirm against make_IC(). Also, "%5d" for the second and
# third values will fail if SH20 holds non-integer water contents; the
# first value of each row is hard-coded to treatment level 1.
for (i in 1:nrow(information$values)) {
cat(paste(sprintf("%2d %5d %5d %5.2f %5.2f", 1, information$values[i, 'ICBL'], information$values[i, 'SH20'],
information$values[i, 'SNH4'], information$values[i , 'SNO3'])), "\n", file = name_exp)
}
cat("\n",file=name_exp)
}
# write_IC(proof, make_IC(ICBL, SH20, SNH4, SNO3))
# Write the *FERTILIZERS (INORGANIC) section: one row per application.
# `information` is a data frame with FDATE, FMCD, FACD, FDEP, FAMN, FAMP,
# FAMK, FAMC, FAMO, FOCD and FERNAME columns; the level column is fixed to 1.
write_MF <- function(name_exp, information){
  cat("*FERTILIZERS (INORGANIC)\n", file = name_exp)
  cat("@F FDATE FMCD FACD FDEP FAMN FAMP FAMK FAMC FAMO FOCD FERNAME \n", file = name_exp)
  for (i in seq_len(nrow(information))) {
    # FERNAME is a free-text name, so it takes a left-justified string
    # conversion ("%-5s"); the previous "%5-i" is not a valid sprintf()
    # format and failed at run time.
    # NOTE(review): FACD/FOCD are printed with %5i -- confirm upstream
    # supplies numeric codes rather than strings like "AP001".
    cat(paste(sprintf("%2d %5i %5s %5i %5i %5.2f %5.2f %5.2f %5i %5i %5i %-5s", 1, information$FDATE[i], information$FMCD[i],
                      information$FACD[i], information$FDEP[i], information$FAMN[i], information$FAMP[i],
                      information$FAMK[i], information$FAMC[i], information$FAMO[i],
                      information$FOCD[i], information$FERNAME[i]), '\n'), file = name_exp)
  }
  cat("\n", file = name_exp)
}
# write_MF(proof, make_MF(input_fertilizer))
# Write the *PLANTING DETAILS section: a single fixed-width row built from
# the scalar entries of `information` (P, PDATE, EDATE, PPOP, ..., PLNAME).
write_pDetails <- function(name_exp, information){
#planting details
cat("*PLANTING DETAILS\n",file = name_exp)
cat("@P PDATE EDATE PPOP PPOE PLME PLDS PLRS PLRD PLDP PLWT PAGE PENV PLPH SPRL PLNAME\n",file=name_exp)
cat(paste(sprintf("%2d",as.integer(information$P))," ",sprintf("%5s",information$PDATE),
" ",sprintf("%5s",information$EDATE)," ",sprintf("%5d",as.integer(information$PPOP)),
" ",sprintf("%5d",as.integer(information$PPOE))," ",sprintf("%5s",information$PLME),
" ",sprintf("%5s",information$PLDS)," ",sprintf("%5d",as.integer(information$PLRS)),
" ",sprintf("%5d",as.integer(information$PLRD))," ",sprintf("%5d",as.integer(information$PLDP)),
" ",sprintf("%5d",as.integer(information$PLWT))," ",sprintf("%5d",as.integer(information$PAGE)),
" ",sprintf("%5d",as.integer(information$PENV))," ",sprintf("%5d",as.integer(information$PLPH)),
" ",sprintf("%5d",as.integer(information$SPRL))," ",sprintf("%29s",information$PLNAME),
"\n", sep = ""), file = name_exp)
cat("\n", file = name_exp)
}
# write_pDetails(name_exp, make_pDetails(input_pDetails))
write__sControls <- function(name_exp, information){
# Write the *SIMULATION CONTROLS section of a DSSAT X-file: five @N
# sub-records (GENERAL, OPTIONS, METHODS, MANAGEMENT, OUTPUTS), each a
# header line followed by one fixed-width data line.
#
# Args:
#   name_exp:    open writable connection (see make_archive).
#   information: one-record set produced by make_sControls, holding the
#                fields referenced below (N, GENERAL, NYERS, ..., OPOUT).
#simulation controls
cat("*SIMULATION CONTROLS\n", file = name_exp)
# -- GENERAL: run length, repetitions, start mode/date, seed, run name.
cat("@N GENERAL NYERS NREPS START SDATE RSEED SNAME.................... SMODEL\n", file = name_exp)
cat(paste(sprintf("%2d",as.integer(information$N))," ",sprintf("%-11s",information$GENERAL),
" ",sprintf("%5d",as.integer(information$NYERS))," ",sprintf("%5d",as.integer(information$NREPS)),
" ",sprintf("%5s",information$START)," ",sprintf("%5s",information$SDATE),
" ",sprintf("%5d",as.integer(information$RSEED))," ",sprintf("%-25s",information$SNAME),
" ",sprintf("%-6s",information$SMODEL),"\n",sep=""),file=name_exp)
# -- OPTIONS: on/off switches for the simulated processes.
cat("@N OPTIONS WATER NITRO SYMBI PHOSP POTAS DISES CHEM TILL CO2\n",file=name_exp)
cat(paste(sprintf("%2d",as.integer(information$N))," ",sprintf("%-11s",information$OPTIONS),
" ",sprintf("%5s",information$WATER)," ",sprintf("%5s",information$NITRO),
" ",sprintf("%5s",information$SYMBI)," ",sprintf("%5s",information$PHOSP),
" ",sprintf("%5s",information$POTAS)," ",sprintf("%5s",information$DISES),
" ",sprintf("%5s",information$CHEM)," ",sprintf("%5s",information$TILL),
" ",sprintf("%5s",information$CO2),"\n",sep=""),file=name_exp)
# -- METHODS: numerical/method choices (NSWIT and MESOL are integers).
cat("@N METHODS WTHER INCON LIGHT EVAPO INFIL PHOTO HYDRO NSWIT MESOM MESEV MESOL\n",file=name_exp)
cat(paste(sprintf("%2d",as.integer(information$N))," ",sprintf("%-11s",information$METHODS),
" ",sprintf("%5s",information$WTHER)," ",sprintf("%5s",information$INCON),
" ",sprintf("%5s",information$LIGHT)," ",sprintf("%5s",information$EVAPO),
" ",sprintf("%5s",information$INFIL)," ",sprintf("%5s",information$PHOTO),
" ",sprintf("%5s",information$HYDRO)," ",sprintf("%5d",as.integer(information$NSWIT)),
" ",sprintf("%5s",information$MESOM)," ",sprintf("%5s",information$MESEV),
" ",sprintf("%5d",as.integer(information$MESOL)),"\n",sep=""),file=name_exp)
# -- MANAGEMENT: automatic-management switches.
cat("@N MANAGEMENT PLANT IRRIG FERTI RESID HARVS\n",file=name_exp)
cat(paste(sprintf("%2d",as.integer(information$N))," ",sprintf("%-11s",information$MANAGEMENT),
" ",sprintf("%5s",information$PLANT)," ",sprintf("%5s",information$IRRIG),
" ",sprintf("%5s",information$FERTI)," ",sprintf("%5s",information$RESID),
" ",sprintf("%5s",information$HARVS),"\n",sep=""),file=name_exp)
# -- OUTPUTS: which output files/level of verbosity DSSAT produces.
cat("@N OUTPUTS FNAME OVVEW SUMRY FROPT GROUT CAOUT WAOUT NIOUT MIOUT DIOUT VBOSE CHOUT OPOUT\n",file=name_exp)
cat(paste(sprintf("%2d",as.integer(information$N))," ",sprintf("%-11s",information$OUTPUTS),
" ",sprintf("%5s",information$FNAME)," ",sprintf("%5s",information$OVVEW),
" ",sprintf("%5s",information$SUMRY)," ",sprintf("%5s",information$FROPT),
" ",sprintf("%5s",information$GROUT)," ",sprintf("%5s",information$CAOUT),
" ",sprintf("%5s",information$WAOUT)," ",sprintf("%5s",information$NIOUT),
" ",sprintf("%5s",information$MIOUT)," ",sprintf("%5s",information$DIOUT),
" ",sprintf("%5s",information$VBOSE)," ",sprintf("%5s",information$CHOUT),
" ",sprintf("%5s",information$OPOUT),"\n",sep=""),file=name_exp)
cat("\n", file = name_exp)
}
# write__sControls(proof, make_sControls(input_sControls))
write_Amgmt <- function(name_exp, information){
cat("@ AUTOMATIC MANAGEMENT\n", file = name_exp)
cat("@N PLANTING PFRST PLAST PH2OL PH2OU PH2OD PSTMX PSTMN\n", file = name_exp)
cat(paste(sprintf("%2d",as.integer(information$N))," ",sprintf("%-11s",information$PLANTING),
" ",sprintf("%5s",information$PFRST)," ",sprintf("%5s",information$PLAST),
" ",sprintf("%5d",as.integer(information$PH2OL))," ",sprintf("%5d",as.integer(information$PH2OU)),
" ",sprintf("%5d",as.integer(information$PH2OD))," ",sprintf("%5d",as.integer(information$PSTMX)),
" ",sprintf("%5d",as.integer(information$PSTMN)),"\n",sep=""),file=name_exp)
cat("@N IRRIGATION IMDEP ITHRL ITHRU IROFF IMETH IRAMT IREFF\n",file=name_exp)
cat(paste(sprintf("%2d",as.integer(information$N))," ",sprintf("%-11s",information$IRRIGATION),
" ",sprintf("%5d",as.integer(information$IMDEP))," ",sprintf("%5d",as.integer(information$ITHRL)),
" ",sprintf("%5d",as.integer(information$ITHRU))," ",sprintf("%5s",information$IROFF),
" ",sprintf("%5s",information$IMETH)," ",sprintf("%5d",as.integer(information$IRAMT)),
" ",sprintf("%5d",as.integer(information$IREFF)),"\n",sep=""),file=name_exp)
cat("@N NITROGEN NMDEP NMTHR NAMNT NCODE NAOFF\n",file=name_exp)
cat(paste(sprintf("%2d",as.integer(information$N))," ",sprintf("%-11s",information$NITROGEN),
" ",sprintf("%5d",as.integer(information$NMDEP))," ",sprintf("%5d",as.integer(information$NMTHR)),
" ",sprintf("%5d",as.integer(information$NAMNT))," ",sprintf("%5s",information$NCODE),
" ",sprintf("%5s",information$NAOFF),"\n",sep=""),file=name_exp)
cat("@N RESIDUES RIPCN RTIME RIDEP\n",file=name_exp)
cat(paste(sprintf("%2d",as.integer(information$N))," ",sprintf("%-11s",information$RESIDUES),
" ",sprintf("%5d",as.integer(information$RIPCN))," ",sprintf("%5d",as.integer(information$RTIME)),
" ",sprintf("%5d",as.integer(information$RIDEP)),"\n",sep=""),file=name_exp)
cat("@N HARVEST HFRST HLAST HPCNP HPCNR\n",file=name_exp)
cat(paste(sprintf("%2d",as.integer(information$N))," ",sprintf("%-11s",information$HARVEST),
" ",sprintf("%5d",as.integer(information$HFRST))," ",sprintf("%5d",as.integer(information$HLAST)),
" ",sprintf("%5d",as.integer(information$HPCNP))," ",sprintf("%5d",as.integer(information$HPCNR)),
"\n",sep=""),file=name_exp)
cat("\n", file = name_exp)
} | /functions_xfile.R | no_license | haachicanoy/workshop_urosario | R | false | false | 15,240 | r | ## functions to write x- file
make_archive <- function(out_file, overwrite = FALSE, encoding){
  # Open a writable file connection for a DSSAT experiment (X-) file.
  #
  # Args:
  #   out_file:  path of the file to create.
  #   overwrite: if TRUE an existing file is truncated; if FALSE a sibling
  #              "copy-<rand>_<name>" file is created instead, so an
  #              existing file is never clobbered.
  #   encoding:  value passed to options(encoding = ...). NOTE(review):
  #              this changes the encoding option for the whole session and
  #              is never restored — kept to preserve existing behaviour.
  #
  # Returns: an open write-mode connection; the caller must close() it.
  options(encoding = encoding)
  if (file.exists(out_file) && !overwrite) {
    # Non-clobbering path: copy-<rnum>_<basename> in the same directory.
    # dirname()/basename() replace the original manual strsplit on "/",
    # which produced a broken leading-slash path for bare file names.
    rnum <- round(runif(1, 10000, 20000), 0)
    out_file <- file.path(dirname(out_file),
                          paste("copy-", rnum, "_", basename(out_file), sep = ""))
  }
  file(out_file, open = "w")
}
write_details <- function(name_exp, information){
  # Write the *EXP.DETAILS header line and the *GENERAL section (people,
  # address, site) of a DSSAT X-file to the connection `name_exp`.
  # Each value is left-justified in a 12-character field.
  # NOTE(review): this sets the session-wide encoding option and never
  # restores it — kept as-is to preserve behaviour.
  options(encoding = "UTF-8")
  emit <- function(txt) cat(txt, file = name_exp)
  field <- function(value) paste0(sprintf("%-12s", as.character(value)), "\n")
  emit(paste0(information$DETAILS, "\n"))
  emit("\n")
  emit("*GENERAL\n@PEOPLE\n")
  emit(field(information$PEOPLE))
  emit("@ADDRESS\n")
  emit(field(information$ADDRESS))
  emit("@SITE\n")
  emit(field(information$SITE))
}
# information <- make_details()
# write_details(proof, information)
# close(proof)
write_treatments <- function(name_exp, information){
# Write the *TREATMENTS section of a DSSAT X-file.
#
# Args:
#   name_exp:    open writable connection (see make_archive).
#   information: data frame with one row per treatment: N, R, O, C, TNAME,
#                and the factor-level pointer columns CU, FL, SA, IC, MP,
#                MI, MF, MR, MC, MT, ME, MH, SM.
#
# The sprintf calls use explicit positional specs ("%1$2d%2$2d...") so
# each value lands in its fixed 2- or 3-character column.
cat("*TREATMENTS -------------FACTOR LEVELS------------\n", file = name_exp)
cat("@N R O C TNAME.................... CU FL SA IC MP MI MF MR MC MT ME MH SM\n", file = name_exp)
# NOTE(review): 1:nrow() misbehaves for an empty data frame (gives 1:0).
for (i in 1:nrow(information)) {
cat(paste(sprintf("%1$2d%2$2d%3$2d%4$2d",as.integer(information$N[i]),as.integer(information$R[i]),
as.integer(information$O[i]),as.integer(information$C[i])),
" ",sprintf("%1$-25s%2$3d%3$3d%4$3d%5$3d%6$3d%7$3d%8$3d%9$3d%10$3d%11$3d%12$3d%13$3d%14$3d",information$TNAME[i],
as.integer(information$CU[i]),as.integer(information$FL[i]),as.integer(information$SA[i]),
as.integer(information$IC[i]),as.integer(information$MP[i]),as.integer(information$MI[i]),
as.integer(information$MF[i]),as.integer(information$MR[i]),as.integer(information$MC[i]),
as.integer(information$MT[i]),as.integer(information$ME[i]),as.integer(information$MH[i]),
as.integer(information$SM[i])),
"\n", sep = ""), file = name_exp)
}
cat("\n", file = name_exp)
}
# write_details(proof, make_treatments(IC, MI, MF, MH))
# close(proof)
write_cultivars <- function(name_exp, information){
  # Write the *CULTIVARS section of a DSSAT X-file: one "@C CR INGENO CNAME"
  # row per cultivar in `information` (columns C, CR, INGENO, CNAME).
  cat("*CULTIVARS\n", file = name_exp)
  cat("@C CR INGENO CNAME\n", file = name_exp)
  for (row in seq_len(nrow(information))) {
    # Fixed-width layout: 2-wide level number, 2-wide crop code, 6-wide
    # cultivar id, left-justified 12-wide cultivar name.
    line <- sprintf("%2d %2s %6s %-12s\n",
                    as.integer(information$C[row]),
                    information$CR[row],
                    information$INGENO[row],
                    information$CNAME[row])
    cat(line, file = name_exp)
  }
  cat("\n", file = name_exp)
}
# write_cultivars(proof, make_cultivars(CR, INGENO, CNAME))
# close(proof)
write_fields <- function(name_exp, information){
# Write the *FIELDS section of a DSSAT X-file: for every field (row of
# `information`) one @L identification line and one @L coordinates line.
#
# Args:
#   name_exp:    open writable connection (see make_archive).
#   information: data frame with one row per field holding L, ID_FIELD,
#                WSTA, FLSA..FLNAME plus XCRD, YCRD, ELEV, AREA, SLEN,
#                FLWR, SLAS, FLHST, FHDUR (see make_fields).
#
# NOTE(review): SLTX (soil texture) is coerced with as.integer() and
# printed with %-4d although DSSAT texture codes are usually strings —
# confirm against make_fields.
# fields
cat("*FIELDS\n", file = name_exp)
cat("@L ID_FIELD WSTA.... FLSA FLOB FLDT FLDD FLDS FLST SLTX SLDP ID_SOIL FLNAME\n", file = name_exp)
for (i in 1:nrow(information)) {
cat(paste(sprintf("%2d",as.integer(information$L[i]))," ",sprintf("%-8s",information$ID_FIELD[i]),
" ",sprintf("%-8s",information$WSTA[i]),sprintf("%6d",as.integer(information$FLSA[i])),
sprintf("%6d",as.integer(information$FLOB[i])),sprintf("%6s",information$FLDT[i]),
sprintf("%6d",as.integer(information$FLDD[i])),sprintf("%6s",as.integer(information$FLDS[i])),
sprintf("%6d",as.integer(information$FLST[i]))," ",sprintf("%-4d",as.integer(information$SLTX[i])),
sprintf("%6d",as.integer(information$SLDP[i]))," ",sprintf("%-10s",information$ID_SOIL[i])," ",
sprintf("%-12s",information$FLNAME[i]),"\n", sep=""),file=name_exp)
}
cat("\n", file = name_exp)
cat("@L ..........XCRD ...........YCRD .....ELEV .............AREA .SLEN .FLWR .SLAS FLHST FHDUR\n",file=name_exp)
# The commented block below is the earlier float version of the
# coordinates line (%15.3f for decimal degrees); kept for reference.
#
# for (i in 1:nrow(information)) {
#
#
# cat(paste(sprintf("%2d",as.integer(information$L[i]))," ",sprintf("%15.3f",information$XCRD[i])," ",
# sprintf("%15.3f",information$YCRD[i])," ",sprintf("%9d",as.integer(information$ELEV[i]))," ",
# sprintf("%17d",as.integer(information$AREA[i]))," ",sprintf("%5d",as.integer(information$SLEN[i]))," ",
# sprintf("%5d",as.integer(information$FLWR[i]))," ",sprintf("%5d",as.integer(information$SLAS[i]))," ",
# sprintf("%5d",as.integer(information$FLHST[i]))," ",sprintf("%5d",as.integer(information$FHDUR[i])),
# "\n",sep=""),file=name_exp)
#
#
# }
# NOTE(review): "%14.d"/"%15.d" have an empty precision (treated as 0, so a
# zero coordinate prints as blank) and %d errors if XCRD/YCRD are doubles —
# assumed integer-coded coordinates here; confirm against make_fields.
for (i in 1:nrow(information)) {
cat(paste(sprintf("%2d",as.integer(information$L[i]))," ",sprintf("%14.d",information$XCRD[i])," ",
sprintf("%15.d",information$YCRD[i])," ",sprintf("%9d",as.integer(information$ELEV[i]))," ",
sprintf("%17d",as.integer(information$AREA[i]))," ",sprintf("%5d",as.integer(information$SLEN[i]))," ",
sprintf("%5d",as.integer(information$FLWR[i]))," ",sprintf("%5d",as.integer(information$SLAS[i]))," ",
sprintf("%5d",as.integer(information$FLHST[i]))," ",sprintf("%5d",as.integer(information$FHDUR[i])),
"\n",sep=""), file=name_exp)
}
cat("\n",file=name_exp)
}
# write_fields(proof, make_fields(WSTA, ID_SOIL))
# close(proof)
write_IC <- function(name_exp, information){
# Write the *INITIAL CONDITIONS section of a DSSAT X-file.
#
# Args:
#   name_exp:    open writable connection (see make_archive).
#   information: list with two parts (see make_IC):
#                $field  - one-record field-level conditions (C, PCR,
#                          ICDAT, ICRT, ..., ICNAME)
#                $values - data frame of per-layer values (columns ICBL,
#                          SH20, SNH4, SNO3)
#initial conditions
cat("*INITIAL CONDITIONS\n",file=name_exp)
cat("@C PCR ICDAT ICRT ICND ICRN ICRE ICWD ICRES ICREN ICREP ICRIP ICRID ICNAME\n",file=name_exp)
cat(paste(sprintf("%2d",as.integer(information$field$C))," ",sprintf("%5s",information$field$PCR),
" ",sprintf("%5s",information$field$ICDAT)," ",sprintf("%5d",as.integer(information$field$ICRT)),
" ",sprintf("%5d",as.integer(information$field$ICND))," ",sprintf("%5d",as.integer(information$field$ICRN)),
" ",sprintf("%5d",as.integer(information$field$ICRE))," ",sprintf("%5d",as.integer(information$field$ICWD)),
" ",sprintf("%5d",as.integer(information$field$ICRES))," ",sprintf("%5d",as.integer(information$field$ICREN)),
" ",sprintf("%5d",as.integer(information$field$ICREP))," ",sprintf("%5d",as.integer(information$field$ICRIP)),
" ",sprintf("%5d",as.integer(information$field$ICRID))," ",sprintf("%-12s",information$field$ICNAME),
"\n",sep=""),file=name_exp)
cat("@C ICBL SH2O SNH4 SNO3\n",file=name_exp)
# Per-layer rows. NOTE(review): the level number is hard-coded to 1 for
# every row, and the column is read as 'SH20' (digit zero) while the
# printed header says SH2O (letter O) — consistent with make_IC's argument
# name, but worth confirming. "%5d" on ICBL/SH20 assumes integer-valued
# columns; paste()'s default sep also inserts a space before the newline.
for (i in 1:nrow(information$values)) {
cat(paste(sprintf("%2d %5d %5d %5.2f %5.2f", 1, information$values[i, 'ICBL'], information$values[i, 'SH20'],
information$values[i, 'SNH4'], information$values[i , 'SNO3'])), "\n", file = name_exp)
}
cat("\n",file=name_exp)
}
# write_IC(proof, make_IC(ICBL, SH20, SNH4, SNO3))
write_MF <- function(name_exp, information){
# Write the *FERTILIZERS (INORGANIC) section of a DSSAT X-file.
#
# Args:
#   name_exp:    open writable connection (see make_archive).
#   information: data frame with one row per fertilizer application and
#                columns FDATE, FMCD, FACD, FDEP, FAMN, FAMP, FAMK, FAMC,
#                FAMO, FOCD, FERNAME (see make_MF).
#
# NOTE(review): the %i conversions assume FDATE/FACD/FDEP/FAMC/FAMO/FOCD
# are stored as integers; sprintf errors on doubles/strings — confirm
# against make_MF.
cat("*FERTILIZERS (INORGANIC)\n", file = name_exp)
cat("@F FDATE FMCD FACD FDEP FAMN FAMP FAMK FAMC FAMO FOCD FERNAME \n", file = name_exp)
# seq_len() is safe when the data frame has zero rows (1:nrow gives 1:0).
for(i in seq_len(nrow(information))){
# BUG FIX: the last conversion was "%5-i" (flag after width — an invalid
# sprintf spec that errors at runtime), and FERNAME is a name string
# anyway; it is now written with "%s".
cat(paste(sprintf("%2d %5i %5s %5i %5i %5.2f %5.2f %5.2f %5i %5i %5i %s", 1, information$FDATE[i], information$FMCD[i],
information$FACD[i], information$FDEP[i], information$FAMN[i], information$FAMP[i],
information$FAMK[i], information$FAMC[i], information$FAMO[i],
information$FOCD[i], information$FERNAME[i]), '\n'), file = name_exp)
}
cat("\n", file = name_exp)
}
# write_MF(proof, make_MF(input_fertilizer))
write_pDetails <- function(name_exp, information){
# Write the *PLANTING DETAILS section (a single @P record) of a DSSAT
# X-file to the connection `name_exp`.
#
# Args:
#   name_exp:    open writable connection (see make_archive).
#   information: one-record set with fields P, PDATE, EDATE, PPOP, PPOE,
#                PLME, PLDS, PLRS, PLRD, PLDP, PLWT, PAGE, PENV, PLPH,
#                SPRL, PLNAME (see make_pDetails).
#
# NOTE(review): numeric fields are coerced with as.integer(), so any
# fractional population/depth values are truncated — confirm intended.
#planting details
cat("*PLANTING DETAILS\n",file = name_exp)
cat("@P PDATE EDATE PPOP PPOE PLME PLDS PLRS PLRD PLDP PLWT PAGE PENV PLPH SPRL PLNAME\n",file=name_exp)
# One fixed-width record: 2-char level, 5-char fields separated by single
# spaces, 29-char right-justified planting name.
cat(paste(sprintf("%2d",as.integer(information$P))," ",sprintf("%5s",information$PDATE),
" ",sprintf("%5s",information$EDATE)," ",sprintf("%5d",as.integer(information$PPOP)),
" ",sprintf("%5d",as.integer(information$PPOE))," ",sprintf("%5s",information$PLME),
" ",sprintf("%5s",information$PLDS)," ",sprintf("%5d",as.integer(information$PLRS)),
" ",sprintf("%5d",as.integer(information$PLRD))," ",sprintf("%5d",as.integer(information$PLDP)),
" ",sprintf("%5d",as.integer(information$PLWT))," ",sprintf("%5d",as.integer(information$PAGE)),
" ",sprintf("%5d",as.integer(information$PENV))," ",sprintf("%5d",as.integer(information$PLPH)),
" ",sprintf("%5d",as.integer(information$SPRL))," ",sprintf("%29s",information$PLNAME),
"\n", sep = ""), file = name_exp)
cat("\n", file = name_exp)
}
# write_pDetails(name_exp, make_pDetails(input_pDetails))
write__sControls <- function(name_exp, information){
# Write the *SIMULATION CONTROLS section of a DSSAT X-file: five @N
# sub-records (GENERAL, OPTIONS, METHODS, MANAGEMENT, OUTPUTS), each a
# header line followed by one fixed-width data line.
#
# Args:
#   name_exp:    open writable connection (see make_archive).
#   information: one-record set produced by make_sControls, holding the
#                fields referenced below (N, GENERAL, NYERS, ..., OPOUT).
#simulation controls
cat("*SIMULATION CONTROLS\n", file = name_exp)
# -- GENERAL: run length, repetitions, start mode/date, seed, run name.
cat("@N GENERAL NYERS NREPS START SDATE RSEED SNAME.................... SMODEL\n", file = name_exp)
cat(paste(sprintf("%2d",as.integer(information$N))," ",sprintf("%-11s",information$GENERAL),
" ",sprintf("%5d",as.integer(information$NYERS))," ",sprintf("%5d",as.integer(information$NREPS)),
" ",sprintf("%5s",information$START)," ",sprintf("%5s",information$SDATE),
" ",sprintf("%5d",as.integer(information$RSEED))," ",sprintf("%-25s",information$SNAME),
" ",sprintf("%-6s",information$SMODEL),"\n",sep=""),file=name_exp)
# -- OPTIONS: on/off switches for the simulated processes.
cat("@N OPTIONS WATER NITRO SYMBI PHOSP POTAS DISES CHEM TILL CO2\n",file=name_exp)
cat(paste(sprintf("%2d",as.integer(information$N))," ",sprintf("%-11s",information$OPTIONS),
" ",sprintf("%5s",information$WATER)," ",sprintf("%5s",information$NITRO),
" ",sprintf("%5s",information$SYMBI)," ",sprintf("%5s",information$PHOSP),
" ",sprintf("%5s",information$POTAS)," ",sprintf("%5s",information$DISES),
" ",sprintf("%5s",information$CHEM)," ",sprintf("%5s",information$TILL),
" ",sprintf("%5s",information$CO2),"\n",sep=""),file=name_exp)
# -- METHODS: numerical/method choices (NSWIT and MESOL are integers).
cat("@N METHODS WTHER INCON LIGHT EVAPO INFIL PHOTO HYDRO NSWIT MESOM MESEV MESOL\n",file=name_exp)
cat(paste(sprintf("%2d",as.integer(information$N))," ",sprintf("%-11s",information$METHODS),
" ",sprintf("%5s",information$WTHER)," ",sprintf("%5s",information$INCON),
" ",sprintf("%5s",information$LIGHT)," ",sprintf("%5s",information$EVAPO),
" ",sprintf("%5s",information$INFIL)," ",sprintf("%5s",information$PHOTO),
" ",sprintf("%5s",information$HYDRO)," ",sprintf("%5d",as.integer(information$NSWIT)),
" ",sprintf("%5s",information$MESOM)," ",sprintf("%5s",information$MESEV),
" ",sprintf("%5d",as.integer(information$MESOL)),"\n",sep=""),file=name_exp)
# -- MANAGEMENT: automatic-management switches.
cat("@N MANAGEMENT PLANT IRRIG FERTI RESID HARVS\n",file=name_exp)
cat(paste(sprintf("%2d",as.integer(information$N))," ",sprintf("%-11s",information$MANAGEMENT),
" ",sprintf("%5s",information$PLANT)," ",sprintf("%5s",information$IRRIG),
" ",sprintf("%5s",information$FERTI)," ",sprintf("%5s",information$RESID),
" ",sprintf("%5s",information$HARVS),"\n",sep=""),file=name_exp)
# -- OUTPUTS: which output files/level of verbosity DSSAT produces.
cat("@N OUTPUTS FNAME OVVEW SUMRY FROPT GROUT CAOUT WAOUT NIOUT MIOUT DIOUT VBOSE CHOUT OPOUT\n",file=name_exp)
cat(paste(sprintf("%2d",as.integer(information$N))," ",sprintf("%-11s",information$OUTPUTS),
" ",sprintf("%5s",information$FNAME)," ",sprintf("%5s",information$OVVEW),
" ",sprintf("%5s",information$SUMRY)," ",sprintf("%5s",information$FROPT),
" ",sprintf("%5s",information$GROUT)," ",sprintf("%5s",information$CAOUT),
" ",sprintf("%5s",information$WAOUT)," ",sprintf("%5s",information$NIOUT),
" ",sprintf("%5s",information$MIOUT)," ",sprintf("%5s",information$DIOUT),
" ",sprintf("%5s",information$VBOSE)," ",sprintf("%5s",information$CHOUT),
" ",sprintf("%5s",information$OPOUT),"\n",sep=""),file=name_exp)
cat("\n", file = name_exp)
}
# write__sControls(proof, make_sControls(input_sControls))
write_Amgmt <- function(name_exp, information){
# Write the @ AUTOMATIC MANAGEMENT block of the simulation controls:
# five @N sub-records (PLANTING, IRRIGATION, NITROGEN, RESIDUES, HARVEST),
# each a header line followed by one fixed-width data line.
#
# Args:
#   name_exp:    open writable connection (see make_archive).
#   information: one-record set holding the fields referenced below
#                (N, PLANTING, PFRST, ..., HPCNR).
cat("@ AUTOMATIC MANAGEMENT\n", file = name_exp)
# -- PLANTING window and soil water / temperature thresholds.
cat("@N PLANTING PFRST PLAST PH2OL PH2OU PH2OD PSTMX PSTMN\n", file = name_exp)
cat(paste(sprintf("%2d",as.integer(information$N))," ",sprintf("%-11s",information$PLANTING),
" ",sprintf("%5s",information$PFRST)," ",sprintf("%5s",information$PLAST),
" ",sprintf("%5d",as.integer(information$PH2OL))," ",sprintf("%5d",as.integer(information$PH2OU)),
" ",sprintf("%5d",as.integer(information$PH2OD))," ",sprintf("%5d",as.integer(information$PSTMX)),
" ",sprintf("%5d",as.integer(information$PSTMN)),"\n",sep=""),file=name_exp)
# -- IRRIGATION depth, thresholds, method and amount.
cat("@N IRRIGATION IMDEP ITHRL ITHRU IROFF IMETH IRAMT IREFF\n",file=name_exp)
cat(paste(sprintf("%2d",as.integer(information$N))," ",sprintf("%-11s",information$IRRIGATION),
" ",sprintf("%5d",as.integer(information$IMDEP))," ",sprintf("%5d",as.integer(information$ITHRL)),
" ",sprintf("%5d",as.integer(information$ITHRU))," ",sprintf("%5s",information$IROFF),
" ",sprintf("%5s",information$IMETH)," ",sprintf("%5d",as.integer(information$IRAMT)),
" ",sprintf("%5d",as.integer(information$IREFF)),"\n",sep=""),file=name_exp)
# NOTE(review): IREFF (irrigation efficiency) is truncated by as.integer(),
# so a fractional efficiency like 0.75 becomes 0 — confirm intended.
# -- NITROGEN automatic application rules.
cat("@N NITROGEN NMDEP NMTHR NAMNT NCODE NAOFF\n",file=name_exp)
cat(paste(sprintf("%2d",as.integer(information$N))," ",sprintf("%-11s",information$NITROGEN),
" ",sprintf("%5d",as.integer(information$NMDEP))," ",sprintf("%5d",as.integer(information$NMTHR)),
" ",sprintf("%5d",as.integer(information$NAMNT))," ",sprintf("%5s",information$NCODE),
" ",sprintf("%5s",information$NAOFF),"\n",sep=""),file=name_exp)
# -- RESIDUES incorporation settings.
cat("@N RESIDUES RIPCN RTIME RIDEP\n",file=name_exp)
cat(paste(sprintf("%2d",as.integer(information$N))," ",sprintf("%-11s",information$RESIDUES),
" ",sprintf("%5d",as.integer(information$RIPCN))," ",sprintf("%5d",as.integer(information$RTIME)),
" ",sprintf("%5d",as.integer(information$RIDEP)),"\n",sep=""),file=name_exp)
# -- HARVEST window and percentages.
cat("@N HARVEST HFRST HLAST HPCNP HPCNR\n",file=name_exp)
cat(paste(sprintf("%2d",as.integer(information$N))," ",sprintf("%-11s",information$HARVEST),
" ",sprintf("%5d",as.integer(information$HFRST))," ",sprintf("%5d",as.integer(information$HLAST)),
" ",sprintf("%5d",as.integer(information$HPCNP))," ",sprintf("%5d",as.integer(information$HPCNR)),
"\n",sep=""),file=name_exp)
cat("\n", file = name_exp)
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.