blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6178557bffca92040f4997f2362fa093a2881ff2 | f1372147a5410d09b5c3b8607dd9e27214ccf0c5 | /Unit 2/Resampling/LOOCV.R | ae2725dd697d66ba4bc3886e716d15847aae91ca | [] | no_license | prady026/Prady_Data_Science | 6262abd4d873ce8e75d6932f08f8d36849e0e8af | d6070650c03a01dedebab53e9de70172691eeb16 | refs/heads/master | 2020-08-06T11:21:33.108365 | 2019-10-05T07:20:14 | 2019-10-05T07:20:14 | 212,957,892 | 1 | 0 | null | 2019-10-05T07:20:15 | 2019-10-05T06:57:53 | Python | UTF-8 | R | false | false | 1,181 | r | LOOCV.R | library (ISLR)
# LOOCV demo: estimate test MSE for polynomial regressions of mpg on horsepower
# using boot::cv.glm (default K = n, i.e. leave-one-out cross-validation).
# Fixes: install 'boot' only when missing (not unconditionally on every run),
# drop attach() (all model formulas already use data = Auto), preallocate the
# error vector instead of growing it, and use <-/TRUE consistently.
if (!requireNamespace("boot", quietly = TRUE)) {
  install.packages("boot")
}
library(boot)
# Read the Auto data set (comma separated, with header).
Auto <- read.delim(file = "D:/CARRER/My_Course/Daily Classes/Module2/9 Assessing Performance/Re-Sampling Methods/CV/Auto.csv",
                   header = TRUE, sep = ",")
dim(Auto)
names(Auto)
summary(Auto)
# Fit a linear model and compute its LOOCV prediction error.
glm.fit <- glm(mpg ~ horsepower, data = Auto)
cv.err <- cv.glm(Auto, glm.fit)
# delta is a vector of length two:
#   delta[1] - raw cross-validation estimate of prediction error (LOOCV MSE),
#   delta[2] - adjusted estimate compensating the bias of not using LOOCV.
cv.err$delta[1]
# Compare the LOOCV MSE across polynomial degrees 1..5; preallocate the result.
cv.error <- numeric(5)
for (i in seq_len(5)) {
  glm.fit <- glm(mpg ~ poly(horsepower, i), data = Auto)
  cv.error[i] <- cv.glm(Auto, glm.fit)$delta[1]
}
cv.error
plot(x = 1:5, y = cv.error, type = 'b', xlab = "Polynomial Degree",
     ylab = "Cross Validation Error", main = "LOOCV-Bias / Variance Tradeoff")
|
59c604d947b5be9f3158aa0f29e37ce1afd882a6 | ab92c90db51c5ffb6e9e5b417936bef8c7318136 | /FindMaliciousEvents.R | 152a66cbcf539b4ce0abdfe0813b7ac37df93e8e | [
"MIT"
] | permissive | Richl-lab/recognize-unusual-logins | 87308b2052aa6207525234795fcb1c03e79fa5b5 | d82c685b94acfb9beb4c5e87407373387a27d8a5 | refs/heads/main | 2023-07-28T06:03:35.283611 | 2021-09-14T15:10:12 | 2021-09-14T15:10:12 | 375,682,044 | 2 | 0 | MIT | 2021-09-14T15:13:51 | 2021-06-10T12:01:15 | R | UTF-8 | R | false | false | 87,987 | r | FindMaliciousEvents.R | #!/usr/bin/Rscript --vanilla
#https://www.r-bloggers.com/2019/11/r-scripts-as-command-line-tools/
# Module: Bachelor thesis
# Theme: Detect malicious/unusual Login Events
# Author: Richard Mey <richard.mey@syss.de>
# Status: 28.07.2021
###########
# Comment #
###########
# Duration depends on:
# Number of events in the selected period
# Size of the selected period
# Depends on the perspective how many Users/Hosts/Sources are contained
# Used machine learning method
#####################
# Command arguments #
#####################
# Loading arguments from command line
# Capture the raw command line; everything after the "--args" marker is user
# input and is separated out later by split_arguments().
args <- commandArgs()
# Prepend the per-user library directory so packages installed under ~/.R resolve.
.libPaths("~/.R")
#################
# Main Function #
#################
# Entry point: validate the command line, load dependencies, parse all options,
# extract (or load) the feature set, run the selected anomaly detection and
# optionally render the result plots.
main <- function(args) {
  validate_not_empty_arguments(args)
  # Separate interpreter flags (before "--args") from user arguments (after it).
  splitted_args <- split_arguments(args)
  args <- splitted_args$args
  envr_args <- splitted_args$envr_args
  # Constants must exist before any parsing helper references them.
  initalize_global_variables()
  validate_arguments(args)
  validate_envr_arguments(envr_args)
  load_libraries()
  parsed_arguments <- parse_arguments(args, envr_args)
  config_data <- load_machine_learning_config(parsed_arguments)
  features <- extract_features_from_file(parsed_arguments)
  validate_config(config_data, parsed_arguments, features)
  anomaly_detection(features, parsed_arguments, config_data)
  # Plot generation can be suppressed with -n (with_plots = F).
  if (parsed_arguments$with_plots) {
    visualization_results(features, parsed_arguments$path, parsed_arguments$group,
                          parsed_arguments$rank, parsed_arguments$rank_method)
  }
  cat("Done.", fill = 1)
}
validate_not_empty_arguments <- function(args) {
  # Abort with the usage help when the command line lacks the "--args" marker,
  # i.e. no user arguments (dataset path etc.) were supplied at all.
  marker_hits <- grep("^--args$", args)
  if (length(args[marker_hits]) == 0) {
    stop_and_help("Dataset to prcoess is missing.", call. = F)
  }
}
split_arguments <- function(args) {
  # Partition the raw command line at the "--args" marker: everything up to
  # and including the marker belongs to the R invocation (environment
  # arguments), everything after it is user input for this script.
  marker <- grep("^--args$", args)
  user_part <- args[(marker + 1):length(args)]
  envr_part <- args[1:marker]
  return(list(args = user_part, envr_args = envr_part))
}
validate_arguments <- function(args) {
  # Validate the two positional user arguments: args[1] is the input file (or
  # "--help"), args[2] the output directory, which must exist and be readable
  # and writable.
  # Fix: file.access() takes a single mode; combined permissions are expressed
  # by addition, so read (4) + write (2) = 6. The original passed the vector
  # c(4, 2), which effectively only checked the first mode.
  read_and_write_permission <- 4 + 2
  if (args[1] == "--help") {
    help_output()
    quit()
  }else if (file.exists(args[1]) == F) {
    stop_and_help(paste0("The file ", args[1], " needs to exist."), call. = F)
  }else if (dir.exists(args[2]) == F) {
    stop_and_help(paste0("The directory ", args[2], " needs to exists."), call. = F)
  }else if (file.access(as.character(args[2]), read_and_write_permission) == -1) {
    stop_and_help(paste0("The directory (", args[2], ") sufficient rights (w,r) are not given.."), call. = F)
  }
}
validate_envr_arguments <- function(envr_args) {
  # Reject the --restore interpreter flag: restoring a saved workspace slows
  # the tool down considerably.
  restore_used <- "--restore" %in% envr_args
  if (restore_used) {
    stop_and_help("The option --restore should not be used, it will slow down the process.")
  }
}
initalize_global_variables <- function() {
  # Publish the script-wide constants into the global environment: time-bin
  # codes, numeric view ids (column indices into the raw data), permission
  # bits, ranking codes, the raw-data schema and feature/file names.
  constants <- list(
    time_bin_hour = "h",
    time_bin_day = "d",
    time_bin_day_and_hour = "dh",
    time_sorted_filename = "time_sort.csv",
    view_user = 4,
    view_host = 2,
    view_source_ip = 5,
    read_permission = 4,
    mean_rank = "m",
    variance_fifo_rank = "v",
    raw_data_types = c("integer", "character", "character", "numeric", "character", "character", "integer", "integer"),
    raw_data_col_names = c("Event_ID", "Host", "Time", "Logon_ID", "User", "Source", "Source_Port", "Logon_Type"),
    feature_name_id = "Identifier",
    feature_name_day = "day",
    feature_name_weekday = "weekday",
    feature_name_hour = "hour",
    read_file_ext = "csv"
  )
  for (constant_name in names(constants)) {
    assign(constant_name, constants[[constant_name]], envir = .GlobalEnv)
  }
}
#################
# Help Function #
#################
# Print the full command line usage text (file format, options, defaults) to
# stdout. Invoked for --help and from stop_and_help() whenever an argument is
# invalid. cat(..., fill = 43) wraps the output at 43 characters per line.
help_output <- function() {
  cat("Usage: FindMaliciousEvents [file] [dir] [--options]",
      "Currently supported file formats are: csv. The File needs the following construction (Event ID, Host, Time, Logon ID, User, Source, Source Port, Logon Type).",
      "Options:",
      "",
      "--help Help output",
      "-os Gives overall statictics to given data",
      "-v Specification of the perspective with the following argument, default is User",
      " u User From a users point of view",
      " h Host From a hosts point of view",
      " s Source From a source point of view",
      "-t Specification of the time slot with the following argument, default is day",
      " d Day",
      " d Use days instead of weakdays",
      " h Hour",
      " dh Day&Hour",
      " default is one hour for h&dh, write a number of hours as next argument it to change it",
      "-d Choose a start- and enddate, default is a quantile",
      " m Manual establishing",
      " startdate Y-M-D",
      " enddate Y-M-D, is not included",
      " v Complet span",
      "-e If you are already got an extracted feature set, you can use it instead of the data file",
      "-m Choose one of the given machine learning algorithm for evaluation, default is an kNN",
      " kNN k-nearest-neigbhour",
      " IF Isolation forest",
      " DAGMM Deep Autoencoding Gausian Mixture Model",
      " RF Randomforest - special to rank is the only option",
      " The config will be loaded automatic, you can configure it to use different machine learning Hyperparameters",
      "-p Use this to limit your cores to use. The next argument should be the logical count of cores to use, default is cores-1",
      "-r The output will be a complet ranked list, default principle is first comes first. Not used for RF.",
      " m If you want to get it mean ranked ",
      " v If you want to get it by variance rank of the feature: Hosts per User. Only usable with the user view.",
      "-gc This argument can be used with Random Forest, it will use the number of group changes to rank,",
      " instead of the visited groups.",
      "-s Save the trained model",
      "-lm The next argument should be the path to the directory with the trained model information",
      "-n Plots will not be generated",
      "-i Use the option to ignore the users from x to y, these could reflect the well-know users. The default is 0 to 10.000.",
      " Start of ignore",
      " End of ignore",
      "-c Insert a number behind -c to use another number of clusters, default is 13. The number should be higher than 3 and lower than 10000.",
      "-sc Using spectral Clustering instead of k-Means",
      "-ro If the data is readed in parts, this argument with number behind can establish the number of ",
      " rows that should be readed per round. The default is 10 Million.",
      fill = 43)
}
# Raise an error with `message` after printing the usage text. help_output()
# is evaluated as a stop() argument: it writes the help to stdout as a side
# effect and returns NULL, so it contributes nothing to the condition message
# itself. `call.` and `domain` are forwarded to stop().
stop_and_help <- function(message, call. = F, domain = NULL) {
  stop(message,
       "\n",
       help_output(),
       call. = call.,
       domain = domain
  )
}
#########
# Setup #
#########
# Attach every third-party package the pipeline needs (quietly) and set the
# global runtime options: week starts on Monday for lubridate, disable
# scientific notation for printed numbers, force UTC, and open a null PDF
# device so ggplot does not create Rplots.pdf as a side effect.
load_libraries <- function() {
  suppressMessages(library(tools))
  suppressMessages(library(dplyr))
  suppressMessages(library(ggplot2))
  suppressMessages(library(lubridate, quietly = T, mask.ok = F))
  suppressMessages(library(hms))
  suppressMessages(library(doParallel))
  suppressMessages(library(R.utils))
  suppressMessages(library(reticulate))
  suppressMessages(library(fmsb))
  suppressMessages(library(BBmisc))
  suppressMessages(library(ranger))
  suppressMessages(library(caret))
  suppressMessages(library(e1071))
  suppressMessages(library(clue))
  suppressMessages(library(yaml))
  suppressMessages(library(kernlab))
  # Options
  options(lubridate.week.start = 1) # weekday numbering starts on Monday
  options("scipen" = 999) # print numbers in full instead of scientific notation
  Sys.setenv(TZ = 'UTC') # fix the process timezone to UTC
  pdf(NULL) # null device: ggplot would otherwise write Rplots.pdf
}
# Parse every supported command line option into one named list. Each helper
# receives the default value for its option and returns it unchanged when the
# corresponding flag is absent. Order matters: the view must be known before
# rank parsing (the "v" rank method is user-view only), and the machine
# learning choice before model loading (compatibility checks).
parse_arguments <- function(args, envr_args) {
  parsed_arguments <- list()
  # Fresh, uniquely numbered output directory for this run.
  parsed_arguments$path <- create_output_folder(args)
  parsed_arguments$data_path <- data_path_argument(args)
  parsed_arguments$statistics <- statistics_argument(args, statistics = F)
  parsed_arguments$view <- view_argument(args, view = view_user)
  # RF forces group = F inside the helper.
  machine_learning_and_group <- machine_learning_argument(args, machine_learning = "kNN", group = T)
  parsed_arguments$machine_learning <- machine_learning_and_group$machine_learning
  parsed_arguments$group <- machine_learning_and_group$group
  time_arguments <- time_bin_argument(args, time_bin = time_bin_day, time_bin_size = 0, days_instead = F)
  parsed_arguments$time_bin <- time_arguments$time_bin
  parsed_arguments$time_bin_size <- time_arguments$time_bin_size
  parsed_arguments$days_instead <- time_arguments$days_instead
  time_windows <- time_window_argument(args, completely = F, startdate = NULL, enddate = NULL)
  parsed_arguments$startdate <- time_windows$startdate
  parsed_arguments$enddate <- time_windows$enddate
  parsed_arguments$completely <- time_windows$completely
  rank_argsuments <- rank_argument(args, rank = F, rank_method = NULL, parsed_arguments$view)
  parsed_arguments$rank <- rank_argsuments$rank
  parsed_arguments$rank_method <- rank_argsuments$rank_method
  parsed_arguments$group_changes <- group_changes_argument(args, group_changes = F)
  # Default: all logical cores minus one.
  parsed_arguments$cores <- cores_argument(args, cores = (detectCores() - 1))
  loaded_model <- load_model_argument(args, machine_learning = parsed_arguments$machine_learning, model_path = "", load_model = F)
  parsed_arguments$load_model <- loaded_model$load_model
  parsed_arguments$model_path <- loaded_model$model_path
  parsed_arguments$save_model <- save_model_argument(args, path = parsed_arguments$path, save_model = F)
  parsed_arguments$with_plots <- with_plots_argument(args, with_plots = T)
  parsed_arguments$extracted_features <- extracted_features_argument(args, extracted_features = F)
  ignore_interval_users <- ignore_interval_users_argument(args, first_user_to_ignore = 0, last_user_to_ignore = 10000)
  parsed_arguments$first_user_to_ignore <- ignore_interval_users$first_user_to_ignore
  parsed_arguments$last_user_to_ignore <- ignore_interval_users$last_user_to_ignore
  parsed_arguments$number_clusters <- number_clusters_argument(args, number_clusters = 13)
  parsed_arguments$spectral_clustering <- spectral_clustering_argument(args, spectral_clustering = F)
  parsed_arguments$parted_readed_rows <- rows_argument(args, parted_readed_rows = 10000000)
  parsed_arguments$absolute_path <- detect_absolute_path_script(envr_args)
  return(parsed_arguments)
}
# Creates new Folder with _X
# Creates new Folder with _X
create_output_folder <- function(args) {
  # Create a fresh run directory below args[2]: "FindMaliciousEvents_1" on the
  # first run, otherwise "FindMaliciousEvents_<max existing index + 1>".
  base_dir <- as.character(args[2])
  path <- paste0(base_dir, "/FindMaliciousEvents_1/")
  if (dir.exists(path)) {
    existing <- list.dirs(base_dir, recursive = F, full.names = F)
    numbered <- existing[grep("FindMaliciousEvents_[0-9]+", existing)]
    # Strip the non-numeric prefix to recover the run indices.
    next_index <- max(as.numeric(sub("[^0-9]+", "", numbered))) + 1
    path <- paste0(base_dir, "/FindMaliciousEvents_", next_index, "/")
  }
  dir.create(path)
  return(path)
}
# Resolve the first positional argument (the input data file) to an absolute
# path via R.utils::getAbsolutePath.default.
data_path_argument <- function(args) {
  return(getAbsolutePath.default(as.character(args[1])))
}
statistics_argument <- function(args, statistics) {
  # Flag -os switches the overall-statistics mode on; otherwise the
  # caller-supplied default is returned unchanged.
  if (any(grepl("^-os$", as.character(args)))) {
    statistics <- T
  }
  return(statistics)
}
view_argument <- function(args, view) {
  # Resolve the -v flag to a view constant: "u" -> view_user, "h" -> view_host,
  # "s" -> view_source_ip. Without -v the supplied default is returned.
  # Fix: the original compared the flag value against the unrelated time-bin
  # constant time_bin_hour (coincidentally also "h"); the literal option is
  # used instead, and the repeated grep/index expressions are evaluated once.
  flag_position <- grep("^-v$", as.character(args))
  if (length(flag_position) != 0) {
    option <- as.character(args[flag_position + 1])
    if (is.na(option)) {
      stop_and_help("You did not specify any of the views (u,h,s).", call. = F)
    }else if (option == "u") {
      view <- view_user
    }else if (option == "h") {
      view <- view_host
    }else if (option == "s") {
      view <- view_source_ip
    }else {
      stop_and_help("You did not specify any of the validate views (u,h,s).", call. = F)
    }
  }
  return(view)
}
machine_learning_argument <- function(args, machine_learning, group) {
  # Resolve the -m flag to one of the supported machine learning methods
  # (IF, kNN, DAGMM, RF). Choosing RF additionally disables grouping
  # (group = F). Without -m the caller-supplied defaults pass through.
  flag_position <- grep("^-m$", as.character(args))
  if (length(flag_position) != 0) {
    option <- as.character(args[flag_position + 1])
    if (is.na(option)) {
      stop_and_help("You did not specify any of the machine learning options (IF,kNN,DAGMM,RF).", call. = F)
    }else if (option == "IF") {
      machine_learning <- "IF"
    }else if (option == "kNN") {
      machine_learning <- "kNN"
    }else if (option == "DAGMM") {
      machine_learning <- "DAGMM"
    }else if (option == "RF") {
      machine_learning <- "RF"
      group <- F
    }else {
      stop_and_help("You did not specify any of valid machine learning options (IF,kNN,DAGMM,RF).", call. = F)
    }
  }
  return(list(machine_learning = machine_learning, group = group))
}
# Resolve the -t flag into the time bin settings:
#   "d"  -> day bins; an optional trailing "d" selects calendar days instead
#           of weekdays (days_instead = T),
#   "h"  -> hour bins, "dh" -> day-and-hour bins; both accept an optional
#           number of hours (1..72) whose value minus one becomes
#           time_bin_size.
# A following token containing "-" is treated as the next option, not as a
# value. Without -t the caller-supplied defaults pass through unchanged.
time_bin_argument <- function(args, time_bin, time_bin_size, days_instead) {
  if (length(grep("^-t$", as.character(args))) != 0) {
    if (is.na(args[grep("^-t$", as.character(args)) + 1])) {
      stop_and_help("You did not specify any of time slot options (d,h,dh).", call. = F)
    }else {
      # Accept only the three known bin codes (globals "h", "d", "dh").
      if (as.character(args[grep("^-t$", as.character(args)) + 1]) == time_bin_hour ||
          as.character(args[grep("^-t$", as.character(args)) + 1]) == time_bin_day ||
          as.character(args[grep("^-t$", as.character(args)) + 1]) == time_bin_day_and_hour) {
        if (as.character(args[grep("^-t$", as.character(args)) + 1]) == time_bin_day) {
          time_bin <- time_bin_day
          # Optional second token "d": use calendar days instead of weekdays.
          if (is.na(args[grep("^-t$", as.character(args)) + 2]) == F &&
              length(grep("-", args[grep("^-t$", as.character(args)) + 2])) == F) {
            if (as.character(args[grep("^-t$", as.character(args)) + 2]) == time_bin_day) {
              days_instead <- T
            }else {
              stop_and_help("The only option you can use here is d.", call. = F)
            }
          }
        }else {
          if (as.character(args[grep("^-t$", as.character(args)) + 1]) == time_bin_hour) {
            time_bin <- time_bin_hour
          }else {
            time_bin <- time_bin_day_and_hour
          }
          # Optional hour count for h/dh bins; stored zero-based (value - 1).
          if (is.na(args[grep("^-t$", as.character(args)) + 2]) == F &&
              length(grep("-", args[grep("^-t$", as.character(args)) + 2])) == F) {
            if (length(grep("^[0-9]*$", as.character(args[grep("^-t$", as.character(args)) + 2]))) != 0) {
              time_bin_size <- as.numeric(args[grep("^-t$", as.character(args)) + 2]) - 1
              if (time_bin_size < 0 || time_bin_size > 71) {
                stop_and_help("The number of hours need to be, bigger then 0 and smaller then 73.", call. = F)
              }
            }else {
              stop_and_help("Missing a number behind the hour/day-hour time bin format.", call. = F)
            }
          }else {
            # No size token follows: keep the default time_bin_size.
          }
        }
      }else {
        stop_and_help("You did not specify any of the valid time slot options (d,h,dh).", call. = F)
      }
    }
  }
  return(list(time_bin = time_bin, time_bin_size = time_bin_size, days_instead = days_instead))
}
rank_argument <- function(args, rank, rank_method, view) {
  # Resolve the -r flag: plain -r enables ranked output with first-come-first
  # ordering (rank_method stays NULL); "-r m" selects mean ranking and "-r v"
  # variance/FIFO ranking, which is only valid for the user view.
  # Fix: the invalid-method error message had a typo ("ptions") and omitted
  # the "v" option from the listed choices.
  flag_position <- grep("^-r$", as.character(args))
  if (length(flag_position) != 0) {
    next_token <- args[flag_position + 1]
    # Only treat the following token as a rank method when it exists and is
    # not another option (does not contain "-").
    if (is.na(next_token) != T && length(grep("-", next_token)) == 0) {
      if (as.character(next_token) == "m") {
        rank_method <- mean_rank
      }else if (as.character(next_token) == "v") {
        rank_method <- variance_fifo_rank
        if (view != view_user) {
          stop_and_help("This ranking method is just working with user view.")
        }
      }else {
        stop_and_help("You did not specify any of the valid rank options (m,v).", call. = F)
      }
    }
    rank <- T
  }
  return(list(rank = rank, rank_method = rank_method))
}
group_changes_argument <- function(args, group_changes) {
  # Flag -gc: rank by the number of group changes (Random Forest option)
  # instead of the visited groups.
  if (any(grepl("^-gc$", as.character(args)))) {
    group_changes <- T
  }
  return(group_changes)
}
# Resolve the -d flag into the analysed date window:
#   "-d m <start> <end>" parses both dates (Y-M-D, end exclusive) with
#   lubridate::as_date; a parse warning aborts with a help message,
#   "-d v" selects the complete span of the data (completely = T).
# Without -d the caller's defaults (NULL dates, completely = F) pass through
# and a quantile-based window is derived later.
time_window_argument <- function(args, completely, startdate, enddate) {
  if (length(grep("^-d$", as.character(args))) != 0) {
    if (is.na(args[grep("^-d$", as.character(args)) + 1])) {
      stop_and_help("You did not specify an option for start- and enddate (m,v).", call. = F)
    }else {
      if (as.character(args[grep("^-d$", as.character(args)) + 1]) == "m" ||
          as.character(args[grep("^-d$", as.character(args)) + 1]) == "v") {
        if (as.character(args[grep("^-d$", as.character(args)) + 1]) == "m") {
          if (is.na(args[grep("^-d$", as.character(args)) + 2]) && is.na(args[grep("^-d$", as.character(args)) + 3])) {
            stop_and_help("Missing start- and enddate.", call. = F)
          }else {
            # as_date() warns (returning NA) on unparseable input; the warning
            # handler converts that into a hard stop.
            tryCatch(expr = {
              startdate <- as_date(args[grep("^-d$", as.character(args)) + 2])
              enddate <- as_date(args[grep("^-d$", as.character(args)) + 3])
            }, warning = function(w) {
              stop_and_help("Missing a valid start- and enddate.", call. = F)
            })
            if (startdate > enddate) {
              stop_and_help("Your startdate is older then the enddate, change the information.", call. = F)
            }
          }
        }else {
          completely <- T
        }
      }else {
        stop_and_help("You did not specify a valid option for the start- and enddate (m,v).", call. = F)
      }
    }
  }
  return(list(startdate = startdate, enddate = enddate, completely = completely))
}
cores_argument <- function(args, cores) {
  # Resolve the -p flag: the number of logical processors to use, which must
  # lie between 1 and detectCores(). Without -p the default passes through.
  flag_position <- grep("^-p$", as.character(args))
  if (length(flag_position) != 0) {
    raw_value <- args[flag_position + 1]
    if (is.na(raw_value)) {
      stop_and_help("Missing a number of logical processors to use.", call. = F)
    }else {
      # as.numeric() warns (yielding NA) for non-numeric input; the warning
      # handler turns that into a hard stop.
      tryCatch(expr = {
        cores <- as.numeric(raw_value)
        if (cores > detectCores()) {
          stop_and_help("You can´t use a bigger number of logicals processors then available.", call. = F)
        }else if (cores < 1) {
          stop_and_help("You can´t use a smaller number of logicals processors then one.", call. = F)
        }
      }, warning = function(w) {
        stop_and_help("You did not specify a number of cores, based on your processor.", call. = F)
      })
    }
  }
  return(cores)
}
# Resolve the -lm flag: path to a directory holding a previously trained
# model. The directory must exist and contain cluster.rds, min_max.rds
# (except for RF) and one of model.rds / model.joblib / model.index. A second
# check enforces that the stored model format matches the selected machine
# learning method (model.rds <-> RF, model.index <-> DAGMM; model.rds present
# with IF/kNN is rejected). On success load_model is set to T.
# NOTE(review): paste0(model_path, "...") assumes model_path ends with a path
# separator — confirm against the documented -lm usage.
load_model_argument <- function(args, machine_learning, model_path, load_model) {
  if (length(grep("^-lm$", as.character(args))) != 0) {
    if (is.na(args[grep("^-lm$", as.character(args)) + 1])) {
      stop_and_help("Missing a path to the directory with the model information.", call. = F)
    }else {
      model_path <- as.character(args[grep("^-lm$", as.character(args)) + 1])
      if (dir.exists(model_path) == F) {
        stop_and_help("You did not specify an existing model directory.", call. = F)
      }
      # Required content: clustering, normalisation bounds (non-RF) and one
      # serialized model in any of the three supported formats.
      if (file.exists(paste0(model_path, "cluster.rds")) == F ||
          (file.exists(paste0(model_path, "min_max.rds")) == F && machine_learning != "RF") ||
          (file.exists(paste0(model_path, "model.joblib")) == F &&
            file.exists(paste0(model_path, "model.rds")) == F &&
            file.exists(paste0(model_path, "model.index")) == F)) {
        stop_and_help("Missing a directory that contains the following content: (min_max.rds), cluster.rds, model.(rds/joblib/index). ")
      }
      # Cross-check the stored model format against the -m selection.
      if ((file.exists(paste0(model_path, "model.rds")) == F && machine_learning == "RF") ||
        (file.exists(paste0(model_path, "model.rds")) == T && (machine_learning == "IF" || machine_learning == "kNN")) ||
        (file.exists(paste0(model_path, "model.index")) == F && machine_learning == "DAGMM")) {
        stop_and_help("The loaded model is not compatible with the machine learning option.", call. = F)
      }
      load_model <- T
    }
  }
  return(list(load_model = load_model, model_path = model_path))
}
save_model_argument <- function(args, path, save_model) {
  # Flag -s: enable saving of the trained model and prepare the "model/"
  # subdirectory inside this run's output folder.
  if (any(grepl("^-s$", as.character(args)))) {
    save_model <- T
    dir.create(paste0(path, "model/"))
  }
  return(save_model)
}
with_plots_argument <- function(args, with_plots) {
  # Flag -n suppresses all plot generation.
  if (any(grepl("^-n$", as.character(args)))) {
    with_plots <- F
  }
  return(with_plots)
}
extracted_features_argument <- function(args, extracted_features) {
  # Flag -e: the input file is an already extracted feature set instead of
  # raw event data.
  if (any(grepl("^-e$", as.character(args)))) {
    extracted_features <- T
  }
  return(extracted_features)
}
ignore_interval_users_argument <- function(args, first_user_to_ignore, last_user_to_ignore) {
  # Resolve "-i <first> <last>": the interval of well-known users to ignore.
  # Fix: the original pre-check used grep("[-0-9]*", ...), whose zero-or-more
  # quantifier matches every string, so the numeric validation was dead code;
  # an anchored integer pattern is used instead. The tryCatch is kept as a
  # safety net for integer overflow (as.integer then warns and yields NA).
  flag_position <- grep("^-i$", as.character(args))
  if (length(flag_position) != 0) {
    first_raw <- args[flag_position + 1]
    last_raw <- args[flag_position + 2]
    if (is.na(first_raw) || is.na(last_raw)) {
      stop_and_help("You did not specify an interval for first- and last users to ignore.", call. = F)
    }else if (length(grep("^-?[0-9]+$", first_raw)) == 0 ||
              length(grep("^-?[0-9]+$", last_raw)) == 0) {
      stop_and_help("You did not specify a numeric interval for first- and last users to ignore.", call. = F)
    }else {
      tryCatch(
        expr = {
          first_user_to_ignore <- as.integer(first_raw)
          last_user_to_ignore <- as.integer(last_raw)
        }, warning = function(w) {
          stop_and_help(paste("One of the inserted numbers", first_raw, ",",
                              last_raw, "for the first and last user to ignore, is not numeric."))
        }
      )
    }
  }
  return(list(first_user_to_ignore = first_user_to_ignore, last_user_to_ignore = last_user_to_ignore))
}
number_clusters_argument <- function(args, number_clusters) {
  # Resolve "-c <n>": the number of clusters (per the usage text: higher than
  # 3 and lower than 10000; default 13).
  # Fixes: (1) the flag pattern is anchored ("^-c$" — the original "^-c" also
  # matched other options and then crashed in the inner lookup), (2) the
  # numeric pre-check uses an anchored pattern (the original "[0-9]*" matched
  # everything), and (3) the range condition is corrected — the original
  # rejected every positive value (`number_clusters > 0`) instead of values
  # outside the documented range.
  flag_position <- grep("^-c$", as.character(args))
  if (length(flag_position) != 0) {
    raw_value <- args[flag_position + 1]
    if (is.na(raw_value)) {
      stop_and_help("Missing a number of clusters behind the argument.", call. = F)
    }else if (length(grep("^[0-9]+$", raw_value)) == 0) {
      stop_and_help("You did not specify a numeric number of clusters.", call. = F)
    }else {
      tryCatch(
        expr = {
          number_clusters <- as.integer(raw_value)
          if (number_clusters < 4 || number_clusters > 10000) {
            stop_and_help("The inserted number of clusters is lower than 4 or higher than 10000.")
          }
        }, warning = function(w) {
          stop_and_help("The inserted Cluster number is not correct.")
        }
      )
    }
  }
  return(number_clusters)
}
spectral_clustering_argument <- function(args, spectral_clustering) {
  # Flag -sc: use spectral clustering instead of k-means.
  if (any(grepl("^-sc$", as.character(args)))) {
    spectral_clustering <- T
  }
  return(spectral_clustering)
}
rows_argument <- function(args, parted_readed_rows) {
  # Resolve "-ro <n>": the number of rows to read per chunk during parted
  # read-in (minimum 100000; default 10 million).
  # Fixes: the value is converted to numeric — the original stored the raw
  # character string, which made the "< 100000" comparison lexicographic
  # (e.g. "99999" silently passed) and later broke its use as read.csv's
  # nrows argument — and the numeric pre-check uses an anchored pattern
  # (the original "[0-9]*" matched everything).
  flag_position <- grep("^-ro$", as.character(args))
  if (length(flag_position) != 0) {
    raw_value <- args[flag_position + 1]
    if (is.na(raw_value)) {
      stop_and_help("Missing a number of rows behind the -ro argument.", call. = F)
    }else if (length(grep("^[0-9]+$", raw_value)) == 0) {
      stop_and_help("You did not specify a numeric number of rows.", call. = F)
    }else {
      parted_readed_rows <- as.numeric(raw_value)
      if (parted_readed_rows < 100000) {
        stop_and_help("The number of rows should be not lower than 100.000.")
      }
    }
  }
  return(parted_readed_rows)
}
# Obtain the feature set: either load a previously extracted one (-e flag),
# or read the raw data and extract features. read_in_data() returns a bare
# TRUE (nrow == NULL) when the file is too large for a single read — in that
# case the chunked extraction path is taken, which requires an explicit or
# complete date window.
extract_features_from_file <- function(parsed_arguments) {
  if (parsed_arguments$extracted_features) {
    features <- read_in_features_from_file(parsed_arguments$data_path)
  }else {
    data <- read_in_data(parsed_arguments$data_path, parsed_arguments$path)
    # nrow() is non-NULL only when read_in_data() returned an actual data.frame.
    if (is.null(nrow(data)) == F) {
      features <- extract_features(data, parsed_arguments)
    }else {
      # Chunked path: a quantile-based window cannot be derived chunk-wise.
      if (is.null(parsed_arguments$startdate) == T && parsed_arguments$completely == F) {
        stop_and_help("Missing a start- and enddate, if the file is to large to be splitted.", call. = F)
      }
      features <- feature_extraction_parted_from_file(parsed_arguments)
    }
  }
  return(features)
}
# If the option -e has been choosen, Features which has been created with this program can be loaded
# If the option -e has been choosen, Features which has been created with this program can be loaded
# Load a previously extracted feature CSV (first column = row names) and
# verify that every column name matches one of the feature patterns this
# program generates; otherwise abort with a help message.
# NOTE(review): the empty warning handler makes tryCatch return NULL on any
# read warning, so the caller silently receives no features — confirm whether
# warnings should instead be treated like errors here.
read_in_features_from_file <- function(data_path) {
  if (file_ext(data_path) == read_file_ext) {
    if (file.access(data_path, read_permission) == -1) {
      stop_and_help("Missing a file for which you got the rights to read.", call. = F)
    }
    tryCatch(expr = {
      features <- read_in(data_path, row.names = 1)
      # Whitelist of all feature column name patterns produced by extraction.
      possible_features <- "weekday|number_events|proportion_[0-9_]+|hour|day|events_per_second|Identifier|Users_per_Host|Users_per_Source|Hosts_per_User|Hosts_per_Source|Sources_per_User|Sources_per_Host"
      if (length(grep(possible_features, colnames(features), invert = T)) != 0) {
        stop_and_help("The inserted Feature set, does not match the feature the programs generate.", call. = F)
      }
      return(features)
    }, error = function(e) {
      stop_and_help("The file is not empty or valid.", call. = F)
    }, warning = function(w) {
    })
  }else {
    stop_and_help("The specified file needs to match with one of the acceptable file formats (csv).", call. = F)
  }
}
read_in <- function(path, row.names = NULL, header = T, nrows = -1, colClasses = NA, skip = 0, col.names = NULL) {
  # Thin wrapper around read.csv. col.names is only forwarded when supplied,
  # because read.csv rejects an explicit col.names = NULL.
  call_args <- list(path, row.names = row.names, header = header,
                    nrows = nrows, colClasses = colClasses, skip = skip)
  if (is.null(col.names) == F) {
    call_args$col.names <- col.names
  }
  data <- do.call(read.csv, call_args)
  return(data)
}
write_out <- function(data, path, row.names = T, col.names = T) {
  # Persist `data` at `path`; the file extension selects the writer.
  # write.csv manages its own header, so col.names only applies to the
  # write.table branch.
  is_csv <- identical(file_ext(path), "csv")
  if (is_csv) {
    write.csv(data, path, row.names = row.names)
  }else {
    write.table(data, path, row.names = row.names, col.names = col.names)
  }
}
# Read the raw event CSV. R holds everything in memory, so when the file
# would occupy >= 40% of the currently free memory, the function instead
# writes a time-sorted copy (shell `sort` on column 3) next to the output and
# returns TRUE as a "read in parts" sentinel; otherwise it returns the
# filtered data.frame (only Event_ID 4624, Time parsed to POSIXct).
# NOTE(review): data_path is interpolated unquoted into a shell command —
# a path containing shell metacharacters would be a command injection /
# breakage risk; consider shQuote().
read_in_data <- function(data_path, path) {
  # R loads all data into memory; oversized files are therefore handled by a
  # chunked read elsewhere (see parted_read_in_data).
  memory <- get_free_memory()
  # File size in MB, to compare against free memory (also MB).
  file_size <- as.numeric(file.info(data_path)$size) / 1000000
  if (file_ext(data_path) == read_file_ext) {
    if (file.access(data_path, read_permission) == -1) {
      stop_and_help("Missing a file for which you got the rights to read.", call. = F)
    }
    # Oversized (>= 40% of free memory): switch to the parted read-in path.
    if (file_size >= memory * 0.4) {
      cat("The specified file is too large, hence the read-in/ preprocessing/ feature extraction will be splited. This process might take more time.",
          fill = 2)
      split <- T
      # Sort by the time column so chunk-wise feature extraction stays complete.
      system(paste0("sort -k3 -t, ", data_path, " >> ", path, time_sorted_filename))
      return(split)
    }else {
      tryCatch(expr = {
        data <- read_in(data_path, colClasses = raw_data_types, header = F)
      }, error = function(e) {
        stop_and_help("Missing a valid, non-empty file and in accordance with the format: Int,Num,Date,Num,Num,Num,Int,Int.", call. = F)
      }, warning = function(w) {
        stop_and_help("The file needs the following columns: Event_ID,Host,Time,Logon_ID,User,Source,Source Port,Logon Typ.", call. = F)
      })
      # Fewer than 1000 rows is considered too little data to analyse.
      if (nrow(data) < 1000) {
        stop_and_help("The file contains fewer then 1000 rows. You should use one with more.", call. = F)
      }
      # Rename the columns and keep only successful logon events (ID 4624).
      colnames(data) <- raw_data_col_names #ActivityID oder LogonGUID
      data <- data[(data$Event_ID == 4624),]
      data$Time<-convert_to_datetime(data$Time)
      return(data)
    }
  }else {
    stop_and_help("The specified file needs to match with one of the acceptable file formats (csv).", call. = F)
  }
}
convert_to_datetime <- function(datetime_as_char) {
  # Parse character timestamps into date-time objects via lubridate's
  # as_datetime().
  as_datetime(datetime_as_char)
}
get_free_memory <- function() {
  # Determine free system memory in MB by parsing the output of `free -m`
  # (Linux only): the last whitespace-separated field of the second line —
  # presumably the "available" column on modern distributions; verify on the
  # target system.
  free_output <- system('free -m', intern = T)
  tokens <- strsplit(free_output, " ")
  available_mb <- as.numeric(tail(tokens[[2]], n = 1))
  return(available_mb)
}
# If the file size is to large, read it in parts
# If the file size is to large, read it in parts
# Read chunk number `row_multi` (of parted_readed_rows rows each, shifted by
# `back`) from the time-sorted copy, filter to Event_ID 4624 and parse Time.
# Return values: a data.frame on success; TRUE as an "input exhausted"
# sentinel when a later chunk can no longer be read (the caller checks
# is.null(nrow(...))); a hard stop when even the first chunk fails.
parted_read_in_data <- function(path, row_multi, back, parted_readed_rows) {
  tryCatch(expr = {
    # Read the next parted_readed_rows rows, skipping everything before them.
    data_new <- read_in(paste0(path, time_sorted_filename), nrows = parted_readed_rows, skip = (row_multi * parted_readed_rows) - back,
                        colClasses = raw_data_types,
                        header = F)
    colnames(data_new) <- raw_data_col_names
    data_new$Time<-convert_to_datetime(data_new$Time)
    # Keep only successful logon events (ID 4624).
    data_new <- data_new[(data_new$Event_ID == 4624),]
    return(data_new)
  }, error = function(e) {
    if (row_multi == 0) {
      # The very first chunk failing means the file itself is unusable.
      stop_and_help("Missing a valid, non-empty file and in accordance with the format: Int,Num,Date,Num,Num,Num,Int,Int.")
    }else {
      # A later chunk failing means the file is exhausted: signal "finished".
      finished <- T
      return(finished)
    }
  }, warning = function(w) {
    stop_and_help("The file needs the following columns: Event_ID,Host,Time,Logon_ID,User,Source,Source Port,Logon Typ.")
  })
}
# Feature extraction without splitting data before
# Feature extraction without splitting data before
# Single-pass feature extraction: fix the date window, optionally compute
# pre-/post-preprocessing statistics (with an interactive stop prompt),
# verify the window actually contains events, then extract and persist the
# features as Features.csv in the run's output directory.
extract_features <- function(data, parsed_arguments) {
  start_and_enddate <- set_start_and_enddate(data, parsed_arguments)
  parsed_arguments$startdate <- start_and_enddate$startdate
  parsed_arguments$enddate <- start_and_enddate$enddate
  # With -os: statistics before preprocessing ...
  if (parsed_arguments$statistics) {
    data_statistics(data, parsed_arguments, "pre")
  }
  data <- preprocessing(data, parsed_arguments)
  # ... and after it, followed by an interactive offer to stop here.
  if (parsed_arguments$statistics) {
    data_statistics(data, parsed_arguments, "post")
    ask_for_stop()
  }
  # The window is [startdate, enddate); it must contain at least one event.
  if (nrow(data[(data$Time >= (as.Date(parsed_arguments$startdate)) &
    (data$Time < (as.Date(parsed_arguments$enddate)))),]) == 0) {
    stop_and_help("Missing a start- and enddate, that fits to the data.", call. = F)
  }
  features <- feature_extraction(data, parsed_arguments)
  write_out(features, paste0(parsed_arguments$path, "Features.csv"))
  return(features)
}
# Control the start- and enddate, if no dates are given calculate some
# Control the start- and enddate, if no dates are given calculate some
# Determine the analysed date window: explicit dates (-d m) win; otherwise
# "-d v" spans the whole data (end exclusive, hence + 1 day); otherwise a
# window is derived from the monthly event timeline.
set_start_and_enddate <- function(data, parsed_arguments) {
  if (is.null(parsed_arguments$startdate)) {
    if (parsed_arguments$completely) {
      # Full span of the data; enddate is exclusive, so add one day.
      startdate <- as_date(min(data$Time))
      enddate <- as_date(max(data$Time)) + days(1)
    }else {
      # Derive a window from the monthly timeline (quantile-based default).
      calculated_start_and_enddate <- calculate_start_and_enddate(generate_timeline_month(data))
      startdate <- calculated_start_and_enddate[[1]]
      enddate <- calculated_start_and_enddate[[2]]
    }
  }else {
    startdate <- parsed_arguments$startdate
    enddate <- parsed_arguments$enddate
  }
  return(list(startdate = startdate, enddate = enddate))
}
# Offer an interactive exit after the statistics have been written: read one
# line from stdin and terminate the whole process on "Yes"/"YES"/"Y".
ask_for_stop <- function() {
  cat("Data statistics are done, you would like to break now? Then type in: Yes/Y: ")
  answer <- as.character(readLines("stdin", n = 1))
  # any() keeps this safe when stdin is closed and no line was read
  if (any(grepl("^(Yes|YES|Y)$", answer))) {
    quit()
  }
}
# Read the time sorted raw file chunk by chunk and extract features per chunk,
# so the complete file never has to be in memory at once. Returns the
# (optionally grouped) feature data frame and writes it to Features.csv.
# BUG FIX: the original wrote/returned `grouped_features`, which was undefined
# whenever grouping was disabled; the raw features are now used as fallback.
feature_extraction_parted_from_file <- function(parsed_arguments) {
  finished <- F
  row_multi <- 0
  back <- 0
  features <- data.frame()
  # Read-in data until its finished
  while (finished == F) {
    data <- parted_read_in_data(parsed_arguments$path, row_multi, back, parsed_arguments$parted_readed_rows)
    if (!is.null(nrow(data))) {
      optimized_date <- optimize_date(data, parsed_arguments)
      finished <- optimized_date$finished
      # If data contains date interval, that doesnt fit to start and enddate, ignore it
      if (optimized_date$ignore_period == F) {
        parted_feature_result <- parted_feature_extraction(data, finished, optimized_date$optimized_arguments,
                                                          back, row_multi)
        finished <- parted_feature_result$finished
        back <- parted_feature_result$back
        features <- rbind(features, parted_feature_result$features)
      }
      rm(data)
    }else {
      finished <- T
    }
    row_multi <- row_multi + 1
  }
  validate_not_empty_features(features)
  if (parsed_arguments$group == T) {
    features <- group_features(features, parsed_arguments$time_bin, parsed_arguments$cores,
                               load_model = parsed_arguments$load_model,
                               model_path = parsed_arguments$model_path,
                               save_model = parsed_arguments$save_model, path = parsed_arguments$path,
                               number_clusters = parsed_arguments$number_clusters,
                               spectral_clustering = parsed_arguments$spectral_clustering)
  }
  write_out(features, paste0(parsed_arguments$path, "Features.csv"))
  return(features)
}
# Abort with a helpful message when the parted extraction produced no rows.
validate_not_empty_features <- function(features) {
  first_cell <- features[1, 1]
  if (is.null(first_cell)) {
    stop_and_help("Missing a start- and enddate, that fits to the data.", call. = F)
  }
}
# Clip the requested start/end dates to the date range of the current chunk.
# Flags chunks lying completely outside the requested period (ignore_period)
# and detects when no later chunk can match anymore (finished).
optimize_date <- function(data, parsed_arguments) {
  finished <- F
  ignore_period <- F
  optimized_arguments <- parsed_arguments
  chunk_min <- as_date(min(data$Time))
  chunk_max <- as_date(max(data$Time))
  if (parsed_arguments$completely == T) {
    # Use the complete chunk; end is exclusive, one day after the newest event
    startdate_optimized <- chunk_min
    enddate_optimized <- chunk_max + days(1)
  } else {
    startdate_optimized <- parsed_arguments$startdate
    enddate_optimized <- parsed_arguments$enddate
    if (startdate_optimized > chunk_max) {
      # Chunk lies completely before the requested window
      ignore_period <- T
    } else if (enddate_optimized < chunk_min) {
      # Chunk lies completely after the window; later chunks will too
      finished <- T
      ignore_period <- T
    } else if (enddate_optimized > chunk_max) {
      enddate_optimized <- chunk_max
    }
    if (chunk_min > startdate_optimized) {
      startdate_optimized <- chunk_min
    }
  }
  optimized_arguments$startdate <- startdate_optimized
  optimized_arguments$enddate <- enddate_optimized
  list(optimized_arguments = optimized_arguments, ignore_period = ignore_period, finished = finished)
}
# Run feature extraction on one chunk: trim the chunk's trailing, possibly
# incomplete time bin, remember how many rows were trimmed (back) so the next
# read re-reads them, then preprocess and extract.
# BUG FIX: the caller passes `finished` as second argument (see
# feature_extraction_parted_from_file), but the old signature did not accept
# it, so every call failed with an unused-argument error. The flag is now
# accepted and combined with the end-of-file detection from delete_edges.
parted_feature_extraction <- function(data, finished = F, optimized_arguments, back, row_multi) {
  edgeless_data_finished_flag <- delete_edges(data, optimized_arguments, back, row_multi)
  edgeless_data <- edgeless_data_finished_flag$edgeless_data
  # Rows cut off at the chunk edge must be read again with the next chunk
  new_back <- (optimized_arguments$parted_readed_rows - (nrow(edgeless_data))) + back
  preprocessed_data <- preprocessing(edgeless_data, optimized_arguments)
  features <- feature_extraction(preprocessed_data, optimized_arguments, split = T)
  return(list(features = features,
              finished = finished || edgeless_data_finished_flag$finished,
              back = new_back))
}
# If the next row in the data, contains a line that should be included in the
# last time bin, delete the last time bin data so the bin is re-processed
# complete with the following chunk.
# BUG FIXES vs. the original:
#  * the branches referenced an undefined variable `check` (meant:
#    `next_row_of_data`) and passed a whole data frame to difftime();
#  * `edgeless_data` was undefined when no branch matched or when reading the
#    peek row failed (the error handler's return value was discarded) - the
#    untrimmed data is now the default and the result is returned from inside
#    the tryCatch.
delete_edges <- function(data, optimized_arguments, back, row_multi) {
  time_bin <- optimized_arguments$time_bin
  time_bin_size <- optimized_arguments$time_bin_size
  tryCatch(expr = {
    # Peek at the first row of the NEXT chunk to decide whether the last
    # time bin of this chunk is complete
    next_row_of_data <- read_in(paste0(optimized_arguments$path, time_sorted_filename), nrows = 1,
                                skip = ((row_multi + 1) * optimized_arguments$parted_readed_rows) - back + 1,
                                colClasses = raw_data_types,
                                header = F, col.names = raw_data_col_names)
    next_row_of_data$Time <- convert_to_datetime(next_row_of_data$Time)
    # Default: nothing to trim
    edgeless_data <- data
    if (date(data[nrow(data), 3]) == date(next_row_of_data[1, 3]) && time_bin == time_bin_day) {
      # Day bins: drop every row of the day that continues in the next chunk
      edgeless_data <- data[!(date(data$Time) == date(next_row_of_data[1, 3])),]
    }else if ((as.integer(difftime(next_row_of_data[1, 3], data[1, 3], units = "hours")) -
      as.integer(difftime(data[nrow(data), 3], data[1, 3], units = "hours")) <= time_bin_size) &&
      (time_bin == time_bin_day_and_hour || time_bin == time_bin_hour) &&
      optimized_arguments$time_bin_size > 0) {
      # Multi-hour bins: drop the last (incomplete) bin of time_bin_size hours
      edgeless_data <- data[!(data$Time >= (data[1, 3] +
        hours(as.integer(difftime(data[nrow(data), 3], data[1, 3], units = "hours")) -
          as.integer(difftime(data[nrow(data), 3], data[1, 3], units = "hours"))
          %% (time_bin_size + 1)))),]
    }else if (date(data[nrow(data), 3]) == date(next_row_of_data[1, 3]) &&
      hour(data[nrow(data), 3]) == hour(next_row_of_data[1, 3]) &&
      (time_bin == time_bin_day_and_hour || time_bin == time_bin_hour)) {
      # Hour bins: drop the rows of the hour that continues in the next chunk
      edgeless_data <- data[!(date(data$Time) == date(next_row_of_data[1, 3]) &
        hour(data$Time) == hour(next_row_of_data[1, 3])),]
    }
    list(edgeless_data = edgeless_data, finished = F)
  }, error = function(e) {
    # No further row could be read: this was the last chunk
    list(edgeless_data = data, finished = T)
  })
}
# Drop events whose user id lies in the ignorable range
# (first_user_to_ignore..last_user_to_ignore) and remove exact duplicates.
preprocessing <- function(data, parsed_arguments) {
  ignored_ids <- parsed_arguments$first_user_to_ignore:parsed_arguments$last_user_to_ignore
  data[!(data$User %in% ignored_ids),] %>%
    distinct(Event_ID, User, Host, Time, Source, Source_Port, Logon_Type)
}
################################
# Function to extract features #
################################
# Core feature extraction loop.
# Iterates over the requested time range in windows of the configured time
# bin, and for every identifier occurring in a window (user/host/source,
# selected via `view`, a column index) evaluates all registered feature
# extractors in parallel. With split = T the run is one chunk of a parted
# extraction and grouping is postponed to the complete feature set.
# Returns the feature data frame (one row per identifier and window).
feature_extraction <- function(data, parsed_arguments, split = F) {
  view <- parsed_arguments$view
  startdate <- parsed_arguments$startdate
  enddate <- parsed_arguments$enddate
  cores <- parsed_arguments$cores
  time_bin_size <- parsed_arguments$time_bin_size
  # Assemble the extractor functions, their names and the window function
  functionset <- build_functionset_extraction(parsed_arguments)
  feature_extractors <- functionset$feature_extractors
  event_type <- functionset$event_type
  time_window <- functionset$time_window
  # Cluster out of x cores, to speed up
  cluster_of_cores <- makeCluster(cores)
  registerDoParallel(cluster_of_cores)
  features <- data.frame()
  # If source view has been choosen delete all NA values
  if (view == view_source_ip) {
    data <- data[(is.na(data$Source) != T),]
  }
  cat("Please magnify the window big enough to present the progress bar completly.", fill = 2)
  progress_bar <- create_progress_bar(data, startdate, enddate)
  processed <- 0
  i <- 0
  # Iterates through the time interval
  repeat {
    # Extract all data in this time window
    window <- data[(data$Time >= (as_datetime(startdate) %m+% time_window(i)) &
      (data$Time < (as_datetime(startdate) %m+% time_window(i + 1 + time_bin_size)))),]
    if (nrow(window) > 0) {
      # Extract per view user/sources/hosts without duplicates
      iterator <- distinct(window, window[[view]])
      # parallelised
      results <- foreach(j = seq_along(iterator[, 1]), .packages = c("lubridate", "dplyr", "hms", "R.utils"),
                         .combine = rbind) %dopar% {
        # Extract data for this view
        data_identifier <- window[(window[, view] == iterator[j, 1]),]
        result <- data.frame()
        # Use the functions for extraction; doCall drops the arguments an
        # extractor does not declare, so each one picks only what it needs
        for (k in seq_along(feature_extractors)) {
          result[1, k] <- doCall(feature_extractors[[k]], args = list(data_identifier = data_identifier,
                                                                     view = view, startdate = startdate, i = i,
                                                                     event_type = event_type[[k]],
                                                                     time_window = time_window), .ignoreUnusedArgs = T)
        }
        return(result)
      }
      features <- rbind(features, results)
      processed <- processed + nrow(window)
      setTxtProgressBar(progress_bar, processed, title = "Feature extraction:")
    }
    # Stop once the next window would start at or beyond the enddate
    if ((as_datetime(startdate) %m+% time_window(i + 1 + time_bin_size)) >= as_datetime(enddate)) {
      break
    }
    i <- i + 1 + time_bin_size
  }
  stopCluster(cluster_of_cores)
  close(progress_bar)
  colnames(features) <- functionset$feature_namens
  # If its splitted it needs to be done later on the complet feature set
  if (split != T && parsed_arguments$group == T) {
    features <- group_features(features, time_bin = parsed_arguments$time_bin, cores,
                               load_model = parsed_arguments$load_model,
                               model_path = parsed_arguments$model_path, save_model = parsed_arguments$save_model,
                               path = parsed_arguments$path, number_clusters = parsed_arguments$number_clusters,
                               spectral_clustering = parsed_arguments$spectral_clustering)
  }
  return(features)
}
# Build a console progress bar sized by the number of events that fall inside
# the [startdate, enddate) window.
create_progress_bar <- function(data, startdate, enddate) {
  in_window <- data[(data$Time >= (as.Date(startdate)) & (data$Time < (as.Date(enddate)))),]
  txtProgressBar(min = 0, max = nrow(in_window), width = 100, style = 3, char = "=", file = stderr(),
                 title = "Feature extraction:")
}
# Assemble the list of feature extractor functions, their output column names
# and the per-extractor logon type codes, driven by the cli configuration.
# Returns list(feature_extractors, feature_namens, event_type, time_window).
build_functionset_extraction <- function(parsed_arguments) {
  # Which Feature will be used, needed becuase of modularity
  feature_extractors <- NULL
  feature_namens <- NULL
  # ID will always be used
  feature_extractors <- append(feature_extractors, Identifier_extractor)
  feature_namens <- append(feature_namens, feature_name_id)
  # Time Features
  time_bin_functions <- time_bin_functionset_build(parsed_arguments$time_bin, parsed_arguments$days_instead,
                                                  feature_extractors, feature_namens)
  feature_extractors <- time_bin_functions$feature_extractors
  feature_namens <- time_bin_functions$feature_namens
  time_window <- time_bin_functions$time_window
  # Count Features
  feature_extractors <- append(feature_extractors, number_events_extractor)
  feature_namens <- append(feature_namens, "number_events")
  # Logon type code sets: one proportion feature per entry
  types <- list(2, 3, 9, 10, c(11, 12))
  # Remember which extractor positions the proportion features occupy
  start_typ <- length(feature_extractors) + 1
  for (z in seq_along(types)) {
    feature_extractors <- append(feature_extractors, proportion_event_extractor)
    feature_namens <- append(feature_namens, paste("proportion",
                                                   paste(as.character(unlist(types[[z]])), collapse = "_"), sep = "_"))
  }
  end_typ <- start_typ + length(types) - 1
  feature_extractors <- append(feature_extractors, events_per_second_extractor)
  feature_namens <- append(feature_namens, "events_per_second")
  # Features per View
  view_functions <- view_functionset_build(parsed_arguments$view, feature_extractors, feature_namens)
  feature_extractors <- view_functions$feature_extractors
  feature_namens <- view_functions$feature_namens
  # Later its needable to have an iteratble list, thats why logon type list contains unimportant information
  event_type <- rep(list(0), length(feature_extractors))
  # Map each proportion extractor (positions start_typ..end_typ) onto its
  # logon type set; note `1:(n) - 1` means (1:n) - 1, i.e. z runs 0..n-1
  for (z in 1:(end_typ - start_typ + 1) - 1) {
    event_type[[(start_typ + z)]] <- types[[z + 1]]
  }
  return(list(feature_extractors = feature_extractors, feature_namens = feature_namens,
              event_type = event_type, time_window = time_window))
}
# Time Feature
# Register the time related feature extractors for the chosen time bin
# ("d" = day, "h" = hour, "dh" = day and hour) and pick the matching window
# step function (lubridate days/hours).
time_bin_functionset_build <- function(time_bin, days_instead, feature_extractors, feature_namens) {
  if (time_bin == "d") {
    if (days_instead) {
      # Calendar date instead of weekday number
      feature_extractors <- c(feature_extractors, day_feature_2)
      feature_namens <- c(feature_namens, feature_name_day)
    } else {
      feature_extractors <- c(feature_extractors, weekday_extractor)
      feature_namens <- c(feature_namens, feature_name_weekday)
    }
    time_window <- days
  } else if (time_bin == "h") {
    feature_extractors <- c(feature_extractors, hour_extractor)
    feature_namens <- c(feature_namens, feature_name_hour)
    time_window <- hours
  } else if (time_bin == "dh") {
    feature_extractors <- c(feature_extractors, day_extractor, hour_extractor)
    feature_namens <- c(feature_namens, feature_name_day, feature_name_hour)
    time_window <- hours
  }
  list(feature_extractors = feature_extractors, feature_namens = feature_namens, time_window = time_window)
}
# View Feature
# Register the per-view relation features: for each supported view column
# index, count the distinct partners of the two other entity types.
view_functionset_build <- function(view, feature_extractors, feature_namens) {
  view_key <- as.character(view)
  if (view_key == "2") {
    feature_extractors <- c(feature_extractors, Users_per_X_extractor, Sources_per_X_extractor)
    feature_namens <- c(feature_namens, "Users_per_Host", "Sources_per_Host")
  } else if (view_key == "4") {
    feature_extractors <- c(feature_extractors, Hosts_per_X_extractor, Sources_per_X_extractor)
    feature_namens <- c(feature_namens, "Hosts_per_User", "Sources_per_User")
  } else if (view_key == "5") {
    feature_extractors <- c(feature_extractors, Users_per_X_extractor, Hosts_per_X_extractor)
    feature_namens <- c(feature_namens, "Users_per_Source", "Hosts_per_Source")
  }
  list(feature_extractors = feature_extractors, feature_namens = feature_namens)
}
##############################
# LIST OF FEATURE EXTRACTORS #
##############################
# First identifier value (the view column) of the given event subset.
Identifier_extractor <- function(data_identifier, view, ...) {
  data_identifier[[view]][1]
}
# Weekday number of the i-th day after startdate (week starts Monday unless
# the lubridate.week.start option says otherwise).
weekday_extractor <- function(startdate, i, ...) {
  day_of_interest <- ymd(as.Date(startdate) %m+% days(i))
  wday(day_of_interest, week_start = getOption("lubridate.week.start", 1))
}
# Hour-of-day of window index i as an hms time (wraps every 24 windows).
hour_extractor <- function(i, ...) {
  seconds_of_day <- (i %% 24) * 3600
  as_hms(seconds_of_day)
}
# Calendar date of window index i relative to startdate (window step given by
# time_window, e.g. lubridate days or hours).
day_extractor <- function(startdate, i, time_window, ...) {
  window_start <- as.Date(startdate) %m+% time_window(i)
  as_date(window_start)
}
# Number of events of this identifier inside the current window.
number_events_extractor <- function(data_identifier, ...) {
  nrow(data_identifier)
}
# Share of the identifier's events whose Logon_Type is in event_type.
proportion_event_extractor <- function(data_identifier, event_type, ...) {
  matching <- sum(data_identifier$Logon_Type %in% event_type)
  matching / nrow(data_identifier)
}
# Event rate (events per second) over the identifier's time span.
# Returns 0 for a single event and 1 when all events share one timestamp
# (the rate is undefined there; a neutral constant keeps the feature finite).
# BUG FIX: the original mixed positional access (data[, 3]) with named access
# (data$Time) for the same column; the named Time column is now used
# consistently so the extractor cannot silently read a wrong column.
events_per_second_extractor <- function(data_identifier, ...) {
  number_of_rows <- nrow(data_identifier)
  if (number_of_rows == 1) {
    return(0)
  }
  span_seconds <- as.numeric(difftime(max(data_identifier$Time),
                                      min(data_identifier$Time), units = "secs"))
  if (span_seconds == 0) {
    return(1)
  }
  return(number_of_rows / span_seconds)
}
# Number of distinct Hosts per value of the view column.
Hosts_per_X_extractor <- function(data_identifier, view, ...) {
  host_counts <- data_identifier %>%
    distinct(Host, X = .[[view]]) %>%
    group_by(X) %>%
    summarise(n())
  return(host_counts$`n()`)
}
# Number of distinct Sources per value of the view column.
Sources_per_X_extractor <- function(data_identifier, view, ...) {
  source_counts <- data_identifier %>%
    distinct(Source, X = .[[view]]) %>%
    group_by(X) %>%
    summarise(n())
  return(source_counts$`n()`)
}
# Number of distinct Users per value of the view column.
Users_per_X_extractor <- function(data_identifier, view, ...) {
  user_counts <- data_identifier %>%
    distinct(User, X = .[[view]]) %>%
    group_by(X) %>%
    summarise(n())
  return(user_counts$`n()`)
}
# ----------------------------------------------------------------------------------------------------------------------
##############
# Group View #
##############
# Cluster the per-identifier feature means into groups and replace the raw
# identifier by its cluster id. Optionally persists the min-max ranges of the
# clustered features so later runs can normalize consistently, and (unless the
# result is used as a random forest label) 0-1 normalizes the features.
group_features <- function(features, time_bin, cores, label = F, load_model, model_path, save_model, path,
                           number_clusters, spectral_clustering) {
  cat("Features will be grouped now.", fill = 1)
  identifier_means <- calculate_means(features, cores)
  clustered <- calculate_cluster(identifier_means, features, number_clusters, label,
                                 load_model, model_path, save_model, path, spectral_clustering)
  if (save_model && label == F) {
    # Persist column ranges for normalization of future data
    saveRDS(calculate_min_max(clustered, time_bin), paste0(path, "model/min_max.rds"))
  }
  # As label (random forest) the features stay raw; otherwise 0-1 normalize
  # to speed up the machine learning step
  if_needed_normalize_features(clustered, label, load_model, model_path, time_bin)
}
# Mean of every non-factor feature column per identifier (parallelised).
# Returns list(identifier data frame, data frame of means, aligned by row).
# FIXES vs. the original:
#  * options(warn = -1) leaked into the caller's session; it is now restored
#    via on.exit (the warning from the deprecated one_of() stays suppressed
#    inside this function only);
#  * the handler-less tryCatch around select() was a no-op and was removed;
#  * the inner loop reused `j`, shadowing the foreach index - renamed;
#  * the cluster is now also stopped when an error unwinds the function.
calculate_means <- function(features, cores) {
  old_warn <- options(warn = -1)
  on.exit(options(old_warn), add = TRUE)
  # Ignore factor-like columns such as the id and the time features
  features_without_factors <- select(features,
                                     !one_of(c(feature_name_id, feature_name_day, feature_name_weekday, feature_name_hour)))
  # IDs
  iterator <- distinct(features, Identifier)
  # Cluster
  cluster_of_cores <- makeCluster(cores)
  on.exit(stopCluster(cluster_of_cores), add = TRUE)
  registerDoParallel(cluster_of_cores)
  # Build means per User/Host/Source
  means <- foreach(j = seq_along(iterator[, 1]), .packages = c("lubridate", "dplyr"), .combine = rbind) %dopar% {
    data_iter <- features_without_factors[(features$Identifier == iterator[j, 1]),]
    result <- data.frame()
    for (col in seq_len(ncol(features_without_factors))) {
      result[1, col] <- mean(data_iter[, col])
    }
    return(result)
  }
  # Name means
  colnames(means) <- colnames(features_without_factors)
  return(list(iterator, means))
}
# Cluster
# Cluster the per-identifier mean vectors.
# iter_means: list(identifiers, means) as produced by calculate_means.
# Depending on the flags either a stored cluster model is applied
# (load_model), a spectral clustering (specc) is fitted, or k-means is run.
# label = F: returns `features` with the Identifier column replaced by the
#            cluster group (group becomes the new view for anomaly detection).
# label = T: returns the mean vectors with the group attached as label column.
calculate_cluster <- function(iter_means, features, number_clusters, label, load_model, model_path, save_model,
                              path, spectral_clustering) {
  # If a loaded model is used, its also needed to load the old cluster
  if (load_model) {
    cluster <- readRDS(file = paste0(model_path, "cluster.rds"))
    # Assign every identifier to its nearest stored cluster
    groups <- data.frame(Groups = as.numeric(cl_predict(cluster, iter_means[[2]], type = "class_id")))
  }else {
    # Seed + cluster data
    if (spectral_clustering) {
      tryCatch(
        expr = {
          cluster <- specc(as.matrix(iter_means[[2]]), number_clusters, seed = 123)
        }, error = function(e) {
          stop_and_help("Spectral clustering works bad with many Features/Data, choose a smaller cluster number.")
        }
      )
      groups <- data.frame(Groups = cluster@.Data)
    }else {
      # Fixed seed keeps the k-means assignment reproducible across runs
      set.seed(123)
      cluster <- stats::kmeans(x = iter_means[[2]], centers = number_clusters, algorithm = "Hartigan-Wong", nstart = 100)
      # Extract cluster numbers as labels/feature
      groups <- data.frame(Groups = cluster[["cluster"]])
    }
  }
  if (save_model) {
    saveRDS(cluster, paste0(path, "model/cluster.rds"))
  }
  # Feature -> first conditions else as Label
  if (label == F) {
    # Group ID and cluster number
    iterator <- data.frame(Identifier = iter_means[[1]], Group = as.factor(groups[, 1]))
    # Join Features and iterator to add cluster numbers
    features <- left_join(features, iterator, by = feature_name_id)
    # Construct unique IDs (identifiers repeat across time windows)
    uniq_rownames <- make.names(features[, 1], unique = T)
    rownames(features) <- uniq_rownames
    # Drop the raw identifier; the cluster group takes its place
    features <- features[, -which(names(features) %in% feature_name_id)]
    features <- features %>%
      rename(Identifier = Group)
    return(features)
  }else {
    # Use it as Label
    labeled_mean_data <- data.frame(iter_means[[2]], Group = as.factor(groups[, 1]))
    return(labeled_mean_data)
  }
}
# If its used for Random Forest (as label) it shouldnt be normalized.
# Otherwise 0-1 normalize the feature columns. With a loaded model the stored
# min/max ranges are merged with the current data's ranges so old and new
# data share one scale.
# Column offsets: day+hour bins carry two leading time columns (values start
# at column 3), every other bin one (start at 2); the trailing column is the
# cluster group and is excluded.
# NOTE(review): after unlist() the min_max vector is laid out column-wise,
# i.e. c(min_1..min_n, max_1..max_n) - complet_normalize_features must index
# it accordingly; verify both sides stay in sync.
if_needed_normalize_features <- function(cluserted_features, label, load_model, model_path, time_bin) {
  if (label == F) {
    if (load_model) {
      # Widen the stored ranges with the ranges of the new data
      min_max <- readRDS(paste0(model_path, "min_max.rds"))
      min_max_new <- calculate_min_max(cluserted_features, time_bin)
      min_max <- as.numeric(unlist(calculate_from_two_min_max(min_max, min_max_new)))
      if (time_bin == time_bin_day_and_hour) {
        cluserted_features[, 3:(ncol(cluserted_features) - 1)] <- complet_normalize_features(
          cluserted_features[, 3:(ncol(cluserted_features) - 1)], min_max)
      }else {
        cluserted_features[, 2:(ncol(cluserted_features) - 1)] <- complet_normalize_features(
          cluserted_features[, 2:(ncol(cluserted_features) - 1)], min_max)
      }
    }else {
      # No stored model: plain 0-1 range normalization on the current data
      if (time_bin == time_bin_day_and_hour) {
        cluserted_features[, 3:(ncol(cluserted_features) - 1)] <- normalize(
          cluserted_features[, 3:(ncol(cluserted_features) - 1)], method = "range", range = c(0, 1))
      }else {
        cluserted_features[, 2:(ncol(cluserted_features) - 1)] <- normalize(
          cluserted_features[, 2:(ncol(cluserted_features) - 1)], method = "range", range = c(0, 1))
      }
    }
  }
  return(cluserted_features)
}
# Calculates the min and max per feature column, excluding the leading time
# column(s) and the trailing group column. Returns a data frame with one row
# per feature: column 1 = min, column 2 = max.
# BUG FIX: `features[, start:(ncol - 1)]` dropped to a plain vector when
# exactly one feature column remained, making ncol() fail; drop = F keeps the
# data frame shape.
calculate_min_max <- function(features, time_bin) {
  # Day+hour bins carry two leading time columns, all other bins only one
  if (time_bin == time_bin_day_and_hour) {
    start <- 3
  }else {
    start <- 2
  }
  value_columns <- features[, start:(ncol(features) - 1), drop = F]
  min_max <- data.frame()
  for (j in seq_len(ncol(value_columns))) {
    min_max[j, 1] <- min(value_columns[, j])
    min_max[j, 2] <- max(value_columns[, j])
  }
  return(min_max)
}
# Merge a stored min/max table with a freshly computed one, widening the
# range per feature where needed (row = feature, col 1 = min, col 2 = max).
# BUG FIX: the loop iterated over seq_len(ncol(...)) - always 2 columns -
# while indexing ROWS, so only the first two features were ever merged;
# it now iterates over the rows.
calculate_from_two_min_max <- function(min_max_loaded, min_max_new) {
  min_max <- data.frame()
  for (i in seq_len(nrow(min_max_loaded))) {
    min_max[i, 1] <- min(min_max_loaded[i, 1], min_max_new[i, 1])
    min_max[i, 2] <- max(min_max_loaded[i, 2], min_max_new[i, 2])
  }
  return(min_max)
}
# Min-max scale every element of features into [0, 1] given fixed bounds.
normalize_features <- function(features, min, max) {
  scale_one <- function(value, min, max) {
    (value - min) / (max - min)
  }
  sapply(features, scale_one, min = min, max = max)
}
# Normalize every feature column with a combined min/max vector laid out as
# c(min_1..min_n, max_1..max_n) (the unlist() of the min/max data frame).
# BUG FIX: the maximum was always read from position ncol(features) + 1 -
# the FIRST column's max - for every column; it now uses ncol(features) + i
# so each column is scaled with its own maximum.
complet_normalize_features <- function(features, min_max) {
  for (i in seq_len(ncol(features))) {
    features[, i] <- normalize_features(features[, i], min = min_max[i],
                                        max = min_max[ncol(features) + i])
  }
  return(features)
}
#########################
# Statistical Functions #
#########################
# Write all statistic artefacts into a "<type>_statistics" directory below
# the output path, where type is "pre" (raw data) or "post" (preprocessed):
# general infos, logon type distribution, monthly and daily timelines and the
# top users per logon type.
data_statistics <- function(data, parsed_arguments, type) {
  statistics_path <- paste0(parsed_arguments$path, type, "_statistics/")
  dir.create(statistics_path)
  write_general_infos(data, statistics_path)
  plot_partition_logontype(data, statistics_path)
  plot_timeline_month(generate_timeline_month(data), statistics_path)
  generate_and_plot_timeline_day(data, statistics_path, parsed_arguments$startdate, parsed_arguments$enddate)
  write_users_with_most_logon_proportion(data, statistics_path)
}
# Bar chart of the event count per logon type code (0-13), saved as
# Logon_type.png in the given statistics directory.
plot_partition_logontype <- function(data, path) {
  logontype <- data.frame()
  for (code in 0:13) {
    events_of_type <- data[(data$Logon_Type == code),]
    logontype[code + 1, 1] <- code
    logontype[code + 1, 2] <- nrow(events_of_type)
  }
  logontype_plot <- ggplot(data = logontype, aes(x = logontype[, 1], y = logontype[, 2])) +
    geom_bar(stat = "identity") +
    xlab("Logon Type") +
    ylab("Count")
  suppressMessages(ggsave(paste0(path, "Logon_type.png"), logontype_plot, width = 10, dpi = 300, limitsize = F))
}
# Write a small text summary into general_infos.txt: well known source ports
# in use, number of distinct hosts/users/source IPs and the covered time range.
write_general_infos <- function(data, path) {
  count_distinct <- function(column) {
    nrow(summarise(group_by(data, data[[column]]), n()))
  }
  well_known_ports <- data[(data$Source_Port %in% 1:1023 &
    is.na(data$Source_Port) != T), "Source_Port"]
  infos <- c(
    paste("Existing Well known Source Ports:", paste(as.character(well_known_ports), collapse = ", ")),
    paste("Number of Hosts:", count_distinct("Host")),
    paste("Number of Users:", count_distinct("User")),
    paste("Number of Source-IPs:", count_distinct("Source")),
    paste("Smallest date of the data:", min(data$Time)),
    paste("Newest date:", max(data$Time))
  )
  write_out(infos, paste0(path, "general_infos.txt"), row.names = F, col.names = F)
}
# Monthly event counts from the first to the last month occurring in data.
# Returns a data frame with columns Time (first day of month) and Count.
generate_timeline_month <- function(data) {
  first_month <- as.Date(paste(year(min(data$Time)), month(min(data$Time)), "01", sep = "-"))
  last_month <- as.Date(paste(year(max(data$Time)), month(max(data$Time)), "01", sep = "-"))
  timeline <- data.frame()
  month_index <- 0
  repeat {
    month_start <- first_month %m+% months(month_index)
    month_end <- first_month %m+% months(month_index + 1)
    timeline[month_index + 1, 1] <- month_start
    timeline[month_index + 1, 2] <- nrow(data[(data$Time >= month_start &
      (data$Time < month_end)),])
    if (month_start == last_month) {
      break
    }
    month_index <- month_index + 1
  }
  colnames(timeline) <- c("Time", "Count")
  return(timeline)
}
# Area/line chart of the monthly timeline, saved as
# Complet_timeseries_months.png in the statistics directory.
plot_timeline_month <- function(timeline, path) {
  monthly_plot <- ggplot(timeline, aes(x = Time, y = Count)) +
    geom_area(fill = "#69b3a2", alpha = 0.5) +
    geom_line()
  output_file <- paste0(path, "Complet_timeseries_months.png")
  suppressMessages(ggsave(output_file, monthly_plot, width = 50, dpi = 300, limitsize = F))
}
# Heuristically pick an analysis window: z-scale the monthly counts and keep
# the months above a high quantile (i.e. the busy months). The quantile level
# grows slightly with the timeline length (0.90 + n * 0.00019); the constants
# appear empirically tuned - NOTE(review): confirm against the project's
# evaluation before changing them.
# Returns list(start of first busy month, first day after the last busy month).
calculate_start_and_enddate <- function(timeline) {
  timeline[, 3] <- scale(timeline[, 2])
  border <- as.numeric(quantile(timeline[, 3], (0.90 + nrow(timeline) * 0.00019)))
  left <- timeline[timeline[, 3] > border,]
  return(list(left[1, 1], as.Date(left[nrow(left), 1]) %m+% months(1)))
}
# Daily event counts between startdate and enddate (inclusive), plotted as an
# area/line chart and saved as Quantil_timeseries_days.png.
generate_and_plot_timeline_day <- function(data, path, startdate, enddate) {
  timeline <- data.frame()
  day_index <- 0
  repeat {
    day_start <- as.Date(startdate) %m+% days(day_index)
    day_end <- as.Date(startdate) %m+% days(day_index + 1)
    timeline[day_index + 1, 1] <- day_start
    timeline[day_index + 1, 2] <- nrow(data[(data$Time >= day_start &
      (data$Time < day_end)),])
    if (day_start == as.Date(enddate)) {
      break
    }
    day_index <- day_index + 1
  }
  colnames(timeline) <- c("Time", "Count")
  daily_plot <- ggplot(timeline, aes(x = Time, y = Count)) +
    geom_area(fill = "#69b3a2", alpha = 0.5) +
    geom_line()
  suppressMessages(ggsave(paste0(path, "Quantil_timeseries_days.png"), daily_plot, width = 50, dpi = 300, limitsize = F))
}
# For every logon type occurring in the data, write the five users with the
# highest share of that type's events into Users_with_most_logon_types.txt.
write_users_with_most_logon_proportion <- function(data, path) {
  # All logon types present in the data
  logon_types <- distinct(data, data$Logon_Type)
  logons <- NULL
  for (i in logon_types[, 1]) {
    # Event count per user for this logon type
    users_with_counts <- data[(data$Logon_Type == i),] %>%
      group_by(User) %>%
      summarise(n())
    users_with_counts <- users_with_counts[order(users_with_counts$`n()`, decreasing = T),]
    sum_logontype <- sum(users_with_counts$`n()`)
    # Turn absolute counts into proportions of this logon type's total
    users_with_counts[, 2] <- apply(users_with_counts[, 2], 2, function(x) { x / sum_logontype })
    # Keep only the top five users
    users_with_counts <- slice(users_with_counts, 1:5)
    logons <- append(logons, paste0("Users with the most ", i, " Logon types:"))
    for (k in seq_len(nrow(users_with_counts))) {
      logons <- append(logons, paste(" ", users_with_counts[k, 1],
                                     users_with_counts[k, 2]))
    }
    # Blank separator line between logon types
    logons <- append(logons, "")
  }
  write_out(logons, paste0(path, "Users_with_most_logon_types.txt"), row.names = F, col.names = F)
}
#-----------------------------------------------------------------------------------------------------------------------
# If the script start from console original path is needed.
# Resolves the absolute directory of the running script from the "--file="
# command line argument. Handles the case that the script is started through
# a symlink (".ln" extension): the link target is read via Sys.readlink and
# resolved relative to the link's directory.
# NOTE(review): relies on `system("pwd")`, so this is POSIX-only.
detect_absolute_path_script <- function(file) {
  # Directory part of the --file= argument (strip everything after last "/")
  file_loc <- sub("[^/]*$", "", sub("--file=", "", file[grep("--file=.*", file)]))
  if (file_ext(file_loc) == "ln") {
    if (substring(file_loc, 1, 1) == ".") {
      # Relative link location: anchor it at the current working directory
      path_exec <- system("pwd", intern = T)
      link_path <- paste0(path_exec, substring(file_loc, 2))
    }else {
      link_path <- substring(file_loc, 2)
    }
    # Relative path from link file to dir
    relativ_path <- Sys.readlink(paste0("/", link_path, "FindMaliciousEvents"))
    # Calculate absolute path
    absolute_path <- paste0(getAbsolutePath.default(sub("FindMaliciousEvents.R$", "", relativ_path),
                                                    workDirectory = paste0("/", link_path)), "/")
  }else {
    absolute_path <- paste0(getAbsolutePath.default(sub("FindMaliciousEvents.R$", "", file_loc),
                                                    workDirectory = paste0(system("pwd", intern = T))), "/")
  }
  return(absolute_path)
}
# Run the selected anomaly detection method on the extracted features.
# The Python based methods (IF = isolation forest, kNN, DAGMM) are delegated
# to external scripts fed with the feature csv; "RF" (the remaining case)
# runs the R random forest directly on the in-memory features.
anomaly_detection <- function(features, parsed_arguments, config_data) {
  machine_learning <- parsed_arguments$machine_learning
  path <- parsed_arguments$path
  # Feature csv location differs when pre-extracted features were supplied
  data_path <- set_data_path_to_features(parsed_arguments)
  cores <- parsed_arguments$cores
  load_model <- parsed_arguments$load_model
  save_model <- parsed_arguments$save_model
  model_path <- parsed_arguments$model_path
  rank <- parsed_arguments$rank
  rank_method <- parsed_arguments$rank_method
  absolute_path <- parsed_arguments$absolute_path
  cat("Machine Learning is processing...")
  if (machine_learning == "IF" ||
    machine_learning == "kNN" ||
    machine_learning == "DAGMM") {
    # These methods run in an external Python environment
    setup_python(absolute_path)
    tryCatch(expr = {
      switch(machine_learning,
             "IF" = python_machine_learning_isolationforest(absolute_path, path, data_path, cores,
                                                            rank, rank_method, load_model, save_model, model_path,
                                                            config_data = config_data[['isolationforest']]),
             "kNN" = python_machine_learning_kNN(absolute_path, path, data_path, cores,
                                                 rank, rank_method, load_model, save_model, model_path,
                                                 config_data = config_data[['k_nearest_neigbhour']]),
             "DAGMM" = python_machine_learning_dagmm(absolute_path, path, data_path,
                                                     rank, rank_method, load_model, save_model, model_path,
                                                     config_data = config_data[['deep_autoencoding_gaussian_mixture_model']])
      )
    }, error = function(e) {
      stop_and_help(paste0("An errror appeared into python script.\n", e), call. = F)
    })
  }else {
    # Random forest runs natively in R
    machine_learning_randomforest(features, parsed_arguments$view, parsed_arguments$time_bin, cores,
                                  path, load_model, model_path, save_model,
                                  config_data = config_data[['randomforest']], parsed_arguments$number_clusters,
                                  spectral_clustering = parsed_arguments$spectral_clustering,
                                  group_changes = parsed_arguments$group_changes)
  }
}
# If -e is used the data_path needs to be the original from the console and not the new one (inside the path)
# Path of the feature csv: the user supplied file when -e (already extracted
# features) is set, otherwise the Features.csv inside the output path.
set_data_path_to_features <- function(parsed_arguments) {
  if (parsed_arguments$extracted_features) {
    parsed_arguments$data_path
  } else {
    paste0(parsed_arguments$path, "Features.csv")
  }
}
# Read config.yaml next to the script. Aborts with a helpful message when the
# file is missing, unreadable, not .yaml, or malformed yaml.
load_machine_learning_config <- function(parsed_arguments) {
  config_file <- paste0(parsed_arguments$absolute_path, "config.yaml")
  validate_config_file(config_file)
  tryCatch(expr = {
    return(read_yaml(config_file))
  }, error = function(e) {
    stop_and_help(paste0("The config file (", config_file, ") is not correct formated."), call. = F)
  })
}
# Ensure the config file exists, is readable, and has a .yaml extension;
# report only the first problem found.
validate_config_file <- function(config_file) {
  problem <- NULL
  if (!file.exists(config_file)) {
    problem <- "dont exists anymore."
  } else if (file.access(config_file, read_permission) == -1) {
    problem <- "dont have read permissions."
  } else if (file_ext(config_file) != "yaml") {
    problem <- "doesnt fit to .yaml file type."
  }
  if (!is.null(problem)) {
    stop_and_help(paste0("The config file (", config_file, ") ", problem), call. = F)
  }
}
# Dispatch the config validation for the chosen machine learning method.
validate_config <- function(config_data, parsed_arguments, features) {
  method <- parsed_arguments$machine_learning
  if (method == "IF") {
    validate_isolationforest_arguments(config_data)
  } else if (method == "kNN") {
    validate_knn_arguments(config_data, features)
  } else if (method == "DAGMM") {
    validate_dagmm_arguments(config_data, features)
  } else if (method == "RF") {
    validate_randomforest_arguments(config_data, features)
  }
}
# Check the isolationforest config section: it must exist, contain exactly
# the expected hyperparameter keys, and every value must be in range.
validate_isolationforest_arguments <- function(config_data) {
  method <- "isolationforest"
  expected <- c("n_estimators", "max_samples", "contamination", "max_features", "random_state")
  validate_machine_learning_method_exists(config_data, method)
  validate_machine_learning_hyperparamters_exist(config_data, method, expected)
  validate_machine_learning_hyperparamters_isolationforest(config_data)
}
# Check the kNN config section: existence, exact hyperparameter key set, and
# value ranges (some bounds depend on the feature data).
validate_knn_arguments <- function(config_data, features) {
  method <- "k_nearest_neigbhour"
  expected <- c("contamination", "n_neighbors", "method", "algorithm", "metric")
  validate_machine_learning_method_exists(config_data, method)
  validate_machine_learning_hyperparamters_exist(config_data, method, expected)
  validate_machine_learning_hyperparamters_k_nearest_neigbhour(config_data, features)
}
# Check the DAGMM config section: existence, exact hyperparameter key set,
# and value ranges.
validate_dagmm_arguments <- function(config_data, features) {
  method <- "deep_autoencoding_gaussian_mixture_model"
  expected <- c("comp_hiddens", "comp_activation", "est_hiddens", "est_activation", "est_dropout_ratio",
                "epoch_size", "minibatch_size", "random_seed", "dynamic")
  validate_machine_learning_method_exists(config_data, method)
  validate_machine_learning_hyperparamters_exist(config_data, method, expected)
  validate_machine_learning_hyperparamters_deep_autoencoding_gaussian_mixture_model(config_data, features)
}
# Check the randomforest config section: existence, exact hyperparameter key
# set, and value ranges.
validate_randomforest_arguments <- function(config_data, features) {
  method <- "randomforest"
  expected <- c("num.trees", "mtry", "min.node.size", "sample.fraction", "max.depth", "seed", "dynamic")
  validate_machine_learning_method_exists(config_data, method)
  validate_machine_learning_hyperparamters_exist(config_data, method, expected)
  validate_machine_learning_hyperparamters_randomforest(config_data, features)
}
# Abort when the config contains no section for the given method.
validate_machine_learning_method_exists <- function(config_data, method) {
  method_config <- config_data[[method]]
  if (is.null(method_config)) {
    stop_and_help(paste("The config for", method, "does not exists."), call. = F)
  }
}
# Abort when the method's config keys differ from the expected hyperparameter
# set (missing keys or unknown extras; order independent).
# IDIOM FIX: replaces the manual two-way `%in%` check compared against F with
# the equivalent, clearer base R setequal().
validate_machine_learning_hyperparamters_exist <- function(config_data, method, hyperparameters) {
  if (!setequal(names(config_data[[method]]), hyperparameters)) {
    stop_and_help(paste("The config for method", method, "is damaged."), call. = F)
  }
}
# Range checks for the isolationforest hyperparameters.
# NOTE(review): validate_hyperparameter is defined elsewhere in this file;
# judging only from these call sites its positional arguments appear to be
# (config, name, allowed_strings, <flag>, <flag>, <flag>, lower, upper,
# <flag>) - confirm against its definition before relying on this reading.
validate_machine_learning_hyperparamters_isolationforest <- function(config_data) {
  isolationforest_config_data <- config_data[['isolationforest']]
  validate_hyperparameter(isolationforest_config_data, "n_estimators", NULL, F, T, T, 0, 100000, F)
  validate_hyperparameter(isolationforest_config_data, "max_samples", "auto", F, T, F, 0, 1.0, F)
  validate_hyperparameter(isolationforest_config_data, "contamination", NULL, F, T, F, 0, 0.99999999, F)
  validate_hyperparameter(isolationforest_config_data, "max_features", NULL, F, T, F, 0, 1.0, F)
  validate_hyperparameter(isolationforest_config_data, "random_state", NULL, T, T, T, -Inf, Inf, F)
}
# Validates every k-nearest-neighbour hyperparameter value in the config.
# n_neighbors is bounded by the number of available observations.
validate_machine_learning_hyperparamters_k_nearest_neigbhour <- function(config_data, features) {
  knn_config <- config_data[['k_nearest_neigbhour']]
  validate_hyperparameter(knn_config, "contamination", possible_number = T,
                          left_interval = 0, right_interval = .99999999)
  validate_hyperparameter(knn_config, "n_neighbors", possible_number = T, integer = T,
                          left_interval = 0, right_interval = nrow(features))
  validate_hyperparameter(knn_config, "method", possible_strings = c("largest", "mean", "median"))
  validate_hyperparameter(knn_config, "algorithm",
                          possible_strings = c("ball_tree", "kd_tree", "brute", "auto"))
  # Distance metrics accepted by the Python kNN backend.
  possible_metrics <- c(
    'cityblock', 'cosine', 'euclidean', 'l1', 'l2', 'manhattan', 'braycurtis',
    'canberra', 'chebyshev', 'correlation', 'dice', 'hamming', 'jaccard',
    'kulsinski', 'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
    'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean',
    'yule'
  )
  validate_hyperparameter(knn_config, "metric", possible_strings = possible_metrics)
}
# Validates every DAGMM hyperparameter value in the config. The hidden-layer
# sizes may be numeric vectors; the minibatch size is bounded by the number
# of available observations.
validate_machine_learning_hyperparamters_deep_autoencoding_gaussian_mixture_model <- function(config_data, features) {
  dagmm_config <- config_data[['deep_autoencoding_gaussian_mixture_model']]
  validate_hyperparameter(dagmm_config, "comp_hiddens", possible_number = T, integer = T,
                          left_interval = 0, right_interval = Inf, possible_vector = T)
  # Activation function names accepted for both sub-networks.
  activation_functions <- c(
    "deserialize", "elu", "exponential", "gelu", "get", "hard_sigmoid",
    "linear", "relu", "selu", "serialize", "sigmoid", "softmax",
    "softplus", "softsign", "swish", "tanh"
  )
  validate_hyperparameter(dagmm_config, "dynamic", possible_logic = T)
  validate_hyperparameter(dagmm_config, "comp_activation", possible_strings = activation_functions)
  validate_hyperparameter(dagmm_config, "est_hiddens", possible_number = T, integer = T,
                          left_interval = 0, right_interval = Inf, possible_vector = T)
  validate_hyperparameter(dagmm_config, "est_activation", possible_strings = activation_functions)
  validate_hyperparameter(dagmm_config, "est_dropout_ratio", possible_null = T, possible_number = T,
                          left_interval = 0, right_interval = 0.99999999)
  validate_hyperparameter(dagmm_config, "epoch_size", possible_number = T, integer = T,
                          left_interval = 99, right_interval = Inf)
  validate_hyperparameter(dagmm_config, "minibatch_size", possible_number = T, integer = T,
                          left_interval = 0, right_interval = nrow(features))
  validate_hyperparameter(dagmm_config, "random_seed", possible_number = T, integer = T,
                          left_interval = -Inf, right_interval = Inf)
}
# Validates every randomforest hyperparameter value in the config. mtry is
# bounded by the number of feature columns; max.depth and seed may be NULL.
validate_machine_learning_hyperparamters_randomforest <- function(config_data, features) {
  rf_config <- config_data[['randomforest']]
  validate_hyperparameter(rf_config, "dynamic", possible_logic = T)
  validate_hyperparameter(rf_config, "num.trees", possible_number = T, integer = T,
                          left_interval = 0, right_interval = 100000)
  validate_hyperparameter(rf_config, "mtry", possible_number = T, integer = T,
                          left_interval = 0, right_interval = ncol(features))
  validate_hyperparameter(rf_config, "min.node.size", possible_number = T, integer = T,
                          left_interval = 0, right_interval = Inf)
  validate_hyperparameter(rf_config, "sample.fraction", possible_number = T,
                          left_interval = 0, right_interval = 1.0)
  validate_hyperparameter(rf_config, "max.depth", possible_null = T, possible_number = T,
                          integer = T, left_interval = 0, right_interval = Inf)
  validate_hyperparameter(rf_config, "seed", possible_null = T, possible_number = T,
                          integer = T, left_interval = -Inf, right_interval = Inf)
}
# Generic validator for one hyperparameter entry of a method's config list.
# Dispatches on the *actual* type of the stored value, then checks that this
# type is allowed and that the value satisfies its constraints.
#
# method_config_data: config sub-list of one ML method.
# hyperparameter:     name of the entry to validate.
# possible_strings:   allowed values when the entry is a character string.
# possible_null:      TRUE if the entry may be NULL (or 0, treated like NULL).
# possible_number:    TRUE if the entry may be a single number.
# integer:            when numeric, must the value be a whole number?
# left_interval/right_interval: bounds forwarded to
#                     validate_hyperprameter_interval().
# possible_vector:    TRUE if the entry may be a numeric vector (layer sizes).
# possible_logic:     TRUE if the entry may be a logical flag.
validate_hyperparameter <- function(method_config_data, hyperparameter, possible_strings = NULL,
                                    possible_null = F, possible_number = F, integer = F, left_interval = NULL,
                                    right_interval = NULL, possible_vector = F, possible_logic = F) {
  if (is.character(method_config_data[[hyperparameter]])) {
    # Character value: must be one of the whitelisted strings.
    if ((method_config_data[[hyperparameter]] %in% possible_strings) == F) {
      stop_and_help(paste0("The Hyperparamter ", hyperparameter, " doesnt fit to the character option."))
    }
  # NOTE(review): `&&` binds tighter than `||`, so this condition reads as
  # is.null(x) || (x == 0 && !is.logical(x)) - i.e. 0 is treated like NULL,
  # but FALSE (which compares equal to 0) is not. Confirm this is intended.
  }else if (is.null(method_config_data[[hyperparameter]]) || (method_config_data[[hyperparameter]] == 0) &&
    is.logical(method_config_data[[hyperparameter]]) == F) {
    if (possible_null == F) {
      stop_and_help(paste("The Hyperparameter", hyperparameter, "can not be a null/0 value."))
    }
  # Single number: is.na(x[2]) is TRUE only for length-1 vectors, so this
  # branch matches scalars and leaves longer numeric vectors to the
  # is.vector() branch below.
  }else if (is.numeric(method_config_data[[hyperparameter]]) && is.na(method_config_data[[hyperparameter]][2]) ) {
    if (possible_number) {
      if (integer) {
        validate_hyperparamter_is_integer(method_config_data, hyperparameter)
      }else {
        validate_hyperparamter_is_double(method_config_data, hyperparameter)
      }
      validate_hyperprameter_interval(method_config_data, hyperparameter, left_interval, right_interval)
    }else {
      stop_and_help(paste("The Hyperparameter", hyperparameter, "can not be a numeric value."))
    }
  }else if (is.logical(method_config_data[[hyperparameter]])) {
    if (possible_logic == F) {
      stop_and_help(paste0("The ", hyperparameter, " needs to be TRUE or FALSE."))
    }
  }else if (is.vector(method_config_data[[hyperparameter]])) {
    if (possible_vector) {
      if (is.numeric(method_config_data[[hyperparameter]]) == F || is.vector(method_config_data[[hyperparameter]]) == F) {
        stop_and_help(paste0("The Hyperparamter ", hyperparameter, " needs to be a numeric vector."))
      # Vector must be sorted in descending order: reversing it and testing
      # for ascending sortedness (ties allowed).
      }else if (is.unsorted(rev(method_config_data[[hyperparameter]]))) {
        stop_and_help(paste0("The Hyperparamter ", hyperparameter, " needs to be inverted sorted."))
      }else if (method_config_data[[hyperparameter]][[length(method_config_data[[hyperparameter]])]] <= 0) {
        stop_and_help(paste0("The last Array Hyperparamter ", hyperparameter, " needs to be bigger than zero."))
      }
    }else {
      stop_and_help(paste("The Hyperparameter", hyperparameter, "can not be a vector/array."))
    }
  }else {
    # Anything else (e.g. a list or an unexpected class) is rejected.
    stop_and_help(paste("The Hyperparameter", hyperparameter, "does not fit to the options."))
  }
}
# Rejects the hyperparameter when it is not numeric or not a whole number.
# Note: a double like 5.0 counts as an integer here (5.0 %% 1 == 0).
validate_hyperparamter_is_integer <- function(method_config_data, hyperparameter) {
  value <- method_config_data[[hyperparameter]]
  if (!is.numeric(value)) {
    stop_and_help(paste0("The Hyperparamter ", hyperparameter, " is not a number."), call. = F)
  }else if (value %% 1 != 0) {
    stop_and_help(paste0("The Hyperparameter ", hyperparameter, " is not an integer."), call. = F)
  }
}
# Rejects the hyperparameter when it is not stored as a double.
validate_hyperparamter_is_double <- function(method_config_data, hyperparameter) {
  value <- method_config_data[[hyperparameter]]
  if (!is.double(value)) {
    stop_and_help(paste0("The Hyperparamter ", hyperparameter, " is not a double."), call. = F)
  }
}
# Checks the hyperparameter against the half-open interval
# (left_interval, right_interval]: values <= left or > right are rejected.
validate_hyperprameter_interval <- function(method_config_data, hyperparameter, left_interval, right_interval) {
  value <- method_config_data[[hyperparameter]]
  if (value <= left_interval) {
    stop_and_help(paste0("The Hyperparamter ", hyperparameter, " is too small."), call. = F)
  }else if (value > right_interval) {
    stop_and_help(paste0("The Hyperparamter ", hyperparameter, " is too big."), call. = F)
  }
}
# Checks that Python 3 is installed and, if so, activates the
# "maliciousevents" virtual environment located under `path`.
# Aborts with a help message when either step fails.
# NOTE(review): use_python()/use_virtualenv() appear to come from a Python
# bridge package (presumably reticulate) loaded elsewhere - confirm.
setup_python <- function(path) {
  tryCatch(expr = {
    # Resolve the python3 binary via the shell; errors if none is found.
    use_python(as.character(system("which python3", intern = T)))
  }, error = function(e) {
    stop_and_help("Python 3 is not installed.", call. = F)
  })
  tryCatch(expr = {
    # required = T makes activation fail loudly when the venv is missing.
    use_virtualenv(paste0(path, "maliciousevents"), required = T)
  }, error = function(e) {
    stop_and_help(paste0("The PATH:", path, " is not containing the maliciousevents virtual environment."), call. = F)
  })
}
# Runs the Python isolation-forest implementation on the prepared data set.
python_machine_learning_isolationforest <- function(Input_path, Output_path, data_path, cores,
                                                    rank, rank_method, load_model, save_model, model_path, config_data) {
  script_path <- paste0(Input_path, "ml/IsolationForest_Anwendung.py")
  source_python(script_path)
  isolationforest_exec(Output_path, data_path, as.integer(cores), rank, rank_method, load_model, save_model, model_path, config_data)
}
# Runs the Python k-nearest-neighbour implementation on the prepared data set.
python_machine_learning_kNN <- function(Input_path, Output_path, data_path, cores,
                                        rank, rank_method, load_model, save_model, model_path, config_data) {
  script_path <- paste0(Input_path, "ml/kNN_Anwendung.py")
  source_python(script_path)
  knn_exec(Output_path, data_path, as.integer(cores), rank, rank_method, load_model, save_model, model_path, config_data)
}
# Runs the Python DAGMM implementation on the prepared data set.
python_machine_learning_dagmm <- function(Input_path, Output_path, data_path,
                                          rank, rank_method, load_model, save_model, model_path, config_data) {
  script_path <- paste0(Input_path, "ml/DAGMM_Anwendung.py")
  source_python(script_path)
  dagmm_exec(Output_path, data_path, rank, rank_method, load_model, save_model, model_path, config_data)
}
# Uses a random forest to predict, for every observation, which cluster
# (group) it belongs to, then summarises per identifier how many groups are
# visited (or how often the group changes) and writes the result to disk.
machine_learning_randomforest <- function(features, view, time_bin, cores,
                                          path, load_model, model_path, save_model, config_data,
                                          number_clusters, spectral_clustering, group_changes) {
    # Clusters the data and returns the per-group mean values plus the
    # cluster number as label. (Translated from the original German comment.)
    means_label <- group_features(features, time_bin, cores, label = T, load_model, model_path,
                                  save_model, path, number_clusters, spectral_clustering)
    # Class weights proportional to each cluster's share of the training data.
    class_distribution <- table(means_label$Group)
    weights <- class_distribution / nrow(means_label)
    # Parallel backend for caret's cross-validation.
    cluster_of_cores <- makeCluster(cores)
    registerDoParallel(cluster_of_cores)
    if (load_model) {
        model <- load_randomforest_model(model_path, means_label)
    }else if (config_data[["dynamic"]] == F) {
        # Static mode: train once with the hyperparameters from the config.
        model <- train(
            as.factor(Group) ~ .,
            data = means_label,
            method = "ranger",
            preProcess = "BoxCox",
            trControl = trainControl(method = "cv", number = 5),
            num.trees = config_data[['num.trees']],
            tuneGrid = expand.grid(mtry = config_data[['mtry']], min.node.size = config_data[['min.node.size']], splitrule = "gini"),
            sample.fraction = config_data[['sample.fraction']],
            class.weights = weights,
            max.depth = config_data[['max.depth']],
            seed = config_data[['seed']]
        )
    }else {
        # Dynamic mode: grid-search the hyperparameters first.
        hyper_grid <- grid_search_randomforest(means_label)
        # Trains the model with the best hyperparameters (first grid row).
        model <- train(
            as.factor(Group) ~ .,
            data = means_label,
            method = "ranger",
            preProcess = "BoxCox",
            trControl = trainControl(method = "cv", number = 5),
            num.trees = 500,
            tuneGrid = expand.grid(mtry = hyper_grid$mtry[1], min.node.size = hyper_grid$node_size[1], splitrule = "gini"),
            sample.fraction = hyper_grid$sampe_size[1],
            class.weights = weights,
            max.depth = hyper_grid$max_deph[1],
            seed = 123
        )
    }
    stopCluster(cluster_of_cores)
    if (save_model) {
        saveRDS(model, paste0(path, "model/", "model.rds"))
    }
    # Predict classes on the full data set, using only the feature columns the
    # model was trained on (the last column of means_label is the Group label).
    tryCatch(
        expr = {
            preds <- predict(model, newdata = features[, colnames(means_label[-ncol(means_label)])], type = "raw")
        }, error = function(e) {
            stop_and_help("The features of the data should be the same like the model features.", call. = F)
        }
    )
    # Pair every identifier (first column of features) with its predicted group.
    id_with_associated_group <- data.frame(Identifier = features[, 1], Group = as.factor(preds))
    if (group_changes) {
        result <- count_changed_groups(id_with_associated_group)
    }else {
        result <- detect_visited_groups(id_with_associated_group)
    }
    # Write result
    write_out(result, paste0(path, "results.csv"), row.names = F)
}
# Loads a previously saved caret/ranger model from disk and sanity-checks it.
#
# model_path:  directory containing "model.rds".
# means_label: the freshly clustered training data; its column names are
#              compared against the model's feature names.
# Returns the loaded model object; aborts when the file does not hold a
# ranger-based caret model.
load_randomforest_model <- function(model_path, means_label) {
    model <- readRDS(paste0(model_path, "model.rds"))
    tryCatch(
        expr = {
            # A caret model trained with method = "ranger" carries its forest
            # in finalModel$forest with class "ranger.forest".
            model_type <- attr(model$finalModel$forest, "class")
            if (model_type != "ranger.forest") {
                stop_and_help("Missing the correct model on load with the correct machine learning option.", call. = F)
            }
        }, error = function(e) {
            # Also reached when the attribute is missing entirely: NULL != "..."
            # yields a zero-length condition, which errors and lands here.
            stop_and_help("Missing the correct model on load with the correct machine learning option.", call. = F)
        }
    )
    # Warn (without aborting) when the model expects a feature that was not
    # extracted. (Fixed typo in the user-facing message: "note" -> "not".)
    if (any((model[["finalModel"]][["forest"]][["independent.variable.names"]] %in% colnames(means_label)) == F)) {
        cat("Your given model contains a feature that is not included in your extracted feature set.", fill = 1)
    }
    return(model)
}
# Exhaustive grid search over random-forest hyperparameters on a 70/30
# train/test split of the clustered mean data. Fills the grid's OOB error and
# test accuracy columns and returns the grid sorted by test accuracy
# (best combination first).
# NOTE(review): ranger() and confusionMatrix() come from the ranger and caret
# packages loaded elsewhere in this file.
grid_search_randomforest <- function(means_label) {
    # Split the means values in train and test data (70% train, rest test,
    # matched by row name).
    train <- means_label[sample(seq_len(nrow(means_label)), nrow(means_label) * 0.7),]
    test <- means_label[!(rownames(means_label) %in% rownames(train)),]
    # Create hyperparameter grid
    hyper_grid <- create_hypergrid_for_gridsearch(ncol(means_label))
    # Iterate through the grid and evaluate every hyperparameter combination.
    for (i in seq_len(nrow(hyper_grid))) {
        # Train model with a fixed seed so rows are comparable.
        model <- ranger(
            formula = Group ~ .,
            data = train,
            num.trees = 500,
            mtry = hyper_grid$mtry[i],
            min.node.size = hyper_grid$node_size[i],
            sample.fraction = hyper_grid$sampe_size[i],
            max.depth = hyper_grid$max_deph[i],
            seed = 123
        )
        # Add OOB error to grid
        hyper_grid$OOB_RMSE[i] <- sqrt(model$prediction.error)
        # Accuracy on the held-out split (first element of conf$overall).
        preds <- predict(model, data = test, type = "response")
        conf <- confusionMatrix(preds$predictions, test$Group)
        hyper_grid$pred_test[i] <- as.numeric(conf$overall[1])
    }
    # Sort the grid by test accuracy, best first.
    hyper_grid <- hyper_grid[order(hyper_grid$pred_test, decreasing = T),]
    return(hyper_grid)
}
# Builds the full hyperparameter grid searched by grid_search_randomforest().
#
# cols_means: number of columns of the clustered mean data; mtry is searched
#             from 2 up to cols_means - 1 (all predictors, minus the label).
# Returns a data.frame with one row per combination plus two bookkeeping
# columns (OOB_RMSE, pred_test) initialised to 0.
create_hypergrid_for_gridsearch <- function(cols_means) {
  mtry_candidates <- seq(2, cols_means - 1, by = 1)
  node_size_candidates <- seq(3, 9, by = 2)
  sample_size_candidates <- c(.55, .632, .70, .80)
  depth_candidates <- seq(5, 14, by = 2)
  expand.grid(
    mtry = mtry_candidates,
    node_size = node_size_candidates,
    sampe_size = sample_size_candidates,
    max_deph = depth_candidates,
    OOB_RMSE = 0,
    pred_test = 0
  )
}
# Counts, per identifier, how often the predicted group changes between
# consecutive observations, and returns the identifiers sorted by that
# count (most volatile first).
count_changed_groups <- function(id_with_associated_group) {
  iterator <- distinct(id_with_associated_group, Identifier)
  result <- data.frame(iterator, changed_groups = 0)
  for (i in seq_len(nrow(result))) {
    # Group sequence of this identifier, in original row order.
    group_sequence <- id_with_associated_group$Group[id_with_associated_group$Identifier == result[i, 1]]
    if (length(group_sequence) >= 2) {
      # A "change" is any position whose group differs from its predecessor.
      result[i, 2] <- sum(group_sequence[-1] != group_sequence[-length(group_sequence)])
    }
  }
  result[order(result$changed_groups, decreasing = T),]
}
# Counts how many distinct groups each identifier visits and returns a plain
# data.frame sorted by that count (most groups first). Note that the count
# column keeps dplyr's default name "n()", which callers access via
# result$`n()` below.
detect_visited_groups <- function(id_with_associated_group) {
    # Counts how many groups are visted by the person and sorts it
    result <- id_with_associated_group %>%
        distinct(Identifier, Group) %>%
        group_by(Identifier) %>%
        summarise(n())
    result <- as.data.frame(result[order(result$`n()`, decreasing = T),])
    return(result)
}
#############
# Visualize #
#############
# Renders one radar plot per identifier from the results file produced by the
# ML step. For non-randomforest methods (or when ranking) only the top 50
# identifiers are plotted.
# NOTE(review): feature_name_hour and mean_rank are file-level globals
# defined outside this excerpt - confirm their values when reviewing.
visualization_results <- function(features, path, not_randomforest, rank, rank_method) {
    # Ignore warnings (e.g. from time parsing); warn = -1 suppresses them.
    options(warn = -1)
    results <- read_in(paste0(path, "results.csv"))
    # Only plot when the results file actually contains rows.
    if (is.na(results[1, 1]) == F) {
        # Convert the hour column from "HH:MM:SS" text into seconds so it can
        # be drawn on a numeric axis.
        if (feature_name_hour %in% colnames(features)) {
            features[feature_name_hour] <- as.numeric(seconds(as_hms(sapply(features[feature_name_hour], as.character))))
            if (length(grep(mean_rank, rank_method))==1) {
                results[feature_name_hour] <- as.numeric(seconds(as_hms(sapply(results[feature_name_hour], as.character))))
            }
        }
        # Strip the "X" prefix and ".<n>" suffix that were added to make row
        # names unique, recovering the original identifiers.
        identifier <- data.frame(Identifier = sub("^X", "", sub("\\.[0-9]*$", "", results[, 1])))
        iterator <- distinct(identifier, Identifier = Identifier)
        if (not_randomforest == F || rank == T) {
            # Limit the output to the 50 highest-ranked identifiers.
            iterator <- iterator %>%
                slice(1:50)
        }
        path <- paste0(path, "Radarplots/")
        dir.create(path)
        # Greyscale palette for insiders, red-purple for detected outsiders.
        palette <- colorRampPalette(colors = c("#000000", "#FFFFF0"))
        palette_outsider <- colorRampPalette(c("red", "purple"))
        set_plot_margin()
        for (i in seq_len(nrow(iterator))) {
            create_plot(results, features, iterator, i, not_randomforest, palette_outsider, palette, path, rank_method)
        }
        # Remove the Radarplots directory again if no plot could be created.
        delete_empty_directory(path)
    }else {
        cat("Nothing to plot, results are empty.", fill = 1)
    }
}
# Shrinks the inner plot margins (bottom/left/right = 1 line, top = 2) and
# removes the outer margins entirely so radar charts fill the device.
set_plot_margin <- function() {
  par(mar = c(1, 1, 2, 1), oma = c(0, 0, 0, 0))
}
# Renders a single radar plot (JPEG) for the i-th identifier in `iterator`,
# drawing insiders in greyscale and detected outsiders in red-purple.
# Failures (e.g. too few features to plot) are reported but not fatal.
# NOTE(review): feature_name_id, feature_name_day and mean_rank are
# file-level globals defined outside this excerpt.
create_plot <- function(results, features, iterator, i, not_randomforest, palette_outsider, palette, path, rank_method) {
    tryCatch(
        expr = {
            extracted_insider_and_outsider <- extract_insider_and_outsider(not_randomforest, rank_method, iterator[i, 1],
                                                                           results, features)
            outsider <- extracted_insider_and_outsider$outsider
            insider <- extracted_insider_and_outsider$insider
            colors <- extracted_insider_and_outsider$colors
            # Delete the outsiders from the insiders so rows are not drawn twice.
            if (not_randomforest) {
                insider <- subset(insider, !(insider %in% outsider))
            }
            # Build the plot data and a matching color for each entity.
            if (not_randomforest) {
                # Identifier and day columns carry no plottable signal.
                not_included <- c(feature_name_id, feature_name_day)
                if (length(grep(mean_rank, rank_method))==1) {
                    colors <- palette(length(colors))
                    plot_data <- select(features[insider,], !one_of(not_included))
                }else {
                    # Insiders first (greyscale), then outsiders (red-purple).
                    colors[1:(length(colors) - length(outsider))] <- palette((length(colors) - length(outsider)))
                    colors[(length(colors) - length(outsider) + 1):length(colors)] <- palette_outsider(length(outsider))
                    plot_data <- rbind(select(features[insider,], !one_of(not_included)),
                                       select(features[outsider,], !one_of(not_included)))
                }
            }else {
                colors <- palette(nrow(insider))
                # Drop the first (identifier) column.
                plot_data <- insider[-1]
            }
            # Translucent fill colors derived from the line colors.
            colors_inside <- ggplot2::alpha(colors, 0.2)
            jpeg(paste0(path, i, "_", iterator[i, 1], ".jpg"), width = 1900, height = 1900, quality = 100,
                 pointsize = 40, res = 120)
            radarchart(plot_data, maxmin = F, axistype = 1, pcol = colors, pfcol = colors_inside,
                       plwd = 1, plty = 2, cglty = 1, cglwd = 0.8, cglcol = "#466D3A", vlcex = 0.8, axislabcol = "#00008B")
            dev.off()
        }, error = function(e) {
            cat(paste0("No Radarplots for ", iterator[i, 1],
                       " generated, because there is just one Feature per view to be plotted."), fill = 1)
        }
    )
}
# Splits the data for one identifier into "insiders" (all of its rows) and
# "outsiders" (rows flagged as anomalous by the ML step). The selection
# logic depends on the ML method and on the ranking method, because
# identifiers are presented differently and anomalies are not always
# explicitly detected. At most 50 insider rows are kept (random sample).
extract_insider_and_outsider <- function(not_randomforest, rank_method, iterator, results, features) {
  if (!not_randomforest) {
    # Randomforest: rows are matched directly via the Identifier column.
    selected <- features[(features$Identifier == iterator),]
    anomalous <- ""
    if (nrow(selected) > 50) {
      selected <- selected[sample(seq_len(nrow(selected)), 50),]
    }
    line_colors <- character(nrow(selected))
  }else {
    # Other methods: rows are matched via "X<id>.<n>" style row names.
    id_pattern <- paste0("^X", iterator, "(\\.[0-9]+$){0,1}")
    if (length(grep(mean_rank, rank_method))==1) {
      anomalous <- ""
    }else {
      anomalous <- grep(id_pattern, results[, 1], value = T)
    }
    selected <- grep(id_pattern, rownames(features), value = T)
    if (length(selected) > 50) {
      selected <- sample(selected, 50)
    }
    line_colors <- character(length(selected))
  }
  return(list(outsider = anomalous, insider = selected, colors = line_colors))
}
# Removes the given directory when it contains no entries (e.g. when no
# radar plot could be generated into it).
delete_empty_directory <- function(path) {
  directory_is_empty <- length(dir(path = path)) == 0
  if (directory_is_empty) {
    unlink(path, recursive = T)
  }
}
# Script entry point: run the full pipeline with the parsed command-line
# arguments (both defined earlier in this file).
main(args)
|
9d1a6f867383c8817d9ddd81d28fad33850499b1 | 58c08466de73ff0d150c849063257684788e52cb | /man/project_maxent.Rd | af5be6cff2a8abf6a7e4252550d416f0c75f9fc9 | [] | no_license | johnbaums/things | 59649377b1e683bb912a2a21043f89efdb81ca33 | 3930b7d2e52fa80fe4956596ecb09c8d146ef7e7 | refs/heads/master | 2020-12-24T08:15:31.484749 | 2016-08-11T05:07:02 | 2016-08-11T05:07:02 | 39,105,105 | 2 | 1 | null | null | null | null | UTF-8 | R | false | true | 3,519 | rd | project_maxent.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/project_maxent.R
\name{project_maxent}
\alias{project_maxent}
\title{Project a fitted Maxent model}
\usage{
project_maxent(lambdas, newdata, mask, quiet = FALSE)
}
\arguments{
\item{lambdas}{Either a \code{MaxEnt} fitted model object (fitted with the
\code{maxent} function in the \code{dismo} package), or a file path to a
Maxent .lambdas file.}
\item{newdata}{A \code{RasterStack}, \code{RasterBrick}, \code{list},
\code{data.frame}, \code{data.table}, or \code{matrix} that has
layers/elements/columns whose names correspond to the names of predictors
used to fit the model. These layers/elements/columns must all have the same
length.}
\item{mask}{(Optional; requires that \code{newdata} is a \code{Raster*}
object.) A \code{Raster} object with \code{NA} values in cells for which
the model should \emph{not} be projected. These cells will be assigned
\code{NA} in the returned output.}
\item{quiet}{Logical. Should projection progress be reported?}
}
\value{
If \code{newdata} is a \code{RasterStack} or \code{RasterBrick}, a
list with two elements:
\itemize{
\item{\code{prediction_raw}}{: a \code{Raster} layer giving the raw Maxent
prediction; and}
\item{\code{prediction_logistic}}{: a \code{Raster} layer giving the
logistic Maxent prediction.}
}
If \code{newdata} is \emph{not} a \code{RasterStack} or \code{RasterBrick},
the raster layers will be replaced with \code{data.table}s in the returned
list.
}
\description{
Project a fitted Maxent model by predicting to new environmental data.
}
\details{
\code{project_maxent} uses feature weights described in a .lambdas
file or \code{MaxEnt} object to predict a Maxent model to environmental
data. This function performs the projection entirely in R, without the need
for the Maxent Java software. For tested datasets, it performs the
projection in roughly one third of the time taken for the same projection
by maxent.jar.
}
\section{Warning}{
This function is still in development, and no guarantee is made for the
accuracy of its projections.
}
\examples{
# Below we use the dismo::maxent example to fit a Maxent model:
if (require(dismo) && require(rJava) &&
file.exists(file.path(system.file(package='dismo'), 'java/maxent.jar'))) {
fnames <- list.files(path=paste(system.file(package="dismo"), '/ex', sep=''),
pattern='grd', full.names=TRUE )
predictors <- stack(fnames)
occurence <- paste(system.file(package="dismo"), '/ex/bradypus.csv', sep='')
occ <- read.table(occurence, header=TRUE, sep=',')[,-1]
me <- maxent(predictors, occ, factors='biome')
# ... and then predict it to the full environmental grids:
pred <- project_maxent(me, predictors)
# This is equivalent to using the predict method for MaxEnt objects:
pred2 <- predict(me, predictors)
all.equal(values(pred$prediction_logistic), values(pred2))
}
}
\author{
John B. Baumgartner, \email{johnbaums@gmail.com}
}
\references{
\itemize{
\item{Wilson, P. W. (2009) \href{http://gsp.humboldt.edu/OLM/GSP_570/Learning Modules/10 BlueSpray_Maxent_Uncertinaty/MaxEnt lambda files.pdf}{\emph{Guidelines for computing MaxEnt model output values from a lambdas file}}.}
\item{\emph{Maxent software for species habitat modeling, version 3.3.3k} help file (software freely available \href{https://www.cs.princeton.edu/~schapire/maxent/}{here}).}
}
}
\seealso{
\code{\link{read_mxe}}
}
\keyword{maxent,}
\keyword{predict,}
\keyword{project}
|
5dac291c0adde380da5a00f6dc1426d6f3b7b233 | 3af5d90172af27b5eafd7243d70be19e3fdab0c4 | /exercise-1/exercise.R | 69d635502a93a602d819b728f82ebe439b2068bf | [
"MIT"
] | permissive | Yubo-W/m8-dataframes | d129a49e5324112ef8405e04ce77ab8a36c9a6d0 | 47651f9abc30a80445837d81b34d67633d347805 | refs/heads/master | 2021-01-10T23:12:09.715629 | 2016-10-13T22:04:37 | 2016-10-13T22:04:37 | 70,634,404 | 0 | 0 | null | 2016-10-11T20:52:54 | 2016-10-11T20:52:53 | null | UTF-8 | R | false | false | 896 | r | exercise.R | # Exercise 1: Lists
# Create a vector of everything you ate for breakfast
my.breakfast <- c("fried rice", "bacon")
# Create a vector of everything you ate for lunch
my.lunch <- c("sausage", "eggs", "onions")
# Create a list `meals` that contains your breakfast and lunch
meals <- list(breakfast=my.breakfast, lunch=my.lunch)
# Add a `dinner` index to your `meals` list that has what you plan to eat for dinner
meals$dinner <- c("Broccoli", "Stir fry")
# Extract your 'dinner' element from your list and save it in a vector called 'dinner'
dinner <- meals[["dinner"]]
### Bonus ###
# Create a list with the number of items eaten per meal.
# NOTE(review): list(lengths(meals)) wraps the whole named count vector in a
# single-element list; lengths(meals) alone (or as.list(lengths(meals)))
# may have been intended.
number <- list(lengths(meals))
# Append "pizza" to every meal in the list.
#
# ls: a list of character vectors (one element per meal).
# Returns: the list with "pizza" appended to each element.
add <- function(ls) {
  for (index in seq_along(ls)) {
    # Bug fix: the original assigned to the loop variable instead of the
    # list element, so `ls` was returned unchanged. Modify the element
    # itself so the addition actually sticks.
    ls[[index]] <- c(ls[[index]], "pizza")
  }
  return(ls)
}
# Add pizza to every meal!
meals <- add(meals) |
f2fe8bdcdf6e17c7ed95d8bf4968ae997755543c | 967d4193faebe58aaf9326144bb087e616a52349 | /src/COMP_LOG_FRAC.R | 060a2d44569f29c81109c2364aabb54873b285bb | [] | no_license | princerachit/stabilityofchaoticmaps | c81a55094be4ddba4d0f2bf5528e00621a3c4532 | 062cc54645712b9a64f5e2aaefb9445bde09ccfc | refs/heads/master | 2021-09-08T10:34:02.936692 | 2018-03-08T09:34:46 | 2018-03-08T09:34:46 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 832 | r | COMP_LOG_FRAC.R |
#Generation of fractals from complex logistic map
#Mamta Rani a, * , Rashi Agarwal b
cols <- colorRampPalette(c("white","yellow","red","black"))(5)
# variables
x <- seq(xmin, xmax,by=gap)
y <- seq(ymin, ymax,by=gap)
c <- outer(x,y*1i,FUN="+")
p <- matrix(c, nrow=length(x), ncol=length(y))
k <- matrix(0.0, nrow=length(x), ncol=length(y))
for (rep in 1:n) {
index <- which(Mod(p) <= (2/B))
px = Re(p[index])
py = Im((p[index]))
pxt = B*(rx*(px - px*px + py*py)- ry*(py - 2*px*py)) + (1-B)*px
pyt = B*(rx*(py-2*px*py) + ry*(px - px*px+py*py)) + (1-B)*py
px = pxt
py = pyt
p[index] = px + py*1i;
k[index] <- k[index] + 1
}
image(x, y, k, col = cols,ylab=sprintf("xmin=%.2f,xmax=%.3f,ymin=%.3f,ymax=%.3f",xmin,xmax,ymin,ymax),xlab=sprintf("pixelgap=%.3f,rx=%.3f,ry=%.3f,n=%d",gap,rx,ry,n))
##image(x,y,k, col=cols)
|
f60201838beb7699cfa94c3469375e501a4e9af2 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/quanteda/examples/fcm-class.Rd.R | 6858b2709aaa2572b89ab1083168f04e89062495 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 841 | r | fcm-class.Rd.R | library(quanteda)
### Name: fcm-class
### Title: Virtual class "fcm" for a feature co-occurrence matrix The fcm
### class of object is a special type of fcm object with additional
### slots, described below.
### Aliases: fcm-class t,fcm-method Arith,fcm,numeric-method
### Arith,numeric,fcm-method [,fcm,index,index,missing-method
### [,fcm,index,index,logical-method [,fcm,missing,missing,missing-method
### [,fcm,missing,missing,logical-method
### [,fcm,index,missing,missing-method [,fcm,index,missing,logical-method
### [,fcm,missing,index,missing-method [,fcm,missing,index,logical-method
### Keywords: internal
### ** Examples
# fcm subsetting
y <- fcm(tokens(c("this contains lots of stopwords",
"no if, and, or but about it: lots"),
remove_punct = TRUE))
y[1:3, ]
y[4:5, 1:5]
|
7c3023b4232ee71e315097aefc193aeb20272d67 | bbf8b4fa0d3e5bcbd1076bcb332da488c6a0f14f | /R/R_API_celib.R | 931854fda683f2bbe6230123b7cf8dd8da32ea4e | [] | no_license | regardscitoyens/openfisca-web-notebook | 46611edd4ef159eb5de3aee040dcac3c5ac2ff3a | f4d8d58680011fbbb2f6c8ad24a023f2ee5c988d | refs/heads/master | 2021-01-18T09:18:12.959893 | 2014-03-22T14:07:03 | 2014-03-22T14:07:03 | 18,010,662 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,531 | r | R_API_celib.R | ####
### API needs JSON format input and gives JSON outputs
### This program :
# 1. converts an input data to a json file
# 2. get the results from the API and converts them to R data the outputs
##### 0. Preparation #####
# Warning : order matters in packages loading
library(rjson)
library(jsonlite)
library(httr)
setwd("my_repository")
source("R_json.R")
##### 1. R to JSON ####
## from your dataframe to API's input file
# all possible output variables are in model.py
# or available from this link :
# http://nbviewer.ipython.org/github/openfisca/openfisca-web-notebook/blob/master/liste-des-variables.ipynb
## 1.1 Scenario / Input variables ##
# In this example we use the following case
# Celibataire sans enfant with 4 INDIVIDUAL variables
var <- 4
data <- array(NA, dim=c(var,3))
data[1,] <- c("cadre", "true", 0) # or "false"
data[2,] <- c("activite", "Actif occupé", 1)
data[3,] <- c("birth", "1970", 0) # birth year
data[4,] <- c("sali", "20000", 0) # taxable income
# O if boolean of numerical values // 1 if character
## 1.2 Decomposition / Choice of output variables ##
# This example gives as output :
## 'salsuperbrut' # salaire superbrut
## 'salnet' # salaire net
## 'sali' # salaire imposable
## 'loyer' # loyer
## 'revdisp' # revenu disponible
## FINAL INPUT
json_input <- R_to_json(data)
##### 2. CALLING THE API (website calculator) ####
## Get the results : Output = Openfisca(Input)
## json output file is converted to R dataframe object
result <- json_to_R(json_input)
|
70b594b72e816e075c5c098becfb01af2cb1f1d8 | 2bec5a52ce1fb3266e72f8fbeb5226b025584a16 | /ProFound/man/profoundDrawEllipse.Rd | c5a79e5dae2cf32a9526be45f0f6b6972e2bacd1 | [] | no_license | akhikolla/InformationHouse | 4e45b11df18dee47519e917fcf0a869a77661fce | c0daab1e3f2827fd08aa5c31127fadae3f001948 | refs/heads/master | 2023-02-12T19:00:20.752555 | 2020-12-31T20:59:23 | 2020-12-31T20:59:23 | 325,589,503 | 9 | 2 | null | null | null | null | UTF-8 | R | false | false | 1,849 | rd | profoundDrawEllipse.Rd | \name{profoundDrawEllipse}
\alias{profoundDrawEllipse}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Draw Ellipse
}
\description{
Draws multiple ellipses on a plot window.
}
\usage{
profoundDrawEllipse(xcen = 0, ycen = 0, rad = 1, axrat = 1, ang = 0, box = 0, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{xcen}{
Numeric vector; x centre/s of the ellipse/s.
}
\item{ycen}{
Numeric vector; y centre/s of the ellipse/s.
}
\item{rad}{
Numeric vector; the major axis extent of the ellipse/s.
}
\item{axrat}{
Numeric vector; the axial ratio of the ellipse/s as given by \option{radlo}/\option{radhi}.
}
\item{ang}{
Numeric vector; the angle of the ellipse/s in the usual ProFit sense, see \code{profitMakeModel}.
}
\item{box}{
Numeric vector; the boxiness of the ellipse/s in the usual ProFit sense, see \code{profitMakeModel}.
}
\item{\dots}{
Further arguments to be passed to \code{\link{lines}} to draw the ellipse/s.
}
}
\details{
This function uses all the standard \code{ProFit} conventions to define the input parameters
}
\value{
No value is returned, this function is run purely for the side effect of drawing an ellipse.
}
\author{
Aaron Robotham
}
\seealso{
\code{\link{profoundGetEllipsesPlot}}, \code{\link{profoundGetEllipses}}, \code{\link{profoundGetEllipse}}
}
\examples{
\dontrun{
image=readFITS(system.file("extdata", 'VIKING/mystery_VIKING_Z.fits', package="ProFound"))
profound=profoundProFound(image, magzero=30, verbose=TRUE, plot=TRUE)
profoundDrawEllipse(profound$segstats$xcen, profound$segstats$ycen,
profound$segstats$R100/0.339, profound$segstats$axrat, profound$segstats$ang,
col='white', lty=2)
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\concept{ ellipse }% use one of RShowDoc("KEYWORDS")
|
572278c59f7e1002b8946c18aab7704e42f055f4 | f0b305ef32ab21d2ffa0b9aba3c86f56e91bd12b | /Environment_/Linear Regression/MLR_Assignment.R | eefd57aceac6b73a89d1e099a99a02bdc0db1011 | [] | no_license | manojnahak02/R-Irfan-Sir-Codes | f31c25140888c96453d652308271fff54611e036 | 8e7eae13fce0befb43142568ee04339268fc0c25 | refs/heads/master | 2022-11-24T21:31:46.879754 | 2020-07-29T17:03:54 | 2020-07-29T17:03:54 | 283,553,861 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,371 | r | MLR_Assignment.R | ## OBJECTIVE: To find out the whether there is a relation between PM10 and other pollutants.
# To select the file
data1<-read.csv(file.choose())
#To show the data
View(data1)
#To show the header of each column
names(data1)
#Check the data type for each column
str(data1)
#converting Zn to num as it's data type is int
data1$Zn<-as.numeric(data1$Zn)
str(data1)
#to check outlier for each variables
boxplot(data1$Pb)
boxplot(data1$Cd)
boxplot(data1$Cu)
boxplot(data1$Cr)
boxplot(data1$Zn)
boxplot(data1$NOx)
boxplot(data1$SO2)
boxplot(data1$PM10)
# ---- Outlier capping at the upper whisker (Q3 + 1.5*IQR) ----
# NOTE(review): the constants below (89.82, 0.96, 0.00, 0.53, 0.58, 54.70)
# appear to be Q3 values copied by hand from the summary() output; they will
# silently go stale if the input data change -- TODO confirm they equal
# quantile(x, 0.75) and consider computing them instead.
# PM10
summary(data1$PM10)
upperPM<-89.82+1.5*IQR(data1$PM10);upperPM
data1$PM10[data1$PM10>upperPM]<-upperPM
boxplot(data1$PM10)
summary(data1$PM10)
# Pb
summary(data1$Pb)
upperPb<-0.96+1.5*IQR(data1$Pb);upperPb
data1$Pb[data1$Pb>upperPb]<-upperPb
boxplot(data1$Pb)
summary(data1$Pb)
# Cd
summary(data1$Cd)
upperCd<-0.00+1.5*IQR(data1$Cd);upperCd
data1$Cd[data1$Cd>upperCd]<-upperCd
boxplot(data1$Cd)
summary(data1$Cd)
# Cu
summary(data1$Cu)
upperCu<-0.53+1.5*IQR(data1$Cu);upperCu
data1$Cu[data1$Cu>upperCu]<-upperCu
boxplot(data1$Cu)
summary(data1$Cu)
# Cr
summary(data1$Cr)
upperCr<-0.58+1.5*IQR(data1$Cr);upperCr
data1$Cr[data1$Cr>upperCr]<-upperCr
boxplot(data1$Cr)
summary(data1$Cr)
# NOx
summary(data1$NOx)
upperNOx<-54.70+1.5*IQR(data1$NOx);upperNOx
data1$NOx[data1$NOx>upperNOx]<-upperNOx
boxplot(data1$NOx)
summary(data1$NOx)
# Alternative: winsorise NOx at the 99th percentile (exploratory; the
# resulting `df` is not used afterwards).
outputQuantile<-quantile(data1$NOx,seq(0,1,by=0.05))
outputQuantile
cbind(outputQuantile)
qn = quantile(data1$NOx,c(0.01,0.99),na.rm = TRUE)
df = within(data1,{NOx = ifelse(NOx>qn[2],qn[2],NOx)})
summary(data1)
summary(df)
# The quantile approach did not remove the extreme value, so the single
# outlier (98.14) is replaced by the variable mean instead.
NoxMean=mean(data1$NOx)
NoxMean
# NOTE(review): the comparison below coerces NOx to character ("98.14") and
# the replacement 42.50 is a hard-coded rounding of NoxMean -- TODO confirm
# and prefer data1$NOx[data1$NOx == 98.14] <- NoxMean.
for (i in 1:length(data1$NOx)) {
if(data1$NOx[i]=="98.14"){
data1$NOx[i] <- 42.50
}
}
summary(data1)
# Re-run the IQR cap for NOx now that the extreme value is gone (53.09 is
# the updated Q3, again hard-coded from the summary above).
upperNOx<-53.09+1.5*IQR(data1$NOx);upperNOx
data1$NOx[data1$NOx>upperNOx]<-upperNOx
boxplot(data1$NOx)
summary(data1$NOx)
# Drop Cd and Zn: per the summaries above they are (almost) entirely zero
# after capping, so they carry no information for the regression.
data1$Cd<-NULL
data1$Zn<-NULL
# ---- Train/test partition (70/30, stratified on PM10) ----
library(caret)
Train<-createDataPartition(data1$PM10,p=0.70,list=FALSE)
training<-data1[Train,]
testing<-data1[-Train,]
# ---- Collinearity check on the training set ----
library(corrplot)
corrplot(cor(training),method='number')
# Baseline multiple linear regression of PM10 on all remaining predictors.
model<-lm(PM10~.,data=training)
summary(model)
#hist(training$PM10)
# The response was left skewed, hence the log transform used below.
#hist((1/training$PM10))
#hist(log(training$PM10))
library(lmtest)
library(car)
# Stepwise (both directions) selection on the log-response model.
# NOTE(review): this fits on the full `data1`, not on `training`, so the
# train/test split above is bypassed -- TODO confirm this is intentional.
model2<-step(lm(log(PM10)~.,data = data1),direction = "both")
summary(model2)
vif(model2)
par(mfrow=c(2,2))
plot(model2)
# Durbin-Watson test for autocorrelated residuals.
dwtest(model2)
#library(car)
# Test for non-constant error variance.
ncvTest(model2)
#Prediction
# Since the model predicts log(PM10), exp() back-transforms the fitted
# values to the original scale -- we always report the original scale.
#testing$fitted<-predict(model2,testing)
#testing$original<-exp(testing$fitted)
|
6c1669ad2c9ae69d5f59c3a785c1b9570c441d34 | 88a4f8854572e5482baf63c0b4bde90d2a7ec31b | /R/timelinejs.R | c13d5cf64dded922250b62ec859540bcff11c207 | [
"MIT"
] | permissive | jpmarindiaz/timelinejs | f999222d72fa58a76e51e70b1d9cfc4868f7a73c | c5e78b27e816f5e308431a960cd04a134ec5191d | refs/heads/master | 2021-01-15T12:02:20.902512 | 2017-11-01T23:19:45 | 2017-11-01T23:19:45 | 99,644,256 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,158 | r | timelinejs.R |
#' @export
timelinejs <- function(d, title = NULL, era = NULL, scale = "human",
                       opts = NULL, debug = FALSE,
                       width = NULL, height = NULL, ...) {
  # Merge explicit options with anything passed through `...`.
  # NOTE: `title` and `era` are accepted but not currently forwarded anywhere.
  settings <- parseOpts(opts = opts, ...)
  if (!scale %in% c("human", "cosmological")) {
    stop("Scale must be human or cosmological")
  }
  # Bundle prepared data and settings into the payload handed to the
  # JavaScript side of the widget.
  payload <- list(
    data = prepData(d),
    settings = settings,
    debug = debug
  )
  if (debug) {
    message("X")
    str(payload)
  }
  # Serialisation options picked up by htmlwidgets when encoding to JSON.
  attr(payload, "TOJSON_ARGS") <- list(auto_unbox = TRUE, na = "null")
  htmlwidgets::createWidget("timelinejs", payload, width = width, height = height)
}
#' @export
timelinejsOutput <- function(outputId, width = "100%", height = "500px") {
  # Shiny output binding for the widget. Namespace-qualified to match the
  # htmlwidgets:: usage in timelinejs() above; the previous bare call to
  # shinyWidgetOutput() fails unless the symbol is explicitly imported.
  htmlwidgets::shinyWidgetOutput(outputId, "timelinejs", width, height,
                                 package = "timelinejs")
}
#' @export
renderTimelinejs <- function(expr, env = parent.frame(), quoted = FALSE) {
  # Capture the unevaluated expression unless the caller already quoted it.
  if (!quoted) { expr <- substitute(expr) } # force quoted
  # Namespace-qualified for the same reason as timelinejsOutput(): the bare
  # shinyRenderWidget() call fails without an explicit import from htmlwidgets.
  htmlwidgets::shinyRenderWidget(expr, timelinejsOutput, env, quoted = TRUE)
}
|
b72384230ba10e701d0a657c8aeb29884542928c | 8a9e282803403adaa8ec2a8eafd048ced37265ca | /11 - Support vector machine/shiny/ui.r | dc091dbcd794625794d02e5127669883fdbdee82 | [
"MIT"
] | permissive | shadowusr/ml-course | 481f350fa34881aac21ce9ea84d3a5bfb168824f | 5e336dc47ed9dff71877e830a4d67afd8a23a8a1 | refs/heads/master | 2023-02-03T02:28:50.792565 | 2020-12-21T16:43:00 | 2020-12-21T16:43:00 | 294,619,134 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,201 | r | ui.r | library(shiny)
library(matrixcalc)
library(plotly)
# Shiny UI: inputs for two bivariate-normal classes (mean vector mu, 2x2
# covariance matrix Sigma, sample count) plus the SVM cost parameter C;
# two plot slots ("p1", "p2") in the main panel.
ui <- fluidPage(
  titlePanel("SVM demo"),
  sidebarLayout(
    sidebarPanel(
      # ---- Class 1: mean, covariance, sample size ----
      h3("The first class specs"),
      h6("Expected value, μ"),
      fluidRow(
        column(6,
               numericInput("mu11", value = 1.825, label = "μ1")
        ),
        column(6,
               numericInput("mu12", value = 1.825, label = "μ2")
        )
      ),
      h6("Covariance matrix, Σ"),
      fluidRow(
        column(6,
               numericInput("E11", value = 1.5, label = "Σ (1,1)")
        ),
        column(6,
               numericInput("E12", value = 0.25, label = "Σ (1,2)")
        )
      ),
      fluidRow(
        column(6,
               numericInput("E13", value = 0.25, label = "Σ (2,1)")
        ),
        column(6,
               numericInput("E14", value = 1.5, label = "Σ (2,2)")
        )
      ),
      fluidRow(
        column(12,
               numericInput("count1", value = 50, label = "Samples count")
        )
      ),
      # Placeholder for validation messages (e.g. invalid covariance input).
      textOutput("err"),
      # ---- Class 2: mean, covariance, sample size ----
      h3("The second class specs"),
      h6("Expected value, μ"),
      fluidRow(
        column(6,
               numericInput("mu21", value = 4.57, label = "μ1")
        ),
        column(6,
               numericInput("mu22", value = 4.57, label = "μ2")
        )
      ),
      h6("Covariance matrix, Σ"),
      fluidRow(
        column(6,
               numericInput("E21", value = 2.36, label = "Σ (1,1)")
        ),
        column(6,
               numericInput("E22", value = 0.11, label = "Σ (1,2)")
        )
      ),
      fluidRow(
        column(6,
               numericInput("E23", value = 0.11, label = "Σ (2,1)")
        ),
        column(6,
               numericInput("E24", value = 2.36, label = "Σ (2,2)")
        )
      ),
      fluidRow(
        column(12,
               numericInput("count2", value = 50, label = "Samples count")
        )
      ),
      # SVM soft-margin cost parameter.
      fluidRow(
        column(12,
               numericInput("C", value = 1, label = "C parameter")
        )
      )
    ),
    # ---- Output plots, side by side ----
    mainPanel(
      fluidRow(
        column(6, plotOutput(outputId = "p1")),
        column(6, plotOutput(outputId = "p2"))
      )
    )
  )
) |
0dd23a20211a83f00b3b9238d013573a7be19d6e | 7aeb132d2cc105818a4437e1eaa2390d66582fa1 | /man/get_ha_number.Rd | 37880e9bb81ada83dda8508423d78c3d5413bf0a | [] | no_license | sokabayashi/nhlutils | d7d57ad1db14f01f631738b801c799cc84351ab3 | 9ab46a6ebb3b0bee711e53113184b08b4924d9d9 | refs/heads/master | 2021-03-24T13:12:33.880828 | 2016-09-07T15:38:23 | 2016-09-07T15:38:23 | 44,276,711 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 415 | rd | get_ha_number.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gameutils.R
\name{get_ha_number}
\alias{get_ha_number}
\title{Convert (ha, number) pair into ha_number}
\usage{
get_ha_number(ha, number)
}
\arguments{
\item{ha}{String "H" or "A"}
\item{number}{Jersey number}
}
\value{
String of form ha_number, unique to a player in a game.
}
\description{
Convert (ha, number) pair into ha_number
}
|
69d688033d0208a04e8e26685e24b53164d80840 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/SSBtools/examples/HierarchicalGroups.Rd.R | ba15fb3d19d63f8bf01b2824c93db672abda0ba4 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 300 | r | HierarchicalGroups.Rd.R | library(SSBtools)
### Name: HierarchicalGroups
### Title: Finding hierarchical variable groups
### Aliases: HierarchicalGroups
### ** Examples
# Build a small demo matrix of categorical variables. By construction x
# determines y (A,C -> 11; B -> 22), so y is coarser than x.
x <- rep(c("A","B","C"),3)
y <- rep(c(11,22,11),3)
z <- c(1,1,1,2,2,2,3,3,3)
# zy concatenates z and y (e.g. "111"), a variable finer than both z and y.
zy <- paste(z,y,sep="")
m <- cbind(x,y,z,zy)
# Identify which columns form hierarchical (nested) groupings.
HierarchicalGroups(m)
|
1a61563454695734de728258bd989b41269c8f47 | 20925a992f5e542366b049232bdea1aa7aca1112 | /man-roxygen/dat.R | def12f4d0bfcdee785ccae98e9fb1c5cffa045c9 | [] | no_license | r4atlantis/atlantisom | 608a42edcfa16a4e4ee1cbc449d12f26fd15421a | b39a88f268a76014de9844ae8fffa67876c218fd | refs/heads/master | 2023-06-25T13:57:15.235092 | 2022-06-30T22:30:05 | 2022-06-30T22:30:05 | 47,479,048 | 7 | 6 | null | 2021-04-29T14:03:50 | 2015-12-06T01:14:26 | R | UTF-8 | R | false | false | 359 | r | dat.R | #' @param dat A \code{data.frame} of numbers at age containing the following columns:
#' \itemize{
#' \item{species}
#' \item{agecl}
#' \item{polygon}
#' \item{layer}
#' \item{time}
#' \item{atoutput}
#' }
#' The \code{data.frame} is generated from either \code{\link{create_survey}}
#' or \code{\link{create_fishery_subset}}.
|
8033c1322c14d78d4372a74ac766a16935ad66e0 | 2a1a0925c7ea2826e5f4148e08ecc4775f57fbd6 | /example-2/hrs_wyppt_mm/calling-script.R | 8457db9bdd60fa7623f14f266aa9cf9bb7ada518 | [] | no_license | nationalparkservice/bayes-for-non-ignorable-designs | e87b5682c3189c9d308834f1d3931f4469c97484 | fec95df52b0cb9a55171c5bdfa85f586d2293091 | refs/heads/master | 2023-08-03T14:01:22.197469 | 2021-09-16T18:33:53 | 2021-09-16T18:33:53 | 404,710,509 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,514 | r | calling-script.R | library(tidyverse)
library(rjags)
library(coda)
# Load the DIC sampler module into JAGS.
load.module("dic")
source("utils.R")
# Any command-line argument switches to a quarter of the full iteration
# counts (presumably a shortened test run); no arguments = full length.
args <- commandArgs(trailingOnly = TRUE)
frac_iter <- if (length(args) == 0) 1 else 1 / 4
# Set example / project paths and source required utilities.
ex_path <- get_ex_path()
output_path <- get_output_path(ex_path)
# Load the tabular data, just for reference.
d <- read_csv(file.path(ex_path, "00-input", "state-variable-data.csv"))
# Load the model description.
# NOTE(review): "^model*.jags$" is a regex, not a glob -- it matches "mode"
# plus zero or more "l"s, any character, then "jags". It happens to match
# "model.jags", but "^model.*\\.jags$" was likely intended; confirm.
jags_model_file <- list.files(ex_path, "^model*.jags$", full.names = TRUE)
# Load the data list required by JAGS.
jags_data <- readRDS(file.path(ex_path, "00-input", "jags-data.rds"))
# Basic analysis breadcrumbs (likelihood, deterministic model, etc.).
jags_info <- readRDS(file.path(ex_path, "00-input", "jags-info.rds"))
# Saved iteration counts (currently superseded by frac_iter, see below).
jags_n_iters <- readRDS(file.path(ex_path, "00-input", "jags-n-iters.rds"))
eval_mean_for_tv_covariates <- # determines whether 'bumpy' plots are produced
  readRDS(file.path(ex_path, "00-input", "eval-mean-for-tv-covariates.rds"))
# Load the 'inits' originally used to fit the model.
jags_inits <- readRDS(file.path(ex_path, "00-input", "jags-inits.rds"))
# Load the variables we're watching.
jags_vars <- readRDS(file.path(ex_path, "00-input", "jags-vars.rds"))
# coda_vars <- readRDS(file.path(ex_path, '00-input', 'coda-vars.rds'))
# head(jags_data$X)
# Earlier approach: standardise the design matrix straight from `d`
# (kept for reference; superseded by the X_raw pipeline below).
# jags_data$X <- d %>%
#   #bind_cols(read_csv(file.path(ex_path, 'new_X.csv'))) %>%
#   select(stratum_id, any_of(dimnames(jags_data$X)[[2]])) %>%
#   group_by(stratum_id) %>%
#   mutate(fe_hrs = scale(fe_hrs)[,1], fe_WYppt_mm = scale(fe_WYppt_mm)[,1]) %>%
#   ungroup() %>%
#   select(fe_hrs, fe_WYppt_mm) %>%
#   as.matrix()
# Saved centering/scaling moments used to undo the original standardisation.
scale_atts_df <- readRDS(file.path(ex_path, "00-input/covariate-moments.rds"))
stratum_lookup <- read_csv(file.path(ex_path, "00-input", "stratum-ids-and-indices.csv"))
# Back-transform the stored covariates to their raw scale using the saved
# center/scale attributes (x_raw = x_scaled * scale + center).
X_raw <- d %>%
  # bind_cols(read_csv(file.path(ex_path, 'new_X.csv'))) %>%
  # select(stratum_id, any_of(dimnames(jags_data$X)[[2]])) %>%
  transmute(stratum_id,
    fe_WYppt_mm = fe_WYppt_mm *
      scale_atts_df$`scaled:scale`[scale_atts_df$fe_column == "WYppt_mm"] +
      scale_atts_df$`scaled:center`[scale_atts_df$fe_column == "WYppt_mm"],
    fe_hrs = fe_hrs *
      scale_atts_df$`scaled:scale`[scale_atts_df$fe_column == "hrs"] +
      scale_atts_df$`scaled:center`[scale_atts_df$fe_column == "hrs"]
  )
# Re-standardise within stratum and rebuild the design matrix handed to JAGS.
jags_data$X <- X_raw %>%
  group_by(stratum_id) %>%
  mutate(fe_hrs = scale(fe_hrs)[, 1], fe_WYppt_mm = scale(fe_WYppt_mm)[, 1]) %>% # fe_hrs = scale(fe_hrs)[,1],
  ungroup() %>%
  select(fe_hrs, fe_WYppt_mm) %>% # fe_hrs,
  as.matrix()
# Per-stratum raw moments, joined to stratum indices so that prediction rows
# can be standardised with the moments of their own stratum.
X_raw_moments <- X_raw %>%
  group_by(stratum_id) %>%
  summarise(
    mean_hrs = mean(fe_hrs), sd_hrs = sd(fe_hrs),
    mean_WYppt_mm = mean(fe_WYppt_mm), sd_WYppt_mm = sd(fe_WYppt_mm),
    .groups = "drop"
  ) %>%
  left_join(stratum_lookup)
# Same back-transform for the prediction design matrix.
X_pred_raw <- as_tibble(jags_data$X.pred) %>%
  transmute(
    fe_WYppt_mm = fe_WYppt_mm *
      scale_atts_df$`scaled:scale`[scale_atts_df$fe_column == "WYppt_mm"] +
      scale_atts_df$`scaled:center`[scale_atts_df$fe_column == "WYppt_mm"],
    fe_hrs = fe_hrs *
      scale_atts_df$`scaled:scale`[scale_atts_df$fe_column == "hrs"] +
      scale_atts_df$`scaled:center`[scale_atts_df$fe_column == "hrs"]
  )
# Standardise prediction covariates with their own stratum's moments.
jags_data$X.pred <- X_pred_raw %>%
  mutate(stratum_index = jags_data$k.pred) %>%
  left_join(X_raw_moments) %>%
  transmute(
    fe_hrs = (fe_hrs - mean_hrs) / sd_hrs,
    fe_WYppt_mm = (fe_WYppt_mm - mean_WYppt_mm) / sd_WYppt_mm
  ) %>%
  as.matrix()
# ---- Fit: adaptation, burn-in, posterior sampling ----
# Adapt and update.
jags_model <- jags.model(
  file = jags_model_file,
  data = jags_data,
  # inits = jags_inits,
  n.chains = 3,
  n.adapt = floor(5000 * frac_iter) # jags_n_iters['n_adapt']
)
update(
  object = jags_model,
  n.iter = floor(5000 * frac_iter) # jags_n_iters['n_update']
)
# colMeans(jags_data$X)
# d %>% group_by(stratum_id) %>% summarise(mean_hrs = mean(fe_hrs), sd_hrs = sd(fe_hrs))
# ggplot(d) +
#   facet_wrap(~stratum_id) +
#   geom_vline(xintercept = 0, color = 'red') +
#   geom_histogram(aes(x = fe_hrs, y = ..density..))
gc()
# Sample.
# Monitored-variable exclusion pattern fed to grep() below.
# NOTE(review): these entries look like globs but are used as a regex
# ("*.tilde" is not ".*\\.tilde") -- confirm the intended variables are
# actually being excluded.
not_needed <- paste(
  c(
    "*.tilde", "j.hat.draw", "*.site", "sigma", "site.wt", "tau",
    "i.pred", "theta", "epsilon"
  ),
  collapse = "|"
)
z_jags <- jags.samples(
  model = jags_model,
  variable.names = c(
    grep(not_needed, jags_vars, value = TRUE, invert = TRUE),
    "pred.park.pr"
  ),
  n.iter = floor(3000 * frac_iter) # jags_n_iters['n_iter']
)
# Persist the raw samples for downstream scripts.
save_object(z_jags, file.path(output_path, "99-misc"), "z-jags.rds")
# saveRDS(z_jags, file.path(output_path[2], 'z-jags.rds'))
# str(z_jags$Beta)
# which(dimnames(jags_data$X)[[2]] == "fe_hrs")
# hist(z_jags$Beta[1, 1, , ])
# hist(z_jags$Beta[2, 1, , ])
# hist(z_jags$Beta[3, 1, , ])
# hist(z_jags$Beta[4, 1, , ])
# z_coda <- coda.samples(
#   model = jags_model,
#   variable.names = coda_vars,
#   n.iter = floor(3000 * frac_iter)#jags_n_iters['n_iter']
# )
#
# # Model checking and diagnostics.
# convergence_diagnostic <- gelman.diag(z_coda, multivariate = FALSE)
# bayesian_p <- sapply(c('p.mean', 'p.sd'), function(t_stat) {
#   summary(z_jags[[t_stat]], mean)$stat
# }) # see also: summary(z_coda)
#
# # Posterior predictive loss, DIC, etc.
# L <- jags_info$likelihood
# post_pred_loss <- z_jags %>% get_ppl(d, L)
# DIC <- z_jags %>% get_dic(d, L, jags_data)
#
# Inference.
response_desc <- jags_info$description
get_park_scale_inference(z_jags, d, jags_data, output_path,
  response_desc,
  n_draws = 1000, seed = 123
)
get_trend_inference(z_jags, d, output_path, response_desc)
|
3c39e544a1cae0e37fc9ae06e4139bb7364c622d | 9d3fef10fd5845f31373ef19599f2a4498ac8837 | /R/easingr.R | 468964337a0c86d5f688688e9a10c2ad4e46a271 | [] | no_license | arturochian/easingr | 8dcdc301c70043cbb4edd93caf41864d1a93d074 | a46917a9c5a95951ba04f695a61fd6cf167030ef | refs/heads/master | 2020-12-24T14:10:29.145820 | 2014-11-05T01:07:36 | 2014-11-05T01:07:36 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 16,218 | r | easingr.R | #' @title Get credit easing data.
#' @description Downloads Cleveland FRB credit easing policy tool data.
#' @details Forms a query using the given \code{id} and submits the query to the data site, thereby downloading the requested data in CSV format. Transforms the CSV into a data frame, transforms the character date into \code{Date} objects, and then an \code{xts} object.
#' @return A list of class \code{easing} containing query items and \code{xts} result.
#' @param id the query type identifier code as specified by the FRB data site, default NA
#' @param startDate the desired start month and year number in MMYYYY format, default 012007
#' @param endDate the desired end month and year number in MMYYYY format, default 122100 for all available data
#' @return List of class type \code{easing} containing \code{xts} time history object \code{df}, query start date \code{startDate}, query stop date \code{stopDate}, plot colors array \code{colors}, plot main title \code{main}, and plot y-axis label \code{ylab}.
#' @export
#' @importFrom utils read.csv
#' @import xts
#' @references \url{http://www.clevelandfed.org/research/data/credit_easing/index.cfm}
#' @seealso getEasingSummary getEasingDetails getEasingLending getEasingCreditDepository getEasingCreditExtensions getEasingProvidingLiquidity getEasingMaidenLane getEasingTraditionalHoldings getEasingAgencyDebt getEasingLongTermPurchases
#' @note Meant for internal use by the other, more specific, query functions.
#' @examples
#' \dontrun{
#' getEasingData(id=1)
#' }
#'
getEasingData <- function(id=NA,startDate="012007",endDate="122100") {
  # `id` has no usable default: fail fast with a clear message instead of the
  # opaque "is.na(id) == FALSE is not TRUE" raised by the old stopifnot().
  if (is.na(id)) stop("'id' must be supplied (FRB data site query code)", call. = FALSE)
  # Build the query URL (paste0 replaces paste(..., sep = '')).
  cfurl <- paste0(
    "http://www.clevelandfed.org/research/data/credit_easing/chart_csv.cfm",
    "?id=", id,
    "&start=", startDate,
    "&end=", endDate
  )
  df <- read.csv(cfurl)
  # Drop the last (unused) column of the raw CSV.
  df <- df[,-ncol(df)]
  # read.csv turns spaces in series names into dots; restore the spaces.
  colnames(df) <- gsub("\\."," ",colnames(df))
  colnames(df)[1] <- "Date"
  df$Date <- as.Date(df$Date,format="%m/%d/%y")
  # Convert to an xts time series indexed by the parsed dates.
  dfxts <- xts(df[,-1],order.by=df[,1])
  colnames(dfxts) <- colnames(df)[-1]
  # Result carries the query parameters plus plotting defaults that the
  # more specific getEasing*() wrappers override.
  rv <- list(id=id,
             df=dfxts,
             startDate=startDate,
             endDate=endDate,
             colors=c(),
             main="Easing Policy Tools",
             ylab="(Millions of Dollars)")
  class(rv) <- "easing"
  rv
}
#' @title Get credit easing summary.
#' @description Downloads FRB credit easing policy summary data.
#' @details Downloads the Cleveland FRB data product for credit easing policy summary weekly time series, including columns for
#' \itemize{
#' \item traditional security holdings
#' \item long-term treasury purchases
#' \item lending to financial institutions
#' \item liquidity to key credit markets
#' \item federal agency debt mortgage backed securities purchases
#' }
#' @param startDate query start date string in MMYYYY format, default 012007
#' @param endDate query end date string in MMYYYY format, default 122100
#' @return A list of class \code{easing}
#' @export
#' @seealso getEasingData getEasingDetails getEasingLending getEasingCreditDepository getEasingCreditExtensions getEasingProvidingLiquidity getEasingMaidenLane getEasingTraditionalHoldings getEasingAgencyDebt getEasingLongTermPurchases
#' @examples
#' \dontrun{
#' es <- getEasingSummary()
#' head(es$df)
#' }
getEasingSummary <- function(startDate="012007",endDate="122100") {
  # Query id 1 = summary series; attach plot title and chart palette.
  out <- getEasingData(1, startDate, endDate)
  out$main <- "Credit Easing Policy Tools Summary"
  out$colors <- c(
    "#ce9257", # traditional security holdings
    "#d9d28b", # long-term treasury purchases
    "#9a9a9a", # lending to financial institutions
    "#397093", # liquidity to key credit markets
    "#aa947c"  # federal agency debt mortgage backed securities purchases
  )
  out
}
#' @title Get credit easing details.
#' @description Downloads FRB credit easing policy details data.
#' @details Downloads the Cleveland FRB data product for credit easing policy detail weekly time series, including columns for
#' \itemize{
#' \item traditional security
#' \item securities lent to dealers
#' \item repurchase agreements
#' \item other fed assets
#' \item currency swaps
#' \item term auction credit
#' \item primary dealer
#' \item primary credit
#' \item secondary credit
#' \item seasonal credit
#' \item maiden lane 1
#' \item maiden lane 2
#' \item maiden lane 3
#' \item asset-backed commercial paper
#' \item net portfolio holdings commercial paper
#' \item other credit
#' \item credit to AIG
#' \item mortgage-backed securities
#' \item federal agency debt securities
#' \item term asset-backed securities
#' \item long-term treasury purchases
#' }
#' @param startDate query start date string in MMYYYY format, default 012007
#' @param endDate query end date string in MMYYYY format, default 122100
#' @return A list of class \code{easing}
#' @export
#' @seealso getEasingData getEasingSummary getEasingLending getEasingCreditDepository getEasingCreditExtensions getEasingProvidingLiquidity getEasingMaidenLane getEasingTraditionalHoldings getEasingAgencyDebt getEasingLongTermPurchases
#' @examples
#' \dontrun{
#' ed <- getEasingDetails()
#' head(ed$df)
#' }
getEasingDetails <- function(startDate="012007",endDate="122100") {
  # Query id 2 = full detail series; attach plot title and chart palette.
  out <- getEasingData(2, startDate, endDate)
  out$main <- "Credit Easing Policy Tools Detail"
  out$colors <- c(
    "#be7c45", # traditional security
    "#d3c682", # securities lent to dealers
    "#808080", # repurchase agreements
    "#184f73", # other fed assets
    "#917960", # currency swaps
    "#537362", # term auction credit
    "#163e5f", # primary dealer
    "#72758b", # primary credit
    "#527a9c", # secondary credit
    "#529870", # seasonal credit
    "#00317a", # maiden lane 1
    "#c45b24", # maiden lane 2
    "#d7a73a", # maiden lane 3
    "#8a1e10", # asset-backed commercial paper
    "#023360", # net portfolio holdings commercial paper
    "#9c4019", # other credit
    "#53601b", # credit to AIG
    "#001c4c", # mortgage-backed securities
    "#7d2563", # federal agency debt securities
    "#002a8c", # term asset-backed securities
    "#39872e"  # long-term treasury purchases
  )
  out
}
#' @title Get credit easing lending.
#' @description Downloads FRB credit easing policy lending data.
#' @details Downloads the Cleveland FRB data product for credit easing policy lending weekly time series, including columns for
#' \itemize{
#' \item repurchase agreements
#' \item credit to depository institutions
#' \item other fed assets
#' \item currency swaps
#' \item term auction credit
#' \item securities lent to dealers
#' \item credit extensions
#' }
#' @param startDate query start date string in MMYYYY format, default 012007
#' @param endDate query end date string in MMYYYY format, default 122100
#' @return A list of class \code{easing}
#' @export
#' @seealso getEasingData getEasingSummary getEasingDetails getEasingCreditDepository getEasingCreditExtensions getEasingProvidingLiquidity getEasingMaidenLane getEasingTraditionalHoldings getEasingAgencyDebt getEasingLongTermPurchases
#' @examples
#' \dontrun{
#' el <- getEasingLending()
#' head(el$df)
#' }
getEasingLending <- function(startDate="012007",endDate="122100") {
  # Query id 3 = lending to financial institutions; attach plot metadata.
  out <- getEasingData(3, startDate, endDate)
  out$main <- "Lending to Financial Institutions"
  out$colors <- c(
    "#be7c45", # repurchase agreements
    "#d3c682", # credit to depository institutions
    "#808080", # other fed assets
    "#184f73", # currency swaps
    "#917960", # term auction credit
    "#537362", # securities lent to dealers
    "#163e5f"  # credit extensions
  )
  out
}
#' @title Get credit easing credit to depository institutions
#' @description Downloads FRB credit easing policy credit to depository institutions data.
#' @details Downloads the Cleveland FRB data product for credit easing policy credit to depository institutions weekly time series, including columns for
#' \itemize{
#' \item primary credit
#' \item secondary credit
#' \item seasonal credit
#' }
#' @param startDate query start date string in MMYYYY format, default 012007
#' @param endDate query end date string in MMYYYY format, default 122100
#' @return A list of class \code{easing}
#' @export
#' @seealso getEasingData getEasingSummary getEasingDetails getEasingLending getEasingCreditExtensions getEasingProvidingLiquidity getEasingMaidenLane getEasingTraditionalHoldings getEasingAgencyDebt getEasingLongTermPurchases
#' @examples
#' \dontrun{
#' cd <- getEasingCreditDepository()
#' head(cd$df)
#' }
getEasingCreditDepository <- function(startDate="012007",endDate="122100") {
  # Query id 4 = credit to depository institutions; attach plot metadata.
  out <- getEasingData(4, startDate, endDate)
  out$main <- "Credit to Depository Institutions"
  out$colors <- c(
    "#d1955a", # primary credit
    "#d3ca72", # secondary credit
    "#808080"  # seasonal credit
  )
  out
}
#' @title Get credit easing credit extensions data
#' @description Downloads FRB credit easing policy credit extensions data.
#' @details Downloads the Cleveland FRB data product for credit easing policy credit extensions weekly time series, including columns for
#' \itemize{
#' \item primary/other broker dealer
#' \item credit to AIG
#' \item other credit
#' }
#' @param startDate query start date string in MMYYYY format, default 012007
#' @param endDate query end date string in MMYYYY format, default 122100
#' @return A list of class \code{easing}
#' @export
#' @seealso getEasingData getEasingSummary getEasingDetails getEasingLending getEasingCreditDepository getEasingProvidingLiquidity getEasingMaidenLane getEasingTraditionalHoldings getEasingAgencyDebt getEasingLongTermPurchases
#' @examples
#' \dontrun{
#' ce <- getEasingCreditExtensions()
#' head(ce$df)
#' }
getEasingCreditExtensions <- function(startDate="012007",endDate="122100") {
  # Query id 5 = credit extensions; attach plot metadata.
  out <- getEasingData(5, startDate, endDate)
  out$main <- "Credit Extensions"
  out$colors <- c(
    "#d1955a", # primary/other broker dealer
    "#d3ca72", # credit to AIG
    "#808080"  # other credit
  )
  out
}
#' @title Get credit easing credit providing liquidity data
#' @description Downloads FRB credit easing policy tools providing liquidity to key credit markets data.
#' @details Downloads the Cleveland FRB data product for credit easing policy tools providing liquidity weekly time series, including columns for
#' \itemize{
#' \item Maiden Lane
#' \item asset-backed commercial paper
#' \item net portfolio holdings commercial paper
#' \item term asset-backed securities
#' }
#' @param startDate query start date string in MMYYYY format, default 012007
#' @param endDate query end date string in MMYYYY format, default 122100
#' @return A list of class \code{easing}
#' @export
#' @seealso getEasingData getEasingSummary getEasingDetails getEasingLending getEasingCreditDepository getEasingCreditExtensions getEasingMaidenLane getEasingTraditionalHoldings getEasingAgencyDebt getEasingLongTermPurchases
#' @examples
#' \dontrun{
#' pl <- getEasingProvidingLiquidity()
#' head(pl$df)
#' }
getEasingProvidingLiquidity <- function(startDate="012007",endDate="122100") {
  # Query id 6 = liquidity to key credit markets; attach plot metadata.
  out <- getEasingData(6, startDate, endDate)
  out$main <- "Providing Liquidity to Key Credit Markets"
  out$colors <- c(
    "#c89365", # maiden lane
    "#dcd19a", # asset-backed commercial paper
    "#979797", # net portfolio holdings commercial paper
    "#406f8c"  # term asset-backed securities
  )
  out
}
#' @title Get credit easing credit Maiden Lane data.
#' @description Downloads FRB credit easing policy tools Maiden Lane data.
#' @details Downloads the Cleveland FRB data product for credit easing policy tools Maiden Lane weekly time series, including columns for
#' \itemize{
#' \item Maiden Lane 1
#' \item Maiden Lane 2
#' \item Maiden Lane 3
#' }
#' @param startDate query start date string in MMYYYY format, default 012007
#' @param endDate query end date string in MMYYYY format, default 122100
#' @return A list of class \code{easing}
#' @export
#' @seealso getEasingData getEasingSummary getEasingDetails getEasingLending getEasingCreditDepository getEasingCreditExtensions getEasingProvidingLiquidity getEasingTraditionalHoldings getEasingAgencyDebt getEasingLongTermPurchases
#' @examples
#' \dontrun{
#' ml <- getEasingMaidenLane()
#' head(ml$df)
#' }
getEasingMaidenLane <- function(startDate="012007",endDate="122100") {
  # Query id 7 = Maiden Lane facilities; attach plot metadata.
  out <- getEasingData(7, startDate, endDate)
  out$main <- "Maiden Lane"
  out$colors <- c(
    "#d1955a", # maiden lane 1
    "#d3ca72", # maiden lane 2
    "#808080"  # maiden lane 3
  )
  out
}
#' @title Get credit easing credit traditional security holdings data.
#' @description Downloads FRB credit easing policy tools traditional security holdings data.
#' @details Downloads the Cleveland FRB data product for credit easing policy tools traditional security holdings weekly time series, including columns for
#' \itemize{
#' \item traditional security holdings
#' }
#' @param startDate query start date string in MMYYYY format, default 012007
#' @param endDate query end date string in MMYYYY format, default 122100
#' @return A list of class \code{easing}
#' @export
#' @seealso getEasingData getEasingSummary getEasingDetails getEasingLending getEasingCreditDepository getEasingCreditExtensions getEasingProvidingLiquidity getEasingMaidenLane getEasingAgencyDebt getEasingLongTermPurchases
#' @examples
#' \dontrun{
#' th <- getEasingTraditionalHoldings()
#' head(th$df)
#' }
getEasingTraditionalHoldings <- function(startDate="012007",endDate="122100") {
  # Query id 8 = traditional security holdings; attach plot metadata.
  out <- getEasingData(8, startDate, endDate)
  out$main <- "Traditional Security Holdings"
  out$colors <- c(
    "#cb9668" # treasury holdings
  )
  out
}
#' @title Get credit easing credit agency debt data.
#' @description Downloads FRB credit easing policy tools federal agency debt and mortgage-backed securities data.
#' @details Downloads the Cleveland FRB data product for credit easing policy tools federal agency debt and mortgage-backed securities weekly time series, including columns for
#' \itemize{
#' \item federal agency debt
#' \item mortgage-backed securities
#' }
#' @param startDate query start date string in MMYYYY format, default 012007
#' @param endDate query end date string in MMYYYY format, default 122100
#' @return A list of class \code{easing}
#' @export
#' @seealso getEasingData getEasingSummary getEasingDetails getEasingLending getEasingCreditDepository getEasingCreditExtensions getEasingProvidingLiquidity getEasingMaidenLane getEasingTraditionalHoldings getEasingLongTermPurchases
#' @examples
#' \dontrun{
#' ad <- getEasingAgencyDebt()
#' head(ad$df)
#' }
getEasingAgencyDebt <- function(startDate="012007",endDate="122100") {
  # Query id 9 = federal agency debt and MBS purchases; attach plot metadata.
  out <- getEasingData(9, startDate, endDate)
  out$main <- "Federal Agency Debt and Mortgage-Backed Securities"
  out$colors <- c(
    "#cb9668", # federal agency debt
    "#dcd19a"  # mortgage-backed securities
  )
  out
}
#' @title Get credit easing long-term treasury purchases data.
#' @description Downloads FRB credit easing policy tools long-term treasury purchases data.
#' @details Downloads the Cleveland FRB data product for credit easing policy tools long-term treasury purchases weekly time series, including columns for
#' \itemize{
#' \item treasury purchases
#' }
#' @param startDate query start date string in MMYYYY format, default 012007
#' @param endDate query end date string in MMYYYY format, default 122100
#' @return A list of class \code{easing}
#' @export
#' @seealso getEasingData getEasingSummary getEasingDetails getEasingLending getEasingCreditDepository getEasingCreditExtensions getEasingProvidingLiquidity getEasingMaidenLane getEasingTraditionalHoldings getEasingAgencyDebt
#' @examples
#' \dontrun{
#' lt <- getEasingLongTermPurchases()
#' head(lt$df)
#' }
getEasingLongTermPurchases <- function(startDate="012007",endDate="122100") {
  # Query id 30 = long-term treasury purchases; attach plot metadata.
  out <- getEasingData(30, startDate, endDate)
  out$main <- "Long-Term Treasury Purchases"
  out$colors <- c(
    "#cb9668" # treasury purchases
  )
  out
}
|
73cbce534d05164944b47486d0099198a04b1fdb | 3185e97bf12b55436851d0a1ac141e249907d6cf | /Plot4.R | 90b6d506f1eb12b8272d0d7a18477075c9d4ac12 | [] | no_license | SamyakShah21/Exploratory-Data-Analysis-Week-4-Project | b72e2057b396ec97bb551b7dbb1720ee1361e7f8 | a57d729d9338a589fbcf49200ecfec5a27adf61d | refs/heads/master | 2022-06-10T16:39:40.387116 | 2020-05-08T19:48:44 | 2020-05-08T19:48:44 | 262,411,236 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 808 | r | Plot4.R | #It is assumed that the file has been dwnloaded and unzipped.
# The working-directory path is machine-specific and may need to be changed.
# NOTE(review): setwd() in a script is fragile; relative paths would be safer.
setwd("C:/Users/Samyak/Desktop/Academics/Coursera/Data_Science_JHU_4/Week_4")
# National Emissions Inventory observations and the source-code lookup table.
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
library(ggplot2)
library(dplyr)
# Open the PNG device before building the plot; closed by dev.off() below.
png(filename = "plot4.png")
# Source codes whose EI.Sector mentions "coal" (either letter case per position).
coal_SCC <- SCC[grep("[Cc][Oo][Aa][Ll]", SCC$EI.Sector), "SCC"]
coal_NEI <- filter(NEI,SCC %in% coal_SCC)
# Total coal-related emissions per year.
coal_summary <- coal_NEI %>% group_by(year) %>% summarise(Emissions = sum(Emissions))
plot_4 <- ggplot(coal_summary, aes(x=year, y=round(Emissions/1000,2), fill=year)) +
  ylab(expression('PM'[2.5]*' Emissions in Kilotons')) + xlab("Year") +
  ggtitle("Coal Combustion Emissions, 1999 to 2008.")+
  geom_bar(stat="identity")
print(plot_4)
dev.off()
cbe3c08f0a805c4b4f7979f5f10e35b2316b15e8 | 60f7c788937e67e94fa86f0f9bbdaac3f2d7e2c4 | /Scripts/01_Fn_SimPropModel.R | dfad2934af996f9a691f4be24ad622591f22ef8f | [] | no_license | AMDraghici/correlation_within_pair_bonds_CJS | 3a812429105db0ee25e342392338d67cdf4a2683 | d6a2acf2577667fc9c1f580cb2e2fc76c0cadbea | refs/heads/main | 2023-04-19T05:32:07.224213 | 2022-05-01T06:09:39 | 2022-05-01T06:09:39 | 328,504,069 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 12,214 | r | 01_Fn_SimPropModel.R | #############################################################################################################
### This script contains functions used to simulate a single dataset from the proposed model of the manuscript
###
### NOTE: Code should not be run directly in here. Source() in this script using
### the code from 11_RunSimulationStudy.R or 12_RunCompareChatEstimators.R
### as if you would do so with an R Package.
#############################################################################################################
#Extract joint binomial parameters
compute_jbin_cjs <- function(prob.f,prob.m){
#Compute Components
prob.prod <- prob.m * prob.f
sig.prob.f <- sqrt(prob.f*(1-prob.f))
sig.prob.m <- sqrt(prob.m*(1-prob.m))
sig.prod <- sig.prob.f*sig.prob.m
##Upper and Lower bounds for cov of joint binomial
lub <- pmin(1,(prob.f-prob.prod)/(sig.prod),(prob.m-prob.prod)/(sig.prod))
glb <- pmax(-1,(-prob.prod)/(sig.prod),(prob.f+prob.m - prob.prod - 1)/(sig.prod))
#Return values
return(list(prob.prod,sig.prob.f,sig.prob.m,sig.prod,glb,lub))
}
#Generate Derived Survival Probabilities
param_surv_cjs_dgr <- function(prob.f,prob.m,cor){
#Extract Parameters
parameters <- compute_jbin_cjs(prob.f,prob.m)
cor_upper_bound <- parameters[[6]]
cor_lower_bound <- parameters[[5]]
sig.prob.f <- parameters[[2]]
sig.prob.m <- parameters[[3]]
###Joint Probability Distn for survivorship
prob.mf <- cor * sig.prob.m * sig.prob.f + (prob.f*prob.m)
prob.f0 <- prob.f - prob.mf
prob.m0 <- prob.m - prob.mf
prob.00 <- 1 - prob.f0 - prob.m0 - prob.mf
#List of parameters
param <- list(prob.mf,prob.f0,prob.m0,prob.00)
#Return values
return(param)
}
#Survival Function
survival_cjs_dgr <- function(previous_state,mated,j,phi.m,phi.f,phi.00,phi.f0,phi.m0,phi.mf){
if(previous_state == 3){
next_state <-
which(rmultinom(c(1,2,3,4),1,
c((1-phi.m[j]),0,phi.m[j],0)) == 1)
} else if(previous_state == 2){
next_state <-
which(rmultinom(c(1,2,3,4),1,
c((1-phi.f[j]),phi.f[j],0,0)) == 1)
} else if(previous_state == 4) {
if(mated == 1){
next_state <-
which(rmultinom(c(1,2,3,4),1,
c((1-phi.m[j])*(1-phi.f[j]),(1-phi.m[j])*phi.f[j],phi.m[j]*(1-phi.f[j]),phi.m[j]*phi.f[j])) == 1)
} else if(mated ==2){
next_state <-
which(rmultinom(c(1,2,3,4),1,
c(phi.00[j],phi.f0[j],phi.m0[j],phi.mf[j])) == 1)
}
} else {
next_state <- 1
}
return(next_state)
}
#Recapture Function
recapture_cjs_dgr <- function(current_state,mated,j,p.m,p.f,p.00,p.f0,p.m0,p.mf){
if(current_state == 3){
obs <-
which(rmultinom(c(1,2,3,4),1,
c((1-p.m[j]),0,p.m[j],0)) == 1)
} else if(current_state == 2){
obs <-
which(rmultinom(c(1,2,3,4),1,
c((1-p.f[j]),p.f[j],0,0)) == 1)
} else if(current_state == 4) {
if(mated==1){
obs <-
which(rmultinom(c(1,2,3,4),1,
c((1-p.m[j])*(1-p.f[j]),(1-p.m[j])*p.f[j],p.m[j]*(1-p.f[j]),p.m[j]*p.f[j])) == 1)
} else if(mated==2){
obs <-
which(rmultinom(c(1,2,3,4),1,
c(p.00[j],p.f0[j],p.m0[j],p.mf[j])) == 1)
}
} else {
obs <- 1
}
return(obs)
}
#Divorce function
divorce_dgr <- function(previous_state,j,delta){
if(previous_state == 4){
mated <- rbinom(1,1,delta[j]) + 1
} else {
mated <- 1
}
return(mated)
}
init_pop_cjs_dgr <- function(n,prob.female,prob.partner=1,k){
#Construct Data Frame with animals, genders, and initial entry
population <- data_frame(animal_id = 1:n,
sex = ifelse(rbinom(n,1,prob.female)==1,"F","M"), #c(rep("F",n/2),rep("M",n/2)),
partner = ifelse(rbinom(n,1,prob.partner)==1,T,F))
##Split by mated animals and single animals
mated_animals <- population %>% filter(partner==T)
single_animals <- population %>% filter(partner==F)
###Assign single individual to female column or male column (indexed by postion in 1:n)
##0 implies that there is no mate (eg. Male - 177 and Female - 0 implies a single male)
single_animals <- data.frame(female = ifelse(single_animals$sex == "F",
single_animals$animal_id,0),
male = ifelse(single_animals$sex == "M",
single_animals$animal_id,0)
)
###Divide mated tables by gender
mated_females <- mated_animals %>% filter(sex == "F")
mated_males <- mated_animals %>% filter(sex == "M")
#Assign initial pairings
pairs <- mated_males %>%
mutate(partner_id = sample(c(mated_females$animal_id),replace=F)[1:nrow(mated_males)]) %>%
rename("male" = animal_id,"female"=partner_id) %>% dplyr::select("female","male") %>%
mutate(female = ifelse(is.na(female),0,female))
#Set up data frame containing all relevant information
entities <- bind_rows(pairs,single_animals,
right_join(filter(pairs,female>0),mated_females,by=c("female"="animal_id")) %>%
filter(is.na(male)) %>%
select(female,male) %>% mutate(male = replace_na(male,0))) %>%
mutate(ID=1:n(),initial_entry=1)#as.integer(sample(c(1:round(k/2)),size=n(),replace=T)))
entities2 <- merge(entities$ID,1:k) %>%
rename(ID=x,Time=y) %>%
inner_join(entities,by="ID") %>%
arrange(ID,Time) %>%
mutate(mated = ifelse(initial_entry==Time & pmin(female,male) != 0,2,ifelse(pmin(female,male)==0,1,NA)),
SV = ifelse(initial_entry==Time,ifelse(female==0,3,ifelse(male==0,2,4)),NA),
RC = ifelse(initial_entry==Time,ifelse(female==0,3,ifelse(male==0,2,4)),NA),
a = ifelse(initial_entry==Time,ifelse(female==0,3,ifelse(male==0,2,4)),NA),
d=mated)
return(entities2)
}
sim_mated_cjs_dgr <- function(n,k,prob.female,prob.partner=1,phi.f,phi.m,gamma,p.f,p.m,rho,delta){
#Generate Data Template
entities <- init_pop_cjs_dgr(n,prob.female,prob.partner,k)
#Probabilities
s.param <- param_surv_cjs_dgr(phi.f,phi.m,gamma)
r.param <- param_surv_cjs_dgr(p.f,p.m,rho)
#Unpack survival probs
phi.mf <- s.param[[1]]
phi.f0 <- s.param[[2]]
phi.m0 <- s.param[[3]]
phi.00 <- s.param[[4]]
#Unpack recapture Probs
p.mf <- r.param[[1]]
p.f0 <- r.param[[2]]
p.m0 <- r.param[[3]]
p.00 <- r.param[[4]]
#Assign values sequentially
for(i in unique(entities$ID)){
init <- filter(entities,ID==i) %>% dplyr::select(initial_entry) %>% unique() %>% t() %>% as.vector()
#Loop through unknown states/observations
for(j in (init):k){
if(j == init){
next
} else {
#animal_id
female <- entities[(i - 1) * k + j,3]
male <- entities[(i - 1) * k + j,4]
#Previous survival state
previous_state <- entities[(i - 1) * k + j - 1,7]
#Simulated Mated
entities[(i - 1) * k + j,6] <- mated <- divorce_dgr(previous_state,j,delta)
#Survival
entities[(i - 1) * k + j,7] <- current_state <- survival_cjs_dgr(previous_state,mated,j,phi.m,phi.f,phi.00,phi.f0,phi.m0,phi.mf)
#Recapture
entities[(i - 1) * k + j,8] <- current_observation <- recapture_cjs_dgr(current_state,mated,j,p.m,p.f,p.00,p.f0,p.m0,p.mf)
#Confounded Survival
entities[(i - 1) * k + j,9] <- ifelse(current_observation==1,NA,current_observation)
#Add confounded mated
entities[(i - 1) * k + j,10] <- ifelse(current_observation==4|female==0|male==0,mated,NA)
rm(female,male,mated,current_state,current_observation)
}
}
}
#Confounded survival states at time i
state_confounded <- entities %>% filter(ID == i) %>% dplyr::select(a) %>% t() %>% as.vector()
#Positions
last.female.seen <- ifelse(abs(max(which(state_confounded %in%
c(2))))==Inf,0,max(which(state_confounded %in% c(2))))
last.male.seen <- ifelse(abs(max(which(state_confounded %in%
c(3))))==Inf,0,max(which(state_confounded %in% c(3))))
last.both.seen <- ifelse(abs(max(which(state_confounded %in%
c(4))))==Inf,0,max(which(state_confounded %in% c(4))))
last.seen <- max(last.both.seen,last.male.seen,last.female.seen)
#Update Confounded survival Information
for(l in init:last.seen){
if(l == init){
next
} else {
state_confounded[l] <- ifelse(last.both.seen>=l|(last.male.seen>=l&last.female.seen>=l),4,
ifelse(last.male.seen>=l,3,ifelse(last.female.seen>=l,2,NA)))
entities[(i - 1) * k + l,9] <- state_confounded[l]
}
}
return(entities)
}
#Format MRC data into std cjs model
extract_cjs_dgr <- function(Data){
#Split data apart for generic CJS model
females.seperate <- Data %>%
select(Time,female,initial_entry,RC,a) %>%
filter(female > 0) %>%
mutate(RC = ifelse(RC ==1,0,ifelse(RC==2|RC==4,1,0)),
a = ifelse(a==4|a==2,1,ifelse(a==1|a==3,0,NA)),
gender = "F") %>%
rename(ID = female)
males.seperate <- Data %>%
select(Time,male,initial_entry,RC,a) %>%
filter(male > 0) %>%
mutate(RC = ifelse(RC ==1,0,ifelse(RC==3|RC==4,1,0)),
a = ifelse(a==4|a==3,1,ifelse(a==1|a==2,0,NA)),
gender = "M") %>%
rename(ID = male)
#Table of individual results
split.results <- bind_rows(females.seperate,males.seperate)
#Set up results in list format
a <- split.results %>% select(ID,Time,a) %>% tidyr::spread(Time,a) %>% select(-ID)
x <- split.results %>% select(ID,Time,RC) %>% tidyr::spread(Time,RC) %>% select(-ID)
first <- split.results %>%
select(ID,initial_entry) %>% distinct() %>%
arrange(ID) %>% select(-ID) %>% t() %>% as.vector()
sex <- split.results %>%
select(ID,gender) %>% distinct() %>%
arrange(ID) %>% select(-ID) %>% t() %>% as.vector()
k <- ncol(a)
n <- nrow(a)
# Prior generating function
PGF <- function(n) {
phi <- rbeta(n, 40, 10)
p <- rbeta(n, 40, 10)
return(list("phi" = phi, "p" = p))
}
DGF <- function(n, phi, p, log = TRUE){
dbeta(phi, 40, 10, log = log) +
dbeta(p, 40, 10, log = log)
}
# Param Names
param.names <- c("Phi", "P")
#Results
cjs_dat <- list(
"PGF" = PGF,
"DGF" = DGF,
"k" = k,
"n" = n,
"a" = a,
"x" = x,
"first" = first,
"param" = param.names,
"sex" = sex
)
return(cjs_dat)
}
compile_cjs_dgr <- function(parameter_list,raw= TRUE){
#Parameters
n <- parameter_list[["n"]] ##Sample Size
k <- parameter_list[["k"]] ##Sampling Occasions
delta <- parameter_list[["delta"]] #Breeding Probability
phi.f <- parameter_list[["phi.f"]] #Marginal Female Survival from j to j+1
phi.m <- parameter_list[["phi.m"]] #Marginal Male Survival from j to j+1
gamma <- parameter_list[["gamma"]] #Correlation between males and female survival
p.f <- parameter_list[["p.f"]] #Marginal Female Recapture at j
p.m <- parameter_list[["p.m"]] #Marginal Male Recapture at j
rho <- parameter_list[["rho"]] #Correlation between Female and Male Recapture
prob.female <- parameter_list[["prob.female"]] #Proportion of females in the population
#Simulate Results
sim <- sim_mated_cjs_dgr(n,k,prob.female,prob.partner=1,phi.f,phi.m,gamma,p.f,p.m,rho,delta)
#Filter Down to Raw CJS data
cjs_dat <- extract_cjs_dgr(sim)
#Add Survival/Recapture Probabilities
cjs_dat$phi.m <- phi.m
cjs_dat$p.m <- p.m
cjs_dat$phi.f <- phi.f
cjs_dat$p.f <- p.f
#Which Data do you want?
if(raw == TRUE){
return(cjs_dat)
} else {
return(sim)
}
}
|
5e2eeb5ee2c97d3faaa6649fa18927f32d26cce3 | ac743b1fa58c9c5030efd800e36c1932fd85362f | /R/save_tmaps.R | 5d3926db3443dfc6dd7a7392fd7c80b14de99cfa | [] | no_license | DanOvando/demons | 1a76e8e37c65d9e1a62d23fe56679ebd12e9d8d7 | 448e5db5d4dcad337d8e43352814840c946789e9 | refs/heads/master | 2020-06-13T21:23:39.763298 | 2018-10-23T18:10:37 | 2018-10-23T18:10:37 | 83,343,896 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,023 | r | save_tmaps.R | #' save_tmaps saves all plots in the environment tagged
#' with the appropriate variable name, such as '_plot'
#'
#' @param plot_dir the directory to save plots
#' @param extension the extension to save the plots as
#' @param plot_tag the tag to identify what's a plot
#' @param fig_width the height of the figure in inches
#' @param fig_height the width of the figure in inches
#'
#' @return saved plots
#' @export
#'
#' @examples save_tmaps(plot_dir = 'results/')
save_tmaps <- function(plot_dir = '.', extension = '.pdf',
plot_tag = '_map', fig_width = 6,
fig_height = 4){
plot_list <- names(.GlobalEnv)[stringr::str_detect(names(.GlobalEnv),paste0(plot_tag,'$'))]
for (i in seq_along(plot_list)) {
eval(parse(text = paste('this_graph =', plot_list[i], sep = '')))
tmap::save_tmap(tm = this_graph,
filename = paste(plot_dir, '/', plot_list[i], extension, sep = ''),
height = fig_height,
width = fig_width
)
}
}
|
41f27ef547771476d6deef490e8024bed26a2aab | 28c837b92420a08ef0c6adcb961704094257f61f | /replicationCode/13-generateComparativeExample.R | b04960334e4e0b19b8f001f24bc3c776561f7585 | [] | no_license | theo-s/ridgeEvaluation | 07d50bbfee1463cd7c2be28064a5b03297d3f89d | b3d9447b69afad79188a76a2f2aee581d1446a6b | refs/heads/master | 2021-06-10T21:10:37.319036 | 2021-05-05T21:03:00 | 2021-05-05T21:03:00 | 149,820,691 | 2 | 1 | null | null | null | null | UTF-8 | R | false | false | 7,648 | r | 13-generateComparativeExample.R | # A Simple example illustrating the flexibility of Linear Random Forests
setwd("~/Dropbox/ridgeEvaluationCode")
set.seed(4335)
library(forestry)
library(grf)
library(ggplot2)
library(dplyr)
library(reshape)
n <- 100
x <- runif(n,0,2 )
noise_less_y <- ifelse(x > 1, -2*x+ 4, 2*x )
y <- noise_less_y + rnorm(n, sd = .3)
df <- data.frame(x,y)
# Locally linear data set ------------------------------------------------------
#plot <- ggplot(data = df, aes(x = x, y = y)) + geom_point(shape = 1) + theme_classic()
# Standard CART tree -----------------------------------------------------------
rf <- forestry(x = x,
y = y,
nodesizeStrictSpl = 5,
ntree = 1)
rf_out <- predict(rf, x)
rf_df <- data.frame(x, rf_out)
#ggplot(data = rf_df, aes(x = x, y = rf_out)) + geom_point(shape = 1) + theme_classic()
# Linear CART tree -------------------------------------------------------------
lrf <- forestry(x = x,
y = y,
minSplitGain = .1,
nodesizeStrictSpl = 5,
ntree = 1,
linear = TRUE,
overfitPenalty = .01)
lrf_out <- predict(lrf, x, aggregation = "coefs")
lrf_df <- data.frame(x, lrf_out$predictions)
#ggplot(data = lrf_df, aes(x = x, y = lrf_out.predictions)) + geom_point(shape = 1) + theme_classic()
# Local Linear Forest tree -----------------------------------------------------
llf <- ll_regression_forest(X = as.matrix(x),
Y = y,
num.trees = 1,
enable.ll.split = TRUE)
llf_out <- predict(llf, as.data.frame(x))$predictions
llf_df <- data.frame(x, llf_out)
#ggplot(data = llf_df, aes(x = x, y = llf_out)) + geom_point(shape = 1, color = "red") + geom_line() + theme_classic()
# Retrieve Split points and coefficients ---------------------------------------
plot <- data.frame(
x = x,
Truth = noise_less_y,
CART = rf_out,
LocalLinearForest = llf_out,
RidgeRF = lrf_out$predictions
) %>% melt(id = "x") %>%
dplyr::rename(Estimator = variable, Truth = value) %>%
mutate(Estimator = as.character(Estimator)) %>%
ggplot(aes(x = x, y = Truth, color = Estimator, linetype = Estimator,
size = Estimator)) +
geom_line() +
scale_linetype_manual(values = c("Truth" = "dotted",
"LocalLinearForest" = "solid",
"CART" = "solid",
"RidgeRF" = "solid")) +
scale_size_manual(values = c("Truth" = .5,
"LocalLinearForest" = .7,
"CART" = .7,
"RidgeRF" = .7)) +
scale_color_manual(values = c("Truth" = "black",
"LocalLinearForest" = "green",
"CART" = "red",
"RidgeRF" = "blue")) +
geom_point(aes(x = x, y = y), data = data.frame(x = x, y = y),
inherit.aes = FALSE, size = 0.5, alpha = .8)
plot + theme_classic() +
scale_x_continuous(breaks = seq(0, 2, length.out = 5) + 1) +
coord_cartesian(ylim = c(-1, 3), xlim = c(0, 2.1)) + ggtitle(label = "CART and Linear Aggregation Comparison")
# We can compare the complexity of the trees -----------------------------------
plot(rf, tree.id = 1)
plot(lrf, tree.id = 1)
# A second example with added smoothness + differential in local noise levels due to axial noise
n <- 300
x <- runif(n,0,6)
noise_less_y <- ifelse(x > 2,
-sin(.8*(x-1)) + sin(1),
ifelse(x > 1,
-2*x+ 4,
2*x ))
plot(x, noise_less_y)
y <- noise_less_y + rnorm(n, sd = .15)
plot(x, y)
df <- data.frame(x,y)
# Standard CART tree -----------------------------------------------------------
rf <- forestry(x = x,
y = y,
nodesizeStrictSpl = 5,
ntree = 1)
rf_out <- predict(rf, x)
rf_df <- data.frame(x, rf_out)
# Linear CART tree -------------------------------------------------------------
lrf <- forestry(x = x,
y = y,
minSplitGain = .1,
nodesizeStrictSpl = 10,
ntree = 1,
linear = TRUE,
overfitPenalty = .01)
lrf_out <- predict(lrf, x, aggregation = "coefs")
lrf_df <- data.frame(x, lrf_out$predictions)
# Local Linear Forest tree -----------------------------------------------------
llf <- ll_regression_forest(X = as.matrix(x),
Y = y,
num.trees = 1,
enable.ll.split = TRUE)
llf_out <- predict(llf, as.data.frame(x))$predictions
llf_df <- data.frame(x, llf_out)
# Retrieve Split points and coefficients ---------------------------------------
plot <- data.frame(
x = x,
Truth = noise_less_y,
CART = rf_out,
LocalLinearForest = llf_out,
RidgeRF = lrf_out$predictions
) %>% melt(id = "x") %>%
dplyr::rename(Estimator = variable, Truth = value) %>%
mutate(Estimator = as.character(Estimator)) %>%
ggplot(aes(x = x, y = Truth, color = Estimator, linetype = Estimator,
size = Estimator)) +
geom_line() +
scale_linetype_manual(values = c("Truth" = "dotted",
"LocalLinearForest" = "solid",
"CART" = "solid",
"RidgeRF" = "solid")) +
scale_size_manual(values = c("Truth" = .5,
"LocalLinearForest" = .7,
"CART" = .7,
"RidgeRF" = .7)) +
scale_color_manual(values = c("Truth" = "black",
"LocalLinearForest" = "green",
"CART" = "red",
"RidgeRF" = "blue")) +
geom_point(aes(x = x, y = y), data = data.frame(x = x, y = y),
inherit.aes = FALSE, size = 0.5, alpha = .8)
plot + theme_classic() +
scale_x_continuous(breaks = seq(0, 6, length.out = 5) + 1) +
coord_cartesian(ylim = c(-1, 3), xlim = c(0, 6.1)) + ggtitle(label = "CART and Linear Aggregation Comparison #2")
# We can compare the complexity of the trees -----------------------------------
plot(rf, tree.id = 1)
plot(lrf, tree.id = 1)
# Simple V example ----------------------------------------------------------------
set.seed(4335)
n <- 500
p <- 10
X <- matrix(rnorm(n*p), n, p)
Y <- ifelse(X[,1] > 0, 3 * X[,1], - 3 * X[,1])
# GRF Local Linear Forest ------------------------------------------------------
grf_linear_forest <- ll_regression_forest(
X,
Y,
mtry = 10,
num.trees = 1,
enable.ll.split = TRUE,
ci.group.size = 1,
min.node.size = 100)
pred_grf <- predict(grf_linear_forest, X)$predictions
# Forestry Linear Random Forest ---------------------------------------------------------
forestry_linear_rf <- forestry(
X,
Y,
mtry = 10,
ntree = 1,
nodesizeStrictSpl = 100,
linear = TRUE
)
pred_forestry <- predict(forestry_linear_rf, X)
# Plot data --------------------------------------------------------------------
data.frame(
x = X[,1],
Signal = Y,
GRF = pred_grf,
Linear_forestry = pred_forestry
) %>% melt(id = "x") %>%
ggplot(aes(x = x, y = value, color = variable)) +
geom_line() +
scale_color_manual(values = c("Signal" = "blue",
"Linear_forestry" = "green",
"GRF" = "red")) +
theme_bw()
ggsave("~/Downloads/lrf_grf_comparison.pdf", width = 8, height = 8)
|
7c643d60012411d6d88cb57c3bd25f2df18f197e | d9c9a98ebac494fbddea79be4996b89a6d5f86e2 | /functions_sampling.R | 285a7be9dac151fdc6007b4f27b58cf4b53dc051 | [] | no_license | cbarbu/SynWood | 9ec37be85a269cb507dd5a67137723b8a6e0e9bc | 706f44d122ed6e689bf812fba56f0a9b7d3f1378 | refs/heads/master | 2021-01-16T21:22:23.649273 | 2012-10-19T17:31:18 | 2012-10-19T17:31:18 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,161 | r | functions_sampling.R |
#============================
# Perso MH
#===========================
# function sampling a parameter fed to Model
normSample<-function(Model,Data,oldTheta,nameParam,sdprop){
# identify param to sample
names(oldTheta)<-Data$parm.names
old<-oldTheta[nameParam]
# cat("parm:",nameParam,"old:",old,"oldTheta:",oldTheta,"\n")
# sample proposal
prop<-rnorm(1,mean=old,sd=sdprop);
# include proposal in theta
propTheta<-oldTheta
attributes(propTheta)<-NULL # important to avoid growing thetas
names(propTheta)<-Data$parm.names
propTheta[nameParam]<-prop
# get LLH for proposal
outModel<-Model(propTheta,Data)
LLHprop<-outModel$LP
LLHold<-attributes(oldTheta)$outModel$LP
# accept/reject
# always 0 for symmetric distribution, only for record
hasting_term<-dnorm(old,prop,sdprop,log=TRUE)-dnorm(prop,old,sdprop,log=TRUE);
lnr <- LLHprop-LLHold+hasting_term;
# cat("otheta:",oldTheta,"ptheta",propTheta,"lnr:",lnr,"(",LLHprop,"-",LLHold,"+",hasting_term);
if(lnr>=log(runif(1))) {
newTheta <- propTheta;
attributes(newTheta)$new<-TRUE
attributes(newTheta)$outModel<-outModel
# cat(nameParam," accept 1\n");
}else{
newTheta<-oldTheta
attributes(newTheta)$new<-FALSE
# cat(nameParam," accept 0\n");
}
return(newTheta)
}
# generic function for sampling, see test-functions_sampling.R for use
omniSample<-function(Model,Data,oldTheta,nameParam,sdprop){
# identify param to sample
names(oldTheta)<-Data$parm.names
old<-oldTheta[nameParam]
# cat("parm:",nameParam,"old:",old,"oldTheta:",oldTheta,"\n")
# init rprop and dprop according to Data$sampling
if(Data$sampling[nameParam]=="norm"){
rprop<-function(center,disp){
return(rnorm(1,mean=center,sd=disp))
}
dprop<-function(val,center,disp){
return(dnorm(val,mean=center,sd=disp,log=TRUE))
}
}else if(Data$sampling[nameParam]=="lnorm"){
rprop<-function(center,disp){
return(rlnorm(1,meanlog=log(center),sdlog=disp))
}
dprop<-function(val,center,disp){
return(dlnorm(val,meanlog=log(center),sdlog=disp,log=TRUE))
}
}else{
stop("unknown sampling method for",nameParam)
}
# sample proposal
prop<-rprop(old,sdprop);
# include proposal in theta
propTheta<-oldTheta
attributes(propTheta)<-NULL # important to avoid growing thetas
names(propTheta)<-Data$parm.names
propTheta[nameParam]<-prop
# get LLH for proposal
outModel<-Model(propTheta,Data)
LLHprop<-outModel$LP
LLHold<-attributes(oldTheta)$outModel$LP
# accept/reject
# always 0 for symmetric distribution, only for record
hasting_term<-dprop(old,prop,sdprop)-dprop(prop,old,sdprop);
lnr <- LLHprop-LLHold+hasting_term;
rand<-log(runif(1))
# cat("otheta:",oldTheta,"ptheta",propTheta,"lnr:",lnr,"(",LLHprop,"-",LLHold,"+",hasting_term,"rand:",rand,"\n");
if(lnr>=rand){
newTheta <- propTheta;
attributes(newTheta)$new<-TRUE
attributes(newTheta)$LLH<-LLHold
attributes(newTheta)$outModel<-outModel
# cat(nameParam," accept 1\n");
}else{
newTheta<-oldTheta
attributes(newTheta)$new<-FALSE
# cat(nameParam," accept 0\n");
}
return(newTheta)
}
|
8e71480abee363f276a0117efff88d9ac53b1e1a | 0c2c555accd7311bc988ef87c3093055db4eaf25 | /R/odin.R | 99dd2ae13f2253782f4e1fda0b3acbd3bd707f4d | [] | no_license | Geidelberg/cotonou | 1dc491e8f659ba431d2fbbec43ad19934457e7ba | b0cc0c0afae63bad0e2aeb073c60a60928c51c50 | refs/heads/master | 2021-06-05T09:43:07.530294 | 2020-07-27T11:37:32 | 2020-07-27T11:37:32 | 87,826,062 | 0 | 3 | null | 2020-01-15T10:02:21 | 2017-04-10T15:18:39 | R | UTF-8 | R | false | false | 14,106 | r | odin.R | ## Automatically generated by odin - do not edit
## Class descriptor for the compiled odin model "main_model".
## Arguments, in order:
##   1. model name ("main_model").
##   2. named list mapping odin's required hooks to the C symbols generated for
##      this model (create/finalise, deSolve init, RHS variants for dde/desolve,
##      output, metadata, initial conditions, set_user, contents).
##   3. character vector of the model's user-settable parameter/quantity names
##      (rates, initial compartment sizes, costs, intervention switches, etc.).
##   4. feature-flag list describing the generated model: continuous-time
##      (discrete = FALSE), uses arrays, output variables, user parameters and
##      interpolation; no delays or stochastic draws; has included C code and a
##      time-independent initial condition.
##   5-7. "cotonou" (presumably the host package name -- matches this repo),
##      the path to the generated model spec ("odin/main_model.json"), and TRUE
##      (an odin-internal flag; semantics not visible here -- see odin docs).
## NOTE(review): do not hand-edit the strings below -- they must match the
## symbols in the generated C code exactly, and the whole file is regenerated
## by odin.
.main_model <- odin:::odin_c_class("main_model", list(get_internal = "main_model_get_internal", finalise = "main_model_finalise", create = "main_model_create", initmod_desolve = "main_model_initmod_desolve", contents = "main_model_contents", set_user = "main_model_set_user", metadata = "main_model_metadata", initial_conditions = "main_model_initial_conditions", rhs = "main_model_rhs", rhs_dde = "main_model_rhs_dde", rhs_desolve = "main_model_rhs_desolve", output = "main_model_output_dde", rhs_r = "main_model_rhs_r"), c("above_500_by_group", "alpha01", "alpha02", "alpha03", "alpha04", "alpha05", "alpha11", "alpha22", "alpha23", "alpha24", "alpha25", "alpha32", "alpha33_without_supp", "alpha34_without_supp", "alpha35_without_supp", "alpha42", "alpha43", "alpha44", "alpha45", "art_dropout_interruption_parm_t", "art_dropout_interruption_parm_y", "ART_eligible_CD4_200_349_t", "ART_eligible_CD4_200_349_y", "ART_eligible_CD4_350_500_t", "ART_eligible_CD4_350_500_y", "ART_eligible_CD4_above_500_t", "ART_eligible_CD4_above_500_y", "ART_eligible_CD4_below_200_t", "ART_eligible_CD4_below_200_y", "art_initiation_interruption_parm_t", "art_initiation_interruption_parm_y", "ART_RR", "beta_comm", "beta_noncomm", "c_t_comm", "c_t_noncomm", "c_y_comm", "c_y_noncomm", "cost_1_year_of_ART_government_FSW", "cost_1_year_of_ART_rest_of_population", "cost_1_year_of_ART_study_FSW", "cost_1_year_PrEP_intermediate_adherence_government", "cost_1_year_PrEP_intermediate_adherence_study", "cost_1_year_PrEP_non_adherence_government", "cost_1_year_PrEP_non_adherence_study", "cost_1_year_PrEP_perfect_adherence_government", "cost_1_year_PrEP_perfect_adherence_study", "cost_FSW_1_year_ART_Patient_costs", "cost_FSW_initiation_ART_Patient_costs", "cost_Initiation_ART_rest_of_population", "cost_Initiation_of_ART_government_FSW", "cost_Initiation_of_ART_study_FSW", "cost_Initiation_of_PrEP_government", "cost_Initiation_of_PrEP_study", "cost_PREP_1_year_ART_Patient_costs", 
"cost_PREP_initiation_Patient_costs", "count_PrEP_1a", "count_PrEP_1b", "count_PrEP_1c", "cumuInf_init", "dur_FSW", "ec", "eP", "eP0", "eP1a", "eP1b", "eP1c", "eP1d", "epsilon_t", "epsilon_y", "fc_t_comm", "fc_t_noncomm", "fc_y_comm", "fc_y_noncomm", "fP_t_comm", "fP_t_noncomm", "fP_y_comm", "fP_y_noncomm", "fPa", "fPb", "fPc", "fraction_FSW_foreign", "FSW_ONLY", "gamma01", "gamma02", "gamma03", "gamma04", "gamma11", "gamma22", "gamma23", "gamma24", "gamma32_without_supp", "gamma33_without_supp", "gamma34_without_supp", "gamma42", "gamma43", "gamma44", "I01_init", "I02_init", "I03_init", "I04_init", "I05_init", "I11_init", "I22_init", "I23_init", "I24_init", "I25_init", "I32_init", "I33_init", "I34_init", "I35_init", "I42_init", "I43_init", "I44_init", "I45_init", "infect_acute", "infect_AIDS", "infect_ART_t", "infect_ART_y", "infected_FSW_incoming", "intervention_ART_increase", "intervention_testing_increase", "iota", "kappa1", "kappaa", "kappab", "kappac", "M_comm", "M_noncomm", "mu", "n_t_comm", "n_t_noncomm", "n_y_comm", "n_y_noncomm", "Nage", "Ncat", "nu", "old_VS_assumption", "omega", "pfFSW_t", "pfFSW_y", "phi2", "phi3", "phi4", "phi5", "prep_efficacious_t", "prep_efficacious_y", "prep_intervention_t", "prep_intervention_y", "PrEP_reinit_OnOff_t", "PrEP_reinit_OnOff_y", "PrEPOnOff", "psia", "psib", "R", "rate_leave_pro_FSW", "rate_move_in", "rate_move_out", "rate_move_out_PrEP", "re_init_interruption_parm_t", "re_init_interruption_parm_y", "replaceDeaths", "rho", "rho_intervention_t", "rho_intervention_y", "RR_test_CD4200", "S0_init", "S1a_init", "S1b_init", "S1c_init", "S1d_init", "sigma", "TasP_testing", "tau_intervention_t", "tau_intervention_y", "test_rate_prep", "testing_prob_t", "testing_prob_y", "theta", "viral_supp_t", "viral_supp_y", "W0", "W1", "W2", "W3", "who_believe_comm"), list(discrete = FALSE, has_array = TRUE, has_output = TRUE, has_user = TRUE, has_delay = FALSE, has_interpolate = TRUE, has_stochastic = FALSE, has_include = TRUE, 
initial_time_dependent = FALSE), "cotonou", "odin/main_model.json", TRUE)
main_model <- structure(function (above_500_by_group, alpha01, alpha02, alpha03,
alpha04, alpha05, alpha11, alpha22, alpha23, alpha24, alpha25,
alpha32, alpha33_without_supp, alpha34_without_supp, alpha35_without_supp,
alpha42, alpha43, alpha44, alpha45, art_dropout_interruption_parm_t,
art_dropout_interruption_parm_y, ART_eligible_CD4_200_349_t,
ART_eligible_CD4_200_349_y, ART_eligible_CD4_350_500_t, ART_eligible_CD4_350_500_y,
ART_eligible_CD4_above_500_t, ART_eligible_CD4_above_500_y,
ART_eligible_CD4_below_200_t, ART_eligible_CD4_below_200_y,
art_initiation_interruption_parm_t, art_initiation_interruption_parm_y,
ART_RR, beta_comm, beta_noncomm, c_t_comm, c_t_noncomm, c_y_comm,
c_y_noncomm, cost_1_year_of_ART_government_FSW, cost_1_year_of_ART_rest_of_population,
cost_1_year_of_ART_study_FSW, cost_1_year_PrEP_intermediate_adherence_government,
cost_1_year_PrEP_intermediate_adherence_study, cost_1_year_PrEP_non_adherence_government,
cost_1_year_PrEP_non_adherence_study, cost_1_year_PrEP_perfect_adherence_government,
cost_1_year_PrEP_perfect_adherence_study, cost_FSW_1_year_ART_Patient_costs,
cost_FSW_initiation_ART_Patient_costs, cost_Initiation_ART_rest_of_population,
cost_Initiation_of_ART_government_FSW, cost_Initiation_of_ART_study_FSW,
cost_Initiation_of_PrEP_government, cost_Initiation_of_PrEP_study,
cost_PREP_1_year_ART_Patient_costs, cost_PREP_initiation_Patient_costs,
count_PrEP_1a, count_PrEP_1b, count_PrEP_1c, cumuInf_init,
dur_FSW, ec, eP, eP0, eP1a, eP1b, eP1c, eP1d, epsilon_t,
epsilon_y, fc_t_comm, fc_t_noncomm, fc_y_comm, fc_y_noncomm,
fP_t_comm, fP_t_noncomm, fP_y_comm, fP_y_noncomm, fPa, fPb,
fPc, fraction_FSW_foreign, FSW_ONLY, gamma01, gamma02, gamma03,
gamma04, gamma11, gamma22, gamma23, gamma24, gamma32_without_supp,
gamma33_without_supp, gamma34_without_supp, gamma42, gamma43,
gamma44, I01_init, I02_init, I03_init, I04_init, I05_init,
I11_init, I22_init, I23_init, I24_init, I25_init, I32_init,
I33_init, I34_init, I35_init, I42_init, I43_init, I44_init,
I45_init, infect_acute, infect_AIDS, infect_ART_t, infect_ART_y,
infected_FSW_incoming, intervention_ART_increase, intervention_testing_increase,
iota, kappa1, kappaa, kappab, kappac, M_comm, M_noncomm,
mu, n_t_comm, n_t_noncomm, n_y_comm, n_y_noncomm, Nage, Ncat,
nu, old_VS_assumption, omega, pfFSW_t, pfFSW_y, phi2, phi3,
phi4, phi5, prep_efficacious_t, prep_efficacious_y, prep_intervention_t,
prep_intervention_y, PrEP_reinit_OnOff_t, PrEP_reinit_OnOff_y,
PrEPOnOff, psia, psib, R, rate_leave_pro_FSW, rate_move_in,
rate_move_out, rate_move_out_PrEP, re_init_interruption_parm_t,
re_init_interruption_parm_y, replaceDeaths, rho, rho_intervention_t,
rho_intervention_y, RR_test_CD4200, S0_init, S1a_init, S1b_init,
S1c_init, S1d_init, sigma, TasP_testing, tau_intervention_t,
tau_intervention_y, test_rate_prep, testing_prob_t, testing_prob_y,
theta, viral_supp_t, viral_supp_y, W0, W1, W2, W3, who_believe_comm,
user = list(above_500_by_group = above_500_by_group, alpha01 = alpha01,
alpha02 = alpha02, alpha03 = alpha03, alpha04 = alpha04,
alpha05 = alpha05, alpha11 = alpha11, alpha22 = alpha22,
alpha23 = alpha23, alpha24 = alpha24, alpha25 = alpha25,
alpha32 = alpha32, alpha33_without_supp = alpha33_without_supp,
alpha34_without_supp = alpha34_without_supp, alpha35_without_supp = alpha35_without_supp,
alpha42 = alpha42, alpha43 = alpha43, alpha44 = alpha44,
alpha45 = alpha45, art_dropout_interruption_parm_t = art_dropout_interruption_parm_t,
art_dropout_interruption_parm_y = art_dropout_interruption_parm_y,
ART_eligible_CD4_200_349_t = ART_eligible_CD4_200_349_t,
ART_eligible_CD4_200_349_y = ART_eligible_CD4_200_349_y,
ART_eligible_CD4_350_500_t = ART_eligible_CD4_350_500_t,
ART_eligible_CD4_350_500_y = ART_eligible_CD4_350_500_y,
ART_eligible_CD4_above_500_t = ART_eligible_CD4_above_500_t,
ART_eligible_CD4_above_500_y = ART_eligible_CD4_above_500_y,
ART_eligible_CD4_below_200_t = ART_eligible_CD4_below_200_t,
ART_eligible_CD4_below_200_y = ART_eligible_CD4_below_200_y,
art_initiation_interruption_parm_t = art_initiation_interruption_parm_t,
art_initiation_interruption_parm_y = art_initiation_interruption_parm_y,
ART_RR = ART_RR, beta_comm = beta_comm, beta_noncomm = beta_noncomm,
c_t_comm = c_t_comm, c_t_noncomm = c_t_noncomm, c_y_comm = c_y_comm,
c_y_noncomm = c_y_noncomm, cost_1_year_of_ART_government_FSW = cost_1_year_of_ART_government_FSW,
cost_1_year_of_ART_rest_of_population = cost_1_year_of_ART_rest_of_population,
cost_1_year_of_ART_study_FSW = cost_1_year_of_ART_study_FSW,
cost_1_year_PrEP_intermediate_adherence_government = cost_1_year_PrEP_intermediate_adherence_government,
cost_1_year_PrEP_intermediate_adherence_study = cost_1_year_PrEP_intermediate_adherence_study,
cost_1_year_PrEP_non_adherence_government = cost_1_year_PrEP_non_adherence_government,
cost_1_year_PrEP_non_adherence_study = cost_1_year_PrEP_non_adherence_study,
cost_1_year_PrEP_perfect_adherence_government = cost_1_year_PrEP_perfect_adherence_government,
cost_1_year_PrEP_perfect_adherence_study = cost_1_year_PrEP_perfect_adherence_study,
cost_FSW_1_year_ART_Patient_costs = cost_FSW_1_year_ART_Patient_costs,
cost_FSW_initiation_ART_Patient_costs = cost_FSW_initiation_ART_Patient_costs,
cost_Initiation_ART_rest_of_population = cost_Initiation_ART_rest_of_population,
cost_Initiation_of_ART_government_FSW = cost_Initiation_of_ART_government_FSW,
cost_Initiation_of_ART_study_FSW = cost_Initiation_of_ART_study_FSW,
cost_Initiation_of_PrEP_government = cost_Initiation_of_PrEP_government,
cost_Initiation_of_PrEP_study = cost_Initiation_of_PrEP_study,
cost_PREP_1_year_ART_Patient_costs = cost_PREP_1_year_ART_Patient_costs,
cost_PREP_initiation_Patient_costs = cost_PREP_initiation_Patient_costs,
count_PrEP_1a = count_PrEP_1a, count_PrEP_1b = count_PrEP_1b,
count_PrEP_1c = count_PrEP_1c, cumuInf_init = cumuInf_init,
dur_FSW = dur_FSW, ec = ec, eP = eP, eP0 = eP0, eP1a = eP1a,
eP1b = eP1b, eP1c = eP1c, eP1d = eP1d, epsilon_t = epsilon_t,
epsilon_y = epsilon_y, fc_t_comm = fc_t_comm, fc_t_noncomm = fc_t_noncomm,
fc_y_comm = fc_y_comm, fc_y_noncomm = fc_y_noncomm, fP_t_comm = fP_t_comm,
fP_t_noncomm = fP_t_noncomm, fP_y_comm = fP_y_comm, fP_y_noncomm = fP_y_noncomm,
fPa = fPa, fPb = fPb, fPc = fPc, fraction_FSW_foreign = fraction_FSW_foreign,
FSW_ONLY = FSW_ONLY, gamma01 = gamma01, gamma02 = gamma02,
gamma03 = gamma03, gamma04 = gamma04, gamma11 = gamma11,
gamma22 = gamma22, gamma23 = gamma23, gamma24 = gamma24,
gamma32_without_supp = gamma32_without_supp, gamma33_without_supp = gamma33_without_supp,
gamma34_without_supp = gamma34_without_supp, gamma42 = gamma42,
gamma43 = gamma43, gamma44 = gamma44, I01_init = I01_init,
I02_init = I02_init, I03_init = I03_init, I04_init = I04_init,
I05_init = I05_init, I11_init = I11_init, I22_init = I22_init,
I23_init = I23_init, I24_init = I24_init, I25_init = I25_init,
I32_init = I32_init, I33_init = I33_init, I34_init = I34_init,
I35_init = I35_init, I42_init = I42_init, I43_init = I43_init,
I44_init = I44_init, I45_init = I45_init, infect_acute = infect_acute,
infect_AIDS = infect_AIDS, infect_ART_t = infect_ART_t,
infect_ART_y = infect_ART_y, infected_FSW_incoming = infected_FSW_incoming,
intervention_ART_increase = intervention_ART_increase,
intervention_testing_increase = intervention_testing_increase,
iota = iota, kappa1 = kappa1, kappaa = kappaa, kappab = kappab,
kappac = kappac, M_comm = M_comm, M_noncomm = M_noncomm,
mu = mu, n_t_comm = n_t_comm, n_t_noncomm = n_t_noncomm,
n_y_comm = n_y_comm, n_y_noncomm = n_y_noncomm, Nage = Nage,
Ncat = Ncat, nu = nu, old_VS_assumption = old_VS_assumption,
omega = omega, pfFSW_t = pfFSW_t, pfFSW_y = pfFSW_y,
phi2 = phi2, phi3 = phi3, phi4 = phi4, phi5 = phi5, prep_efficacious_t = prep_efficacious_t,
prep_efficacious_y = prep_efficacious_y, prep_intervention_t = prep_intervention_t,
prep_intervention_y = prep_intervention_y, PrEP_reinit_OnOff_t = PrEP_reinit_OnOff_t,
PrEP_reinit_OnOff_y = PrEP_reinit_OnOff_y, PrEPOnOff = PrEPOnOff,
psia = psia, psib = psib, R = R, rate_leave_pro_FSW = rate_leave_pro_FSW,
rate_move_in = rate_move_in, rate_move_out = rate_move_out,
rate_move_out_PrEP = rate_move_out_PrEP, re_init_interruption_parm_t = re_init_interruption_parm_t,
re_init_interruption_parm_y = re_init_interruption_parm_y,
replaceDeaths = replaceDeaths, rho = rho, rho_intervention_t = rho_intervention_t,
rho_intervention_y = rho_intervention_y, RR_test_CD4200 = RR_test_CD4200,
S0_init = S0_init, S1a_init = S1a_init, S1b_init = S1b_init,
S1c_init = S1c_init, S1d_init = S1d_init, sigma = sigma,
TasP_testing = TasP_testing, tau_intervention_t = tau_intervention_t,
tau_intervention_y = tau_intervention_y, test_rate_prep = test_rate_prep,
testing_prob_t = testing_prob_t, testing_prob_y = testing_prob_y,
theta = theta, viral_supp_t = viral_supp_t, viral_supp_y = viral_supp_y,
W0 = W0, W1 = W1, W2 = W2, W3 = W3, who_believe_comm = who_believe_comm),
unused_user_action = NULL, use_dde = FALSE)
{
.main_model$new(user, unused_user_action, use_dde)
}, ir = "odin/main_model.json", class = "odin_generator")
class(main_model) <- "odin_generator"
attr(main_model, "ir") <- .main_model$public_fields$ir
|
81ea5258e7d875c132418e1d5c8bccee165c6490 | a034d355e35d8fa4fc562b8f71a771bca760a877 | /R/QC.R | 11883415c8ea1ca10d9e49ea4a34c6ca57157a57 | [] | no_license | tshmak/Tmisc | eeda1df8d449c3df7dd0d91f6ee698b10f6f3839 | 81f224e89a8d2ee9455f5ccfd1eae34e0ef7d8c6 | refs/heads/master | 2021-07-15T13:27:45.526848 | 2020-05-14T05:34:18 | 2020-05-14T05:34:18 | 144,684,908 | 3 | 1 | null | null | null | null | UTF-8 | R | false | false | 849 | r | QC.R | QC <- function(X, MAF.min=0.05, HWE.min=1e-6, batch=F, ...) {
  ### Function to do basic SNP QC filtering.
  ### X: a matrix of SNP genotypes (samples in rows, SNPs in columns).
  ### MAF.min: minimum minor-allele-frequency threshold.
  ### HWE.min: minimum Hardy-Weinberg p-value threshold
  ###          (HWE.chisq.matrix is defined elsewhere in this package).
  ### batch: use ff::ffcolapply to process columns in batches and save memory;
  ###        i1/i2 inside the expressions are supplied by ffcolapply.
  ### ...: other arguments to pass on to ffcolapply.
  ### Returns (invisibly, as the value of the last assignment) a logical
  ### vector marking the SNPs that pass both filters.
  ### NOTE(review): the MAF test `(1 - abs(1-colMeans)) > MAF.min` appears to
  ### assume a particular genotype coding (e.g. 0/1/2) -- confirm.
  if(!batch) {
    colMeans <- colMeans(X)
    select <- (1 - abs(1-colMeans)) > MAF.min
    HWE <- HWE.chisq.matrix(X)
    select <- select & HWE > HWE.min
  }
  else {
    # Same two filters, computed column-batch by column-batch via ff.
    colMeans <- ff::ffcolapply(colMeans(X[,i1:i2,drop=F]), X=X, RETURN=T,
                               CFUN="c", USE.NAMES=F, ...)
    select <- (1 - abs(1-colMeans)) > MAF.min
    HWE <- ff::ffcolapply(HWE.chisq.matrix(X[,i1:i2,drop=F]), X=X, RETURN=T,
                          CFUN="c", USE.NAMES=F, ...)
    select <- select & HWE > HWE.min
  }
}
|
087900b018a98866bf9a58840e501b16d1e22118 | a854f6b525ee5b2434a813ef7599c6164209fcf2 | /content/post/2022-04-23-tf-dataset-from-3d-nifti/featured.R | 9c8e5a78000fcb5a554691bc8271b4195a7b2f19 | [] | no_license | psadil/psadil | 22514d470296f340961153b87f906a9d1ac9d463 | b45b1de5c46e2cc77360c7cfe8fb90b9b89d55c2 | refs/heads/main | 2023-09-04T11:39:47.437314 | 2023-08-22T12:51:40 | 2023-08-22T12:51:40 | 115,663,041 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 403 | r | featured.R |
library(flametree)
# pick some colours
shades <- c(
"deepskyblue",
"darkolivegreen",
"gold3",
"red3")
# data structure defining the trees
d <- flametree_grow(
time = 6,
trees = 2,
seed = 365,
split = 3)
# draw the plot
d |>
flametree_plot(
background = "gainsboro",
palette = shades,
style = "minimal"
)
ggplot2::ggsave("featured.png", device=ragg::agg_png())
|
3d56240d43be2d665943588621102f1639a23f4a | ed6d69f508ef11d9a24deece7fa40e52f91453e6 | /plot2.R | 97338c4a98ed27ad6c5a27ce708d170c29077ff5 | [] | no_license | Matthew-Amos/ExData_Plotting1 | bac80b5ba21105b3f24eff20f79d6b681bf03650 | e60ba0ec77e8c73ed9a443cc68a21eccdb73fad0 | refs/heads/master | 2021-06-01T00:44:56.279096 | 2016-02-03T01:29:50 | 2016-02-03T01:29:50 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 194 | r | plot2.R | source('load_data.R')
# Plot 2: Global Active Power over time, written to a transparent-background
# PNG.  `dat` is created by load_data.R (sourced earlier in this script).
png('plot2.png', bg = "transparent")
with(dat, plot(DateTime, Global_active_power,
               type = "l",
               xlab = '',
               ylab = 'Global Active Power (kilowatts)'))
dev.off()
616df22be40435dcca84102b7f729f6b6748ad68 | 187d170f88518defcceb303502419962b453fa71 | /man/CriteriaSet.Rd | 4361ea3c8c68101176e317af4fd86f398c72a433 | [] | no_license | vubiostat/wfccmr | 8cec84461825caae45fa19fd2372ce3d7eb33bb6 | 9a1a340ac5e93133eec781418206d0d1fc9e460e | refs/heads/master | 2020-06-01T06:57:12.552197 | 2009-02-23T22:29:08 | 2009-02-23T22:29:08 | 115,112 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 708 | rd | CriteriaSet.Rd | \name{CriteriaSet}
\alias{CriteriaSet}
\alias{is.CriteriaSet}
\title{CriteriaSet objects}
\description{
Create or test for objects of type \code{"CriteriaSet"}.
}
\usage{
CriteriaSet(criteria = Criteria(), pass = "")
is.CriteriaSet(x)
}
\arguments{
\item{criteria}{object of type \code{"Criteria"}}
\item{pass}{character string. A function to help determine winners.}
\item{x}{object to be coerced or tested.}
}
\value{
\code{CriteriaSet} creates an object of type CriteriaSet.
\code{is.CriteriaSet} returns \code{TRUE} or \code{FALSE} depending on whether its argument is of CriteriaSet type or not.
}
\seealso{
\code{\link{CriteriaSet-class}}
}
\examples{
}
\keyword{classes}
\keyword{methods}
|
a231c68d77d4dbbf0121ba31fae7cb80061e0c12 | 1d0c61f8a764dcefcd333b60d2916cefb9fe7b0e | /run_analysis.R | 2547e5c592217462bab168b0070ccd0ccdf4bdef | [] | no_license | datasciencefan/GettingCleaningData | ee49811ffe45c71e3f1dce3a4619b77806a6f944 | e5b01ce1fb390ea55bf120c1e5080358456fb10e | refs/heads/master | 2016-09-06T19:47:59.255387 | 2014-04-27T18:23:49 | 2014-04-27T18:23:49 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,530 | r | run_analysis.R | #Load the raw data into R
## Course project: build a tidy summary of the UCI HAR dataset.
## Steps: load raw files, merge train/test, keep mean()/std() features,
## label activities, and write the per-subject/per-activity averages.

# Bug fix: melt()/dcast() below were called without loading reshape2.
library(reshape2)

# Load the raw data into R.
trainX <- read.table("./UCI HAR Dataset/train/X_train.txt")
trainY <- read.table("./UCI HAR Dataset/train/Y_train.txt")
subject_train <- read.table("./UCI HAR Dataset/train/subject_train.txt")
testX <- read.table("./UCI HAR Dataset/test/X_test.txt")
testY <- read.table("./UCI HAR Dataset/test/Y_test.txt")
subject_test <- read.table("./UCI HAR Dataset/test/subject_test.txt")
features <- read.table("./UCI HAR Dataset/features.txt")

# Combine the test and train files (test rows first, as before).
dataX <- rbind(testX, trainX)
dataY <- rbind(testY, trainY)
dataSub <- rbind(subject_test, subject_train)

# Extract only the mean() and std() measurements.
# NOTE(review): "mean()"/"std()" are treated as regexes, where "()" is an
# empty group, so this also matches e.g. meanFreq() columns -- confirm
# whether that is intended.
b <- sapply(c("mean()", "std()"), grep, features[, 2], ignore.case = TRUE)
bIndex <- unlist(b)

# Give descriptive column names to the data.
colnames(dataX) <- features[ ,2]
colnames(dataY) <- "Activity"
colnames(dataSub) <- "Subject"

# Keep only the std/mean columns.
stdMeanDataX <- dataX[ ,bIndex]

# Map numeric activity codes (1-6) to descriptive names.
activities <- c("WALKING", "WALKING_UPSTAIRS", "WALKING_DOWNSTAIRS", "SITTING", "STANDING", "LAYING")
dataNameY <- dataY
colnames(dataNameY) <- "Activity"
# Vectorized lookup replaces the original element-by-element for-loop.
dataNameY[[1]] <- activities[dataY[[1]]]

# Combine all pieces into one data set.
dataSet <- cbind(dataSub, dataNameY, stdMeanDataX)

# Create the tidy data set: average of each variable per Subject/Activity.
dataMelt <- melt(dataSet, id.vars = c("Activity", "Subject"))
dataCast <- dcast(dataMelt, Subject + Activity ~ variable, mean)
tidyData <- dataCast
write.table(tidyData, "./tidyData.txt", sep="\t")
|
182fd0500d4d252d4b8ce19c47234a87b2a26e01 | a0aacfded5e9cc6b90ee9e4a5dcd41d80f6015e9 | /R/re_exports.R | 894ca9ade01eceac48cf6f79440f4f84fc8d5ab4 | [
"MIT",
"CC-BY-4.0"
] | permissive | lbusett/antanym | f86746bcb777e699c75cd6354188540d8b9a337d | 2a6fee4e09dc039922832a04b6ec14ba3032e659 | refs/heads/master | 2020-03-11T05:15:28.618319 | 2018-04-22T21:13:56 | 2018-04-22T21:13:56 | 129,797,394 | 0 | 0 | MIT | 2018-04-16T19:50:51 | 2018-04-16T19:50:51 | null | UTF-8 | R | false | false | 27 | r | re_exports.R | #' @export
magrittr::`%>%`
|
cff9360e41996efe32d1fc6e6088997746fc2377 | bf64af11943bfe65c4904cb0f673328e1d0315a0 | /pipelines/Deprecated/Trying to make functions.R | 218d9b6fd64f4740a765bbb49893364c75a7368f | [] | no_license | Primordial-Haggis/birds_misc_R_Scripts | 41f07674b9d91b981c8c64e57fdd3ce6f5aee809 | 1d71a711ed8e7b925d8b637e921f6048ae7ac10d | refs/heads/master | 2022-06-17T22:43:38.070518 | 2019-10-25T17:50:23 | 2019-10-25T17:50:23 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,768 | r | Trying to make functions.R | library(dplyr)
library(readr)
library(data.table)
library(DescTools)
library(tidyr)
crusade_df <- read_csv("data_sum_OTU.dat")
#data_sum_OTU <- crusade_df %>%
# dplyr::select(c(Order, Family, Genus, OTU), c(8:38))
# Trying to replace All of this nonsense with a function ------------------
# Clean taxonomy labels rank by rank: any Order/Family/Genus/OTU value that
# matches "uncultured"/"unclassified"/"unidentified" is collapsed to
# "unclassified" at the highest ambiguous rank, and every rank below it is
# blanked (""); an ambiguous OTU name becomes "sp".
# NOTE(review): %like any% comes from DescTools; the "%...%" patterns appear
# to act as wildcards -- confirm against the DescTools docs.
clean_test <- crusade_df%>%
  # Order level: collapse ambiguous names, then blank the lower ranks.
  dplyr::mutate(Order = case_when(Order %like any% c("%uncultured%", "%unclassified%", "%unidentified%") ~ "unclassified",
                                  TRUE ~ as.character(Order)))%>%
  dplyr::mutate(Family = case_when(Order == "unclassified" ~ "",
                                   TRUE ~ as.character(Family)))%>%
  dplyr::mutate(Genus = case_when(Order == "unclassified" ~ "",
                                  TRUE ~ as.character(Genus)))%>%
  dplyr::mutate(OTU = case_when(Order == "unclassified" ~ "",
                                TRUE ~ as.character(OTU)))%>%
  # Family level.
  dplyr::mutate(Family = case_when(Family %like any% c("%uncultured%", "%unclassified%", "%unidentified%") ~ "unclassified",
                                   TRUE ~ as.character(Family)))%>%
  dplyr::mutate(Genus = case_when(Family == "unclassified" ~ "",
                                  TRUE ~ as.character(Genus)))%>%
  dplyr::mutate(OTU = case_when(Family == "unclassified" ~ "",
                                TRUE ~ as.character(OTU)))%>%
  # Genus level.
  dplyr::mutate(Genus = case_when(Genus %like any% c("%uncultured%", "%unclassified%", "%unidentified%") ~ "unclassified",
                                  TRUE ~ as.character(Genus)))%>%
  dplyr::mutate(OTU = case_when(Genus == "unclassified" ~ "",
                                TRUE ~ as.character(OTU)))%>%
  # OTU level: ambiguous species names become "sp".
  dplyr::mutate(OTU = case_when(OTU %like any% c("%uncultured%", "%unclassified%", "%unidentified%") ~ "sp",
                                TRUE ~ as.character(OTU)))
# Function Workshop -------------------------------------------------------
otu_OFGS <- function(data, Order, Family, Genus, OTU) {
  # Clean ambiguous taxonomy labels (uncultured/unclassified/unidentified)
  # rank by rank: collapse the ambiguous name to "unclassified" and blank
  # every rank below it; an ambiguous OTU name becomes "sp".
  #
  # Bug fix: the original body called dplyr::mutate(data, ...) ten times and
  # discarded every result but the last, so only the final OTU step was ever
  # applied.  The steps are now chained so each builds on the previous one
  # (matching the inline `clean_test` pipeline above).
  #
  # NOTE(review): the Order/Family/Genus/OTU arguments are not used --
  # mutate() resolves those names as columns of `data`.  They are kept only
  # so existing calls remain valid.
  data %>%
    dplyr::mutate(Order = case_when(Order %like any% c("%uncultured%", "%unclassified%", "%unidentified%") ~ "unclassified",
                                    TRUE ~ as.character(Order))) %>%
    dplyr::mutate(Family = case_when(Order == "unclassified" ~ "",
                                     TRUE ~ as.character(Family))) %>%
    dplyr::mutate(Genus = case_when(Order == "unclassified" ~ "",
                                    TRUE ~ as.character(Genus))) %>%
    dplyr::mutate(OTU = case_when(Order == "unclassified" ~ "",
                                  TRUE ~ as.character(OTU))) %>%
    dplyr::mutate(Family = case_when(Family %like any% c("%uncultured%", "%unclassified%", "%unidentified%") ~ "unclassified",
                                     TRUE ~ as.character(Family))) %>%
    dplyr::mutate(Genus = case_when(Family == "unclassified" ~ "",
                                    TRUE ~ as.character(Genus))) %>%
    dplyr::mutate(OTU = case_when(Family == "unclassified" ~ "",
                                  TRUE ~ as.character(OTU))) %>%
    dplyr::mutate(Genus = case_when(Genus %like any% c("%uncultured%", "%unclassified%", "%unidentified%") ~ "unclassified",
                                    TRUE ~ as.character(Genus))) %>%
    dplyr::mutate(OTU = case_when(Genus == "unclassified" ~ "",
                                  TRUE ~ as.character(OTU))) %>%
    dplyr::mutate(OTU = case_when(OTU %like any% c("%uncultured%", "%unclassified%", "%unidentified%") ~ "sp",
                                  TRUE ~ as.character(OTU)))
}
otu_order <- function(data, taxon_c) {
  # Drop the existing Order column and rename the column whose name is held
  # in the string `taxon_c` to "Order".
  # Bug fixes: (1) the result of dplyr::select() was discarded in the
  # original, so Order was never dropped; (2) rename(data, "Order" = taxon_c)
  # looked for a column literally named "taxon_c" instead of using the string
  # stored in the variable.
  data <- dplyr::select(data, -Order)
  names(data)[names(data) == taxon_c] <- "Order"
  data
}
# Run the cleaner on the raw table and inspect the result interactively
# (View() opens the RStudio data viewer).  The extra column arguments are
# passed only to match otu_OFGS's signature.
cleanish <-otu_OFGS(crusade_df, crusade_df$Order, crusade_df$Family, crusade_df$Genus, crusade_df$OTU)
View(cleanish)
|
01b17a4339feb7b7bd85fa86360cd2f0cc42b1e8 | ba5e8bafef9c419a5f2c458b0fb3b9ea29fdbcb1 | /R/reverseEdgeDirections.R | 1a164f09c75623eb97fa7c23cafecdaee18e74b2 | [] | no_license | Bioconductor/graph | eb5e26d0a287eafed39f99694f9cd04ee952cd83 | 91706d22ecc17cb42de0395803272740a99d5942 | refs/heads/devel | 2023-08-29T12:39:45.270517 | 2023-04-25T13:38:48 | 2023-04-25T13:38:48 | 102,150,187 | 3 | 15 | null | 2023-03-27T20:24:28 | 2017-09-01T20:22:29 | R | UTF-8 | R | false | false | 314 | r | reverseEdgeDirections.R | reverseEdgeDirections <- function(g) {
    ## Reverse every directed edge of graph `g` by transposing the adjacency
    ## matrix of its graphAM representation, then convert back to graphNEL.
    ## FIXME: This needs to fix edge attributes, but for now, we punt
    ## and we are only using node attrs here anyhow...
    gam <- as(g, "graphAM")
    nodeNames <- nodes(g)
    ## t() moves the row names into the columns, so restore the original
    ## node names on the transposed matrix.
    gam@adjMat <- t(gam@adjMat)
    colnames(gam@adjMat) <- nodeNames
    as(gam, "graphNEL")
}
|
cdf715f92719c2d7b1dc34524de1abd33f7ffc93 | e6e018524f8b64671864317831a9fb12a1ed763f | /Getting_&_cleaning_data/Getting&CleaningData/run_analysis.R | 69b93ab7eae09e5e9e2778f6acb810d9963420ef | [] | no_license | Mbe45/Gettingandcleaningdata | cf45dec850830ef6773fcd874e5281520fc7ae6e | 6b80d2bf65bbd6548828f0042cac3a156204375e | refs/heads/master | 2021-01-21T12:50:13.052730 | 2017-08-21T21:39:29 | 2017-08-21T21:39:29 | 91,802,560 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,596 | r | run_analysis.R | ###### this script is about the curse
###### project in getting and cleaning data,
###### which is part of the Data Science Specialization
##### offer by JHU through Coursera Platform
##### 19/05/2017
####Mbecho Techago Emmanuel
###The task to perform
###1Merges the training and the test sets to create one data set.
###2 Extracts only the measurements on the mean
###and standard deviation for each measurement.
###3 Uses descriptive activity names to name
###the activities in the data set
###4 Appropriately labels the data set with
###descriptive variable names.
###5From the data set in step 4, creates a second,
###independent tidy data set with the average
###of each variable for each activity and each subject.
###Approach:
###loading the traing dataset.
train <- read.table("Getting_&_cleaning_dat/UCI HAR Dataset/train/X_train.txt")
###looking at a fraction of the dataset
##by selecting some fews rows and columns
##reading the subject_id
subject_ID <- read.table("Getting_&_cleaning_dat/UCI HAR Dataset/train/subject_train.txt")
###reading the activity lebels
y_train <- read.table("Getting_&_cleaning_dat/UCI HAR Dataset/train/y_train.txt")
###binding the subject id with the train label set(y_train)
subject_ID <- cbind(subject_ID, y_train)
###It is very important to give a meaniful
###naming to our variables for both ease of
##understanding when going through the codes
##and also when explaining to another person.
##the dataset currently, does not have a descriptive
##names. we will give it a descriptive one.
##the features contains the names of the variables.
feature <- read.table("Getting_&_cleaning_dat/UCI HAR Dataset/features.txt",
header = F)
###using the names from feature to set the variables
###names in the trainingset.
col <- feature$V2
colnames(train) <- col
###binding the subject ID to the training set
train <- cbind(subject_ID, train)
names(train)[1:2] <- c("subject_ID", "activity")
#################################################################################
##moving to the test dataset.
##the same procedure as above is followed.
###loading the test dataset.
test <- read.table("Getting_&_cleaning_dat/UCI HAR Dataset/test/X_test.txt")
###looking at a fraction of the dataset
##by selecting some fews rows and columns
##reading the subject_id
subject_ID1 <- read.table("Getting_&_cleaning_dat/UCI HAR Dataset/test/subject_test.txt")
###reading the test label (y_text)
y_test <- read.table("Getting_&_cleaning_dat/UCI HAR Dataset/test/y_test.txt")
###binding the subject id with the test labels
subject_ID1 <- cbind(subject_ID1, y_test)
###It is very important to give a meaniful
###naming to our variables for both ease of
##understanding when going through the codes
##3nd also when explaining to another persn.
##the dataset currently, does not have a descriptive
##names. we will give it a descriptive one.
##the features contains the names of the variables.
feature <- read.table("Getting_&_cleaning_dat/UCI HAR Dataset/features.txt",
header = F)
###using the names from feature to set the variables
###names in the trainingset.
col <- feature$V2
colnames(test) <- col
###binding the subject ID to the training set
test <- cbind(subject_ID1, test)
names(test)[1:2] <- c("subject_ID", "activity")
##column bind the test and the training data together
mergedata <- rbind(train, test)
####Extracts only the measurements on the
##mean and standard deviation for each measurement.
###by using regular expression function grep()
mean_std_measurement <- mergedata[grep("(subject_ID|activity|mean()|std())",names(mergedata))]
##labelling the activity variable
##using the factor function
mean_std_measurement$activity <- factor(mean_std_measurement$activity,
levels = c(1,2,3,4,5,6),
labels = c("WALKING",
"WALKING_UPSTAIRS",
"WALKING_DOWNSTAIRS",
"SITTING",
"STANDING",
"LAYING"
))
#############################################
###creating the tidy_data set which
###the average of each variable for each activity and each subject.
###loading dply
library(dplyr)
tidy_data <- mean_std_measurement %>%
group_by(activity, subject_ID) %>%
summarise_all(mean)
write.table(tidy_data, "tidy_data.txt", row.names = F)
|
8e53a90c86cb73b2453a7ff20b52c153e90228eb | 89ba97bd314b82085d4cd4b5bce59e4f1686698f | /man/facet_limits.Rd | b115d3193c61175c42d5b217a4e18594fe012a56 | [] | no_license | raredd/plotr | b863bc3a1d5d6623b4ce9fb01e9524434efe710b | a39426d3fe4a1dd1870bd8e5d64c789b15677e93 | refs/heads/master | 2022-09-11T16:19:30.157318 | 2022-09-02T03:53:19 | 2022-09-02T03:53:19 | 27,737,506 | 1 | 1 | null | null | null | null | UTF-8 | R | false | true | 1,838 | rd | facet_limits.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ggplots.R
\name{facet_limits}
\alias{facet_limits}
\title{Custom facet limits}
\usage{
facet_limits(pl, limits, useNA = c("ifany", "no", "always"))
}
\arguments{
\item{pl}{a \code{\link{ggplot}} object}
\item{limits}{a named list containing one for more limits for each subplot}
\item{useNA}{how to handle \code{NA} values}
}
\description{
Modify x-axis limits for \code{\link{facet_wrap}}.
When melting a data set of factor variables, the desired levels are dropped
during the reshaping to a single variable. Therefore, variables will either
show all unique values as levels or only levels where data is present. This
function gives the ability to set custom x limits for each variable (and, as
a side effect, it also works for continuous variables).
}
\examples{
library('ggplot2')
datf <- datn <- mtcars[, c(2, 8:11)]
datf[] <- lapply(datf, factor)
datfl <- reshape2::melt(datf, id.vars = 'am')
datnl <- reshape2::melt(datn, id.vars = 'am')
datfl$value[datfl$value == 4] <- NA
# datfl <- na.omit(datfl)
pf <- ggplot(datfl, aes(value, fill = factor(am))) +
# facet_wrap(~ variable) +
facet_wrap(~ variable, scales = 'free') +
geom_bar(position = 'dodge')
## facets either show all levels for each plot or only those with data
pf
facet_limits(pf, list(gear = c(3,4,5), vs = 0:6), useNA = 'no')
facet_limits(pf, lapply(datf, levels), useNA = 'no')
facet_limits(pf, lapply(datf, levels), useNA = 'ifany')
facet_limits(pf, lapply(datf, levels), useNA = 'always')
pn <- ggplot(datnl, aes(value, fill = factor(am))) +
facet_wrap(~ variable, scales = 'free_x') +
geom_bar(position = 'dodge')
pn
facet_limits(pn, list(carb = c(0,100)))
facet_limits(pn, lapply(datn, extendrange, f = 1))
facet_limits(pn, list(c(0,10), c(-1,5), c(2,8), c(0,20)))
}
|
f256e9b20c7ebf0fc501da25afd0e5513a4d57e0 | a84d320d18ad67fc717a70f2ed196af3797eaaca | /reporting-grofwild/man/loadToekenningen.Rd | d023e113d7b09a230398c5ba7d8234a8f2175c62 | [
"MIT"
] | permissive | inbo/reporting-rshiny-grofwildjacht | 858e92cc4e701a621788e76e75596b693f7d81c9 | 4bebff97f0e84146e2a702b057bf671716df1fc8 | refs/heads/master | 2023-07-25T16:15:27.551309 | 2023-07-14T13:04:20 | 2023-07-14T13:04:20 | 88,876,066 | 1 | 1 | MIT | 2023-09-06T07:42:48 | 2017-04-20T14:37:50 | HTML | UTF-8 | R | false | true | 1,042 | rd | loadToekenningen.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_load.R
\name{loadToekenningen}
\alias{loadToekenningen}
\title{Read toekenningen (Ree) data}
\usage{
loadToekenningen(
bucket = config::get("bucket", file = system.file("config.yml", package =
"reportingGrofwild"))
)
}
\arguments{
\item{bucket}{character, name of the S3 bucket as specified in the config.yml file;
default value is "inbo-wbe-uat-data"}
}
\value{
data.frame with columns:
\itemize{
\item{'labeltype': }{character, type of Ree, one of \code{c("geit", "bok", "kits")}}
\item{'WBE_Naam': }{character, WBE name}
\item{'labeljaar': }{integer, year}
\item{'provincie_toek': }{character, province}
\item{'toegekend': }{integer, no. of assigned animals}
\item{'verwezenlijkt': }{integer, no. of shot animals}
\item{'percentage_verwezenlijkt': }{numeric, percentage shot animals}
\item{'KboNummer_Toek': }{character, WBE KBO number}
}
and attribute 'Date', the date that this data file was created
}
\description{
Read toekenningen (Ree) data
}
|
a949a43f76c539058427450aac6b69723c91ff86 | 9d3a4709b30707fd58a6e179c2719a9c0d117ccd | /machine-learning-ex4/R version/checkNNGradients.R | 290e56a08ceed96829c91cb8a8cb197d33362825 | [] | no_license | lucariel/ML_coursera | 0d02b150de5b09af3e4ccc4be2e44637e4850562 | 8b4c57814fcc6b0939b1eea25bd0ea29c7ca4cb1 | refs/heads/master | 2020-04-21T14:40:08.104809 | 2019-03-22T13:26:44 | 2019-03-22T13:26:44 | 169,642,627 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,161 | r | checkNNGradients.R | #########checkNNGradients and auxiliar functions for the checking
# Gradient check for the neural-network cost function: compares the analytic
# gradient (gradFunc) against a central-difference numerical gradient
# (computeNumericalGradient) on a small synthetic network and prints the
# relative difference, which should be tiny if backprop is correct.
#
# NOTE(review): costFunc()/gradFunc() are defined at the top level and read
# input_layer_size, hidden_layer_size, num_labels, X, y and lambda as free
# variables.  R's lexical scoping resolves those in the global environment,
# not in this function's frame, so this works only when those names also
# exist globally -- verify before refactoring.
checkNNGradients<-function(lambda = 0){
  # Helper definitions (debugInitializeWeights, ...) and the cost/gradient
  # implementation under test.
  source("aux_functions.R")
  source("nnCostFunction.R")
  # Keep the network small so the numerical gradient stays cheap.
  input_layer_size <- 3
  hidden_layer_size <- 5
  num_labels <- 3
  m <- 5
  # We generate some 'random' test data
  Theta1 <- debugInitializeWeights(hidden_layer_size, input_layer_size)
  Theta2 <- debugInitializeWeights(num_labels, hidden_layer_size)
  ## Reusing debugInitializeWeights to generate X.
  X <- debugInitializeWeights(m, (input_layer_size - 1))
  y <- 1 + seq(1:m)%%num_labels
  ## Unroll the weight matrices into one parameter column vector.
  Theta1_unrolled<-matrix(Theta1, ncol = 1, byrow = F)
  Theta2_unrolled<-matrix(Theta2, ncol = 1, byrow = F)
  nn_params<-rbind(Theta1_unrolled,Theta2_unrolled)
  grad <- gradFunc(nn_params)
  numgrad <- computeNumericalGradient(costFunc, nn_params)
  print(cbind(numgrad, grad))
  writeLines("The above two columns you get should be very similar")
  # Relative difference between the two gradients (ratio of norms).
  diff = norm(as.matrix(numgrad - grad),"f")/norm(as.matrix(numgrad + grad),"f")
  cat(sprintf(c("If your backpropagation implementation is correct, then\n",
                "the relative difference will be small. \n",
                "\nRelative Difference: %g\n"), diff))
}
# Numerically approximate the gradient of scalar function J at theta using
# central differences: grad[i] ~ (J(theta + e_i*eps) - J(theta - e_i*eps)) / (2*eps).
# J: function of a numeric vector returning a scalar.
# theta: numeric vector at which to evaluate the gradient.
# Returns a numeric vector the same length as theta.
computeNumericalGradient <- function (J, theta)
{
  eps <- 1e-4
  n <- length(theta)
  numgrad <- numeric(n)
  for (idx in seq_len(n)) {
    # Perturb a single coordinate in each direction.
    step <- numeric(n)
    step[idx] <- eps
    numgrad[idx] <- (J(theta + step) - J(theta - step)) / (2 * eps)
  }
  numgrad
}
# Wrapper exposing only the scalar cost of nnCostFunction in the shape that
# computeNumericalGradient() expects: a function of the unrolled parameter
# vector `p` alone.  The value of the final assignment (the cost) is
# returned invisibly, which is sufficient for the numerical-gradient caller.
# NOTE(review): input_layer_size, hidden_layer_size, num_labels, X, y and
# lambda are free variables resolved in the global environment -- confirm
# they exist there when this is called.
costFunc <- function(p){
  ## Keep only the cost (first element of nnCostFunction's result).
  results<-nnCostFunction(nn_params = p,
                          input_layer_size,
                          hidden_layer_size,
                          num_labels,
                          X, y, lambda)
  results<-(results[[1]])
}
# Wrapper exposing only the analytic gradient of nnCostFunction as a plain
# vector, given the unrolled parameter vector `p`.
# NOTE(review): like costFunc above, this reads input_layer_size,
# hidden_layer_size, num_labels, X, y and lambda as free variables from the
# global environment -- confirm they exist there when this is called.
gradFunc <- function(p){
  ## Keep only the gradient (second element), flattened to a vector.
  results<-nnCostFunction(nn_params = p,
                          input_layer_size,
                          hidden_layer_size,
                          num_labels,
                          X, y, lambda)
  results<-unlist(results[[2]])
  results
}
|
df5c5e1074277543d3c9ffcd1c2c72d41dadeefd | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/JMbayes/examples/survfitJM.Rd.R | 795eaf598c4fd66c79c001eb56a88522c94bab50 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,679 | r | survfitJM.Rd.R | library(JMbayes)
### Name: survfitJM
### Title: Prediction in Joint Models
### Aliases: survfitJM survfitJM.JMbayes survfitJM.mvJMbayes
### Keywords: methods
### ** Examples
## Not run:
##D # we construct the composite event indicator (transplantation or death)
##D pbc2$status2 <- as.numeric(pbc2$status != "alive")
##D pbc2.id$status2 <- as.numeric(pbc2.id$status != "alive")
##D
##D # we fit the joint model using splines for the subject-specific
##D # longitudinal trajectories and a spline-approximated baseline
##D # risk function
##D lmeFit <- lme(log(serBilir) ~ ns(year, 2), data = pbc2,
##D random = ~ ns(year, 2) | id)
##D survFit <- coxph(Surv(years, status2) ~ drug, data = pbc2.id, x = TRUE)
##D jointFit <- jointModelBayes(lmeFit, survFit, timeVar = "year")
##D
##D # we will compute survival probabilities for Subject 2 in a dynamic manner,
##D # i.e., after each longitudinal measurement is recorded
##D ND <- pbc2[pbc2$id == 2, ] # the data of Subject 2
##D survPreds <- vector("list", nrow(ND))
##D for (i in 1:nrow(ND)) {
##D survPreds[[i]] <- survfitJM(jointFit, newdata = ND[1:i, ])
##D }
##D survPreds
##D
##D ###########################################################################
##D
##D # Predictions from multivariate models
##D
##D pbc2 <- pbc2[!is.na(pbc2$serChol), ]
##D pbc2.id <- pbc2[!duplicated(pbc2$id), ]
##D pbc2.id$Time <- pbc2.id$years
##D pbc2.id$event <- as.numeric(pbc2.id$status != "alive")
##D
##D # Fit a trivariate joint model
##D MixedModelFit <- mvglmer(list(log(serBilir) ~ year + (year | id),
##D sqrt(serChol) ~ year + (year | id),
##D hepatomegaly ~ year + (year | id)), data = pbc2,
##D families = list(gaussian, gaussian, binomial), engine = "STAN")
##D
##D CoxFit <- coxph(Surv(Time, event) ~ drug + age, data = pbc2.id, model = TRUE)
##D
##D JMFit <- mvJointModelBayes(MixedModelFit, CoxFit, timeVar = "year")
##D
##D # We want survival probabilities for three subjects
##D ND <- pbc2[pbc2$id %in% c(2, 25, 81), ]
##D
##D sprobs <- survfitJM(JMFit, ND)
##D sprobs
##D
##D # Basic plot
##D plot(sprobs)
##D
##D # split in a 2 rows 2 columns and include the survival function in
##D # a separate panel; plot only the third & first subjects; change various defaults
##D plot(sprobs, split = c(3, 2), surv_in_all = FALSE, which_subjects = c(3, 1),
##D lty_lines_CI = 3, col_lines = "blue", col_fill_CI = "red",
##D col_points = "pink", pch_points = 12)
##D
##D ###########################################################################
##D
##D # run Shiny app
##D runDynPred()
## End(Not run)
|
8c6db951715d5d8e02220328b4952605157a610e | 8ca4a11106d883c6ada47acae72757018a77cb0d | /V1/func.R | 27fde3a18ebea8d2a9272f99b7c28379970ab37b | [] | no_license | kgalinsky/GroupLasso | b8e818779ba10c0b9f8280113bb30ea378f40f27 | 209af88aee576b325c1e9553a95d384df03de102 | refs/heads/master | 2020-03-28T16:27:47.536281 | 2018-09-13T20:49:22 | 2018-09-13T20:49:22 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,934 | r | func.R | ### Logistic
# Two objective/gradient pairs.  NOTE: defining the second pair OVERWRITES
# the first, so only the "ordinary" least-squares versions are in effect
# below; comment out a pair to select the other model.
# Logistic regression: negative log-likelihood and its gradient.
f <- function(beta,X,y) { -t(y)%*%(X%*%beta) + sum(log(1+exp(X%*%beta))) } # objective function
gradf <- function(beta,X,y) { -t(X)%*%(y-plogis(X%*%beta)) } # gradient
### Ordinary least squares: 0.5*||X*beta - y||_F^2 and its gradient.
f <- function(beta,X,y){ 0.5*norm(X%*%beta - y, "F")^2 }
gradf <- function(beta,X,y){ t(X)%*%(X%*%beta - y) }
### Penalty
# Partition the stacked coefficient vector `beta` into its five named blocks
# X, W, G1, G2 and I, whose lengths are given by the corresponding m_*
# arguments (in that order).  Returns a named list of sub-vectors.
split_beta<-function(beta,m_X,m_W,m_G1,m_G2,m_I){
  block_sizes <- c(m_X, m_W, m_G1, m_G2, m_I)
  block_id <- rep(seq_along(block_sizes), block_sizes)
  blocks <- split(beta, block_id)
  names(blocks) <- c("X", "W", "G1", "G2", "I")
  blocks
}
# Split the design matrix X into per-block sub-matrices: one block for the
# X covariates, one for W, one per G1 column, one per G2 column, and each of
# the m_I interaction columns merged into the matching G2 block.
# NOTE(review): `ma` has one entry per column, but split() flattens X
# column-major and recycles `ma` over all of its elements; also
# matrix(x, nrow=n) reads `n` as a free (global) variable.  Confirm this
# groups whole columns as intended before reusing elsewhere.
split_X<-function(X,m_X,m_W,m_G1,m_G2,m_I){
  ma<-rep(1:(2+m_G1+m_G2),c(m_X,m_W,rep(1,m_G1),rep(1,m_G2)))
  ma<-c(ma,rep((2+m_G1+1):(2+m_G1+m_G2),rep(1,m_I)))
  X_split<-split(X,ma)
  X_split<-lapply(X_split,function(x) x=matrix(x,nrow=n))
  return(X_split)
}
# Penalty parameters
# Compute the per-group penalty weights: the Frobenius norm of each design
# sub-matrix returned by split_X(), with the unpenalized X and W blocks
# dropped.  Returns a list with elements G1 (length m_G1) and G2 (length m_G2).
group_penalty<-function(X,m_X,m_W,m_G1,m_G2,m_I){
  blocks <- split_X(X, m_X, m_W, m_G1, m_G2, m_I)
  block_norms <- vapply(blocks, function(b) norm(b, "F"), numeric(1))[-(1:2)]
  weights <- split(block_norms, rep(1:2, c(m_G1, m_G2)))
  names(weights) <- c("G1", "G2")
  weights
}
# Composite penalty g(beta): a weighted lasso on the G1 block plus a weighted
# group-lasso on each (G2[j], I[j]) coefficient pair; the X and W blocks are
# unpenalized.  Weights come from group_penalty() (Frobenius norms of the
# design sub-matrices).
g <- function(X,beta,m_X,m_W,m_G1,m_G2,m_I,lambda1,lambda2) {
  # Split the stacked coefficient vector into its named blocks.
  beta<-split_beta(beta,m_X,m_W,m_G1,m_G2,m_I)
  # Per-group weights derived from the design matrix.
  para<-group_penalty(X,m_X,m_W,m_G1,m_G2,m_I)
  # norm(., '1') on a column matrix is the sum of absolute values, so the
  # first term is a weighted L1 penalty on G1; the second term is the
  # Euclidean norm of each (G2, I) pair.
  penalty<-lambda1*norm(as.matrix(para$G1*beta$G1),'1')+lambda2*sum(para$G2*sqrt(beta$G2^2+beta$I^2))
  return(penalty)
}
# Proximal operator of the composite penalty g() with step size tau:
# soft-thresholding on the G1 block (lasso prox) and group soft-thresholding
# on each (G2[j], I[j]) pair; the X and W blocks pass through unchanged.
# Returns the updated coefficients re-stacked as one unnamed vector.
proxg <- function(X,beta,m_X,m_W,m_G1,m_G2,m_I,tau,lambda1,lambda2) {
  beta<-split_beta(beta,m_X,m_W,m_G1,m_G2,m_I)
  para<-group_penalty(X,m_X,m_W,m_G1,m_G2,m_I)
  # Elementwise soft-threshold of G1 at tau*lambda1*weight.
  beta$G1<-sign(beta$G1)*(sapply(abs(beta$G1) - tau*lambda1*para$G1,FUN=function(x) {max(x,0)}))
  # Pair up G2[j] with I[j].  cbind makes an m_G2 x 2 matrix; split() walks
  # it column-major and recycles the m_G2 group ids, so group j collects
  # exactly {G2[j], I[j]}.
  beta_temp<-cbind(beta$G2,beta$I)
  beta_temp<-split(beta_temp,rep(1:m_G2,rep(1,m_G2)))
  #beta_temp<-apply(beta_temp,1,FUN=function(x) { max(0,1-lambda2*tau/(sum(x^2))^0.5) *x})
  # Group soft-threshold: shrink each pair toward zero by its weighted
  # group norm; mapply returns a 2 x m_G2 matrix (row 1 = G2, row 2 = I).
  beta_temp<-mapply(FUN=function(x,y) { max(0,1-lambda2*tau*y/(sum(x^2))^0.5) *x},beta_temp,para$G2)
  beta$G2<-beta_temp[1,]
  beta$I<-beta_temp[2,]
  # Re-stack the blocks into one plain numeric vector.
  beta<-unlist(beta,use.names = F)
  return(beta)
}
|
796b58589b09c481c02cd4e275d8add5da35c9a6 | cf4f79fa6328ae5cd50ecc4d4f4c0ae283b65db1 | /R/1_Discretization/main_discretization.R | 5dcc1eb0cea8cfa16a63f4617bb7e4ef5351bbd8 | [] | no_license | keizai-seminar-quant-macro/chapter2 | 9736332138fe516fca851aebc817d025d91de714 | 8f9b53a2edf473fce804d67148ede4cd35c4b090 | refs/heads/master | 2020-04-21T19:38:36.150834 | 2019-04-18T08:58:51 | 2019-04-18T08:58:51 | 169,814,328 | 6 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,008 | r | main_discretization.R | # メインファイル
# 状態変数と操作変数を離散化して2期間モデルを解く.
# environmentから変数をクリア
rm(list=ls())
source("CRRA.R")
# カリブレーション
beta <- 0.985**30 # 割引因子
gamma <- 2.0 # 相対的危険回避度
rent <- 1.025**30-1.0 # 純利子率
# パラメータ
nw <- 10 # 所得グリッドの数
na <- 40 # 貯蓄グリッドの数
w_max <- 1.0 # 所得グリッドの最大値
w_min <- 0.1 # 所得グリッドの最小値
a_max <- 1.0 # 貯蓄グリッドの最大値
a_min <- 0.025 # 貯蓄グリッドの最小値
# 計算開始
time_start <- proc.time()
cat('', "\n")
cat('-+- Solve two period model using discretization -+-', "\n")
cat('', "\n")
# グリッドポイントを計算
grid_w <- seq(from = w_min, to = w_max, length.out = nw)
grid_a <- seq(from = a_min, to = a_max, length.out = na)
# あらゆる(w,a)の組み合わせについて生涯効用を計算
# 初期化
obj <- matrix(data = 0.0, nrow = na, ncol = nw)
for (i in 1:nw) {
for (j in 1:na) {
cons <- grid_w[i] - grid_a[j]
if (cons > 0.0) {
obj[j, i] <- CRRA(cons, gamma) + beta*CRRA((1.0+rent)*grid_a[j], gamma)
} else {
obj[j, i] <- -10000.0
}
}
}
# 効用を最大にする操作変数を探し出す:政策関数
# 初期化
pol <- matrix(data = 0.0, nrow = nw, ncol = 1)
# 各wについて生涯効用を最大にするaを探す
for (i in 1:nw) {
maxl <- which.max(obj[1:na, i])
pol[i] = grid_a[maxl]
}
# 計算時間をカウント終了
cat(" Time = ", proc.time() - time_start, "\n")
cat('', "\n")
# 図を描く
library("ggplot2")
library("extrafont")
# Figure 1: value function
df <- data.frame(grid = grid_w, policy = pol)
g1 <- ggplot()
g1 <- g1 + geom_line(data=df, aes(x = grid, y = policy), size=1.5)
g1 <- g1 + labs(title="Policy Function", x="Wage: w", y="Saving: a")
plot(g1)
ggsave(file = "Fig_pol_discr.eps", width = 8, height = 6)
|
cb6eea75b88f83b38f89afde67dd815bea167616 | 4c0ee84e0691c47eee62348fe5b5621236c633cf | /Meyers-monograph/appendix/SCC.R | fe94cf0100aa133a4319eae6c25fa4b34594cdd3 | [] | no_license | problemofpoints/advanced-reserve-methods | 2ebdeb5400b3405511b745480cd51a20c18d42f1 | 464b96891f2ac75396ef972925d46fa6ed290c9f | refs/heads/master | 2020-07-28T10:57:41.677904 | 2020-01-03T21:57:30 | 2020-01-03T21:57:30 | 209,399,584 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,672 | r | SCC.R | #
# Script to run the SCC Model on data from the CAS Loss Reserve Database
# Uses Stan for the MCMC run
# by Glenn Meyers
# Run time on my computer - < 1 minute
#
####### Most of the script is in functions - They are called at the end.
#
#
rm(list = ls()) # clear workspace
t0=Sys.time()
#
# get packages
#
library(rstan)
rstan_options(auto_write = TRUE)
options(mc.cores = parallel::detectCores())
library(loo)
library(data.table)
library(actuar)
library(ChainLadder)
#
# function to get the loss data
#
ins.line.data=function(a,g.code){
  # Extract one company's loss triangle from a CAS Loss Reserve Database
  # table and return it as a data.table sorted by development lag, then
  # accident year.
  #
  # a      - full database table for one line of business
  # g.code - company group code (GRCODE) to extract
  b=subset(a,a$GRCODE==g.code)
  grpcode=b$GRCODE
  w=b$AccidentYear-min(b$AccidentYear)+1   # accident year index 1..10
  d=b$DevelopmentLag
  cal=w+d-1                                # calendar year index
  cum_incloss=b[,6]
  cum_pdloss=b[,7]
  bulk_loss=b[,8]
  dir_premium=b[,9]
  ced_premium=b[,10]
  net_premium=b[,11]
  single=b[,12]
  posted_reserve97=b[,13]
  cum_incloss=cum_incloss-bulk_loss #per CAS Loss Reserve Database
  # incremental paid losses: first differences of the cumulative paid losses
  # within each accident year (assumes data is sorted by ay and lag).
  # diff(c(0, ...)) replaces the original element-by-element loop that grew
  # the result with c() on every iteration.
  inc_pdloss=unlist(lapply(unique(w), function(i) diff(c(0, cum_pdloss[w==i]))),
                    use.names=FALSE)
  # ad hoc adjustment to accommodate the lognormal distribution (log(0) guard)
  cum_incloss=pmax(cum_incloss,1)
  cum_pdloss=pmax(cum_pdloss,1)
  #
  data.out=data.table(grpcode,w,d,cal,net_premium,dir_premium,ced_premium,
                      cum_pdloss,cum_incloss,bulk_loss,inc_pdloss,single,
                      posted_reserve97)
  data.out=data.out[order(d,w)]
  return(data.out)
}
#
# residual plot function
#
SCC_resid_plot=function(co_model,grpcode,lineobus,losstype){
  # Standardized-residual box plots for the SCC model, by accident year and
  # by development year, computed over 100 random posterior draws.
  #
  # co_model - list of posterior draws: logelr (vector), beta and sig
  #            (draws x 10 matrices)
  # grpcode  - company group code in the CAS Loss Reserve Database
  # lineobus - line-of-business label used in the plot title
  # losstype - "Paid" or "Incurred"
  #
  # Reads the global CASdata and calls ins.line.data().
  lossData <- ins.line.data(CASdata,grpcode)
  train_data=subset(lossData,lossData$cal<11)
  if (losstype=="Paid"){logloss=log(train_data$cum_pdloss)}
  if (losstype=="Incurred"){logloss=log(train_data$cum_incloss)}
  w=train_data$w
  d=train_data$d
  Premium=train_data$net_premium[1:10]
  #
  # residuals vectorized over observations for each sampled posterior draw
  # (the original inner loop grew std_resid/ay/dy one element at a time)
  n_samp=100
  samp=sample(1:length(co_model$logelr),n_samp)
  std_resid=unlist(lapply(samp,function(s){
    mu=log(Premium[w])+co_model$logelr[s]+co_model$beta[s,d]
    (logloss-mu)/co_model$sig[s,d]
  }))
  ay=rep(w,n_samp)
  dy=rep(d,n_samp)
  par(mfrow=c(1,2))
  # factor levels 1:10 keep any empty accident/development years as empty boxes
  AY=split(std_resid,factor(ay,levels=1:10))
  boxplot(AY,notch=T,col="gray",names=1:10,
          main="",xlab="Accident Year")
  abline(h=0,lwd=5)
  abline(h=qnorm(.25))
  abline(h=qnorm(.75))
  #
  DY=split(std_resid,factor(dy,levels=1:10))
  boxplot(DY,notch=T,col="gray",names=1:10,
          main="",xlab="Development Year")
  abline(h=0,lwd=5)
  abline(h=qnorm(.25))
  abline(h=qnorm(.75))
  mtext((paste("SCC Model Standardized Residual Box Plots",
               "\n",lineobus," Group",grpcode)),
        side = 3, line = -2.75, outer = TRUE)
}
#
# function to make simulations reproducible - from Zongwen Tan
#
extract_ordered <- function(stanfit) {
  # Extract posterior draws in a reproducible order (rstan::extract with
  # permuted = TRUE shuffles draws; permuted = FALSE keeps chain order).
  # Returns a named list with one matrix per parameter, chains stacked
  # row-wise in chain order.
  draws <- rstan::extract(stanfit, permuted = FALSE)  # iters x chains x params
  n_chains <- dim(draws)[2]
  chain_list <- lapply(seq_len(n_chains), function(ch) as.data.frame(draws[, ch, ]))
  mat_df <- do.call(rbind, chain_list)
  cols_all <- colnames(mat_df)
  # per-column parameter name with indices stripped, e.g. "beta[3]" -> "beta"
  base_names <- gsub("[0-9]|\\[|\\]", "", cols_all)
  collapse_names <- unique(base_names)
  # exact match on the stripped name: the previous substring grepl() would
  # wrongly merge parameters that are substrings of others (e.g. "beta"
  # also matched every "r_beta[i]" column)
  posterior_list <- lapply(collapse_names,
                           function(p) as.matrix(mat_df[cols_all[base_names == p]]))
  names(posterior_list) <- collapse_names
  posterior_list
}
#
# Stan SCC script
#
# Lognormal loss model: mu = log(premium) + logelr + beta[d] with the last
# development-lag beta pinned to 0.  The sig2 values are built as cumulative
# sums of inverse-gamma-driven increments so sigma is nonincreasing in the
# development lag.  The program below is Stan code held in an R string --
# its contents are runtime data, not R code.
SCCmodel_stan = "
data{
int<lower=1> len_data;
real logprem[len_data];
real logloss[len_data];
int<lower=1,upper=10> w[len_data];
int<lower=1,upper=10> d[len_data];
}
parameters{
real r_beta[9];
real logelr;
real <lower=0,upper=100000> a_ig[10];
}
transformed parameters{
real beta[10];
real sig2[10];
real sig[10];
real mu[len_data];
for (i in 1:9) beta[i] = r_beta[i];
beta[10] = 0;
sig2[10] = gamma_cdf(1/a_ig[10],1,1);
for (i in 1:9) sig2[10-i] = sig2[11-i]+gamma_cdf(1/a_ig[i],1,1);
for (i in 1:10) sig[i] = sqrt(sig2[i]);
for (i in 1:len_data){
mu[i] = logprem[i]+logelr+beta[d[i]];
}
}
model {
r_beta ~ normal(0,3.162);
for (i in 1:10) a_ig[i] ~ inv_gamma(1,1);
logelr ~ normal(-.4,3.162);
for (i in 1:len_data) logloss[i] ~ normal(mu[i],sig[d[i]]);
}
generated quantities{
vector[len_data] log_lik;
for (i in 1:len_data) log_lik[i] = normal_lpdf(logloss[i]|mu[i],sig[d[i]]);
}
"
#
# initialization function for SCCModel
#
init.SCC=function(chain_id){
  # NOTE(review): the fixed set.seed(12345) combined with the ignored
  # chain_id means every chain starts from the SAME initial values --
  # confirm this is intended (it aids reproducibility but removes
  # between-chain dispersion of starting points).
  set.seed(12345)
  list(r_beta=runif(9),a_ig=runif(10),
       logelr=runif(1,-0.75,-0.5))
}
#
# dummy data for compiling
#
# placeholder 10x10 upper-left triangle (55 cells), used only so Stan can
# compile the model once; the compiled fit is reused for the real data below
data.dummy=list(len_data = 55,
                logprem = rep(8,55),
                logloss = rep(8,55),
                w = c(1:10,1:9,1:8,1:7,1:6,1:5,1:4,1:3,1,2,1),
                d = c(rep(1,10),rep(2,9),rep(3,8),rep(4,7),rep(5,6),
                      rep(6,5),rep(7,4),rep(8,3),rep(9,2),10))
#
# compile the SCC model
#
# chains=0: compile only, no sampling yet
fitC_SCC = stan(model_code=SCCmodel_stan,data=data.dummy,
                seed=12345,
                init=init.SCC,chains=0)
#
# parameters kept in the Stan output
pars.list=c("logelr","beta","sig","log_lik")
#
# set up function to run stan model and create output
#
model_function=function(CASdata,grpcode,losstype){
  # Fit the SCC model to one company's triangle and summarise the results.
  #
  # CASdata  - full CAS Loss Reserve Database table for one line of business
  # grpcode  - company group code to model
  # losstype - "Paid" or "Incurred" (selects which cumulative losses to fit)
  #
  # Returns a list: accident-year risk statistics, predictive distribution
  # of total losses, Stan parameter summary, posterior draws, and a one-row
  # summary statistics data frame.
  lossData <- ins.line.data(CASdata,grpcode)
  # upper triangle (calendar year <= 10) is training data; the lower
  # triangle (calendar year > 10) is held out for validation
  train_data=subset(lossData,lossData$cal<11)
  test_data=subset(lossData,lossData$cal>10)
  Premium=train_data$net_premium[1:10]
  if (losstype=="Paid"){
    loss=train_data$cum_pdloss
    test=test_data$cum_pdloss
  }
  if (losstype=="Incurred"){
    loss=train_data$cum_incloss
    test=test_data$cum_incloss
  }
  #
  # data for the model
  #
  data.SCC=list(len_data = length(loss),
                logprem = log(train_data$net_premium),
                logloss = log(loss),
                w = train_data$w,
                d = train_data$d)
  #
  # run the model, doubling thinning/iterations until max Rhat converges
  # below the target (or thinning reaches 16)
  #
  stan_thin=1
  stan_iter=5000
  Rhat_target=1.05
  max_Rhat=2
  while ((max_Rhat > Rhat_target)&(stan_thin<17)){
    fitSCC=stan(fit = fitC_SCC, data = data.SCC,init=init.SCC,
                seed = 12345,iter=stan_iter,thin=stan_thin,
                chains = 4,pars=pars.list,
                control=list(adapt_delta=.9999,max_treedepth=50),
                refresh=0)
    # mean, sd and Rhat for the first 21 parameters
    fitSCC_summary=
      as.matrix(summary(fitSCC)$summary)[1:21,c(1,3,10)]
    mrh=subset(fitSCC_summary,is.na(fitSCC_summary[,3])==F)
    max_Rhat=round(max(mrh[,3]),4)
    print(paste("Maximum Rhat =",max_Rhat,"Thin =",stan_thin))
    stan_thin=2*stan_thin
    stan_iter=2*stan_iter
  }
  # undo the final doubling so stan_thin reports the value actually used
  stan_thin=stan_thin/2
  #
  # goodness of fit statistics for comparing models
  #
  loglik1=extract_log_lik(fitSCC)
  elpd_stats=loo(loglik1)
  #
  # extract information from stan output to process in R
  #
  #b=extract(fitSCC,permuted=FALSE)
  b <- extract_ordered(fitSCC)
  beta=b$beta
  logelr=b$logelr
  sig=b$sig
  num.mcmc=length(logelr)
  #
  # calculate test_elpd (out-of-sample log predictive density) for test data
  #
  test_elpd=0
  for (i in 1:dim(test_data)[1]){
    mu.test=log(Premium[test_data$w[i]])+logelr+beta[,test_data$d[i]]
    test_elpd=test_elpd+
      log(mean(dnorm(log(test[i]),mu.test,sig[,test_data$d[i]],log=F)))
  }
  test_elpd=round(test_elpd,3)
  #
  # simulate loss statistics by accident year
  #
  set.seed(12345)
  value=exp(data.SCC$logloss)
  origin=data.SCC$w
  dev=data.SCC$d
  asrectangle=data.frame(origin,dev,value)
  astriangle=as.triangle(asrectangle)
  # simulated ultimate (lag 10) losses by accident year; year 1 is observed
  at.wd10=matrix(loss[55],num.mcmc,10)
  mu.wd10=rep(0,num.mcmc)
  for (w in 2:10){
    mu.wd10=log(Premium[w])+logelr
    mu.diag=log(Premium[w])+logelr+beta[,11-w]
    # simulated ultimate minus expected latest diagonal plus observed diagonal
    at.wd10[,w]=ceiling(rlnorm(num.mcmc,mu.wd10,sig[,10]))-
      exp(mu.diag+sig[,11-w]^2/2)+astriangle[w,11-w]
  }
  #
  # calculate loss statistics and output to data frame
  #
  ss.wd10=rep(0,10)
  ms.wd10=rep(0,10)
  #
  ms.wd10[1]=mean(at.wd10[,1])
  for (w in 2:10){
    ms.wd10[w]=mean(at.wd10[,w])
    ss.wd10[w]=sd(at.wd10[,w])
  }
  Pred.SCC=rowSums(at.wd10)
  ms.td10=mean(Pred.SCC)
  ss.td10=sd(Pred.SCC)
  SCC.Estimate=round(ms.wd10)
  SCC.SE=round(ss.wd10)
  SCC.CV=round(SCC.SE/SCC.Estimate,4)
  # actual lag-10 outcomes from the full (retrospective) data
  if (losstype=="Paid"){act=subset(lossData$cum_pdloss,lossData$d==10)}
  if (losstype=="Incurred"){act=subset(lossData$cum_incloss,lossData$d==10)}
  sumact=sum(act)
  # percentile of the actual outcome within the predictive distribution
  pct.SCC=sum(Pred.SCC<=sumact)/length(Pred.SCC)*100 #
  # put SCC accident year statistics into a data frame
  #
  SCC.Estimate=c(SCC.Estimate,round(ms.td10))
  SCC.SE=c(SCC.SE,round(ss.td10))
  SCC.CV=c(SCC.CV,round(ss.td10/ms.td10,4))
  Premium=c(Premium,sum(Premium))
  Outcome=act
  Outcome=c(Outcome,sum(Outcome))
  Group=rep(grpcode,11)
  SCC.Pct=c(rep(NA,10),pct.SCC)
  W=c(1:10,"Total")
  risk=data.frame(Group,W,Premium,SCC.Estimate,SCC.SE,SCC.CV,Outcome,SCC.Pct)
  # reduce the vectors to their totals for the one-row summary
  Group=grpcode
  Premium=Premium[11]
  SCC.Estimate=SCC.Estimate[11]
  SCC.SE=SCC.SE[11]
  Outcome=Outcome[11]
  SCC.Pct=SCC.Pct[11]
  elpd_loo=round(elpd_stats$estimates[1,1],3)
  p_loo=round(elpd_stats$estimates[2,1],3)
  test_elpd=round(test_elpd,3)
  SumStats=data.frame(Group,Premium,SCC.Estimate,SCC.SE,Outcome,SCC.Pct,
                      stan_thin,elpd_loo,p_loo,test_elpd)
  # NOTE(review): `alpha` is never defined in this function -- the alpha
  # entry below resolves to a global variable if one exists, otherwise this
  # errors at runtime.  Likely a leftover from another model; confirm.
  output=list(risk=risk,
              Predictive_Outcome=Pred.SCC,
              ParmSummary=fitSCC_summary,
              logelr=logelr,
              alpha=alpha,
              beta=beta,
              sig=sig,
              SumStats=SumStats)
  return(output)
}
#
# Single triangle
#
# read one line of business from the CAS Loss Reserve Database and run the model
CASdata = read.csv("~/Dropbox/CAS Loss Reserve Database/comauto_pos.csv")
# Location of files in the CAS Loss Reserve Database
# http://www.casact.org/research/reserve_data/comauto_pos.csv
# http://www.casact.org/research/reserve_data/ppauto_pos.csv
# http://www.casact.org/research/reserve_data/wkcomp_pos.csv
# http://www.casact.org/research/reserve_data/othliab_pos.csv
lineobus="CA"
losstype="Paid" #choices here are "Paid" or "Incurred"
grpcode=353
co_model=model_function(CASdata,grpcode,losstype)
print(co_model$ParmSummary)
print(" ")
print(co_model$risk)
print("")
print(co_model$SumStats)
SCC_resid_plot(co_model,grpcode,lineobus,losstype)
# report total run time (t0 is set at the top of the script)
t1=Sys.time()
print(t1-t0)
|
437777b85b9da200096f33a6272999c0e5fdc082 | 72513a9502cd2247b2ddbb8f9c05520d09851d69 | /man/pin_convert.Rd | ea4158d0b1ba28aa373bbde9520f82fafb381e91 | [] | no_license | kozo2/sweidnumbr | 8187c2dab554286da3d5ca670aaf328a23a50cd7 | eb338dc206e8b0a9aab54ef77ec2dd6766123390 | refs/heads/master | 2021-05-10T12:35:11.191839 | 2016-09-14T14:54:19 | 2016-09-14T14:54:19 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 437 | rd | pin_convert.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pin_internal.R
\name{pin_convert}
\alias{pin_convert}
\title{pin_convert}
\usage{
pin_convert(pin, format)
}
\arguments{
\item{pin}{A character element of length one.}
\item{format}{Which format should be converted. See \link{as.pin}.}
}
\value{
Character element in standard format.
}
\description{
Converts one pin to standard format
}
\keyword{internal}
|
70c37cb30be0c068e40f222f712a94af9394e3ff | 39da6ebcf2d578230dbaf05713864757ac85da0c | /Plot1.R | 7470e649a4371440953a27085a82efb35a04e5e4 | [] | no_license | HuichunChien/ExData_Plotting1 | 6b106fa75d6f4ce4528e5ab564260b5a418941cb | b7d78d1c5c1d0323fdbc2e32122a4514b87056e6 | refs/heads/master | 2021-01-17T15:49:20.792191 | 2014-10-12T22:57:34 | 2014-10-12T22:57:34 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 616 | r | Plot1.R | # Plot1.R
data=read.table("household_power_consumption.txt", na.strings = "?", sep=";", header=TRUE)
#get subdata with data are 1/2/2007 and 2/2/2007
subdata<-subset(data,data$Date=='1/2/2007'|data$Date=='2/2/2007', select=c(Global_active_power,Global_reactive_power,Voltage,Global_intensity ,Sub_metering_1,Sub_metering_2,Sub_metering_3))
# plot plot1.png
png("plot1.png", height=480, width=480)
hist(as.numeric(subdata$Global_active_power), col="red", xlab="Global active power(kilowatts)", ylab="Frequency", main="Global active power")
axis(1,seq(from=0,to=6,by=2))
axis(2,seq(from=0,to=1200,by=200))
dev.off()
|
7322b7648e6e0b1b19a779adde50de1d35d2e616 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/intReg/examples/coef.intReg.Rd.R | 0c8fa238c4322137dc8210957bd1a92373f6d185 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,310 | r | coef.intReg.Rd.R | library(intReg)
### Name: coef.intReg
### Title: Extract (only informative) coefficients, standard errors, and
### variance-covariance matrix from 'intReg' model.
### Aliases: coef.intReg print.coef.intReg print.summary.intReg
### summary.intReg stdEr.intReg vcov.intReg
### Keywords: methods
### ** Examples
## Example of observation-specific boundaries
## Estimate the willingness to pay for the Kakadu National Park
## Data given in intervals -- 'lower' for lower bound and 'upper' for upper bound.
## Note that dichotomous-choice answers are already coded to 'lower' and 'upper'
data(Kakadu, package="Ecdat")
set.seed(1)
Kakadu <- Kakadu[sample(nrow(Kakadu), 400),]
# subsample to speed up the estimation
## Estimate in log form, change 999 to Inf
## (999 codes an unbounded upper interval in the original data)
lb <- log(Kakadu$lower)
ub <- Kakadu$upper
ub[ub > 998] <- Inf
ub <- log(ub)
## two-column response (lower, upper) encodes the interval-censored WTP
y <- cbind(lb, ub)
m <- intReg(y ~ sex + log(income) + age + schooling +
recparks + jobs + lowrisk + wildlife + future + aboriginal + finben +
mineparks + moreparks + gov +
envcon + vparks + tvenv + major, data=Kakadu)
## You may want to compare the results to Werner (1999),
## Journal of Business and Economics Statistics 17(4), pp 479-486
print(coef(m))
print(coef(m, boundaries=TRUE))
print(nObs(m))
48c6f3cb267e107bbf437c65b2f8d9811bde2c70 | 3bcfdd173001146e637d459b05a8e3e2723cd4f7 | /scripts/pipeline_all_diseases.R | 1592253624a24201c56b3c608ddf482d1e781564 | [] | no_license | aidanmacnamara/netChoose | 2c97e0f0717969c6ea215098ca15a3a706ca271b | b1b4ced8c54da2e4b8879b68b1fdbe2d511cc168 | refs/heads/main | 2023-01-24T18:28:02.954851 | 2020-11-24T05:15:05 | 2020-11-24T05:15:05 | 315,525,039 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 21,053 | r | pipeline_all_diseases.R |
# LOAD --------------------------------------------------------------------
require(tidyverse)
require(biomaRt)
require(eulerr)
require(jsonlite)
require(readxl)
require(igraph)
require(sparklyr)
# MAPPING -----------------------------------------------------------------
mapping = read_excel("gwas/HumanGeneList_17Sep2018_workup_betterensembl_list.xlsx") # use mark's mapping file
names(mapping)[1:3] = c("ensembl_gene_id","entrezgene","hgnc_symbol")
# use karsten's mapping
mesh_to_analysis = read_excel("gwas/ukbb_ttam_finngen_phenotype_view_current_20190326.xlsx", sheet="UKBB")
# clean up names
names(mesh_to_analysis) <- str_replace_all(names(mesh_to_analysis), " ", "_") %>% str_replace_all(., regex('[:punct:]'), "_") %>% str_replace_all(., regex('_{2,}'), "_") %>% str_to_lower()
# tidy and select relevant columns
mesh_to_analysis <- mesh_to_analysis %>% filter(!str_detect(analysis, "n/a")) %>% distinct(analysis, mesh_id)
# successful drug information
data_clin = read_excel("gwas/Pprojects_1Nov2018.xlsx") # fully mapped to entrez id
data_clin_summ = data_clin %>% filter(`Clinical Label_PP`=="Succeeded") %>% group_by(`MeSH ID`) %>% summarise(N=n()) %>% arrange(desc(N))
# fuzzy mesh mapping
fuzz = read_excel("gwas/PossibleMeSHrelationships_summary_rev.xlsx", sheet="PossibleMeSHrelationships")
# clean column names
names(fuzz) <- names(fuzz) %>% str_replace_all(., " ", "_") %>% str_replace_all(., regex('[:punct:]'), "_") %>% str_replace_all(., regex('_{2,}'), "_") %>% str_to_lower()
# filter for robust relationships (mark hurle suggested using 6 as a cutoff)
fuzz_filt <- fuzz %>% filter(relationship_class <= 6)
# export to cytoscape
fuzz_nodes = fuzz_filt[,1:2]; names(fuzz_nodes) = c("mesh","phenotype")
fuzz_nodes_2 = fuzz_filt[,3:4]; names(fuzz_nodes_2) = c("mesh","phenotype")
fuzz_nodes = rbind(fuzz_nodes, fuzz_nodes_2); fuzz_nodes = distinct(fuzz_nodes)
rm(fuzz_nodes_2)
write_tsv(fuzz_nodes, "cytoscape/fuzz_nodes.txt")
fuzz_edges = fuzz_filt %>% dplyr::select(-one_of(c("mesh_name_2","mesh_name_4")))
write_tsv(fuzz_edges, "cytoscape/fuzz_edges.txt")
# connect to gwas and clean
fuzz_to_gwas = data_clin %>% filter(`Clinical Label_PP`=="Succeeded" | `Clinical Label_PP`=="Clinical Failure") %>% inner_join(fuzz_filt, by=c("MeSH ID"="msh1")) %>% dplyr::select("EntrezGene ID", "Clinical Label_PP", "MeSH ID", msh2, relationship_class) %>% inner_join(mesh_to_analysis, by=c("msh2"="mesh_id")) %>% dplyr::select(`MeSH ID`,"msh2","relationship_class","analysis") %>% distinct()
names(fuzz_to_gwas)[1] = "msh1"
# GWAS --------------------------------------------------------------------
# full coloc results
coloc = read_tsv("~/Downloads/2019_03_12_ukb_all_colocs_overlap_GWAS.tsv") # all (karsten's table)
# karsten's computed coloc_hc
coloc_hc = read_tsv("~/Downloads/2019_03_12_ukb_positive_target_indications.tsv") # high-confidence
# filtered coloc_hc
# coloc_hc = filter(coloc, minpval2 <= 5e-8, minpval1 <= 1e-4, p12 >= 0.8)
# coloc_hc = coloc_hc %>% group_by(entity) %>% slice(which.max(p12))
# coloc edits
coloc_hc$entrez = mapping$entrezgene[match(coloc_hc$entity, mapping$ensembl_gene_id)]
# TMEM133
coloc_hc$entity[which(coloc_hc$hgncid=="TMEM133")] = "ENSG00000165895"
coloc_hc$entrez[which(coloc_hc$hgncid=="TMEM133")] = 143872
coloc_hc$hgncid[which(coloc_hc$hgncid=="TMEM133")] = "ARHGAP42"
filter(coloc_hc, is.na(entrez)) %>% dplyr::select(hgncid) %>% distinct() # all entrez/ensembl mapped
coloc$entity[which(coloc$hgncid=="TMEM133")] = "ENSG00000165895"
coloc$hgncid[which(coloc$hgncid=="TMEM133")] = "ARHGAP42"
# filter
my_gwas = coloc_hc %>% group_by(analysis2) %>% summarise(N=n()) %>% arrange(desc(N)) %>% dplyr::select(analysis2)
names(my_gwas) = "gwas"
table(my_gwas$gwas %in% fuzz_to_gwas$analysis) # which gwas have a mesh id
my_gwas$gwas[!my_gwas$gwas %in% fuzz_to_gwas$analysis]
my_gwas$mesh = fuzz_to_gwas$msh1[match(my_gwas$gwas, fuzz_to_gwas$analysis)] # remove any without a mapping to mesh
my_gwas = my_gwas[!is.na(my_gwas$mesh),] # only keep those with a mesh mapping
my_gwas$disease = fuzz$mesh_name_2[match(my_gwas$mesh, fuzz$msh1)]
# RESULTS DATA FRAME ------------------------------------------------------
# build mega_tbl: one row per (protein-coding gene, GWAS) pair, with 0/1
# flags for high-confidence genetic support and clinical success/failure
mapping_filt = mapping %>% filter(type_of_gene=="protein-coding") %>% dplyr::select(1:3)
mega_tbl = do.call("rbind", replicate(dim(my_gwas)[1], mapping_filt, simplify=FALSE))
mega_tbl$gwas = rep(my_gwas$gwas, each=dim(mapping_filt)[1])
mega_tbl$mesh = rep(my_gwas$mesh, each=dim(mapping_filt)[1])
mega_tbl$disease = rep(my_gwas$disease, each=dim(mapping_filt)[1])
mega_tbl$high_confidence_genetic=0; mega_tbl$success=0; mega_tbl$failure=0; mega_tbl$index=1:dim(mega_tbl)[1]
for(i in 1:length(my_gwas$mesh)) {
  print(i)
  # drug targets that succeeded/failed for any MeSH term fuzzily related to
  # this GWAS's MeSH id
  successes = filter(data_clin, `Clinical Label_PP`=="Succeeded", `MeSH ID` %in% unique(fuzz_to_gwas$msh2[which(fuzz_to_gwas$msh1 == my_gwas$mesh[i])])) %>% dplyr::select(`EntrezGene ID`) %>% unlist()
  failures = filter(data_clin, `Clinical Label_PP`=="Clinical Failure", `MeSH ID` %in% unique(fuzz_to_gwas$msh2[which(fuzz_to_gwas$msh1 == my_gwas$mesh[i])])) %>% dplyr::select(`EntrezGene ID`) %>% unlist()
  gwas = filter(coloc_hc, analysis2==my_gwas$gwas[i]) %>% dplyr::select(entity) %>% unlist()
  # take directly from full coloc table
  # gwas = filter(coloc, analysis2==my_gwas$gwas[i], p12 >= 0.8) %>% dplyr::select(entity) %>% unlist() %>% as.character()
  gwas = gwas[!is.na(gwas)]; gwas = unique(gwas)
  # add randomizer
  # gwas = sample(x=filter(mapping, type_of_gene=="protein-coding") %>% dplyr::select(ensembl_gene_id) %>% unlist() %>% as.character(), size=length(gwas), replace=FALSE)
  mega_tbl$success[(mega_tbl$gwas==my_gwas$gwas[i] & mega_tbl$entrezgene %in% successes)] = 1
  mega_tbl$failure[(mega_tbl$gwas==my_gwas$gwas[i] & mega_tbl$entrezgene %in% failures)] = 1
  mega_tbl$high_confidence_genetic[(mega_tbl$gwas==my_gwas$gwas[i] & mega_tbl$ensembl_gene_id %in% gwas)] = 1
}
# check numbers
coloc_hc_summ = coloc_hc %>% group_by(analysis2) %>% summarise(N=n()) %>% arrange(desc(N))
mega_tbl_n = mega_tbl %>% filter(high_confidence_genetic==1) %>% group_by(gwas) %>% summarise(N=n()) %>% arrange(desc(N))
# total number of hcghs
sum(mega_tbl_n$N) # 14374
# total number of drug targets
# NOTE(review): `my_gwas_nsp` is not defined anywhere in this script --
# probably `my_gwas` was intended; confirm before re-running
data_clin %>% filter((`Clinical Label_PP`=="Succeeded"|`Clinical Label_PP`=="Clinical Failure"), `MeSH ID` %in% unique(fuzz_to_gwas$msh2[fuzz_to_gwas$msh1 %in% my_gwas_nsp$mesh])) %>% distinct(`Target|Indication`) # 1268
# NOTE(review): theme_thesis() is not defined/loaded in this script
data.frame(coloc=coloc_hc_summ$N, mega_tbl=mega_tbl_n$N[match(coloc_hc_summ$analysis2, mega_tbl_n$gwas)]) %>% ggplot(aes(x=coloc, y=mega_tbl)) + geom_point() + theme_thesis()
# ADD GENE SCORE ----------------------------------------------------------
# attach a per-(gene, GWAS) colocalization score: h4 = best p12, and
# h4_log = -log2(1 - p12)
mega_tbl$h4 = 0
mega_tbl$h4_log = 0
# require(odbc)
# conn = dbConnect(odbc::odbc(), dsn="impaladsn")
# dbListFields(conn, "2019_03_12_ukb_all_target_indications_no_filter_max_p12")
# test_sql = dbSendQuery(conn, "SELECT * FROM `2019_03_12_ukb_all_target_indications_no_filter_max_p12` WHERE analysis2 = 'GSK500kV3_FEV1_maximumValue'")
# conf <- sparklyr::spark_config()
# conf$sparklyr.log.console <- FALSE
# conf$spark.executor.memory <- "4g"
# conf$spark.yarn.am.memory <- "4g"
# conf$spark.driver.maxResultSize <- "4g"
# connect to full coloc results
# sc <- spark_connect(
#   master = "yarn-client",
#   spark_home = "/opt/cloudera/parcels/SPARK2/lib/spark2",
#   version = "2.3",
#   config = conf
# )
# sc %>% tbl_change_db("am673712")
# src_tbls(sc)
# coloc <- tbl(sc, "2019_03_12_ukb_all_target_indications_no_filter_max_p12")
# system.time({
#   coloc_head = coloc %>% head() %>% collect()
# })
for(i in 1:dim(my_gwas)[1]) {
  print(i)
  # keep, per gene, the colocalization with the maximum p12 for this GWAS
  coloc_filt = filter(coloc, analysis2==my_gwas$gwas[i]) %>% group_by(entity) %>% dplyr::slice(which.max(p12))
  # match on gene + analysis so each score lands on the right mega_tbl row
  coloc_filt_hits = paste(coloc_filt$entity, coloc_filt$analysis2, sep="_")
  hit_ix = match(coloc_filt_hits, paste(mega_tbl$ensembl_gene_id, mega_tbl$gwas, sep="_"))
  # some not matching because id not present in mega_tbl
  na_ix = which(is.na(hit_ix))
  if(!is_empty(na_ix)) {
    hit_ix = hit_ix[!is.na(hit_ix)] # remove na indexes
    mega_tbl$h4[hit_ix] = coloc_filt$p12[-na_ix]
    mega_tbl$h4_log[hit_ix] = -log(1-coloc_filt$p12[-na_ix], base=2)
  } else {
    mega_tbl$h4[hit_ix] = coloc_filt$p12
    mega_tbl$h4_log[hit_ix] = -log(1-coloc_filt$p12, base=2)
  }
}
# HOTNET SETUP ------------------------------------------------------------
# write per-GWAS heat-score input files for HotNet (metabase_plus_ppi network)
# gene_ix = read_tsv("/Volumes/am673712/links/network_analysis/interactions/metabase_plus_ppi/metabase_plus_ppi_gene_index.txt", col_names=FALSE)
load("r_data/gene_ix.RData")
for(i in 1:dim(my_gwas)[1]) {
  print(i)
  heat_scores = data.frame(gene=gene_ix$X2, score=NA)
  # best (max p12) colocalization per gene for this GWAS
  coloc_filt = filter(coloc, analysis2==my_gwas$gwas[i]) %>% group_by(entity) %>% dplyr::slice(which.max(p12))
  # add heat scores: -log2(1 - p12), 0 for genes without a coloc result
  heat_scores$score = -log(1-coloc_filt$p12[match(heat_scores$gene, coloc_filt$hgncid)], base=2)
  heat_scores$score[is.na(heat_scores$score)] = 0
  heat_scores = arrange(heat_scores, desc(score))
  write_tsv(heat_scores, paste0("/Volumes/am673712/links/network_analysis/hotnet/hotnet/test_all_gwas/input/metabase_plus_ppi/input_", i, ".tsv"), col_names=FALSE)
}
# run on slurm
# sbatch -n 1 -c 12 --mem=50G -t 360 -o slurm_log.out run_test.sh
# OTHER NETWORK SOURCES ---------------------------------------------------
# same heat-score export for the other interaction networks (entrez ids)
networks = c("omnipath","huri","string","intomics")
for(i in 1:dim(my_gwas)[1]) {
  print(i)
  coloc_filt = filter(coloc, analysis2==my_gwas$gwas[i]) %>% group_by(entity) %>% dplyr::slice(which.max(p12))
  coloc_filt$entrez = mapping$entrezgene[match(coloc_filt$entity, mapping$ensembl_gene_id)]
  # NOTE(review): starting at j = 4 only processes "intomics" -- looks like
  # a resume of a partially completed run; confirm before rerunning
  for(j in 4:length(networks)) {
    # load gene score
    gene_ix = read_tsv(paste0("interactions/",networks[j],"/",networks[j],"_nodes.txt"), col_names=FALSE)
    # add heat scores
    heat_scores = data.frame(gene=gene_ix$X2, score=NA)
    heat_scores$score = -log(1-coloc_filt$p12[match(heat_scores$gene, coloc_filt$entrez)], base=2)
    heat_scores$score[is.na(heat_scores$score)] = 0
    heat_scores = arrange(heat_scores, desc(score))
    write_tsv(heat_scores, paste0("/Volumes/am673712/links/network_analysis/hotnet/hotnet/test_all_gwas/input/", networks[j], "/input_", i, ".tsv"), col_names=FALSE)
  }
}
# OVERLAPS ----------------------------------------------------------------
# Euler diagrams of genetic support vs clinical success/failure
plot(euler(
  list(
    `high confidence genetics` = mega_tbl$index[mega_tbl$high_confidence_genetic==1],
    successes = mega_tbl$index[mega_tbl$success==1],
    failures = mega_tbl$index[mega_tbl$failure==1]
  )
), quantities=TRUE)
# remove target/gwas pairs where success==1 and failure==1 - this results from fuzzy mesh matching
mega_tbl = mega_tbl %>% filter(!(success==1 & failure==1))
plot(euler(
  list(
    `high confidence genetics` = mega_tbl$index[mega_tbl$high_confidence_genetic==1],
    successes = mega_tbl$index[mega_tbl$success==1],
    failures = mega_tbl$index[mega_tbl$failure==1]
  )
), quantities=TRUE)
# COMPLEX DATA ------------------------------------------------------------
# flag genes that share a protein complex with a high-confidence hit
mega_tbl$complex = 0
net_complex = read_tsv("interactions/complex_portal/net_complex.tsv")
for(i in 1:length(my_gwas$mesh)) {
  print(i)
  hits = as.numeric(unlist(mega_tbl[mega_tbl$gwas==my_gwas$gwas[i] & mega_tbl$high_confidence_genetic==1, 'entrezgene']))
  # complex pairs containing at least one hit
  c_ix = apply(net_complex[,1:2], 1, function(x) any(x %in% hits))
  table(c_ix)
  if(all(!c_ix)) {
    next
  } else {
    mega_tbl$complex[(mega_tbl$gwas==my_gwas$gwas[i] & mega_tbl$entrezgene %in% unique(unlist(net_complex[c_ix,1:2])))] = 1
  }
}
plot(euler(
  list(
    `high confidence genetics` = mega_tbl$index[mega_tbl$high_confidence_genetic==1],
    successes = mega_tbl$index[mega_tbl$success==1],
    failures = mega_tbl$index[mega_tbl$failure==1],
    complex = mega_tbl$index[mega_tbl$complex==1]
  )
), quantities=TRUE)
# LIGAND RECEPTOR ---------------------------------------------------------
# flag genes that share a ligand-receptor pair with a high-confidence hit
mega_tbl$ligand_receptor = 0
mark_proxy_filt = read_tsv("interactions/ligand_receptor_db/mark_proxy_filt.txt")
for(i in 1:length(my_gwas$mesh)) {
  print(i)
  hits = as.numeric(unlist(mega_tbl[mega_tbl$gwas==my_gwas$gwas[i] & mega_tbl$high_confidence_genetic==1, 'entrezgene']))
  lr_ix = apply(mark_proxy_filt, 1, function(x) any(x %in% hits))
  table(lr_ix)
  if(all(!lr_ix)) {
    next
  } else {
    mega_tbl$ligand_receptor[(mega_tbl$gwas==my_gwas$gwas[i] & mega_tbl$entrezgene %in% unique(unlist(mark_proxy_filt[lr_ix,])))] = 1
  }
}
plot(euler(
  list(
    `high confidence genetics` = mega_tbl$index[mega_tbl$high_confidence_genetic==1],
    successes = mega_tbl$index[mega_tbl$success==1],
    failures = mega_tbl$index[mega_tbl$failure==1],
    complex = mega_tbl$index[mega_tbl$complex==1],
    ligand_receptor = mega_tbl$index[mega_tbl$ligand_receptor==1]
  )
), quantities=TRUE)
method_overlaps = list(
  `high confidence genetics` = mega_tbl$index[mega_tbl$high_confidence_genetic==1],
  successes = mega_tbl$index[mega_tbl$success==1],
  failures = mega_tbl$index[mega_tbl$failure==1],
  complex = mega_tbl$index[mega_tbl$complex==1],
  ligand_receptor = mega_tbl$index[mega_tbl$ligand_receptor==1]
)
# membership matrix for the upset plot
my_dat_genes_all_tbl = as.data.frame.matrix((table(stack(method_overlaps))))
my_dat_genes_all_tbl = cbind(rownames(my_dat_genes_all_tbl), my_dat_genes_all_tbl)
rownames(my_dat_genes_all_tbl) = NULL
# NOTE(review): upset() comes from the UpSetR package, which is not loaded
# in the LOAD section above -- confirm it is attached elsewhere
upset(my_dat_genes_all_tbl, order.by="freq")
# NETWORK NEIGHBOURS ------------------------------------------------------
# flag first/second network neighbours of high-confidence hits in the
# filtered MetaBase interaction graph
find_network_second_neighbor = TRUE # boolean: true for second neighbors, false for first neighbors
mega_tbl$network_first_neighbor = 0
mega_tbl$network_second_neighbor = 0
mb = read_tsv("interactions/metabase_plus_ppi/metabase/MetaBaseFullNetworkHML.txt")
# keep only high-trust, mechanistic interactions
mb_filt = mb %>% filter(TrustLevel=="high", !Mechanism %in% c("Competition","Influence on expression","Pharmacological effect", "Toxic effect","Unspecified"))
mb_graph = graph_from_data_frame(mb_filt[,c(1,3,5:11)])
for(i in 1:length(my_gwas$mesh)) {
  print(i)
  hits = as.numeric(unlist(mega_tbl[mega_tbl$gwas==my_gwas$gwas[i] & mega_tbl$high_confidence_genetic==1, 'entrezgene']))
  if(is_empty(hits)) next
  my_neighbors = c()
  for(j in 1:length(hits)) {
    # tryCatch: a hit may be absent from the graph (invalid vertex name)
    possible_error <- tryCatch(
      {
        if(find_network_second_neighbor) {
          neighborhood(mb_graph, order=2, nodes=hits[j])
        } else {
          neighbors(mb_graph, hits[j], mode="all")
        }
      },
      error=function(e) e
    )
    if(inherits(possible_error, "error")) next
    if(find_network_second_neighbor) {
      my_neighbors = c(my_neighbors, as.numeric(possible_error[[1]]$name))
    } else {
      my_neighbors = c(my_neighbors, as.numeric(possible_error$name))
    }
  }
  my_neighbors = unique(my_neighbors)
  if(find_network_second_neighbor) {
    mega_tbl$network_second_neighbor[(mega_tbl$gwas==my_gwas$gwas[i] & mega_tbl$entrezgene %in% my_neighbors)] = 1
  } else {
    mega_tbl$network_first_neighbor[(mega_tbl$gwas==my_gwas$gwas[i] & mega_tbl$entrezgene %in% my_neighbors)] = 1
  }
}
# PATHWAYS ----------------------------------------------------------------
mega_tbl$pathways = 0
metabase = gmtPathways("genesets/Metabase_GO_Maps_entrez.filt10.gmt")
for(i in 1:length(my_gwas$mesh)) {
print(i)
hits = as.numeric(unlist(mega_tbl[mega_tbl$gwas==my_gwas$gwas[i] & mega_tbl$high_confidence_genetic==1, 'entrezgene']))
p_ix = unlist(lapply(metabase, function(x) any(hits %in% x)))
table(p_ix)
pathway_hits = as.numeric(unique(unlist(metabase[p_ix])))
mega_tbl$pathways[(mega_tbl$gwas==my_gwas$gwas[i] & mega_tbl$entrezgene %in% pathway_hits)] = 1
}
# PATHWAY NEIGHBOURS ------------------------------------------------------
load("r_data/Metabase_interactors.Rdata")
mega_tbl$pathway_first_neighbor = 0
mega_tbl$pathway_second_neighbor = 0
for(i in 1:length(my_gwas$mesh)) {
print(i)
hits = as.numeric(unlist(mega_tbl[mega_tbl$gwas==my_gwas$gwas[i] & mega_tbl$high_confidence_genetic==1, 'entrezgene']))
p_ix = which(as.numeric(names(gg.interactors)) %in% hits)
first_hits = as.numeric(unlist(lapply(gg.interactors[p_ix], function(x) x[[1]])))
second_hits = as.numeric(unlist(lapply(gg.interactors[p_ix], function(x) x[[2]])))
mega_tbl$pathway_first_neighbor[(mega_tbl$gwas==my_gwas$gwas[i] & mega_tbl$entrezgene %in% first_hits)] = 1
mega_tbl$pathway_second_neighbor[(mega_tbl$gwas==my_gwas$gwas[i] & mega_tbl$entrezgene %in% second_hits)] = 1
}
# NETWORK PROPAGATION -----------------------------------------------------
# Import HotNet consensus subnetworks (one run per GWAS x network/lfdr
# combination) and flag member genes; record runs whose output is missing.
my_networks = c(
"intomics_pascal_lfdr_0.01",
"intomics_pascal_lfdr_0.05",
"intomics_pascal_lfdr_0.1"
)
missing_dat = data.frame(matrix(0, nrow=length(my_gwas$mesh), ncol=length(my_networks)))
names(missing_dat) = my_networks
for(j in 1:length(my_networks)) {
# Dynamic column creation via tidy-eval: one 0/1 column per network.
mega_tbl = mega_tbl %>% mutate(!!my_networks[j] := 0)
for(i in 1:length(my_gwas$mesh)) {
print(i)
if(file.exists(paste0("hotnet/hotnet/test_all_gwas/results/", my_networks[j], "/output_", i, "/consensus/subnetworks.tsv"))) {
hotnet_res = read_tsv(paste0("hotnet/hotnet/test_all_gwas/results/", my_networks[j], "/output_", i, "/consensus/subnetworks.tsv"), col_names=FALSE)
# Rows 3+ hold space-separated entrez ids -- presumably rows 1-2 are a
# header; TODO confirm against the HotNet consensus output format.
hotnet_res = as.numeric(unlist(str_split(hotnet_res$X1[3:length(hotnet_res$X1)], " ")))
mega_tbl[[my_networks[j]]][(mega_tbl$gwas==my_gwas$gwas[i] & mega_tbl$entrezgene %in% hotnet_res)] = 1
} else {
missing_dat[[my_networks[j]]][i] = 1
}
}
}
# RANDOM ------------------------------------------------------------------
# NOTE(review): the `random` column is assigned twice; the second mutate()
# below overwrites this first one, so only the pathway-gene sampling scheme
# survives. Confirm which definition is intended and delete the other.
mega_tbl = mega_tbl %>% group_by(gwas) %>% mutate(random = ifelse(1:n() %in% sample(1:n(),10000), 1, 0))
# check pathways or < 1
metabase = gmtPathways("genesets/Metabase_GO_Maps_entrez.filt10.gmt")
metabase = sort(as.numeric(unlist(metabase)))
mega_tbl = mega_tbl %>% group_by(gwas) %>% mutate(random = ifelse(entrezgene %in% sample(metabase,10000,replace=FALSE), 1, 0))
# OMNIPATH HEAT -----------------------------------------------------------
# Attach the HotNet heat score computed on the Omnipath network to each gene
# of each GWAS; genes without a heat entry stay NA.
mega_tbl$hotnet_omnipath_heat = NA
for(i in 1:length(my_gwas$mesh)) {
  print(i)
  if(file.exists(paste0("hotnet/hotnet/test_all_gwas/results/omnipath/output_", i, "/heat.txt"))) {
    heat = read_tsv(paste0("hotnet/hotnet/test_all_gwas/results/omnipath/output_", i, "/heat.txt"), col_names=FALSE)
  } else {
    print(paste("File:", i, "doesn't exist ..."))
    next
  }
  # pull out the index and the entrez id for this GWAS
  match_dat = mega_tbl %>% filter(gwas==my_gwas$gwas[i]) %>% dplyr::select(entrezgene, index)
  # Row of match_dat for each heat gene; NA when the gene is not in mega_tbl.
  heat_ix = match(heat$X1, match_dat$entrezgene)
  missing_ix = which(is.na(heat_ix))
  # BUG FIX: the previous code unconditionally did `heat_ix[-missing_ix]`.
  # When missing_ix is empty (every heat gene matched), x[-integer(0)] is
  # x[integer(0)], i.e. selects NOTHING, so no heat values were written at
  # all. Only drop unmatched entries when there is something to drop.
  if (length(missing_ix) > 0) {
    heat_ix = heat_ix[-missing_ix]
    heat_vals = heat$X2[-missing_ix]
  } else {
    heat_vals = heat$X2
  }
  # update using the precomputed mega_tbl row index
  mega_tbl$hotnet_omnipath_heat[match_dat$index[heat_ix]] = heat_vals
}
# USE PASCAL GENE SCORES FOR HOTNET ---------------------------------------
# Build HotNet input heat files from Pascal gene p-values: genes passing a
# local-FDR (twilight) cutoff get heat -log10(p); all others get heat 0.
# NOTE(review): prefer library() over require() for a hard dependency.
require(twilight)
# define a cutoff for the pascal scores
networks = c("intomics")
lfdr_range = c(0.1, 0.05, 0.01)
# One output directory per network x lfdr cutoff.
for(j in 1:length(networks)) {
for(k in 1:length(lfdr_range)) {
if(!dir.exists(paste0("hotnet/hotnet/test_all_gwas/input/", networks[j], "_pascal_lfdr_", lfdr_range[k]))) dir.create(paste0("hotnet/hotnet/test_all_gwas/input/", networks[j], "_pascal_lfdr_", lfdr_range[k]))
}
}
for(i in 1:dim(my_gwas)[1]) {
hits_filt = mega_tbl %>% filter(gwas==my_gwas$gwas[i])
if(all(is.na(hits_filt$pascal_default))) { # no pascal data for this gwas
print(paste("No Pascal data for GWAS:", my_gwas$gwas[i]))
next
}
for(j in 1:length(networks)) {
# load gene score
gene_ix = read_tsv(paste0("interactions/",networks[j],"/",networks[j],"_nodes.txt"), col_names=FALSE)
# add heat scores
heat_scores = data.frame(gene=gene_ix$X2, score=NA)
# get the gene p values
heat_scores$p = hits_filt$pascal_default[match(heat_scores$gene, hits_filt$entrezgene)]
# find the lfdr for the p values
heat_scores$lfdr = NA
# twilight() estimates the local FDR for the non-missing p-values;
# arrange(index) aligns the result rows back with the input order
# (twilight sorts its result) -- TODO confirm against twilight docs.
t_res = tbl_df(twilight(heat_scores$p[!is.na(heat_scores$p)])$result)
t_res = t_res %>% arrange(index)
for(k in 1:length(lfdr_range)) {
# Work on a copy so each cutoff starts from the same base scores.
heat_scores_copy = heat_scores
heat_scores_copy$lfdr[!is.na(heat_scores_copy$p)] = t_res$fdr
# heat_scores_copy %>% ggplot(aes(x=p, y=1-lfdr)) + geom_line() + theme_thesis() + xlab("Pascal p-value") + ylab("1-FDR")
heat_scores_copy$score = -log(hits_filt$pascal_default[match(heat_scores_copy$gene, hits_filt$entrezgene)], base=10)
# Zero out genes failing the lfdr cutoff or lacking a Pascal score.
heat_scores_copy$score[heat_scores_copy$lfdr > lfdr_range[k]] = 0
heat_scores_copy$score[is.na(heat_scores_copy$score)] = 0
heat_scores_copy = arrange(heat_scores_copy, desc(score))
# Two columns (gene id, heat), no header, as HotNet expects.
write_tsv(heat_scores_copy[,1:2], paste0("hotnet/hotnet/test_all_gwas/input/", networks[j], "_pascal_lfdr_", lfdr_range[k], "/input_", i, ".tsv"), col_names=FALSE)
}
}
}
|
292f43c8f3213cf9a6cc50d15151fb77efd30711 | 3413a4251da58e64b85b6f07055c7ee250fa853d | /MATCH/EMA/maintainenceClean/match_ema_clean_diagnosis_w3.R | b865feb4fc37affa4d44866d0ab2fe13e5301c34 | [] | no_license | wangjingke/reach | 48dd0da6901f8393f22c4db02fce7d5fc10f214c | 1320fd5e9f76533ffe0e3d1e124ce8ed10673aa1 | refs/heads/master | 2020-05-21T14:59:02.926515 | 2018-04-23T03:43:29 | 2018-04-23T03:43:29 | 63,976,443 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,330 | r | match_ema_clean_diagnosis_w3.R | work_dir <- "D:/REACH/MATCH/EMA_Clean/Wave3"
# Build the expected EMA schedule for wave 3: for every dyad (mother = "11"
# prefix, child = "12" prefix on the zero-padded DID), one row per day for 8
# days starting at the wave-3 enter date, plus empty columns filled in later.
setwd(work_dir)
# Helper functions (e.g. ema.dayAfter) come from this sourced backend script
# -- TODO confirm ema.dayAfter is defined there.
source("D:/GitHub/reach/MATCH/EMA/backend/match_ema_promptList.R")
library(XLConnect)
w3 <- readWorksheetFromFile("D:/GitHub/reach/MATCH/EMA/backend/match_ema_keys.xlsx", sheet = "W3", colTypes = "character")
# Keep only dyads that actually started wave 3.
w3 <- w3[!is.na(w3$w3_start),]
# NOTE(review): pads a single leading zero, so a 1-character DID would still
# end up only 2 characters -- confirm DIDs are always 2-3 digits.
w3$DID <- ifelse(nchar(w3$DID)<3, paste0("0", w3$DID), as.character(w3$DID))
# Enter date is the date part (before the first space) of the w3pu stamp.
w3$enterDate <- sapply(strsplit(w3$w3pu, " "), head, 1)
w3Schedule <- c()
for (i in 1:nrow(w3)) {
indivSchedule = rbind(data.frame(subjectID=paste0("11", w3$DID[i]), date=ema.dayAfter(w3$enterDate[i], 0:7), stringsAsFactors = FALSE), data.frame(subjectID=paste0("12", w3$DID[i]), date=ema.dayAfter(w3$enterDate[i], 0:7), stringsAsFactors = FALSE))
indivSchedule = cbind(indivSchedule, data.frame(matrix(data = NA, nrow = nrow(indivSchedule), ncol = 13, dimnames = list(c(), c("manualFile", "manualPrompts", "manualResponse", "wocketsFile", "wocketsPrompts", "wocketsResponse", "identicalPrompts", "identicalResponse", "cover", "coverManual", "coverWockets", "diffPrompts", "diffResponse"))), stringsAsFactors = FALSE))
# NOTE(review): growing w3Schedule with rbind in a loop is O(n^2) but
# acceptable at this cohort size.
w3Schedule = rbind(w3Schedule, indivSchedule)
rm(indivSchedule)
}
# Per-day file listings produced by the manual-retrieve and Wockets pipelines.
w3manual <- read.csv("D:/REACH/MATCH/EMA_ManualRetrieve/W3/MATCH_EMA_W3_List_manual_2017-03-10.csv", header = TRUE, stringsAsFactors = FALSE)
w3wockets <- read.csv("D:/REACH/MATCH/EMA_Wockets/EMA_Wockets_W3/MATCH_EMA_W3_List_Wockets_2017-03-09.csv", header = TRUE, stringsAsFactors = FALSE)
# Return NA in place of a zero-length result (e.g. integer(0) from an empty
# subset); any non-empty value is passed through unchanged.
ema.returnNA <- function(x) {
  if (length(x) > 0) x else NA
}
# Look up, for every scheduled person-day, which data file(s) exist in the
# manual-retrieve and Wockets listings. paste(collapse=",") always returns a
# length-1 string ("" when nothing matches), so ema.returnNA is only a
# safeguard here; multiple matches are joined with ",".
for (i in 1:nrow(w3Schedule)) {
w3Schedule$manualFile[i] <- ema.returnNA(paste(w3manual$file[which(w3manual$date == w3Schedule$date[i] & w3manual$subjectID == w3Schedule$subjectID[i])], collapse = ","))
w3Schedule$wocketsFile[i] <- ema.returnNA(paste(w3wockets$file[which(w3wockets$date == w3Schedule$date[i] & grepl(w3Schedule$subjectID[i], w3wockets$file))], collapse = ","))
}
# Days with more than one file in either source are flagged and skipped here;
# they are handled manually (see "special cases" at the end of the script).
w3Schedule$twoFile <- grepl(",", w3Schedule$manualFile) | grepl(",", w3Schedule$wocketsFile)
# Load each day's .rds from both sources, count prompts/responses, and test
# whether the two sources agree exactly.
for (i in 1:nrow(w3Schedule)) {
if (w3Schedule$twoFile[i]) next
# subjectIDs are "11<DID>" (mother) or "12<DID>" (child); ids above 12000
# therefore belong to the child.
if (w3Schedule$subjectID[i] > 12000) {mother <- 0} else {mother <- 1}
if (!is.na(w3Schedule$manualFile[i]) & w3Schedule$manualFile[i]!="") {
manualX <- readRDS(paste0("D:/REACH/MATCH/EMA_ManualRetrieve/W3/", w3Schedule$manualFile[i]))
w3Schedule$manualPrompts[i] <- ema.returnNA(nrow(manualX$prompts))
switch (as.character(mother),
"1" = {w3Schedule$manualResponse[i] <- ema.returnNA(nrow(manualX$responses_mother))},
"0" = {w3Schedule$manualResponse[i] <- ema.returnNA(nrow(manualX$responses_child))}
)
}
if (!is.na(w3Schedule$wocketsFile[i]) & w3Schedule$wocketsFile[i] != "") {
wocketsX <- readRDS(paste0("D:/REACH/MATCH/EMA_Wockets/EMA_Wockets_W3/", w3Schedule$wocketsFile[i]))
w3Schedule$wocketsPrompts[i] <- ema.returnNA(nrow(wocketsX$prompts))
switch(as.character(mother),
"1" = {w3Schedule$wocketsResponse[i] <- ema.returnNA(nrow(wocketsX$responses_mother))},
"0" = {w3Schedule$wocketsResponse[i] <- ema.returnNA(nrow(wocketsX$responses_child))}
)
}
# Agreement tests: both sources present -> identical(); both absent -> TRUE;
# exactly one present -> left NA.
if (!is.na(w3Schedule$manualPrompts[i]) & !is.na(w3Schedule$wocketsPrompts[i])) {
w3Schedule$identicalPrompts[i] = identical(manualX$prompts, wocketsX$prompts)
}
if (is.na(w3Schedule$manualPrompts[i]) & is.na(w3Schedule$wocketsPrompts[i])) {w3Schedule$identicalPrompts[i] = TRUE}
if (!is.na(w3Schedule$manualResponse[i]) & !is.na(w3Schedule$wocketsResponse[i])) {
switch (as.character(mother),
"1" = {w3Schedule$identicalResponse[i] = identical(manualX$responses_mother, wocketsX$responses_mother)},
"0" = {w3Schedule$identicalResponse[i] = identical(manualX$responses_child, wocketsX$responses_child)}
)
}
if (is.na(w3Schedule$manualResponse[i]) & is.na(w3Schedule$wocketsResponse[i])) {w3Schedule$identicalResponse[i] = TRUE}
# Drop per-day objects so a stale copy can never leak into the next row.
if (exists("manualX")) rm(manualX)
if (exists("wocketsX")) rm(wocketsX)
rm(mother)
if (i%%100 == 0) print(i)
}
# Missing counts mean no file / no rows: treat as zero.
w3Schedule$manualPrompts[is.na(w3Schedule$manualPrompts)] <- 0
w3Schedule$wocketsPrompts[is.na(w3Schedule$wocketsPrompts)] <- 0
w3Schedule$manualResponse[is.na(w3Schedule$manualResponse)] <- 0
w3Schedule$wocketsResponse[is.na(w3Schedule$wocketsResponse)] <- 0
# how many days were covered
w3Schedule$cover = ifelse(w3Schedule$manualPrompts > 0 | w3Schedule$wocketsPrompts > 0, 1, 0)
w3Schedule$coverManual = ifelse(w3Schedule$manualPrompts > 0, 1, 0)
w3Schedule$coverWockets = ifelse(w3Schedule$wocketsPrompts > 0, 1, 0)
w3Schedule$diffPrompts = w3Schedule$manualPrompts - w3Schedule$wocketsPrompts
w3Schedule$diffResponse = w3Schedule$manualResponse - w3Schedule$wocketsResponse
# Majority of the surveys (> 3 days) from the following subjects were completely missed, requiring investigation
aggregate(date~subjectID, data = w3Schedule[w3Schedule$cover==0,], FUN = function (x) {length(unique(x))})
# how many prompt days are identical between wockets and manual
table(w3Schedule$identicalPrompts, w3Schedule$identicalResponse, useNA = "ifany")
# identicalPrompts cannot be NA when manualFile or wocketsFile != ""
w3Schedule[is.na(w3Schedule$identicalPrompts) & w3Schedule$manualFile!="" & w3Schedule$wocketsFile!="",]
# Classify each clean person-day into a copy scenario and copy its raw data
# folders into the per-subject clean directory. (Spelling "senario" is kept
# because it names a data-frame column that downstream code may rely on.)
w3Schedule$senario <- NA
for (i in 1:nrow(w3Schedule)) {
if (w3Schedule$twoFile[i]) next
if (w3Schedule$manualFile[i]=="" && w3Schedule$wocketsFile[i]=="") next
targetDir = paste0(work_dir, "/MATCH_", w3Schedule$subjectID[i], "W3")
if (!dir.exists(targetDir)) dir.create(targetDir)
# senario control swtich
# senario 1, only available in manual backup
if (w3Schedule$manualFile[i]!="" && w3Schedule$wocketsFile[i]=="") {senario = "1"}
# senario 2, only available in wockets
if (w3Schedule$manualFile[i]=="" && w3Schedule$wocketsFile[i]!="") {senario = "2"}
# senario 3, available in both, completely identical prompts and responses, use the wockets copy
if (w3Schedule$manualFile[i]!="" && w3Schedule$wocketsFile[i]!="" && isTRUE(w3Schedule$identicalResponse[i]) && isTRUE(w3Schedule$identicalPrompts[i])) {senario = "3"}
# senario 4, avaiable in both, identical responses but different prompts
if (w3Schedule$manualFile[i]!="" && w3Schedule$wocketsFile[i]!="" && isTRUE(w3Schedule$identicalResponse[i]) && !isTRUE(w3Schedule$identicalPrompts[i])) {senario = "4"}
# actions under each senario, when no senario found, skip to next
# (the exists()/rm() pattern below is how 'no scenario matched' is detected)
if (!exists("senario")) next
w3Schedule$senario[i] <- senario
# Source tree: scenario 1 copies from the manual backup, everything else
# from the Wockets tree.
if (senario == "1") {
originDir = paste0("D:/REACH/MATCH/EMA_W3_temp/manual/", strsplit(w3Schedule$manualFile[i], "/")[[1]][1])
} else {
originDir = paste0("D:/REACH/MATCH/EMA_W3_temp/wockets/", strsplit(w3Schedule$wocketsFile[i], "/")[[1]][1])
}
if (exists("originDir")) {
# Copy every directory whose path contains this day's date.
dirsToCopy <- grep(w3Schedule$date[i], list.dirs(originDir, full.names = FALSE, recursive = TRUE), value = TRUE)
for (j in dirsToCopy) {
path.seg <- strsplit(j, "/")[[1]]
targetDir.X <- paste0(targetDir, "/", paste(path.seg[2:(length(path.seg)-1)], collapse = "/"))
# get rid of .match
# NOTE(review): "." is a regex wildcard here; use fixed = TRUE (or
# "\\.match/") to avoid accidentally matching e.g. "Xmatch/".
targetDir.X <- gsub(".match/", "", targetDir.X)
if (!dir.exists(targetDir.X)) dir.create(targetDir.X, recursive = TRUE)
file.copy(from = paste0(originDir, "/", j), to = targetDir.X, recursive = TRUE)
}
# senario 4, use the responses and logs from wockets (completed above), and use the longer prompts
if (senario == "4" && w3Schedule$diffPrompts[i] > 0) {
# more prompts in manual than wockets, change the originDir to manual, and overwrite the prompts.csv file, otherwise the prompts.csv would be from wockets
originDir = paste0("D:/REACH/MATCH/EMA_W3_temp/manual/", strsplit(w3Schedule$manualFile[i], "/")[[1]][1])
promptX <- grep(w3Schedule$date[i], list.files(originDir, "Prompts.csv", recursive = TRUE), value = TRUE)
path.seg <- strsplit(promptX, "/")[[1]]
targetDir.X <- paste0(targetDir, "/.match/", paste(path.seg[2:(length(path.seg)-1)], collapse = "/"))
if (!dir.exists(targetDir.X)) dir.create(targetDir.X, recursive = TRUE)
file.copy(from = paste0(originDir, "/", promptX), to = targetDir.X, recursive = TRUE)
}
}
# Reset loop-scoped markers so exists() works correctly next iteration.
if(exists("originDir")) rm(originDir)
if(exists("senario")) rm(senario)
if(i%%10 == 0) print(paste0(i, "; ", round(i/nrow(w3Schedule)*100, 2), "%; ", Sys.time()))
}
# Second pass: re-copy scenario-1 (manual-only) days WITHOUT the .match-path
# rewriting done above.
for (i in 1:nrow(w3Schedule)) {
if (w3Schedule$senario[i] == "1" & !is.na(w3Schedule$senario[i])) {
targetDir = paste0(work_dir, "/MATCH_", w3Schedule$subjectID[i], "W3")
if (!dir.exists(targetDir)) dir.create(targetDir)
originDir = paste0("D:/REACH/MATCH/EMA_W3_temp/manual/", strsplit(w3Schedule$manualFile[i], "/")[[1]][1])
if (exists("originDir")) {
dirsToCopy <- grep(w3Schedule$date[i], list.dirs(originDir, full.names = FALSE, recursive = TRUE), value = TRUE)
for (j in dirsToCopy) {
path.seg <- strsplit(j, "/")[[1]]
targetDir.X <- paste0(targetDir, "/", paste(path.seg[2:(length(path.seg)-1)], collapse = "/"))
if (!dir.exists(targetDir.X)) dir.create(targetDir.X, recursive = TRUE)
file.copy(from = paste0(originDir, "/", j), to = targetDir.X, recursive = TRUE)
}
}
if(exists("originDir")) rm(originDir)
}
if(i%%10 == 0) print(paste0(i, "; ", round(i/nrow(w3Schedule)*100, 2), "%; ", Sys.time()))
}
# special cases
# two files for same day
w3Schedule[w3Schedule$twoFile == TRUE,]
w3Schedule[w3Schedule$senario==4 & !is.na(w3Schedule$senario),]
w3Schedule[w3Schedule$manualFile!="" & w3Schedule$wocketsFile!="" & w3Schedule$identicalPrompts!=TRUE,]
w3Schedule[w3Schedule$manualFile!="" & w3Schedule$wocketsFile!="" & w3Schedule$identicalResponse!=TRUE,]
w3Schedule[is.na(w3Schedule$senario) & w3Schedule$twoFile==FALSE & !is.na(w3Schedule$subjectID) & w3Schedule$cover == 1,]
# manually put senario 1 into correct folders
unique(w3Schedule$subjectID[w3Schedule$senario=="1"])
|
ebecea1dd2b7460d560603ca1393ff12b02d2d8a | 007757ece48ebd78299fb8395ae17eae3f9d6c09 | /analysis/networks/src/not_in_ms/specMets.R | c115fee3950426c7ca90b05af3e8de5ad46186af | [] | no_license | Lucy-Guarnieri/hedgerow_metacom | d8f4124d9e5d96953e7721a1a1e9257bf46e2411 | 3a91700551c7687134902666218a8a34d35bed7f | refs/heads/master | 2022-10-17T06:48:48.002125 | 2020-06-12T20:31:49 | 2020-06-12T20:31:49 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,367 | r | specMets.R | rm(list=ls())
setwd('~/Dropbox/hedgerow_metacom/analysis/speciesLevel')
# Project setup: data objects and helpers (nets, spec, save.path, calcSpec,
# lmer via lme4, ...) come from this initialize script -- TODO confirm.
source('src/initialize.R')
## **********************************************************
## species level metrics
## **********************************************************
spec.metrics <- calcSpec(nets, spec, spec.metric = "degree", 3)
save(spec.metrics, file=file.path(save.path, 'spec.metrics.Rdata'))
## linear models
load(file=file.path(save.path, 'spec.metrics.Rdata'))
## SiteStatus or ypr
xvar <- "s.Year"
## anything outputted by specieslevel
ys <- c("proportional.generality", "d", "degree",
"tot.int",
"partner.diversity")
# One mixed-model formula per response:
#   y ~ xvar * overall.spec + (1|Site) + (1|GenusSpecies)
formulas <-lapply(ys, function(x) {
as.formula(paste(x, "~",
paste(paste(xvar, "overall.spec", sep="*"),
## paste0("I(", xvar, "^2)*overall.spec"),
"(1|Site)",
"(1|GenusSpecies)",
sep="+")))
})
## formulas <-lapply(ys, function(x) {
## as.formula(paste(x, "~",
## paste(xvar,
## "(1|Site)",
## "(1|GenusSpecies)",
## sep="+")))
## })
# Fit each formula separately for pollinators/plants at control/mature sites.
mod.pols.cont <- lapply(formulas, function(x){
lmer(x,
data=spec.metrics[spec.metrics$speciesType == "pollinator" &
spec.metrics$SiteStatus == "control",])
})
mod.pols.mat <- lapply(formulas, function(x){
lmer(x,
data=spec.metrics[spec.metrics$speciesType == "pollinator" &
spec.metrics$SiteStatus == "mature",])
})
mod.plants.cont <- lapply(formulas, function(x){
lmer(x,
data=spec.metrics[spec.metrics$speciesType == "plant" &
spec.metrics$SiteStatus == "control",])
})
mod.plants.mat <- lapply(formulas, function(x){
lmer(x,
data=spec.metrics[spec.metrics$speciesType == "plant" &
spec.metrics$SiteStatus == "mature",])
})
# NOTE(review): mod.pols and mod.plants are never defined above (the model
# lists are mod.pols.cont/.mat and mod.plants.cont/.mat), so this line and
# the save() below will fail -- these names likely need updating.
names(mod.pols) <- names(mod.plants) <- ys
lapply(mod.pols.cont, summary)
lapply(mod.pols.mat, summary)
lapply(mod.plants.cont, summary)
lapply(mod.plants.mat, summary)
## [[1]]
## [1] -456.7538
## [[2]]
## [1] -220.4532
## [[3]]
## [1] 2951.248
## [[4]]
## [1] 2951.248
## [[5]]
## [1] 1165.832
save(mod.pols, mod.plants, ys, file=file.path(save.path,
sprintf('mods/spec.metrics_%s.Rdata', xvar)))
## **********************************************************
## degree distributions (abundance distributions)
## **********************************************************
# Restrict to the BACI study sites and draw one degree-density panel per
# site, one curve per year-post-restoration (ypr) value.
baci.sites <- c("Barger", "Butler", "MullerB", "Sperandio", "Hrdy")
spec.metrics <- spec.metrics[spec.metrics$Site %in% baci.sites,]
# 2x3 panel grid: 5 site panels + 1 legend panel.
layout(matrix(1:6, nrow=2))
cols <- rainbow(length(unique(spec.metrics$ypr)))
lapply(unique(spec.metrics$Site), function(x){
print(x)
this.spec.metrics <- spec.metrics[spec.metrics$Site == x, c("degree", "ypr")]
plot(NA, ylim=c(0,0.8), xlim=c(0,25),
ylab="Frequency",
xlab="Abundance",
main=x)
# NOTE(review): colours follow each site's own (unsorted, possibly partial)
# ypr order, while the legend below uses the globally sorted ypr values --
# the colour/label mapping can disagree across panels; consider indexing
# cols by match() against the sorted global ypr vector.
for(i in 1:length(unique(this.spec.metrics$ypr))){
this.ypr <- unique(this.spec.metrics$ypr)[i]
print(this.ypr)
points(density(this.spec.metrics$degree[this.spec.metrics$ypr == this.ypr]),
col=cols[i], type="l", lwd=2)
}
})
# Empty panel used only to host the legend.
plot(NA, ylim=c(0,1), xlim=c(0,1), xaxt="n", yaxt="n", ylab="", xlab="")
legend("center", col=cols, lwd="2",
legend=sort(unique(spec.metrics$ypr)),
bty="n")
|
bb2062f02a6cd6b3ae7d8fc2d2f38c392fed80dd | 14edd8524265d64b1713909c83086a81bfbf6eaa | /R/elo_lookup.R | 1292a31434d345d1e105bbdf431eeeeff5ab0913 | [] | no_license | jotremblay/inmatch_api | 43847d21cda3356bcfe7a55e6cc7ac0cc271ec94 | 29a9f568786703a60811c9ea7b2e6ff8337cfbb8 | refs/heads/master | 2021-01-04T06:19:56.823230 | 2020-01-17T02:42:36 | 2020-01-17T02:42:36 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,350 | r | elo_lookup.R | #' Current Elo
#'
#' Extracts current elo from stored "elo" data
#'
#' @param id1 Character id of player 1
#' @param id2 Character id of player 2 (if doubles)
#' @param mens Logical if mens or womens match
#' @param default Numerical rating used when ID not found
#'
#' @return Numeric elo value
#'
#' @export
elo_lookup <- function(id1, id2 = NULL, mens, default = 1300){
  # The four branches of the original were copy-pasted variants of the same
  # lookup; collapse them into one helper. Ratings tables (atp_elo, wta_elo,
  # atp_elo_doubles, wta_elo_doubles) are package data expected in scope.
  # Fetch a single player's rating from `ratings`, falling back to `default`
  # when the id is absent or the stored rating is NA.
  single_elo <- function(ratings, id) {
    row <- ratings %>% filter(playerid == id)
    if (nrow(row) == 0 || is.na(row$elo)) default else row$elo
  }
  if (is.null(id2)) {
    # Singles: use the tour-appropriate singles table.
    single_elo(if (mens) atp_elo else wta_elo, id1)
  } else {
    # Doubles: average the two partners' ratings from the doubles table.
    doubles <- if (mens) atp_elo_doubles else wta_elo_doubles
    (single_elo(doubles, id1) + single_elo(doubles, id2)) / 2
  }
}
5f472df060cd49541739b74be21e687458493a95 | 41a532d92286f1e2c95c355ae7cda970b2447003 | /ENMeval_Modeling.R | 13eea8b9c25f33cd350e10670dadb0ade832bf4c | [] | no_license | Shenggeqianxing/ENM | e6215855b9e572945b9dd92337b7d502b1d6975f | 9b6ad24ff9096866e6a9eaa96c83cfb0946f9c64 | refs/heads/master | 2023-01-08T00:53:39.974471 | 2020-10-24T02:55:07 | 2020-10-24T02:55:07 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,710 | r | ENMeval_Modeling.R | ## A.E. Melton
# Script to run ENMEval
### Install required packages
#library(devtools)
#install_github("bobmuscarella/ENMeval@master", force = TRUE)
#devtools::install_github("Pakillo/rSDM", force = TRUE)
#options(java.parameters = "- Xmx16g") # increase memory that can be used
#library(devtools)
#install_github('johnbaums/rmaxent')
library(rmaxent)
# Load required libraries
library(ENMeval)
library(rSDM)
library(dismo)
library(spThin)
library(readr)
### Load environmental data and prepare for analyses
# Present climate data
#
setwd("PATH TO RASTER LAYERS")
# NOTE(review): pattern = ".asc" treats "." as a regex wildcard; use
# pattern = "\\.asc$" to match only the file extension.
env.files <- list.files(pattern = ".asc", full.names = TRUE)
envStack <- stack(env.files)
# Layer names must match the (alphabetical) order of the .asc files on disk
# -- TODO confirm the ordering before trusting these labels.
names(envStack) <- c("bio10", "bio11", "bio12", "bio13", "bio14", "bio15", "bio16", "bio17", "bio18", "bio19", "bio1", "bio2", "bio3", "bio4", "bio5", "bio6", "bio7", "bio8", "bio9", "CEC",
"Clay", "Elevation", "ORC", "PH", "Sand", "Silt", "Slope", "Taxonomy", "TRI")
envStack <- setMinMax(envStack)
projection(envStack) <- "+proj=longlat +ellps=WGS84 +towgs84=0,0,0,0,0,0,0 +no_defs"
plot(envStack[[1]])
### Use correlation output from layer processing script (caret package)
# Pairwise Pearson correlations between layers; drop one layer of each pair
# with |r| >= 0.8 to reduce collinearity.
corr <- layerStats(envStack, stat = "pearson", na.rm = TRUE)
# NOTE(review): `c` shadows base::c for the session (harmless in call
# position, but confusing) -- consider a different name.
c <- corr$`pearson correlation coefficient`
c.matrix <- data.matrix(c)
c.abs <- abs(c.matrix)
# findCorrelation() is from caret, which is not loaded above -- TODO confirm
# caret is attached (e.g. by the layer processing script).
envtCor <- findCorrelation(c.abs, cutoff = 0.8, names = TRUE, exact = TRUE)
names(envStack)
sort(envtCor)
names(envStack[[c(2,3,5,6,7,8,9,10,14,16,17,21,25,29)]])
env <- envStack[[-c(2,3,5,6,7,8,9,10,14,16,17,21,25,29)]]
names(env)
##
setwd("PATH TO COCCURRENCE DATA")
# Occurrence records; columns 2:3 are assumed to be longitude/latitude --
# TODO confirm the column order in the points file.
points <- read.csv("POINTS DATA")[,2:3]
nrow(points)
# Reduce "points" to just one occurrence per cell (There may not be more than one, but juuuuust in case...).
points <- gridSample(xy = points, r = envStack[[1]], n = 1)
# If there are more than N points after first recution, further reduce to N to maximize distribution evenness.
# NOTE(review): `N` is a template placeholder and is undefined here --
# replace it with the desired maximum occurrence count before running.
if (nrow(points) > N) {
source("PATH TO thin_max.R")
points <- thin.max(points, c("longitude", "latitude"), N) # trim down to N
}
nrow(points)
points(points, col = "black")
### Designate background data
# Randomly sample 10,000 background points from one background extent raster (only one per cell without replacement). Note: Since the raster has <10,000 pixels, you'll get a warning and all pixels will be used for background. We will be sampling from the biome variable because it is missing some grid cells, and we are trying to avoid getting background points with NA.
bg <- randomPoints(env[[1]], n=10000)
bg <- as.data.frame(bg)
# Check the bg point distribution in g-space
plot(env[[1]], legend=FALSE)
points(bg, pch = 16, col='red')
### Model generation
#envReduced <- subset(env, c(1,2,5,6)) # For after you know what to get rid of
# Tune Maxent over regularization multipliers x feature classes with
# checkerboard2 spatial cross-validation.
# NOTE(review): the collinearity-reduced `env` is built above, but the FULL
# `envStack` is passed here -- confirm which stack is intended.
modeval <- ENMevaluate(occ = points[, c("longitude", "latitude")],
env = envStack,
bg.coords = bg,
#categoricals = "Taxonomy",
algorithm = 'maxent.jar',
RMvalues = c(0.5, 1, 1.5, 2, 2.5, 3, 3.5, 4),
fc = c("L", "H", "LQ", "LQH", "LQP", "LQPH", "LQPHT"), #"L", "H", , "LQH", "LQP", "LQPH"
method = "checkerboard2", #"block", #"randomkfold", #kfolds = 10,
aggregation.factor = c(2,2),
#overlap = TRUE,
clamp = TRUE,
rasterPreds = TRUE,
parallel = TRUE,
numCores = 4,
bin.output = TRUE,
progbar = TRUE)
##########################################################################################
modeval
results <- modeval@results
# Tells you which model is the best
# NOTE(review): `n` can have length > 1 if models tie on delta.AICc.
n <- which(results$delta.AICc == 0)
### Convert prediction to different output
modeval@predictions[[n]] # raw output; Check model name!
p <- predict(modeval@models[[n]], env)
plot(p) # logistic output
points(points)
##########################################################################################
### Save Model Output #change name for lines 133,135,137
setwd(dir = "PATH TO OUTPUT FOLDER")
# NOTE(review): saved as "SPECIES_NAME" but reloaded below as
# "SPECIES_NAME.rda" -- keep the two file names consistent.
save(modeval, file = "SPECIES_NAME")
writeRaster(x = p, filename = "SPECIES_NAME.asc", format = "ascii", NAFlag = "-9999", overwrite = T)
rm(list = ls())
devtools::session_info()
##########################################################################################
##########################################################################################
######################## GET MODEL EVALUATION STATS ######################################
load(file = "SPECIES_NAME.rda")
#
# Best model object (lowest AICc).
aic.opt <- modeval@models[[which(modeval@results$delta.AICc==0)]]
aic.opt
aic.opt@lambdas
parse_lambdas(aic.opt) # available in rmaxent package
#modeval@overlap
#modeval@models
# Gives you a table with the stats for all models
results <- modeval@results
# kable() is from knitr -- TODO confirm knitr is attached at this point.
kable(results)
#
plot(modeval@predictions[[which(modeval@results$delta.AICc==0)]], main="Relative occurrence rate")
# Tells you which model is the best
which(results$delta.AICc == 0)
# Data frame and barplot of variable importance
df <- var.importance(aic.opt)
df
barplot(df$permutation.importance, names.arg=df$variable, las=2, ylab="Permutation Importance")
#
eval.plot(results)
# Plots all of the predictions by the different models
maps <- modeval@predictions
plot(maps)
# Plots the predictions of the best model?
plot(maps[[which(results$delta.AICc == 0)]], main = "Models with lowest AICc")
# Plots all of the response curves for the best model
for (i in which(results$delta.AICc == 0)) {
response(modeval@models[[i]])
}
# NOTE(review): `aic.opt` is a model object, not an index -- this line will
# fail; the loop above already plots the best model's response curves.
response(modeval@models[[aic.opt]])
56eb738cab422db500ce48d7c880a5261c4b790a | 9a49c0e424edfa60ab81fc07693f9c05f3cf984e | /R/testROH.R | a6d2b87dd550671ae68613b6695435edf773d829 | [] | no_license | HenrikBengtsson/PSCBS | c6967835662c27512e668f1a63e5cf3fccb10b3c | 4f6ccedc90d8cf58344b8dd436b5b9c82dab07ed | refs/heads/master | 2023-01-10T01:03:29.915343 | 2021-10-23T08:00:28 | 2021-10-23T08:00:28 | 20,844,732 | 9 | 6 | null | 2018-04-11T02:17:13 | 2014-06-15T00:15:30 | R | UTF-8 | R | false | false | 5,449 | r | testROH.R | ###########################################################################/**
# @set "class=numeric"
# @RdocMethod testROH
#
# @title "Tests if a segment is in Run-of-Homozygosity (ROH)"
#
# \description{
# @get "title".
# }
#
# @synopsis
#
# \arguments{
# \item{muN}{An @numeric @vector of J genotype calls in
# \{0,1/2,1\} for AA, AB, and BB, respectively,
# and @NA for non-polymorphic loci.}
# \item{csN}{(optional) A @numeric @vector of J genotype confidence scores.
# If @NULL, ad hoc scores calculated from \code{betaN} are used.}
# \item{betaN}{(optional) A @numeric @vector of J matched normal BAFs
# in [0,1] (due to noise, values may be slightly outside as well)
# or @NA for non-polymorphic loci.}
# \item{minNbrOfSnps}{Minimum number of SNPs required to test segment.
# If not tested, @NA is returned.}
# \item{delta}{A @double scalar specifying the maximum (weighted)
# proportion of heterozygous SNPs allowed in an ROH region.}
# \item{...}{Not used.}
# \item{verbose}{See @see "R.utils::Verbose".}
# }
#
# \value{
# Returns a @logical.
# }
#
# @author "PN, HB"
#
# @keyword internal
#*/###########################################################################
setMethodS3("testROH", "numeric", function(muN, csN=NULL, betaN=NULL, minNbrOfSnps=1, delta=1/12, ..., verbose=FALSE) {
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Validate arguments
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Argument 'muN':
muN <- Arguments$getDoubles(muN, range=c(0,1))
nbrOfSnps <- length(muN)
length2 <- rep(nbrOfSnps, times=2)
# Argument 'csN' & 'betaN':
if (!is.null(csN)) {
csN <- Arguments$getDoubles(csN, range=c(0,1), length=length2)
} else {
if (!is.null(betaN)) {
betaN <- Arguments$getDoubles(betaN, length=length2)
}
}
# Argument 'minNbrOfSnps':
minNbrOfSnps <- Arguments$getInteger(minNbrOfSnps, range=c(1,Inf))
# Argument 'verbose':
verbose <- Arguments$getVerbose(verbose)
if (verbose) {
pushState(verbose)
on.exit(popState(verbose))
}
verbose && enter(verbose, "Testing for ROH")
# Default ROH call
call <- NA
verbose && cat(verbose, "Number of SNPs: ", nbrOfSnps)
# Nothing todo?
if (nbrOfSnps < minNbrOfSnps) {
verbose && exit(verbose)
return(call)
}
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Calculate genotype confidence scores (from betaN)?
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if (is.null(csN)) {
if (!is.null(betaN)) {
verbose && enter(verbose, "Calculating confidence scores")
# Assuming naive genotyping a'la aroma.light::callNaiveGenotypes()
# was used to call genotypes 'muN' from 'betaN'.
# AD HOC: We also have to assume that the thresholds were 1/3 and 2/3.
a <- 1/3 # was fit$x[1]
b <- 2/3 # was fit$x[2]
# AD HOC: We have to make some assumption about which SNPs are diploid.
# Assume all for now
isDiploid <- rep(TRUE, times=nbrOfSnps)
# KNOWN ISSUE: Scores for homozygotes are in [0,1/3], whereas
# heterzygotes are in [0,1/6]. /PN 2011-11-11
csN[isDiploid] <- rowMins(abs(cbind(betaN[isDiploid]-a, betaN[isDiploid]-b)), useNames=FALSE)
verbose && exit(verbose)
}
}
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Call ROH
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Identify heterozygous SNPs
isHet <- (muN == 1/2)
verbose && print(verbose, summary(isHet))
# With or without genotype confidence scores?
if (!is.null(csN)) {
# 0-1 weights (just to make sure)
# Weights summing to one
w <- csN / sum(csN, na.rm=TRUE)
wnHets <- sum(isHet*w, na.rm=TRUE)
wnSnps <- sum(w, na.rm=TRUE) # == 1 /HB
# Sanity check
.stop_if_not(isZero(wnSnps - 1.0, eps=sqrt(.Machine$double.eps)))
} else {
wnHets <- sum(isHet, na.rm=TRUE)
wnSnps <- 1
}
propHets <- wnHets/wnSnps
verbose && print(verbose, propHets)
call <- (propHets < delta)
verbose && print(verbose, call)
# Record parameter settings
attr(call, "minNbrOfSnps") <- minNbrOfSnps
attr(call, "delta") <- delta
verbose && exit(verbose)
call
}) # testROH()
##############################################################################
# HISTORY
# 2014-03-30 [HB]
# o GENERALIZATION: Now testROH() default to equal confidence scores
# whenever neither 'csN' not 'betaN' is given. This means that
# callROH() can also be done if only 'muN' was provided.
# o Updated the ordering and the defaults of testROH() arguments to make
# it clear that 'betaN' is optional and only used if 'csN' is not given.
# 2013-03-08 [HB]
# o Added Rdoc help.
# 2012-05-30 [HB]
# o Now testROH() return parameter settings as attributes.
# 2011-11-21 [HB]
# o BUG FIX: The internal sanity check on weights was slightly too
# conservative.
# 2011-11-12 [HB]
# o Renamed argument 'tauROH' to 'delta', cf. how we do for AB and LOH.
# o Added argument 'minNbrOfSnps' to testROH().
# o Added verbose output.
# 2011-11-12 [PN]
# o Implemented a naive caller based on the weighted proportion of hets
# in the segment.
# 2011-11-04 [HB]
# o Added skeleton for testROH().
# o Created.
##############################################################################
|
5b16c80dd5e69a943a605536c6812d64d9be9c15 | 0cb6804ae4b54daa547311909c12805cc31dbb41 | /run_analysis.R | f781c3bda2664efb0544b6593ba0290c1ded8323 | [] | no_license | efrainplaza/GettingAndCleaningData | 588edea4a48c6c63d4caeb50a90b8dcd1489c34c | ff6520b0bac82a8c02670a573691befff2d8fc42 | refs/heads/master | 2020-03-20T02:01:29.115878 | 2018-06-18T20:54:28 | 2018-06-18T20:54:28 | 137,096,842 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,690 | r | run_analysis.R | ## This script reads and merges data to create a Tidy Dataset
##Libraries
library(dplyr)
# Load data for test sets
# UCI HAR layout: X_* holds the feature columns, y_* the activity code per
# row, subject_* the subject id per row.
setwd("C:/Data/R/Getting and Creating Data Week4/Week 4 Final Project/UCI HAR Dataset/test")
test_set <- read.csv("X_test.txt", sep = "",header = FALSE)
test_keys_set <- read.csv("y_test.txt", sep = "", header = FALSE)
test_subject_set <- read.csv("subject_test.txt", sep = "", header = FALSE )
dim(test_set)
dim(test_keys_set)
# Load data for train sets
setwd("C:/Data/R/Getting and Creating Data Week4/Week 4 Final Project/UCI HAR Dataset/train")
train_set <- read.csv("X_train.txt", sep = "",header = FALSE)
train_keys_set <- read.csv("y_train.txt", sep = "", header = FALSE)
train_subject_set <- read.csv("subject_train.txt", sep = "", header = FALSE)
# Assign each row of TEST set to the Activity performed and the Subject that performed it
test_set_act_key <- cbind(test_keys_set,test_set)
test_set_act_subject <- cbind(test_subject_set,test_set_act_key)
##View(test_set_act_subject)
# Assign each row of TRAIN set to the Activity performed and the Subject that performed it
train_set_act_key <- cbind(train_keys_set,train_set)
train_set_act_subject <- cbind(train_subject_set,train_set_act_key)
##View(train_set_act_subject)
#Load the Features and tag all columns
setwd("C:/Data/R/Getting and Creating Data Week4/Week 4 Final Project/UCI HAR Dataset")
features <- read.csv("features.txt", sep = "",header = FALSE, stringsAsFactors = FALSE)
features_line <- features$V2
# Column names: subject id, activity code, then the feature names.
features_add <- c("subject","activities",features_line)
names(test_set_act_subject) <- features_add
names(train_set_act_subject) <- features_add
#Load the Activities Labels
activit_label <- read.csv("activity_labels.txt", sep = "",header = FALSE, stringsAsFactors = FALSE)
#Merge the test and train datasets
merged_data <- rbind(test_set_act_subject,train_set_act_subject)
dim(merged_data)
#Tag data with activities Descriptions (i.e. Walking, Standing, etd)
merged_w_activ <- merge(merged_data,activit_label,by.x = "activities", by.y = "V1", all = TRUE)
#Choose only the MEAN and STD from the Dataset
match_mean_std <- grep("[Mm]ean|[Ss]td",names(merged_w_activ))
# NOTE(review): column 564 is assumed to be the activity label column (V2)
# appended by the merge -- a hard-coded index that breaks if the feature set
# changes; prefer selecting it by name.
merged_data <- merged_w_activ[c(564,2,match_mean_std)]
names(merged_data)[1] <- "Activity"
##head(merged_data[1:5,])
merged_group <- group_by(merged_data, Activity, subject)
#Last group created calculating mean by Activity and subject
# NOTE(review): funs() is deprecated in recent dplyr; summarise_all(mean) or
# across() is the modern equivalent.
tidy_data <- summarise_all(merged_group,funs(mean))
setwd("C:/Data/R/Getting and Creating Data Week4/Week 4 Final Project")
##write.csv(tidy_data, file = "last_tidy.csv")
write.table(tidy_data, "tidy.txt", row.names = FALSE,sep="\t")
6c8df1b7af20a67aaed46d7dd78c42fd2b6136f9 | f0fcba83d61b4e6c6da3de57cd9f666e42d6bc7d | /R/VRIInit_siteTree.R | 1c660f28d164eabadd8ba791aa5ccaaf7c79250d | [
"Apache-2.0"
] | permissive | Miss-White/BCForestGroundSample | 42dfe06e7debe04e2f8fcde42fb8b0b499308a4d | 9d2761276fbc7b3aef2fedf03e073f8753cd0058 | refs/heads/master | 2021-04-06T03:59:26.127425 | 2018-03-02T17:00:21 | 2018-03-02T17:00:21 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,433 | r | VRIInit_siteTree.R | #' Load and select site trees-VRI specific
#'
#' @description This function connects site tree data (vi_h, cardh) to selected cluster/plot-level data.
#' Site tree data is located in \code{//Mayhem/GIS_TIB/RDW/RDW_Data2/Work_Areas/VRI_ASCII_PROD/vri_sa}
#'
#' @param clusterplotHeader data.table, contains cluster/plot-level attributes, an output from \code{\link{VRIInit_clusterplot}}.
#'
#' @param dataSourcePath character, Specifies the path that directs to the VRI original data soruce, i.e.,
#' \code{//Mayhem/GIS_TIB/RDW/RDW_Data2/Work_Areas/VRI_ASCII_PROD/vri_sa}.
#'
#'
#'
#'
#' @return A data table that contains site tree data information. A log file documents the detailed process
#'
#' @note VRI specific
#'
#' @importFrom data.table data.table ':=' set rbindlist setkey
#' @importFrom haven read_sas
#' @importFrom dplyr '%>%'
#' @export
#' @docType methods
#' @rdname VRIInit_siteTree
#'
#' @author Yong Luo
#'
#'
#'
setGeneric("VRIInit_siteTree",
function(clusterplotHeader,
dataSourcePath){
standardGeneric("VRIInit_siteTree")
})
#' @rdname VRIInit_siteTree
setMethod(
  "VRIInit_siteTree",
  signature = c(clusterplotHeader = "data.table",
                dataSourcePath = "character"),
  definition = function(clusterplotHeader,
                        dataSourcePath){
    compileLog <- appendedCat("Start to load and select site tree.")
    # Key columns identifying a unique tree observation.
    displayColumn <- c("CLSTR_ID", "PLOT", "TREE_NO")
    # Read the raw site-tree table (vi_h) and normalize column names.
    vi_h <- read_sas(file.path(dataSourcePath, "vi_h.sas7bdat")) %>% data.table
    names(vi_h) <- toupper(names(vi_h))
    compileLog <- appendedCat(paste("vi_h (site tree data) contains ", nrow(vi_h), " rows.", sep = ""),
                              compileLog)
    # Composite key used to match site trees to the selected cluster/plot records.
    clusterplotHeader[, clusterplot := paste(CLSTR_ID, PLOT, sep = "_")]
    vi_h[, clusterplot := paste(CLSTR_ID, PLOT, sep = "_")]
    compileLog <- appendedCat(logFileProducer(reason = "Site trees are not in selected cluster/plot",
                                              action = "removed",
                                              displayTable = vi_h[!(clusterplot %in% clusterplotHeader$clusterplot),],
                                              displayColumn = displayColumn),
                              compileLog)
    vi_h <- vi_h[clusterplot %in% clusterplotHeader$clusterplot,]
    # Flag duplicated cluster/plot/tree observations, log them, then keep one.
    vi_h[, lendup := length(TREE_LEN), by = displayColumn]
    compileLog <- appendedCat(logFileProducer(reason = "Duplicate site tree observations at cluster/plot/tree level",
                                              action = "removed",
                                              displayTable = vi_h[lendup > 1,],
                                              displayColumn = displayColumn),
                              compileLog)
    vi_h <- unique(vi_h, by = displayColumn)
    # range(vi_h$BNG_DIAM, na.rm = TRUE) # from 0.1 to 999.9, is 999.9 correct?
    # Derive DBH only when both the diameter at bore (BNG_DIAM) and the bark
    # thickness (BARK_THK; /10 is a unit conversion -- presumably mm, TODO
    # confirm) are present and non-zero.
    # BUG FIX: the original wrote (!is.na(x) | x != 0) for each clause; the
    # `|` made the clause TRUE for x == 0, so zero measurements slipped into
    # the DBH computation.  Both sub-conditions must hold, hence `&`.
    vi_h[(!is.na(BNG_DIAM) & BNG_DIAM != 0) & (!is.na(BARK_THK) & BARK_THK != 0),
         DBH := BNG_DIAM + 2*BARK_THK/10]
    compileLog <- appendedCat("DBH was calculated using diameter at bore (BNG_DIAM) and bark thickness (BARK_THK).",
                              compileLog)
    compileLog <- appendedCat(logFileProducer(reason = "Failed to calculate DBH",
                                              action = "no",
                                              displayTable = vi_h[is.na(DBH)],
                                              displayColumn = displayColumn),
                              compileLog)
    compileLog <- appendedCat(paste("After selection, vi_h has", nrow(vi_h), "trees."),
                              compileLog)
    # Drop bookkeeping/raw columns not needed downstream.
    vi_h[, ':='(PROJ_ID = NULL,
                SAMP_NO = NULL,
                TYPE_CD = NULL,
                clusterplot = NULL,
                lendup = NULL)]
    ##### sas codes
    # IF UPCASE(WALKTHRU_STATUS) = 'W' THEN TREE_WT = 2;
    # ELSE IF UPCASE(WALKTHRU_STATUS) = 'O' THEN TREE_WT = 0;
    # ELSE WALKTHRU_STATUS = 1;
    vi_h <- vi_h[order(CLSTR_ID, PLOT, TREE_NO),]
    return(list(siteTreeData = vi_h,
                compileLog = compileLog))
  })
|
1814cbdf23ea9348c74a5829224ca82ce6c56886 | 514723241b6d6f3f12856b8faa17cb34ce873d69 | /milestone4/R-Code Files/kfold_classification.r | bf287128c745f0437ba86ddc1ecaa3ac8eca97fd | [] | no_license | hitesha14/Blockchain-and-Cryptocurrency | 829334aaebd0ae0de0c1bc66ae5f33631c121657 | 4bdcf95567c11a9cc1d6bb8c000231d41ed6a2f0 | refs/heads/master | 2021-05-06T03:30:28.766879 | 2017-12-02T13:22:12 | 2017-12-02T13:22:12 | 114,903,019 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,358 | r | kfold_classification.r | library(caret)
library(readr)
library(glmnet)
library(tictoc)
# Setting the directory
setwd('/home/akanksha/Desktop/Sem III/Data Analytics/Course Project/Dataset/cryptocurrencypricehistory/Cleaned Dataset')
# Load MODIFIED dataset
eth <- read_csv('eth_dataset_cleaned_filtered.csv')
# Drop eth_price_ternary_label and Date(UTC) column
eth <- within(eth, rm("eth_price_ternary_label", "Date(UTC)"))
#str(eth)
eth$eth_price_binary_label <- as.factor(eth$eth_price_binary_label) # For binary label
#str(eth)
# Create random data partitions into train and test sets
# Filtering the label column and convert it into a vector
classes <- eth[, "eth_price_binary_label"]
classes_vec <- as.numeric(classes$eth_price_binary_label)
# Creating random test/train partitions
train_set <- createDataPartition(classes_vec, p=0.8, list=FALSE)
str(train_set)
set.seed(120)
cv_splits <- createFolds(classes_vec, k=10, returnTrain = TRUE)
str(cv_splits)
nPca <- 6
eth_pca <- eth[, 1:6]
eth_pca <- cbind(eth_pca, eth_price_binary_label = eth$eth_price_binary_label)
### Fitting the Generalized Linear Model using 10 fold cross validation
eth_train <- eth_pca[train_set, ]
eth_test <- eth_pca[-train_set, ]
# Form a set of combinations of parameter lambda and alpha for tuning the model
glmnet_grid <- expand.grid(alpha = c(0, .1, .2, .4, .6, .8, 1), lambda = seq(.01, .2, length = 20))
glmnet_cntrl <- trainControl(method = "LOOCV", number = 10)
tic()
glmnet_fit <- train(eth_price_binary_label ~ ., data = eth_train, method = "glmnet", tuneGrid = glmnet_grid, trControl = glmnet_cntrl)
toc()
glmnet_fit
# Training accuracy was used to select the optimal model using the largest value which was found to be 54.8%.
# The final values used for the model were alpha = 0.1 and lambda = 0.01.
### Testing the model through predictions and building confusion matrices
pred_classes <- predict(glmnet_fit, eth_test[, 1:nPca])
conMat <- table(pred_classes, eth_test$eth_price_binary_label)
confusionMatrix(conMat)
# Testing Accuracy was found to be 52.2%
# Visualization of tuning parameters
trellis.par.set(caretTheme())
plot(glmnet_fit, scales = list(x = list(log = 2)))
# The plot shows the “accuracy”, that is the percentage of correctly classified observations,
# for the penalized logistic regression model with each combination of the two tuning parameters |
a5c22053666faeef55e9c83be8b3e35cd638388b | 49caa2248976a81b483c1852f57a342c42a64052 | /tests/testthat/test-pptx-selections.R | 7e8725d8e2527f75b4d3e59f688a94ab785d9e4f | [
"MIT"
] | permissive | davidgohel/officer | 3aa90b22052c86743a2eb80790ba5562b1b91189 | c0e15dc488c33268cdb8a8ef633ecd20f2370db2 | refs/heads/master | 2023-08-28T01:14:23.519702 | 2023-07-15T09:03:55 | 2023-07-15T09:03:55 | 62,127,714 | 577 | 114 | NOASSERTION | 2023-09-12T21:56:59 | 2016-06-28T09:22:18 | R | UTF-8 | R | false | false | 2,175 | r | test-pptx-selections.R | test_that("check slide selection", {
x <- read_pptx()
x <- add_slide(x, "Title and Content", "Office Theme")
x <- ph_with(x, "Hello world 1", location = ph_location_type(type = "body"))
x <- add_slide(x, "Title and Content", "Office Theme")
x <- ph_with(x, "Hello world 2", location = ph_location_type(type = "body"))
x <- add_slide(x, "Title and Content", "Office Theme")
x <- ph_with(x, "Hello world 3", location = ph_location_type(type = "body"))
x <- on_slide(x, index = 1)
sm <- slide_summary(x)
expect_equal(sm[1,]$text, "Hello world 1")
x <- on_slide(x, index = 2)
sm <- slide_summary(x)
expect_equal(sm[1, ]$text, "Hello world 2")
x <- on_slide(x, index = 3)
sm <- slide_summary(x)
expect_equal(sm[1, ]$text, "Hello world 3")
})
test_that("check errors", {
  # An empty presentation must reject every slide-level operation.
  x <- read_pptx()
  expect_error(slide_summary(x), "presentation contains no slide")
  expect_error(remove_slide(x), "presentation contains no slide to delete")
  expect_error(on_slide(x, index = 1), "presentation contains no slide")
  # Add two populated slides, then target a non-existent third slide:
  # every operation must fail with the out-of-range message.
  x <- add_slide(x, "Title and Content", "Office Theme")
  x <- ph_with(x, "Hello world", location = ph_location_type(type = "body"))
  x <- ph_with(x, "my title", location = ph_location_type(type = "title"))
  x <- add_slide(x, "Title and Content", "Office Theme")
  x <- ph_with(x, "Hello world", location = ph_location_type(type = "body"))
  x <- ph_with(x, "my title 2", location = ph_location_type(type = "title"))
  expect_error(on_slide(x, index = 3), "unvalid index 3")
  expect_error(remove_slide(x, index = 3), "unvalid index 3")
  expect_error(slide_summary(x, index = 3), "unvalid index 3")
})
test_that("get shape id", {
  # Round-trip through a saved file so shape ids come from real pptx XML.
  doc <- read_pptx()
  doc <- add_slide(doc, "Title and Content", "Office Theme")
  doc <- ph_with(doc, "hello", location = ph_location_type(type = "body"))
  file <- print(doc, target = tempfile(fileext = ".pptx"))
  doc <- read_pptx(file)
  # The same shape must be addressable by type and by placeholder label.
  expect_equal(officer:::get_shape_id(doc, type = "body", id = 1), "2")
  expect_equal(officer:::get_shape_id(doc, ph_label = "Content Placeholder 2", id = 1), "2")
  # A shape index beyond what the slide contains must error.
  expect_error(officer:::get_shape_id(doc, type = "body", id = 4) )
})
# Clean up any pptx files written to the working directory by the tests above.
unlink("*.pptx")
fc13d32db541e9fbee153ded6af19c395211a512 | 522db9be1de76f55caca5264c98480ce8d638ce5 | /man/fish_trip_simulation.Rd | 0beb0d37bcaed8e296aa37cc6d34c36ef7418dc8 | [] | no_license | peterkuriyama/ch4 | edd70b2fac81c7c47dbc5fcf46dfc08a6a38b0cc | 439c4aa2c2882db91576732fb03ae18123a78e4a | refs/heads/master | 2021-10-27T14:04:27.115425 | 2019-04-17T17:34:05 | 2019-04-17T17:34:05 | 72,870,772 | 0 | 1 | null | null | null | null | UTF-8 | R | false | true | 1,414 | rd | fish_trip_simulation.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fish_trip_simulation.R
\name{fish_trip_simulation}
\alias{fish_trip_simulation}
\title{Run Fish Trip Simulation
Function to run fish_trip simulations in parallel}
\usage{
fish_trip_simulation(in_list)
}
\arguments{
\item{nreps}{Number of replicates; each nrep corresponds to a seed}
\item{ncores}{Number of cores; used in mclapply}
\item{ntows}{Number of tows; start of input into fish_trip function}
\item{scope}{Scope of movement}
\item{input}{Overall input to the function; defaults to filt_clusts which is the data with clusters filtered to include clusters with the most tows}
\item{ntows}{Number of tows in the fishing trip}
\item{start_clust}{Starting cluster to fish in}
\item{quotas}{Data frame of quota for target and weak stock species}
\item{scale}{Scale of movement; if scale == 'port', specify a d_port; if scale == 'scope', specify scale}
\item{prob_type}{Probability type; type_prop_hauls -- frequency of encounter
or type_clust_perc -- proportion of catch in each cluster}
}
\description{
Run Fish Trip Simulation
Function to run fish_trip simulations in parallel
}
\examples{
#Specify arguments as a list
in_list <- list(nreps = 6, ncores = 6, ntows = 20, scope = 5,
scale = 'scope', the_port = "ASTORIA / WARRENTON", prob_type = "type_clust_perc",
quotas = quotas)
fish_trip_simulation(in_list)
}
|
4e317eca03acab7f86e78b8d3287fcfb245e6dc6 | b1a9184604fa99fc58a181d3c9f8cf167ea3b8ab | /man/YeastIRO.Rd | 14ea2bf100191be82d22e7f1e0adf77149fd6205 | [] | no_license | cran/IROmiss | b6a09d3b7142c88c717200f7fa4ed8393efbf47d | b99f2e9ce866893a419466fd41ed812f65277590 | refs/heads/master | 2021-05-11T19:18:27.202776 | 2020-02-19T04:10:02 | 2020-02-19T04:10:02 | 117,863,390 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,662 | rd | YeastIRO.Rd | \name{YeastIRO}
\alias{YeastIRO}
\title{ Learning gene regulatory networks for Yeast Cell Expression Data.}
\description{An Imputation Regularized Optimization (IRO) algorithm for learning gene regulatory networks with missing data. The dataset is collected from the yeast Saccharomyces cerevisiae responding to diverse environmental changes and is available at http://genome-www.stanford.edu/yeast-stress/.}
\usage{
YeastIRO(data, alpha1 = 0.05, alpha2 = 0.01, alpha3 = 0.01, iteration = 30, warm = 20)
}
\arguments{
\item{ data }{ \emph{\eqn{n}}x\emph{p} Yeast Cell expression data.}
\item{ alpha1 }{ The significance level of correlation screening in the \eqn{\psi}-learning algorithm, see R package \pkg{equSA} for detail. In general, a high significance level of correlation screening will lead to
a slightly large separator set, which reduces the risk of missing important variables in
the conditioning set. In general, including a few false variables in the conditioning set will not hurt much the
accuracy of the \eqn{\psi}-partial correlation coefficient, the default value is 0.05.}
\item{ alpha2 }{ The significance level of \eqn{\psi}-partial correlation coefficient screening for estimating the adjacency matrix, see \pkg{equSA}, the default value is 0.01.}
\item{ alpha3 }{ The significance level of integrative \eqn{\psi}-partial correlation coefficient screening for estimating the adjacency matrix of IRO_Ave method, the default value is 0.01.}
\item{ iteration }{ The number of total iterations, the default value is 30.}
\item{ warm }{ The number of burn-in iterations, the default value is 20.}
}
\value{
\item{ A }{ \emph{p}x\emph{p} Estimated adjacency matrix for network construction.}
%% ...
}
\author{ Bochao Jia\email{jbc409@ufl.edu} and Faming Liang}
\examples{
\donttest{
library(IROmiss)
library(huge)
data(yeast)
## long time ##
A <- YeastIRO(yeast, alpha1 = 0.05, alpha2 = 0.01, alpha3 = 0.01, iteration = 30, warm = 20)
## plot gene regulatory network by our estimated adjacency matrix.
huge.plot(A)
}
}
\references{
Liang, F., Song, Q. and Qiu, P. (2015). An Equivalent Measure of Partial Correlation Coefficients for High Dimensional Gaussian Graphical Models. J. Amer. Statist. Assoc., 110, 1248-1265.
Liang, F. and Zhang, J. (2008) Estimating FDR under general dependence using stochastic approximation. Biometrika, 95(4), 961-977.
Liang, F., Jia, B., Xue, J., Li, Q., and Luo, Y. (2018). An Imputation Regularized Optimization Algorithm for High-Dimensional Missing Data Problems and Beyond. Submitted to Journal of the Royal Statistical Society Series B.
}
\keyword{YeastIRO}
|
3937dab5c5e72a4ef621bd3e504d00d016931e8f | 3a8ff7937829e3ebb16f1b1d00fdb2676c99cd22 | /man/complete_network.Rd | 0fdc4f7a6a4e1027ad69567ad2f88dd0fd92d91a | [] | no_license | cran/SDDE | 3890abf2fb6ab21582d034700d66f84ae4757c2c | 66fc2f6eeade2aabaf634bd4b6ffd9078b19f930 | refs/heads/master | 2021-01-01T15:50:31.452166 | 2015-08-26T23:23:36 | 2015-08-26T23:23:36 | 31,781,169 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,294 | rd | complete_network.Rd | \encoding{ISO-8859-2}
\name{complete_network}
\alias{complete_network}
\title{compare two given networks (original and augmented, presented as undirected graphs) using a path analysis}
\description{This function computes the number of paths in each category for all pairs of nodes of the original network.}
\usage{
complete_network(g1,g2,
taxnames,
maxdistance=0,
maxtime=3600,
maxnode=0,
verbose=FALSE,
file="log.txt",
maxcores=1,
node1,
node2
)
}
%\usage{complete_network(g1,g2,taxnames='default',maxdistance=0, maxtime=3600,maxnode=0, verbose=FALSE, filename="log.txt", maxcores=0, node1="default", node2="default") }
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{g1}{the original network X}
\item{g2}{the augmented network Y with additional nodes (all the original nodes from X must be present in the augmented network Y)}
\item{taxnames}{the taxon name of the nodes added to the original graph (default: we select all nodes that are not in g1)}
\item{maxnode}{the maximum number of augmented nodes in network Y to take into account (default=0, no maximum number of augmented nodes to take into account. The augmented nodes are sorted by distance to the investigated node pairs by the algorithm.)}
\item{maxtime}{the maximum search time per pathway (default=3600 seconds)}
\item{maxdistance}{the maximum search distance to the added nodes in network Y (default=0, no maximum distance for augmented nodes to take into account)}
\item{verbose}{flag to save additional information regarding the obtained paths into a file (default=FALSE, the file name should be indicated)}
\item{file}{filename to save the additionnal informations}
\item{node1}{if node1 is set,we look only for paths starting from node1}
\item{node2}{if node2 is set, we will only look at the paths starting at node1 and ending at node2}
\item{maxcores}{maximum number of cores to use (default=1, use 0 to use half of the total cores available )}
}
\value{This function returns the number of paths of each category: \emph{Shortcuts}, \emph{Detours}, \emph{Dead ends}, \emph{Equal} paths and disconnected nodes.\cr \cr If some of the augmented nodes are not visited because of the limits defined by \code{maxnode}, \code{maxtime} or \code{maxdistance}, a path can also be classified as \emph{Detour or Dead End}.}
%\author{Etienne Lord, Margaux Le Cam, Eric Bapteste, Vladimir Makarenkov and Francois-Joseph Lapointe}
\examples{
## Searching the sample data (containing 11 original nodes and 3 augmented nodes)
data(Sample_1)
result <- complete_network(g1, g2)
print(result)
## Results:
##
## disconnected nodes shortcuts equals detours dead ends
##user.self 18 4 5 26 2
## Dead ends or detour total user time system time real time
##user.self 0 55 0.997 0.111 2.186
##
## Searching for path x1 -> x2 in the sample data
\dontrun{complete_network(g1, g2,node1="x1", node2="x2")}
##
## Specifying a limit on time (60 seconds) and maximum distance (2) of
## an original node to an augmented node
\dontrun{complete_network(g1, g2, maxtime=60, maxdistance=2)}
}
%\keyword{ }
|
614923ecee033623af5e564d27646aa0a6bcaafa | b96d5cd00c721ba2e0ce3a99ede77959e45989ef | /Feb7th.R | c15b46ced3e0b01cd5a56721b8fce59106672170 | [] | no_license | soy54uce/statRepo | 7e60f2a2fcd503a51d8c68acb5f77706caeeaea7 | 404ba428cc252a3b3de761c65a6280f4e07add1b | refs/heads/master | 2020-04-17T07:00:44.469650 | 2019-04-02T13:32:13 | 2019-04-02T13:32:13 | 166,349,492 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,044 | r | Feb7th.R | # Feb 7th
library(stats)
library(dplyr)
library(ggplot2)
View(mpg)
# 5 Main verbs of dplyr
# 1. FILTER -- filter where some column meets some condition
mpg40 <- mpg %>%
filter(hwy > 40)
enEff <- mpg %>%
filter(hwy > 40, cty > 30)
enEff <- mpg %>%
filter(hwy > 40 | cty > 30)
# Choosing cars in a set of particular classes:
bigcars <- mpg %>%
filter(class %in% c('suv', 'pickup'))
# 2. MUTATE -- Use mutate to create new variables in the data frame
# To create a new vector that is the average of city and highway mileage
mpgNew <- mpg %>%
mutate(mpgAvg = (cty + hwy)/2)
# 3. SUMMARISE -- Summarize some information from the data frame.
mpg %>%
summarise(hwyavg = mean(hwy))
mpg %>%
summarise(mean(hwy))
mpg %>%
summarise(hwyavg = mean(hwy),
hwystd = sd(hwy),
numcars = n()) # or length(hwy)
# 4. Group_by() puts the data in groups by a vector. Most useful
# when followed by Summarise
classMpg <- mpg %>%
group_by(class) %>%
summarise(hwyavg = mean(hwy),
hwystd = sd(hwy),
numcars = n())
# 5. Arrange -- puts the data in order
# NOT sort -- only sorts individual vectors
# NOT order -- only gives position of the largest, next largest, . . .
mpgArranged <- mpg %>%
arrange(hwy)
View(mpgArranged)
mpgDescArranged <- mpg %>%
arrange(desc(hwy))
View(mpgDescArranged)
mpgDescArranged <- mpg %>%
arrange(desc(hwy)) %>%
head()
# Additional useful verbs in dplyr
# SELECT -- choose particular columns in the data frame to keep
mpgsmall <- mpgArranged %>%
select(manufacturer, model, hwy, class)
# SAMPLE_N -- used to take a subset of a data frame
mpgsample <- mpg %>%
sample_n(50)
dsmall <- diamonds %>%
sample_n(1000)
# LEFT_JOIN -- Used to merge data frames
# also inner_join, outer_join, right_join
mpg20 <- mpg %>%
filter(hwy > 20)
ggplot(mpg20, aes(x = hwy, y = cty, color = class)) +
geom_point()
# OR like this:
mpg %>%
filter(hwy > 20) %>%
ggplot(aes(x = hwy, y = cty, color = class)) +
geom_point()
|
bfb1f1b42926f9eba9e014217f9189e9222d8b4e | a85cd3c3dfc49477313f3b3572d7703264ff627b | /JATAgri/SYNTAX1.R | 79b6eb1028f6df39f4fa759ffb095fc388b0392f | [] | no_license | manigrew/emdp | 806d8f1cba59b21b7f0fc07aa8b722967f587965 | c5d745b1fca0d8d76a2a38787e080e1e142adefe | refs/heads/master | 2023-06-04T23:23:57.783253 | 2021-06-27T14:26:29 | 2021-06-27T14:26:29 | 350,419,461 | 0 | 1 | null | 2021-06-08T19:49:24 | 2021-03-22T16:48:09 | HTML | UTF-8 | R | false | false | 8,189 | r | SYNTAX1.R | library(readxl)
##############################
# Question 1: Anand, the cofounder of JAT, claims that disease 6 (leaf curl) information was accessed at least 60 times every month on average since October 2017 due to this disease outbreak.
# Test this claim at a significance level of 0.05 using an appropriate hypothesis test.
############################
##IMPORT THE DATA
#data <- read.csv("clipboard", sep = "\t", header = TRUE)
setwd("C:/Users/manish.grewal/emdp/R/Agri")
data <- read_excel("IMB733-XLS-ENG.xlsx", sheet = "Data Sheet")
View(data)
#########################
##To test this claim, select the D6 disease access data from October 2017 onwards
data1 <- data[95:123,10]
data11 <- data[95:123,"D6"]
data1 == data11
str(data1)
typeof(data1)
unlist(data1[,1])
mean(data1[,1])
###calculate the mean and standard deviation from data1
summary(data1)
sd(unlist(data1))
### one-sample t-test
2.048 * (mean(unlist(data1)) - 60) / (19.3548 / sqrt(28)) /2
t.test(data1, mu = 62, alternative = "greater", conf.level = 0.95)
#lower, upper
#lower = mean- t(alpha)*SE
#upper = mean+ t(alpha)*SE
## INTERPRETATION
# In this case, we reject the null hypothesis and infer that the claim made by Anand
# that the access of disease 6 in his app is at least 60 times a month on average
# due to the disease outbreak is correct.
##############################
# Question 2: Among the app users for disease information, at least 15% of the users
# access disease information related to disease 6. Use an appropriate hypothesis test
# to check this claim at a = 0.05.
D6_usage <- sum(data$D6[-124])
tot_usage <- sum (data$D1, data$D2,data$D3, data$D4,data$D5,data$D6,
data$D7,data$D8,data$D9, data$D10,data$D11)
p_usage <- D6_usage/tot_usage
sums <- lapply(data[-124,5:15], sum)
tot_usage <- sum(unlist(sums))
prop <- sums[["D6"]] / tot_usage
test <- prop.test(x = 4295, n = 26830, p = .15, alternative = "greater", conf.level = 0.95)
test
#############################
#Question 3: JAT believes that over the years, the average number of app users have
#increased significantly. Is there statistical evidence to support that the average
#number of users in year 2017-2018 is more than average number of users in year
#2015-2016 at at=0.05? Support your answer with all necessary tests.
##################################
## Spliting the data into two(2015-16, 2017-2018)
data2 <- data[1:73,]
data3 <- data[74:123,]
## Assigning a dummy column in each and clubbing these two data
data2$dummy <- "range_1"
data3$dummy <- "range_2"
newdata <- rbind(data2, data3)
names(newdata)
## Assuming equql varience
t.test (newdata$No.of.users~newdata$dummy, mu = 0, alternative = "two.sided", conf.level = 0.95, paired = FALSE, var.eq = TRUE)
## Assuming unequql varience
t.test (newdata$No.of.users~newdata$dummy, mu = 0, alternative = "two.sided", conf.level = 0.95, paired = FALSE, var.eq = FALSE)
##The critical value for a=0.05 with 121 degrees of freedom is 1.65. Since the calculated statistic value is greater than critical value, we reject the null hypothesis and accept the claim that there is statistically significant difference between the users base of the years 2017-2018 and 2015-2016.
##Question 4 #######################
## Farmers use apps to access information throughout the month. Using the data, check whether
## app usage is same or different across the four weeks of a month. Anand claims that app usage picked up after January 2016; so, test this hypothesis using data from January-2016 - May 2018.
## (FIX: the two lines above were bare, uncommented prose in the original file
##  and broke parsing; they are now comments.)
##creating data-subset (2016-2018; rows 26:123 assumed to start at Jan-2016 -- confirm)
data5 <- data [26:123,]
## Test of Normality (the data is not normal)
shapiro.test(data5$Usage)
qqnorm(data5$Usage)
## converting the data using log transformation and checking the normality
data5$logusage <- log(data5$Usage)
shapiro.test(data5$logusage)
## using log transformed variables
## (NOTE: this local `anova` shadows stats::anova in the session.)
anova <- aov (data5$logusage~data5$Week)
model.tables(anova, type = "means")
summary(anova)
##One-way ANOVA output shows that p-value = 0.326 > 0.05 and F-stat =1.16, which is less than F critical. Hence, we fail to reject the null hypothesis; and hence we conclude that there is no statistically significant difference in the app usage across various weeks.
## using original variable
anova1 <- aov (data5$Usage~data5$Week)
model.tables(anova1, type = "means")
##Question-5
##Anand claims that the number of users has increased over a period of two years. He wants to understand if his app usage (number of times his app is accessed in a month by various users) has increased with the increased number of users. Prove this claim statistically. Also suggest a suitable statistical test to prove that the correlation between users and usage is non-zero.
cor <- cor.test(data5$No.of.users, data5$Usage)
cor
##This shows a very high correlation between the number of users enrolled and the usage of the app; hence with the increase in the number of users, the usage also increased.
#Based on the above test, we can prove that users and usage have a correlation is higher and can never be zero.
##################################
##Question 6: A new version of the app was released in August 2016. Anand wants to know which month in the given time frame after the launch of new version, the mean usage pattern would start to show a statistically significant shift.
##draw the line chart and see the spike
# NOTE(review): the "%m-%d-%y" format must match how Month.Year is stored in
# the sheet; confirm, otherwise rdate will be all NA.
rdate <- as.Date(data$Month.Year,"%m-%d-%y")
plot(data$Usage~rdate, type ="l", col = "red")
#creating a subset (split at row 61/62, assumed to be the October-2016 boundary -- confirm)
data6 <- data[1:61,]
data7 <- data[62:123,]
data6$dummy <- "before"
data7$dummy <- "after"
newdata1 <- rbind(data6, data7)
t.test (newdata1$Usage~newdata1$dummy, mu = 0, alternative = "two.sided", conf.level = 0.95, paired = FALSE, var.eq = TRUE)
##The calculated t-statistic is more than the critical value of t; and thus, we reject the null hypothesis and conclude that the app usage is statistically significantly different before and after October 2016. So, after the new release in August 2016, the usage of JAT's app increased, starting from October 2016.
#Question 7: If a disease is likely to spread in particular weather condition (data given in the disease index sheet), then the access of that disease should be more in the months having suitable weather conditions. Help the analyst in coming up with a statistical test to support the claim for two districts for which the sample of weather and disease access data is provided in the data sheet. Mention the diseases for which you can support this claim. Test this claim both for temperature and relative humidity at 95% confidence.
bdata <- read.csv ("clipboard", sep = "\t", header = TRUE)
names(bdata)
# Favourable window: temperature <= 24 and relative humidity >= 80
# (thresholds assumed to come from the disease index sheet -- confirm).
bdata1 <- subset(bdata,Temperature <= 24 & Humidity >= 80)
bdata2 <- subset(bdata,!(Temperature <= 24 & Humidity >= 80))
bdata1$weather <- "favourable"
bdata2$weather <- "unfavourable"
newbdata <- rbind (bdata1, bdata2)
##t-test 1 (RUNNING T TEST USING DATA USING THE DISEASE INDEX OF D1)
#NOTE: PLEASE USE THE INDEX OF DIFFERENT DISESES TO MAKE THE GROUP (FAVOURABLE VS. UNFAVOURABLE )
test1 <- t.test (newbdata$D1~newbdata$weather, mu = 0, alternative = "two.sided", conf.level = 0.95, paired = FALSE, var.eq = TRUE)
test2 <- t.test (newbdata$D2~newbdata$weather, mu = 0, alternative = "two.sided", conf.level = 0.95, paired = FALSE, var.eq = TRUE)
test3 <- t.test (newbdata$D3~newbdata$weather, mu = 0, alternative = "two.sided", conf.level = 0.95, paired = FALSE, var.eq = TRUE)
test4 <- t.test (newbdata$D4~newbdata$weather, mu = 0, alternative = "two.sided", conf.level = 0.95, paired = FALSE, var.eq = TRUE)
test5 <- t.test (newbdata$D5~newbdata$weather, mu = 0, alternative = "two.sided", conf.level = 0.95, paired = FALSE, var.eq = TRUE)
test6 <- t.test (newbdata$D7~newbdata$weather, mu = 0, alternative = "two.sided", conf.level = 0.95, paired = FALSE, var.eq = TRUE)
test1
# FIX: the original wrote `test2 (## NOT CORRECT)` etc., which R parsed as a
# nested call chain test2(test3(test4(...))) and broke the file; print each
# result and keep the analyst's annotations as trailing comments.
test2 ## NOT CORRECT
test3 ## NOT CORRECT
test4 ## NOT CORRECT
test5 ## NOT CORRECT
test6 ## NOT CORRECT
############################################################
## SIMILAR TO THE PREVIOUS ONE, RUN ANALYSIS FOR DHARWAD DISTRICT
## (FIX: this line was bare prose in the original and broke parsing.)
############################################################
1ae7c2dc506edfb6bb6b69c85fe9f20bb5f562b3 | e270b17fa8f5ee5eb94866f6ce786893f17e8082 | /pages/stocks.R | 04781ec623dd7393625442ff0433413ce87833c1 | [] | no_license | casestudyassignment/WILProject1 | 7722dfacc1f3bb31a39c4119bc8bd37290bc0a6f | 13b41e89134a73b0ec36c076f3479f0d7e8a5923 | refs/heads/master | 2022-12-24T15:51:28.465737 | 2020-09-29T16:50:07 | 2020-09-29T16:50:07 | 285,493,991 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,459 | r | stocks.R | # STOCKS UI PAGE (VISUALISATION & MODELLING)
library(shinydashboard)
library(shiny)
tab_stocks_visualisation <- tabItem(tabName = "dataVisStocks",
tabsetPanel(
tabPanel("Data Viewer",
class = "data_viewer",
inputPanel(selectInput("SelectedStockTable",
label="Select one",
choices=c("Overall Stocks",
"BTC Stock",
"SYD Stock",
"CWN Stock",
"ASX Stock",
"UBER Stock",
"CSL Stock"))),
fluidPage(
conditionalPanel(
condition = "input.SelectedStockTable == 'Overall Stocks'",
fluidRow(box(title = "Overall Stocks",
width = 12,
status = "primary",
solidHeader = TRUE,
collapsible = TRUE,
dataTableOutput('tableStockOverall'))
)
),
conditionalPanel(
condition = "input.SelectedStockTable == 'BTC Stock'",
fluidRow(box(title = "BTC Company Stock",
width = 12,
status = "primary",
solidHeader = TRUE,
collapsible = TRUE,
dataTableOutput('tableStockBTC'))
)
),
conditionalPanel(
condition = "input.SelectedStockPlot == 'SYD Stock' ||
input.SelectedStockPlot == 'CWN Stock' ||
input.SelectedStockPlot == 'ASX Stock' ||
input.SelectedStockPlot == 'UBER Stock'||
input.SelectedStockPlot == 'CSL Stock'",
fluidRow(uiOutput("otherStockTableBox"))
)
)
),
tabPanel("Data Visualisation",
inputPanel(selectInput("SelectedStockPlot",
label="Select one",
choices=c("BTC Stock",
"SYD Stock",
"CWN Stock",
"ASX Stock",
"UBER Stock",
"CSL Stock"))),
fluidPage(
conditionalPanel(
condition = "input.SelectedStockPlot == 'BTC Stock'",
fluidRow(box(title = "BTC Stocks Plot",
width = 12,
status = "primary",
solidHeader = TRUE,
collapsible = TRUE,
plotOutput('plotStockBTC'))
)
),
conditionalPanel(
condition = "input.SelectedStockPlot == 'SYD Stock' ||
input.SelectedStockPlot == 'CWN Stock' ||
input.SelectedStockPlot == 'ASX Stock' ||
input.SelectedStockPlot == 'UBER Stock'||
input.SelectedStockPlot == 'CSL Stock'",
fluidRow(uiOutput("otherStockPlotBox1")),
#fluidRow(uiOutput("otherStockPlotBox2")),
fluidRow(uiOutput("otherStockPlotBox3"))
)
)
)
)
)
tab_stocks_modelling <- tabItem(tabName = "dataModelStocks",
inputPanel(selectInput("SelectedStockPrediction",
label="Select one",
choices=c("SYD Stocks",
"CWN Stock",
"ASX Stock",
"UBER Stock",
"CSL Stock"))),
fluidPage(
conditionalPanel(
condition = "input.SelectedStockPrediction != ''",
fluidRow(uiOutput("predictionStockPlotBox"))
)
)
) |
151296d009566e3247d7e245b1f69e54109e235d | 5e6ac7a2cda8eba04c6aaaa96f5d26b63e8b2391 | /run_analysis.R | 1366027cfca3e4276fb7d88e89b362904b50b510 | [] | no_license | Danielle1105/Course-Project | 3481fba7ee02b09bc365a93606447bf757502c1f | 718980c884f9684f5d93d7b31c9e0a87273dd673 | refs/heads/master | 2020-05-29T18:34:59.766169 | 2014-09-21T15:23:44 | 2014-09-21T15:23:44 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,392 | r | run_analysis.R | > x_test<-read.table("x_test.txt")
> x_train<-read.table("x_train.txt")
> x<-rbind(x_test,x_train)
> features<-read.table("features.txt")
> names(x)<-features[,2]
> y_test<-read.table("y_test.txt")
> y_train<-read.table("y_train.txt")
> y<-rbind(y_test,y_train)
> names(y)<-c("activity")
> subject_test<-read.table("subject_test.txt")
> subject_train<-read.table("subject_train.txt")
> subject<-rbind(subject_test,subject_train)
> names(subject)<-c("subject")
> data<-cbind(subjet,y,x)
> data2<-grep("mean",names(data),fixed=TRUE)
> data3<-grep("std",names(data),fixed=TRUE)
> newdata<-data[,c(1,2,data2,data3)
> newdata2<-grep("meanFreq",names(newdata),fixed=TRUE)
> newdata3<-newdata[,-newdata2]
> sub(1,"Walking",fixed=FALSE)
> sub(1,"Walking",newdata3[,2],fixed=FALSE)
> sub(2,"Walking_upstairs",newdata3[,2],fixed=FALSE)
> sub(3,"Walking_downstairs",newdata3[,2],fixed=FALSE)
> sub(4,"Sitting",newdata3[,2],fixed=FALSE)
> sub(5,"Standing",newdata3[,2],fixed=FALSE)
> sub(6,"Laying",newdata3[,2],fixed=FALSE)
> install.packages("dplyr")
> library("dplyr")
> groupdata<-group_by(newdata3,subject,activity)
> mean_groupdata<-summarise_each(groupdata,funs(mean))
> write.table(mean_groupdata,"Course project_data")
|
00ed9e43f13505a2143eb9382111524863971923 | e73cfd17823a4a7766ed21ad715df27a15658385 | /Myscripts/ggplot2/HW/4.R | 95a7ceaf7ecb46c688b4e00d1d58aea69df926dd | [] | no_license | kanishkd4/R_learning_code | 546b133f66ec63894ce844e91f3eefe7e3113563 | 53cac10180ecd34f80d42951e8be7c5e1d95608b | refs/heads/master | 2020-12-25T11:15:16.931186 | 2016-09-29T08:13:01 | 2016-09-29T08:13:01 | 61,795,112 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,118 | r | 4.R | ?plotmath
?ggplot
library(ggplot2)
library("scales")
p <- ggplot(diamonds, aes(carat, price, colour = cut))
p <- p + layer(geom = "point")
p
P <- p + layer(geom = "bar", geom_params = list(fill = "steelblue"), stat = "bin",
stat_params = list(binwidth = 2) # stat = "bin" didn't work for me
)
P
?ggplot
ggplot(diamonds, aes(carat, price, color = cut)) + layer(geom = "point")
?aes
ggplot(diamonds, aes(carat)) + layer(geom_histogram(binwidth = 2, fill = "steelblue"))
# All the shortcut functions have the same basic form, beginning with geom_ or stat_:
# geom_XXX(mapping, data, ..., geom, position)
# stat_XXX(mapping, data, ..., stat, position)
?geom_histogram
geom_histogram(data = aes(carat), binwidth = 2, fill = "steelblue")# does nothing
ggplot(data = msleep, aes(x = sleep_rem/sleep_total, y = awake)) + geom_point()
# equivalent to
qplot(x = sleep_rem/sleep_total, y = awake, data = msleep)
#layers can be added to qplot too
qplot(x = sleep_rem/sleep_total, y = awake, data = msleep) + geom_smooth()
#equivalent to
qplot(x = sleep_rem/sleep_total, y = awake, data = msleep, geom = c("point", "smooth"))
#equivalent to
ggplot(data = msleep, aes(x = sleep_rem/sleep_total, y = awake)) + geom_point() + geom_smooth()
p <- ggplot(data = msleep, aes(x = sleep_rem, y = awake))
summary(p)
p <- p + geom_point()
summary(P)
bestfit <- geom_smooth(method = "lm", se = F, colour = alpha("steelblue", 0.5), size = 2)
qplot(x = sleep_rem, y = sleep_total, data = msleep) + bestfit
qplot(x = awake, y = brainwt, data = msleep, log = "y") + bestfit
qplot(x = awake, y = brainwt, data = msleep, log = "xy") + bestfit
?qplot
p <- ggplot(mtcars)
summary(p)
p <- p + aes(wt, hp)
summary(p)
?geom_smooth
p + geom_point()
?pairs
ggplot(diamonds, aes(x=carat)) + geom_histogram(binwidth=.25, fill="steelblue")
install.packages("ggvis")
library(ggvis)
diamonds %>% ggvis(~carat) %>% layer_histograms(width=.25, fill:="steelblue")
p + geom_point(colour = "darkblue")# this sets the colour to darkblue
p + geom_point(aes(colour = "darkblue")) # this maps the colour to darkblue
# mapping the colour creates a new variable containing only the value "darkblue" and then
# maps colour to that variable
# grouping
?interaction
library(nlme) #needed for Oxboys
# group is set to the interaction of discrete variables
# this often partitions data correctly but sometimes it fails for discrete variables
# cannot group when there isn't a discrete variable
Oxboys
View(Oxboys)
;names(Oxboys)
p <- ggplot(data = Oxboys, aes(age, height, group = Subject)) + geom_line()
p
ggplot(data = Oxboys, aes(age, height)) + geom_line()# when we leave out group
#the grouped plot shows one line per subject. His height as he ages
# DIFFERENT GROUPS ON DIFFERENT LAYERS
p + geom_smooth(aes(group = Subject), method = "lm", se = F)
# this adds a smoothed line for each boy/subject. a best fit for each boy
p + geom_smooth(aes(group = 1), method = "lm", se = F, size = 2)
# the above code uses group = 1 to create a smoothed line for all boys. One best fit for all
# OVERRIDING DEFAULT GROUPING
?aes
boysbox <- ggplot(data = Oxboys, aes(Occasion, height)) + geom_boxplot()
boysbox
# default grouping works because occasion is a discrete variable
# to overlay individual trajectories, we can override default grouping for that layer with'
# aes(group = Subject)
boysbox + geom_line(aes(group = Subject), colour = "#3366FF")
# Line colour is different to make it distinct from box plot
# MATCHING AESTHETICS TO GRAPHIC OBJECTS
# note how aesthetics of individual obs are mappe to the aesthetics of the complete entity
# page 62 of pdf - Geoms, stats and default stats with geoms
ggplot(diamonds, aes(carat)) + geom_histogram(binwidth = 0.1)
ggplot(diamonds, aes(carat)) + geom_histogram(aes(y = ..density..), binwidth = 0.1)
# names of generated variables must be surrounded with .. when used; useful if the original
# data also has a variable of the same name
# can also be plotted using qplot
qplot(carat, ..density.., data = diamonds, geom = "histogram", binwidth = 0.1)
# position adjustments - page 65 in pdf used by position =
# PUTTING IT TOGETHER
# combining geoms and stats
d <- ggplot(diamonds, aes(carat)) + xlim(0, 3)
d + stat_bin(aes(ymax = ..count..), binwidth = 0.1, geom = "area")
d + stat_bin(aes(size = ..density..), geom = "point", position = "identity")
d + stat_bin(aes(ymax = ..density..), geom = "point", position = "identity")
d + stat_bin(aes(y = 1, fill = ..count..), binwidth = 0.1, geom = "tile", position = "identity")
# xlim si used to determine max scale of x axis
# DISPLAYING PRECOMPUTED STATS
# data that has already been summarised can be used with stat_identity, which leaves the data unchanged
# VARYING AESTHETICS AND DATA
# different datasets can be plotted on different layers of the same plot
# a common example is supplementing the data with predictions from a model
Oxboys
head(Oxboys)
require(nlme, quiet = T, warn.conflicts = F)
model <- lme(height ~ age, data = Oxboys, # linear mixed effects model
random = ~ 1 + age | Subject)
oplot <- ggplot(Oxboys, aes(age, height, group = Subject)) + geom_line()
# next, we'll compare the predicted trajectories to the actual trajectories
# this is done by building a grid that contains all combinations of ages and subjects
# we add the predictions back into the model as a variable called height
age_grid <- seq(-1, 1, length = 10)
subjects <- unique(Oxboys$Subject)
preds <- expand.grid(age = age_grid, Subject = subjects)
preds$height <- predict(model, preds)
# once we have the predictions, we can display them along with the original data
# because we have used the same name as the original Oxboys dataset, and we want the same group aesthetic
# we don't need to specify any aestheitc, but only override the default aesthetic
oplot + geom_line(data = preds, colour = "#3366FF", size = 0.4)
# it captures the high level structure of the data but it is hard to see the details
# plots of longitudanal data are often called spaghetti plots
# another way to compare the model to the data is to look at residuals
# we add predictions from the model to the original data(fitted), calculate residuals(resid)
# and add the residuals as well
Oxboys$fitted <- predict(model)
Oxboys$resid <- with(Oxboys, fitted - height)
oplot %+% Oxboys + aes(y = resid) + geom_smooth(aes(group = 1))
# %+% is to update the default data
# the smooth line shows that the residuals are not random, showing a deficiency in the model
# we add a quadratic term, refit the model, recalculate predictions and residuals, and replot
model2 <- update(model, height ~ age + I(age ^ 2))
Oxboys$fitted2 <- predict(model2)
Oxboys$resid2 <- with(Oxboys, fitted2 - height)
oplot %+% Oxboys + aes(y = resid2) + geom_smooth(aes(group = 1))
# modifying plot object is quite easy
# we updated the data and replotted twice without needing to reinitilise oplot. |
17268170ab2880b51f1523cb83210365698b6d7f | 0a906cf8b1b7da2aea87de958e3662870df49727 | /gjam/inst/testfiles/tnormRcpp/libFuzzer_tnormRcpp/tnormRcpp_valgrind_files/1610046524-test.R | b0f001367f7e187cbebadc899d0db69fd6dd17d9 | [] | no_license | akhikolla/updated-only-Issues | a85c887f0e1aae8a8dc358717d55b21678d04660 | 7d74489dfc7ddfec3955ae7891f15e920cad2e0c | refs/heads/master | 2023-04-13T08:22:15.699449 | 2021-04-21T16:25:35 | 2021-04-21T16:25:35 | 360,232,775 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 146 | r | 1610046524-test.R | testlist <- list(hi = 0, lo = 1.39079479304311e-320, mu = 0, sig = 4.53801546776667e+279)
result <- do.call(gjam:::tnormRcpp,testlist)
str(result) |
5e5fe7f4b1ed7ed92cd51a36abfb0474d5c1e093 | 0d4dc2dacd77b5f3307ad552bf6ddaa26c3daa4f | /nrfr.R | 93b0f952a3cfaccc63212527765a6df8e718cfd7 | [
"MIT"
] | permissive | MGrauLeguia/rdn-fr | 9925033ce226e6df9510e64c0dfac8f4674777d1 | 5515796ea619e391a6ab2f36ec79b5e677379db0 | refs/heads/master | 2020-07-06T22:40:07.629431 | 2019-10-01T09:46:45 | 2019-10-01T09:46:45 | 203,160,255 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,116 | r | nrfr.R | # net reconstruction via feature ranking
library(randomForest) # for feature ranking with Random Forest
library(CORElearn) # for feature ranking with RReliefF
library(pROC) # for calculating the area under ROC curve
# read single trajectory from a CSV file fname, where:
# - each column corresponds to a network node
# - rows correspond to consequtive time points
read_trajectory = function(fname) {
return(read.csv(fname, header = FALSE, sep = ","))
}
# read the network adjacency matrix from a CSV file fname
read_adjacency_matrix = function(fname) {
return(as.matrix(read.csv(fname, header=F)))
}
# from trajectories ts to a data set for performing regression and calculating feature ranking
ts2ds = function(ts) {
tslen = nrow(ts)
nnodes = ncol(ts)
ds = cbind(ts[1:(tslen - 1), ], ts[2:tslen, ])
colnames(ds) = c(paste0("x", 1:nnodes), paste0("y", 1:nnodes))
return(ds)
}
# the core procedure
# takes the observed trajectories ts in the network nodes as input
# calculates feature ranking for each node and collects the scores in Fmatrix
# returns Fmatrix, which corresponds to matrix F from formula (6) in the article
#
# argument method selects a method for calculating the feature ranking "rf" or "relief"
# argument rseed set the seed value for the pseudo random generator
nrfr = function(ts, method = "rf", rseed = 42) {
set.seed(rseed)
nnodes = ncol(ts)
ds = ts2ds(ts)
xs = paste0("x", 1:nnodes)
Fmatrix = matrix(0, nrow = nnodes, ncol = nnodes)
if (method == "rf") {
for (node in 1:nnodes) {
print(sprintf("Calculating RF ranking for node %d", node))
f = reformulate(xs, paste0("y", node))
rfmodel = randomForest(f, data = ds, ntree = 1000, mtry = floor(sqrt(nnodes)), importance = TRUE)
Fmatrix[node,] = rfmodel$importance[, 1]
}
}
if (method == "relief") {
for (node in 1:nnodes) {
print(sprintf("Calculating RRELIEFF ranking for node %d", node))
f = reformulate(xs, paste0("y", node))
Fmatrix[node,] = attrEval(f, data = ds, estimator = "RReliefFequalK", kNearestEqual = 10)
}
}
return(Fmatrix)
}
# example of use
# read in the trajectories (in rows) for the network nodes (in columns)
ts = read_trajectory("ts1str025N25.csv")
# read in the adjecency matrix for the "true" network
A = read_adjacency_matrix("A1str025N25.csv")
# reconstruct the network from the trajectories using the Randorm Forest method
Frf = nrfr(ts, method = "rf")
# compare the reconstructed with the "true" network using the area under the ROC curve (see the "Measuring reconstruction quality" subsection of the Section 2 of the article)
auc = roc(as.factor(A), as.vector(Frf), levels = c(0, 1), direction = "<")$auc
print(auc)
# reconstruct the network from the trajectories using the RELIEF method
Frelief = nrfr(ts, method = "relief")
# compare the reconstructed with the "true" network using the area under the ROC curve
auc = roc(as.factor(A), as.vector(Frelief), levels = c(0, 1), direction = "<")$auc
print(auc) |
821e152d4e34000516645501f6937f75b405c4af | 29bd7ec43e40263e62535f51169d95082d11832f | /man/pct_dom1_group.Rd | e03e776003fb5614a382f81265d1ad4f37627a00 | [] | no_license | esocid/Benthos | 171ab922b751f5b2d03caa5e36aedc239f7894d0 | 60ba98f6935ee70998e6061ca0891f32dc0a8d5d | refs/heads/master | 2021-06-18T18:28:10.526456 | 2017-06-22T16:18:44 | 2017-06-22T16:18:44 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 716 | rd | pct_dom1_group.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/group_metrics.R
\name{pct_dom1_group}
\alias{pct_dom1_group}
\title{The percent of the most dominant group}
\usage{
pct_dom1_group(long.df, master.df, Group, taxa.rank)
}
\arguments{
\item{long.df}{Long data frame format of taxonomic counts.}
\item{master.df}{Taxonomic attributes table.}
\item{Group}{The taxonomic group to be assessed}
\item{taxa.rank}{The taxonomic taxa.rank used during the assessment.}
}
\value{
The percentage of taxa representing by the most dominant (abundant)
group. Typically, this function is used to assess the functional feeding
groups and habits.
}
\description{
The percent of the most dominant group
}
|
82c4f4b1dd2b9d69f3881020c36f61173f9dfbd4 | f5e1e7f34f8c3166b2e80404ff48c4432c8a9d3d | /genestructureID.R | 03b080f42b8f1caaa57f47cb5d2ba7ecfc91c2b4 | [] | no_license | jiangchb/SoybeanGDB | c0160bcc4ddbf95d4cac0ce008e725879c1586f9 | a8831d1366f6e731f496e01ab2a90fd9931421e8 | refs/heads/main | 2023-08-16T15:49:13.970483 | 2021-10-14T07:26:11 | 2021-10-14T07:26:11 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,061 | r | genestructureID.R | genestructureID <- function(chr="chr1", start=21418069, end=21434067, geneid = "SoyZH13_01G071600", gff = NULL){
start <- as.numeric(start)
end <- as.numeric(end)
gff.mrna <- gff[gff$type == "mRNA", ]
gff.reg.mrna <- gff.mrna[grep(geneid, gff.mrna$id), ]
gff.reg <- gff[gff$id %in% gff.reg.mrna$id, ]
gff.reg.mrna.ir <- IRanges::IRanges(gff.reg.mrna$start, gff.reg.mrna$end)
gff.reg.mrna.op <- GenomicRanges::findOverlaps(gff.reg.mrna.ir, GenomicRanges::reduce(gff.reg.mrna.ir))
gff.reg.mrna$grp <- S4Vectors::subjectHits(gff.reg.mrna.op)
gff.reg.mrna.1 <- gff.reg.mrna %>% group_by(grp) %>% mutate(y=row_number())
gff.reg <- merge(gff.reg, gff.reg.mrna.1[, c("id", "y")], by="id")
gff.reg$y <- gff.reg$y * 0.2 + 1
plot.mrna.lst <- lapply(unique(gff.reg$id), function(i){
dat <- gff.reg[gff.reg$id == i, ]
i.strand <- dat$strand[1]
dat.mrna <- dat[dat$type=="mRNA", ]
return(dat.mrna)
})
plot.mrna <- do.call(rbind, plot.mrna.lst)
p1 <- ggplot2::ggplot(plot.mrna) + ggplot2::geom_rect(ggplot2::aes(xmin=start, xmax=end, ymin=y+0.118, ymax=y+0.122,
text=anno),
color="grey30", fill="grey30")
plot.nm.lst <- lapply(unique(gff.reg$id), function(i){
dat <- gff.reg[gff.reg$id == i, ]
i.strand <- dat$strand[1]
dat.nm <- dat[dat$type!="mRNA", ]
dat.nm <- dat.nm[-nrow(dat.nm), ]
if (nrow(dat.nm)>0) {
dat.nm$ymin <- dat.nm$y+0.1
dat.nm$ymax <- dat.nm$y+0.14
dat.nm$ymin[dat.nm$type=="CDS"] <- dat.nm$ymin[dat.nm$type=="CDS"] - 0.02
dat.nm$ymax[dat.nm$type=="CDS"] <- dat.nm$ymax[dat.nm$type=="CDS"] + 0.02
}
return(dat.nm)
})
plot.nm <- do.call(rbind, plot.nm.lst)
if (nrow(plot.nm)>0) {
p1 <- p1 + ggplot2::geom_rect(ggplot2::aes(xmin=start, xmax=end, ymin=ymin, ymax=ymax, text=anno),
color="grey30", fill="grey30", data=plot.nm)
}
plot.tail.lst <- lapply(unique(gff.reg$id), function(i){
dat <- gff.reg[gff.reg$id == i, ]
i.strand <- dat$strand[1]
dat.nm <- dat[dat$type!="mRNA", ]
i.anno <- dat$anno[1]
i.id <- i
tail.type <- dat.nm$type[nrow(dat.nm)]
dat.tail <- data.frame(xx=rep(c(dat$start[nrow(dat)],
(dat$start[nrow(dat)] + dat$end[nrow(dat)])/2, dat$end[nrow(dat)]), each=2),
stringsAsFactors = FALSE)
if (i.strand == "-") {
dat.tail$yy <- c(0.12, 0.12, 0.1, 0.14, 0.1, 0.14) + dat$y[1]
dat.tail <- dat.tail[c(1,3,5,6,4,2), ]
dat.tail$pare <- i.id
dat.tail$anno <- i.anno
if (tail.type=="CDS") {
dat.tail$yy[2:3] <- dat.tail$yy[2:3] - 0.02
dat.tail$yy[4:5] <- dat.tail$yy[4:5] + 0.02
}
} else {
dat.tail$yy <- c(0.1, 0.14, 0.1, 0.14, 0.12, 0.12) + dat$y[1]
dat.tail <- dat.tail[c(1,3,5,6,4,2), ]
dat.tail$pare <- i.id
dat.tail$anno <- i.anno
if (tail.type=="CDS") {
dat.tail$yy[1:2] <- dat.tail$yy[1:2] - 0.02
dat.tail$yy[5:6] <- dat.tail$yy[5:6] + 0.02
}
}
dat.tail$id <- i.id
return(dat.tail)
})
plot.tail <- do.call(rbind, plot.tail.lst)
p1 <- p1 + ggplot2::geom_polygon(ggplot2::aes(x=xx, y=yy, group=id), color="grey30", fill="grey30",
data=plot.tail)
p1 <- p1 + ggplot2::theme(panel.grid.major = ggplot2::element_blank(),panel.grid.minor = ggplot2::element_blank()) +
ggplot2::theme(panel.background = ggplot2::element_rect(fill="white",colour="white")) + ggplot2::xlab(chr) + ggplot2::ylab("") +
ggplot2::theme(axis.ticks.y = ggplot2::element_blank()) + ggplot2::theme(axis.text.y = ggplot2::element_blank()) +
ggplot2::theme(axis.text = ggplot2::element_text(size=12), axis.title=ggplot2::element_text(size=14,face="bold"))
nnr <<- nrow(plot.mrna)
grid::grid.draw(ggplot2::ggplotGrob(p1))
} |
aa89effd2188927f31357074c3047625333ce665 | de7b5af9c415426ad5496ccf0b87f91b663cee29 | /man/subset_withAttributes.Rd | 19a8524273553c956c1d222c56fc33ce43694d9b | [
"MIT"
] | permissive | rgriffier/statsBordeaux | bb465057eab0b41e6d1515f02cfd13adb8179ac8 | ded4292fb43e2a959058267a1f707d25505c0b7d | refs/heads/master | 2021-09-06T15:02:07.420504 | 2021-08-03T13:56:13 | 2021-08-03T13:56:13 | 178,895,755 | 2 | 0 | null | 2020-09-28T14:14:45 | 2019-04-01T15:45:33 | R | UTF-8 | R | false | true | 866 | rd | subset_withAttributes.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/statistic_functions.R
\name{subset_withAttributes}
\alias{subset_withAttributes}
\title{Subset data keeping attributes}
\usage{
subset_withAttributes(data, subset)
}
\arguments{
\item{data}{a data.frame}
\item{subset}{a logical vector}
}
\value{
a data.frame with all the attributes
}
\description{
Allows to subset data and keeping all the attributes of the data
}
\examples{
data(mtcars)
mtcars$nSpeed <- c(5, 6, 5, "NA", "NA", "NA", "NA", "NA", "NA", "NA",
"NA", "NA", "NA", "NA", "NA", "NA", "NA", 6, 6, 6,
"NA", "NA", "NA", "NA", "NA", 5, 5, 6, 5, 6, 6, 5)
attributes(mtcars[, "nSpeed"])$var_label <- "Number of speed in case of manual transmission"
subset <- subset_withAttributes(mtcars, mtcars$nSpeed != "NA")
attributes(subset[, "nSpeed"])
}
|
318899c580e16f8907df4fcae864c133f9de5e91 | 23f331debdc1d0c244686b22852720cf14c83801 | /jeff/getBioVolCatData.R | e2945a2d874df4920cfd45528ecc696a91822ebf | [] | no_license | willbmisled/cyanoLakes | 96ceb15e3d81a6ca869a70a08d13eb08d188ee07 | 345fc108ca5df96333a518ae819cd3dd80420ffb | refs/heads/master | 2021-01-18T08:12:05.803687 | 2018-07-26T14:46:41 | 2018-07-26T14:46:41 | 18,803,295 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,456 | r | getBioVolCatData.R | #get the Biovolume data
load('L:/Public/Milstead_Lakes/RData/cyanoBioVolume2014-03-11.rda')
#Data definition LbioV (bioVolume based on Lester's aggregated Cyano volumes)
#'data.frame': 1148 obs. of 3 variables:
# SITE_ID : chr "NLA06608-0001" "NLA06608-0002" : NLA ID
# sumLbioV: num 172 260935 6959269 63955 8220 ...: sum of the Cyanobacteria Biovolumes (Vol*Abund)
# bvCat : Ord.factor w/ 3 levels "LOW"<"MED"<"HIGH": Low<=Q1 High>=Q3
#NOTE: 84 lakes with one or more missing Volume estimates sumLbioV and bcCat==NA
# 33 lakes with no cyano (abund==0) sumLbioV==0 and bcCat=='LOW'
names(LbioV)[1]<-'NLA_ID'
#get the NLA wq data
load("L:/Public/Milstead_Lakes/RData/NLA_Chla_Data_20140116.rda")
#Data Definitions Lakes n=1151 (with some missing values)
#NLA_ID: NLA ID assigned to each site
#AlbersX: (m) ESRI USA Contiguous Albers Equal Area Conic X coordinate in from National_LakePoly.shp
#AlbersY: (m) ESRI USA Contiguous Albers Equal Area Conic Y coordinate in from National_LakePoly.shp
#LakeArea: (km2) Lake Area from attribute table of from National_LakePoly.shp
#LakePerim: (km) Lake Perimeter from attribute table of from National_LakePoly.shp
#ShoreDevel: Shoreline development index from attribute table of from National_LakePoly.shp
#DATE_COL: Date of site visit
#WSA_ECO9 : Wadeable Streams Assessment Aggregated Ecoregion
#CPL Coastal Plains
#NAP Northern Appalachians
#NPL Northern Plains
#SAP Southern Appalachians
#SPL Southern Plains
#TPL Temporate Plains
#UMW Upper Midwest
#WMT Western Mountains
#XER Xeric
#BASINAREA: (km2) Area of lake basin (upstream area) from attribute table of from National_LakePoly.shp
#DEPTHMAX: (m) Maximum Observed Lake Depth
#ELEV_PT: (m) Site elevation from the National Elevation Dataset
#CHLA: (?g/L) Chlorophyll a concentration.
#DO2_2M: (mg/L) MEAN DO2 CONC IN UPPER 2m (or UPPER 50% IF DEPTH < 4m)
#PH_FIELD: Field pH from Profile DO data (pH measured at first non-zero depth unless only depth was zero)
#COND: Conductivity (uS/cm @ 25 C)
#ANC: Gran ANC (ueq/L)
#TURB: Turbidity (NTU)
#TOC: Total Organic Carbon (mg/L)
#DOC: Dissolved Organic Carbon (mg/L)
#NH4: Ammonium (ueq/L)
#NO3_NO2: Nitrate + Nitrite by Flow Injection Analysis (mg N/L)
#NTL: Total Nitrogen (ug/L)
#PTL: Total Phosphorus (ug/L)
#CL: Chloride (ueq/L)
#NO3: Nitrate (ueq/L)
#SO4: Sulfate (ueq/L)
#CA: Calcium (ueq/L)
#MG: Magnesium (ueq/L)
#Na: Sodium (ueq/L)
#K: Potassium (ueq/L)
#COLOR: Color (PCU)
#SIO2: Silica (mg/L SiO2)
#H: H+ from PH_LAB (ueq/L)
#OH: Hydroxide from PH_LAB (ueq/L)
#NH4ION: Calculated NH4+ protolyte (ueq/L)
#CATSUM: Sum of Cations (ueq/L)
#ANSUM2: Sum of Anions using ANC (ueq/L)
#ANDEF2: Anion Deficit using ANC [C-A] (ueq/L)
#SOBC: Sum of Base Cations (ueq/L)
#BALANCE2: Ion Balance using ANC (%)
#ORGION: Est. Organic Anion (ueq/L)
#CONCAL2: Calculated Conductivity w/ANC (uS/cm)
#CONDHO2: D-H-O Calc. Cond. w/ANC (uS/cm)
#SECMEAN: Secchi transparency (m)(=avg. of disk disappearance and reappearance depths)
#TminW: (degrees C) minimum water temperature observed for depths <=1m (8 missing values)
#TmaxW: (degrees C) maximum water temperature observed for depths <=1m (8 missing values)
#TmeanW: (degrees C) mean water temperature for depths <=1m (8 missing values)
#DDs40 Single Sine Method used to Calculate Degree Days with a lower threshold of 40 degrees F
#DDs45 Single Sine Method used to Calculate Degree Days with a lower threshold of 45 degrees F
#DDs50 Single Sine Method used to Calculate Degree Days with a lower threshold of 50 degrees F
#DDs55 Single Sine Method used to Calculate Degree Days with a lower threshold of 55 degrees F
# MaxLength : num (m) the maximum distance on the lake surface between any two points on the shore line.
# MaxWidth : num (m) The maximum distance between the shores perpendicular to the line of maximum length.
# MeanWidth : num (m) the surface area divided by the maximum length.
# FetchN : num (m) max N to S length of lake surface area without land interruption that wind can act on.
# FetchNE : num (m) max NE to SW length of lake surface area without land interruption that wind can act on.
# FetchE : num (m) max E to W length of lake surface area without land interruption that wind can act on.
# FetchSE : num (m) max SE to NW length of lake surface area without land interruption that wind can act on.
# MaxDepthCorrect : num (m) Max estimated depth-See Hollister et al 2011
# VolumeCorrect : num (m3) Estimated Volume
# MeanDepthCorrect: num (m) VolumeCorrect/SurfaceArea; based on corrected maximum depth
# TS_NTL : chr Trophic State Based on NTL;Oligo <=350; Meso >350 & <=750;Eu >750 & <=1400;Hyper >1400
# TS_PTL : chr Trophic State Based on PTL;Oligo <=10; Meso >10 & <=25;Eu >25 & <=50;Hyper >50
# TS_CHLA : chr Trophic State Based on CHLA;Oligo <=2; Meso >2 & <=7;Eu >7 & <=30;Hyper >30
Lakes$TS_NTL<-factor(Lakes$TS_NTL,levels=c('Oligo','Meso','Eu','Hyper'),ordered=T)
Lakes$TS_PTL<-factor(Lakes$TS_PTL,levels=c('Oligo','Meso','Eu','Hyper'),ordered=T)
Lakes$TS_CHLA<-factor(Lakes$TS_CHLA,levels=c('Oligo','Meso','Eu','Hyper'),ordered=T)
bioV<-merge(LbioV,Lakes,by='NLA_ID',all=F)
str(bioV)
with(bioV,table(bvCat,TS_CHLA))
LOW<-subset(bioV,bioV$bvCat=="LOW")
MED<-subset(bioV,bioV$bvCat=="MED")
HIGH<-subset(bioV,bioV$bvCat=="HIGH")
|
7af3ca32e3bfa3959bc178129af0a59f10a89418 | 91350b5c91092fa867240342bd29272d6487b335 | /R/namedata.r | bd562ebdb5f8bbd047cd308204565e53cb368845 | [
"CC-BY-4.0",
"MIT",
"CC-BY-3.0",
"LicenseRef-scancode-public-domain"
] | permissive | LittlePenguinPenguin/globalnamedata | 6ed2991b384bfbc5540f6482cecbdbc01d172e92 | 64c8de74c30127e4b4ae2ec94a9a04bdf9e41694 | refs/heads/master | 2021-01-20T23:18:06.749370 | 2015-08-11T21:26:13 | 2015-08-11T21:26:13 | 40,331,750 | 0 | 0 | null | 2015-08-06T23:42:47 | 2015-08-06T23:42:46 | null | UTF-8 | R | false | false | 501 | r | namedata.r | #' Open Data for names and gender mapping
#'
#' namedata provides a set of tools for gathering name/gender
#' mappings from publicly available data. Functions to process and classify
#' gender by incidence are provided as are open datasets from the United
#' States and the United Kingdom.
#'
#' Pre and postprocessed data is available via attached datasets.
#' Simple functions for classification by gender are provided as well.
#'
#' @import plyr gdata
#' @docType package
#' @name namedata
NULL
|
f7f82903378c9d03d91bfcc9f5d3930cef84b65b | 6fb04083c9d4ee38349fc04f499a4bf83f6b32c9 | /tests/upstream/test_ks.test.R | 8c93cc5f4f1d38c1e59383e707827ef02306a6ef | [] | no_license | phani-srikar/AdapteR | 39c6995853198f01d17a85ac60f319de47637f89 | 81c481df487f3cbb3d5d8b3787441ba1f8a96580 | refs/heads/master | 2020-08-09T10:33:28.096123 | 2017-09-07T09:39:25 | 2017-09-07T09:39:25 | 214,069,176 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,290 | r | test_ks.test.R | # Problems with data.name,
# p.value error computed is wrong.
# Kolmogorov-Smirnov (KS) Test 1S
sqlstr <- paste0("SELECT * FROM tblKSTest WHERE GroupID = 1")
t <- sqlQuery(connection, sqlstr)
Renv = new.env(parent = globalenv())
Renv$a <- t$NUM_VAL
FLenv = as.FL(Renv)
test_that("Kolmogorov-Smirnov Test 1S: Testing DBLytix Example ",{
result = eval_expect_equal({
res_exact <- ks.test(a,'pnorm', 3.5, 11.5,exact=TRUE)
res_nonexact <- ks.test(a,'pnorm', 3.5, 11.5,exact=FALSE)
},Renv,FLenv,
expectation = c("q"),
check.attributes=F,
tolerance = .0001,
verbose = T
)
}
)
# Kolmogorov-Smirnov (KS) Test 1S
# gives p.value as > .25
set.seed(250)
Renv = new.env(parent = globalenv())
Renv$x <- rnorm(10, 1, 2)
FLenv <- as.FL(Renv)
test_that("Kolmogorov-Smirnov Test 1s:", {
result = eval_expect_equal({
res_nonexact <- ks.test(x, 'pnorm', 1, 2, exact = FALSE)
res_exact <- ks.test(x, 'pnorm', 1, 2, exact = TRUE)
},Renv,FLenv,
expectation = c("res_exact","res_nonexact"),
check.attributes = T,
tolerance = .1,
verbose = FALSE
)
})
## Kolmogorov-Smirnov (KS) Test 2S
set.seed(100)
Renv = new.env(parent = globalenv())
Renv$p <- rnorm(50)
Renv$q <- runif(30)
FLenv = as.FL(Renv)
test_that("Kolmogorov-Smirnov Test 2S, exact",{
result = eval_expect_equal({
a <- ks.test(p, q, exact = TRUE)
},Renv,FLenv,
expectation = c("a"),
check.attributes=F,
tolerance = .0001,
verbose = FALSE
)
}
)
# Kolmogorov-Smirnov (KS) Test 2S
sqlstr <- paste0("SELECT * FROM tblKSTest")
mt <- sqlQuery(connection, sqlstr)
Renv = new.env(parent = globalenv())
Renv$m <- mt$NUM_VAL[mt$GROUPID == 1]
Renv$n <- mt$NUM_VAL[mt$GROUPID == 2]
FLenv = as.FL(Renv)
test_that("Kolmogorov-Smirnov Test 2S, exact -- DBLytix Example ",{
result = eval_expect_equal({
a <- ks.test(m,n)
},Renv,FLenv,
expectation = c("a"),
check.attributes= T,
tolerance = .1,
verbose = FALSE
)
}
)
|
69e0a23a0b96f8c323fafff3d240f7115ed103d6 | 81753f3a4b5b0030fe96577f23c9dcd6e8267924 | /paper/data/wilcox_file_0006_cost.r | fdf818943d04360c19b9456bc71180b5ad1abaa7 | [] | no_license | gecko655/yamamori-mthesis | a48992c479c43f7fc11a3a85062913d087a0a2dc | b9eba558437f995137517dfdab2ecbd0e7255970 | refs/heads/master | 2021-01-15T10:37:14.162553 | 2016-01-25T07:15:01 | 2016-01-25T07:15:01 | 47,237,921 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 750 | r | wilcox_file_0006_cost.r | #Precision
coref_p=c(0.165243532,0.168612714,0.195634953,0.172589677,0.179265574,0.20864691,0.236549587,0.218925127,0.298376125)
cochg_p=c(0.0914587,0.10220134,0.175369193,0.149735927,0.200903947)
#Recall
coref_r=c(0.212995052,0.16113769,0.142661197,0.109901232,0.085838967,0.062706637,0.048061674,0.035831078,0.021271574)
cochg_r=c(0.062694088,0.04565677,0.031777639,0.020246173,0.010537297)
#F-measure
coref_f=c(0.186105047,0.164790477,0.165000498,0.134289759,0.116089838,0.096431731,0.079891211,0.061582981,0.039712032)
cochg_f=c(0.074392683,0.063117039,0.053805495,0.035669397,0.02002433)
wilcox.test(coref_p,cochg_p,alternative='greater')
wilcox.test(coref_r,cochg_r,alternative='greater')
wilcox.test(coref_f,cochg_f,alternative='greater')
|
a476c3e55a70f162dc43475742e7dc60655e81b0 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/MFPCA/examples/screeplot.MFPCAfit.Rd.R | dd87ea47ad22145c7137e1bfd358acd6ae09cc1b | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 805 | r | screeplot.MFPCAfit.Rd.R | library(MFPCA)
### Name: screeplot.MFPCAfit
### Title: Screeplot for Multivariate Functional Principal Component
### Analysis
### Aliases: screeplot.MFPCAfit
### ** Examples
# Simulate multivariate functional data on one-dimensonal domains
# and calculate MFPCA (cf. MFPCA help)
set.seed(1)
# simulate data (one-dimensional domains)
sim <- simMultiFunData(type = "split", argvals = list(seq(0,1,0.01), seq(-0.5,0.5,0.02)),
M = 5, eFunType = "Poly", eValType = "linear", N = 100)
# MFPCA based on univariate FPCA
PCA <- MFPCA(sim$simData, M = 5, uniExpansions = list(list(type = "uFPCA"),
list(type = "uFPCA")))
# screeplot
screeplot(PCA) # default options
screeplot(PCA, npcs = 3, type = "barplot", main= "Screeplot")
|
f90a7bd06c272a7fdc824decaa7c3aa5d93533ff | 32694467865205579b98f15bf738d88c19fb954d | /tests/testthat/test_version_response.R | ebdcfc60962a92758f572520429064a3b9273180 | [] | no_license | vjcitn/terraClientR | ee0dc11c00b8d707d023d93776637b5622c189b6 | 85ab30d88da3b4c3da9e36a9b2f9dbc7ab5f237a | refs/heads/main | 2023-06-05T04:42:36.218619 | 2021-06-29T15:43:36 | 2021-06-29T15:43:36 | 381,414,881 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 416 | r | test_version_response.R | # Automatically generated by openapi-generator (https://openapi-generator.tech)
# Please update as you see appropriate
context("Test VersionResponse")
model.instance <- VersionResponse$new()
test_that("cromwell", {
# tests for the property `cromwell` (character)
# The version of the Cromwell Engine
# uncomment below to test the property
#expect_equal(model.instance$`cromwell`, "EXPECTED_RESULT")
})
|
ec4ea3ef8e1a7df8ae842b4b3eaeb103d51eb20e | c9b151232ad188a38469473ec765c0f7a1defe7c | /man/smoothfun.Rd | 1174ab66a07d888093c6da90101f389c4869182d | [] | no_license | obreschkow/cooltools | 3b2c46ac539962153c3a9aa8fbeaeee185455015 | 3b212d077537220aec5b8162f04ed85f7f0af996 | refs/heads/main | 2023-08-15T02:14:07.742064 | 2023-07-24T08:47:36 | 2023-07-24T08:47:36 | 184,692,943 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,541 | rd | smoothfun.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/smoothfun.R
\name{smoothfun}
\alias{smoothfun}
\title{Smoothed Function}
\usage{
smoothfun(x, y = NULL, w = NULL, df = NULL, ...)
}
\arguments{
\item{x}{a vector giving the values of the predictor variable, or a list or a two-column matrix specifying x and y.}
\item{y}{responses. If y is missing or NULL, the responses are assumed to be specified by x, with x the index vector.}
\item{w}{optional vector of weights of the same length as x; defaults to all 1.}
\item{df}{the desired equivalent number of degrees of freedom. Must be in [2,nx], where nx is the number of unique x values. If not given, nx is set to the square root of the number of unique x-values.}
\item{...}{additional optional arguments used by \code{\link[stats]{smooth.spline}}.}
}
\value{
Returns a fast and vectorized smoothed function f(x).
}
\description{
Generates a cubic smoothed spline function y=f(x) approximating supplied (x,y)-data with a custom number of degrees of freedom. The routines builds on \code{\link[stats]{smooth.spline}}, but directly returns the smoothed function rather than the fitted model.
}
\examples{
# make random data set
set.seed(1)
x = runif(100)
y = sin(2*pi*x)+rnorm(100, sd=0.5)
plot(x,y,pch=16)
# smoothed spline
f = smoothfun(x, y)
curve(f, add=TRUE, col='red')
# smoothed spline with custom degree of freedom
g = smoothfun(x, y, df=5)
curve(g, add=TRUE, col='blue')
}
\seealso{
\code{\link[stats]{smooth.spline}}
}
\author{
Danail Obreschkow
}
|
c9dbc9a2a340b7d1717bf26971dc1e137849c719 | 9309ad6799efec5f820b0f9921b2e2450da3a78e | /man/Lextale-package.Rd | 62ccccb6b0353cf0dc104a89bc6e363fde7d8409 | [
"MIT"
] | permissive | Ghozayel/Lextale | 6c6a6f730dbadfe3f1a71e37d864ae0e72836ce4 | b92214cb1a9c38fbef7033cb9ff9d3ec29811ccc | refs/heads/master | 2023-06-10T02:27:12.435039 | 2023-05-28T10:36:56 | 2023-05-28T10:36:56 | 525,854,071 | 3 | 3 | MIT | 2022-10-03T16:06:35 | 2022-08-17T15:32:07 | R | UTF-8 | R | false | true | 754 | rd | Lextale-package.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Lextale-package.R
\docType{package}
\name{Lextale-package}
\alias{Lextale}
\alias{Lextale-package}
\title{Lextale: A Package to calculate scoring for LexTALE-test, English, German and Dutch versions.}
\description{
This package calculates the scoring for the English LexTALE-test if administered with the downloads using implementations that do not end with participants' score on the screen, e.g. online surveys.
}
\seealso{
Useful links:
\itemize{
\item \url{https://github.com/Ghozayel/Lextale}
\item Report bugs at \url{https://github.com/Ghozayel/Lextale/issues}
}
}
\author{
\strong{Maintainer}: Ghozayel Elotteebi \email{gzyl4work@gmail.com}
}
\keyword{internal}
|
8c8dd2a3fba4fa92a6e2c692750da7bf616be365 | 12f63547182b536a68a7dff027207f1bbef6b01f | /tests/testthat/test-team1.R | 9dadb5e77610d2ac1d4d09d487b3443adb9c26cf | [] | no_license | SPI-Birds/lorentz_hackathon | 2f4fbd106d709d54a07c602315a7bfd2db0f0d15 | 2d1d13d1655a781df2431b065400d5b6bcc4689f | refs/heads/main | 2023-08-13T12:26:50.551292 | 2021-09-09T13:54:25 | 2021-09-09T13:54:25 | 403,632,489 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 459 | r | test-team1.R | test_that("team1 function works as expected...", {
#Ask for a file with capture data
message("Please link to capture data")
capture_data <- read.csv(file.choose(), header = TRUE, sep = ",")
#Filter it to just be 1 year for testing
input_data <- capture_data %>%
dplyr::filter(BreedingSeason == 2014)
#Function should return NULL (i.e. return(invisible()))
expect_equal(team1_survfunc(input_data, file = "testfile.inp"), NULL)
}) |
3a3a0615b9feeb29c15ccf8960646ea308a5712a | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/RandomFields/examples/RMmodelsMultivariate.Rd.R | bca3c3dab738c442dce1d2429ac6ff6006a811c4 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,254 | r | RMmodelsMultivariate.Rd.R | library(RandomFields)
### Name: RMmodelsMultivariate
### Title: Multivariate models
### Aliases: RMmodelsMultivariate 'Multivariate RMmodels'
### Keywords: spatial
### ** Examples
## Don't show:
StartExample()
## End(Don't show)
RFoptions(seed=0) ## *ANY* simulation will have the random seed 0; set
## RFoptions(seed=NA) to make them all random again
n <- 100
# Random locations on a 50 x 50 square
x <- runif(n=n, min=1, max=50)
y <- runif(n=n, min=1, max=50)
# 2x2 cross-correlation matrix for the bivariate model
rho <- matrix(nc=2, c(1, -0.8, -0.8, 1))
model <- RMparswmX(nudiag=c(0.5, 0.5), rho=rho)
## generation of artificial data
dta <- RFsimulate(model = model, x=x, y=y, grid=FALSE)
## Don't show:
if (!interactive()) .dataorig <- dta
## End(Don't show)
## introducing some NAs ...
dta@data$variable1[1:10] <- NA
if (interactive()) dta@data$variable2[90:100] <- NA
## Don't show:
if (!interactive()) {print("no NAs introduced"); dta <- .dataorig}
## End(Don't show)
plot(dta)
## co-kriging
x <- y <- seq(0, 50, 1)
## Don't show:
if (!interactive()) x <- y <- seq(0, 5, 1)
## End(Don't show)
k <- RFinterpolate(model, x=x, y=y, data= dta)
plot(k, dta)
## conditional simulation
z <- RFsimulate(model, x=x, y=y, data= dta) ## takes a while
plot(z, dta)
## Don't show:
FinalizeExample()
## End(Don't show)
5bfb7aecd936b91211e1b3ca700dec5cd7633b75 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/pooling/examples/test_pe.Rd.R | 469d3a964fddd5425ecffdf6f606c58829342834 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 555 | r | test_pe.Rd.R | library(pooling)
### Name: test_pe
### Title: Test for Underestimated Processing Error Variance in Pooling
### Studies
### Aliases: test_pe
### ** Examples
# Generate data for hypothetical study designed assuming sigsq_p = 0.1, but
# truly sigsq_p = 0.25. Have data collected for 40 pools of size 5, and wish
# to test H0: sigsq_p <= 0.1. In this instance, a false negative occurs.
set.seed(123)  # fixed seed so the "false negative" outcome is reproducible
# Each observation: mean of a pool of 5 plus processing noise (sd = 0.5)
xtilde <- replicate(n = 40, expr = mean(rnorm(5)) + rnorm(n = 1, sd = sqrt(0.25)))
(fit <- test_pe(xtilde = xtilde, g = 5, sigsq = 1, sigsq_m = 0))
|
bd5090f9f568514390f28f03bee78c1bf63735dc | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/numbersBR/examples/format.Rd.R | 0bf451431064f48f7025909b22aede042ef824d5 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 294 | r | format.Rd.R | library(numbersBR)
### Name: format.CNPJ
### Title: Format numbers
### Aliases: format.CNPJ format.CPF format.RENAVAN format
### ** Examples
# Format each Brazilian registry number; the second call with "stripped"
# presumably returns the digits without punctuation -- see ?format.CNPJ.
x <- CNPJ(66670000100)
format(x)
format(x, "stripped")
x <- CPF(1239157673)
format(x)
format(x, "stripped")
x <- RENAVAN("68194359406")
format(x)
|
d7b26eeb4ea1b118ef7d18a72905fef313dad9ea | 4025bf67ebc1ab8e065deba65a8a3f482d1c3453 | /man/changes.Rd | a8b0866a0be83b6a28cd9df89393842093706360 | [
"CC0-1.0"
] | permissive | chgrl/rdnb | 644ecdf571640bb73825341696b13a265a6a0e8b | 88cf054e84c909964eb79e3c14de357a098491ab | refs/heads/master | 2023-02-21T01:34:59.698186 | 2023-02-17T15:00:58 | 2023-02-17T15:00:58 | 51,138,778 | 2 | 2 | NOASSERTION | 2021-04-17T21:53:09 | 2016-02-05T09:55:33 | R | UTF-8 | R | false | true | 364 | rd | changes.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/zzz.R
\name{changes}
\alias{changes}
\title{View changes notes.}
\usage{
changes(pkg = "rdnb")
}
\arguments{
\item{pkg}{Set to the default "rdnb". Other packages make no sense.}
}
\description{
\code{changes} brings up the NEWS file of the package.
}
\examples{
\dontrun{
changes()
}
}
|
a0a47a52ecc3df3ccc2949975d1ffa8e6694c710 | 81ad6ec0e3a465e30ab4c9e1c067c0ac502587d2 | /3.MoreModelDevelopment.r | 78c622b8faf2521271ea291fa56c3879bcfcdda4 | [] | no_license | SrAriza/Prediction-sentiment | d726bf168cc3ca473680e9b3fd6ee93a58ef1029 | ceaba300951a11de07d57f5a667f028810257517 | refs/heads/master | 2020-07-22T21:15:34.906564 | 2019-09-25T14:45:15 | 2019-09-25T14:45:15 | 207,330,369 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,549 | r | 3.MoreModelDevelopment.r | #######################
#Sentiment prediction
#
#By Julian Ariza
#
######################
#Libraries
library(doParallel)
library(caret)
library(ggplot2)
library(dplyr)
library(plotly)
library(e1071)
library(randomForest)
library(ROSE)
#Loading the data (pre-built feature matrices with a sentiment label column)
iphonedata <- readRDS("iphone_dataframe.rds")
galaxydata <- readRDS("galaxy_dataframe.rds")
# Change data types: the sentiment labels must be factors for classification
iphonedata$iphonesentiment <- as.factor(iphonedata$iphonesentiment)
galaxydata$galaxysentiment <- as.factor(galaxydata$galaxysentiment)
#Create a new dataset that will be used for recoding sentiment
#(copies keep the original data frames untouched)
iphoneRC <- iphonedata
galaxyRC <- galaxydata
## Recode sentiment to combine factor levels:
## the six levels (VN/N/SN vs SP/P/VP) are collapsed into binary N / P
iphoneRC$iphonesentiment <- recode(iphoneRC$iphonesentiment,
"VN" = "N",
"N" = "N",
"SN" = "N",
"VP" = "P",
"P" = "P",
"SP" = "P")
galaxyRC$galaxysentiment <- recode(galaxyRC$galaxysentiment,
"VN" = "N",
"N" = "N",
"SN" = "N",
"VP" = "P",
"P" = "P",
"SP" = "P")
## Change dependent variable data type (re-assert factor after recode)
iphoneRC$iphonesentiment <- as.factor(iphoneRC$iphonesentiment)
galaxyRC$galaxysentiment <- as.factor(galaxyRC$galaxysentiment)
# Sampling: rebalance the class distributions with ROSE::ovun.sample()
# iphone undersampling (majority class sampled down toward p = 0.5)
set.seed(4635)
iphonedata.under <- ovun.sample(iphonesentiment~.,
                                data = iphoneRC,
                                p = 0.5,
                                seed = 1,
                                method = "under")$data
# Inspect the class balance after resampling.
# BUG FIX: dplyr::count() cannot be applied to a bare factor inside
# summarise() -- it expects a data frame -- so the original call errored.
# n() gives the per-group row count.
iphonedata.under %>%
  group_by(iphonesentiment) %>%
  summarise(n = n())
## galaxy oversampling (minority class sampled up toward p = 0.5)
set.seed(2345)
galaxydata.over <- ovun.sample(galaxysentiment~.,
                               data = galaxyRC,
                               p = 0.5,
                               seed = 1,
                               method = "over")$data
galaxydata.over %>%
  group_by(galaxysentiment) %>%
  summarise(n = n())
# Core Selection
# Find how many cores are on your machine
detectCores()
# Create cluster with 3 cores and register it so caret can train in
# parallel. NOTE(review): remember to call stopCluster(cl) when done.
cl <- makeCluster(3)
registerDoParallel(cl)
#Principal Component Analysis
#Center/scale the predictors and project onto enough principal components
#to retain 95% of the variance (thresh = 0.95).
# iphone
# Column 14 is excluded here and re-attached below, so it is presumably the
# dependent variable (iphonesentiment) -- confirm against the data frame layout.
preprocessParamsiphone <- preProcess(iphonedata.under[,-14],
method=c("center", "scale", "pca"),
thresh = 0.95)
print(preprocessParamsiphone)
# use predict to apply pca parameters, create training, exclude dependant
iphone.pca <- predict(preprocessParamsiphone, iphonedata.under[,-14])
# add the dependent to training
iphone.pca$iphonesentiment <- iphonedata.under$iphonesentiment
# inspect results
str(iphone.pca)
## galaxy (same pipeline, fit on the oversampled galaxy data)
preprocessParamsgalaxy <- preProcess(galaxydata.over[,-14],
method=c("center", "scale", "pca"),
thresh = 0.95)
print(preprocessParamsgalaxy)
# use predict to apply pca parameters, create training, exclude dependant
galaxy.pca <- predict(preprocessParamsgalaxy, galaxydata.over[,-14])
# add the dependent to training
galaxy.pca$galaxysentiment <- galaxydata.over$galaxysentiment
# inspect results
str(galaxy.pca)
### ---- Recursive Feature Elimination ----
## Set up rfeControl with randomforest, repeated cross validation and no updates
ctrl <- rfeControl(functions = rfFuncs,
method = "repeatedcv",
repeats = 5,
verbose = FALSE)
## Run rfe on the PCA scores, omitting the response column.
## NOTE(review): the column indices 1:7 (iphone) and 1:4 (galaxy) hard-code
## how many principal components preProcess produced at thresh = 0.95 --
## verify these against the str() output above if the data changes.
rfeResults1 <- rfe(iphone.pca[,1:7],
iphone.pca$iphonesentiment,
sizes = (1:7),
rfeControl = ctrl)
rfeResults2 <- rfe(galaxy.pca[,1:4],
galaxy.pca$galaxysentiment,
sizes = (1:4),
rfeControl = ctrl)
## Get results
rfeResults1
predictors(rfeResults1)
rfeResults2
predictors(rfeResults2)
## Plot results
plot(rfeResults1, type=c("g", "o"))
plot(rfeResults2, type=c("g", "o"))
## Create new data set with rfe recommended features
iphoneRFE <- iphone.pca[,predictors(rfeResults1)]
galaxyRFE <- galaxy.pca[,predictors(rfeResults2)]
## Add the dependent variable to iphoneRFE and galaxyRFE
iphoneRFE$iphonesentiment <- iphone.pca$iphonesentiment
galaxyRFE$galaxysentiment <- galaxy.pca$galaxysentiment
# Data Partition: 70/30 train/test split, stratified on the sentiment label
# (createDataPartition samples within each class of y).
# iphone data partition
intrain1 <- createDataPartition(y = iphoneRFE$iphonesentiment,
p = 0.7,
list = FALSE)
iphonetrain <- iphoneRFE[intrain1,]
iphonetest <- iphoneRFE[-intrain1,]
## galaxy data partition
intrain2 <- createDataPartition(y = galaxyRFE$galaxysentiment,
p = 0.7,
list = FALSE)
galaxytrain <- galaxyRFE[intrain2,]
galaxytest <- galaxyRFE[-intrain2,]
# Random Forest Modelization
# Repeated 10-fold cross-validation (2 repeats) shared by both models.
RFtrctrl <- trainControl(method = "repeatedcv",
                         number = 10,
                         repeats = 2)
# iphone
# BUG FIX: the argument was misspelled "tuneLenght"; caret::train()
# silently swallows unknown names via ..., so the requested tuning-grid
# size of 2 was never applied. The correct argument is tuneLength.
RFmodel1 <- train(iphonesentiment ~ .,
                  iphonetrain,
                  method = "rf",
                  trControl = RFtrctrl,
                  tuneLength = 2)
RFmodel1
plot(RFmodel1)
varImp(RFmodel1)
# Evaluate on the held-out iphone test split
predRFmodel1 <- predict(RFmodel1, iphonetest)
postResample(predRFmodel1, iphonetest$iphonesentiment) -> RFmodel1metrics
RFmodel1metrics
cmRFiphone <- confusionMatrix(predRFmodel1, iphonetest$iphonesentiment)
cmRFiphone
## galaxy (same setup as the iphone model)
RFmodel2 <- train(galaxysentiment ~ .,
                  galaxytrain,
                  method = "rf",
                  trControl = RFtrctrl,
                  tuneLength = 2)
RFmodel2
plot(RFmodel2)
varImp(RFmodel2)
# Evaluate on the held-out galaxy test split
predRFmodel2 <- predict(RFmodel2, galaxytest)
postResample(predRFmodel2, galaxytest$galaxysentiment) -> RFmodel2metrics
RFmodel2metrics
cmRFgalaxy <- confusionMatrix(predRFmodel2, galaxytest$galaxysentiment)
cmRFgalaxy
|
3297d68ee848670eaf188338c7131bd801d3002b | 9e8c1f5ac055cbbfd6aae73bca32885bdadcd491 | /Regression/SVM.R | 22ae9f343b5ecff1b0993766c9dc842d626279d0 | [] | no_license | hughbzhang/SIMR | 682c0a03720933210ed779d13f8b4640f3a83d1f | 9022b2114d139d41738f588ecdb5f951f03220c2 | refs/heads/master | 2021-06-11T20:11:40.144148 | 2016-04-03T07:14:45 | 2016-04-03T07:14:45 | 21,960,479 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,120 | r | SVM.R | #This is my code for using an SVM to predict the data
#SVM seems to overfit less than Random Forest
library(kernlab)
cur = all[sample(130),]
trainerr = 0
testerr = 0
for ( i in 1:10){
check = cur[(13*(i-1)+1):(13*i),]
attach(check)
#testx = cbind(V26,V3,V6,V4,V34,V41,V17,V21,V24,V15,V27,V7)
testx = cbind(V3,V4,V5,V6,V7,V8,V9,V10,V11,V12,V13,V14,V15,V16,V17,V18,V19,V20,V21,V22,V23,V24,V25,V26,V27,V28,V29,V30,V31,V32,V33,V34,V35,V36,V37,V38,V39,V40,V41,V42,V43)
testy = V1
detach(check)
train = cur[(-(26*(i-1)+1)):(-26*i),]
attach(train)
trainx = cbind(V3,V4,V5,V6,V7,V8,V9,V10,V11,V12,V13,V14,V15,V16,V17,V18,V19,V20,V21,V22,V23,V24,V25,V26,V27,V28,V29,V30,V31,V32,V33,V34,V35,V36,V37,V38,V39,V40,V41,V42,V43)
#trainx = cbind(V26,V3,V6,V4,V34,V41,V17,V21,V24,V15,V27,V7)
trainy = train[,1]
#.2 .5
# .05 2
detach(train)
model = ksvm(trainx,trainy,type = "C-svc",kernel = 'rbf',kpar = list(sigma=.05),C=2)
trainerr = trainerr + (sum(predict(model,trainx)!=trainy))/117
testerr = testerr + (sum(predict(model,testx)!=testy))/13
}
testerr = testerr/10
trainerr = trainerr/10 |
0fc4741b0c9b1cca99d1e792e03eb38c8861505e | 1709ead9cb5b0286bf8bc2125a33fa603a8bf7f2 | /man/build.y.Rd | a689e0877e37f357595a26fe2234374e28dcf517 | [] | no_license | jaredlander/useful | 92f26b088a19f62138bec71a4339ce4f1dcaaa31 | 1882e50b7494a7b5cdc8052f8491ed28630ccf98 | refs/heads/master | 2022-05-05T04:10:41.711760 | 2018-08-17T21:00:03 | 2018-08-17T21:00:03 | 2,523,193 | 5 | 5 | null | 2017-08-04T20:34:00 | 2011-10-06T00:31:58 | R | UTF-8 | R | false | true | 527 | rd | build.y.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/buildMatrix.r
\name{build.y}
\alias{build.y}
\title{build.y}
\usage{
build.y(formula, data)
}
\arguments{
\item{formula}{A formula}
\item{data}{A data.frame}
}
\value{
The y object from a formula and data
}
\description{
Build the y object from a formula and data
}
\details{
Given a formula and a data.frame build the y object
}
\examples{
require(ggplot2)
head(mpg)
head(build.y(hwy ~ class + cyl + year, data=mpg))
}
\author{
Jared P. Lander
}
|
355153f85244563be871d17b98ef4f05f7924612 | f3d9770b6dd5a21960215757074f279adb8339a2 | /man/getMarkDupsMetrics.Rd | 4f9e7f9162dc1e39f5dfafae1e9d158abb6651cb | [
"MIT"
] | permissive | dakl/clinseqr | 63f491457470db4991f5c984c9036f1936770dd4 | 28ef16998e84e472db27ede4a2f925c69b502e40 | refs/heads/master | 2016-09-05T12:12:42.714418 | 2015-11-02T13:15:19 | 2015-11-02T13:15:19 | 25,638,438 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 617 | rd | getMarkDupsMetrics.Rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/getMarkDupsMetrics.R
\name{getMarkDupsMetrics}
\alias{getMarkDupsMetrics}
\title{Get MarkDups Metrics}
\usage{
getMarkDupsMetrics(reports, src = "PANEL_MARKDUPS_METRICS")
}
\arguments{
\item{reports}{data frame with reports}
\item{src}{string with name of column name to use (from reports)}
}
\value{
A data table with metrics from markDuplicates
}
\description{
Get MarkDups Metrics
}
\examples{
#dat <- getMarkDupsMetrics(reports, src="PANEL_MARKDUPS_METRICS")
#dat <- getMarkDupsMetrics(reports, src="WGS_MARKDUPS_METRICS")
}
|
b6aad2e4616e0388a9a05393615c650f3557b220 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/openintro/examples/student.housing.Rd.R | ae7d84c231599d377ebaaf8420dc06d569e4dc5a | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 490 | r | student.housing.Rd.R | library(openintro)
### Name: student.housing
### Title: Community college housing (simulated data, 2015)
### Aliases: student.housing
### Keywords: datasets
### ** Examples
data(student.housing)
# Re-generate the simulated prices with the same seed the package used,
# so the identical() check at the bottom can confirm the packaged data
# matches this generator.
set.seed(5)
generate.student.housing <- data.frame(
price = round(rnorm(175, 515, 65) + exp(rnorm(175, 4.2, 1))))
hist(student.housing$price, 20)
t.test(student.housing$price)
mean(student.housing$price)
sd(student.housing$price)
identical(student.housing, generate.student.housing)
|
c27dbe55078c485e81f1b652923c5cccdb8007f9 | 7917fc0a7108a994bf39359385fb5728d189c182 | /cran/paws.database/man/elasticache_delete_global_replication_group.Rd | 971d4c41dc162ca302cdb60e71edf073dffa8016 | [
"Apache-2.0"
] | permissive | TWarczak/paws | b59300a5c41e374542a80aba223f84e1e2538bec | e70532e3e245286452e97e3286b5decce5c4eb90 | refs/heads/main | 2023-07-06T21:51:31.572720 | 2021-08-06T02:08:53 | 2021-08-06T02:08:53 | 396,131,582 | 1 | 0 | NOASSERTION | 2021-08-14T21:11:04 | 2021-08-14T21:11:04 | null | UTF-8 | R | false | true | 2,436 | rd | elasticache_delete_global_replication_group.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/elasticache_operations.R
\name{elasticache_delete_global_replication_group}
\alias{elasticache_delete_global_replication_group}
\title{Deleting a Global Datastore is a two-step process:}
\usage{
elasticache_delete_global_replication_group(GlobalReplicationGroupId,
RetainPrimaryReplicationGroup)
}
\arguments{
\item{GlobalReplicationGroupId}{[required] The name of the Global Datastore}
\item{RetainPrimaryReplicationGroup}{[required] The primary replication group is retained as a standalone replication
group.}
}
\value{
A list with the following syntax:\preformatted{list(
GlobalReplicationGroup = list(
GlobalReplicationGroupId = "string",
GlobalReplicationGroupDescription = "string",
Status = "string",
CacheNodeType = "string",
Engine = "string",
EngineVersion = "string",
Members = list(
list(
ReplicationGroupId = "string",
ReplicationGroupRegion = "string",
Role = "string",
AutomaticFailover = "enabled"|"disabled"|"enabling"|"disabling",
Status = "string"
)
),
ClusterEnabled = TRUE|FALSE,
GlobalNodeGroups = list(
list(
GlobalNodeGroupId = "string",
Slots = "string"
)
),
AuthTokenEnabled = TRUE|FALSE,
TransitEncryptionEnabled = TRUE|FALSE,
AtRestEncryptionEnabled = TRUE|FALSE,
ARN = "string"
)
)
}
}
\description{
Deleting a Global Datastore is a two-step process:
\itemize{
\item First, you must
\code{\link[=elasticache_disassociate_global_replication_group]{disassociate_global_replication_group}}
to remove the secondary clusters in the Global Datastore.
\item Once the Global Datastore contains only the primary cluster, you can
use DeleteGlobalReplicationGroup API to delete the Global Datastore
while retainining the primary cluster using Retain…= true.
}
Since the Global Datastore has only a primary cluster, you can delete
the Global Datastore while retaining the primary by setting
\code{RetainPrimaryCluster=true}.
When you receive a successful response from this operation, Amazon
ElastiCache immediately begins deleting the selected resources; you
cannot cancel or revert this operation.
}
\section{Request syntax}{
\preformatted{svc$delete_global_replication_group(
GlobalReplicationGroupId = "string",
RetainPrimaryReplicationGroup = TRUE|FALSE
)
}
}
\keyword{internal}
|
1c64b3821a0033ac3e722532f2bf90b13820d100 | fc0eb864d127a5487820d76e3121f1272769796a | /BigMart_Sales_Prediction_With_Dimentionality_Reduction/Dataset/Pradeep_Exploration_1.R | f13ffbb58654d640ac23aacc9d3834d8f99fc401 | [] | no_license | pradeepsathyamurthy/university_projects | 5d092d25fd83eaa607b8c13dfa86e509eaa6792d | 7f82a450eecfbdde7e4fb42818482229c77df256 | refs/heads/master | 2021-01-10T23:53:21.764024 | 2017-11-29T00:06:10 | 2017-11-29T00:06:10 | 70,092,710 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 19,594 | r | Pradeep_Exploration_1.R | ######################################################################################
# Author: Pradeep Sathyamurthy
# Date: 07-June-2017
# Course: CSC-433
# Guiding Prof: Prof. Steve Jost
# Project: Final Project Submission
# Train Dataset Name: mart_train.csv
# Test Dataset Name: mart_test.csv
######################################################################################
# Libraries imported for this analysis
require(ggplot2) # <- needed for graphing
require(rpart) # <- Needed for building decision tree
require(rattle) # <- Needed to make decision tree look neat
require(rpart.plot) # <- Needed to make decision tree look neat
require(RColorBrewer) # <- Needed to make decision tree look neat
require(caret) # <- Needed for data splitting
require(MASS) # <- Needed for Outlier and Influential points detection
require(car) # Needed for Multicolinearity
# Step-1: Reading the trianing dataset
setwd("C:/Users/prade/Documents/GitHub/university_projects/BigMart_Sales_Prediction_With_Dimentionality_Reduction")
data.mart.raw <- read.csv("Dataset/Mart_Train.csv")
head(data.mart.raw)
# Step-2: Researching the variables present
col_mart_name <- colnames(data.mart.raw) # <- Column names
col_mart_length <- length(col_mart_name) # <- There are 12 variables
# FIX: build the name/type summary in one vectorized call instead of
# growing a data frame with rbind() inside a loop (quadratic copying,
# plus a dummy "NULL" seed row that had to be stripped afterwards).
var_det <- data.frame(
  Var_Name = col_mart_name,
  Var_Type = vapply(data.mart.raw, function(col) class(col)[1], character(1)),
  row.names = NULL,
  stringsAsFactors = FALSE
)
plot_var_type <- data.frame(table(var_det$Var_Type))
barplot(plot_var_type$Freq, names.arg = plot_var_type$Var1, main = "Variable Type Distribution in Dataset")
print(var_det, row.names = FALSE)
# above summary says there are:
# 7 Factor Variables: Item_Identifier, Item_Fat_Content, Item_Type, Outlet_Identifier, Outlet_Size, Outlet_Location_Type, Outlet_Type
# 1 integer variable: Outlet_Establishment_Year
# 4 Numeric variables: Item_Weight, Item_Visibility, Item_MRP, Item_Outlet_Sales
# Step-3: Converting the object type based on their values
# From the data we could conclude to have Item_Identifier as a ID variable and Outlet_Establishment_Year as a factor
#data.mart.raw$Item_Identifier <- as.character(data.mart.raw$Item_Identifier)
data.mart.raw <- data.mart.raw[-c(1)] # drop the Item_Identifier ID column
head(data.mart.raw)
data.mart.raw$Outlet_Establishment_Year <- as.factor(data.mart.raw$Outlet_Establishment_Year)
summary(data.mart.raw)
col_mart_name <- colnames(data.mart.raw) # <- Column names
col_mart_length <- length(col_mart_name) # <- 11 variables remain after dropping the ID
# FIX: same vectorized summary as Step-2 instead of growing a data frame
# with rbind() inside a loop.
var_det <- data.frame(
  Var_Name = col_mart_name,
  Var_Type = vapply(data.mart.raw, function(col) class(col)[1], character(1)),
  row.names = NULL,
  stringsAsFactors = FALSE
)
plot_var_type <- data.frame(table(var_det$Var_Type))
barplot(plot_var_type$Freq, names.arg = plot_var_type$Var1, main = "Variable Type Distribution in Dataset")
print(var_det, row.names = FALSE)
# Step-4: Exploratory Data Analysis on factor variables
# After conversion below are factor variables:
# 1. Item_Fat_Content
# 2. Item_Type
# 3. Outlet_Identifier
# 4. Outlet_Size
# 5. Outlet_Location_Type
# 6. Outlet_Type
# 7. Outlet_Establishment_Year
# Let us plot these data to see the frequency of occurrence
data.frame(table(data.mart.raw$Item_Fat_Content))
plot(data.frame(table(data.mart.raw$Item_Fat_Content)), main="Frequency Distribution of Item_Fat_Content",xlab="Item_Fat_Content")
data.frame(table(data.mart.raw$Item_Type))
plot(data.frame(table(data.mart.raw$Item_Type)), main="Frequency Distribution of Item_Type",xlab="Item_Type")
data.frame(table(data.mart.raw$Outlet_Identifier))
plot(data.frame(table(data.mart.raw$Outlet_Identifier)), main="Frequency Distribution of Outlet_Identifier",xlab="Outlet_Identifier")
data.frame(table(data.mart.raw$Outlet_Size))
plot(data.frame(table(data.mart.raw$Outlet_Size)), main="Frequency Distribution of Outlet_Size",xlab="Outlet_Size")
data.frame(table(data.mart.raw$Outlet_Location_Type))
plot(data.frame(table(data.mart.raw$Outlet_Location_Type)), main="Frequency Distribution of Outlet_Location_Type",xlab="Outlet_Location_Type")
data.frame(table(data.mart.raw$Outlet_Type))
plot(data.frame(table(data.mart.raw$Outlet_Type)), main="Frequency Distribution of Outlet_Type",xlab="Outlet_Type")
data.frame(table(data.mart.raw$Outlet_Establishment_Year))
plot(data.frame(table(data.mart.raw$Outlet_Establishment_Year)), main="Frequency Distribution of Outlet_Establishment_Year",xlab="Outlet_Establishment_Year")
# Step-5: Exploratory Data Analysis on numerical variables
# After conversion below are numerical variables:
# 1. Item_Weight
# 2. Item_Visibility
# 3. Item_MRP
# 4. Item_Outlet_Sales
summary(data.mart.raw$Item_Weight)
hist(data.mart.raw$Item_Weight)
summary(data.mart.raw$Item_Visibility)
hist(data.mart.raw$Item_Visibility)
summary(data.mart.raw$Item_MRP)
hist(data.mart.raw$Item_MRP)
summary(data.mart.raw$Item_Outlet_Sales)
hist(data.mart.raw$Item_Outlet_Sales)
boxplot(data.mart.raw$Item_Outlet_Sales)
# Step-6: Treating the missing values
# From above exploratory analysis, we could see there is no normal distribution of data in both factor as well numerical variable
# So before we normalize them, we need to treat missing values
head(data.mart.raw)
# Treating factor variables: missingness shows up as an empty-string level
# (e.g. the blank slice in the Outlet_Size pie), not as NA.
pie(table((data.mart.raw$Item_Fat_Content)),main = "Analysis of Missing Values in Item_Fat_Content")
pie(table((data.mart.raw$Item_Type)),main = "Analysis of Missing Values in Item_Type")
pie(table((data.mart.raw$Outlet_Identifier)),main = "Analysis of Missing Values in Outlet_Identifier")
pie(table((data.mart.raw$Outlet_Establishment_Year)),main = "Analysis of Missing Values in Outlet_Establishment_Year")
pie(table((data.mart.raw$Outlet_Size)),main = "Analysis of Missing Values in Outlet_Size")
pie(table((data.mart.raw$Outlet_Location_Type)),main = "Analysis of Missing Values in Outlet_Location_Type")
pie(table((data.mart.raw$Outlet_Type)),main = "Analysis of Missing Values in Outlet_Type")
# Treating numerical variables: missingness here is genuine NA
pie(table(is.na(data.mart.raw$Item_Weight)),main = "Analysis of Missing Values in Item_Weight")
pie(table(is.na(data.mart.raw$Item_Visibility)),main = "Analysis of Missing Values in Item_Visibility")
pie(table(is.na(data.mart.raw$Item_MRP)),main = "Analysis of Missing Values in Item_MRP")
pie(table(is.na(data.mart.raw$Item_Outlet_Sales)),main = "Analysis of Missing Values in Item_Outlet_Sales")
# Step-6.1: Treating Outlet_Size, Creating split based on the missing values in column Outlet_Size
data.mart.raw.tree <- data.mart.raw
data.mart.raw.tree.test <- data.mart.raw.tree[data.mart.raw.tree$Outlet_Size=="",]
data.mart.raw.tree.train <- data.mart.raw.tree[data.mart.raw.tree$Outlet_Size!="",]
# Step-6.2: Imputing values for outlet_size using decision tree
head(data.mart.raw.tree.train)
#tree_treated <- rpart(y~age+job+marital+education+default+balance+housing+loan+contact+day+month+duration+campaign+pdays+previous+poutcome,data=TRAINING_TREATEDBANKPROJECTDATASET)
tree_treated <- rpart(Outlet_Size~Item_Weight+Item_Fat_Content+Item_Visibility+Item_Type+Item_MRP+Outlet_Identifier+Outlet_Establishment_Year+Outlet_Location_Type+Outlet_Type+Item_Outlet_Sales, data = data.mart.raw.tree.train)
summary(tree_treated)
# Plotting the tree ( it is better though)
plot(tree_treated, uniform=TRUE)
# Now creating the fancy part
fancyRpartPlot(tree_treated)
# We can do prediction as below
predict(tree_treated)
predict(tree_treated, type="class")
# Confusion matrix
table(data.mart.raw.tree.train$Outlet_Size, predict(tree_treated, type="class"), dnn=c("Actual","Predicted"))
# Testing the model with test datpredicted_treated_class1a set
# Loading the file to R
predicted_treated_class <- predict(tree_treated,data.mart.raw.tree.test,type="class")
table(data.mart.raw.tree.test$Outlet_Size,predicted_treated_class,dnn=c("Actual","Predicted"))
# Impute the missing Outlet_Size values in the test split from the outlet
# identity, using the mapping derived from the decision tree above:
# OUT018/OUT027/OUT049 -> "Medium", OUT013 -> "High", everything else -> "Small".
# Vectorised ifelse() replaces the original row-by-row loop; assigning a whole
# character vector also avoids the pitfall of writing character values
# element-wise into a (possibly factor) column. The column is re-factored
# later at Step-6.5, so a character column is safe here.
outlet_id <- as.character(data.mart.raw.tree.test$Outlet_Identifier)
data.mart.raw.tree.test$Outlet_Size <-
  ifelse(outlet_id %in% c("OUT018", "OUT027", "OUT049"), "Medium",
         ifelse(outlet_id == "OUT013", "High", "Small"))
# Sanity-check the imputed values, then stitch the train/test halves back
# together into a single treated data set.
tail(data.mart.raw.tree.test$Outlet_Size)
data.mart.raw.tree <- rbind(data.mart.raw.tree.train,data.mart.raw.tree.test)
tail(data.mart.raw.tree)
data.mart.raw.2 <- data.mart.raw.tree
# Step-6.3: Treating Item_Weight (work on a fresh copy).
data.mart.raw.3 <- data.mart.raw.2
tail(data.mart.raw.3)
# Mean-impute missing Item_Weight values. The summary shows mean and median
# are close, so filling with the mean is a reasonable choice here.
summary(data.mart.raw.3$Item_Weight)
# Vectorised replacement of the original row-wise loop:
# - is.na() already returns TRUE for NaN, and is.null() on a vector element
#   is always FALSE, so a single logical mask is equivalent to the old test;
# - the mean is computed once instead of on every missing row.
mean_item_weight <- mean(data.mart.raw.3$Item_Weight, na.rm = TRUE)
data.mart.raw.3$Item_Weight[is.na(data.mart.raw.3$Item_Weight)] <- mean_item_weight
summary(data.mart.raw.3$Item_Weight) # mean and median stay close after imputation
data.mart.treaded <- data.mart.raw.3
hist(data.mart.treaded$Item_Weight) # distribution remains roughly normal
# Step-6.4: Clean up the inconsistent Item_Fat_Content labels.
data.frame(table(data.mart.treaded$Item_Fat_Content))
plot(data.frame(table(data.mart.treaded$Item_Fat_Content)), main="Frequency Distribution of Item_Fat_Content",xlab="Item_Fat_Content")
data.mart.treaded$Item_Fat_Content <- as.character(data.mart.treaded$Item_Fat_Content)
# Vectorised recode replacing the original element-wise loop: the spelling
# variants "LF", "low fat" and "Low Fat" collapse to "Low_Fat"; every other
# label (e.g. "reg", "Regular") becomes "Regular" -- the same mapping the
# loop applied row by row.
data.mart.treaded$Item_Fat_Content <- ifelse(
  data.mart.treaded$Item_Fat_Content %in% c("LF", "low fat", "Low Fat"),
  "Low_Fat", "Regular")
# Step-6.5: Convert the treated character columns back to factors.
data.mart.treaded$Item_Fat_Content <- as.factor(data.mart.treaded$Item_Fat_Content)
data.mart.treaded$Outlet_Size <- factor(data.mart.treaded$Outlet_Size,levels=c("High", "Medium", "Small"))
# Step-7: Split the dataset into train and test for local validation.
# createDataPartition (caret) builds a stratified 80/20 random index.
idx <- createDataPartition(data.mart.treaded$Item_Weight, p=.80, list=FALSE)
print(idx[1:20])
# Rows in the index form the training set.
data.train <- data.mart.treaded[idx,]
head(data.mart.treaded)
# The remaining rows form the testing set.
data.test <- data.mart.treaded[-idx,]
head(data.test)
idx <- NULL
# Step-8: Exploratory data analysis on the training set.
# Factor variables: frequency tables and bar plots.
data.frame(table(data.train$Item_Fat_Content))
plot(data.frame(table(data.train$Item_Fat_Content)), main="Frequency Distribution of Item_Fat_Content",xlab="Item_Fat_Content")
data.frame(table(data.train$Item_Type))
plot(data.frame(table(data.train$Item_Type)), main="Frequency Distribution of Item_Type",xlab="Item_Type")
data.frame(table(data.train$Outlet_Identifier))
plot(data.frame(table(data.train$Outlet_Identifier)), main="Frequency Distribution of Outlet_Identifier",xlab="Outlet_Identifier")
data.frame(table(data.train$Outlet_Size))
plot(data.frame(table(data.train$Outlet_Size)), main="Frequency Distribution of Outlet_Size",xlab="Outlet_Size")
data.frame(table(data.train$Outlet_Location_Type))
plot(data.frame(table(data.train$Outlet_Location_Type)), main="Frequency Distribution of Outlet_Location_Type",xlab="Outlet_Location_Type")
data.frame(table(data.train$Outlet_Type))
plot(data.frame(table(data.train$Outlet_Type)), main="Frequency Distribution of Outlet_Type",xlab="Outlet_Type")
data.frame(table(data.train$Outlet_Establishment_Year))
plot(data.frame(table(data.train$Outlet_Establishment_Year)), main="Frequency Distribution of Outlet_Establishment_Year",xlab="Outlet_Establishment_Year")
# Numerical variables: summaries and histograms.
summary(data.train$Item_Weight)
hist(data.train$Item_Weight)
summary(data.train$Item_Visibility)
hist(data.train$Item_Visibility)
summary(data.train$Item_MRP)
hist(data.train$Item_MRP)
summary(data.train$Item_Outlet_Sales)
hist(data.train$Item_Outlet_Sales)
pie(table((data.train$Outlet_Size)),main = "Analysis of Missing Values in Outlet_Size")
pie(table(is.na(data.train$Item_Weight)),main = "Analysis of Missing Values in Item_Weight")
# Step-9: Inferences and hypothesis
# 1. Low-fat food is purchased more than regular-fat food.
# 2. Fruits & vegetables and snacks sell most; household, canned, dairy and baking goods have average sales; other categories sell less.
# 3. OUT010 and OUT019 have the lowest sales of all outlets.
# 4. Big Mart owns more small and medium sized outlets than high sized ones.
# 5. Outlets are located mostly in Tier 3 and Tier 2 locations rather than Tier 1.
# 6. Apart from 1997, sales are roughly constant across establishment years.
# 7. Item weight is normally distributed: products of all weights are stocked in equal proportion (not just wholesale).
# 8. Product visibility is right-skewed: most products get a small display area, and a visibility of 0 may indicate online-only products.
# 9. MRP is also roughly normally distributed: products from $31 to $266 are stocked in equal proportion, targeting all customer segments.
# 10. Total sales revenue is right-skewed: most outlets generate between $800 and $3000.
# Hypothesis: groceries such as fruit, vegetables and snacks with low fat content and minimal display visibility, sold in small and medium outlets in Tier 3 and Tier 2 regions, should sell comparatively well, excluding outlets OUT010 and OUT019.
# Step-10: Basic model building -- full linear model on all predictors.
model1 <- lm(Item_Outlet_Sales~Item_Fat_Content+Item_Type+Outlet_Identifier+Outlet_Establishment_Year+Outlet_Size+Outlet_Location_Type+Outlet_Type+Item_Weight+Item_Visibility+Item_MRP,data = data.train)
cor_var1 <- data.frame(data.train$Item_Weight,data.train$Item_Visibility,data.train$Item_MRP)
cor(cor_var1) # No significant correlation among the numerical variables
summary(model1) # <- model-1 explains 0.5657 of sales variance; Item_Fat_Content, Outlet_Identifier and Item_MRP are significant
# Item_Outlet_Sales ~ Item_Fat_Content + Outlet_Identifier + Item_MRP
# Step-11: Model building using backward stepwise selection (AIC-driven).
model2_stepwise <- step(model1, direction = "backward")
summary(model2_stepwise) # <- explains 0.566 of sales variance
# Item_Outlet_Sales ~ Outlet_Identifier + Item_MRP
# Step-12: Residual analysis -- diagnostic plots for the stepwise model.
par(mfrow=c(4,2))
par(mar = rep(2, 4))
plot(model2_stepwise)
sd(data.train$Item_Outlet_Sales)
residual <- rstandard(model2_stepwise)
hist(residual) # Residuals appear normally distributed
# Some heteroscedastic behaviour is visible in the residual plot, so try a transformation.
# Step-13: Transformation -- log-transform the dependent variable.
model3_transformed <- lm(log(Item_Outlet_Sales)~Item_Fat_Content+Item_Type+Outlet_Identifier+Outlet_Establishment_Year+Outlet_Size+Outlet_Location_Type+Outlet_Type+Item_Weight+Item_Visibility+Item_MRP,data = data.train)
summary(model3_transformed)
# Adj R^2 is 0.7241
par(mfrow=c(4,2))
par(mar = rep(2, 4))
plot(model3_transformed)
residual_af_tranformation <- rstandard(model3_transformed)
hist(residual_af_tranformation)
# Step-14: Outlier Check and Influential Point Check
# Studentized residuals flag observations the model fits unusually badly;
# |r| > 3 is the conventional outlier threshold.
n_sample_size <- nrow(data.train)
studentized.residuals <- studres(model3_transformed) # studres() comes from MASS -- assumed loaded upstream, TODO confirm
# which() on a vectorised mask replaces the original index-by-index scan and
# also skips any NA residuals (the old scalar `if` would error on NA); the
# printed output for flagged rows is unchanged.
for (i in which(abs(studentized.residuals) > 3)) {
  cat("Validate these values for outliers:::",studentized.residuals[i],"at observation",i,"\n")
}
# Influential points: hat values (leverage) above 2p/n deserve inspection.
hhat.model <- lm.influence(model3_transformed)$hat
n_sample_size <- nrow(data.train)
# NOTE(review): the conventional cutoff uses p = number of coefficients; the
# original added 1, making the cutoff slightly larger. Kept as-is so the same
# observations are reported.
p_beta <- length(model3_transformed$coefficients) +1
hhat.cutoff <- (2*p_beta)/n_sample_size
cat("Looking for values more than cut off::::",hhat.cutoff,"\n")
# which() on the vectorised comparison replaces the index-by-index scan;
# the printed output is unchanged.
for (i in which(hhat.model > hhat.cutoff)) {
  cat("Validate these values for Influential points:::",hhat.model[i],"at observation",i,"\n")
}
# Observation 831 is both an outlier and an influential point, so refit without it.
data.train.treated <- data.train[-c(831),]
model3_transformed_treated <- lm(log(Item_Outlet_Sales)~Item_Fat_Content+Item_Type+Outlet_Identifier+Outlet_Establishment_Year+Outlet_Size+Outlet_Location_Type+Outlet_Type+Item_Weight+Item_Visibility+Item_MRP,data = data.train.treated)
summary(model3_transformed_treated)
# Removing the outlier improves the adjusted R-squared significantly.
# Step-15: Model validation for multicollinearity
# vif(model3_transformed) # No aliased coefficient in the model
# Step-16: Computing the standardized coefficient
#data.train.std <- sapply(data.train[,],FUN=scale)
#data.train.std <- data.frame(data.train)
#model3_transformed.std <- lm(log(Item_Outlet_Sales)~Item_Fat_Content+Item_Type+Outlet_Identifier+Outlet_Establishment_Year+Outlet_Size+Outlet_Location_Type+Outlet_Type+Item_Weight+Item_Visibility+Item_MRP, data = data.train)
#summary(model3_transformed.std)
# Since most of the variables are factors, standardizing adds nothing here.
# Step-17: Model Validation on the hold-out set.
FINAL_MODEL <- lm(log(Item_Outlet_Sales) ~ Outlet_Identifier + Item_MRP, data = data.train)
final_summary <- summary(FINAL_MODEL); final_summary # adj r-square is 72.41%
str(data.test)
COUNT_PREDICTED <- predict(FINAL_MODEL,data.test)
plot(COUNT_PREDICTED,data.test$Item_Outlet_Sales,lwd=2, cex=2, col="red")
COUNT_PREDICTED_RE_TRANSFORMED <- exp(COUNT_PREDICTED)
# Bug fix: the original plotted data.test$count, a column that does not exist
# in this dataset (leftover from another project); with a NULL y, plot()
# silently falls back to plotting the predictions against their index.
# Compare the back-transformed predictions against the actual sales instead,
# matching the red plot above.
plot(COUNT_PREDICTED_RE_TRANSFORMED,data.test$Item_Outlet_Sales,lwd=2, cex=2, col="green")
abline(0,1,col='red', lwd=2)
# Step-18: Prediction
# Prediction and confidence intervals on the hold-out set. Both are produced
# on the log scale (the model response is log(Item_Outlet_Sales)) and then
# back-transformed with exp().
pred_Int <- predict(FINAL_MODEL,data.test,interval = "predict")
conf_Int <- predict(FINAL_MODEL,data.test,interval = "confidence")
converted_pred_int <- exp(pred_Int)
converted_conf_int <- exp(conf_Int)
# Column 1 is the point prediction; columns 2/3 are the lower/upper bounds.
data.test$predicted_count <- converted_pred_int[,1]
data.test$prediction_interval_low <- converted_pred_int[,2]
data.test$prediction_interval_high <- converted_pred_int[,3]
data.test$confidence_interval_low <- converted_conf_int[,2]
data.test$confidence_interval_high <- converted_conf_int[,3]
data.prediction.result <- data.frame(data.test$Item_Outlet_Sales,data.test$predicted_count,data.test$prediction_interval_low,data.test$prediction_interval_high,data.test$confidence_interval_low,data.test$confidence_interval_high)
View(data.prediction.result)
# Drop the helper columns again so data.test keeps its original shape.
data.test$predicted_count <- NULL
data.test$prediction_interval_low <- NULL
data.test$prediction_interval_high <- NULL
data.test$confidence_interval_low <- NULL
data.test$confidence_interval_high <- NULL
|
74b798b4a8f33d5a95798ba1c9ed3ced3217008a | 7d39123774a9a4b651d9a6f0cebf2a10ccb29288 | /man/footprint.Rd | 577a0925c75bf5178217ed6654a031194a486ffa | [
"MIT"
] | permissive | ThomasDCY/INFIMA | 3c858b6d5657373db58435d3dfff62ff2967bdc4 | f1ebe80eb906a6ddfd7b263aec0275347e6b6c69 | refs/heads/master | 2023-04-15T07:42:29.792045 | 2021-07-13T14:01:12 | 2021-07-13T14:01:12 | 258,241,256 | 1 | 1 | null | null | null | null | UTF-8 | R | false | true | 336 | rd | footprint.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_description.R
\docType{data}
\name{footprint}
\alias{footprint}
\title{footprint}
\format{a vector whose length equals the number of local-ATAC-MVs}
\description{
the footprint vector
}
\author{
Chenyang Dong \email{cdong@stat.wisc.edu}
}
|
a1ac346ce1a5e56adc4e70736a5426640313d644 | cb4d4f8cb3f16d8ca733772ee699088934236873 | /man/ridl_resource_patch.Rd | d0cea0c95265a37aea0ab63dda5d7c8e670c7114 | [
"MIT"
] | permissive | UNHCRmdl/ridl-1 | 9d59d22feca4184ac845f1ca6df9babf92bb40e0 | fb48ac36a5dfce43467bb50d66e1f7b491636454 | refs/heads/master | 2023-03-15T05:13:02.413607 | 2021-03-23T17:28:52 | 2021-03-23T17:28:52 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 586 | rd | ridl_resource_patch.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/resource.R
\name{ridl_resource_patch}
\alias{ridl_resource_patch}
\title{Patch a resource on RIDL}
\usage{
ridl_resource_patch(resource, file_path, dataset_id, configuration = NULL)
}
\arguments{
\item{resource}{RIDLResource, a resource object}
\item{file_path}{character, the path to the file to upload}
\item{dataset_id}{character, the id or the name of the RIDLDataset}
\item{configuration}{RIDLConfig, the configuration}
}
\value{
RIDLResource, the resource
}
\description{
Patch a resource on RIDL
}
|
e9bad22963e669be26701300bde640b059773355 | 52377bc6bf76d313ecfd32254b47aee9777599de | /shiny.R | 3e4e8ffab3ae21d0844c972dd611042013fb0fab | [] | no_license | shivapal/EDAproj | be4559453dad340f594bfdc6e688c688bd25381f | 87bb63a0bb57bf23b0fef80d77004f45b3c91dfc | refs/heads/master | 2020-04-25T20:03:48.674808 | 2019-03-28T20:00:41 | 2019-03-28T20:00:41 | 173,042,043 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,297 | r | shiny.R |
library(shiny)
library("dplyr")
library("ggmap")
library(maptools)
library(maps)
library(ggplot2)
# SECURITY NOTE(review): a Google Maps API key is hard-coded below. A key
# committed to source control should be revoked and loaded from an
# environment variable (e.g. Sys.getenv("GOOGLE_MAPS_KEY")) instead.
register_google(key = 'AIzaSyAzCWRarLpXhmd9Dx05XgXsdZetHXCVAog')
# UI: one select input choosing which flu-map comparison to display
# (values "all", "flu", "fluszn"), plus a caption and two image slots.
ui <- fluidPage(
titlePanel("Map"),
sidebarPanel(
selectInput("variable", "Compare Maps:",
c("CDC vs #flu & #fluszn"= "all", "CDC vs #flu" = "flu", "CDC vs #fluszn"= "fluszn"))),
mainPanel(
textOutput("caption"),
imageOutput("image1"),
imageOutput("image2")
)
)
# Server: renders the CDC reference map (image1), the comparison map matching
# the selected option (image2), and a caption.
# Bug fix: the third branch of the original if/else chain compared
# input$variable against the file name "problem6Plot1.png" instead of the
# select-input value "fluszn", so choosing "CDC vs #fluszn" rendered neither
# an image nor a caption. A switch() on the input value replaces both chains;
# the "fileType"/"filetype" misspellings are normalised to contentType, the
# field renderImage actually understands.
server <- function(input, output) {
  output$image1 <- renderImage(
    list(src = "usData.png", contentType = "image/png"),
    deleteFile = FALSE
  )
  output$image2 <- renderImage({
    src <- switch(input$variable,
                  all = "problem6Plot2.png",
                  flu = "usData.png",
                  fluszn = "problem6Plot1.png")
    list(src = src, contentType = "image/png")
  }, deleteFile = FALSE)
  # Caption echoes the selected comparison key ("all", "flu" or "fluszn").
  output$caption <- renderText(input$variable)
}
shinyApp(ui, server)
75fda4b9e8d0a9f5e7d74d46ab7d2c02432f4edf | 52595664b4574420a27f5c32a9e5051881f25824 | /man/optifix.Rd | 75aeaa737b176cedd06692beeb5390add3b6e69c | [] | no_license | ick003/GPspt | 0bc6f54d626505759100f1bc8395eb4d1490a25d | d103992df60daa4161cc2fef9b6010cd0e9f8d00 | refs/heads/master | 2022-04-25T17:33:48.257316 | 2020-03-13T03:32:02 | 2020-03-13T03:32:02 | 72,394,880 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 996 | rd | optifix.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{optifix}
\alias{optifix}
\title{Optimise with fixed parameters}
\usage{
optifix(
par,
fixed,
fn,
gr = NULL,
...,
method = c("Nelder-Mead", "BFGS", "CG", "L-BFGS-B", "SANN"),
lower = -Inf,
upper = Inf,
control = list(),
hessian = FALSE
)
}
\arguments{
\item{par}{numeric vector of starting parameter values}
\item{fixed}{boolean}
\item{fn}{function}
\item{gr}{gradient of the function}
\item{...}{additional parameters related to the fn and gr functions.}
\item{method}{optimization method}
\item{lower}{lower bounds.}
\item{upper}{upper bounds.}
\item{control}{list of controls.}
\item{hessian}{boolean. Return the Hessian.}
}
\description{
Optimise with fixed parameters
}
\examples{
fn = function(t){(2 - t[1] + t[2])^2 + 2*(3 - t[3])^4}
th = c(1,1,1)
opt = optifix(th,fixed = c(TRUE, FALSE, FALSE), fn = fn, method = "Nelder-Mead")
}
\keyword{fixed}
\keyword{optimization}
\keyword{values.}
\keyword{with}
|
2ed060d5e707d20e2bbd52f78346400b7a0a5755 | b2f61fde194bfcb362b2266da124138efd27d867 | /code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Database/Gent-Rowley/Connect5/cf_5_6x6_r_/cf_5_6x6_r_.R | 27273e913caf88559b6a25feee439bc2e60706ee | [] | no_license | arey0pushpa/dcnf-autarky | e95fddba85c035e8b229f5fe9ac540b692a4d5c0 | a6c9a52236af11d7f7e165a4b25b32c538da1c98 | refs/heads/master | 2021-06-09T00:56:32.937250 | 2021-02-19T15:15:23 | 2021-02-19T15:15:23 | 136,440,042 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 64 | r | cf_5_6x6_r_.R | 056abd3573e95322846f430e898fef74 cf_5_6x6_r_.qdimacs 97636 42339 |
fbb9219332b75f79babe858fba609a1e42d9460d | 65cb1fe25385cd10a159f4d7b6e3716cae92197d | /man/grp_mean.Rd | d4af06ccb7de613266dde7712548a484844a2bdb | [
"MIT"
] | permissive | tbonza/supml | 46f795838442b22e56f2a82c01501a235dd336b6 | d64dcd3216a4abdec8cb04442e7baa92ab4fd507 | refs/heads/master | 2020-04-10T09:42:03.006011 | 2018-12-13T17:19:45 | 2018-12-13T17:19:45 | 160,944,681 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 288 | rd | grp_mean.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/discriminant.R
\name{grp_mean}
\alias{grp_mean}
\title{Mean of each predictor attribute for each class K}
\usage{
grp_mean(indices, predictors)
}
\description{
See HT, Elements of Statistical Learning (4.10)
}
|
37578739d6e62497505f478b7e4e4e91e8e32a7f | b1039c40b1a579fdee1c36f80bb525bdfa6a00fe | /man/DGMcrf-package.Rd | e8e80bcd41f97ba0a7016221f587aff93bb7b53c | [] | no_license | bensonkenduiywo/DGMcrf | 3060bec293a3e0e19337381a97ce963a720dbfa3 | 5e9a8564f5c87ea953bedeb214e7b0f44aa03e8b | refs/heads/master | 2020-07-21T06:43:28.236608 | 2019-09-06T11:25:35 | 2019-09-06T11:25:35 | 206,772,603 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 464 | rd | DGMcrf-package.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DGMcrf-package.R
\docType{package}
\name{DGMcrf-package}
\alias{DGMcrf}
\alias{DGMcrf-package}
\title{DGMcrf: Directed Graphical Models: Conditional Random Fields}
\description{
Uses directed graphical models to model spatial dependencies in image classification.
}
\author{
\strong{Maintainer}: Benson Kenduiywo \email{bensonkemboi@gmail.com} (0000-0002-8448-0499)
}
\keyword{internal}
|
661a0cc4f149be00d729abced580d00358f7996d | 531c16f88b06811084f57fb1dcb3a501e96cb2af | /Part4/sec1_2_network.R | 23ee97d053f9cbcc80779df3655463afbc71b49b | [] | no_license | noc1992/R_Data_Analysis | 2c84152deb264eefaa2a1965b3507aaa8c852249 | d73e9c521eade5d5b50c7cb0852ee8ffc898925d | refs/heads/master | 2020-06-05T04:06:55.971142 | 2019-11-18T01:42:07 | 2019-11-18T01:42:07 | 189,162,866 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,723 | r | sec1_2_network.R | install.packages("igraph")
library(igraph)
# Toy directed graph from a flat edge list (pairs of vertex ids); 5,5 is a self-loop.
g1 <- graph(c(1,2, 2,3, 2,4, 1,5, 5,5, 3,6))
plot(g1)
str(g1)
# Small employee/supervisor network: each person in `name` reports to the
# matching entry in `pemp`.
name <- c('seo','il','kim','son','no','li','yu','sin','kang','kwang','jung')
pemp <- c('seo','seo','il','kim','kim','seo','li','yu','seo','kang','kwang')
emp <- data.frame(na = name, suna = pemp)
emp
g <- graph.data.frame(emp,directed=T)
plot(g, layout=layout.fruchterman.reingold,vertex.size=8,edge.arrow.size=0.5)
dev.off()
# Interactive D3 network demo.
# NOTE(review): the install.packages() calls below are duplicated (devtools
# and d3Network are each installed twice); package installs are best run
# once, interactively, rather than inside a script.
install.packages("devtools")
install.packages("d3Network")
library(d3Network)
install.packages("devtools")
install.packages('d3Network')
install.packages("RCurl")
library(d3Network)
library(RCurl)
library(devtools)
library(igraph)
# Actress network: each name in `name` links to the corresponding `pemp` entry.
name <- c('Angela Bassett','Jessica Lange','Winona Ryder','Michelle Pfeiffer',
'Whoopi Goldberg','Emma Thompson','Julia Roberts','Sharon Stone','Meryl Streep',
'Susan Sarandon','Nicole Kidman')
pemp <- c('Angela Bassett','Angela Bassett','Jessica Lange','Winona Ryder','Winona Ryder',
'Angela Bassett','Emma Thompson', 'Julia Roberts','Angela Bassett',
'Meryl Streep','Susan Sarandon')
emp <- data.frame(이름=name,상사이름=pemp)
# Writes an interactive HTML network page to graph/d3.html.
d3SimpleNetwork(emp, width=600, height=600, file="graph/d3.html")
# Student/professor cluster network read from CSV (columns: 학생 = student,
# 교수 = professor).
g <- read.csv("data/군집분석.csv", head= T )
graph <- data.frame(학생=g$학생,교수=g$교수)
g <- graph.data.frame(graph,directed = T)
# The same network under two layouts; labels suppressed to reduce clutter.
plot(g, layout=layout.fruchterman.reingold, vertex.size=2 , edge.arrow.size=0.5,
vertex.color = "green",vertex.label=NA)
plot(g, layout=layout.kamada.kawai, vertex.size=2 , edge.arrow.size=0.5,
vertex.color = "green",vertex.label=NA)
library(stringr)
# Re-read the cluster data and derive a per-vertex group code: the first
# character of the vertex name ("S..." = student, otherwise professor).
g <- read.csv("data/군집분석.csv", head= T )
graph <- data.frame(학생=g$학생,교수=g$교수)
g <- graph.data.frame(graph,directed = T)
V(g)$name
gubun1 <- V(g)$name
gubun1
gubun <- str_sub(gubun1,start=1,end=1)
gubun
# Per-vertex colour and size derived from the group code: students ("S")
# are drawn small (2) and red, professors larger (6) and green.
# Bug fix: the original colour loop ran `for (i in length(gubun))`, which
# visits only the single value length(gubun) rather than every index, so
# `colors` ended up with one element and all vertices were drawn the same
# colour. Vectorised ifelse() builds one entry per vertex, as igraph's
# vertex.color/vertex.size parameters expect.
colors <- ifelse(gubun == 'S', 'red', 'green')
sizes <- ifelse(gubun == 'S', 2, 6)
plot(g, layout=layout.fruchterman.reingold,vertex.size=sizes,edge.arrow.size=0.1,vertex.color=colors,vertex.label=NA)
savePlot("graph/군집_색상크기조절_1.png", type="png")
# Per-vertex shape: circles for students, squares for professors.
# Bug fix: as with `colors` above, the original `for (i in length(gubun))`
# loop visited only the last index, leaving `shapes` with a single element;
# the vectorised form yields one shape per vertex.
shapes <- ifelse(gubun == 'S', 'circle', 'square')
plot(g, layout=layout.kamada.kawai,vertex.size=sizes,edge.arrow.size=0,vertex.color=colors,vertex.label=NA,vertex.shape= shapes)
# MERS transmission data rendered as an interactive D3 network page.
virus1 <- read.csv("data/메르스전염현황.csv",header=T)
d3SimpleNetwork(virus1,width = 1000,height = 1000,file="graph/mers.html")
3de263217ff219fadd948b54d9bad8201be73ff8 | 0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb | /cran/paws.media.services/R/mediapackage_service.R | 1ede23d76de76a59cff3ebe3af4dca92c2405c32 | [
"Apache-2.0"
] | permissive | paws-r/paws | 196d42a2b9aca0e551a51ea5e6f34daca739591b | a689da2aee079391e100060524f6b973130f4e40 | refs/heads/main | 2023-08-18T00:33:48.538539 | 2023-08-09T09:31:24 | 2023-08-09T09:31:24 | 154,419,943 | 293 | 45 | NOASSERTION | 2023-09-14T15:31:32 | 2018-10-24T01:28:47 | R | UTF-8 | R | false | false | 4,197 | r | mediapackage_service.R | # This file is generated by make.paws. Please do not edit here.
#' @importFrom paws.common new_handlers new_service set_config
NULL
#' AWS Elemental MediaPackage
#'
#' @description
#' AWS Elemental MediaPackage
#'
#' @param
#' config
#' Optional configuration of credentials, endpoint, and/or region.
#'
#' @section Service syntax:
#' ```
#' svc <- mediapackage(
#' config = list(
#' credentials = list(
#' creds = list(
#' access_key_id = "string",
#' secret_access_key = "string",
#' session_token = "string"
#' ),
#' profile = "string"
#' ),
#' endpoint = "string",
#' region = "string"
#' )
#' )
#' ```
#'
#' @examples
#' \dontrun{
#' svc <- mediapackage()
#' svc$configure_logs(
#' Foo = 123
#' )
#' }
#'
#' @section Operations:
#' \tabular{ll}{
#' \link[=mediapackage_configure_logs]{configure_logs} \tab Changes the Channel's properities to configure log subscription\cr
#' \link[=mediapackage_create_channel]{create_channel} \tab Creates a new Channel\cr
#' \link[=mediapackage_create_harvest_job]{create_harvest_job} \tab Creates a new HarvestJob record\cr
#' \link[=mediapackage_create_origin_endpoint]{create_origin_endpoint} \tab Creates a new OriginEndpoint record\cr
#' \link[=mediapackage_delete_channel]{delete_channel} \tab Deletes an existing Channel\cr
#' \link[=mediapackage_delete_origin_endpoint]{delete_origin_endpoint} \tab Deletes an existing OriginEndpoint\cr
#' \link[=mediapackage_describe_channel]{describe_channel} \tab Gets details about a Channel\cr
#' \link[=mediapackage_describe_harvest_job]{describe_harvest_job} \tab Gets details about an existing HarvestJob\cr
#' \link[=mediapackage_describe_origin_endpoint]{describe_origin_endpoint} \tab Gets details about an existing OriginEndpoint\cr
#' \link[=mediapackage_list_channels]{list_channels} \tab Returns a collection of Channels\cr
#' \link[=mediapackage_list_harvest_jobs]{list_harvest_jobs} \tab Returns a collection of HarvestJob records\cr
#' \link[=mediapackage_list_origin_endpoints]{list_origin_endpoints} \tab Returns a collection of OriginEndpoint records\cr
#' \link[=mediapackage_list_tags_for_resource]{list_tags_for_resource} \tab List tags for resource\cr
#' \link[=mediapackage_rotate_channel_credentials]{rotate_channel_credentials} \tab Changes the Channel's first IngestEndpoint's username and password\cr
#' \link[=mediapackage_rotate_ingest_endpoint_credentials]{rotate_ingest_endpoint_credentials} \tab Rotate the IngestEndpoint's username and password, as specified by the IngestEndpoint's id\cr
#' \link[=mediapackage_tag_resource]{tag_resource} \tab Tag resource\cr
#' \link[=mediapackage_untag_resource]{untag_resource} \tab Untag resource\cr
#' \link[=mediapackage_update_channel]{update_channel} \tab Updates an existing Channel\cr
#' \link[=mediapackage_update_origin_endpoint]{update_origin_endpoint} \tab Updates an existing OriginEndpoint
#' }
#'
#' @return
#' A client for the service. You can call the service's operations using
#' syntax like `svc$operation(...)`, where `svc` is the name you've assigned
#' to the client. The available operations are listed in the
#' Operations section.
#'
#' @rdname mediapackage
#' @export
# Public constructor: builds a MediaPackage client by copying the generated
# operation list and applying the user-supplied configuration
# (credentials, endpoint, region).
mediapackage <- function(config = list()) {
svc <- .mediapackage$operations
svc <- set_config(svc, config)
return(svc)
}
# Private API objects: metadata, handlers, interfaces, etc.
# NOTE: this file is generated by make.paws (see the file header); change the
# generator rather than editing these objects by hand.
.mediapackage <- list()
.mediapackage$operations <- list()
# Static service metadata: endpoint templates per AWS partition plus the API
# version and signing details used when building requests.
.mediapackage$metadata <- list(
service_name = "mediapackage",
endpoints = list("*" = list(endpoint = "mediapackage.{region}.amazonaws.com", global = FALSE), "cn-*" = list(endpoint = "mediapackage.{region}.amazonaws.com.cn", global = FALSE), "us-iso-*" = list(endpoint = "mediapackage.{region}.c2s.ic.gov", global = FALSE), "us-isob-*" = list(endpoint = "mediapackage.{region}.sc2s.sgov.gov", global = FALSE)),
service_id = "MediaPackage",
api_version = "2017-10-12",
signing_name = "mediapackage",
json_version = "1.1",
target_prefix = ""
)
# Internal factory: wires the REST-JSON/SigV4 handlers to the metadata above.
.mediapackage$service <- function(config = list()) {
handlers <- new_handlers("restjson", "v4")
new_service(.mediapackage$metadata, handlers, config)
}
|
2b7c88d835f88c50895e8b03c2d937b2f7c1d757 | 850406eebc34d582fe8603a9ed79b6bcf613f132 | /h2o.predict_json.Rd | 7db88f1675596f9cdb998618cf2d38b11523168d | [
"MIT"
] | permissive | sanjaybasu/sdh_t2dm | 7652a7452cc271f345a9bf9303d77f1bf3efa73c | 5b1d5752a056898397bff7e51c6e1aa180feccc2 | refs/heads/master | 2020-03-29T04:00:35.481658 | 2019-04-19T23:11:19 | 2019-04-19T23:11:19 | 149,511,195 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,121 | rd | h2o.predict_json.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/predict.R
\name{h2o.predict_json}
\alias{h2o.predict_json}
\title{H2O Prediction from R without having H2O running}
\usage{
h2o.predict_json(model, json, genmodelpath, labels, classpath, javaoptions)
}
\arguments{
\item{model}{String with file name of MOJO or POJO Jar}
\item{json}{JSON String with inputs to model}
\item{genmodelpath}{(Optional) path name to h2o-genmodel.jar, if not set defaults to same dir as MOJO}
\item{labels}{(Optional) if TRUE then show output labels in result}
\item{classpath}{(Optional) Extra items for the class path of where to look for Java classes, e.g., h2o-genmodel.jar}
\item{javaoptions}{(Optional) Java options string, default if "-Xmx4g"}
}
\value{
Returns an object with the prediction result
}
\description{
Provides the method h2o.predict with which you can predict a MOJO or POJO Jar model
from R.
}
\examples{
\donttest{
library(h2o)
h2o.predict_json('~/GBM_model_python_1473313897851_6.zip', '{"C7":1}')
h2o.predict_json('~/GBM_model_python_1473313897851_6.zip', '{"C7":1}', c(".", "lib"))
}
}
|
535e40ed7a285332715ef1753de471b3fc885170 | fe872a4ad8d46e7df60dd19617fb14e988f07ed8 | /R/utils-proj.R | 89299f6cce7ae50e1739b42a3d7ff15b4f914816 | [
"MIT"
] | permissive | chaudhary-amit/acblm | 638aa75273f6f4522279634e67b3b831036d0a03 | b6aa44163c1f2782becbbef6b6f71d5fe4b85f62 | refs/heads/master | 2023-04-25T06:25:22.124853 | 2021-05-18T15:49:43 | 2021-05-18T15:49:43 | 368,360,787 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,741 | r | utils-proj.R | #' Generate a linear projection decomposition for the model
#' with continuous worker heterogeneity
#'
#' Regresses the outcome on the continuous worker effect plus firm-class
#' dummies, then decomposes the outcome variance into worker, firm and
#' residual components via the covariance matrix of the fitted terms.
#'
#' @param sdata data with the outcome, worker-effect and firm-class columns.
#'   NOTE(review): the class-count line below groups `sdata` by a `j1` column,
#'   while the rest of the function uses the `j` column it creates -- confirm
#'   `j1` exists in callers' data.
#' @param y_col name of the outcome column.
#' @param k_col name of the (continuous) worker-effect column.
#' @param j_col name of the firm-class column.
#' @param do_interacted_reg if 1, also fit the fully interacted regression to
#'   obtain an unrestricted R-squared (rr$rsq2); otherwise rsq2 is NA.
#' @export
lin.proja <- function(sdata,y_col="y",k_col="k",j_col="j",do_interacted_reg=1) {
rr = list()
# Work on a data.table copy with standardised column names.
sdata2 = copy(data.table(sdata))
sdata2[,y_imp := get(y_col)]
sdata2[,k_imp := get(k_col)]
sdata2[,j := get(j_col)]
# Additive model: continuous worker effect + firm-class fixed effects.
fit = lm(y_imp ~ k_imp + factor(j),sdata2)
sdata2$res = residuals(fit)
# Per-term predictions give the worker (k_hat) and firm (l_hat) components.
pred = predict(fit,type = "terms")
sdata2$k_hat = pred[,1]
sdata2$l_hat = pred[,2]
# Covariance matrix of outcome, components and residual drives the decomposition.
rr$cc = sdata2[,cov.wt( data.frame(y_imp,k_hat,l_hat,res))$cov]
rr$rsq1 = summary(fit)$r.squared
if (do_interacted_reg==1) {
# Fully interacted (worker slope varies by firm class) benchmark R-squared.
fit2 = lm(y_imp ~ 0+ k_imp:factor(j) + factor(j),sdata2)
rr$rsq2 = 1-mean(resid(fit2)^2)/var(sdata2$y_imp)
} else {
rr$rsq2=NA
}
# Variance-share summary of the decomposition (shares of var_k + var_l + 2*cov_kl).
get.stats <- function(cc) {
r=list()
den = cc[2,2] + cc[3,3] + 2 * cc[2,3]
r$cor_kl = round(cc[2,3]/sqrt(cc[2,2]*cc[3,3]),4)
r$cov_kl = 2*round(cc[2,3]/den,4)
r$var_k = round(cc[2,2]/den,4)
r$var_l = round(cc[3,3]/den,4)
r$rsq = round((cc[1,1] - cc[4,4])/cc[1,1],4)
return(r)
}
rr$stats = get.stats(rr$cc)
#print(data.frame(rr$stats))
# Observation counts per class -- note this uses `j1` from the raw input.
rr$NNs = sdata[,.N,j1][order(j1)][,N]
return(rr)
}
#' Linear projection decomposition with three discrete effects
#'
#' Regresses the outcome on worker-class, firm-class and market-class dummies,
#' then decomposes the outcome variance into the three components and a residual.
#'
#' @param sdata data with outcome, worker-class, firm-class and market columns.
#' @param y_col,k_col,j_col,m_col column names for outcome, worker class,
#'   firm class and market class respectively.
#' @param usex unused here -- kept for signature compatibility.
#' @param do.unc if TRUE, also fit the fully interacted regression for an
#'   unrestricted R-squared (rr$rsq2).
#' @export
lin.proj.three <- function(sdata,y_col="y",k_col="k",j_col="j",m_col="m",usex=FALSE,do.unc=TRUE) {
rr = list()
# Work on a data.table copy with standardised column names.
sdata2 = copy(data.table(sdata))
sdata2[,y_imp := get(y_col)]
sdata2[,k_imp := get(k_col)]
sdata2[,j := get(j_col)]
sdata2[,m := get(m_col)]
#browser()
# Additive model with three sets of fixed effects.
fit = lm(y_imp ~ factor(k_imp) + factor(j) + factor(m), sdata2)
sdata2$res = residuals(fit)
# Per-term predictions give the worker, firm and market components.
pred = predict(fit,type = "terms")
sdata2$k_hat = pred[,1]
sdata2$l_hat = pred[,2]
sdata2$m_hat = pred[,3]
rr$cc = sdata2[,cov.wt( data.frame(y_imp,k_hat,l_hat,m_hat,res))$cov]
rr$rsq1 = summary(fit)$r.squared
if (do.unc) {
# Fully interacted benchmark (can be expensive on many classes).
fit2 = lm(y_imp ~factor(k_imp) * factor(j) * factor(m),sdata2)
rr$rsq2 = summary(fit2)$r.squared
}
# Variance shares over all three components plus their pairwise covariances.
get.stats <- function(cc) {
r=list()
den = cc[2,2] + cc[3,3] + cc[4,4] + 2 * cc[2,3] + 2 * cc[2,4] + 2 * cc[3,4]
r$cor_kl = round(cc[2,3]/sqrt(cc[2,2]*cc[3,3]),4)
r$cor_km = round(cc[2,4]/sqrt(cc[2,2]*cc[4,4]),4)
r$cor_lm = round(cc[3,4]/sqrt(cc[3,3]*cc[4,4]),4)
r$cov_kl = 2*round(cc[2,3]/den,4)
r$cov_km = 2*round(cc[2,4]/den,4)
r$cov_lm = 2*round(cc[3,4]/den,4)
r$var_k = round(cc[2,2]/den,4)
r$var_l = round(cc[3,3]/den,4)
r$var_m = round(cc[4,4]/den,4)
r$rsq = round((cc[1,1] - cc[5,5])/cc[1,1],4)
return(r)
}
rr$stats = get.stats(rr$cc)
rr$NNs = sdata2[,.N,j][order(j)][,N]
print(data.frame(rr$stats))
return(rr)
}
#' Linear projection decomposition with discrete worker heterogeneity
#'
#' Regresses the outcome on worker-class and firm-class dummies, then
#' decomposes the outcome variance into worker, firm and residual components.
#'
#' @param sdata data with the outcome, worker-class and firm-class columns.
#' @param y_col,k_col,j_col column names for outcome, worker class and firm class.
#' @param usex unused here -- kept for signature compatibility.
#' @param do.unc if TRUE, also fit the fully interacted regression for an
#'   unrestricted R-squared (rr$rsq2).
#' @export
lin.proj <- function(sdata,y_col="y",k_col="k",j_col="j",usex=FALSE,do.unc=TRUE) {
rr = list()
# Work on a data.table copy with standardised column names.
sdata2 = copy(data.table(sdata))
sdata2[,y_imp := get(y_col)]
sdata2[,k_imp := get(k_col)]
sdata2[,j := get(j_col)]
# Additive two-way fixed-effects model.
fit = lm(y_imp ~ factor(k_imp) + factor(j),sdata2)
sdata2$res = residuals(fit)
# Per-term predictions give the worker (k_hat) and firm (l_hat) components.
pred = predict(fit,type = "terms")
sdata2$k_hat = pred[,1]
sdata2$l_hat = pred[,2]
rr$cc = sdata2[,cov.wt( data.frame(y_imp,k_hat,l_hat,res))$cov]
rr$rsq1 = summary(fit)$r.squared
if (do.unc) {
# Fully interacted benchmark R-squared.
fit2 = lm(y_imp ~factor(k_imp) * factor(j),sdata2)
rr$rsq2 = summary(fit2)$r.squared
}
# Variance-share summary of the decomposition.
get.stats <- function(cc) {
r=list()
den = cc[2,2] + cc[3,3] + 2 * cc[2,3]
r$cor_kl = round(cc[2,3]/sqrt(cc[2,2]*cc[3,3]),4)
r$cov_kl = 2*round(cc[2,3]/den,4)
r$var_k = round(cc[2,2]/den,4)
r$var_l = round(cc[3,3]/den,4)
r$rsq = round((cc[1,1] - cc[4,4])/cc[1,1],4)
return(r)
}
rr$stats = get.stats(rr$cc)
rr$NNs = sdata2[,.N,j][order(j)][,N]
#print(data.frame(rr$stats))
return(rr)
}
#' Linear projection with an additional X fixed effect
#'
#' Fits `y ~ factor(k) + factor(j) + factor(x)` -- the input data must
#' contain an `x` column -- and returns the covariance matrix of
#' (y, k_hat, l_hat, residual, x_hat) plus variance decomposition shares
#' for the k and j effects only.
#'
#' @param sdata data with the outcome, effect columns and an `x` column
#' @param y_col name of the outcome column
#' @param k_col name of the first grouping column
#' @param j_col name of the second grouping column
#' @return list with elements cc, rsq1, rsq2, stats
#' @export
lin.projx <- function(sdata, y_col = "y", k_col = "k", j_col = "j") {
  result <- list()
  dt <- copy(data.table(sdata))
  dt[, y_imp := get(y_col)]
  dt[, k_imp := get(k_col)]
  dt[, j := get(j_col)]

  fe_fit <- lm(y_imp ~ factor(k_imp) + factor(j) + factor(x), dt)
  dt$res <- residuals(fe_fit)
  term_pred <- predict(fe_fit, type = "terms")
  dt$k_hat <- term_pred[, 1]
  dt$l_hat <- term_pred[, 2]
  dt$x_hat <- term_pred[, 3]

  # Column order matters: the residual sits in slot 4 and x_hat in slot 5,
  # so the stats helper below (which indexes slot 4 as the residual) is valid.
  result$cc <- dt[, cov.wt(data.frame(y_imp, k_hat, l_hat, res, x_hat))$cov]
  sat_fit <- lm(y_imp ~ factor(k_imp) * factor(j), dt)
  result$rsq1 <- summary(fe_fit)$r.squared
  result$rsq2 <- summary(sat_fit)$r.squared

  # Variance shares of the k and j effects (x_hat deliberately excluded
  # from `den`, matching the two-way variant).
  summarize_cov <- function(cc) {
    den <- cc[2, 2] + cc[3, 3] + 2 * cc[2, 3]
    list(cor_kl = round(cc[2, 3] / sqrt(cc[2, 2] * cc[3, 3]), 4),
         cov_kl = 2 * round(cc[2, 3] / den, 4),
         var_k  = round(cc[2, 2] / den, 4),
         var_l  = round(cc[3, 3] / den, 4),
         rsq    = round((cc[1, 1] - cc[4, 4]) / cc[1, 1], 4))
  }
  result$stats <- summarize_cov(result$cc)
  print(data.frame(result$stats))
  result
}
#' Linear projection with continuous k and an X fixed effect
#'
#' Same as `lin.projx` except that `k` enters the regression as a
#' continuous regressor (`k_imp`, no `factor()`), and no interacted model
#' is fitted. The input data must contain an `x` column.
#'
#' @param sdata data with the outcome, effect columns and an `x` column
#' @param y_col name of the outcome column
#' @param k_col name of the continuous k column
#' @param j_col name of the grouping column
#' @return list with elements cc, rsq1, stats
#' @export
lin.projax <- function(sdata, y_col = "y", k_col = "k", j_col = "j") {
  result <- list()
  dt <- copy(data.table(sdata))
  dt[, y_imp := get(y_col)]
  dt[, k_imp := get(k_col)]
  dt[, j := get(j_col)]

  # k enters linearly here; j and x as fixed effects.
  fe_fit <- lm(y_imp ~ k_imp + factor(j) + factor(x), dt)
  dt$res <- residuals(fe_fit)
  term_pred <- predict(fe_fit, type = "terms")
  dt$k_hat <- term_pred[, 1]
  dt$l_hat <- term_pred[, 2]
  dt$x_hat <- term_pred[, 3]

  # Residual in slot 4, x_hat in slot 5 -- required by the stats helper.
  result$cc <- dt[, cov.wt(data.frame(y_imp, k_hat, l_hat, res, x_hat))$cov]
  result$rsq1 <- summary(fe_fit)$r.squared

  summarize_cov <- function(cc) {
    den <- cc[2, 2] + cc[3, 3] + 2 * cc[2, 3]
    list(cor_kl = round(cc[2, 3] / sqrt(cc[2, 2] * cc[3, 3]), 4),
         cov_kl = 2 * round(cc[2, 3] / den, 4),
         var_k  = round(cc[2, 2] / den, 4),
         var_l  = round(cc[3, 3] / den, 4),
         rsq    = round((cc[1, 1] - cc[4, 4]) / cc[1, 1], 4))
  }
  result$stats <- summarize_cov(result$cc)
  print(data.frame(result$stats))
  result
}
|
b4d52399fe3909801537ed684ee9d56d63ec45d1 | e93d573e0a6d8fdf22bab955684c66cbda6cd1dd | /test/test_dirichlet_mle.R | b050b91e682ccda68313c7c5f4845afefb1745e0 | [] | no_license | eric-f/mixedWarpedCurves2 | ecfe08ee25e8fad62fa3923cd08b3a0c5f8e92ef | f8c496beb96f2f8520859adc23d35609972254be | refs/heads/master | 2020-03-14T10:02:00.327450 | 2018-05-16T19:17:33 | 2018-05-16T19:17:33 | 131,557,746 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,164 | r | test_dirichlet_mle.R | library(gtools)
library(mixedWarpedCurves2)
library(ggplot2)

# Candidate Dirichlet concentration vectors, each normalized to sum to 1.
# Only kappa1 is used in the simulation below; the others are kept as
# alternative scenarios to swap in.
kappa0 <- c(1, 2, 2, 1)
kappa0 <- kappa0 / sum(kappa0)
kappa1 <- c(0.5, 1, 1, 0.5)
kappa1 <- kappa1 / sum(kappa1)
kappa2 <- c(1.2, 1, 1, 1.2)
kappa2 <- kappa2 / sum(kappa2)
kappa3 <- c(0.5, 1, 2, 1)
kappa3 <- kappa3 / sum(kappa3)
kappa4 <- c(1, 1, 0.5, 0.5)
kappa4 <- kappa4 / sum(kappa4)

# Monte-Carlo setup: nsim replications at each sample size in ns.
nsim <- 20
ns <- c(100, 1000, 5000)
alpha <- array(NA, c(length(ns), 4, nsim))      # MLE of the 4 Dirichlet params
sd_alpha <- array(NA, c(length(ns), 4, nsim))   # model-based SEs from the Hessian

for (j in seq_along(ns)) {
  for (i in seq_len(nsim)) {
    dat0 <- sim_warping_mixture(ns[j], rep(1, 1),
                                rbind(kappa1),
                                ni = 1,
                                tau = 2,
                                mu_sh = -25, mu_sc = 500,
                                sd_sh = 10, sd_sc = 50, sd_err = 10)
    w <- attr(dat0, "w")
    w_lst <- apply(w, 2, function(x) { list(w = x) })
    out <- try(dirichlet_mle(w_lst))
    # BUG FIX: the original indexed `out$alpha` without checking whether
    # try() caught an error, so a failed fit crashed the loop anyway.
    # Skip failed replications; their array slots stay NA.
    if (inherits(out, "try-error")) next
    alpha[j, , i] <- out$alpha
    # SEs from the inverse of the negated Hessian at the MLE.
    sd_alpha[j, , i] <- sqrt(diag(solve(-out$hessian)))
  }
}

# Monte-Carlo summaries per sample size: mean estimate, empirical SD
# across replications, and average model-based SD.
(mean_alpha <- apply(alpha, c(1, 2), mean))
(emp_sd_alpha <- apply(alpha, c(1, 2), sd))
(mod_sd_alpha <- apply(sd_alpha, c(1, 2), mean))
de973586d5602b8c8fce0a23a6da633cc479e9bd | 19f597b79290d1f71c57a790d7565c0ca30ea2b2 | /plot4.R | 0fb27466ab9609bc311a50b9e2abca6956fba21d | [] | no_license | mpadmana/ExData_Plotting1 | acf35b878b64461b1e00d7323e18ed4b93df147c | 4e8eff81792f87414829fd3d35f79166543691b9 | refs/heads/master | 2021-01-18T00:15:22.865224 | 2015-01-11T06:58:25 | 2015-01-11T06:58:25 | 29,079,011 | 0 | 0 | null | 2015-01-11T02:39:51 | 2015-01-11T02:39:51 | null | UTF-8 | R | false | false | 1,809 | r | plot4.R | rowsInEachIter=100000
# plot4.R -- read the household power data in chunks of `rowsInEachIter`
# rows, keep only 2007-02-01 and 2007-02-02, then draw a 2x2 panel of
# plots into plot4.png.

# First chunk: read the header here and remember the column names for the
# header-less reads that follow.
dataset <- read.table("household_power_consumption.txt", header = TRUE, sep = ";",
                      nrows = rowsInEachIter, na.strings = "?")
columns <- colnames(dataset)

# Keep only observations from the two target dates.
filter_target_dates <- function(df) {
  d <- as.Date(df$Date, format = "%d/%m/%Y")
  df[d %in% as.Date(c("2007-02-01", "2007-02-02")), ]
}

# Accumulate filtered chunks in a list and bind once at the end
# (the original grew `dataset` with rbind() on every iteration: O(n^2)).
chunks <- list(filter_target_dates(dataset))
iter <- 1
repeat {
  fromRow <- iter * rowsInEachIter
  # BUG FIX: skip the header line plus the fromRow data rows already read.
  # The original used skip = fromRow, which skips the header plus only
  # fromRow - 1 data rows, re-reading the last row of the previous chunk.
  datasetTemp <- read.table("household_power_consumption.txt", header = FALSE, sep = ";",
                            skip = fromRow + 1, nrows = rowsInEachIter,
                            na.strings = "?", col.names = columns)
  chunks[[iter + 1]] <- filter_target_dates(datasetTemp)
  if (nrow(datasetTemp) < rowsInEachIter) {
    break
  }
  iter <- iter + 1
}
dataset <- do.call(rbind, chunks)

# BUG FIX: the original called na.omit(dataset) and discarded the result
# (a no-op); assign it so incomplete rows are actually removed.
dataset <- na.omit(dataset)

png(filename = "plot4.png", width = 480, height = 480, units = "px")
par(mfrow = c(2, 2))
dataset$DateTime <- as.POSIXct(paste(dataset$Date, dataset$Time), format = "%d/%m/%Y %H:%M:%S")

# Top-left: global active power over time.
plot(dataset$DateTime, dataset$Global_active_power, ylab = "Global Active Power (kilowatts)", xlab = " ", type = "l")
# Top-right: voltage over time.
plot(dataset$DateTime, dataset$Voltage, ylab = "Voltage", xlab = "datetime", type = "l")
# Bottom-left: the three sub-metering series overlaid.
plot(dataset$DateTime, dataset$Sub_metering_1, xlab = "", ylab = "Energy sub metering", type = "l")
points(dataset$DateTime, dataset$Sub_metering_2, type = "l", col = 2)
points(dataset$DateTime, dataset$Sub_metering_3, type = "l", col = 4)
legend("topright", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), col = c(1, 2, 4), lwd = 1, bty = "n")
# Bottom-right: global reactive power over time.
plot(dataset$DateTime, dataset$Global_reactive_power, ylab = "Global_reactive_power", xlab = "datetime", type = "l")
dev.off()
a5d3ee5c992bf6ba277ad4cb5ad583aee9304d73 | 63b0cd5d17131f25ba33d858797c0b27d248e89f | /tests/testthat/test-auth.R | 06ff247be229df0042d4fbe9a7f8dc64cf192269 | [
"MIT"
] | permissive | yutannihilation/mediumr | da43bb26a8669eaa6c0c95d2cf9ea86a32ed0235 | 691e0254e90ee53b746930b4eb4dd54403ae39d5 | refs/heads/master | 2021-05-15T11:05:54.758993 | 2017-11-08T12:23:58 | 2017-11-08T12:23:58 | 108,277,200 | 25 | 3 | null | null | null | null | UTF-8 | R | false | false | 255 | r | test-auth.R | context("auth")
# Verify that the auth helper turns the MEDIUM_API_TOKEN environment
# variable into a "Bearer <token>" Authorization header.
test_that("Authorization header", {
  fake_env <- c(MEDIUM_API_TOKEN = "xxxx")
  withr::with_envvar(fake_env, {
    header <- medium_authorization_header()
    expect_equivalent(header$headers["Authorization"], "Bearer xxxx")
  })
})
|
8f7af93cb6606472eb1554c370853fef047044a0 | 27b1e9936a96dd1b9f8cd0a399f6bbfb1019de47 | /customer acq.R | 367e3dc8b204fa970c7aedf033c11ff8dcc94c3a | [] | no_license | muralidatascience/Rcode | 4c7d5a98e56106a7f0b0b19588962677c7155b9c | 43676264a899f7c366f66f0b4f408c89400927ab | refs/heads/master | 2020-04-06T06:47:24.324611 | 2016-08-26T12:33:30 | 2016-08-26T12:33:30 | 55,692,889 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 746 | r | customer acq.R |
#Simple Bar Plot
counts4 <- table(Dataframe2$polarity)
barplot(counts4, main="Number of Customer Opinion",
xlab="Categories")
# Stacked Bar Plot with Colors and Legend
counts2 <- table(Dataframe2$location, Dataframe2$polarity)
barplot(counts2, main="Customer Feedbacks & Sources",
xlab="Number of Customer Opinion & Source", col=c("darkblue","red"),
legend = rownames(counts2))
# Grouped Bar Plot
counts3 <- table(Dataframe2$location, Dataframe2$polarity)
barplot(counts3, main="Customer Feedbacks & Sources",
xlab="Number of Customer Opinion & Source", col=c("darkblue","red"),
legend = rownames(counts3), beside=TRUE)
write.csv(Dataframe2,"C:\\Users\\murali.kommanaboina\\Desktop\\output.csv")
|
ed5ce5d793c3de34d5b520837f3f198ca14ce857 | 58f4573bc3e9efbc14ff9ebbf089231c246cf066 | /demos/caseStudies/oncology/rocchetti.R | 533fd84e6b6e2c6190b7e6f121890c3c288b2674 | [] | no_license | Anathawa/mlxR | 1a4ec2f277076bd13525f0c1d912ede3d20cb1cc | 7e05119b78b47c8b19126de07c084e7d267c4baf | refs/heads/master | 2021-01-19T09:17:35.765267 | 2017-04-05T18:00:39 | 2017-04-05T18:00:39 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,191 | r | rocchetti.R | #------------------------------------------------------
# Rocchetti 2013 oncology TGI antiangiogenic combination model:
# simulate tumour growth under a two-drug oral dosing regimen with simulx.
model.pharmml <- "pharmML/Rocchetti_2013_oncology_TGI_antiangiogenic_combo_v1.xml"
#------------------------------------------------------
obs <- read.csv("data/rocchetti2013_data.csv", skip = 1, na = ".")
head(obs)

# Dosing events: drug A (EVID==1, CMT==1) targets Q0_A;
# drug B (EVID==1, CMT==3) targets Q1_B.
dose_a <- list(time   = obs$TIME[obs$EVID == 1 & obs$CMT == 1],
               amount = obs$AMT[obs$EVID == 1 & obs$CMT == 1],
               target = "Q0_A")
dose_b <- list(time   = obs$TIME[obs$EVID == 1 & obs$CMT == 3],
               amount = obs$AMT[obs$EVID == 1 & obs$CMT == 3],
               target = "Q1_B")

# Model parameters (PK/PD constants as in the published model).
params <- c(Emax=1, FV1_A=1/0.119, FV1_B=1/2.13, IC50=3.6, IC50combo=2.02,
            k1=3.54, k12=141.1, k2=0.221, k21=10.4,
            ka_A=24*log(2)/6.19, ka_B=18.8, ke_A=log(2)/6.05, ke_B=49.2,
            lambda0=0.14, lambda1=0.129, psi=20, CV=0.1, w0=0.062)

# Request Wtot and y at all non-dosing record times.
out_spec <- list(name = c("Wtot", "y"), time = obs$TIME[obs$EVID != 1])

sim_res <- simulx(model = model.pharmml,
                  parameter = params,
                  treatment = list(dose_a, dose_b),
                  output = out_spec,
                  settings = list(seed = 12345))

# Overlay simulated total tumour weight (black line) and the noisy
# observation variable y (red points).
print(ggplotmlx() +
        geom_line(data = sim_res$Wtot, aes(x = time, y = Wtot), colour = "black") +
        geom_point(data = sim_res$y, aes(x = time, y = y), colour = "red"))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.