blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f290966e0f25bbcbe24ab5cdb7f3b90273a11732
|
7cf30a07f20590b80e8a46929410daf8c8f663df
|
/significance_testing/multiple_comparisons_coverage.R
|
ceb302c717b12217ba8e978a2c7ef91449c20fc0
|
[] |
no_license
|
CPJKU/recommendation_systems_fairness
|
e2cda569b7be5ab8dca01b9e1b1d9e7382f622b4
|
480829541e5a7038e07aca9ccd1e5888df934dd2
|
refs/heads/main
| 2023-06-14T02:13:21.721708
| 2021-07-13T07:40:13
| 2021-07-13T07:40:13
| 317,505,054
| 11
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 91,157
|
r
|
multiple_comparisons_coverage.R
|
library(haven)
library(here)
library(readr)
library(car)
library(psych)
library(ggpubr)
library(ggplot2)
library(onewaytests)
library(tseries)
library(poolr)
library(dplyr)
library(corrplot)
library(FSA)
library(lsr)
library(rstatix)
library(readr)
library(scran)
library(metafor)
library(rcompanion)
get_result <- function(dunn.res, RESULTS) {
  # Append the adjusted p-values of a Dunn post-hoc test to the running
  # RESULTS list as 15 named elements (one per pairwise comparison of the
  # six recommenders: als, bpr, itemknn, pop, slim, vae).
  #
  # Args:
  #   dunn.res: Dunn test result; `dunn.res$p.adj` must hold exactly 15
  #     adjusted p-values, in the pairwise order named below.
  #   RESULTS:  list accumulating the script's output.
  #
  # Returns: RESULTS with the 15 named p-values appended (visibly — the
  #   original `return <- RESULTS` returned the value only invisibly, by
  #   accident of assignment semantics).
  #
  # NOTE(review): "alsVitemknn" looks like a typo for "alsVSitemknn"; kept
  # byte-identical because downstream consumers may key on this name.
  comparison_names <- c("alsVSbpr", "alsVitemknn", "alsVSpop", "alsVSslim",
                        "alsVSvae", "bprVSitemknn", "bprVSpop", "bprVSslim",
                        "bprVSvae", "itemknnVSpop", "itemknnVSslim",
                        "itemknnVSvae", "popVSslim", "popVSvae", "slimVSvae")
  stopifnot(length(dunn.res$p.adj) == length(comparison_names))
  # One-row data frame of p-values; c(list, data.frame) then appends each
  # column as a separate named list element.
  mult_compar <- data.frame(t(dunn.res$p.adj))
  colnames(mult_compar) <- comparison_names
  c(RESULTS, mult_compar)
}
# Configuration for the coverage significance tests ----
my_path <- here::here()            # project root (assigned for parity; not referenced below — TODO confirm)
sampling <- c("D", "N", "U")       # sampling-strategy codes used in the input file names
all_levels <- c(3, 5, 10, 20, 50)  # top-k cutoff levels iterated over per strategy
RESULTS <- list("COVERAGE")        # output accumulator, seeded with the section header
for (samp_group in sampling){
RESULTS <- c(RESULTS, samp_group)
#RESULTS <- c(RESULTS, "MALE")
for (threshold in all_levels){
RESULTS <- c(RESULTS, "===========================================")
RESULTS <- c(RESULTS, threshold)
RESULTS <- c(RESULTS, "===========================================")
data_0_pop <- read_delim(paste("full_raw_metrics_beyond_accuracy_pop", samp_group, "_0.csv", sep=""), "\t", escape_double = FALSE, trim_ws = TRUE)
data_0_itemknn <- read_delim(paste("full_raw_metrics_beyond_accuracy_itemknn", samp_group, "_0.csv", sep=""), "\t", escape_double = FALSE, trim_ws = TRUE)
data_0_als <- read_delim(paste("full_raw_metrics_beyond_accuracy_als", samp_group, "_0.csv", sep=""), "\t", escape_double = FALSE, trim_ws = TRUE)
data_0_bpr <- read_delim(paste("full_raw_metrics_beyond_accuracy_bpr", samp_group, "_0.csv", sep=""), "\t", escape_double = FALSE, trim_ws = TRUE)
data_0_slim <- read_delim(paste("full_raw_metrics_beyond_accuracy_slim", samp_group, "_0.csv", sep=""), "\t", escape_double = FALSE, trim_ws = TRUE)
data_0_vae <- read_delim(paste("full_raw_metrics_beyond_accuracy_vae", samp_group, "_0.csv", sep=""), "\t", escape_double = FALSE, trim_ws = TRUE)
data_1_pop <- read_delim(paste("full_raw_metrics_beyond_accuracy_pop", samp_group, "_1.csv", sep=""), "\t", escape_double = FALSE, trim_ws = TRUE)
data_1_itemknn <- read_delim(paste("full_raw_metrics_beyond_accuracy_itemknn", samp_group, "_1.csv", sep=""), "\t", escape_double = FALSE, trim_ws = TRUE)
data_1_als <- read_delim(paste("full_raw_metrics_beyond_accuracy_als", samp_group, "_1.csv", sep=""), "\t", escape_double = FALSE, trim_ws = TRUE)
data_1_bpr <- read_delim(paste("full_raw_metrics_beyond_accuracy_bpr", samp_group, "_1.csv", sep=""), "\t", escape_double = FALSE, trim_ws = TRUE)
data_1_slim <- read_delim(paste("full_raw_metrics_beyond_accuracy_slim", samp_group, "_1.csv", sep=""), "\t", escape_double = FALSE, trim_ws = TRUE)
data_1_vae <- read_delim(paste("full_raw_metrics_beyond_accuracy_vae", samp_group, "_1.csv", sep=""), "\t", escape_double = FALSE, trim_ws = TRUE)
data_2_pop <- read_delim(paste("full_raw_metrics_beyond_accuracy_pop", samp_group, "_2.csv", sep=""), "\t", escape_double = FALSE, trim_ws = TRUE)
data_2_itemknn <- read_delim(paste("full_raw_metrics_beyond_accuracy_itemknn", samp_group, "_2.csv", sep=""), "\t", escape_double = FALSE, trim_ws = TRUE)
data_2_als <- read_delim(paste("full_raw_metrics_beyond_accuracy_als", samp_group, "_2.csv", sep=""), "\t", escape_double = FALSE, trim_ws = TRUE)
data_2_bpr <- read_delim(paste("full_raw_metrics_beyond_accuracy_bpr", samp_group, "_2.csv", sep=""), "\t", escape_double = FALSE, trim_ws = TRUE)
data_2_slim <- read_delim(paste("full_raw_metrics_beyond_accuracy_slim", samp_group, "_2.csv", sep=""), "\t", escape_double = FALSE, trim_ws = TRUE)
data_2_vae <- read_delim(paste("full_raw_metrics_beyond_accuracy_vae", samp_group, "_2.csv", sep=""), "\t", escape_double = FALSE, trim_ws = TRUE)
data3_pop <- read_delim(paste("full_raw_metrics_beyond_accuracy_pop", samp_group, "_3.csv", sep=""), "\t", escape_double = FALSE, trim_ws = TRUE)
data3_itemknn <- read_delim(paste("full_raw_metrics_beyond_accuracy_itemknn", samp_group, "_3.csv", sep=""), "\t", escape_double = FALSE, trim_ws = TRUE)
data3_als <- read_delim(paste("full_raw_metrics_beyond_accuracy_als", samp_group, "_3.csv", sep=""), "\t", escape_double = FALSE, trim_ws = TRUE)
data3_bpr <- read_delim(paste("full_raw_metrics_beyond_accuracy_bpr", samp_group, "_3.csv", sep=""), "\t", escape_double = FALSE, trim_ws = TRUE)
data3_slim <- read_delim(paste("full_raw_metrics_beyond_accuracy_slim", samp_group, "_3.csv", sep=""), "\t", escape_double = FALSE, trim_ws = TRUE)
data3_vae <- read_delim(paste("full_raw_metrics_beyond_accuracy_vae", samp_group, "_3.csv", sep=""), "\t", escape_double = FALSE, trim_ws = TRUE)
data_4_pop <- read_delim(paste("full_raw_metrics_beyond_accuracy_pop", samp_group, "_4.csv", sep=""), "\t", escape_double = FALSE, trim_ws = TRUE)
data_4_itemknn <- read_delim(paste("full_raw_metrics_beyond_accuracy_itemknn", samp_group, "_4.csv", sep=""), "\t", escape_double = FALSE, trim_ws = TRUE)
data_4_als <- read_delim(paste("full_raw_metrics_beyond_accuracy_als", samp_group, "_4.csv", sep=""), "\t", escape_double = FALSE, trim_ws = TRUE)
data_4_bpr <- read_delim(paste("full_raw_metrics_beyond_accuracy_bpr", samp_group, "_4.csv", sep=""), "\t", escape_double = FALSE, trim_ws = TRUE)
data_4_slim <- read_delim(paste("full_raw_metrics_beyond_accuracy_slim", samp_group, "_4.csv", sep=""), "\t", escape_double = FALSE, trim_ws = TRUE)
data_4_vae <- read_delim(paste("full_raw_metrics_beyond_accuracy_vae", samp_group, "_4.csv", sep=""), "\t", escape_double = FALSE, trim_ws = TRUE)
data_0_m_3_coverage_pop <- data_0_pop$`test/gender_m/coverage_at_3`[!is.na(data_0_pop$`test/gender_m/coverage_at_3`)]
data_0_m_3_coverage_poplabel <- c(rep('pop', length(data_0_m_3_coverage_pop)))
data_0_m_3_coverage_itemknn <- data_0_itemknn$`test/gender_m/coverage_at_3`[!is.na(data_0_itemknn$`test/gender_m/coverage_at_3`)]
data_0_m_3_coverage_itemknnlabel <- c(rep('itemknn', length(data_0_m_3_coverage_itemknn)))
data_0_m_3_coverage_als <- data_0_als$`test/gender_m/coverage_at_3`[!is.na(data_0_als$`test/gender_m/coverage_at_3`)]
data_0_m_3_coverage_alslabel <- c(rep('als', length(data_0_m_3_coverage_als)))
data_0_m_3_coverage_bpr <- data_0_bpr$`test/gender_m/coverage_at_3`[!is.na(data_0_bpr$`test/gender_m/coverage_at_3`)]
data_0_m_3_coverage_bprlabel <- c(rep('bpr', length(data_0_m_3_coverage_bpr)))
data_0_m_3_coverage_slim <- data_0_slim$`test/gender_m/coverage_at_3`[!is.na(data_0_slim$`test/gender_m/coverage_at_3`)]
data_0_m_3_coverage_slimlabel <- c(rep('slim', length(data_0_m_3_coverage_slim)))
data_0_m_3_coverage_vae <- data_0_vae$`test/gender_m/coverage_at_3`[!is.na(data_0_vae$`test/gender_m/coverage_at_3`)]
data_0_m_3_coverage_vaelabel <- c(rep('vae', length(data_0_m_3_coverage_vae)))
data_1_m_3_coverage_pop <- data_1_pop$`test/gender_m/coverage_at_3`[!is.na(data_1_pop$`test/gender_m/coverage_at_3`)]
data_1_m_3_coverage_poplabel <- c(rep('pop', length(data_1_m_3_coverage_pop)))
data_1_m_3_coverage_itemknn <- data_1_itemknn$`test/gender_m/coverage_at_3`[!is.na(data_1_itemknn$`test/gender_m/coverage_at_3`)]
data_1_m_3_coverage_itemknnlabel <- c(rep('itemknn', length(data_1_m_3_coverage_itemknn)))
data_1_m_3_coverage_als <- data_1_als$`test/gender_m/coverage_at_3`[!is.na(data_1_als$`test/gender_m/coverage_at_3`)]
data_1_m_3_coverage_alslabel <- c(rep('als', length(data_1_m_3_coverage_als)))
data_1_m_3_coverage_bpr <- data_1_bpr$`test/gender_m/coverage_at_3`[!is.na(data_1_bpr$`test/gender_m/coverage_at_3`)]
data_1_m_3_coverage_bprlabel <- c(rep('bpr', length(data_1_m_3_coverage_bpr)))
data_1_m_3_coverage_slim <- data_1_slim$`test/gender_m/coverage_at_3`[!is.na(data_1_slim$`test/gender_m/coverage_at_3`)]
data_1_m_3_coverage_slimlabel <- c(rep('slim', length(data_1_m_3_coverage_slim)))
data_1_m_3_coverage_vae <- data_1_vae$`test/gender_m/coverage_at_3`[!is.na(data_1_vae$`test/gender_m/coverage_at_3`)]
data_1_m_3_coverage_vaelabel <- c(rep('vae', length(data_1_m_3_coverage_vae)))
data_2_m_3_coverage_pop <- data_2_pop$`test/gender_m/coverage_at_3`[!is.na(data_2_pop$`test/gender_m/coverage_at_3`)]
data_2_m_3_coverage_poplabel <- c(rep('pop', length(data_2_m_3_coverage_pop)))
data_2_m_3_coverage_itemknn <- data_2_itemknn$`test/gender_m/coverage_at_3`[!is.na(data_2_itemknn$`test/gender_m/coverage_at_3`)]
data_2_m_3_coverage_itemknnlabel <- c(rep('itemknn', length(data_2_m_3_coverage_itemknn)))
data_2_m_3_coverage_als <- data_2_als$`test/gender_m/coverage_at_3`[!is.na(data_2_als$`test/gender_m/coverage_at_3`)]
data_2_m_3_coverage_alslabel <- c(rep('als', length(data_2_m_3_coverage_als)))
data_2_m_3_coverage_bpr <- data_2_bpr$`test/gender_m/coverage_at_3`[!is.na(data_2_bpr$`test/gender_m/coverage_at_3`)]
data_2_m_3_coverage_bprlabel <- c(rep('bpr', length(data_2_m_3_coverage_bpr)))
data_2_m_3_coverage_slim <- data_2_slim$`test/gender_m/coverage_at_3`[!is.na(data_2_slim$`test/gender_m/coverage_at_3`)]
data_2_m_3_coverage_slimlabel <- c(rep('slim', length(data_2_m_3_coverage_slim)))
data_2_m_3_coverage_vae <- data_2_vae$`test/gender_m/coverage_at_3`[!is.na(data_2_vae$`test/gender_m/coverage_at_3`)]
data_2_m_3_coverage_vaelabel <- c(rep('vae', length(data_2_m_3_coverage_vae)))
data3_m_3_coverage_pop <- data3_pop$`test/gender_m/coverage_at_3`[!is.na(data3_pop$`test/gender_m/coverage_at_3`)]
data3_m_3_coverage_poplabel <- c(rep('pop', length(data3_m_3_coverage_pop)))
data3_m_3_coverage_itemknn <- data3_itemknn$`test/gender_m/coverage_at_3`[!is.na(data3_itemknn$`test/gender_m/coverage_at_3`)]
data3_m_3_coverage_itemknnlabel <- c(rep('itemknn', length(data3_m_3_coverage_itemknn)))
data3_m_3_coverage_als <- data3_als$`test/gender_m/coverage_at_3`[!is.na(data3_als$`test/gender_m/coverage_at_3`)]
data3_m_3_coverage_alslabel <- c(rep('als', length(data3_m_3_coverage_als)))
data3_m_3_coverage_bpr <- data3_bpr$`test/gender_m/coverage_at_3`[!is.na(data3_bpr$`test/gender_m/coverage_at_3`)]
data3_m_3_coverage_bprlabel <- c(rep('bpr', length(data3_m_3_coverage_bpr)))
data3_m_3_coverage_slim <- data3_slim$`test/gender_m/coverage_at_3`[!is.na(data3_slim$`test/gender_m/coverage_at_3`)]
data3_m_3_coverage_slimlabel <- c(rep('slim', length(data3_m_3_coverage_slim)))
data3_m_3_coverage_vae <- data3_vae$`test/gender_m/coverage_at_3`[!is.na(data3_vae$`test/gender_m/coverage_at_3`)]
data3_m_3_coverage_vaelabel <- c(rep('vae', length(data3_m_3_coverage_vae)))
data_4_m_3_coverage_pop <- data_4_pop$`test/gender_m/coverage_at_3`[!is.na(data_4_pop$`test/gender_m/coverage_at_3`)]
data_4_m_3_coverage_poplabel <- c(rep('pop', length(data_4_m_3_coverage_pop)))
data_4_m_3_coverage_itemknn <- data_4_itemknn$`test/gender_m/coverage_at_3`[!is.na(data_4_itemknn$`test/gender_m/coverage_at_3`)]
data_4_m_3_coverage_itemknnlabel <- c(rep('itemknn', length(data_4_m_3_coverage_itemknn)))
data_4_m_3_coverage_als <- data_4_als$`test/gender_m/coverage_at_3`[!is.na(data_4_als$`test/gender_m/coverage_at_3`)]
data_4_m_3_coverage_alslabel <- c(rep('als', length(data_4_m_3_coverage_als)))
data_4_m_3_coverage_bpr <- data_4_bpr$`test/gender_m/coverage_at_3`[!is.na(data_4_bpr$`test/gender_m/coverage_at_3`)]
data_4_m_3_coverage_bprlabel <- c(rep('bpr', length(data_4_m_3_coverage_bpr)))
data_4_m_3_coverage_slim <- data_4_slim$`test/gender_m/coverage_at_3`[!is.na(data_4_slim$`test/gender_m/coverage_at_3`)]
data_4_m_3_coverage_slimlabel <- c(rep('slim', length(data_4_m_3_coverage_slim)))
data_4_m_3_coverage_vae <- data_4_vae$`test/gender_m/coverage_at_3`[!is.na(data_4_vae$`test/gender_m/coverage_at_3`)]
data_4_m_3_coverage_vaelabel <- c(rep('vae', length(data_4_m_3_coverage_vae)))
# combine data
coverage_3M <- c(data_0_m_3_coverage_pop, data_0_m_3_coverage_itemknn, data_0_m_3_coverage_als, data_0_m_3_coverage_bpr, data_0_m_3_coverage_slim, data_0_m_3_coverage_vae, data_1_m_3_coverage_pop, data_1_m_3_coverage_itemknn, data_1_m_3_coverage_als, data_1_m_3_coverage_bpr, data_1_m_3_coverage_slim, data_1_m_3_coverage_vae, data_2_m_3_coverage_pop, data_2_m_3_coverage_itemknn, data_2_m_3_coverage_als, data_2_m_3_coverage_bpr, data_2_m_3_coverage_slim, data_2_m_3_coverage_vae, data3_m_3_coverage_pop, data3_m_3_coverage_itemknn, data3_m_3_coverage_als, data3_m_3_coverage_bpr, data3_m_3_coverage_slim, data3_m_3_coverage_vae, data_4_m_3_coverage_pop, data_4_m_3_coverage_itemknn, data_4_m_3_coverage_als, data_4_m_3_coverage_bpr, data_4_m_3_coverage_slim, data_4_m_3_coverage_vae)
coverage_3labelM <- c(data_0_m_3_coverage_poplabel, data_0_m_3_coverage_itemknnlabel, data_0_m_3_coverage_alslabel, data_0_m_3_coverage_bprlabel, data_0_m_3_coverage_slimlabel, data_0_m_3_coverage_vaelabel, data_1_m_3_coverage_poplabel, data_1_m_3_coverage_itemknnlabel, data_1_m_3_coverage_alslabel, data_1_m_3_coverage_bprlabel, data_1_m_3_coverage_slimlabel, data_1_m_3_coverage_vaelabel, data_2_m_3_coverage_poplabel, data_2_m_3_coverage_itemknnlabel, data_2_m_3_coverage_alslabel, data_2_m_3_coverage_bprlabel, data_2_m_3_coverage_slimlabel, data_2_m_3_coverage_vaelabel, data3_m_3_coverage_poplabel, data3_m_3_coverage_itemknnlabel, data3_m_3_coverage_alslabel, data3_m_3_coverage_bprlabel, data3_m_3_coverage_slimlabel, data3_m_3_coverage_vaelabel, data_4_m_3_coverage_poplabel, data_4_m_3_coverage_itemknnlabel, data_4_m_3_coverage_alslabel, data_4_m_3_coverage_bprlabel, data_4_m_3_coverage_slimlabel, data_4_m_3_coverage_vaelabel)
#
data_0_m_5_coverage_pop <- data_0_pop$`test/gender_m/coverage_at_5`[!is.na(data_0_pop$`test/gender_m/coverage_at_5`)]
data_0_m_5_coverage_poplabel <- c(rep('pop', length(data_0_m_5_coverage_pop)))
data_0_m_5_coverage_itemknn <- data_0_itemknn$`test/gender_m/coverage_at_5`[!is.na(data_0_itemknn$`test/gender_m/coverage_at_5`)]
data_0_m_5_coverage_itemknnlabel <- c(rep('itemknn', length(data_0_m_5_coverage_itemknn)))
data_0_m_5_coverage_als <- data_0_als$`test/gender_m/coverage_at_5`[!is.na(data_0_als$`test/gender_m/coverage_at_5`)]
data_0_m_5_coverage_alslabel <- c(rep('als', length(data_0_m_5_coverage_als)))
data_0_m_5_coverage_bpr <- data_0_bpr$`test/gender_m/coverage_at_5`[!is.na(data_0_bpr$`test/gender_m/coverage_at_5`)]
data_0_m_5_coverage_bprlabel <- c(rep('bpr', length(data_0_m_5_coverage_bpr)))
data_0_m_5_coverage_slim <- data_0_slim$`test/gender_m/coverage_at_5`[!is.na(data_0_slim$`test/gender_m/coverage_at_5`)]
data_0_m_5_coverage_slimlabel <- c(rep('slim', length(data_0_m_5_coverage_slim)))
data_0_m_5_coverage_vae <- data_0_vae$`test/gender_m/coverage_at_5`[!is.na(data_0_vae$`test/gender_m/coverage_at_5`)]
data_0_m_5_coverage_vaelabel <- c(rep('vae', length(data_0_m_5_coverage_vae)))
data_1_m_5_coverage_pop <- data_1_pop$`test/gender_m/coverage_at_5`[!is.na(data_1_pop$`test/gender_m/coverage_at_5`)]
data_1_m_5_coverage_poplabel <- c(rep('pop', length(data_1_m_5_coverage_pop)))
data_1_m_5_coverage_itemknn <- data_1_itemknn$`test/gender_m/coverage_at_5`[!is.na(data_1_itemknn$`test/gender_m/coverage_at_5`)]
data_1_m_5_coverage_itemknnlabel <- c(rep('itemknn', length(data_1_m_5_coverage_itemknn)))
data_1_m_5_coverage_als <- data_1_als$`test/gender_m/coverage_at_5`[!is.na(data_1_als$`test/gender_m/coverage_at_5`)]
data_1_m_5_coverage_alslabel <- c(rep('als', length(data_1_m_5_coverage_als)))
data_1_m_5_coverage_bpr <- data_1_bpr$`test/gender_m/coverage_at_5`[!is.na(data_1_bpr$`test/gender_m/coverage_at_5`)]
data_1_m_5_coverage_bprlabel <- c(rep('bpr', length(data_1_m_5_coverage_bpr)))
data_1_m_5_coverage_slim <- data_1_slim$`test/gender_m/coverage_at_5`[!is.na(data_1_slim$`test/gender_m/coverage_at_5`)]
data_1_m_5_coverage_slimlabel <- c(rep('slim', length(data_1_m_5_coverage_slim)))
data_1_m_5_coverage_vae <- data_1_vae$`test/gender_m/coverage_at_5`[!is.na(data_1_vae$`test/gender_m/coverage_at_5`)]
data_1_m_5_coverage_vaelabel <- c(rep('vae', length(data_1_m_5_coverage_vae)))
data_2_m_5_coverage_pop <- data_2_pop$`test/gender_m/coverage_at_5`[!is.na(data_2_pop$`test/gender_m/coverage_at_5`)]
data_2_m_5_coverage_poplabel <- c(rep('pop', length(data_2_m_5_coverage_pop)))
data_2_m_5_coverage_itemknn <- data_2_itemknn$`test/gender_m/coverage_at_5`[!is.na(data_2_itemknn$`test/gender_m/coverage_at_5`)]
data_2_m_5_coverage_itemknnlabel <- c(rep('itemknn', length(data_2_m_5_coverage_itemknn)))
data_2_m_5_coverage_als <- data_2_als$`test/gender_m/coverage_at_5`[!is.na(data_2_als$`test/gender_m/coverage_at_5`)]
data_2_m_5_coverage_alslabel <- c(rep('als', length(data_2_m_5_coverage_als)))
data_2_m_5_coverage_bpr <- data_2_bpr$`test/gender_m/coverage_at_5`[!is.na(data_2_bpr$`test/gender_m/coverage_at_5`)]
data_2_m_5_coverage_bprlabel <- c(rep('bpr', length(data_2_m_5_coverage_bpr)))
data_2_m_5_coverage_slim <- data_2_slim$`test/gender_m/coverage_at_5`[!is.na(data_2_slim$`test/gender_m/coverage_at_5`)]
data_2_m_5_coverage_slimlabel <- c(rep('slim', length(data_2_m_5_coverage_slim)))
data_2_m_5_coverage_vae <- data_2_vae$`test/gender_m/coverage_at_5`[!is.na(data_2_vae$`test/gender_m/coverage_at_5`)]
data_2_m_5_coverage_vaelabel <- c(rep('vae', length(data_2_m_5_coverage_vae)))
data3_m_5_coverage_pop <- data3_pop$`test/gender_m/coverage_at_5`[!is.na(data3_pop$`test/gender_m/coverage_at_5`)]
data3_m_5_coverage_poplabel <- c(rep('pop', length(data3_m_5_coverage_pop)))
data3_m_5_coverage_itemknn <- data3_itemknn$`test/gender_m/coverage_at_5`[!is.na(data3_itemknn$`test/gender_m/coverage_at_5`)]
data3_m_5_coverage_itemknnlabel <- c(rep('itemknn', length(data3_m_5_coverage_itemknn)))
data3_m_5_coverage_als <- data3_als$`test/gender_m/coverage_at_5`[!is.na(data3_als$`test/gender_m/coverage_at_5`)]
data3_m_5_coverage_alslabel <- c(rep('als', length(data3_m_5_coverage_als)))
data3_m_5_coverage_bpr <- data3_bpr$`test/gender_m/coverage_at_5`[!is.na(data3_bpr$`test/gender_m/coverage_at_5`)]
data3_m_5_coverage_bprlabel <- c(rep('bpr', length(data3_m_5_coverage_bpr)))
data3_m_5_coverage_slim <- data3_slim$`test/gender_m/coverage_at_5`[!is.na(data3_slim$`test/gender_m/coverage_at_5`)]
data3_m_5_coverage_slimlabel <- c(rep('slim', length(data3_m_5_coverage_slim)))
data3_m_5_coverage_vae <- data3_vae$`test/gender_m/coverage_at_5`[!is.na(data3_vae$`test/gender_m/coverage_at_5`)]
data3_m_5_coverage_vaelabel <- c(rep('vae', length(data3_m_5_coverage_vae)))
data_4_m_5_coverage_pop <- data_4_pop$`test/gender_m/coverage_at_5`[!is.na(data_4_pop$`test/gender_m/coverage_at_5`)]
data_4_m_5_coverage_poplabel <- c(rep('pop', length(data_4_m_5_coverage_pop)))
data_4_m_5_coverage_itemknn <- data_4_itemknn$`test/gender_m/coverage_at_5`[!is.na(data_4_itemknn$`test/gender_m/coverage_at_5`)]
data_4_m_5_coverage_itemknnlabel <- c(rep('itemknn', length(data_4_m_5_coverage_itemknn)))
data_4_m_5_coverage_als <- data_4_als$`test/gender_m/coverage_at_5`[!is.na(data_4_als$`test/gender_m/coverage_at_5`)]
data_4_m_5_coverage_alslabel <- c(rep('als', length(data_4_m_5_coverage_als)))
data_4_m_5_coverage_bpr <- data_4_bpr$`test/gender_m/coverage_at_5`[!is.na(data_4_bpr$`test/gender_m/coverage_at_5`)]
data_4_m_5_coverage_bprlabel <- c(rep('bpr', length(data_4_m_5_coverage_bpr)))
data_4_m_5_coverage_slim <- data_4_slim$`test/gender_m/coverage_at_5`[!is.na(data_4_slim$`test/gender_m/coverage_at_5`)]
data_4_m_5_coverage_slimlabel <- c(rep('slim', length(data_4_m_5_coverage_slim)))
data_4_m_5_coverage_vae <- data_4_vae$`test/gender_m/coverage_at_5`[!is.na(data_4_vae$`test/gender_m/coverage_at_5`)]
data_4_m_5_coverage_vaelabel <- c(rep('vae', length(data_4_m_5_coverage_vae)))
# combine data
coverage_5M <- c(data_0_m_5_coverage_pop, data_0_m_5_coverage_itemknn, data_0_m_5_coverage_als, data_0_m_5_coverage_bpr, data_0_m_5_coverage_slim, data_0_m_5_coverage_vae, data_1_m_5_coverage_pop, data_1_m_5_coverage_itemknn, data_1_m_5_coverage_als, data_1_m_5_coverage_bpr, data_1_m_5_coverage_slim, data_1_m_5_coverage_vae, data_2_m_5_coverage_pop, data_2_m_5_coverage_itemknn, data_2_m_5_coverage_als, data_2_m_5_coverage_bpr, data_2_m_5_coverage_slim, data_2_m_5_coverage_vae, data3_m_5_coverage_pop, data3_m_5_coverage_itemknn, data3_m_5_coverage_als, data3_m_5_coverage_bpr, data3_m_5_coverage_slim, data3_m_5_coverage_vae, data_4_m_5_coverage_pop, data_4_m_5_coverage_itemknn, data_4_m_5_coverage_als, data_4_m_5_coverage_bpr, data_4_m_5_coverage_slim, data_4_m_5_coverage_vae)
coverage_5labelM <- c(data_0_m_5_coverage_poplabel, data_0_m_5_coverage_itemknnlabel, data_0_m_5_coverage_alslabel, data_0_m_5_coverage_bprlabel, data_0_m_5_coverage_slimlabel, data_0_m_5_coverage_vaelabel, data_1_m_5_coverage_poplabel, data_1_m_5_coverage_itemknnlabel, data_1_m_5_coverage_alslabel, data_1_m_5_coverage_bprlabel, data_1_m_5_coverage_slimlabel, data_1_m_5_coverage_vaelabel, data_2_m_5_coverage_poplabel, data_2_m_5_coverage_itemknnlabel, data_2_m_5_coverage_alslabel, data_2_m_5_coverage_bprlabel, data_2_m_5_coverage_slimlabel, data_2_m_5_coverage_vaelabel, data3_m_5_coverage_poplabel, data3_m_5_coverage_itemknnlabel, data3_m_5_coverage_alslabel, data3_m_5_coverage_bprlabel, data3_m_5_coverage_slimlabel, data3_m_5_coverage_vaelabel, data_4_m_5_coverage_poplabel, data_4_m_5_coverage_itemknnlabel, data_4_m_5_coverage_alslabel, data_4_m_5_coverage_bprlabel, data_4_m_5_coverage_slimlabel, data_4_m_5_coverage_vaelabel)
#
data_0_m_10_coverage_pop <- data_0_pop$`test/gender_m/coverage_at_10`[!is.na(data_0_pop$`test/gender_m/coverage_at_10`)]
data_0_m_10_coverage_poplabel <- c(rep('pop', length(data_0_m_10_coverage_pop)))
data_0_m_10_coverage_itemknn <- data_0_itemknn$`test/gender_m/coverage_at_10`[!is.na(data_0_itemknn$`test/gender_m/coverage_at_10`)]
data_0_m_10_coverage_itemknnlabel <- c(rep('itemknn', length(data_0_m_10_coverage_itemknn)))
data_0_m_10_coverage_als <- data_0_als$`test/gender_m/coverage_at_10`[!is.na(data_0_als$`test/gender_m/coverage_at_10`)]
data_0_m_10_coverage_alslabel <- c(rep('als', length(data_0_m_10_coverage_als)))
data_0_m_10_coverage_bpr <- data_0_bpr$`test/gender_m/coverage_at_10`[!is.na(data_0_bpr$`test/gender_m/coverage_at_10`)]
data_0_m_10_coverage_bprlabel <- c(rep('bpr', length(data_0_m_10_coverage_bpr)))
data_0_m_10_coverage_slim <- data_0_slim$`test/gender_m/coverage_at_10`[!is.na(data_0_slim$`test/gender_m/coverage_at_10`)]
data_0_m_10_coverage_slimlabel <- c(rep('slim', length(data_0_m_10_coverage_slim)))
data_0_m_10_coverage_vae <- data_0_vae$`test/gender_m/coverage_at_10`[!is.na(data_0_vae$`test/gender_m/coverage_at_10`)]
data_0_m_10_coverage_vaelabel <- c(rep('vae', length(data_0_m_10_coverage_vae)))
data_1_m_10_coverage_pop <- data_1_pop$`test/gender_m/coverage_at_10`[!is.na(data_1_pop$`test/gender_m/coverage_at_10`)]
data_1_m_10_coverage_poplabel <- c(rep('pop', length(data_1_m_10_coverage_pop)))
data_1_m_10_coverage_itemknn <- data_1_itemknn$`test/gender_m/coverage_at_10`[!is.na(data_1_itemknn$`test/gender_m/coverage_at_10`)]
data_1_m_10_coverage_itemknnlabel <- c(rep('itemknn', length(data_1_m_10_coverage_itemknn)))
data_1_m_10_coverage_als <- data_1_als$`test/gender_m/coverage_at_10`[!is.na(data_1_als$`test/gender_m/coverage_at_10`)]
data_1_m_10_coverage_alslabel <- c(rep('als', length(data_1_m_10_coverage_als)))
data_1_m_10_coverage_bpr <- data_1_bpr$`test/gender_m/coverage_at_10`[!is.na(data_1_bpr$`test/gender_m/coverage_at_10`)]
data_1_m_10_coverage_bprlabel <- c(rep('bpr', length(data_1_m_10_coverage_bpr)))
data_1_m_10_coverage_slim <- data_1_slim$`test/gender_m/coverage_at_10`[!is.na(data_1_slim$`test/gender_m/coverage_at_10`)]
data_1_m_10_coverage_slimlabel <- c(rep('slim', length(data_1_m_10_coverage_slim)))
data_1_m_10_coverage_vae <- data_1_vae$`test/gender_m/coverage_at_10`[!is.na(data_1_vae$`test/gender_m/coverage_at_10`)]
data_1_m_10_coverage_vaelabel <- c(rep('vae', length(data_1_m_10_coverage_vae)))
data_2_m_10_coverage_pop <- data_2_pop$`test/gender_m/coverage_at_10`[!is.na(data_2_pop$`test/gender_m/coverage_at_10`)]
data_2_m_10_coverage_poplabel <- c(rep('pop', length(data_2_m_10_coverage_pop)))
data_2_m_10_coverage_itemknn <- data_2_itemknn$`test/gender_m/coverage_at_10`[!is.na(data_2_itemknn$`test/gender_m/coverage_at_10`)]
data_2_m_10_coverage_itemknnlabel <- c(rep('itemknn', length(data_2_m_10_coverage_itemknn)))
data_2_m_10_coverage_als <- data_2_als$`test/gender_m/coverage_at_10`[!is.na(data_2_als$`test/gender_m/coverage_at_10`)]
data_2_m_10_coverage_alslabel <- c(rep('als', length(data_2_m_10_coverage_als)))
data_2_m_10_coverage_bpr <- data_2_bpr$`test/gender_m/coverage_at_10`[!is.na(data_2_bpr$`test/gender_m/coverage_at_10`)]
data_2_m_10_coverage_bprlabel <- c(rep('bpr', length(data_2_m_10_coverage_bpr)))
data_2_m_10_coverage_slim <- data_2_slim$`test/gender_m/coverage_at_10`[!is.na(data_2_slim$`test/gender_m/coverage_at_10`)]
data_2_m_10_coverage_slimlabel <- c(rep('slim', length(data_2_m_10_coverage_slim)))
data_2_m_10_coverage_vae <- data_2_vae$`test/gender_m/coverage_at_10`[!is.na(data_2_vae$`test/gender_m/coverage_at_10`)]
data_2_m_10_coverage_vaelabel <- c(rep('vae', length(data_2_m_10_coverage_vae)))
data3_m_10_coverage_pop <- data3_pop$`test/gender_m/coverage_at_10`[!is.na(data3_pop$`test/gender_m/coverage_at_10`)]
data3_m_10_coverage_poplabel <- c(rep('pop', length(data3_m_10_coverage_pop)))
data3_m_10_coverage_itemknn <- data3_itemknn$`test/gender_m/coverage_at_10`[!is.na(data3_itemknn$`test/gender_m/coverage_at_10`)]
data3_m_10_coverage_itemknnlabel <- c(rep('itemknn', length(data3_m_10_coverage_itemknn)))
data3_m_10_coverage_als <- data3_als$`test/gender_m/coverage_at_10`[!is.na(data3_als$`test/gender_m/coverage_at_10`)]
data3_m_10_coverage_alslabel <- c(rep('als', length(data3_m_10_coverage_als)))
data3_m_10_coverage_bpr <- data3_bpr$`test/gender_m/coverage_at_10`[!is.na(data3_bpr$`test/gender_m/coverage_at_10`)]
data3_m_10_coverage_bprlabel <- c(rep('bpr', length(data3_m_10_coverage_bpr)))
data3_m_10_coverage_slim <- data3_slim$`test/gender_m/coverage_at_10`[!is.na(data3_slim$`test/gender_m/coverage_at_10`)]
data3_m_10_coverage_slimlabel <- c(rep('slim', length(data3_m_10_coverage_slim)))
data3_m_10_coverage_vae <- data3_vae$`test/gender_m/coverage_at_10`[!is.na(data3_vae$`test/gender_m/coverage_at_10`)]
data3_m_10_coverage_vaelabel <- c(rep('vae', length(data3_m_10_coverage_vae)))
data_4_m_10_coverage_pop <- data_4_pop$`test/gender_m/coverage_at_10`[!is.na(data_4_pop$`test/gender_m/coverage_at_10`)]
data_4_m_10_coverage_poplabel <- c(rep('pop', length(data_4_m_10_coverage_pop)))
data_4_m_10_coverage_itemknn <- data_4_itemknn$`test/gender_m/coverage_at_10`[!is.na(data_4_itemknn$`test/gender_m/coverage_at_10`)]
data_4_m_10_coverage_itemknnlabel <- c(rep('itemknn', length(data_4_m_10_coverage_itemknn)))
data_4_m_10_coverage_als <- data_4_als$`test/gender_m/coverage_at_10`[!is.na(data_4_als$`test/gender_m/coverage_at_10`)]
data_4_m_10_coverage_alslabel <- c(rep('als', length(data_4_m_10_coverage_als)))
data_4_m_10_coverage_bpr <- data_4_bpr$`test/gender_m/coverage_at_10`[!is.na(data_4_bpr$`test/gender_m/coverage_at_10`)]
data_4_m_10_coverage_bprlabel <- c(rep('bpr', length(data_4_m_10_coverage_bpr)))
data_4_m_10_coverage_slim <- data_4_slim$`test/gender_m/coverage_at_10`[!is.na(data_4_slim$`test/gender_m/coverage_at_10`)]
data_4_m_10_coverage_slimlabel <- c(rep('slim', length(data_4_m_10_coverage_slim)))
data_4_m_10_coverage_vae <- data_4_vae$`test/gender_m/coverage_at_10`[!is.na(data_4_vae$`test/gender_m/coverage_at_10`)]
data_4_m_10_coverage_vaelabel <- c(rep('vae', length(data_4_m_10_coverage_vae)))
# combine data
coverage_10M <- c(data_0_m_10_coverage_pop, data_0_m_10_coverage_itemknn, data_0_m_10_coverage_als, data_0_m_10_coverage_bpr, data_0_m_10_coverage_slim, data_0_m_10_coverage_vae, data_1_m_10_coverage_pop, data_1_m_10_coverage_itemknn, data_1_m_10_coverage_als, data_1_m_10_coverage_bpr, data_1_m_10_coverage_slim, data_1_m_10_coverage_vae, data_2_m_10_coverage_pop, data_2_m_10_coverage_itemknn, data_2_m_10_coverage_als, data_2_m_10_coverage_bpr, data_2_m_10_coverage_slim, data_2_m_10_coverage_vae, data3_m_10_coverage_pop, data3_m_10_coverage_itemknn, data3_m_10_coverage_als, data3_m_10_coverage_bpr, data3_m_10_coverage_slim, data3_m_10_coverage_vae, data_4_m_10_coverage_pop, data_4_m_10_coverage_itemknn, data_4_m_10_coverage_als, data_4_m_10_coverage_bpr, data_4_m_10_coverage_slim, data_4_m_10_coverage_vae)
coverage_10labelM <- c(data_0_m_10_coverage_poplabel, data_0_m_10_coverage_itemknnlabel, data_0_m_10_coverage_alslabel, data_0_m_10_coverage_bprlabel, data_0_m_10_coverage_slimlabel, data_0_m_10_coverage_vaelabel, data_1_m_10_coverage_poplabel, data_1_m_10_coverage_itemknnlabel, data_1_m_10_coverage_alslabel, data_1_m_10_coverage_bprlabel, data_1_m_10_coverage_slimlabel, data_1_m_10_coverage_vaelabel, data_2_m_10_coverage_poplabel, data_2_m_10_coverage_itemknnlabel, data_2_m_10_coverage_alslabel, data_2_m_10_coverage_bprlabel, data_2_m_10_coverage_slimlabel, data_2_m_10_coverage_vaelabel, data3_m_10_coverage_poplabel, data3_m_10_coverage_itemknnlabel, data3_m_10_coverage_alslabel, data3_m_10_coverage_bprlabel, data3_m_10_coverage_slimlabel, data3_m_10_coverage_vaelabel, data_4_m_10_coverage_poplabel, data_4_m_10_coverage_itemknnlabel, data_4_m_10_coverage_alslabel, data_4_m_10_coverage_bprlabel, data_4_m_10_coverage_slimlabel, data_4_m_10_coverage_vaelabel)
#
# Male coverage@20: for every fold / algorithm pair, extract the non-NA
# per-user coverage values and build a matching algorithm-label vector.
# This replaces ~60 copy-pasted assignments (which also wrapped rep() in a
# pointless c()) with one nested loop. The same globals are still created
# via assign() — e.g. `data_0_m_20_coverage_pop` and
# `data_0_m_20_coverage_poplabel` — so any downstream direct use keeps
# working, and `coverage_20M` / `coverage_20labelM` are appended in the
# original fold-major / algorithm-minor order.
# NOTE(review): fold 3's input data frames are named `data3_*` (no
# underscore), unlike the other folds — preserved deliberately.
m20_folds <- c("data_0", "data_1", "data_2", "data3", "data_4")
m20_algos <- c("pop", "itemknn", "als", "bpr", "slim", "vae")
coverage_20M <- c()
coverage_20labelM <- c()
for (m20_fold in m20_folds) {
  for (m20_alg in m20_algos) {
    # `df[["col"]]` on the fold/algorithm data frame is equivalent to the
    # original backtick-quoted `$` access.
    m20_vals <- get(paste0(m20_fold, "_", m20_alg))[["test/gender_m/coverage_at_20"]]
    m20_vals <- m20_vals[!is.na(m20_vals)]
    m20_lab <- rep(m20_alg, length(m20_vals))
    # Recreate the per-combination globals the original script defined.
    assign(paste0(m20_fold, "_m_20_coverage_", m20_alg), m20_vals)
    assign(paste0(m20_fold, "_m_20_coverage_", m20_alg, "label"), m20_lab)
    # combine data
    coverage_20M <- c(coverage_20M, m20_vals)
    coverage_20labelM <- c(coverage_20labelM, m20_lab)
  }
}
#
# Male coverage@50: for every fold / algorithm pair, extract the non-NA
# per-user coverage values and build a matching algorithm-label vector.
# Replaces ~60 copy-pasted assignments (and the redundant c(rep(...))
# wrappers) with one nested loop. The same globals are still created via
# assign() — e.g. `data_0_m_50_coverage_pop` and
# `data_0_m_50_coverage_poplabel` — and `coverage_50M` /
# `coverage_50labelM` are appended in the original fold-major /
# algorithm-minor order.
# NOTE(review): fold 3's input data frames are named `data3_*` (no
# underscore), unlike the other folds — preserved deliberately.
m50_folds <- c("data_0", "data_1", "data_2", "data3", "data_4")
m50_algos <- c("pop", "itemknn", "als", "bpr", "slim", "vae")
coverage_50M <- c()
coverage_50labelM <- c()
for (m50_fold in m50_folds) {
  for (m50_alg in m50_algos) {
    # `df[["col"]]` on the fold/algorithm data frame is equivalent to the
    # original backtick-quoted `$` access.
    m50_vals <- get(paste0(m50_fold, "_", m50_alg))[["test/gender_m/coverage_at_50"]]
    m50_vals <- m50_vals[!is.na(m50_vals)]
    m50_lab <- rep(m50_alg, length(m50_vals))
    # Recreate the per-combination globals the original script defined.
    assign(paste0(m50_fold, "_m_50_coverage_", m50_alg), m50_vals)
    assign(paste0(m50_fold, "_m_50_coverage_", m50_alg, "label"), m50_lab)
    # combine data
    coverage_50M <- c(coverage_50M, m50_vals)
    coverage_50labelM <- c(coverage_50labelM, m50_lab)
  }
}
##
# Collect every male coverage metric (cut-offs 3, 5, 10, 20, 50) together
# with the matching algorithm-label columns into a single data frame for
# the downstream significance tests. Explicit column names reproduce the
# names data.frame() would have derived from the bare variable names.
coverage_m <- data.frame(
  coverage_3M = coverage_3M,
  coverage_3labelM = coverage_3labelM,
  coverage_5M = coverage_5M,
  coverage_5labelM = coverage_5labelM,
  coverage_10M = coverage_10M,
  coverage_10labelM = coverage_10labelM,
  coverage_20M = coverage_20M,
  coverage_20labelM = coverage_20labelM,
  coverage_50M = coverage_50M,
  coverage_50labelM = coverage_50labelM
)
#View(coverage_m)
##
##
# Coverage@3 for female users (column `test/gender_f/coverage_at_3`).
# For every (split, algorithm) pair this produces variables byte-identical
# to the original hand-written assignments:
#   <split>_f_3_coverage_<algo>       -- NA-filtered coverage values
#   <split>_f_3_coverage_<algo>label  -- matching algorithm-name labels
# Assumes each `<split>_<algo>` object is a data frame carrying that
# column (as loaded earlier in this script) -- verify if inputs change.
# NOTE: split 3 is named "data3" (no underscore) elsewhere in this script,
# so the splits vector mirrors that inconsistency on purpose.
cov_splits <- c("data_0", "data_1", "data_2", "data3", "data_4")
cov_algos <- c("pop", "itemknn", "als", "bpr", "slim", "vae")
for (cov_split in cov_splits) {
  for (cov_algo in cov_algos) {
    cov_col <- get(paste0(cov_split, "_", cov_algo))[["test/gender_f/coverage_at_3"]]
    cov_vals <- cov_col[!is.na(cov_col)]
    assign(paste0(cov_split, "_f_3_coverage_", cov_algo), cov_vals)
    assign(paste0(cov_split, "_f_3_coverage_", cov_algo, "label"),
           rep(cov_algo, length(cov_vals)))
  }
}
# combine data (split-major, algorithm-minor order, same as the manual c(...) calls)
cov_names_f3 <- paste0(rep(cov_splits, each = length(cov_algos)),
                       "_f_3_coverage_",
                       rep(cov_algos, times = length(cov_splits)))
coverage_3F <- do.call(c, unname(mget(cov_names_f3)))
coverage_3labelF <- do.call(c, unname(mget(paste0(cov_names_f3, "label"))))
#
# Coverage@5 for female users (column `test/gender_f/coverage_at_5`).
# For every (split, algorithm) pair this produces variables byte-identical
# to the original hand-written assignments:
#   <split>_f_5_coverage_<algo>       -- NA-filtered coverage values
#   <split>_f_5_coverage_<algo>label  -- matching algorithm-name labels
# Assumes each `<split>_<algo>` object is a data frame carrying that
# column (as loaded earlier in this script) -- verify if inputs change.
# NOTE: split 3 is named "data3" (no underscore) elsewhere in this script,
# so the splits vector mirrors that inconsistency on purpose.
cov_splits <- c("data_0", "data_1", "data_2", "data3", "data_4")
cov_algos <- c("pop", "itemknn", "als", "bpr", "slim", "vae")
for (cov_split in cov_splits) {
  for (cov_algo in cov_algos) {
    cov_col <- get(paste0(cov_split, "_", cov_algo))[["test/gender_f/coverage_at_5"]]
    cov_vals <- cov_col[!is.na(cov_col)]
    assign(paste0(cov_split, "_f_5_coverage_", cov_algo), cov_vals)
    assign(paste0(cov_split, "_f_5_coverage_", cov_algo, "label"),
           rep(cov_algo, length(cov_vals)))
  }
}
# combine data (split-major, algorithm-minor order, same as the manual c(...) calls)
cov_names_f5 <- paste0(rep(cov_splits, each = length(cov_algos)),
                       "_f_5_coverage_",
                       rep(cov_algos, times = length(cov_splits)))
coverage_5F <- do.call(c, unname(mget(cov_names_f5)))
coverage_5labelF <- do.call(c, unname(mget(paste0(cov_names_f5, "label"))))
#
# Coverage@10 for female users (column `test/gender_f/coverage_at_10`).
# For every (split, algorithm) pair this produces variables byte-identical
# to the original hand-written assignments:
#   <split>_f_10_coverage_<algo>       -- NA-filtered coverage values
#   <split>_f_10_coverage_<algo>label  -- matching algorithm-name labels
# Assumes each `<split>_<algo>` object is a data frame carrying that
# column (as loaded earlier in this script) -- verify if inputs change.
# NOTE: split 3 is named "data3" (no underscore) elsewhere in this script,
# so the splits vector mirrors that inconsistency on purpose.
cov_splits <- c("data_0", "data_1", "data_2", "data3", "data_4")
cov_algos <- c("pop", "itemknn", "als", "bpr", "slim", "vae")
for (cov_split in cov_splits) {
  for (cov_algo in cov_algos) {
    cov_col <- get(paste0(cov_split, "_", cov_algo))[["test/gender_f/coverage_at_10"]]
    cov_vals <- cov_col[!is.na(cov_col)]
    assign(paste0(cov_split, "_f_10_coverage_", cov_algo), cov_vals)
    assign(paste0(cov_split, "_f_10_coverage_", cov_algo, "label"),
           rep(cov_algo, length(cov_vals)))
  }
}
# combine data (split-major, algorithm-minor order, same as the manual c(...) calls)
cov_names_f10 <- paste0(rep(cov_splits, each = length(cov_algos)),
                        "_f_10_coverage_",
                        rep(cov_algos, times = length(cov_splits)))
coverage_10F <- do.call(c, unname(mget(cov_names_f10)))
coverage_10labelF <- do.call(c, unname(mget(paste0(cov_names_f10, "label"))))
#
# Coverage@20 for female users (column `test/gender_f/coverage_at_20`).
# For every (split, algorithm) pair this produces variables byte-identical
# to the original hand-written assignments:
#   <split>_f_20_coverage_<algo>       -- NA-filtered coverage values
#   <split>_f_20_coverage_<algo>label  -- matching algorithm-name labels
# Assumes each `<split>_<algo>` object is a data frame carrying that
# column (as loaded earlier in this script) -- verify if inputs change.
# NOTE: split 3 is named "data3" (no underscore) elsewhere in this script,
# so the splits vector mirrors that inconsistency on purpose.
cov_splits <- c("data_0", "data_1", "data_2", "data3", "data_4")
cov_algos <- c("pop", "itemknn", "als", "bpr", "slim", "vae")
for (cov_split in cov_splits) {
  for (cov_algo in cov_algos) {
    cov_col <- get(paste0(cov_split, "_", cov_algo))[["test/gender_f/coverage_at_20"]]
    cov_vals <- cov_col[!is.na(cov_col)]
    assign(paste0(cov_split, "_f_20_coverage_", cov_algo), cov_vals)
    assign(paste0(cov_split, "_f_20_coverage_", cov_algo, "label"),
           rep(cov_algo, length(cov_vals)))
  }
}
# combine data (split-major, algorithm-minor order, same as the manual c(...) calls)
cov_names_f20 <- paste0(rep(cov_splits, each = length(cov_algos)),
                        "_f_20_coverage_",
                        rep(cov_algos, times = length(cov_splits)))
coverage_20F <- do.call(c, unname(mget(cov_names_f20)))
coverage_20labelF <- do.call(c, unname(mget(paste0(cov_names_f20, "label"))))
#
# Coverage@50 for female users: for each CV fold and each recommender, drop
# NA entries and build a matching label vector of the algorithm name.
# NOTE(review): fold 3 uses the inconsistent prefix "data3_" while the other
# folds use "data_0_".."data_4_" -- presumably the same naming slip exists
# where these objects are created earlier in the file; confirm upstream.
# fold 0
data_0_f_50_coverage_pop <- data_0_pop$`test/gender_f/coverage_at_50`[!is.na(data_0_pop$`test/gender_f/coverage_at_50`)]
data_0_f_50_coverage_poplabel <- c(rep('pop', length(data_0_f_50_coverage_pop)))
data_0_f_50_coverage_itemknn <- data_0_itemknn$`test/gender_f/coverage_at_50`[!is.na(data_0_itemknn$`test/gender_f/coverage_at_50`)]
data_0_f_50_coverage_itemknnlabel <- c(rep('itemknn', length(data_0_f_50_coverage_itemknn)))
data_0_f_50_coverage_als <- data_0_als$`test/gender_f/coverage_at_50`[!is.na(data_0_als$`test/gender_f/coverage_at_50`)]
data_0_f_50_coverage_alslabel <- c(rep('als', length(data_0_f_50_coverage_als)))
data_0_f_50_coverage_bpr <- data_0_bpr$`test/gender_f/coverage_at_50`[!is.na(data_0_bpr$`test/gender_f/coverage_at_50`)]
data_0_f_50_coverage_bprlabel <- c(rep('bpr', length(data_0_f_50_coverage_bpr)))
data_0_f_50_coverage_slim <- data_0_slim$`test/gender_f/coverage_at_50`[!is.na(data_0_slim$`test/gender_f/coverage_at_50`)]
data_0_f_50_coverage_slimlabel <- c(rep('slim', length(data_0_f_50_coverage_slim)))
data_0_f_50_coverage_vae <- data_0_vae$`test/gender_f/coverage_at_50`[!is.na(data_0_vae$`test/gender_f/coverage_at_50`)]
data_0_f_50_coverage_vaelabel <- c(rep('vae', length(data_0_f_50_coverage_vae)))
# fold 1
data_1_f_50_coverage_pop <- data_1_pop$`test/gender_f/coverage_at_50`[!is.na(data_1_pop$`test/gender_f/coverage_at_50`)]
data_1_f_50_coverage_poplabel <- c(rep('pop', length(data_1_f_50_coverage_pop)))
data_1_f_50_coverage_itemknn <- data_1_itemknn$`test/gender_f/coverage_at_50`[!is.na(data_1_itemknn$`test/gender_f/coverage_at_50`)]
data_1_f_50_coverage_itemknnlabel <- c(rep('itemknn', length(data_1_f_50_coverage_itemknn)))
data_1_f_50_coverage_als <- data_1_als$`test/gender_f/coverage_at_50`[!is.na(data_1_als$`test/gender_f/coverage_at_50`)]
data_1_f_50_coverage_alslabel <- c(rep('als', length(data_1_f_50_coverage_als)))
data_1_f_50_coverage_bpr <- data_1_bpr$`test/gender_f/coverage_at_50`[!is.na(data_1_bpr$`test/gender_f/coverage_at_50`)]
data_1_f_50_coverage_bprlabel <- c(rep('bpr', length(data_1_f_50_coverage_bpr)))
data_1_f_50_coverage_slim <- data_1_slim$`test/gender_f/coverage_at_50`[!is.na(data_1_slim$`test/gender_f/coverage_at_50`)]
data_1_f_50_coverage_slimlabel <- c(rep('slim', length(data_1_f_50_coverage_slim)))
data_1_f_50_coverage_vae <- data_1_vae$`test/gender_f/coverage_at_50`[!is.na(data_1_vae$`test/gender_f/coverage_at_50`)]
data_1_f_50_coverage_vaelabel <- c(rep('vae', length(data_1_f_50_coverage_vae)))
# fold 2
data_2_f_50_coverage_pop <- data_2_pop$`test/gender_f/coverage_at_50`[!is.na(data_2_pop$`test/gender_f/coverage_at_50`)]
data_2_f_50_coverage_poplabel <- c(rep('pop', length(data_2_f_50_coverage_pop)))
data_2_f_50_coverage_itemknn <- data_2_itemknn$`test/gender_f/coverage_at_50`[!is.na(data_2_itemknn$`test/gender_f/coverage_at_50`)]
data_2_f_50_coverage_itemknnlabel <- c(rep('itemknn', length(data_2_f_50_coverage_itemknn)))
data_2_f_50_coverage_als <- data_2_als$`test/gender_f/coverage_at_50`[!is.na(data_2_als$`test/gender_f/coverage_at_50`)]
data_2_f_50_coverage_alslabel <- c(rep('als', length(data_2_f_50_coverage_als)))
data_2_f_50_coverage_bpr <- data_2_bpr$`test/gender_f/coverage_at_50`[!is.na(data_2_bpr$`test/gender_f/coverage_at_50`)]
data_2_f_50_coverage_bprlabel <- c(rep('bpr', length(data_2_f_50_coverage_bpr)))
data_2_f_50_coverage_slim <- data_2_slim$`test/gender_f/coverage_at_50`[!is.na(data_2_slim$`test/gender_f/coverage_at_50`)]
data_2_f_50_coverage_slimlabel <- c(rep('slim', length(data_2_f_50_coverage_slim)))
data_2_f_50_coverage_vae <- data_2_vae$`test/gender_f/coverage_at_50`[!is.na(data_2_vae$`test/gender_f/coverage_at_50`)]
data_2_f_50_coverage_vaelabel <- c(rep('vae', length(data_2_f_50_coverage_vae)))
# fold 3 (note the "data3_" prefix)
data3_f_50_coverage_pop <- data3_pop$`test/gender_f/coverage_at_50`[!is.na(data3_pop$`test/gender_f/coverage_at_50`)]
data3_f_50_coverage_poplabel <- c(rep('pop', length(data3_f_50_coverage_pop)))
data3_f_50_coverage_itemknn <- data3_itemknn$`test/gender_f/coverage_at_50`[!is.na(data3_itemknn$`test/gender_f/coverage_at_50`)]
data3_f_50_coverage_itemknnlabel <- c(rep('itemknn', length(data3_f_50_coverage_itemknn)))
data3_f_50_coverage_als <- data3_als$`test/gender_f/coverage_at_50`[!is.na(data3_als$`test/gender_f/coverage_at_50`)]
data3_f_50_coverage_alslabel <- c(rep('als', length(data3_f_50_coverage_als)))
data3_f_50_coverage_bpr <- data3_bpr$`test/gender_f/coverage_at_50`[!is.na(data3_bpr$`test/gender_f/coverage_at_50`)]
data3_f_50_coverage_bprlabel <- c(rep('bpr', length(data3_f_50_coverage_bpr)))
data3_f_50_coverage_slim <- data3_slim$`test/gender_f/coverage_at_50`[!is.na(data3_slim$`test/gender_f/coverage_at_50`)]
data3_f_50_coverage_slimlabel <- c(rep('slim', length(data3_f_50_coverage_slim)))
data3_f_50_coverage_vae <- data3_vae$`test/gender_f/coverage_at_50`[!is.na(data3_vae$`test/gender_f/coverage_at_50`)]
data3_f_50_coverage_vaelabel <- c(rep('vae', length(data3_f_50_coverage_vae)))
# fold 4
data_4_f_50_coverage_pop <- data_4_pop$`test/gender_f/coverage_at_50`[!is.na(data_4_pop$`test/gender_f/coverage_at_50`)]
data_4_f_50_coverage_poplabel <- c(rep('pop', length(data_4_f_50_coverage_pop)))
data_4_f_50_coverage_itemknn <- data_4_itemknn$`test/gender_f/coverage_at_50`[!is.na(data_4_itemknn$`test/gender_f/coverage_at_50`)]
data_4_f_50_coverage_itemknnlabel <- c(rep('itemknn', length(data_4_f_50_coverage_itemknn)))
data_4_f_50_coverage_als <- data_4_als$`test/gender_f/coverage_at_50`[!is.na(data_4_als$`test/gender_f/coverage_at_50`)]
data_4_f_50_coverage_alslabel <- c(rep('als', length(data_4_f_50_coverage_als)))
data_4_f_50_coverage_bpr <- data_4_bpr$`test/gender_f/coverage_at_50`[!is.na(data_4_bpr$`test/gender_f/coverage_at_50`)]
data_4_f_50_coverage_bprlabel <- c(rep('bpr', length(data_4_f_50_coverage_bpr)))
data_4_f_50_coverage_slim <- data_4_slim$`test/gender_f/coverage_at_50`[!is.na(data_4_slim$`test/gender_f/coverage_at_50`)]
data_4_f_50_coverage_slimlabel <- c(rep('slim', length(data_4_f_50_coverage_slim)))
data_4_f_50_coverage_vae <- data_4_vae$`test/gender_f/coverage_at_50`[!is.na(data_4_vae$`test/gender_f/coverage_at_50`)]
data_4_f_50_coverage_vaelabel <- c(rep('vae', length(data_4_f_50_coverage_vae)))
# combine data
# Pool coverage@50 values and labels across all folds/algorithms (same order
# as the per-fold extraction above).
coverage_50F <- c(data_0_f_50_coverage_pop, data_0_f_50_coverage_itemknn, data_0_f_50_coverage_als, data_0_f_50_coverage_bpr, data_0_f_50_coverage_slim, data_0_f_50_coverage_vae, data_1_f_50_coverage_pop, data_1_f_50_coverage_itemknn, data_1_f_50_coverage_als, data_1_f_50_coverage_bpr, data_1_f_50_coverage_slim, data_1_f_50_coverage_vae, data_2_f_50_coverage_pop, data_2_f_50_coverage_itemknn, data_2_f_50_coverage_als, data_2_f_50_coverage_bpr, data_2_f_50_coverage_slim, data_2_f_50_coverage_vae, data3_f_50_coverage_pop, data3_f_50_coverage_itemknn, data3_f_50_coverage_als, data3_f_50_coverage_bpr, data3_f_50_coverage_slim, data3_f_50_coverage_vae, data_4_f_50_coverage_pop, data_4_f_50_coverage_itemknn, data_4_f_50_coverage_als, data_4_f_50_coverage_bpr, data_4_f_50_coverage_slim, data_4_f_50_coverage_vae)
coverage_50labelF <- c(data_0_f_50_coverage_poplabel, data_0_f_50_coverage_itemknnlabel, data_0_f_50_coverage_alslabel, data_0_f_50_coverage_bprlabel, data_0_f_50_coverage_slimlabel, data_0_f_50_coverage_vaelabel, data_1_f_50_coverage_poplabel, data_1_f_50_coverage_itemknnlabel, data_1_f_50_coverage_alslabel, data_1_f_50_coverage_bprlabel, data_1_f_50_coverage_slimlabel, data_1_f_50_coverage_vaelabel, data_2_f_50_coverage_poplabel, data_2_f_50_coverage_itemknnlabel, data_2_f_50_coverage_alslabel, data_2_f_50_coverage_bprlabel, data_2_f_50_coverage_slimlabel, data_2_f_50_coverage_vaelabel, data3_f_50_coverage_poplabel, data3_f_50_coverage_itemknnlabel, data3_f_50_coverage_alslabel, data3_f_50_coverage_bprlabel, data3_f_50_coverage_slimlabel, data3_f_50_coverage_vaelabel, data_4_f_50_coverage_poplabel, data_4_f_50_coverage_itemknnlabel, data_4_f_50_coverage_alslabel, data_4_f_50_coverage_bprlabel, data_4_f_50_coverage_slimlabel, data_4_f_50_coverage_vaelabel)
##
# make dataframe
# Female-only long-format frame: one (value, label) column pair per cutoff
# (3, 5, 10, 20, 50). All vectors share the same fold/algorithm ordering.
coverage_f <- data.frame(coverage_3F, coverage_3labelF, coverage_5F, coverage_5labelF, coverage_10F, coverage_10labelF, coverage_20F, coverage_20labelF, coverage_50F, coverage_50labelF)
#View(coverage_f)
##
# combine data FM
# Pool female and male observations (F first, then M) per cutoff so the
# "ALL" comparison below runs on the combined sample.
coverage_3FM <- c(coverage_3F, coverage_3M)
coverage_3labelFM <- c(coverage_3labelF, coverage_3labelM)
coverage_5FM <- c(coverage_5F, coverage_5M)
coverage_5labelFM <- c(coverage_5labelF, coverage_5labelM)
coverage_10FM <- c(coverage_10F, coverage_10M)
coverage_10labelFM <- c(coverage_10labelF, coverage_10labelM)
coverage_20FM <- c(coverage_20F, coverage_20M)
coverage_20labelFM <- c(coverage_20labelF, coverage_20labelM)
coverage_50FM <- c(coverage_50F, coverage_50M)
coverage_50labelFM <- c(coverage_50labelF, coverage_50labelM)
# make dataframe FM
coverage_FM <- data.frame(coverage_3FM, coverage_3labelFM, coverage_5FM, coverage_5labelFM, coverage_10FM, coverage_10labelFM, coverage_20FM, coverage_20labelFM, coverage_50FM, coverage_50labelFM)
#View(coverage_FM)
if(threshold == 3){
dunn_M <- dunn_test(coverage_3M ~ coverage_3labelM, data=coverage_m, p.adjust.method = "bonferroni")
dunn_F <- dunn_test(coverage_3F ~ coverage_3labelF, data=coverage_f, p.adjust.method = "bonferroni")
dunn_FM <- dunn_test(coverage_3FM ~ coverage_3labelFM, data=coverage_FM, p.adjust.method = "bonferroni")
} else if (threshold == 5){
dunn_M <- dunn_test(coverage_5M ~ coverage_5labelM, data=coverage_m, p.adjust.method = "bonferroni")
dunn_F <- dunn_test(coverage_5F ~ coverage_5labelF, data=coverage_f, p.adjust.method = "bonferroni")
dunn_FM <- dunn_test(coverage_5FM ~ coverage_5labelFM, data=coverage_FM, p.adjust.method = "bonferroni")
} else if (threshold == 10){
dunn_M <- dunn_test(coverage_10M ~ coverage_10labelM, data=coverage_m, p.adjust.method = "bonferroni")
dunn_F <- dunn_test(coverage_10F ~ coverage_10labelF, data=coverage_f, p.adjust.method = "bonferroni")
dunn_FM <- dunn_test(coverage_10FM ~ coverage_10labelFM, data=coverage_FM, p.adjust.method = "bonferroni")
} else if (threshold == 20){
dunn_M <- dunn_test(coverage_20M ~ coverage_20labelM, data=coverage_m, p.adjust.method = "bonferroni")
dunn_F <- dunn_test(coverage_20F ~ coverage_20labelF, data=coverage_f, p.adjust.method = "bonferroni")
dunn_FM <- dunn_test(coverage_20FM ~ coverage_20labelFM, data=coverage_FM, p.adjust.method = "bonferroni")
} else if (threshold == 50){
dunn_M <- dunn_test(coverage_50M ~ coverage_50labelM, data=coverage_m, p.adjust.method = "bonferroni")
dunn_F <- dunn_test(coverage_50F ~ coverage_50labelF, data=coverage_f, p.adjust.method = "bonferroni")
dunn_FM <- dunn_test(coverage_50FM ~ coverage_50labelFM, data=coverage_FM, p.adjust.method = "bonferroni")
}
RESULTS <- c(RESULTS, "MALE")
RESULTS <- get_result(dunn_M, RESULTS)
RESULTS <- c(RESULTS, "FEMALE")
RESULTS <- get_result(dunn_F, RESULTS)
RESULTS <- c(RESULTS, "ALL")
RESULTS <- get_result(dunn_FM, RESULTS)
}
}
# Write the accumulated RESULTS to the report file.
# BUG FIX: the original opened a second sink on the same file (append=TRUE)
# after printing but closed only one, leaving a sink active when the script
# ended (output diverted for the rest of the session). Open once, print,
# close once.
sink('analysis-output_coverage.txt')
print(RESULTS)
sink()
|
b76ae5bf6599de0bfe029cf1ce03f67993bc1549
|
cc0b570a7153348a644c9a15514a89327b241855
|
/testCacheMatrix.R
|
714224d118a260bb58b6704eb95449f44d231095
|
[] |
no_license
|
amarallr/ProgrammingAssignment2
|
91c65953f1f650a8795f2513f1442adb31920128
|
893a3bb50f13e59e3c2f3390b62804f25c804d76
|
refs/heads/master
| 2021-01-15T17:07:20.543333
| 2014-09-14T12:55:43
| 2014-09-14T12:55:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 497
|
r
|
testCacheMatrix.R
|
## setwd("C:\\courseraworkspace\\rprog\\pa2\\ProgrammingAssignment2")
## source("testCacheMatrix.R")
# Timing harness for the cached-matrix-inverse assignment: invert a large
# random matrix twice so the second call can be served from the cache.
source("cachematrix.R")

# 4000 x 4000 standard-normal matrix (16e6 draws); matrix() fills
# column-wise, exactly as the original `dim<-` assignment did.
x <- matrix(stats::rnorm(16000000), nrow = 4000, ncol = 4000)

# Print a timestamped status line and return the timestamp.
stamp <- function(msg) {
  now <- date()
  print(paste(now, msg))
  now
}
rule <- function() print("=================================================")

# NOTE(review): cacheSolve() in this assignment is usually written to take
# the object returned by makeCacheMatrix(), not a bare matrix -- confirm
# against cachematrix.R.
t0 <- stamp(" Calling cacheSolve(x)...")
xi <- cacheSolve(x)
rule()

t1 <- stamp(" Calling cacheSolve(x)...")
xi <- cacheSolve(x)
rule()

t2 <- stamp(" Finished.")
|
30e9ca6aa36bee8d3c6857a1684cf8c4fd849dae
|
6292a37c62159e1ec96200c61fd5e5bd0ce03c2e
|
/dsia_demo_codes/ch1424.R
|
e7618bb31368c5d21773da31869747d5aea785dd
|
[] |
no_license
|
jtlai0921/AEL018600_codes
|
5270eb9bc11acc653f1ba24ff1f6eee340625eb5
|
1e4267ea82b75a2c9d57f92edfbc5e8f5a429dbf
|
refs/heads/master
| 2020-12-18T18:07:52.593639
| 2020-01-22T01:55:00
| 2020-01-22T01:55:00
| 235,479,335
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,249
|
r
|
ch1424.R
|
library(ggplot2)
library(gridExtra)
library(MASS)
# Randomly split a labeled data frame into train and validation sets.
#
# Args:
#   labeled_df: data frame of labeled observations.
#   validation_size: fraction in [0, 1) of rows assigned to validation.
#
# Returns:
#   A list with elements `validation` and `train` that partition the
#   shuffled rows of `labeled_df`.
get_train_validation <- function(labeled_df, validation_size = 0.3) {
  stopifnot(is.data.frame(labeled_df),
            is.numeric(validation_size),
            validation_size >= 0, validation_size < 1)
  m <- nrow(labeled_df)
  # Shuffle rows once; seq_len() is safe for m == 0 (unlike 1:m).
  labeled_df <- labeled_df[sample(seq_len(m)), , drop = FALSE]
  validation_threshold <- as.integer(validation_size * m)
  # BUG FIX: the original indexed with 1:validation_threshold, which for a
  # threshold of 0 evaluates to c(1, 0) and wrongly selects row 1.
  validation_idx <- seq_len(validation_threshold)
  validation <- labeled_df[validation_idx, , drop = FALSE]
  train <- labeled_df[setdiff(seq_len(m), validation_idx), , drop = FALSE]
  return(list(
    validation = validation,
    train = train
  ))
}
# Fit ridge-regularised degree-10 polynomial fits of SalePrice on GarageArea
# for increasing lambda, and plot each fitted curve next to the magnitude of
# its polynomial coefficients (House Prices Kaggle data).
labeled_url <- "https://storage.googleapis.com/kaggle_datasets/House-Prices-Advanced-Regression-Techniques/train.csv"
labeled_df <- read.csv(labeled_url)
split_result <- get_train_validation(labeled_df)
train <- split_result$train
# 50 evenly spaced GarageArea values spanning the training range, used as
# the x-grid for drawing each fitted curve.
X_vec <- seq(min(train$GarageArea), max(train$GarageArea), length.out = 50)
lambdas <- c(0, 1e2, 1e4, 1e6)
function_plots <- list()
coef_plots <- list()
d <- 10
for (i in 1:length(lambdas)) {
# Ridge fit on an orthogonal polynomial basis of degree d.
ridge <- lm.ridge(SalePrice ~ poly(GarageArea, degree = d), lambda = lambdas[i], data = train)
new_data <- poly(as.matrix(X_vec), degree = d)
#ones <- as.matrix(rep(1, times = nrow(new_data)))
# Prepend an intercept column and evaluate the fitted curve on the grid.
# NOTE(review): the grid basis is poly(X_vec, ...) recomputed from scratch,
# not the training basis evaluated at new points -- confirm the orthogonal
# bases are comparable here.
y_vec <- cbind(1, new_data) %*% coef(ridge)
function_df <- data.frame(GarageArea = X_vec, SalePrice = y_vec)
# Scatter of the full labeled data with the fitted curve overlaid.
gg <- ggplot(labeled_df, aes(x = GarageArea, y = SalePrice)) +
geom_point(size = 0.5) +
geom_line(data = function_df, aes(x = GarageArea, y = SalePrice), color = "#ff00ff") +
xlab("") +
ylab("") +
theme(axis.ticks.x = element_blank(),
axis.ticks.y = element_blank())
function_plots[[i]] <- gg
# Absolute coefficient magnitudes (ridge$coef: coefficients on the scaled
# predictors, without intercept) on a log scale, one point per degree.
coefs <- abs(ridge$coef)
thetas <- 1:d
coef_df <- data.frame(thetas = thetas, coefs = coefs)
gg <- ggplot(coef_df, aes(x = thetas, y = coefs)) +
geom_line() +
geom_point() +
scale_y_continuous(trans = "log") +
xlab("") +
ylab("") +
scale_x_continuous(breaks = 1:10) +
geom_hline(yintercept = 5000, color = "red", lty = 2) +
theme(axis.ticks.x = element_blank(),
axis.ticks.y = element_blank())
coef_plots[[i]] <- gg
}
# 4x2 grid: one (fit, coefficients) row per lambda.
grid.arrange(function_plots[[1]], coef_plots[[1]],
function_plots[[2]], coef_plots[[2]],
function_plots[[3]], coef_plots[[3]],
function_plots[[4]], coef_plots[[4]],
ncol = 2)
|
c8494d1bb164e1f021bbab189b838513d97440d9
|
153ab3d71ee4b2c95c1f3bee838619a7e0882951
|
/titanic_decisiontree_20140608.R
|
3723afb5472ec8007cbc87422c19c8e3b70b66c6
|
[] |
no_license
|
yujim78/kaggle_titanic
|
7f49cc0a813ce69c49e905634461f96470de5661
|
f222d43c830a186ca8d9f7f88d02b430b3f0ac7d
|
refs/heads/master
| 2016-08-06T13:08:55.074716
| 2014-07-06T19:31:58
| 2014-07-06T19:31:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 651
|
r
|
titanic_decisiontree_20140608.R
|
# Titanic survival (Kaggle): fit a single classification tree and write a
# submission file.
# CLEANUP: after setwd() to the project directory, the absolute paths used
# by the original read.csv calls were redundant; relative paths point at
# the same files and keep the script portable within the project.
setwd("~/Desktop/kaggle/titanic/kaggle_titanic")
train <- read.csv("train.csv")
test <- read.csv("test.csv")

library(rpart)
# Classification tree on the standard predictor set.
fit <- rpart(Survived ~ Pclass + Sex + Age + SibSp + Parch + Fare + Embarked,
             data = train, method = "class")

# Base-graphics rendering of the tree...
plot(fit)
text(fit)

# ...and a prettier one via rattle.
library(rattle)
library(rpart.plot)
library(RColorBrewer)
fancyRpartPlot(fit)

# Predict survival classes on the test set and write the submission CSV.
prediction <- predict(fit, test, type = "class")
submit <- data.frame(PassengerId = test$PassengerId, Survived = prediction)
write.csv(submit, file = "decisiontree20140608.csv", row.names = FALSE)
|
d7a51226226298caf564cc2f18f87e9dd656b6c9
|
db12b990924703cd74748d8585cd9c11fafa6746
|
/h2o-r/H2O_Load.R
|
96058f24465e3689a904b6690662e3383e6051eb
|
[
"Apache-2.0"
] |
permissive
|
h2oai/h2o-3
|
919019a8f297eec676011a9cfd2cc2d97891ce14
|
d817ab90c8c47f6787604a0b9639b66234158228
|
refs/heads/master
| 2023-08-17T18:50:17.732191
| 2023-08-17T16:44:42
| 2023-08-17T16:44:42
| 17,371,412
| 6,872
| 2,345
|
Apache-2.0
| 2023-09-14T18:05:40
| 2014-03-03T16:08:07
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 1,569
|
r
|
H2O_Load.R
|
# Source the in-development h2o-3 R package directly from a local checkout
# and start a local H2O instance.

# Change these to match your own system's path; the first existing root is
# used below.
SPENCER.ROOT.PATH <- "/Users/spencer/0xdata/"
LUDI.ROOT.PATH <- "/Users/ludirehak/"
ARNO.ROOT.PATH <- "/home/arno/"
AMY.ROOT.PATH <- "/Users/amy/"
MAGNUS.ROOT.PATH <- "/Users/magnus/Git/"
USER.PATHS <- c(SPENCER.ROOT.PATH, LUDI.ROOT.PATH, ARNO.ROOT.PATH, AMY.ROOT.PATH, MAGNUS.ROOT.PATH)
# vapply() (instead of sapply) guarantees a logical vector even if
# USER.PATHS were empty.
# NOTE(review): if more than one path exists on a machine, ROOT.PATH (and
# hence FULL.PATH) is a vector and each file below is sourced from several
# roots -- confirm that is intended.
ROOT.PATH <- USER.PATHS[vapply(USER.PATHS, dir.exists, logical(1))]
DEV.PATH <- "h2o-3/h2o-r/h2o-package/R/"
FULL.PATH <- paste(ROOT.PATH, DEV.PATH, sep="")

# Source every file of the R package from the checkout.
src <-
  function() {
    warning("MAY NOT WORK ON YOUR SYSTEM -- **TRY TO CHANGE `ROOT.PATH`!**")
    to_src <- c("astfun.R", "classes.R", "config.R", "connection.R", "constants.R", "logging.R", "communication.R",
                "import.R", "frame.R", "kvstore.R", "grid.R", "generic.R", "parse.R", "export.R", "models.R",
                "edicts.R", "coxph.R", "coxphutils.R", "glm.R", "gam.R", "glrm.R", "pca.R", "kmeans.R", "gbm.R",
                "deeplearning.R", "naivebayes.R", "randomforest.R", "svd.R", "locate.R", "predict.R", "rulefit.R",
                "isolationforest.R", "psvm.R", "tf-idf.R", "permutation_varimp.R", "extendedisolationforest.R",
                "anovaglm.R", "modelselection.R", "upliftrandomforest.R", "infogram.R", "admissibleml.R", "decisiontree.R")
    # library() fails loudly if a dependency is missing; the original used
    # require(), which only warns, returns FALSE, and lets the sourcing
    # below fail later with a less helpful error.
    library(jsonlite); library(RCurl)
    invisible(lapply(to_src,function(x){source(paste(FULL.PATH, x, sep = ""))}))
  }
src()
# Use FALSE, not the reassignable shorthand F.
h <- conn <- h2o.init(strict_version_check = FALSE)
#hex <- as.h2o(iris)
#hex <- h2o.importFile(h, paste(ROOT.PATH, "h2o-dev/smalldata/logreg/prostate.csv", sep = ""))
|
36b95b9ed27f63db681d08620fed4a398e71c9a0
|
72cacfe29831236bfee1a2569ff531c79d4f3971
|
/TestingData.R
|
5812d0688daae763ed4fa11c10277d0f4656d970
|
[] |
no_license
|
iarlaith/Week7
|
8683fdc05241084e5edfc280dbfc83b39be9274b
|
c2f891a362b0d9d52e56d94aee9628620cc227dc
|
refs/heads/master
| 2020-05-18T17:22:18.312211
| 2019-05-15T13:46:55
| 2019-05-15T13:46:55
| 184,553,219
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,230
|
r
|
TestingData.R
|
# Example using mobile phone usage while driving
# Comparing 2 samples (with and without phone)
# and comparing mean reaction times f both groups
# so we'll use a paired t-test
# FIX: install pwr only when it is missing instead of unconditionally
# reinstalling it on every run of the script.
if (!requireNamespace("pwr", quietly = TRUE)) {
  install.packages("pwr")
}
library(pwr)
# Two-sample, two-sided t-test: required n per group for effect size
# d = 0.8 at alpha = 0.05 and power = 0.90.
power_information <- pwr.t.test(d=0.8, sig.level = 0.05, power = 0.90,
                                type = "two.sample", alternative = "two.sided")
power_information
# Results suggest that we need 34 participants in each group (34 with and 34 without phone) to have an
# effect sze of 0.8 with 90% certainty and no more than 5% chance of erronously concluding that a
# difference exists when it doesn't.
power_information2 <- pwr.t.test(d=0.8, sig.level = 0.01, power = 0.90,
                                 type = "two.sample", alternative = "two.sided")
power_information2
# Changing the significant lever to less than 1% means we need 48 people.
plot(power_information2)
# Cohen describes the effect size as "The degree to which the null hypothesis is false".
# Proportion test: effect size h for p1 = 0.75 vs p2 = 0.50, then the n
# needed to detect it.
h_value <- ES.h(p1 = 0.75, p2 = 0.50)
heads_or_tails <- pwr.p.test(h = h_value, sig.level = 0.05, power = 0.90)
heads_or_tails
# Conventional small/medium/large effect sizes for correlation tests.
cohen.ES(test = "r", size = "medium")
cohen.ES(test = "r", size = "large")
cohen.ES(test = "r", size = "small")
|
dcc2af7ff336c12f1600dcbcb800608058ca958f
|
c9d66d6a0b9ed0a49eb13e252770e30f7739f6fa
|
/Chapter 10/CreateDataFrameJsonParquet.R
|
04fb49fc54759dab08961160ea23cdd3df5d04e5
|
[
"MIT"
] |
permissive
|
PacktPublishing/big-data-analytics
|
ac5ea8e14b8c552b7850ef08ae3d41f82f2ef0ae
|
937d12ec23a94be33d68e3bbd6c3bbe597bbda04
|
refs/heads/master
| 2023-01-24T09:12:07.409511
| 2023-01-18T10:21:11
| 2023-01-18T10:21:11
| 70,221,427
| 33
| 30
| null | null | null | null |
UTF-8
|
R
| false
| false
| 505
|
r
|
CreateDataFrameJsonParquet.R
|
# Get into SparkR shell and run these commands.
people_json <- read.df("file:///home/cloudera/spark-2.0.0-bin-hadoop2.7/examples/src/main/resources/people.json", "json")
people_json
# BUG FIX: the next line was pasted console OUTPUT left as bare code; as R
# code it references undefined objects (age, bigint, name, string) and
# errors at run time. Kept as a comment for reference:
# DataFrame[age:bigint, name:string]
head(people_json)
# To write the people DataFrame as Parquet file, use below command. This will create people-parq directory on HDFS and creates parquet file with snappy compression.
write.df(people_json, path = "people-parq", source = "parquet", mode = "overwrite")
|
dd60bcd6f54484e195a78d627c2fb778abf801b3
|
f677ad4de11474ee34e3e8c1fef92f8cc7ae8d82
|
/R/prepare_BioGeoBEARS_point_input_cluster_allrealms_mammals.R
|
549dc1f11b0878b6ea638b64ff748f18181c3652
|
[] |
no_license
|
javierigea/hotspots_mambird_paper
|
1eb711be9c867996eddd32e4ce03dbb3b6367749
|
275b4fc731ce027b6027a584b7bb0e83c071f852
|
refs/heads/master
| 2020-03-22T20:18:25.031169
| 2019-02-17T16:22:34
| 2019-02-17T16:22:34
| 140,588,176
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 26,832
|
r
|
prepare_BioGeoBEARS_point_input_cluster_allrealms_mammals.R
|
#run model in hydrogen cluster
.libPaths('/home/ecosys/ji247/R/x86_64-pc-linux-gnu-library/3.3')
setwd('/home/ecosys/ji247/hotspots_vertebrates/')
library(ape)
library(plyr)
library(phangorn)
library(geiger)
library(parallel)
library(diversitree)
###############################################
###########################
#world.table.file is 100_all_realms_ranges_plus_hotspots.txt
#treefile is the tree
#name has to be one of (afrotrop,austral,indo,nearctic,neotrop,palearctic)
#overlap is 0.8
# Build BioGeoBEARS input for one realm from the worldwide range table.
#
# Args:
#   world.table.file: path to the tab-separated range/hotspot overlap table
#     (100_all_realms_ranges_plus_hotspots.txt per the notes above).
#   treefile: path to the phylogeny (Newick).
#   name: realm short name (afrotrop, austral, indo, nearctic, neotrop,
#     palearctic).
#   overlap: proportion-of-range threshold for assigning a species to an
#     area (0.8 per the notes above).
prepare_realm_input_7areas <- function(world.table.file, treefile, name, overlap) {
  tree <- read.tree(treefile)
  world.table <- read.table(world.table.file, header = TRUE,
                            stringsAsFactors = FALSE, sep = '\t')
  # Keep only species present in both the tree and the table.
  tree <- drop.tip(tree, setdiff(tree$tip.label, world.table$spp))
  world.table <- world.table[world.table$spp %in% tree$tip.label, ]
  # Drop the raw cell-list columns, keeping the overlap-proportion columns,
  # and strip the "_WWF" suffix from the remaining names.
  drop.columns <- grep('cells', colnames(world.table))
  world.table <- world.table[, -drop.columns]
  colnames(world.table) <- sub(colnames(world.table), pattern = '_WWF', replacement = '')
  # BUG FIX: the original ignored the `overlap` argument and always passed
  # the hard-coded 0.80; forward the caller's value instead.
  # NOTE(review): the helper also declares a `replicate` argument (used in
  # its output file names) that is never supplied here -- confirm the
  # intended value with the caller.
  prepare_realm_input_7areas_plus_inhotoutrealm(table = world.table, name = name,
                                                overlap = overlap, tree = tree)
}
# Recode each species' range overlaps into a BioGeoBEARS area code for a
# 7-area model (six realms plus the focal realm's hotspot area), write the
# pruned tree and the geography file, and return the tabulated state counts.
# States 1-6 are realms (column order of the *_realm.area columns), state 7
# is "inside the focal realm's hotspot"; comma-separated pairs mark species
# split across two areas at the given `overlap` threshold.
prepare_realm_input_7areas_plus_inhotoutrealm<-function(table,name,overlap,tree,replicate){
# Keep species id, the six per-realm overlap columns, and the focal realm's
# hotspot overlap column; missing overlaps become 0.
table<-table[,c('spp',colnames(table)[grep('_realm.area',colnames(table))],paste(name,'_hotspot.area',sep=''))]
table[is.na(table)]<-0
#get a table without the hotspot area first
realm.table<-table[,-grep('hotspot',colnames(table))]
#process realm.table
realm.table$model.state<-0
#if overlap in any column is >0.8 assign to that region
# Columns 2:7 are the six realm-overlap proportions; a species is exclusive
# to the (single) realm whose overlap meets the threshold.
realm.exclusive.species<-apply(realm.table,1,function(x) unname(which(x[c(2:7)]>=overlap)))
realm.exclusive.species<-lapply(realm.exclusive.species,function(x)if(length(x)==0){x<-0}else{return(x)})
realm.table$model.state<-unlist(realm.exclusive.species)
#sort out the multirealm species
# Species not exclusive to one realm: assign the pair of realms each holding
# more than (1 - overlap) of the range, else leave 0 (dropped below).
multirrealm.species<-realm.table[realm.table$model.state==0,]
multirrealm.species.regions<-apply(multirrealm.species,1,function(x)unname(which(x[c(2:7)]>(1-overlap))))
multirrealm.species.regions<-unlist(lapply(multirrealm.species.regions,function(x) if(length(x)==2){x<-paste(x,collapse=',')}else{x<-0}))
realm.table[realm.table$model.state==0,'model.state']<-multirrealm.species.regions
#drop species with 0 = species occurring in Oceanic realm
realm.table<-realm.table[!realm.table$model.state==0,]
#select column of realm to be analysed
# Index (as character) of the focal realm among the six realm columns.
realm.character<-as.character(grep(name,colnames(realm.table)[-c(1,8)]))
# Species essentially confined to the focal realm: split by hotspot overlap
# into realm-only, hotspot-only (state 7), or both.
realm.hotspot.table<-table[,grep(name,colnames(table))]
realm.hotspot.table<-realm.hotspot.table[which(realm.hotspot.table[,1]>=overlap),]
realm.hotspot.table$model.state<-0
realm.hotspot.table[realm.hotspot.table[,2]<(1-overlap),'model.state']<-realm.character
realm.hotspot.table[realm.hotspot.table[,2]>=overlap,'model.state']<-7
realm.hotspot.table[realm.hotspot.table[,2]>(1-overlap)&realm.hotspot.table[,2]<overlap,'model.state']<-paste(realm.character,7,sep=',')
# Assumes realm.hotspot.table rows align one-to-one with the realm-exclusive
# rows of realm.table (same >= overlap filter) -- TODO confirm ordering.
realm.table[realm.table$model.state==realm.character,'model.state']<-realm.hotspot.table$model.state
#select species occurring in target realm plus another realm to check if
target.states<-names(table(realm.table$model.state))[grep(realm.character,names(table(realm.table$model.state)))]
if(length(grep('7',target.states))>0){
target.states<-target.states[-grep('7',target.states)]
}
target.states<-target.states[-match(realm.character,target.states)]
# NOTE(review): this condition is fragile -- if target.states ends up empty
# the is.na() call yields logical(0) and if() errors; if it has length > 1,
# if() errors on R >= 4.2 (and used only the first element before that).
# A length()/anyNA() guard would be safer; left unchanged here.
if(!is.na(target.states)){
# For species shared between the focal realm and one other realm, refine the
# focal-realm side into hotspot (7) vs non-hotspot using hotspot overlap.
realm.plus.outside.table<-realm.table[realm.table$model.state%in%target.states,]
#add hotspot overlap within realm to realm.plus.outside table
realm.plus.outside.table.hotspots<-as.data.frame(cbind(table[table$spp%in%realm.plus.outside.table$spp,'spp'],table[table$spp%in%realm.plus.outside.table$spp,paste(name,'_hotspot.area',sep='')]),stringsAsFactors = F)
colnames(realm.plus.outside.table.hotspots)<-c('spp',paste(name,'_hotspot.area',sep=''))
realm.plus.outside.table.hotspots$model.state<-realm.table[realm.table$model.state%in%target.states,]$model.state
realm.plus.outside.table.hotspots[realm.plus.outside.table.hotspots[,2]>=overlap,]$model.state<-sub(realm.plus.outside.table.hotspots[realm.plus.outside.table.hotspots[,2]>=overlap,]$model.state,pattern=realm.character,replacement='7')
# Normalise "7,x" pairs to the canonical ascending "x,7" form.
realm.plus.outside.table.hotspots[realm.plus.outside.table.hotspots$model.state==paste('7,','1',sep=''),'model.state']<-'1,7'
realm.plus.outside.table.hotspots[realm.plus.outside.table.hotspots$model.state==paste('7,','2',sep=''),'model.state']<-'2,7'
realm.plus.outside.table.hotspots[realm.plus.outside.table.hotspots$model.state==paste('7,','3',sep=''),'model.state']<-'3,7'
realm.plus.outside.table.hotspots[realm.plus.outside.table.hotspots$model.state==paste('7,','4',sep=''),'model.state']<-'4,7'
realm.plus.outside.table.hotspots[realm.plus.outside.table.hotspots$model.state==paste('7,','5',sep=''),'model.state']<-'5,7'
realm.plus.outside.table.hotspots[realm.plus.outside.table.hotspots$model.state==paste('7,','6',sep=''),'model.state']<-'6,7'
realm.table[realm.table$model.state%in%target.states,'model.state']<-realm.plus.outside.table.hotspots$model.state
}
table<-realm.table
cat('state distribution','\n')
cat(table(table$model.state),'\n')
results.table<-table
# Named state vector: model.state per species, names = species ids.
model5<-results.table$model.state
table.model.state<-table(table$model.state)
names(model5)<-results.table$spp
# NOTE(review): the variable name shadows geiger::name.check here; the later
# call name.check(tree.model, model5) still dispatches to the function, but
# renaming the variable would be clearer.
name.check<-name.check(tree,model5)
if(name.check=='OK'){
tree.model<-tree
}else{
#drop tips in tree without data
tree.model<-drop.tip(tree,name.check$tree_not_data)
#drop species not in tree from trait vector
model5<-model5[!(names(model5) %in% name.check$data_not_tree)]
}
# NOTE(review): cat() on the name.check() result only works when it returns
# the "OK" string; a residual mismatch returns a list and this would error.
cat(name.check(tree.model,model5),'namecheck','\n')
write.tree(tree.model,paste('./',name,'_7areas_inhotoutrealm.tree',sep=''))
#create the table for BioGeoBEARS
#1-> 100; 2-> 110; 3-> 010; 4-> 011; 5-> 001; 6->101
# Translate each model state into a 7-character presence/absence string
# (one position per area; position 7 = focal realm hotspot).
geographytable<-data.frame(names(model5),unname(model5))
colnames(geographytable)<-c('spp','model.state')
geographytable$code<-'0000000'
geographytable[geographytable$model.state=='1','code']<-'1000000'
geographytable[geographytable$model.state=='2','code']<-'0100000'
geographytable[geographytable$model.state=='3','code']<-'0010000'
geographytable[geographytable$model.state=='4','code']<-'0001000'
geographytable[geographytable$model.state=='5','code']<-'0000100'
geographytable[geographytable$model.state=='6','code']<-'0000010'
geographytable[geographytable$model.state=='7','code']<-'0000001'
geographytable[geographytable$model.state=='1,7','code']<-'1000001'
geographytable[geographytable$model.state=='2,7','code']<-'0100001'
geographytable[geographytable$model.state=='3,7','code']<-'0010001'
geographytable[geographytable$model.state=='4,7','code']<-'0001001'
geographytable[geographytable$model.state=='5,7','code']<-'0000101'
geographytable[geographytable$model.state=='6,7','code']<-'0000011'
geographytable[geographytable$model.state=='1,2','code']<-'1100000'
geographytable[geographytable$model.state=='1,3','code']<-'1010000'
geographytable[geographytable$model.state=='1,4','code']<-'1001000'
geographytable[geographytable$model.state=='1,5','code']<-'1000100'
geographytable[geographytable$model.state=='1,6','code']<-'1000010'
geographytable[geographytable$model.state=='2,3','code']<-'0110000'
geographytable[geographytable$model.state=='2,4','code']<-'0101000'
geographytable[geographytable$model.state=='2,5','code']<-'0100100'
geographytable[geographytable$model.state=='2,6','code']<-'0100010'
geographytable[geographytable$model.state=='3,4','code']<-'0011000'
geographytable[geographytable$model.state=='3,5','code']<-'0010100'
geographytable[geographytable$model.state=='3,6','code']<-'0010010'
geographytable[geographytable$model.state=='4,5','code']<-'0001100'
geographytable[geographytable$model.state=='4,6','code']<-'0001010'
geographytable[geographytable$model.state=='5,6','code']<-'0000110'
geographytable$model.state<-NULL
# PHYLIP-style geography file: "<n_species> <n_areas>" header, then one
# "species code" row per species.
# NOTE(review): `replicate` is evaluated here for the file name; callers
# must supply it (it has no default) or this errors -- see
# prepare_realm_input_7areas above, which does not pass it.
header<-cbind(nrow(geographytable),7)
write.table(header,paste('./',name,'_',replicate,'_7areas_inhotoutrealm_geographyfile.txt',sep=''),sep='\t',quote=F,row.names=F,col.names = F)
write.table(geographytable,paste('./',name,'_',replicate,'_7areas_inhotoutrealm_geographyfile.txt',sep=''),append=T,sep=' ',quote=F,row.names=F,col.names = F)
return(table.model.state)
}
#points is a vector with the points to get the overlap for
#returns a table with overlap with realm and hotspot for the set of points
#table is world.table
#name is the name of the realm (Afrotropical,Australasian,IndoMalay,Nearctic,Neotropical,Palearctic)
get_rangeoverlap_with_points<-function(points,name,table){
if(!name%in%c('Afrotropical','Australasian','Indo-Malay','Nearctic','Neotropical','Palearctic')){
cat('incorrect realm name','\n')
break
}
world.table<-table
#calculate proportion of species range inside hotspots
grids<-list.files(path='./output/grids/',pattern='grid_.*_100.rds')
grid.world<-grids[grep('World_RealmsMerged',grids)]
grid.realms.names<-grids[-grep('World_RealmsMerged',grids)]
grid.world<-readRDS(paste('./output/grids/',grid.world,sep=''))
grid.realms<-lapply(grid.realms.names,function(x) readRDS(paste('./output/grids/',x,sep='')))
grid.realms.names<-sub(grid.realms.names,pattern='grid_',replacement='')
grid.realms.names<-sub(grid.realms.names,pattern='_100.rds',replacement='')
grid.realms.ncells<-unlist(lapply(grid.realms,function(x)length(x)))
grid.realms.start.cell<-numeric(length(grid.realms.ncells))
grid.realms.end.cell<-numeric(length(grid.realms.ncells))
for(i in 1:length(grid.realms.ncells)){
if(i==1){
grid.realms.start.cell[i]<-1
grid.realms.end.cell[i]<-grid.realms.ncells[i]
next
}else{
grid.realms.start.cell[i]<-grid.realms.end.cell[i-1]+1
grid.realms.end.cell[i]<-grid.realms.start.cell[i]+grid.realms.ncells[i]-1
next
}
}
grid.cells.df<-as.data.frame(cbind(grid.realms.names,grid.realms.ncells,grid.realms.start.cell,grid.realms.end.cell),stringsAsFactors = F)
colnames(grid.cells.df)<-c('realms.names','ncells','start.cell','end.cell')
grid.cells.df$ncells<-as.numeric(grid.cells.df$ncells)
grid.cells.df$start.cell<-as.numeric(grid.cells.df$start.cell)
grid.cells.df$end.cell<-as.numeric(grid.cells.df$end.cell)
#remove Oceanic grid.cells.df (just 79 cells and most of them hotspot)
grid.cells.df<-grid.cells.df[-grep('Oceanic',grid.cells.df$realms.names),]
#correct realm points (points are in reference to the realm but the table is worldwide)
points<-grid.cells.df[grid.cells.df$realms.names==name,]$start.cell+points
hotspots.cells.realm<-sort(points)
world.table.cells<-as.character(world.table$cells)
str(unlist(strsplit(world.table.cells[2],' ')))
world.table.cells<-sapply(world.table.cells,function(x) unlist(strsplit(x,' ')))
world.table.cells<-sapply(world.table.cells,function(x) x[x != ""])
world.table.cells<-sapply(world.table.cells,function(x)as.numeric(x))
#intersect each entry in world.table.cells with all elements in hotspots.cells.realm and record
world.table.cells.hotspots<-lapply(world.table.cells,function(x) length(intersect(x,hotspots.cells.realm)))
if(name=='Afrotropical'){
world.table$range.cells.Afrotropical.hotspot<-unlist(world.table.cells.hotspots)
world.table$afrotrop_WWF_hotspot.area<-world.table$range.cells.Afrotropical.hotspot/world.table$range.cells.Afrotropical
world.table[is.na(world.table$afrotrop_WWF_hotspot.area),]$afrotrop_WWF_hotspot.area<-0
}else if (name=='Australasian'){
world.table$range.cells.Australasian.hotspot<-unlist(world.table.cells.hotspots)
world.table$austral_WWF_hotspot.area<-world.table$range.cells.Australasian.hotspot/world.table$range.cells.Australasian
world.table[is.na(world.table$austral_WWF_hotspot.area),]$austral_WWF_hotspot.area<-0
}else if (name=='Indo-Malay'){
world.table$range.cells.IndoMalay.hotspot<-unlist(world.table.cells.hotspots)
world.table$indo_WWF_hotspot.area<-world.table$range.cells.IndoMalay.hotspot/world.table$range.cells.IndoMalay
world.table[is.na(world.table$indo_WWF_hotspot.area),]$indo_WWF_hotspot.area<-0
}else if (name=='Nearctic'){
world.table$range.cells.Nearctic.hotspot<-unlist(world.table.cells.hotspots)
world.table$nearctic_WWF_hotspot.area<-world.table$range.cells.Nearctic.hotspot/world.table$range.cells.Nearctic
world.table[is.na(world.table$nearctic_WWF_hotspot.area),]$nearctic_WWF_hotspot.area<-0
}else if (name=='Neotropical'){
world.table$range.cells.Neotropical.hotspot<-unlist(world.table.cells.hotspots)
world.table$neotrop_WWF_hotspot.area<-world.table$range.cells.Neotropical.hotspot/world.table$range.cells.Neotropical
world.table[is.na(world.table$neotrop_WWF_hotspot.area),]$neotrop_WWF_hotspot.area<-0
}else if (name=='Palearctic'){
world.table$range.cells.Palearctic.hotspot<-unlist(world.table.cells.hotspots)
world.table$palearctic_WWF_hotspot.area<-world.table$range.cells.Palearctic.hotspot/world.table$range.cells.Palearctic
world.table[is.na(world.table$palearctic_WWF_hotspot.area),]$palearctic_WWF_hotspot.area<-0
}
return(world.table)
}
####################################################################################################
####################################################################################################
####
#points.objectfile<-'./output/mammals/immigration/controls/points.afrotropical.50.RDS'
#world.tablefile<-'./output/mammals/tables/100_all_realms_ranges_plus_hotspots.txt'
#region.name is one of 'Afrotropical','Australasian','Indo-Malay','Neotropical','Nearctic','Palearctic'
#treefile<-'./output/mammals/trees/mammals_tree_IUCN.tree'
#name is one of 'afrotrop','austral','indo','nearctic','neotrop','palearctic'
#path<-'./output/mammals/immigration/controls/'
#path is the relative path to store the control files ('./output/mammals/immigration/controls/')
# Build BioGeoBEARS control inputs for one realm from randomized hotspot points.
#
# Reads a list of random point sets (points.objectfile, an RDS holding one
# vector of cell indices per replicate), recomputes per-species hotspot
# overlap for each replicate via get_rangeoverlap_with_points(), prunes the
# species table to the tips of the supplied tree, and writes one input folder
# per replicate under path/name/. The collected inputs are saved as
# path/name/points.control.input.rds (note: no '/' between name and the file
# prefix, so the file lands next to the per-replicate folders).
#
# Arguments:
#   points.objectfile - RDS file with the list of randomized point sets.
#   world.tablefile   - tab-separated per-species range table; its existing
#                       'hotspot' columns are dropped and rebuilt from points.
#   region.name       - realm name as used inside the world table, e.g.
#                       'Afrotropical'.
#   treefile          - Newick phylogeny; tips are intersected with the table.
#   name              - short realm name used for output folders ('afrotrop').
#   path              - output root, e.g. './output/mammals/immigration/controls/'.
#
# Side effects: creates directories, changes the working directory per
# replicate, and writes an RDS file. Returns the value of saveRDS (invisible
# NULL); callers use it for its side effects only.
get_table_from_points<-function(points.objectfile,world.tablefile,region.name,treefile,name,path){
dir.create(paste(path,name,sep=''))
points<-readRDS(points.objectfile)
world.table<-read.table(world.tablefile,header=T,sep='\t',stringsAsFactors = F)
#drop columns with hotspot info in world.tablefile (they are recomputed below
#from the randomized points)
world.table<-world.table[,-grep('hotspot',colnames(world.table))]
points.table<-lapply(points,function(x) get_rangeoverlap_with_points(points=x,name=region.name,table=world.table))
#collate tree and table, keeping only species present in both
tree<-read.tree(treefile)
tree<-drop.tip(tree,setdiff(tree$tip.label,points.table[[1]]$spp))
points.table<-lapply(points.table,function(x) x[x$spp%in%tree$tip.label,])
#keep columns with proportion of range inside realm and proportion of range
#within hotspot; strip the '_WWF' suffix so downstream code sees plain names
points.table<-lapply(points.table,function(x){drop.columns<-grep('cells',colnames(x));x<-x[,-drop.columns];colnames(x)<-sub(colnames(x),pattern='_WWF',replacement='');return(x)})
points.control.input<-list()
for(i in 1:length(points.table)){
# NOTE(review): `name<-name` is a no-op left over from an earlier version;
# `overlap` is also shadowed by the hard-coded overlap=0.80 argument below.
name<-name
overlap<-0.8
folder<-paste(path,name,'/',i,'/',sep='')
dir.create(folder)
# prepare_realm_input_7areas_plus_inhotoutrealm() writes files into the
# current working directory, hence the setwd() dance around the call.
setwd(folder)
points.control.input[[i]]<-prepare_realm_input_7areas_plus_inhotoutrealm(table=points.table[[i]],name=name,overlap=0.80,replicate=i,tree=tree)
# NOTE(review): hard-coded absolute path — breaks on any other machine;
# consider saving getwd() before the loop and restoring it instead.
setwd('/home/ji247/hotspots_vertebrates/')
}
saveRDS(points.control.input,file=paste(path,name,'points.control.input.rds',sep=''))
}
# Run the point-based control pipeline once per biogeographic realm.
# Every realm shares the same world table, phylogeny and output root; only
# the randomized-points RDS, the realm name used inside the world table and
# the short name used for the output folders differ between runs.
realm.runs <- list(
c('./output/mammals/immigration/controls/points.afrotropical.50.RDS', 'Afrotropical', 'afrotrop'),
c('./output/mammals/immigration/controls/points.austral.50.RDS', 'Australasian', 'austral'),
c('./output/mammals/immigration/controls/points.indo.50.RDS', 'Indo-Malay', 'indo'),
c('./output/mammals/immigration/controls/points.nearctic.50.RDS', 'Nearctic', 'nearctic'),
c('./output/mammals/immigration/controls/points.neotropical.50.RDS', 'Neotropical', 'neotrop'),
c('./output/mammals/immigration/controls/points.palearctic.50.RDS', 'Palearctic', 'palearctic')
)
for (run in realm.runs) {
get_table_from_points(points.objectfile = run[1], world.tablefile = './output/mammals/tables/100_all_realms_ranges_plus_hotspots.txt', region.name = run[2], treefile = './output/mammals/trees/mammals_tree_IUCN.tree', name = run[3], path = './output/mammals/immigration/controls/')
}
########plot number of species in each category for afrotrop
####afrotrop.realm<-read.table('./output/mammals/new_tables_hotspots/whole_realms/afrotrop/afrotrop_geographyfile.txt',header=F,sep=' ',skip = 1)
####afrotrop.realm.table<-table(afrotrop.realm$V2)
####afrotrop.control.input.points<-list()
####for(i in 1:5){
#### afrotrop.control.input.points[[i]]<-unlist(lapply(afrotrop.control.input,function(x)x[i]))
####}
####par(mfrow=c(2,2))
####for(i in 1:4){
#### if(i==1){
#### hist(afrotrop.control.input.points[[i]],xlim=c(0,max(c(afrotrop.control.input.points[[i]],afrotrop.realm.table['100']))),main='afrotrop hot endemics')
#### abline(v=afrotrop.realm.table['100'],col='red')
#### }else if(i==2){
#### hist(afrotrop.control.input.points[[i]],xlim=c(0,max(c(afrotrop.control.input.points[[i]],afrotrop.realm.table['110']))),main='afrotrop hot/nonhot')
#### abline(v=afrotrop.realm.table['110'],col='red')
#### }else if(i==3){
#### hist(afrotrop.control.input.points[[i]],xlim=c(0,max(c(afrotrop.control.input.points[[i]],afrotrop.realm.table['10']))),main='afrotrop nonhot')
#### abline(v=afrotrop.realm.table['10'],col='red')
#### }else if(i==4){
#### hist(afrotrop.control.input.points[[i]],xlim=c(0,max(c(afrotrop.control.input.points[[i]],afrotrop.realm.table['11']))),main='afrotrop nonhot/world')
#### abline(v=afrotrop.realm.table['11'],col='red')
#### }
####}
####dev.off()
####
##########plot number of species in each category for austral
####austral.realm<-read.table('./output/mammals/new_tables_hotspots/whole_realms/austral/austral_geographyfile.txt',header=F,sep=' ',skip = 1)
####austral.realm.table<-table(austral.realm$V2)
####austral.control.input.points<-list()
####for(i in 1:5){
#### austral.control.input.points[[i]]<-unlist(lapply(austral.control.input,function(x)x[i]))
####}
####par(mfrow=c(2,2))
####for(i in 1:4){
#### if(i==1){
#### hist(austral.control.input.points[[i]],xlim=c(0,max(c(austral.control.input.points[[i]],austral.realm.table['100']))),main='austral hot endemics')
#### abline(v=austral.realm.table['100'],col='red')
#### }else if(i==2){
#### hist(austral.control.input.points[[i]],xlim=c(0,max(c(austral.control.input.points[[i]],austral.realm.table['110']))),main='austral hot/nonhot')
#### abline(v=austral.realm.table['110'],col='red')
#### }else if(i==3){
#### hist(austral.control.input.points[[i]],xlim=c(0,max(c(austral.control.input.points[[i]],austral.realm.table['10']))),main='austral nonhot')
#### abline(v=austral.realm.table['10'],col='red')
#### }else if(i==4){
#### hist(austral.control.input.points[[i]],xlim=c(0,max(c(austral.control.input.points[[i]],austral.realm.table['11']))),main='austral nonhot/world')
#### abline(v=austral.realm.table['11'],col='red')
#### }
####}
####dev.off()
####
##########plot number of species in each category for indo
#####no whole realm analysis for indo, so skipping
####
##########plot number of species in each category for nearctic
####nearctic.realm<-read.table('./output/mammals/new_tables_hotspots/whole_realms/nearctic/nearctic_geographyfile.txt',header=F,sep=' ',skip = 1)
####nearctic.realm.table<-table(nearctic.realm$V2)
####nearctic.control.input.points<-list()
####for(i in 1:5){
#### nearctic.control.input.points[[i]]<-unlist(lapply(nearctic.control.input,function(x)x[i]))
####}
####par(mfrow=c(2,2))
####for(i in 1:4){
#### if(i==1){
#### hist(nearctic.control.input.points[[i]],xlim=c(0,max(c(nearctic.control.input.points[[i]],nearctic.realm.table['100']))),main='nearctic hot endemics')
#### abline(v=nearctic.realm.table['100'],col='red')
#### }else if(i==2){
#### hist(nearctic.control.input.points[[i]],xlim=c(0,max(c(nearctic.control.input.points[[i]],nearctic.realm.table['110']))),main='nearctic hot/nonhot')
#### abline(v=nearctic.realm.table['110'],col='red')
#### }else if(i==3){
#### hist(nearctic.control.input.points[[i]],xlim=c(0,max(c(nearctic.control.input.points[[i]],nearctic.realm.table['10']))),main='nearctic nonhot')
#### abline(v=nearctic.realm.table['10'],col='red')
#### }else if(i==4){
#### hist(nearctic.control.input.points[[i]],xlim=c(0,max(c(nearctic.control.input.points[[i]],nearctic.realm.table['11']))),main='nearctic nonhot/world')
#### abline(v=nearctic.realm.table['11'],col='red')
#### }
####}
####dev.off()
####
####
##########plot number of species in each category for neotrop
####neotrop.realm<-read.table('./output/mammals/new_tables_hotspots/whole_realms/neotrop/neotrop_geographyfile.txt',header=F,sep=' ',skip = 1)
####neotrop.realm.table<-table(neotrop.realm$V2)
####neotrop.control.input.points<-list()
####for(i in 1:5){
#### neotrop.control.input.points[[i]]<-unlist(lapply(neotrop.control.input,function(x)x[i]))
####}
####par(mfrow=c(2,2))
####for(i in 1:4){
#### if(i==1){
#### hist(neotrop.control.input.points[[i]],xlim=c(0,max(c(neotrop.control.input.points[[i]],neotrop.realm.table['100']))),main='neotrop hot endemics')
#### abline(v=neotrop.realm.table['100'],col='red')
#### }else if(i==2){
#### hist(neotrop.control.input.points[[i]],xlim=c(0,max(c(neotrop.control.input.points[[i]],neotrop.realm.table['110']))),main='neotrop hot/nonhot')
#### abline(v=neotrop.realm.table['110'],col='red')
#### }else if(i==3){
#### hist(neotrop.control.input.points[[i]],xlim=c(0,max(c(neotrop.control.input.points[[i]],neotrop.realm.table['10']))),main='neotrop nonhot')
#### abline(v=neotrop.realm.table['10'],col='red')
#### }else if(i==4){
#### hist(neotrop.control.input.points[[i]],xlim=c(0,max(c(neotrop.control.input.points[[i]],neotrop.realm.table['11']))),main='neotrop nonhot/world')
#### abline(v=neotrop.realm.table['11'],col='red')
#### }
####}
####dev.off()
####
##########plot number of species in each category for palearctic
####palearctic.realm<-read.table('./output/mammals/new_tables_hotspots/whole_realms/palearctic/palearctic_geographyfile.txt',header=F,sep=' ',skip = 1)
####palearctic.realm.table<-table(palearctic.realm$V2)
####palearctic.control.input.points<-list()
####for(i in 1:5){
#### palearctic.control.input.points[[i]]<-unlist(lapply(palearctic.control.input,function(x)x[i]))
####}
####par(mfrow=c(2,2))
####for(i in 1:4){
#### if(i==1){
#### hist(palearctic.control.input.points[[i]],xlim=c(0,max(c(palearctic.control.input.points[[i]],palearctic.realm.table['100']))),main='palearctic hot endemics')
#### abline(v=palearctic.realm.table['100'],col='red')
#### }else if(i==2){
#### hist(palearctic.control.input.points[[i]],xlim=c(0,max(c(palearctic.control.input.points[[i]],palearctic.realm.table['110']))),main='palearctic hot/nonhot')
#### abline(v=palearctic.realm.table['110'],col='red')
#### }else if(i==3){
#### hist(palearctic.control.input.points[[i]],xlim=c(0,max(c(palearctic.control.input.points[[i]],palearctic.realm.table['10']))),main='palearctic nonhot')
#### abline(v=palearctic.realm.table['10'],col='red')
#### }else if(i==4){
#### hist(palearctic.control.input.points[[i]],xlim=c(0,max(c(palearctic.control.input.points[[i]],palearctic.realm.table['11']))),main='palearctic nonhot/world')
#### abline(v=palearctic.realm.table['11'],col='red')
#### }
####}
####dev.off()
##
##realm.name<-'afrotrop'
##number<-'1'
##run_whole_realm_BioGeoBEARS_plusBSM<-function(realm.name,number){
## setwd('~/hotspots_vertebrates/')
## source('./BioGeoBEARS_run_models.R')
##
## path<-paste('./output/mammals/new_tables_hotspots/immigration/controls/',realm.name,'/',number,'/',sep='')
## names.trees<-list.files(path,pattern='.tree')
## names.geography<-list.files(path,pattern='_geographyfile.txt')
## if(length(names.trees)==0){
## return('no trees in this folder')
##
## }
## if(length(names.trees)!=length(names.geography)){
## return('different lenghts, double check folder')
##
## }
## for(i in 1:length(names.trees)){
## run_BioGeoBEARS_models(treefile = names.trees[i],geographyfile =names.geography[i],path = path,name=sub(names.trees[i],pattern=paste('_',realm.name,'.tree',sep=''),replacement='') )
## setwd('~/Desktop/HOTSPOTS/repositories/hotspots_vertebrates/')
## }
## list.aicc.files<-list.files(path,pattern='_AICc_rellike_formatted.txt')
## #read AICC tables, select the best fitting model (sort by AICc weight), store the best fitting models in a list
## model.list<-list(0)
## for (i in 1:length(list.aicc.files)){
## table<-read.table(file=paste(path,list.aicc.files[i],sep=''),header=T,sep='\t')
## table<-table[order(-table$AICc_wt),]
## cat(i,' ',table[1,'AICc_wt'],' ',row.names(table)[1],'\n')
## model.list[[i]]<-c(row.names(table)[1],table[1,'AICc_wt'])
## }
## results.BSM<-list(0)
## for (i in 1:length(model.list)){
## results.BSM[[i]]<-run_BioGeoBEARS_selectedmodel_BSM(treefile=names.trees[i],geographyfile=names.geography[i],path=path,name=sub(names.trees[i],pattern='_neotropical.tree',replacement=''),model_name=model.list[[i]][1])
## setwd('/Users/javier/Desktop/HOTSPOTS/repositories/hotspots_vertebrates')
## }
## saveRDS(results.BSM,file=paste(path,realm.name,'_',number,'_BSM.RDS',sep=''))
##
##}
##
##
##
##
|
bb0ddcbb6503d5a3aa3fe887b268a4f18e1a645b
|
79d755cbdc90fd322e6d9fe8dcf3af14e52cfc2a
|
/man/DoEstRare.Rd
|
a67024d92207c3f07c9fed13fcb6de151ef0b198
|
[] |
no_license
|
cran/DoEstRare
|
1eb822b268aa3e6bf64c6e3c796a5723ba6b39e1
|
6f018fe785a9fe39fff1a855ecc81372a29c8c97
|
refs/heads/master
| 2021-01-02T08:24:32.222930
| 2017-08-01T12:24:30
| 2017-08-01T12:24:30
| 99,005,381
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,706
|
rd
|
DoEstRare.Rd
|
\name{DoEstRare}
\alias{DoEstRare}
\title{DoEstRare}
\description{
Rare variant association test comparing position density functions and mutation counts between cases and controls.
}
\usage{
DoEstRare(pheno, geno, position, genome.size,
perm=NULL, alpha=NULL, c=NULL,
autosomal=TRUE, gender=NULL)
}
\arguments{
\item{pheno}{a numeric vector of phenotypes. Affected individuals are coded 1 and unaffected individuals are coded 0.}
\item{geno}{a numeric matrix of genotypes (row: individual, column: variant). Genotypes are coded 0,1 or 2 corresponding to the number of minor alleles. }
\item{position}{a numeric vector of variant positions. }
\item{genome.size}{a numeric value corresponding to the length of the analyzed region. }
\item{perm}{number of permutations. If not NULL,a "standard permutation procedure" is performed to compute the significance. See Details. }
\item{alpha}{error level. If not NULL,an "adaptive permutation procedure" is performed to compute the significance. See Details.}
\item{c}{precision of the p-value. If not NULL,an "adaptive permutation procedure" is performed to compute the significance. See Details.}
\item{autosomal}{boolean. If TRUE, autosomal chromosome; FALSE, X chromosome.}
\item{gender}{numeric vector. 1=male; 2=female.}
}
\value{
\item{p.value}{the p-value obtained by the phenotype permutation procedure. }
\item{stat}{the test statistic. }
}
\details{
Two types of permutations procedures can be defined in the function: the standard permutation procedure and the adaptive permutation procedure.
In the standard permutation procedure, the user specifies, in the argument "perm", the number of permutations to be done. The p-value will be \eqn{(R+1)/(B+1)}, with \eqn{R} the number of permutation statistics superior to the observed statistic and \eqn{B} the number of permutations.
In the adaptive permutation procedure, the user specifies, in the argument "alpha", the significance level to achieve after multiple testing correction and, in the argument "c", the estimation precision of the p-value. From these two parameters, the maximal number of permutations and the maximal number of successes to achieve are computed. If the maximal number of successes is reached, the p-value will be \eqn{R/B}. If not, the maximal number of permutations will be used to compute the p-value \eqn{(R+1)/(B+1)}.
}
\references{
Persyn E, Karakachoff M, Le Scouarnec S, Le Clézio C, Campion D, French Exome Consortium, et al. DoEstRare: A statistical test to identify local enrichments in rare genomic variants associated with disease. Wang K, editor. PLOS ONE. 2017 Jul 24;12(7):e0179364.
Che R, Jack JR, Motsinger-Reif AA, Brown CC. An adaptive permutation approach for genome-wide association study: evaluation and recommendations for use. BioData Min. 2014;7:9.
}
\author{Elodie Persyn, \email{elodie.persyn@univ-nantes.fr}}
\examples{
pheno=rep(c(0,1), 500)
geno=matrix(sample(c(0,1),prob=c(0.7,0.3) ,1000*30, replace=TRUE), ncol=30)
position=sample(1:500,30)
genome.size=500
perm=200
#Autosomal gene
#standard phenotype permutation procedure
DoEstRare(pheno, geno, position, genome.size, perm)
#adaptive phenotype permutation procedure
DoEstRare(pheno, geno, position, genome.size, alpha=0.05, c=0.2)
#X gene
gender=rep(c(1,2), each=500)
#standard phenotype permutation procedure
DoEstRare(pheno, geno, position, genome.size, perm, autosomal=FALSE, gender=gender)
#adaptive phenotype permutation procedure
DoEstRare(pheno, geno, position, genome.size, alpha=0.05, c=0.2, autosomal=FALSE, gender=gender)
}
|
3f6ec5017932ab3b3ddf1b28cd92df34666e277d
|
6d5a7d0a5f55520fceb0a2868bc6b7fb7903075a
|
/R/class-node.R
|
7c2e1411bab9e47720ff00251229239b121da243
|
[] |
no_license
|
meta-QSAR/simple-tree
|
7dbb617aff4e637d1fcce202890f322b99364494
|
28ff7bf591d3330498a3c8a85d8ae5a3d27b37d5
|
refs/heads/master
| 2016-09-14T07:08:33.743705
| 2015-08-04T09:31:24
| 2015-08-04T09:31:24
| 58,349,277
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 127
|
r
|
class-node.R
|
# S4 class "node": one element of a tree, identified by `id`, carrying a
# display `name` and a `parent.id` that links it to its parent node.
# `.node` is the generator returned by setClass(), so new instances can be
# created with .node(id = ..., name = ..., parent.id = ...).
.node <- setClass("node",
  slots = c(id = "character", name = "character", parent.id = "character")
)
|
9ea98dda19440f402ef378724672c9ac1a5f32bb
|
d3fe5e108e4dbf3ba50aba0c184ddbe9884bc635
|
/scripts/eda.R
|
2953c410eca8c132236bbec6577a8c9d7ffe60b5
|
[] |
no_license
|
JasonDude16/Data-Science-Capstone-NLP
|
0c3b41c1e82ed7496f5d64ae0d3a6bec3d4b9250
|
70c5f2e439b5f4787f52f7af81b41ca4ee66f623
|
refs/heads/master
| 2023-02-18T06:23:11.749069
| 2021-01-14T00:01:58
| 2021-01-14T00:01:58
| 284,143,973
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,894
|
r
|
eda.R
|
# EXPLORATORY ANALYSIS ---------------------------------------------------
# Exploratory text analysis of a document-feature matrix `dat_dfm2`
# (presumably a quanteda dfm built in an upstream script -- TODO confirm):
# word-frequency plots plus sentiment breakdowns using the bing, afinn and
# nrc lexicons. Requires tidytext/broom (tidy), wordcloud, ggplot2, forcats
# and quanteda objects created elsewhere.
# Tidy the dfm into a long (document, term, count) data frame and rename the
# term column to `word` so it joins cleanly with the tidytext lexicons.
dat_dfm2_df <- tidy(dat_dfm2)
colnames(dat_dfm2_df)[2] <- "word"
# Word cloud, top words
# NOTE(review): count(word) tallies rows (i.e. documents containing each
# word), not total token occurrences -- confirm this is the intended weight.
dat_dfm2_df %>%
count(word) %>%
with(wordcloud(word, n, max.words = 100))
# Bar chart, top words
data.frame(word = attributes(topfeatures(dat_dfm2, n = 20))$names,
count = topfeatures(dat_dfm2, n = 20)) %>%
mutate(word = fct_reorder(word, desc(count))) %>%
ggplot(aes(word, count)) +
geom_col(fill = "lightblue") +
theme_bw() +
labs(title = "Top 20 Most Frequent Words") +
theme(axis.text.x = element_text(angle = 315),
panel.grid.minor.y = element_blank(),
panel.grid.major.x = element_blank())
# SENTIMENT ANALYSIS
## Top positive and negative
# Bing lexicon: binary positive/negative labels; show the 10 most frequent
# words for each polarity.
dat_dfm2_df %>%
inner_join(get_sentiments("bing")) %>%
count(word, sentiment, sort = TRUE) %>%
group_by(sentiment) %>%
mutate(word = fct_reorder(word, n)) %>%
slice(1:10) %>%
ggplot(aes(n, word, fill = sentiment)) +
geom_col(position = "dodge") +
facet_wrap(~sentiment, scales = "free_y") +
labs(title = "Top 10 Most Frequent Positive and Negative Sentiments") +
ylab("Count") +
theme_bw() +
theme(panel.grid.major.x = element_blank(),
panel.grid.minor.y = element_blank())
# Top sentiments
## Afinn lexicon: integer valence scores; top 3 words per score value.
dat_dfm2_df %>%
inner_join(get_sentiments("afinn")) %>%
count(word, value, sort = TRUE) %>%
group_by(value) %>%
mutate(word = fct_reorder(word, n)) %>%
slice(1:3) %>%
ggplot(aes(word, n, fill = as.factor(value))) +
geom_col(position = "dodge") +
labs(title = "Top 3 Most Frequent Words by Sentiment") +
ylab("Count") +
theme_bw() +
theme(axis.text.x = element_text(angle = 315),
panel.grid.minor.y = element_blank(),
panel.grid.major.x = element_blank())
# Top sentiments
## Nrc lexicon: categorical emotions; top 3 words per emotion, faceted.
dat_dfm2_df %>%
inner_join(get_sentiments("nrc")) %>%
count(word, sentiment, sort = TRUE) %>%
mutate(word = fct_reorder(word, n)) %>%
group_by(sentiment) %>%
slice(1:3) %>%
ggplot(aes(word, n, fill = as.factor(sentiment))) +
geom_col(position = "dodge") +
facet_wrap(~sentiment, scales = "free_x", nrow = 2) +
labs(title = "Top 3 Most Frequent Words by Sentiment") +
ylab("Count") +
theme_bw() +
theme(panel.grid.minor.y = element_blank(),
panel.grid.major.x = element_blank())
# -------------------------------------------------------------------------
# Corpus-level summaries (token and sentence counts per document); `corpus`
# and `dat_dfm_samp` are assumed to be quanteda objects created upstream --
# TODO confirm.
ggplot(summary(corpus), aes(x = Tokens)) +
geom_bar()
ggplot(summary(corpus), aes(x = Sentences)) +
geom_bar()
textplot_wordcloud(dat_dfm_samp)
textstat_frequency(dat_dfm_samp)[1:20] %>%
ggplot(aes(x = feature, y = frequency)) +
geom_bar(stat = "identity")
# Per-term row counts from the tidied sampled dfm, sorted descending.
term_count <-
tidy(dat_dfm_samp) %>%
group_by(term) %>%
summarise(count = n()) %>%
arrange(desc(count))
ggplot(term_count[1:20,], aes(x = term, y = count)) +
geom_bar(stat = "identity")
|
3b22e9914278aaa7e7d17be58b88b6e4e41fa741
|
72d9009d19e92b721d5cc0e8f8045e1145921130
|
/dvmisc/man/bmi3.Rd
|
c12d19fa6d3b9b2395830dbc9a9f25564d7895e8
|
[] |
no_license
|
akhikolla/TestedPackages-NoIssues
|
be46c49c0836b3f0cf60e247087089868adf7a62
|
eb8d498cc132def615c090941bc172e17fdce267
|
refs/heads/master
| 2023-03-01T09:10:17.227119
| 2021-01-25T19:44:44
| 2021-01-25T19:44:44
| 332,027,727
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 722
|
rd
|
bmi3.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bmi3.R
\name{bmi3}
\alias{bmi3}
\title{Convert Continuous BMI Values into 3-Level Factor}
\usage{
bmi3(x, labels = TRUE)
}
\arguments{
\item{x}{Numeric vector of BMI values.}
\item{labels}{If \code{TRUE}, factor levels are labeled
\code{"Normal weight"}, \code{"Overweight"}, and \code{"Obese"}; if
\code{FALSE}, factor levels are \code{[-Inf, 25)}, \code{[25, 30)}, and
\code{[30, Inf)}.}
}
\value{
Factor variable with 3 levels.
}
\description{
Converts a continuous BMI variable into a 3-level factor variable: Normal
weight if \code{[-Inf, 25)}, Overweight if \code{[25, 30)}, and Obese if
\code{[30, Inf)}.
}
|
5066485c78ee9908a23106e9852dea07366fed7b
|
87be7889592534040bcc835fc5ee76ec2934c98c
|
/Russell_DBST667_Week2_Exercise_R_Script.R
|
78ac9eccf88ab5608c5559d1cbf5b60ef92151d1
|
[] |
no_license
|
rbrandon87/DBST667_Week2_Exercise
|
83831331dabaf44c13e5823b7b64d700e428839e
|
0da9c27a566c5f57039e8c6e0429a87d57cf2f0d
|
refs/heads/master
| 2020-03-18T19:41:57.760146
| 2018-06-02T16:06:23
| 2018-06-02T16:06:23
| 135,171,249
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,850
|
r
|
Russell_DBST667_Week2_Exercise_R_Script.R
|
# Week-2 exercise: explore and preprocess the Credit Approval dataset.
# Expects CreditApproval.csv in the working directory; each "Part" below is
# a graded step (load/inspect, discretize, drop column, impute, sort, plot).
#Part 2aii
#Load data and read first 10 rows
credit <- utils::read.csv(file="CreditApproval.csv", head=TRUE, sep=",")
head(credit, n=10)
#Part 2aiii
#Run str command on credit dataset
str(credit)
#Part 2bi
#Run summary command on credit dataset
summary(credit)
#Part2bii
#Run summary command on specific attributes
summary(credit$A14)
summary(credit$A2)
#Part2biii
summary(credit$A1)
summary(credit$A4)
#Part2cii
#install and load necessary packages for discretization
#Run interval, frequency, and clustering on same column for comparing
# NOTE(review): install.packages() inside a script reinstalls on every run;
# also, both packages export discretize(), so the arules version loaded last
# masks discretization's -- presumably arules::discretize is the one
# intended here (its second argument is the method name); confirm.
install.packages("discretization")
install.packages("arules")
library(discretization)
library(arules)
summary(credit$A2)
# Equal-width binning of A2 into 6 intervals.
credit$A2<-discretize(credit$A2, "interval", breaks=6)
summary(credit$A2)
# Reload the raw data so each discretization method starts from numeric A2.
rm(credit)
credit <- utils::read.csv(file="CreditApproval.csv", head=TRUE, sep=",")
summary(credit$A2)
# Equal-frequency binning of A2 into 6 intervals.
credit$A2<-discretize(credit$A2, "frequency", breaks=6)
summary(credit$A2)
rm(credit)
credit <- utils::read.csv(file="CreditApproval.csv", head=TRUE, sep=",")
summary(credit$A2)
# Clustering-based (k-means) binning of A2 into 6 intervals.
credit$A2<-discretize(credit$A2, "cluster", breaks=6)
summary(credit$A2)
#Part2civ
#Remove an atttribute and view dataset
rm(credit)
credit <- utils::read.csv(file="CreditApproval.csv", head=TRUE, sep=",")
credit$A1 <- NULL
View(credit)
#Part2di
#View attributes with missing values (count of NAs per column)
rm(credit)
credit <- utils::read.csv(file="CreditApproval.csv", head=TRUE, sep=",")
apply(credit, 2, function (credit) sum(is.na(credit)))
#Part2dii
#Replace missing values with attribute mean (mean imputation for A2 only)
credit$A2[is.na(credit$A2)]<-mean(credit$A2, na.rm=TRUE)
apply(credit, 2, function (credit) sum(is.na(credit)))
#Part2dv
#Sort the dataset by attribute A6
rm(credit)
credit <- utils::read.csv(file="CreditApproval.csv", head=TRUE, sep=",")
credit_sorted<-credit[order(credit$A6), ]
head(credit_sorted$A6)
#Part2ei
#Create a plot of the frequency of each A6 level
plot(table(credit$A6), type="h", col="blue")
|
2352444e46e4e9a4f889734eb66d60577ddb9674
|
2b2345b0b67a866ecbe686f7162ac1bc9922c50c
|
/cachematrix.R
|
9c09d298bfb50f2e40171ce113ee2dcce3e3924a
|
[] |
no_license
|
abulmohaimin/ProgrammingAssignment2
|
67628254c197dec61f9e1612e2fbdab1cab49f3a
|
3e4f949be66062ab27ddb5c5bbb8ebb442462fdb
|
refs/heads/master
| 2020-12-24T18:23:20.872757
| 2016-01-23T03:39:22
| 2016-01-23T03:39:22
| 50,221,267
| 0
| 0
| null | 2016-01-23T03:17:42
| 2016-01-23T03:17:42
| null |
UTF-8
|
R
| false
| false
| 664
|
r
|
cachematrix.R
|
## Calculate inverse of inputted matrix
## Cache the matrix
# Create a caching wrapper around a matrix: returns a list of four closures
# (set/get the matrix, set/get its cached inverse) that share state through
# this function's environment. The cached inverse `m` starts as NULL and is
# reset to NULL whenever a new matrix is stored.
makeCacheMatrix <- function(x = matrix()) {
m <- NULL
# Replace the stored matrix and invalidate any cached inverse.
# NOTE(review): the parameter shadows the outer `x`; `x <<- x` still works
# because <<- skips the local frame and finds the enclosing `x`, but
# renaming the argument (e.g. to `y`) would make this far less confusing.
set <- function(x) {
x <<- x
m <<- NULL
}
# Return the currently stored matrix.
get <- function()x
# Store / retrieve the cached inverse (NULL until cacheSolve computes it).
set_inv_mat <- function(inv_mat) m <<- inv_mat
get_inv_mat <- function() m
list(set = set, get = get,
set_inv_mat = set_inv_mat,
get_inv_mat = get_inv_mat)
}
## Calculate the inverse of matrix
# Return the inverse of the matrix wrapped by `x` (a makeCacheMatrix
# object). The inverse is computed at most once: if a cached value exists it
# is returned directly (with a status message); otherwise solve() is called,
# the result is stored back into the cache, and then returned. Extra
# arguments in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$get_inv_mat()
  if (is.null(cached)) {
    inv <- solve(x$get(), ...)
    x$set_inv_mat(inv)
    return(inv)
  }
  message("getting cached data")
  cached
}
|
76c0474043e3708cfc755af5907e7ba11b0d656f
|
f7cd9023746b8bdd29b90731ec389ad9ccf626d1
|
/haplotyping/convert_VCF_into_PED.R
|
e975a2b113f0413cdfdc686d3f37e62762e09fcb
|
[] |
no_license
|
kibanez/analysing_STRs
|
1a498f121763d7cb411e3b66b25ec22eea608244
|
b922ab7d2b9668dc8add9e73f1d4e44a0aac1fd4
|
refs/heads/master
| 2022-02-25T05:28:43.480585
| 2022-02-04T10:29:30
| 2022-02-04T10:29:30
| 218,277,677
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,198
|
r
|
convert_VCF_into_PED.R
|
# Function or R script, that given as input files PED and phenotypic files, it completes the PED file
date ()
Sys.info ()[c("nodename", "user")]
commandArgs ()
rm (list = ls ())
R.version.string ## "R version 3.6.1 (2019-07-05)"
# libraries
library(dplyr); packageDescription ("dplyr", fields = "Version") #"0.8.3"
library(ggplot2); packageDescription ("ggplot2", fields = "Version") #"3.2.1"
library(reshape); packageDescription ("reshape", fields = "Version") #"0.8.8"
library(scatterplot3d); packageDescription("scatterplot3d", fields = "Version") # 0.3-41
library(ggpubr); packageDescription("ggpubr", fields = "Version") # 0.2.3
library(tidyverse)
# Reading arguments
options <- commandArgs(trailingOnly = T)
input_ped <- as.character(options[1])
genotype_ped <- as.character(options[2])
phenotype_file <- as.character(options[3])
output_ped <- as.character(options[4])
#Check whether `input_ped` and `phenotype_file` do exist
if (!file.exists(input_ped)){
write("convert_VCF_into_PED R function: Original PED file does not exist. The original PED file is required", stderr())
}
if (!file.exists(phenotype_file)){
write("convert_VCF_into_PED R function: Phenotypes (gender and affection status) for all genomes are missing", stderr())
}
# Load data
ped_data = read.csv(input_ped,
stringsAsFactors = F,
header = F,
sep = "\t")
pheno_data = read.csv(phenotype_file,
stringsAsFactors = F,
header = T,
sep = "\t")
genotype_data = read.csv(genotype_ped,
stringsAsFactors = F,
header = F,
sep = "\t")
# Enrich with `gender` and `affection status` each genome
ped_data = left_join(ped_data[,c(1:4)],
pheno_data %>% select(IID, sex, CaseControl),
by = c("V2" = "IID"))
# Merge `ped_data` with `genotypes`
ped_complete = cbind(ped_data,
genotype_data)
# Write into file
write.table(ped_complete,
output_ped,
quote = F,
row.names = F,
col.names = F,
sep = "\t")
|
d7935cf06bfa3eacc57c03f53561ed25f264d8ab
|
ffe18353716e2a910d687f0892ab524e74c752a3
|
/appt/server.r
|
fd1fd49cd1e53991607af652d3c3c69bd8d4c1a9
|
[] |
no_license
|
joanrojas/crime_data_shinyapps
|
c20b69c7b109853a5c2f199677ce8cd9ab97256a
|
ef8f58e81bd4e5ec6127d2ec443879931bea3872
|
refs/heads/master
| 2021-01-19T08:42:16.122836
| 2016-12-26T10:46:06
| 2016-12-26T10:46:06
| 29,781,843
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,527
|
r
|
server.r
|
library(shiny)
library(gridExtra)
require(gridExtra)
require(ggplot2)
# Shiny server: crime counts for one San Francisco police district, broken
# down by month, day of week, and hour. Reads crime_data.csv from the app
# directory; the district is chosen via the UI input `input_1`.
shinyServer(function(input, output) {
#district function returns a data frame with data only from the distric input in the selectInput from UI
data_try <- reactive({
crime_data <- read.csv(file="crime_data.csv", header=TRUE, na.strings="NA")
set <- subset(crime_data, crime_data$PdDistrict == as.character(input$input_1))
# Derive a month factor from the first two characters of Date
# (assumes MM/DD/YYYY formatting -- TODO confirm against the CSV).
date_character <- as.character(set$Date)
set$month <- substr(date_character,1,2)
set$month <- as.factor(set$month)
# Round each crime's time to the nearest hour; the dummy date 2001-01-01
# only exists so strptime can parse the "HH:MM" strings.
x <- as.character(set$Time)
tt <- strptime(paste("2001-01-01", x), format="%Y-%m-%d %H:%M")
set$hour <- as.factor(format(round(tt,units="hours"), format="%H:%M"))
return(set)
})
# Bar chart: number of crimes per month for the selected district.
output$plot_month <- renderPlot ({
b <- data_try()
ggplot(b) + geom_bar(aes(x= month), fill="#599ad3", position="dodge") + theme(axis.text.x=element_text(angle=45, hjust=1)) + ylab("N. Crimes")
})
# Bar chart: number of crimes per day of the week.
output$plot_day <- renderPlot ({
c <- data_try()
ggplot(c) + geom_bar(aes(x =DayOfWeek)) + theme(axis.text.x=element_text(angle=45, hjust=1)) + xlab("Day of the week") + ylab("N.Crimes")
})
# Bar chart: number of crimes per hour, with 3-hourly axis labels.
output$plot_hour <- renderPlot ({
d <- data_try()
hour_breaks = c("00:00", "03:00", "06:00", "09:00", "12:00", "15:00", "18:00","21:00")
ggplot(d) + geom_bar(aes(x=hour)) + theme(axis.text.x=element_text(angle=90, hjust=8)) + scale_x_discrete(breaks = hour_breaks) + ylab("N. Crimes")
})
})
|
e640bdaa3b799c259cb80dd84f858c3f9840b20a
|
bd8a7c215d851e6b3c44165baec15e3f13efb665
|
/man/es_renderPlot.Rd
|
2b9a8948c3a35c4abc8ad3a923ea7bb771f8f1fc
|
[] |
no_license
|
mYstar/easyshiny
|
dfe36d11f97d390cb3e7e5548f64d6939b9de36a
|
9987d571a65ecdb6004cfa112ad80f027694b0fd
|
refs/heads/master
| 2020-04-12T15:02:55.283045
| 2019-06-19T08:19:46
| 2019-06-19T08:19:46
| 162,569,346
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,661
|
rd
|
es_renderPlot.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/es_add_output.R
\name{es_renderPlot}
\alias{es_renderPlot}
\alias{es_renderDataTable}
\alias{es_renderImage}
\alias{es_renderPrint}
\alias{es_renderTable}
\alias{es_renderText}
\alias{es_renderUI}
\title{Add Output Object}
\usage{
es_renderPlot(expr, tab = NULL, box = NULL, ...)
es_renderDataTable(expr, tab = NULL, box = NULL, ...)
es_renderImage(expr, tab = NULL, box = NULL, ...)
es_renderPrint(expr, tab = NULL, box = NULL, ...)
es_renderTable(expr, tab = NULL, box = NULL, ...)
es_renderText(expr, tab = NULL, box = NULL, ...)
es_renderUI(expr, tab = NULL, box = NULL, ...)
}
\arguments{
\item{expr}{the expression to pass to the outputFunction (should be generating the correct object type)}
\item{tab}{tab to show the plot in (default: 'Output', new name creates tab)}
\item{box}{box in the view area to show plot in (default: 'Result', new name creates box)}
\item{...}{the parameters, that should be given to the output function}
}
\description{
Adds an output object from shiny to the Easy Shiny app and places it into a specific tab and box. All the
wrapper functions pass all their arguments to \code{es_add_output} (useful for specifying tabs and boxes).
}
\section{Functions}{
\itemize{
\item \code{es_renderDataTable}: adds a shiny datatable to the app
\item \code{es_renderImage}: adds a shiny image to the app
\item \code{es_renderPrint}: prints the results in the app
\item \code{es_renderTable}: adds a shiny table to the app
\item \code{es_renderText}: adds a shiny text to the app
\item \code{es_renderUI}: adds shiny UI elements to the app
}}
|
f171b6a85d572854f426b766c05c4a4099049fa7
|
d6793e6154e4373b20fbeab75bd227800a2cb7fa
|
/Revised_1_app copy 4_v2.R
|
c000095765725f24343b2bc96ef074a5241593e0
|
[] |
no_license
|
Laava5892/R-recent-scripts
|
c94985fc52a7608c20cb78e41e0e84cd38d84eff
|
db5c7c5ae8c30bff99b069042472152a8f0d2e6d
|
refs/heads/master
| 2021-06-16T12:50:37.434202
| 2021-05-15T18:26:28
| 2021-05-15T18:26:28
| 200,422,233
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 240,883
|
r
|
Revised_1_app copy 4_v2.R
|
##### PACKAGES #####
# install.packages('rsconnect')
# library('rsconnect')
#
# rsconnect::setAccountInfo(name='paml-adobe', token='F2029EBA9515AEA913E45F3CA4B43BF0', secret='GGmXl7C77AlltpYQq6GjoA8vJPqg9Ug1BCYxZINH')
#
# rsconnect::deployApp('path/to/your/app')
#
# install.packages("rvest")
# library("rvest")
# getwd()
# setwd("/Users/lganesh/Desktop/Search_Dashboard") # change to your working directory and place the search diagonistics image in www folder
# Runtime dependencies for the dashboard, installed on demand below.
# NOTE(review): 'jsonlite' and 'lubridate' are each listed twice; harmless,
# since the %in% membership check below de-duplicates implicitly.
packages <- c('rvest', 'rsconnect', 'tsapi', 'RInside', 'mime', 'RCurl', 'jsonlite', 'stringr', 'futile.logger', 'reshape', 'ggplot2',
              'DT', 'datasets', 'devtools', 'dplyr', 'lubridate', 'shinyjs', 'httr',
              'jsonlite', 'markdown', 'shinyBS', 'shiny', 'tidyr', 'scales', 'lubridate','curl','shinydashboard','plotly','data.table')
# install.packages("shinydashboard")
# library(shinydashboard)
#
# install.packages("plotly")
# library(plotly)
#
# install.packages("data.table")
# library("data.table")
#
# install.packages("futile.logger")
# library("futile.logger")
# determine which required packages are not installed
new.packages <- packages[!(packages %in% installed.packages()[,"Package"])]
# if packages are missing, install them
if(length(new.packages)) install.packages(new.packages)
# after package install, initialise all packages.
# NOTE(review): do.call("require", list(X)) loads each package by its quoted
# name, but require() returns FALSE instead of erroring when a load fails,
# so missing/broken packages are silently ignored here -- confirm intended.
lapply(packages, FUN = function(X) {
  do.call("require", list(X))
})
# klmno_user = " "
# klmno_password = " "
# ts_api
# ts_api(): POST a SQL query to the ts-api "super/execute" endpoint and
# return the parsed JSON payload. `db_tag` selects the client database and
# `dc` the data centre; the HTTP request times out after 600 seconds and
# verifies the server's SSL certificate.
ts_api <- function (query = NA, db_tag = NA, dc = NA)
{
    endpoint <- "https://ts-api.efrontier.com/api/super/execute"
    curl_opts <- list(timeout = 600, ssl.verifypeer = TRUE)
    raw_reply <- postForm(endpoint, query = query, dbtags = db_tag, dc = dc,
        .opts = curl_opts)
    jsonlite::fromJSON(raw_reply)
}
# amo_db_get
# amo_db_get(): validated wrapper around ts_api(). Checks the inputs, runs
# the query against the given db_tag / data centre, and returns the API
# response as a data.frame (stringsAsFactors = FALSE). Errors out when the
# query fails or the db_tag is unknown on that data centre.
#   query  - SQL string to execute
#   db_tag - client database tag
#   dc     - data centre id: one of "scl2", "or1", "lon5", "all"
#   debug  - when TRUE, log progress via futile.logger
amo_db_get <- function (query = NA, db_tag = NA, dc = NA, debug = FALSE)
{
    if (debug)
        flog.layout(layout.format(paste("[~l] [amo_db_get] [~t] ~m",
            sep = "")))
    # BUG FIX: the original guards were mis-parenthesised --
    # `any(sapply(...)) < 1` and `any(sapply(...)) == FALSE` compared the
    # scalar result of any() instead of the element-wise vectors, so they
    # only fired when ALL three inputs were bad. Compare element-wise so a
    # single missing / non-character input is rejected.
    if (any(sapply(list(query, db_tag, dc), length) < 1))
        stop("Failed: missing input parameter [query, db_tag, dc]")
    if (!all(sapply(list(query, db_tag, dc), is.character)))
        stop("Failed: all inputs must be strings")
    if (!(dc %in% c("scl2", "or1", "lon5", "all")))
        stop("Failed: Incorrect data centre ID")
    if (debug)
        flog.info("Verified input parameters")
    if (debug)
        flog.info("Attempting query: \"%s\"", gsub("\r?\n|\r|\\s+",
            " ", query))
    func_start_time <- Sys.time()
    api_out <- ts_api(query = query, db_tag = db_tag, dc = dc)
    if (!valid_response(api_out))
        stop("Failed: please check your query is correct (dates must be strings)")
    if (valid_response(api_out)) {
        # A textual "<db_tag> not found" payload means the tag does not
        # exist on this data centre, even though the HTTP status was OK.
        if (is.character(api_out$response) && grepl(sprintf("%s not found",
            db_tag), api_out$response)) {
            stop(sprintf("Failed: db_tag %s not found on %s data centre",
                db_tag, dc))
        }
        if (debug)
            flog.info("Query successful for db_tag %s on %s dc",
                db_tag, dc)
        if (debug)
            flog.info("Completed (runtime: %.2f sec)", as.numeric(Sys.time() -
                func_start_time, units = "secs"))
        return(data.frame(api_out$response, stringsAsFactors = FALSE))
    }
}
# TRUE when the API reply carries status code "200" AND a non-empty response
# payload; FALSE otherwise. ifelse() is kept deliberately so an NA condition
# (e.g. a missing status code) still propagates as NA, exactly as before.
valid_response <- function (input) {
    status_ok <- input$status$code == "200"
    has_payload <- length(input$response) > 0
    ifelse(status_ok & has_payload, TRUE, FALSE)
}
# Disable scientific notation in printed/written output (scipen = 999) so
# long numeric portfolio IDs (PIDs) appear as plain digits in the tables
# instead of e.g. 1.7e+09.
options(scipen=999)
server = function(input, output, session ) {
##### Caching functionnn nnb v
# cache_users(): return a data.frame of managed ('m') AMO users, cached on
# disk for one day. On a cache miss (file absent, or not modified today) it
# queries the or1 and lon5 data centres via amo_db_get() and rewrites
# ./users_cached.csv; otherwise it reads the cached file without querying.
cache_users <- function() {
# Query both data centres for managed users and tag each row with its dc.
get_active_usernames <- function() {
admin_query = "select userid, username, db_tag, tzid from users where managed = 'm';"
#userids, names, db_tag from OR1 - US and A where (date(mtime) > (current_date - 90))
admin_or1 = amo_db_get(query=admin_query, db_tag = 'admin', dc = 'or1', debug = TRUE)
admin_or1$dc <-'or1'
# NOTE(review): this assignment is dead -- admin_global is overwritten by
# the rbind() below before it is ever read.
admin_global <- admin_or1
#userids, names, db_tag from lon5 - UK
admin_lon5 = amo_db_get(query=admin_query, db_tag = 'admin', dc = 'lon5', debug = TRUE)
admin_lon5$dc <-'lon5'
#rbind the DFs from each Data Center
admin_global = rbind(admin_or1, admin_lon5)
# Sorted frame is the function's (implicit, invisible) return value.
admin_global = admin_global[order(admin_global$username), ]
}
##### FOR VARIOUS ACTIVE USERS #####
cache_file <- "./users_cached.csv" # set filename and path
# NOTE(review): this sprintf() result is discarded -- inside a function it
# does not auto-print, so nothing is actually logged here.
sprintf("Cache file: %s", cache_file) # logging statement
if( !file.exists(cache_file) ) {
# if the cache_file does not exist, retrieve list of active
# clients and write to file
active_users <- get_active_usernames()
#rtb_users <- rtb_users$response
write.csv(active_users, file = cache_file, row.names=FALSE)
} else {
# if cache_file exists, determine whether it is outdated
# (date-level granularity: the cache is considered fresh only when the
# file was last modified on the current calendar date).
cache_modified = format(file.mtime(cache_file), "%Y-%m-%d") # get modified date for cache_file
current_date = format(Sys.Date(), "%Y-%m-%d") # get current date
if (cache_modified != current_date) {
#if cache_file was not modified on the current date, it is old.
#re-query rtb_users and overwrite local file
active_users <- get_active_usernames()
#rtb_users <- rtb_users$response
write.csv(active_users, file = cache_file, row.names=FALSE)
} else {
# if cache_file exists and was last modified today, read rtb
# users from the saved file without querying
active_users <- read.csv(cache_file, stringsAsFactors = FALSE)
}
}
return(active_users)
}
#####
users <- cache_users() # execute function before shinyServer()
users$dbtagdc = paste(users$db_tag, users$dc, sep=",")
passdata <-eventReactive(input$goButton,{
withProgress(message = 'Loading in progress...', value = 0, {
n <- 10
for (i in 1:n) {
incProgress(1/n, detail = paste(""))
Sys.sleep(.1)
}
if (!is.na(input$username != "All")) {
users_1 <- users[users$username == input$username & users$dbtagdc == input$Dbtag_DC,]
dbtag <- as.character(users_1$db_tag)
dc_choose <- as.character(users_1$dc)
# GET all pids
query_all_pid <- "select pid from user_portfolios ; "
pid_or1 <- amo_db_get(query=query_all_pid, db_tag = dbtag, dc = dc_choose, debug=TRUE)
active_pid_or1 <- sprintf("(%s)", paste(unique(pid_or1$pid), collapse=","))
active_pid_or1 <- noquote(gsub("\\(|\\)", "", active_pid_or1))
date1 <- Sys.Date()-8
date2 <- Sys.Date()-1
admin_query_pid = sprintf("select pid,db_tag, userid as Client_Account, portfolio_id as Portfolio_Name, username as Client_Account_Name, status_code as Portfolio_Status from user_portfolios left join users using (userid) where pid in (%s) order by 1", active_pid_or1) #input[characterstring][3]
admin_pid = amo_db_get(query=admin_query_pid, db_tag = dbtag, dc = dc_choose, debug = TRUE)
admin_pidd <- admin_pid %>% mutate(portfolio_status = case_when(portfolio_status == 'a' ~ "Active",
portfolio_status == 'i' ~ "Inactive",
portfolio_status == 'z' ~ "Optimize",
portfolio_status == 'r' ~ "Deleted" ))
admin_pidd$dcchoose <- dc_choose
admin_final <- admin_pidd
admin_query_daily_accuracy_agg = sprintf("SELECT p.pid as pid,
SUM(predicted_clicks) as pred_clicks,
SUM(actual_clicks) as act_clicks,
(100.0 * SUM(actual_clicks )/ SUM(NULLIF( predicted_clicks, 0 )) )::NUMERIC(20,2) AS click_acc,
SUM(predicted_spend) as pred_cost,
SUM(actual_spend) as act_cost,
SUM(budget)::NUMERIC(20,2) AS budget ,
(100.0 * SUM(actual_spend) / SUM(NULLIF( predicted_spend, 0 )) )::NUMERIC(20,2) AS cost_acc,
SUM(predicted_rev) as pred_rev,
SUM(crev) as act_rev,
(100.0 * SUM(crev )/ SUM(NULLIF ( predicted_rev, 0 ) ))::NUMERIC(20,2) AS rev_acc,
SUM(predicted_impr) as pred_impr,
SUM(actual_impr) as act_impr,
(100.0 * SUM(actual_impr)/SUM(NULLIF( predicted_impr, 0) ))::NUMERIC(20,2) AS impr_acc,
(100.0 * SUM((actual_spend/NULLIF( actual_clicks, 0 ))) /SUM(NULLIF((predicted_spend/NULLIF( predicted_clicks, 0 )),0))) ::NUMERIC(20,2) AS cpc_acc,
(100.0 *SUM(actual_spend )/ SUM(NULLIF( budget, 0 ) ))::NUMERIC(20,2) AS pacing_acc,
(100.0 * SUM(crev/NULLIF( actual_clicks, 0 )) /SUM(NULLIF((predicted_rev/NULLIF( predicted_clicks, 0 )),0))) ::NUMERIC(20,2) AS rpc_acc
FROM ( SELECT (j.job_start_time AT TIME ZONE t.tz + INTERVAL '6 hours')::date as date,j.pid,
AVG(j.converged_clicks)::NUMERIC(20,2) AS predicted_clicks,
AVG(j.converged_impr)::NUMERIC(20,2) AS predicted_impr,
( AVG(j.converged_cost)/100.0 )::NUMERIC(20,2) AS predicted_spend,
AVG(j.converged_rev)::NUMERIC(40,4) AS predicted_rev,
AVG( CASE j.stid WHEN 5 THEN j.st_constraint ELSE j.budget_high END ) AS budget,
COUNT(1) as opt_runs,
MAX(j.job_start_time at time zone t.tz) as last_ran,
( array_agg(pov.objid ORDER BY j.job_start_time DESC))[1] as objid
FROM optimizer_jobs j
JOIN user_portfolios up ON (up.pid = j.pid)
LEFT JOIN portfolio_objectives_versions pov ON (up.pid = pov.pid and j.job_start_time between pov.xstart_time and pov.xend_time)
JOIN ( users u JOIN timezones t ON (t.tzid = u.tzid) ) t ON (up.userid = t.userid)
WHERE up.pid in (%1$s)
AND j.job_start_time >= DATE '%2$s' - INTERVAL '2 days'
AND j.job_end_time <= DATE '%3$s' + INTERVAL '2 days'
AND (j.job_start_time at time zone t.tz + interval '6 hours')::DATE >= DATE '%2$s'
AND (j.job_end_time at time zone t.tz + interval '6 hours')::DATE <= DATE '%3$s'
GROUP BY 1,j.pid ) as p
FULL JOIN ( SELECT CPH.pid,WH.date,
SUM(WH.clicks) AS actual_clicks,
SUM(WH.impressions) AS actual_impr,
( SUM(WH.cost)/100.0 )::NUMERIC(20,2) AS actual_spend
FROM day_sem_campaign_agg WH
JOIN cid_pid_history CPH ON (WH.cid = CPH.cid AND WH.date = CPH.date)
JOIN user_campaigns uc on (CPH.cid = uc.cid)
JOIN user_accts ua on (uc.user_acctid = ua.user_acctid)
WHERE CPH.pid in (%1$s)
AND ua.sid <> 77
AND WH.date >= '%2$s'
AND WH.date <= '%3$s'
GROUP BY WH.date,CPH.pid
UNION ALL
SELECT CPH.pid,WH.date,
SUM(WH.est_clicks) AS actual_clicks,
SUM(WH.est_impressions) AS actual_impr,
( SUM(WH.est_cost)/100.0 )::NUMERIC(20,2) AS actual_spend
FROM day_trig_campaign_agg WH
JOIN cid_pid_history CPH ON (WH.cid = CPH.cid AND WH.date = CPH.date)
JOIN user_campaigns uc on (CPH.cid = uc.cid)
JOIN user_accts ua on (uc.user_acctid = ua.user_acctid)
WHERE CPH.pid in (%1$s)
AND ua.sid = 77
AND WH.date >= '%2$s'
AND WH.date <= '%3$s'
GROUP BY WH.date,CPH.pid
) AS c USING (pid,date)
FULL JOIN ( SELECT CPH.pid, WH.date,
SUM( case when (device = 'm' AND mobile_weights_enabled = TRUE)
then coalesce(f.mobile_weight, f.weight)
else f.weight
end * (COALESCE(CT_VALUE_LAST, 0) + COALESCE(CTVT_VALUE_LAST, 0) * (1 - 0) + COALESCE(VTCT_VALUE_LAST, 0) * 0 + 0.4 * ( COALESCE (VT_VALUE_LAST, 0)) ) ) as crev,
(SUM( case when (device = 'm' AND mobile_weights_enabled = TRUE)
then coalesce(f.mobile_weight, f.weight)
else f.weight
end * ( COALESCE(CT_VALUE_LAST, 0) / COALESCE(D.click_thru_percentage, 1) + COALESCE(CTVT_VALUE_LAST, 0) * (1 - 0) / COALESCE(D.click_thru_percentage, 1) + COALESCE(VTCT_VALUE_LAST, 0) * 0 / COALESCE(D.view_thru_percentage, 1) + COALESCE (VT_VALUE_LAST, 0) * 0.4 / COALESCE(D.view_thru_percentage, 1) ) ))::NUMERIC(20,2) as crev_d
FROM day_revenue_campaign_agg WH
JOIN cid_pid_history CPH ON (WH.cid = CPH.cid AND WH.date = CPH.date)
JOIN portfolio_objectives PO ON (CPH.pid = PO.pid)
JOIN objective_function f ON (f.objid = PO.objid AND f.propertyid = WH.propertyid)
JOIN objectives ob ON ob.objid = f.objid
LEFT JOIN (
SELECT pid,
propertyid,
(now() at time zone tz)::date - delay AS date,
CASE
WHEN click_thru_percentage = 0
THEN 1
WHEN click_thru_percentage < 0.01
THEN 0.01
ELSE
LEAST(click_thru_percentage, 1)
END AS click_thru_percentage,
CASE
WHEN view_thru_percentage = 0
THEN 1
WHEN view_thru_percentage < 0.01
THEN 0.01
ELSE
LEAST(view_thru_percentage, 1)
END AS view_thru_percentage
FROM property_daily_delayed_rev_factors
JOIN user_portfolios using(pid)
JOIN users using(userid)
JOIN timezones using(tzid)
WHERE delay >= 0
AND pid in (%1$s)
) D on (wh.propertyid = D.propertyid and CPH.pid = D.pid and WH.date = D.date)
WHERE CPH.pid in (%1$s)
AND WH.date >= '%2$s'
AND WH.date <= '%3$s'
GROUP BY WH.date,CPH.pid ) AS cr USING (pid,date)
JOIN ( SELECT CPH.pid,WH.date,
SUM( case when (device = 'm' AND mobile_weights_enabled = TRUE) then coalesce(f.mobile_weight, f.weight) else f.weight end * (COALESCE(CT_VALUE_LAST, 0) + (1 - 0) * COALESCE(CTVT_VALUE_LAST, 0) + 0.4 * ( COALESCE (VT_VALUE_LAST, 0) + 0 * COALESCE(VTCT_VALUE_LAST, 0) ) ) ) as trev
FROM tday_revenue_campaign_agg WH
JOIN cid_pid_history CPH ON (WH.cid = CPH.cid AND WH.date = CPH.date)
JOIN portfolio_objectives PO ON (CPH.pid = PO.pid)
JOIN objective_function f ON (f.objid = PO.objid AND f.propertyid = WH.propertyid)
JOIN objectives ob ON ob.objid = f.objid
WHERE CPH.pid in (%1$s)
AND WH.date >= '%2$s'
AND WH.date <= '%3$s'
GROUP BY WH.date,CPH.pid ) AS tr USING (pid,date)
GROUP BY 1
ORDER BY 1 DESC,2 ;", active_pid_or1,date1,date2)
admin_daily_1 = amo_db_get(query=admin_query_daily_accuracy_agg, db_tag = dbtag, dc = dc_choose, debug = TRUE)
admin_final <- merge(admin_final, admin_daily_1, by.x="pid", by.y = "pid", all.x = TRUE)
admin_final[is.na(admin_final)] <- 0
admin_final <- admin_final %>%
mutate(Scenario = case_when(cost_acc > 0 & cost_acc < 80 & rpc_acc >=80 & rpc_acc<=120 ~ "Underspend AND RPC Accuracy OK",
cost_acc > 0 & cost_acc < 80 & rpc_acc <80 | rpc_acc >120 ~ "Underspend AND Poor RPC Accuracy ",
cost_acc > 120 & rpc_acc >=80 & rpc_acc<=120 ~ "Overspend AND RPC Accuracy OK",
cost_acc > 120 & rpc_acc > 0 & rpc_acc < 80 | rpc_acc > 120 ~ "Overspend AND Poor RPC Accuracy",
cost_acc >= 80 & cost_acc <=120 & rpc_acc >=80 & rpc_acc<=120 ~ "Cost Accuracy Within Range AND RPC Accuracy OK",
cost_acc >= 80 & cost_acc <=120 & rpc_acc > 0 & rpc_acc < 80 | rpc_acc > 120 ~ "Cost Accuracy Within Range AND Poor RPC Accuracy",
cost_acc == 0 & rpc_acc == 0 ~ "No Data"
))
admin_final <- admin_final %>% mutate(Spend_Scenario = case_when(cost_acc > 0 & cost_acc < 80 ~ "Underspend",
cost_acc > 120 ~ "Overspend",
cost_acc >= 80 & cost_acc <=120 ~ "Cost Accuracy Within Range",
cost_acc == 0 ~ "No Data"))
admin_final <- admin_final %>% mutate(RPC_Scenario = case_when(rpc_acc > 120 | rpc_acc < 80 ~ "Poor RPC Accuracy",
rpc_acc >= 80 & rpc_acc <=120 ~ "RPC Accuracy OK",
rpc_acc == 0 ~ "No Data"))
admin_final <- admin_final[order(admin_final$pred_cost, decreasing = TRUE),]
admin_final$portfolionamepid = paste(admin_final$portfolio_name, admin_final$pid, sep=",")
return(admin_final)
}
})
})
passdata1 <-eventReactive(input$go,{
withProgress(message = 'Loading in progress...', value = 0, {
n <- 10
for (i in 1:n) {
incProgress(1/n, detail = paste(""))
Sys.sleep(.1)
}
if (!is.na(input$PortfolioNamePID != "All")) {
pf_1 <- passdata()[passdata()$portfolionamepid == input$PortfolioNamePID,]
dbtag1 <- as.character(pf_1$db_tag)
dc_choose1 <- as.character(pf_1$dcchoose)
pid1 <- as.character(pf_1$pid)
# dbtag1 = 'c11504'
# dc_choose1 = 'lon5'
# pid1= '1700000005'
date1 <- Sys.Date()-8
date2 <- Sys.Date()-1
date3 <- Sys.Date()-90
date4 <- Sys.Date()-1
date5 <- Sys.Date()-30
date6 <- Sys.Date()-1
admin_query_pid = sprintf("select pid,db_tag, userid as Client_Account, portfolio_id as Portfolio_Name, username as Client_Account_Name, status_code as Portfolio_Status from user_portfolios left join users using (userid) where pid = (%s) order by 1", pid1) #input[characterstring][3]
admin_pid = amo_db_get(query=admin_query_pid, db_tag = dbtag1, dc = dc_choose1, debug = TRUE)
admin_pidd <- admin_pid %>% mutate(portfolio_status = case_when(portfolio_status == 'a' ~ "Active",
portfolio_status == 'i' ~ "Inactive",
portfolio_status == 'z' ~ "Optimize",
portfolio_status == 'r' ~ "Deleted" ))
# admin_pidd <- admin_pidd %>%filter(portfolio_status == 'Optimize' | portfolio_status == 'Active')
# admin_pidd <- cache_portfolio(dbtag, dc_choose)
admin_final <- admin_pidd
admin_query_optv6v7 = sprintf(" select c.pid, p.handler as Opt_version from
(select pid,
COALESCE(model_type, 'o') as model_type,
COALESCE(modelid, 68) as modelid
from
user_portfolios
left join model_dispatch_portfolio_level using(pid)) as c
left join models p on (c.modelid=p.modelid)
where c.modelid in (81,68)
and pid = (%s)
order by 1;", pid1)
admin_optv6v7 = amo_db_get(query=admin_query_optv6v7, db_tag = dbtag1, dc = dc_choose1, debug = TRUE)
admin_final <- merge(admin_final, admin_optv6v7, by.x="pid", by.y = "pid", all.x = TRUE)
#DBA
admin_query_dba_computer = "with base_1 as(
select oa.pid, 'mobile' as device, coalesce(param_value, default_value)::boolean as value
from model_parameters mp
join optimizer_arguments oa using(paramid)
where param_name like 'enable_mobile_bid_adjustment'
),
base_2 as(
select oa.pid, 'mobile' as device, coalesce(param_value, default_value) as max_bid_adj
from model_parameters mp
join optimizer_arguments oa using(paramid)
where param_name like 'min_mobile_bid_adjustment'
),
base_3 as(
select oa.pid, 'mobile' as device, coalesce(param_value, default_value) as min_bid_adj
from model_parameters mp
join optimizer_arguments oa using(paramid)
where param_name like 'max_mobile_bid_adjustment'
),
optv7 as (
select pid, bid_adjustment_type as device, coalesce(aba.auto_adjust,false)::boolean as value, aba.max_bid_adjustment as max_bid_adj, aba.min_bid_adjustment as min_bid_adj
from auto_bid_adjustments aba
join auto_bid_adjustments_types abt using(bid_adjustment_type_id)
join model_dispatch_portfolio_level mp using(pid)
join models using(modelid)
where handler like 'optv7'
and bid_adjustment_type in ('computer','mobile','tablet')
)
select up.pid, d.device,
case
when d.device::text = 'mobile'
then
coalesce(optv7.value, coalesce(b1.value, false))
else
coalesce(optv7.value, false)
end as BA_enable_computer,
case
when d.device::text = 'mobile'
then
coalesce(optv7.max_bid_adj::text, coalesce(b2.max_bid_adj,'none'))
else
coalesce(optv7.max_bid_adj::text, 'none')
end as max_BA_computer,
case
when d.device::text = 'mobile'
then
coalesce(optv7.min_bid_adj::text, coalesce(b3.min_bid_adj,'none'))
else
coalesce(optv7.min_bid_adj::text, 'none')
end as min_BA_computer
from user_portfolios up
join (select * from ( values ('computer'),('tablet'), ('mobile') ) as foo(device) ) as d on (True)
left join base_1 b1 on ( d.device::text = b1.device::text and b1.pid = up.pid)
left join base_2 b2 on ( d.device::text = b2.device::text and b2.pid = up.pid)
left join base_3 b3 on ( d.device::text = b3.device::text and b3.pid = up.pid)
left join optv7 on (optv7.device::text = d.device::text and optv7.pid = up.pid)
where d.device='computer';"
admin_opt_dba_computer = amo_db_get(query=admin_query_dba_computer, db_tag = dbtag1, dc = dc_choose1, debug = TRUE)
admin_final <- merge(admin_final,admin_opt_dba_computer,by.x = "pid",by.y = "pid",all.x = TRUE)
admin_query_dba_tablet = "with base_1 as(
select oa.pid, 'mobile' as device, coalesce(param_value, default_value)::boolean as value
from model_parameters mp
join optimizer_arguments oa using(paramid)
where param_name like 'enable_mobile_bid_adjustment'
),
base_2 as(
select oa.pid, 'mobile' as device, coalesce(param_value, default_value) as max_bid_adj
from model_parameters mp
join optimizer_arguments oa using(paramid)
where param_name like 'min_mobile_bid_adjustment'
),
base_3 as(
select oa.pid, 'mobile' as device, coalesce(param_value, default_value) as min_bid_adj
from model_parameters mp
join optimizer_arguments oa using(paramid)
where param_name like 'max_mobile_bid_adjustment'
),
optv7 as (
select pid, bid_adjustment_type as device, coalesce(aba.auto_adjust,false)::boolean as value, aba.max_bid_adjustment as max_bid_adj, aba.min_bid_adjustment as min_bid_adj
from auto_bid_adjustments aba
join auto_bid_adjustments_types abt using(bid_adjustment_type_id)
join model_dispatch_portfolio_level mp using(pid)
join models using(modelid)
where handler like 'optv7'
and bid_adjustment_type in ('computer','mobile','tablet')
)
select up.pid, d.device,
case
when d.device::text = 'mobile'
then
coalesce(optv7.value, coalesce(b1.value, false))
else
coalesce(optv7.value, false)
end as BA_enable_tablet,
case
when d.device::text = 'mobile'
then
coalesce(optv7.max_bid_adj::text, coalesce(b2.max_bid_adj,'none'))
else
coalesce(optv7.max_bid_adj::text, 'none')
end as max_BA_tablet,
case
when d.device::text = 'mobile'
then
coalesce(optv7.min_bid_adj::text, coalesce(b3.min_bid_adj,'none'))
else
coalesce(optv7.min_bid_adj::text, 'none')
end as min_BA_tablet
from user_portfolios up
join (select * from ( values ('computer'),('tablet'), ('mobile') ) as foo(device) ) as d on (True)
left join base_1 b1 on ( d.device::text = b1.device::text and b1.pid = up.pid)
left join base_2 b2 on ( d.device::text = b2.device::text and b2.pid = up.pid)
left join base_3 b3 on ( d.device::text = b3.device::text and b3.pid = up.pid)
left join optv7 on (optv7.device::text = d.device::text and optv7.pid = up.pid)
where d.device='tablet';"
admin_opt_dba_tablet = amo_db_get(query=admin_query_dba_tablet, db_tag = dbtag1, dc = dc_choose1, debug = TRUE)
admin_final <- merge(admin_final,admin_opt_dba_tablet,by.x = "pid",by.y = "pid",all.x = TRUE)
admin_query_dba_mobile = "with base_1 as(
select oa.pid, 'mobile' as device, coalesce(param_value, default_value)::boolean as value
from model_parameters mp
join optimizer_arguments oa using(paramid)
where param_name like 'enable_mobile_bid_adjustment'
),
base_2 as(
select oa.pid, 'mobile' as device, coalesce(param_value, default_value) as max_bid_adj
from model_parameters mp
join optimizer_arguments oa using(paramid)
where param_name like 'min_mobile_bid_adjustment'
),
base_3 as(
select oa.pid, 'mobile' as device, coalesce(param_value, default_value) as min_bid_adj
from model_parameters mp
join optimizer_arguments oa using(paramid)
where param_name like 'max_mobile_bid_adjustment'
),
optv7 as (
select pid, bid_adjustment_type as device, coalesce(aba.auto_adjust,false)::boolean as value, aba.max_bid_adjustment as max_bid_adj, aba.min_bid_adjustment as min_bid_adj
from auto_bid_adjustments aba
join auto_bid_adjustments_types abt using(bid_adjustment_type_id)
join model_dispatch_portfolio_level mp using(pid)
join models using(modelid)
where handler like 'optv7'
and bid_adjustment_type in ('computer','mobile','tablet')
)
select up.pid, d.device,
case
when d.device::text = 'mobile'
then
coalesce(optv7.value, coalesce(b1.value, false))
else
coalesce(optv7.value, false)
end as BA_enable_mobile,
case
when d.device::text = 'mobile'
then
coalesce(optv7.max_bid_adj::text, coalesce(b2.max_bid_adj,'none'))
else
coalesce(optv7.max_bid_adj::text, 'none')
end as max_BA_mobile,
case
when d.device::text = 'mobile'
then
coalesce(optv7.min_bid_adj::text, coalesce(b3.min_bid_adj,'none'))
else
coalesce(optv7.min_bid_adj::text, 'none')
end as min_BA_mobile
from user_portfolios up
join (select * from ( values ('computer'),('tablet'), ('mobile') ) as foo(device) ) as d on (True)
left join base_1 b1 on ( d.device::text = b1.device::text and b1.pid = up.pid)
left join base_2 b2 on ( d.device::text = b2.device::text and b2.pid = up.pid)
left join base_3 b3 on ( d.device::text = b3.device::text and b3.pid = up.pid)
left join optv7 on (optv7.device::text = d.device::text and optv7.pid = up.pid)
where d.device='mobile';"
admin_opt_dba_mobile = amo_db_get(query=admin_query_dba_mobile, db_tag = dbtag1, dc = dc_choose1, debug = TRUE)
admin_final <- merge(admin_final,admin_opt_dba_mobile,by.x = "pid",by.y = "pid",all.x = TRUE)
admin_final <- admin_final %>% mutate(DEVfeature = case_when(ba_enable_mobile==TRUE | ba_enable_tablet==TRUE | ba_enable_computer==TRUE ~ "Enabled",
ba_enable_mobile==FALSE & ba_enable_tablet==FALSE & ba_enable_computer==FALSE ~ "Not_Enabled"))
#Daily Accuracy... daily level
admin_query_daily_accuracy = sprintf("SELECT p.pid as pid,TO_CHAR(date, 'YYYY-MM-DD') as date, extract(isodow from date) as dow,
predicted_clicks as pred_clicks,
actual_clicks as act_clicks,
(100.0 * actual_clicks / NULLIF( predicted_clicks, 0 ) )::NUMERIC(20,2) AS click_acc,
predicted_spend as pred_cost,
actual_spend as act_cost,
budget::NUMERIC(20,2) AS budget ,
(100.0 * actual_spend / NULLIF( predicted_spend, 0 ) )::NUMERIC(20,2) AS cost_acc,
predicted_rev as pred_rev,
crev as act_rev,
(100.0 * crev / NULLIF ( predicted_rev, 0 ) )::NUMERIC(20,2) AS rev_acc,
crev_d as act_rev_adj,
(100.0 * crev_d / NULLIF ( predicted_rev, 0 ) )::NUMERIC(20,2) AS rev_d_acc,
trev as act_trev,
(100.0 * trev / NULLIF ( predicted_rev, 0 ) )::NUMERIC(20,2) AS trev_acc,
predicted_impr as pred_impr,
actual_impr as act_impr,
(100.0 * actual_impr/ NULLIF( predicted_impr, 0) )::NUMERIC(20,2) AS impr_acc,
opt_runs,objid, last_ran,
(100.0 * (actual_spend/NULLIF( actual_clicks, 0 )) /NULLIF((predicted_spend/NULLIF( predicted_clicks, 0 )),0)) ::NUMERIC(20,2) AS cpc_acc,
(100.0 * actual_spend / NULLIF( budget, 0 ) )::NUMERIC(20,2) AS pacing_accuracy,
(100.0 * (crev/NULLIF( actual_clicks, 0 )) /NULLIF((predicted_rev/NULLIF( predicted_clicks, 0 )),0)) ::NUMERIC(20,2) AS rpc_acc
FROM ( SELECT (j.job_start_time AT TIME ZONE t.tz + INTERVAL '6 hours')::date as date,j.pid,
AVG(j.converged_clicks)::NUMERIC(20,2) AS predicted_clicks,
AVG(j.converged_impr)::NUMERIC(20,2) AS predicted_impr,
( AVG(j.converged_cost)/100.0 )::NUMERIC(20,2) AS predicted_spend,
AVG(j.converged_rev)::NUMERIC(40,4) AS predicted_rev,
AVG( CASE j.stid WHEN 5 THEN j.st_constraint ELSE j.budget_high END ) AS budget,
COUNT(1) as opt_runs,
MAX(j.job_start_time at time zone t.tz) as last_ran,
( array_agg(pov.objid ORDER BY j.job_start_time DESC))[1] as objid
FROM optimizer_jobs j
JOIN user_portfolios up ON (up.pid = j.pid)
LEFT JOIN portfolio_objectives_versions pov ON (up.pid = pov.pid and j.job_start_time between pov.xstart_time and pov.xend_time)
JOIN ( users u JOIN timezones t ON (t.tzid = u.tzid) ) t ON (up.userid = t.userid)
WHERE up.pid = (%1$s)
AND j.job_start_time >= DATE '%2$s' - INTERVAL '2 days'
AND j.job_end_time <= DATE '%3$s' + INTERVAL '2 days'
AND (j.job_start_time at time zone t.tz + interval '6 hours')::DATE >= DATE '%2$s'
AND (j.job_end_time at time zone t.tz + interval '6 hours')::DATE <= DATE '%3$s'
GROUP BY 1,j.pid ) as p
FULL JOIN ( SELECT CPH.pid,WH.date,
SUM(WH.clicks) AS actual_clicks,
SUM(WH.impressions) AS actual_impr,
( SUM(WH.cost)/100.0 )::NUMERIC(20,2) AS actual_spend
FROM day_sem_campaign_agg WH
JOIN cid_pid_history CPH ON (WH.cid = CPH.cid AND WH.date = CPH.date)
JOIN user_campaigns uc on (CPH.cid = uc.cid)
JOIN user_accts ua on (uc.user_acctid = ua.user_acctid)
WHERE CPH.pid = (%1$s)
AND ua.sid <> 77
AND WH.date >= '%2$s'
AND WH.date <= '%3$s'
GROUP BY WH.date,CPH.pid
UNION ALL
SELECT CPH.pid,WH.date,
SUM(WH.est_clicks) AS actual_clicks,
SUM(WH.est_impressions) AS actual_impr,
( SUM(WH.est_cost)/100.0 )::NUMERIC(20,2) AS actual_spend
FROM day_trig_campaign_agg WH
JOIN cid_pid_history CPH ON (WH.cid = CPH.cid AND WH.date = CPH.date)
JOIN user_campaigns uc on (CPH.cid = uc.cid)
JOIN user_accts ua on (uc.user_acctid = ua.user_acctid)
WHERE CPH.pid = (%1$s)
AND ua.sid = 77
AND WH.date >= '%2$s'
AND WH.date <= '%3$s'
GROUP BY WH.date,CPH.pid
) AS c USING (pid,date)
FULL JOIN ( SELECT CPH.pid, WH.date,
SUM( case when (device = 'm' AND mobile_weights_enabled = TRUE)
then coalesce(f.mobile_weight, f.weight)
else f.weight
end * (COALESCE(CT_VALUE_LAST, 0) + COALESCE(CTVT_VALUE_LAST, 0) * (1 - 0) + COALESCE(VTCT_VALUE_LAST, 0) * 0 + 0.4 * ( COALESCE (VT_VALUE_LAST, 0)) ) ) as crev,
(SUM( case when (device = 'm' AND mobile_weights_enabled = TRUE)
then coalesce(f.mobile_weight, f.weight)
else f.weight
end * ( COALESCE(CT_VALUE_LAST, 0) / COALESCE(D.click_thru_percentage, 1) + COALESCE(CTVT_VALUE_LAST, 0) * (1 - 0) / COALESCE(D.click_thru_percentage, 1) + COALESCE(VTCT_VALUE_LAST, 0) * 0 / COALESCE(D.view_thru_percentage, 1) + COALESCE (VT_VALUE_LAST, 0) * 0.4 / COALESCE(D.view_thru_percentage, 1) ) ))::NUMERIC(20,2) as crev_d
FROM day_revenue_campaign_agg WH
JOIN cid_pid_history CPH ON (WH.cid = CPH.cid AND WH.date = CPH.date)
JOIN portfolio_objectives PO ON (CPH.pid = PO.pid)
JOIN objective_function f ON (f.objid = PO.objid AND f.propertyid = WH.propertyid)
JOIN objectives ob ON ob.objid = f.objid
LEFT JOIN (
SELECT pid,
propertyid,
(now() at time zone tz)::date - delay AS date,
CASE
WHEN click_thru_percentage = 0
THEN 1
WHEN click_thru_percentage < 0.01
THEN 0.01
ELSE
LEAST(click_thru_percentage, 1)
END AS click_thru_percentage,
CASE
WHEN view_thru_percentage = 0
THEN 1
WHEN view_thru_percentage < 0.01
THEN 0.01
ELSE
LEAST(view_thru_percentage, 1)
END AS view_thru_percentage
FROM property_daily_delayed_rev_factors
JOIN user_portfolios using(pid)
JOIN users using(userid)
JOIN timezones using(tzid)
WHERE delay >= 0
AND pid = (%1$s)
) D on (wh.propertyid = D.propertyid and CPH.pid = D.pid and WH.date = D.date)
WHERE CPH.pid = (%1$s)
AND WH.date >= '%2$s'
AND WH.date <= '%3$s'
GROUP BY WH.date,CPH.pid ) AS cr USING (pid,date)
JOIN ( SELECT CPH.pid,WH.date,
SUM( case when (device = 'm' AND mobile_weights_enabled = TRUE) then coalesce(f.mobile_weight, f.weight) else f.weight end * (COALESCE(CT_VALUE_LAST, 0) + (1 - 0) * COALESCE(CTVT_VALUE_LAST, 0) +
0.4 * ( COALESCE (VT_VALUE_LAST, 0) + 0 * COALESCE(VTCT_VALUE_LAST, 0) ) ) ) as trev
FROM tday_revenue_campaign_agg WH
JOIN cid_pid_history CPH ON (WH.cid = CPH.cid AND WH.date = CPH.date)
JOIN portfolio_objectives PO ON (CPH.pid = PO.pid)
JOIN objective_function f ON (f.objid = PO.objid AND f.propertyid = WH.propertyid)
JOIN objectives ob ON ob.objid = f.objid
WHERE CPH.pid = (%1$s)
AND WH.date >= '%2$s'
AND WH.date <= '%3$s'
GROUP BY WH.date,CPH.pid ) AS tr USING (pid,date)
ORDER BY 1 DESC,2 ;", pid1,date1,date2)
#as.character(input$dateRange[1]),as.character(input$dateRange[2])
#,date1,date2,active_pid_or1,date1,date2,active_pid_or1,date1,date2,active_pid_or1,active_pid_or1,date1,date2,active_pid_or1,date1,date2)
# Daily accuracy section: the two queries below return one row per day for two
# separate comparison windows; an aggregate (one row per pid) variant follows.
# Per-day accuracy query for the first comparison window [date3, date4].
# Joins optimizer predictions (AVG over optimizer_jobs runs per local day,
# shifted by +6 hours) against actual click/impression/spend aggregates
# (day_sem_campaign_agg for non-trigger accounts, day_trig_campaign_agg for
# sid = 77) and weighted revenue aggregates (day_revenue_campaign_agg /
# tday_revenue_campaign_agg), producing per-day accuracy percentages:
# click_acc, cost_acc, rev_acc, rev_d_acc (delay-adjusted), trev_acc,
# impr_acc, cpc_acc, pacing_accuracy, rpc_acc.
# sprintf placeholders: %1$s = pid list (pid1), %2$s = start date (date3),
# %3$s = end date (date4). NOTE(review): dates are interpolated directly into
# SQL literals — assumed to come from trusted UI inputs, not free text; confirm.
admin_query_daily_accuracy_1 = sprintf("SELECT p.pid as pid,TO_CHAR(date, 'YYYY-MM-DD') as date, extract(isodow from date) as dow,
predicted_clicks as pred_clicks,
actual_clicks as act_clicks,
(100.0 * actual_clicks / NULLIF( predicted_clicks, 0 ) )::NUMERIC(20,2) AS click_acc,
predicted_spend as pred_cost,
actual_spend as act_cost,
budget::NUMERIC(20,2) AS budget ,
(100.0 * actual_spend / NULLIF( predicted_spend, 0 ) )::NUMERIC(20,2) AS cost_acc,
predicted_rev as pred_rev,
crev as act_rev,
(100.0 * crev / NULLIF ( predicted_rev, 0 ) )::NUMERIC(20,2) AS rev_acc,
crev_d as act_rev_adj,
(100.0 * crev_d / NULLIF ( predicted_rev, 0 ) )::NUMERIC(20,2) AS rev_d_acc,
trev as act_trev,
(100.0 * trev / NULLIF ( predicted_rev, 0 ) )::NUMERIC(20,2) AS trev_acc,
predicted_impr as pred_impr,
actual_impr as act_impr,
(100.0 * actual_impr/ NULLIF( predicted_impr, 0) )::NUMERIC(20,2) AS impr_acc,
opt_runs,objid, last_ran,
(100.0 * (actual_spend/NULLIF( actual_clicks, 0 )) /NULLIF((predicted_spend/NULLIF( predicted_clicks, 0 )),0)) ::NUMERIC(20,2) AS cpc_acc,
(100.0 * actual_spend / NULLIF( budget, 0 ) )::NUMERIC(20,2) AS pacing_accuracy,
(100.0 * (crev/NULLIF( actual_clicks, 0 )) /NULLIF((predicted_rev/NULLIF( predicted_clicks, 0 )),0)) ::NUMERIC(20,2) AS rpc_acc
FROM ( SELECT (j.job_start_time AT TIME ZONE t.tz + INTERVAL '6 hours')::date as date,j.pid,
AVG(j.converged_clicks)::NUMERIC(20,2) AS predicted_clicks,
AVG(j.converged_impr)::NUMERIC(20,2) AS predicted_impr,
( AVG(j.converged_cost)/100.0 )::NUMERIC(20,2) AS predicted_spend,
AVG(j.converged_rev)::NUMERIC(40,4) AS predicted_rev,
AVG( CASE j.stid WHEN 5 THEN j.st_constraint ELSE j.budget_high END ) AS budget,
COUNT(1) as opt_runs,
MAX(j.job_start_time at time zone t.tz) as last_ran,
( array_agg(pov.objid ORDER BY j.job_start_time DESC))[1] as objid
FROM optimizer_jobs j
JOIN user_portfolios up ON (up.pid = j.pid)
LEFT JOIN portfolio_objectives_versions pov ON (up.pid = pov.pid and j.job_start_time between pov.xstart_time and pov.xend_time)
JOIN ( users u JOIN timezones t ON (t.tzid = u.tzid) ) t ON (up.userid = t.userid)
WHERE up.pid = (%1$s)
AND j.job_start_time >= DATE '%2$s' - INTERVAL '2 days'
AND j.job_end_time <= DATE '%3$s' + INTERVAL '2 days'
AND (j.job_start_time at time zone t.tz + interval '6 hours')::DATE >= DATE '%2$s'
AND (j.job_end_time at time zone t.tz + interval '6 hours')::DATE <= DATE '%3$s'
GROUP BY 1,j.pid ) as p
FULL JOIN ( SELECT CPH.pid,WH.date,
SUM(WH.clicks) AS actual_clicks,
SUM(WH.impressions) AS actual_impr,
( SUM(WH.cost)/100.0 )::NUMERIC(20,2) AS actual_spend
FROM day_sem_campaign_agg WH
JOIN cid_pid_history CPH ON (WH.cid = CPH.cid AND WH.date = CPH.date)
JOIN user_campaigns uc on (CPH.cid = uc.cid)
JOIN user_accts ua on (uc.user_acctid = ua.user_acctid)
WHERE CPH.pid = (%1$s)
AND ua.sid <> 77
AND WH.date >= '%2$s'
AND WH.date <= '%3$s'
GROUP BY WH.date,CPH.pid
UNION ALL
SELECT CPH.pid,WH.date,
SUM(WH.est_clicks) AS actual_clicks,
SUM(WH.est_impressions) AS actual_impr,
( SUM(WH.est_cost)/100.0 )::NUMERIC(20,2) AS actual_spend
FROM day_trig_campaign_agg WH
JOIN cid_pid_history CPH ON (WH.cid = CPH.cid AND WH.date = CPH.date)
JOIN user_campaigns uc on (CPH.cid = uc.cid)
JOIN user_accts ua on (uc.user_acctid = ua.user_acctid)
WHERE CPH.pid = (%1$s)
AND ua.sid = 77
AND WH.date >= '%2$s'
AND WH.date <= '%3$s'
GROUP BY WH.date,CPH.pid
) AS c USING (date,pid)
FULL JOIN ( SELECT CPH.pid, WH.date,
SUM( case when (device = 'm' AND mobile_weights_enabled = TRUE)
then coalesce(f.mobile_weight, f.weight)
else f.weight
end * (COALESCE(CT_VALUE_LAST, 0) + COALESCE(CTVT_VALUE_LAST, 0) * (1 - 0) + COALESCE(VTCT_VALUE_LAST, 0) * 0 + 0.4 * ( COALESCE (VT_VALUE_LAST, 0)) ) ) as crev,
(SUM( case when (device = 'm' AND mobile_weights_enabled = TRUE)
then coalesce(f.mobile_weight, f.weight)
else f.weight
end * ( COALESCE(CT_VALUE_LAST, 0) / COALESCE(D.click_thru_percentage, 1) + COALESCE(CTVT_VALUE_LAST, 0) * (1 - 0) / COALESCE(D.click_thru_percentage, 1) + COALESCE(VTCT_VALUE_LAST, 0) * 0 / COALESCE(D.view_thru_percentage, 1) + COALESCE (VT_VALUE_LAST, 0) * 0.4 / COALESCE(D.view_thru_percentage, 1) ) ))::NUMERIC(20,2) as crev_d
FROM day_revenue_campaign_agg WH
JOIN cid_pid_history CPH ON (WH.cid = CPH.cid AND WH.date = CPH.date)
JOIN portfolio_objectives PO ON (CPH.pid = PO.pid)
JOIN objective_function f ON (f.objid = PO.objid AND f.propertyid = WH.propertyid)
JOIN objectives ob ON ob.objid = f.objid
LEFT JOIN (
SELECT pid,
propertyid,
(now() at time zone tz)::date - delay AS date,
CASE
WHEN click_thru_percentage = 0
THEN 1
WHEN click_thru_percentage < 0.01
THEN 0.01
ELSE
LEAST(click_thru_percentage, 1)
END AS click_thru_percentage,
CASE
WHEN view_thru_percentage = 0
THEN 1
WHEN view_thru_percentage < 0.01
THEN 0.01
ELSE
LEAST(view_thru_percentage, 1)
END AS view_thru_percentage
FROM property_daily_delayed_rev_factors
JOIN user_portfolios using(pid)
JOIN users using(userid)
JOIN timezones using(tzid)
WHERE delay >= 0
AND pid = (%1$s)
) D on (wh.propertyid = D.propertyid and CPH.pid = D.pid and WH.date = D.date)
WHERE CPH.pid = (%1$s)
AND WH.date >= '%2$s'
AND WH.date <= '%3$s'
GROUP BY WH.date,CPH.pid ) AS cr USING (date,pid)
JOIN ( SELECT CPH.pid,WH.date,
SUM( case when (device = 'm' AND mobile_weights_enabled = TRUE) then coalesce(f.mobile_weight, f.weight) else f.weight end * (COALESCE(CT_VALUE_LAST, 0) + (1 - 0) * COALESCE(CTVT_VALUE_LAST, 0) +
0.4 * ( COALESCE (VT_VALUE_LAST, 0) + 0 * COALESCE(VTCT_VALUE_LAST, 0) ) ) ) as trev
FROM tday_revenue_campaign_agg WH
JOIN cid_pid_history CPH ON (WH.cid = CPH.cid AND WH.date = CPH.date)
JOIN portfolio_objectives PO ON (CPH.pid = PO.pid)
JOIN objective_function f ON (f.objid = PO.objid AND f.propertyid = WH.propertyid)
JOIN objectives ob ON ob.objid = f.objid
WHERE CPH.pid = (%1$s)
AND WH.date >= '%2$s'
AND WH.date <= '%3$s'
GROUP BY WH.date,CPH.pid ) AS tr USING (date,pid)
ORDER BY 1 DESC,2 ;", pid1,date3,date4)
# Per-day accuracy query for the second comparison window [date5, date6].
# Identical SQL to admin_query_daily_accuracy_1 above — only the interpolated
# date arguments differ (date5/date6 instead of date3/date4).
# sprintf placeholders: %1$s = pid list (pid1), %2$s = start date, %3$s = end date.
# NOTE(review): the query text is duplicated verbatim three times in this file;
# consider extracting a single template string so future SQL edits stay in sync.
admin_query_daily_accuracy_2 = sprintf("SELECT p.pid as pid,TO_CHAR(date, 'YYYY-MM-DD') as date, extract(isodow from date) as dow,
predicted_clicks as pred_clicks,
actual_clicks as act_clicks,
(100.0 * actual_clicks / NULLIF( predicted_clicks, 0 ) )::NUMERIC(20,2) AS click_acc,
predicted_spend as pred_cost,
actual_spend as act_cost,
budget::NUMERIC(20,2) AS budget ,
(100.0 * actual_spend / NULLIF( predicted_spend, 0 ) )::NUMERIC(20,2) AS cost_acc,
predicted_rev as pred_rev,
crev as act_rev,
(100.0 * crev / NULLIF ( predicted_rev, 0 ) )::NUMERIC(20,2) AS rev_acc,
crev_d as act_rev_adj,
(100.0 * crev_d / NULLIF ( predicted_rev, 0 ) )::NUMERIC(20,2) AS rev_d_acc,
trev as act_trev,
(100.0 * trev / NULLIF ( predicted_rev, 0 ) )::NUMERIC(20,2) AS trev_acc,
predicted_impr as pred_impr,
actual_impr as act_impr,
(100.0 * actual_impr/ NULLIF( predicted_impr, 0) )::NUMERIC(20,2) AS impr_acc,
opt_runs,objid, last_ran,
(100.0 * (actual_spend/NULLIF( actual_clicks, 0 )) /NULLIF((predicted_spend/NULLIF( predicted_clicks, 0 )),0)) ::NUMERIC(20,2) AS cpc_acc,
(100.0 * actual_spend / NULLIF( budget, 0 ) )::NUMERIC(20,2) AS pacing_accuracy,
(100.0 * (crev/NULLIF( actual_clicks, 0 )) /NULLIF((predicted_rev/NULLIF( predicted_clicks, 0 )),0)) ::NUMERIC(20,2) AS rpc_acc
FROM ( SELECT (j.job_start_time AT TIME ZONE t.tz + INTERVAL '6 hours')::date as date,j.pid,
AVG(j.converged_clicks)::NUMERIC(20,2) AS predicted_clicks,
AVG(j.converged_impr)::NUMERIC(20,2) AS predicted_impr,
( AVG(j.converged_cost)/100.0 )::NUMERIC(20,2) AS predicted_spend,
AVG(j.converged_rev)::NUMERIC(40,4) AS predicted_rev,
AVG( CASE j.stid WHEN 5 THEN j.st_constraint ELSE j.budget_high END ) AS budget,
COUNT(1) as opt_runs,
MAX(j.job_start_time at time zone t.tz) as last_ran,
( array_agg(pov.objid ORDER BY j.job_start_time DESC))[1] as objid
FROM optimizer_jobs j
JOIN user_portfolios up ON (up.pid = j.pid)
LEFT JOIN portfolio_objectives_versions pov ON (up.pid = pov.pid and j.job_start_time between pov.xstart_time and pov.xend_time)
JOIN ( users u JOIN timezones t ON (t.tzid = u.tzid) ) t ON (up.userid = t.userid)
WHERE up.pid = (%1$s)
AND j.job_start_time >= DATE '%2$s' - INTERVAL '2 days'
AND j.job_end_time <= DATE '%3$s' + INTERVAL '2 days'
AND (j.job_start_time at time zone t.tz + interval '6 hours')::DATE >= DATE '%2$s'
AND (j.job_end_time at time zone t.tz + interval '6 hours')::DATE <= DATE '%3$s'
GROUP BY 1,j.pid ) as p
FULL JOIN ( SELECT CPH.pid,WH.date,
SUM(WH.clicks) AS actual_clicks,
SUM(WH.impressions) AS actual_impr,
( SUM(WH.cost)/100.0 )::NUMERIC(20,2) AS actual_spend
FROM day_sem_campaign_agg WH
JOIN cid_pid_history CPH ON (WH.cid = CPH.cid AND WH.date = CPH.date)
JOIN user_campaigns uc on (CPH.cid = uc.cid)
JOIN user_accts ua on (uc.user_acctid = ua.user_acctid)
WHERE CPH.pid = (%1$s)
AND ua.sid <> 77
AND WH.date >= '%2$s'
AND WH.date <= '%3$s'
GROUP BY WH.date,CPH.pid
UNION ALL
SELECT CPH.pid,WH.date,
SUM(WH.est_clicks) AS actual_clicks,
SUM(WH.est_impressions) AS actual_impr,
( SUM(WH.est_cost)/100.0 )::NUMERIC(20,2) AS actual_spend
FROM day_trig_campaign_agg WH
JOIN cid_pid_history CPH ON (WH.cid = CPH.cid AND WH.date = CPH.date)
JOIN user_campaigns uc on (CPH.cid = uc.cid)
JOIN user_accts ua on (uc.user_acctid = ua.user_acctid)
WHERE CPH.pid = (%1$s)
AND ua.sid = 77
AND WH.date >= '%2$s'
AND WH.date <= '%3$s'
GROUP BY WH.date,CPH.pid
) AS c USING (date,pid)
FULL JOIN ( SELECT CPH.pid, WH.date,
SUM( case when (device = 'm' AND mobile_weights_enabled = TRUE)
then coalesce(f.mobile_weight, f.weight)
else f.weight
end * (COALESCE(CT_VALUE_LAST, 0) + COALESCE(CTVT_VALUE_LAST, 0) * (1 - 0) + COALESCE(VTCT_VALUE_LAST, 0) * 0 + 0.4 * ( COALESCE (VT_VALUE_LAST, 0)) ) ) as crev,
(SUM( case when (device = 'm' AND mobile_weights_enabled = TRUE)
then coalesce(f.mobile_weight, f.weight)
else f.weight
end * ( COALESCE(CT_VALUE_LAST, 0) / COALESCE(D.click_thru_percentage, 1) + COALESCE(CTVT_VALUE_LAST, 0) * (1 - 0) / COALESCE(D.click_thru_percentage, 1) + COALESCE(VTCT_VALUE_LAST, 0) * 0 / COALESCE(D.view_thru_percentage, 1) + COALESCE (VT_VALUE_LAST, 0) * 0.4 / COALESCE(D.view_thru_percentage, 1) ) ))::NUMERIC(20,2) as crev_d
FROM day_revenue_campaign_agg WH
JOIN cid_pid_history CPH ON (WH.cid = CPH.cid AND WH.date = CPH.date)
JOIN portfolio_objectives PO ON (CPH.pid = PO.pid)
JOIN objective_function f ON (f.objid = PO.objid AND f.propertyid = WH.propertyid)
JOIN objectives ob ON ob.objid = f.objid
LEFT JOIN (
SELECT pid,
propertyid,
(now() at time zone tz)::date - delay AS date,
CASE
WHEN click_thru_percentage = 0
THEN 1
WHEN click_thru_percentage < 0.01
THEN 0.01
ELSE
LEAST(click_thru_percentage, 1)
END AS click_thru_percentage,
CASE
WHEN view_thru_percentage = 0
THEN 1
WHEN view_thru_percentage < 0.01
THEN 0.01
ELSE
LEAST(view_thru_percentage, 1)
END AS view_thru_percentage
FROM property_daily_delayed_rev_factors
JOIN user_portfolios using(pid)
JOIN users using(userid)
JOIN timezones using(tzid)
WHERE delay >= 0
AND pid = (%1$s)
) D on (wh.propertyid = D.propertyid and CPH.pid = D.pid and WH.date = D.date)
WHERE CPH.pid = (%1$s)
AND WH.date >= '%2$s'
AND WH.date <= '%3$s'
GROUP BY WH.date,CPH.pid ) AS cr USING (date,pid)
JOIN ( SELECT CPH.pid,WH.date,
SUM( case when (device = 'm' AND mobile_weights_enabled = TRUE) then coalesce(f.mobile_weight, f.weight) else f.weight end * (COALESCE(CT_VALUE_LAST, 0) + (1 - 0) * COALESCE(CTVT_VALUE_LAST, 0) +
0.4 * ( COALESCE (VT_VALUE_LAST, 0) + 0 * COALESCE(VTCT_VALUE_LAST, 0) ) ) ) as trev
FROM tday_revenue_campaign_agg WH
JOIN cid_pid_history CPH ON (WH.cid = CPH.cid AND WH.date = CPH.date)
JOIN portfolio_objectives PO ON (CPH.pid = PO.pid)
JOIN objective_function f ON (f.objid = PO.objid AND f.propertyid = WH.propertyid)
JOIN objectives ob ON ob.objid = f.objid
WHERE CPH.pid = (%1$s)
AND WH.date >= '%2$s'
AND WH.date <= '%3$s'
GROUP BY WH.date,CPH.pid ) AS tr USING (date,pid)
ORDER BY 1 DESC,2 ;", pid1,date5,date6)
# Aggregate accuracy query over [date1, date2]: same prediction/actual/revenue
# subqueries as the per-day queries above, but SUMmed across days to yield one
# row per pid (GROUP BY 1). Produces window-level click/cost/rev/impr/cpc/
# pacing/rpc accuracy percentages; merged into admin_final further below.
# sprintf placeholders: %1$s = pid list (pid1), %2$s = start date (date1),
# %3$s = end date (date2).
# NOTE(review): cpc_acc / rpc_acc here SUM per-day ratios before dividing, so
# they are not the same quantity as a ratio of window totals — confirm intended.
admin_query_daily_accuracy_agg = sprintf("SELECT p.pid as pid,
SUM(predicted_clicks) as pred_clicks,
SUM(actual_clicks) as act_clicks,
(100.0 * SUM(actual_clicks )/ SUM(NULLIF( predicted_clicks, 0 )) )::NUMERIC(20,2) AS click_acc,
SUM(predicted_spend) as pred_cost,
SUM(actual_spend) as act_cost,
SUM(budget)::NUMERIC(20,2) AS budget ,
(100.0 * SUM(actual_spend) / SUM(NULLIF( predicted_spend, 0 )) )::NUMERIC(20,2) AS cost_acc,
SUM(predicted_rev) as pred_rev,
SUM(crev) as act_rev,
(100.0 * SUM(crev )/ SUM(NULLIF ( predicted_rev, 0 ) ))::NUMERIC(20,2) AS rev_acc,
SUM(predicted_impr) as pred_impr,
SUM(actual_impr) as act_impr,
(100.0 * SUM(actual_impr)/SUM(NULLIF( predicted_impr, 0) ))::NUMERIC(20,2) AS impr_acc,
(100.0 * SUM((actual_spend/NULLIF( actual_clicks, 0 ))) /SUM(NULLIF((predicted_spend/NULLIF( predicted_clicks, 0 )),0))) ::NUMERIC(20,2) AS cpc_acc,
(100.0 *SUM(actual_spend )/ SUM(NULLIF( budget, 0 ) ))::NUMERIC(20,2) AS pacing_acc,
(100.0 * SUM(crev/NULLIF( actual_clicks, 0 )) /SUM(NULLIF((predicted_rev/NULLIF( predicted_clicks, 0 )),0))) ::NUMERIC(20,2) AS rpc_acc
FROM ( SELECT (j.job_start_time AT TIME ZONE t.tz + INTERVAL '6 hours')::date as date,j.pid,
AVG(j.converged_clicks)::NUMERIC(20,2) AS predicted_clicks,
AVG(j.converged_impr)::NUMERIC(20,2) AS predicted_impr,
( AVG(j.converged_cost)/100.0 )::NUMERIC(20,2) AS predicted_spend,
AVG(j.converged_rev)::NUMERIC(40,4) AS predicted_rev,
AVG( CASE j.stid WHEN 5 THEN j.st_constraint ELSE j.budget_high END ) AS budget,
COUNT(1) as opt_runs,
MAX(j.job_start_time at time zone t.tz) as last_ran,
( array_agg(pov.objid ORDER BY j.job_start_time DESC))[1] as objid
FROM optimizer_jobs j
JOIN user_portfolios up ON (up.pid = j.pid)
LEFT JOIN portfolio_objectives_versions pov ON (up.pid = pov.pid and j.job_start_time between pov.xstart_time and pov.xend_time)
JOIN ( users u JOIN timezones t ON (t.tzid = u.tzid) ) t ON (up.userid = t.userid)
WHERE up.pid = (%1$s)
AND j.job_start_time >= DATE '%2$s' - INTERVAL '2 days'
AND j.job_end_time <= DATE '%3$s' + INTERVAL '2 days'
AND (j.job_start_time at time zone t.tz + interval '6 hours')::DATE >= DATE '%2$s'
AND (j.job_end_time at time zone t.tz + interval '6 hours')::DATE <= DATE '%3$s'
GROUP BY 1,j.pid ) as p
FULL JOIN ( SELECT CPH.pid,WH.date,
SUM(WH.clicks) AS actual_clicks,
SUM(WH.impressions) AS actual_impr,
( SUM(WH.cost)/100.0 )::NUMERIC(20,2) AS actual_spend
FROM day_sem_campaign_agg WH
JOIN cid_pid_history CPH ON (WH.cid = CPH.cid AND WH.date = CPH.date)
JOIN user_campaigns uc on (CPH.cid = uc.cid)
JOIN user_accts ua on (uc.user_acctid = ua.user_acctid)
WHERE CPH.pid = (%1$s)
AND ua.sid <> 77
AND WH.date >= '%2$s'
AND WH.date <= '%3$s'
GROUP BY WH.date,CPH.pid
UNION ALL
SELECT CPH.pid,WH.date,
SUM(WH.est_clicks) AS actual_clicks,
SUM(WH.est_impressions) AS actual_impr,
( SUM(WH.est_cost)/100.0 )::NUMERIC(20,2) AS actual_spend
FROM day_trig_campaign_agg WH
JOIN cid_pid_history CPH ON (WH.cid = CPH.cid AND WH.date = CPH.date)
JOIN user_campaigns uc on (CPH.cid = uc.cid)
JOIN user_accts ua on (uc.user_acctid = ua.user_acctid)
WHERE CPH.pid = (%1$s)
AND ua.sid = 77
AND WH.date >= '%2$s'
AND WH.date <= '%3$s'
GROUP BY WH.date,CPH.pid
) AS c USING (pid,date)
FULL JOIN ( SELECT CPH.pid, WH.date,
SUM( case when (device = 'm' AND mobile_weights_enabled = TRUE)
then coalesce(f.mobile_weight, f.weight)
else f.weight
end * (COALESCE(CT_VALUE_LAST, 0) + COALESCE(CTVT_VALUE_LAST, 0) * (1 - 0) + COALESCE(VTCT_VALUE_LAST, 0) * 0 + 0.4 * ( COALESCE (VT_VALUE_LAST, 0)) ) ) as crev,
(SUM( case when (device = 'm' AND mobile_weights_enabled = TRUE)
then coalesce(f.mobile_weight, f.weight)
else f.weight
end * ( COALESCE(CT_VALUE_LAST, 0) / COALESCE(D.click_thru_percentage, 1) + COALESCE(CTVT_VALUE_LAST, 0) * (1 - 0) / COALESCE(D.click_thru_percentage, 1) + COALESCE(VTCT_VALUE_LAST, 0) * 0 / COALESCE(D.view_thru_percentage, 1) + COALESCE (VT_VALUE_LAST, 0) * 0.4 / COALESCE(D.view_thru_percentage, 1) ) ))::NUMERIC(20,2) as crev_d
FROM day_revenue_campaign_agg WH
JOIN cid_pid_history CPH ON (WH.cid = CPH.cid AND WH.date = CPH.date)
JOIN portfolio_objectives PO ON (CPH.pid = PO.pid)
JOIN objective_function f ON (f.objid = PO.objid AND f.propertyid = WH.propertyid)
JOIN objectives ob ON ob.objid = f.objid
LEFT JOIN (
SELECT pid,
propertyid,
(now() at time zone tz)::date - delay AS date,
CASE
WHEN click_thru_percentage = 0
THEN 1
WHEN click_thru_percentage < 0.01
THEN 0.01
ELSE
LEAST(click_thru_percentage, 1)
END AS click_thru_percentage,
CASE
WHEN view_thru_percentage = 0
THEN 1
WHEN view_thru_percentage < 0.01
THEN 0.01
ELSE
LEAST(view_thru_percentage, 1)
END AS view_thru_percentage
FROM property_daily_delayed_rev_factors
JOIN user_portfolios using(pid)
JOIN users using(userid)
JOIN timezones using(tzid)
WHERE delay >= 0
AND pid = (%1$s)
) D on (wh.propertyid = D.propertyid and CPH.pid = D.pid and WH.date = D.date)
WHERE CPH.pid = (%1$s)
AND WH.date >= '%2$s'
AND WH.date <= '%3$s'
GROUP BY WH.date,CPH.pid ) AS cr USING (pid,date)
JOIN ( SELECT CPH.pid,WH.date,
SUM( case when (device = 'm' AND mobile_weights_enabled = TRUE) then coalesce(f.mobile_weight, f.weight) else f.weight end * (COALESCE(CT_VALUE_LAST, 0) + (1 - 0) * COALESCE(CTVT_VALUE_LAST, 0) + 0.4 * ( COALESCE (VT_VALUE_LAST, 0) + 0 * COALESCE(VTCT_VALUE_LAST, 0) ) ) ) as trev
FROM tday_revenue_campaign_agg WH
JOIN cid_pid_history CPH ON (WH.cid = CPH.cid AND WH.date = CPH.date)
JOIN portfolio_objectives PO ON (CPH.pid = PO.pid)
JOIN objective_function f ON (f.objid = PO.objid AND f.propertyid = WH.propertyid)
JOIN objectives ob ON ob.objid = f.objid
WHERE CPH.pid = (%1$s)
AND WH.date >= '%2$s'
AND WH.date <= '%3$s'
GROUP BY WH.date,CPH.pid ) AS tr USING (pid,date)
GROUP BY 1
ORDER BY 1 DESC,2 ;", pid1,date1,date2)
#,date1,date2,active_pid_or1,date1,date2,active_pid_or1,date1,date2,active_pid_or1,active_pid_or1,date1,date2,active_pid_or1,date1,date2)
#
#Daily Accuracy
# Fetch the three per-day accuracy result sets for the selected portfolio(s):
#   admin_daily   - window [date1, date2]
#   admin_daily_2 - window [date3, date4]
#   admin_daily_3 - window [date5, date6]
# NOTE(review): amo_db_get is a project helper; debug = TRUE echoes the query.
# Idiom fix: use `<-` for assignment instead of `=`.
admin_daily   <- amo_db_get(query = admin_query_daily_accuracy,   db_tag = dbtag1, dc = dc_choose1, debug = TRUE)
admin_daily_2 <- amo_db_get(query = admin_query_daily_accuracy_1, db_tag = dbtag1, dc = dc_choose1, debug = TRUE)
admin_daily_3 <- amo_db_get(query = admin_query_daily_accuracy_2, db_tag = dbtag1, dc = dc_choose1, debug = TRUE)
# admin_daily_v21 <- merge(admin_pidd, admin_daily_2, by.x = "pid",by.y = "pid", all.x = TRUE)
# admin_daily_v212 <- admin_daily_v21[admin_daily_v21$portfolio_name == "Grainger PLA",]
# data <- admin_daily_v212[order(admin_daily_v212$date, decreasing = TRUE),]
# Build the per-day "views" used for plotting/diagnostics: each one selects a
# subset of accuracy columns and left-joins portfolio metadata (admin_pidd).
# Factored out of five copy-pasted select+merge pairs; merge(by = "pid") is
# equivalent to the original `by.x = "pid", by.y = "pid"`, and all.x = TRUE
# keeps portfolios that have no daily rows.
with_portfolio_names <- function(daily_df, ...) {
  # ... are tidyselect column names forwarded to dplyr::select.
  cols <- daily_df %>% select(pid, date, ...)
  merge(admin_pidd, cols, by = "pid", all.x = TRUE)
}
# Window [date3, date4] views (admin_daily_2):
admin_daily_v11 <- with_portfolio_names(admin_daily_2, cost_acc, rpc_acc)
admin_daily_v3  <- with_portfolio_names(admin_daily_2, click_acc, cpc_acc)
admin_daily_v12 <- with_portfolio_names(admin_daily_2, act_cost, act_rev, pred_cost, pred_rev)
# Window [date5, date6] views (admin_daily_3):
admin_daily_v31 <- with_portfolio_names(admin_daily_3, cost_acc, rpc_acc)
admin_daily_v32 <- with_portfolio_names(admin_daily_3, click_acc, cpc_acc)
#
# admin_daily_v12[is.na(admin_daily_v12)] <- 0
# admin_daily_v12 <- admin_daily_v12[admin_daily_v12$portfolio_name == "Grainger PLA",]
#
# library(lubridate)
# admin_daily_v12$date <- as.Date(admin_daily_v12$date)
# admin_daily_v12$dow <- wday(admin_daily_v12$date, label=TRUE)
# #
# admin_daily_v13 <- admin_daily_v12 %>% select(portfolio_name, dow, act_cost, act_rev, pred_cost, pred_rev)
#
#
# # p <-ggplot(admin_daily_v14, aes(x=dow, y=roas_acc)) +geom_bar(stat = "identity")
# # print(p)
# #
# # admin_daily_v13 <- admin_daily_v12 %>% select(portfolio_name, dow, roas_acc)
#
# admin_daily_v14 <- aggregate(.~portfolio_name+dow,admin_daily_v13,sum)
#
# data <- admin_daily_v14 %>% group_by(portfolio_name) %>% mutate(rank = rank(desc(act_cost)), total_cost= sum(act_cost)) %>% arrange(rank)
# data <- data%>% group_by(portfolio_name) %>% mutate(spendperc=100*act_cost/total_cost)
# data <- data %>% mutate_at(vars(spendperc), funs(round(., 2)))
#
# admin_daily_v14 <- admin_daily_v14 %>% mutate(act_roas=act_rev/act_cost)
# admin_daily_v14 <- admin_daily_v14 %>% mutate(pred_roas=pred_rev/pred_cost)
# admin_daily_v14 <- admin_daily_v14 %>% mutate(roas_acc=act_roas/pred_roas*100)
# admin_daily_v15 <- admin_daily_v14 %>% select(portfolio_name,act_roas,dow)
# admin_v6 <- melt(admin_daily_v15)
#
# ggplot(admin_v6, aes(x=dow, y=value, fill=variable)) +
# geom_bar(stat='identity', position='dodge2')+ facet_grid(. ~ portfolio_name)
#
# admin_daily_v14$Variance_di <- var(admin_daily_v14$act_roas)
# admin_daily_v14$avg_di <- mean(admin_daily_v14$act_roas)
# admin_daily_v14$dispersion_index <- admin_daily_v14$Variance_di/admin_daily_v14$avg_di
#
# admin_daily_v14$Variance_er <- var(admin_daily_v14$roas_acc)
# admin_daily_v14$avg_er <- mean(admin_daily_v14$roas_acc)
# admin_daily_v14$error_rate <- admin_daily_v14$Variance_er/admin_daily_v14$avg_er
#
# di_dow <- unique(admin_daily_v14$dispersion_index)
# er_dow <- unique(admin_daily_v14$error_rate)
#
# admin_final <- admin_final[admin_final$portfolio_name == "Grainger PLA RLSA",]
#
# admin_final$di <- di
# admin_final$er <- er
# #
# paste("click coverage:", admin_final$di)
# # #
# admin_final <- admin_final %>% mutate(reco_dow = case_when(di > 0.2 & DOWfeature == "Not_Enabled" & Spend_Scenario == "Overspend" ~ "Enable_DOW",
# di > 0.2 & DOWfeature == "Not_Enabled" & Spend_Scenario == "Underspend"~ "Enable_DOW",
# di > 0.2 & DOWfeature == "Not_Enabled" & RPC_Scenario =="Poor_RPC" ~ "Enable_DOW",
# Spend_Scenario =="Poor_RPC" & di < 0.2 & DOWfeature == "Enabled" ~ "Disable_DOW",
# Spend_Scenario =="Overspend" & di < 0.2 & DOWfeature == "Enabled" ~ "Disable_DOW",
# Spend_Scenario =="Underspend" & di < 0.2 & DOWfeature == "Enabled" ~ "Disable_DOW",
# Spend_Scenario == "Overspend" & di > 0.2 & DOWfeature == "Enabled" & er > 0.2 & click_coverage > 0.1 ~ "Further_investigate",
# Spend_Scenario == "Overspend" & di > 0.2 & DOWfeature == "Enabled" & er > 0.2 & click_coverage < 0.1 ~ "Further_investigate",
# Spend_Scenario == "Overspend" & di > 0.2 & DOWfeature == "Enabled" & er > 0.2 & revenue_coverage < 0.05 ~ "Further_investigate",
# Spend_Scenario == "Overspend" & di > 0.2 & DOWfeature == "Enabled" & er > 0.2 & revenue_coverage >0.05 ~ "Further_investigate",
# Spend_Scenario == "Underspend" & di > 0.2 & DOWfeature == "Enabled" & er > 0.2 & revenue_coverage > 0.05 ~ "Further_investigate",
# Spend_Scenario == "Underspend" & di > 0.2 & DOWfeature == "Enabled" & er > 0.2 & revenue_coverage < 0.05 ~ "Further_investigate",
# Spend_Scenario == "Underspend" & di > 0.2 & DOWfeature == "Enabled" & er > 0.2 & click_coverage < 0.1 ~ "Further_investigate",
# Spend_Scenario == "Underspend" & di > 0.2 & DOWfeature == "Enabled" & er > 0.2 & click_coverage > 0.1 ~ "Further_investigate",
# RPC_Scenario == "Poor_RPC" & di > 0.2 & DOWfeature == "Enabled" & er > 0.2 & revenue_coverage > 0.05 ~ "Further_investigate",
# RPC_Scenario == "Poor_RPC" & di > 0.2 & DOWfeature == "Enabled" & er > 0.2 & revenue_coverage < 0.05 ~ "Further_investigate",
# RPC_Scenario == "Poor_RPC" & di > 0.2 & DOWfeature == "Enabled" & er > 0.2 & click_coverage < 0.1 ~ "Further_investigate",
# RPC_Scenario == "Poor_RPC" & di > 0.2 & DOWfeature == "Enabled" & er > 0.2 & click_coverage > 0.1 ~ "Further_investigate",
# TRUE ~ DOWfeature))
# admin_final$DOWfeature[1] <- "Not_Enabled"
#
# admin_daily_v12 <- merge(admin_pidd, admin_daily_v11, by.x = "pid",by.y = "pid", all.x = TRUE)
# unique(admin_daily_v11$portfolio_name)
# # #
# data_1 <- admin_daily_v11[admin_daily_v11$portfolio_name == "Grainger PLA",]
# data_1 <- admin_daily_v11 %>% select(portfolio_name, date, cost_acc, rpc_acc)
#
# y_data <- data_1 %>% select(date,cost_acc, rpc_acc)
#
# y_data <- y_data %>%arrange(desc(date))
#
# y_data <- tail(y_data, -7)
#
#
# y_data[is.na(y_data)] <- 0
#
#
#
# library(changepoint)
#
# cptm_CP <- cpt.mean(y_data$cost_acc, penalty='MBIC',pen.value=0, method='BinSeg',
# test.stat="Normal", minseglen=7, class=TRUE)
# cptm_CP
#
# plot(cptm_CP)
# abline(v=y_data[max(cpts_CP),1], col="blue")
#
# cpts_CP <- cpts(cptm_CP) # change point time points
# cpts_CP
#
# cost_change <- ifelse(max(cpts_CP)==0,0,(Sys.Date()-(as.Date(y_data[max(cpts_CP),1]))))
#
# c<- ifelse(max(cpts_CP)==0,0,(Sys.Date()-(as.Date(y_data[max(cpts_CP),1]))))
#
# library(changepoint)
#
# cptm_CP_1 <- cpt.mean(y_data$rpc_acc, penalty='MBIC',pen.value=0, method='BinSeg',
# test.stat="Normal", minseglen=7, class=TRUE)
# cptm_CP_1
#
# plot(cptm_CP_1)
#
# cpts_CP_1 <- cpts(cptm_CP_1) # change point time points
# cpts_CP_1
#
# rpc_change <- ifelse(max(cpts_CP_1)==0,0,(Sys.Date()-(as.Date(y_data[max(cpts_CP_1),1]))))
#
#
# print( paste("latest change in mean on", y_data[max(cpts_CP_1),1] , "that is", Sys.Date()-(as.Date(y_data[max(cpts_CP_1),1])), "days ago." ))
# print( paste("latest change in mean on", y_data[max(cpts_CP),1] , "that is", Sys.Date()-(as.Date(y_data[max(cpts_CP),1])), "days ago." ))
#
# #
# library("outliers")
# y_data$Cost_Acc_score <- scores(type="z", y_data$cost_acc, prob=.95)
# y_data$RPC_Acc_score <- scores(type="z", y_data$rpc_acc, prob=.95)
# cost_outliers <- list(subset(y_data, Cost_Acc_score==TRUE)[,1])
# cost_outliers
# revenue_outliers <- list(subset(y_data, RPC_Acc_score==TRUE)[,1])
# revenue_outliers
# d1 <- as.data.frame(count(unlist(revenue_outliers)))
#
# sf <- sum(d1$freq)
#
# count(revenue_outliers)
#
# y_data_1 <- subset(y_data, RPC_Acc_score==TRUE)
# y_data_2 <- subset(y_data, Cost_Acc_score==TRUE)
#
# # #ggplot(data_1, aes(x=date, y=value, color = variable, group=1)) +geom_line() + facet_grid(. ~ portfolio_name)
# #
# p= ggplot() +
# geom_line(data =data_1, aes(x = as.Date(date), y = cost_acc, group=1, color="red")) +
# geom_line(data = data_1, aes(x = as.Date(date), y = rpc_acc, group=1, color="darkcyan")) +
# geom_point(data = y_data_1, aes(x = as.Date(date), y = rpc_acc, group=1, color="darkcyan", shape="circle", size=3))+
# geom_point(data = y_data_2, aes(x = as.Date(date), y = cost_acc, group=1, color="red", shape = "triangle", size=3))+
# xlab('Date') +
# ylab('Value') + facet_grid(. ~ portfolio_name)+
# scale_x_date(date_breaks = "1 month")+
# geom_vline(xintercept=as.Date(y_data[max(cpts_CP),1]), linetype=4, color = "red", show.legend = T)+
# geom_text(data=y_data, mapping=aes(x=as.Date(y_data[max(cpts_CP),1]), y=0, label=as.Date(y_data[max(cpts_CP),1] , sep = " ")), size=3, vjust=1, hjust=0, angle=90)+
# geom_vline(xintercept=as.Date(y_data[max(cpts_CP_1),1]), linetype=4, color = "darkcyan", show.legend = T)+
# geom_text(data=y_data, mapping=aes(x=as.Date(y_data[max(cpts_CP_1),1]), y=0, label=as.Date(y_data[max(cpts_CP_1),1] , sep = " ")), size=3, vjust=1, hjust=0, angle=90)+
# scale_color_manual(name = "Value", labels = c("darkcyan"="cost_acc", "red"="rpc_acc"), values = c("darkcyan", "red"))
#
# p= ggplot() +
# geom_line(data =data_1, aes(x = as.Date(date), y = cost_acc, group=1, color="red")) +
# geom_point(data = y_data_2, aes(x = as.Date(date), y = cost_acc, group=1, color="red", shape = "triangle", size=3), show.legend = F)+
# xlab('Date') +
# ylab('Value') + facet_grid(. ~ portfolio_name)+
# scale_x_date(date_breaks = "1 month")+
# geom_vline(xintercept=as.Date(y_data[max(cpts_CP),1]), linetype=4, color = "red", show.legend = T)+
# geom_text(data=y_data, mapping=aes(x=as.Date(y_data[max(cpts_CP),1]), y=0, label=as.Date(y_data[max(cpts_CP),1] , sep = " ")), size=3, vjust=1, hjust=0, angle=90)+
# scale_color_manual(name = "Value", labels = c("red"="cost_acc"), values = c("red"))
#
# print(p)
#
# #
# p= ggplot() +
# geom_line(data = data_1, aes(x = as.Date(date), y = rpc_acc, group=1), color="darkcyan" , show.legend = F) +
# geom_point(data = y_data_1, aes(x = as.Date(date), y = rpc_acc, group=1, shape="circle"),size=5, color="darkcyan", show.legend = T)+
# xlab('Date') +
# ylab('rpc_acc') + facet_grid(. ~ portfolio_name)+
# scale_x_date(date_breaks = "1 week")+
# geom_vline(data=y_data, mapping = aes(xintercept=as.Date(y_data[max(cpts_CP),1]), linetype="twodash"),size=1, color = "darkcyan", show.legend =T)+
# geom_hline(data=y_data, mapping = aes(yintercept=100, linetype="dotted"),size=1, color = "darkcyan", show.legend =T)+
# geom_text(data=y_data, mapping=aes(x=as.Date(y_data[max(cpts_CP_1),1]), y=0, label=as.Date(y_data[max(cpts_CP_1),1] , sep = " ")), size=3, vjust=1, hjust=0, angle=90)+
# scale_shape_manual(name = " Legend-Shape", labels = c("circle"="outliers"), values = c("circle"))+
# scale_linetype_manual(name = "Legend-Line ", labels = c("dotted"="100 %","twodash"="change_mean"), values = c("dotted","twodash"))+
# guides(shape = guide_legend("Legend-Shape",override.aes = list(linetype = 0, color="darkcyan")),
# linetype = guide_legend("Legend-Line",override.aes = list(shape = 0, color="darkcyan")))
# p
#
#
# p= ggplot() +
# geom_line(data =data_1, aes(x = as.Date(date), y = cost_acc, group=1, color="red"), show.legend = F) +
# geom_point(data = y_data_2, aes(x = as.Date(date), y = cost_acc, group=1, shape = "circle", color="red"), size=5, show.legend = T)+
# xlab('Date') +
# ylab('cost_acc') + facet_grid(. ~ portfolio_name)+
# scale_x_date(date_breaks = "1 month")+
# geom_vline(data=y_data, mapping = aes(xintercept=as.Date(y_data[max(cpts_CP),1]), linetype="twodash", color = "red"),size=1, show.legend =T)+
# geom_text(data=y_data, mapping=aes(x=as.Date(y_data[max(cpts_CP),1]), y=0, label=as.Date(y_data[max(cpts_CP),1] , sep = " ")), size=3, vjust=1, hjust=0, angle=90)+
# scale_shape_manual(name = " Legend-Shape", labels = c("circle"="outliers"), values = c("circle"))+
# scale_linetype_manual(name = "Legend-Line ", labels = c("twodash"="change_mean"), values = c("twodash"))+
# scale_color_discrete(guide = FALSE)+
# guides(shape = guide_legend("Legend-Shape",override.aes = list(linetype = 0)),
# linetype = guide_legend("Legend-Line",override.aes = list(shape = 0)))
# Aggregate-level accuracy: one row per pid over [date1, date2].
# Idiom fix: `<-` instead of `=` for assignment.
admin_daily_1 <- amo_db_get(query = admin_query_daily_accuracy_agg, db_tag = dbtag1, dc = dc_choose1, debug = TRUE)
# admin_daily_agg1 = amo_db_get(query=admin_query_daily_accuracy_agg_1, db_tag = dbtag, dc = dc_choose, debug = TRUE)
# admin_daily_agg_v21 <- merge(admin_pidd, admin_daily_agg1, by.x = "pid",by.y = "pid", all.x = TRUE)
# admin_daily_agg_v212 <- admin_daily_agg_v21[admin_daily_agg_v21$portfolio_name == "Grainger PLA",]
# data <- admin_daily_agg_v212[order(admin_daily_agg_v212$date, decreasing = TRUE),]
# Attach the aggregate accuracy metrics to the running admin_final summary;
# all.x = TRUE keeps portfolios without an aggregate row (NA, filled below).
# `by = "pid"` is equivalent to the original redundant `by.x`/`by.y` pair.
admin_final <- merge(admin_final, admin_daily_1, by = "pid", all.x = TRUE)
# data_1 <- admin_final[admin_final$client_account_name == "grainger",]
# data <- data_1 %>% select(portfolio_name, pred_cost,cost_acc, rpc_acc, Scenario)
# data[is.na(data)] <- 0
# data <- data[order(data$pred_cost, decreasing = TRUE),]
# #
# library("formattable")
# improvement_formatter <-
# formatter("span",
# style = x ~ style(
# font.weight = "bold",
# color = ifelse(x > 120, "Green", ifelse(x < 80, "Red", "black"))),
# x ~ icontext(ifelse(x > 120, "arrow-up", ifelse(x < 80, "arrow-down", " ")), x))
#
# formattable(data, list(
# 'cost_acc' = improvement_formatter
# ))
#
# as.datatable(formattable(data, list(
# 'cost_acc' = improvement_formatter
# )))
# data$cost_acc <- paste(round(data$cost_acc,digits=1),"%",sep="")
# data$rpc_acc <- paste(round(data$rpc_acc,digits=1),"%",sep="")
# data$pred_cost <- format(data$pred_cost,big.mark=",", trim=TRUE,scientific=FALSE)
# data$pred_cost <- as.numeric(unlist(regmatches(data$pred_cost,
# gregexpr("[[:digit:]]+\\.*[[:digit:]]*",data$pred_cost))
# ) )
# data <- data[order(data$pred_cost, decreasing = TRUE),]
# range(data$pred_cost)
# View(data)
# Zero-fill every remaining NA cell so the scenario comparisons below never
# see NA (replace() performs the same logical-index assignment in one call).
admin_final <- replace(admin_final, is.na(admin_final), 0)
#SCENARIOS
admin_final <- admin_final %>%
mutate(Scenario = case_when(cost_acc > 0 & cost_acc < 80 & rpc_acc >=80 & rpc_acc<=120 ~ "Underspend AND RPC Accuracy OK",
cost_acc > 0 & cost_acc < 80 & rpc_acc <80 | rpc_acc >120 ~ "Underspend AND Poor RPC Accuracy ",
cost_acc > 120 & rpc_acc >=80 & rpc_acc<=120 ~ "Overspend AND RPC Accuracy OK",
cost_acc > 120 & rpc_acc > 0 & rpc_acc < 80 | rpc_acc > 120 ~ "Overspend AND Poor RPC Accuracy",
cost_acc >= 80 & cost_acc <=120 & rpc_acc >=80 & rpc_acc<=120 ~ "Cost Accuracy Within Range AND RPC Accuracy OK",
cost_acc >= 80 & cost_acc <=120 & rpc_acc > 0 & rpc_acc < 80 | rpc_acc > 120 ~ "Cost Accuracy Within Range AND Poor RPC Accuracy",
cost_acc == 0 & rpc_acc == 0 ~ "No Data"
))
# (single-pid variant kept for reference)
# admin_query_spend_multiple=sprintf("select pid, spend_limit_multiple as Campaign_spend_multiple from portfolio_campaign_spend_limit where pid = (%s) order by 1", pid1)
# Campaign spend-limit multiple for every portfolio. NOTE(review): the alias
# is unquoted, so downstream code refers to the column in lower case
# (`campaign_spend_multiple`) — presumably the DB layer folds the case.
admin_query_spend_multiple=sprintf("select pid, spend_limit_multiple as Campaign_spend_multiple from portfolio_campaign_spend_limit")
admin_opt_sm = amo_db_get(query=admin_query_spend_multiple, db_tag = dbtag1, dc = dc_choose1, debug = TRUE)
# Zero-fill before joining so downstream arithmetic never sees NA multiples.
admin_opt_sm[is.na(admin_opt_sm)] <- 0
admin_final <- merge(admin_final, admin_opt_sm, by.x="pid", by.y = "pid", all.x = TRUE)
# Constrained vs unconstrained spend/revenue potential for the selected pid:
# how much extra revenue/spend is available if keyword constraints were lifted,
# plus the count of constrained keywords.
admin_query_cost_rev_KW = sprintf(" select pid, active_ads as Active_KW_count, max_unconstrained_revenue, max_constrained_revenue,
                                  (max_unconstrained_revenue-max_constrained_revenue) as inc_unconstrained_revenue,
                                  max_constrained_spend, max_unconstrained_spend,
                                  (max_unconstrained_spend-max_constrained_spend) as inc_unconstrained_cost,
                                  constrained_kwds_count from user_portfolio_stats where pid = (%s) order by 1;", pid1)
admin_cost_rev_KW = amo_db_get(query=admin_query_cost_rev_KW, db_tag = dbtag1, dc = dc_choose1, debug = TRUE)
admin_cost_rev_KW[is.na(admin_cost_rev_KW)] <- 0
# Percent of the total unconstrained revenue that is currently locked behind
# constraints (drives the "remove constraints" recommendation below).
admin_cost_rev_KW$unconstrained_rev_share_percent <- admin_cost_rev_KW$inc_unconstrained_revenue/admin_cost_rev_KW$max_unconstrained_revenue*100
admin_final <- merge(admin_final, admin_cost_rev_KW, by.x="pid", by.y = "pid", all.x = TRUE)
# Zero-fill join NAs, then sort portfolios by predicted cost, largest first.
admin_final[is.na(admin_final)] <- 0
admin_final <- admin_final[order(admin_final$pred_cost, decreasing = TRUE),]
# Classify each portfolio's cost accuracy into a spend scenario.
# The four bands are mutually exclusive, so evaluation order is irrelevant;
# the "No Data" band (cost_acc == 0) is listed first for readability.
# cost_acc values outside all bands (only possible for negatives) yield NA.
admin_final <- admin_final %>%
  mutate(
    Spend_Scenario = case_when(
      cost_acc == 0 ~ "No Data",
      cost_acc < 80 & cost_acc > 0 ~ "Underspend",
      cost_acc > 120 ~ "Overspend",
      cost_acc <= 120 & cost_acc >= 80 ~ "Cost Accuracy Within Range"
    )
  )
# Classify each portfolio's RPC accuracy.
# BUGFIX: the original listed `rpc_acc > 120 | rpc_acc < 80` FIRST, and since
# 0 < 80, rows with rpc_acc == 0 (no revenue data) matched that branch — the
# `rpc_acc == 0 ~ "No Data"` branch was unreachable dead code and no-data
# portfolios were reported as "Poor RPC Accuracy". Putting the zero test first
# restores the intended "No Data" label; all other rows are unaffected.
admin_final <- admin_final %>%
  mutate(RPC_Scenario = case_when(
    rpc_acc == 0 ~ "No Data",
    rpc_acc > 120 | rpc_acc < 80 ~ "Poor RPC Accuracy",
    rpc_acc >= 80 & rpc_acc <= 120 ~ "RPC Accuracy OK"
  ))
# Max-bid recommendation: for portfolios that are NOT overspending and have
# more than 10% of unconstrained revenue locked away, recommend removing the
# constraints; otherwise surface the raw share (as text, since the column
# mixes a label with numbers).
admin_final <- admin_final %>%
  mutate(reco_max_bid = case_when(
    Spend_Scenario != "Overspend" & unconstrained_rev_share_percent > 10 ~ "remove constraints",
    TRUE ~ as.character(unconstrained_rev_share_percent)
  ))
# Spend-multiple recommendation:
#  * Overspend with a multiple above 1.2  -> halve it, but never below 1.2.
#  * Underspend with a multiple below 2   -> grow it 1.5x, but never above 2.
#  * Otherwise keep the current multiple.
admin_final <- admin_final %>%
  mutate(reco_sm = case_when(
    Spend_Scenario == "Overspend" & campaign_spend_multiple > 1.2 ~ pmax(1.2, campaign_spend_multiple / 2),
    Spend_Scenario == "Underspend" & campaign_spend_multiple < 2 ~ pmin(2, campaign_spend_multiple * 3 / 2),
    TRUE ~ as.numeric(campaign_spend_multiple)
  ))
# --- Optimizer arguments ---------------------------------------------------
# Three parallel queries, one per optimizer parameter. Each resolves the
# effective per-portfolio value: the portfolio-level override when present,
# otherwise the model default (coalesce(oa.param_value, mp.default_value)).
# 'enable_intraday': whether intraday optimization is enabled.
admin_query_opt_args_intraday = "select up.pid,param_name as param_name_intraday,coalesce(oa.param_value, mp.default_value) as param_value_intraday
from user_portfolios up
join models m on (True)
join model_parameters mp using (modelid)
left join (select pid, paramid, param_value
from model_parameters
join optimizer_arguments_portfolio_level op using(paramid)
) oa on (oa.pid = up.pid and oa.paramid = mp.paramid)
where m.model_type = 'o' and param_name= 'enable_intraday'
and up.pid in (select pid from user_portfolios )
and (not default_value isnull or
not oa.param_value isnull);"
# 'profile_fract': the learning-budget fraction.
admin_query_opt_args_learning_budget = "select up.pid,param_name as param_name_lb,coalesce(oa.param_value, mp.default_value) as param_value_LB
from user_portfolios up
join models m on (True)
join model_parameters mp using (modelid)
left join (select pid, paramid, param_value
from model_parameters
join optimizer_arguments_portfolio_level op using(paramid)
) oa on (oa.pid = up.pid and oa.paramid = mp.paramid)
where m.model_type = 'o' and param_name= 'profile_fract'
and up.pid in (select pid from user_portfolios )
and (not default_value isnull or
not oa.param_value isnull);"
# 'activate_learning_zero_impr': zero-impression learning (ZIL) on/off.
admin_query_opt_args_ZIL = "select up.pid,param_name as param_name_ZIL,coalesce(oa.param_value, mp.default_value) as param_value_ZIL
from user_portfolios up
join models m on (True)
join model_parameters mp using (modelid)
left join (select pid, paramid, param_value
from model_parameters
join optimizer_arguments_portfolio_level op using(paramid)
) oa on (oa.pid = up.pid and oa.paramid = mp.paramid)
where m.model_type = 'o' and param_name= 'activate_learning_zero_impr'
and up.pid in (select pid from user_portfolios )
and (not default_value isnull or
not oa.param_value isnull);"
# Fetch each parameter table and left-join all three onto admin_final by pid.
admin_opt_opt_args_intraday = amo_db_get(query=admin_query_opt_args_intraday, db_tag = dbtag1, dc = dc_choose1, debug = TRUE)
admin_opt_opt_args_learning_budget = amo_db_get(query=admin_query_opt_args_learning_budget, db_tag = dbtag1, dc = dc_choose1, debug = TRUE)
admin_opt_opt_args_ZIL = amo_db_get(query=admin_query_opt_args_ZIL, db_tag = dbtag1, dc = dc_choose1, debug = TRUE)
admin_final <- merge(admin_final,admin_opt_opt_args_intraday,by.x = "pid",by.y = "pid",all.x = TRUE)
admin_final <- merge(admin_final,admin_opt_opt_args_learning_budget,by.x = "pid",by.y = "pid",all.x = TRUE)
admin_final <- merge(admin_final,admin_opt_opt_args_ZIL,by.x = "pid",by.y = "pid",all.x = TRUE)
#interested columns: Cost HL, Revenue HL
# Cost/revenue model half-lives for the selected pid, joined onto admin_final.
admin_query_hl = sprintf(" select pid as PID, cost_model_half_life, revenue_model_half_life from portfolio_model_arguments where pid = (%s) order by 1;", pid1)
admin_opt_hl = amo_db_get(query=admin_query_hl, db_tag = dbtag1, dc = dc_choose1, debug = TRUE)
admin_final <- merge(admin_final, admin_opt_hl, by.x="pid", by.y = "pid",all.x = TRUE)
# Most-recent model-coverage snapshot per portfolio: the fraction of bid units
# that have a click (cost) model and a revenue model. Only active ('z'/'a')
# portfolios with at least one ad are considered; the inner join picks the
# latest revenue_model_stats row per pid.
admin_query_model_coverage = sprintf("select portfolio_id as portfolio, pid,
       cast(revenue_model_stats.ctime as date)       as date_most_recent,
       ads                                           as bid_units,
       cost_modeled_ads                              as click_models,
       ads_w_revenue                                 as revenue_models,
       round(cost_modeled_ads / cast(ads as numeric), 3) as click_coverage,
       round(ads_w_revenue / cast(ads as numeric), 3)    as revenue_coverage
from revenue_model_stats
join (select pid,
             max(ctime) as ctime
      from revenue_model_stats
      group by pid) temp_pid_maxctime using (pid, ctime)
join alg_user_portfolio_stats using (pid)
join user_portfolios using (pid)
where status_code in ('z', 'a')
and (cast(ads as numeric) >0)
and pid = (%s)
order by pid;", pid1)
admin_model_coverage = amo_db_get(query=admin_query_model_coverage, db_tag = dbtag1, dc = dc_choose1, debug = TRUE)
# FIX: use library() rather than require() to load dplyr. require() merely
# returns FALSE (with a warning) when the package is missing, letting the
# select() below fail with a confusing error; library() fails fast. dplyr is
# normally already attached at this point, so this is a no-op in practice.
library(dplyr)
# Keep only the coverage ratios before joining onto admin_final.
admin_model_coverage <- admin_model_coverage %>% select(pid, click_coverage, revenue_coverage)
admin_final <- merge(admin_final, admin_model_coverage, by.x="pid", by.y = "pid", all.x = TRUE)
# Zero-fill coverage for portfolios with no snapshot row.
admin_final[is.na(admin_final)] <- 0
# df1 <- admin_final[admin_final$portfolio_name == "Grainger SEM",]
# df1$cchange <- cost_change
# df1$rchange <- rpc_change
# df1$click_coverage[1]= 0.5
# df1$cost_model_half_life[1]= 11
# df1$cost_acc[1]= 75
# df1$cchange[1]= 11
# df1 <- df1 %>% mutate(reco_cost_HL = case_when(Spend_Scenario!= "Within_Range" & cchange > 0 & click_coverage < 0.1 & max(cost_model_half_life,cchange) < 14 & max(cost_model_half_life,cchange) > 3 ~ max(cost_model_half_life, cchange),
# Spend_Scenario!= "Within_Range" & cchange > 0 & click_coverage < 0.1 & max(cost_model_half_life,cchange) >14 ~ 14,
# Spend_Scenario!= "Within_Range" & cchange > 0 & click_coverage < 0.1 & max(cost_model_half_life,cchange) < 3 ~ 3,
# Spend_Scenario!= "Within_Range" & cchange > 0 & click_coverage > 0.1 & min((cost_model_half_life-3)/2, 3) < 3 ~ 3,
# Spend_Scenario!= "Within_Range" & cchange > 0 & click_coverage > 0.1 & ((cost_model_half_life-3)/2) > 3 ~ (as.numeric(min((cost_model_half_life-3)/2, 3) )),
# TRUE ~ as.numeric(cost_model_half_life)))
#
# df1 <- df1 %>% mutate(reco_rev_HL = case_when(RPC_Scenario!= "RPC_OK" & rchange > 0 & revenue_coverage < 0.05 & max(revenue_model_half_life,rchange) < 60 & max(revenue_model_half_life,rchange) > 10 ~ max(revenue_model_half_life, rchange),
# RPC_Scenario!= "RPC_OK" & rchange > 0 & revenue_coverage < 0.05 & max(revenue_model_half_life,rchange) >60 ~ 60,
# RPC_Scenario!= "RPC_OK" & rchange > 0 & revenue_coverage < 0.05 & max(revenue_model_half_life,rchange) < 10 ~ 10,
# RPC_Scenario!= "RPC_OK" & rchange > 0 & revenue_coverage > 0.05 & min((revenue_model_half_life-10)/2, 10) < 10 ~ 10,
# RPC_Scenario!= "RPC_OK" & rchange > 0 & revenue_coverage > 0.05 & ((revenue_model_half_life-3)/2) > 10 ~ (as.numeric(min((revenue_model_half_life-10)/2, 10) )),
# TRUE ~ revenue_model_half_life))
# ZIL recommendation: disable zero-impression learning when overspending,
# enable it when underspending; otherwise keep the current value. (The DB
# alias param_value_ZIL arrives lower-cased as param_value_zil.)
admin_final <- admin_final %>%
  mutate(reco_ZILstatus = case_when(Spend_Scenario == "Overspend" & param_value_zil == "True" ~ "False",
                                    Spend_Scenario == "Underspend" & param_value_zil == "False" ~ "True",
                                    TRUE ~ param_value_zil))
admin_final <- admin_final[order(admin_final$pred_cost, decreasing = TRUE),]
# Parameter values come back from the DB as text; cast the learning budget
# to double before comparing.
admin_final$param_value_lb <- as.double(admin_final$param_value_lb)
# Learning-budget recommendation: zero it out when overspending, otherwise
# keep the current value. NOTE(review): the Underspend bump to 0.2 is left
# commented out inside the call — confirm whether it should be re-enabled.
admin_final <- admin_final %>%
  mutate(reco_LearningBudget = case_when(Spend_Scenario == "Overspend" & param_value_lb > 0 ~ 0.0,
                                         TRUE ~ as.double(param_value_lb)
                                         ##  Spend_Scenario == "Underspend" & param_value_lb < 0.2 ~ 0.2,
                                         )
         )
# Intraday recommendation: turn intraday optimization on whenever cost
# accuracy is out of range (over- or underspend) and it is currently off.
admin_final <- admin_final %>%
  mutate(reco_Intraday = case_when(Spend_Scenario != "Cost Accuracy Within Range" & param_value_intraday == "False" ~ "True",
                                   TRUE ~ as.character(param_value_intraday))
         )
# Budget-steering strategy (e.g. weekly / day_of_week) for the selected pid.
# Full join on the period id, filtered to the pid of interest.
admin_query_spend_strategy = sprintf("select a.pid as pid, p.budget_steer_period as strategy
from budget_steer_periods p
full join budget_steer_args a
on p.budget_steer_periodid = a.budget_steer_periodid
where a.pid = (%s);", pid1)
admin_spend_strategy = amo_db_get(query=admin_query_spend_strategy, db_tag = dbtag1, dc = dc_choose1, debug = TRUE)
admin_final <- merge(admin_final, admin_spend_strategy, by.x="pid", by.y = "pid", all.x = TRUE)
# Optimizer argument 'enable_click_dow' (click day-of-week modeling), resolved
# per portfolio as override-or-default, same shape as the earlier opt-arg
# queries; joined onto admin_final by pid.
admin_query_opt_args_cdow = "select up.pid,param_name as param_name_cdow,coalesce(oa.param_value, mp.default_value) as param_value_cdow
from user_portfolios up
join models m on (True)
join model_parameters mp using (modelid)
left join (select pid, paramid, param_value
from model_parameters
join optimizer_arguments_portfolio_level op using(paramid)
) oa on (oa.pid = up.pid and oa.paramid = mp.paramid)
where m.model_type = 'o' and param_name= 'enable_click_dow'
and up.pid in (select pid from user_portfolios )
and (not default_value isnull or
not oa.param_value isnull);"
admin_cdow = amo_db_get(query=admin_query_opt_args_cdow, db_tag = dbtag1, dc = dc_choose1, debug = TRUE)
admin_final <- merge(admin_final,admin_cdow,by.x = "pid",by.y = "pid",all.x = TRUE)
# #VALIDATION
#
# View(admin_final)
#
# #campaign_spend_multiple
#
# admin_final$Spend_Scenario <- "Overspend"
# admin_final$campaign_spend_multiple <- 5
# admin_final$param_value_lb <- 5
# admin_final$param_value_zil <- "True"
# admin_final$param_value_intraday <- "False"
# admin_final$reco_sm <- ifelse(admin_final$Spend_Scenario == "Overspend" & admin_final$campaign_spend_multiple > 1.2, ifelse(1.2 > admin_final$campaign_spend_multiple/2, 1.2, admin_final$campaign_spend_multiple/2),
# ifelse(admin_final$Spend_Scenario == "Underspend" & admin_final$campaign_spend_multiple < 2, ifelse(2 < admin_final$campaign_spend_multiple*3/2, 2, admin_final$campaign_spend_multiple*3/2), admin_final$campaign_spend_multiple))
# admin_final <- admin_final %>%
# mutate(reco_ZILstatus = case_when(Spend_Scenario == "Overspend" & param_value_zil == "True" ~ "False",
# Spend_Scenario == "Underspend" & param_value_zil == "False" ~ "True",
# TRUE ~ param_value_zil))
#
# admin_final <- admin_final %>%
# mutate(reco_LearningBudget = case_when(Spend_Scenario == "Overspend" & param_value_lb > 0 ~ 0,
# Spend_Scenario == "Underspend" & param_value_lb < 0.2 ~ 0.2,
# TRUE ~ as.double(param_value_lb)))
# admin_final <- admin_final %>%
# mutate(reco_Intraday = case_when(Spend_Scenario != "Cost Accuracy Within Range" & param_value_intraday == "False" ~ "True",
# TRUE ~ as.character(param_value_intraday))
# )
#
# admin_final$unconstrained_rev_share_percent
#
#
#
# admin_final$Spend_Scenario <- "Underspend"
# admin_final$campaign_spend_multiple <- 1.7
# admin_final$param_value_lb <- 0.1
# admin_final$param_value_zil <- "False"
# admin_final$param_value_intraday <- "False"
# admin_final$reco_sm <- ifelse(admin_final$Spend_Scenario == "Overspend" & admin_final$campaign_spend_multiple > 1.2, ifelse(1.2 > admin_final$campaign_spend_multiple/2, 1.2, admin_final$campaign_spend_multiple/2),
# ifelse(admin_final$Spend_Scenario == "Underspend" & admin_final$campaign_spend_multiple < 2, ifelse(2 < admin_final$campaign_spend_multiple*3/2, 2, admin_final$campaign_spend_multiple*3/2), admin_final$campaign_spend_multiple))
#
# admin_final <- admin_final %>%
# mutate(reco_ZILstatus = case_when(Spend_Scenario == "Overspend" & param_value_zil == "True" ~ "False",
# Spend_Scenario == "Underspend" & param_value_zil == "False" ~ "True",
# TRUE ~ param_value_zil))
# admin_final <- admin_final %>%
# mutate(reco_LearningBudget = case_when(Spend_Scenario == "Overspend" & param_value_lb > 0 ~ 0,
# Spend_Scenario == "Underspend" & param_value_lb < 0.2 ~ 0.2,
# TRUE ~ as.double(param_value_lb)))
# admin_final <- admin_final %>%
# mutate(reco_Intraday = case_when(Spend_Scenario != "Cost Accuracy Within Range" & param_value_intraday == "False" ~ "True",
# TRUE ~ as.character(param_value_intraday))
# )
#
#
#Match Type
# --- Match-type accuracy ---------------------------------------------------
# Predicted vs actual clicks/cost/revenue per match type over [date3, date4]:
#  * e = optimizer predictions (ad_execute_inserts, daily-averaged, shifted
#        into the user's timezone),
#  * c = actual clicks/spend (SEM keywords; sid 77 handled via trig agg),
#  * r = actual revenue weighted by the portfolio objective function.
# Accuracy columns are actual/predicted * 100.
admin_query_MT_accuracy_2 =
  sprintf("SELECT c.pid,
        match_type_display as match_type,
        sum(e.oclicks)::NUMERIC(20,2)     AS pred_clicks,
        sum(c.clicks)                      AS act_clicks,
        (100.0 * SUM(c.clicks) / NULLIF( SUM(e.oclicks), 0 ) )::NUMERIC(20,2) AS click_acc,
        sum(e.orev)::NUMERIC(20,4)         AS pred_rev,
        sum(r.rev)::NUMERIC(20,4)          AS act_rev,
        sum( COALESCE((e.orev), 0) - COALESCE((r.rev), 0) )::NUMERIC(20,4) AS rev_diff,
        (100.0 * SUM(r.rev) / NULLIF ( SUM(e.orev), 0 ) )::NUMERIC(20,2) AS rev_acc,
        SUM(e.ocost)::NUMERIC(20,2)        AS pred_cost,
        SUM(c.spend)                       AS act_cost,
        ( COALESCE(SUM(e.ocost), 0) - COALESCE(SUM(c.spend), 0) )::NUMERIC(20,2) AS cost_diff,
        (100.0 * SUM(c.spend)/ NULLIF( SUM(e.ocost), 0 ) )::NUMERIC(20,2) AS cost_acc,
          (100.0 * SUM((c.spend/NULLIF( c.clicks, 0 )) /NULLIF((e.ocost/NULLIF( e.oclicks, 0 )),0)) )::NUMERIC(20,2) AS CPC_acc,
          (100.0 * SUM((r.rev/NULLIF(c.clicks, 0 )) /NULLIF((e.orev/NULLIF( e.oclicks, 0 )),0))) ::NUMERIC(20,2) AS RPC_acc
FROM ( SELECT pid, adid,
            device,
            sum(oclicks) as oclicks,
            sum(orev) as orev,
            sum(ocost) as ocost
     FROM (SELECT e.historical_pid as pid,e.adid,
                  e.device,
                  ((e.xtime + INTERVAL '6 hours') at time zone tz)::date,
                  avg(e.clicks) as oclicks,
                  avg(e.revenue) as orev,
                  avg(coalesce(total_cost, e.clicks * e.cost)) as ocost
           FROM ad_execute_inserts e
                  JOIN user_portfolios up ON (e.historical_pid = up.pid)
                  JOIN users U ON (up.userid = u.userid)
                  JOIN timezones t ON (t.tzid = u.tzid)
           WHERE up.pid = (%1$s)
             and ((e.xtime + INTERVAL '6 hours') at time zone tz) between DATE '%2$s' and DATE '%3$s' + INTERVAL '1 day'
             and e.xtime between (DATE '%2$s' - INTERVAL '1 day') and (DATE '%3$s' + INTERVAL '2 day')
           group by 1,2,3,4) daily_avg
     group by pid,adid, device ) AS e
       RIGHT OUTER JOIN ( SELECT wh.pid,WH.adid,
                               CASE WHEN device = 'm' THEN device ELSE 'b'::TEXT END as device, -- ad_execute has device = 'b' for desktop, tablet and non-device SEs
                               SUM(wh.clicks)                                        AS clicks,
                               SUM(wh.cost)                                          AS spend
                        FROM day_sem_keyword_agg WH
                        WHERE WH.date >= DATE '%2$s'
                          AND WH.date <= DATE '%3$s'
                          AND WH.pid = (%1$s)
                          AND WH.sid <> 77
                        GROUP BY 1,2,3
                        UNION ALL
                        SELECT
                          wh.pid,wh.adid,
                          'b'::TEXT      as device,
                          SUM(wh.est_clicks) AS clicks,
                          SUM(wh.est_cost)   AS spend
                        FROM day_trig_keyword_agg WH
                        WHERE WH.date >= DATE '%2$s'
                          AND WH.date <= DATE '%3$s'
                          AND WH.pid = (%1$s)
                          AND WH.sid = 77
                        GROUP BY 1,2,3
) AS c USING (pid,adid, device)
       LEFT OUTER JOIN (SELECT wh.pid,wh.adid,
                             CASE WHEN device = 'm' THEN device ELSE 'b'::TEXT END                        as device, -- ad_execute has device = 'b' for desktop, tablet and non-device SEs
                             SUM(coalesce(f.mobile_weight, weight) * (COALESCE(CT_VALUE_LAST, 0) + COALESCE(CTVT_VALUE_LAST, 0)) ) AS rev
                      FROM day_revenue_keyword_agg WH
                             JOIN portfolio_objectives PO ON (PO.pid = WH.pid)
                             JOIN objective_function F ON (F.objid = PO.objid AND F.propertyid = WH.propertyid)
                      WHERE WH.date >= DATE '%2$s'
                        AND WH.date <= DATE '%3$s'
                        AND WH.pid = (%1$s)
                      GROUP BY 1,2,3) AS r USING (pid,adid, device)
       JOIN ads using (adid)
       JOIN mgroupid_mtid using (mgroupid)
       JOIN match_types using (mtid)
GROUP BY 1,2
ORDER BY 8 DESC, 12 DESC",pid1,date3, date4)
admin_opt_MT_2 = amo_db_get(query=admin_query_MT_accuracy_2, db_tag = dbtag1, dc = dc_choose1, debug = TRUE)
# Attach portfolio names (admin_pidd defined earlier); zero-fill both before
# and after the join so unmatched pids do not propagate NAs.
admin_opt_MT_2[is.na(admin_opt_MT_2)] <- 0
admin_opt_MT_v2 <- merge(admin_pidd, admin_opt_MT_2, by.x = "pid",by.y = "pid", all.x = TRUE)
admin_opt_MT_v2[is.na(admin_opt_MT_v2)] <- 0
admin_opt_MT_v2 <- admin_opt_MT_v2[order(admin_opt_MT_v2$pid, decreasing = TRUE),]
# Normalize NAs, relabel the zero-filled spend strategy, flag whether a
# day-of-week feature is active, then re-sort by predicted cost (descending).
admin_final[is.na(admin_final)] <- 0
# The NA-fill above turns missing strategies into "0"; relabel those "None".
# (No NAs can remain, so the subset assignment is safe.)
admin_final$strategy[admin_final$strategy == "0"] <- "None"
# DOW is enabled for "weekly" steering, or for "day_of_week" steering when
# the click-DOW optimizer flag is on; everything else is "Not_Enabled".
dow_enabled <- admin_final$strategy == "weekly" |
  (admin_final$strategy == "day_of_week" & admin_final$param_value_cdow == "True")
admin_final$DOWfeature <- ifelse(dow_enabled, "Enabled", "Not_Enabled")
admin_final <- admin_final[order(admin_final$pred_cost, decreasing = TRUE), ]
# admin_opt_MT_v2 <- admin_opt_MT_v2[admin_opt_MT_v2$portfolio_name == "Grainger PLA",]
#
# require(dplyr)
# admin_opt_MT_v1 <- admin_opt_MT_v2 %>% select(portfolio_name,match_type, act_cost, act_rev, pred_cost, pred_rev)
# admin_opt_MT_v1 <- admin_opt_MT_v1 %>% group_by(portfolio_name) %>% mutate(rank = rank(desc(act_cost)), total_cost= sum(act_cost)) %>% arrange(rank)
# admin_opt_MT_v1 <- admin_opt_MT_v1%>% group_by(portfolio_name) %>% mutate(spendperc=100*act_cost/total_cost)
# admin_opt_MT_v1 <- admin_opt_MT_v1 %>% mutate_at(vars(spendperc), funs(round(., 2)))
# colnames(admin_opt_MT_v1)[colnames(admin_opt_MT_v1)=="match_type"] <- "matchtype"
# admin_opt_MT_v12 <- admin_opt_MT_v1%>% group_by(portfolio_name) %>% filter(rank==1)
# admin_opt_MT_v12 <- admin_opt_MT_v1 %>% select(portfolio_name,matchtype, spendperc)
#
# mycols <- c("#0073C2FF", "#EFC000FF", "#868686FF", "#CD534CFF")
#
# ggplot(admin_opt_MT_v1, aes(x = "", y = spendperc, fill = match_type)) +
# geom_bar(width = 1, stat = "identity", color = "white") +
# coord_polar("y", start = 0)+
# geom_text(aes(y = spendperc, label = spendperc), color = "white")+
# theme_void()
#
# library(plotly)
#
# p1 <- plot_ly(admin_opt_MT_v1, labels = ~ matchtype, values = ~ spendperc, type = 'pie', text = ~paste("Match Type:",matchtype, ",","Spend Percent:", spendperc), hoverinfo="text", textinfo="text") %>%
# layout(title = 'Match_Type_Spend_Percent',
# xaxis = list(showgrid = FALSE, zeroline = FALSE, showticklabels = FALSE),
# yaxis = list(showgrid = FALSE, zeroline = FALSE, showticklabels = FALSE))
# print(p1)
#
# admin_opt_MT_v3 <- admin_opt_MT_v2 %>% mutate(act_roas=act_rev/act_cost)
# admin_opt_MT_v3 <- admin_opt_MT_v3 %>% mutate(pred_roas=pred_rev/pred_cost)
# admin_opt_MT_v3 <- admin_opt_MT_v3 %>% mutate(roas_acc=act_roas/pred_roas*100)
# admin_opt_MT_v3[is.na(admin_opt_MT_v3)] <- 0
# admin_opt_MT_v5 <- admin_opt_MT_v3 %>% select(portfolio_name,act_roas, pred_roas,match_type)
# admin_opt_MT_v6 <- melt(admin_opt_MT_v5)
#
# ggplot(admin_opt_MT_v6, aes(x=match_type, y=value, fill=variable)) +
# geom_bar(stat='identity', position='dodge2')+ facet_grid(. ~ portfolio_name)
#
# admin_opt_MT_v3$Variance_di <- var(admin_opt_MT_v3$act_roas)
# admin_opt_MT_v3$avg_di <- mean(admin_opt_MT_v3$act_roas)
# admin_opt_MT_v3$dispersion_index_mt <- admin_opt_MT_v3$Variance_di/admin_opt_MT_v3$avg_di
#
# admin_opt_MT_v3$Variance_er <- var(admin_opt_MT_v3$roas_acc)
# admin_opt_MT_v3$avg_er <- mean(admin_opt_MT_v3$roas_acc)
# admin_opt_MT_v3$error_rate_mt <- admin_opt_MT_v3$Variance_er/admin_opt_MT_v3$avg_er
#
# di_mt <- unique(admin_opt_MT_v3$dispersion_index_mt)
# er_mt <- unique(admin_opt_MT_v3$error_rate_mt)
#
# admin_final <- admin_final[admin_final$portfolio_name == "Grainger PLA RLSA",]
#
# admin_final$di <- di_mt
# admin_final$er <- er_mt
# --- Click-level accuracy --------------------------------------------------
# Same predicted-vs-actual comparison as the match-type query, but bucketed
# by order-of-magnitude predicted click level (0.1, 1, 10, ... 10000), with a
# UNION ALL second half producing a 'Total' row across all buckets.
admin_query_Click_level_accuracy_2 =
  sprintf("SELECT pid,
         (CASE WHEN e.oclicks > 0 and e.oclicks <= 0.5
                 THEN 0.1
               WHEN e.oclicks > 0.5 and e.oclicks <= 5
                 THEN 1
               WHEN e.oclicks > 5 and e.oclicks <= 50
                 THEN 10
               WHEN e.oclicks > 50 and e.oclicks <= 500
                 THEN 100
               WHEN e.oclicks > 500 and e.oclicks <= 5000
                 THEN 1000
               WHEN e.oclicks > 5000
                 THEN 10000
               ELSE 0
            END)::NUMERIC(20,2)::TEXT  AS clicklevel,
         SUM(e.oclicks)::NUMERIC(20,2) AS pred_clicks,
         SUM(c.clicks)                 AS act_clicks,
         SUM(e.ocost)::NUMERIC(20,2)   AS pred_cost,
         SUM(c.spend)                  AS act_cost,
         ( COALESCE(SUM(e.ocost), 0) - COALESCE(SUM(c.spend), 0) )::NUMERIC(20,2) AS cost_diff,
         sum(e.orev)::NUMERIC(20,4)         AS pred_rev,
         sum(r.rev)::NUMERIC(20,4)          AS act_rev,
         sum( COALESCE((e.orev), 0) - COALESCE((r.rev), 0) )::NUMERIC(20,4) AS rev_diff,
         (100.0 * SUM(c.spend)/ NULLIF( SUM(e.ocost), 0 ) )::NUMERIC(20,2)   AS cost_acc,
         (100.0 * SUM(c.clicks) / NULLIF( SUM(e.oclicks), 0 ) )::NUMERIC(20,2) AS click_acc,
         (100.0 * SUM(r.rev) / NULLIF ( SUM(e.orev), 0 ) )::NUMERIC(20,2) AS rev_acc,
          (100.0 * SUM((c.spend/NULLIF( c.clicks, 0 )) /NULLIF((e.ocost/NULLIF( e.oclicks, 0 )),0)) )::NUMERIC(20,2) AS CPC_acc,
          (100.0 * SUM((r.rev/NULLIF(c.clicks, 0 )) /NULLIF((e.orev/NULLIF( e.oclicks, 0 )),0))) ::NUMERIC(20,2) AS RPC_acc
  FROM ( SELECT pid, adid,
              device,
              sum(oclicks) as oclicks,
              sum(ocost) as ocost,
              sum(orev) as orev
       FROM (
              SELECT e.historical_pid as pid,e.adid,
                     e.device,
                     ((e.xtime + INTERVAL '6 hours') at time zone tz)::date,
                     avg(e.clicks)                                 as oclicks,
                     avg(coalesce(total_cost, e.clicks * e.cost))  as ocost,
                     avg(e.revenue)                                as orev
              FROM ad_execute_inserts e
                     JOIN user_portfolios up ON (e.historical_pid = up.pid)
                     JOIN users U ON (up.userid = u.userid)
                     JOIN timezones t ON (t.tzid = u.tzid)
              WHERE up.pid = (%1$s)
                and ((e.xtime + INTERVAL '6 hours') at time zone tz) between DATE '%2$s' and DATE '%3$s' + INTERVAL '1 day'
                and e.xtime between (DATE '%2$s' - INTERVAL '1 day') and (DATE '%3$s' + INTERVAL '2 day')
              group by 2,3,4,1) daily_avg
       group by adid, device, pid ) AS e
         FULL JOIN (
    SELECT wh.pid,
           wh.adid,
           CASE WHEN WH.sid in (3, 94, 10) AND device = 'm' THEN device ELSE 'b' END as device,
           SUM(wh.clicks)                                                            AS clicks,
           SUM(wh.cost)                                                              AS spend
    FROM day_sem_keyword_agg WH
    WHERE WH.date >= DATE '%2$s'
      AND WH.date <= DATE '%3$s'
      AND WH.pid = (%1$s)
      AND WH.sid <> 77
    GROUP BY 2,3,1
    UNION ALL
    SELECT wh.pid,
           wh.adid,
           'b'::TEXT          as device,
           SUM(wh.est_clicks) AS clicks,
           SUM(wh.est_cost)   AS spend
    FROM day_trig_keyword_agg WH
    WHERE WH.date >= DATE '%2$s'
      AND WH.date <= DATE '%3$s'
      AND WH.pid = (%1$s)
      AND WH.sid = 77
    GROUP BY wh.adid, wh.pid
    ) AS c USING (adid, device,pid)
         FULL JOIN (
    SELECT wh.pid,
           wh.adid,
           CASE WHEN device = 'm' THEN device ELSE 'b'::TEXT END                                                   as device,
           SUM(coalesce(f.mobile_weight, weight) * (COALESCE(CT_VALUE_LAST, 0) + COALESCE(CTVT_VALUE_LAST, 0)) ) AS rev
    FROM day_revenue_keyword_agg WH
           JOIN portfolio_objectives PO ON (PO.pid = WH.pid)
           JOIN objective_function F ON (F.objid = PO.objid AND F.propertyid = WH.propertyid)
    WHERE WH.date >= DATE '%2$s'
      AND WH.date <= DATE '%3$s'
      AND WH.pid = (%1$s)
    GROUP BY wh.adid, 3,1 ) AS r USING (adid, device,pid)
  GROUP BY clicklevel,pid
  UNION ALL
  SELECT pid,
         'Total'::TEXT clicklevel,
         SUM(e.oclicks)::NUMERIC(20,2) AS pred_clicks,
         SUM(c.clicks)                 AS act_clicks,
         SUM(e.ocost)::NUMERIC(20,2)   AS pred_cost,
         SUM(c.spend)                  AS act_cost,
         ( COALESCE(SUM(e.ocost), 0) - COALESCE(SUM(c.spend), 0) )::NUMERIC(20,2) AS cost_diff,
         sum(e.orev)::NUMERIC(20,4)         AS pred_rev,
         sum(r.rev)::NUMERIC(20,4)          AS act_rev,
         sum( COALESCE((e.orev), 0) - COALESCE((r.rev), 0) )::NUMERIC(20,4) AS rev_diff,
         (100.0 * SUM(c.spend)/ NULLIF( SUM(e.ocost), 0 ) )::NUMERIC(20,2)   AS cost_acc,
         (100.0 * SUM(c.clicks) / NULLIF( SUM(e.oclicks), 0 ) )::NUMERIC(20,2) AS click_acc,
         (100.0 * SUM(r.rev) / NULLIF ( SUM(e.orev), 0 ) )::NUMERIC(20,2) AS rev_acc,
          (100.0 * SUM((c.spend/NULLIF( c.clicks, 0 )) /NULLIF((e.ocost/NULLIF( e.oclicks, 0 )),0)) )::NUMERIC(20,2) AS CPC_acc,
          (100.0 * SUM((r.rev/NULLIF(c.clicks, 0 )) /NULLIF((e.orev/NULLIF( e.oclicks, 0 )),0))) ::NUMERIC(20,2) AS RPC_acc
  FROM ( SELECT pid,adid,
              device,
              sum(oclicks) as oclicks,
              sum(ocost) as ocost,
              sum(orev) as orev
       FROM (
              SELECT e.historical_pid as pid,e.adid,
                     e.device,
                     ((e.xtime + INTERVAL '6 hours') at time zone tz)::date,
                     avg(e.clicks)                                 as oclicks,
                     avg(coalesce(total_cost, e.clicks * e.cost))  as ocost,
                     avg(e.revenue)                                as orev
              FROM ad_execute_inserts e
                     JOIN user_portfolios up ON (e.historical_pid = up.pid)
                     JOIN users U ON (up.userid = u.userid)
                     JOIN timezones t ON (t.tzid = u.tzid)
              WHERE up.pid = (%1$s)
                and ((e.xtime + INTERVAL '6 hours') at time zone tz) between DATE '%2$s' and DATE '%3$s' + INTERVAL '1 day'
                and e.xtime between (DATE '%2$s' - INTERVAL '1 day') and (DATE '%3$s' + INTERVAL '2 day')
              group by 2,3,4,1) daily_avg
       group by adid, device,pid ) AS e
         FULL JOIN (
    SELECT wh.pid,
           wh.adid,
           CASE WHEN WH.sid in (3, 94, 10) AND device = 'm' THEN device ELSE 'b' END as device,
           SUM(wh.clicks)                                                            AS clicks,
           SUM(wh.cost)                                                              AS spend
    FROM day_sem_keyword_agg WH
    WHERE WH.date >= DATE '%2$s'
      AND WH.date <= DATE '%3$s'
      AND WH.pid = (%1$s)
      AND WH.sid <> 77
    GROUP BY wh.adid, 3,1
    UNION ALL
    SELECT wh.pid,
           wh.adid,
           'b'::TEXT          as device,
           SUM(wh.est_clicks) AS clicks,
           SUM(wh.est_cost)   AS spend
    FROM day_trig_keyword_agg WH
    WHERE WH.date >= DATE '%2$s'
      AND WH.date <= DATE '%3$s'
      AND WH.pid = (%1$s)
      AND WH.sid = 77
    GROUP BY wh.adid, wh.pid
    ) AS c USING (adid, device, pid)
         FULL JOIN (
    SELECT wh.pid,
           wh.adid,
           CASE WHEN device = 'm' THEN device ELSE 'b'::TEXT END                                                   as device,
           SUM(coalesce(f.mobile_weight, weight) * (COALESCE(CT_VALUE_LAST, 0) + COALESCE(CTVT_VALUE_LAST, 0)) ) AS rev
    FROM day_revenue_keyword_agg WH
           JOIN portfolio_objectives PO ON (PO.pid = WH.pid)
           JOIN objective_function F ON (F.objid = PO.objid AND F.propertyid = WH.propertyid)
    WHERE WH.date >= DATE '%2$s'
      AND WH.date <= DATE '%3$s'
      AND WH.pid = (%1$s)
    GROUP BY wh.adid, 3,1 ) AS r USING (adid, device,pid)
  GROUP BY clicklevel,pid
  ORDER BY clicklevel,pid; ", pid1,date3,date4)
admin_opt_Clicklevel_2 = amo_db_get(query=admin_query_Click_level_accuracy_2, db_tag =dbtag1, dc = dc_choose1, debug = TRUE)
# Zero-fill, attach portfolio names, zero-fill again for unmatched pids, and
# order by pid (descending).
admin_opt_Clicklevel_2[is.na(admin_opt_Clicklevel_2)] <- 0
admin_opt_Clicklevel_2 <- merge(admin_pidd, admin_opt_Clicklevel_2, by.x = "pid",by.y = "pid", all.x = TRUE)
admin_opt_Clicklevel_2[is.na(admin_opt_Clicklevel_2)] <- 0
admin_opt_Clicklevel_2 <- admin_opt_Clicklevel_2[order(admin_opt_Clicklevel_2$pid, decreasing = TRUE),]
#
# admin_opt_Clicklevel_2 <- admin_opt_Clicklevel_2[admin_opt_Clicklevel_2$portfolio_name == "Grainger PLA",]
# admin_opt_Clicklevel_2 <- admin_opt_Clicklevel_2[order(admin_opt_Clicklevel_2$clicklevel, decreasing = FALSE),]
#
# target <- c("0.00","0.10","1.00","10.00","100.00","1000.00","10000.00")
# admin_opt_v2 <- admin_opt_Clicklevel_2%>%filter(clicklevel %in% target)
# admin_opt_v2 <- admin_opt_v2 %>% group_by(portfolio_name) %>%mutate(total_cost=sum(act_cost))
# admin_opt_v2 <- admin_opt_v2 %>%filter(clicklevel=="0.00")%>%group_by(portfolio_name)%>%mutate(spendperc0=100*act_cost/total_cost)
#
#
# target <- c("0.00","0.10","1.00","10.00","100.00","1000.00","10000.00")
# admin_opt_v3 <- admin_opt_Clicklevel_2%>%filter(clicklevel %in% target)
# admin_opt_v3 <- admin_opt_v3 %>% select(portfolio_name,clicklevel, act_cost, act_rev, pred_cost, pred_rev)
# admin_opt_v3 <- admin_opt_v3 %>% group_by(portfolio_name) %>% mutate(rank = rank(desc(act_cost)), total_cost= sum(act_cost)) %>% arrange(rank)
# admin_opt_v3 <- admin_opt_v3%>% group_by(portfolio_name) %>% mutate(spendperc=100*act_cost/total_cost)
# admin_opt_v3 <- admin_opt_v3 %>% mutate_at(vars(spendperc), funs(round(., 2)))
# admin_opt_v3 <- admin_opt_v3 %>% mutate(act_roas=act_rev/act_cost)
# admin_opt_v3 <- admin_opt_v3 %>% mutate(pred_roas=pred_rev/pred_cost)
# admin_opt_v3 <- admin_opt_v3 %>% mutate(roas_acc=act_roas/pred_roas*100)
# admin_opt_v3[is.na(admin_opt_v3)] <- 0
#
# admin_opt_v3$Variance_di <- var(admin_opt_v3$act_roas)
# admin_opt_v3$avg_di <- mean(admin_opt_v3$act_roas)
# admin_opt_v3$dispersion_index_cl <- admin_opt_v3$Variance_di/admin_opt_v3$avg_di
#
# admin_opt_v3$Variance_er <- var(admin_opt_v3$roas_acc)
# admin_opt_v3$avg_er <- mean(admin_opt_v3$roas_acc)
# admin_opt_v3$error_rate_cl <- admin_opt_v3$Variance_er/admin_opt_v3$avg_er
#
# di_cl <- unique(admin_opt_v3$dispersion_index_cl)
# er_cl <- unique(admin_opt_v3$error_rate_cl)
#
# p1 <- plot_ly(admin_opt_v3, labels = ~ clicklevel, values = ~ spendperc, type = 'pie', text = ~paste("Click Level:",clicklevel, ",","Spend Percent:", spendperc), hoverinfo="text", textinfo="text") %>%
# layout( xaxis = list(showgrid = FALSE, zeroline = FALSE, showticklabels = FALSE),
# yaxis = list(showgrid = FALSE, zeroline = FALSE, showticklabels = FALSE))
# print(p1)
#
# admin_opt_CL_v5 <- admin_opt_v3 %>% select(portfolio_name,act_roas, pred_roas,clicklevel)
# admin_opt_CL_v5 <- as.data.frame(admin_opt_CL_v5)
# admin_opt_CL_v6 <- melt(admin_opt_CL_v5)
#
# ggplot(admin_opt_CL_v6, aes(x=clicklevel, y=value, fill=variable)) +
# geom_bar(stat='identity', position='dodge2')+ facet_grid(. ~ portfolio_name)
# --- Device-level accuracy -------------------------------------------------
# Predicted vs actual clicks/cost/revenue split by device ('m' mobile vs 'b'
# everything else), same e/c/r structure as the match-type query above.
admin_query_device_accuracy_1 =
  sprintf("SELECT c.pid,device,
        sum(e.oclicks)::NUMERIC(20,2)     AS pred_clicks,
        sum(c.clicks)                      AS act_clicks,
        (100.0 * SUM(c.clicks) / NULLIF( SUM(e.oclicks), 0 ) )::NUMERIC(20,2) AS click_acc,
        sum(e.orev)::NUMERIC(20,4)         AS pred_rev,
        sum(r.rev)::NUMERIC(20,4)          AS act_rev,
        sum( COALESCE((e.orev), 0) - COALESCE((r.rev), 0) )::NUMERIC(20,4) AS rev_diff,
        (100.0 * SUM(r.rev) / NULLIF ( SUM(e.orev), 0 ) )::NUMERIC(20,2) AS rev_acc,
        SUM(e.ocost)::NUMERIC(20,2)        AS pred_cost,
        SUM(c.spend)                       AS act_cost,
        ( COALESCE(SUM(e.ocost), 0) - COALESCE(SUM(c.spend), 0) )::NUMERIC(20,2) AS cost_diff,
        (100.0 * SUM(c.spend)/ NULLIF( SUM(e.ocost), 0 ) )::NUMERIC(20,2) AS cost_acc,
          (100.0 * SUM((c.spend/NULLIF( c.clicks, 0 )) /NULLIF((e.ocost/NULLIF( e.oclicks, 0 )),0)) )::NUMERIC(20,2) AS CPC_acc,
          (100.0 * SUM((r.rev/NULLIF(c.clicks, 0 )) /NULLIF((e.orev/NULLIF( e.oclicks, 0 )),0))) ::NUMERIC(20,2) AS RPC_acc
FROM ( SELECT pid,adid, device,
            sum(oclicks) as oclicks,
            sum(orev) as orev,
            sum(ocost) as ocost
     FROM ( SELECT e.historical_pid as pid,e.adid,
                  e.device,
                  ((e.xtime + INTERVAL '6 hours') at time zone tz)::date,
                  avg(e.clicks) as oclicks,
                  avg(e.revenue) as orev,
                  avg(coalesce(total_cost, e.clicks * e.cost)) as ocost
           FROM ad_execute_inserts e
                  JOIN user_portfolios up ON (e.historical_pid = up.pid)
                  JOIN users U ON (up.userid = u.userid)
                  JOIN timezones t ON (t.tzid = u.tzid)
           WHERE up.pid = (%1$s)
             and ((e.xtime + INTERVAL '6 hours') at time zone tz) between DATE '%2$s' and DATE '%3$s' + INTERVAL '1 day'
             and e.xtime between (DATE '%2$s' - INTERVAL '1 day') and (DATE '%3$s' + INTERVAL '2 day')
           group by 1,e.adid, e.device, 4 ) daily_avg
     group by pid,adid, device ) AS e
       RIGHT OUTER JOIN (SELECT
                          WH.pid,WH.adid,
                          CASE WHEN device = 'm' THEN device ELSE 'b'::TEXT END as device,
                          SUM(wh.clicks)                                        AS clicks,
                          SUM(wh.cost)                                          AS spend
                        FROM day_sem_keyword_agg WH
                        WHERE WH.date >= DATE '%2$s'
                          AND WH.date <= DATE '%3$s'
                          AND WH.pid = (%1$s)
                          AND WH.sid <> 77
                        GROUP BY wh.pid, wh.adid, 3
                        UNION ALL
                        SELECT
                          wh.pid,wh.adid,
                          'b'::TEXT      as device,
                          SUM(wh.est_clicks) AS clicks,
                          SUM(wh.est_cost)   AS spend
                        FROM day_trig_keyword_agg WH
                        WHERE WH.date >= DATE '%2$s'
                          AND WH.date <= DATE '%3$s'
                          AND WH.pid = (%1$s)
                          AND WH.sid = 77
                        GROUP BY wh.pid, wh.adid, 3
) AS c USING (pid,adid, device)
       LEFT OUTER JOIN (SELECT wh.pid,wh.adid,
                             CASE WHEN device = 'm' THEN device ELSE 'b'::TEXT END                        as device,
                             SUM(coalesce(f.mobile_weight, weight) * (COALESCE(CT_VALUE_LAST, 0) + COALESCE(CTVT_VALUE_LAST, 0)) ) AS rev
                      FROM day_revenue_keyword_agg WH
                             JOIN portfolio_objectives PO ON (PO.pid = WH.pid)
                             JOIN objective_function F ON (F.objid = PO.objid AND F.propertyid = WH.propertyid)
                      WHERE WH.date >= DATE '%2$s'
                        AND WH.date <= DATE '%3$s'
                        AND WH.pid = (%1$s)
                      GROUP BY wh.pid,wh.adid, 3 ) AS r USING (pid,adid, device)
       JOIN ads using (adid)
       JOIN mgroupid_mtid using (mgroupid)
       JOIN match_types using (mtid)
GROUP BY 1,2
ORDER BY 8 DESC, 12 DESC;", pid1,date3, date4)
#,date1,date2,date1,date2,active_pid_or1,date1,date2,active_pid_or1,date1,date2,active_pid_or1)
admin_opt_device_1 = amo_db_get(query=admin_query_device_accuracy_1, db_tag = dbtag1, dc = dc_choose1, debug = TRUE)
#
# Zero-fill, attach portfolio names, give unmatched rows a "None" device,
# zero-fill the remainder, and order by pid (descending).
admin_opt_device_1[is.na(admin_opt_device_1)] <- 0
admin_opt_device_1 <- merge(admin_pidd, admin_opt_device_1, by.x = "pid",by.y = "pid", all.x = TRUE)
admin_opt_device_1$device[is.na(admin_opt_device_1$device)] <- "None"
admin_opt_device_1[is.na(admin_opt_device_1)] <- 0
admin_opt_device_1 <- admin_opt_device_1[order(admin_opt_device_1$pid, decreasing = TRUE),]
# admin_opt_device_1 <- admin_opt_device_1[admin_opt_device_1$portfolio_name == "Grainger PLA",]
#
#
# admin_opt_v4 <- admin_opt_device_1 %>% select(portfolio_name,device, act_cost, act_rev, pred_cost, pred_rev)
# admin_opt_v4 <- admin_opt_v4 %>% group_by(portfolio_name) %>% mutate(rank = rank(desc(act_cost)), total_cost= sum(act_cost)) %>% arrange(rank)
# admin_opt_v4 <- admin_opt_v4%>% group_by(portfolio_name) %>% mutate(spendperc=100*act_cost/total_cost)
# admin_opt_v4 <- admin_opt_v4 %>% mutate_at(vars(spendperc), funs(round(., 2)))
# admin_opt_v4 <- admin_opt_v4 %>% mutate(act_roas=act_rev/act_cost)
# admin_opt_v4 <- admin_opt_v4 %>% mutate(pred_roas=pred_rev/pred_cost)
# admin_opt_v4 <- admin_opt_v4 %>% mutate(roas_acc=act_roas/pred_roas*100)
# admin_opt_v4[is.na(admin_opt_v4)] <- 0
#
#
# data_1 <- admin_opt_v4
# data <- admin_opt_v4 %>% ungroup() %>% select(device, act_cost, pred_cost, act_rev, pred_rev, rank, spendperc, act_roas, pred_roas, roas_acc)
#
# admin_opt_v4$Variance_di <- var(admin_opt_v4$act_roas)
# admin_opt_v4$avg_di <- mean(admin_opt_v4$act_roas)
# admin_opt_v4$dispersion_index_dev <- admin_opt_v4$Variance_di/admin_opt_v4$avg_di
#
# admin_opt_v4$Variance_er <- var(admin_opt_v4$roas_acc)
# admin_opt_v4$avg_er <- mean(admin_opt_v4$roas_acc)
# admin_opt_v4$error_rate_dev <- admin_opt_v4$Variance_er/admin_opt_v4$avg_er
#
# var_dev <- unique(admin_opt_v4$dispersion_index_dev)
# er_dev <- unique(admin_opt_v4$error_rate_dev)
#
# admin_final$devdi <- var_dev
# admin_final$dever <- er_dev
#
# admin_final <- admin_final %>% mutate(reco_devf = case_when(devdi > 0.2 & DEVfeature == "Not_Enabled" & Spend_Scenario == "Overspend" ~ "Enable",
# devdi > 0.2 & DEVfeature == "Not_Enabled" & Spend_Scenario == "Underspend"~ "Enable",
# devdi > 0.2 & DEVfeature == "Not_Enabled" & RPC_Scenario =="Poor_RPC" ~ "Enable",
# Spend_Scenario == "Overspend" & devdi > 0.2 & DEVfeature == "Enabled" & dever > 0.2 & click_coverage > 0.1 ~ "Further_investigate",
# Spend_Scenario == "Overspend" & devdi > 0.2 & DEVfeature == "Enabled" & dever > 0.2 & click_coverage < 0.1 ~ "Disable",
# Spend_Scenario == "Overspend" & devdi > 0.2 & DEVfeature == "Enabled" & dever > 0.2 & revenue_coverage < 0.05 ~ "Disable",
# Spend_Scenario == "Overspend" & devdi > 0.2 & DEVfeature == "Enabled" & dever < 0.2 ~ "Disable",
# Spend_Scenario == "Overspend" & devdi > 0.2 & DEVfeature == "Enabled" & dever > 0.2 & revenue_coverage >0.05 ~ "Further_investigate",
# Spend_Scenario == "Underspend" & devdi > 0.2 & DEVfeature == "Enabled" & dever > 0.2 & revenue_coverage > 0.05 ~ "Further_investigate",
# Spend_Scenario == "Underspend" & devdi > 0.2 & DEVfeature == "Enabled" & dever > 0.2 & revenue_coverage < 0.05 ~ "Disable",
# Spend_Scenario == "Underspend" & devdi > 0.2 & DEVfeature == "Enabled" & dever < 0.2 ~ "Disable",
# Spend_Scenario == "Underspend" & devdi > 0.2 & DEVfeature == "Enabled" & dever > 0.2 & click_coverage < 0.1 ~ "Disable",
# Spend_Scenario == "Underspend" & devdi > 0.2 & DEVfeature == "Enabled" & dever > 0.2 & click_coverage > 0.1 ~ "Further_investigate",
# RPC_Scenario == "Poor_RPC" & devdi > 0.2 & DEVfeature == "Enabled" & dever > 0.2 & revenue_coverage > 0.05 ~ "Further_investigate",
# RPC_Scenario == "Poor_RPC" & devdi > 0.2 & DEVfeature == "Enabled" & dever > 0.2 & revenue_coverage < 0.05 ~ "Disable",
# RPC_Scenario == "Poor_RPC" & devdi > 0.2 & DEVfeature == "Enabled" & dever > 0.2 & click_coverage < 0.1 ~ "Disable",
# RPC_Scenario == "Poor_RPC" & devdi > 0.2 & DEVfeature == "Enabled" & dever < 0.2 ~ "Disable",
# RPC_Scenario == "Poor_RPC" & devdi > 0.2 & DEVfeature == "Enabled" & dever > 0.2 & click_coverage > 0.1 ~ "Further_investigate",
# TRUE ~ DEVfeature))
#
# df <- data.frame()
#
# df <- rbind(df, c("Match Type",di_mt,er_mt), c("Device",var_dev,er_dev),c("Click Level",di_cl,er_cl),c("Day of Week",di_dow,er_dow))
#
# df[ df == "NaN" ] <- NA
#
# names(df) <- c("Dimension", "Variance","Error Rate")
#
# df
#
# # , c("Device",var_dev,er_dev),c("Click Level",var_cl,er_cl),c("Day of Week",var_dow,er_dow)
#
# library(plyr)
# rename(df, c("X1"="Dimension", "X2"="Variance", "X3"="Error Rate"))
#
admin_final_v1 <- admin_final %>%select(portfolio_name,campaign_spend_multiple,reco_sm, param_value_zil, reco_ZILstatus, param_value_lb, reco_LearningBudget, inc_unconstrained_revenue, reco_max_bid, param_value_intraday, reco_Intraday)
#reco_cost_HL,reco_rev_HL
admin_finall_v12 <- melt(as.data.table(admin_final_v1), id.vars = "portfolio_name")
admin_finall_v13 <- admin_finall_v12 %>% filter(variable == "campaign_spend_multiple" | variable == "param_value_zil" | variable == "param_value_lb" | variable == "inc_unconstrained_revenue")
colnames(admin_finall_v13) <- c("portfolio_name","Settings","Current_value")
library("plyr")
admin_finall_v13$Settings <- revalue(admin_finall_v13$Settings, c("param_value_zil"="Zero_imp_rule", "param_value_lb"="Learning_budget", "inc_unconstrained_revenue" = "Portfolio_max_bid"))
admin_finall_v14 <- admin_finall_v12 %>% filter(variable == "reco_sm" | variable == "reco_ZILstatus" | variable == "reco_LearningBudget" | variable == "reco_max_bid" )
colnames(admin_finall_v14) <- c("portfolio_name","Settings","Recommended_value")
admin_finall_v14$Settings <- revalue(admin_finall_v14$Settings, c("reco_sm"="campaign_spend_multiple","reco_ZILstatus"="Zero_imp_rule" ,"reco_LearningBudget"="Learning_budget", "reco_max_bid" = "Portfolio_max_bid"))
admin_finall_v15 <- merge(admin_finall_v13, admin_finall_v14, by = c("portfolio_name","Settings"))
admin_finall_v15[is.na(admin_finall_v15)] <- 0
data_1 <- admin_final[admin_final$client_account_name == "grainger",]
data_1 <- data_1 %>% select(portfolio_name, pred_cost,cost_acc, rpc_acc, Scenario)
write.csv(data_1, file="portfolios.csv")
data <- list(df1=admin_final, df2=admin_finall_v15, df3=admin_daily_v11, df4=admin_daily_v12, df5=admin_daily_v3, df6=admin_opt_MT_v2, df7=admin_opt_Clicklevel_2, df8=admin_opt_device_1, df9=admin_daily_v31, df10=admin_daily_v32)
return(data)
}
})
})
output$choose_dataset_1 <- renderUI({
  # Client picker: every distinct username from the global `users` table,
  # preceded by a placeholder entry.
  client_choices <- c("choose your desired client",
                      unique(as.character((as.data.frame(users))$username)))
  selectInput("username", "Username:", client_choices)
})
output$choose_dataset <- renderUI({
  # Dbtag/DC dropdown starts with a blank choice; the real choices are
  # pushed in by the observeEvent(input$username) handler via
  # updateSelectInput().
  placeholder <- " "
  selectInput("Dbtag_DC", "Dbtag_DC:", placeholder)
})
output$choose_dataset_2 <- renderUI({
  # Portfolio picker built from the reactive client dataset.
  pf <- as.data.frame(passdata())
  portfolio_choices <- unique(as.character(pf$portfolionamepid))
  selectInput("PortfolioNamePID", "Portfolio_Name, PID:",
              c("select your desired portfolio", portfolio_choices))
})
observeEvent(input$username, {
  # When the client changes, restrict the Dbtag_DC choices to the rows of
  # `users` belonging to that username.
  user_tbl <- as.data.frame(users)
  matching <- as.character(user_tbl$dbtagdc)[
    as.character(user_tbl$username) == input$username]
  updateSelectInput(session, 'Dbtag_DC', choices = unique(matching))
})
output$selected_var <- renderText({
  # Echo the chosen client account back to the UI.
  selected_client <- input$username
  paste("You have selected the Client Account Name : ", selected_client)
})
output$daterange1 <- renderText({
  # Diagnostics cover the trailing 7 complete days (yesterday back to
  # 8 days ago).
  today <- Sys.Date()
  paste("The diagnostics are based on the cost and rpc accuracies for the last 7 days: ",
        today - 8, "-", today - 1)
})
output$daterange2 <- renderText({
  # Recommendations use a 90-day accuracy window, except the cost-model
  # half-life recommendations, which use a 30-day window.
  yesterday <- Sys.Date() - 1
  paste("The recommendations are based on the model accuracies stemmed for the last 90 days: ",
        Sys.Date() - 90, "-", yesterday,
        "except cost model half life recommendations that stem from last 30 days:",
        Sys.Date() - 30, "-", yesterday)
})
# Install formattable only when it is missing. The previous unconditional
# install.packages() call re-downloaded the package on every app start and
# can fail outright on a deployment host with no internet access or no
# write permission to the library path.
if (!requireNamespace("formattable", quietly = TRUE)) {
  install.packages("formattable")
}
library(formattable)
output$tbl_1 <- DT::renderDataTable(server = FALSE, {
  # Portfolio-level summary table with copy/CSV export buttons and
  # conditional styling (spend bar, red highlight for accuracies outside
  # the 80-120% band).
  data <- as.data.frame(passdata())
  data <- data %>% select(pid, portfolio_name, portfolio_status, pred_cost, cost_acc, rpc_acc, Scenario)
  data <- setNames(data, c("PID", "portfolio_name", "portfolio_status", "predicted_cost",
                           "cost_acc_%", "rpc_acc_%", "Scenario"))
  # NOTE(review): the original built `data` inside the DT::datatable()
  # argument block and relied on lazy evaluation leaking the `data <-`
  # assignment into this scope so that the later styleColorBar() call could
  # read data$predicted_cost. Computing `data` up front makes that
  # dependency explicit without changing the rendered output.
  DT::datatable(data,
                extensions = 'Buttons',
                options = list(scrollX = TRUE,
                               dom = 'Bfrtip',
                               buttons = c('copy', 'csv')),
                rownames = FALSE) %>%
    formatStyle('predicted_cost',
                background = styleColorBar(range(data$predicted_cost), 'lightblue'),
                backgroundSize = '95% 88%',
                backgroundRepeat = 'no-repeat',
                backgroundPosition = 'right') %>%
    formatCurrency('predicted_cost', currency = "", interval = 3, mark = ",") %>%
    formatStyle('cost_acc_%',
                color = styleInterval(c(80, 120), c('red', 'black', 'red'))) %>%
    formatRound('cost_acc_%', digits = 2) %>%
    formatStyle('rpc_acc_%',
                color = styleInterval(c(80, 120), c('red', 'black', 'red'))) %>%
    formatRound('rpc_acc_%', digits = 2)
})
output$selected_var_1 <- renderText({
  # Headline diagnostic scenario for the selected portfolio.
  portfolio <- as.data.frame(passdata1()$df1)
  paste("Diagnostic : ", portfolio$Scenario)
})
output$selected_var_3 <- renderText({
  # Cost model accuracy (%) for the selected portfolio.
  portfolio <- as.data.frame(passdata1()$df1)
  paste("Cost_Accuracy_% : ", portfolio$cost_acc)
})
output$selected_var_4 <- renderText({
  # Revenue-per-click model accuracy (%) for the selected portfolio.
  portfolio <- as.data.frame(passdata1()$df1)
  paste("RPC_Accuracy_% : ", portfolio$rpc_acc)
})
output$dimdt <- DT::renderDataTable(DT::datatable({
  # Rank four slicing dimensions (match type, device, click level, day of
  # week) by how heterogeneous actual ROAS is across their segments. For
  # each dimension two variance/mean ratios are reported:
  #   Dispersion_Index = var(actual ROAS) / mean(actual ROAS)
  #   Error_Rate       = var(ROAS accuracy %) / mean(ROAS accuracy %)
  data   <- as.data.frame(passdata1()$df6)   # match-type breakdown
  data_1 <- as.data.frame(passdata1()$df8)   # device breakdown
  data_2 <- as.data.frame(passdata1()$df7)   # click-level breakdown
  data_3 <- as.data.frame(passdata1()$df4)   # daily breakdown

  # --- Match type ---------------------------------------------------------
  data <- data %>% select(portfolio_name, match_type, act_cost, act_rev, pred_cost, pred_rev)
  data <- data %>% group_by(portfolio_name) %>% mutate(rank = rank(desc(act_cost)), total_cost = sum(act_cost)) %>% arrange(rank)
  data <- data %>% group_by(portfolio_name) %>% mutate(spendperc = 100 * act_cost / total_cost)
  data <- data %>% mutate_at(vars(spendperc), funs(round(., 2)))
  colnames(data)[colnames(data) == "match_type"] <- "matchtype"
  data <- data %>% mutate(act_roas = act_rev / act_cost)
  data <- data %>% mutate(pred_roas = pred_rev / pred_cost)
  data <- data %>% mutate(roas_acc = act_roas / pred_roas * 100)
  data[is.na(data)] <- 0
  var_mt <- var(data$act_roas) / mean(data$act_roas)
  er_mt  <- var(data$roas_acc) / mean(data$roas_acc)

  # --- Device -------------------------------------------------------------
  data_1 <- data_1 %>% select(portfolio_name, device, act_cost, act_rev, pred_cost, pred_rev)
  data_1 <- data_1 %>% group_by(portfolio_name) %>% mutate(rank = rank(desc(act_cost)), total_cost = sum(act_cost)) %>% arrange(rank)
  data_1 <- data_1 %>% group_by(portfolio_name) %>% mutate(spendperc = 100 * act_cost / total_cost)
  data_1 <- data_1 %>% mutate_at(vars(spendperc), funs(round(., 2)))
  data_1 <- data_1 %>% mutate(act_roas = act_rev / act_cost)
  data_1 <- data_1 %>% mutate(pred_roas = pred_rev / pred_cost)
  data_1 <- data_1 %>% mutate(roas_acc = act_roas / pred_roas * 100)
  data_1[is.na(data_1)] <- 0
  var_dev <- var(data_1$act_roas) / mean(data_1$act_roas)
  er_dev  <- var(data_1$roas_acc) / mean(data_1$roas_acc)

  # --- Click level (only the canonical buckets) ---------------------------
  data_2 <- data_2[order(data_2$clicklevel, decreasing = FALSE), ]
  target <- c("0.00", "0.10", "1.00", "10.00", "100.00", "1000.00", "10000.00")
  data_2 <- data_2 %>% filter(clicklevel %in% target)
  data_2 <- data_2 %>% select(portfolio_name, clicklevel, act_cost, act_rev, pred_cost, pred_rev)
  data_2 <- data_2 %>% group_by(portfolio_name) %>% mutate(rank = rank(desc(act_cost)), total_cost = sum(act_cost)) %>% arrange(rank)
  data_2 <- data_2 %>% group_by(portfolio_name) %>% mutate(spendperc = 100 * act_cost / total_cost)
  data_2 <- data_2 %>% mutate_at(vars(spendperc), funs(round(., 2)))
  data_2 <- data_2 %>% mutate(act_roas = act_rev / act_cost)
  data_2 <- data_2 %>% mutate(pred_roas = pred_rev / pred_cost)
  data_2 <- data_2 %>% mutate(roas_acc = act_roas / pred_roas * 100)
  data_2[is.na(data_2)] <- 0
  var_cl <- var(data_2$act_roas) / mean(data_2$act_roas)
  er_cl  <- var(data_2$roas_acc) / mean(data_2$roas_acc)

  # --- Day of week (daily rows aggregated per weekday) --------------------
  data_3[is.na(data_3)] <- 0
  library(lubridate)
  data_3$date <- as.Date(data_3$date)
  data_3$dow <- lubridate::wday(data_3$date, label = TRUE)
  data_v3 <- data_3 %>% select(portfolio_name, dow, act_cost, act_rev, pred_cost, pred_rev)
  data_v4 <- aggregate(. ~ portfolio_name + dow, data_v3, sum)
  data_v4 <- data_v4 %>% mutate(act_roas = act_rev / act_cost)
  data_v4 <- data_v4 %>% mutate(pred_roas = pred_rev / pred_cost)
  data_v4 <- data_v4 %>% mutate(roas_acc = act_roas / pred_roas * 100)
  # BUG FIX: the original read data_v4$dispersion_index / data_v4$error_rate,
  # names that only resolved through `$` partial matching against the
  # *_dow columns. Compute and reference the real names explicitly.
  var_dow <- var(data_v4$act_roas) / mean(data_v4$act_roas)
  er_dow  <- var(data_v4$roas_acc) / mean(data_v4$roas_acc)

  # BUG FIX: rbind()-ing c("Match Type", var_mt, er_mt) coerced the metric
  # columns to character, so order() below sorted them lexicographically
  # and the formatRound() calls had nothing numeric to round. Build the
  # summary with numeric metric columns instead.
  df <- data.frame(
    Dimension = c("Match Type", "Device", "Click Level", "Day of Week"),
    Dispersion_Index = as.numeric(c(var_mt, var_dev, var_cl, var_dow)),
    Error_Rate = as.numeric(c(er_mt, er_dev, er_cl, er_dow)),
    stringsAsFactors = FALSE
  )
  df[is.na(df)] <- 0  # NA/NaN metrics (e.g. single-segment dimensions) -> 0
  df <- df[order(df$Dispersion_Index, decreasing = TRUE), ]
  rownames(df) <- NULL
  df
},
extensions = 'Buttons',
options = list(scrollX = TRUE,
               dom = 'Bfrtip',
               buttons = c('copy', 'csv')),
rownames = FALSE) %>%
  formatRound('Dispersion_Index', digits = 3) %>%
  formatRound('Error_Rate', digits = 3)
)
output$admin_dev <- DT::renderDataTable(DT::datatable({
  # Per-device actual vs predicted cost/revenue with ROAS accuracy,
  # ranked by actual spend within the portfolio.
  dev_tbl <- as.data.frame(passdata1()$df8) %>%
    select(portfolio_name, device, act_cost, act_rev, pred_cost, pred_rev) %>%
    group_by(portfolio_name) %>%
    mutate(rank = rank(desc(act_cost)), total_cost = sum(act_cost)) %>%
    arrange(rank) %>%
    mutate(spendperc = 100 * act_cost / total_cost) %>%
    mutate_at(vars(spendperc), funs(round(., 2))) %>%
    mutate(act_roas = act_rev / act_cost,
           pred_roas = pred_rev / pred_cost,
           roas_acc = act_roas / pred_roas * 100)
  dev_tbl[is.na(dev_tbl)] <- 0  # zero-cost / zero-prediction rows -> 0
  dev_tbl %>%
    ungroup() %>%
    select(device, act_cost, pred_cost, act_rev, pred_rev,
           rank, spendperc, act_roas, pred_roas, roas_acc)
},
extensions = 'Buttons',
options = list(scrollX = TRUE,
               dom = 'Bfrtip',
               buttons = c('copy', 'csv')),
rownames = FALSE) %>%
  formatRound('act_cost', digits = 3) %>%
  formatRound('act_rev', digits = 3) %>%
  formatRound('pred_cost', digits = 3) %>%
  formatRound('pred_rev', digits = 3) %>%
  formatRound('spendperc', digits = 3) %>%
  formatRound('act_roas', digits = 3) %>%
  formatRound('pred_roas', digits = 3) %>%
  formatRound('roas_acc', digits = 3))
output$admin_MT <- DT::renderDataTable(DT::datatable({
  # Per-match-type actual vs predicted cost/revenue with ROAS accuracy,
  # ranked by actual spend within the portfolio.
  mt_tbl <- as.data.frame(passdata1()$df6) %>%
    select(portfolio_name, match_type, act_cost, act_rev, pred_cost, pred_rev) %>%
    group_by(portfolio_name) %>%
    mutate(rank = rank(desc(act_cost)), total_cost = sum(act_cost)) %>%
    arrange(rank) %>%
    mutate(spendperc = 100 * act_cost / total_cost) %>%
    mutate_at(vars(spendperc), funs(round(., 2)))
  colnames(mt_tbl)[colnames(mt_tbl) == "match_type"] <- "matchtype"
  mt_tbl <- mt_tbl %>%
    mutate(act_roas = act_rev / act_cost,
           pred_roas = pred_rev / pred_cost,
           roas_acc = act_roas / pred_roas * 100)
  mt_tbl[is.na(mt_tbl)] <- 0  # zero-cost / zero-prediction rows -> 0
  mt_tbl %>%
    ungroup() %>%
    select(matchtype, act_cost, pred_cost, act_rev, pred_rev,
           rank, spendperc, act_roas, pred_roas, roas_acc)
},
extensions = 'Buttons',
options = list(scrollX = TRUE,
               dom = 'Bfrtip',
               buttons = c('copy', 'csv')),
rownames = FALSE) %>%
  formatRound('act_cost', digits = 3) %>%
  formatRound('act_rev', digits = 3) %>%
  formatRound('pred_cost', digits = 3) %>%
  formatRound('pred_rev', digits = 3) %>%
  formatRound('spendperc', digits = 3) %>%
  formatRound('act_roas', digits = 3) %>%
  formatRound('pred_roas', digits = 3) %>%
  formatRound('roas_acc', digits = 3))
output$admin_CL <- DT::renderDataTable(DT::datatable({
  # Per click-level bucket actual vs predicted cost/revenue with ROAS
  # accuracy; only the canonical buckets are shown, sorted ascending.
  buckets <- c("0.00", "0.10", "1.00", "10.00", "100.00", "1000.00", "10000.00")
  cl_tbl <- as.data.frame(passdata1()$df7)
  cl_tbl <- cl_tbl[order(cl_tbl$clicklevel, decreasing = FALSE), ]
  cl_tbl <- cl_tbl %>%
    filter(clicklevel %in% buckets) %>%
    select(portfolio_name, clicklevel, act_cost, act_rev, pred_cost, pred_rev) %>%
    group_by(portfolio_name) %>%
    mutate(rank = rank(desc(act_cost)), total_cost = sum(act_cost)) %>%
    arrange(rank) %>%
    mutate(spendperc = 100 * act_cost / total_cost) %>%
    mutate_at(vars(spendperc), funs(round(., 2))) %>%
    mutate(act_roas = act_rev / act_cost,
           pred_roas = pred_rev / pred_cost,
           roas_acc = act_roas / pred_roas * 100)
  cl_tbl[is.na(cl_tbl)] <- 0  # zero-cost / zero-prediction rows -> 0
  cl_tbl <- cl_tbl %>%
    ungroup() %>%
    select(clicklevel, act_cost, pred_cost, act_rev, pred_rev,
           rank, spendperc, act_roas, pred_roas, roas_acc)
  cl_tbl[order(cl_tbl$clicklevel, decreasing = FALSE), ]
},
extensions = 'Buttons',
options = list(scrollX = TRUE,
               dom = 'Bfrtip',
               buttons = c('copy', 'csv')),
rownames = FALSE) %>%
  formatRound('act_cost', digits = 3) %>%
  formatRound('act_rev', digits = 3) %>%
  formatRound('pred_cost', digits = 3) %>%
  formatRound('pred_rev', digits = 3) %>%
  formatRound('spendperc', digits = 3) %>%
  formatRound('act_roas', digits = 3) %>%
  formatRound('pred_roas', digits = 3) %>%
  formatRound('roas_acc', digits = 3))
output$textcl <- renderText({
  # Share of actual spend sitting in the "0.00" click-level bucket, i.e.
  # cost for which the model predicted no clicks at all.
  buckets <- c("0.00", "0.10", "1.00", "10.00", "100.00", "1000.00", "10000.00")
  cl <- as.data.frame(passdata1()$df7)
  cl <- cl[order(cl$clicklevel, decreasing = FALSE), ]
  cl <- cl %>%
    filter(clicklevel %in% buckets) %>%
    group_by(portfolio_name) %>%
    mutate(total_cost = sum(act_cost)) %>%
    filter(clicklevel == "0.00") %>%
    group_by(portfolio_name) %>%
    mutate(spendperc0 = 100 * act_cost / total_cost)
  paste("Unpredicted cost share:", round(unique(cl$spendperc0), digits = 3))
})
output$textdevdi <- renderText({
  # Dispersion index (variance / mean) of actual ROAS across devices.
  # A high value suggests device-level performance is heterogeneous.
  dev <- as.data.frame(passdata1()$df8) %>%
    select(portfolio_name, device, act_cost, act_rev, pred_cost, pred_rev) %>%
    group_by(portfolio_name) %>%
    mutate(rank = rank(desc(act_cost)), total_cost = sum(act_cost)) %>%
    arrange(rank) %>%
    mutate(spendperc = 100 * act_cost / total_cost) %>%
    mutate_at(vars(spendperc), funs(round(., 2))) %>%
    mutate(act_roas = act_rev / act_cost,
           pred_roas = pred_rev / pred_cost,
           roas_acc = act_roas / pred_roas * 100)
  dev[is.na(dev)] <- 0
  dev$dispersion_index_dev <- var(dev$act_roas) / mean(dev$act_roas)
  # Degenerate inputs (e.g. a single device row) yield NaN -> report 0.
  dev[dev == "NaN"] <- NA
  dev[is.na(dev)] <- 0
  paste("Device Dispersion Index:", round(unique(dev$dispersion_index_dev), digits = 3))
})
output$textmtdi <- renderText({
  # Dispersion index (variance / mean) of actual ROAS across match types.
  mt <- as.data.frame(passdata1()$df6) %>%
    select(portfolio_name, match_type, act_cost, act_rev, pred_cost, pred_rev) %>%
    group_by(portfolio_name) %>%
    mutate(rank = rank(desc(act_cost)), total_cost = sum(act_cost)) %>%
    arrange(rank) %>%
    mutate(spendperc = 100 * act_cost / total_cost) %>%
    mutate_at(vars(spendperc), funs(round(., 2)))
  colnames(mt)[colnames(mt) == "match_type"] <- "matchtype"
  mt <- mt %>%
    mutate(act_roas = act_rev / act_cost,
           pred_roas = pred_rev / pred_cost,
           roas_acc = act_roas / pred_roas * 100)
  mt[is.na(mt)] <- 0
  mt$dispersion_index_mt <- var(mt$act_roas) / mean(mt$act_roas)
  # Degenerate inputs (e.g. a single match-type row) yield NaN -> report 0.
  mt[mt == "NaN"] <- NA
  mt[is.na(mt)] <- 0
  paste("Match Type Dispersion Index:", round(unique(mt$dispersion_index_mt), digits = 3))
})
output$textcldi <- renderText({
  # Dispersion index (variance / mean) of actual ROAS across click-level
  # buckets (canonical buckets only).
  buckets <- c("0.00", "0.10", "1.00", "10.00", "100.00", "1000.00", "10000.00")
  cl <- as.data.frame(passdata1()$df7)
  cl <- cl[order(cl$clicklevel, decreasing = FALSE), ]
  cl <- cl %>%
    filter(clicklevel %in% buckets) %>%
    select(portfolio_name, clicklevel, act_cost, act_rev, pred_cost, pred_rev) %>%
    group_by(portfolio_name) %>%
    mutate(rank = rank(desc(act_cost)), total_cost = sum(act_cost)) %>%
    arrange(rank) %>%
    mutate(spendperc = 100 * act_cost / total_cost) %>%
    mutate_at(vars(spendperc), funs(round(., 2))) %>%
    mutate(act_roas = act_rev / act_cost,
           pred_roas = pred_rev / pred_cost,
           roas_acc = act_roas / pred_roas * 100)
  cl[is.na(cl)] <- 0
  cl$dispersion_index_cl <- var(cl$act_roas) / mean(cl$act_roas)
  # Degenerate inputs (e.g. a single bucket) yield NaN -> report 0.
  cl[cl == "NaN"] <- NA
  cl[is.na(cl)] <- 0
  paste("Click Level Dispersion Index:", round(unique(cl$dispersion_index_cl), digits = 3))
})
output$textcler <- renderText({
  # Error rate (variance / mean) of ROAS accuracy across click-level
  # buckets (canonical buckets only).
  buckets <- c("0.00", "0.10", "1.00", "10.00", "100.00", "1000.00", "10000.00")
  cl <- as.data.frame(passdata1()$df7)
  cl <- cl[order(cl$clicklevel, decreasing = FALSE), ]
  cl <- cl %>%
    filter(clicklevel %in% buckets) %>%
    select(portfolio_name, clicklevel, act_cost, act_rev, pred_cost, pred_rev) %>%
    group_by(portfolio_name) %>%
    mutate(rank = rank(desc(act_cost)), total_cost = sum(act_cost)) %>%
    arrange(rank) %>%
    mutate(spendperc = 100 * act_cost / total_cost) %>%
    mutate_at(vars(spendperc), funs(round(., 2))) %>%
    mutate(act_roas = act_rev / act_cost,
           pred_roas = pred_rev / pred_cost,
           roas_acc = act_roas / pred_roas * 100)
  cl[is.na(cl)] <- 0
  cl$error_rate_cl <- var(cl$roas_acc) / mean(cl$roas_acc)
  # Degenerate inputs (e.g. a single bucket) yield NaN -> report 0.
  cl[cl == "NaN"] <- NA
  cl[is.na(cl)] <- 0
  paste("Click Level Error Rate:", round(unique(cl$error_rate_cl), digits = 3))
})
output$textmter <- renderText({
  # Error rate (variance / mean) of ROAS accuracy across match types.
  mt <- as.data.frame(passdata1()$df6) %>%
    select(portfolio_name, match_type, act_cost, act_rev, pred_cost, pred_rev) %>%
    group_by(portfolio_name) %>%
    mutate(rank = rank(desc(act_cost)), total_cost = sum(act_cost)) %>%
    arrange(rank) %>%
    mutate(spendperc = 100 * act_cost / total_cost) %>%
    mutate_at(vars(spendperc), funs(round(., 2)))
  colnames(mt)[colnames(mt) == "match_type"] <- "matchtype"
  mt <- mt %>%
    mutate(act_roas = act_rev / act_cost,
           pred_roas = pred_rev / pred_cost,
           roas_acc = act_roas / pred_roas * 100)
  mt[is.na(mt)] <- 0
  mt$error_rate_mt <- var(mt$roas_acc) / mean(mt$roas_acc)
  # Degenerate inputs (e.g. a single match-type row) yield NaN -> report 0.
  mt[mt == "NaN"] <- NA
  mt[is.na(mt)] <- 0
  paste("Match Type Error Rate:", round(unique(mt$error_rate_mt), digits = 3))
})
output$textdever <- renderText({
  # Error rate (variance / mean) of ROAS accuracy across devices.
  dev <- as.data.frame(passdata1()$df8) %>%
    select(portfolio_name, device, act_cost, act_rev, pred_cost, pred_rev) %>%
    group_by(portfolio_name) %>%
    mutate(rank = rank(desc(act_cost)), total_cost = sum(act_cost)) %>%
    arrange(rank) %>%
    mutate(spendperc = 100 * act_cost / total_cost) %>%
    mutate_at(vars(spendperc), funs(round(., 2))) %>%
    mutate(act_roas = act_rev / act_cost,
           pred_roas = pred_rev / pred_cost,
           roas_acc = act_roas / pred_roas * 100)
  dev[is.na(dev)] <- 0
  dev$error_rate_dev <- var(dev$roas_acc) / mean(dev$roas_acc)
  # Degenerate inputs (e.g. a single device row) yield NaN -> report 0.
  dev[dev == "NaN"] <- NA
  dev[is.na(dev)] <- 0
  paste("Device Error Rate:", round(unique(dev$error_rate_dev), digits = 3))
})
output$devplot <- renderPlot({
# Bar chart: actual ROAS per device, faceted by portfolio, with a dashed
# reference line at the mean actual ROAS across device rows.
data <- as.data.frame(passdata1()$df8)
data <- data %>% select(portfolio_name,device, act_cost, act_rev, pred_cost, pred_rev)
# Rank devices by actual spend within the portfolio and derive spend share.
data <- data %>% group_by(portfolio_name) %>% mutate(rank = rank(desc(act_cost)), total_cost= sum(act_cost)) %>% arrange(rank)
data <- data%>% group_by(portfolio_name) %>% mutate(spendperc=100*act_cost/total_cost)
data <- data %>% mutate_at(vars(spendperc), funs(round(., 2)))
# Actual vs predicted ROAS and accuracy ratio (%); divisions by zero give
# NA/NaN, which are zeroed below.
data <- data %>% mutate(act_roas=act_rev/act_cost)
data <- data %>% mutate(pred_roas=pred_rev/pred_cost)
data <- data %>% mutate(roas_acc=act_roas/pred_roas*100)
data[is.na(data)] <- 0
data1 <- data %>% select(portfolio_name,act_roas,device)
data1 <- as.data.frame(data1)
# melt() reshapes to long format for ggplot. Which melt() runs depends on
# the packages attached at this point (data.table vs reshape2) -- verify.
data2 <- melt(data1)
# The dummy `linetype` aesthetic plus scale_linetype_manual()/guides() is a
# trick to give the geom_hline layer its own "mean roas" legend entry.
p <- ggplot() +
geom_bar(data=data2, aes(x=device, y=value),stat='identity', position='dodge2', color="darkcyan", fill="darkcyan")+ facet_grid(. ~ portfolio_name)+labs(x="device", y="actual_roas") + scale_fill_discrete(guide=FALSE)+geom_hline(data=data1, mapping = aes(yintercept=mean(act_roas), linetype="dotted"),size=1, color = "darkcyan", show.legend =T)+scale_linetype_manual(name = "Legend-Line ", labels = c("dotted"="mean roas"), values = c("twodash"))+
guides(linetype = guide_legend("Legend-Line",override.aes = list(shape = 0, color="darkcyan")))+
# theme_set(theme_gray(base_size = 18))
theme(axis.text.x = element_text(color = "grey20", size = 15, angle = 45, hjust = .5, vjust = .5, face = "plain"),
axis.text.y = element_text(color = "grey20", size = 15, angle = 0, hjust = 1, vjust = 0, face = "plain"))
print(p)
})
output$devplot1 <- renderPlot({
# Bar chart: ROAS accuracy (%) per device, faceted by portfolio, with a
# dashed reference line at 100% (perfect accuracy).
data <- as.data.frame(passdata1()$df8)
data <- data %>% select(portfolio_name,device, act_cost, act_rev, pred_cost, pred_rev)
# Rank devices by actual spend within the portfolio and derive spend share.
data <- data %>% group_by(portfolio_name) %>% mutate(rank = rank(desc(act_cost)), total_cost= sum(act_cost)) %>% arrange(rank)
data <- data%>% group_by(portfolio_name) %>% mutate(spendperc=100*act_cost/total_cost)
data <- data %>% mutate_at(vars(spendperc), funs(round(., 2)))
# Actual vs predicted ROAS and accuracy ratio (%); NA/NaN zeroed below.
data <- data %>% mutate(act_roas=act_rev/act_cost)
data <- data %>% mutate(pred_roas=pred_rev/pred_cost)
data <- data %>% mutate(roas_acc=act_roas/pred_roas*100)
data[is.na(data)] <- 0
data1 <- data %>% select(portfolio_name,roas_acc,device)
data1 <- as.data.frame(data1)
# melt() reshapes to long format for ggplot (dispatch depends on attached
# packages -- data.table vs reshape2; verify).
data2 <- melt(data1)
# Dummy linetype aesthetic + manual scale gives the hline a legend entry.
p <- ggplot() +
geom_bar(data=data2, aes(x=device, y=value),stat='identity', position='dodge2', color="darkcyan", fill="darkcyan")+ facet_grid(. ~ portfolio_name)+labs(x="device", y="roas_acc") + scale_fill_discrete(guide=FALSE)+geom_hline(data=data1, mapping = aes(yintercept=100, linetype="dotted"),size=1, color = "darkcyan", show.legend =T)+scale_linetype_manual(name = "Legend-Line ", labels = c("dotted"="100 %"), values = c("twodash"))+
guides(linetype = guide_legend("Legend-Line",override.aes = list(shape = 0, color="darkcyan")))+
theme(axis.text.x = element_text(color = "grey20", size = 15, angle = 45, hjust = .5, vjust = .5, face = "plain"),
axis.text.y = element_text(color = "grey20", size = 15, angle = 0, hjust = 1, vjust = 0, face = "plain"))
print(p)
})
output$devplot2 <- renderPlot({
# Bar chart: actual cost per device, faceted by portfolio (no reference line).
data <- as.data.frame(passdata1()$df8)
data <- data %>% select(portfolio_name,device, act_cost, act_rev, pred_cost, pred_rev)
# Rank devices by actual spend within the portfolio and derive spend share.
data <- data %>% group_by(portfolio_name) %>% mutate(rank = rank(desc(act_cost)), total_cost= sum(act_cost)) %>% arrange(rank)
data <- data%>% group_by(portfolio_name) %>% mutate(spendperc=100*act_cost/total_cost)
data <- data %>% mutate_at(vars(spendperc), funs(round(., 2)))
# ROAS columns are computed for parity with the sibling plots even though
# only act_cost is plotted here; NA/NaN zeroed below.
data <- data %>% mutate(act_roas=act_rev/act_cost)
data <- data %>% mutate(pred_roas=pred_rev/pred_cost)
data <- data %>% mutate(roas_acc=act_roas/pred_roas*100)
data[is.na(data)] <- 0
data1 <- data %>% select(portfolio_name,act_cost,device)
data1 <- as.data.frame(data1)
# melt() reshapes to long format for ggplot (dispatch depends on attached
# packages -- data.table vs reshape2; verify).
data2 <- melt(data1)
p <- ggplot() +
geom_bar(data=data2, aes(x=device, y=value),stat='identity', position='dodge2', color="red", fill="red")+ facet_grid(. ~ portfolio_name)+labs(x="device", y="actual_cost") + scale_fill_discrete(guide=FALSE)+
theme(axis.text.x = element_text(color = "grey20", size = 15, angle = 45, hjust = .5, vjust = .5, face = "plain"),
axis.text.y = element_text(color = "grey20", size = 15, angle = 0, hjust = 1, vjust = 0, face = "plain"))
print(p)
})
output$devplot3 <- renderPlot({
# Bar chart: cost accuracy (%) per device, faceted by portfolio.
data <- as.data.frame(passdata1()$df8)
data <- data %>% select(portfolio_name,device, act_cost, act_rev, pred_cost, pred_rev)
# Rank devices by actual spend within the portfolio and derive spend share.
data <- data %>% group_by(portfolio_name) %>% mutate(rank = rank(desc(act_cost)), total_cost= sum(act_cost)) %>% arrange(rank)
data <- data%>% group_by(portfolio_name) %>% mutate(spendperc=100*act_cost/total_cost)
data <- data %>% mutate_at(vars(spendperc), funs(round(., 2)))
# ROAS columns plus cost accuracy = actual/predicted cost in %; NA/NaN
# from zero denominators are zeroed below.
data <- data %>% mutate(act_roas=act_rev/act_cost)
data <- data %>% mutate(pred_roas=pred_rev/pred_cost)
data <- data %>% mutate(roas_acc=act_roas/pred_roas*100)
data <- data %>% mutate(cost_acc=act_cost/pred_cost*100)
data[is.na(data)] <- 0
data1 <- data %>% select(portfolio_name,cost_acc,device)
data1 <- as.data.frame(data1)
# melt() reshapes to long format for ggplot (dispatch depends on attached
# packages -- data.table vs reshape2; verify).
data2 <- melt(data1)
p <- ggplot() +
geom_bar(data=data2, aes(x=device, y=value),stat='identity', position='dodge2', color="red", fill="red")+ facet_grid(. ~ portfolio_name)+labs(x="device", y="cost_acc") + scale_fill_discrete(guide=FALSE)+
theme(axis.text.x = element_text(color = "grey20", size = 15, angle = 45, hjust = .5, vjust = .5, face = "plain"),
axis.text.y = element_text(color = "grey20", size = 15, angle = 0, hjust = 1, vjust = 0, face = "plain"))
print(p)
})
output$MTplot <- renderPlot({
# Bar chart: actual ROAS per match type, faceted by portfolio, with a
# dashed reference line at the mean actual ROAS across match-type rows.
data <- as.data.frame(passdata1()$df6)
data <- data %>% select(portfolio_name,match_type, act_cost, act_rev, pred_cost, pred_rev)
# Rank match types by actual spend within the portfolio, derive spend share.
data <- data %>% group_by(portfolio_name) %>% mutate(rank = rank(desc(act_cost)), total_cost= sum(act_cost)) %>% arrange(rank)
data <- data%>% group_by(portfolio_name) %>% mutate(spendperc=100*act_cost/total_cost)
data <- data %>% mutate_at(vars(spendperc), funs(round(., 2)))
colnames(data)[colnames(data)=="match_type"] <- "matchtype"
# Actual vs predicted ROAS and accuracy ratio (%); NA/NaN zeroed below.
data <- data %>% mutate(act_roas=act_rev/act_cost)
data <- data %>% mutate(pred_roas=pred_rev/pred_cost)
data <- data %>% mutate(roas_acc=act_roas/pred_roas*100)
data[is.na(data)] <- 0
data1 <- data %>% select(portfolio_name,act_roas,matchtype)
data1 <- as.data.frame(data1)
# melt() reshapes to long format for ggplot (dispatch depends on attached
# packages -- data.table vs reshape2; verify).
data2 <- melt(data1)
# Dummy linetype aesthetic + manual scale gives the hline a legend entry.
p <- ggplot() +
geom_bar(data=data2, aes(x=matchtype, y=value),stat='identity', position='dodge2', color="darkcyan", fill="darkcyan")+ facet_grid(. ~ portfolio_name)+labs(x="match type", y="actual_roas") + scale_fill_discrete(guide=FALSE)+geom_hline(data=data1, mapping = aes(yintercept=mean(act_roas), linetype="dotted"),size=1, color = "darkcyan", show.legend =T)+scale_linetype_manual(name = "Legend-Line ", labels = c("dotted"="mean roas"), values = c("twodash"))+
guides(linetype = guide_legend("Legend-Line",override.aes = list(shape = 0, color="darkcyan")))+
theme(axis.text.x = element_text(color = "grey20", size = 15, angle = 45, hjust = .5, vjust = .5, face = "plain"),
axis.text.y = element_text(color = "grey20", size = 15, angle = 0, hjust = 1, vjust = 0, face = "plain"))
print(p)
})
output$MTplot1 <- renderPlot({
  # Bar chart of ROAS accuracy (act_roas / pred_roas * 100) per match type,
  # faceted by portfolio, with a reference line at 100 % (perfect accuracy).
  data <- as.data.frame(passdata1()$df6)
  data <- data %>% select(portfolio_name, match_type, act_cost, act_rev, pred_cost, pred_rev)
  # Rank match types by actual spend within each portfolio and derive spend share.
  data <- data %>%
    group_by(portfolio_name) %>%
    mutate(rank = rank(desc(act_cost)), total_cost = sum(act_cost)) %>%
    arrange(rank)
  data <- data %>% group_by(portfolio_name) %>% mutate(spendperc = 100 * act_cost / total_cost)
  # Direct round() replaces the deprecated mutate_at(vars(...), funs(...)) idiom.
  data <- data %>% mutate(spendperc = round(spendperc, 2))
  colnames(data)[colnames(data) == "match_type"] <- "matchtype"
  # Derived metrics; zero-cost rows produce NaN/NA, zeroed out below.
  data <- data %>% mutate(act_roas = act_rev / act_cost)
  data <- data %>% mutate(pred_roas = pred_rev / pred_cost)
  data <- data %>% mutate(roas_acc = act_roas / pred_roas * 100)
  data[is.na(data)] <- 0
  data1 <- as.data.frame(data %>% select(portfolio_name, roas_acc, matchtype))
  data2 <- melt(data1)
  p <- ggplot() +
    geom_bar(data = data2, aes(x = matchtype, y = value), stat = 'identity',
             position = 'dodge2', color = "darkcyan", fill = "darkcyan") +
    facet_grid(. ~ portfolio_name) +
    labs(x = "match type", y = "roas_acc") +
    # guide = "none" replaces the deprecated guide = FALSE.
    scale_fill_discrete(guide = "none") +
    geom_hline(data = data1,
               mapping = aes(yintercept = 100, linetype = "dotted"),
               size = 1, color = "darkcyan", show.legend = TRUE) +
    scale_linetype_manual(name = "Legend-Line ", labels = c("dotted" = "100 %"),
                          values = c("twodash")) +
    guides(linetype = guide_legend("Legend-Line",
                                   override.aes = list(shape = 0, color = "darkcyan"))) +
    theme(axis.text.x = element_text(color = "grey20", size = 15, angle = 45,
                                     hjust = .5, vjust = .5, face = "plain"),
          axis.text.y = element_text(color = "grey20", size = 15, angle = 0,
                                     hjust = 1, vjust = 0, face = "plain"))
  print(p)
})
output$MTplot2 <- renderPlot({
  # Bar chart of actual cost per match type, faceted by portfolio.
  data <- as.data.frame(passdata1()$df6)
  data <- data %>% select(portfolio_name, match_type, act_cost, act_rev, pred_cost, pred_rev)
  # Rank match types by actual spend within each portfolio and derive spend share.
  data <- data %>%
    group_by(portfolio_name) %>%
    mutate(rank = rank(desc(act_cost)), total_cost = sum(act_cost)) %>%
    arrange(rank)
  data <- data %>% group_by(portfolio_name) %>% mutate(spendperc = 100 * act_cost / total_cost)
  # Direct round() replaces the deprecated mutate_at(vars(...), funs(...)) idiom.
  data <- data %>% mutate(spendperc = round(spendperc, 2))
  colnames(data)[colnames(data) == "match_type"] <- "matchtype"
  # Derived metrics; zero-cost rows produce NaN/NA, zeroed out below.
  data <- data %>% mutate(act_roas = act_rev / act_cost)
  data <- data %>% mutate(pred_roas = pred_rev / pred_cost)
  data <- data %>% mutate(roas_acc = act_roas / pred_roas * 100)
  data[is.na(data)] <- 0
  data1 <- as.data.frame(data %>% select(portfolio_name, act_cost, matchtype))
  data2 <- melt(data1)
  p <- ggplot() +
    geom_bar(data = data2, aes(x = matchtype, y = value), stat = 'identity',
             position = 'dodge2', color = "red", fill = "red") +
    facet_grid(. ~ portfolio_name) +
    labs(x = "match type", y = "act_cost") +
    # guide = "none" replaces the deprecated guide = FALSE.
    scale_fill_discrete(guide = "none") +
    theme(axis.text.x = element_text(color = "grey20", size = 15, angle = 45,
                                     hjust = .5, vjust = .5, face = "plain"),
          axis.text.y = element_text(color = "grey20", size = 15, angle = 0,
                                     hjust = 1, vjust = 0, face = "plain"))
  print(p)
})
output$MTplot3 <- renderPlot({
  # Bar chart of cost accuracy (act_cost / pred_cost * 100) per match type,
  # faceted by portfolio.
  data <- as.data.frame(passdata1()$df6)
  data <- data %>% select(portfolio_name, match_type, act_cost, act_rev, pred_cost, pred_rev)
  # Rank match types by actual spend within each portfolio and derive spend share.
  data <- data %>%
    group_by(portfolio_name) %>%
    mutate(rank = rank(desc(act_cost)), total_cost = sum(act_cost)) %>%
    arrange(rank)
  data <- data %>% group_by(portfolio_name) %>% mutate(spendperc = 100 * act_cost / total_cost)
  # Direct round() replaces the deprecated mutate_at(vars(...), funs(...)) idiom.
  data <- data %>% mutate(spendperc = round(spendperc, 2))
  colnames(data)[colnames(data) == "match_type"] <- "matchtype"
  # Derived metrics; zero-denominator rows produce NaN/NA, zeroed out below.
  data <- data %>% mutate(act_roas = act_rev / act_cost)
  data <- data %>% mutate(pred_roas = pred_rev / pred_cost)
  data <- data %>% mutate(roas_acc = act_roas / pred_roas * 100)
  data <- data %>% mutate(cost_acc = act_cost / pred_cost * 100)
  data[is.na(data)] <- 0
  data1 <- as.data.frame(data %>% select(portfolio_name, cost_acc, matchtype))
  data2 <- melt(data1)
  p <- ggplot() +
    geom_bar(data = data2, aes(x = matchtype, y = value), stat = 'identity',
             position = 'dodge2', color = "red", fill = "red") +
    facet_grid(. ~ portfolio_name) +
    labs(x = "match type", y = "cost_acc") +
    # guide = "none" replaces the deprecated guide = FALSE.
    scale_fill_discrete(guide = "none") +
    theme(axis.text.x = element_text(color = "grey20", size = 15, angle = 45,
                                     hjust = .5, vjust = .5, face = "plain"),
          axis.text.y = element_text(color = "grey20", size = 15, angle = 0,
                                     hjust = 1, vjust = 0, face = "plain"))
  print(p)
})
output$CLplot <- renderPlot({
  # Bar chart of actual ROAS per click-level bucket, faceted by portfolio,
  # with a dashed reference line at the overall mean actual ROAS.
  data <- as.data.frame(passdata1()$df7)
  data <- data[order(data$clicklevel, decreasing = FALSE), ]
  # Keep only the canonical click-level buckets.
  target <- c("0.00", "0.10", "1.00", "10.00", "100.00", "1000.00", "10000.00")
  data <- data %>% filter(clicklevel %in% target)
  data <- data %>% select(portfolio_name, clicklevel, act_cost, act_rev, pred_cost, pred_rev)
  # Rank click levels by actual spend within each portfolio and derive spend share.
  data <- data %>%
    group_by(portfolio_name) %>%
    mutate(rank = rank(desc(act_cost)), total_cost = sum(act_cost)) %>%
    arrange(rank)
  data <- data %>% group_by(portfolio_name) %>% mutate(spendperc = 100 * act_cost / total_cost)
  # Direct round() replaces the deprecated mutate_at(vars(...), funs(...)) idiom.
  data <- data %>% mutate(spendperc = round(spendperc, 2))
  # Derived metrics; zero-cost rows produce NaN/NA, zeroed out below.
  data <- data %>% mutate(act_roas = act_rev / act_cost)
  data <- data %>% mutate(pred_roas = pred_rev / pred_cost)
  data <- data %>% mutate(roas_acc = act_roas / pred_roas * 100)
  data[is.na(data)] <- 0
  data1 <- as.data.frame(data %>% select(portfolio_name, act_roas, clicklevel))
  data2 <- melt(data1)
  p <- ggplot() +
    geom_bar(data = data2, aes(x = clicklevel, y = value), stat = 'identity',
             position = 'dodge2', color = "darkcyan", fill = "darkcyan") +
    facet_grid(. ~ portfolio_name) +
    labs(x = "clicklevel", y = "act_roas") +
    # guide = "none" replaces the deprecated guide = FALSE.
    scale_fill_discrete(guide = "none") +
    # Reference line: mean of act_roas across all rows (all portfolios pooled).
    geom_hline(data = data1,
               mapping = aes(yintercept = mean(act_roas), linetype = "dotted"),
               size = 1, color = "darkcyan", show.legend = TRUE) +
    scale_linetype_manual(name = "Legend-Line ", labels = c("dotted" = "mean roas"),
                          values = c("twodash")) +
    guides(linetype = guide_legend("Legend-Line",
                                   override.aes = list(shape = 0, color = "darkcyan"))) +
    theme(axis.text.x = element_text(color = "grey20", size = 15, angle = 45,
                                     hjust = .5, vjust = .5, face = "plain"),
          axis.text.y = element_text(color = "grey20", size = 15, angle = 0,
                                     hjust = 1, vjust = 0, face = "plain"))
  print(p)
})
output$CLplot1 <- renderPlot({
  # Bar chart of ROAS accuracy per click-level bucket, faceted by portfolio,
  # with a reference line at 100 % (perfect accuracy).
  data <- as.data.frame(passdata1()$df7)
  data <- data[order(data$clicklevel, decreasing = FALSE), ]
  # Keep only the canonical click-level buckets.
  target <- c("0.00", "0.10", "1.00", "10.00", "100.00", "1000.00", "10000.00")
  data <- data %>% filter(clicklevel %in% target)
  data <- data %>% select(portfolio_name, clicklevel, act_cost, act_rev, pred_cost, pred_rev)
  # Rank click levels by actual spend within each portfolio and derive spend share.
  data <- data %>%
    group_by(portfolio_name) %>%
    mutate(rank = rank(desc(act_cost)), total_cost = sum(act_cost)) %>%
    arrange(rank)
  data <- data %>% group_by(portfolio_name) %>% mutate(spendperc = 100 * act_cost / total_cost)
  # Direct round() replaces the deprecated mutate_at(vars(...), funs(...)) idiom.
  data <- data %>% mutate(spendperc = round(spendperc, 2))
  # Derived metrics; zero-cost rows produce NaN/NA, zeroed out below.
  data <- data %>% mutate(act_roas = act_rev / act_cost)
  data <- data %>% mutate(pred_roas = pred_rev / pred_cost)
  data <- data %>% mutate(roas_acc = act_roas / pred_roas * 100)
  data[is.na(data)] <- 0
  data1 <- as.data.frame(data %>% select(portfolio_name, roas_acc, clicklevel))
  data2 <- melt(data1)
  p <- ggplot() +
    geom_bar(data = data2, aes(x = clicklevel, y = value), stat = 'identity',
             position = 'dodge2', color = "darkcyan", fill = "darkcyan") +
    facet_grid(. ~ portfolio_name) +
    labs(x = "clicklevel", y = "roas_acc") +
    # guide = "none" replaces the deprecated guide = FALSE.
    scale_fill_discrete(guide = "none") +
    geom_hline(data = data1,
               mapping = aes(yintercept = 100, linetype = "dotted"),
               size = 1, color = "darkcyan", show.legend = TRUE) +
    scale_linetype_manual(name = "Legend-Line ", labels = c("dotted" = "100 %"),
                          values = c("twodash")) +
    guides(linetype = guide_legend("Legend-Line",
                                   override.aes = list(shape = 0, color = "darkcyan"))) +
    theme(axis.text.x = element_text(color = "grey20", size = 15, angle = 45,
                                     hjust = .5, vjust = .5, face = "plain"),
          axis.text.y = element_text(color = "grey20", size = 15, angle = 0,
                                     hjust = 1, vjust = 0, face = "plain"))
  print(p)
})
output$CLplot2 <- renderPlot({
  # Bar chart of actual cost per click-level bucket, faceted by portfolio.
  data <- as.data.frame(passdata1()$df7)
  data <- data[order(data$clicklevel, decreasing = FALSE), ]
  # Keep only the canonical click-level buckets.
  target <- c("0.00", "0.10", "1.00", "10.00", "100.00", "1000.00", "10000.00")
  data <- data %>% filter(clicklevel %in% target)
  data <- data %>% select(portfolio_name, clicklevel, act_cost, act_rev, pred_cost, pred_rev)
  # Rank click levels by actual spend within each portfolio and derive spend share.
  data <- data %>%
    group_by(portfolio_name) %>%
    mutate(rank = rank(desc(act_cost)), total_cost = sum(act_cost)) %>%
    arrange(rank)
  data <- data %>% group_by(portfolio_name) %>% mutate(spendperc = 100 * act_cost / total_cost)
  # Direct round() replaces the deprecated mutate_at(vars(...), funs(...)) idiom.
  data <- data %>% mutate(spendperc = round(spendperc, 2))
  # Derived metrics; zero-cost rows produce NaN/NA, zeroed out below.
  data <- data %>% mutate(act_roas = act_rev / act_cost)
  data <- data %>% mutate(pred_roas = pred_rev / pred_cost)
  data <- data %>% mutate(roas_acc = act_roas / pred_roas * 100)
  data[is.na(data)] <- 0
  data1 <- as.data.frame(data %>% select(portfolio_name, act_cost, clicklevel))
  data2 <- melt(data1)
  p <- ggplot() +
    geom_bar(data = data2, aes(x = clicklevel, y = value), stat = 'identity',
             position = 'dodge2', color = "red", fill = "red") +
    facet_grid(. ~ portfolio_name) +
    labs(x = "clicklevel", y = "act_cost") +
    # guide = "none" replaces the deprecated guide = FALSE.
    scale_fill_discrete(guide = "none") +
    theme(axis.text.x = element_text(color = "grey20", size = 15, angle = 45,
                                     hjust = .5, vjust = .5, face = "plain"),
          axis.text.y = element_text(color = "grey20", size = 15, angle = 0,
                                     hjust = 1, vjust = 0, face = "plain"))
  print(p)
})
output$CLplot3 <- renderPlot({
  # Bar chart of cost accuracy (act_cost / pred_cost * 100) per click-level
  # bucket, faceted by portfolio.
  data <- as.data.frame(passdata1()$df7)
  data <- data[order(data$clicklevel, decreasing = FALSE), ]
  # Keep only the canonical click-level buckets.
  target <- c("0.00", "0.10", "1.00", "10.00", "100.00", "1000.00", "10000.00")
  data <- data %>% filter(clicklevel %in% target)
  data <- data %>% select(portfolio_name, clicklevel, act_cost, act_rev, pred_cost, pred_rev)
  # Rank click levels by actual spend within each portfolio and derive spend share.
  data <- data %>%
    group_by(portfolio_name) %>%
    mutate(rank = rank(desc(act_cost)), total_cost = sum(act_cost)) %>%
    arrange(rank)
  data <- data %>% group_by(portfolio_name) %>% mutate(spendperc = 100 * act_cost / total_cost)
  # Direct round() replaces the deprecated mutate_at(vars(...), funs(...)) idiom.
  data <- data %>% mutate(spendperc = round(spendperc, 2))
  # Derived metrics; zero-denominator rows produce NaN/NA, zeroed out below.
  data <- data %>% mutate(act_roas = act_rev / act_cost)
  data <- data %>% mutate(pred_roas = pred_rev / pred_cost)
  data <- data %>% mutate(roas_acc = act_roas / pred_roas * 100)
  data <- data %>% mutate(cost_acc = act_cost / pred_cost * 100)
  data[is.na(data)] <- 0
  data1 <- as.data.frame(data %>% select(portfolio_name, cost_acc, clicklevel))
  data2 <- melt(data1)
  p <- ggplot() +
    geom_bar(data = data2, aes(x = clicklevel, y = value), stat = 'identity',
             position = 'dodge2', color = "red", fill = "red") +
    facet_grid(. ~ portfolio_name) +
    labs(x = "clicklevel", y = "cost_acc") +
    # guide = "none" replaces the deprecated guide = FALSE.
    scale_fill_discrete(guide = "none") +
    theme(axis.text.x = element_text(color = "grey20", size = 15, angle = 45,
                                     hjust = .5, vjust = .5, face = "plain"),
          axis.text.y = element_text(color = "grey20", size = 15, angle = 0,
                                     hjust = 1, vjust = 0, face = "plain"))
  print(p)
})
output$topdevice <- renderText({
  # Text summary: the device with the highest actual spend (rank 1) in each
  # portfolio. Note: with multiple portfolios, paste() returns one string per
  # portfolio and renderText concatenates them.
  data <- as.data.frame(passdata1()$df8)
  data <- data %>% select(portfolio_name, device, act_cost, act_rev, pred_cost, pred_rev)
  # Rank devices by actual spend within each portfolio and derive spend share.
  data <- data %>%
    group_by(portfolio_name) %>%
    mutate(rank = rank(desc(act_cost)), total_cost = sum(act_cost)) %>%
    arrange(rank)
  data <- data %>% group_by(portfolio_name) %>% mutate(spendperc = 100 * act_cost / total_cost)
  # Direct round() replaces the deprecated mutate_at(vars(...), funs(...)) idiom.
  data <- data %>% mutate(spendperc = round(spendperc, 2))
  data <- data %>% group_by(portfolio_name) %>% filter(rank == 1)
  data <- data %>% select(portfolio_name, device, spendperc)
  paste("Top Cost Device : ", data$device)
})
output$topmatch <- renderText({
  # Text summary: the match type with the highest actual spend (rank 1) in
  # each portfolio. Note: with multiple portfolios, paste() returns one string
  # per portfolio and renderText concatenates them.
  data <- as.data.frame(passdata1()$df6)
  data <- data %>% select(portfolio_name, match_type, act_cost, act_rev, pred_cost, pred_rev)
  # Rank match types by actual spend within each portfolio and derive spend share.
  data <- data %>%
    group_by(portfolio_name) %>%
    mutate(rank = rank(desc(act_cost)), total_cost = sum(act_cost)) %>%
    arrange(rank)
  data <- data %>% group_by(portfolio_name) %>% mutate(spendperc = 100 * act_cost / total_cost)
  # Direct round() replaces the deprecated mutate_at(vars(...), funs(...)) idiom.
  data <- data %>% mutate(spendperc = round(spendperc, 2))
  colnames(data)[colnames(data) == "match_type"] <- "matchtype"
  data <- data %>% group_by(portfolio_name) %>% filter(rank == 1)
  data <- data %>% select(portfolio_name, matchtype, spendperc)
  paste("Top Cost Match Type : ", data$matchtype)
})
output$plotMT <- renderPlotly({
  # Pie chart of spend share (spendperc) by match type.
  data <- as.data.frame(passdata1()$df6)
  data <- data %>% select(portfolio_name, match_type, act_cost, act_rev, pred_cost, pred_rev)
  data <- data %>%
    group_by(portfolio_name) %>%
    mutate(rank = rank(desc(act_cost)), total_cost = sum(act_cost)) %>%
    arrange(rank)
  data <- data %>% group_by(portfolio_name) %>% mutate(spendperc = 100 * act_cost / total_cost)
  # Direct round() replaces the deprecated mutate_at(vars(...), funs(...)) idiom.
  data <- data %>% mutate(spendperc = round(spendperc, 2))
  colnames(data)[colnames(data) == "match_type"] <- "matchtype"
  library(plotly)  # NOTE(review): attach once at the top of the file instead of per render
  plot_ly(data, labels = ~matchtype, values = ~spendperc, type = 'pie',
          text = ~paste("Match Type:", matchtype, ",", "Spend Percent:", spendperc),
          hoverinfo = "text", textinfo = "text") %>%
    layout(xaxis = list(showgrid = FALSE, zeroline = FALSE, showticklabels = FALSE),
           yaxis = list(showgrid = FALSE, zeroline = FALSE, showticklabels = FALSE))
})
output$plotdev <- renderPlotly({
  # Pie chart of spend share (spendperc) by device.
  data <- as.data.frame(passdata1()$df8)
  data <- data %>% select(portfolio_name, device, act_cost, act_rev, pred_cost, pred_rev)
  data <- data %>%
    group_by(portfolio_name) %>%
    mutate(rank = rank(desc(act_cost)), total_cost = sum(act_cost)) %>%
    arrange(rank)
  data <- data %>% group_by(portfolio_name) %>% mutate(spendperc = 100 * act_cost / total_cost)
  # Direct round() replaces the deprecated mutate_at(vars(...), funs(...)) idiom.
  data <- data %>% mutate(spendperc = round(spendperc, 2))
  library(plotly)  # NOTE(review): attach once at the top of the file instead of per render
  plot_ly(data, labels = ~device, values = ~spendperc, type = 'pie',
          text = ~paste("Device:", device, ",", "Spend Percent:", spendperc),
          hoverinfo = "text", textinfo = "text") %>%
    layout(xaxis = list(showgrid = FALSE, zeroline = FALSE, showticklabels = FALSE),
           yaxis = list(showgrid = FALSE, zeroline = FALSE, showticklabels = FALSE))
})
output$plotCL <- renderPlotly({
  # Pie chart of spend share (spendperc) by click-level bucket.
  data <- as.data.frame(passdata1()$df7)
  data <- data[order(data$clicklevel, decreasing = FALSE), ]
  # Keep only the canonical click-level buckets.
  target <- c("0.00", "0.10", "1.00", "10.00", "100.00", "1000.00", "10000.00")
  data <- data %>% filter(clicklevel %in% target)
  data <- data %>% select(portfolio_name, clicklevel, act_cost, act_rev, pred_cost, pred_rev)
  data <- data %>%
    group_by(portfolio_name) %>%
    mutate(rank = rank(desc(act_cost)), total_cost = sum(act_cost)) %>%
    arrange(rank)
  data <- data %>% group_by(portfolio_name) %>% mutate(spendperc = 100 * act_cost / total_cost)
  # Direct round() replaces the deprecated mutate_at(vars(...), funs(...)) idiom.
  data <- data %>% mutate(spendperc = round(spendperc, 2))
  library(plotly)  # NOTE(review): attach once at the top of the file instead of per render
  plot_ly(data, labels = ~clicklevel, values = ~spendperc, type = 'pie',
          text = ~paste("Click Level:", clicklevel, ",", "Spend Percent:", spendperc),
          hoverinfo = "text", textinfo = "text") %>%
    layout(xaxis = list(showgrid = FALSE, zeroline = FALSE, showticklabels = FALSE),
           yaxis = list(showgrid = FALSE, zeroline = FALSE, showticklabels = FALSE))
})
output$admin_reco_2 <- DT::renderDataTable(DT::datatable({
data_1 <- as.data.frame(passdata1()$df1)
data_2 <- as.data.frame(passdata1()$df3)
data_3 <- as.data.frame(passdata1()$df4)
data_5 <- as.data.frame(passdata1()$df8)
data_6 <- as.data.frame(passdata1()$df9)
data_2 <- data_2 %>% select(portfolio_name, date, cost_acc, rpc_acc)
y_data <- data_2 %>% select(date,cost_acc, rpc_acc)
y_data[is.na(y_data)] <- 0
data_21 <- data_6 %>% select(portfolio_name, date, cost_acc)
y_data1 <- data_21 %>% select(date,cost_acc)
y_data1[is.na(y_data1)] <- 0
library(changepoint)
cptm_CP <- cpt.mean(y_data1$cost_acc, penalty='MBIC',pen.value=0, method='BinSeg',
test.stat="Normal", minseglen=7, class=TRUE)
cptm_CP
cpts_CP <- cpts(cptm_CP) # change point time points
cpts_CP
cost_change <- ifelse(max(cpts_CP)==0,0,(Sys.Date()-(as.Date(y_data1[max(cpts_CP),1]))))
library(changepoint)
cptm_CP_1 <- cpt.mean(y_data$rpc_acc, penalty='MBIC',pen.value=0, method='BinSeg',
test.stat="Normal", minseglen=7, class=TRUE)
cptm_CP_1
cpts_CP_1 <- cpts(cptm_CP_1) # change point time points
cpts_CP_1
rpc_change <- ifelse(max(cpts_CP_1)==0,0,(Sys.Date()-(as.Date(y_data[max(cpts_CP_1),1]))))
data_1$cchange <- cost_change
data_1$rchange <- rpc_change
data_3[is.na(data_3)] <- 0
library(lubridate)
data_3$date <- as.Date(data_3$date)
data_3$dow <- lubridate::wday(data_3$date, label=TRUE)
data_v3 <- data_3 %>% select(portfolio_name, dow, act_cost, act_rev, pred_cost, pred_rev)
data_v4 <- aggregate(.~portfolio_name+dow,data_v3,sum)
data_v4 <- data_v4 %>% mutate(act_roas=act_rev/act_cost)
data_v4 <- data_v4 %>% mutate(pred_roas=pred_rev/pred_cost)
data_v4 <- data_v4 %>% mutate(roas_acc=act_roas/pred_roas*100)
data_v4$Variance_di <- var(data_v4$act_roas)
data_v4$avg_di <- mean(data_v4$act_roas)
data_v4$dispersion_index <- data_v4$Variance_di/data_v4$avg_di
data_v4$Variance_er <- var(data_v4$roas_acc)
data_v4$avg_er <- mean(data_v4$roas_acc)
data_v4$error_rate <- data_v4$Variance_er/data_v4$avg_er
di <- unique(data_v4$dispersion_index)
er <- unique(data_v4$error_rate)
data_1$di <- di
data_1$er <- er
data_1 <- data_1 %>% mutate(reco_dow = case_when(di > 0.2 & DOWfeature == "Not_Enabled" & Spend_Scenario == "Overspend" ~ "Enable_DOW",
di > 0.2 & DOWfeature == "Not_Enabled" & Spend_Scenario == "Underspend"~ "Enable_DOW",
di > 0.2 & DOWfeature == "Not_Enabled" & RPC_Scenario =="Poor RPC Accuracy" ~ "Enable_DOW",
Spend_Scenario == "Overspend" & di > 0.2 & DOWfeature == "Enabled" & er > 0.2 & click_coverage > 0.1 ~ "Further_investigate",
Spend_Scenario == "Overspend" & di > 0.2 & DOWfeature == "Enabled" & er > 0.2 & click_coverage < 0.1 ~ "Disable_DOW",
Spend_Scenario == "Overspend" & di > 0.2 & DOWfeature == "Enabled" & er < 0.2 ~ "Disable_DOW",
Spend_Scenario == "Overspend" & di > 0.2 & DOWfeature == "Enabled" & er > 0.2 & revenue_coverage < 0.05 ~ "Disable_DOW",
Spend_Scenario == "Overspend" & di > 0.2 & DOWfeature == "Enabled" & er > 0.2 & revenue_coverage >0.05 ~ "Further_investigate",
Spend_Scenario == "Underspend" & di > 0.2 & DOWfeature == "Enabled" & er > 0.2 & revenue_coverage > 0.05 ~ "Further_investigate",
Spend_Scenario == "Underspend" & di > 0.2 & DOWfeature == "Enabled" & er > 0.2 & revenue_coverage < 0.05 ~ "Disable_DOW",
Spend_Scenario == "Underspend" & di > 0.2 & DOWfeature == "Enabled" & er > 0.2 & click_coverage < 0.1 ~ "Disable_DOW",
Spend_Scenario == "Underspend" & di > 0.2 & DOWfeature == "Enabled" & er < 0.2 ~ "Disable_DOW",
Spend_Scenario == "Underspend" & di > 0.2 & DOWfeature == "Enabled" & er > 0.2 & click_coverage > 0.1 ~ "Further_investigate",
RPC_Scenario == "Poor RPC Accuracy" & di > 0.2 & DOWfeature == "Enabled" & er > 0.2 & revenue_coverage > 0.05 ~ "Further_investigate",
RPC_Scenario == "Poor RPC Accuracy" & di > 0.2 & DOWfeature == "Enabled" & er > 0.2 & revenue_coverage < 0.05 ~ "Disable_DOW",
RPC_Scenario == "Poor RPC Accuracy" & di > 0.2 & DOWfeature == "Enabled" & er > 0.2 & click_coverage < 0.1 ~ "Disable_DOW",
RPC_Scenario == "Poor RPC Accuracy" & di > 0.2 & DOWfeature == "Enabled" & er < 0.2 ~ "Disable_DOW",
RPC_Scenario == "Poor RPC Accuracy" & di > 0.2 & DOWfeature == "Enabled" & er > 0.2 & click_coverage > 0.1 ~ "Further_investigate",
TRUE ~ DOWfeature))
data_1 <- data_1 %>% mutate(reco_cost_HL = case_when(Spend_Scenario!= "Cost Accuracy Within Range" & cchange > 0 & click_coverage < 0.1 & max(cost_model_half_life,cchange) < 14 & max(cost_model_half_life,cchange) > 3 ~ max(cost_model_half_life, cchange),
Spend_Scenario!= "Cost Accuracy Within Range" & cchange > 0 & click_coverage < 0.1 & max(cost_model_half_life,cchange) >14 ~ 14,
Spend_Scenario!= "Cost Accuracy Within Range" & cchange > 0 & click_coverage < 0.1 & max(cost_model_half_life,cchange) < 3 ~ 3,
Spend_Scenario!= "Cost Accuracy Within Range" & cchange > 0 & click_coverage > 0.1 & min((cost_model_half_life-3)/2, 3) < 3 ~ 3,
Spend_Scenario!= "Cost Accuracy Within Range" & cchange > 0 & click_coverage > 0.1 & ((cost_model_half_life-3)/2) > 3 ~ (as.numeric(min((cost_model_half_life-3)/2, 3) )),
TRUE ~ as.numeric(cost_model_half_life)))
data_1 <- data_1 %>% mutate(reco_rev_HL = case_when(RPC_Scenario!= "RPC Accuracy OK" & rchange > 0 & revenue_coverage < 0.05 & max(revenue_model_half_life,rchange) < 60 & max(revenue_model_half_life,rchange) > 10 ~ max(revenue_model_half_life, rchange),
RPC_Scenario!= "RPC Accuracy OK" & rchange > 0 & revenue_coverage < 0.05 & max(revenue_model_half_life,rchange) >60 ~ 60,
RPC_Scenario!= "RPC Accuracy OK" & rchange > 0 & revenue_coverage < 0.05 & max(revenue_model_half_life,rchange) < 10 ~ 10,
RPC_Scenario!= "RPC Accuracy OK" & rchange > 0 & revenue_coverage > 0.05 & min((revenue_model_half_life-10)/2, 10) < 10 ~ 10,
RPC_Scenario!= "RPC Accuracy OK" & rchange > 0 & revenue_coverage > 0.05 & ((revenue_model_half_life-3)/2) > 10 ~ (as.numeric(min((revenue_model_half_life-10)/2, 10) )),
TRUE ~ revenue_model_half_life))
data_5 <- data_5 %>% select(portfolio_name,device, act_cost, act_rev, pred_cost, pred_rev)
data_5 <- data_5 %>% group_by(portfolio_name) %>% mutate(rank = rank(desc(act_cost)), total_cost= sum(act_cost)) %>% arrange(rank)
data_5 <- data_5%>% group_by(portfolio_name) %>% mutate(spendperc=100*act_cost/total_cost)
data_5 <- data_5 %>% mutate_at(vars(spendperc), funs(round(., 2)))
data_5 <- data_5 %>% mutate(act_roas=act_rev/act_cost)
data_5 <- data_5 %>% mutate(pred_roas=pred_rev/pred_cost)
data_5 <- data_5 %>% mutate(roas_acc=act_roas/pred_roas*100)
data_5[is.na(data_5)] <- 0
data_5$Variance_di <- var(data_5$act_roas)
data_5$avg_di <- mean(data_5$act_roas)
data_5$dispersion_index_dev <- data_5$Variance_di/data_5$avg_di
data_5$Variance_er <- var(data_5$roas_acc)
data_5$avg_er <- mean(data_5$roas_acc)
data_5$error_rate_dev <- data_5$Variance_er/data_5$avg_er
data_5[ data_5 == "NaN" ] <- NA
data_5[is.na(data_5)] <- 0
data_1$devdi <- unique(data_5$dispersion_index_dev)
data_1$dever <- unique(data_5$error_rate_dev)
# data_1 <- data_1 %>% mutate(DEVfeature = case_when(ba_enable_mobile==TRUE | ba_enable_tablet==TRUE | ba_enable_computer==TRUE ~ "Enabled",
# ba_enable_mobile==FALSE & ba_enable_tablet==FALSE & ba_enable_computer==FALSE ~ "Not_Enabled"))
#
# min and max default Mobile -50, 100: tablet -0, 0: comp: 0, 0, audience/loc -50 200
# decrease max bid by 20%, keep positive, increase min bid by 20%, reduce range
data_1 <- data_1 %>% mutate(reco_devf = case_when(devdi > 0.2 & DEVfeature == "Not_Enabled" & Spend_Scenario == "Overspend" ~ "Enable",
devdi > 0.2 & DEVfeature == "Not_Enabled" & Spend_Scenario == "Underspend"~ "Enable",
devdi > 0.2 & DEVfeature == "Not_Enabled" & RPC_Scenario =="Poor_RPC" ~ "Enable",
Spend_Scenario == "Overspend" & devdi > 0.2 & DEVfeature == "Enabled" & dever > 0.2 & click_coverage > 0.1 ~ "Further_investigate",
Spend_Scenario == "Overspend" & devdi > 0.2 & DEVfeature == "Enabled" & dever > 0.2 & click_coverage < 0.1 ~ "Disable",
Spend_Scenario == "Overspend" & devdi > 0.2 & DEVfeature == "Enabled" & dever > 0.2 & revenue_coverage < 0.05 ~ "Disable",
Spend_Scenario == "Overspend" & devdi > 0.2 & DEVfeature == "Enabled" & dever < 0.2 ~ "Disable",
Spend_Scenario == "Overspend" & devdi > 0.2 & DEVfeature == "Enabled" & dever > 0.2 & revenue_coverage >0.05 ~ "Further_investigate",
Spend_Scenario == "Underspend" & devdi > 0.2 & DEVfeature == "Enabled" & dever > 0.2 & revenue_coverage > 0.05 ~ "Further_investigate",
Spend_Scenario == "Underspend" & devdi > 0.2 & DEVfeature == "Enabled" & dever > 0.2 & revenue_coverage < 0.05 ~ "Disable",
Spend_Scenario == "Underspend" & devdi > 0.2 & DEVfeature == "Enabled" & dever < 0.2 ~ "Disable",
Spend_Scenario == "Underspend" & devdi > 0.2 & DEVfeature == "Enabled" & dever > 0.2 & click_coverage < 0.1 ~ "Disable",
Spend_Scenario == "Underspend" & devdi > 0.2 & DEVfeature == "Enabled" & dever > 0.2 & click_coverage > 0.1 ~ "Further_investigate",
RPC_Scenario == "Poor_RPC" & devdi > 0.2 & DEVfeature == "Enabled" & dever > 0.2 & revenue_coverage > 0.05 ~ "Further_investigate",
RPC_Scenario == "Poor_RPC" & devdi > 0.2 & DEVfeature == "Enabled" & dever > 0.2 & revenue_coverage < 0.05 ~ "Disable",
RPC_Scenario == "Poor_RPC" & devdi > 0.2 & DEVfeature == "Enabled" & dever > 0.2 & click_coverage < 0.1 ~ "Disable",
RPC_Scenario == "Poor_RPC" & devdi > 0.2 & DEVfeature == "Enabled" & dever < 0.2 ~ "Disable",
RPC_Scenario == "Poor_RPC" & devdi > 0.2 & DEVfeature == "Enabled" & dever > 0.2 & click_coverage > 0.1 ~ "Further_investigate",
TRUE ~ DEVfeature))
data <- data_1 %>%select(portfolio_name,campaign_spend_multiple,reco_sm, param_value_zil, reco_ZILstatus, param_value_lb, reco_LearningBudget, unconstrained_rev_share_percent, reco_max_bid, cost_model_half_life, revenue_model_half_life, reco_cost_HL, reco_rev_HL, param_value_intraday, reco_Intraday, DOWfeature, reco_dow, DEVfeature, reco_devf)
#reco_cost_HL,reco_rev_HL
data_v12 <- melt(as.data.table(data), id.vars = "portfolio_name")
data_v13 <- data_v12 %>% filter(variable == "campaign_spend_multiple" | variable == "param_value_zil" | variable == "param_value_lb" | variable == "unconstrained_rev_share_percent" | variable == "cost_model_half_life" | variable == "revenue_model_half_life" | variable == "param_value_intraday" | variable == "DOWfeature" | variable == "DEVfeature")
colnames(data_v13) <- c("portfolio_name","Settings","Current_value")
library("plyr")
data_v13$Settings <- revalue(data_v13$Settings, c("param_value_zil"="Zero Impression Bid Units", "param_value_lb"="Learning Budget", "unconstrained_rev_share_percent" = "Inc.Unconstrained Revenue % ", "param_value_intraday"= "Intraday", "DOWfeature"= "DOW Modeling", "cost_model_half_life" = "Cost Model Half Life", "revenue_model_half_life" = "Revenue Model Half Life","campaign_spend_multiple"="Multiple", "DEVfeature"="Device BId Adjustments"))
data_v14 <- data_v12 %>% filter(variable == "reco_sm" | variable == "reco_ZILstatus" | variable == "reco_LearningBudget" | variable == "reco_max_bid" | variable == "reco_cost_HL" | variable == "reco_rev_HL" | variable == "reco_Intraday" | variable == "reco_dow"| variable == "reco_devf")
colnames(data_v14) <- c("portfolio_name","Settings","Recommended_value")
data_v14$Settings <- revalue(data_v14$Settings, c("reco_sm"="Multiple","reco_ZILstatus"="Zero Impression Bid Units" ,"reco_LearningBudget"="Learning Budget", "reco_max_bid" = "Inc.Unconstrained Revenue % ", "reco_cost_HL" = "Cost Model Half Life", "reco_rev_HL" = "Revenue Model Half Life", "reco_Intraday" = "Intraday", "reco_dow" = "DOW Modeling", "reco_devf"="Device BId Adjustments"))
data_v15 <- merge(data_v13, data_v14, by = c("portfolio_name","Settings"))
data_v15[is.na(data_v15)] <- 0
data_v15 <- data_v15 %>% select(Settings, Current_value, Recommended_value )
library(dplyr)
x <- c("DOW Modeling", "Multiple","Device BId Adjustments", "Learning Budget", "Zero Impression Bid Units","Intraday","Cost Model Half Life","Revenue Model Half Life","Inc.Unconstrained Revenue % ")
data_v15$NewTemp <- ifelse(data_v15$Recommended_value == data_v15$Current_value, 1, 0)
data_v15 <- data_v15 %>%
slice(match(x, Settings))
data_v15
},
extensions = 'Buttons', options = list(scrollX=TRUE,
dom = 'Bfrtip',
buttons = c('copy', 'csv'), columnDefs = list(list(targets = 4, visible = FALSE))
))%>% formatStyle('Recommended_value', 'NewTemp',backgroundColor = styleEqual(c(1,0), c('white', 'yellow')), fontWeight = styleEqual(c(1,0), c('normal', 'bold'))))
# Day-of-week (DOW) performance table for the selected portfolio.
# Aggregates actual/predicted cost & revenue by portfolio and weekday, ranks
# weekdays by actual spend (rank 1 = highest), and derives spend share and
# ROAS accuracy (actual ROAS / predicted ROAS * 100).
# FIX: mutate_at()/funs() are deprecated in modern dplyr; replaced with an
# equivalent plain mutate() rounding of spendperc.
output$dowTable <- DT::renderDataTable(DT::datatable({
  data_3 <- as.data.frame(passdata1()$df4)
  data_3[is.na(data_3)] <- 0
  library(lubridate)
  data_3$date <- as.Date(data_3$date)
  data_3$dow <- lubridate::wday(data_3$date, label=TRUE)
  data_v3 <- data_3 %>% select(portfolio_name, dow, act_cost, act_rev, pred_cost, pred_rev)
  data_v4 <- aggregate(.~portfolio_name+dow,data_v3,sum)
  # Rank weekdays within each portfolio by actual spend and compute each
  # weekday's share of total spend.
  data_v4 <- data_v4 %>% group_by(portfolio_name) %>% mutate(rank = rank(desc(act_cost)), total_cost= sum(act_cost)) %>% arrange(rank)
  data_v4 <- data_v4 %>% group_by(portfolio_name) %>% mutate(spendperc=100*act_cost/total_cost)
  data_v4 <- data_v4 %>% mutate(spendperc = round(spendperc, 2))
  data_v4 <- data_v4 %>% mutate(act_roas=act_rev/act_cost)
  data_v4 <- data_v4 %>% mutate(pred_roas=pred_rev/pred_cost)
  data_v4 <- data_v4 %>% mutate(roas_acc=act_roas/pred_roas*100)
  # 0/0 divisions yield NaN; is.na() is TRUE for NaN, so this zeroes them too.
  data_v4[is.na(data_v4)] <- 0
  data_v4 <- data_v4 %>% ungroup() %>% select(dow, act_cost, pred_cost, act_rev, pred_rev, rank, spendperc, act_roas, pred_roas, roas_acc)
  data_v4
}, extensions = 'Buttons', options = list(scrollX=TRUE,
                                          dom = 'Bfrtip',
                                          buttons = c('copy', 'csv')
), rownames = FALSE) %>%
  formatRound('act_cost', digits = 3) %>%
  formatRound('act_rev', digits = 3) %>%
  formatRound('pred_cost', digits = 3) %>%
  formatRound('pred_rev', digits = 3) %>%
  formatRound('spendperc', digits = 3) %>%
  formatRound('act_roas', digits = 3) %>%
  formatRound('pred_roas', digits = 3) %>%
  formatRound('roas_acc', digits = 3))
# Daily click-accuracy trend line, faceted by portfolio.
# FIX: removed dead computation — change-point detection (changepoint::cpt.mean)
# and z-score outlier scoring were computed here, but every plot layer that
# consumed them was commented out, so they only wasted work and drew stray
# base-graphics plots before the final ggplot. The rendered output (the last
# printed plot) is unchanged.
output$plot4 <- renderPlot({
  data_1 <- as.data.frame(passdata1()$df10)
  data_1 <- data_1 %>% select(portfolio_name, date, click_acc, cpc_acc)
  p <- ggplot() +
    geom_line(data = data_1, aes(x = as.Date(date), y = click_acc, group = 1),
              color = "darkred", show.legend = FALSE) +
    xlab('Date') +
    ylab('click_acc') +
    facet_grid(. ~ portfolio_name) +
    scale_x_date(date_breaks = "1 week") +
    theme(axis.text.x = element_text(color = "grey20", size = 15, angle = 45, hjust = .5, vjust = .5, face = "plain"),
          axis.text.y = element_text(color = "grey20", size = 15, angle = 0, hjust = 1, vjust = 0, face = "plain"))
  print(p)
})
# Daily CPC-accuracy trend line, faceted by portfolio.
# FIX: removed dead computation — change-point detection and z-score outlier
# scoring were computed but only fed commented-out layers, wasting work and
# drawing stray base-graphics plots before the final ggplot. The rendered
# output (the last printed plot) is unchanged.
output$plot5 <- renderPlot({
  data_1 <- as.data.frame(passdata1()$df10)
  data_1 <- data_1 %>% select(portfolio_name, date, click_acc, cpc_acc)
  p <- ggplot() +
    geom_line(data = data_1, aes(x = as.Date(date), y = cpc_acc, group = 1),
              color = "darkred", show.legend = FALSE) +
    xlab('Date') +
    ylab('cpc_acc') +
    facet_grid(. ~ portfolio_name) +
    scale_x_date(date_breaks = "1 week") +
    theme(axis.text.x = element_text(color = "grey20", size = 15, angle = 45, hjust = .5, vjust = .5, face = "plain"),
          axis.text.y = element_text(color = "grey20", size = 15, angle = 0, hjust = 1, vjust = 0, face = "plain"))
  print(p)
})
# Daily cost-accuracy plot (df9), faceted by portfolio, annotated with:
# - z-score outlier points (95% threshold, via the outliers package),
# - a vertical line at the most recent detected change in mean, and
# - a horizontal reference line at 100% accuracy.
# NOTE(review): if cpt.mean() finds no change points, cpts_CP is empty and
# max(cpts_CP) is -Inf, which would break the geom_vline/geom_text indexing
# below — confirm upstream data always yields at least one change point.
output$plot1 <- renderPlot({
data_1 <- as.data.frame(passdata1()$df9)
data_1 <- data_1 %>% select(portfolio_name, date, cost_acc, rpc_acc)
y_data <- data_1 %>% select(date,cost_acc, rpc_acc)
y_data[is.na(y_data)] <- 0
library(changepoint)
# Binary-segmentation change-in-mean detection on cost accuracy; minimum
# segment length of 7 days to avoid reacting to single-day noise.
cptm_CP <- cpt.mean(y_data$cost_acc, penalty='MBIC',pen.value=0, method='BinSeg',
test.stat="Normal", minseglen=7, class=TRUE)
cptm_CP
plot(cptm_CP)
cpts_CP <- cpts(cptm_CP) # change point time points
cpts_CP
library(changepoint)
# Same detection on RPC accuracy (computed but not drawn in this plot).
cptm_CP_1 <- cpt.mean(y_data$rpc_acc, penalty='MBIC',pen.value=0, method='BinSeg',
test.stat="Normal", minseglen=7, class=TRUE)
cptm_CP_1
plot(cptm_CP_1)
cpts_CP_1 <- cpts(cptm_CP_1) # change point time points
cpts_CP_1
library("outliers")
# Flag days whose accuracy z-score exceeds the 95% probability threshold.
y_data$Cost_Acc_score <- scores(type="z", y_data$cost_acc, prob=.95)
y_data$RPC_Acc_score <- scores(type="z", y_data$rpc_acc, prob=.95)
cost_outliers <- list(subset(y_data, Cost_Acc_score==TRUE)[,1])
cost_outliers
revenue_outliers <- list(subset(y_data, RPC_Acc_score==TRUE)[,1])
revenue_outliers
y_data_1 <- subset(y_data, RPC_Acc_score==TRUE)
y_data_2 <- subset(y_data, Cost_Acc_score==TRUE)
# Line = daily cost accuracy; points = cost-accuracy outlier days.
p= ggplot() +
geom_line(data = data_1, aes(x = as.Date(date), y = cost_acc, group=1), color="red" , show.legend = F) +
geom_point(data = y_data_2, aes(x = as.Date(date), y = cost_acc, group=1, shape="circle"),size=5, color="red", show.legend = T)+
xlab('Date') +
ylab('cost_acc') + facet_grid(. ~ portfolio_name)+
scale_x_date(date_breaks = "1 week")+
geom_vline(data=y_data, mapping = aes(xintercept=as.Date(y_data[max(cpts_CP),1]), linetype="twodash"),size=1, color = "red", show.legend =T)+
geom_hline(data=data_1, mapping = aes(yintercept=100, linetype="dotted"),size=1, color = "red", show.legend =T)+
geom_text(data=y_data, mapping=aes(x=as.Date(y_data[max(cpts_CP),1]), y=0, label=as.Date(y_data[max(cpts_CP),1] , sep = " ")), size=3, vjust=1, hjust=0, angle=90)+
scale_shape_manual(name = " Legend-Shape", labels = c("circle"="outliers"), values = c("circle"))+
scale_linetype_manual(name = "Legend-Line ", labels = c("dotted"="100 %","twodash"="change_mean"), values = c("dotted","twodash"))+
guides(shape = guide_legend("Legend-Shape",override.aes = list(linetype = 0, color="red")),
linetype = guide_legend("Legend-Line",override.aes = list(shape = 0, color="red")))+
# theme_set(theme_gray(base_size = 18))+
theme(axis.text.x = element_text(color = "grey20",size = 15, angle = 45, hjust = .5, vjust = .5, face = "plain"),
axis.text.y = element_text(color = "grey20",size = 15, angle = 0, hjust = 1, vjust = 0, face = "plain"),
axis.title.y = element_text(size = 15, angle = 90),
axis.title.x = element_text(size = 15, angle = 00),
strip.text.x = element_text(size = 15, colour = "black", angle = 00))
print(p)
})
# Actual ROAS by day of week (bar chart), faceted by portfolio, with a
# reference line at the mean actual ROAS across weekdays.
# FIX: removed dead code — passdata1()$df1 was fetched into data_1 but never
# used, and pred_roas/roas_acc were computed then immediately dropped by the
# select(). Rendered output is unchanged.
output$plot3 <- renderPlot({
  data_3 <- as.data.frame(passdata1()$df4)
  data_3[is.na(data_3)] <- 0
  library(lubridate)
  data_3$date <- as.Date(data_3$date)
  data_3$dow <- lubridate::wday(data_3$date, label=TRUE)
  data_v3 <- data_3 %>% select(portfolio_name, dow, act_cost, act_rev, pred_cost, pred_rev)
  data_v4 <- aggregate(.~portfolio_name+dow,data_v3,sum)
  # Only actual ROAS is plotted here.
  data_v4 <- data_v4 %>% mutate(act_roas=act_rev/act_cost)
  data_v4 <- data_v4 %>% select(portfolio_name,act_roas,dow)
  admin_v6 <- melt(data_v4)
  p <- ggplot() +
    geom_bar(data=admin_v6, aes(x=dow, y=value),stat='identity', position='dodge2', color="darkcyan", fill="darkcyan")+ facet_grid(. ~ portfolio_name)+labs(x="day of week", y="actual_roas") + scale_fill_discrete(guide=FALSE)+geom_hline(data=data_v4, mapping = aes(yintercept=mean(act_roas), linetype="dotted"),size=1, color = "darkcyan", show.legend =T)+scale_linetype_manual(name = "Legend-Line ", labels = c("dotted"="mean roas"), values = c("twodash"))+
    guides(linetype = guide_legend("Legend-Line",override.aes = list(shape = 0, color="darkcyan")))+
    theme(axis.text.x = element_text(color = "grey20", size = 15, angle = 45, hjust = .5, vjust = .5, face = "plain"),
          axis.text.y = element_text(color = "grey20", size = 15, angle = 0, hjust = 1, vjust = 0, face = "plain"))
  print(p)
})
# Actual spend by day of week (bar chart), faceted by portfolio.
# FIX: removed dead code — passdata1()$df1 was fetched but never used, and
# three mutate() lines were already commented out. Rendered output unchanged.
output$plotcost <- renderPlot({
  data_3 <- as.data.frame(passdata1()$df4)
  data_3[is.na(data_3)] <- 0
  library(lubridate)
  data_3$date <- as.Date(data_3$date)
  data_3$dow <- lubridate::wday(data_3$date, label=TRUE)
  data_v3 <- data_3 %>% select(portfolio_name, dow, act_cost, act_rev, pred_cost, pred_rev)
  data_v4 <- aggregate(.~portfolio_name+dow,data_v3,sum)
  data_v4 <- data_v4 %>% select(portfolio_name,act_cost,dow)
  admin_v6 <- melt(data_v4)
  p <- ggplot() +
    geom_bar(data=admin_v6, aes(x=dow, y=value),stat='identity', position='dodge2', color="red", fill="red")+ facet_grid(. ~ portfolio_name)+labs(x="day of week", y="actual_cost") + scale_fill_discrete(guide=FALSE)+
    theme(axis.text.x = element_text(color = "grey20", size = 15, angle = 45, hjust = .5, vjust = .5, face = "plain"),
          axis.text.y = element_text(color = "grey20", size = 15, angle = 0, hjust = 1, vjust = 0, face = "plain"))
  print(p)
})
# Cost accuracy (actual/predicted * 100) by day of week, faceted by portfolio.
# FIX: removed dead code — passdata1()$df1 was fetched but never used.
# Rendered output unchanged.
output$plotcostacc <- renderPlot({
  data_3 <- as.data.frame(passdata1()$df4)
  data_3[is.na(data_3)] <- 0
  library(lubridate)
  data_3$date <- as.Date(data_3$date)
  data_3$dow <- lubridate::wday(data_3$date, label=TRUE)
  data_v3 <- data_3 %>% select(portfolio_name, dow, act_cost, act_rev, pred_cost, pred_rev)
  data_v4 <- aggregate(.~portfolio_name+dow,data_v3,sum)
  data_v4 <- data_v4 %>% mutate(cost_acc=act_cost/pred_cost*100)
  data_v4 <- data_v4 %>% select(portfolio_name,cost_acc,dow)
  admin_v6 <- melt(data_v4)
  p <- ggplot() +
    geom_bar(data=admin_v6, aes(x=dow, y=value),stat='identity', position='dodge2', color="red", fill="red")+ facet_grid(. ~ portfolio_name)+labs(x="day of week", y="cost_acc") + scale_fill_discrete(guide=FALSE)+
    theme(axis.text.x = element_text(color = "grey20", size = 15, angle = 45, hjust = .5, vjust = .5, face = "plain"),
          axis.text.y = element_text(color = "grey20", size = 15, angle = 0, hjust = 1, vjust = 0, face = "plain"))
  print(p)
})
# ROAS accuracy (actual ROAS / predicted ROAS * 100) by day of week, faceted
# by portfolio, with a reference line at 100%.
# BUG FIX: roas_acc was computed as act_cost/pred_cost*100 (that is cost
# accuracy), while the plot is labeled roas_acc. Corrected to
# act_roas/pred_roas*100, consistent with how roas_acc is derived in the
# dowTable and text8/text9 blocks of this same server. Also removed the
# unused fetch of passdata1()$df1.
output$plotroasacc <- renderPlot({
  data_3 <- as.data.frame(passdata1()$df4)
  data_3[is.na(data_3)] <- 0
  library(lubridate)
  data_3$date <- as.Date(data_3$date)
  data_3$dow <- lubridate::wday(data_3$date, label=TRUE)
  data_v3 <- data_3 %>% select(portfolio_name, dow, act_cost, act_rev, pred_cost, pred_rev)
  data_v4 <- aggregate(.~portfolio_name+dow,data_v3,sum)
  data_v4 <- data_v4 %>% mutate(act_roas=act_rev/act_cost)
  data_v4 <- data_v4 %>% mutate(pred_roas=pred_rev/pred_cost)
  data_v4 <- data_v4 %>% mutate(roas_acc=act_roas/pred_roas*100)
  data_v4 <- data_v4 %>% select(portfolio_name,roas_acc,dow)
  admin_v6 <- melt(data_v4)
  p <- ggplot() +
    geom_bar(data=admin_v6, aes(x=dow, y=value),stat='identity', position='dodge2', color="darkcyan", fill="darkcyan")+ facet_grid(. ~ portfolio_name)+labs(x="day of week", y="roas_acc") + scale_fill_discrete(guide=FALSE)+geom_hline(data=data_v4, mapping = aes(yintercept=100, linetype="dotted"),size=1, color = "darkcyan", show.legend =T)+scale_linetype_manual(name = "Legend-Line ", labels = c("dotted"="100 %"), values = c("twodash"))+
    guides(linetype = guide_legend("Legend-Line",override.aes = list(shape = 0, color="darkcyan")))+
    theme(axis.text.x = element_text(color = "grey20", size = 15, angle = 45, hjust = .5, vjust = .5, face = "plain"),
          axis.text.y = element_text(color = "grey20", size = 15, angle = 0, hjust = 1, vjust = 0, face = "plain"))
  print(p)
})
# DOW Dispersion Index: variance of actual ROAS across weekdays divided by
# its mean. High values suggest weekday-level ROAS varies enough for DOW
# modeling to help.
output$text8 <- renderText({
data_1 <- as.data.frame(passdata1()$df1)
data_3 <- as.data.frame(passdata1()$df4)
data_3[is.na(data_3)] <- 0
library(lubridate)
data_3$date <- as.Date(data_3$date)
data_3$dow <- lubridate::wday(data_3$date, label=TRUE)
data_v3 <- data_3 %>% select(portfolio_name, dow, act_cost, act_rev, pred_cost, pred_rev)
data_v4 <- aggregate(.~portfolio_name+dow,data_v3,sum)
data_v4 <- data_v4 %>% mutate(act_roas=act_rev/act_cost)
data_v4 <- data_v4 %>% mutate(pred_roas=pred_rev/pred_cost)
data_v4 <- data_v4 %>% mutate(roas_acc=act_roas/pred_roas*100)
# Dispersion index = var/mean of actual ROAS (index-of-dispersion style);
# error rate = var/mean of ROAS accuracy. Broadcast to every row so that
# unique() below collapses each back to a single value.
data_v4$Variance_di <- var(data_v4$act_roas)
data_v4$avg_di <- mean(data_v4$act_roas)
data_v4$dispersion_index <- data_v4$Variance_di/data_v4$avg_di
data_v4$Variance_er <- var(data_v4$roas_acc)
data_v4$avg_er <- mean(data_v4$roas_acc)
data_v4$error_rate <- data_v4$Variance_er/data_v4$avg_er
# Comparing against the string "NaN" coerces the frame to character; NaN
# prints as "NaN", so this converts NaN cells to NA before zeroing. Quirky
# but intentional — do not "simplify" without checking is.nan semantics.
data_v4[ data_v4 == "NaN" ] <- NA
data_v4[is.na(data_v4)] <- 0
di <- unique(data_v4$dispersion_index)
er <- unique(data_v4$error_rate)
data_1$di <- di
data_1$er <- er
paste("DOW Dispersion Index:", round(data_1$di, digits = 3))
})
# Static caption introducing the Enable_DOW recommendation explanation list.
output$text12 <- renderText({
  caption <- "Enable_DOW recommendation could mean any of the following :"
  caption
})
# Static note about when the Intraday feature matters most.
output$textintra <- renderText({
  note <- "Enabling Intraday has the highest impact when campaigns capping is enabled."
  note
})
# One-line description of the dashboard, shown on the Instructions tab.
output$intro <- renderText({
  blurb <- "This app aims at diagnosing SEM performance from data modeling stand point and recommending optimal portfolio settings."
  blurb
})
# DOW Error Rate: variance of weekday ROAS accuracy divided by its mean.
# Mirrors the text8 block above except for the final metric reported.
output$text9 <- renderText({
data_1 <- as.data.frame(passdata1()$df1)
data_3 <- as.data.frame(passdata1()$df4)
data_3[is.na(data_3)] <- 0
library(lubridate)
data_3$date <- as.Date(data_3$date)
data_3$dow <- lubridate::wday(data_3$date, label=TRUE)
data_v3 <- data_3 %>% select(portfolio_name, dow, act_cost, act_rev, pred_cost, pred_rev)
data_v4 <- aggregate(.~portfolio_name+dow,data_v3,sum)
data_v4 <- data_v4 %>% mutate(act_roas=act_rev/act_cost)
data_v4 <- data_v4 %>% mutate(pred_roas=pred_rev/pred_cost)
data_v4 <- data_v4 %>% mutate(roas_acc=act_roas/pred_roas*100)
# var/mean summaries broadcast to every row; unique() collapses them later.
data_v4$Variance_di <- var(data_v4$act_roas)
data_v4$avg_di <- mean(data_v4$act_roas)
data_v4$dispersion_index <- data_v4$Variance_di/data_v4$avg_di
data_v4$Variance_er <- var(data_v4$roas_acc)
data_v4$avg_er <- mean(data_v4$roas_acc)
data_v4$error_rate <- data_v4$Variance_er/data_v4$avg_er
# String comparison against "NaN" converts NaN cells to NA (via character
# coercion) before zeroing — intentional quirk, see text8.
data_v4[ data_v4 == "NaN" ] <- NA
data_v4[is.na(data_v4)] <- 0
di <- unique(data_v4$dispersion_index)
er <- unique(data_v4$error_rate)
data_1$di <- di
data_1$er <- er
paste("DOW Error Rate:", round(data_1$er, digits = 3))
})
# Daily RPC-accuracy plot (df3), faceted by portfolio, annotated with
# z-score outlier points, a vertical line at the latest change in mean, and
# a horizontal 100% reference line. The 7 most recent days are excluded from
# the outlier/change-point computation (but still drawn in the line) —
# presumably to avoid reacting to not-yet-mature revenue data; confirm.
# NOTE(review): if no change point is found, max(cpts_CP_1) is -Inf and the
# geom_vline/geom_text indexing below would fail.
output$plot2 <- renderPlot({
data_1 <- as.data.frame(passdata1()$df3)
data_1 <- data_1 %>% select(portfolio_name, date, cost_acc, rpc_acc)
y_data <- data_1 %>% select(date,cost_acc, rpc_acc)
# Sort newest-first, then drop the first 7 rows = the 7 most recent days.
y_data <- y_data %>%arrange(desc(date))
y_data <- tail(y_data, -7)
y_data[is.na(y_data)] <- 0
# library(changepoint)
#
# cptm_CP <- cpt.mean(y_data$cost_acc, penalty='MBIC',pen.value=0, method='BinSeg',
#                     test.stat="Normal", minseglen=7, class=TRUE)
# cptm_CP
#
# plot(cptm_CP)
#
# cpts_CP <- cpts(cptm_CP) # change point time points
# cpts_CP
library(changepoint)
# Binary-segmentation change-in-mean detection on RPC accuracy.
cptm_CP_1 <- cpt.mean(y_data$rpc_acc, penalty='MBIC',pen.value=0, method='BinSeg',
test.stat="Normal", minseglen=7, class=TRUE)
cptm_CP_1
plot(cptm_CP_1)
cpts_CP_1 <- cpts(cptm_CP_1) # change point time points
cpts_CP_1
library("outliers")
# Flag days whose accuracy z-score exceeds the 95% probability threshold.
y_data$Cost_Acc_score <- scores(type="z", y_data$cost_acc, prob=.95)
y_data$RPC_Acc_score <- scores(type="z", y_data$rpc_acc, prob=.95)
cost_outliers <- list(subset(y_data, Cost_Acc_score==TRUE)[,1])
cost_outliers
revenue_outliers <- list(subset(y_data, RPC_Acc_score==TRUE)[,1])
revenue_outliers
y_data_1 <- subset(y_data, RPC_Acc_score==TRUE)
y_data_2 <- subset(y_data, Cost_Acc_score==TRUE)
# Line = daily RPC accuracy; points = RPC-accuracy outlier days.
p= ggplot() +
geom_line(data = data_1, aes(x = as.Date(date), y = rpc_acc, group=1), color="darkcyan" , show.legend = F) +
geom_point(data = y_data_1, aes(x = as.Date(date), y = rpc_acc, group=1, shape="circle"),size=5, color="darkcyan", show.legend = T)+
xlab('Date') +
ylab('rpc_acc') + facet_grid(. ~ portfolio_name)+
scale_x_date(date_breaks = "1 week")+
geom_vline(data=y_data, mapping = aes(xintercept=as.Date(y_data[max(cpts_CP_1),1]), linetype="twodash"),size=1, color = "darkcyan", show.legend =T)+
geom_hline(data=data_1, mapping = aes(yintercept=100, linetype="dotted"),size=1, color = "darkcyan", show.legend =T)+
geom_text(data=y_data, mapping=aes(x=as.Date(y_data[max(cpts_CP_1),1]), y=0, label=as.Date(y_data[max(cpts_CP_1),1] , sep = " ")), size=3, vjust=1, hjust=0, angle=90)+
scale_shape_manual(name = " Legend-Shape", labels = c("circle"="outliers"), values = c("circle"))+
scale_linetype_manual(name = "Legend-Line ", labels = c("dotted"="100 %","twodash"="change_mean"), values = c("dotted","twodash"))+
guides(shape = guide_legend("Legend-Shape",override.aes = list(linetype = 0, color="darkcyan")),
linetype = guide_legend("Legend-Line",override.aes = list(shape = 0, color="darkcyan"))) +
theme(axis.text.x = element_text(color = "grey20", size = 15, angle = 45, hjust = .5, vjust = .5, face = "plain"),
axis.text.y = element_text(color = "grey20", size = 15, angle = 0, hjust = 1, vjust = 0, face = "plain"))
print(p)
})
# Recommends dates to exclude from the revenue model: days whose RPC accuracy
# is a z-score outlier (95% threshold), after dropping the 7 most recent days
# (presumably to avoid immature revenue data — confirm with data owners).
# FIX: removed dead code — the cost-accuracy outlier computation was never
# used in this block's output. Rendered text is unchanged.
output$text3 <- renderText({
  data_1 <- as.data.frame(passdata1()$df3)
  data_1 <- data_1 %>% select(portfolio_name, date, cost_acc, rpc_acc)
  y_data <- data_1 %>% select(date, cost_acc, rpc_acc)
  # Sort newest-first, then drop the first 7 rows = the 7 most recent days.
  y_data <- y_data %>% arrange(desc(date))
  y_data <- tail(y_data, -7)
  y_data[is.na(y_data)] <- 0
  library("outliers")
  y_data$RPC_Acc_score <- scores(type="z", y_data$rpc_acc, prob=.95)
  revenue_outliers <- list(subset(y_data, RPC_Acc_score==TRUE)[,1])
  paste(c("Recommendation -- exclude the following dates from the revenue model:", unlist(revenue_outliers)), collapse = " ")
})
# Reports how many RPC-accuracy outliers exist (z-score, 95% threshold,
# excluding the 7 most recent days).
# FIXES: (1) removed the unused cost-accuracy outlier computation;
# (2) replaced `sum(count(unlist(x))$freq)` with the equivalent
# `length(unlist(x))` — the old form depended on plyr::count being the
# `count` in scope (dplyr::count, which this app also attaches, has a
# different signature and would break depending on load order).
output$rpcdiag <- renderText({
  data_1 <- as.data.frame(passdata1()$df3)
  data_1 <- data_1 %>% select(portfolio_name, date, cost_acc, rpc_acc)
  y_data <- data_1 %>% select(date, cost_acc, rpc_acc)
  # Drop the 7 most recent days before scoring.
  y_data <- y_data %>% arrange(desc(date))
  y_data <- tail(y_data, -7)
  y_data[is.na(y_data)] <- 0
  library("outliers")
  y_data$RPC_Acc_score <- scores(type="z", y_data$rpc_acc, prob=.95)
  revenue_outliers <- list(subset(y_data, RPC_Acc_score==TRUE)[,1])
  # Total outlier count (equivalent to summing per-value frequencies).
  sf <- length(unlist(revenue_outliers))
  paste(c("There are", sf, "outliers in the RPC accuracy"), collapse = " ")
})
# Reports how many cost-accuracy outliers exist (z-score, 95% threshold)
# across the full df9 history.
# FIXES: (1) removed the unused RPC-accuracy outlier computation;
# (2) replaced `sum(count(unlist(x))$freq)` with the equivalent
# `length(unlist(x))` to avoid the plyr::count / dplyr::count masking
# ambiguity (both packages are attached elsewhere in this app).
output$costdiag <- renderText({
  data_1 <- as.data.frame(passdata1()$df9)
  data_1 <- data_1 %>% select(portfolio_name, date, cost_acc, rpc_acc)
  y_data <- data_1 %>% select(date, cost_acc, rpc_acc)
  y_data[is.na(y_data)] <- 0
  library("outliers")
  y_data$Cost_Acc_score <- scores(type="z", y_data$cost_acc, prob=.95)
  cost_outliers <- list(subset(y_data, Cost_Acc_score==TRUE)[,1])
  # Total outlier count (equivalent to summing per-value frequencies).
  sf <- length(unlist(cost_outliers))
  paste(c("There are", sf,"outliers in the Cost Accuracy"), collapse = " ")
})
# Recommends dates to exclude from the cost model: days whose cost accuracy
# is a z-score outlier (95% threshold) over the full df9 history.
# FIX: removed dead code — the RPC-accuracy outlier computation was never
# used in this block's output. Rendered text is unchanged.
output$text4 <- renderText({
  data_1 <- as.data.frame(passdata1()$df9)
  data_1 <- data_1 %>% select(portfolio_name, date, cost_acc, rpc_acc)
  y_data <- data_1 %>% select(date, cost_acc, rpc_acc)
  y_data[is.na(y_data)] <- 0
  library("outliers")
  y_data$Cost_Acc_score <- scores(type="z", y_data$cost_acc, prob=.95)
  cost_outliers <- list(subset(y_data, Cost_Acc_score==TRUE)[,1])
  paste(c("Recommendation -- exclude the following dates from the cost model:", unlist(cost_outliers)), collapse = " ")
})
# Show the click-coverage metric from the portfolio summary (df1).
output$text5 <- renderText({
  summary_row <- as.data.frame(passdata1()$df1)
  paste("Click Coverage:", summary_row$click_coverage)
})
# Show the revenue-coverage metric from the portfolio summary (df1).
output$text6 <- renderText({
  summary_row <- as.data.frame(passdata1()$df1)
  paste("Revenue Coverage:", summary_row$revenue_coverage)
})
# Show the spend strategy from the portfolio summary (df1).
output$text7 <- renderText({
  summary_row <- as.data.frame(passdata1()$df1)
  paste("Spend Strategy:", summary_row$strategy)
})
# Reports the most recent change-in-mean date for daily cost accuracy and how
# many days ago it occurred.
# FIXES: removed a base-graphics plot() call — drawing a plot inside
# renderText targets no visible device and is pure waste — plus echo-only
# statements. The expression now returns the paste() value directly, which is
# exactly what print(paste(...)) returned to renderText before.
output$text1 <- renderText({
  data_1 <- as.data.frame(passdata1()$df9)
  data_1 <- data_1 %>% select(date, cost_acc)
  data_1[is.na(data_1)] <- 0
  library(changepoint)
  # Binary-segmentation change-in-mean detection; minimum segment of 7 days.
  cptm_CP <- cpt.mean(data_1$cost_acc, penalty='MBIC', pen.value=0, method='BinSeg',
                      test.stat="Normal", minseglen=7, class=TRUE)
  cpts_CP <- cpts(cptm_CP) # change point time points
  # NOTE(review): if no change point is found, max(cpts_CP) is -Inf and the
  # indexing below fails — confirm upstream data always yields one.
  paste("Diagnostic -- latest change in mean of cost accuracy occured on", data_1[max(cpts_CP),1], "that is", Sys.Date()-(as.Date(data_1[max(cpts_CP),1])), "days ago." )
})
# Reports the most recent change-in-mean date for daily RPC accuracy (with the
# 7 most recent days excluded) and how many days ago it occurred.
# FIXES: removed a base-graphics plot() call inside renderText (no visible
# device; pure waste) plus echo-only statements; the expression now returns
# the paste() value directly, matching what print(paste(...)) returned.
output$text2 <- renderText({
  data_1 <- as.data.frame(passdata1()$df3)
  data_1 <- data_1 %>% select(date, rpc_acc)
  # Sort newest-first, then drop the first 7 rows = the 7 most recent days.
  data_1 <- data_1 %>% arrange(desc(date))
  data_1 <- tail(data_1, -7)
  data_1[is.na(data_1)] <- 0
  library(changepoint)
  cptm_CP <- cpt.mean(data_1$rpc_acc, penalty='MBIC', pen.value=0, method='BinSeg',
                      test.stat="Normal", minseglen=7, class=TRUE)
  cpts_CP <- cpts(cptm_CP) # change point time points
  # NOTE(review): empty cpts_CP would make max() return -Inf and break the
  # indexing below.
  paste("Diagnostic -- latest change in mean of RPC accuracy occured on", data_1[max(cpts_CP),1], "that is", Sys.Date()-(as.Date(data_1[max(cpts_CP),1])), "days ago." )
})
# Echo the user's portfolio-name/PID selection back to them.
output$selected_var_2 <- renderText({
  chosen <- input$PortfolioNamePID
  paste("You have selected the Portfolio Name and PID : ", chosen)
})
# Static heading for the contact-info box.
output$contact <- renderText({
  heading <- "For any questions please reach out to:"
  heading
})
# Static heading for the per-tab instructions box.
output$diag <- renderText({
  heading <- "Instructions and Descriptions per tab:"
  heading
})
# Static intro text for the glossary link.
output$gloss <- renderText({
  blurb <- "The ways in which we came up with the recommendations along with a comprehensive understanding of the different terminologies mentioned in the dashboard is given in the link below:"
  blurb
})
# Static data-quality note (JIRA reference link is rendered by the UI).
output$dq <- renderText({
  note <- "Data Quality validated. JIRA Ticket Number:"
  note
})
# Static data-quality note (same wording as output$dq; used by a second box).
output$dq1 <- renderText({
  note <- "Data Quality validated. JIRA Ticket Number:"
  note
})
# Static data-quality note, plural-ticket variant.
output$dq12 <- renderText({
  note <- "Data Quality validated. JIRA Ticket Numbers:"
  note
})
# Static note on data-quality validation status of dimensional data.
output$nt <- renderText({
  note <- "Data Quality has been validated for daily accuracy data. Dimensional data is yet to be validated. JIRA tickets have been raised for the same"
  note
})
}
ui <- dashboardPage(
dashboardHeader(
title="Search Performance App"
# tags$li(class="dropdown",
# tags$img(src="frontier32x32.png"),"Search Diagnostics")
),
dashboardSidebar(
sidebarMenu(
menuItem("Instructions",tabName = "Instructions"),
menuItem("Account_Diagnostics",tabName = "Account_Diagnostics"),
menuItem("Portfolio_Recommendations",tabName = "Portfolio_Recommendations"),
menuItem("Daily_Accuracy",tabName = "Daily_Accuracy"),
menuItem("DOW_Modeling",tabName = "DOW_Modeling"),
menuItem("Match_Type",tabName = "Match_Type"),
menuItem("Click_Level",tabName = "Click_Level"),
menuItem("Device_Level",tabName = "Device_Level")
)
),
dashboardBody(
tags$head(tags$style(HTML(' .main-sidebar{ width: 250px; } .main-header > .navbar { margin-left: 300px; } .main-header .logo { width: 300px; } .content-wrapper, .main-footer, .right-side { margin-left: 300px; } .box.box-solid.box-primary>.box-header {background:navy} .box.box-solid.box-info>.box-header {background:teal} .box.box-solid.box-warning>.box-header {background:purple}'))),
tabItems( tabItem(tabName = "Instructions",
fluidRow(box(width=12, title="ABOUT THE DASHBOARD", status="info", solidHeader = TRUE, textOutput('intro'), tags$head(tags$style("#intro{color: black;
font-size: 20px;}"
))),
box(width=12, title="WORKFLOW", status="info", solidHeader = TRUE, textOutput('diag'), tags$head(
tags$style("#diag{color: blue;
font-size: 20px;font-style: italic;}", HTML("
li {
font-size: 10px;
}
li span {
font-size: 18px;
}
ul {
list-style-type: square;
}
"))
),
tags$div(tags$ul(
tags$li(tags$span("Account_Diagnostics - Pick a client, db_tag, data center and submit. That will return cross portfolio diagnostic on the basis of cost and rpc accuracy. The portfolios are sorted by predicted cost")),
tags$li(tags$span("Portfolio_Recommendations - Pick a specific portfolio and submit. That will return two tables. The first table would be providing recommendations for portfolio settings in the UI. The second table would be providing details of the dispersion index and error rate per dimension. For more details on the terminologies please see the glossary section below")),
tags$li(tags$span("Daily_Accuracy - Based on the portfolio selected in the previous tab, this tab would help delving deeper into the half life recommendations of the previous tab. Also, it would provide recommendations for outlier removals.For more details on the terminologies please see the glossary section below ")),
tags$li(tags$span("Match_Type, Device_level, Click_Level, DOW_Modeling - These are the different dimensions that we will be looking at for each portfolio selected. The visualizations, tables and numerics in each of these tabs will help comprehending what is happening in each dimension and take bettter action using some of the recommendations provided in the previous tab. "))
))),
box(width=12, title="DECISION TREE", status="info", solidHeader = TRUE, tags$li(class="dropdown",
tags$img(src="Searchdiagnostics.png", height="100%", width="100%",align="center"))),
box(width=12, title="GLOSSARY", status="info", solidHeader = TRUE, textOutput('gloss'), tags$head(
tags$style("#gloss{color: blue;
font-size: 20px;font-style: italic;}", HTML("
li {
font-size: 10px;
}
li span {
font-size: 18px;
}
ul {
list-style-type: square;
}
"))
),
tags$a(href='Glossary.pdf', target='blank', 'CLICK HERE', download = 'Glossary.pdf')
),
box(width=12, title="CONTACT INFO", status="info", solidHeader = TRUE, textOutput('contact'), tags$head(
tags$style("#contact{color: blue;
font-size: 20px;font-style: italic;}", HTML("
li {
font-size: 10px;
}
li span {
font-size: 18px;
}
ul {
list-style-type: square;
}
"))
),
tags$div(tags$ul(
tags$li(tags$span("Laavanya Ganesh: lganesh@adobe.com")),
tags$li(tags$span("Benjamin Vigneron: vigneron@adobe.com")),
tags$li(tags$span("Alex Lambrakis: alambrak@adobe.com"))
))
))
),
tabItem(tabName = "Account_Diagnostics",
fluidRow(
box(title = "PICK A CLIENT, DB TAG, DATA CENTER AND SUBMIT",status="primary",color="navy",solidHeader = TRUE,
#actionButton("do", "Load active usernames"),
uiOutput("choose_dataset_1"),
uiOutput("choose_dataset"),
actionButton("goButton", "Submit")),
box(title= "DATE RANGE for DIAGNOSTICS",width=12, status="info",solidHeader = TRUE, textOutput('daterange1'), tags$head(tags$style("#daterange1{color: black;
font-size: 20px;font-style: italic;}"
)
)),
box(title= "DIAGNOSTICS FOR DIFFERENT PORTFOLIOS WITHIN THE ACCOUNT",width=12, status="success",solidHeader = TRUE, DT::dataTableOutput('tbl_1')
),
box(title= "NOTE",width=12, status="info",solidHeader = TRUE, textOutput('dq'), tags$head(tags$style("#dq{color: green;
font-size: 20px;font-style: italic;}"
)
),
tags$a(href='https://jira.corp.adobe.com/browse/AMO-130089', target='_blank', 'AMO-130089')
)
# tags$a(href='https://jira.corp.adobe.com/browse/AMO-130089', 'AMO-130089', , target="_blank"'))
# downloadButton("downloadData", "Download")
) ),
tabItem(tabName = "Portfolio_Recommendations",
fluidRow(
box(width=12, textOutput('selected_var'), tags$head(tags$style("#selected_var{color: red;
font-size: 20px;font-style: italic;}"
))
),
box(title = "PICK A PORTFOLIO AND SUBMIT", status="primary", solidHeader = TRUE,
uiOutput("choose_dataset_2"),
actionButton("go", "Submit")),
box(title = "OVERVIEW", status="info",solidHeader = TRUE,width=12, textOutput('selected_var_1'), tags$head(tags$style("#selected_var_1{color: blue;
font-size: 20px;font-style: italic;}"
)), textOutput('selected_var_3'), tags$head(tags$style("#selected_var_3{color: blue;
font-size: 20px;font-style: italic;}"
)),textOutput('selected_var_4'), tags$head(tags$style("#selected_var_4{color: blue;
font-size: 20px;font-style: italic;}"
))
),
box(title= "DATE RANGE for RECOMMENDATIONS",width=12, status="info",solidHeader = TRUE, textOutput('daterange2'), tags$head(tags$style("#daterange2{color: black;
font-size: 20px;font-style: italic;}"
)
)),
# box(width=12, textOutput('selected_var_3'), tags$head(tags$style("#selected_var_3{color: blue;
# font-size: 20px;font-style: italic;}"
# )),textOutput('selected_var_4'), tags$head(tags$style("#selected_var_4{color: blue;
# font-size: 20px;font-style: italic;}"
# ))
# ),
# box(width=12, textOutput('selected_var_4'), tags$head(tags$style("#selected_var_4{color: blue;
# font-size: 20px;font-style: italic;}"
# ))
# ),
# box(width=12, DT::dataTableOutput('admin_reco')
# ),
box(title= "PORTFOLIO RECOMMENDATIONS FOR UI SETTINGS",status="success",solidHeader = TRUE,width=12, DT::dataTableOutput('admin_reco_2')
),
box(title="NOTE", status="info",solidHeader = TRUE,width=12, textOutput('text12'), tags$head(
tags$style("#text12{color: blue;
font-size: 20px;font-style: italic;}", HTML("
li {
font-size: 10px;
}
li span {
font-size: 18px;
}
ul {
list-style-type: square;
}
"))
),
tags$div(tags$ul(
tags$li(tags$span("Enable Weekly Spend Strategy")),
tags$li(tags$span("Enable Day of Week Spend Strategy with Day of Week Models"))))
# tags$li(tags$span("Enable Day of Week Spend Strategy without Day of Week Models"))))
),
box(title= "NOTE",width=12, status="info",solidHeader = TRUE, textOutput('textintra'), tags$head(tags$style("#textintra{color: blue;
font-size: 20px;font-style: italic;}"
)
)),
box(title= "NOTE",width=12, status="info",solidHeader = TRUE, textOutput('dq1'), tags$head(tags$style("#dq1{color: green;
font-size: 20px;font-style: italic;}"
)
),
tags$a(href='https://jira.corp.adobe.com/browse/AMO-130089', target='_blank', 'AMO-130089')
# tags$a(href='https://jira.corp.adobe.com/browse/AMO-130110', target='_blank', 'AMO-130110'),
# tags$a(href='https://jira.corp.adobe.com/browse/AMO-130112', target='_blank', 'AMO-130112')
),
box(title= "DIMENSION OPPORTUNITIES AND ERROR RATES",status="success",solidHeader = TRUE, width=12, DT::dataTableOutput('dimdt')
)
)
),
tabItem(tabName = "Daily_Accuracy",
fluidRow(
box(width=12, textOutput('selected_var_2'), tags$head(tags$style("#selected_var_2{color: blue;
font-size: 20px;font-style: italic;}"
))
),
box(width=12, status = "warning", solidHeader = TRUE, title = "COST ACCURACY", plotOutput("plot1"),
textOutput('text1'), tags$head(tags$style("#text1{color: red;
font-size: 20px;font-style: italic;}"
)),textOutput('costdiag'), tags$head(tags$style("#costdiag{color: red;
font-size: 20px;font-style: italic;}"
)) , textOutput('text5'), tags$head(tags$style("#text4{color: red;
font-size: 20px;font-style: italic;}"
)) ,textOutput('text4'), tags$head(tags$style("#text5{color: red;
font-size: 20px;font-style: italic;}"
))
),
# box(width=12, textOutput('text4'), tags$head(tags$style("#text4{color: red;
# font-size: 20px;font-style: italic;}"
# ))
# ),
# box(width=12, textOutput('text5'), tags$head(tags$style("#text5{color: red;
# font-size: 20px;font-style: italic;}"
# ))
# ),
box(width=12, status = "warning", solidHeader = TRUE, title = "RPC ACCURACY", plotOutput("plot2"),
textOutput('text2'), tags$head(tags$style("#text2{color: darkcyan;
font-size: 20px;font-style: italic;}"
)) ,
textOutput('rpcdiag'), tags$head(tags$style("#rpcdiag{color: darkcyan;
font-size: 20px;font-style: italic;}"
)) ,
textOutput('text6'), tags$head(tags$style("#text3{color: darkcyan;
font-size: 20px;font-style: italic;}"
)) ,
textOutput('text3'), tags$head(tags$style("#text6{color: darkcyan;
font-size: 20px;font-style: italic;}"
))
),
box(width=12, status = "warning", solidHeader = TRUE, title = "EXTRA PLOTS"),
box(width=6, title = "CLICK ACCURACY", plotOutput("plot4")),
box(width=6, title = "CPC ACCURACY", plotOutput("plot5"))
# box(width=12, textOutput('text3'), tags$head(tags$style("#text3{color: darkcyan;
# font-size: 20px;font-style: italic;}"
# ))
# ),
# box(width=12, textOutput('text6'), tags$head(tags$style("#text6{color: darkcyan;
# font-size: 20px;font-style: italic;}"
# ))
# )
)
),
tabItem(tabName = "DOW_Modeling",
fluidRow(
box(width=12, textOutput('text7'), tags$head(tags$style("#text7{color: blue;
font-size: 20px;font-style: italic;}"
))
),
box(width=6, status = "warning", solidHeader = TRUE, title = "DAY OF WEEK ACTUAL COST ", plotOutput("plotcost")),
box(width=6, status = "warning", solidHeader = TRUE, title = "DAY OF WEEK ACTUAL ROAS", plotOutput("plot3")),
box(width=6, status = "warning", solidHeader = TRUE, title = "DAY OF WEEK COST ACCURACY", plotOutput("plotcostacc")),
box(width=6, status = "warning", solidHeader = TRUE, title = "DAY OF WEEK ROAS ACCURACY", plotOutput("plotroasacc")),
box(title= "DATA ACROSS DAY OF WEEK",status="success",solidHeader = TRUE,width=12, DT::dataTableOutput('dowTable')
),
box(title = "OVERVIEW", status="info",solidHeader = TRUE,width=12, textOutput('text8'), tags$head(tags$style("#text8{color: green;
font-size: 20px;font-style: italic;}" )),
textOutput('text9'), tags$head(tags$style("#text9{color: green;
font-size: 20px;font-style: italic;}"
))
)
# box(width=12, textOutput('text9'), tags$head(tags$style("#text9{color: green;
# font-size: 20px;font-style: italic;}"
# ))
# )
)
),
tabItem(tabName = "Match_Type",
fluidRow(
box(width=12, textOutput('topmatch'), tags$head(tags$style("#topmatch{color: blue;
font-size: 20px;font-style: italic;}"
))
),
# box(width=12, plotlyOutput("plotMT")),
box(title= "DATA ACROSS MATCH TYPES",status="success",solidHeader = TRUE,width=12, DT::dataTableOutput('admin_MT')
),
box(width=6, status = "warning", solidHeader = TRUE, title = "MATCH TYPE ACTUAL COST ", plotOutput("MTplot2")),
box(width=6, status = "warning", solidHeader = TRUE, title = "MATCH TYPE ACTUAL ROAS", plotOutput("MTplot")),
box(width=6, status = "warning", solidHeader = TRUE, title = "MATCH TYPE COST ACCURACY", plotOutput("MTplot3")),
box(width=6, status = "warning", solidHeader = TRUE, title = "MATCH TYPE ROAS ACCURACY", plotOutput("MTplot1")),
box(title = "OVERVIEW", status="info",solidHeader = TRUE,width=12, textOutput('textmtdi'), tags$head(tags$style("#textmtdi{color: blue;
font-size: 20px;font-style: italic;}" )) ,
textOutput('textmter'), tags$head(tags$style("#textmter{color: blue;
font-size: 20px;font-style: italic;}"
))
)
# box(width=12, textOutput('textmter'), tags$head(tags$style("#textmter{color: blue;
# font-size: 20px;font-style: italic;}"
# ))
# )
)
),
tabItem(tabName = "Device_Level",
fluidRow(
box(width=12, textOutput('topdevice'), tags$head(tags$style("#topdevice{color: blue;
font-size: 20px;font-style: italic;}"
))
),
# box(width=12, plotlyOutput("plotdev")),
box(title= "DATA ACROSS DEVICES",status="success",solidHeader = TRUE,width=12, DT::dataTableOutput('admin_dev')
),
box(width=6, status = "warning", solidHeader = TRUE, title = "DEVICE ACTUAL COST ", plotOutput("devplot2")),
box(width=6, status = "warning", solidHeader = TRUE, title = "DEVICE ACTUAL ROAS", plotOutput("devplot")),
box(width=6, status = "warning", solidHeader = TRUE, title = "DEVICE COST ACCURACY", plotOutput("devplot3")),
box(width=6, status = "warning", solidHeader = TRUE, title = "DEVICE ROAS ACCURACY", plotOutput("devplot1")),
box(title = "OVERVIEW", status="info",solidHeader = TRUE,width=12, textOutput('textdevdi'), tags$head(tags$style("#textdevdi{color: blue;
font-size: 20px;font-style: italic;}" )) ,
textOutput('textdever'), tags$head(tags$style("#textdever{color: blue;
font-size: 20px;font-style: italic;}" ))
)
# box(width=12, textOutput('textdever'), tags$head(tags$style("#textdever{color: blue;
# font-size: 20px;font-style: italic;}"
# ))
# )
)
),
tabItem(tabName = "Click_Level",
fluidRow(
box(width=12, textOutput('textcl'), tags$head(tags$style("#textcl{color: blue;
font-size: 20px;font-style: italic;}"
))
),
box(title= "DATA ACROSS CLICK LEVELS",status="success",solidHeader = TRUE,width=12, DT::dataTableOutput('admin_CL')
),
box(width=6, status = "warning", solidHeader = TRUE, title = "CLICKLEVEL ACTUAL COST ", plotOutput("CLplot2")),
box(width=6, status = "warning", solidHeader = TRUE, title = "CLICKLEVEL ACTUAL ROAS", plotOutput("CLplot")),
box(width=6, status = "warning", solidHeader = TRUE, title = "CLICKLEVEL COST ACCURACY", plotOutput("CLplot3")),
box(width=6, status = "warning", solidHeader = TRUE, title = "CLICKLEVEL ROAS ACCURACY", plotOutput("CLplot1")),
box(title = "OVERVIEW", status="info",solidHeader = TRUE,width=12, textOutput('textcldi'), tags$head(tags$style("#textcldi{color: blue;
font-size: 20px;font-style: italic;}" )) ,
textOutput('textcler'), tags$head(tags$style("#textcler{color: blue;
font-size: 20px;font-style: italic;}"
))
)
# box(width=12, textOutput('textcler'), tags$head(tags$style("#textcler{color: blue;
# font-size: 20px;font-style: italic;}"
# ))
# )
)
)
)
)
)
shinyApp(ui = ui, server = server)
|
6ef31472f49d73b5c23e0473030b37a7eaab6b83
|
aa4f45999038440a4aafd0c4c00e894c2c668aa8
|
/plot4.r
|
9a2c3fc61affeb218873a15c7448107b58d6fa8e
|
[] |
no_license
|
charlieZha/ExData_Plotting1
|
50cd4384187c01eaccd5f42957de1e4ece623b92
|
53ad5aa1eb6b1d2de786fe17e0b39a1ae57ff1d2
|
refs/heads/master
| 2020-04-05T23:12:37.516038
| 2014-07-12T14:46:23
| 2014-07-12T14:46:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 872
|
r
|
plot4.r
|
# Reproduce "plot4" of the Electric Power Consumption exploratory analysis:
# a 2x2 panel of time-series plots for 2007-02-01 and 2007-02-02.
temp <- read.csv("household_power_consumption.txt", sep = ";", header = TRUE)
# Keep only the two target dates (stored as d/m/Y strings in the raw file).
data <- temp[temp$Date %in% c("1/2/2007", "2/2/2007"), ]
# Combine the separate Date and Time columns into a single timestamp.
data$datetime <- strptime(paste(data$Date, data$Time), "%d/%m/%Y %H:%M:%S")
png("plot4.png", height = 480, width = 480)
par(mfrow = c(2, 2))  # 2x2 grid, filled row by row
plot(data$datetime, data$Global_active_power, xlab = "", ylab = "Global Active Power", type = "l")
plot(data$datetime, data$Voltage, xlab = "datetime", ylab = "Voltage", type = "l")
# Legend labels must match the actual column names; "Sub_metering_3" was
# previously misspelled "Submetering_3" (missing underscore).
cols <- c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3")
plot(data$datetime, data$Sub_metering_1, xlab = "", ylab = "Energy sub metering", type = "l", col = "black")
lines(data$datetime, data$Sub_metering_2, col = "red")
lines(data$datetime, data$Sub_metering_3, col = "blue")
legend("topright", legend = cols, col = c("black", "red", "blue"), lty = 1, lwd = 3)
plot(data$datetime, data$Global_reactive_power, xlab = "datetime", ylab = "Global_reactive_power", type = "l")
dev.off()
|
45ce18b1b78cd56a266122b44f2756b189584f1d
|
5c2ac9974888c4c7d65440c59eea61f55a2c760b
|
/BrewHome/R/USbreweryhomevalues.R
|
4f35a436cdb7e95939bef5dbf9481d60f61d044a
|
[] |
no_license
|
ecwalters112/BrewHome-Package
|
78043596c73237449d390ecbea6f9bf602090215
|
2d88f0efec5382badc775364e55e3e7520981126
|
refs/heads/master
| 2020-12-26T14:09:55.061604
| 2020-02-04T16:34:27
| 2020-02-04T16:34:27
| 237,533,456
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,687
|
r
|
USbreweryhomevalues.R
|
#' Brewery & Home Info
#'
#' This function makes it easy to search home sales and rental
#' valuations by breweries in different states.
#'
#' The function asks the user to provide a U.S state that they would
#' like to search for information on home sale and rental valuations
#' around breweries.
#'
#'
#' @param state A state in the U.S (capitalized, e.g. "Colorado")
#' @keywords Brewery locations state home sales rentals
#' @export
#' @examples
#' get_breweryhomeinfo(state = "Colorado")
#'
get_breweryhomeinfo <- function(state) {
  library(dplyr)
  # States absent from the underlying dataset. Bug fix: "New Jersery" /
  # "new jersery" were misspelled, so a user asking for New Jersey
  # bypassed the informative error and silently got an empty data frame.
  states_notincluded <- c("Alabama", "Alaska", "Arizona", "Arkansas", "Connecticut", "Delaware", "Hawaii", "Idaho",
                          "Indiana", "Iowa", "Kansas", "Kentucky", "Louisiana", "Maine", "Maryland", "Massachusetts",
                          "Minnesota", "Mississippi", "Montana", "Nebraska", "Nevada", "New Hampshire", "New Jersey",
                          "New Mexico", "North Dakota", "Oklahoma", "Oregon", "Rhode Island", "South Dakota", "Utah",
                          "Vermont", "Washington", "West Virginia", "Wyoming", "alabama", "alaska", "arizona", "arkansas",
                          "connecticut", "delaware", "hawaii", "idaho", "indiana", "iowa", "kansas", "kentucky", "louisiana",
                          "maine", "maryland", "massachusetts", "minnesota", "mississippi", "montana", "nebraska", "nevada",
                          "new hampshire", "new jersey", "new mexico", "north dakota", "oklahoma", "oregon", "rhode island",
                          "south dakota", "utah", "vermont", "washington", "west virginia", "wyoming")
  # Lower-case spellings of the covered states, used to nudge users
  # towards capitalized input.
  state_lower <- c("new york", "pennsylvania", "virginia", "north carolina", "south carolina", "georgia", "florida", "tennessee",
                   "ohio", "michigan", "wisconsin", "illinois", "missouri", "texas", "colorado", "california")
  # Validate the input BEFORE touching the data (the original filtered first
  # and only then checked, doing wasted work). "Mchigan" typo also fixed.
  if (any(state %in% states_notincluded)) stop ("Due to the small size of the dataset, this state does not have any available information.
                                                Please choose a different U.S state from the following list: New York, Pennsylvania,
                                                Virginia, North Carolina, South Carolina, Georgia, Florida, Tennessee, Ohio, Michigan,
                                                Wisconsin, Illinois, Missouri, Texas, Colorado, California")
  if (any(state %in% state_lower)) stop ("States should always be capitalized!! Capitalize and re-run the function!")
  # Select the reporting columns and restrict to the requested state.
  data <- brewery_sales_rentals %>%
    select(Brewery, `Brewery Type`, Address, City, Zipcode, `Average Listing Price`, `Typical Rent Price`, State) %>%
    filter(State == state)
  return(data)
}
|
bfdae2d9b3dd0a26c53a8029350351667f889fba
|
bdf28f7b04438ab58e2ccabe178dd91f019accf2
|
/plot_training_20200701/0202-折线图.R
|
18d227df1e4fffecbb2bf4a7fc5c789174eb2f5e
|
[] |
no_license
|
yxr123321/Drawing_scientific_research
|
bc6e7d9075c17501c6e447249d10ae28bec8fb1b
|
4060eabcd41447a3efb0cb550ba4a8650f119da5
|
refs/heads/main
| 2023-03-06T17:26:34.525381
| 2021-02-21T15:06:29
| 2021-02-21T15:06:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 594
|
r
|
0202-折线图.R
|
## ----------------------------------------------------
## Line chart of life expectancy over time for China, India and Japan,
## based on the gapminder data.
library(tidyverse)
library(cowplot)
library(gapminder)
library(ggsci)  # fix: scale_color_aaas() below comes from ggsci, which was never attached
data("gapminder")
## ----------------------------------------------------
## Keep only the three countries of interest.
gapminder_filter <-
  filter(gapminder, country %in% c('China', 'India', 'Japan'))
gapminder_filter
## ----------------------------------------------------
ggplot(data = gapminder_filter,
       aes(x = year, y = lifeExp, color = country)) +
  geom_line() +
  ## open (white-filled) points drawn over the lines
  geom_point(shape = 21, size = 2, fill = "white") +
  scale_color_aaas() +
  theme_minimal_hgrid() +
  theme(legend.position = c(0.85, 0.16))
|
f8eeb6b96573c6b0b0e5c8f38f4182c55cc1b40d
|
03c99906a94c70e9a13e7714aad996f461f339c1
|
/R/dbMANOVAspecies.R
|
11cfb60fe1e1caef0f37e24467cb2c4e639095ba
|
[] |
no_license
|
cran/adiv
|
6a111f6a1ef39fe302a2f882b9a9d04e7d652c04
|
d65d6e0301e4611a94a91933299bff1fdc06d96b
|
refs/heads/master
| 2022-10-28T08:07:33.352817
| 2022-10-06T12:40:04
| 2022-10-06T12:40:04
| 97,764,074
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,434
|
r
|
dbMANOVAspecies.R
|
## Distance-based MANOVA on a communities x species abundance table.
## The total diversity QT (sum of pairwise community dissimilarities,
## scaled by the number of plots) is partitioned as QT = QB + QW
## (between- and within-group components), and QB is tested by permuting
## the group labels -- globally over all species, per species, or both.
##
## Arguments:
##   comm    data frame or matrix (communities x species), nonnegative.
##   groups  factor/character giving a group label per community (row).
##   nrep    number of label permutations for the randomization test.
##   method  pairwise dissimilarity between communities.
##   global  run the all-species (global) test?
##   species run one test per species? (if FALSE, global is forced TRUE)
##   padjust method passed to p.adjust for the per-species p-values.
##   tol     numerical tolerance for detecting zero rows/columns.
## Returns an object of class "dbMANOVAspecies" with the observed
## statistics, the permutation test object(s) and the matched call.
dbMANOVAspecies <- function(comm, groups, nrep = 999, method = c("Euclidean", "Manhattan", "Canberra", "BrayCurtis"), global = TRUE, species = TRUE, padjust = "none", tol = 1e-8){
    global <- global[1]
    species <- species[1]
    if(!inherits(global, "logical")) stop("Incorrect definition of argument global")
    if(!inherits(species, "logical")) stop("Incorrect definition of argument species")
    ## Without per-species tests the global test is the only thing left.
    if(!species) global <- TRUE
    if(!inherits(comm, "data.frame") && !inherits(comm, "matrix")) stop("comm must be a data frame or a matrix")
    if(is.null(colnames(comm))) colnames(comm) <- paste("species", 1:ncol(comm), sep="")
    if(is.null(rownames(comm))) rownames(comm) <- paste("community", 1:nrow(comm), sep="")
    if(any(comm < -tol)) stop("comm must have nonnegative values")
    ## Drop all-zero species columns: they cannot contribute to any distance.
    if(any(colSums(comm) < tol)){
        comm <- comm[, colSums(comm) > tol, drop=FALSE]
        warning("species with zero abundance over all considered communities have been removed")
    }
    if(nrow(comm) != length(groups)) stop("The length of argument groups should be equal to the number of rows of argument comm")
    ## Drop empty communities together with their group labels.
    if(any(rowSums(comm) < tol)){
        comm2 <- comm[rowSums(comm) > tol, , drop=FALSE]
        groups <- groups[rowSums(comm) > tol]
        comm <- comm2
        warning("empty communities with zero species abundance have been removed")
    }
    comm[comm < 0] <- 0  # clamp tiny negatives admitted by the tolerance
    if(!inherits(groups, "factor") && !inherits(groups, "character")) stop("Incorrect definition for argument groups")
    ## Re-factor so unused levels (from the row screening above) are dropped.
    if(!inherits(groups, "factor") | length(levels(groups)) != length(unique(groups)))
        groups <- factor(groups)
    method <- method[1]
    if(!method%in%c("Euclidean","Manhattan","Canberra","BrayCurtis")) stop("Incorrect definition of argument method")
    Nplots <- nrow(comm)
    ## Partition the pairwise dissimilarities of `dis` into total (QT),
    ## between-group (QB) and within-group (QW) components for grouping
    ## `gro`. Previously this identical function was defined separately in
    ## both branches below; it is hoisted here once.
    funSTATs <- function(dis, gro){
        QT <- sum(as.vector(dis))/Nplots
        Distances <- as.matrix(dis)
        ldistancesW <- lapply(levels(gro), function(x) Distances[gro==x, gro==x])
        ldistancesW <- lapply(ldistancesW, as.dist, diag = FALSE, upper = FALSE)
        lQW <- lapply(ldistancesW, function(x) sum(as.vector(x)/attributes(x)$Size))
        QW <- sum(as.vector(unlist(lQW)))
        QB <- QT - QW
        res <- c(QT, QB, QW)
        names(res) <- c("QT", "QB", "QW")
        return(res)
    }
    if(!species){
        ## ---- Global-only analysis: one distance over all species ----
        distabund <- function (abund, meth)
        {
            d <- matrix(0, nrow(abund), nrow(abund))
            funEuclidean <- function(x) {
                sum((abund[x[1], ] - abund[x[2], ])^2)
            }
            funManhattan <- function(x) {
                sum(abs(abund[x[1], ] - abund[x[2], ]))
            }
            funCanberra <- function(x) {
                ## Bug fix: this previously read "abund[x[2], col]" -- no
                ## object `col` exists in this function (the name resolved
                ## to the base function col() and errored at run time).
                ## The whole row is intended, mirroring the other metrics.
                ## na.rm=TRUE drops the 0/0 (NaN) terms.
                sum(abs(abund[x[1], ] - abund[x[2], ])/(abund[x[1], ] + abund[x[2], ]), na.rm=TRUE)
            }
            funBrayCurtis <- function(x) {
                sum(abs(abund[x[1], ] - abund[x[2], ])/sum(abund[c(x[1],x[2]), ]))
            }
            ## All unordered pairs (i < j), in the storage order of a "dist".
            index <- cbind(col(d)[col(d) < row(d)], row(d)[col(d) < row(d)])
            if (meth == "Euclidean")
                d <- unlist(apply(index, 1, funEuclidean))
            else if (meth == "Manhattan")
                d <- unlist(apply(index, 1, funManhattan))
            else if (meth == "Canberra")
                d <- unlist(apply(index, 1, funCanberra))
            else
                d <- unlist(apply(index, 1, funBrayCurtis))
            attr(d, "Size") <- nrow(abund)
            attr(d, "Diag") <- FALSE
            attr(d, "Upper") <- FALSE
            attr(d, "method") <- meth
            class(d) <- "dist"
            return(d)
        }
        distances <- distabund(comm, method)
        ## One permutation of the group labels -> one simulated QB.
        funperm <- function(i){
            egroups <- sample(groups)
            theo <- funSTATs(distances, egroups)[2]
            return(theo)
        }
        THEO <- sapply(1:nrep, funperm)
        OBS <- funSTATs(distances, groups)[2]
        simu <- as.randtest(sim = THEO, obs = OBS, alter = "greater")
        tabobs <- funSTATs(distances, groups)
    }
    else{
        ## ---- Per-species analysis (optionally plus a global test) ----
        ## Distance between communities restricted to species column `col`.
        ## (Calls of the form col(d) below still reach base::col, since R's
        ## call lookup skips non-function bindings.)
        distabunde <- function (abund, col, meth)
        {
            d <- matrix(0, nrow(abund), nrow(abund))
            funEuclidean <- function(x) {
                (abund[x[1], col] - abund[x[2], col])^2
            }
            funManhattan <- function(x) {
                abs(abund[x[1], col] - abund[x[2], col])
            }
            funCanberra <- function(x) {
                ## Explicit guard for the 0/0 case.
                if((abund[x[1], col] + abund[x[2], col]) < tol) return(0)
                else
                    return(abs(abund[x[1], col] - abund[x[2], col])/(abund[x[1], col] + abund[x[2], col]))
            }
            funBrayCurtis <- function(x) {
                ## NOTE(review): the condition tests attribute
                ## "internal.p.rowSums" but the else-branch reads "rowSums";
                ## presumably both should name the same cached attribute --
                ## confirm against whatever caller sets it.
                if(is.null(attributes(abund)$internal.p.rowSums))
                    abs(abund[x[1], col] - abund[x[2], col])/sum(abund[c(x[1],x[2]), ])
                else
                    abs(abund[x[1], col] - abund[x[2], col])/sum(attributes(abund)$rowSums[c(x[1],x[2])])
            }
            index <- cbind(col(d)[col(d) < row(d)], row(d)[col(d) < row(d)])
            if (meth == "Euclidean")
                d <- unlist(apply(index, 1, funEuclidean))
            else if (meth == "Manhattan")
                d <- unlist(apply(index, 1, funManhattan))
            else if (meth == "Canberra")
                d <- unlist(apply(index, 1, funCanberra))
            else
                d <- unlist(apply(index, 1, funBrayCurtis))
            attr(d, "Size") <- nrow(abund)
            attr(d, "Diag") <- FALSE
            attr(d, "Upper") <- FALSE
            attr(d, "method") <- meth
            class(d) <- "dist"
            return(d)
        }
        ## One single-species distance matrix per species.
        listDIS <- lapply(seq_len(ncol(comm)), function(i) as.matrix(distabunde(comm, i, method)))
        if(global) {
            ## The global distance is the sum of the per-species distances.
            ## Reduce() replaces the former "for(i in 2:length(listDIS))"
            ## loop, which broke when only one species remained (2:1).
            distances <- as.dist(Reduce(`+`, listDIS), diag = FALSE, upper = FALSE)
        }
        listDIS <- lapply(listDIS, as.dist, diag = FALSE, upper = FALSE)
        ## One permutation -> simulated QB for each test (global first).
        funperm <- function(i){
            egroups <- sample(groups)
            if(global) theoglobal <- funSTATs(distances, egroups)[2]
            theospecies <- as.vector(unlist(lapply(listDIS, function(dd) funSTATs(dd, egroups)[2])))
            if(global) theo <- c(theoglobal, theospecies)
            else theo <- theospecies
            return(theo)
        }
        THEO <- t(cbind.data.frame(sapply(1:nrep, funperm)))
        if(global) colnames(THEO) <- c("GLOBAL", colnames(comm))
        else colnames(THEO) <- colnames(comm)
        obsspecies <- as.vector(unlist(lapply(listDIS, function(dd) funSTATs(dd, groups)[2])))
        if(global){
            obsglobal <- funSTATs(distances, groups)[2]
            OBS <- c(obsglobal, obsspecies)
        }
        else OBS <- obsspecies
        simu <- as.krandtest(sim = THEO, obs = OBS, alter = "greater", p.adjust.method = padjust)
        ## Full QT/QB/QW table of observed statistics, one column per species.
        obsspecies <- cbind.data.frame(lapply(listDIS, function(dd) funSTATs(dd, groups)))
        colnames(obsspecies) <- colnames(comm)
        if(global){
            obsglobal <- funSTATs(distances, groups)
            tabobs <- cbind.data.frame(Global = obsglobal, obsspecies)
        }
        else tabobs <- obsspecies
    }
    RES <- list(observations = tabobs, test = simu, call = match.call(), method = method, padjust = padjust, tol = tol)
    class(RES) <- "dbMANOVAspecies"
    return(RES)
}
|
542673228e90002e4509f8cd188db64c213a5f60
|
ef5b13299d011f1362445e6c48c5dd9a9f7c25e5
|
/inst/doc/q23.R
|
468f946bb2ce397f58af5aeb8b414d7bd4b177aa
|
[] |
no_license
|
mclements/biostat3
|
faa45c9578ae36e7b972a9895ab2ba1164fa99e4
|
187e21adc3a2f9694f1c25fe181853f9d55868bf
|
refs/heads/master
| 2021-07-05T15:25:48.004841
| 2021-05-10T14:42:02
| 2021-05-10T14:42:02
| 99,109,204
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,527
|
r
|
q23.R
|
## Purpose: Solution for Biostat III exercises in R
## Author: Mark Clements, 2017-11-03
###############################################################################
## Install needed packages only need to be done once
## install.packages("foreign")
## install.packages("survival")
## install.packages("dplyr")
###############################################################################
## Exercise 23
###############################################################################
## @knitr loadDependencies
library(biostat3) # for Surv and survfit
library(dplyr) # for data manipulation
## @knitr loadPreprocess
## @knitr 23.a
## (a) Prepare the melanoma data for splitting on the attained-age scale:
## censor follow-up at 110 months and express entry/exit as ages.
data(melanoma)
## Days per year.
## NOTE(review): `scale` does not appear to be used later in this script -- confirm.
scale <- 365.24
mel <- mutate(melanoma,
              ydx=biostat3::year(dx), # decimal calendar year of diagnosis
              adx=age+0.5, # mid-point approximation
              ## event indicator: death (any recorded cause) within 110 months
              dead=(status %in% c("Dead: cancer","Dead: other") & surv_mm<110)+0,
              surv_mm=pmin(110,surv_mm), # administrative censoring at 110 months
              astart=adx, # age at entry
              astop=adx+surv_mm/12) # age at exit (months -> years)
## Split each record into 1-year attained-age bands.
mel.split <- survSplit(mel,
                       cut=1:110,
                       event="dead",start="astart", end="astop")
## Inspect the split records for the first two subjects.
subset(mel.split, id<=2, select=c(id,astart,astop,dead))
## @knitr 23.b
## (b) Map the age-split records onto the calendar-time scale, then split
## again on calendar year so each row lies in one age x year cell.
## NOTE(review): bare year() here vs biostat3::year() above -- presumably the
## same function found via the attached biostat3 namespace; confirm.
mel.split <- mutate(mel.split,
                    ystart=year(dx)+astart-adx, # calendar time at interval start
                    ystop=year(dx)+astop-adx) # calendar time at interval end
mel.split2 <- survSplit(mel.split,
                        cut=1970:2000,event="dead",
                        start="ystart", end="ystop") %>%
    mutate(astart=adx+ystart-ydx, # recover age at interval start
           astop=adx+ystop-ydx, # recover age at interval end
           age=floor(astop), # attained age band (integer years)
           year=floor(ystop), # calendar year band
           pt = ystop - ystart) # person-time in the interval (years)
subset(mel.split2, id<=2, select=c(id,ystart,ystop,astart,astop,dead))
## @knitr 23.c
## (c) Person-time and death counts by age and calendar year, ages 50-59.
xtabs(pt ~ age+year, data=mel.split2, subset = age>=50 & age<60)
xtabs(dead ~ age+year, data=mel.split2, subset = age>=50 & age<60)
## @knitr 23.d
## (d) Event rates by sex, 10-year age band and 5-year calendar period.
mel.split2 <- mutate(mel.split2,
                     age10=cut(age,seq(0,110,by=10),right=FALSE),
                     year10=cut(year,seq(1970,2000,by=5),right=FALSE))
head(survRate(Surv(pt,dead)~sex+age10+year10, data=mel.split2))
## @knitr 23.e
## (e) Expected deaths from population mortality rates (popmort), matched
## on sex/age/year, then SMR = observed / expected per sex and year.
pt <- mutate(mel.split2,sex=unclass(sex)) %>%
    group_by(sex, age, year) %>%
    summarise(pt=sum(pt))
expected <- inner_join(popmort, pt) %>%
    mutate(pt=ifelse(is.na(pt),0,pt)) %>%
    group_by(sex,year) %>%
    summarise(E=sum(rate*pt)) %>% ungroup
observed <- mutate(mel.split2, sex=as.numeric(unclass(sex))) %>%
    group_by(sex, year) %>%
    summarise(O=sum(dead)) %>% ungroup
joint <- inner_join(observed,expected) %>%
    mutate(SMR = O/E)
## @knitr 23.f
## (f) Exact Poisson tests of the overall SMR per sex, then a Poisson GLM
## of the SMR over calendar time (natural-spline year effect, log-E offset)
## plotted with shaded confidence bands.
## overall SMRs
by(joint, joint$sex, function(data) poisson.test(sum(data$O), sum(data$E)))
## utility function to draw a confidence interval
polygon.ci <- function(time, interval, col="lightgrey")
    polygon(c(time,rev(time)), c(interval[,1],rev(interval[,2])), col=col, border=col)
## modelling by calendar period
## NOTE(review): ns() comes from the splines package and confint.predictnl()
## is not in base R; both are presumably made available via biostat3's
## dependencies -- confirm before running standalone.
summary(fit <- glm(O ~ sex*ns(year,df=3)+offset(log(E)), data=joint, family=poisson))
##
pred <- predict(fit,type="response",newdata=mutate(joint,E=1),se.fit=TRUE)
full <- cbind(mutate(joint,fit=pred$fit), confint.predictnl(pred))
ci.cols <- c("lightgrey", "grey")
matplot(full$year, full[,c("2.5 %", "97.5 %")], type="n", ylab="SMR", xlab="Calendar year")
## shaded CI band per sex, drawn first so the curves sit on top
for (i in 1:2) {
    with(subset(full, sex==i), {
        polygon.ci(year, cbind(`2.5 %`, `97.5 %`), col=ci.cols[i])
    })
}
## fitted SMR curves on top of the bands
for (i in 1:2) {
    with(subset(full, sex==i), {
        lines(year,fit,col=i)
    })
}
legend("topright", legend=levels(mel.split2$sex), lty=1, col=1:2, bty="n")
|
b6986bf169654e147d83bf440a071327888f5e3c
|
de47db6a8a358c999904444361c8c10ce7a2aa45
|
/modules/rtm/R/defparam.R
|
72bbb6aeba06061c1e21ddba9e50241ae77f01dd
|
[
"NCSA",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
davidjpmoore/pecan
|
0d42d0983203ad0791ef4668862726a4fc47ec97
|
73ac58ad36764fbf8c44290bb8f2cd4b741e73b8
|
refs/heads/master
| 2021-01-15T17:56:19.229136
| 2015-09-01T15:11:40
| 2015-09-01T15:11:40
| 41,746,837
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 617
|
r
|
defparam.R
|
#' @name defparam
#' @title Get default parameters
#' @details Look up the default-parameter string for `modname` in
#'   `model.list` and parse it into a named numeric vector.
#' @param modname Model name. Must match `modname` in `model.list`
#' @return Named vector of default parameter values
defparam <- function(modname){
  # Load the bundled model metadata and key it for fast row lookup.
  data(model.list)
  setkey(model.list, modname)
  raw <- model.list[modname, par.default]
  # Defaults are stored as one space-separated string of "name=value"
  # tokens; use the first matching row (the original code equivalently
  # took column 1 of the sapply() result).
  tokens <- strsplit(raw, " ")[[1]]
  defaults <- as.numeric(gsub(".*=", "", tokens))
  names(defaults) <- gsub("=.*", "", tokens)
  defaults
}
|
3b552106672f9b94db570aa47563a33be0925840
|
136d7cc6af0c39ad06c41b559b6b09f5d2ca270e
|
/R/data.R
|
555f0bb9ccc4ad5110d6b13752fcb255c836d98d
|
[
"MIT"
] |
permissive
|
sempwn/cr0eso
|
40383e9ecd76f804050a06c7183d15e7839d5136
|
9e0316526fbf74654534aca40b6c2b49ccffb823
|
refs/heads/master
| 2023-04-18T05:07:51.960035
| 2022-08-17T21:44:56
| 2022-08-17T21:44:56
| 325,662,500
| 0
| 2
|
MIT
| 2021-09-17T19:34:34
| 2020-12-30T22:42:04
|
R
|
UTF-8
|
R
| false
| false
| 4,472
|
r
|
data.R
|
#' Long Term Health Care facility COVID-19 outbreaks in British Columbia, with 100 imputations of missing data
#'
#' A dataset containing 100 replicates of LTHC covid-19 outbreaks in BC, Canada, each
#' with a random imputation of missing symptom onset times.
#'
#'
#' @format A list with 100 elements, corresponding to 100 imputations of missing symptom onset data, each with 6 sub-elements
#' \describe{
#' \item{Location}{Long Term Health Care facility identifier}
#' \item{num_cases}{Total number of COVID-19 cases identified in each location's outbreak}
#' \item{time_series}{Daily symptom onset incidence in each location's outbreak e.g. time_series[[2]][3] gives the number of cases with symptom onset on the third day of Location[2]'s outbreak.}
#' \item{capacity}{Resident capacity of each location}
#' \item{reported_date}{Date of first case reported in each location's outbreak}
#' \item{case_matrix}{time_series objects combined into a matrix, where each column is a different facility outbreak and rows 1,...,n give the number of cases with symptom onset on day 1,...,n in that outbreak. Used in Stan model fitting.}
#' }
#' @source British Columbia Centre for Disease Control
"BC_LTHC_outbreaks_100Imputs"
#' Additional covariate data on Long Term Health Care facility COVID-19 outbreaks in British Columbia
#'
#' A dataset containing additional covariate data for each of the 18 Long Term Health Care
#' facility COVID-19 outbreaks in British Columbia contained in dataset
#' 'BC_LTHC_outbreaks_100Imputs'. The majority of this data was sourced from the Office of
#' the Seniors Advocate, British Columbia, Long-Term Care Facilities Quick Facts Directory.
#' https://www.seniorsadvocatebc.ca/quickfacts/location. All OSABC data is from the 2018/19
#' year. Identity of the initial case in each facility outbreak was obtained from the BCCDC.
#'
#'
#' @format A dataframe with 18 rows, corresponding to the 18 LTHC outbreaks in this dataset, and 12 columns of data concerning each outbreak:
#' \describe{
#' \item{Location}{Long Term Health Care facility identifier}
#' \item{LTHC facility type}{Funding type of each facility: Private, Private non-profit or Public, as much as was possible to conclude}
#' \item{Year facility opened}{Year in which each facility first opened to residents}
#' \item{Resident room type}{Type of accomodations available for residents: single rooms only, multi-resident rooms only, or a mix (either primarily single rooms or primarily multi-person rooms)}
#' \item{Accreditation status}{Has each facility been voluntarily accredited by Accreditation Canada (see https://www2.gov.bc.ca/gov/content/health/accessing-health-care/home-community-care/accountability/quality-and-safety)}
#' \item{Direct care hours /resident/day}{Total direct care hours (hours per resident per day), Nursing/Care Aide care + Allied health hours, in each facility}
#' \item{Average resident age \(years\)}{Average age in years of residents in each facility}
#' \item{Average resident stay \(days\)}{Average length of stay in days of residents in each facility}
#' \item{Residents dependent for daily activities (\%)}{Percent of residents in each facility who are totally dependent in their in activities of daily living}
#' \item{Number of lodged complaints 2018/19}{Number of licensing complaints lodged in each facility during the 2018/19 year.}
#' \item{Number of disease outbreaks 2018/19}{Total number of disease outbreak or occurrence incidents recorded in each facility during the 2018/19 year}
#' \item{Identity of initial COVID-19 case}{Identity (staff/worker or resident/patient) of the initial COVID-19 case in each facility, as determined by earliest recorded date of symptom onset.}
#' }
#' @source Office of the Seniors Advocate, British Columbia \url{https://www.seniorsadvocatebc.ca/quickfacts/location} and British Columbia Centre for Disease Control
"BC_OSABC_facilitydata"
#' BC outbreak data posterior
#'
#' Object created from `seir_model_fit` for BC data with fixed intervention.
#' For more details see the BC model fitting vignette:
#' \code{vignette("BC_outbreak_data_fitting", package = "cr0eso")}
"bc_fit"
#' BC outbreak data hierarchical intervention posterior
#'
#' Object created from `seir_model_fit` for BC data with hierarchical intervention.
#' For more details see the BC model fitting vignette:
#' \code{vignette("BC_outbreak_data_fitting", package = "cr0eso")}
"bc_fit_zeta"
|
02651a0208e5ea53ce55f11747a6c480ad05ea69
|
c7db766c230fe59d8186d6fd1038e2cfe2464eaa
|
/man/ExoLVsCor.Rd
|
605fcfca787761e685a36915e545f5bbec5d3ec2
|
[] |
no_license
|
cran/dgmb
|
3c751847db4eba84104a6161565df0723bc3998e
|
886b56b03deb6697706329b0a3edc7f7c13386a2
|
refs/heads/master
| 2016-09-05T11:55:28.454485
| 2015-10-22T15:23:16
| 2015-10-22T15:23:16
| 17,695,489
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,716
|
rd
|
ExoLVsCor.Rd
|
\name{ExoLVsCor}
\alias{ExoLVsCor}
\title{Calculating correlations between exogenous constructs}
\description{A function to calculate correlations between exogenous constructs.}
\usage{ExoLVsCor(N, n, bex, rie, y.ex, a.nle, a.ie)}
\arguments{
\item{N}{The number of data sets.}
\item{n}{The sample size of each data set.}
\item{bex}{The number of exogenous constructs.}
\item{rie}{A binary matrix specifying nonlinear and interaction effects on the endogenous construct.}
\item{y.ex}{An array with \code{N} matrices of dimension \code{n} times \code{bex} with the scores of exogenous constructs.}
\item{a.nle}{An array with \code{N} matrices of dimension \code{n} times \code{ncol(rie)} with the scores of nonlinear effects.}
\item{a.ie}{An array with \code{N} matrices of dimension \code{n} times \code{ncol(rie)} with the scores of interaction effects.}
}
\value{A list with the following components:
\item{y.ex.tot}{An array with \code{N} matrices of dimension \code{n} times \code{ncol(y.ex)+ncol(a.nle)+} \cr \code{ncol(a.ie)} with the scores of exogenous constructs (linear, nonlinear, and interaction effects).}
\item{y.ex.cor}{An array with \code{N} matrices of dimension \code{ncol(y.ex)+ncol(a.nle)+} \cr \code{ncol(a.ie)} times \code{ncol(y.ex)+ncol(a.nle)+ncol(a.ie)} with the correlations between exogenous constructs (linear, nonlinear, and interaction effects).}}
\author{Alba Martinez-Ruiz <amartine@ucsc.cl>}
\seealso{\code{\link{ErrEnLV}}, \code{\link{EnLVs}}, \code{\link{EnMVs}}, \code{\link{XexXen}}}
\examples{
N <- 500
n <- 250
\dontrun{
yexcor <- ExoLVsCor(N,n,intpar$bex,intpar$rie,yex$y.ex,nlie$a.nle,nlie$a.ie)
attributes(yexcor)}
}
|
e658bdb77db77782794406d364adef3fee5b09d8
|
46b30340db3a4f368e9ff948b540e9de4ffe3b92
|
/hbook2/hbook2.rd
|
cff6398228e448ba32ee762f9eb242970a3521ee
|
[
"LicenseRef-scancode-philippe-de-muyter"
] |
permissive
|
kaitanie/ruby-cernlib
|
499654a2975c5122e17a8db4aa7deaaddab857e9
|
2c2775fde146f97fa191d4cb80715d5c2556233b
|
refs/heads/master
| 2016-09-15T20:18:05.745659
| 2009-06-30T15:31:15
| 2009-06-30T15:31:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 20,353
|
rd
|
hbook2.rd
|
=begin RD
= HBook2 extension library
HBook2 is a histogram library using the CERNLIB HBOOK package.
Some histogram classes, N-tuple classes, a directory stream class and so on
are defined under CERN::HBook2 module.
= HBook2
Histogram module using the CERNLIB HBOOK package.
== module function:
--- HBook2.exist?(id)
returns true if a histogram with ID ((|id|)) exists on memory.
--- HBook2.merge(srces, dest)
reads files given as an array ((|srces|)), merges the histograms
and N-tuples, and writes them to a new file ((|dest|)).
--- HBook2.read_all(filename)
reads a file ((|filename|)) and returns each array of objects of
((<Hist1>)), ((<Hist2>)), ((<Profile>)) and ((<Ntuple>)) class.
If a file ((|filename|)) does not exist, an exception occurs.
--- HBook2.write_all(filename)
writes all histograms on memory to a file ((|filename|)).
= BasicHist
Virtual class to be a parent of histogram classes.
== super class:
* Data
== method:
--- BasicHist#+(other)
--- BasicHist#-(other)
--- BasicHist#*(other)
--- BasicHist#/(other)
performs the operation with each channel of the histogram,
and returns the result as a histogram.
The ID of the returned histogram is determined automatically,
so user usually has to specify it explicitly
with (({((<BasicHist#id=>))})) method.
--- BasicHist#copy(id[, title])
copies the histogram to a new histogram with ID ((|id|)).
If ((|title|)) is given, the title of the new histogram is set to it.
--- BasicHist#delete
deletes the histogram from memory.
If user accesses the deleted histogram, exception is raised.
--- BasicHist#deleted?
returns true if the histogram is already deleted.
--- BasicHist#entry
returns the number of entry in the histogram.
--- BasicHist#hist_id
returns the ID number of the histogram.
--- BasicHist#hist_id=(id)
sets the ID number to ((|id|)).
--- BasicHist#max
returns the maximum contents of the histogram
(except for overflow and underflow).
--- BasicHist#min
returns the minimum contents of the histogram
(except for overflow and underflow).
--- BasicHist#reset([title])
resets all channels to 0.
If ((|title|)) is given, the title is also changed.
--- BasicHist#sum
returns the sum of the contents of all channels.
(except for overflow and underflow).
--- BasicHist#title
returns the title of the histogram.
--- BasicHist#write(filename)
writes the histogram to a file ((|filename|)).
= Hist1
1-dimensional histogram class.
== super class:
* ((<BasicHist>))
== class methods:
--- Hist1.new(id, title, bin, min, max)
returns a new one-dimensional histogram with ID of ((|id|)),
title ((|title|)), equidistant bins of ((|bin|)) channels
from ((|min|)) to ((|max|)).
--- Hist1.func(id, title, bin, min, max, func)
--- Hist1.func(id, title, bin, min, max) { ... }
creates a new one-dimensional histogram with ID of ((|id|)),
title ((|title|)), equidistant bins of ((|bin|)) channels
from ((|min|)) to ((|max|)), and fills it with values obtained from
a function ((|func|)) or block.
--- Hist1.read(id, filename)
reads a one-dimensional histogram with ID of ((|id|))
from a file ((|filename|)).
If a file ((|filename|)) does not exist, an exception is raised.
If a histogram with ID of ((|id|)) does not exist
in a file ((|filename|)) or is not one-dimensional,
returns (({nil})).
== methods:
--- Hist1#<<(x)
--- Hist1#fill(x[, inc])
increments by ((|inc|)) the channel containing the value ((|x|)).
The default value of ((|inc|)) is 1.
Or, in case of ((|x|)) being an array, increments each channel
by the contents of the array.
--- Hist1#[](xch)
--- Hist1#content(xch)
returns the contents of ((|xch|))-th channel,
underflow if ((|xch|)) is less than 0,
or overflow if ((|xch|)) is more than the number of channels.
--- Hist1#equiv
returns the number of equivalent events
(except for overflow and underflow).
--- Hist1#err2a
--- Hist1#unpack_err
returns an array containing the error of each channel.
--- Hist1#error(xch)
returns the error of ((|xch|))-th channel.
--- Hist1#fit(par, func[, option])
--- Hist1#fit(par[, option]) { ... }
fits the histogram with a function ((|func|)) or block,
and returns the obtained fitting parameters, their errors
and chi-squared value.
Initial parameters must be given as the first argument ((|par|)).
The fitting function ((|func|)) is a Proc object or
either of the string below:
* "G" Gaussian
* "E" exponential
* "P((|n|))" ((|n|)) polynomial
--- Hist1#mean
returns mean value of the histogram
(except for overflow and underflow).
--- Hist1#overflow
returns the entry of overflow of the histogram.
--- Hist1#pack(ary)
replaces the content of each channel from the contents
of an array ((|ary|)).
--- Hist1#pack_err(ary)
replaces the error of each channel from the contents
of an array ((|ary|)).
--- Hist1#random
returns random number distributed according to the contents
of the histogram.
--- Hist1#sigma
returns the standard deviation of the histogram
(except for overflow and underflow).
--- Hist1#to_a
--- Hist1#unpack
returns an array containing the content of each channel.
--- Hist1#underflow
returns the entry of underflow of the histogram.
= Hist2
2-dimensional histogram class.
== super class:
* ((<BasicHist>))
== class methods:
--- Hist2.new(id, title, xbin, xmin, xmax, ybin, ymin, ymax)
returns a new 2-dimensional histogram with ID of ((|id|)),
title ((|title|)), equidistant bins of ((|xbin|)) channels
from ((|xmin|)) to ((|xmax|)) for X-axis,
and equidistant bins of ((|ybin|)) channels
from ((|ymin|)) to ((|ymax|)) for Y-axis.
--- Hist2.read(id, filename)
reads a 2-dimensional histogram with ID of ((|id|))
from a file ((|filename|)).
If a file ((|filename|)) does not exist, an exception is raised.
If a histogram with ID of ((|id|)) does not exist
in a file ((|filename|)) or is not 2-dimensional,
returns (({nil})).
== methods:
--- Hist2#[](xch, ych)
--- Hist2#content(xch, ych)
returns the content of the specified bin, ((|xch|))-th for X
and ((|ych|))-th for Y-axis,
underflow if ((|xch|)) or ((|ych|)) is less than 0,
or overflow if ((|xch|)) or ((|ych|)) is more than the number
of channels.
--- Hist2#book_xproj
books a projection of the histogram onto X.
It does NOT be filled at the time it is booked.
When the 2-dimensional histogram is filled,
its projections are also filled automatically.
--- Hist2#book_yproj
books a projection of the histogram onto Y.
--- Hist2#book_xband(min, max)
books a projection of the histogram onto X,
restricted to the Y interval (((|min|)), ((|max|))).
--- Hist2#book_yband(min, max)
books a projection of the histogram onto Y,
restricted to the X interval (((|min|)), ((|max|))).
--- Hist2#book_xslice(num)
books slices along Y of the histogram as ((|num|)) 1-dimensional
histograms.
--- Hist2#book_yslice(num)
books slices along X of the histogram as ((|num|)) 1-dimensional
histograms.
--- Hist2#err2a([option[, num]])
--- Hist2#unpack_err([option[, num]])
In case ((|option|)) is (({"HIST"})) or not given,
returns a 2-dimensional array containing the error of each channel
of the 2-dimensional histogram.
In case (({"PROX"})), (({"PROY"})), (({"BANX"})),
(({"BANY"})), (({"SLIX"})) or (({"SLIY"})) is specified,
returns an array of containing the error of projection onto X or Y,
band, or slice, respectively.
For (({hist2})) being an instance of ((<Hist2>)) class,
(({hist2.err2a[2][4]})) gives the same result as (but is slower than)
(({hist2.error(5, 3)})).
--- Hist2#error(xch, ych)
returns the error of the specified bin, ((|xch|))-th for X
and ((|ych|))-th for Y-axis,
--- Hist2#fill(x, y[, inc])
increments by ((|inc|)) the channel containing the pair of value
(((|x|)), ((|y|))).
The default value of ((|inc|)) is 1.
--- Hist2#fill(ary)
increments each channel by the contents of a 2-dimensional array
((|ary|)).
--- Hist2#pack(ary)
replaces the content of each channel from the contents
of a 2-dimensional array ((|ary|)).
--- Hist2#pack_err(ary)
replaces the error of each channel from the contents
of a 2-dimensional array ((|ary|)).
--- Hist2#random
returns a pair of random numbers distributed according to the contents
of the histogram.
--- Hist2#to_a([option[, num]])
--- Hist2#unpack([option[, num]])
In case ((|option|)) is (({"HIST"})) or not given,
returns a 2-dimensional array containing the content of each channel
of the 2-dimensional histogram.
In case (({"PROX"})), (({"PROY"})), (({"BANX"})),
(({"BANY"})), (({"SLIX"})) or (({"SLIY"})) is specified,
returns an array of containing the content of projection onto X or Y,
band, or slice, respectively.
For (({hist2})) being an instance of ((<Hist2>)) class,
(({hist2.to_a[2][4]})) gives the same result as (but is slower than)
(({hist2.contents(5, 3)})).
--- Hist2#xproj(id, title)
returns a projection onto X of the 2-dimensional histogram.
Different from (({((<Hist2#book_xproj>))})),
books a new 1-dimensional histogram with ID of ((|id|)),
title ((|title|)), and fills it with the contents of
the 2-dimensional histogram.
--- Hist2#yproj(id, title)
returns a projection onto Y of the 2-dimensional histogram.
= Profile
Profile histogram class.
== super class:
* ((<BasicHist>))
== class methods:
--- Profile.new(id, title, xbin, xmin, xmax, ymin, ymax[, option])
returns a new profile histogram with ID of ((|id|)),
title ((|title|)), equidistant bins of ((|xbin|)) channels
from ((|xmin|)) to ((|xmax|)) for X-axis,
restricted to Y from ((|ymin|)) to ((|ymax|)).
--- Profile.read(id, filename)
reads a profile histogram with ID of ((|id|))
from a file ((|filename|)).
If a file ((|filename|)) does not exist, an exception is raised.
If a histogram with ID of ((|id|)) does not exist
in a file ((|filename|)) or is not a profile histogram,
returns (({nil})).
--- Profile.new(id, title, xbin, xmin, xmax, ymin, ymax[, option])
--- Profile.read(id, filename)
== methods:
--- Profile#[](xch)
--- Profile#content(xch)
returns the content of ((|xch|))-th channel.
--- Profile#err2a
--- Profile#unpack_err
returns an array containing the error of each channel.
--- Profile#error(xch)
returns the error of ((|xch|))-th channel.
--- Profile#fill(x, y[, inc])
increments by ((|inc|)) the channel containing the pair of value
(((|x|)), ((|y|))).
The default value of ((|inc|)) is 1.
--- Profile#fit(par, func[, option])
--- Profile#fit(par[, option]) { ... }
fits the histogram with a function ((|func|)) or block,
and returns the obtained fitting parameters, their errors
and chi-squared value.
Initial parameters must be given as the first argument ((|par|)).
The fitting function ((|func|)) is a Proc object or
either of the string below:
* "G" Gaussian
* "E" exponential
* "P((|n|))" ((|n|)) polynomial
--- Profile#to_a
--- Profile#unpack
returns an array containing the content of each channel.
= Ntuple
Virtual class to be a parent of Row-Wise N-tuple and Column-Wise N-tuple.
== super class:
* Data
== including module:
* Enumerable
== methods:
--- Ntuple#delete
deletes the N-tuple from memory.
If user accesses the deleted N-tuple, an exception is raised.
--- Ntuple#deleted?
returns true if the N-tuple is already deleted.
--- Ntuple#entry
--- Ntuple#length
--- Ntuple#size
returns the number of events in the N-tuple.
--- Ntuple#ntuple_id
returns the ID number of the N-tuple.
--- Ntuple#reset([title])
resets the N-tuple.
If ((|title|)) is given, the title is also changed.
--- Ntuple#title
returns the title of the N-tuple.
= RWNtuple
Row-Wise N-tuple class.
== super class:
* ((<Ntuple>))
== class methods:
--- RWNtuple.new(id, title, dir, memory, member1, ...)
returns a new Row-Wise N-tuple with ID of ((|id|)), title ((|title|)),
booked on a directory ((|dir|)).
== methods:
--- RWNtuple#[](event)
--- RWNtuple#get_event(event)
returns a hash containing ((|event|))-th event.
--- RWNtuple#<<(data)
--- RWNtuple#fill(data)
fills the N-tuple with an event data ((|data|)), given as a hash.
--- RWNtuple#copy(id[, title])
returns a new N-tuple with ID of ((|id|)), having same member
as the N-tuple.
The event entries are NOT copied.
If ((|title|)) is given, the title of the new N-tuple is set to it.
--- RWNtuple#each { |var| ... }
evaluates the block with each event data of the N-tuple.
--- RWNtuple#fill_hist1(member, hist[, from, to])
--- RWNtuple#fill_hist1(member, hist[, from, to]) { ... }
fills a 1-dimensional-histogram ((|hist|)) with the values of
member ((|member|)) from ((|from|))-th to ((|to|))-th event
of the N-tuple.
When a block is given, it is used as a weight function.
This method is much faster than (({((<Ntuple#projection>))})).
--- RWNtuple#fill_hist2(xmember, ymember, hist[, from, to])
--- RWNtuple#fill_hist2(xmember, ymember, hist[, from, to]) { ... }
fills a 2-dimensional-histogram ((|hist|)) with pairs of
the values of member ((|xmember|)) and ((|ymember|))
from ((|from|))-th to ((|to|))-th event of the N-tuple.
When a block is given, it is used as a weight function.
--- RWNtuple#max(member)
returns the maximum value of the member ((|member|)).
--- RWNtuple#members
returns an array containing the name of each member of the N-tuple.
--- RWNtuple#min(member)
returns the minimum value of the member ((|member|)).
--- RWNtuple#projection(hist) { ... }
evaluates the block with hashes containing the event in the N-tuple,
and fills a histogram ((|hist|)) with the results.
To fill a 2-dimensional histogram or a profile,
each call of the block must return an array containing (({[ x, y ]})).
If the block returns (({nil})), the event is not filled.
= CWNtuple
Column-Wise N-tuple class.
It is used with event block class ((<CWNBlock>)), for example,
rzfile = CERN::HBook2::RZFile.open("test.rz", "topdir", "N")
cwntuple = CERN::HBook2::CWNtuple.new(10, "CWNtuple", rzfile)
EventBlk = CERN::HBook2::CWNBlock.new("EventBlk", "x:r", "y:r", "n:i")
cwntuple.set_block EventBlk
block = EventBlk.new
block.x = 1.5
block.y = 3.6
block.n = 100
cwntuple << block
or
rzfile = CERN::HBook2::RZFile.open("test.rz", "topdir", "N")
cwntuple = CERN::HBook2::CWNtuple.new(10, "CWNtuple", rzfile)
EventBlk = CERN::HBook2::CWNBlock.new("EventBlk", "x:r", "y:r", "n:i")
cwntuple.set_block EventBlk
EventBlk.x = 1.5
EventBlk.y = 3.6
EventBlk.z = 100
cwntuple.fill_all
== super class:
* ((<Ntuple>))
== class methods:
--- CWNtuple.new(id, title, dir)
returns a new Column-Wise N-tuple with ID of ((|id|)), title ((|title|)),
booked on a directory ((|dir|)).
== methods:
--- CWNtuple#[](event)
--- CWNtuple#get_event(event[, block])
returns an array containing the event blocks of ((|event|))-th event.
If event block class ((|block|)) is given,
returns only the event block.
--- CWNtuple#<<(data)
--- CWNtuple#fill(data)
fills the N-tuple with an event block ((|data|)).
--- CWNtuple#each { ... }
evaluates the block with an array containing the event blocks of
each event of the N-tuple.
--- CWNtuple#fill_all
fills the N-tuple with all data packed in the event blocks
set to the N-tuple.
--- CWNtuple#blocks
returns an array containing the event block classes set to the N-tuple.
--- CWNtuple#set_block(block)
sets an event block class ((|block|)) to the N-tuple.
= CWNBlock
Struct (block) class for events to fill Column-wise N-tuple.
It creates a new subclass by the ((<CWNBlock.new>)) method
like the built-in Struct class.
== super class:
* Data
== including module:
* Enumerable
== class methods:
--- CWNBlock.new(name, member1, member2, ...)
returns a new event block class with a name of ((|name|)).
== methods:
--- CWNBlock#members
returns an array containing the names of the members.
--- CWNBlock#each { ... }
evaluates the block with the value of each member.
--- CWNBlock#length
--- CWNBlock#size
returns the number of the members.
--- CWNBlock#to_a
--- CWNBlock#values
returns an array containing the value of each member.
= RZDir
HBOOK directory stream class.
== super class:
* Data
== including module:
* Enumerable
== class methods:
--- RZDir.new([dir])
--- RZDir.open([dir])
--- RZDir.open([dir]) { |rzdir| ... }
opens a directory stream of the directory ((|dir|)).
The default directory is the current working directory.
open() can be called with a block.
If a block is given, evaluates the block with the directory stream,
and returns the result.
The directory stream is closed automatically.
--- RZDir.chdir(dir)
sets the current working directory to ((|dir|)).
--- RZDir.delete(dir)
--- RZDir.rmdir(dir)
removes the directory ((|dir|)).
--- RZDir.entries(dir)
returns an array containing all histograms and N-tuples
on the directory ((|dir|)).
--- RZDir.expand_path(dir)
returns the full path name of the directory ((|dir|)).
--- RZDir.foreach(dir) { |item| ... }
evaluates the block with each content of the directory ((|dir|)).
--- RZDir.list([dir])
prints a list of contents of the directory ((|dir|)) to stdout.
The default directory is the current working directory.
--- RZDir.getwd
--- RZDir.pwd
returns the full path name of the current working directory.
--- RZDir.mkdir(dir)
create a new directory ((|dir|)), and returns its directory stream.
== methods:
--- RZDir#[](id)
returns a histogram or a N-tuple with ID of ((|id|)) on the directory.
--- RZDir#<<(hist)
--- RZDir#write(hist)
write a histogram or a N-tuple ((|hist|)) to the directory.
The directory must be on a file.
--- RZDir#close
closes the directory stream.
If user accesses the closed directory stream, an exception is raised.
--- RZDir#closed?
returns true if the directory stream is already closed.
--- RZDir#each { |item| ... }
evaluates the block with each content of the directory.
--- RZDir#read
returns the next content in the directory stream.
--- RZDir#rewind
reset the reading position of the directory stream to the head.
--- RZDir#to_s
returns the path of the directory.
--- RZDir#write_all
writes all histograms and N-tuples on memory to the directory.
The directory must be on a file.
= RZFile
RZ file class.
== super class:
* ((<RZDir>))
== class methods:
--- RZFile.new(filename[, topdir[, option]])
--- RZFile.open(filename[, topdir[, option]])
--- RZFile.open(filename[, topdir[, option]]) { |rzfile| ... }
opens a RZ file ((|filename|)), mounts it as the directory ((|topdir|)),
and returns its directory stream.
If ((|topdir|)) is not given,
it is determined automatically as 'lun1', 'lun2', and so on.
open() can be called with a block.
If a block is given, evaluates the block with the directory stream
of the opened RZ file, and returns the result.
The RZ file is closed automatically.
== methods:
--- RZFile#close
closes the RZ file.
If user accesses the closed RZ file, an exception is raised.
--- RZFile#filename
returns the file name.
--- RZFile#lun
returns the logical unit number of the file.
--- RZFile#option
returns the option specified when the RZ file is opened.
--- RZFile#topname
returns the directory name of the mount point.
=end
|
1d793517bb777a6ea81fe5b4f83f9a0587621fd2
|
3b234546e5d374714f25c755608fdfddaeade31e
|
/Similarity.r
|
12cdd5314b9484a670dd212009b2fa25f1f039ec
|
[] |
no_license
|
Tbhangale/Similarity-News-Articles
|
13b18ece12df230e442f15c31e9f3eaa0f94974f
|
a138c70cb71f2a9a3c4959f41919c3a2d2196a26
|
refs/heads/master
| 2020-04-03T03:35:22.240081
| 2018-10-27T17:34:15
| 2018-10-27T17:34:15
| 154,990,035
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,997
|
r
|
Similarity.r
|
#==============================================
# Load required libraries ----
# Install only when missing; library() (not require()) so a missing
# package fails loudly instead of returning FALSE silently.
if (!requireNamespace("tm", quietly = TRUE)) install.packages("tm")
if (!requireNamespace("SnowballC", quietly = TRUE)) install.packages("SnowballC")
library(tm)
library(SnowballC)

# Data pre-processing ----
# Read the 20-newsgroups corpus recursively from disk.
# NOTE: the original call contained stray trailing "+" characters (console
# copy-paste artifacts) inside the argument list, which made it a syntax
# error; they are removed here.
all <- Corpus(DirSource("D:/Sem 2/Temporal and spatial data/20news-18828",
                        encoding = "UTF-8", recursive = TRUE),
              readerControl = list(reader = readPlain, language = "en"))

# Clean the text: punctuation, lower-casing, two stop-word lists,
# stemming, numbers, and extra whitespace.
all.p <- tm_map(all, removePunctuation)
all.p <- tm_map(all.p, content_transformer(tolower))
all.p <- tm_map(all.p, content_transformer(removeWords), stopwords("english"))
all.p <- tm_map(all.p, content_transformer(removeWords), stopwords("SMART"))
all.p <- tm_map(all.p, stemDocument)
all.p <- tm_map(all.p, removeNumbers)
all.p <- tm_map(all.p, stripWhitespace)

# Document-term matrix, with terms that are >99% sparse removed.
dtm <- DocumentTermMatrix(all.p)
dtm <- removeSparseTerms(dtm, 0.99)

# Tidy (long-format) data-frame view of the DTM.
library(tidytext)
DF <- tidy(dtm)
#======================================================
# Feature selection ----
# Identify the 100 most frequent terms across the corpus.

# Dense matrix view of the document-term matrix (documents x terms).
m <- as.matrix(dtm)
# Long data-frame view of the same matrix (reused later for regressions).
# (<- instead of the original's = for assignment, per R convention.)
dataframe_m <- tidy(m)
# Total frequency of each term across all documents, most frequent first.
sortedMatrix <- sort(colSums(m), decreasing = TRUE)
# Top 100 terms by overall frequency.
sorted100 <- head(sortedMatrix, 100)
#=======================================================
# Pairwise document distances ----
# The proxy package extends dist() with cosine and Jaccard methods.
if (!requireNamespace("proxy", quietly = TRUE)) install.packages("proxy")
library(proxy)

# Restrict to the first 1000 documents to keep the distance matrices small.
samplem <- head(as.matrix(dtm), 1000)
dataframe_samplem <- tidy(samplem)

# Cosine distance between document term-vectors.
m_cosdis <- dist(samplem, method = "cosine")
# Jaccard distance between documents.
m_jacdis <- dist(samplem, method = "Jaccard")
# Euclidean distance between documents.
m_eucdis <- dist(samplem, method = "euclidean")
#=========================================================
# Term-frequency distribution plot (Zipf-like rank/frequency curve) ----
if (!requireNamespace("reshape2", quietly = TRUE)) install.packages("reshape2")
if (!requireNamespace("ggplot2", quietly = TRUE)) install.packages("ggplot2")
library(reshape2)
library(ggplot2)

# One row per term with its total corpus frequency.
dfplot <- as.data.frame(melt(sortedMatrix))
dfplot$word <- dimnames(dfplot)[[1]]
# Convert each word to its frequency rank (1 = most frequent) by ordering
# the factor levels by descending frequency and taking the level index.
dfplot$word <- factor(dfplot$word,
                      levels = dfplot$word[order(dfplot$value,
                                                 decreasing = TRUE)])
dfplot$word <- as.numeric(dfplot$word)

# Bar chart of term frequency against term rank.
fig <- ggplot(dfplot, aes(x = word, y = value)) +
  geom_bar(stat = "identity", fill = "grey", colour = "darkred") +
  xlab("Term Ranking") +
  ylab("Term Frequency")
print(fig)
#=================================================================
#heatmap generation ggplot
# Tile-plot heatmaps of the three pairwise distance matrices. Each 'dist'
# object is expanded to a full square matrix and melted to long format
# (Var1, Var2, value) for ggplot.
# NOTE(review): the titles say "Similarity" but the plotted values are
# distances (larger = less similar).
ggplot(data = melt(as.matrix(m_cosdis)), aes(x=Var1, y=Var2, fill=value))+
geom_tile() + ggtitle("Cosine Similarity Matrix")
ggplot(data = melt(as.matrix(m_jacdis)), aes(x=Var1, y=Var2, fill=value)) +
geom_tile()+ ggtitle("Jaccard Similarity Matrix")
ggplot(data = melt(as.matrix(m_eucdis)), aes(x=Var1, y=Var2, fill=value)) +
geom_tile()+ ggtitle("Euclidean Similarity Matrix")
#========================================================
# Correlations between the three distance measures across all pairs ----
# (<- instead of = for assignment; the original "Jarcard" variable names
# are kept to avoid breaking any downstream references.)
Cosine_Euclidean <- cor(m_eucdis, m_cosdis, method = "pearson")
Euclidean_Jarcard <- cor(m_jacdis, m_eucdis, method = "pearson")
Jarcard_Cosine <- cor(m_cosdis, m_jacdis, method = "pearson")

# Linear regressions between the first 500 distance values of each measure.
cos_vector <- as.vector(head(m_cosdis, 500))
euc_vector <- as.vector(head(m_eucdis, 500))
jac_vector <- as.vector(head(m_jacdis, 500))
# NOTE(review): the *_vector variables live in the global environment, not
# in dataframe_m, so lm() resolves them from the calling environment and
# the data= argument is effectively unused here.
fit1 <- lm(formula = cos_vector ~ euc_vector, data = dataframe_m)
fit2 <- lm(formula = euc_vector ~ jac_vector, data = dataframe_m)
fit3 <- lm(formula = jac_vector ~ cos_vector, data = dataframe_m)
scatter.smooth(x = euc_vector, y = cos_vector)
scatter.smooth(x = jac_vector, y = euc_vector)
scatter.smooth(x = cos_vector, y = jac_vector)
#=======================================================
# Standard deviation of distance scores vs. number of features ----
# (plotly was installed/loaded here in the original but is not used below;
# the load is kept in case later code relies on it being attached.)
if (!requireNamespace("plotly", quietly = TRUE)) install.packages("plotly")
library(plotly)

data_sd <- as.matrix(m)

# Feature counts to evaluate.
dp <- c(0, 10, 20, 50, 100, 500)

# SD of each pairwise distance measure for documents 500-1500 using the
# first n_feat term columns. seq_len() correctly selects zero columns when
# n_feat == 0 (the original 1:j silently selected column 1 instead).
sd_for_features <- function(n_feat) {
  data_selected <- data_sd[500:1500, seq_len(n_feat), drop = FALSE]
  c(euc = sd(dist(data_selected, method = "euclidean"), na.rm = FALSE),
    cos = sd(dist(data_selected, method = "cosine"), na.rm = FALSE),
    jac = sd(dist(data_selected, method = "jaccard"), na.rm = FALSE))
}

# Preallocated 3 x length(dp) result instead of growing vectors with c()
# inside a loop (the unused counter i is dropped as well).
sd_mat <- vapply(dp, sd_for_features, numeric(3))
euc_sd <- sd_mat["euc", ]
cos_sd <- sd_mat["cos", ]
jac_sd <- sd_mat["jac", ]

plot(dp, euc_sd, type = "l", ylim = c(-2, 15), col = "green",
     xlab = "Features", ylab = "Standard Deviation of Similarity Scores")
lines(dp, cos_sd, type = "o", col = "red")  # ylim dropped: not a lines() arg
points(dp, jac_sd, col = "blue")            # ylim dropped: not a points() arg
#=======================================================
#Rank article pairs
# Melt each distance matrix to long format (Var1, Var2, value) and show the
# pairs with the largest distance values (tail of an ascending sort), i.e.
# the most dissimilar article pairs under each measure.
jac_melt <- melt(as.matrix(m_jacdis))
tail(jac_melt[order(jac_melt$value),])
euc_melt <- melt(as.matrix(m_eucdis))
tail(euc_melt[order(euc_melt$value),])
cos_melt <- melt(as.matrix(m_cosdis))
tail(cos_melt[order(cos_melt$value),])
|
8700706baf84aa8ad006c3ea41fefb81c925e059
|
88dde69ede665f2bc9f606781d3de00562e8b3ed
|
/man/create.poset.Rd
|
5357aab370e6790cc401cfabd9fde6982664aaf7
|
[] |
no_license
|
vishalbelsare/pim
|
64ebfb621d618e7e85457c9d41c83f7b6251d6d1
|
26545a0cbcc668c0015f6289e634837548d442b2
|
refs/heads/master
| 2023-06-09T12:34:55.536755
| 2020-02-03T17:17:01
| 2020-02-03T17:17:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 840
|
rd
|
create.poset.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/create.poset.R
\name{create.poset}
\alias{create.poset}
\title{Create a poset}
\usage{
create.poset(compare = c("unique", "all"), n)
}
\arguments{
\item{compare}{a character value, either 'unique' or 'all'}
\item{n}{an single integer value indicating how many observations there
are in the model.}
}
\value{
A named list with 2 elements, called "L" and "R", containing
the selection indices for the left hand and right hand side of a pim.
}
\description{
This function creates a poset for use in a pim model based on a
number of observations and a comparison type. This function is
called from \code{\link{new.pim.poset}} and returns a list that
can be used as a value for its argument \code{compare}.
}
\examples{
create.poset(n=10)
create.poset('all',n=4)
}
|
1b8e52378d287fd153cba770fcb5b269e15d0d59
|
f6f3e0e473e8c12fa977f36e9bca348b66adc51f
|
/plot2.R
|
ec9205d64bd495b1fc4acb24025ceffa5a90fd8f
|
[] |
no_license
|
shuoyenl/ExData_Plotting1
|
f16809fa5f7373fbc54d37dcb7e5ca89a6850fc8
|
1cc7ed90b530ec2f6f790132576e1e0b3788a372
|
refs/heads/master
| 2020-05-29T11:45:32.510419
| 2015-04-11T22:32:23
| 2015-04-11T22:32:23
| 33,718,840
| 0
| 0
| null | 2015-04-10T08:58:37
| 2015-04-10T08:58:37
| null |
UTF-8
|
R
| false
| false
| 705
|
r
|
plot2.R
|
# Plot 2: line chart of Global Active Power over 1-2 Feb 2007, saved as PNG.

# Extract the raw data file from the zip archive if it is not already present.
if (!file.exists("household_power_consumption.txt")) {
  unzip("exdata-data-household_power_consumption.zip")
}

# Infer column classes from a small preview, then read the full file using
# those classes (avoids read.table re-guessing types on every row).
preview <- read.table("household_power_consumption.txt", sep = ";",
                      header = TRUE, na.strings = c("?"), nrows = 100)
col_classes <- sapply(preview, class)
power <- read.table("household_power_consumption.txt", sep = ";",
                    header = TRUE, na.strings = c("?"),
                    colClasses = col_classes, skip = 0)

# Keep only the two target days (dates are stored as d/m/yyyy strings).
two_days <- power[power$Date == "1/2/2007" | power$Date == "2/2/2007", ]

# Render the line plot to a 480x480 PNG with day-boundary tick marks
# (1440 minutes per day).
png(filename = "plot2.png", width = 480, height = 480)
plot(two_days$Global_active_power, type = "l", xlab = "",
     ylab = "Global Active Power (kilowatts)", xaxt = "n")
axis(side = 1, at = c(0, 1440, 2880), lab = c("Thu", "Fri", "Sat"))
dev.off()
|
4f799d272e73317431041ad24fa5ad6045dfd70c
|
b99c39e52d62173235c1351682735deddba6ace5
|
/MAPR_integrated_population_model.r
|
f361d3738d9bda1368ef203455520e47a81872c3
|
[] |
no_license
|
steffenoppel/MAPR
|
6c80d333a4ff9e4a0ae78a73218683b8c6b05be4
|
c6bd16a53021b85dfae416e17d4b08011db969f5
|
refs/heads/master
| 2023-08-11T09:11:15.463363
| 2021-09-30T18:46:04
| 2021-09-30T18:46:04
| 305,317,914
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,472
|
r
|
MAPR_integrated_population_model.r
|
##########################################################################
#
# MACGILLIVRAY PRION POPULATION MODEL FOR GOUGH ISLAND
#
##########################################################################
# population model adapted from Jiguet, F., Robert, A., Micol, T. and Barbraud, C. (2007), Quantifying stochastic and deterministic threats to island seabirds: last endemic prions face extinction from falcon peregrinations. Animal Conservation, 10: 245-253. doi:10.1111/j.1469-1795.2007.00100.x
# implemented in JAGS based on Kery and Schaub 2012
# written by Steffen.oppel@rspb.org.uk in May 2020
# revised in June 2020: start projection in 1955 with 5 million pairs, 2001 had 1 million pairs, project to 2050 (100 years)
library(tidyverse)
library(jagsUI)
library(data.table)
library(lubridate)
library(popbio)
# Pin dplyr's filter/select so later calls are not shadowed by other
# attached packages (e.g. stats::filter).
filter<-dplyr::filter
select<-dplyr::select
#########################################################################
# 1. LOAD AND PREPARE DATA FOR ADULT ANNUAL SURVIVAL ESTIMATION
#########################################################################
##### LOAD FORMATTED RINGING DATA ###########
# NOTE(review): hard-coded local Windows paths below — this script only
# runs unmodified on the author's machine.
setwd("C:/STEFFEN/RSPB/UKOT/Gough/ANALYSIS/SeabirdSurvival")
## run the RODBC import of CMR data in a 32-bit version of R
system(paste0("C:/PROGRA~1/R/R-35~1.1/bin/i386/Rscript.exe ", shQuote("C:\\STEFFEN\\RSPB\\UKOT\\Gough\\ANALYSIS\\SeabirdSurvival\\RODBC_CMR_import.R")), wait = TRUE, invisible = FALSE, intern = T)
try(setwd("C:\\STEFFEN\\RSPB\\UKOT\\Gough\\ANALYSIS\\SeabirdSurvival"), silent=T)
# Loads the capture-mark-recapture data, including the 'contacts' table
# used below.
load("GOUGH_seabird_CMR_data.RData")
###### FILTER DATA FROM RAW CONTACTS ########
# Keep prion records (MAPR/BBPR and unidentified PRIO) from the Prion Cave
# study site only.
contacts<-contacts %>% filter(SpeciesCode %in% c("MAPR","BBPR","PRIO")) %>% filter(Location=="Prion Cave")
head(contacts) ## seabird count data
# Encounter history: number of contacts per bird and year, spread to wide
# format with one column per year (fill = 0 where a bird was not seen).
EncHist<-contacts %>% group_by(BirdID,Contact_Year) %>%
summarise(n=length(Date_Time)) %>%
spread(key=Contact_Year,value=n,fill=0)
#### FORMAT FOR SIMPLE CJS MODEL ############
# Capture-history matrix: one row per bird, one column per year
# (column 1 of EncHist is the BirdID, hence 2:ncol), recoded to
# 1 = encountered that year, 0 = not encountered.
CH <- as.matrix(EncHist[, 2:ncol(EncHist)], dimnames = FALSE)
CH <- ifelse(CH > 0, 1, 0)
head(CH)

# ELIMINATE TRANSIENTS ONLY OBSERVED IN A SINGLE YEAR
# rowSums() replaces the equivalent apply(CH[, 1:ncol(CH)], 1, sum).
del <- rowSums(CH)
dim(CH)
rCH <- CH[!(del == 1), ]
dim(rCH)

# Compute vector with occasion of first capture for each bird.
get.first <- function(x) min(which(x == 1))
f <- apply(rCH, 1, get.first)
#########################################################################
# 2. LOAD AND PREPARE DATA FOR BREEDING SUCCESS SUMMARY
#########################################################################
## run the RODBC import in a 32-bit version of R
system(paste0("C:/PROGRA~1/R/R-35~1.1/bin/i386/Rscript.exe ", shQuote("C:\\STEFFEN\\RSPB\\UKOT\\Gough\\ANALYSIS\\SeabirdBreedingSuccess\\RODBC_nest_import.r")), wait = TRUE, invisible = FALSE, intern=T)
#system(paste0(Sys.getenv("R_HOME"), "/bin/i386/Rscript.exe ", shQuote("C:\\Users\\Gough Conservation\\Documents\\Gough Birders\\2018-2019\\12.Monthly reports 2018-19\\RODBC_imports.r")), wait = FALSE, invisible = FALSE)
try(setwd("C:\\STEFFEN\\RSPB\\UKOT\\Gough\\ANALYSIS\\SeabirdBreedingSuccess"), silent=T)
#try(setwd("C:\\Users\\Gough Conservation\\Documents\\Gough Birders\\2018-2019\\12.Monthly reports 2018-19"), silent=T)
# Loads the nest monitoring tables 'nestsDB' and 'visDB'.
load("GOUGH_nest_data.RData")
head(nestsDB) ## nest data
head(visDB) ## nest visit data
## SELECT DATA FOR TARGET SPECIES AND SUMMARISE NEST SUCCESS ####
head(nestsDB)
# Annual breeding success for MAPR from 2014 onwards:
# R = number of monitored nests, J = number of successful nests per year.
succ<-nestsDB %>% filter(Species=="MAPR") %>% filter(Year>2013) %>%
mutate(count=1) %>%
group_by(Species,Year) %>%
summarise(R=sum(count),J=sum(SUCCESS))
#########################################################################
# 3. SET UP AND RUN INTEGRATED POPULATION MODEL
#########################################################################
# Bundle data
jags.data <- list(## survival
y = rCH,
f = f,
n.occasions = dim(rCH)[2],
nind = dim(rCH)[1],
mean.juv.surv.prop= 0.728/0.894, ## juvenile psurvival based on proportion of adult survival from Jiguet 2007
## fecundity
R =succ$R,
J=succ$J,
T.fec=length(succ$J),
## population process
PROJ=100,
POP.SIZE=4500000
)
# Initial values
# NOTE(review): inits supply 'pp' but the JAGS model defines 'p' — confirm this
# is intentional; JAGS will not use 'pp' to initialise the detection prior.
inits <- function(){list(phi = runif(1, 0.7, 1),
pp = runif(1, 0, 1))}
# Parameters monitored
parameters <- c("mean.fec","full.fec","juv.surv","phi","p","growth.rate","lambda","Ntot.breed")
# MCMC settings
# NOTE(review): 'ni' is defined but unused because n.iter is commented out in
# the autojags() call below (autojags runs until convergence instead).
ni <- 10000
nt <- 2
nb <- 5000
nc <- 4
# Call JAGS from R (model created below)
MAPR_IPM <- autojags(jags.data, inits, parameters, "C:\\STEFFEN\\RSPB\\UKOT\\Gough\\ANALYSIS\\PopulationModel\\MAPR\\MAPR_IPM_v2.jags",
n.chains = nc, n.thin = nt, n.burnin = nb,parallel=T) # n.iter = ni,
#########################################################################
# 4. SUMMARISE OUTPUT AND PLOT POPULATION TRAJECTORY
#########################################################################
## compile output
out<-as.data.frame(MAPR_IPM$summary)
out$parameter<-row.names(MAPR_IPM$summary)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# CREATE OUTPUT TABLE FOR REPORT /MANUSCRIPT
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
head(out)
# columns 5, 3, 7 of the jagsUI summary are the 50%, 2.5% and 97.5% quantiles
TABLE1<-out %>% filter(parameter %in% c('mean.fec','full.fec','phi','juv.surv','growth.rate[1]','growth.rate[2]')) %>%
select(parameter,c(5,3,7))
names(TABLE1)<-c("Parameter","Median","lowerCL","upperCL")
# labels follow the row order of the monitored parameters in 'out'
TABLE1$Parameter<-c("current fecundity","mouse-free fecundity","first year survival","adult survival","population growth rate (no eradication)","population growth rate (with eradication)")
TABLE1
#fwrite(TABLE1,"MAPR_demographic_parameter_estimates_v1.csv")
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# GRAPH 1: POPULATION TRAJECTORY UNDER BOTH SCENARIOS
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
## retrieve the past population estimates (2006-2019)
# parse the scenario index out of the 'Ntot.breed[t,scen]' parameter names
MAPRpop<-out[(grep("Ntot.breed\\[",out$parameter)),c(12,5,3,7)] %>%
mutate(Year=rep(seq(1956,2055),2)) %>%
mutate(scenario=as.numeric(str_extract_all(parameter,"\\(?[0-9]+\\)?", simplify=TRUE)[,2])) %>%
mutate(Scenario=ifelse(scenario==1,"no eradication","with eradication")) %>%
filter(!(Scenario=="with eradication" & Year<2025)) %>%
#filter((Scenario=="with eradication")) %>%
rename(parm=parameter,median=`50%`,lcl=`2.5%`,ucl=`97.5%`) %>%
dplyr::select(parm,Scenario,Year,median,lcl,ucl)
### CREATE PLOT FOR BASELINE TRAJECTORY
# cap the upper credible limit at 6 million so the ribbon stays on the plot
MAPRpop$ucl[MAPRpop$ucl>6000000]<-6000000
ggplot()+
geom_line(data=MAPRpop, aes(x=Year, y=median, color=Scenario), size=1)+
geom_ribbon(data=MAPRpop,aes(x=Year, ymin=lcl,ymax=ucl, fill=Scenario),alpha=0.2)+
## format axis ticks
scale_y_continuous(name="N MacGillivray's Prion on Gough (1000s)", limits=c(0,6000000),breaks=seq(0,6000000,500000),labels=seq(0,6000,500))+
scale_x_continuous(name="Year", limits=c(1956,2056), breaks=seq(1956,2056,20), labels=as.character(seq(1956,2056,20)))+
## add count data
geom_segment(aes(x=1956, xend=1956,y=0.4*10000000,yend=0.5*10000000),lineend = "round", size=2, colour="darkblue") +
geom_segment(aes(x=2000, xend=2000,y=0.4*1500000,yend=0.5*2000000),lineend = "round", size=2, colour="darkblue") +
## beautification of the axes
theme(panel.background=element_rect(fill="white", colour="black"), panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
axis.text.y=element_text(size=18, color="black"),
axis.text.x=element_text(size=14, color="black"),
axis.title=element_text(size=18),
legend.text=element_text(size=12, color="black"),
legend.title=element_text(size=14, color="black"),
legend.key = element_rect(fill = NA),
strip.text.x=element_text(size=18, color="black"),
strip.background=element_rect(fill="white", colour="black"))
ggsave("MAPR_population_projection_v2.jpg", width=9, height=6)
#########################################################################
# 5. Specify BASIC POPULATION MODEL WITH TWO SCENARIOS
#########################################################################
### DEMOGRAPHIC PARAMETERS
#Juvenile survival: 0.728 from Barbraud & Weimerskirch (2003), Oro et al. (2004)
#Immature survival: 0.894 from Barbraud & Weimerskirch (2003)
#Adult survival: 0.894 from Barbraud & Weimerskirch (2003)
#Age at maturity: 4 from Warham (1990), Oro et al. (2004)
#Female breeding success: 0.519 from Nevoux & Barbraud (2005)
### Calculation of stable age distribution
### CREATING THE POPULATION MATRIX ###
# 5-stage female Leslie matrix: top row = fecundity (0.519 success * 0.5 sex
# ratio), sub-diagonal = stage survival; stable.stage() is only used to inform
# the initial stage proportions hard-coded in the JAGS model below.
seabird.matrix<-matrix(c(
0,0,0,0,0.519*0.5,
0.728,0,0,0,0,
0,0.894,0,0,0,
0,0,0.894,0,0,
0,0,0,0.894,0.894),ncol=5, byrow=T)
stable.stage(seabird.matrix)
setwd("C:\\STEFFEN\\RSPB\\UKOT\\Gough\\ANALYSIS\\PopulationModel\\MAPR")
# Write the JAGS model file used by autojags() in section 3. The model text
# below is a runtime string and is left exactly as authored.
sink("MAPR_IPM_v2.jags")
cat("
model {
#-------------------------------------------------
# integrated population model for the MacGillivray's Prion population
# - age structured model with 4 age classes
# - adult survival based on CMR ringing data
# - productivity based on Prion Cave nest monitoring data
# - simplified population process with informed prior for adults skipping breeding and uninformed immatures recruiting
# - TWO future scenarios to project population growth with and without eradication
# -------------------------------------------------
#-------------------------------------------------
# 1. PRIORS FOR ALL DATA SETS
#-------------------------------------------------
# -------------------------------------------------
# 1.1. Priors and constraints FOR FECUNDITY
# -------------------------------------------------
mean.fec ~ dunif(0,1) ## uninformative prior with upper bound from Nevoux & Barbraud (2005)
full.fec ~ dnorm(0.519,1000) ## prior for full fecundity without predation from Nevoux & Barbraud (2005) - very high precision
# -------------------------------------------------
# 1.2. Priors and constraints FOR SURVIVAL
# -------------------------------------------------
phi ~ dunif(0.7, 1)
p ~ dunif(0, 1)
juv.surv <- juv.surv.prop*phi
juv.surv.prop ~ dnorm(mean.juv.surv.prop,1000) T(0,1)
#-------------------------------------------------
# 2. LIKELIHOODS AND ECOLOGICAL STATE MODEL
#-------------------------------------------------
# -------------------------------------------------
# 2.1. System process: female based matrix model
# -------------------------------------------------
for (scen in 1:2){
fec.proj[scen]<-max(mean.fec,(scen-1)*full.fec) ## takes current fecundity for scenario 1 and full fecundity for scenario 2
### INITIAL VALUES FOR COMPONENTS FOR YEAR 1 - based on stable stage distribution from previous model
JUV[1,scen]<-max(2,round(Ntot.breed[1,scen]*0.5*(mean.fec+0.16)))
N1[1,scen]<-round(Ntot.breed[1,scen]*0.5*(mean.fec+0.17)*juv.surv)
N2[1,scen]<-round(Ntot.breed[1,scen]*0.5*(mean.fec+0.18)*juv.surv*phi)
N3[1,scen]<-round(Ntot.breed[1,scen]*0.5*(mean.fec+0.19)*juv.surv*phi*phi)
Ntot.breed[1,scen] ~ dnorm(POP.SIZE,10) # initial value of population size
for (tt in 2:66){
## THE PRE-BREEDERS ##
nestlings[tt,scen] <- round((mean.fec+0.30/tt) * 0.5 * Ntot.breed[tt,scen]) ### number of locally produced FEMALE chicks
JUV[tt,scen] ~ dpois(nestlings[tt,scen]) ### need a discrete number otherwise dbin will fail, dpois must be >0
N1[tt,scen] ~ dbin(juv.surv, max(2,round(JUV[tt-1,scen]))) ### number of 1-year old survivors
N2[tt,scen] ~ dbin(phi, max(2,round(N1[tt-1,scen]))) ### number of 2-year old survivors
N3[tt,scen] ~ dbin(phi, max(2,round(N2[tt-1,scen]))) ### number of 3-year old survivors
## THE BREEDERS ##
Ntot.breed[tt,scen] ~ dbin(phi, max(2,round(N3[tt-1,scen]+Ntot.breed[tt-1,scen]))) ### the annual number of breeding birds is the sum of old breeders and recent recruits
} # tt
for (tt in 67:PROJ){
## THE PRE-BREEDERS ##
nestlings[tt,scen] <- round(fec.proj[scen] * 0.5 * Ntot.breed[tt,scen]) ### number of locally produced FEMALE chicks
JUV[tt,scen] ~ dpois(nestlings[tt,scen]) ### need a discrete number otherwise dbin will fail, dpois must be >0
N1[tt,scen] ~ dbin(juv.surv, max(2,round(JUV[tt-1,scen]))) ### number of 1-year old survivors
N2[tt,scen] ~ dbin(phi, max(2,round(N1[tt-1,scen]))) ### number of 2-year old survivors
N3[tt,scen] ~ dbin(phi, max(2,round(N2[tt-1,scen]))) ### number of 3-year old survivors
## THE BREEDERS ##
Ntot.breed[tt,scen] ~ dbin(phi, max(2,round(N3[tt-1,scen]+Ntot.breed[tt-1,scen]))) ### the annual number of breeding birds is the sum of old breeders and recent recruits
} # tt
} # scen
# -------------------------------------------------
# 2.2. Likelihood for fecundity: Poisson regression from the number of surveyed broods
# -------------------------------------------------
for (t in 1:(T.fec)){ ### T-1 or not
J[t] ~ dpois(rho.fec[t])
rho.fec[t] <- R[t]*mean.fec
} # close loop over every year in which we have fecundity data
# -------------------------------------------------
# 2.3. Likelihood for adult and juvenile survival from CMR
# -------------------------------------------------
for (i in 1:nind){
# Define latent state at first capture
z[i,f[i]] <- 1
for (t in (f[i]+1):n.occasions){
# State process
z[i,t] ~ dbern(mu1[i,t])
mu1[i,t] <- phi * z[i,t-1]
# Observation process
y[i,t] ~ dbern(mu2[i,t])
mu2[i,t] <- p * z[i,t]
} #t
} #i
# -------------------------------------------------
# 4. DERIVED POPULATION GROWTH RATE
# -------------------------------------------------
## DERIVED POPULATION GROWTH RATE
for (scen in 1:2){
for (tt in 1:33){
lambda[tt,scen]<-Ntot.breed[tt+67,scen]/max(1,Ntot.breed[tt+66,scen])
loglam[tt,scen]<-log(lambda[tt,scen])
} ## end of tt
growth.rate[scen] <- exp((1/(33))*sum(loglam[1:(33),scen])) ### geometric mean growth rate
} ## end of scen
} ## END MODEL LOOP
",fill = TRUE)
sink()
|
aea5380a35113d8a166323fdbf2fab5f4538e9d7
|
17785aec04c48e0c3c5461eb0940d6876936c317
|
/feature_combinations_detail.r
|
b860d636107690b7456db4b58319fa01f25c5740
|
[] |
no_license
|
sweb/kaggle_otto
|
ee113295ed10263566054304e0cf255a5e6dc9b8
|
858f267c7e2d37025709be9e1a83e4a500ee05a6
|
refs/heads/master
| 2021-01-15T17:46:54.004347
| 2015-05-10T19:13:29
| 2015-05-10T19:13:29
| 33,920,798
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,153
|
r
|
feature_combinations_detail.r
|
# Explore accuracies of engineered feature combinations for the Otto
# 'is_class2' binary task; reads the results CSV written by a previous run.
setwd("C:/dev/repositories/R/kaggle_otto")
source("init_ws.r")
class2_accus <- read.csv("observations/feature_combinations.csv") %>% select(-X)
require(tidyr)
# reshape to long format (train/validation accuracy per feature) for plotting
vis_class2_accus <- class2_accus %>%
filter (validation > 0.845) %>%
mutate(feat = paste("feat", feat, sep="_")) %>%
gather("type", "accuracy", 2:3)
# bar chart of the best-performing feature combinations
ggplot(vis_class2_accus, aes(x=feat, y=accuracy, fill=type)) +
theme_classic() +
geom_bar(stat="identity", position="dodge")
# train vs validation scatter, labelled by feature id, to spot overfitting
ggplot(class2_accus %>% filter(validation > 0.83), aes(x=train, y=validation)) +
theme_minimal() +
geom_point() +
geom_text(aes(label = feat), hjust=1.5, size=4)
# Best three feature-combinations (until 70): 64, 70, 9
require(doParallel)   # parallel backend used by caret::train
c1 <- makeCluster(2)
registerDoParallel(c1)
i <- 48   # feature combination currently under evaluation
engineered.data <- normalize(addFeatureCombination(i, munged.data))
# 60/20/20 train/validation/test split, reproducible via fixed seeds
set.seed(42)
split.data <- createDataPartition(engineered.data$is_class2, p = 0.6, list = FALSE)
t.data <- engineered.data[split.data, ]
tmp.test.data <- engineered.data[-split.data, ]
set.seed(42)
split.test <- createDataPartition(tmp.test.data$is_class2, p = 0.5, list = FALSE)
v.data <- tmp.test.data[split.test, ]
ts.data <- tmp.test.data[-split.test, ]
control.config <- trainControl(method = "repeatedcv", repeats = 1,
                               summaryFunction = twoClassSummary,
                               classProbs = TRUE)
# Baseline: logistic regression on the 93 raw features only (no engineered column)
logreg.tmp.train <- train(is_class2 ~ .,
                          data = t.data %>% select(1:93, is_class2),
                          method = "glm",
                          metric = "ROC",
                          trControl = control.config)
# BUG FIX: the two accuracies were previously computed but never assigned, so
# the `baseline <- cbind(...)` line below referenced undefined objects
# (tmp.train.acc / tmp.val.acc) and errored. Assign them, then print.
logreg.tmp.pred_train <- predict(logreg.tmp.train, t.data)
tmp.train.acc <- confusionMatrix(logreg.tmp.pred_train, t.data$is_class2)$overall[1]
print(tmp.train.acc)
logreg.tmp.pred <- predict(logreg.tmp.train, v.data)
tmp.val.acc <- confusionMatrix(logreg.tmp.pred, v.data$is_class2)$overall[1]
print(tmp.val.acc)
# baseline accuracies to compare each engineered feature against in the loop below
baseline <- cbind(train = tmp.train.acc, validation = tmp.val.acc)
# Evaluate each of the 92 engineered feature columns (columns 94..185) added on
# top of the 93 raw features; collect train/validation accuracy per feature.
# NOTE(review): seq(1:92) is equivalent to 1:92 here but seq_len(92) would be
# clearer; growing 'd' with rbind() inside the loop is O(n^2) — acceptable for
# 92 iterations.
d <- data.frame()
for (j in seq(1:92)) {
a <- Sys.time()   # wall-clock timing per feature
current <- j + 93 # column index of the engineered feature being tested
print(current)
logreg.tmp.train <- train(is_class2 ~ .,
data = t.data %>% select(1:93,current, is_class2),
method = "glm",
metric = "ROC",
trControl = control.config)
print (Sys.time())
print (Sys.time() - a)
logreg.tmp.pred_train <- predict(logreg.tmp.train, t.data %>% select(1:93,current, is_class2))
tmp.train.acc <- confusionMatrix(logreg.tmp.pred_train, t.data$is_class2)$overall[1]
logreg.tmp.pred <- predict(logreg.tmp.train, v.data %>% select(1:93,current, is_class2))
tmp.val.acc <- confusionMatrix(logreg.tmp.pred, v.data$is_class2)$overall[1]
d <- rbind(d, cbind(feat = j, train = tmp.train.acc, validation = tmp.val.acc))
print(paste("Train:", tmp.train.acc, "- Validation:", tmp.val.acc))
}
# rank features by validation and train accuracy
d %>% arrange(desc(validation))
d %>% arrange(desc(train))
# scatter of train vs validation accuracy, labelled by feature id
ggplot(d, aes(x=train, y=validation)) +
theme_minimal() +
geom_point() +
geom_text(aes(label = feat), hjust=1.5, size=4)
# 181, 116, 171, 180, 122, 125, 177, 94, 124, 146, 154, 168
#76, 33, 79, 29, 88, 28, 71
#9, 70, 40, 29, 43, 54, 66, 63
|
cf61304e4f965ab474311db25093104d2160b5e4
|
dd3cac999fbb8e88a6c9a02cbf899fc83de415d9
|
/func/Task_1.5_Tokenization_func.R
|
048d9cf9ab9517e8329266b4bd7d5d2d9366c9ad
|
[
"MIT"
] |
permissive
|
rjcc/SwiftKey-Natural-language
|
4852f920ce8c9a441577e0bc6314d259c77e34d7
|
08e38c0426708936fa70db0ab1ba5a5f1a85ba61
|
refs/heads/master
| 2020-12-11T12:30:11.804690
| 2019-06-30T12:43:18
| 2019-06-30T12:43:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,685
|
r
|
Task_1.5_Tokenization_func.R
|
#' Tokenize/clean a tm Corpus through a configurable sequence of transformations.
#'
#' @param docs         a tm Corpus to clean
#' @param trans        logical vector of 8 switches enabling, in the order the
#'                     steps are applied: [1] char-to-space, [8] profanity
#'                     filtering, [2] lowercase, [3] remove numbers,
#'                     [4] remove punctuation, [5] remove stop words,
#'                     [6] remove own stop words, [7] strip whitespace
#' @param ChartoSpace  characters (regex patterns) converted to white space by step 1
#' @param stopWords    stop-word language passed to tm::stopwords()
#' @param ownStopWords additional words to remove in step 7
#' @param profanity    two-column data frame: column 1 = pattern, column 2 = replacement
#' @return the transformed Corpus
tokenization <- function (docs, trans = c(TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE),
                          ChartoSpace = c('/','@','\\|'),
                          stopWords = 'english', ownStopWords = c(), profanity = data.frame()) {
  cat(paste('\nPlease wait for initializing and summrising the input files......'))
  require(tm); require(SnowballC)
  cat(paste('\nDocuments below will be processed soon!\n'))
  print(summary(docs))
  cat(paste('\nStart tokenization processes...'))
  # Simple Transformation
  cat(paste('\n\n1.Simple Transformation:', trans[1]))
  # isTRUE() is equivalent to '== T' for logical flags but safe for NA
  if (isTRUE(trans[1])) {
    toSpace <- content_transformer(function(x, pattern) gsub(pattern, " ", x))
    for (i in ChartoSpace) {
      docs <- tm_map(docs, toSpace, i)
      cat(paste('\n ->Character:', i, 'has been transformed into white space!'))
    }
  }
  # Specific Transformations/Profanity filtering
  cat(paste('\n2.Specific Transformations/Profanity filtering:', trans[8]))
  if (isTRUE(trans[8])) {
    cat(paste('\n', nrow(profanity), 'words will be filtered, following is a sample of the words:\n'))
    print(head(profanity,5))
    toString <- content_transformer(function(x, from, to) gsub(from, to, x))
    # BUG FIX: 1:nrow(profanity) iterated over c(1, 0) when the default empty
    # data frame was used; seq_len() yields an empty loop in that case.
    for (i in seq_len(nrow(profanity))) {
      docs <- tm_map(docs, toString, profanity[i,1], profanity[i,2])
    }
    cat('\n ->Specific Transformations/Profanity filtering have been done to raw document!')
  }
  # Lowercase
  cat(paste('\n3.Lowercase Transformation:', trans[2]))
  if (isTRUE(trans[2])) {
    docs <- tm_map(docs, content_transformer(tolower))
    cat('\n ->All CAPITAL characters have been transformed to lower cases!')
  }
  # Remove Numbers
  cat(paste('\n4.Remove Numbers:', trans[3]))
  if (isTRUE(trans[3])) {
    docs <- tm_map(docs, removeNumbers)
    cat('\n ->All NUMBERs have been eliminated from raw document!')
  }
  # Remove Punctuations
  cat(paste('\n5.Remove Punctuations:', trans[4]))
  if (isTRUE(trans[4])) {
    docs <- tm_map(docs, removePunctuation)
    cat('\n ->All Punctuations have been eliminated from raw document!')
  }
  # Remove English Stop Words
  cat(paste('\n6.Remove Stop Words:', trans[5]))
  if (isTRUE(trans[5])) {
    cat(paste('\n->Remove', stopWords, 'Stop Words:'))
    cat(paste('\n->Stop Words including:\n' ))
    print(stopwords(stopWords))
    cat(paste('\n->',length(stopwords(stopWords)), 'words in total'))
    docs <- tm_map(docs, removeWords, stopwords(stopWords))
    cat('\n ->Stop Words have been eliminated from raw document!')
  }
  # Remove Own Stop Words
  cat(paste('\n7.Remove Own Stop Words:', trans[6]))
  if (isTRUE(trans[6])) {
    cat(paste('\n ->Remove Own Stop Words:'))
    cat(paste('\n ->Stop Words including:\n'))
    print(ownStopWords)
    cat('\n ->', paste(length(ownStopWords), 'words in total'))
    docs <- tm_map(docs, removeWords, ownStopWords)
    cat('\n ->Own Stop Words have been eliminated from raw document!')
  }
  # Strip Whitespace
  cat(paste('\n8.Strip Whitespace:', trans[7]))
  if (isTRUE(trans[7])) {
    docs <- tm_map(docs, stripWhitespace)
    cat('\n ->Whitespaces have been stripped from raw document!')
  }
  # Complete messages
  cat('\n\nDocument has been tokenized!')
  # BUG FIX: cat() cannot render the corpus summary object (it errors on
  # list-like input); print() matches the call used at the top of the function.
  print(summary(docs))
  return(docs)
}
|
e76add8659e0b2d024ba832d250009ffe870106a
|
8c353819bc833ce88ff5c1f2d27f31d40ac2b162
|
/R/SOcrs.R
|
db8edc9a8a14b93df32e55e9b46772ea83088fc1
|
[] |
no_license
|
AustralianAntarcticDivision/SOmap
|
6e1e91ec59a59be6471ce1b940c9363154f949b7
|
0297ee8aea87015e32a2d3e4f9b009d00a549d29
|
refs/heads/master
| 2023-03-12T07:42:44.477742
| 2023-03-07T02:44:57
| 2023-03-07T02:44:57
| 155,124,496
| 24
| 5
| null | 2023-02-02T22:18:42
| 2018-10-28T23:06:11
|
R
|
UTF-8
|
R
| false
| false
| 745
|
r
|
SOcrs.R
|
#' SOmap coordinate system
#'
#' Set or return the coordinate system currently in use.
#'
#' Called with a non-NULL `crs`, the value is stored in
#' `options("SOmap.crs.inuse")` and returned. Called with no argument, the
#' stored value is returned (which may be `NULL`, with a warning).
#' @param crs provide PROJ string to set the value
#' @export
#' @examples
#' \dontrun{
#' SOmap()
#' SOcrs()
#' }
SOcrs <- function(crs = NULL) {
  if (is.null(crs)) {
    ## getter branch: look up the stored CRS, warn when nothing has been set
    stored <- getOption("SOmap.crs.inuse")
    if (is.null(stored)) warning("No SOmap.crs.inuse")
    return(stored)
  }
  ## setter branch: record the CRS in the session options and echo it back
  options(SOmap.crs.inuse = crs)
  crs
}
## Companion to SOcrs() for the map extent: with an argument, store it in
## options("SOmap.extent.inuse") and return it; with no argument, return the
## stored extent (warning if none has been recorded yet).
SOextent <- function(extent = NULL) {
  if (is.null(extent)) {
    stored <- getOption("SOmap.extent.inuse")
    if (is.null(stored)) warning("No SOmap.extent.inuse")
    return(stored)
  }
  options(SOmap.extent.inuse = extent)
  extent
}
|
b895fb5b0482591dde76de36c34389b384980a7e
|
c28a77e18cb220184dd84963e8c3809969606273
|
/001-uniform-swing.R
|
81ab3bc361f2e26cff6af2bd36a909b77eaf17c7
|
[] |
no_license
|
RomanSalzwedel/BTW17_constituency_forecast
|
469bec6f0aeac832616ac14332f7d83b4803a321
|
87f35fb952445afa5d4d684a85b5c12ed665af88
|
refs/heads/master
| 2021-01-02T09:24:39.860481
| 2017-08-03T08:37:08
| 2017-08-03T08:37:08
| 99,210,323
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,488
|
r
|
001-uniform-swing.R
|
####################################
### Election Forecasting Project ###
### HU Seminar SoSe 2017 ###
####################################
### Requierements ------------------
source("packages.r")
source("functions.r")
### Data Manipulation --------------
# Import
df <- read.csv2("data/kandidatinnen_90_17_long.csv",
sep = ";", stringsAsFactors = FALSE)
# Generate 'election id'
election_years <- unique(df$year)
election_years_id <- seq_along(election_years)
election_years_df <- data.frame(year = election_years, election_id = election_years_id)
head(election_years_df)
df <- merge(df, election_years_df, by = c("year"), all.x = TRUE)
# Add BTW results
btw <- read.csv2("data/btw_zweit.csv",
sep = ";", stringsAsFactors = FALSE)
df <- merge(df, btw, by = c("year", "party"), all.x = TRUE)
# Generate Percentage of Erst- and Zweitsimme
df <- mutate(df, per_erst = erst / glt_erst, per_zweit = zweit / glt_zweit)
df %>% mutate(per_erst = round(per_erst, 3) * 100) -> df
df %>% mutate(per_zweit = round(per_zweit, 3) * 100) -> df
# Generate lag Variables
df$btw_zweit <- as.numeric(df$btw_zweit)
df <- group_by(df, wkr_nummer, party) %>% arrange(year) %>%
mutate(btw_l1 = lag(btw_zweit, 1),
btw_l2 = lag(btw_zweit, 2),
btw_l3 = lag(btw_zweit, 3)
)
df <- group_by(df, wkr_nummer, party) %>% arrange(year) %>%
mutate(erst_l1 = lag(per_erst, 1)
)
# Save
save(df, file = "data/uniform_data_complete.RData")
# Order Data
df %>% select(wkr_nummer, year, election_id, party, k_idname,
per_erst, erst_l1, k_winner,
wkr_name, wkr_new, wkr_change, wkr_nr2017, bula,
bula_ost,
k_vname, k_nname, k_inc, k_platz,
party_inc, per_zweit, btw_zweit,
btw_l1, btw_l2, btw_l3) -> btw_data
btw_data <- arrange(btw_data, year, wkr_nummer, party)
# Generate Swing Variable
btw_data <- mutate(btw_data, btw_swing = btw_zweit - btw_l1)
# Split Sample
btw_data %>% filter(between(year, 2005, 2013)) -> btw0513
btw_data %>% filter(year == 2017) -> btw2017
# save
save(btw0513, file = "data/btw0513.RData")
# Restrict sample to the two most competitive candidates
btw0513_2k <- group_by(btw0513, year, wkr_nummer) %>% arrange(desc(per_erst)) %>% mutate(rank = seq_along(per_erst))
btw0513_2k <- filter(btw0513_2k, rank <= 2)
btw0513_2k <- arrange(btw0513_2k, year, wkr_nummer, party)
# save
save(btw0513_2k, file = "data/btw0513_2k.RData")
### Uniform Swing Model ------------------
# Model
model_out <- lm(per_erst ~ erst_l1 + btw_swing - 1, data = btw0513)
summary(model_out)
# >Comment:
# Adj. R-squared of simple uniform swing model is 0.9745.
# How much room for improvement does this leave for our
# gtrends approach?
# <
# Evaluate Fit
model_out_fit <- augment(model_out)
model_out_fit$party <- btw0513$party[as.numeric(model_out_fit$.rownames)]
model_out_fit$year <- btw0513$year[as.numeric(model_out_fit$.rownames)]
model_out_fit$wkr <- btw0513$wkr_nummer[as.numeric(model_out_fit$.rownames)]
# MAE
mean(abs(model_out_fit$.resid))
group_by(model_out_fit, year, wkr) %>% summarize(mae = mean(abs(.resid)))
group_by(model_out_fit, year, party) %>% summarize(mae = mean(abs(.resid)))
# Plot
plot(model_out_fit$.fitted, model_out_fit$per_erst, cex = .5, pch = 20)
text(model_out_fit$.fitted, model_out_fit$per_erst, paste0(model_out_fit$party, str_sub(as.character(model_out_fit$year), -2, -1)), pos = 3, offset = .15, cex = .6)
grid()
abline(0, 1)
# >Working Points:
# Extend simpel uniform swing model (i.e including incumbency, pioneer status etc.)
# Run out-of-sample checks
# Build graphs
# Identify subsample of most competitive constituencies
### 2017 Forecast ------------------
# Forecast from http://zweitstimme.org , accessed 01.08.2017
forecast_2017 <- c(37.4, 37.4, 25.5, 7.9, 7.9, 8.7)
party <- c("CDU", "CSU", "SPD", "FDP", "GRU", "PDS")
year <- "2017"
forecast <- data.frame(party = party,
btw17_zweitstimme_org = forecast_2017,
year = year)
# per_erst = pastvoteshare + national-level vote swing
btw2017 <- merge(btw2017, forecast, by = c("year", "party"), all.x = TRUE)
btw2017 %>% mutate(btw_swing = btw17_zweitstimme_org - btw_l1) -> btw2017
# btw2017 %>% mutate(per_erst = erst_l1 + btw_swing) -> btw2017
model_out <- lm(per_erst ~ erst_l1 + btw_swing - 1, data = btw0513)
btw2017 <- augment(model_out, newdata = btw2017)
(predict_conf <- predict(model_out, btw2017, se.fit = TRUE, interval = "confidence"))
(predict_pred <- predict(model_out, btw2017, se.fit = TRUE, interval = "prediction"))
# Where's the difference between 'conf' and 'pred'?
# Sort Data
btw2017 <- arrange(btw2017, year, wkr_nummer, party)
btw2017 %>%
mutate(.fitted = round(.fitted, 1),
.se.fit = round(.se.fit, 2)) -> btw2017
# Mark winning candidate
btw2017 <- group_by(btw2017, year, wkr_nummer) %>% arrange(desc(.fitted)) %>% mutate(rank = seq_along(.fitted))
btw2017 <- arrange(btw2017, year, wkr_nummer, party)
# Save forecast Data as btw17_forecast
save(btw2017, file = "data/btw17_forecast.RData")
### Confidence Bounds ------------------
# # manual computation of standard error used for prediction interval
# se_pred <- sqrt(predict_pred$se.fit^2+sum((model_out$residuals^2 / model_out$df.residual))) # see http://stats.stackexchange.com/questions/154247/what-are-the-formulae-used-in-r-by-predict-lm-when-interval-a-none-b-pred
#
# conf_fit <- data.frame(fit = predict_pred$fit[,1],
# lwr = predict_pred$fit[,1] + qt(0.025, predict_pred$df) * predict_pred$se.fit,
# upr = predict_pred$fit[,1] - qt(0.025, predict_pred$df) * predict_pred$se.fit
# )
# conf_fit
#
# pred_fit <- data.frame(fit = predict_pred$fit[,1],
# lwr = predict_pred$fit[,1] + qt(0.025, predict_pred$df) * se_pred,
# upr = predict_pred$fit[,1] - qt(0.025, predict_pred$df) * se_pred
# )
# pred_fit
#
# # plot forecast
# pred_fit$party <- dat_2017$party
# preds_df <- arrange(pred_fit, fit)
# preds_df$partyrank <- seq_along(preds_df$party)
# preds_df$partyname <- recode_partynames(preds_df$party)
#
# par(mar=c(3.5,7,0,3)+.1)
# par(oma=c(0,0,0,0)+.1)
# plot(preds_df$fit, preds_df$partyrank, ylim = c(0.5, 7.5), xlim = c(0, 45), xaxt = "n", yaxt = "n", ylab = "", xlab = "", pch = 20)
# axis(1, seq(0, 45, 5), seq(0, 45, 5))
# axis(1, mean(c(0, 45)), "Forecasted vote share (%)", line = 1, tick = F)
# axis(2, preds_df$partyrank, labels = preds_df$partyname, las = 1, tick = F)
# axis(4, preds_df$partyrank, labels = paste0(format(preds_df$fit, digits = 2, trim = TRUE), "%")
# , line = 1.5, tick = F,las = 2, hadj = 1)
#
# abline(v = seq(0, 45, 5), col = "darkgrey", lty = 2)
# for (i in preds_df$partyrank){
# lines(x=c(preds_df$lwr[i],preds_df$upr[i]), y=c(i,i), lwd = 1)
# }
### Explorative / Descriptive Analysis ------------------
# # past vote share
# plot(btw0517$erst_l1, btw0517$per_erst, xaxt = "n", yaxt = "n", xlab = "", ylab = "", main = "(a)", xlim = c(0, .8), ylim = c(0, .8))
# axis(1, seq(0, 1, 0.10), seq(0, 1, 0.10))
# axis(1, 0.5, "previous vote share (%)", line = 1, tick = F)
# axis(2, seq(0, 1, 0.10), seq(0, 1, 0.10))
# axis(2, 0.5, "vote share (%)", line = 1, tick = F)
#
#
# # run model, add regression line
# model_out <- lm(per_erst ~ erst_l1 - 1, data = btw0517)
# model_out_aug <- augment(model_out)
# model_out_aug$case_label <- paste(btw0517$wkr_nummer, btw0517$year, btw0517$party, sep = "_") %>% .[model_out_aug$.rownames %>% num()]
# abline(model_out, lty = 2)
#
#
# # identify important outliers
# obs_id <- abs(model_out_aug$.std.resid) > 1.53
# points(model_out_aug$erst_l1[obs_id], model_out_aug$per_erst[obs_id], pch = 20)
#
# # plot labels of outliers based on resid or cooksd
# label_position <- ifelse(model_out_aug$.resid > 0, 3, 1)
# text(model_out_aug$erst_l1[obs_id], model_out_aug$per_erst[obs_id], label = model_out_aug$case_label[obs_id], cex = .7, pos = label_position[obs_id], offset = .47)
# grid()
# ### Out-of-Sample Checks ------------------
#
# # prepare formula
# vars <- c("erst_l1", "swing")
# fmla <- as.formula(paste("per_erst ~ ", paste(vars, collapse= "+")))
#
# # run out-of-sample predictions
# model_out <- list()
# model_pred <- list()
# for(i in seq_along(years)) {
# insample <- filter(ger_df_long, year != election_years[i])
# outsample <- filter(ger_df_long, year == election_years[i])
# model_out[[i]] <- lm(fmla, data = insample)
# model_pred[[i]] <- augment(model_out[[i]], newdata = outsample, type.predict = "response")
# }
#
# # evaluate fit
# model_pred_df <- do.call(rbind, model_pred)
# mean(abs(model_pred_df$voteshare - model_pred_df$.fitted), na.rm = TRUE)
# group_by(model_pred_df, party) %>% summarize(mae = mean(abs(voteshare - .fitted), na.rm = TRUE))
# plot(model_pred_df$.fitted, model_pred_df$voteshare, cex = .5, pch = 20)
# text(model_pred_df$.fitted, model_pred_df$voteshare, paste0(model_pred_df$party, str_sub(as.character(model_pred_df$year), -2, -1)), pos = 3, offset = .15, cex = .6)
# grid()
# abline(0, 1)
# End
|
995c9ed9c4326acbf99caad6423c7cecf1496ae6
|
35098889de63b37c470a409d5f06d6fec10d0c70
|
/R-main/02-move-figures.R
|
c4d04fd531bb9c696ae9a380e48f03f28bcd639c
|
[] |
no_license
|
eriqande/gaiah-wiwa
|
3cd206d059fba73c0a965036877a5130c77e9953
|
8d3056a10256fb0422d5e8e2718a774e8567b682
|
refs/heads/master
| 2023-05-15T10:05:23.103418
| 2023-04-27T15:19:17
| 2023-04-27T15:19:17
| 79,846,863
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 785
|
r
|
02-move-figures.R
|
# just simple code to move figures created in 01-wiwa-analysis.R
# to where they need to be for LaTeXing the document
#### DEFINE PATHS: You must do this for your own installation ####
figoutpath <- "../../git-overleaf-repos/gaiah-overleaf/figures/"
figinpath <- "outputs/figures"
#### NAME FIGS TO MOVE ####
paper_figs <- c("pmgcd_altogether.pdf", "pmgcd_boxplots.pdf", "rem_mig_dist_fig.pdf", "figure1.pdf")
# NOTE(review): paper_figs_full is computed but never used below
paper_figs_full <- file.path(figinpath, paper_figs)
#### PDF CROP THINGS: This requires a system call to pdfcrop ####
# builds and runs: cd <figinpath>; for i in <figs>; do pdfcrop $i; done
# (requires a Unix-like shell and pdfcrop on the PATH)
call <- paste("cd ", figinpath, "; for i in ", paste(paper_figs, collapse = " "), "; do pdfcrop $i; done")
system(call)
#### THEN MOVE all the -crop.pdf things ####
# move every *-crop.pdf produced above into the overleaf figures directory
call <- paste("mv ", figinpath, "/*-crop.pdf ", figoutpath, sep = "")
system(call)
|
bf05ad09ceeb5bf8be47f8399777821d9f6ce6cc
|
9aafde089eb3d8bba05aec912e61fbd9fb84bd49
|
/codeml_files/newick_trees_processed/11984_0/rinput.R
|
0032ddcaf61da7cea0b3e25748be5b458f861797
|
[] |
no_license
|
DaniBoo/cyanobacteria_project
|
6a816bb0ccf285842b61bfd3612c176f5877a1fb
|
be08ff723284b0c38f9c758d3e250c664bbfbf3b
|
refs/heads/master
| 2021-01-25T05:28:00.686474
| 2013-03-23T15:09:39
| 2013-03-23T15:09:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 137
|
r
|
rinput.R
|
# Unroot a phylogenetic tree for downstream codeml analysis: read the Newick
# tree, remove the root with ape::unroot(), and write the unrooted version
# next to the input file.
library(ape)
testtree <- read.tree("11984_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="11984_0_unrooted.txt")
|
fb28e887113db761bfea299c51ee669ee51b7b6a
|
ddd5dd6898d18fa111a54cfa7a130b7bc1b8718a
|
/R/scale_x_tickr.R
|
cd2112021d01c128b4f967b72552b883c4c7a18b
|
[
"MIT"
] |
permissive
|
ben-williams/funcr
|
ed30c0863aabdc9624ae6506100bc9ceb3eae65b
|
c828d4f464d2d6921644352cc3e4fd6891e9b9fb
|
refs/heads/master
| 2021-07-09T05:09:22.053009
| 2020-11-18T15:13:10
| 2020-11-18T15:13:10
| 214,511,958
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 595
|
r
|
scale_x_tickr.R
|
#' Adjust x-axis tick marks and labels
#'
#' Computes tick breaks and labels via the package helper tickr() and returns
#' a ggplot2 scale_x_continuous() using them.
#'
#' @param data = input dataframe
#' @param var = variable of interest e.g., year
#' @param to = step increase desired e.g., every 5 years
#' @param start = adjust the start value
#' @param end = adjust the end value
#' @param min = lowest value to label
#'
#' @return a ggplot2 continuous x scale with the computed breaks and labels
#' @export scale_x_tickr
#'
#' @examples
scale_x_tickr <- function(data, var, to = 5, start = NULL, end = NULL, min = NULL){
# tickr() is assumed to return a list with $breaks and $labels — defined elsewhere in the package
tickr(data, {{var}}, to, start, end, min) -> tick
scale_x_continuous(breaks = tick$breaks,
labels = tick$labels)
}
|
48937f9a5271a02dddbe91dcc269b8aa2dcc272d
|
e1b73386edd9b767153192f4b359f0d1e98b33b2
|
/man/IdentifyStructure.Rd
|
dfee740158995e9de0cb00627158d523679811bf
|
[
"MIT"
] |
permissive
|
RodolfoPelinson/AtlanticForestMetacommunity
|
28200f5e7964c922cd02f04efcc4e579271ce5d5
|
be954ca5273ebe3673ec8eb58987b0c0a57e9450
|
refs/heads/master
| 2023-08-08T15:50:58.715554
| 2023-07-27T18:40:40
| 2023-07-27T18:40:40
| 355,654,618
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,016
|
rd
|
IdentifyStructure.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/IdentifyStructure_function.R
\name{IdentifyStructure}
\alias{IdentifyStructure}
\title{Run the EMS analysis and identify the idealized structure}
\usage{
IdentifyStructure(
comm,
names = NULL,
scores = 1,
CoherenceMethod = "curveball",
turnoverMethod = "EMS",
sims = 1000,
order = T,
orderNulls = F,
seed = NULL,
fill = T,
round = NULL,
elapsed_time = TRUE
)
}
\arguments{
\item{comm}{A list of incidence matrices (i.e. metacommunities).}
\item{names}{The names of the metacommunities. A vector with the same length as the list of metacommunities.}
\item{scores}{Numeric. Which axis of the CA should be used for ordination? (Default is 1)}
\item{CoherenceMethod}{null model randomization method used by 'nullmaker' to compute Coherence. See the Coherence function from package metacom. (default is "curveball")}
\item{turnoverMethod}{null model randomization method used by 'nullmaker' or 'EMS' to use the approach outlined in Leibold and Mikkelson 2002. See the Turnover function from package metacom. (default is "EMS")}
\item{sims}{Number of randomizations (default is 1000)}
\item{order}{Should the original matrix be ordered by reciprocal averaging?}
\item{orderNulls}{Should the null communities be ordered? (default is FALSE)}
\item{seed}{seed for simulating the null model. Null matrices should be repeatable.}
\item{fill}{should embedded absences be filled before the statistics are calculated? (default is TRUE)}
\item{round}{Numeric. Should numeric results be rounded? If so, how many digits? Default is set to NULL.}
\item{elapsed_time}{Logical. Should a message with the elapsed time be returned?}
}
\description{
This function simply runs the functions Coherence(), Turnover() and BoundaryClump() from package metacom and automatically identifies the idealized structure according to Presley et al. 2010 "A comprehensive framework for the evaluation of metacommunity structure". Oikos 119 (6).
}
|
b92b3b737e3e855f8bd899187e5a23fb95672bfd
|
93ea1e4f53ec560d4f7d9f799de2508485e6afa3
|
/Ominer_miRNA_workflow/generate_venn.R
|
2093a4e71d5ec2f0eb305fcd5fdbe806953e63aa
|
[] |
no_license
|
JMarzec/Ominer
|
da8bfe1fcbac4fbcc09da8f2cefb69d76fc263ad
|
35c9b0e7869c96e2e892c441432c03a05d570393
|
refs/heads/master
| 2021-04-26T22:19:05.060397
| 2017-08-08T12:24:22
| 2017-08-08T12:24:22
| 124,069,488
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,582
|
r
|
generate_venn.R
|
##########################################################################################
#
#File name: generate_venn.R
#Authors: Ajanthah Sangaralingam (a.sangaralingam@qmul.ac.uk)
#
#
#
#Barts Cancer Institute
#Queen Mary, University of London
#Charterhouse Square, London EC1M 6BQ
##########################################################################################
##########################################################################################
#Description: function to generate a venn diagram for up to four different comparisons
#ebA.txt - is a file generated from running limma analysis giving the number(s) of genes up and down regulated both between and within the different biological groups
#project is the given project name
################################################################################################KM plots - overall survival from a target file generates .txt file and KM plots for 5,10 and 15 years
######
generate_venn <- function (ebA,project)
{
	# Draw Venn diagrams summarising overlap of differentially expressed probes
	# across up to four comparisons produced by a limma analysis.
	#
	# Args:
	#   ebA:     file name of the limma decideTests table (one column per
	#            comparison) found under ominer_results/<project>/DifferentialExpression/.
	#   project: project name used to locate the input and output directories.
	#
	# Side effects: writes vennDiagram.png, vennDiagram_up.png,
	# vennDiagram_down.png and vennDiagram.pdf into ominer_results/<project>/cluster/.
	readdir <- paste("ominer_results",project,sep="/")
	ebA.decideTests <- read.table(paste(readdir,"/","DifferentialExpression/",ebA,sep=""),sep="\t", as.is = T,
		header = T, strip.white = T)
	comps = colnames(ebA.decideTests)
	source("Venn.R")  # provides vennCounts()/vennDiagram()
	# Helper: path of an output file inside the project's cluster directory.
	out_path <- function(filename) {
		paste("ominer_results/",project,"/","cluster","/",filename,sep="")
	}
	# Helper: draw one Venn diagram for the requested direction of change.
	draw_venn <- function(include, main) {
		par(mar = c(5, 3, 1, 1))
		a <- vennCounts(ebA.decideTests[, 1:length(comps)], include = include)
		vennDiagram(a, main = main)
	}
	png(out_path("vennDiagram.png"))
	draw_venn("both", "Results for all probes")
	dev.off()
	png(out_path("vennDiagram_up.png"))
	draw_venn("up", "Results for up-regulated probes")
	dev.off()
	# BUGFIX: the original concatenated an extra "/" here, producing
	# "cluster//vennDiagram_down.png"; now consistent with the other outputs.
	png(out_path("vennDiagram_down.png"))
	draw_venn("down", "Results for down-regulated probes")
	dev.off()
	# PDF version: all three diagrams on consecutive pages.
	pdf(out_path("vennDiagram.pdf"))
	a <- vennCounts(ebA.decideTests[, 1:length(comps)])
	vennDiagram(a, main = "Results for all probes")
	draw_venn("up", "Results for up-regulated probes")
	draw_venn("down", "Results for down-regulated probes")
	dev.off()
}
|
956dda457c143c214276d256f8756d195d87434a
|
57e2c05ea9069edb9c8becc57997adfaa70646e7
|
/leoniGeneComparisons.R
|
d1457d777aab5b8441ec98c62e1c59fd0d197034
|
[
"MIT"
] |
permissive
|
uhkniazi/Scratch
|
b399c1aff87a712d5fcc80f3ec99fae613463fdd
|
b3157e5f54179753ed6d77a47ff9ebebce1d4cd6
|
refs/heads/master
| 2022-05-20T09:53:07.761740
| 2022-05-04T12:50:23
| 2022-05-04T12:50:23
| 92,270,968
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,431
|
r
|
leoniGeneComparisons.R
|
# leoniGeneComparisons.R
# Subsets three time-point expression tables (week 3, 8, 11) to a list of
# selected genes, reshapes them for a lattice plot, and exports the subset.
# NOTE(review): `genesIn`, `week.3`, `week.8` and `week.11` are not defined in
# this file — presumably loaded by an earlier script; confirm before running.
# Sanity check: how many rows of week.3 match the first requested gene?
table(grepl(pattern = genesIn[1], week.3$gene, ignore.case = T))
# Exact (anchored, case-insensitive) match of each requested gene per table.
s = sapply(seq_along(genesIn), function(x) grep(pattern = paste0("^", genesIn[x], "$"), week.3$gene, ignore.case = T))
week.3.sub = week.3[unlist(s),]
s = sapply(seq_along(genesIn), function(x) grep(pattern = paste0("^", genesIn[x], "$"), week.8$gene, ignore.case = T))
week.8.sub = week.8[unlist(s),]
s = sapply(seq_along(genesIn), function(x) grep(pattern = paste0("^", genesIn[x], "$"), week.11$gene, ignore.case = T))
week.11.sub = week.11[unlist(s),]
# Visual check (printed, not asserted): gene order must agree across weeks.
identical(week.3.sub$gene, week.8.sub$gene)
identical(week.3.sub$gene, week.11.sub$gene)
# Reshape each week to long format: one row per (genotype, gene) with a
# `time` column; wt = value_1, tg = value_2 (column meaning assumed — verify).
w3 = data.frame(wt=week.3.sub$value_1, tg=week.3.sub$value_2)
w3 = stack(w3)
w3$gene = week.3.sub$gene
w3$time = 3
w8 = data.frame(wt=week.8.sub$value_1, tg=week.8.sub$value_2)
w8 = stack(w8)
w8$gene = week.8.sub$gene
w8$time = 8
w11 = data.frame(wt=week.11.sub$value_1, tg=week.11.sub$value_2)
w11 = stack(w11)
w11$gene = week.11.sub$gene
w11$time = 11
dfData = rbind(w3, w8, w11)
# Log transform with +0.5 offset so zero counts stay finite.
dfData$values = log(dfData$values+0.5)
library(lattice)
# One panel per gene: log average vs time, grouped/coloured by genotype,
# with grid, regression line and points ('g', 'r', 'p').
xyplot(values ~ time | gene, data=dfData, groups=ind, pch=20, col=1:2,
key=list(columns=2, text=list(levels(dfData$ind)), points=list(pch=20, col=1:2)),
type=c('g', 'r', 'p'), ylab='Log Average', xlab='Time Weeks')
# Export the stacked per-week subsets for downstream use.
dfExport = rbind(week.3.sub, week.8.sub, week.11.sub)
write.csv(dfExport, file='dataExternal/leoni/selectedGenes.csv')
|
acbb8aaca29c31f99fb3da6f8f281845922d940b
|
b8c4095d11d3512c21e7ff9d1c77b09400b3f6c6
|
/Módulo 6 - Bancos de Dadps (SQLite)/SQLite_no_R.R
|
6b940f350be583c02d502163d53b396c64b265d8
|
[
"MIT"
] |
permissive
|
andreljunior/R-projects
|
b8987fb5cd5de9b18cf5a3a304a7054c4fe4a342
|
3dbe341caa851bc88bbc8349f27d753da1d9ef94
|
refs/heads/master
| 2022-11-30T01:23:28.188925
| 2020-08-10T14:05:32
| 2020-08-10T14:05:32
| 284,563,186
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 393
|
r
|
SQLite_no_R.R
|
library(RSQLite)
# Open (or create) the SQLite database file holding the portfolio data.
db <- dbConnect(dbDriver("SQLite"), "carteira.sqlite")
db
# Fetch every stock record for a single date into a data frame.
estoque <- dbGetQuery(db, "SELECT * FROM Estoque WHERE data = '2018-06-15'")
estoque
str(estoque)
# Insert one new stock row; dbExecute() returns the number of affected rows.
afetadas <- dbExecute(db,"INSERT INTO Estoque (preco,quantidade,vencimento,papel,data)VALUES (3150.208525,540,'2040-08-15','NTN-C','2018-06-15')")
afetadas
# Delete the row with id 6, again reporting the affected-row count.
afetadas <- dbExecute(db,"DELETE FROM Estoque WHERE id = 6")
afetadas
|
b99389efa55d5e344ca79d2fbda52ca5f6b5c189
|
604655aa047641f92f72454b7524333f0661409e
|
/man/getDateOffset.Rd
|
79de15fe0d31c379466611198b6da34d1a3c77f6
|
[] |
no_license
|
troyhill/TTFF
|
dcbb03563cf8b99a23c525dd56bfdfce0ac4db91
|
b4fb6f1f0e2094b4b4bb846798c7567a8090b0a4
|
refs/heads/master
| 2021-11-27T17:57:06.130433
| 2021-08-04T19:27:16
| 2021-08-04T19:27:16
| 175,022,979
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,038
|
rd
|
getDateOffset.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getDateOffset.R
\name{getDateOffset}
\alias{getDateOffset}
\title{getDateOffset}
\usage{
getDateOffset(dataset, dateColumn = "date", day = "Friday")
}
\arguments{
\item{dataset}{input dataframe}
\item{dateColumn}{name of column with datestamp (Date or POSIX data type)}
\item{day}{character element specifying the first day of each week (default = "Friday", purely to match SFWMD's arbitrary choice)}
}
\value{
a vector of values
}
\description{
Identify the number of days needed to convert between a week starting on "Monday" and the desired start day.
}
\examples{
dateDat <- data.frame(date = seq.Date(from = Sys.Date() - 100, to = Sys.Date(), by = 1))
dateDat$dow <- weekdays(dateDat$date)
dateDat$week.R <- format(as.Date(dateDat$date), "\%Y-\%W")
head(dateDat, 10)
dateOffset <- getDateOffset(dateDat, day = "Friday")
dateDat$week.new <- format(as.Date(dateDat$date) - dateOffset, "\%Y-\%W")
head(dateDat, 10)
}
|
e81bc1b862927ea386e9b7a95f930b7bb67ab3a1
|
1e93457070996e0cc20cbcfe261047dfc5957e0b
|
/demo/boosting.R
|
94db35f9be914d38e3f04145b40aab8aab79c2dd
|
[] |
no_license
|
caojilin/stat154
|
b8aeb74758122bfc293acff3a49e50adce969b74
|
e405f16f13aa30421646e7b8df3856d551d42e47
|
refs/heads/master
| 2020-05-14T14:23:51.399836
| 2019-06-11T16:22:23
| 2019-06-11T16:22:23
| 181,831,569
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,291
|
r
|
boosting.R
|
library(gbm)
library(MASS)
library(glmnet)
library(spatstat)
# Compare boosting, linear regression and ridge regression on the Boston
# housing data (response: medv). NOTE(review): statement order matters —
# every random draw consumes the RNG stream seeded below.
#boosting
set.seed(1)
# 50/50 train/test split of the Boston rows.
train = sample (1: nrow(Boston ), nrow(Boston )/2)
boston.test=Boston[-train ,"medv"]
# Gradient boosting: 5000 trees, depth-4 interactions, squared-error loss.
boost.boston = gbm(medv ~ ., data=Boston[train,], distribution="gaussian", n.trees = 5000,
interaction.depth = 4)
# Relative influence of each predictor.
summary (boost.boston)
# Partial dependence plots for the two most influential predictors.
plot(boost.boston ,i="rm")
plot(boost.boston ,i="lstat")
yhat.boost = predict(boost.boston ,newdata=Boston[-train ,],
n.trees =5000)
# Test-set MSE for boosting.
mean((yhat.boost -boston.test)^2)
# linear regression
lm.boston = lm(medv ~., data=Boston[train,])
yhat.lm = predict(lm.boston, Boston[-train,])
# Test-set MSE for ordinary least squares.
mean((yhat.lm -boston.test)^2)
#lasso and ridge
# Drop column 14 (medv, the response) to form the predictor matrix.
training = Boston[train,-14]
# feature_classes <- sapply(names(training),function(x){class(training[[x]])})
# categorical_feats <- names(feature_classes[feature_classes == "integer"])
# training[categorical_feats] = factor(training[categorical_feats])
# dummify() (spatstat) converts the data frame to a numeric design matrix.
training = dummify(training)
training.label = Boston[train, "medv"]
testing = dummify(Boston[-train,])
# Cross-validate ridge (alpha=0), then refit at a fixed lambda.
# NOTE(review): lambda = 0.7604016 is hard-coded from a previous CV run and
# the final MSE is computed on the TRAINING data, not the test set — verify.
fit.ridge= cv.glmnet(training, training.label, alpha=0, family="gaussian",type.measure = "mse")
fit.ridge = glmnet(training, training.label, alpha=0, family="gaussian",lambda = 0.7604016)
y_hat = predict(fit.ridge, training)
mean((y_hat - training.label)^2)
|
e8816a979f8d4088147393023838304952d1f874
|
4d7bdf931cafae81679072816c0f1b03efaf63a8
|
/tests/testthat/test-timeconversion.R
|
b3ff6a8fc4b719e57603f3d74550c66ffdc882dd
|
[] |
no_license
|
CC-HIC/ccanonym
|
b8826f815758c32ac169629cf646f40571d7ba79
|
09a2e03607932e08db5bcd371e1eefb9baee2923
|
refs/heads/master
| 2021-03-22T07:41:27.297340
| 2018-01-22T10:52:18
| 2018-01-22T10:52:18
| 66,528,436
| 1
| 1
| null | 2018-01-22T10:52:19
| 2016-08-25T05:43:02
|
R
|
UTF-8
|
R
| false
| false
| 1,333
|
r
|
test-timeconversion.R
|
context("Date/time to numeric conversion")
# Round-trip test: date/time fields converted to numeric (seconds since the
# Unix epoch) by convert.numeric.datetime() must convert back unchanged via
# convert.back.datetime(). Fields are discovered from cleanEHR's item
# reference table by declared Datatype.
test_that("convert to numeric", {
dtype <- sapply(cleanEHR:::ITEM_REF, function(x) x$Datatype)
tdate <- dtype[dtype == "date"]
ttime <- dtype[dtype == "time"]
tdatetime <- dtype[dtype == "date/time"]
# Build a one-row data frame whose columns are the short names of the given
# fields, every cell holding the same literal value `val`.
create.test.data <- function(hic_code, val) {
stname <- code2stname(names(hic_code))
rep.data <- data.frame(t(rep(val, length(stname))), stringsAsFactors=F)
names(rep.data) <- stname
return(rep.data)
}
# Pure dates: all columns convert to the same number, which maps back to
# the original calendar date.
test_data <- create.test.data(tdate, "2010-01-01")
result <- convert.numeric.datetime(test_data)
expect_equal(length(unique(t(result))), 1)
expect_equal(as.POSIXct("2010-01-01"), as.POSIXct(as.numeric(result[1]), origin="1970-01-01"))
expect_equivalent(convert.back.datetime(result), test_data)
# Time-of-day fields: conversion must be uniform, NA-free, and reversible.
test_data <- create.test.data(ttime, "00:00")
result <- convert.numeric.datetime(test_data)
expect_equal(length(unique(t(result))), 1)
expect_true(!NA %in% result)
expect_equivalent(convert.back.datetime(result), test_data)
# Full ISO date/time stamps: same three properties.
test_data <- create.test.data(tdatetime, "2010-01-01T00:00:00")
result <- convert.numeric.datetime(test_data)
expect_equal(length(unique(t(result))), 1)
expect_true(!NA %in% result)
expect_equivalent(convert.back.datetime(result), test_data)
})
|
424b3a5606f4067e627f32f759d291bf60d1ce83
|
796aa6d8e9f42024946275b2cc52006e7f487a1d
|
/man/MultiLociPOE.Rd
|
7d2a00dcb64953ce8247c7676459cec18e11377f
|
[] |
no_license
|
proman666/CCMO
|
9108d9954e46b4423c200bd9202378e172506dd3
|
1ab0f40555bd56e2ac9f5fab69a51d3b3a0c3cea
|
refs/heads/master
| 2023-03-12T11:39:37.737973
| 2021-03-03T01:12:24
| 2021-03-03T01:12:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,401
|
rd
|
MultiLociPOE.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CCMO.R
\name{MultiLociPOE}
\alias{MultiLociPOE}
\title{POEs analysis using multi-locus genotype data from case-control mother-offspring pairs}
\usage{
MultiLociPOE(Y, gmm, gcc, X, loci, hap, f, ppi)
}
\arguments{
\item{Y}{a \code{n}-vector of disease statuses for \code{n} offspring (1 for case and 0 for control).}
\item{gmm}{a \code{n} x \code{m} matrix of genotypes for mothers (\code{n}: number of mothers; \code{m}: number of SNPs). The possible values should be 0, 1, 2.}
\item{gcc}{a \code{n} x \code{m} matrix of genotypes for offspring (\code{n}: number of offspring; \code{m}: number of SNPs). The possible values should be 0, 1, 2.}
\item{X}{a \code{n}-vector of maternal covariates for main effects (\code{n}: number of mothers).}
\item{loci}{an indicator for the test locus. The possible values could be 1,...,\code{m} (\code{m}: number of SNPs).}
\item{hap}{a \code{l} x \code{m} matrix of possible haplotypes in the population of interest (\code{l}: number of possible haplotypes; \code{m}: number of SNPs).}
\item{f}{specified disease prevalence.}
\item{ppi}{a \code{l}-vector of the corresponding haplotype frequencies (\code{l}: number of possible haplotypes).}
}
\value{
a list with the following elements
\item{\code{new}}{estimation and significance test results for the new method M-HAP}
\item{\code{log}}{estimation and significance test results for the standard logistic regression method}
\item{\code{cov.new}}{covariance matrix of the estimated parameters by the new method M-HAP}
\item{\code{cov.log}}{covariance matrix of the estimated parameters by the standard logistic regression method)}
}
\description{
This function implements the statistical method M-HAP for detecting POEs using genotype data from case-control mother-offspring pairs by adjusting for covariates. M-HAP utilizes available information such as Medelian inheritance law, Hardy-Weinberg equilibrium, and conditional independence between offspring genotypes and maternal covariates given maternal genotypes.
}
\examples{
\dontrun{
data(POESampleData)
Y = POESampleData[,1]
gmm = POESampleData[,2:6]
gcc = POESampleData[,7:11]
X = POESampleData[,12]
loci = 1
f = 0.01
data = MultiLociPOE.input(gmm,gcc,0)
gmm = data$gmm
gcc = data$gcc
hap = data$hap
ppi = data$ppi
fit = MultiLociPOE(Y,gmm,gcc,X,loci,hap,f,ppi)
}
}
|
bab5d142d410f85bc7040a342f2cb6dfa71a3d21
|
ede2f028adc9ca3f7b170d2779b836124918c1dd
|
/man/cluscomp.Rd
|
e144572f24bbde56a8cc3df06f922b849763e3a1
|
[] |
no_license
|
biomedicalinformaticsgroup/clusterCons
|
7167a224ba1b926c87f5ce99fd6a5211be09e940
|
114dc338d8919825e2c52e4db8a84c1da2cef2d3
|
refs/heads/main
| 2023-04-11T22:57:27.135255
| 2022-02-22T21:17:55
| 2022-02-22T21:17:55
| 461,762,921
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,078
|
rd
|
cluscomp.Rd
|
\name{cluscomp}
\alias{cluscomp}
\title{Perform consensus clustering with the option of using multiple algorithms and parameters and merging}
\description{
Calculates an NxN consensus matrix for each clustering experiment performed where each entry has a value between 0 (never observed) and 1 (always observed)\cr
When running with more than one algorithm or with the same algorithm and multiple conditions a consensus matrix will be generated for each.
These can optionally be merged into a \code{\link{mergematrix}} by cluster number by setting merge=1.\cr
}
\usage{
cluscomp(
x,
diss=FALSE,
algorithms = list("kmeans"),
alparams = list(),
alweights = list(),
clmin = 2,
clmax = 10,
prop = 0.8,
reps = 50,
merge = 0
)
}
\arguments{
\item{x}{
data.frame of numerical data with conditions as the column names and unique ids as the row names. All variables must be numeric. Missing values(NAs) are not allowed.
Optionally you can pass a distance matrix directly, in which case you must ensure that the distance matrix is a data.frame and that the row and column names match each other (as the
distance matrix is a pair-wise distance calculation).
}
\item{diss}{
set to TRUE if you are providing a distance matrix, default is FALSE
}
\item{algorithms}{
list of algorithm names which can be drawn from 'agnes','diana','pam','kmeans' or 'hclust'. The user can also write a simple wrapper for any other clustering method (see details)
}
\item{alparams}{
list of algorithm paramter lists using the same specification as for the individual algorithm called (see details)
}
\item{alweights}{
list of integer weights for each algorithm (only used when merging consensus results between algorithms)
}
\item{clmin}{
integer for the smallest cluster number to consider
}
\item{clmax}{
integer for the largest cluster number to consider
}
\item{prop}{
numeric for the proportion of rows to sample during the process. Must be between 0 and 1
}
\item{reps}{
integer for the number of iterations to perform per clustering
}
\item{merge}{
an integer indicating whether you also want the merged matrices (1) or just the consensus ones (0), accepts only 1 or 0.
}
}
\details{
\code{\link{cluscomp}} is an implementation of a consensus clustering methodology first proposed by Monti et al. (2003) in which the connectivity between any two members of a data matrix is tested by resampling statistics. The principle is that by only sampling a random proportion of rows in the data matrix and performing many clustering experiments we can capture information about the robustness of the clusters identified by the full unsampled clustering result.
For each re-sampling experiment run a zero square matrix is created with identical rows and columns matching the unique ids of the rows of the data matrix, this matrix is called the connectivity matrix. A second identically sized matrix is created to count the number of times that any pair of row ids are called in any one re-sampled clustering. This matrix is called the identity matrix. For each iteration within the experiment the rows sampled are recorded in the identity matrix and then the co-occurrence of all pairs is recorded in the connectivity matrix. These values are incremented for each iteration until finally a consensus matrix is generated by dividing the connectivity matrix by the identity matrix.
The consensus matrix is the raw output from \code{\link{cluscomp}} implemented as a class
\code{\link{consmatrix}}. If the user has specified to return a merged matrix in addition to the consensus
matrices then for each clustering with the same k (cluster number value) an object of class \code{\link{mergematrix}} is also
returned in the list which is identical to a \code{\link{consmatrix}} with the exception that the
'cm' slot is occupied by the merged matrix (a weighted average of all the consensus matrices for
the cluster number matched consensus matrices) and there is no reference matrix slot (as there is no
reference clustering for the merge). The user should instead call the \code{\link{memrob}}
function using the merge matrix and providing a reference matrix from one of the cluster number
matched \code{\link{consmatrix}} objects from which the merge was generated. This provides a way
to quantify the difference between single and multi-algorithm resampling schemes.
}
\value{
a list of objects of class \code{\link{consmatrix}} and (if merge specified) \code{\link{mergematrix}}. See \code{\link{consmatrix}} and \code{\link{mergematrix}} for details.
}
\references{
Merged consensus clustering to assess and improve class discovery with microarray data. Simpson TI, Armstrong JD and Jarman AP. BMC Bioinformatics 2010, 11:590.\cr
Consensus clustering: A resampling-based method for class discovery and visualization of gene expression microarray data. Monti, S., Tamayo, P., Mesirov, J. and Golub, T. Machine Learning, 52, July 2003.\cr
}
\author{
Dr. T. Ian Simpson
\email{ian.simpson@ed.ac.uk}
}
\seealso{
\code{\link{cluster}},\code{\link{clrob}},\code{\link{memrob}}
}
\examples{
#load test data
data(sim_profile);
#perform a group of re-sampling clustering experiments accepting default parameters
#for the clustering algorithms
cmr <- cluscomp(
sim_profile,
algorithms=list('kmeans','pam'),
merge=1,
clmin=2,
clmax=5,
reps=5
)
#display resulting matrices contained in the consensus result list
summary(cmr);
#display the cluster robustness for the pam k=4 consensus matrix
clrob(cmr$e2_pam_k4);
#plot a heatmap of the consensus matrix, note you access the cluster matrix object
#through the cm slot
#heatmap(cmr$e2_pam_k4@cm);
#display the membership robustness for kmeans k=4 cluster 1
memrob(cmr$e2_pam_k4)$cluster1;
#merged consensus example
#data(testcmr);
#calculate the membership robustness for the merge matrix when cluster number k=4,
#in reference to the pam scaffold. (see memrob for more details).
#mr <- memrob(testcmr$merge_k4,testcmr$e1_kmeans_k4@rm);
#show the membership robustness for cluster 1
#mr$cluster1;
}
|
953cbd7a4e3b7030b70bc1aea2e4da6eae67026f
|
e3ad0a98623d6a1cc96cff58aa92c494657b1bc4
|
/Section4/Sim-RI.R
|
a17ccd88d8ceca29cb8a64e62f3a42484a1d96b3
|
[] |
no_license
|
sshonosuke/EHE
|
91a8780d8627202fb3a399e4e0059c06218b86db
|
6277d5e7138395183ccfde8c883c15f2d9d83ed5
|
refs/heads/master
| 2023-08-22T13:11:28.032202
| 2021-10-07T13:55:44
| 2021-10-07T13:55:44
| 261,907,836
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,549
|
r
|
Sim-RI.R
|
library(MASS)
library(MCMCpack)
library(bayesm)
# Monte Carlo comparison of robust Bayesian random-intercept regressions
# (EHE, adaptive EHE, Cauchy, t, mixture-t) against a normal benchmark under
# data contamination. Model-fitting functions (EHE.RI, tBR.RI, mix.tBR.RI,
# BR.RI) come from the sourced file below.
source("Function-RI.R")
set.seed(123)
R <- 500 # number of Monte Carlo replications
om <- 0.05 # contamination ratio
aa <- 15 # outlier location: 5 or 10 or 15 or 20
p <- 10 # the number of predictor variables: 10 or 20
m <- 50 # the number of subjects
nn <- 10
N <- m*nn
# Subject index for each of the N = m*nn observations (nn repeats per subject).
ID <- rep(1:m, rep(nn, m))
mc <- 3000
bn <- 1000
# settings
tau <- 0.5
sigma <- 1
Beta <- rep(0,p)
Beta[c(1,4,7,10)] <- c(0.3, 0.3, 2, 2) # non-zero coefficients
int <- 0.5 # intercept
# True parameter vector: intercept followed by the regression coefficients.
Para <- c(int, Beta)
# 95% credible interval endpoints from a vector of posterior draws.
quant <- function(x){ quantile(x, prob=c(0.025,0.975)) }
# covariates: AR(1)-type correlation structure with parameter rho.
rho <- 0.2
mat <- matrix(NA,p,p)
for(k in 1:p){
for(j in 1:p){ mat[k,j] <- rho^(abs(k-j)) }
}
X <- mvrnorm(N, rep(0, p), mat)
# result box: one column per method, one row per replication.
meth <- c("EHE", "aEHE","Cauchy", "aT", "Mix-T", "Normal")
L <- length(meth)
MSE <- matrix(NA, R, L)
MSE.RE <- matrix(NA, R, L)
CP <- matrix(NA, R, L)
AL <- matrix(NA, R, L)
dimnames(MSE)[[2]] <- dimnames(MSE.RE)[[2]] <- meth
dimnames(CP)[[2]] <- dimnames(AL)[[2]] <- meth
data <- list()
# replication loop: simulate contaminated data, fit all six models, record
# MSE of the coefficients, MSE of the random effects, coverage and CI length.
for(r in 1:R){
ch <- rbinom(N, 1, om)
# Errors: standard normal with probability 1-om, N(aa, 1) outliers otherwise.
noise <- (1-ch)*rnorm(N, 0, 1) + ch*rnorm(N, aa, 1)
RE <- rnorm(m, 0, tau)
Y <- int + as.vector(X%*%Beta) + RE[ID] + sigma*noise
plot(X[,1], Y)
data[[r]] <- Y
# EHE
fit.EHE <- EHE.RI(Y, X, ID, gam.est=F, mc=mc, burn=bn)
est.EHE <- apply(fit.EHE$Beta, 2, mean)
CI.EHE <- apply(fit.EHE$Beta, 2, quant)
RE.EHE <- apply(fit.EHE$RE, 2, mean)
# aEHE (gamma estimated from the data)
fit.aEHE <- EHE.RI(Y, X, ID, gam.est=T, mc=mc, burn=bn)
plot(fit.aEHE$Gam)
est.aEHE <- apply(fit.aEHE$Beta, 2, mean)
CI.aEHE <- apply(fit.aEHE$Beta, 2, quant)
RE.aEHE <- apply(fit.aEHE$RE, 2, mean)
# Cauchy (t with nu = 1)
fit.c <- tBR.RI(Y, X, ID, mc=mc, burn=bn, nu=1)
est.c <- apply(fit.c$Beta, 2, mean)
CI.c <- apply(fit.c$Beta, 2, quant)
RE.c <- apply(fit.c$RE, 2, mean)
# T (degrees of freedom estimated, started at nu = 3)
fit.at <- tBR.RI(Y, X, ID, mc=mc, burn=bn, nu=3, estimation=T)
est.at <- apply(fit.at$Beta, 2, mean)
CI.at <- apply(fit.at$Beta, 2, quant)
RE.at <- apply(fit.at$RE, 2, mean)
# mix-T
fit.mixt <- mix.tBR.RI(Y, X, ID, mc=mc, burn=bn, nu=1/2)
est.mixt <- apply(fit.mixt$Beta, 2, mean)
CI.mixt <- apply(fit.mixt$Beta, 2, quant)
RE.mixt <- apply(fit.mixt$RE, 2, mean)
# Linear regression (Benchmark: normal errors)
fit.lm <- BR.RI(Y, X, ID, mc=mc, burn=bn)
est.lm <- apply(fit.lm$Beta, 2, mean)
CI.lm <- apply(fit.lm$Beta, 2, quant)
RE.lm <- apply(fit.lm$RE, 2, mean)
# MSE of (intercept, coefficients) per method
Est <- cbind(est.EHE, est.aEHE, est.c, est.at, est.mixt, est.lm)
MSE[r,] <- apply((Est-Para)^2, 2, mean)
# MSE of the estimated random effects
Est <- cbind(RE.EHE, RE.aEHE, RE.c, RE.at, RE.mixt, RE.lm)
MSE.RE[r,] <- apply((Est-RE)^2, 2, mean)
# Coverage prob of the 95% credible intervals
CP[r,1] <- mean( ifelse(CI.EHE[1,]<Para & CI.EHE[2,]>Para, 1, 0) )
CP[r,2] <- mean( ifelse(CI.aEHE[1,]<Para & CI.aEHE[2,]>Para, 1, 0) )
CP[r,3] <- mean( ifelse(CI.c[1,]<Para & CI.c[2,]>Para, 1, 0) )
CP[r,4] <- mean( ifelse(CI.at[1,]<Para & CI.at[2,]>Para, 1, 0) )
CP[r,5] <- mean( ifelse(CI.mixt[1,]<Para & CI.mixt[2,]>Para, 1, 0) )
CP[r,6] <- mean( ifelse(CI.lm[1,]<Para & CI.lm[2,]>Para, 1, 0) )
# Average length of the credible intervals
AL[r,1] <- mean(CI.EHE[2,]-CI.EHE[1,])
AL[r,2] <- mean(CI.aEHE[2,]-CI.aEHE[1,])
AL[r,3] <- mean(CI.c[2,]-CI.c[1,])
AL[r,4] <- mean(CI.at[2,]-CI.at[1,])
AL[r,5] <- mean(CI.mixt[2,]-CI.mixt[1,])
AL[r,6] <- mean(CI.lm[2,]-CI.lm[1,])
# Progress report every 100 replications.
if(round(r/100)==(r/100)){ print(r) }
print(100*MSE.RE[r,])
}
# Persist the full workspace, tagged with the scenario parameters.
save(list=ls(), file=paste0("sim-RI(a=",aa,",om=",om,").RData"))
|
0705b8dac6eb058f5724e6d0e998bf0a6b1af6b2
|
b54789971135132705b66808f7d62b391c80255f
|
/inst/doc/shapper_classification.R
|
e0f40058353e2218992dd8d442509b33a6124e6a
|
[] |
no_license
|
cran/shapper
|
983faa88eb35042452334a1b21e8587becb79002
|
9cac7a9a22a775eaed98c88a8d29398f1de51da0
|
refs/heads/master
| 2021-07-06T05:21:26.493422
| 2020-08-28T08:00:03
| 2020-08-28T08:00:03
| 173,755,598
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,989
|
r
|
shapper_classification.R
|
## ----setup, include=FALSE, eval=FALSE-----------------------------------------
# knitr::opts_chunk$set(echo = TRUE,
# message = FALSE,
# warning = FALSE)
## ---- eval = FALSE------------------------------------------------------------
# shapper::install_shap()
## ----eval = FALSE-------------------------------------------------------------
# library("DALEX")
# Y_train <- HR$status
# x_train <- HR[ , -6]
#
## ----eval = FALSE-------------------------------------------------------------
# library("randomForest")
# set.seed(123)
# model_rf <- randomForest(x = x_train, y = Y_train)
#
# library(rpart)
# model_tree <- rpart(status~. , data = HR)
## ----eval = FALSE-------------------------------------------------------------
# library(shapper)
#
# p_function <- function(model, data) predict(model, newdata = data, type = "prob")
#
# ive_rf <- individual_variable_effect(model_rf, data = x_train, predict_function = p_function,
# new_observation = x_train[1:2,], nsamples = 50)
#
#
# ive_tree <- individual_variable_effect(model_tree, data = x_train, predict_function = p_function,
# new_observation = x_train[1:2,], nsamples = 50)
#
## ----eval = FALSE-------------------------------------------------------------
# ive_rf
## ----eval = FALSE-------------------------------------------------------------
# plot(ive_rf, bar_width = 4)
## ----eval = FALSE-------------------------------------------------------------
# plot(ive_rf, show_predicted = FALSE, bar_width = 4)
## ----eval = FALSE-------------------------------------------------------------
# plot(ive_rf, ive_tree, show_predicted = FALSE, bar_width = 4)
## ----eval = FALSE-------------------------------------------------------------
# ive_rf_filtered <- ive_rf[ive_rf$`_ylevel_` =="fired", ]
# shapper:::plot.individual_variable_effect(ive_rf_filtered)
|
dbd3bac62c72cb4e903c3f7dd4dc7cb0a6451e9f
|
53191a96a722c594ca5e87c964fdd267474e8ce1
|
/irr.R
|
a7b2531a90646a346c27ce96f519e021fa79a026
|
[] |
no_license
|
FPupillo/hippocampal_ranging
|
8189169041ba7c26b21382de0b32bfe44ad4bbbf
|
4964a9b14d5389fe7b04e57d369a9a61183cef5c
|
refs/heads/main
| 2023-05-30T17:38:18.501835
| 2021-06-21T09:07:25
| 2021-06-21T09:07:25
| 369,465,447
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,401
|
r
|
irr.R
|
#------------------------------------------------------------------------------------------------#
# ----------------------- Calculate interrater reliability---------------------------------------#
# ------------------------ hippocampal ranging --------------------------------------------------#
#------------------------------------------------------------------------------------------------#
# Created by Francesco Pupillo, Goethe University
# date: "Fri May 21 09:47:04 2021"
#
#------------------------------------------------------------------------------------------------#
rm(list = ls())
# getting the neccessary libs
library(irr)
library(reshape)
library(ggplot2)
# Compute Cohen's kappa (irr::kappa2) between two raters for each of the four
# hippocampal boundary slices. Returns a 1 x 4 numeric matrix of kappa values
# in the order: start-left, start-right, end-left, end-right.
KappaCalc <- function(rater1, rater2) {
  slice_cols <- c("Starting.slice.left", "Starting.slice.right",
                  "end.slice.left", "end.slice.right")
  # One kappa per slice column, pairing the two raters' ratings.
  kappa_vals <- vapply(slice_cols, function(col) {
    kappa2(cbind(rater1[[col]], rater2[[col]]))$value
  }, numeric(1))
  # Single-row matrix, matching the shape of cbind() over four scalars.
  matrix(kappa_vals, nrow = 1)
}
# compare against correct slices or with another rater?
# 1: correct slices
# 2: another rater
whichComp<-1
# list all the test sets (one CSV per test-set/round) produced by rater 1
rater1<-list.files("rater1")
# load data
# NOTE(review): the script navigates with setwd() and builds variables via
# assign()/get(); file names are assumed to encode the test set at character
# position 8 and the round at position 10 — verify against actual file names.
cd<-getwd() # store the current path in a variable
setwd("rater1") # navigate to the folder where the test sets are
for (n in 1:length(rater1)){
# extract the test set from name of the file
testset<- as.numeric(substr(rater1[n], 8,8))
# extract the round
round<-as.numeric(substr(rater1[n], 10,10))
# create a dataset named rater1.<testset>.<round> in the global environment
assign(paste("rater1.", testset,".",round, sep=""), read.csv(rater1[n])) # load each test set and store in a variable
}
setwd(cd) # return to the previous folder
# load correct slices or rater two slices, depending on whichComp
# list all the correct slices sets
if (whichComp==1){
rater2<-list.files("correct.slices")
setwd("correct.slices")
for (n in 1:length(rater2)){
assign(paste("rater2.", n, sep=""), read.csv(rater2[n]))
}
setwd(cd)
} else if (whichComp==2){
rater2<-list.files("rater2")
setwd("rater2")
for (n in 1:length(rater2)){
# extract test set
testset<-as.numeric(substr(rater2[n], 8,8))
# extract the round
round<-as.numeric(substr(rater2[n], 10,10))
assign(paste("rater2.", testset, ".", round, sep=""), read.csv(rater2[n]))
}
setwd(cd)
}
# calculate the kappa for every rater-1 dataset against its counterpart
files<-ls(pattern ="^rater1.")
for (n in files){
# get the set number (character 8 of the variable name, e.g. "rater1.3.1")
setN<-substr(n, 8, 8)
# extract the round
round<-as.numeric(substr(n, 10,10))
curr_rater1<-get(n)
if (whichComp==1){ # if we are comparing against correct slices
curr_rater2<-get(paste("rater2.", setN, sep="")) # the corresponding correct slice
} else { # if we are comparing against the other rater
curr_rater2<-get(paste("rater2.", setN, ".", round, sep="")) # the corresponding rater-2 set
}
# store the 1x4 kappa matrix as kappa_rater1.<set>.<round>
assign(paste("kappa_",n, sep=""),
KappaCalc(curr_rater1, curr_rater2))
}
# plot
# bind the kappa results from every comparison into one data frame
kappaAll<-vector()
for (n in ls(pattern="^kappa_rater")){
curr_kappa<-data.frame(get(n))
names(curr_kappa)<-c("start.left", "start.right", "end.left", "end.right")
kappaAll<-data.frame(rbind(kappaAll, curr_kappa))
}
# recover the round (last character of the variable name)
# and the testset (character 14)
round<-vector()
testset<-vector()
kappas<-ls(pattern="^kappa_rater")
for(n in 1: length(kappas)){
round[n]<-as.numeric(substr(kappas[n], nchar(kappas[n]), nchar(kappas[n])))
testset[n]<-as.numeric(substr(kappas[n], 14,14))
}
# create test set variable
kappaAll$round<-round
kappaAll$testset<-testset
# sort by round, then test set
kappaAll<-kappaAll[order(kappaAll$round, kappaAll$testset),]
# create progressive variable (sequential test number across all rounds)
kappaAll$testN<-1:nrow(kappaAll)
# melt to long format: one row per (testN, slice) with its IRR value
kappamer<-melt(kappaAll[,c(1:4,7)], id.vars="testN")
names(kappamer)[2:3]<-c("Slice", "IRR")
# plot IRR over successive tests, one line per boundary slice
print(
ggplot(kappamer, aes(x=testN, y=IRR, group = Slice, colour=Slice))+
geom_line()+
geom_point()+
geom_text(aes(label = round(IRR, 2)),
vjust = "outward", hjust = "inward",
show.legend = FALSE, colour = "black") +
scale_x_continuous("Test Number", breaks = seq(1, 100, 1))
)
# save the plot
ggsave( "irr.jpg", plot = last_plot())
|
b22ebf9713f67cb5f77dcbbb55c67321dcde99d1
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/IMIFA/examples/get_IMIFA_results.Rd.R
|
6c48b31e867d3eff4c33bdfda6e3bf8499ab26b2
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,343
|
r
|
get_IMIFA_results.Rd.R
|
library(IMIFA)
### Name: get_IMIFA_results
### Title: Extract results, conduct posterior inference and compute
### performance metrics for MCMC samples of models from the IMIFA family
### Aliases: get_IMIFA_results print.Results_IMIFA summary.Results_IMIFA
### Keywords: IMIFA main
### ** Examples
# data(coffee)
# data(olive)
# Run a MFA model on the coffee data over a range of clusters and factors.
# simMFAcoffee <- mcmc_IMIFA(coffee, method="MFA", range.G=2:3, range.Q=0:3, n.iters=1000)
# Accept all defaults to extract the optimal model.
# resMFAcoffee <- get_IMIFA_results(simMFAcoffee)
# Instead let's get results for a 3-cluster model, allowing Q be chosen by aic.mcmc.
# resMFAcoffee2 <- get_IMIFA_results(simMFAcoffee, G=3, criterion="aic.mcmc")
# Run an IMIFA model on the olive data, accepting all defaults.
# simIMIFAolive <- mcmc_IMIFA(olive, method="IMIFA", n.iters=10000)
# Extract optimum results
# Estimate G & Q by the median of their posterior distributions
# Construct 90% credible intervals and try to return the similarity matrix.
# resIMIFAolive <- get_IMIFA_results(simIMIFAolive, G.meth="median", Q.meth="median",
# conf.level=0.9, z.avgsim=TRUE)
# summary(resIMIFAolive)
# Simulate new data from the above model
# newdata <- sim_IMIFA_model(resIMIFAolive)
|
df043ca34d4152e3d9513e2cfcc9c742727014ab
|
0d681f86c8b93184d0f1068d785099ba00b5750a
|
/R/RcppExports.R
|
29c733a3833240c667f27e5be7a08a675f78f0eb
|
[
"Apache-2.0"
] |
permissive
|
liyangyang12/GNET2
|
a6340fd1ee1ee07a6ee94b0eeadfbcffb1be083b
|
f450971edb064aa1bba750585b1a7e0ac70b3356
|
refs/heads/master
| 2022-12-27T11:12:12.924442
| 2020-10-17T02:26:22
| 2020-10-17T02:26:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,405
|
r
|
RcppExports.R
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
#' Fit a regression tree.
#'
#' Fit a regression tree based on Gaussian Likelihood score. Provided in case the best split
#' is not applicable for R dnorm() function.
#' @param X A n by p matrix as input.
#' @param Y A n by q matrix as response.
#' @param max_depth Maximum depth of the tree.
#' @param cor_cutoff Cutoff for within group Pearson correlation coefficient, if all data belong to a node
#' have average correlation greater or equal to this, the node would not split anymore.
#' @param min_divide_size Minimum number of data belong to a node allowed for further split of the node.
#'
#' @return A matrix for sample informatrion for each partition level. First column is feature index used
#' by the node and second is the value used to split, the rest of the columns are the split of sample: 0 means
#' less or equal, 1 means greater and -1 means the sample does not belong to this node.
#' @examples
#' build_module(X = matrix(rnorm(5*10),5,10), Y = matrix(rnorm(5*10),5,10),
#' max_depth=3,cor_cutoff=0.9,min_divide_size=3)
#' @export
build_module <- function(X, Y, max_depth, cor_cutoff, min_divide_size) {
.Call('_GNET2_build_module', PACKAGE = 'GNET2', X, Y, max_depth, cor_cutoff, min_divide_size)
}
|
98b1397cc4cc2ffc2975096944e1f3dd5b109336
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/rsurfer/examples/eliminateabnormalities.rows.Rd.R
|
ed5bb475acd03a3efcc8b26ef1c436ff64ac4553
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 223
|
r
|
eliminateabnormalities.rows.Rd.R
|
library(rsurfer)
### Name: eliminateabnormalities.rows
### Title: Eliminate Abnormal Rows
### Aliases: eliminateabnormalities.rows
### ** Examples
data <- generaterandomsubjects(10)
eliminateabnormalities.rows(data)
|
5b2afb2dd9ae23d3c74e375255aa515d1c305f13
|
df149ed84544c17b0218fae9f4c9fbb1852a31ff
|
/R/set-line-distance.R
|
435f656275947732c0dc51303933a790eb577aba
|
[
"MIT"
] |
permissive
|
data-science-made-easy/james
|
c15ca1467a418dba71d23b183bd0d1799d78abe7
|
38c40f3a1de8c69fa0ea8b92f44e67359100128d
|
refs/heads/master
| 2021-06-17T07:48:57.087651
| 2021-01-08T15:04:13
| 2021-01-08T15:04:13
| 129,382,477
| 0
| 0
| null | 2018-04-16T09:27:17
| 2018-04-13T09:47:29
|
R
|
UTF-8
|
R
| false
| false
| 123
|
r
|
set-line-distance.R
|
set_line_distance <- function(p) {
print_debug_info(p)
# Set line distance
par(lheight = p$line_distance)
p
}
|
a3e569dba5ff76c8c427082d3cee201d4c354e23
|
76f966ef048df6e295658adc1e179386f4db9ff2
|
/OpenIIT_15_Main.R
|
f26ffe336019771fcdf5e12e2b68a8692af1f6bd
|
[] |
no_license
|
evijit/OpenIIT_DataAnalytics_2015
|
49410df7e0815406ab414f710114be11fc4a287c
|
30bf43e121a57705633232e5ef6fe1baa1d8f5b8
|
refs/heads/master
| 2021-06-01T01:33:03.147170
| 2016-04-22T09:32:26
| 2016-04-22T09:32:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 685
|
r
|
OpenIIT_15_Main.R
|
require("plyr")
require("ggplot2")
options(max.print=1000000)
maindata <- read.csv('MainData.csv')
require('scales')
maindata <- maindata[order(maindata$Dept),]
maindata<-plyr::arrange(maindata, Dept, Product, City)
X <- split(maindata, maindata$Product)
prod20<-X[[1]]
prod20_hist <- prod20[!is.na(as.numeric(as.character(prod20$Difference))),]
prod20_hist$Date <- as.Date(prod20_hist$Date, "%d-%m-%Y")
dat1 <- subset(prod20_hist, Difference >= 0)
dat2 <- subset(prod20_hist,Difference < 0)
ggplot() +
geom_line(data=dat1, aes(x=Date, y=Difference, group=City, colour=City)) +
geom_point()+
labs(x = "Month", y = "Surplus") +
scale_x_date(labels = date_format("%b-%Y"))
|
ec1f940035f317fef124e33dec4c911baca85998
|
f6cea49ec83440cc5dd37602fb94d07132953339
|
/code/hist_feature_selected.R
|
d86aaa09e49b2f08187db01dca329adc804e61fe
|
[] |
no_license
|
hainguyenct/geneFS
|
39c4d6e7e2ad9580ecb45016c59a7aacc7f95ce8
|
49597614d987ca9d4474ebcef115855c915250a5
|
refs/heads/master
| 2023-04-18T18:05:52.610486
| 2021-05-07T02:04:46
| 2021-05-07T02:04:46
| 242,274,581
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 948
|
r
|
hist_feature_selected.R
|
#step 1: read data####
path_machine = "/Users/dcongtinh/gene-abundance/experiment/results/fc_model_with_feature_selected/qtf_pc576_10_fillseqf_nb10_auy_gray/"
path_r =path_machine
setwd(path_r)
dataset_name <- "Wt2dgene"
file_name <- paste0('feature_selected_', tolower(dataset_name), '.csv')
table = read.csv(file_name, header = TRUE)
dim(table)
#step 2: visualize feature selected####
library(ggplot2)
title_size <- 32
font_size <- 24
ggplot(data=table, aes(x=freq)) + ylab("The number of features")+ xlab("Frequency of selected times") +
geom_bar() + stat_count(aes(y=..count..,label=..count..),geom="text",vjust=-0.8,size=5.5) +
theme(axis.text=element_text(size=font_size),
axis.title=element_text(size=font_size,face="bold"),
plot.title = element_text(hjust = 0.5, size=title_size, face="bold", margin=margin(8, 0, 16, 0))) +
ggtitle(paste0("Histogram for selected ", dataset_name, "'s features"))
|
da2e748bfeb5e5b2cee97bb2910f526b7ff734cc
|
7e5e5139f817c4f4729c019b9270eb95978feb39
|
/Introduction to Tidyverse/Chapter 2-Data visualization/6.R
|
bc60eefefda8fd13351f952e8b163d99015f82f8
|
[] |
no_license
|
Pranav-Polavarapu/Datacamp-Data-Scientist-with-R-Track-
|
a45594a8a9078076fe90076f675ec509ae694761
|
a50740cb3545c3d03f19fc79930cb895b33af7c4
|
refs/heads/main
| 2023-05-08T19:45:46.830676
| 2021-05-31T03:30:08
| 2021-05-31T03:30:08
| 366,929,815
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 694
|
r
|
6.R
|
# Adding color to a scatter plot
# In this lesson you learned how to use the color aesthetic, which can be used to show which continent each point in a scatter plot represents.
#
# Instructions
# 100 XP
# Create a scatter plot with population (pop) on the x-axis, life expectancy (lifeExp) on the y-axis, and with continent (continent) represented by the color of the points. Put the x-axis on a log scale.
library(gapminder)
library(dplyr)
library(ggplot2)
gapminder_1952 <- gapminder %>%
filter(year == 1952)
# Scatter plot comparing pop and lifeExp, with color representing continent
ggplot(gapminder_1952, aes(x = pop, y = lifeExp, color = continent)) + geom_point() + scale_x_log10()
|
c466d1bd1c01467536443a0fcc90b8efa0ae500b
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/sommer/examples/DT_technow.Rd.R
|
333aed30296ecaa7181067f1d8b5ab70c5a69dc3
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,612
|
r
|
DT_technow.Rd.R
|
library(sommer)
### Name: DT_technow
### Title: Genotypic and Phenotypic data from single cross hybrids (Technow
### et al. (2014))
### Aliases: DT_technow
### Keywords: datasets
### ** Examples
####=========================================####
#### For CRAN time limitations most lines in the
#### examples are silenced with one '#' mark,
#### remove them and run the examples using
#### command + shift + C |OR| control + shift + C
####=========================================####
data(DT_technow)
####=========================================####
####=========================================####
# ans2 <- mmer(GY~1,
# random=~vs(dent,Gu=Ad) + vs(flint,Gu=Af),
# rcov=~units,
# data=DT)
# summary(ans2)
####=========================================####
#### multivariate overlayed model
####=========================================####
# M <- rbind(Md,Mf)
# A <- A.mat(M)
# ans3 <- mmer(cbind(GY,GM)~1,
# random=~vs(overlay(dent,flint),Gu=A),
# rcov=~vs(units,Gtc=diag(2)),
# data=DT)
# summary(ans2)
# cov2cor(ans3$sigma[[1]])
# ####=========================================####
# #### Hybrid GWAS
# ####=========================================####
# M <- (rbind(Md,Mf) *2 )-1
# inds <- colnames(overlay(DT$dent,DT$flint)[[1]])
# Minds <- M[inds,]
#
# A <- A.mat(Minds)
# A[1:4,1:4]
# ans3 <- GWAS(GM~1, iters = 20,
# random=~vs(overlay(dent,flint),Gu=A),
# rcov=~vs(units),na.method.Y = "include",
# M=Minds, gTerm="dent",
# data=DT)
# plot(ans3$scores[1,])
|
ebd652f29b0d33bc04cbfbe90a3485a444b50bef
|
352159789935579fe40d10389862fa29f610c1fa
|
/Kode R/kode R weight matrix dan moran test.R
|
5d39cb162c59e47d5e07d3f497f5e6ea41323e74
|
[] |
no_license
|
novalsaputra/Data
|
a3318907fa8f1d02e6f6a99e0ce0bbc156bd741b
|
6c2cf6834b7ff283fd72a7dde23d2e3462d921ba
|
refs/heads/master
| 2020-11-24T07:54:34.803155
| 2020-06-14T09:49:27
| 2020-06-14T09:49:27
| 228,038,687
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,975
|
r
|
kode R weight matrix dan moran test.R
|
### OLS data ASLI
mydatap <- read.table("D:/tugas s2/TESIS..... FIGHT!!!/Kode R/data asli.txt",header=T)
head(mydatap)
names(mydatap)
attach(mydatap)
### Scatterplot
myvar=mydatap[c("Y","KP")]
plot(myvar)
myvar2=mydatap[c("Y","H","K","P","TG","S")]
plot(myvar2)
####### creating neighbour map ###########
library(spdep)
library(sp)
library(Matrix)
library(maptools)
DASp<- readShapePoly("D:/tugas s2/SPATIAL/citarum_peta_ordo3/ordo3_all.shp")
DASd<- readShapeSpatial("D:/tugas s2/SPATIAL/citarum_peta_ordo3/ordo3_all.shp")
summary(DASd)
DAS.d.df<- as.data.frame(DASd)
head(DAS.d.df)
summary(DAS.d.df)
################################
### Contiguity neighbour #######
################################
## Queen
class(DASd)
DAS_nb <- poly2nb(DASd,row.names = seq(1,92))
DAS_nb
plot(DASd, col="white", border="grey")
plot(DAS_nb, coordinates(DASd), col="red", add=TRUE)
text(coordinates(DASd), labels=DAS.d.df$IDp, cex=0.7, col="blue",pos=4, offset=0.4)
## ROOK
DAS_nb2 <- poly2nb(DASd, queen=FALSE, row.names = seq(1,92))
DAS_nb2
summary(DAS_nb2)
plot(DASd, col="white", border="grey")
plot(DAS_nb2,coordinates(DASd),col="red",cex=0.3,add=TRUE)
plot(DAS_nb2,coordinates(DASd),col="red",cex=0.5,add=TRUE)
plot(DAS_nb2,coordinates(DASd),col="red",cex=0.7,add=TRUE)
plot(DAS_nb2,coordinates(DASd),col="red",cex=0.8,add=TRUE)
plot(DAS_nb2,coordinates(DASd),col="red",cex=0.9,add=TRUE)
text(coordinates(DASd), labels=DAS.d.df$IDp, cex=0.7, col="blue",pos=4, offset=0.4)
##BISHOP
plot(DASd, col="white", border="grey")
plot(diffnb(DAS_nb, DAS_nb2), coordinates(DASd), col="red", add=TRUE)
text(coordinates(DASd), labels=DAS.d.df$IDp, cex=0.7, col="blue",pos=4, offset=0.4)
### difference queen and Rook
plot(DASd, col="white", border="grey")
plot(DAS_nb, coordinates(DASd), col="dark grey",add=TRUE)
plot(DAS_nb2,coordinates(DASd),col="black",cex=0.9,add=TRUE)
plot(diffnb(DAS_nb, DAS_nb2), coordinates(DASd), col="red", add=TRUE)
text(coordinates(DASd), labels=DAS.d.df$IDp, cex=0.7, col="blue",pos=4, offset=0.4)
#### weight matrix
#QUEEN
DASw <- nb2listw(DAS_nb)
summary(DASw)
#ROOK
DASw2 <- nb2listw(DAS_nb2)
summary(DASw2)
### moran test
moran.test(mydatap$Y,DASw2,randomisation = F,alternative = "two.sided")
moran.plot(mydatap$Y,DASw2, col="blue", xlab="Indeks kekritisan Air", ylab=" spatial lag")
moran.test(log(Y),DASw2)
moran.plot(log(Y),DASw2, col="blue", xlab="Indeks Kritis Air", ylab="lag")
moran.test(log(KP),DASw2,randomisation = F,alternative = "two.sided")
moran.plot(mydatap$KP,DASw2, col="blue", xlab="Kepadatan Penduduk", ylab="lag")
moran.test(mydatap$H,DASw2)
moran.plot(mydatap$H,DASw2, col="blue", xlab="Persentase Luas Hutan", ylab="lag")
moran.test(mydatap$K,DASw2)
moran.plot(mydatap$K,DASw2, col="blue", xlab="Persentase Luas Kebun", ylab="lag")
moran.test(mydatap$TG,DASw2)
moran.plot(mydatap$TG,DASw2, col="blue", xlab="Persentase Luas Tegalan", ylab="lag")
moran.test(mydatap$S,DASw2)
moran.plot(mydatap$S,DASw2, col="blue", xlab="Persentase Luas Sawah", ylab="lag")
moran.test(mydatap$M,DASw2)
moran.plot(mydatap$M,DASw2, col="blue", xlab="Persentase Luas Pemukiman", ylab="lag")
moran.test(mydatap$P,DASw2)
moran.plot(mydatap$P,DASw2, col="blue", xlab="Persentase Luas Kebun", ylab="lag")
############## Correlogram MORAN I #############################
mor1<-sp.correlogram(DAS_nb2, mydatap$Y, order = 6, method = "I",
style = "W", randomisation = TRUE, zero.policy = NULL, spChk=NULL)
plot(mor1)
mor1<-sp.correlogram(DAS_nb2, mydatap$KP, order = 6, method = "I",
style = "W", randomisation = TRUE, zero.policy = NULL, spChk=NULL)
plot(mor1)
mor1<-sp.correlogram(DAS_nb, mydatap$P, order = 6, method = "I",
style = "W", randomisation = TRUE, zero.policy = NULL, spChk=NULL)
plot(mor1)
mor1<-sp.correlogram(DAS_nb2, mydatap$H, order = 6, method = "I",
style = "W", randomisation = TRUE, zero.policy = NULL, spChk=NULL)
plot(mor1)
|
ff82c6a91e87b3519ab014892fd4fb06d60f7ed7
|
a10d3e11fb352cf896326c34bd0290cb44f2c0fe
|
/demo/CMHF_Dashboard.R
|
1a1193e0f773985fb8f040e29ce0a8c0963fd0f7
|
[
"CC0-1.0"
] |
permissive
|
Romi111/intro_rmd
|
9ec3c39ae7c653695abfda5ba43c5a7e6c7431bf
|
45e7bcd432a517f17b83f64d74625926a50ddf66
|
refs/heads/main
| 2023-09-03T18:49:10.756609
| 2021-11-02T21:23:01
| 2021-11-02T21:23:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,525
|
r
|
CMHF_Dashboard.R
|
library(tidyverse)
library(pander)
my_plot <- function(data) {
if (data == 'mtcars') {
mtcars %>% ggplot() +
geom_point(aes(x = wt, y = mpg)) +
geom_smooth(aes(x = wt, y = mpg))
}
else {
iris %>% ggplot() +
geom_point(aes(x = Sepal.Length, y = Sepal.Width)) +
geom_smooth(aes(x = Sepal.Length, y = Sepal.Width))
}
}
my_lm <- function(data) {
if (data == 'mtcars') {
pander(summary(lm(mpg ~ wt, data = mtcars)))
}
else {
pander(summary(lm(Sepal.Width ~ Sepal.Length, data = iris)))
}
}
make.tabs <- function(data){
res <- list()
for (i in seq_along(data)) {
res[[i]] <- c(paste('###', data[i]), '\n',
paste('#### My Plot', i), '\n',
"```{r, results = 'asis'}", '\n',
"print('Hello World')", '\n',
"my_plot('", data[i] ,"')", '\n',
'```', '\n\n',
paste('#### My Linear Model', i), '\n',
"```{r, results = 'asis'}", '\n',
"print('Hello World')", '\n',
"my_lm('", data[i] ,"')", '\n',
'```', '\n\n')
}
return(unlist(res))
}
my_tabs <- make.tabs(c('mtcars','iris'))
# Create the Rmd to knit
cat(
'---
title: "Untitled"
author: "author"
date: "2017-10-23"
output: html_document
---
# Some Tabs
## {.tabset}
```{r}
library(dplyr)
library(tidyverse)
```
',make.tabs(c('mtcars','iris')),
sep = "",
file = "filetoknit.Rmd")
rmarkdown::render("filetoknit.Rmd")
|
a2a88c7fdd130a3509ff9af3d30360cb4d224f52
|
a698a346bfb430de48a5961716f2066bdc25b86e
|
/plot4.R
|
5567a60fde69ccaed3e1c7770cab2ddf0b0a7b73
|
[] |
no_license
|
Nyanez615/ExData_Plotting1
|
c7c6bc135e9488e715071a59df65c40b601b48ef
|
24388d6b698df671bff3bbf36b87574ca8d9ca7e
|
refs/heads/master
| 2020-06-17T10:58:57.664986
| 2019-07-09T20:02:18
| 2019-07-09T20:02:18
| 195,904,287
| 0
| 0
| null | 2019-07-09T00:33:07
| 2019-07-09T00:33:07
| null |
UTF-8
|
R
| false
| false
| 2,638
|
r
|
plot4.R
|
# This script recreates Plot 4 of the Course Project 1 of the Exploratory Data
# Analysis Coursera course, based on the Individual household electric power
# consumption Data Set, found in the UC Irvine Machine Learning Repository.
# Loading libraries
library(tidyverse)
library(lubridate)
# Setting file names and URLs
file <- "exdata-data-household_power_consumption.zip"
fileURL <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
# Checking if .zip exists
if (!file.exists(file)) {
download.file(fileURL, destfile = "./exdata-data-household_power_consumption.zip", method="curl")
}
# Checking if file exists
if (!file.exists("household_power_consumption.txt")) {
unzip(file)
}
# Reading household_power_consumption.txt and addressing "?" as NA
household_power_consumption <- read.csv("household_power_consumption.txt", header = TRUE, sep = ";", na.strings = c("?"), stringsAsFactors = FALSE)
# Reclassing date and time using lubridate
household_power_consumption$DateTime <- paste(household_power_consumption$Date, household_power_consumption$Time)
household_power_consumption$DateTime <- dmy_hms(household_power_consumption$DateTime)
# Filtering only dates 2007-02-01 and 2007-02-02
household_power_consumption <- filter(household_power_consumption, date(DateTime) == "2007-02-01" | date(DateTime) == "2007-02-02")
# Setting par() mfcol argument to 2 by 2 plots and opening png device "plot4.png"
png(filename = "plot4.png", width = 480, height = 480)
par(mfcol = c(2, 2))
# Creating first plot with appropriate y-axis label
with(household_power_consumption, plot(DateTime, Global_active_power, type = "l", xlab = "", ylab = "Global Active Power"))
# Creating second plot with appropriate y-axis label, line colors, and legend
with(household_power_consumption, plot(DateTime, Sub_metering_1, type = "l", xlab = "", ylab = "Energy sub metering"))
with(household_power_consumption, points(DateTime, Sub_metering_2, type = "l", col = "red"))
with(household_power_consumption, points(DateTime, Sub_metering_3, type = "l", col = "blue"))
legend("topright", col = c("black", "red", "blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty = 1, bty = "n")
# Creating third plot with appropriate x- and y- axis labels
with(household_power_consumption, plot(DateTime, Voltage, type = "l", xlab = "datetime", ylab = "Voltage"))
# Creating fourth plot with appropriate x- and y- axis labels
with(household_power_consumption, plot(DateTime, Global_reactive_power, type = "l", xlab = "datetime", ylab = "Global_reactive_power"))
dev.off()
|
abb12c2d005ced17cbc40acabd2e9a2d49081cbb
|
8ee94e3fedadbb772afdc11f634caaccae490967
|
/Rcode/TrigGerber.R
|
d0f867cc14d1519b08b32d27836368b449d8c81b
|
[] |
no_license
|
ShinyMCS/TAS-Bartko-BB
|
b7008cfdf35cfee2b4a5b741393139015effdd7a
|
17a2aff3d7a5f473676fe2c6a248505627b9988b
|
refs/heads/master
| 2020-06-06T18:52:08.781737
| 2014-03-01T23:15:21
| 2014-03-01T23:15:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 998
|
r
|
TrigGerber.R
|
#Trig Gerber
Trig = c(0.96, 1.16, 0.97, 1.01, 1.25, 1.22, 1.46, 1.66, 1.75, 1.72,
1.67, 1.67, 1.93, 1.99, 2.01, 2.28, 2.15, 2.29, 2.45, 2.40, 2.79,
2.77, 2.64, 2.73, 2.67, 2.61, 3.01, 2.93, 3.18, 3.18, 3.19, 3.12,
3.33, 3.51, 3.66, 3.95, 4.20, 4.05, 4.30, 4.74, 4.71, 4.71, 4.74,
5.23, 6.21)
Gerber = c(0.85, 1.00, 1.00, 1.00, 1.20, 1.20, 1.38, 1.65, 1.68, 1.70,
1.70, 1.70, 1.88, 2.00, 2.05, 2.17, 2.20, 2.28, 2.43, 2.55, 2.60,
2.65, 2.67, 2.70, 2.70, 2.70, 3.00, 3.02, 3.03, 3.11, 3.15, 3.15,
3.40, 3.42, 3.62, 3.95, 4.27, 4.30, 4.35, 4.75, 4.79, 4.80, 4.80,
5.42, 6.20)
Y1 = Trig
Y2= Gerber
#############################################
Ds = Y2-Y1
Ss = Y2+Y1
Smc = Ss-mean(Ss) # Mean Centering
mean(Ds)
sd(Ds)
#############################################
FitBB <- lm(Ds ~ Ss)
summary(FitBB)
FitKH <- lm(Ds ~ Smc)
summary(FitKH)
#############################################
t.test(Y1,Y2,paired=T)
cor.test(Ds,Ss)
var.test(Y1,Y2)
|
63c5f3ee0166d411cee3a8c6e07d74a6df1cb3ba
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/hoa/examples/anova.rsm.Rd.R
|
a31a9b6d2e95437081b04a4a04ea2d726ea5c939
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 671
|
r
|
anova.rsm.Rd.R
|
library(hoa)
### Name: anova.rsm
### Title: ANOVA Table for a RSM Object
### Aliases: anova.rsm
### Keywords: internal methods models regression
### ** Examples
## Sea Level Data
data(venice)
attach(venice)
Year <- 1:51/51
c11 <- cos(2*pi*1:51/11) ; s11 <- sin(2*pi*1:51/11)
c19 <- cos(2*pi*1:51/18.62) ; s19 <- sin(2*pi*1:51/18.62)
venice.p <- rsm(sea ~ Year + I(Year^2) + c11 + s11 + c19 + s19,
family = extreme)
anova(venice.p)
##
venice.l <- rsm(sea ~ Year + I(Year^2), family = extreme)
anova(venice.p, venice.l)
##
detach()
## House Price Data
data(houses)
houses.rsm <- rsm(price ~ ., family = student(5), data = houses)
anova(houses.rsm)
|
1a0c6a5df9cff58b03d0b6d44d77f41c663664b9
|
bffd95e4ee6d169caa6687e18b22611550c8df93
|
/man/tiDaily.Rd
|
85a880c6a6ced0f8c5decdd1d4b9634933cab937
|
[] |
no_license
|
cran/tis
|
286eb68a7aeb0be636a07babcb16a362d62aa5f2
|
f254c391711e5cbdc4deba8ea68c2a234317d4bd
|
refs/heads/master
| 2021-10-13T14:19:03.631652
| 2021-09-28T18:50:02
| 2021-09-28T18:50:02
| 17,700,509
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,027
|
rd
|
tiDaily.Rd
|
\name{tiDaily}
\alias{tiDaily}
\alias{tiBusiness}
\title{ Daily and Business Day Time Indexes }
\description{Return a daily or business day \code{ti} corresponding to a
specified position within a time index.}
\usage{
tiDaily(xTi, offset = 1)
tiBusiness(xTi, offset = 1)
}
\arguments{
\item{xTi}{
a \code{ti} object or something that the \code{ti()}
function can turn into a \code{ti} object }
\item{offset}{
for \code{ti} \code{xTi}, a number in the range [0,1] telling where in the
period represented by \code{x} to find the day. 0 means the first
day of the period, 1 the last day of the period, and fractional
values for in-between day.}
}
\value{
\code{tiDaily} converts its first argument to a \code{jul} using the
offset provided, and returns a daily \code{ti} for that day.
\code{tiBusiness} converts its first argument to a \code{jul} using the
offset provided, and returns a "business" \code{ti} for that day.
}
\seealso{ \code{\link{ti}}, \code{\link{jul}} }
\keyword{ chron }
\keyword{ ts }
|
2a5a4dca465885608c9e043250260d4cc66ee5c4
|
73744a740941b13641c0175c8e583b20cfd023a1
|
/analysis/books/16_audience_models_SI.R
|
8fc294716555f48374556d764e85ddc1ca28cf53
|
[] |
no_license
|
mllewis/WCBC_GENDER
|
8afe092a60852283fd2aa7aea52b613f7b909203
|
ed2d96361f7ad09ba70b564281a733da187573ca
|
refs/heads/master
| 2021-12-25T22:41:21.914309
| 2021-12-22T19:08:36
| 2021-12-22T19:08:36
| 248,584,454
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,618
|
r
|
16_audience_models_SI.R
|
# predict reviews with other measures using glmer; make plots for SI
library(tidyverse)
library(lme4)
library(here)
library(broom.mixed)
REVIEWS_DATA_PATH <- here("data/processed/other/amazon_gender_scores.csv")
IBDB_TIDY_PATH <- here("data/processed/other/ibdb_tidy.csv")
BOOK_MEANS_PATH <- here("data/processed/books/gender_token_type_by_book.csv")
MODEL_OUTFILE <- here("data/processed/other/audience_mixed_effect_models.csv")
PLOT_DATA_OUTFILE <- here("data/processed/other/audience_plot_data.csv")
# our measures of book gender
book_means <- read_csv(BOOK_MEANS_PATH)
book_content_measures <- book_means %>%
select(book_id, corpus_type, token_gender_mean) %>%
spread("corpus_type", "token_gender_mean")
ibdb_data <- read_csv(IBDB_TIDY_PATH)
review_data <- read_csv(REVIEWS_DATA_PATH) %>%
left_join(book_content_measures) %>%
left_join(ibdb_data) %>%
filter(!(book_id %in% c("L105", "L112"))) # "Journey" and "Anno's Journey" are pictures books
make_model_pretty <- function(md, type) {
pretty_model <- md %>%
tidy() %>%
rename(Beta = estimate, SE = std.error, Z = statistic, p = p.value) %>%
mutate(p = case_when(p < .001 ~ "<.001",
TRUE ~ as.character(round(p, 3)))) %>%
mutate_if(is.numeric, ~ round(.,2)) %>%
select(term, everything()) %>%
select(-group)%>%
slice(-(n()))
pretty_model_reordered <- pretty_model%>%
mutate(p = ifelse(p == "1", ">.99", p),
model_type = type)
pretty_model_reordered
}
ibdb_amazon_model <- glmer(cbind(n_female_token, n_male_token) ~
child_gender + (1|book_id),
family = binomial(link ='logit'),
data = review_data)
adressee_char_model <- glmer(cbind(n_female_token, n_male_token) ~
char_only + (1|book_id),
family = binomial(link = 'logit'),
control = glmerControl(optimizer = "bobyqa"),
data = review_data)
adressee_content_model <- glmer(cbind(n_female_token, n_male_token) ~
no_char + (1|book_id),
family = binomial(link = 'logit'),
control = glmerControl(optimizer = "bobyqa"),
data = review_data)
additive_adressee_model <- glmer(cbind(n_female_token, n_male_token) ~
char_only + no_char + (1|book_id),
family = binomial(link ='logit'),
data = review_data)
model_params <- map2_df(list(ibdb_amazon_model, adressee_char_model, adressee_content_model, additive_adressee_model),
list("ibdb", "char", "content", "char_content"),
make_model_pretty)
model_params_tidy <- model_params %>%
mutate(term = case_when(term == "child_gender" ~ "prop. female, Hudson Kam and Matthewson (2017)",
term == "char_only" ~ "character score",
term == "no_char" ~ "content score",
TRUE ~ term))
write_csv(model_params_tidy, MODEL_OUTFILE)
# plot data
by_book_review_data <- review_data %>%
group_by(book_id, n_reviews_total, n_reviews_gendered,
prop_review_gendered, char_only, no_char, child_gender) %>%
summarize(addressee_gender_score_token =
sum(n_female_token)/(sum(n_female_token) +
sum(n_male_token)))
write_csv(by_book_review_data, PLOT_DATA_OUTFILE)
|
01b7aa93bd4d7e149372256151658511407a433e
|
fda4611281af0bc21fd28b376e26266a101bdd3b
|
/IHDP/hill-2011/code/example1.code.R
|
e5d4a8c86b83a03c22dde7eb470fb930dd1b6bb9
|
[
"MIT"
] |
permissive
|
imkemayer/causal-inference-missing
|
bc75749682273ef2a5536540ff4af84cd7276ee8
|
e440ecc7084bc80205f2291c1a9e1dff55723325
|
refs/heads/master
| 2021-09-27T00:13:39.469262
| 2021-09-22T13:32:50
| 2021-09-22T13:32:50
| 185,151,389
| 8
| 2
| null | 2020-10-13T14:08:06
| 2019-05-06T08:05:23
|
HTML
|
UTF-8
|
R
| false
| false
| 10,026
|
r
|
example1.code.R
|
### try fitting various matching methods to the data and compare to bart
### save treatment effect estimates and s.e.'s (approx for matching) for all
### save balance summaries for unmatched and all matched
set.seed(3847293)
library(MatchIt)
############# first load the data
load("example.data")
source("functions.R")
covs.cont=c("bw","momage","nnhealth","birth.o","parity","moreprem","cigs","alcohol","ppvt.imp")
covs.catF=c("bwg","female","mlt.birtF","b.marryF","livwhoF","languageF","whenprenF","drugs","othstudy","momed4F","siteF","momraceF","workdur.imp")
covsF=c(covs.cont,covs.catF)
ncovs.cont=length(covs.cont)
usek = na.omit(ihdp[!(ihdp$treat==1 & ihdp$dose400==0),c("iqsb.36","dose400",covsF)])
######################## run methods, record balance, tes, ses ########################
formyF = as.formula(usek[,c("iqsb.36","dose400",covsF)])
formy0 = as.formula(usek[,c(1:2)])
# estimate the pscore with an additive model
formzF = as.formula(usek[,c("dose400",covsF)])
modqx = glm(formzF,data=usek,family="binomial",x=TRUE)
qx = modqx$fitted
## now figure total # covs out from the design matrix because of the complications caused by the factors
ncovs=ncol(modqx$x)-1
library(mgcv)
# took the smooths off of parity, birth.o and moreprem because # unique cats was so small
# i was getting error messages
form.gamF = as.formula("dose400 ~ s(bw) + s(momage) + s(nnhealth) + birth.o + parity + moreprem + s(cigs) + s(alcohol) + s(ppvt.imp) + bwg + female + mlt.birtF + b.marryF + livwhoF + languageF + whenprenF + drugs + othstudy + momed4F + siteF + momraceF + workdur.imp")
## now pscore estimated using GAM (easier to do outside of matchit)
qxg = gam(form.gamF,data=usek,family="binomial")$fitted
### now create pscore model to try to emphasize the terms i'm going to be judging balance on
form.quadz <- as.formula("dose400 ~ (bw + momage + nnhealth + birth.o + parity + moreprem + cigs + alcohol + ppvt.imp + bwg + female + mlt.birtF + b.marryF + livwhoF + languageF + whenprenF + drugs + othstudy + momed4F + siteF + momraceF + workdur.imp)^2 + I(bw^2) + I(momage^2) + I(nnhealth^2) + I(birth.o^2) + I(parity^2) + I(moreprem^2) + I(cigs^2) + I(alcohol^2) + I(ppvt.imp^2)")
modq0=glm(formula=form.quadz,data=usek,family="binomial",x=TRUE)
qxq=modq0$fitted
# pulling out most important terms from this regression and then adding quadratic
# terms only for those
form.red <- as.formula("dose400 ~ (bw + nnhealth + parity + moreprem + ppvt.imp + bwg + b.marryF + languageF + momed4F + siteF + momraceF)")
mod.red=glm(formula=form.red,data=usek,family="binomial",x=TRUE)
qx.red=mod.red$fitted
# dimnames(mod.red$x)[[2]]
rm(mod.red)
### now create model to set up all the terms i'm going to be judging balance on
form.bal <- as.formula("dose400 ~ (bw + momage + nnhealth + birth.o + parity + moreprem + cigs + alcohol + ppvt.imp + bwg + female + mlt.birtF + b.marryF + livwhoF + languageF + whenprenF + drugs + othstudy + momed4F + siteF + momraceF + workdur.imp)^2")
# balance in dist for continuous will be examined using QQ stats rather than squared terms
## specify the combination of options for matching
methods = c(rep("nearest",24),rep("optimal",12),rep("full",4),rep("iptw",4))
ratios = c(rep(c(1,2,3),8),rep(c(1,2,3),4),c(1,1,1))
which.link = c(rep(1,3),rep(2,3),rep(3,3),rep(4,3),rep(1,3),rep(2,3),rep(3,3),rep(4,3),rep(1,3),rep(2,3),rep(3,3),rep(4,3),c(1,2,3,4))
links = list(qx=qx,quad=qxq,gam=qxg,reduced=qx.red)
# to differentiate in the plot
# nearest:
labels = c(rep("N",24),rep("O",12),rep("F",4),rep("W",4))
n.methods = length(methods)+1
tes.400.dm = matrix(0,n.methods,2)
tes.400.ols = matrix(0,n.methods,2)
balb.400.mat = matrix(0,n.methods,3)
balc1.400.mat = matrix(0,n.methods,7)
balc2.400.mat = matrix(0,n.methods,7)
nms = c("no match",rep(NA,n.methods-1))
num.cont=9
num.cov=41
nc=sum(usek$dose400==0)
### FIRST OLS
tes.400.dm[1, ] = summary(lm(formula=formy0, data=usek))$coef[2,1:2]
tes.400.ols[1, ] = summary(lm(formula=formyF, data=usek))$coef[2,1:2]
#######################################################
############# now all the matching craziness
# default for nearest neighbor matching is no replacement
# only way to run optimal and full matching is with no replacement
### FIGURE OUT GAM OPTION
library(MatchIt)
## here we can iterate through the following options
# Loop over every candidate propensity-score configuration ("iptw", "full",
# nearest-neighbor variants, ...). For each one, record covariate balance and
# treatment-effect estimates in row k+1 of the result matrices.
# NOTE(review): `methods`, `links`, `which.link`, `ratios`, `form.bal`,
# `num.cont`, `num.cov` and balance.sum() are defined earlier in the file /
# sourced code and are not visible in this chunk.
for(k in 1:length(methods)){
#
if(methods[k]=="iptw"){
# Inverse-probability-of-treatment weighting (ATT form): treated units get
# weight 1, controls get ps/(1-ps); control weights are then rescaled to
# sum to the number of controls (nc).
# NOTE(review): links[[k-40]] implies the iptw entries start at k = 41 in
# `methods` -- confirm against the setup code above this chunk.
ps=links[[k-40]]
wts = rep(1,nrow(usek))
wts[usek$dose400==0] = ps[usek$dose400==0]/(1-ps[usek$dose400==0])
wts[usek$dose400==0] = wts[usek$dose400==0]*nc/sum(wts[usek$dose400==0])
# to trick matchit into calculating the balance statistics for us
# we first create matchit output for matching with appropriate
# distance measure then replace their weights with ours
m.out = matchit(form=form.bal, data=usek, distance=ps)
m.out$weights=wts
bal = balance.sum(m.out,num.cont=num.cont,num.cov=num.cov)
balb.400.mat[k+1,] = unlist(bal$bal.bin)
balc1.400.mat[k+1,] = unlist(bal$bal.cont1)
balc2.400.mat[k+1,] = unlist(bal$bal.cont2)
# Weighted regressions give the IPTW treatment-effect estimates (coef + SE).
tes.400.dm[k+1,] = summary(lm(formula=formy0,data=usek,weights=wts))$coef[2,1:2]
tes.400.ols[k+1,] = summary(lm(formula=formyF,data=usek,weights=wts))$coef[2,1:2]
cat(k,tes.400.ols[k+1,],"\n")
nms[k+1] = paste(methods[k],k-40,sep=".")
}
else{
if(methods[k]=="full"){
# Full matching (no ratio argument; full matching always uses all units).
m.out = matchit(form=form.bal, data=usek, method=methods[k], distance=links[[which.link[k]]])
nms[k+1] = paste(methods[k],ratios[k],which.link[k],sep=".")
}
if(methods[k]!="full"){
# Configurations 1-12 are matched WITH replacement (".R" suffix in the label).
if(k>0 & k<13){
m.out = matchit(form=form.bal, data=usek, method=methods[k], ratio=ratios[k], distance=links[[which.link[k]]],replace=TRUE)
nms[k+1] = paste(methods[k],ratios[k],which.link[k],"R",sep=".")
}
else{
m.out = matchit(form=form.bal, data=usek, method=methods[k], ratio=ratios[k], distance=links[[which.link[k]]])
nms[k+1] = paste(methods[k],ratios[k],which.link[k],sep=".")
}
}
# On the first pass, also record the *unmatched* balance in row 1.
if(k==1){
bal = balance.sum(m.out,num.cont=num.cont,num.cov=num.cov,matched=FALSE)
balb.400.mat[1,] = unlist(bal$bal.bin)
balc1.400.mat[1,] = unlist(bal$bal.cont1)
balc2.400.mat[1,] = unlist(bal$bal.cont2)
}
bal = balance.sum(m.out,num.cont=num.cont,num.cov=num.cov)
balb.400.mat[k+1,] = unlist(bal$bal.bin)
balc1.400.mat[k+1,] = unlist(bal$bal.cont1)
balc2.400.mat[k+1,] = unlist(bal$bal.cont2)
# Estimate effects on the matched sample, weighted by matchit's weights.
mdat = match.data(m.out, weights="wts")
tes.400.dm[k+1,] = summary(lm(formula=formy0,data=mdat,weights=wts))$coef[2,1:2]
tes.400.ols[k+1,] = summary(lm(formula=formyF,data=mdat,weights=wts))$coef[2,1:2]
cat(k,tes.400.ols[k+1,],"\n")
}
}
# Print balance summaries for all methods, labelled by configuration name.
cbind.data.frame(nms,round(balb.400.mat,2))
cbind.data.frame(nms,round(balc1.400.mat,2))
cbind.data.frame(nms,round(balc2.400.mat,2))
# look only at the treatment effect estimates with the best balance
# on the univariate statistics
## plot of all
## a small amount of jitter is added to make it easier to distinguish multiple
## observations at the same point
# Each plot row applies a progressively stricter balance criterion; only
# estimates from configurations passing the criterion are shown.
# NOTE(review): `labels` (plot symbols per method) is defined outside this chunk.
ind = balb.400.mat[,2]<.2 & balc1.400.mat[,1]<.2
res=tes.400.ols[ind,1]
plot(y=rep(1,length(res)),x=res,ylim=c(.5,6.5),xlab="treatment effect estimates",ylab="",yaxt="n",xlim=c(6.5,max(res)),mgp=c(2,.5,0),cex=1,pch=labels[ind])
axis(side=2,at=c(1,2,3,4,5),labels=c("Row 1","Row 2","Row 3","Row 4","Row 5"),las=1,mgp=c(4,.5,0),xlab="balance criteria")
text(x=c(6.6,7.6,8.6,9.6),y=rep(6.4,4),labels=c("max","max", "max","pct"),cex=.8)
text(x=c(6.6,7.6,8.6,9.6),y=rep(6,4),labels=c("STD(B)","STD(C)", "EQQmax(C)","STD(C2)>.1"),cex=.8)
text(x=c(6.6,7.6),y=rep(1,2),labels=c(".2",".2"),cex=.8)
#
ind = balb.400.mat[,2]<.15 & balc1.400.mat[,2]<.15
text(x=c(6.6,7.6),y=rep(2,2),labels=c(".15",".15"),cex=.8)
res=tes.400.ols[ind,1]
points(y=rep(2,sum(ind)),x=res,ylim=c(0,4),cex=1,pch=labels[ind])
ind = balb.400.mat[,2]<.13 & balc1.400.mat[,2]<.13 & balc1.400.mat[,7]<.15
text(x=c(6.6,7.6,8.6),y=rep(3,3),labels=c(".13",".13", ".15"),cex=.8)
res=tes.400.ols[ind,1]
points(y=rep(3,sum(ind)),x=res,ylim=c(0,4),cex=1,pch=labels[ind])
ind = balb.400.mat[,2]<.13 & balc1.400.mat[,2]<.13 & balc1.400.mat[,7]<.15 & balc2.400.mat[,3]/742<.2
text(x=c(6.6,7.6,8.6,9.6),y=rep(4,4),labels=c(".13",".13",".15",".2"),cex=.8)
res=tes.400.ols[ind,1]
points(y=rep(4,sum(ind)),x=res,ylim=c(0,4),cex=1,pch=labels[ind])
ind = balb.400.mat[,2]<.11 & balc1.400.mat[,2]<.11 & balc1.400.mat[,7]<.11 & balc2.400.mat[,3]/742<.2
text(x=c(6.6,7.6,8.6,9.6),y=rep(5,4),labels=c(".11",".11",".11",".2"),cex=.8)
res=tes.400.ols[ind,1]
points(y=rep(5,sum(ind)),x=res,ylim=c(0,4),cex=1,pch=labels[ind])
## add in line for BART estimate
# Reference line at the BART ATT estimate computed in the BART section below.
abline(v=12.9,col="red")
## add in the genmatch points (for these results see example1.code.gm.R)
points(y=c(1,1),x=c(13.2,13.6),cex=1,pch="G")
points(y=c(2,2),x=c(13.2,13.6),cex=1,pch="G")
points(y=c(3,3),x=c(13.2,13.6),cex=1,pch="G")
points(y=c(4,4),x=c(13.2,13.6),cex=1,pch="G")
points(y=c(5),x=c(13.2),cex=1,pch="G")
## 13.2 for intelligent, 13.6 for default
###########################################################################
## now BART
# Covariates used for the BART model: continuous first, then categorical.
covs.cont=c("bw","momage","nnhealth","birth.o","parity","moreprem","cigs","alcohol","ppvt.imp")
covs.cat=c("bwg","female","mlt.birt","b.marry","livwho","language","whenpren","drugs","othstudy","mom.lths","mom.hs","mom.coll","mom.scoll","site1","site2","site3","site4","site5","site6","site7","site8","momblack","momhisp","momwhite","workdur.imp")
covs=c(covs.cont,covs.cat)
ncovs=length(covs)
usek = na.omit(ihdp[!(ihdp$treat==1 & ihdp$dose400==0),c("iqsb.36","dose400",covs)])
# important to exclude the treated with "low" dose since "highdose=0"
# for them means something very different than it does for those in
# the control group
# Training matrix = treatment indicator + covariates (outcome dropped).
xt=as.matrix(usek[,-1])
# Test matrix = the treated units with the treatment indicator set to 0, so
# yhat.test predicts their *untreated* (counterfactual) outcomes.
xp=as.matrix(usek[usek$dose400==1,-1])
xp[,1]=0
y=as.numeric(usek[,1])
library(BayesTree)
bart.tot <- bart(x.train=xt, y.train=y, x.test=xp)
save.image()
# check convergence
plot(bart.tot$sigma)
#### results
# first just effect of treatment on treated
# Posterior draws of unit-level effects: fitted treated outcome minus
# fitted counterfactual, for each treated unit (columns) per draw (rows).
diffs=bart.tot$yhat.train[,usek$dose400==1]-bart.tot$yhat.test
mndiffs=apply(diffs,1,mean)
mean(mndiffs)
# 12.9
sd(mndiffs)
# 1.96
# get a sense of t.e. heterogeneity
hist(apply(diffs,2,mean))
|
735dad15c69e71672b1cfd2022bc168e3c81c4a2
|
eb14e64b646bb33b8f89da5b75bd4b7531aecdad
|
/man/bmProxy-class.Rd
|
69fcd7ac3d83857046b009060aba3865ebdd2bd2
|
[] |
no_license
|
5vxssbdahves/RBMproxy
|
1bc26f6362d0ac22353123b44760a22245fbd8a5
|
430ab921fb8c2225f80c1f3a8039fe576ebe9d77
|
refs/heads/master
| 2020-12-25T10:38:18.018974
| 2014-02-10T09:24:24
| 2014-02-10T09:24:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 844
|
rd
|
bmProxy-class.Rd
|
\docType{class}
\name{bmProxy-class}
\alias{bmProxy-class}
\title{CLASS bmProxy}
\description{
bmProxy Class uses the RESTAPI to communicate with the
BrowserMob Proxy.
}
\details{
  bmProxy is a generator object. A new bmProxy instance
  is created by calling its `new` method. The
  user-definable slots (with their default values) are:
}
\section{Slots}{
\describe{ \item{\code{host}:}{Object of class
\code{"character"}, giving the ip of the remote server.
Defaults to localhost} \item{\code{port}:}{Object of
class \code{"numeric"}, the port of the remote server on
which to connect.} \item{\code{selHost}:}{Object of class
\code{"character"}, giving the ip of the remote server.
Defaults to localhost} \item{\code{selPort}:}{Object of
class \code{"numeric"}, the port of the remote server on
which to connect.} }
}
\examples{
\dontrun{
}
}
|
2ec9b049deb869417d44ce81616ce5a6c1525ec0
|
9dd1972f821e4b8c07836e4a44566c1fb332a68d
|
/pig/allele_validation.R
|
1ef99d8690c094fa4f7f4f51de7483593de9cff4
|
[] |
no_license
|
russ-dvm/scripts
|
d2ec5c7618ae180a72f238bcc5b1f5de291ffaa2
|
63b177f96a7b23add9b69fc60cca563978448aff
|
refs/heads/master
| 2020-12-14T04:51:46.704108
| 2018-05-31T19:19:10
| 2018-05-31T19:19:10
| 51,455,634
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,095
|
r
|
allele_validation.R
|
library(ggplot2)
# Compare allele frequencies and association P-values between Illumina
# genotyping and pooled sequencing, for diseased vs. normal animals.
# NOTE(review): the input path is user-specific -- parameterize before reuse.
alleles <- read.table("~/sandbox/allele_freq_comparison/results_for_R.txt", h=T)
##CALCULATE TOTALS
alleles$total_aff <- alleles$AFF_alt + alleles$AFF_ref
alleles$total_unaff <- alleles$UNAFF_alt + alleles$UNAFF_ref
##CALCULATE ALLELE FREQUENCIES
# MAF = alt / (alt + ref), computed per platform (ill/seq) and status.
alleles$ill_MAF_diseased <- alleles$ill_alt_diseased/(alleles$ill_alt_diseased + alleles$ill_ref_diseased)
alleles$ill_MAF_normal <- alleles$ill_alt_normal/(alleles$ill_alt_normal + alleles$ill_ref_normal)
alleles$seq_MAF_diseased <- alleles$seq_alt_diseased/(alleles$seq_alt_diseased + alleles$seq_ref_diseased)
alleles$seq_MAF_normal <- alleles$seq_alt_normal/(alleles$seq_alt_normal + alleles$seq_ref_normal)
###################
##PLOT
# Scatter plots + correlations of platform agreement; the green y = x line
# marks perfect agreement.
ggplot(alleles, aes(x=P_illumina_fisher, y=P_seq_fisher)) + geom_point() + geom_smooth() + theme_bw()
ggplot(alleles,aes(x=seq_MAF_diseased, y=ill_MAF_diseased)) + geom_point() + theme_bw() + geom_smooth(method="lm", se=T) +geom_abline(intercept = 0, slope = 1, color="green")
cor(alleles$seq_MAF_diseased, alleles$ill_MAF_diseased, use="complete")
# Restrict to well-genotyped sites (> 240 Illumina alleles in normals).
alleles$total_ill_normal_alleles <- alleles$ill_alt_normal + alleles$ill_ref_normal
deeply.sequenced <- subset(alleles, alleles$total_ill_normal_alleles > 240)
cor(deeply.sequenced$ill_MAF_normal, deeply.sequenced$seq_MAF_normal)
cor(deeply.sequenced$ill_MAF_diseased, deeply.sequenced$seq_MAF_diseased)
cor(deeply.sequenced$P_seq_chisq, deeply.sequenced$P_illumina_chisq)
ggplot(deeply.sequenced, aes(x=seq_MAF_normal, y=ill_MAF_normal)) + geom_point() + theme_bw() + geom_smooth(method="lm", se=T) +geom_abline(intercept = 0, slope = 1, color="green") + geom_point(aes(x=P_illumina_fisher, y=P_seq_fisher), color = "red")
ggplot(deeply.sequenced, aes(x=P_illumina_fisher, y=P_seq_fisher)) + geom_point() + theme_bw() + geom_smooth(method="lm", se=T) + geom_abline(intercept = 0, slope =1, color = "green") + geom_text(aes(label=SNP))
ggplot(alleles, aes(x=seq_MAF_diseased, y=ill_MAF_diseased)) + geom_point()
#####POPOOLATION
ggplot(deeply.sequenced, aes(x=P_seq_chisq, y=P_popoolation)) + geom_point()
head(alleles)
|
d7d7bc64c4c50c601b3b97b4571f7bfdbd22c2e9
|
4ea3ee297cc89dd3b807efcca79c14b29c9e6eb5
|
/man/DeLorean.Rd
|
6f5869c93fe0d9190d6d6ae8a47abdca4e0fbb89
|
[] |
no_license
|
bgoodri/DeLorean
|
125a14af08373207f714a306ea6cc1a84d67058b
|
0e44768658d004aa4e86dfa656771020b799e5fa
|
refs/heads/master
| 2020-04-01T15:38:59.955595
| 2018-10-16T10:11:00
| 2018-10-16T10:11:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 208
|
rd
|
DeLorean.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/package.r
\docType{package}
\name{DeLorean}
\alias{DeLorean}
\alias{DeLorean-package}
\title{DeLorean.}
\description{
DeLorean.
}
|
07e247809ea912a653386985a78354cd74745a10
|
184180d341d2928ab7c5a626d94f2a9863726c65
|
/valgrind_test_dir/arcDistMat-test.R
|
ee86b06d88691662a822b6ce7519cfd368209506
|
[] |
no_license
|
akhikolla/RcppDeepStateTest
|
f102ddf03a22b0fc05e02239d53405c8977cbc2b
|
97e73fe4f8cb0f8e5415f52a2474c8bc322bbbe5
|
refs/heads/master
| 2023-03-03T12:19:31.725234
| 2021-02-12T21:50:12
| 2021-02-12T21:50:12
| 254,214,504
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 183
|
r
|
arcDistMat-test.R
|
# Auto-generated RcppDeepState test harness for arcDistMat().
# Logs every call's arguments into the global `data.env` environment (so the
# fuzzer can replay/inspect them), then delegates to the compiled routine.
function (X, r)
{
e <- get("data.env", .GlobalEnv)
# Append this call's arguments to the running log for "arcDistMat".
e[["arcDistMat"]][[length(e[["arcDistMat"]]) + 1]] <- list(X = X,
r = r)
# Call through to the C++ implementation registered by the signnet package.
.Call("_signnet_arcDistMat", X, r)
}
|
ba20dccc139401eead91573de9bcb0d90a643388
|
65b3a69f095f85495e00c96e0b8263606b93eff8
|
/R files/sace6.R
|
9a1167d0cb6f8011bee2e78c0e56cb40ed3d791d
|
[] |
no_license
|
nbbrd/jd3-rtests
|
57786d88e967a96f211bbc50ad80c20ea2aba7c8
|
3ee3582f4e230a0e5ff4d248ae9ec159ea35722e
|
refs/heads/master
| 2021-05-07T20:57:33.385080
| 2019-10-08T08:48:58
| 2019-10-08T08:48:58
| 108,976,840
| 8
| 2
| null | 2019-10-08T08:49:00
| 2017-10-31T09:58:01
|
HTML
|
UTF-8
|
R
| false
| false
| 2,079
|
r
|
sace6.R
|
# Seasonal-adjustment examples using the jd3 (JDemetra+ 3) R bindings:
# weekly US claims, then daily UK casualties decomposed three different ways
# (fractional airline model, X-11, and STL).
source("./R files/jd3_init.R")
source("./R files/jd3_fractionalairline.R")
source("./R files/jd3_x11.R")
source("./R files/jd3_stl.R")
source("./R files/jd3_holidays.R")
# Weekly series: periodic airline model with outlier detection.
usclaims<-read.table("./Data/usclaims.txt")
w<-jd3_periodicAirline(usclaims[,2], periods=365.25/7, outliers=c("ao", "ls", "wo"), criticalValue = 6)
print(dictionary(w))
print(result(w,"outliers"))
# Daily UK casualties, modelled on the log scale.
uk<-read.table("./Data/ukcasualties.txt")
y<-log(uk[,1])
# Build a UK holiday calendar (fixed dates plus Easter-relative days).
jhol<-jd3_holidays()
add(jhol, "NewYear")
add(jhol, c(1,2))
add(jhol, "Christmas")
add(jhol, "Christmas", offset=+1)
add(jhol, "Easter", offset=-2)
add(jhol, "EasterMonday")
add(jhol, c(5, 29))
add(jhol, c(8, 28))
hol<-jd3_holidaysMatrix(jhol, "2005-01-01", length = length(y), type = "Default")
# Periodic airline model with weekly + annual periodicities and holiday regressors.
d<-jd3_periodicAirline(y, x=hol, periods=c(7, 365.25), outliers=c("ao", "ls"), criticalValue = 5)
print(result(d,"parameters"))
print(result(d,"outliers"))
print(result(d, "b"))
print(result(d, "t"))
# Linearized series (regression effects removed), then two-step decomposition:
# weekly first, annual on the weekly-adjusted series.
y<-result(d, "lin")
c<-jd3_fractionalAirlineDecomposition(y, period=7, TRUE)
c1<-jd3_fractionalAirlineDecomposition(result(c,"sa"), period=365.25, adjust = FALSE)
# The final decomposition is given by
w<-result(c,"s")
t<-result(c1,"t")
sa<-result(c1,"sa")
s<-result(c1,"s")
i<-result(c1,"i")
seatsdecomp<-cbind(y,t,sa,w,s,i)
y<-exp(y)
# sa of daily series with X11
a<-jd3_x11(y, period=7, multiplicative = TRUE, seas0="S3X15", seas1="S3X15", trendLength = 9)
a1<-jd3_x11(result(a,"d11"), period=365.25, multiplicative = TRUE, trendLength = 367)
# The final decomposition is given by
w<-result(a,"d10")
t<-result(a1,"d12")
sa<-result(a1,"d11")
s<-result(a1,"d10")
i<-result(a1,"d13")
x11decomp<-cbind(y,t,sa,w, s,i)
# STL decomposition, again weekly then annual.
b<-jd3_stl(y, period=7, multiplicative = TRUE, swindow=15, twindow=9)
b1<-jd3_stl(result(b,"sa"), period=365, multiplicative = TRUE)
# The final decomposition is given by
w<-result(b,"s")
t<-result(b1,"t")
sa<-result(b1,"sa")
s<-result(b1,"s")
i<-result(b1,"i")
stldecomp<-cbind(y,t,sa,w,s,i)
# Overlay the weekly seasonal component from the three methods.
plot(x11decomp[3000:3070, "w"], type="l")
lines(exp(seatsdecomp[3000:3070, "w"]), col="red")
lines(stldecomp[3000:3070, "w"], col="blue")
|
5e5331d7434d5e7216de21c3263a3f371f610a4d
|
8cb3dd50c76e465ea06101dc53ed7c5a12517a30
|
/scripts/visualization.R
|
ba354ca6280d5bfbf8465ef557eb5032599c01f6
|
[] |
no_license
|
The-Institute-for-Perception/Course_2021
|
0d743d3d28fb9de4a52b04c1d25c803ddc92241f
|
3915d8177cac96ef35fd03c2fc1aac9dc77bcfea
|
refs/heads/main
| 2023-02-18T17:26:33.744627
| 2021-01-19T15:25:19
| 2021-01-19T15:25:19
| 331,021,079
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,050
|
r
|
visualization.R
|
### Plot descriptive data and output to PowerPoint
###
### Author: Will Russ
###
### This script demonstrates how to plot descriptive data and then export the plots to PowerPoint
###
# load libraries
library(tidyverse)
# load local functions
source("functions/make_descriptive_plot.R")
source("functions/add_graphical_slide.R")
# load data
protein_bar_descriptive_data <- readxl::read_xlsx("data/protein_bar_descriptive_data.xlsx")
# create the plots
# Long format -> one nested data frame per product -> one plot per product;
# deframe() yields a named list of plots keyed by product.
protein_bar_descriptive_plots <- protein_bar_descriptive_data %>%
gather(key = "attribute", value = "value", -Product) %>%
group_by(Product) %>%
nest() %>%
mutate(plot = map(data, make_descriptive_plot, flip_axes = TRUE, polar = FALSE)) %>%
select(Product, plot) %>%
deframe()
# create the PowerPoint presentation
presentation <- officer::read_pptx()
# NOTE(review): the assignment inside iwalk() rebinds a *local* `presentation`
# in each callback; whether the outer deck accumulates slides depends on
# officer's reference semantics -- verify the output file contains all slides
# (a reduce()/for-loop accumulation would make this explicit).
iwalk(protein_bar_descriptive_plots, function(plot, name) {
presentation <- presentation %>%
add_graphical_slide(plot, name)
})
# output/save the presentation
print(presentation, target = "output/visualization.pptx")
|
d8706e4d9748f60d755fda6f3d070f23ee03a8a5
|
de1d0064b49664f711f217028046cc73007d5d9f
|
/man/twtr_auth.Rd
|
1f6ec95c1f4c15da4b909b4fc8b260bdce6be272
|
[] |
no_license
|
yutannihilation/twihttr
|
aeeab32c31372dc345e5a008127ca02e6bf3a4f9
|
5bf5b7a7792c7e82e5865310e668e6b96a89bcc4
|
refs/heads/master
| 2021-01-20T21:06:10.949602
| 2016-11-30T03:16:14
| 2016-11-30T03:16:14
| 65,869,824
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 233
|
rd
|
twtr_auth.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/auth.R
\name{twtr_auth}
\alias{twtr_auth}
\title{Get Authentication}
\usage{
twtr_auth(key = NULL, secret = NULL)
}
\description{
Generate OAuth1 token
}
|
87c2b25b971c22ee4e7f0c5f1f5ae90854c6cc5b
|
26c0d024a84f6bcf461eb5f4ae97e7ca1fd9eaba
|
/man-roxygen/common.R
|
7dc241782c639be36d0674006003fe43f323ae06
|
[] |
no_license
|
rBatt/trawlData
|
11deca8341155dbd09afbdb0fcab046e4ff06c3f
|
266c5cda94b78790474ed8a5b3e8a66b6bde04d8
|
refs/heads/master
| 2021-01-21T08:57:44.568307
| 2018-06-24T20:55:19
| 2018-06-24T20:55:19
| 44,205,244
| 8
| 3
| null | 2017-06-28T20:43:56
| 2015-10-13T21:14:11
|
R
|
UTF-8
|
R
| false
| false
| 76
|
r
|
common.R
|
#' \itemize{\item <%=common.name%> the common name of the organism sampled}
|
43aa55e3a851a6efc74f7846af48b2bc07dd8ca6
|
a031335810ac3183e9ba7d2cdc668299a71ae983
|
/Code_OtherML.r
|
420945b82d7da8ac93101e8689e191a47283d7db
|
[] |
no_license
|
Joonhyun0982/2017-Travelers-Case-Competition
|
e700c5a8e7f550ff42fd418ff8df0b30f64de0f5
|
55afdbff7fd01e308ebfbe84fbdbf52bab67bd9b
|
refs/heads/master
| 2020-03-15T22:08:45.334599
| 2018-06-12T02:24:09
| 2018-06-12T02:24:09
| 132,367,828
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,365
|
r
|
Code_OtherML.r
|
#=============================================================================
# 2017 Travelers Case Competition
# Method: Other Machine Learning Methods
#=============================================================================
# ============== #
# 1. Preparation #
# ============== #
# install.packages('caret', dependencies = TRUE)
# install.packages("randomForest")
# install.packages("gbm")
# install.packages("neuralnet")
library("caret") #folds creation
library("class") #for KNN
library("rpart") #for classification tree
library("randomForest") #for random forest
library("gbm") #for gbm
library("neuralnet") #Neural Network
library("ROCR") #for ROC, AUC
library("mice") #for missing data imputation on test set
# Load and clean the training data, then one-hot encode the categoricals.
train = read.csv("Train.csv")
head(train)
# Data cleaning
# Drop records with unknown cancellation flag (-1), implausible ages (>= 100),
# and tenure-at-residence exceeding the insured's age; then drop NA rows.
train <- subset(train, train$cancel!=-1)
train <- subset(train, train$ni.age < 100)
train <- subset(train, train$ni.age - train$len.at.res > 0)
train <- train[complete.cases(train[,1:18]),] #deleting rows with NA. Can be done by train <- na.omit(train) as well
# Constructing dummy variables for qualitative variables
train1 <- train
train1$ni.genderM <- ifelse(train1$ni.gender == "M", 1, 0)
train1$sales.channelBroker <- ifelse(train1$sales.channel == "Broker", 1, 0)
train1$sales.channelOnline <- ifelse(train1$sales.channel == "Online", 1, 0)
train1$sales.channelPhone <- ifelse(train1$sales.channel == "Phone", 1, 0)
train1$coverage.typeA <- ifelse(train1$coverage.type == "A", 1, 0)
train1$coverage.typeB <- ifelse(train1$coverage.type == "B", 1, 0)
train1$coverage.typeC <- ifelse(train1$coverage.type == "C", 1, 0)
train1$dwelling.typeCondo <- ifelse(train1$dwelling.type == "Condo", 1, 0)
train1$dwelling.typeHouse <- ifelse(train1$dwelling.type == "House", 1, 0)
train1$dwelling.typeTenant <- ifelse(train1$dwelling.type == "Tenant", 1, 0)
train1$creditlow <- ifelse(train1$credit == "low", 1, 0)
train1$creditmedium <- ifelse(train1$credit == "medium", 1, 0)
train1$credithigh <- ifelse(train1$credit == "high", 1, 0)
train1$house.colorblue <- ifelse(train1$house.color == "blue", 1, 0)
train1$house.colorred <- ifelse(train1$house.color == "red", 1, 0)
train1$house.colorwhite <- ifelse(train1$house.color == "white", 1, 0)
train1$house.coloryellow <- ifelse(train1$house.color == "yellow", 1, 0)
# Zip-code prefix ranges mapped to state indicator variables.
train1$zip.codeAZ <- ifelse(train1$zip.code >= 85000 & train1$zip.code < 86000, 1, 0)
train1$zip.codeCO <- ifelse(train1$zip.code >= 80000 & train1$zip.code < 81000, 1, 0)
train1$zip.codeDC <- ifelse(train1$zip.code >= 20000 & train1$zip.code < 21000, 1, 0)
train1$zip.codeIA <- ifelse(train1$zip.code >= 50000 & train1$zip.code < 51000, 1, 0)
train1$zip.codePA <- ifelse(train1$zip.code >= 15000 & train1$zip.code < 16000, 1, 0)
train1$zip.codeWA <- ifelse(train1$zip.code >= 98000 & train1$zip.code < 99000, 1, 0)
# Removing redundant variables
train1 <- subset(train1, select = -c(ni.gender, sales.channel, coverage.type, dwelling.type, credit, house.color, year, zip.code))
# data frame for auc of each methods
results <- data.frame(Method = as.numeric(), AUC = as.numeric())
# ================================== #
# 2. KNN along with cross validation #
# ================================== #
# KNN with 10-fold cross-validation over odd k from 1 to 29.
train2 <- subset(train1, select=-c(id, cancel))
# Convert the dependent var to factor. Normalize the numeric variables
train.cancel <- factor(train1$cancel)
ind <- sapply(train2, is.numeric)
train2[ind] <- lapply(train2[ind], scale)
# Creating folds randomly with equal size and no overlapping between folds
folds <- createFolds(train1$id, k = 10, list = TRUE, returnTrain = FALSE)
# KNN with k values from 1 to 29
# accuracy/auc collect one row per fold, one column per k (k = 2j-1).
accuracy <- data.frame(fold = as.numeric())
auc <- data.frame(fold = as.numeric())
for(i in 1:10) {
k_fold_test <- train2[folds[[i]],]
k_fold_train <- train2[-folds[[i]],]
train.def <- train.cancel[-folds[[i]]]
test.def <- train.cancel[folds[[i]]]
for(j in 1:15) {
knn <- knn(k_fold_train, k_fold_test, train.def, k=2*j-1, prob=TRUE)
accuracy[i,j+1] <- sum(test.def == knn)/nrow(k_fold_test)
# knn's "prob" attribute is the vote share of the *winning* class; convert
# it to P(cancel = 1) before computing AUC.
prob <- as.data.frame(knn)
prob$pred <- ifelse(knn == 1, attr(knn,"prob"), 1-attr(knn,"prob"))
auc[i,j+1] <- as.numeric(performance(prediction(prob$pred, test.def),"auc")@y.values)
}
accuracy[i,1] <- i
auc[i,1] <- i
}
# changing variable names of accuracy/auc matrix
# Row 11 holds the across-fold mean for each k.
accuracy[i+1,1] <- "mean"
for(j in 1:15) {
colnames(accuracy)[j+1] <- paste0("k",2*j-1)
accuracy[i+1,j+1] <- mean(accuracy[-(i+1),j+1])
}
auc[i+1,1] <- "mean"
for(j in 1:15) {
colnames(auc)[j+1] <- paste0("k",2*j-1)
auc[i+1,j+1] <- mean(auc[-(i+1),j+1])
}
# Report the mean CV AUC for k = 25 (chosen manually from the table above).
results[1,1] <- "KNN"
results[1,2] <- auc[11,"k25"]
# ======== #
# 3. Trees #
# ======== #
############### Classification Tree
modCT <- rpart(cancel ~ .-id, method="class", data=train1)
printcp(modCT) # display the results
plotcp(modCT) # visualize cross-validation results
summary(modCT) # detailed summary of splits
# plot tree
plot(modCT, uniform=TRUE, main="Classification Tree for cancel")
text(modCT, use.n=TRUE, all=TRUE, cex=.8)
# ROC curve, AUC
# predict(..., type default "prob")[,2] = P(cancel = 1); AUC is in-sample.
pred <- prediction(predict(modCT, train1)[,2], train1$cancel)
perf <- performance(pred, "tpr", "fpr")
plot(perf)
abline(0,1)
performance(pred,"auc")
results[2,1] <- "Classification Tree"
results[2,2] <- performance(pred,"auc")@y.values
############### Regression Tree
# Same response treated as numeric (anova splitting).
modRT <- rpart(cancel ~ .-id, method="anova", data=train1)
printcp(modRT) # display the results
plotcp(modRT) # visualize cross-validation results
summary(modRT) # detailed summary of splits
rsq.rpart(modRT) # visualize cross-validation results
# plot tree
plot(modRT, uniform=TRUE, main="Regression Tree for cancel")
text(modRT, use.n=TRUE, all=TRUE, cex=.8)
# ROC curve, AUC
pred <- prediction(predict(modRT, train1), train1$cancel)
perf <- performance(pred, "tpr", "fpr")
plot(perf)
abline(0,1)
performance(pred,"auc")
results[3,1] <- "Regression Tree"
results[3,2] <- performance(pred,"auc")@y.values
############### Random Forest
# randomForest needs a factor response for classification mode.
train1$cancel <- as.factor(train1$cancel)
modRF <- randomForest(cancel ~ .-id, data=train1)
summary(modRF)
getTree(modRF, k=2, labelVar=TRUE)
# ROC curve, AUC
# Out-of-bag vote fractions give (roughly) honest probabilities here.
pred <- prediction(modRF$votes[,2], train1$cancel)
perf <- performance(pred, "tpr", "fpr")
plot(perf)
abline(0,1)
performance(pred,"auc")
results[4,1] <- "Random Forest"
results[4,2] <- performance(pred,"auc")@y.values
# =========== #
# 4. Boosting #
# =========== #
# Restore the numeric 0/1 response (gbm's bernoulli distribution requires a
# numeric response; train1$cancel was converted to a factor for randomForest).
train1$cancel <- train$cancel
modGBM <- gbm(cancel~.-id, data=train1, shrinkage=0.01, distribution = 'bernoulli', cv.folds=5, n.trees=3000, verbose = FALSE)
# check the best iteration number (CV-optimal number of boosting iterations)
best.iter = gbm.perf(modGBM, method="cv")
best.iter
summary(modGBM)
# Partial-dependence plot for each predictor at the CV-optimal iteration.
# FIX: the original looped over 1:length(modGBM), i.e. the number of list
# components of the fitted gbm object, not the number of predictors.
for (i in seq_along(modGBM$var.names)) {
  plot.gbm(modGBM, i, best.iter)
}
# ROC curve, AUC
# FIX: evaluate predictions at best.iter; the original omitted n.trees, so
# predict.gbm used all 3000 (potentially overfit) trees for the AUC.
pred <- prediction(predict(modGBM, train1, n.trees = best.iter), train1$cancel)
perf <- performance(pred, "tpr", "fpr")
plot(perf)
abline(0,1)
performance(pred,"auc")
results[5,1] <- "Generalized Boosted Models"
results[5,2] <- performance(pred,"auc")@y.values
# ================== #
# 5. Neural Networks #
# ================== #
# Neural network on the scaled predictors (train2) from the KNN section.
train2$cancel <- train1$cancel
# Build an explicit "cancel ~ x1 + x2 + ..." formula by pasting column names.
n <- names(train2)
f <- as.formula(paste("cancel ~", paste(n[!n %in% "cancel"], collapse = " + ")))
# Two hidden layers with 3 and 2 neurons; logistic output (linear.output=FALSE).
modNN <- neuralnet(f, data=train2, hidden=c(3,2), stepmax = 1e+06, linear.output=FALSE)
plot(modNN)
# ROC curve, AUC (computed on the training data, i.e. in-sample)
pred <- prediction(modNN$net.result, train2$cancel)
perf <- performance(pred, "tpr", "fpr")
plot(perf)
abline(0,1)
performance(pred,"auc")
results[6,1] <- "Neural Networks"
results[6,2] <- performance(pred,"auc")@y.values
# ====================== #
# 6. Logestic Regression #
# ====================== #
# Logistic regression on a hand-selected subset of the dummy-coded predictors.
mod <- glm(cancel ~ creditlow +sales.channelBroker +creditmedium +zip.codeDC +n.children
+zip.codePA +claim.ind +ni.age +len.at.res +ni.marital.status +tenure +n.adults
+zip.codeCO, family=binomial ,data=train1)
summary(mod)
# ROC curve, AUC (in-sample, using fitted probabilities)
pred <- prediction(mod$fitted.values, train1$cancel)
perf <- performance(pred, "tpr", "fpr")
plot(perf)
abline(0,1)
performance(pred,"auc")
results[7,1] <- "Logestic Regression"
results[7,2] <- performance(pred,"auc")@y.values
# Final comparison table of AUCs across all methods.
results
|
c19f4c7d74d0e313931517c3ad9ce6739a9b3ab9
|
3aea7bdf5abf8298bf0c2508cdd13ee87aeebf48
|
/plot1.R
|
74d972744a2a7a23753d8cc15f55b24895b13624
|
[] |
no_license
|
Tomasyzy/ExploratoryDataAnalysis
|
e7af8026c8bbcbaba67ad070d0bf7f11e1cdb56a
|
7d8f258705eea6e568b89073c51f6bf5b3a0a1d8
|
refs/heads/master
| 2020-05-18T03:56:53.568485
| 2014-09-07T10:05:50
| 2014-09-07T10:05:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 504
|
r
|
plot1.R
|
# plot1.R -- histogram of Global Active Power for 2007-02-01 and 2007-02-02,
# saved to plot1.png.
data3 <- read.table("household_power_consumption.txt", header = TRUE, sep = ";")

# Keep only the observations for the two target dates (dd/m/yyyy format).
# FIX: replaces the original three-step mask/subset/rbind dance with a single
# %in% filter over the Date column -- same rows, same order.
data9 <- subset(data3, Date %in% c("1/2/2007", "2/2/2007"))

# The power column may be read as factor or character (missing values are
# coded "?"); going through as.character() makes the numeric conversion safe
# either way ("?" becomes NA, as before).
data9$Global_active_power <- as.numeric(as.character(data9$Global_active_power))

#Plot 1
hist(data9$Global_active_power, main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)", col = "orangered1")
dev.copy(png, file = "plot1.png")
dev.off()
|
ab8789e21062d6464c70478342e1756e39c2f62e
|
69a919e839126b7a1a44ccfc2388990a1fabaa8f
|
/Project_data/Plotting.R
|
a01b77dc05ad8436aea50072bf396098ce7dd00a
|
[] |
no_license
|
dkopp3/Insect_Traits
|
71455ca4686e35c514edcda8f8797285c9ff422b
|
9c085e8bbf27431174b0ef091d79faa05d6f1003
|
refs/heads/main
| 2023-02-28T18:47:16.218857
| 2021-02-16T16:17:17
| 2021-02-16T16:17:17
| 332,040,595
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 18,720
|
r
|
Plotting.R
|
##################
#Create plots
#################
library(ggplot2)
library(rethinking)
library(randomForest)
# Extract the legend ("guide-box") grob from a ggplot object.
g_legend <- function(a.gplot) {
  # Build the full gtable for the plot, then locate the legend grob by name.
  built <- ggplot_gtable(ggplot_build(a.gplot))
  grob_names <- sapply(built$grobs, function(g) g$name)
  built$grobs[[which(grob_names == "guide-box")]]
}
# Working and figure directories (user/machine specific -- adjust per machine).
project.directory <- "C:/Users/AllenLabWorkstation/Dropbox/Dissertation/Chapter_3_Distance_Deposition/Manuscript/Ecography_Submission/Project_data"
setwd(project.directory)
#directory to write files
fig.dir <- "C:/Users/AllenLabWorkstation/Dropbox/Dissertation/Chapter_3_Distance_Deposition/Manuscript/Figures"
#shapefile for marginal effects - avalabe form NHDPlusV2
#vpushp <- read_sf("C:/Users/AllenLabWorkstation/Dropbox/Dissertation/StreamResiliencyRCN/VPU_NAD83.shp")
###########################################################
# Random Forest Plots
###########################################################
# Saved optimized-model files, one per response variable; "empty" marks the
# unused panel slots in the 4x3 plot layouts below.
# FIX: removed a stray leading space from "rf_SS_25_optimize" -- load() would
# otherwise look for a file literally named " rf_SS_25_optimize".
optifiles <- c("rf_eptdo_optimize", "rf_SS_25_optimize", "empty", "empty",
               "rf_ESP_p_optimize", "rf_VOM_p_optimize", "rf_DSF_p_optimize", "empty",
               "rf_BSLM_p_optimize", "rf_FDH_p_optimize", "rf_LSLS_p_optimize", "rf_FSS_p_optimize")
# Panel titles, aligned element-for-element with optifiles.
plotnames <- c("FLYING", "SS25", "EMPTY", "EMPTY",
               "ESP", "VOM", "DSF", "EMPTY",
               "BSL", "FDH", "LSL", "FSS")
#mtry
#####
# read in optimized models
# PVE-vs-ntree curves for each mtry value, one panel per response model.
# Each saved file is expected to load an object named rf_model_list.
windowsFonts(A = windowsFont("Times New Roman"))
windows(7, 7)
par(mfcol = c(4, 3))
namcnt <- 1
for (i in optifiles){
if(i == "empty"){
# Blank placeholder panel to keep the 4x3 layout aligned.
plot(1, 1, type = "n", xaxt = "n", yaxt = "n")
} else {
#i <- "rf_eptdo_optimize"
load(i)
par(mar = c(2, 2, 1.5, 0.5))
par(bty = "n")
count <- 1
for (j in unique(rf_model_list$optimize_mtry$mtry)){
if(count == 1){
# First mtry value sets up the panel axes/limits; the rest only add lines.
plot(0, 0, type="n", col="black", xlab="ntree", ylab="PVE",
xlim = c(min(rf_model_list$optimize_mtry$ntree),
max(rf_model_list$optimize_mtry$ntree)),
ylim = c(min(rf_model_list$optimize_mtry$PVE),
max(rf_model_list$optimize_mtry$PVE)),
main = plotnames[namcnt],
family = "A")
lines(rf_model_list$optimize_mtry$ntree[rf_model_list$optimize_mtry$mtry == j],
rf_model_list$optimize_mtry$PVE[rf_model_list$optimize_mtry$mtry == j],
col=count, lwd = 2)
} else {
lines(rf_model_list$optimize_mtry$ntree[rf_model_list$optimize_mtry$mtry == j],
rf_model_list$optimize_mtry$PVE[rf_model_list$optimize_mtry$mtry == j],
col=count, lwd = 2)
}
count <- count + 1
}
}
namcnt <- namcnt + 1
}
# create legend legend
# Standalone legend window (uses the mtry values from the last model loaded).
windows(4, 4)
par(mar = c(2, 4, 1.5, 0.5))
par(bty = "n")
par(family = "A")
plot(0, 0)
legend("bottom", horiz = F,
legend = paste("p=",as.character(unique(rf_model_list$optimize_mtry$mtry))),
lty = 1, col = 1:length(unique(rf_model_list$optimize_mtry$mtry)), lwd = 3)
#savePlot(filename = paste0(fig.dir, "/mtry_legend.jpg"), type = "jpg")
#dev.off(which = dev.cur())
#ntry
#####
# Correlation-vs-ntree panels (one per model); the largest ntree value is
# highlighted with a filled point.
windows(7, 7)
par(mfcol = c(4, 3))
windowsFonts(A = windowsFont("Times New Roman"))
namcnt <- 1
for (i in optifiles){
if(i == "empty"){
plot(1, 1, type = "n",xaxt = "n", yaxt = "n")
} else {
par(mar = c(2, 4, 3, 0.5))
par(bty = "n")
load(i)
plot(rf_model_list[["optimize_ntree"]]$ntree,
rf_model_list[["optimize_ntree"]]$corr,
xlab = "",
ylab = "",
main = plotnames[namcnt],
ylim = c(min(rf_model_list[["optimize_ntree"]]$corr), 1),
family = "A")
points(max(rf_model_list[["optimize_ntree"]]$ntree),
max(rf_model_list[["optimize_ntree"]]$corr),
pch = 19, cex = 2)
}
namcnt <- namcnt + 1
}
################
#savePlot(filename = paste0(fig.dir, "/all_ntree.jpg"), type = "jpg")
#dev.off(which = dev.cur())
#Importance plots
#####
# Variable-importance dot charts (scaled permutation importance, type = 1),
# one panel per model, with human-readable predictor labels.
windows(7, 10)
par(mfcol = c(4, 3))
par(oma = c(1.2, 1, 1.5, 1))
windowsFonts(A = windowsFont("Times New Roman"))
namcnt <- 1
for(q in optifiles){
if(q == "empty"){
plot(1, 1, type = "n",xaxt = "n", yaxt = "n")
} else {
par(mar = c(2, 4, 3, 0.5))
par(bty = "n")
#q <- "rf_eptdo_optimize"
load(q)
# NOTE(review): colvec is assigned but never used below -- confirm it can be
# removed (viridis() presumably comes from a package attached at the top).
colvec <- viridis(6, begin = 0, end = 0.9)
#extract importance
imp <- importance(rf_model_list[["randomforest"]], scale = T, type = 1)
impvar <- rownames(imp)[order(imp[, 1], decreasing = TRUE)]
# Replace raw variable names with display labels (order matches imp rows).
rownames(imp) <- c("EMBED", "AREA", "SLOPE",
"URBAN", "AGRI", "FINES",
"ELEV", "REGION", "PRECIP", "PRECIP_CoV",
"TEMP_MA", "TEMP_CoV", "HYDROCLASS")
vpuloc <- which(names(sort(imp[, 1], decreasing = T)) == "REGION")
par(mai = c(0.5, 0.5, 0, 0))#, font.axis = 1, xaxt = "s")
par(bty="n")
dotchart(sort(imp[, 1]),
color = "black",
bg = "black",
pt.cex = 0.7,
xlim = c(min(imp[, 1]) - 5, max(imp[, 1]) + 5),
cex = 0.6,
xaxt = "n",
lcolor = "white",
xlab = "",
bty="n",
family = "A")
mtext(plotnames[namcnt], family = "A")
}
namcnt<-namcnt+1
}
#partial dependence plots
#####
# identify most important variables for each model.
# output table used of ordering Figure 1
# Build a table of the 5 most important predictors per model, with a
# "plot_threshold" = (partial-dependence range) / (response range), used to
# decide which marginal-effect panels are worth showing (and Figure 1 order).
# NOTE(review): unlike the plotting loops above, this loop does NOT guard
# against q == "empty"; load("empty") will error unless a file named "empty"
# exists in the working directory -- confirm.
out <- data.frame()
for(q in optifiles){
#q <- "rf_eptdo_optimize"
load(q)
#extract importance
imp <- randomForest::importance(rf_model_list[["randomforest"]], scale = T, type = 1)
impvar <- rownames(imp)[order(imp[, 1], decreasing = TRUE)]
rownames(imp) <- c("EMBED", "AREA", "SLOPE",
"URBAN", "AGRI", "FINES",
"ELEV", "REGION", "PRECIP",
"PRECIP_CoV", "TEMP_MA",
"TEMP_CoV", "HYRDOCLASS")
# Top 5 non-"vpu" predictors, processed in reverse rank order.
for (i in grep("vpu", impvar, invert = T)[5:1]) {
a <- partialPlot(rf_model_list[["randomforest"]],
pred.data = rf_model_list[["traindat"]],
x.var = impvar[i],
plot = F)
# Reconstruct the response column name from the file name (e.g.
# "rf_ESP_p_optimize" -> "ESP_p"; special cases for "SS" and "eptdo").
resp <- unlist(lapply(strsplit(q, "_"),"[[", 2))
resp <- ifelse(resp != "SS" & resp != "eptdo",
paste0(resp,"_p"),
ifelse(resp=="SS", paste0(resp,"_25"), resp))
plot_threshold <- (max(a$y)-min(a$y))/(max(rf_model_list$traindat[,resp]) - min(rf_model_list$traindat[,resp]))
temp <- data.frame(q, resp, var = impvar[i], plot_threshold, rank = i,
labels = names(sort(imp[,1]))[length(sort(imp[,1])):(length(sort(imp[,1]))-5)][i])
out <- rbind(out, temp)
}
}
out$plot_threshold<-round(out$plot_threshold,2)
# One data frame per model, for the plotting loop below.
out <- split(out, out$q)
# plot each q
# can save each plot inside for loop
# One window per model: stacked partial-dependence panels for its top
# predictors, line width encoding importance rank (thicker = more important).
# The categorical "exp" predictor gets a special text-label panel.
for(q in out){
#q <- out$rf_BSLM_p_optimize
load(as.character(q[1, 1]))
#initialize plot window
windows(2.5, 3)
windowsFonts(A = windowsFont("Times New Roman"))
par(oma = c(2, 0.5, 4, 0.2))
par(mfrow = c(ifelse(nrow(q) < 3, 3, nrow(q)), 1))
par(font.axis = 2, xaxt = "s" )
q <- q[order(q$rank),]
vars <- as.character(q$var)
# The x-axis is drawn only on the lowest continuous (non-"exp") panel.
draw.axis <- max(q[q$var != "exp", "rank"])
#offset to fit labels in plot area
offest <- ifelse(q[1,1] == "rf_SS_25_optimize", 0.4, 0.025)
# Common x-range across all continuous predictors so panels are comparable.
if(length(vars[vars != "exp"]) > 1){
min.val <- min(apply(rf_model_list[["traindat"]][, vars[vars != "exp"]], 2, min))
max.val <- max(apply(rf_model_list[["traindat"]][, vars[vars != "exp"]], 2, max))
} else {
min.val <- min(rf_model_list[["traindat"]][, vars[vars != "exp"]])
max.val <- max(rf_model_list[["traindat"]][, vars[vars != "exp"]])
}
for (i in 1:nrow(q)){
par(mar = c (1, 2.8, 0.5, 0.5))
#add more room at the bottom
if(q[i, "rank"] == draw.axis & q[i, "rank"] != max(q[i, "rank"])){
par(mar = c (2, 2.8, 0.5, 0.5))
}
if (vars[i] != "exp"){
a <- partialPlot(rf_model_list[["randomforest"]],
pred.data = rf_model_list[["traindat"]],
x.var = vars[i],
plot = F)
plot(a$x, a$y, type = "l",
xlim = c(min.val, max.val), ylim = c(min(a$y) - offest, max(a$y) + offest),
cex.lab = 1.3, cex.axis = 1.2, lwd = (7 - q[i, "rank"]),
main = as.character(q[i, "labels"]),cex.main = 0.9,
col = "black",
las = 1, xlab = "Predictor Value (Mean-Centered & Scaled)", axes = F, family="A")
#axis twice to labe min and max only
axis(2, at = c(min(a$y) - offest, (max(a$y) + min(a$y))/2, max(a$y) + offest), labels = FALSE, las = 1)
axis(2, at = c(min(a$y) - offest, max(a$y) + offest),
labels = round(c(min(a$y) - offest, max(a$y) + offest), 2), las = 1)
}
else if(vars[i] == "exp"){
# Categorical predictor: spread its levels at equal intervals across the
# shared x-range and print the level codes as text instead of a line.
a <- partialPlot(rf_model_list[["randomforest"]],
pred.data = rf_model_list[["traindat"]],
x.var = vars[i], plot = F)
a <- data.frame(a)
a <- a[order(a$y), ]
#equal intervals along continuous variable range
a <- data.frame(a, seq = seq(min.val, max.val, by = abs(min.val-max.val)/length(a$x))[-1])
plot(a$y ~ a$seq, ylab = "", ylim = c(min(a$y) - offest, max(a$y) + offest),
type = "n", col = "white", axes = F, xlab = "",
main = as.character(q[i, "labels"]),
cex.main = 0.9, family = "A")
text(a$seq, a$y, labels = a$x, las = 1, col = "black", family = "A")
#axis twice to label min and max only
axis(2, at = c(min(a$y) - offest, (max(a$y) + min(a$y))/2, max(a$y) + offest),
labels = FALSE, las = 1, family = "A")
axis(2, at = c(min(a$y) - offest, max(a$y) + offest),
labels = round(c(min(a$y), max(a$y) + offest), 2),
las = 1, family = "A")
}
#draw axis
if(q[i, "rank"] == draw.axis){
axis(1, family = "A")
}
}
mtext(unique(q$q), 3, outer = T, cex= 1.3, line = 1, family = "A")
#savePlot(filename = paste0(fig.dir, "/MarEff_",q$q[1],"_all.jpg"),
# type = c("jpg"),
# device=dev.cur())
#dev.off(which = dev.cur())
}
#rank legend
windows()
par(family = "A")
plot(0,0, type = "n")
legend("center", legend = 1:5, lwd = 5:1)
#savePlot(filename = paste0(fig.dir, "/MarEff_legend.jpg"),
# type = c("jpg"),
# device=dev.cur())
#################
# Marginal effects of hydrologic region
# write shapefiles; maps produced in ArcGIS
#####
# For every saved optimized model, compute the partial dependence on the
# hydrologic region ("vpu") and join it onto the region shapefile (vpushp).
for (q in optifiles){
  #q <- "rf_eptdo_optimize"   # example, for interactive debugging
  # Loads rf_model_list (random forest + training data) from disk.
  load(q)
  # extract permutation importance (type 1) to find vpu's rank
  imp <- importance(rf_model_list[["randomforest"]], scale = T, type = 1)
  impvar <- rownames(imp)[order(imp[, 1], decreasing = TRUE)]
  # Human-readable predictor labels.
  # NOTE(review): assumes the importance rows are always in this exact order;
  # also the relabeled `imp` is not used again in this loop -- confirm.
  rownames(imp) <- c("EMBED", "AREA", "SLOPE",
                     "URBAN", "AGRI", "FINES",
                     "ELEV", "REGION", "PRECIP", "PRECIP_CoV",
                     "TEMP_MA", "TEMP_CoV", "HYROCLASS")
  # Marginal effect of each hydrologic region (categorical predictor).
  a <- partialPlot(rf_model_list[["randomforest"]],
                   pred.data = rf_model_list[["traindat"]],
                   x.var = "vpu", rug = F, plot = F)
  a <- data.frame(VPUID = a$x, mar_eff = a$y, rank = which(impvar == "vpu"))
  # Join onto the region polygons and write one shapefile per model.
  a <- merge(vpushp, a, by = "VPUID")
  write_sf(a, paste0(fig.dir,"/mar_eff_", q,".shp"))
}
################
###########################################################
# Intercept Only Models
# Posterior Distributions
# trait density models
###########################################################
# eptdo -- posterior density of the intercept by site-condition class
#####
# Loads the fitted model object VE.EPTDO.
load(paste0("DisturbanceIntercepts_", "eptdo"))
s <- extract.samples(VE.EPTDO, n = 8000, clean.names = F)
# 95% credible intervals for the condition intercepts (precis rows 3:5).
# NOTE(review): CrI is computed but not used in this section.
CrI <- precis(VE.EPTDO, depth = 2, prob = 0.95)@output[c(3:5), c("Mean", "lower 0.95", "upper 0.95")]
#windows(1.5,1.5)
windowsFonts(A = windowsFont("Times New Roman"))
# Intercepts are back-transformed with logistic() to the probability scale.
# Columns 3, 2, 4 of s$a are the least-disturbed, intermediate, and
# most-disturbed intercepts respectively (see the legend block below).
minlab <- round(min(data.frame(x = logistic(s$a[,c(3,2,4)]))), 2)
maxlab <- round(max(data.frame(x = logistic(s$a[,c(3,2,4)]))), 2)
midlab <- round((maxlab+minlab)/2, 2)
# Overlaid posterior densities: white = least, grey = intermediate,
# black = most disturbed.
flying <- ggplot() +
  geom_density(aes(x=x), fill="white",
               data = data.frame(x = logistic(s$a[,3])), alpha=.5) +
  geom_density(aes(x=x), fill="grey",
               data=data.frame(x = logistic(s$a[,2])), alpha=.5)+
  geom_density(aes(x=x), fill="black",
               data=data.frame(x = logistic(s$a[,4])), alpha=.5)+
  theme(panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        panel.background = element_blank(),
        axis.line = element_line(colour = "black"))+
  xlab("Flying") +
  ylab("Density") +
  theme(axis.text = element_text(family = "A", size = 10),
        axis.title = element_text(family = "A", face = "bold")) +
  # Only label the min, mid, and max of the sampled range.
  scale_x_continuous(breaks = c(minlab, midlab, maxlab),
                     labels = c(minlab, midlab, maxlab))
###############
#savePlot(filename = paste0("EPTDO","Rplot"), type = c("jpg"), device=dev.cur(), res = 300)
flying
#ss_25 -- posterior density of the SS25 intercept (metres) by condition
#####
load(paste0("DisturbanceIntercepts_", "SS_25"))
s <- extract.samples(VE.SS_25, n = 3000, clean.names = F)
# NOTE(review): precis rows c(2:4) here vs c(3:5) in the eptdo section --
# confirm which rows hold the condition intercepts for each model.
# CrI is computed but not used in this section.
CrI <- precis(VE.SS_25,depth = 2, prob = 0.95)@output[c(2:4), c("Mean", "lower 0.95", "upper 0.95")]
#windows(2,2)
windowsFonts(A = windowsFont("Times New Roman"))
# SS25 is plotted on the raw (identity) scale, unlike the logit-scale traits.
minlab <- round(min(data.frame(x = s$a[,c(3,2,4)])), 2)
maxlab <- round(max(data.frame(x = s$a[,c(3,2,4)])), 2)
midlab <- round((maxlab+minlab)/2, 2)
# Overlaid posterior densities: white = least, grey = intermediate,
# black = most disturbed (see the legend block below).
SS25 <- ggplot() +
  geom_density(aes(x=x), fill="white", data = data.frame(x =s$a[,3]), alpha=.5) +
  geom_density(aes(x=x), fill="grey", data=data.frame(x = s$a[,2]), alpha=.5)+
  geom_density(aes(x=x), fill="black", data=data.frame(x = s$a[,4]), alpha=.5)+
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
        panel.background = element_blank(), axis.line = element_line(colour = "black"))+
  scale_y_continuous(limits=c(0,NA), expand=c(0,0)) +
  xlab("SS25 (m)")+
  ylab("Density") +
  theme(axis.text = element_text(family="A", size = 10),
        axis.title = element_text(family = "A", face = "bold")) +
  scale_x_continuous(breaks = c(minlab, midlab, maxlab),
                     labels = c(minlab, midlab, maxlab))
###############
#savePlot(filename = paste0("ss25", "Rplot"), type = c("jpg"), device = dev.cur())
SS25
# Build a stand-alone legend keyed by site condition: draw a throw-away
# density plot with a fill legend, then pull just the legend grob out of it
# with g_legend() for use in the combined figure.
#####
cond_samples <- rbind(
  data.frame(x = logistic(s$a[, 3]), Condition = "Least Disturbed"),
  data.frame(x = logistic(s$a[, 2]), Condition = "Intermediate"),
  data.frame(x = logistic(s$a[, 4]), Condition = "Most Disturbed")
)
legplot <- ggplot() +
  geom_density(aes(x = x, fill = Condition), data = cond_samples, alpha = .5) +
  scale_fill_manual(values = c("white", "grey", "black")) +
  theme(legend.text = element_text(family = "A", size = 10),
        legend.title = element_text(family = "A", size = 12),
        legend.title.align = 0.5,
        legend.key.size = unit(0.75, "lines"),
        legend.justification = "center") +
  labs(fill = "Site Condition")
legend <- g_legend(legplot)
##########
#Traits -- same posterior-density panel repeated for each trait model
#####
# Plotting order of the trait model files.
tras <- c("DisturbanceIntercepts_ESP", "DisturbanceIntercepts_VOM",
          "DisturbanceIntercepts_DSF",
          "DisturbanceIntercepts_BSLM", "DisturbanceIntercepts_FDH",
          "DisturbanceIntercepts_LSLS", "DisturbanceIntercepts_FSS")
windowsFonts(A = windowsFont("Times New Roman"))
out <- data.frame()                      # collects back-transformed CrIs per model
plotlist <- list(flying, SS25, legend)   # panels built in the sections above
count<-length(plotlist)+1                # next free slot in plotlist
for (i in tras){
  #i<-"DisturbanceIntercepts_DSF"   # example, for interactive debugging
  # Loads the fitted model object VE.traits.
  load(i)
  #windows(2,2)
  # Axis label = trait code from the file name, truncated to 3 characters.
  # NOTE(review): xaxisName is a single string, so length(xaxisName) is
  # always 1 (never 3) and the truncation branch always runs -- was
  # nchar(xaxisName) intended?
  xaxisName <- unlist(lapply(strsplit(i, "_"), "[[", 2))
  xaxisName <- ifelse(length(xaxisName)!=3,substring(xaxisName,1,3), xaxisName)
  s <- extract.samples(VE.traits, n = 3000, clean.names = F)
  # Raw (logit-scale) samples: columns 3/2/4 labeled least/fair/most here.
  samp.out <- data.frame(i, s$a[,c(3, 2, 4)])
  names(samp.out) <- c("model", "least", "fair", "most")
  # Posterior means and 95% CrIs, back-transformed to the probability scale.
  CrI <- round(precis(VE.traits,depth = 2, prob = 0.95)@
                 output[c(3:5), c("Mean", "lower 0.95", "upper 0.95")],2)
  z <- apply(CrI,2,function(x) round(logistic(x),3))
  # NOTE(review): these row labels ("fair","least","most") assume precis rows
  # 3:5 are in that order, which differs from the 3/2/4 column order used for
  # samp.out above -- confirm the mapping.
  rownames(z) <- c("fair", "least", "most")
  out <- rbind(out, data.frame(z,i))
  minlab <- round(min(data.frame(x = logistic(s$a[,c(3,2,4)]))), 2)
  maxlab <- round(max(data.frame(x = logistic(s$a[,c(3,2,4)]))), 2)
  midlab <- round((maxlab+minlab)/2, 2)
  # Overlaid posterior densities: white = least, grey = intermediate,
  # black = most disturbed (see the legend block above).
  plotlist[[count]] <- ggplot() +
    geom_density(aes(x=x),
                 fill="white",
                 data = data.frame(x = logistic(s$a[,3])),
                 alpha=.5) +
    geom_density(aes(x=x),
                 fill="grey",
                 data=data.frame(x = logistic(s$a[,2])),
                 alpha=.5) +
    geom_density(aes(x=x),
                 fill="black",
                 data=data.frame(x = logistic(s$a[,4])),
                 alpha=.5) +
    theme(panel.grid.major = element_blank(),
          panel.grid.minor = element_blank(),
          panel.background = element_blank(),
          axis.line = element_line(colour = "black")) +
    scale_y_continuous(limits=c(0, NA), expand=c(0, 0)) +
    xlab(xaxisName)+
    ylab("Density") +
    theme(axis.text = element_text(family="A", size = 10),
          axis.title = element_text(family = "A", face = "bold")) +
    scale_x_continuous(breaks = c(minlab, midlab, maxlab),
                       labels = c(minlab, midlab, maxlab))
  count<-count+1
  #savePlot(filename = paste0(i,"Rplot"), type = c("jpg"), device=dev.cur())
}
###############
# Arrange all panels in a 3-column grid; the last cell (10) is the legend.
#windows(4.5, 4.5)
grid.arrange(grobs = plotlist,
             layout_matrix = rbind(c(1, 4, 7),
                                   c(2, 5, 8),
                                   c(3, 6, 9),
                                   c(NA,NA,10)))
#################
#savePlot(filename = "disturbance", type = c("jpg"), device = dev.cur())
|
7cdf2ed9dc45db3a00af9c5018bd3b6ce7982163
|
e87b48bbd92d3a8626b86ee0049807ed02f2323e
|
/R/bind_tweets.R
|
9b9e4481153996d4329c3a82b1b8317808b96c0b
|
[
"MIT"
] |
permissive
|
justinchuntingho/CrowdtangleR
|
64f4c267f01621991e33c63794d0867b05666ab0
|
a5fc58427126b299478995b70659e25a8d712109
|
refs/heads/master
| 2023-06-02T12:05:52.111160
| 2021-06-23T22:33:23
| 2021-06-23T22:33:23
| 379,681,974
| 13
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,029
|
r
|
bind_tweets.R
|
#' Bind information stored as JSON files
#'
#' This function binds information stored as JSON files. By default, it binds into a data frame containing tweets (from data_*id*.json files). If users is TRUE, it binds into a data frame containing user information (from users_*id*.json).
#'
#' @param data_path string, file path to directory of stored tweets data saved as data_*id*.json and users_*id*.json
#' @param user If `FALSE`, this function binds JSON files into a data frame containing tweets; data frame containing user information otherwise
#' @param verbose If `FALSE`, messages are suppressed
#' @return a data.frame containing either tweets or user information
#' @export
#'
#' @examples
#' \dontrun{
#' # bind json files in the directory "data" into a data frame containing tweets
#' bind_tweets(data_path = "data/")
#' # bind json files in the directory "data" into a data frame containing user information
#' bind_tweets(data_path = "data/", user = TRUE)
#' }
bind_tweets <- function(data_path, user = FALSE, verbose = TRUE) {
  # ls_files() errors if no matching files exist, so `files` is non-empty.
  if (user) {
    files <- ls_files(data_path, "^users_")
  } else {
    files <- ls_files(data_path, "^data_")
  }
  if (verbose) {
    pb <- utils::txtProgressBar(min = 0, max = length(files), initial = 0)
    # Release the progress-bar connection even if a read fails mid-loop.
    on.exit(close(pb), add = TRUE)
  }
  # Accumulate each file's rows in a preallocated list and bind once at the
  # end: calling bind_rows() inside the loop is O(n^2) in the file count.
  chunks <- vector("list", length(files))
  for (i in seq_along(files)) {
    filename <- files[[i]]
    json.df <- jsonlite::read_json(filename, simplifyVector = TRUE)
    if (user) {
      # User files nest the records under the "users" element.
      json.df <- json.df$users
    }
    chunks[[i]] <- json.df
    if (verbose) {
      utils::setTxtProgressBar(pb, i)
    }
  }
  .vcat(verbose, "\n")
  return(dplyr::bind_rows(chunks))
}
# List files under `data_path` (searched recursively) whose names match
# `pattern`; errors if nothing matches so callers never loop over nothing.
#
# data_path: directory to search.
# pattern:   regular expression matched against file names.
# Returns a character vector of full file paths.
ls_files <- function(data_path, pattern) {
  ## parse and bind
  files <-
    list.files(
      path = file.path(data_path),
      pattern = pattern,
      # Use TRUE rather than the reassignable shorthand T.
      recursive = TRUE,
      include.dirs = TRUE,
      full.names = TRUE
    )
  if (length(files) < 1) {
    stop(paste0("There are no files matching the pattern `", pattern, "` in the specified directory."), call. = FALSE)
  }
  return(files)
}
|
aea4597fc14d3a37c1bb23889c05e44f970f93a9
|
d05acb1d4eed8d421d4bd4ab4c9caa63f53446df
|
/man/deployTFModel.Rd
|
bf070d2adbb758252f269128204d4fc99f328eaa
|
[] |
no_license
|
btongson/rsconnect
|
80ccdffeeaee56de8b33a533996bc36a46fe18c9
|
f5854bb71464f6e3017da9855f058fe3d5b32efd
|
refs/heads/main
| 2023-07-07T11:48:31.574955
| 2021-08-10T17:08:46
| 2021-08-10T17:08:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,664
|
rd
|
deployTFModel.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/deployTFModel.R
\name{deployTFModel}
\alias{deployTFModel}
\title{Deploy a TensorFlow saved model}
\usage{
deployTFModel(modelDir, ...)
}
\arguments{
\item{modelDir}{Path to the saved model directory. MUST contain
\emph{saved_model.pb} or \emph{saved_model.pbtxt}}
\item{...}{Additional arguments to \code{\link[=deployApp]{deployApp()}}.}
}
\description{
Deploys a directory containing a Tensorflow saved model file.
}
\details{
Deploy a single Tensorflow saved model as a bundle. Should be passed a
directory that contains the \emph{saved_model.pb} or \emph{saved_model.pbtxt} file,
as well as any variables and assets necessary to load the model.
A saved model directory might look like this:\preformatted{./1/
./1/saved_model.pb or ./1/saved_model.pbtxt
./1/variables/
./1/variables/variables.data-00000-of-00001
./1/variables/variables.index
}
For information on creating saved models, see the Keras method
\code{\link[keras:export_savedmodel.keras.engine.training.Model]{keras::export_savedmodel.keras.engine.training.Model()}} or the TensorFlow
method \code{\link[tensorflow:export_savedmodel]{tensorflow::export_savedmodel()}}. If using the TensorFlow package for
R, the official \href{https://www.tensorflow.org/guide/saved_model}{TensorFlow guide for saving and restoring models}
may be useful.
}
\references{
\url{https://www.tensorflow.org/guide/saved_model}
}
\seealso{
Other Deployment functions:
\code{\link{applications}()},
\code{\link{deployAPI}()},
\code{\link{deployApp}()},
\code{\link{deployDoc}()},
\code{\link{deploySite}()}
}
\concept{Deployment functions}
|
5cc0c78c218d27cf79118c2c69be9803c1fe0c11
|
0f104ea64886750d6c5f7051810b4ee39fa91ba9
|
/playgrounds/decimal-comma.R
|
935f504097d4e58805a7c19db81a8e357ed6384f
|
[
"MIT"
] |
permissive
|
OuhscBbmc/REDCapR
|
3ca0c106e93b14d55e2c3e678f7178f0e925a83a
|
34f2154852fb52fb99bccd8e8295df8171eb1c18
|
refs/heads/main
| 2023-07-24T02:44:12.211484
| 2023-07-15T23:03:31
| 2023-07-15T23:03:31
| 14,738,204
| 108
| 43
|
NOASSERTION
| 2023-09-04T23:07:30
| 2013-11-27T05:27:58
|
R
|
UTF-8
|
R
| false
| false
| 4,377
|
r
|
decimal-comma.R
|
# Reference values that every parsed data set below is checked against.
d_expected <-
  tibble::tibble(
    record_id             = c(1, 2, 3, 4),
    last_name             = c("Valdez", "Rich", "Furtado", "Akbar"),
    height                = c(1.54, 1.84, 1.95, 1.61),
    weight                = c(52.3, 92.3, 123.4, 45.9),
    bmi                   = c(22.1, 27.3, 32.5, 17.7),
    demographics_complete = c(2L, 2L, 2L, 2L)
  )
# ---- both commas & dots -------------------------------------------------
# CSV carrying each numeric column twice: once dot-decimal, once
# comma-decimal (comma variants quoted so they survive the field separator).
s_both <-
'record_id,last_name,height_dot,height_comma,weight_dot,weight_comma,bmi_dot,bmi_comma,demographics_complete
1,Valdez,1.54,"1,54",52.3,"52,3",22.1,"22,1",0
2,Rich,1.84,"1,84",92.3,"92,3",27.3,"27,3",0
3,Furtado,1.95,"1,95",123.4,"123,4",32.5,"32,5",0
4,Akbar,1.61,"1,61",45.9,"45,9",17.7,"17,7",0'
# Read comma-decimal columns as character; they are parsed afterwards with
# an explicit comma-decimal locale (a comma locale on read_csv itself would
# conflict with the comma field separator).
col_types_both <- readr::cols_only(
  `record_id`             = readr::col_integer(),
  `last_name`             = readr::col_character(),
  `height_dot`            = readr::col_double(),
  `height_comma`          = readr::col_character(),
  `weight_dot`            = readr::col_double(),
  `weight_comma`          = readr::col_character(),
  `bmi_dot`               = readr::col_double(),
  `bmi_comma`             = readr::col_character(),
  `demographics_complete` = readr::col_double()
)
d_both <-
  readr::read_csv(
    file      = I(s_both),
    # locale= readr::locale(decimal_mark = ",")
    col_types = col_types_both
  ) |>
  dplyr::mutate(
    height_comma = readr::parse_number(height_comma, locale = readr::locale(decimal_mark = ",")),
    weight_comma = readr::parse_number(weight_comma, locale = readr::locale(decimal_mark = ",")),
    bmi_comma    = readr::parse_number(bmi_comma   , locale = readr::locale(decimal_mark = ",")),
  )
# Dot- and comma-parsed columns must agree with each other and with the
# expected reference values.
testit::assert(d_both$height_dot == d_both$height_comma)
testit::assert(d_both$weight_dot == d_both$weight_comma)
testit::assert(d_both$bmi_dot    == d_both$bmi_comma   )
testit::assert(d_both$height_dot == d_expected$height)
testit::assert(d_both$weight_dot == d_expected$weight)
testit::assert(d_both$bmi_dot    == d_expected$bmi   )
# ---- commas -------------------------------------------------
# All numerics comma-decimal: here a comma-decimal locale CAN be passed to
# read_csv directly because the values are quoted.
s_commas <-
'record_id,last_name,height,weight,bmi,demographics_complete
1,Valdez,"1,54","52,3","22,1",0
2,Rich,"1,84","92,3","27,3",0
3,Furtado,"1,95","123,4","32,5",0
4,Akbar,"1,61","45,9","17,7",0'
col_types_commas <- readr::cols_only(
  `record_id` = readr::col_integer(),
  `last_name` = readr::col_character(),
  `height` = readr::col_double(),
  `weight` = readr::col_double(),
  `bmi` = readr::col_double(),
  `demographics_complete` = readr::col_double()
)
d_commas <-
  readr::read_csv(
    file = I(s_commas),
    locale= readr::locale(decimal_mark = ","),
    col_types = col_types_commas
  )
# Parsed values must match the expected reference values.
testit::assert(d_commas$height == d_expected$height)
testit::assert(d_commas$weight == d_expected$weight)
testit::assert(d_commas$bmi == d_expected$bmi )
# ---- dots -------------------------------------------------
# All numerics dot-decimal (the readr default locale).
s_dots <-
'record_id,last_name,height,weight,bmi,demographics_complete
1,Valdez,"1.54","52.3","22.1",0
2,Rich,"1.84","92.3","27.3",0
3,Furtado,"1.95","123.4","32.5",0
4,Akbar,"1.61","45.9","17.7",0'
col_types_dots <- readr::cols_only(
  `record_id` = readr::col_integer(),
  `last_name` = readr::col_character(),
  `height` = readr::col_double(),
  `weight` = readr::col_double(),
  `bmi` = readr::col_double(),
  `demographics_complete` = readr::col_double()
)
# Read once with no locale argument...
d_dots <-
  readr::read_csv(
    file = I(s_dots),
    # locale= readr::locale(decimal_mark = ","),
    col_types = col_types_dots
  )
testit::assert(d_dots$height == d_expected$height)
testit::assert(d_dots$weight == d_expected$weight)
testit::assert(d_dots$bmi == d_expected$bmi )
# ...and once with an explicit default locale; both must give the same result.
d_dots_null <-
  readr::read_csv(
    file = I(s_dots),
    locale = readr::default_locale(),
    col_types = col_types_dots
  )
testit::assert(d_dots_null$height == d_expected$height)
testit::assert(d_dots_null$weight == d_expected$weight)
testit::assert(d_dots_null$bmi == d_expected$bmi )
# # ---- using validation from dictionary ----------------------------------------
# url   <- "https://bbmc.ouhsc.edu/redcap/api/"
# token <- "33DDFEF1D68EE5A5119DDF06C602430E"
#
# m <- REDCapR::redcap_metadata_read(url, token)$data
#
# m |>
#   dplyr::select(
#     field_name,
#
#   )
|
fa45c26d417edaa0019dfaa0dbf8f23820e39fdc
|
0e576756a3abeeabf00250c01530b1048d19e444
|
/Tessa/real_data.R
|
151d8c1264b9e57d49610f872b5e729e2a7accd5
|
[] |
no_license
|
renzy93/TESSA
|
4cc24c42afb971837601be62325118d2d63c0b16
|
9887b20cc54a06d8b1eb8d5fed5c4e1c52229d4d
|
refs/heads/master
| 2023-05-02T04:55:47.072122
| 2021-01-14T16:12:55
| 2021-01-14T16:12:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,130
|
r
|
real_data.R
|
# Tessa driver script: reads expression data, encoded CDR3 embeddings and
# CDR3 metadata from the command line, runs the Tessa MCMC model, and plots
# the resulting clusters.
#
# Command-line arguments (in order):
#   args[1] directory holding the Tessa R source files
#   args[2] expression csv       args[3] encoded-contigs csv
#   args[4] cdr3 csv             args[5] output directory
#   args[6] cluster by sample?   args[7] fixed-b csv path, or the string 'NA'
args=commandArgs(trailingOnly = TRUE)
exp_file=args[2]
contigs_file=args[3]
cdr3_file=args[4]
save=args[5]
is_sampleCluster=as.logical(args[6])
fixed_b=args[7]
# Hyper-parameter constants fed into hyper_priors below.
xi=1e+25
g=0.001
initialize_cluster_factor=6
source(paste(args[1],'update.R',sep='/'))
source(paste(args[1],'initialization.R',sep='/'))
source(paste(args[1],'MCMC_control.R',sep='/'))
source(paste(args[1],'utility.R',sep='/'))
source(paste(args[1],'post_analysis.R',sep='/'))
library(MASS)
library(LaplacesDemon)
library(Rtsne)
# the users need to provide these data:
# The columns/rows/length of them should be matched up wherever applicable
# exp_file: expression data, cells on columns, and genes on rows.
# e can be constructed by PCA or t-SNE, the first row is the first PC, the second row is the second PC, etc
# contigs_file: encoded CDR3 values, cells on columns, and embeddings on rows.
# cdr3: character vectors of CDR3 sequences
# save: a file dir to store tessa results
# (optional) sample_id: a column vector of sample categories. If is_sampleCluster=TRUE, users must provide an additional
#            column next to the cdr3 column.
# (optional) fixed_b: a vector of pre-defined b. The vector must be numerical and has the length of TCR embeddings.
exp_data=read.csv(exp_file,row.names=1,stringsAsFactors=F)
n=ncol(exp_data)
# Keep only the most variable genes (top 10% by SD) before t-SNE.
tmp=apply(exp_data,1,sd)
e=t(Rtsne(t(exp_data[tmp>quantile(tmp,0.9),1:n]),dims=3)$Y) # Run TSNE
colnames(e)=colnames(exp_data)[1:n]
contigs_encoded=read.csv(contigs_file,stringsAsFactors=F)
# NOTE(review): this shadows base::t() with a data variable; later t() calls
# still dispatch to the function, but the name is confusing.
t=t(contigs_encoded[1:n,-1])
meta=read.csv(cdr3_file,header=TRUE,stringsAsFactors=F)
cdr3=meta$cdr3
if(is_sampleCluster){
  sample_id=meta$sample
}else{
  sample_id=NULL
}
# Optional user-supplied b vector; the literal string 'NA' means "none".
if(fixed_b!='NA'){
  b=read.csv(fixed_b,header=TRUE,stringsAsFactors=F)$b
}else{
  b=NULL
}
# the users need to provide these parameters, here are the suggested values
hyper_priors=list(lambda=mean(apply(t,1,var)),
                  xi=xi,g=g,tau=100,u=0.1,v=0.1,initialize_cluster_factor=initialize_cluster_factor)
max_iter=1000
#save="~/projects/scTCR/data/Tessa_save"
# Tessa
# NOTE(review): plot_tessaClsuters looks like a typo for "Clusters" but is
# presumably the name as defined in post_analysis.R -- confirm there.
tessa_results=Tessa(e,cdr3,t,hyper_priors,max_iter,sample_id,save,b)
plot_tessaClsuters(tessa_results,save)
|
e9421398be3c9e12cacfb7b5511456ade6a6a462
|
518e98ca0e674e5b61f248ecc689ff26fe3de95d
|
/plot1.r
|
6e42d062b1fec6655736b10c8f0358f93880671f
|
[] |
no_license
|
tnt16dj/Exploratory-Data-Project-2
|
92a6412a0e5d98ae0d5a50f3b625ff62195a1c93
|
d41c104461285653228a1571aebacd33f9f421f8
|
refs/heads/master
| 2021-01-16T19:20:38.612924
| 2015-01-25T17:18:22
| 2015-01-25T17:18:22
| 29,821,892
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,187
|
r
|
plot1.r
|
## Check whether the EPA data archive is present; download and unzip if not.
if (!file.exists("./EPAData.zip")) {
  ## download the EPA Data file.
  ## NOTE(review): method="curl" requires a curl binary on PATH (not present
  ## on stock Windows) -- confirm the target platform or drop the method arg.
  fileURL <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"
  download.file(fileURL,destfile="./EPAData.zip",method="curl")
  ## unzip the file
  unzip("./EPAData.zip")
}
## Read the data into memory
## NOTE(review): SCC is read but never used in this plot script.
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
## Summarize total emissions by year (collapses all sources/states).
NEI <- aggregate(Emissions ~ year,NEI, sum)
## Scale the data to "millions", i.e. 10^6
NEI$Emissions <- NEI$Emissions/10^6
## setup a png device to write to
png(filename="plot1.png",width=640,height=480,units="px",bg="transparent")
## set global plot preferences
## NOTE(review): par(bg="white") overrides the transparent background
## requested in png() -- confirm which is intended.
par(bg="white",col="blue")
## Create the plot -- line plot with points (type="b"), Emissions by Year
with(NEI,
     plot(year,
          Emissions,
          type="b",
          xlab="Year",
          ylab=expression(paste('PM'[2.5],' Emissions (millions of tons)')),
          main=expression(paste('Total US Emissions of PM'[2.5],' from All Sources')),
          xlim=c(1999,2008),
          ylim=c(2,8)))
## Turn off the png device
dev.off()
|
4d8408b1615fae58c4e041c25b837f889df90b6a
|
818bcedc9cc432fbdcaa0f9474727094a28d3004
|
/R/test.R
|
e95e696c94bd58192e796ef486f348933deeb5dc
|
[] |
no_license
|
naveen519/data_science
|
e366b75adf2ebceb9873d7ffefcec9f1f55857d7
|
bd0bc2be087b3e5d8e9d7291a1f77114face12b9
|
refs/heads/master
| 2020-05-19T05:26:51.201782
| 2019-05-04T07:56:43
| 2019-05-04T07:56:43
| 184,849,000
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,747
|
r
|
test.R
|
# Scratch/tutorial script exercising base-R and tidyverse basics: merges,
# control flow, sorting, string/date functions, matrices, dplyr verbs and
# reshaping.  Variables are freely overwritten; intended to be stepped
# through interactively, not sourced.
a=10
a
b=20
b
a+b
# Two small frames sharing an `id` key, for the merge examples below.
id = c(1,2,3,4,5)
age = c(22,31,34,40,45)
gen = c("M","F","M","F","M")
df1 = data.frame(id,age,gen)
id = c(1,2,3,6,7)
sal = c(100,200,300,400,500)
desg = c("A","B","A","B","A")
df2 = data.frame(id,sal,desg)
df1
df2
### inner merge
df3 = merge(x=df1,y=df2, by ="id") ## inner merge
df3
df3 = merge(x=df1,y=df2, by ="id", all.x=T) ## left merge
df3
df3 = merge(x=df1, y=df2, by="id", all.y = T) ## right merge
df3
df3 = merge(x=df1, y=df2, by="id", all.x=T, all.y = T) ## full outer merge
df3
# Scalar if/else chain: bin a single age into a group label.
age = 30
if(age <= 25){
  agegrp = "0-25"
}else if(age>25 & age<=35){
  agegrp = "26-35"
}else agegrp = "35+"
agegrp
# Loop version over a vector of ages.
# NOTE(review): the '26-30' branch below assigns to the whole vector
# (missing [i]) -- compare the corrected loop that follows.
age = c(21, 36, 49, 54)
for (i in 1:length(age)){
  if(age[i] <= 25){
    agegrp[i] = '0-25'
  }else if( age[i] >25 & age[i] <=30){
    agegrp = '26-30'
  }else if( age[i] >30 & age[i]<=40){
    agegrp[i] = '31-40'
  } else agegrp[i] ='40+'
}
agegrp
# Corrected loop: every branch indexes agegrp[i].
age = c(20,30,40)
agegrp =c()
for (i in 1:length(age)){
  if(age[i] <=25){
    agegrp[i] = '0-25'
  }else if( age[i] >25 & age[i] <= 30){
    agegrp[i] = '26-30'
  }else if( age[i] > 30 & age[i]<=40){
    agegrp[i] = '31-40'
  } else agegrp[i] ='40+'
}
agegrp
# Summaries and frequency tables.
summary(df3)
summary((df3$age))
table(df3$gen)
table(df3$gen, df3$desg)
table(df3$gen, useNA = "always")
# Sequences.
seq(2,10,2)
a = 1:10
?seq
a = seq(2,100, length.out = 10)
##
a = c("M","F","M","F")
unique(a)
### sort
a = c(10,89,66,32,14,19)
sort(a) ## ascending order
sort(a, decreasing = T)
## sorting data frame
df3
# NOTE(review): sort() returns the sorted VALUES, not row indices; to sort
# rows by id this should be order(df3$id, decreasing = T).
df3 = df3[sort(df3$id, decreasing = T) ,]
df3
df3 = df3[,sort(colnames(df3)) ]
df3
a = c("apple","microsoft","google","facebook")
b = sort(a)
b
df3 = df3[,c(4,2,1,3,5)]
df3
# sort() silently drops NAs by default.
d = c(20,86,44, 69, 32,NA,NA)
sort(d)
df3
### drop rows containing any NA
df2 = na.omit(df3)
df2
### replace NAs with 0
is.na(d)
d[is.na(d)] = 0
d
## dataframe functions
dim(df3) ## number of rows and num of columns
nrow(df3) ## number of rows
ncol(df3) ## number of columns
colnames(df3) ## list of column names
dfc_names = colnames(df3)
# Rename all columns (later dplyr examples that reference `sal`/`gen` on df3
# will not find them after this rename).
names(df3) = c("id","designation","age","gender","salary")
### numeric functions
round(10.73, 1)
round(10.993,2)
## ceiling and floor
ceiling(10.99)
ceiling(10.1)
floor(9.99)
floor(9.01)
trunc(10.66)
trunc(10.99)
## character functions
a = "apple"
toupper(a)
tolower(a)
length(a)  # 1: length of the vector, not the string
nchar(a)   # 5: number of characters
b = "google"
nchar(b)
b = c("apple","microsoft","google","facebook")
nchar(b)
substr(a, 2,4)
date = Sys.Date()
date
year = substr(date,1,4)
year
a = "Firstname Lastname"
x = unlist(strsplit(a," "))
x[1]
date
d = as.character(date)
# NOTE(review): strsplit() requires a character vector; calling it on the
# Date object errors -- the corrected as.character() version appears below.
y = strsplit(date,"-")
d
### sub replaces the first match only; gsub replaces all matches
d = sub(d,pattern = "-",replacement = "/")
d
d = gsub(d, pattern = "-", replacement = "/")
d
strsplit(d,"/")
date = Sys.Date()
d = as.character(date)
strsplit(d, "-")
##
## Data manipulations
a= 4
sqrt(a)
log(a)    # natural log
log2(a)
log10(a)
## Stat functions
a = c(20,30,40,50,60)
mean(a)
# With NAs present, na.rm = T is needed for a non-NA result.
a = c(20,30,40,50,60,NA,NA)
mean(a,na.rm = T)
median(a, na.rm = T)
min(a)
max(a)
p = range(a)
p[1]
var(a)
sd(a)
### exp/log round trip
exp(a)
b = log(10)
b
a = exp(b)
a
##
k = c(rep("a",10), rep("b",4))
k
## matrices (filled column-wise unless byrow = T)
a = c(10,20,30,40)
b = matrix(a,nrow=2, ncol=2)
d = matrix(seq(2,20,length.out = 10),nrow = 5, ncol=2)
d
# 10 values recycled into a 4x4 matrix (with a warning).
d = matrix(seq(2,20,length.out = 10), nrow=4,ncol=4)
d
d[1,]       # first row
d[2,1:2]    # row 2, columns 1-2
d[,2]       # second column
sum(d[,1])
sum(d[,2])
m = matrix(seq(2,20,2), nrow=5, ncol=2, byrow = T)
m
# Manual column sum via loop, then the vectorized equivalents.
a=0
nrow(m)
for( i in 1:nrow(m)){
  a = a + m[i,1]
}
a
sum(m[,1])
colSums(m)
rowSums(m)
m
a=c(2,3,4,5)
b = c(1,2,3,4)
a = matrix(a,nrow = 2, ncol=2)
b = matrix(b, nrow=2,ncol=2)
## date functions - package "lubridate"
library("lubridate")
date = Sys.Date()
a = "10/10/2017"
class(a)
# as.Date with an explicit format string.
a_date = as.Date(a,"%d/%m/%Y" )
a_date
a = "10-jun-2017"
a_date = as.Date(a,"%d-%b-%Y")  # %b: abbreviated month name (locale-dependent)
a_date
a = "10-08-18"
a_date = as.Date(a,"%d-%m-%y")  # %y: two-digit year
a_date
a = "10-08-89"
a_date =as.Date(a,"%d-%m-%y", origin = "2000-01-01" )
a_date
# lubridate accessors.
month(a_date)
year(a_date)
day(a_date)
week(a_date)
weekdays(a_date)
?weekdays
# Date arithmetic.
a = as.Date("2018-08-26")
b = as.Date("2018-08-01")
a-b
a = as.Date("2018-01-26")
b = as.Date("2018-08-01")
difftime(b,a, units = "weeks")
### install packages
## NOTE(review): demo-only database access; requires an ODBC DSN named
## "mydb" and hard-codes throwaway credentials.
library("RODBC")
install.packages("RODBC")
mydbcon = odbcConnect(dsn = "mydb", uid ="a123", pwd="xyz123")
mydbcon = odbcConnect(dsn="mydb",uid = "poweruser")
dfa = sqlQuery(channel = mydbcon ,query = "select * from mydb.table " )
library(sqldf)
sqldf()
sqldf("select * from df3")
# NOTE(review): this SQL is malformed ("from df1 a , df2 as b join on ...")
# and will error; a valid form would be "from df1 a join df2 b on a.id = b.id".
sqldf( "select a.id, b.age, b.sal, a.gen from df1 a , df2 as b
join on a.id = b.id ")
library(dplyr)
## Select
select(df3, id, age, gen)
## filter()
filter(df3, gen == "F")
filter(df3, gen=="F" & sal >=100)
## mutate
# NOTE(review): setwd() in a script is fragile; path is machine-specific.
setwd("D:/AP/Dtrees")
churn = read.csv("Churn.csv")
head(churn, 10)
## select
churn2 = select(churn, Day.Mins , Eve.Mins, Night.Mins)
churn3 = filter(churn, Day.Mins >= 200)
churn3 = filter(churn, Day.Mins >=200 & State %in% c("NY","LA","CA"))
## mutate keeps existing columns; transmute keeps only the new ones
churn4 = mutate(churn, usage = Day.Mins+Eve.Mins+Night.Mins)
churn5 = transmute(churn, usage = Day.Mins + Eve.Mins+Night.Mins)
summarise(churn)
churn6 = arrange(churn, Day.Mins)
churn6 = arrange(churn, Day.Mins, desc(Eve.Mins))
# Fresh small frames for the dplyr join examples.
id = c(1,2,3,4)
age =c(10,20,30,40)
gen=c("M","F","M","F")
df1 = data.frame(id,age,gen)
id = c(2,3,4,5)
dept = c("A","B","A","B")
sal = c(100,200,300,400)
df2 = data.frame(id,dept,sal)
inner_join(df1,df2,"id")
left_join(df1,df2,"id")
right_join(df1,df2,"id")
full_join(df1,df2,"id")
# distinct(): drop fully duplicated rows...
id = c(1,2,3,4,1)
sal = c(100,200,300,400,100)
age = c(20,30,40,50,20)
df4 = data.frame(id,sal,age)
df5 = distinct(df4)
# ...or deduplicate by one column, keeping the other columns.
id = c(1,2,3,4,1)
sal = c(100,200,300,400,500)
age = c(20,30,40,50,20)
dfx = data.frame(id,sal, age )
distinct(dfx, id, .keep_all = T)
### mutate referencing a column created in the same call
# NOTE(review): `chrunx` is presumably a typo for "churnx"; harmless here.
chrunx = mutate(churn, usage = Day.Mins+Eve.Mins+Night.Mins,
                charges = usage *1.08)
## summarise
?summarise
summarise(churn, mean(Day.Mins) )
### Sample: fixed count vs fraction of rows
churn4 = sample_n(churn, 100)
churn8 = sample_frac(churn, 0.1)
# Long-to-wide reshape example.
region = c(rep("A",3), rep("B",3))
month = c("Jan","Feb","Mar", "Jan","Feb","Mar")
sales = c(100,200,300,400,200,100)
dfk = data.frame(region, month, sales)
dfk
library(reshape)
# NOTE(review): despite library(reshape) above, this calls base
# stats::reshape (the reshape package's function is melt/cast) -- confirm.
dfy = reshape(dfk, timevar = "month", idvar="region", direction = "wide")
# Strip the "sales." prefix reshape() adds to the widened column names.
varnames = colnames(dfy)
varnames
for(i in 1:length(varnames)){
  varnames[i] = gsub(varnames[i],pattern = "sales.",replacement = "")
}
varnames
names(dfy) = varnames
colnames(dfy)
|
0faeca027b2cc1618f67cf31eb17a40c809243de
|
a546edb72260612a371847728a903f704cd15918
|
/man/NoNA_df.Rd
|
e7c77f0c86b0679436bacea2de35ef5ec7ef1a18
|
[
"MIT"
] |
permissive
|
wizbionet/wizbionet
|
adcf0366d002892a67209357a6802cd6a179348c
|
b5fe22074d770df36b3afc47805cf899c69a7bfa
|
refs/heads/master
| 2022-12-08T07:18:00.668772
| 2020-09-02T21:20:01
| 2020-09-02T21:20:01
| 292,099,931
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 773
|
rd
|
NoNA_df.Rd
|
\name{NoNA.df}
\alias{NoNA.df}
\alias{NoNA_df}
\alias{wizbionet:NoNA.df}
\title{
missing values into NAs
}
\description{
This function changes all kinds of missing values into NAs ("", "NULL", "NaN", etc.)
}
\usage{
NoNA.df(x)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{
data frame
}
}
\value{
data frame will have missing values replaced with NAs
}
\author{
Zofia Wicik
}
\examples{
inputDF<- data.frame( symbols=c('ONECUT2','NEBL','NaN','-','KAT6A','','IGSF10','NEBL'),
values=c(0.01,0.5,0.05,0.001,0.9,0.03,0.06,0.03))
NoNA.df(inputDF)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ NA}% use one of RShowDoc("KEYWORDS")
\keyword{ !is.na }% __ONLY ONE__ keyword per line
|
929d19f69e4563c0d57eaffc049d553134081685
|
c85471f60e9d5c462de6c60c880d05898ec81411
|
/cache/thebioengineer|TidyTuesday|2019-09-10__Amusing_Injuries.R
|
8c36fe40393f5e1c97b826a46efb47dae9d7ff96
|
[
"CC-BY-4.0",
"MIT"
] |
permissive
|
a-rosenberg/github-content-scraper
|
2416d644ea58403beacba33349ee127e4eb42afe
|
ed3340610a20bb3bd569f5e19db56008365e7ffa
|
refs/heads/master
| 2020-09-06T08:34:58.186945
| 2019-11-15T05:14:37
| 2019-11-15T05:14:37
| 220,376,154
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,043
|
r
|
thebioengineer|TidyTuesday|2019-09-10__Amusing_Injuries.R
|
## ----load_libraries------------------------------------------------------
library(tidyverse)
library(tidytuesdayR)
library(plotly)
## NOTE(review): geofacet is loaded but never used below -- confirm whether
## a facet_geo() layout was planned.
library(geofacet)
# Pull the 2019-09-10 TidyTuesday data set (amusement-park safety reports).
tt<-tt_load("2019-09-10")
tt
## ----transform-----------------------------------------------------------
# Recode the fault-indicator columns to 0/1 (NA = not at fault) and derive
# an "other" bucket for records with no recorded fault category.
injuries_by_state<-tt$saferparks %>%
  select(state = acc_state,
         industry = industry_sector,
         operator = op_error,
         mechanical = mechanical,
         employee = employee,
         age = age_youngest) %>%
  mutate(operator = if_else(is.na(operator),0,1),
         mechanical = if_else(is.na(mechanical),0,1),
         employee = if_else(is.na(employee),0,1),
         other = as.numeric((operator + mechanical + employee) == 0 ))
# Age densities of the injured, faceted by industry x fault type.
injured_plots<-injuries_by_state %>%
  gather(error_type,at_fault,operator,mechanical,employee,other) %>%
  filter(at_fault == 1) %>%
  ggplot(aes(x=age, fill = error_type)) +
  geom_density() +
  theme_bw() +
  facet_grid(industry~error_type)+
  # NOTE(review): legend.position = NULL is a no-op; "none" is probably what
  # was meant to hide the legend.
  theme(legend.position = NULL)
# Render interactively with plotly.
ggplotly(injured_plots)
|
835632b6eaf72ee2ec58fdf0469a8d9fc0d159e3
|
a2fff0cf44ba7b3c00bb77e337282e43976b54c4
|
/FinalFunction.R
|
8ecf6abfde905cfd833d37c7d029bb27f4cb93bc
|
[] |
no_license
|
krrishsgk/Scraping
|
e5aabc95c47136a945a8d2f3b50e4fe90911f125
|
41f34b1f6934766921b6c27891f54f1af5fac4a3
|
refs/heads/master
| 2021-06-04T11:21:47.614052
| 2019-02-18T00:12:59
| 2019-02-18T00:12:59
| 32,724,654
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 785
|
r
|
FinalFunction.R
|
# Wires together the scraping helpers defined in the other scripts
# (leaguelinks, getmatchdata, makedata) -- source those files before calling
# this function.
#
# eplurl:  URL of the page listing all match results for the league.
# Returns: data frame of shot events joined with fixture/score information.
getItTogether <- function(eplurl) {
  library(data.table)
  library(dplyr)
  # Collect the individual match links from the results page.
  fixture_links <- leaguelinks(eplurl)
  # Scrape every match page and stack the event tables
  # (rbindlist needs the data.table package loaded above).
  event_tables <- lapply(fixture_links$matchlinks, getmatchdata)
  event_tables <- rbindlist(event_tables)
  event_tables <- makedata(event_tables)
  # Attach fixture and score columns to every event.
  merged_events <- merge(fixture_links, event_tables, by.x = "matchlinks", by.y = "url")
  # Keep only the shot events.
  shots <- filter(.data = merged_events, eventType == "shot")
  return(shots)
}
|
57b0ea58fa1414b32290af7f1f39e0932df37910
|
57733a660c18345d4c5c05b940974064d203b3bb
|
/r/fusion-model.r
|
10fdab61e52caadf38bfb77a200b412fcd52345a
|
[] |
no_license
|
patkilleen/twitter_sentiment_analysis
|
1e4bda48224402d66601b7fae0cd18adcb620bfc
|
4d10ed967ad023a42b3563aecc87b5ef33433add
|
refs/heads/master
| 2022-04-23T03:27:12.817121
| 2020-04-23T21:21:10
| 2020-04-23T21:21:10
| 257,695,222
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,782
|
r
|
fusion-model.r
|
# fusion-model.r
#
# Command-line script: trains a Choquet-integral fusion model with kappalab's
# heuristic least-squares (HLMS) algorithm on per-model sentiment predictions,
# then scores a test set and appends "prediction,real-tag" lines to a result
# file.
#
# Usage: Rscript fusion-model.r <train.csv> <test.csv> <result-file> <alpha>
#
# Both CSV files are header-less with the format:
#   p_i(j), p_i(j+1), ..., p_i(j+n), realtag
# where p_i(j) is the prediction for sample i made by model j and the last
# column is the true tag.
library("kappalab")

args <- commandArgs(TRUE)
fusionTrainingDatasetPath <- args[1]
fusionTestDataDatasetPath <- args[2]
resultFilePath <- args[3]
alphaParam <- args[4]

# --- Training data ----------------------------------------------------------
print(paste0("reading fusion training set from ", fusionTrainingDatasetPath))
fusionTrainingDataset <- read.csv(file = fusionTrainingDatasetPath, header = FALSE, sep = ",")
print("read the training fusion file")

# Last column holds the real tag, so number of models = ncol - 1
numModels <- ncol(fusionTrainingDataset) - 1
print(paste0("number of models: ", numModels))
print("training dataset")
realTagColIx <- ncol(fusionTrainingDataset)
print(paste0("column ix for real tag: ", realTagColIx))

print("creating real tag list")
# True labels as a plain vector
realTrainingTags <- fusionTrainingDataset[, realTagColIx]
print(head(realTrainingTags))

print("creating prediction matrix")
# Prediction columns as a numeric matrix (rows = samples, cols = models).
# drop = FALSE keeps matrix shape even when there is a single model.
fusionTrainingInstanceMatrix <- data.matrix(fusionTrainingDataset[, 1:(realTagColIx - 1), drop = FALSE])
print("created prediction matrix")
print(head(fusionTrainingInstanceMatrix))

# Start from a uniform capacity: equal weight/relevance for every model
mu.unif <- as.capacity(uniform.capacity(numModels))

print("training using HLMS...")
print(paste0("alpha value: ", alphaParam))
# Fit the fuzzy-measure coefficients by heuristic least squares
hls <- heuristic.ls.capa.ident(numModels, mu.unif, fusionTrainingInstanceMatrix,
                               realTrainingTags, alpha = as.numeric(alphaParam))

# --- Test data and scoring --------------------------------------------------
print(paste0("reading fusion test set from ", fusionTestDataDatasetPath))
fusionTestingDataset <- read.csv(file = fusionTestDataDatasetPath, header = FALSE, sep = ",")
realTagColIx <- ncol(fusionTestingDataset)
realTestTags <- fusionTestingDataset[, realTagColIx]
fusionTestingInstanceMatrix <- as.matrix(fusionTestingDataset[, 1:(realTagColIx - 1), drop = FALSE])

print(paste0("writing fusion prediction results to ", resultFilePath))
# Score every sample with the Choquet integral over the trained capacity,
# collecting "prediction,real-tag" lines. Lines are buffered and written once
# instead of re-opening the result file on every iteration.
nTest <- nrow(fusionTestingInstanceMatrix)
outputLines <- character(nTest)
for (sampleIx in seq_len(nTest)) {
  testTweet <- fusionTestingInstanceMatrix[sampleIx, ]
  realTag <- realTestTags[sampleIx]
  prediction <- Choquet.integral(hls$solution, testTweet)
  outputLines[sampleIx] <- paste0(prediction, ",", realTag)
}
# write() emits one element per line for character vectors; append = TRUE
# preserves the original append-to-result-file behaviour.
write(outputLines, file = resultFilePath, append = TRUE)
print("done")
|
de58ec9d8e28e18c096c78ac7fb8c2cdb1599694
|
9591f5820092cf51ce5fb1a42dfe30eb5ab441b9
|
/Nowicka2017/02_heatmaps_codes_pvs.R
|
83fcc2fd75e170a358ad4502a69e2d562e137db8
|
[] |
no_license
|
yhoang/drfz
|
443d58837141ffd96a3b0037b07a8d6f67d56350
|
38925398d30737051a3df4b9903bdc8774c18081
|
refs/heads/master
| 2021-06-30T23:12:36.186438
| 2021-02-09T11:10:32
| 2021-02-09T11:10:32
| 218,074,297
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 33,130
|
r
|
02_heatmaps_codes_pvs.R
|
# Heatmap script setup: parse command-line arguments, then load the expression
# matrices, clustering results, FlowSOM/consensus objects, p-values and model
# coefficients used by the plotting code further down.
Sys.time()
# Load packages
library(FlowSOM)
library(ConsensusClusterPlus)
library(gdata)
library(ComplexHeatmap)
library(RColorBrewer)
##############################################################################
# Test arguments — interactive defaults only; wiped by rm(list = ls()) below
# when the script is run non-interactively with real command-line arguments.
##############################################################################
prefix='23_03_pca1_cl20_merging4_'
outdir='../carsten_cytof/PD1_project/CK_2016-06-23_03/030_codes'
path_data='../carsten_cytof/PD1_project/CK_2016-06-23_03/010_data/23_03_expr_raw.rds'
path_data_norm='../carsten_cytof/PD1_project/CK_2016-06-23_03/010_data/23_03_expr_norm.rds'
path_clustering_observables='../carsten_cytof/PD1_project/CK_2016-06-23_03/030_heatmaps/23_03_pca1_clustering_observables.xls'
path_codes_clustering='../carsten_cytof/PD1_project/CK_2016-06-23_03/030_heatmaps/23_03_pca1_cl20_codes_clustering.xls'
path_codes_clustering_labels='../carsten_cytof/PD1_project/CK_2016-06-23_03/030_heatmaps/23_03_pca1_cl20_codes_clustering_labels.xls'
path_marker_selection='../carsten_cytof/PD1_project/CK_2016-06-23_03/010_helpfiles/23_03_pca1_merging4_marker_selection_codes.txt'
path_cluster_merging='../carsten_cytof/PD1_project/CK_2016-06-23_03/010_helpfiles/23_03_pca1_cl20_cluster_merging4.xlsx'
path_fsom='../carsten_cytof/PD1_project/CK_2016-06-23_03/030_heatmaps/23_03_pca1_cl20_fsom.rds'
path_fccp='../carsten_cytof/PD1_project/CK_2016-06-23_03/030_heatmaps/23_03_pca1_cl20_fccp.rds'
path_pvs='../carsten_cytof/PD1_project/CK_2016-06-23_03/050_frequencies_codes/23_03_pca1_cl20_frequencies_pvs_glmer_binomial_interglht_top10.xls'
path_coeffs='../carsten_cytof/PD1_project/CK_2016-06-23_03/050_frequencies_codes/23_03_pca1_cl20_frequencies_coeffs_glmer_binomial_interglht_top10.xls'
FDR_cutoff='10'
# path_cluster_merging=NULL
args <- NULL
##############################################################################
# Read in the arguments
##############################################################################
# NOTE(review): rm(list = ls()) wipes the whole workspace, including the test
# arguments assigned above — every variable must come from the command line.
rm(list = ls())
args <- (commandArgs(trailingOnly = TRUE))
# NOTE(review): each argument is expected to be a literal R assignment such
# as "prefix='...'" and is executed verbatim via eval(parse()) — fragile and
# unsafe for untrusted input; kept as-is because downstream code relies on it.
for (i in 1:length(args)) {
eval(parse(text = args[[i]]))
}
cat(paste0(args, collapse = "\n"), fill = TRUE)
##############################################################################
# Create the output directory if it does not exist yet
if(!file.exists(outdir))
dir.create(outdir, recursive = TRUE)
# Fixed plotting parameters (hierarchical linkage and RColorBrewer palettes)
linkage <- "average"
pheatmap_palette <- 'YlGnBu'
pheatmap_palette_rev <- FALSE
pheatmap_palette_norm <- 'RdYlBu'
pheatmap_palette_norm_rev <- TRUE
# Defaults for parameters that may be omitted from the command line
if(!any(grepl("aggregate_fun=", args))){
aggregate_fun='median'
}
# NOTE(review): this `scale` variable masks base::scale within the script
if(!any(grepl("scale=", args))){
scale=TRUE
}
# FDR_cutoff arrives as a string like "10" and becomes the numeric 0.10;
# `suffix` tags output filenames with the original cutoff (e.g. "_top10")
suffix <- paste0("_top", FDR_cutoff)
FDR_cutoff <- as.numeric(paste0("0.", FDR_cutoff))
FDR_cutoff
# ------------------------------------------------------------
# Load expression data
# ------------------------------------------------------------
# expr is indexed by column name below, so it carries "cell_id" and
# "sample_id" columns plus one column per marker channel
expr <- readRDS(path_data)
cell_id <- expr[, "cell_id"]
samp <- expr[, "sample_id"]
# Marker channels = everything that is not an id column
fcs_colnames <- colnames(expr)[!grepl("cell_id|sample_id", colnames(expr))]
e <- expr[, fcs_colnames]
# Optionally load the 0-1 normalized expression with the same channel columns
if(!is.null(path_data_norm)){
expr_norm <- readRDS(path_data_norm)
e_norm <- expr_norm[, fcs_colnames]
}
# ------------------------------------------------------------
# Load clustering data
# ------------------------------------------------------------
# clustering: per-cell cluster assignment, named by cell id
clustering <- read.table(path_codes_clustering, header = TRUE, sep = "\t", as.is = TRUE)
clust <- clustering[, "cluster"]
names(clust) <- clustering[, "cell_id"]
# clustering labels: human-readable label per cluster, ordered by cluster id
labels <- read.table(path_codes_clustering_labels, header = TRUE, sep = "\t", as.is = TRUE)
labels <- labels[order(labels$cluster, decreasing = FALSE), ]
labels$label <- factor(labels$label, levels = unique(labels$label))
rownames(labels) <- labels$cluster
labels
# clustering observables: per-channel metadata (mass, marker name, and a flag
# for whether the channel was used for clustering), keyed by mass
clustering_observables <- read.table(path_clustering_observables, header = TRUE, sep = "\t", as.is = TRUE)
rownames(clustering_observables) <- clustering_observables$mass
clustering_observables
# Masses of the channels actually used for clustering
clust_observ <- clustering_observables[clustering_observables$clustering_observable, "mass"]
clust_observ
# ------------------------------------------------------------
# Load FlowSOM objects
# ------------------------------------------------------------
# fsom: trained FlowSOM object; fccp: ConsensusClusterPlus results, one entry
# per number of metaclusters — the last entry (k) is used below
fsom <- readRDS(path_fsom)
fccp <- readRDS(path_fccp)
k <- nmetaclusts <- length(fccp)
codes <- fsom$codes
ncodes <- nrow(codes)
rownames(codes) <- 1:ncodes
# Metacluster assignment per SOM code; must cover every code
fsom_mc <- fccp[[k]]$consensusClass
if(!length(fsom_mc) == ncodes)
stop("Some of the codes have zero cells!")
# Consensus dendrogram over the codes, reused for heatmap row ordering
fsom_mc_tree <- fccp[[k]]$consensusTree
# ------------------------------------------------------------
# Load pvalues
# ------------------------------------------------------------
# Adjusted p-values per cluster; comparison columns are prefixed "adjp_"
pvs <- read.table(path_pvs, header = TRUE, sep = "\t", as.is = TRUE)
pvs <- pvs[order(pvs$cluster), ]
comparisons <- colnames(pvs)[grep("adjp_", colnames(pvs))]
comparisons
# comparisons <- "adjp_NRvsR"
# ------------------------------------------------------------
# Load coeffs
# ------------------------------------------------------------
# Model coefficients per cluster — signs give the direction of each effect
coeffs <- read.table(path_coeffs, header = TRUE, sep = "\t", as.is = TRUE)
coeffs <- coeffs[order(coeffs$cluster), ]
### Check: p-value and coefficient tables must describe the same clusters
stopifnot(all(pvs$label == coeffs$label))
# ------------------------------------------------------------
# Prepare a color annotation for heatmaps
# ------------------------------------------------------------
# ggplot palette
# Emulate ggplot2's default discrete colour palette: n equally spaced hues
# around the HCL colour wheel at fixed luminance (60) and chroma (100).
#
# n: number of colours wanted (positive integer).
# Returns a character vector of n hex colour strings.
gg_color_hue <- function(n) {
  # n + 1 points from 15 to 375 degrees; since hue 375 == 15 (mod 360) the
  # last point duplicates the first, so only the first n hues are used.
  # (`length.out` spelled out — the original relied on partial matching
  # of `length=`.)
  hues <- seq(15, 375, length.out = n + 1)
  hcl(h = hues[1:n], l = 60, c = 100)
}
# color blind palette
colors_muted <- c("#DC050C", "#E8601C", "#1965B0", "#7BAFDE", "#882E72", "#B17BA6", "#F1932D", "#F6C141", "#F7EE55", "#4EB265", "#90C987", "#CAEDAB")
color_ramp <- c(colors_muted, gg_color_hue(max(1, k - length(colors_muted))))
colors_clusters <- color_ramp[1:k]
names(colors_clusters) <- 1:k
colors_clusters
# ------------------------------
# Annotation for merging or for the original clusters
# ------------------------------
annotation_row <- data.frame(cluster = factor(fsom_mc))
rownames(annotation_row) <- 1:ncodes
annotation_colors <- list(cluster = colors_clusters)
rows_order <- order(fsom_mc)
if(!is.null(path_cluster_merging)){
### Read in cluster merging file
cm <- gdata::read.xls(path_cluster_merging)
if(!all(c("old_cluster", "label", "new_cluster") %in% colnames(cm)))
stop("Merging file must contain 'old_cluster', 'label' and 'new_cluster' columns!")
### Remove spaces in labels bcs they are problematic...
cm$label <- factor(cm$label, labels = gsub(" ", "_", levels(cm$label)))
cm_unique <- unique(cm[, c("label", "new_cluster")])
cm_unique <- cm_unique[order(cm_unique$new_cluster), ]
### Add merging to the annotation
mm <- match(annotation_row$cluster, cm$old_cluster)
annotation_row$cluster_merging <- cm$label[mm]
annotation_row$cluster_merging <- factor(annotation_row$cluster_merging, levels = cm_unique$label)
### Add colors for merging
color_ramp <- c(colors_muted, gg_color_hue(max(1, nlevels(cm_unique$label) - length(colors_muted))))
colors_clusters_merging <- color_ramp[1:nlevels(cm_unique$label)]
names(colors_clusters_merging) <- cm_unique$label
annotation_colors[["cluster_merging"]] <- colors_clusters_merging
rows_order <- order(annotation_row$cluster_merging, annotation_row$cluster)
### Drop the "drop" cluster
rows_order <- rows_order[annotation_row$cluster_merging[rows_order] != "drop"]
### Reorder the annotations so that merging is more to the left in the Figure
annotation_colors <- annotation_colors[2:1]
annotation_row <- annotation_row[, 2:1, drop = FALSE]
}
# ------------------------------------------------------------
# Load marker selection for plotting on the heatmaps
# ------------------------------------------------------------
marker_selection <- NULL
if(!is.null(path_marker_selection)){
if(file.exists(path_marker_selection)){
marker_selection <- read.table(file.path(path_marker_selection), header = TRUE, sep = "\t", as.is = TRUE)
marker_selection <- marker_selection[, 1]
if(!all(marker_selection %in% clustering_observables$marker))
stop("Marker selection is wrong")
}
}
marker_selection
# ------------------------------------------------------------
# Marker information
# ------------------------------------------------------------
# Get the isotope and antigen for fcs markers
m <- match(fcs_colnames, clustering_observables$mass)
fcs_panel <- data.frame(fcs_colname = fcs_colnames, Isotope = clustering_observables$mass[m], Antigen = clustering_observables$marker[m], stringsAsFactors = FALSE)
# Indeces of observables used for clustering
scols <- which(fcs_colnames %in% clust_observ)
# Indeces of other observables
xcols <- which(!fcs_colnames %in% clust_observ)
# Ordered by decreasing pca score
if("avg_score" %in% colnames(clustering_observables)){
scols <- scols[order(clustering_observables[fcs_colnames[scols], "avg_score"], decreasing = TRUE)]
xcols <- xcols[order(clustering_observables[fcs_colnames[xcols], "avg_score"], decreasing = TRUE)]
}
smarkers <- fcs_panel$Antigen[scols]
smarkers
xmarkers <- fcs_panel$Antigen[xcols]
xmarkers
# ------------------------------------------------------------
# Plotting
# ------------------------------------------------------------
clust <- clustering[, "cluster"]
# ------------------------------------------------------------
# Get the median expression
# ------------------------------------------------------------
colnames(e) <- fcs_panel$Antigen
a <- aggregate(e, by = list(clust), FUN = aggregate_fun)
# get cluster frequencies
freq_clust <- table(clust)
### Save cluster frequencies and the median expression
clusters_out <- data.frame(cluster = names(freq_clust), label = labels[names(freq_clust), "label"], counts = as.numeric(freq_clust), frequencies = as.numeric(freq_clust)/sum(freq_clust), a[, fcs_panel$Antigen[c(scols, xcols)]])
write.table(clusters_out, file.path(outdir, paste0(prefix, "codes_cluster_median_expression_raw.xls")), sep = "\t", quote = FALSE, row.names = FALSE, col.names = TRUE)
# ------------------------------------------------------------
# Row clustering from the fccp object
# ------------------------------------------------------------
### This clustering is used in all the heatmaps
cluster_rows <- fsom_mc_tree
# ------------------------------------------------------------
# Heatmaps of raw median expression
# ------------------------------------------------------------
### Use all markers for plotting
expr <- as.matrix(a[, c(smarkers, xmarkers)])
rownames(expr) <- 1:ncodes
labels_row <- paste0(as.character(1:ncodes), " (", as.numeric(freq_clust), ")")
labels_col <- colnames(expr)
if(pheatmap_palette_rev){
color <- colorRampPalette(rev(brewer.pal(n = 8, name = pheatmap_palette)))(100)
}else{
color <- colorRampPalette(brewer.pal(n = 8, name = pheatmap_palette))(100)
}
pvs_cut_tmp <- cut(c(0.01, 0.05, 0.1, 1), c(0, 0.01, 0.05, 0.1, 1))
pvs_cut_tmp
pvs_cut_tmp <- factor(c(paste0("up", pvs_cut_tmp), paste0("down", pvs_cut_tmp)), levels = c(paste0("up", pvs_cut_tmp), paste0("down", pvs_cut_tmp)))
pvs_cut_tmp
colors_pvs <- c(colorRampPalette(c("#00008b", "#f5f5f5"), space = "Lab")(4), colorRampPalette(c("#dc143c", "#f5f5f5"), space = "Lab")(4))
names(colors_pvs) <- levels(pvs_cut_tmp)
comparisons <- list("adjp_NRvsR", "adjp_NRvsR_base", "adjp_NRvsR_tx", c("adjp_NRvsR_base", "adjp_NRvsR_tx"))
comparison_suffixs <- c("NRvsR", "NRvsR_base", "NRvsR_tx", "NRvsR_baseANDtx")
for(i in 1:length(comparisons)){
# i = 2
comparison <- comparisons[[i]]
print(comparison)
comparison_suffix <- comparison_suffixs[i]
pvs_discrete <- cut(as.numeric(as.matrix(pvs[, comparison, drop = FALSE])), c(0, 0.01, 0.05, 0.1, 1))
sign_discrete <- sign(as.numeric(as.matrix(coeffs[, gsub("adjp_" , "", comparison), drop = FALSE])))
sign_discrete <- ifelse(sign_discrete == 1, "up", "down")
pvs_discrete <- paste0(sign_discrete, pvs_discrete)
pvs_heat <- matrix(pvs_discrete, ncol = length(comparison), byrow = FALSE)
colnames(pvs_heat) <- comparison
rownames(pvs_heat) <- pvs$cluster
head(pvs_heat)
legend_breaks <- seq(from = floor(min(expr)), to = ceiling(max(expr)), by = 1)
## With row clustering
ha <- HeatmapAnnotation(df = annotation_row, col = annotation_colors, which = "row", width = unit(1.5, "cm"))
ha_text = rowAnnotation(text = row_anno_text(rownames(expr), gp = gpar(fontsize = 10)))
ht1s <- Heatmap(expr[, smarkers, drop = FALSE], name = "in", col = color, cluster_columns = FALSE, cluster_rows = cluster_rows, row_dend_reorder = FALSE, heatmap_legend_param = list(at = legend_breaks, labels = legend_breaks, color_bar = "continuous"), show_row_names = FALSE, row_dend_width = unit(20, "mm"))
ht1x <- Heatmap(expr[, xmarkers, drop = FALSE], name = "out", col = color, cluster_columns = FALSE, cluster_rows = cluster_rows, row_dend_reorder = FALSE, heatmap_legend_param = list(at = legend_breaks, labels = legend_breaks, color_bar = "continuous"), show_row_names = FALSE)
ht2 <- Heatmap(pvs_heat, name = "apvs", col = colors_pvs, cluster_columns = FALSE, cluster_rows = cluster_rows, row_dend_reorder = FALSE, show_row_names = FALSE, width = unit(1, "cm"))
pdf(file.path(outdir, paste0(prefix, "ComplexHeatmap_codes_all_row_clust_raw_", comparison_suffix ,".pdf")), width = 12, height = 14)
draw(ha + ht1s + ht1x + ht2 + ha_text, row_dend_side = "left")
dev.off()
## No row clustering
ha <- HeatmapAnnotation(df = annotation_row[rows_order, , drop = FALSE], col = annotation_colors, which = "row", width = unit(1.5, "cm"))
ha_text = rowAnnotation(text = row_anno_text(rownames(expr[rows_order, , drop = FALSE]), gp = gpar(fontsize = 10)))
ht1s <- Heatmap(expr[rows_order, smarkers, drop = FALSE], name = "in", col = color, cluster_columns = FALSE, cluster_rows = FALSE, row_dend_reorder = FALSE, heatmap_legend_param = list(at = legend_breaks, labels = legend_breaks, color_bar = "continuous"), show_row_names = FALSE, row_dend_width = unit(20, "mm"))
ht1x <- Heatmap(expr[rows_order, xmarkers, drop = FALSE], name = "out", col = color, cluster_columns = FALSE, cluster_rows = FALSE, row_dend_reorder = FALSE, heatmap_legend_param = list(at = legend_breaks, labels = legend_breaks, color_bar = "continuous"), show_row_names = FALSE)
ht2 <- Heatmap(pvs_heat[rows_order, , drop = FALSE], name = "apvs", col = colors_pvs, cluster_columns = FALSE, cluster_rows = FALSE, row_dend_reorder = FALSE, show_row_names = FALSE, width = unit(1, "cm"))
pdf(file.path(outdir, paste0(prefix, "ComplexHeatmap_codes_all_no_clust_raw_", comparison_suffix ,".pdf")), width = 12, height = 14)
draw(ha + ht1s + ht1x + ht2 + ha_text, row_dend_side = "left")
dev.off()
## No row clustering + plot only the significant codes
codes_sign <- rowSums(pvs[, comparison, drop = FALSE] < FDR_cutoff) > 0
if(any(codes_sign)){
hh <- sum(codes_sign) / 4 + 2
ha <- HeatmapAnnotation(df = annotation_row[rows_order, , drop = FALSE][codes_sign[rows_order], , drop = FALSE], col = annotation_colors, which = "row", width = unit(1.5, "cm"))
ha_text = rowAnnotation(text = row_anno_text(rownames(expr[rows_order, , drop = FALSE][codes_sign[rows_order], , drop = FALSE]), gp = gpar(fontsize = 10)))
ht1s <- Heatmap(expr[rows_order, smarkers, drop = FALSE][codes_sign[rows_order], , drop = FALSE], name = "in", col = color, cluster_columns = FALSE, cluster_rows = FALSE, row_dend_reorder = FALSE, heatmap_legend_param = list(at = legend_breaks, labels = legend_breaks, color_bar = "continuous"), show_row_names = FALSE, row_dend_width = unit(20, "mm"))
ht1x <- Heatmap(expr[rows_order, xmarkers, drop = FALSE][codes_sign[rows_order], , drop = FALSE], name = "out", col = color, cluster_columns = FALSE, cluster_rows = FALSE, row_dend_reorder = FALSE, heatmap_legend_param = list(at = legend_breaks, labels = legend_breaks, color_bar = "continuous"), show_row_names = FALSE)
ht2 <- Heatmap(pvs_heat[rows_order, , drop = FALSE][codes_sign[rows_order], , drop = FALSE], name = "apvs", col = colors_pvs, cluster_columns = FALSE, cluster_rows = FALSE, row_dend_reorder = FALSE, show_row_names = FALSE, width = unit(1, "cm"))
pdf(file.path(outdir, paste0(prefix, "ComplexHeatmap_codes_all_no_clust_raw_", comparison_suffix, suffix ,".pdf")), width = 12, height = hh)
draw(ha + ht1s + ht1x + ht2 + ha_text, row_dend_side = "left")
dev.off()
}
## Plot only the selected markers
if(!is.null(marker_selection)){
expr_sub <- expr[, marker_selection, drop = FALSE]
}
if(scale){
# ------------------------------------------------------------
# Heatmaps of raw median expression scalled by marker (column)
# ------------------------------------------------------------
scalling_type <- "s01"
switch(scalling_type,
snorm = {
## scalled to mean = 0, sd = 1
expr_scaled <- apply(expr, 2, function(x){(x-mean(x))/sd(x)})
th <- 2.5
expr_scaled[expr_scaled > th] <- th
expr_scaled[expr_scaled < -th] <- -th
breaks = seq(from = -th, to = th, length.out = 101)
legend_breaks = seq(from = -round(th), to = round(th), by = 1)
},
s01 = {
## scalled to 01
expr_scaled <- apply(expr, 2, function(x){(x-min(x))/(max(x)-min(x))})
breaks = seq(from = 0, to = 1, length.out = 101)
legend_breaks = seq(from = 0, to = 1, by = 0.25)
}
)
color <- colorRampPalette(brewer.pal(n = 8, name = "Greys"))(120)[11:110]
## With row clustering
ha <- HeatmapAnnotation(df = annotation_row, col = annotation_colors, which = "row", width = unit(1.5, "cm"))
ha_text = rowAnnotation(text = row_anno_text(rownames(expr_scaled), gp = gpar(fontsize = 10)))
ht1s <- Heatmap(expr_scaled[, smarkers, drop = FALSE], name = "in", col = color, cluster_columns = FALSE, cluster_rows = cluster_rows, row_dend_reorder = FALSE, heatmap_legend_param = list(at = legend_breaks, labels = legend_breaks, color_bar = "continuous"), show_row_names = FALSE, row_dend_width = unit(20, "mm"))
ht1x <- Heatmap(expr_scaled[, xmarkers, drop = FALSE], name = "out", col = color, cluster_columns = FALSE, cluster_rows = cluster_rows, row_dend_reorder = FALSE, heatmap_legend_param = list(at = legend_breaks, labels = legend_breaks, color_bar = "continuous"), show_row_names = FALSE)
ht2 <- Heatmap(pvs_heat, name = "apvs", col = colors_pvs, cluster_columns = FALSE, cluster_rows = cluster_rows, row_dend_reorder = FALSE, show_row_names = FALSE, width = unit(1, "cm"))
pdf(file.path(outdir, paste0(prefix, "ComplexHeatmap_codes_all_row_clust_scale_", comparison_suffix ,".pdf")), width = 12, height = 14)
draw(ha + ht1s + ht1x + ht2 + ha_text, row_dend_side = "left")
dev.off()
## No row clustering
ha <- HeatmapAnnotation(df = annotation_row[rows_order, , drop = FALSE], col = annotation_colors, which = "row", width = unit(1.5, "cm"))
ha_text = rowAnnotation(text = row_anno_text(rownames(expr_scaled[rows_order, , drop = FALSE]), gp = gpar(fontsize = 10)))
ht1s <- Heatmap(expr_scaled[rows_order, smarkers, drop = FALSE], name = "in", col = color, cluster_columns = FALSE, cluster_rows = FALSE, row_dend_reorder = FALSE, heatmap_legend_param = list(at = legend_breaks, labels = legend_breaks, color_bar = "continuous"), show_row_names = FALSE, row_dend_width = unit(20, "mm"))
ht1x <- Heatmap(expr_scaled[rows_order, xmarkers, drop = FALSE], name = "out", col = color, cluster_columns = FALSE, cluster_rows = FALSE, row_dend_reorder = FALSE, heatmap_legend_param = list(at = legend_breaks, labels = legend_breaks, color_bar = "continuous"), show_row_names = FALSE)
ht2 <- Heatmap(pvs_heat[rows_order, , drop = FALSE], name = "apvs", col = colors_pvs, cluster_columns = FALSE, cluster_rows = FALSE, row_dend_reorder = FALSE, show_row_names = FALSE, width = unit(1, "cm"))
pdf(file.path(outdir, paste0(prefix, "ComplexHeatmap_codes_all_no_clust_scale_", comparison_suffix ,".pdf")), width = 12, height = 14)
draw(ha + ht1s + ht1x + ht2 + ha_text, row_dend_side = "left")
dev.off()
## No row clustering + plot only the significant codes
codes_sign <- rowSums(pvs[, comparison, drop = FALSE] < FDR_cutoff) > 0
if(any(codes_sign)){
hh <- sum(codes_sign) / 4 + 2
ha <- HeatmapAnnotation(df = annotation_row[rows_order, , drop = FALSE][codes_sign[rows_order], , drop = FALSE], col = annotation_colors, which = "row", width = unit(1.5, "cm"))
ha_text = rowAnnotation(text = row_anno_text(rownames(expr_scaled[rows_order, , drop = FALSE][codes_sign[rows_order], , drop = FALSE]), gp = gpar(fontsize = 10)))
ht1s <- Heatmap(expr_scaled[rows_order, smarkers, drop = FALSE][codes_sign[rows_order], , drop = FALSE], name = "in", col = color, cluster_columns = FALSE, cluster_rows = FALSE, row_dend_reorder = FALSE, heatmap_legend_param = list(at = legend_breaks, labels = legend_breaks, color_bar = "continuous"), show_row_names = FALSE, row_dend_width = unit(20, "mm"))
ht1x <- Heatmap(expr_scaled[rows_order, xmarkers, drop = FALSE][codes_sign[rows_order], , drop = FALSE], name = "out", col = color, cluster_columns = FALSE, cluster_rows = FALSE, row_dend_reorder = FALSE, heatmap_legend_param = list(at = legend_breaks, labels = legend_breaks, color_bar = "continuous"), show_row_names = FALSE)
ht2 <- Heatmap(pvs_heat[rows_order, , drop = FALSE][codes_sign[rows_order], , drop = FALSE], name = "apvs", col = colors_pvs, cluster_columns = FALSE, cluster_rows = FALSE, row_dend_reorder = FALSE, show_row_names = FALSE, width = unit(1, "cm"))
pdf(file.path(outdir, paste0(prefix, "ComplexHeatmap_codes_all_no_clust_scale_", comparison_suffix , suffix,".pdf")), width = 12, height = hh)
draw(ha + ht1s + ht1x + ht2 + ha_text, row_dend_side = "left")
dev.off()
}
## Plot only the selected markers
if(!is.null(marker_selection)){
## No row clustering
ha <- HeatmapAnnotation(df = annotation_row[rows_order, , drop = FALSE], col = annotation_colors, which = "row", width = unit(1.5, "cm"))
ha_text = rowAnnotation(text = row_anno_text(rownames(expr_scaled[rows_order, , drop = FALSE]), gp = gpar(fontsize = 10)))
ht1s <- Heatmap(expr_scaled[rows_order, intersect(marker_selection, smarkers), drop = FALSE], name = "in", col = color, cluster_columns = FALSE, cluster_rows = FALSE, row_dend_reorder = FALSE, heatmap_legend_param = list(at = legend_breaks, labels = legend_breaks, color_bar = "continuous"), show_row_names = FALSE, row_dend_width = unit(20, "mm"))
ht1x <- Heatmap(expr_scaled[rows_order, intersect(marker_selection, xmarkers), drop = FALSE], name = "out", col = color, cluster_columns = FALSE, cluster_rows = FALSE, row_dend_reorder = FALSE, heatmap_legend_param = list(at = legend_breaks, labels = legend_breaks, color_bar = "continuous"), show_row_names = FALSE)
ht2 <- Heatmap(pvs_heat[rows_order, , drop = FALSE], name = "apvs", col = colors_pvs, cluster_columns = FALSE, cluster_rows = FALSE, row_dend_reorder = FALSE, show_row_names = FALSE, width = unit(1, "cm"))
pdf(file.path(outdir, paste0(prefix, "ComplexHeatmap_codes_sel_no_clust_scale_", comparison_suffix ,".pdf")), width = 12, height = 14)
draw(ha + ht1s + ht1x + ht2 + ha_text, row_dend_side = "left")
dev.off()
}
}
if(!is.null(path_data_norm)){
# ------------------------------------------------------------
# Heatmaps of norm median expression
# Had to do this way because I want to plot the 01 normalized data, but I want to keep row clustering from the raw data
# ------------------------------------------------------------
# ------------------------------------------------------------
# Get the median expression
# ------------------------------------------------------------
colnames(e_norm) <- fcs_panel$Antigen
a_norm <- aggregate(e_norm, by = list(clust), FUN = aggregate_fun)
# ------------------------------------------------------------
# pheatmaps of median expression
# ------------------------------------------------------------
### Use all markers for plotting
expr_norm <- as.matrix(a_norm[, fcs_panel$Antigen[c(scols, xcols)]])
rownames(expr_norm) <- 1:ncodes
labels_row <- paste0(as.character(1:ncodes), " (", as.numeric(freq_clust), ")")
labels_col <- colnames(expr_norm)
if(pheatmap_palette_norm_rev){
color <- colorRampPalette(rev(brewer.pal(n = 8, name = pheatmap_palette_norm)))(101)
}else{
color <- colorRampPalette(brewer.pal(n = 8, name = pheatmap_palette_norm))(101)
}
### Fixed legend range from 0 to 1
breaks = seq(from = 0, to = 1, length.out = 101)
legend_breaks = seq(from = 0, to = 1, by = 0.2)
## With row clustering
ha <- HeatmapAnnotation(df = annotation_row, col = annotation_colors, which = "row", width = unit(1.5, "cm"))
ha_text = rowAnnotation(text = row_anno_text(rownames(expr_norm), gp = gpar(fontsize = 10)))
ht1s <- Heatmap(expr_norm[, smarkers, drop = FALSE], name = "in", col = color, cluster_columns = FALSE, cluster_rows = cluster_rows, row_dend_reorder = FALSE, heatmap_legend_param = list(at = legend_breaks, labels = legend_breaks, color_bar = "continuous"), show_row_names = FALSE, row_dend_width = unit(20, "mm"))
ht1x <- Heatmap(expr_norm[, xmarkers, drop = FALSE], name = "out", col = color, cluster_columns = FALSE, cluster_rows = cluster_rows, row_dend_reorder = FALSE, heatmap_legend_param = list(at = legend_breaks, labels = legend_breaks, color_bar = "continuous"), show_row_names = FALSE)
ht2 <- Heatmap(pvs_heat, name = "apvs", col = colors_pvs, cluster_columns = FALSE, cluster_rows = cluster_rows, row_dend_reorder = FALSE, show_row_names = FALSE, width = unit(1, "cm"))
pdf(file.path(outdir, paste0(prefix, "ComplexHeatmap_codes_all_row_clust_norm_", comparison_suffix ,".pdf")), width = 12, height = 14)
draw(ha + ht1s + ht1x + ht2 + ha_text, row_dend_side = "left")
dev.off()
## No row clustering
ha <- HeatmapAnnotation(df = annotation_row[rows_order, , drop = FALSE], col = annotation_colors, which = "row", width = unit(1.5, "cm"))
ha_text = rowAnnotation(text = row_anno_text(rownames(expr_norm[rows_order, , drop = FALSE]), gp = gpar(fontsize = 10)))
ht1s <- Heatmap(expr_norm[rows_order, smarkers, drop = FALSE], name = "in", col = color, cluster_columns = FALSE, cluster_rows = FALSE, row_dend_reorder = FALSE, heatmap_legend_param = list(at = legend_breaks, labels = legend_breaks, color_bar = "continuous"), show_row_names = FALSE, row_dend_width = unit(20, "mm"))
ht1x <- Heatmap(expr_norm[rows_order, xmarkers, drop = FALSE], name = "out", col = color, cluster_columns = FALSE, cluster_rows = FALSE, row_dend_reorder = FALSE, heatmap_legend_param = list(at = legend_breaks, labels = legend_breaks, color_bar = "continuous"), show_row_names = FALSE)
ht2 <- Heatmap(pvs_heat[rows_order, , drop = FALSE], name = "apvs", col = colors_pvs, cluster_columns = FALSE, cluster_rows = FALSE, row_dend_reorder = FALSE, show_row_names = FALSE, width = unit(1, "cm"))
pdf(file.path(outdir, paste0(prefix, "ComplexHeatmap_codes_all_no_clust_norm_", comparison_suffix ,".pdf")), width = 12, height = 14)
draw(ha + ht1s + ht1x + ht2 + ha_text, row_dend_side = "left")
dev.off()
## No row clustering + plot only the significant codes
codes_sign <- rowSums(pvs[, comparison, drop = FALSE] < FDR_cutoff) > 0
if(any(codes_sign)){
hh <- sum(codes_sign) / 4 + 2
ha <- HeatmapAnnotation(df = annotation_row[rows_order, , drop = FALSE][codes_sign[rows_order], , drop = FALSE], col = annotation_colors, which = "row", width = unit(1.5, "cm"))
ha_text = rowAnnotation(text = row_anno_text(rownames(expr_norm[rows_order, , drop = FALSE][codes_sign[rows_order], , drop = FALSE]), gp = gpar(fontsize = 10)))
ht1s <- Heatmap(expr_norm[rows_order, smarkers, drop = FALSE][codes_sign[rows_order], , drop = FALSE], name = "in", col = color, cluster_columns = FALSE, cluster_rows = FALSE, row_dend_reorder = FALSE, heatmap_legend_param = list(at = legend_breaks, labels = legend_breaks, color_bar = "continuous"), show_row_names = FALSE, row_dend_width = unit(20, "mm"))
ht1x <- Heatmap(expr_norm[rows_order, xmarkers, drop = FALSE][codes_sign[rows_order], , drop = FALSE], name = "out", col = color, cluster_columns = FALSE, cluster_rows = FALSE, row_dend_reorder = FALSE, heatmap_legend_param = list(at = legend_breaks, labels = legend_breaks, color_bar = "continuous"), show_row_names = FALSE)
ht2 <- Heatmap(pvs_heat[rows_order, , drop = FALSE][codes_sign[rows_order], , drop = FALSE], name = "apvs", col = colors_pvs, cluster_columns = FALSE, cluster_rows = FALSE, row_dend_reorder = FALSE, show_row_names = FALSE, width = unit(1, "cm"))
pdf(file.path(outdir, paste0(prefix, "ComplexHeatmap_codes_all_no_clust_norm_", comparison_suffix , suffix, ".pdf")), width = 12, height = hh)
draw(ha + ht1s + ht1x + ht2 + ha_text, row_dend_side = "left")
dev.off()
}
## Plot only the selected markers
if(!is.null(marker_selection)){
expr_sub <- expr_norm[, marker_selection, drop = FALSE]
}
}
# ------------------------------------------------------------
# Heatmaps of the original codes
# ------------------------------------------------------------
### Use the code markers for plotting
expr_codes <- codes
rownames(expr_codes) <- 1:ncodes
mm <- match(colnames(codes), fcs_panel$fcs_colname)
colnames(expr_codes) <- fcs_panel$Antigen[mm]
expr_codes <- expr_codes[, fcs_panel$Antigen[scols]]
labels_row <- paste0(as.character(1:ncodes), " (", as.numeric(freq_clust), ")")
labels_col <- colnames(expr_codes)
color <- colorRampPalette(rev(brewer.pal(n = 8, name = "Spectral")))(101)
## With row clustering
ha <- HeatmapAnnotation(df = annotation_row, col = annotation_colors, which = "row", width = unit(1.5, "cm"))
ha_text = rowAnnotation(text = row_anno_text(rownames(expr_codes), gp = gpar(fontsize = 10)))
ht1s <- Heatmap(expr_codes, name = "in", col = color, cluster_columns = FALSE, cluster_rows = cluster_rows, row_dend_reorder = FALSE, heatmap_legend_param = list(at = legend_breaks, labels = legend_breaks, color_bar = "continuous"), show_row_names = FALSE, row_dend_width = unit(20, "mm"))
ht1x <- Heatmap(expr_codes, name = "out", col = color, cluster_columns = FALSE, cluster_rows = cluster_rows, row_dend_reorder = FALSE, heatmap_legend_param = list(at = legend_breaks, labels = legend_breaks, color_bar = "continuous"), show_row_names = FALSE)
ht2 <- Heatmap(pvs_heat, name = "apvs", col = colors_pvs, cluster_columns = FALSE, cluster_rows = cluster_rows, row_dend_reorder = FALSE, show_row_names = FALSE, width = unit(1, "cm"))
pdf(file.path(outdir, paste0(prefix, "ComplexHeatmap_codes_codes_row_clust_raw_", comparison_suffix ,".pdf")), width = 12, height = 14)
draw(ha + ht1s + ht1x + ht2 + ha_text, row_dend_side = "left")
dev.off()
## No row clustering
ha <- HeatmapAnnotation(df = annotation_row[rows_order, , drop = FALSE], col = annotation_colors, which = "row", width = unit(1.5, "cm"))
ha_text = rowAnnotation(text = row_anno_text(rownames(expr_codes[rows_order, , drop = FALSE]), gp = gpar(fontsize = 10)))
ht1s <- Heatmap(expr_codes[rows_order, , drop = FALSE], name = "in", col = color, cluster_columns = FALSE, cluster_rows = FALSE, row_dend_reorder = FALSE, heatmap_legend_param = list(at = legend_breaks, labels = legend_breaks, color_bar = "continuous"), show_row_names = FALSE, row_dend_width = unit(20, "mm"))
ht1x <- Heatmap(expr_codes[rows_order, , drop = FALSE], name = "out", col = color, cluster_columns = FALSE, cluster_rows = FALSE, row_dend_reorder = FALSE, heatmap_legend_param = list(at = legend_breaks, labels = legend_breaks, color_bar = "continuous"), show_row_names = FALSE)
ht2 <- Heatmap(pvs_heat[rows_order, , drop = FALSE], name = "apvs", col = colors_pvs, cluster_columns = FALSE, cluster_rows = FALSE, row_dend_reorder = FALSE, show_row_names = FALSE, width = unit(1, "cm"))
pdf(file.path(outdir, paste0(prefix, "ComplexHeatmap_codes_codes_no_clust_raw_", comparison_suffix ,".pdf")), width = 12, height = 14)
draw(ha + ht1s + ht1x + ht2 + ha_text, row_dend_side = "left")
dev.off()
}
sessionInfo()
|
ffad43abed444960624fa236f573beb27ae0de53
|
c1225525697e27a0c2fe46455c465930f25db6df
|
/man/belplau.Rd
|
4dcfaca45bdd090bc7e013d3fc806229e577faee
|
[] |
no_license
|
yyuzhong/dst
|
2754ac80f5bb656731959e1cf5c2dec2d9cfd552
|
c7a1151071c8299a23e3ee666d4ddebbf92f7a50
|
refs/heads/master
| 2021-01-15T12:41:46.821871
| 2014-12-30T00:00:00
| 2014-12-30T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,369
|
rd
|
belplau.Rd
|
\name{belplau}
\alias{belplau}
\title{Calculation of the measures of belief and plausibility for each focal element of a matrix}
\usage{
belplau(x, remove = FALSE)
}
\arguments{
\item{x}{A matrix of propositions, i.e. 1 col of masses plus boolean matrix describing focal elements.}
\item{remove}{= TRUE removes focal elements with 0 mass.}
}
\description{
The input table x is a matrix of focal elements with their mass, generally obtained as the result of the function nzdsr (Dempster's rule of combination). Elements with zero mass can be removed if present. Measures of belief and plausibility are obtained.
}
\details{
The belief function bel is defined by:\cr
bel(A)=Sum(m(B); B <= A), for every subset A of the frame of discernment.\cr
The plausibility function pl is defined by:\cr
pl(A)=Sum(m(B); B & A not empty), for every subset A of the frame of discernment.
}
\value{
A matrix of m rows by 2 columns, where m is the number of rows of the input matrix.
}
\author{
Claude Boivin, Stat.ASSQ
}
\references{
Shafer, G., (1976). A Mathematical Theory of Evidence. Princeton University Press, Princeton, New Jersey. 296 p.
}
\examples{
## Not run:
x<-list(DempsterRule=t(matrix(c(.9,1,0,.1,1,1),ncol=2)),con=0)
y<-list(DempsterRule=t(matrix(c(.5,0,1,.5,1,1),ncol=2)),con=0)
r1<-dsrwon(x,y)
r<-nzdsr(r1)
bel<-belplau(r$DempsterRule)
## End(Not run)
}
|
cdd51eea6cfffa8520b95590a88fab4157c17535
|
c1f40916fafdd40be5aade0117e17c3caa3d5737
|
/script.R
|
0fb4dcf065928bf290bd862ff58f98e856c48d5e
|
[] |
no_license
|
darokun/ME-GIS
|
8f44b6c588d936c267ab699b74202700dacb0f85
|
a3bd7ea3f85fd817cc83c95acdfe92cca8593ecd
|
refs/heads/master
| 2021-01-12T08:25:36.475625
| 2016-12-15T17:02:37
| 2016-12-15T17:02:37
| 76,575,808
| 0
| 0
| null | 2016-12-15T16:19:53
| 2016-12-15T16:19:52
| null |
UTF-8
|
R
| false
| false
| 2,300
|
r
|
script.R
|
# Middle Earth Maps
# https://www.r-bloggers.com/interactive-and-styled-middle-earth-map/
# NOTE(review): rm(list = ls()) in a script is discouraged (it only clears the
# global environment and surprises anyone sourcing the file); kept as-is.
rm(list = ls())
library(dplyr)
library(maptools)
library(highcharter)
library(geojsonio)
library(rmapshaper)
# Directory holding the Middle Earth GIS shapefiles, read by the helper
# functions below.
fldr <- "~/Documents/GitHub/ME-GIS"
# Read a shapefile from the global `fldr` directory, simplify its geometry,
# and return it as a geojson list.
#
# file: shapefile name inside `fldr`.
# k:    fraction of vertices to keep during simplification (passed to
#       rmapshaper::ms_simplify as `keep`).
shp_to_geoj_smpl <- function(file = "Coastline2.shp", k = 0.5) {
  spatial <- readShapeSpatial(file.path(fldr, file))
  simplified <- ms_simplify(spatial, keep = k)
  geojson_list(simplified)
}
# Read a point shapefile from the global `fldr` directory and return its
# features encoded as geojson (no simplification step for point layers).
shp_points_to_geoj <- function(file) {
  geojson_json(readShapeSpatial(file.path(fldr, file)))
}
# Read and simplify each polygon/line layer; heavier simplification (smaller
# keep fraction) for dense geometries such as the rivers.
cstln <- shp_to_geoj_smpl("Coastline2.shp", .65)
rvers <- shp_to_geoj_smpl("Rivers19.shp", .01)
frsts <- shp_to_geoj_smpl("Forests.shp", 0.90)
lakes <- shp_to_geoj_smpl("Lakes2.shp", 0.1)
roads <- shp_to_geoj_smpl("PrimaryRoads.shp", 1)
# Point layers are converted without simplification.
cties <- shp_points_to_geoj("Cities.shp")
towns <- shp_points_to_geoj("Towns.shp")
# Default marker style shared by all series (referenced from hc_plotOptions
# later in the script).
pointsyles <- list(
  symbol = "circle",
  lineWidth= 1,
  radius= 4,
  fillColor= "transparent",
  lineColor= NULL
)
# Assemble the interactive Middle Earth map: coast/river/road outlines as
# maplines, forests/lakes as filled map polygons, and cities/towns as
# labelled map points; finish by enabling zoom/pan navigation.
hcme <- highchart(type = "map") %>%
  hc_chart(style = list(fontFamily = "Macondo"), backgroundColor = "#F4C283") %>%
  hc_title(text = "The Middle Earth", style = list(fontFamily = "Tangerine", fontSize = "40px")) %>%
  hc_add_series(data = cstln, type = "mapline", color = "brown", name = "Coast") %>%
  hc_add_series(data = rvers, type = "mapline", color = "#7e88ee", name = "Rivers") %>%
  hc_add_series(data = roads, type = "mapline", color = "#634d53", name = "Main Roads") %>%
  hc_add_series(data = frsts, type = "map", color = "#228B22", name = "Forest") %>%
  hc_add_series(data = lakes, type = "map", color = "#7e88ee", name = "Lakes") %>%
  hc_add_series(data = cties, type = "mappoint", color = "black", name = "Cities",
                dataLabels = list(enabled = TRUE), marker = list(radius = 4, lineColor = "black")) %>%
  hc_add_series(data = towns, type = "mappoint", color = "black", name = "Towns",
                dataLabels = list(enabled = TRUE), marker = list(radius = 1, fillColor = "rgba(190,190,190,0.7)")) %>%
  hc_plotOptions(
    series = list(
      marker = pointsyles,
      # BUG FIX: the Highcharts option is `pointFormat`; the original
      # `poinFormat` was silently ignored, so the intended minimal tooltip
      # ("{series.name}") never took effect.
      tooltip = list(headerFormat = "", pointFormat = "{series.name}"),
      dataLabels = list(enabled = FALSE, format = '{point.properties.Name}')
    )
  ) %>%
  hc_mapNavigation(enabled = TRUE)
hcme
|
1845c184734d4b3df237865b0bdbdf678e8d69b9
|
243b1751d4870b8b20ab3fab297eecfb4102872f
|
/Aula1.R
|
f4624a6df19bfe668c62c89138c71bf1d8a4f55a
|
[] |
no_license
|
Lucasc27/POO-e-Funcional
|
af1ef245194db64afc7bbda1aabd480f0dea5ab3
|
27f7a537eaec2d4710eb6064ec2caec2c46a7e3a
|
refs/heads/master
| 2020-04-24T06:12:31.651365
| 2019-04-10T23:40:47
| 2019-04-10T23:40:47
| 171,757,121
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,937
|
r
|
Aula1.R
|
# Functional and Object-Oriented Programming
# Lesson 1
# Prof. Neylson Crepalde
# Lucas Cesar Fernandes Ferreira
# ------------------------------- #
5 + 3 # Addition
5 - 4 # Subtraction
15 / 5 # Division
5 * 3 # Multiplication
5 ^ 2 # Exponentiation
16 %/% 5 # Integer division
16 %% 5 # Remainder of the division (modulo)
###################
# Creating Objects #
###################
x <- 5 # Assignment
y <- 7
y
x + y
###################
# Creating Vectors #
###################
x <- c(1,2,3,4,5)
x
y <- c(1:5)
y
x + y # Element-wise sum of vectors
sqrt(x) # Square root
exp(x) # Exponential
log(x) # Natural logarithm
log10(x) # Base-10 logarithm
###########
# Classes #
###########
class(x) # Identifying the class (type) of an object
idades <- c(23L, 25L, 27L, 32L, 31L) # Integers use less memory ("idades" = ages)
idades
anos <- c(2015, 2016, 2017, 2018, 2019) # "anos" = years
anos
anos <- as.integer(anos)
class(anos)
#########################
# Working with text #
#########################
nome <- "Lucas"
nome
sobrenome <- "Cesar"
vetor_nomes <- c(nome,sobrenome)
length(vetor_nomes) # Length of the vector
nome_completo <- paste(nome, sobrenome)
paste(nome, sobrenome, sep = ",")
#####################
# Logical Variables #
#####################
logicos <- c(TRUE, T, FALSE, F) # NOTE(review): prefer TRUE/FALSE over T/F in real code
x == 5
x != 5
#################################
# Indexing and Subsetting #
#################################
x[1]
x[3]
x[1:3]
x[x > 3]
x[x < 4]
x[x != 3]
x[x > 2 & x < 4] # Conditions combined with &(and) and |(or)
##########
# Matrix #
##########
args(matrix)
A <- matrix(data = 1:16, nrow = 4)
A[3,2]
A <- matrix(data = 1:16, nrow = 4, byrow = T)
A * 2
# -------------------------------------------------- #
# Exercises
# http://neylsoncrepalde.github.io/2016-09-26-introducao-ao-r-exercicio-1/
|
6d98f02ec076a5eb154b88b13c899cbf84a98ce1
|
c5f494b0295e63ff26288db7dfce713313b82371
|
/man/prune.Rd
|
d5e0fad8fd87323b64bcf164850018a79a8e33e0
|
[] |
no_license
|
briandconnelly/mutationtree
|
42476df911cfeda760df877422b996ca424551ca
|
03b95889bee0d30f7a9e0ef696500517eb9a5a71
|
refs/heads/master
| 2020-05-20T06:12:24.185997
| 2016-01-19T18:32:15
| 2016-01-19T18:32:15
| 31,677,053
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 615
|
rd
|
prune.Rd
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{prune}
\alias{prune}
\title{Remove extinct leaf nodes from the population graph}
\usage{
prune(population)
}
\arguments{
\item{population}{A mutationtree population}
}
\value{
A pruned mutationtree population
}
\description{
To help minimize the size of a graph, \code{prune} removes nodes that both
have zero abundance and are leaves (i.e., have no children). This second
condition is added so that extant lineages with extinct parents aren't
pruned. While more aggressive pruning is perhaps possible, this should remove
most unneeded nodes over time.
}
|
c6f7bfa9b7c98b1afbf11a81fc7b43c43daf47a6
|
ee816637db73015d86002e55c496c2fdeae52b99
|
/0009.special_pathagorean_triplet.R
|
dfd581c4614432b3688528050ba224d352030048
|
[] |
no_license
|
OTStats/euler-project
|
a5c54e4ef989549c2ebba81ad9a61ed91debdbd5
|
3688b53382068e437cd84d19b0570b91b857a6a8
|
refs/heads/master
| 2021-11-16T02:31:42.894898
| 2021-09-27T13:30:51
| 2021-09-27T13:30:51
| 204,961,979
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 604
|
r
|
0009.special_pathagorean_triplet.R
|
# A Pythagorean triplet is a set of three natural numbers, a < b < c, for which,
#
# a^2 + b^2 = c^2
# For example, 3^2 + 4^2 = 9 + 16 = 25 = 5^2.
#
# There exists exactly one Pythagorean triplet for which a + b + c = 1000.
# Find the product abc.
# -- Load libraries
library(tidyverse)
# Enumerate candidate (a, b) pairs, derive c from the sum constraint
# a + b + c = 1000, then keep the single ordered Pythagorean triplet and
# attach the product a * b * c.
crossing(a = 1:999, b = 1:999) %>%
  mutate(c = 1000 - a - b) %>%
  filter(a < b, b < c, a^2 + b^2 == c^2) %>%
  mutate(abc_product = prod(a, b, c))
# # A tibble: 1 x 4
#       a     b     c   abc_product
#   <int> <int> <dbl>         <dbl>
# 1   200   375   425      31875000
|
5db013d167b9d98d558a2482a97d2a211b01cc1d
|
66cf4adc85683e73290da2139c071365b40b6209
|
/man/plot_GenePosition.Rd
|
486d8b67885db17f32e75fc67395fd18a3a834b6
|
[
"MIT"
] |
permissive
|
dpelegri/EASIER
|
db51d623eae85300b4b63f30f97ac93c9676a004
|
ce9101b990874c13f6d8564867fdb3cbdc5a7841
|
refs/heads/main
| 2023-08-21T21:33:43.968027
| 2021-10-22T15:28:07
| 2021-10-22T15:28:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 539
|
rd
|
plot_GenePosition.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_GenePosition.R
\name{plot_GenePosition}
\alias{plot_GenePosition}
\title{Gene Position Plots}
\usage{
plot_GenePosition(
x,
outputdir = ".",
outputfile = NULL,
main = "",
xlab = "",
...
)
}
\arguments{
\item{x}{Dataframe with Gene Position data}
\item{outputdir}{string with relative path}
\item{main}{optional, string with title}
\item{xlab}{optional, string with xlab text}
}
\value{
distribution plot
}
\description{
Gene Position plot
}
|
af6d3357a6fec210dc4537aae5cb20f0aef5a266
|
83c43061a32c1c899aefb564ba6c63c32f48b210
|
/General Code/CMcCode/man/vTable.Rd
|
3038be392a361b2b48506f1e9c33717db8f32323
|
[] |
no_license
|
chacemcneil/Personal
|
c694b71439d4884884a22a599f7dfd6e3549f49c
|
021c2f270f644654730c601b33458fad23185c26
|
refs/heads/master
| 2021-12-10T22:49:48.057640
| 2021-11-09T01:36:40
| 2021-11-09T01:36:40
| 45,845,740
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 679
|
rd
|
vTable.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/UsefulFunctions.R
\name{vTable}
\alias{vTable}
\title{Venn Table Function}
\usage{
vTable(..., prop = FALSE, sums = FALSE)
}
\arguments{
\item{...}{Vectors to compare}
\item{prop}{Whether to show proportions instead of counts. Default is FALSE.}
\item{sums}{Whether to show row and columns sums using \code{Table}. Default is FALSE.}
}
\description{
Similar to a Venn diagram. Gives the counts of unique entries that are shared between given inputs.
}
\examples{
x <- c(2, 3, 3, 10, 12, 13)
y <- 1:6
vTable(x, y)
vTable(x, y, prop = TRUE)
vTable(x, y, sums = TRUE)
}
\seealso{
\code{\link{Table}}
}
|
5482e29f3669adfd20d4f28d77a2502ca29db158
|
d28c7b35872ef4a5fe6baf91e6b4b8721e58a174
|
/backup/topo_elements.R
|
c74c15309053fed255ed5fed85d510d5349a7313
|
[] |
no_license
|
emiliolr/abm
|
b00c63d945d28e4258f371ea3447b08156dc1377
|
c9de884fda6ff1b3fede3600e26a662f27787c08
|
refs/heads/master
| 2023-02-20T22:12:02.361291
| 2021-01-24T19:54:42
| 2021-01-24T19:54:42
| 288,858,757
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,467
|
r
|
topo_elements.R
|
library(tidyverse)
library(sf)
library(raster)
library(rayrender)
library(rayshader)
library(png)
# --- Setup -------------------------------------------------------------------
# NOTE(review): setwd() inside a script is discouraged; prefer project-relative
# paths (e.g. the here package). Kept to preserve the original workflow.
#
# BUG FIX: the original paths contained the shell-style escape "DATA\ 440".
# In an R string literal "\ " is an unrecognized escape sequence and makes the
# file fail to parse; the intended path simply contains a space.
setwd("~/Desktop/DATA 440/Deliverables")
path_to_add_data <- "~/Desktop/DATA 440/ABM_Summer_Stuff/section_2.1/data"
# Bring in all the data: pre-saved workspace objects, the topo raster, and the
# adm2 boundary shapefile.
load("adm2_urban_areas.RData")
load("for_roads_and_healthcare.RData")
swz_topo <- raster("Data/swz_srtm_topo_100m.tif")
swz_adm2 <- str_c(path_to_add_data, "/gadm36_SWZ_shp/gadm36_SWZ_2.shp") %>% read_sf()
# --- Terrain preparation -----------------------------------------------------
# Crop the national topo raster to the extent of the urban areas, then convert
# it to the bare elevation matrix that rayshader operates on.
comb_topo <- crop(swz_topo, urban_areas_exp) # cropping down to the bounding box
comb_matrix <- raster_to_matrix(comb_topo) # a rayshader function (DIMENSIONS: 300x364)
# --- 2D hillshade map --------------------------------------------------------
comb_matrix %>%
  sphere_shade() %>% # shading the topo raster
  add_water(detect_water(comb_matrix)) %>% # adding in any detected bodies of water
  plot_map() # plotting
# --- 3D render ---------------------------------------------------------------
# Ambient occlusion is expensive, so compute it once and reuse it below.
ambient_shadows <- ambient_shade(comb_matrix)
comb_matrix %>% # making a 3D plot --> displays in a new window
  sphere_shade(texture = "imhof3") %>%
  add_water(detect_water(comb_matrix), color = "azure") %>%
  add_shadow(ray_shade(comb_matrix, sunaltitude = 3, zscale = 33, lambert = FALSE), max_darken = 0.5) %>%
  add_shadow(lamb_shade(comb_matrix, sunaltitude = 3, zscale = 33), max_darken = 0.7) %>%
  add_shadow(ambient_shadows, max_darken = 0.1) %>%
  plot_3d(comb_matrix, zscale = 20, windowsize = c(1000, 1000), phi = 40, theta = 130, zoom = 0.75,
          background = "grey40", shadowcolor = "grey5", soliddepth = -50, shadowdepth = -100)
render_snapshot() # rendering a snapshot of the plot while the window is open
# --- 2D overlay elements (ggplot) --------------------------------------------
# Draw the map annotations (boundary, urban areas, roads, schools, clinics)
# with a transparent background so they can be draped over the terrain.
comb_elements <- ggplot() +
  geom_sf(data = comb_adm2, fill = NA, color = "gold", linetype = "11", size = 1.5) + # making the outline
  geom_sf(data = urban_areas_exp, alpha = 0.5, fill = "gold3", color = NA) +
  geom_sf(data = primary_routes_comb, color = "orange", size = 1.2) +
  geom_sf(data = secondary_routes_comb, color = "orange", size = 0.4) +
  # NOTE(review): "range3" below is not a valid R color -- presumably
  # "orange3" was intended if this layer is ever re-enabled.
  # geom_sf(data = tertiary_routes_comb, color = "range3", size = 0.2) +
  geom_sf(data = comb_schools, size = 2.5, color = "deepskyblue") +
  geom_sf(data = comb_health_care, color = "hotpink", shape = 3, size = 3.5, show.legend = FALSE) +
  theme_void() +
  scale_x_continuous(expand = expansion()) +
  scale_y_continuous(expand = expansion())
comb_elements
# Save the overlay at exactly the raster's pixel dimensions (300x364) with a
# transparent background so it aligns 1:1 with the elevation matrix.
png("Figures/combined.png", width = 300, height = 364, units = "px", bg = "transparent") # saving the overlay as a .png
comb_elements
dev.off()
# --- Drape the overlay onto the 2D and 3D terrain ----------------------------
overlay_img <- readPNG("Figures/combined.png")
comb_matrix %>% # the 2D plot
  sphere_shade() %>%
  add_water(detect_water(comb_matrix)) %>%
  add_overlay(overlay_img, alphalayer = 0.95) %>% # adding the adm2 stuff
  plot_map()
comb_matrix %>% # the 3D plot
  sphere_shade(texture = "imhof3") %>%
  add_water(detect_water(comb_matrix), color = "azure") %>%
  add_shadow(ray_shade(comb_matrix, sunaltitude = 3, zscale = 33, lambert = FALSE), max_darken = 0.5) %>%
  add_shadow(lamb_shade(comb_matrix, sunaltitude = 3, zscale = 33), max_darken = 0.7) %>%
  add_shadow(ambient_shadows, max_darken = 0.1) %>%
  add_overlay(overlay_img, alphalayer = 0.95) %>%
  plot_3d(comb_matrix, zscale = 20, windowsize = c(1000, 1000), phi = 50, theta = 350, zoom = 0.65,
          background = "grey40", shadowcolor = "grey5", soliddepth = -50, shadowdepth = -100)
render_snapshot("Figures/adm2_w_topo.png") # save the final draped 3D view
|
e8b442094b54071554cbb34b58685f3fc9ebc9ad
|
cf9e466f05004b2c4c02ebe7c24d67846229a9bd
|
/phylodynamic/subset_clades.R
|
fdfc38de47deafce15d2b616359a78be424e4f48
|
[] |
no_license
|
JuliaPalacios/Covid19_Analyses
|
afadd0bc2b481c1a96ed8201c468fab44da72566
|
55d54686df7e44405b60471b50400247441d24ec
|
refs/heads/master
| 2021-04-18T07:42:19.198979
| 2020-08-11T02:53:58
| 2020-08-11T02:53:58
| 249,518,712
| 3
| 1
| null | 2020-04-05T19:03:25
| 2020-03-23T18:59:20
|
HTML
|
UTF-8
|
R
| false
| false
| 3,178
|
r
|
subset_clades.R
|
# --- Setup -------------------------------------------------------------------
# NOTE(review): rm(list = ls()) and setwd() in scripts are discouraged; kept
# here to preserve the original workflow.
rm(list=ls())
library(phylodyn)
library(ape)
library(lubridate)
library(phangorn)
library(phytools)
library(ggplot2)
library(ggtree)
library(gridExtra)
setwd("~/Documents/Covid_Analysis/phylodynamic/")
#base.dir <- '~/Desktop/Coronavirus/R/tree_processing/'
#setwd(base.dir)
# Expected to provide mu_linear_reg_inputDist() and serial_upgma_inputDist(),
# both called further down -- confirm against function_serial.R.
source("function_serial.R")
#Read fasta file
country<-"Californiadist"
data<-paste("~/Documents/Covid_Analysis/alignment/data/CaliforniaTest/",country,".RData",sep="")
#data<-paste(country,".RData",sep="")
load(data) # loads an object named `listout` (used on the next line)
distList<-listout
# Extract sequences, sequence names, and sampling dates from the file
#Save name of the sequence and sampling times from the file
n<-distList$n # number of sequences
seq_names<-distList$seq_names
# Extract the sampling date from each sequence name: the date is encoded in
# the last 10 characters of the name ("YYYY-MM-DD").
# NOTE: the original looped with c()-appending and used strsplit(r, "|").
# Since "|" is the (unescaped) regex alternation operator, that split the
# name into single characters; taking the last 10 and re-pasting them is
# exactly "the last 10 characters", which substr() expresses directly and
# without growing a vector inside a loop.
samp_times <- vapply(
  seq_names,
  function(nm) substr(nm, nchar(nm) - 9L, nchar(nm)),
  character(1),
  USE.NAMES = FALSE
)
# Convert "YYYY-MM-DD" strings to decimal years, then re-express them as time
# *before* the most recent sample, so the newest sample sits at 0.
samp_times<-decimal_date(date(samp_times))
lastdate<-max(samp_times)
samp_times2<-max(samp_times)-samp_times
name_samp<-cbind(samp_times2,seq_names) # sampling offset paired with its sequence name
# Ensure that there are at least two sequences at t=0, otherwise remove the sequence
while(table(samp_times2)[1]==1){
  idx<-which(samp_times2==0)
  #Remove the sequence observed only once
  #fastaformat <- fastaformat[-idx]
  seq_names<-seq_names[-idx]
  samp_times2<-samp_times2[-idx]
  name_samp<-name_samp[-idx,]
  #Make the last sequence the new 0
  #samp_times<-samp_times-min(samp_times)
  # Drop the corresponding row/column from the pairwise distance matrix too,
  # keeping it aligned with the remaining sequences.
  distList$distGen<-distList$distGen[-idx,-idx]
}
# Compute mutation rate. Note: the reference sequence must be in the last spot
mu<-mu_linear_reg_inputDist(distList)
# Compute serial UPGMA tree and plot it
tree<-serial_upgma_inputDist(distList,mu, samp_times2, name_samp)
plot(tree,show.tip.label = FALSE,cex=.3)
# =================================================
# plot and store subtree, interactive
subtr <- subtreeplot(tree, wait=TRUE, show.tip.label=FALSE)
# =================================================
# get number of descendant tips from each internal node
n.tip <- Ntip(tree)
subtr.list <- ape::subtrees(tree, wait=TRUE) # one subtree per internal node
# Internal node ids are numbered after the tips, hence the + n.tip offset.
child.ntip <- data.frame(node.id=(1:Nnode(tree))+n.tip,
n.tip=unlist(lapply(subtr.list, Ntip)))
n.tip.min <- 500 # the minimum number of tips in a subclade
n.tip.max <- 1000 # the maximum number of tips in a subclade
plt.ind <- which(child.ntip$n.tip > n.tip.min & child.ntip$n.tip < n.tip.max)
plt.node.id <- child.ntip$node.id[plt.ind] #internal node ids
n.plt <- length(plt.node.id)
plt.list <- vector("list", length=n.plt) # preallocate one plot slot per candidate clade
# NOTE(review): if n.plt == 0 this 1:n.plt loop still runs once (1:0);
# seq_len(n.plt) would be safer.
for (i in 1:n.plt) {
  print(paste('processing plot', i, 'out of', n.plt))
  # Highlight the candidate clade on the full tree, labelled by its node id.
  plt.list[[i]] <- ggtree(tree) +
    geom_hilight(node=plt.node.id[i], fill="steelblue", alpha=0.5) +
    theme(axis.title.x=element_text(size=15, face="bold")) +
    xlab(paste('node', plt.node.id[i]))
}
# plot all subtrees meeting the criteria
disp.plt <- marrangeGrob(plt.list, nrow=2, ncol=3)
print(disp.plt)
# Let's say we like internal node 1852.
# Extract tip labels in subclade with the selected node.
root.node <- 1852 # this is an internal node index acting as the root of a subtree
subtr <- subtr.list[[root.node - n.tip]] # subtrees() output is indexed by (node id - Ntip)
seq.lab <- subtr$tip.label
|
74c87607effb340146206e7b9d1a5cb50b215854
|
122b73ddcab44dfcc4f2da4ac32253247bf21ba1
|
/JagsScript.R
|
92b42bd0db272741a2f38fe745abe9cc948021af
|
[] |
no_license
|
jBernardADFG/ChenaSalchaStateSpace
|
c5ebd4bffdbb31817242f34bddea037930d4342d
|
d614f384bad81e4d27abf3aa881280b0a83a1d06
|
refs/heads/master
| 2022-10-22T02:45:56.767096
| 2020-06-17T16:27:20
| 2020-06-17T16:27:20
| 273,022,905
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 17,339
|
r
|
JagsScript.R
|
# PROJECT OUT A FEW YEARS #
# SOME THOUGHTS -- WE CAN PROJECT THE NUMBER OF SPAWNERS/RECRUITS OUT A FEW YEARS #
# THINK ABOUT TOSSING IN A TERM FOR SEA-SURFACE TEMPERATURE #
# ADDITIVE VS LINEAR EFFECTS #
# HIERARCHICAL TERMS FOR ALPHA AND BETA ?
# READ IN THE AND ORGANIZE DATA FOR USE IN JAGS MODEL #
# Data-preparation block: read the spawner/recruit spreadsheet and the USGS
# discharge records, then pack everything the JAGS model needs into `data`.
{
  # NOTE(review): clearing the workspace inside a script is discouraged.
  rm(list=objects())
  library(readxl)
  jordyData <- as.matrix(read_excel("D:/Jordy/ChenaSalchaSpawnerRecruit/Data/jordyData.xlsx"))
  n_years <- nrow(jordyData) # one spreadsheet row per year
  n_ages <- 8
  # Columns 2-9: abundance point estimates (log-transformed) and their CVs,
  # one column per river (mark-recapture and tower counts).
  log_mr_est <- log(jordyData[,2:3])
  log_tow_est <- log(jordyData[,6:7])
  mr_cv <- jordyData[,4:5]
  tow_cv <- jordyData[,8:9]
  # Replace missing CVs with the sentinel -999 (presumably interpreted as
  # "missing" by the JAGS model -- TODO confirm downstream handling).
  for (y in 1:n_years){
    for (r in 1:2){
      if(is.na(mr_cv[y,r])){
        mr_cv[y,r] <- -999
      }
      if(is.na(tow_cv[y,r])){
        tow_cv[y,r] <- -999
      }
    }
  }
  # Columns 10-15: in-river and Middle Yukon harvest estimates / sample counts.
  log_h_riv_est <- log(jordyData[,10:11])
  log_h_my_est <- log(jordyData[,12])
  n_my_riv <- jordyData[,13:14]
  n_my_tot <- jordyData[,15]
  # Missing Middle Yukon totals are treated as zero samples.
  for (y in 1:length(n_my_tot)){
    if(is.na(n_my_tot[y])){
      n_my_tot[y] <- 0
    }
  }
  # Age-composition samples: 2 x n_years x 6 array (presumably river x year x
  # age class -- slice 1 from columns 16-21, slice 2 from columns 22-27).
  age_samples <- array(NA, dim=c(2,n_years,6))
  age_samples[1,,] <- jordyData[,16:21]
  age_samples[2,,] <- jordyData[,22:27]
  n_age_samples <- apply(age_samples[,,], c(2,1), sum, na.rm=T)
  # MEAN MONTHLY DISCHARGE #
  chena_water_data <- read.delim("D:/Jordy/ChenaSalchaSpawnerRecruit/Data/chena-at-fairbanks")
  salcha_water_data <- read.delim("D:/Jordy/ChenaSalchaSpawnerRecruit/Data/salcha-near-salchaket")
  # Extract one month's discharge for a span of years and center it on its
  # median, so the covariate is a median-anomaly.
  # water_data: raw gauge table (columns 5-7 hold year, month, discharge).
  # month:      calendar month to keep; start_year/end_year: inclusive range.
  get.monthly.discharge <- function(water_data, month, start_year, end_year){
    water_data <- water_data[,5:7] # keep the year / month / discharge columns
    names(water_data) <- c("year", "month", "discharge")
    water_data <- water_data[water_data$month==month,]
    water_data <- water_data[,c(1,3)]
    water_data <- water_data[water_data$year>=start_year & water_data$year<=end_year,]
    water_data <- water_data[,2]
    water_data <- water_data - median(water_data)
    return(water_data)
  }
  # August discharge anomalies, 1986-2007, for both rivers.
  chena_water_data <- get.monthly.discharge(chena_water_data, 8, 1986, 2007)
  salcha_water_data <- get.monthly.discharge(salcha_water_data, 8, 1986, 2007)
  water_level <- cbind(chena_water_data, salcha_water_data)
  # NOTE(review): assigned with `=` and not included in `data`; confirm
  # whether s_max is used anywhere downstream.
  s_max=5000
  # Bundle everything for JAGS.
  data <- list(n_years=n_years,
               n_ages=n_ages,
               log_mr_est=log_mr_est,
               log_tow_est=log_tow_est,
               mr_cv=mr_cv,
               tow_cv=tow_cv,
               log_h_riv_est=log_h_riv_est,
               log_h_my_est=log_h_my_est,
               n_my_riv=n_my_riv,
               n_my_tot=n_my_tot,
               age_samples=age_samples,
               n_age_samples=n_age_samples,
               water_level=water_level)
}
# WRITE JAGS MODEL #
mod <-
"model{
########################################################################
############################ LATENT PROCESS ############################
########################################################################
# --------------
# IN-RIVER-RUN-ABUNDANCE ON THE CHENA AND SALCHA DURING THE INITIAL YEARS #
for (r in 1:2){
for (y in 1:n_ages){
IRRA[y,r] ~ dnorm(mu_e[r], tau_e[r])T(0,)
}
mu_e[r] ~ dunif(0,25000)
tau_e[r] <- pow(1/sig_e[r], 2)
sig_e[r] ~ dexp(1E-4)
}
# --------------
# HARVEST ON THE CHENA AND SALCHA #
for (r in 1:2){
for (y in 1:n_years){
H_riv[y,r] ~ dnorm(mu_h_riv[r], tau_h_riv[r])T(0,)
}
mu_h_riv[r] ~ dunif(0, 2000)
tau_h_riv[r] <- pow(1/sig_h_riv[r], 2)
sig_h_riv[r] ~ dexp(1E-4)
}
# --------------
# SPAWNERS GIVEN IN-RIVER-RUN ABUNDANCE AND HARVEST ON THE CHENA AND SALCHA #
for (r in 1:2){
for (y in 1:n_years){
S[y,r] <- max(IRRA[y,r]-H_riv[y,r], 1)
}
}
# ------------------------------------------------- #
# ----------------- RS PROCESSES ----------------- #
# ------------------------------------------------- #
# ---------------- USE ONE OF THE FOLLOWING TEN OPTIONS ---------------- #
# --------------
# SIMPLE RICKER RS PROCESS #
for (r in 1:2){
for (y in 1:n_years){
log_R[y,r] ~ dnorm(mu_sr[y,r], tau_w[r])
mu_sr[y,r] <- log(alpha[r]) + log(S[y,r]) - beta[r]*S[y,r]
R[y,r] <- exp(log_R[y,r])
}
tau_w[r] <- pow(1/sig_w[r], 2)
sig_w[r] ~ dexp(0.1)
alpha[r] ~ dexp(1E-2)T(1,)
log_alpha[r] <- log(alpha[r])
beta[r] ~ dexp(1E2)
}
# # --------------
# # RICKER RS PROCESS WITH AN AR(1) TERM #
# for (r in 1:2){
# log_R[1,r] ~ dnorm(mu_sr[1,r], tau_w[r])
# mu_sr[1,r] <- log(alpha[r]) + log(S[1,r]) - beta[r]*S[1,r]
# resid[1, r] <- 0
# R[1,r] <- exp(log_R[1,r])
# for (y in 2:n_years){
# log_R[y,r] ~ dnorm(mu_sr[y,r], tau_w[r])
# mu_sr[y,r] <- log(alpha[r]) + log(S[y,r]) - beta[r]*S[y,r] + phi[r]*resid[y-1,r]
# resid[y, r] <- log_R[y,r]-log(alpha[r])-log(S[y,r])+beta[r]*S[y,r]
# R[y,r] <- exp(log_R[y,r])
# }
# tau_w[r] <- pow(1/sig_w[r], 2)
# sig_w[r] ~ dexp(0.1)
# alpha[r] ~ dexp(1E-2)T(1,)
# log_alpha[r] <- log(alpha[r])
# phi[r] ~ dunif(-1,1)
# beta[r] ~ dexp(1E2)
# }
# # --------------
# # RICKER RS PROCESS WITH A TIME VARYING PRODUCTIVITY PARAMETER #
# for (r in 1:2){
# for (y in 1:n_years){
# log_R[y,r] ~ dnorm(mu_sr[y,r], tau_w[r])
# mu_sr[y,r] <- log_alpha[y,r] + log(S[y,r]) - beta[r]*S[y,r]
# log_alpha[y,r] <- c[r] + d[r]*(y-1)
# alpha[y,r] <- exp(log_alpha[y,r])
# R[y,r] <- exp(log_R[y,r])
# }
# tau_w[r] <- pow(1/sig_w[r], 2)
# sig_w[r] ~ dexp(0.1)
# c[r] ~ dexp(1E-2)
# d[r] ~ dnorm(0, 1E-1)T(-c[r]/(n_years-1),)
# beta[r] ~ dexp(1E2)
# }
# # --------------
# # RICKER RS PROCESS WITH AN AR(1) TERM AND A TIME VARYING PRODUCTIVITY PARAMETER #
# for (r in 1:2){
# log_R[1,r] ~ dnorm(mu_sr[1,r], tau_w[r])
# mu_sr[1,r] <- log_alpha[1,r] + log(S[1,r]) - beta[r]*S[1,r]
# log_alpha[1,r] <- c[r]
# alpha[1,r] <- exp(log_alpha[1,r])
# R[1,r] <- exp(log_R[1,r])
# resid[1, r] <- 0
# for (y in 2:n_years){
# log_R[y,r] ~ dnorm(mu_sr[y,r], tau_w[r])
# mu_sr[y,r] <- log(alpha[y,r]) + log(S[y,r]) - beta[r]*S[y,r] + phi[r]*resid[y-1,r]
# log_alpha[y,r] <- c[r] + d[r]*(y-1)
# alpha[y,r] <- exp(log_alpha[y,r])
# resid[y, r] <- log_R[y,r]-log(alpha[y, r])-log(S[y,r])+beta[r]*S[y,r]
# R[y,r] <- exp(log_R[y,r])
# }
# tau_w[r] <- pow(1/sig_w[r], 2)
# sig_w[r] ~ dexp(0.1)
# c[r] ~ dexp(1E-2)
# d[r] ~ dnorm(0, 1E-1)T(-c[r]/(n_years-1),)
# phi[r] ~ dunif(-1,1)
# beta[r] ~ dexp(1E2)
# }
# # --------------
# # RICKER RS PROCESS WITH A TERM FOR THE WATER LEVEL #
# for (r in 1:2){
# for (y in 1:n_years){
# log_R[y,r] ~ dnorm(mu_sr[y,r], tau_w[r])
# mu_sr[y,r] <- log(alpha[r]) + log(S[y,r]) - beta[r]*S[y,r] + omega[r]*water_level[y,r]
# R[y,r] <- exp(log_R[y,r])
# }
# tau_w[r] <- pow(1/sig_w[r], 2)
# sig_w[r] ~ dexp(0.1)
# alpha[r] ~ dexp(1E-2)T(1,)
# omega[r] ~ dnorm(0, 1E-4)
# log_alpha[r] <- log(alpha[r])
# beta[r] ~ dexp(1E2)
# }
# # --------------
# # RICKER RS PROCESS WITH WATER LEVEL AND AR(1) TERMS #
# for (r in 1:2){
# log_R[1,r] ~ dnorm(mu_sr[1,r], tau_w[r])
# mu_sr[1,r] <- log(alpha[r]) + log(S[1,r]) - beta[r]*S[1,r] + omega[r]*water_level[1,r]
# resid[1, r] <- 0
# R[1,r] <- exp(log_R[1,r])
# for (y in 2:n_years){
# log_R[y,r] ~ dnorm(mu_sr[y,r], tau_w[r])
# mu_sr[y,r] <- log(alpha[r]) + log(S[y,r]) - beta[r]*S[y,r] + omega[r]*water_level[y,r] + phi[r]*resid[y-1,r]
# resid[y, r] <- log_R[y,r]-log(alpha[r])-log(S[y,r])+beta[r]*S[y,r]-omega[r]*water_level[1,r]
# R[y,r] <- exp(log_R[y,r])
# }
# tau_w[r] <- pow(1/sig_w[r], 2)
# sig_w[r] ~ dexp(0.1)
# alpha[r] ~ dexp(1E-2)T(1,)
# omega[r] ~ dnorm(0, 1E-4)
# phi[r] ~ dunif(-1,1)
# log_alpha[r] <- log(alpha[r])
# beta[r] ~ dexp(1E2)
# }
# # --------------
# # RICKER RS PROCESS WITH TIME VARYING PRODUCTIVITY AND A TERM FOR WATER LEVEL #
# for (r in 1:2){
# for (y in 1:n_years){
# log_R[y,r] ~ dnorm(mu_sr[y,r], tau_w[r])
# mu_sr[y,r] <- log_alpha[y,r] + log(S[y,r]) - beta[r]*S[y,r] + omega[r]*water_level[y,r]
# log_alpha[y,r] <- c[r] + d[r]*(y-1)
# alpha[y,r] <- exp(log_alpha[y,r])
# R[y,r] <- exp(log_R[y,r])
# }
# tau_w[r] <- pow(1/sig_w[r], 2)
# sig_w[r] ~ dexp(0.1)
# c[r] ~ dexp(1E-2)
# d[r] ~ dnorm(0, 1E-1)T(-c[r]/(n_years-1),)
# omega[r] ~ dnorm(0, 1E-4)
# beta[r] ~ dexp(1E2)
# }
# # --------------
# # RICKER RS PROCESS WITH TIME VARYING PRODUCTIVITY AND AR(1) AND WATER LEVEL TERMS #
# for (r in 1:2){
# log_R[1,r] ~ dnorm(mu_sr[1,r], tau_w[r])
# mu_sr[1,r] <- log_alpha[1,r] + log(S[1,r]) - beta[r]*S[1,r] + omega[r]*water_level[1,r]
# log_alpha[1,r] <- c[r]
# alpha[1,r] <- exp(log_alpha[1,r])
# R[1,r] <- exp(log_R[1,r])
# resid[1, r] <- 0
# for (y in 2:n_years){
# log_R[y,r] ~ dnorm(mu_sr[y,r], tau_w[r])
# mu_sr[y,r] <- log(alpha[y,r]) + log(S[y,r]) - beta[r]*S[y,r] + phi[r]*resid[y-1,r] + omega[r]*water_level[y,r]
# log_alpha[y,r] <- c[r] + d[r]*(y-1)
# alpha[y,r] <- exp(log_alpha[y,r])
# resid[y, r] <- log_R[y,r] - log(alpha[y, r]) - log(S[y,r]) + beta[r]*S[y,r] - omega[r]*water_level[y,r]
# R[y,r] <- exp(log_R[y,r])
# }
# tau_w[r] <- pow(1/sig_w[r], 2)
# sig_w[r] ~ dexp(0.1)
# c[r] ~ dexp(1E-2)
# d[r] ~ dnorm(0, 1E-1)T(-c[r]/(n_years-1),)
# phi[r] ~ dunif(-1,1)
# omega[r] ~ dnorm(0, 1E-4)
# beta[r] ~ dexp(1E2)
# }
# # --------------
# # SIMPLE BEVERTON-HOLT RS PROCESS #
# for (r in 1:2){
# for (y in 1:n_years){
# log_R[y,r] ~ dnorm(mu_sr[y,r], tau_w[r])
# mu_sr[y,r] <- log(alpha[r]) + log(S[y,r]) - log(1+beta[r]*S[y,r])
# R[y,r] <- exp(log_R[y,r])
# }
# tau_w[r] <- pow(1/sig_w[r], 2)
# sig_w[r] ~ dexp(0.1)
# alpha[r] ~ dexp(1E-4)T(1,)
# log_alpha[r] <- log(alpha[r])
# beta[r] ~ dexp(1E-4)
# }
# # --------------
# # BEVERTON-HOLT RS PROCESS WITH AN AR(1) TERM #
# for (r in 1:2){
# log_R[1,r] ~ dnorm(mu_sr[1,r], tau_w[r])
# mu_sr[1,r] <- log(alpha[r]) + log(S[1,r]) - beta[r]*S[1,r]
# resid[1, r] <- 0
# for (y in 1:n_years){
# log_R[y,r] ~ dnorm(mu_sr[y,r], tau_w[r])
# mu_sr[y,r] <- log(alpha[r]) + log(S[y,r]) - log(1+beta[r]*S[y,r]) + phi[r]*resid[y-1,r]
# resid[y, r] <- log_R[y,r]-log(alpha[r])-log(S[y,r])+beta[r]*S[y,r]
# R[y,r] <- exp(log_R[y,r])
# }
# tau_w[r] <- pow(1/sig_w[r], 2)
# sig_w[r] ~ dexp(0.1)
# alpha[r] ~ dexp(1E-4)T(1,)
# log_alpha[r] <- log(alpha[r])
# phi[r] ~ dunif(-1,1)
# beta[r] ~ dexp(1E-4)
# }
# ------------------------------------------------- #
# --------------
# RETURNERS GIVEN RECRUITS #
for (r in 1:2){
for (y in (n_ages+1):n_years){
for (a in 1:6){
A1[y,r,a] <- R[(y-9+a),r]*p_maturity[r,y,7-a]
}
Returners[y,r] <- sum(A1[y,r,1:6])
}
}
# # ---------------- CHOOSE ONE OF THE FOLLOWING TWO OPTIONS ---------------- #
# # ----------------
# # ----------------
# # WITHOUT TIME VARYING AGE-AT-MATURITY VERSION 1 #
# for (r in 1:2){
# for (y in 1:n_years){
# p_maturity[r,y,1:6] ~ ddirch(gamma[1:6]+0.1)
# }
# }
# for (a in 1:n_ages){
# gamma[a] ~ dexp(0.1)
# }
# # ----------------
# # WITHOUT TIME VARYING AGE-AT-MATURITY VERSION 2 #
# for (r in 1:2){
# for (y in 1:n_years){
# p_maturity[r,y,1:6] ~ ddirch(gamma[r,1:6]+0.1)
# }
# }
# for (r in 1:2){
# for (a in 1:n_ages){
# gamma[r,a] ~ dexp(0.1)
# }
# }
# ----------------
# WITH TIME VARYING AGE AT MATURITY #
for (r in 1:2){
for (y in 1:n_years){
p_maturity[r,y,1:6] ~ ddirch(gamma[r,y,1:6]+0.1)
for (a in 1:6){
gamma[r,y,a] <- pi[r,y,a]*D[r]
pi[r,y,a] <- logistic[r,y,a]/sum(logistic[r,y,1:6])
logistic[r,y,a] <- exp(n[1,r,a]+n[2,r,a]*y)
}
}
D[r] ~ dexp(0.001)T(1,)
for (a in 1:6){
n[1,r,a] ~ dunif(-100, 100)
n[2,r,a] ~ dunif((-100-n[1,r,a])/n_years, (100-n[1,r,a])/n_years)
}
}
# ------------------------------------------------- #
# --------------
# PROBABILITY OF CHENA AND SALCHA HARVEST ON THE MIDDLE YUKON #
for (r in 1:2){
for (y in 1:n_years){
p_my_harvest[y,r] ~ dbeta(a[r], b[r])
}
a[r] <- xi[r] + 1
b[r] <- nu[r] - xi[r] + 1
nu[r] ~ dexp(1E-4)
xi[r] ~ dunif(0, nu[r])
}
# --------------
# MIDDLE YUKON HARVEST #
for (y in 1:n_years){
H_my[y] ~ dnorm(mu_h_my, tau_h_my)T(0,)
}
mu_h_my ~ dunif(0, 50000)
tau_h_my <- pow(1/sig_h_my, 2)
sig_h_my ~ dexp(1E-5)
# --------------
# IRRA GIVEN RETURNERS AND MIDDLE YUKON HARVEST #
for (r in 1:2){
for (y in (n_ages+1):n_years){
IRRA[y,r] <- max(Returners[y,r]-p_my_harvest[y,r]*H_my[y], 1)
}
}
#############################################################################
############################ OBSERVATION PROCESS ############################
#############################################################################
for(r in 1:2){
for (y in 1:n_years){
# --------------
# MARK-RECAPTURE ABUNDANCE ESTIMATES #
log_mr_est[y, r] ~ dnorm(log(IRRA[y,r]), tau_mr[y,r])
tau_mr[y,r] <- 1/var_mr[y,r]
var_mr[y,r] <- log(pow(mr_cv[y,r], 2)+1)
# --------------
# TOWER COUNTS #
log_tow_est[y, r] ~ dnorm(log(IRRA[y,r]), tau_tow[y,r])
tau_tow[y,r] <- 1/var_tow[y,r]
var_tow[y,r] <- log(pow(tow_cv[y,r],2)+1)
# --------------
# CHENA AND SALCHA HARVEST #
log_h_riv_est[y,r] ~ dnorm(log(H_riv[y,r]), tau_h_riv_est[r])
# --------------
# AGE DATA FROM THE CHENA AND SALCHA #
age_samples[r, y, 1:6] ~ dmulti(p_maturity[r,y,1:6], n_age_samples[y,r])
# --------------
# MOVEMENT BETWEEN THE MIDDLE YUKON AND THE CHENA AND SALCHA #
n_my_riv[y,r] ~ dbin(p_my_harvest[y,r], n_my_tot[y])
}
# --------------
# HYPERPRIOR FOR THE CHENA AND SALCHA HARVEST #
tau_h_riv_est[r] <- pow(1/sig_h_riv_est[r], 2)
sig_h_riv_est[r] ~ dexp(1E-2)
}
# --------------
# HARVEST IN THE MIDDLE YUKON #
for (y in 1:n_years){
log_h_my_est[y] ~ dnorm(log(H_my[y]), tau_h_my_est)
}
tau_h_my_est <- pow(1/sig_h_my_est, 2)
sig_h_my_est ~ dexp(1E-2)
############################################################################################
############################ CALCULATING SOME USEFUL STATISTICS ############################
############################################################################################
# ---------------- TOGGLE THE COMMENTS ACCORDING TO THE RS PROCESS ---------------- #
for (r in 1:2){
# ---------------- USE ONE OF THE FOLLOWING FOUR OPTIONS ---------------- #
# WITHOUT THE AR(1) TERM #
alpha_prime[r] <- alpha[r]*exp(pow(sig_w[r], 2)/2)
# # WITH THE AR(1) TERM #
# alpha_prime[r] <- alpha[r]*exp(pow(sig_w[r], 2)/(2*(1-pow(phi[r], 2))))
# # TIME VARYING PRODUCTIVITY WITHOUT THE AR(1) TERM #
# for (y in 1:n_years){
# alpha_prime[y,r] <- alpha[y,r]*exp(pow(sig_w[r], 2)/2)
# }
# # TIME VARYING PRODUCTIVITY WITH THE AR(1) TERM #
# for (y in 1:n_years){
# alpha_prime[y,r] <- alpha[y,r]*exp(pow(sig_w[r], 2)/(2*(1-pow(phi[r], 2))))
# }
# ---------------- USE ONE OF THE FOLLOWING THREE OPTIONS ---------------- #
# FOR THE RICKER RS RELATIONSHIP WITHOUT TIME VARYING PRODUCTIVITY #
S_msy[r] <- log(alpha_prime[r])/beta[r]*(0.5-0.07*log(alpha_prime[r]))
S_max[r] <- 1/beta[r]
S_eq[r] <- log(alpha_prime[r])/beta[r]
U_msy[r] <- log(alpha_prime[r])*(0.5-0.07*log(alpha_prime[r]))
# RICKER RS RELATIONSHIP WITH TIME VARYING PRODUCTIVITY PARAMETER #
# for (y in 1:n_years){
# S_msy[y,r] <- log(alpha_prime[y,r])/beta[r]*(0.5-0.07*log(alpha_prime[y,r]))
# S_max[y,r] <- 1/beta[r]
# S_eq[y,r] <- log(alpha_prime[y,r])/beta[r]
# U_msy[y,r] <- log(alpha_prime[y,r])*(0.5-0.07*log(alpha_prime[y,r]))
# }
# # FOR THE BEVERTON-HOLT RS RELATIONSHIP #
# S_msy[r] <- (sqrt(alpha_prime[r])-1)/beta[r]
# S_max[r] <- 1/beta[r]
# S_eq[r] <- (alpha_prime[r]-1)/beta[r]
# U_msy[r] <- 1-sqrt(alpha_prime[r])/alpha_prime[r]
}
}"
# Write the JAGS model definition (held in the string `mod`) to disk so rjags
# can parse it.
# NOTE(review): hard-coded absolute Windows path — consider a relative path or
# file.path()/here::here() so the script is portable across machines.
fmod = "D:/Jordy/ChenaSalchaSpawnerRecruit/R/JAGS/SR3.R"
writeLines(mod, con = fmod)
# RUN JAGS MODEL #
library(rjags)
jags_model = jags.model(fmod,
                        data = data,
                        n.chains = 4,
                        n.adapt = 20000)
# BUG FIX: coda.samples() and dic.samples() have no `n.burnin` argument — the
# value previously passed was silently absorbed by `...` and ignored, so no
# burn-in beyond adaptation was ever discarded. Burn-in in rjags is done by
# advancing the sampler explicitly with update() before monitoring.
update(jags_model, n.iter = 25000)
samples = coda.samples(jags_model,
                       variable.names = c("pi"),
                       n.iter = 50000,
                       thin = 50)
dic <- dic.samples(jags_model,
                   n.iter = 50000,
                   thin = 50)
# CONVERGENCE DIAGNOSTICS #
MCMCvis::MCMCtrace(samples)
# GET SOME GELMAN RUBIN BROOKS PLOTS
summary(samples)
|
2cd1154c204f1264d9c778e89aaf4410ccda3bdf
|
28c0bb9cf47bc8a8f629b389ba62c1808fd34691
|
/R/lactation.calf.model.r
|
4fb99a333cf02ff8aafb85d0cb9a161f3c96a252
|
[] |
no_license
|
gcostaneto/ZeBook
|
836e3dc8ab80de9ecce782e809606f4d647f30c0
|
b892a7e80a233b1c468526307eb5f7b49d95514d
|
refs/heads/master
| 2020-05-14T08:41:25.584061
| 2018-11-09T16:40:03
| 2018-11-09T16:40:03
| 181,727,649
| 1
| 0
| null | 2019-04-16T16:33:07
| 2019-04-16T16:33:07
| null |
UTF-8
|
R
| false
| false
| 7,165
|
r
|
lactation.calf.model.r
|
################################ FUNCTIONS #####################################
# Contribution of Juliette Adrian, Master2 internship, january-jully 2013
#' @title The Lactation model
#' @description \strong{Model description.}
#' This model is a model of lactating mammary glands of cattle described by Heather et al. (1983). This model then inspired more complex models based on these principles.
#' This model simulates the dynamics of the production of cow's milk.
#' The system is represented by 5 state variables: hormone level (H), the number of milk secreting cells (CS), the quantity of milk in the animal (M), the time average of that quantity (Mmoy), and the rate of milk removal (RM).
#' The model uses a time step dt = 0.1 for regular consumption of milk by a calf.
#' The model is defined by a few equations, with a total of fourteen parameters for the described process.
#' @param cu : number of undifferentiated cells
#' @param kdiv : cell division rate, Michaelis-Menten constant
#' @param kdl : constant degradation of milk
#' @param kdh : rate of decomposition of the hormone
#' @param km : constant secretion of milk
#' @param ksl : milk secretion rate, Michaelis-Menten constant
#' @param kr : average milk constant
#' @param ks : rate of degradation of the basal cells
#' @param ksm : constant rate of degradation of milk secreting cells
#' @param mh : parameter
#' @param mm : storage Capacity milk the animal
#' @param p : parameter
#' @param mum : setting the maximum rate of cell division
#' @param rc : parameter of milk m (t) function
#' @param duration : duration of simulation
#' @param dt : time step
#' @return matrix of weekly means with columns week, M, Mmoy, CS, RM
#' @examples lactation.calf.model(1000, 0.2, 4.43, 0.01, 0.005, 3.0, 0.048, 0.1,
#'   0.2, 27, 30, 10, 1, 40, 300, 0.1)
#' @export
lactation.calf.model <- function(cu, kdiv, kdl, kdh, km, ksl, kr, ks, ksm,
                                 mh, mm, p, mum, rc, duration, dt)
{
    # Number of Euler iterations performed by the simulation loop below.
    # BUG FIX: the state vectors were previously allocated with length
    # (duration - 1) / dt, which is shorter than the number of iterations,
    # so R silently grew (and repeatedly copied) the vectors each step.
    # Pre-allocating the true length preserves the numeric output exactly.
    n_steps <- length(seq(0, duration, by = dt))
    # 5 state variables
    # H : Hormone effector of cell division (kg/m3)
    H <- rep(NA_real_, n_steps + 1)
    # CS : Number of secretory cells
    CS <- rep(NA_real_, n_steps + 1)
    # M : Quantity of milk in animal (kg)
    M <- rep(NA_real_, n_steps + 1)
    # Mmoy : Time average of M (kg)
    Mmoy <- rep(NA_real_, n_steps + 1)
    # RM : Rate of removal of milk
    RM <- rep(NA_real_, n_steps)
    # Initialization of state variables
    H[1] <- 1.0
    CS[1] <- 520
    M[1] <- 0.0
    Mmoy[1] <- 0.0
    i <- 1
    # Simulation loop: explicit Euler integration with step dt
    for (t in seq(0, duration, by = dt))
    {
        # Rates of change of state variables (dH, dCS, dM, dMmoy)
        dH <- -kdh * H[i] * dt
        dCS <- (mum * (H[i] / (kdiv + H[i])) * cu -
                    (ks + ksm * ((Mmoy[i] / mh)^p / (1 + (Mmoy[i] / mh)^p))) * CS[i]) * dt
        dM <- (km * CS[i] * ((mm - M[i]) / (mm - M[i] + ksl)) -
                   (M[i] / (kdl + M[i])) * rc) * dt
        dMmoy <- kr * (M[i] - Mmoy[i]) * dt
        # Update state variables
        H[i + 1] <- H[i] + dH
        CS[i + 1] <- CS[i] + dCS
        M[i + 1] <- M[i] + dM
        Mmoy[i + 1] <- Mmoy[i] + dMmoy
        # Removal of milk by the calf at this step
        RM[i] <- (M[i] / (kdl + M[i])) * rc
        i <- i + 1
    }
    # End simulation loop
    # conversion day to week; keep the first duration/dt steps only
    day <- seq(dt, duration, by = dt)
    week <- day %/% 7
    results1 <- data.frame(M = M[1:(duration / dt)], Mmoy = Mmoy[1:(duration / dt)],
                           CS = CS[1:(duration / dt)], RM = RM[1:(duration / dt)],
                           day = day, week = week)
    # mean by week
    result <- by(results1[, c("week", "M", "Mmoy", "CS", "RM")], results1$week,
                 function(x) apply(x, 2, mean))
    results2 <- matrix(unlist(result), ncol = 5, byrow = TRUE,
                       dimnames = list(NULL, c("week", "M", "Mmoy", "CS", "RM")))
    return(results2)
}
################################################################################
#' @title The Lactation model for use with lactation.calf.simule
#' @description see lactation.calf.model for model description. This is a thin
#' wrapper that unpacks a named parameter vector and forwards each value to
#' \code{lactation.calf.model}.
#' @param param : a named vector of parameters
#' @param duration : duration of simulation
#' @param dt : time step
#' @return data.frame with CS, M, Mmoy, RM, day, week
#' @examples sim=lactation.calf.model2(lactation.define.param()["nominal",],6+2*7, 0.1)
#' @export
lactation.calf.model2 <- function(param, duration, dt){
    # Extract the 14 model parameters by name; do.call() then matches the
    # resulting named list against lactation.calf.model's argument names.
    model_args <- as.list(param[c("cu", "kdiv", "kdl", "kdh", "km", "ksl", "kr",
                                  "ks", "ksm", "mh", "mm", "p", "mum", "rc")])
    do.call(lactation.calf.model, c(model_args, list(duration = duration, dt = dt)))
}
################################################################################
#' @title Wrapper function to run the Lactation model for multiple sets of parameter values
#' @description Runs \code{lactation.calf.model2} once per row of the parameter
#' matrix \code{X} and stacks the weekly outputs, tagging each block with the
#' originating row index \code{id}.
#' @param X : parameter matrix (one parameter set per row, columns named as in
#' \code{lactation.define.param})
#' @param duration : duration of simulation
#' @param dt : time step
#' @return matrix with columns: id (line number from X), week, M, Mmoy, CS, RM
#' @export
lactation.calf.simule = function(X, duration, dt){
    # BUG FIX: seq_len(nrow(X)) replaces 1:dim(X)[1], which evaluates to
    # c(1, 0) and fails when X has zero rows.
    sim <- lapply(seq_len(nrow(X)), function(id) {
        cbind(id, lactation.calf.model2(X[id, ], duration, dt))
    })
    do.call(rbind, sim)
}
################################################################################
#' @title Define values of the parameters for the Lactation model
#' @description values from Heather et al. (1983) for different scenarios.
#' Only the "calf" version is implemented; requesting "machine" stops with an
#' explicit error (previously it returned a broken placeholder data.frame).
#' @param type : for which model version ? "calf" or "machine"
#' @return matrix with parameter values; rows are nominal, binf, bsup (the
#' binf/bsup uncertainty bounds are currently NA)
#' @examples lactation.define.param()
#' @export
lactation.define.param <- function(type = "calf")
{
    # Fail fast with an informative message on an unknown `type`; previously an
    # invalid value fell through to "object 'param' not found".
    type <- match.arg(type, c("calf", "machine"))
    # Each parameter is c(nominal, binf, bsup); bounds are not yet documented.
    # cu : number of undifferentiated cells (Unit ?)
    cu = c(1000, NA, NA)
    # kdiv : cell division rate, Michaelis-Menten constant (Unit ?)
    kdiv = c(0.2, NA, NA)
    # kdl : constant degradation of milk (Unit ?)
    kdl = c(4.43, NA, NA)
    # kdh : rate of decomposition of the hormone (Unit ?)
    kdh = c(0.01, NA, NA)
    # km : constant secretion of milk (Unit ?)
    km = c(0.005, NA, NA)
    # ksl : milk secretion rate, Michaelis-Menten constant (Unit ?)
    ksl = c(3.0, NA, NA)
    # kr : average milk constant (Unit ?)
    kr = c(0.048, NA, NA)
    # ks : rate of degradation of the basal cells (Unit ?)
    ks = c(0.1, NA, NA)
    # ksm : constant rate of degradation of milk secreting cells (Unit ?)
    ksm = c(0.2, NA, NA)
    # mh : parameter (Unit ?)
    mh = c(27, NA, NA)
    # mm : storage Capacity milk the animal (Unit ?)
    mm = c(30, NA, NA)
    # p : parameter (Unit ?)
    p = c(10, NA, NA)
    # mum : setting the maximum rate of cell division (Unit ?)
    mum = c(1, NA, NA)
    # for calf
    # rc : parameter of milk m (t) function
    rc = c(40, NA, NA)
    if (type == "calf") {
        param <- data.frame(cu, kdiv, kdl, kdh, km, ksl, kr, ks, ksm, mh, mm, p, mum, rc)
    } else {
        # The machine-milking placeholders (rma, initial CS/M) were all NA and
        # never used, so they were removed; implement them here when available.
        stop("lactation.define.param: parameter values for type = 'machine' are not implemented yet",
             call. = FALSE)
    }
    row.names(param) <- c("nominal", "binf", "bsup")
    return(as.matrix(param))
}
# end of file
|
fb36ef0e493f86235fb9262dae6cfd07f712a71b
|
8474e5591c6e2564895bde0522424f7cb60c90d1
|
/R/diff_methylsig.R
|
aa817f06d2a5786eaea3da0d8e9b068930139066
|
[] |
no_license
|
ajpatel2007/methylSig
|
398504ffe01d51c806098ee9da2751e09d260f65
|
cb469678e2e4b5c3569d0927675d698dbe0f8f01
|
refs/heads/master
| 2022-04-14T04:20:20.587995
| 2020-03-25T18:38:33
| 2020-03-25T18:38:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 20,925
|
r
|
diff_methylsig.R
|
# Tri-weight smoothing kernel: domain [-1, 1], range [0, 1], maximal at u = 0.
# Used as the default local_weight_function in diff_methylsig().
.weight_function <- function(u) {
    (1 - u * u)^3
}
# Derivative of the weighted beta-binomial log-likelihood with respect to the
# inverse-dispersion phi; its root is the dispersion estimate found by uniroot.
#
# local_c / local_t: loci x samples count matrices (methylated / unmethylated).
# mu: per-locus, per-sample methylation estimates (same shape as local_c).
# weight: per-locus kernel weights; not applied in the single-locus branch,
#   where the caller always passes weight = 1.
# The 1e-100 offsets guard digamma() against zero arguments.
.derivative_phi <- function(phi, local_c, local_t, mu, weight) {
    has_c <- local_c > 0
    has_t <- local_t > 0
    has_ct <- (local_c + local_t) > 0
    if (nrow(local_c) == 1) {
        # Single locus: no local window, weights ignored.
        return(
            sum(has_c * (mu * (digamma((mu * phi) + local_c + 1e-100) - digamma(mu * phi + 1e-100)))) +
                sum(has_t * ((1 - mu) * (digamma(((1 - mu) * phi) + local_t + 1e-100) - digamma(((1 - mu) * phi) + 1e-100)))) -
                sum(has_ct * (digamma(phi + local_c + local_t + 1e-100) - digamma(phi)))
        )
    }
    # Local window: accumulate the weighted contribution of each sample column.
    total <- 0
    for (col in seq_len(ncol(local_c))) {
        total <- total +
            sum(has_c[, col] * (weight * mu[, col] * (digamma(mu[, col] * phi + local_c[, col] + 1e-100) - digamma(mu[, col] * phi + 1e-100)))) +
            sum(has_t[, col] * (weight * (1 - mu[, col]) * (digamma((1 - mu[, col]) * phi + local_t[, col] + 1e-100) - digamma((1 - mu[, col]) * phi + 1e-100)))) -
            sum(has_ct[, col] * (weight * (digamma(phi + local_c[, col] + local_t[, col] + 1e-100) - digamma(phi))))
    }
    total
}
# Derivative of the weighted beta-binomial log-likelihood with respect to the
# (scalar) group methylation level mu; its root is the group mean estimate.
#
# local_c / local_t: loci x samples count matrices (methylated / unmethylated).
# phi: scalar dispersion estimate.
# weight: per-locus kernel weights; not applied in the single-locus branch,
#   where the caller always passes weight = 1.
# The 1e-100 offsets guard digamma() against zero arguments.
.derivative_mu <- function(mu, local_c, local_t, phi, weight) {
    has_c <- local_c > 0
    has_t <- local_t > 0
    if (nrow(local_c) == 1) {
        # Single locus: no local window, weights ignored.
        return(
            sum(has_c * (digamma(mu * phi + local_c + 1e-100) - digamma(mu * phi + 1e-100))) -
                sum(has_t * (digamma((1 - mu) * phi + local_t + 1e-100) - digamma((1 - mu) * phi + 1e-100)))
        )
    }
    # Local window: accumulate the weighted contribution of each sample column.
    total <- 0
    for (col in seq_len(ncol(local_c))) {
        total <- total +
            sum(has_c[, col] * (weight * (digamma(mu * phi + local_c[, col] + 1e-100) - digamma(mu * phi + 1e-100)))) -
            sum(has_t[, col] * (weight * (digamma((1 - mu) * phi + local_t[, col] + 1e-100) - digamma((1 - mu) * phi + 1e-100))))
    }
    total
}
# Twice the weighted beta-binomial log-likelihood of the counts for a group,
# used to build the likelihood ratio statistic in diff_methylsig().
#
# mu: scalar methylation proportion estimate for the group.
# phi: scalar dispersion estimate.
# local_c / local_t: loci x samples count matrices (methylated / unmethylated).
# weight: per-locus kernel weights; not applied in the single-locus branch,
#   where the caller always passes weight = 1.
# The 1e-100 offsets guard lgamma() against zero arguments.
.log_likelihood <- function(mu, phi, local_c, local_t, weight) {
    llik = 0
    indicator_c = local_c > 0
    indicator_t = local_t > 0
    if(nrow(local_c) == 1) {
        llik = llik +
            sum( indicator_c * (lgamma(mu * phi + local_c + 1e-100) - lgamma(mu * phi + 1e-100)) ) +
            sum( indicator_t * (lgamma((1 - mu) * phi + local_t + 1e-100) - lgamma((1 - mu) * phi + 1e-100)) )
    } else {
        for(g in seq(ncol(local_c))) {
            # BUG FIX: the unmethylated reference term was
            # lgamma((1 - mu) + 1e-100), missing the `* phi` factor that the
            # single-locus branch uses; both branches now evaluate the same
            # beta-binomial kernel.
            llik = llik +
                sum( indicator_c[,g] * (weight * (lgamma(mu * phi + local_c[,g] + 1e-100) - lgamma(mu * phi + 1e-100))) ) +
                sum( indicator_t[,g] * (weight * (lgamma((1 - mu) * phi + local_t[,g] + 1e-100) - lgamma((1 - mu) * phi + 1e-100))) )
        }
    }
    2*llik
}
#' Calculates differential methylation statistics using a Beta-binomial approach.
#'
#' The function calculates differential methylation statistics between two groups of samples using a beta-binomial approach to calculate differential methylation statistics, accounting for variation among samples within each group. The function can be applied to a \code{BSseq} object subjected to \code{filter_loci_by_coverage()}, \code{filter_loci_by_snps()}, \code{filter_loci_by_group_coverage()} or any combination thereof. Moreover, the function can be applied to a \code{BSseq} object which has been tiled with \code{tile_by_regions()} or \code{tile_by_windows()}.
#'
#' @param bs a \code{BSseq} object.
#' @param group_column a \code{character} string indicating the column of \code{pData(bs)} to use for determining group membership.
#' @param comparison_groups a named \code{character} vector indicating the \code{case} and \code{control} factors of \code{group_column} for the comparison.
#' @param disp_groups a named \code{logical} vector indicating the whether to use \code{case}, \code{control}, or both to estimate the dispersion.
#' @param local_window_size an \code{integer} indicating the size of the window for use in determining local information to improve mean and dispersion parameter estimations. In addition to a the distance constraint, a maximum of 5 loci upstream and downstream of the locus are used. The default is \code{0}, indicating no local information is used.
#' @param local_weight_function a weight kernel function. The default is the tri-weight kernel function defined as \code{function(u) = (1-u^2)^3}. The domain of any given weight function should be [-1,1], and the range should be [0,1].
#' @param t_approx a \code{logical} value indicating whether to use squared t approximation for the likelihood ratio statistics. Chi-square approximation (\code{t_approx = FALSE}) is recommended when the sample size is large. Default is \code{TRUE}.
#' @param n_cores an \code{integer} denoting how many cores should be used for differential methylation calculations.
#'
#' @return A \code{GRanges} object containing the following \code{mcols}:
#' \describe{
#' \item{meth_case:}{ Methylation estimate for case. }
#' \item{meth_control:}{ Methylation estimate for control. }
#' \item{meth_diff:}{ The difference \code{meth_case - meth_control}. }
#' \item{direction:}{ The group for which the locus is hyper-methylated. Note, this is not subject to significance thresholds. }
#' \item{pvalue:}{ The p-value from the t-test (\code{t_approx = TRUE}) or the Chi-Square test (\code{t_approx = FALSE}). }
#' \item{fdr:}{ The Benjamini-Hochberg adjusted p-values using \code{p.adjust(method = 'BH')}. }
#' \item{disp_est:}{ The dispersion estimate. }
#' \item{log_lik_ratio:}{ The log likelihood ratio. }
#' \item{df:}{ Degrees of freedom used when \code{t_approx = TRUE}. }
#' }
#'
#' @examples
#' data(BS.cancer.ex, package = 'bsseqData')
#'
#' bs = filter_loci_by_group_coverage(
#' bs = BS.cancer.ex,
#' group_column = 'Type',
#' c('cancer' = 2, 'normal' = 2))
#'
#' small_test = bs[seq(50)]
#'
#' diff_gr = diff_methylsig(
#' bs = small_test,
#' group_column = 'Type',
#' comparison_groups = c('case' = 'cancer', 'control' = 'normal'),
#' disp_groups = c('case' = TRUE, 'control' = TRUE),
#' local_window_size = 0,
#' t_approx = TRUE,
#' n_cores = 1)
#'
#' @export
diff_methylsig = function(
    bs,
    group_column,
    comparison_groups,
    disp_groups,
    local_window_size = 0,
    local_weight_function,
    t_approx = TRUE,
    n_cores = 1) {
    # Numerical bounds for the dispersion (phi) and methylation (mu) searches
    # performed with stats::uniroot() below.
    # Constants
    min_disp = 1e-6
    min_inverse_disp = 0.001
    max_inverse_disp = max(1/max(min_disp, 1e-6), min_inverse_disp)
    min_meth = 0
    max_meth = 1
    #####################################
    # Check missing
    if (missing(bs)) {
        stop('Must pass bs as a BSseq object.')
    }
    if (missing(group_column)) {
        stop('Must pass group_column as a character string.')
    }
    if (missing(comparison_groups)) {
        stop('Must pass comparison_groups as a named character vector with names "case" and "control".')
    }
    if (missing(disp_groups)) {
        stop('Must pass disp_groups as a logical vector.')
    }
    # Use .weight_function by default, but not in the function definition because
    # this introduces some strange exporting issues. The user really shouldn't
    # have to think about this at all.
    if (missing(local_weight_function)) {
        local_weight_function = .weight_function
    }
    #####################################
    # Check types
    if (!is(bs, 'BSseq')) {
        stop('bs must be class BSseq.')
    }
    if (!(is(group_column, 'character') && length(group_column) == 1)) {
        stop('group_column must be a character string.')
    }
    if (!is(comparison_groups, 'character')) {
        stop('comparison_groups must be a named character vector.')
    }
    if (!is(disp_groups, 'logical')) {
        stop('disp_groups must be a named logical vector.')
    }
    if (!(is(local_window_size, 'numeric') && length(local_window_size) == 1)) {
        stop('local_window_size must be an integer.')
    }
    if (!is(local_weight_function, 'function')) {
        stop('local_weight_function must be a function.')
    }
    if (!is(t_approx, 'logical')) {
        stop('t_approx must be TRUE/FALSE.')
    }
    if (!(is(n_cores, 'numeric') && length(n_cores) == 1)) {
        stop('n_cores must be an integer.')
    }
    #####################################
    # Check valid group_column name
    if (!(group_column %in% colnames(pData(bs)))) {
        stop(sprintf('group_column: %s not in column names of pData(bs): %s',
            group_column, paste(colnames(pData(bs)), collapse = ', ')))
    }
    # Check valid comparison_groups values in group_column of pData(bs)
    if (!all(comparison_groups %in% pData(bs)[, group_column])) {
        stop(sprintf('Not all comparison_groups are in group_column: %s',
            paste(setdiff(comparison_groups, pData(bs)[, group_column]), collapse = ', ') ))
    }
    # Check valid comparison_groups names
    if (!all(c('case','control') %in% names(comparison_groups))) {
        stop('comparison_groups vector must be a named vector with names "case" and "control".')
    }
    # Check valid disp_groups names
    if (!all(c('case','control') %in% names(disp_groups))) {
        stop('disp_groups vector must be a named vector with names "case" and "control".')
    }
    # Check valid disp_groups values
    if (!any(disp_groups)) {
        stop('disp_groups must be a named logical vector with at least one TRUE value corresponding to group name for case or control.')
    }
    # Check for invalid local_window_size == 0 && regions state
    # Cannot use local information on region-resolution data, that's the point of tiling
    if (local_window_size > 0 && median(width(bs)) > 2) {
        stop('Cannot use local information on region-resolution data. Detected local_window_size > 0 and median width of loci > 2')
    }
    #####################################
    case = comparison_groups['case']
    control = comparison_groups['control']
    # Rows of pdata and columns of bs
    pdata = bsseq::pData(bs)
    case_idx = which(pdata[, group_column] == case)
    control_idx = which(pdata[, group_column] == control)
    # Which sample columns contribute to the dispersion estimate. The earlier
    # any(disp_groups) check guarantees one of these branches is taken.
    # NOTE(review): scalar context; `&&` would be the idiomatic operator here.
    if(all(disp_groups)) {
        disp_groups_idx = c(case_idx, control_idx)
    } else if (disp_groups['case'] & !disp_groups['control']) {
        disp_groups_idx = case_idx
    } else if (!disp_groups['case'] & disp_groups['control']) {
        disp_groups_idx = control_idx
    }
    #####################################
    num_loci = length(bs)
    gr = granges(bs)
    # Coverage (total reads) and methylated-read counts: loci x samples.
    cov_mat = as.matrix(bsseq::getCoverage(bs, type = 'Cov'))
    meth_mat = as.matrix(bsseq::getCoverage(bs, type = 'M'))
    # Estimate meth per locus within each group. The same value is used for all samples within the same group.
    # Note, the approach is to sum reads over all samples per group per locus
    # (the 1e-100 avoids 0/0 at loci with no coverage in a group).
    meth_est = matrix(0, ncol = ncol(bs), nrow = nrow(bs))
    meth_est[, case_idx] = base::rowSums(meth_mat[, case_idx]) / (base::rowSums(cov_mat[, case_idx]) + 1e-100)
    meth_est[, control_idx] = base::rowSums(meth_mat[, control_idx]) / (base::rowSums(cov_mat[, control_idx]) + 1e-100)
    #####################################
    # Per-locus estimation, parallelized over loci; each iteration returns a
    # fixed-length numeric vector that is row-bound into a matrix.
    result = do.call(rbind, parallel::mclapply(seq_along(gr), function(locus_idx){
        ### Deal with local information (or not)
        if(local_window_size != 0) {
            # NOTE: It is much faster to work with subsets of the result of start()
            # than it is to work with subsets of GRanges.
            # Get the indices which are within the local_window_size, but also limit to 5 CpGs on either side
            # NOTE, local information is only used with cytosine/CpG resolution data so start() is valid.
            # If regions were allowed, we would have to pay attention to which side we're on and use start()/end()
            local_loci_idx = intersect(
                which(abs(start(gr)[locus_idx] - start(gr)) < local_window_size),
                max(1, locus_idx - 5):min(num_loci, locus_idx + 5))
            if(length(local_loci_idx) == 1) {
                # Do not use local information when there is only one local locus
                local_loci_idx = locus_idx
                local_weights = 1
                # Collect Cov and M matrices for all the loci in the window
                # Rows are loci and columns are samples
                local_cov = matrix(cov_mat[local_loci_idx, ], nrow = 1)
                local_meth = matrix(meth_mat[local_loci_idx, ], nrow = 1)
                local_unmeth = local_cov - local_meth
                # Collect the correct rows of meth_est
                local_meth_est = matrix(meth_est[local_loci_idx, ], nrow = 1)
            } else {
                # We need to scale the loci in the window onto the interval [-1, 1] because
                # that is the domain of the local_weight_function.
                # This is a vector of the distances of the local loci to the loci of interest (domain)
                local_loci_norm = (start(gr)[local_loci_idx] - start(gr)[locus_idx]) / (local_window_size + 1)
                # Calculate the weights
                # Each is a vector of values of the weight function (range)
                local_weights = local_weight_function(local_loci_norm)
                # Collect Cov and M matrices for all the loci in the window
                # Rows are loci and columns are samples
                local_cov = cov_mat[local_loci_idx, ]
                local_meth = meth_mat[local_loci_idx, ]
                local_unmeth = local_cov - local_meth
                # Collect the correct rows of meth_est
                local_meth_est = meth_est[local_loci_idx, ]
            }
        } else {
            # Do not use local information when the local_window_size is 0
            local_loci_idx = locus_idx
            local_weights = 1
            # Collect Cov and M matrices for all the loci in the window
            # Rows are loci and columns are samples
            local_cov = matrix(cov_mat[local_loci_idx, ], nrow = 1)
            local_meth = matrix(meth_mat[local_loci_idx, ], nrow = 1)
            local_unmeth = local_cov - local_meth
            # Collect the correct rows of meth_est
            local_meth_est = matrix(meth_est[local_loci_idx, ], nrow = 1)
        }
        #####################################
        ### Compute the degrees of freedom for the locus
        # One parameter is estimated per group used in the dispersion fit.
        if(all(disp_groups)) {
            df_subtract = 2
        } else {
            df_subtract = 1
        }
        df = pmax(rowSums(local_cov[, disp_groups_idx, drop = FALSE] > 0) - df_subtract, 0)
        # Compute the degrees of freedom to be used in the test for differential methylation
        df = sum(df * local_weights)
        #####################################
        if(df > 1) {
            ### Common disp_groups calculation
            # This returns a singleton numeric
            # Check the sign of the derivative at the bounds before calling
            # uniroot(): if there is no sign change, clamp to the boundary.
            if(.derivative_phi(
                phi = max_inverse_disp,
                local_c = local_meth[, disp_groups_idx, drop = FALSE],
                local_t = local_unmeth[, disp_groups_idx, drop = FALSE],
                mu = local_meth_est[, disp_groups_idx, drop = FALSE],
                weight = local_weights) >= 0) {
                disp_est = max_inverse_disp
            } else if(.derivative_phi(
                phi = min_inverse_disp,
                local_c = local_meth[, disp_groups_idx, drop = FALSE],
                local_t = local_unmeth[, disp_groups_idx, drop = FALSE],
                mu = local_meth_est[, disp_groups_idx, drop = FALSE],
                weight = local_weights) <= 0){
                disp_est = min_inverse_disp
            } else {
                disp_est = stats::uniroot(
                    f = .derivative_phi,
                    interval = c(min_inverse_disp, max_inverse_disp),
                    local_meth[, disp_groups_idx, drop = FALSE],
                    local_unmeth[, disp_groups_idx, drop = FALSE],
                    local_meth_est[, disp_groups_idx, drop = FALSE],
                    local_weights)$root
            }
            #####################################
            ### Common group means calculation
            # This returns a numeric vector (control, case, control + case) with the mu est
            group_meth_est_list = list(control_idx, case_idx, c(control_idx, case_idx))
            group_meth_est = rep(0, length(group_meth_est_list))
            for(group_idx in seq_along(group_meth_est_list)) {
                if(sum(local_meth[, group_meth_est_list[[group_idx]], drop = FALSE]) == 0) {
                    # If there are no local C reads, methylation is 0
                    group_meth_est[group_idx] = 0
                } else if (sum(local_unmeth[, group_meth_est_list[[group_idx]], drop = FALSE]) == 0) {
                    # If there are no local T reads, methylation is 1
                    group_meth_est[group_idx] = 1
                } else {
                    # Otherwise, do something fancier
                    group_meth_est[group_idx] = stats::uniroot(
                        f = .derivative_mu,
                        interval = c(min_meth, max_meth),
                        local_meth[, group_meth_est_list[[group_idx]], drop = FALSE],
                        local_unmeth[, group_meth_est_list[[group_idx]], drop = FALSE],
                        disp_est,
                        local_weights)$root
                }
            }
            #####################################
            ### log Likelihood ratio calculation
            # Separate-means model (control + case fit separately) versus the
            # common-mean model; larger values indicate differential methylation.
            log_lik_ratio =
                .log_likelihood(
                    mu = group_meth_est[1],
                    phi = disp_est,
                    local_c = local_meth[, control_idx, drop = FALSE],
                    local_t = local_unmeth[, control_idx, drop = FALSE],
                    weight = local_weights) +
                .log_likelihood(
                    mu = group_meth_est[2],
                    phi = disp_est,
                    local_c = local_meth[, case_idx, drop = FALSE],
                    local_t = local_unmeth[, case_idx, drop = FALSE],
                    weight = local_weights) -
                .log_likelihood(
                    mu = group_meth_est[3],
                    phi = disp_est,
                    local_c = local_meth[, c(control_idx, case_idx), drop = FALSE],
                    local_t = local_unmeth[, c(control_idx, case_idx), drop = FALSE],
                    weight = local_weights)
            #####################################
            # Methylation estimates are reported as percentages; df + 2 restores
            # the group parameters subtracted above for the t approximation.
            locus_data = c(
                disp_est = disp_est,
                log_lik_ratio = log_lik_ratio,
                meth_control = group_meth_est[1]*100,
                meth_case = group_meth_est[2]*100,
                meth_all = group_meth_est[3]*100,
                df = df + 2)
        } else {
            # Not enough degrees of freedom, return NAs, these will be removed
            # with a message to the user with how many
            locus_data = c(
                disp_est = NA,
                log_lik_ratio = NA,
                meth_control = NA,
                meth_case = NA,
                meth_all = NA,
                df = df)
        }
        return(locus_data)
    }, mc.cores = n_cores))
    #####################################
    # Build GRanges version of result
    result_gr = gr
    mcols(result_gr) = result
    # Calculate pvalue
    # Two-sided p-value from the signed square root of the LR statistic (t
    # approximation) or from a 1-df chi-square test on the LR statistic.
    if(t_approx) {
        result_gr$pvalue = stats::pt(-sqrt(pmax(result_gr$log_lik_ratio, 0)), result_gr$df) * 2
    } else {
        result_gr$pvalue = stats::pchisq(pmax(result_gr$log_lik_ratio, 0), 1, lower.tail = FALSE)
    }
    # Calculate meth_diff and set very small differences to 0
    result_gr$meth_diff = (result_gr$meth_case - result_gr$meth_control)
    result_gr$meth_diff[abs(result_gr$meth_diff) < 0.01] = 0
    # Correct for multiple testing
    result_gr$fdr = stats::p.adjust(result_gr$pvalue, method = 'BH')
    # Assign direction of hypermethylation (NOTE, this is not "significance")
    result_gr$direction = ifelse(result_gr$meth_diff >= 0, case, control)
    #####################################
    # Order output columns and attach to GRanges
    col_order = c(
        'meth_case',
        'meth_control',
        'meth_diff',
        'direction',
        'pvalue',
        'fdr',
        'disp_est',
        'log_lik_ratio',
        'df'
    )
    mcols(result_gr) = mcols(result_gr)[, col_order]
    #####################################
    # Check for NA results and indicate how many loci were dropped because of
    # a lack of available degrees of freedom
    # NOTE(review): the NA branch above can also produce fractional df < 1 when
    # local weights are in effect; only df == 1 rows are dropped here — confirm
    # whether loci with 0 <= df < 1 can reach this point and survive as NA rows.
    insufficient_df = result_gr$df == 1
    if(sum(insufficient_df) > 0) {
        result_gr = result_gr[!insufficient_df]
        message(sprintf('%s loci were dropped due to insufficient degrees of freedom (df = 1).', sum(insufficient_df)))
    }
    return(result_gr)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.