content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
# Author : Moksha Menghaney
# Date : October 27th, 2020
# This piece of code will generate tract & zipcode level urban/suburban/rural classification
# for policy scan, files HS02
# It also generates county level % rurality metrics for policy scan which is stored
# in raw files folder for further processing.
# NOTE(review): two Excel readers are used below — xlsx::read.xlsx (loaded here)
# for the zipcode workbook and openxlsx::read.xlsx (namespaced call) for the
# tract workbook; both packages must be installed.
library(xlsx)
library(tidyverse)
# Input / output directories, relative to the working directory at run time.
geometryFilesLoc <- './opioid-policy-scan/Policy_Scan/data_final/geometryFiles/'
rawDataFilesLoc <- './opioid-policy-scan/Policy_Scan/data_raw/'
outputFilesLoc <- './opioid-policy-scan/Policy_Scan/data_final/'
#classifications finalized
# RUCA secondary codes mapped to Urban and Suburban; every other code is Rural.
urban <- c(1.0, 1.1)
suburban <- c(2.0, 2.1, 4.0, 4.1)
# everything else rural
### RUCA AT ZCTA LEVEL
# Read the zipcode-level RUCA workbook ('Data' sheet) and drop columns that are
# not needed downstream.
rucaZipcode <- read.xlsx(paste0(rawDataFilesLoc,'RUCA2010zipcode.xlsx'),
sheetName = 'Data', header = TRUE) %>% select(-c(STATE,ZIP_TYPE))
# Classify each ZCTA by its secondary RUCA code (RUCA2).
rucaZipcode$rurality <- ifelse(rucaZipcode$RUCA2 %in% urban, "Urban",
ifelse(rucaZipcode$RUCA2 %in% suburban, "Suburban", "Rural"))
rucaZipcode$rurality <- factor(rucaZipcode$rurality , levels= c('Urban','Suburban','Rural'))
# Keep RUCA codes as character so they round-trip through CSV unchanged.
rucaZipcode <- rucaZipcode %>%
mutate(RUCA1 = as.character(RUCA1),
RUCA2 = as.character(RUCA2))
write.csv(rucaZipcode,paste0(outputFilesLoc,'HS02_RUCA_Z.csv'),
row.names = FALSE)
### RUCA AT TRACT LEVEL
# The tract workbook has a one-row title, so the data starts on row 2.
rucaTract <- openxlsx::read.xlsx(paste0(rawDataFilesLoc,'ruca2010revisedTract.xlsx'),
sheet = 1, startRow = 2, colNames = TRUE)
colnames(rucaTract) <- c('countyFIPS','State','County','tractFIPS','RUCA1',
'RUCA2','Pop_2010','Area_2010','PopDen_2010')
# Same Urban/Suburban/Rural mapping as for ZCTAs, applied to tracts.
rucaTract$rurality <- ifelse(rucaTract$RUCA2 %in% urban, "Urban",
ifelse(rucaTract$RUCA2 %in% suburban, "Suburban", "Rural"))
rucaTract$rurality <- factor(rucaTract$rurality , levels= c('Urban','Suburban','Rural'))
write.csv(rucaTract %>%
select(tractFIPS, RUCA1, RUCA2, rurality) %>%
mutate(RUCA1 = as.character(RUCA1),
RUCA2 = as.character(RUCA2)),
paste0(outputFilesLoc,'HS02_RUCA_T.csv'), row.names = FALSE)
### RUCA AT COUNTY LEVEL
# calculate % of tracts in county rural, urban, suburban
rucaCountyRurality <- rucaTract %>%
select(countyFIPS, rurality) %>%
count(countyFIPS, rurality) %>%
group_by(countyFIPS) %>%
mutate(pct = n / sum(n))
# NOTE(review): the data frame is still grouped by countyFIPS here, so the
# sum() in `check` below is evaluated within each county (one row per county
# after pivoting) — if the grouping were dropped, sum() would run over all
# rows instead. Verify if this pipeline is ever refactored.
rucaCountyRurality <- pivot_wider(rucaCountyRurality,id_cols = 'countyFIPS',
names_from = 'rurality',
values_from = 'pct', values_fill = 0) %>%
mutate(check = round(sum(Urban+Suburban+Rural),2))
## check data and clean up
# Print any county whose three shares do not sum to 1 (manual sanity check;
# the result is not captured).
rucaCountyRurality[which(rucaCountyRurality$check !=1),]
rucaCountyRurality <- data.frame(rucaCountyRurality %>%
mutate(Urban = round(Urban,2),
Suburban = round(Suburban,2),
Rural = round(Rural,2)) %>%
rename(GEOID = countyFIPS,
rcaUrbP = Urban,
rcaSubrbP = Suburban,
rcaRuralP = Rural))
# County-level shares go to data_raw (not data_final) for further processing.
write.csv(rucaCountyRurality %>% select(-check), paste0(rawDataFilesLoc,'county_RUCA_rurality.csv'),
row.names = FALSE)
| /code/ruralityRUCA_T_Z.R | no_license | sterlingfearing/opioid-policy-scan | R | false | false | 3,554 | r | # Author : Moksha Menghaney
# NOTE(review): the lines below are a verbatim duplicate (dataset-dump
# artifact) of the rurality script at the top of this file, minus its first
# author line. Sourcing the whole file re-runs the same steps and rewrites
# the same output CSVs.
# Date : October 27th, 2020
# This piece of code will generate tract & zipcode level urban/suburban/rural classification
# for policy scan, files HS02
# It also generates county level % rurality metrics for policy scan which is stored
# in raw files folder for further processing.
library(xlsx)
library(tidyverse)
# Input / output directories, relative to the working directory at run time.
geometryFilesLoc <- './opioid-policy-scan/Policy_Scan/data_final/geometryFiles/'
rawDataFilesLoc <- './opioid-policy-scan/Policy_Scan/data_raw/'
outputFilesLoc <- './opioid-policy-scan/Policy_Scan/data_final/'
#classifications finalized
urban <- c(1.0, 1.1)
suburban <- c(2.0, 2.1, 4.0, 4.1)
# everything else rural
### RUCA AT ZCTA LEVEL
rucaZipcode <- read.xlsx(paste0(rawDataFilesLoc,'RUCA2010zipcode.xlsx'),
sheetName = 'Data', header = TRUE) %>% select(-c(STATE,ZIP_TYPE))
rucaZipcode$rurality <- ifelse(rucaZipcode$RUCA2 %in% urban, "Urban",
ifelse(rucaZipcode$RUCA2 %in% suburban, "Suburban", "Rural"))
rucaZipcode$rurality <- factor(rucaZipcode$rurality , levels= c('Urban','Suburban','Rural'))
rucaZipcode <- rucaZipcode %>%
mutate(RUCA1 = as.character(RUCA1),
RUCA2 = as.character(RUCA2))
write.csv(rucaZipcode,paste0(outputFilesLoc,'HS02_RUCA_Z.csv'),
row.names = FALSE)
### RUCA AT TRACT LEVEL
rucaTract <- openxlsx::read.xlsx(paste0(rawDataFilesLoc,'ruca2010revisedTract.xlsx'),
sheet = 1, startRow = 2, colNames = TRUE)
colnames(rucaTract) <- c('countyFIPS','State','County','tractFIPS','RUCA1',
'RUCA2','Pop_2010','Area_2010','PopDen_2010')
rucaTract$rurality <- ifelse(rucaTract$RUCA2 %in% urban, "Urban",
ifelse(rucaTract$RUCA2 %in% suburban, "Suburban", "Rural"))
rucaTract$rurality <- factor(rucaTract$rurality , levels= c('Urban','Suburban','Rural'))
write.csv(rucaTract %>%
select(tractFIPS, RUCA1, RUCA2, rurality) %>%
mutate(RUCA1 = as.character(RUCA1),
RUCA2 = as.character(RUCA2)),
paste0(outputFilesLoc,'HS02_RUCA_T.csv'), row.names = FALSE)
### RUCA AT COUNTY LEVEL
# calculate % of tracts in county rural, urban, suburban
rucaCountyRurality <- rucaTract %>%
select(countyFIPS, rurality) %>%
count(countyFIPS, rurality) %>%
group_by(countyFIPS) %>%
mutate(pct = n / sum(n))
# Still grouped by countyFIPS, so sum() below works within each county.
rucaCountyRurality <- pivot_wider(rucaCountyRurality,id_cols = 'countyFIPS',
names_from = 'rurality',
values_from = 'pct', values_fill = 0) %>%
mutate(check = round(sum(Urban+Suburban+Rural),2))
## check data and clean up
rucaCountyRurality[which(rucaCountyRurality$check !=1),]
rucaCountyRurality <- data.frame(rucaCountyRurality %>%
mutate(Urban = round(Urban,2),
Suburban = round(Suburban,2),
Rural = round(Rural,2)) %>%
rename(GEOID = countyFIPS,
rcaUrbP = Urban,
rcaSubrbP = Suburban,
rcaRuralP = Rural))
write.csv(rucaCountyRurality %>% select(-check), paste0(rawDataFilesLoc,'county_RUCA_rurality.csv'),
row.names = FALSE)
|
window_select_SI_calculation <-
function(workspace){
  # Dialog for computing sensitivity-index (SI) values.
  # The user selects (1) GLM parameters, (2) an increase percentage,
  # (3) the field data to compare against and (4) a goodness-of-fit measure;
  # pressing "Calculate SI-Values" forwards the selection to
  # calculate_SI_value(). While a run is active the same button doubles as a
  # cancel switch.
  #
  # Args:
  #   workspace: workspace/run directory handed through to calculate_SI_value()
  #              (semantics defined by that function).
  #
  # Side effects: builds gWidgets UI objects and assigns the global status
  # label `label_status_SI_calculation`, which calculate_SI_value() updates.
  # Full set of tunable GLM parameters (kept for reference).
  parameter <- c("Kw","ce","cd","ch","coef_mix_conv","coef_wind_stir",
                 "coef_mix_shear","coef_mix_turb","coef_mix_KH","coef_mix_hyp",
                 "seepage_rate","inflow_factor","outflow_factor","rain_factor",
                 "wind_factor")
  win_SI <- gwindow("Calculate SI-value", width = 400, visible = FALSE)
  win_SI_1 <- ggroup(horizontal = FALSE, container = win_SI)
  sub_label <- glabel("1. Select Parameter(s)", container = win_SI_1)
  font(sub_label) <- c(weight = "bold")
  # Parameter checkboxes, laid out in three columns.
  win_SI_para <- ggroup(horizontal = TRUE, container = win_SI_1, fill = TRUE)
  win_SI_para_1 <- ggroup(horizontal = FALSE, container = win_SI_para, fill = TRUE)
  cb_kw <- gcheckbox("Kw", container = win_SI_para_1, checked = TRUE)
  cb_ce <- gcheckbox("ce", container = win_SI_para_1, checked = TRUE)
  cb_cd <- gcheckbox("cd", container = win_SI_para_1, checked = TRUE)
  cb_ch <- gcheckbox("ch", container = win_SI_para_1, checked = TRUE)
  win_SI_para_2 <- ggroup(horizontal = FALSE, container = win_SI_para, fill = TRUE)
  cb_coef_mix_conv <- gcheckbox("coef_mix_conv", container = win_SI_para_2, checked = FALSE)
  cb_coef_wind_stir <- gcheckbox("coef_wind_stir", container = win_SI_para_2, checked = FALSE)
  cb_coef_mix_shear <- gcheckbox("coef_mix_shear", container = win_SI_para_2, checked = FALSE)
  cb_coef_mix_turb <- gcheckbox("coef_mix_turb", container = win_SI_para_2, checked = FALSE)
  cb_coef_mix_KH <- gcheckbox("coef_mix_KH", container = win_SI_para_2, checked = FALSE)
  cb_coef_mix_hyp <- gcheckbox("coef_mix_hyp", container = win_SI_para_2, checked = FALSE)
  win_SI_para_3 <- ggroup(horizontal = FALSE, container = win_SI_para, fill = TRUE)
  cb_seepage_rate <- gcheckbox("seepage_rate", container = win_SI_para_3, checked = TRUE)
  cb_inflow_factor <- gcheckbox("inflow_factor", container = win_SI_para_3, checked = TRUE)
  cb_outflow_factor <- gcheckbox("outflow_factor", container = win_SI_para_3, checked = TRUE)
  cb_rain_factor <- gcheckbox("rain_factor", container = win_SI_para_3, checked = TRUE)
  cb_wind_factor <- gcheckbox("wind_factor", container = win_SI_para_3, checked = TRUE)
  # Parameter name -> checkbox, in the exact order the selection list is
  # assembled (Kw, ch, ce, cd, then the mixing coefficients, then the factors).
  param_boxes <- list(
    Kw = cb_kw, ch = cb_ch, ce = cb_ce, cd = cb_cd,
    coef_mix_conv = cb_coef_mix_conv, coef_wind_stir = cb_coef_wind_stir,
    coef_mix_shear = cb_coef_mix_shear, coef_mix_turb = cb_coef_mix_turb,
    coef_mix_KH = cb_coef_mix_KH, coef_mix_hyp = cb_coef_mix_hyp,
    seepage_rate = cb_seepage_rate, inflow_factor = cb_inflow_factor,
    outflow_factor = cb_outflow_factor, rain_factor = cb_rain_factor,
    wind_factor = cb_wind_factor)
  # Enable/disable every parameter checkbox at once (locked during a run).
  set_param_boxes_enabled <- function(state) {
    for (box in param_boxes) {
      enabled(box) <- state
    }
  }
  gseparator(horizontal = TRUE, container = win_SI_1, expand = TRUE)
  sub_label <- glabel("2. Select Increase %", container = win_SI_1)
  font(sub_label) <- c(weight = "bold")
  radio_button_percent <- gradio(c("5", "10", "20", "50"), container = win_SI_1,
                                 selected = 2, horizontal = TRUE)
  gseparator(horizontal = TRUE, container = win_SI_1, expand = TRUE)
  sub_label <- glabel("3. Select Field Data", container = win_SI_1)
  font(sub_label) <- c(weight = "bold")
  radio_button_field <- gradio(c("Temperature", "Lake Level"), container = win_SI_1,
                               horizontal = TRUE, selected = 1)
  gseparator(horizontal = TRUE, container = win_SI_1, expand = TRUE)
  sub_label <- glabel("4. Select measure of difference", container = win_SI_1)
  font(sub_label) <- c(weight = "bold")
  radio_button_guete <- gradio(c("RMSE", "Model output"), container = win_SI_1,
                               horizontal = TRUE, selected = 1)
  gseparator(horizontal = TRUE, container = win_SI_1, expand = TRUE)
  win_SI_3 <- ggroup(horizontal = TRUE, container = win_SI_1, fill = TRUE)
  but_cal_si <- gbutton("Calculate SI-Values", container = win_SI_3, handler = function(h, ...) {
    # Field data matching the chosen comparison must already be loaded
    # (dir_field_temp / dir_field_level are globals set elsewhere).
    if ((dir_field_temp != "" && svalue(radio_button_field) == "Temperature") ||
        (dir_field_level != "" && svalue(radio_button_field) == "Lake Level")) {
      if (svalue(but_cal_si) == "Calculate SI-Values") {
        # Collect the names of all ticked parameter checkboxes.
        List_parameter <- list()
        for (p in names(param_boxes)) {
          if (svalue(param_boxes[[p]])) {
            List_parameter[[length(List_parameter) + 1]] <- p
          }
        }
        # Lock the selection while the calculation runs; the button now acts
        # as the cancel switch checked by calculate_SI_value().
        set_param_boxes_enabled(FALSE)
        svalue(but_cal_si) <- "Cancel Calculation"
        calculate_SI_value(List_parameter, svalue(radio_button_percent),
                           svalue(radio_button_guete), svalue(radio_button_field),
                           workspace, label_status_SI_calculation, but_cal_si)
        # calculation finished or canceled
        svalue(but_cal_si) <- "Calculate SI-Values"
        set_param_boxes_enabled(TRUE)
      } else {
        # A run is in progress: flag the cancellation request.
        svalue(but_cal_si) <- "canceling..."
      }
    } else {
      show_message("Missing Field Data.")
    }
  })
  but_cal_close <- gbutton("Close", container = win_SI_3, handler = function(h, ...) {
    dispose(h$obj)
  })
  glabel("status:", container = win_SI_3, fg = "red")
  # Global on purpose: calculate_SI_value() writes progress text into it.
  label_status_SI_calculation <<- glabel("", container = win_SI_3, fg = "red")
  visible(win_SI) <- TRUE
}
| /R/window_select_SI_calculation.R | no_license | jsta/glmgui | R | false | false | 6,713 | r | window_select_SI_calculation <-
function(workspace){
# NOTE(review): this is a verbatim duplicate (dataset-dump artifact) of
# window_select_SI_calculation defined earlier in this file; its assignment
# target is fused into the preceding metadata row.
parameter<-c("Kw","ce","cd","ch","coef_mix_conv","coef_wind_stir","coef_mix_shear","coef_mix_turb","coef_mix_KH","coef_mix_hyp","seepage_rate","inflow_factor","outflow_factor","rain_factor","wind_factor")
win_SI <- gwindow("Calculate SI-value", width = 400, visible = FALSE)
win_SI_1 <- ggroup(horizontal = FALSE ,container = win_SI)
sub_label <-glabel("1. Select Parameter(s)",container = win_SI_1)
font(sub_label) <- c(weight="bold")
# Parameter checkboxes, laid out in three columns.
win_SI_para <- ggroup(horizontal = TRUE, container=win_SI_1, fill=TRUE )
win_SI_para_1 <- ggroup(horizontal = FALSE, container=win_SI_para, fill=TRUE )
cb_kw <- gcheckbox ("Kw",container = win_SI_para_1,checked =TRUE)
cb_ce <- gcheckbox ("ce",container = win_SI_para_1,checked =TRUE)
cb_cd <- gcheckbox ("cd",container = win_SI_para_1,checked =TRUE)
cb_ch <- gcheckbox ("ch",container = win_SI_para_1,checked =TRUE)
win_SI_para_2 <- ggroup(horizontal = FALSE, container=win_SI_para, fill=TRUE )
cb_coef_mix_conv <- gcheckbox ("coef_mix_conv",container = win_SI_para_2,checked =FALSE)
cb_coef_wind_stir <- gcheckbox ("coef_wind_stir",container = win_SI_para_2,checked =FALSE)
cb_coef_mix_shear <- gcheckbox ("coef_mix_shear",container = win_SI_para_2,checked =FALSE)
cb_coef_mix_turb <- gcheckbox ("coef_mix_turb",container = win_SI_para_2,checked =FALSE)
cb_coef_mix_KH <- gcheckbox ("coef_mix_KH",container = win_SI_para_2,checked =FALSE)
cb_coef_mix_hyp <- gcheckbox ("coef_mix_hyp",container = win_SI_para_2,checked =FALSE)
win_SI_para_3 <- ggroup(horizontal = FALSE, container=win_SI_para, fill=TRUE )
cb_seepage_rate <- gcheckbox ("seepage_rate",container = win_SI_para_3,checked =TRUE)
cb_inflow_factor <- gcheckbox ("inflow_factor",container = win_SI_para_3,checked =TRUE)
cb_outflow_factor <- gcheckbox ("outflow_factor",container = win_SI_para_3,checked =TRUE) #CHANGE: outflow added
cb_rain_factor <- gcheckbox ("rain_factor",container = win_SI_para_3,checked =TRUE)
cb_wind_factor <- gcheckbox ("wind_factor",container = win_SI_para_3,checked =TRUE)
gseparator(horizontal=TRUE, container=win_SI_1, expand=TRUE)
sub_label <-glabel("2. Select Increase %",container = win_SI_1)
font(sub_label) <- c(weight="bold")
radio_button_percent <- gradio(c("5","10","20","50"), container=win_SI_1, selected=2, horizontal = TRUE)
gseparator(horizontal=TRUE, container=win_SI_1, expand=TRUE)
sub_label <-glabel("3. Select Field Data",container = win_SI_1)
font(sub_label) <- c(weight="bold")
radio_button_field <- gradio(c("Temperature","Lake Level"), container=win_SI_1,horizontal =TRUE, selected=1) #CHANGE: Combined removed
gseparator(horizontal=TRUE, container=win_SI_1, expand=TRUE)
sub_label <-glabel("4. Select measure of difference",container = win_SI_1)
font(sub_label) <- c(weight="bold")
radio_button_guete <- gradio(c("RMSE","Model output"), container=win_SI_1,horizontal =TRUE, selected=1)
gseparator(horizontal=TRUE, container=win_SI_1, expand=TRUE)
#dir_field_temp
win_SI_3 <- ggroup(horizontal = TRUE, container=win_SI_1, fill=TRUE )
# Button doubles as "run" and "cancel" depending on its current label.
but_cal_si <- gbutton("Calculate SI-Values", container = win_SI_3, handler=function(h,...) {
if((dir_field_temp!= "" &&svalue(radio_button_field) =="Temperature")|| (dir_field_level!= "" &&svalue(radio_button_field) =="Lake Level")){
print("1")
if(svalue(but_cal_si) == "Calculate SI-Values"){
# Collect the names of all ticked parameter checkboxes.
List_parameter <- list()
#"Kw","ce","cd","ch" "coef_mix_conv","coef_wind_stir","coef_mix_shear","coef_mix_turb","coef_mix_KH","coef_mix_hyp","seepage_rate","inflow_factor", "outflow_factor" ,"rain_factor","wind_factor"
if(svalue(cb_kw)){List_parameter[length(List_parameter)+1]<- "Kw"}
if(svalue(cb_ch)){List_parameter[length(List_parameter)+1]<- "ch"}
if(svalue(cb_ce)){List_parameter[length(List_parameter)+1]<- "ce"}
if(svalue(cb_cd)){List_parameter[length(List_parameter)+1]<- "cd"}
if(svalue(cb_coef_mix_conv)){List_parameter[length(List_parameter)+1]<- "coef_mix_conv"}
if(svalue(cb_coef_wind_stir)){List_parameter[length(List_parameter)+1]<- "coef_wind_stir"}
if(svalue(cb_coef_mix_shear)){List_parameter[length(List_parameter)+1]<- "coef_mix_shear"}
if(svalue(cb_coef_mix_turb)){List_parameter[length(List_parameter)+1]<- "coef_mix_turb"}
if(svalue(cb_coef_mix_KH)){List_parameter[length(List_parameter)+1]<- "coef_mix_KH"}
if(svalue(cb_coef_mix_hyp)){List_parameter[length(List_parameter)+1]<- "coef_mix_hyp"}
if(svalue(cb_seepage_rate)){List_parameter[length(List_parameter)+1]<- "seepage_rate"}
if(svalue(cb_inflow_factor)){List_parameter[length(List_parameter)+1]<- "inflow_factor"}
if(svalue(cb_outflow_factor)){List_parameter[length(List_parameter)+1]<- "outflow_factor"} #CHANGE: outflow also added
if(svalue(cb_rain_factor)){List_parameter[length(List_parameter)+1]<- "rain_factor"}
if(svalue(cb_wind_factor)){List_parameter[length(List_parameter)+1]<- "wind_factor"}
# Lock the selection while the calculation runs.
enabled(cb_kw)<- FALSE
enabled(cb_ce) <- FALSE
enabled(cb_cd) <- FALSE
enabled(cb_ch) <- FALSE
enabled(cb_coef_mix_conv)<-FALSE
enabled(cb_coef_wind_stir)<-FALSE
enabled(cb_coef_mix_shear)<-FALSE
enabled(cb_coef_mix_turb) <-FALSE
enabled(cb_coef_mix_KH) <-FALSE
enabled(cb_coef_mix_hyp) <- FALSE
enabled(cb_seepage_rate) <-FALSE
enabled(cb_inflow_factor) <-FALSE
enabled(cb_outflow_factor) <-FALSE
enabled(cb_rain_factor) <-FALSE
enabled(cb_wind_factor) <-FALSE
svalue(but_cal_si)<-"Cancel Calculation"
calculate_SI_value(List_parameter,svalue(radio_button_percent),svalue(radio_button_guete),svalue(radio_button_field),workspace,label_status_SI_calculation,but_cal_si)
#calculation finished or canceled
svalue(but_cal_si)<-"Calculate SI-Values"
enabled(cb_kw)<- TRUE
enabled(cb_ce) <- TRUE
enabled(cb_cd) <- TRUE
enabled(cb_ch) <- TRUE
enabled(cb_coef_mix_conv)<-TRUE
enabled(cb_coef_wind_stir)<-TRUE
enabled(cb_coef_mix_shear)<-TRUE
enabled(cb_coef_mix_turb) <-TRUE
enabled(cb_coef_mix_KH) <-TRUE
enabled(cb_coef_mix_hyp) <- TRUE
enabled(cb_seepage_rate) <-TRUE
enabled(cb_inflow_factor) <-TRUE
enabled(cb_outflow_factor) <-TRUE
enabled(cb_rain_factor) <-TRUE
enabled(cb_wind_factor) <-TRUE
}
else{
# A run is in progress: flag the cancellation request.
svalue(but_cal_si)<-"canceling..."
}}
else{
show_message("Missing Field Data.")
}})
but_cal_close <- gbutton("Close", container = win_SI_3, handler=function(h,...) {dispose((h$obj)) })
glabel("status:",container = win_SI_3,fg="red")
# Global on purpose: calculate_SI_value() writes progress text into it.
label_status_SI_calculation <<-glabel("",container = win_SI_3,fg="red")
visible(win_SI) <- TRUE
}
|
#' Skip to content
#' This repository
#' Search
#' Pull requests
#' Issues
#' Gist
#' @aptperson
#' Watch 282
#' Star 1,918
#' Fork 1,097 dmlc/xgboost
#' Code Issues 113 Pull requests 6 Wiki Pulse Graphs
#' Branch: master Find file Copy pathxgboost/R-package/R/utils.R
#' 4db3dfe 27 days ago
#' @hetong007 hetong007 Update utils.R
#' 5 contributors @hetong007 @tqchen @khotilov @terrytangyuan @nagadomi
#' RawBlameHistory 347 lines (330 sloc) 11.7 KB
#' #' @importClassesFrom Matrix dgCMatrix dgeMatrix
#' #' @import methods
#'
#' # depends on matrix
#' .onLoad <- function(libname, pkgname) {
#' library.dynam("xgboost", pkgname, libname)
#' }
#' .onUnload <- function(libpath) {
#' library.dynam.unload("xgboost", libpath)
#' }
# set information into dmatrix, this mutate dmatrix
# Set meta-information ("label", "weight", "base_margin" or "group") on an
# xgb.DMatrix. This mutates `dmat` in place via the xgboost C backend and
# returns TRUE on success; an unknown `name` raises an error.
xgb.setinfo <- function(dmat, name, info) {
  if (!inherits(dmat, "xgb.DMatrix")) {
    stop("xgb.setinfo: first argument dtrain must be xgb.DMatrix")
  }
  if (name == "label" || name == "weight") {
    # Per-row fields must match the number of rows exactly.
    if (length(info) != xgb.numrow(dmat)) {
      what <- if (name == "label") "labels" else "weights"
      stop("The length of ", what,
           " must equal to the number of rows in the input data")
    }
    .Call("XGDMatrixSetInfo_R", dmat, name, as.numeric(info),
          PACKAGE = "xgboost")
    return(TRUE)
  }
  if (name == "base_margin") {
    # Length is intentionally not validated here (may be nrow * nclass for
    # multiclass models) — mirrors the commented-out check in the original.
    .Call("XGDMatrixSetInfo_R", dmat, name, as.numeric(info),
          PACKAGE = "xgboost")
    return(TRUE)
  }
  if (name == "group") {
    # Group sizes must partition the rows (used by ranking objectives).
    if (sum(info) != xgb.numrow(dmat)) {
      stop("The sum of groups must equal to the number of rows in the input data")
    }
    .Call("XGDMatrixSetInfo_R", dmat, name, as.integer(info),
          PACKAGE = "xgboost")
    return(TRUE)
  }
  # Unknown info name: fail loudly. (The trailing return(FALSE) in the
  # original was unreachable after this stop().)
  stop(paste("xgb.setinfo: unknown info name", name))
}
# construct a Booster from cachelist
# Construct a low-level booster handle, optionally initialising parameters
# and loading a model from a file path (character) or a serialized raw vector.
#
# Args:
#   params:    named list of booster parameters; dots in names are converted
#              to underscores as expected by the xgboost backend.
#   cachelist: list of xgb.DMatrix objects to register as prediction caches.
#   modelfile: optional path (character) or serialized model (raw).
# Returns an object of class "xgb.Booster.handle".
xgb.Booster <- function(params = list(), cachelist = list(), modelfile = NULL) {
  if (typeof(cachelist) != "list") {
    stop("xgb.Booster: only accepts list of DMatrix as cachelist")
  }
  for (dm in cachelist) {
    if (!inherits(dm, "xgb.DMatrix")) {
      stop("xgb.Booster: only accepts list of DMatrix as cachelist")
    }
  }
  handle <- .Call("XGBoosterCreate_R", cachelist, PACKAGE = "xgboost")
  # seq_along() is safe for empty params (1:length(params) would yield c(1, 0),
  # which is why the original needed an explicit length guard).
  for (i in seq_along(params)) {
    p <- params[i]
    .Call("XGBoosterSetParam_R", handle, gsub("\\.", "_", names(p)), as.character(p),
          PACKAGE = "xgboost")
  }
  if (!is.null(modelfile)) {
    if (typeof(modelfile) == "character") {
      .Call("XGBoosterLoadModel_R", handle, modelfile, PACKAGE = "xgboost")
    } else if (typeof(modelfile) == "raw") {
      .Call("XGBoosterLoadModelFromRaw_R", handle, modelfile, PACKAGE = "xgboost")
    } else {
      stop("xgb.Booster: modelfile must be character or raw vector")
    }
  }
  structure(handle, class = "xgb.Booster.handle")
}
# convert xgb.Booster.handle to xgb.Booster
# Wrap a low-level booster handle (and optional serialized model bytes)
# into an S3 "xgb.Booster" object with fields $handle and $raw.
xgb.handleToBooster <- function(handle, raw = NULL)
{
  structure(list(handle = handle, raw = raw), class = "xgb.Booster")
}
# Check whether an xgb.Booster object is complete
# Ensure an xgb.Booster object has a usable C handle.
# If the handle is missing or wraps a null external pointer (e.g. after the
# object was deserialized from disk), it is re-created from the serialized
# model in bst$raw. Otherwise, when saveraw is TRUE and no raw copy exists
# yet, the current model is serialized into bst$raw. Returns the (possibly
# modified) booster.
xgb.Booster.check <- function(bst, saveraw = TRUE)
{
isnull <- is.null(bst$handle)
if (!isnull) {
# A non-NULL R object can still wrap a null external pointer; ask the C side.
isnull <- .Call("XGCheckNullPtr_R", bst$handle, PACKAGE="xgboost")
}
if (isnull) {
bst$handle <- xgb.Booster(modelfile = bst$raw)
} else {
if (is.null(bst$raw) && saveraw)
bst$raw <- xgb.save.raw(bst$handle)
}
return(bst)
}
## ----the following are low level iteratively function, not needed if
## you do not want to use them ---------------------------------------
# get dmatrix from data, label
# Normalise assorted `data` inputs into an xgb.DMatrix.
# Accepts a dgCMatrix or dense matrix (label required, optional weight),
# a file path (character), or an existing xgb.DMatrix (returned as is).
# Data frames are rejected with a hint to use data.matrix().
xgb.get.DMatrix <- function(data, label = NULL, missing = NA, weight = NULL) {
  # NOTE: class(matrix) is c("matrix", "array") since R 4.0, so the original
  # `class(data) == "matrix"` combined with || produced a length-2 condition
  # (an error on R >= 4.3); is.matrix()/inherits() is the robust test.
  if (inherits(data, "dgCMatrix") || is.matrix(data)) {
    if (is.null(label)) {
      stop("xgboost: need label when data is a matrix")
    }
    dtrain <- xgb.DMatrix(data, label = label, missing = missing)
    if (!is.null(weight)) {
      xgb.setinfo(dtrain, "weight", weight)
    }
  } else {
    if (!is.null(label)) {
      warning("xgboost: label will be ignored.")
    }
    if (is.character(data)) {
      # Character input is treated as a path to a saved DMatrix / libsvm file.
      dtrain <- xgb.DMatrix(data)
    } else if (inherits(data, "xgb.DMatrix")) {
      dtrain <- data
    } else if (is.data.frame(data)) {
      stop("xgboost only support numerical matrix input,
use 'data.matrix' to transform the data.")
    } else {
      stop("xgboost: Invalid input of data")
    }
  }
  return(dtrain)
}
# Number of rows of an xgb.DMatrix, queried from the xgboost C backend.
xgb.numrow <- function(dmat) {
  .Call("XGDMatrixNumRow_R", dmat, PACKAGE = "xgboost")
}
# iteratively update booster with customized statistics
# Perform one boosting iteration using a caller-supplied gradient pair.
# `gpair` must be a list with numeric elements $grad and $hess aligned with
# the rows of `dtrain`. Returns TRUE on success.
xgb.iter.boost <- function(booster, dtrain, gpair) {
  # Validate argument types before handing off to the C++ backend.
  if (class(booster) != "xgb.Booster.handle")
    stop("xgb.iter.update: first argument must be type xgb.Booster.handle")
  if (class(dtrain) != "xgb.DMatrix")
    stop("xgb.iter.update: second argument must be type xgb.DMatrix")
  .Call("XGBoosterBoostOneIter_R", booster, dtrain, gpair$grad, gpair$hess,
        PACKAGE = "xgboost")
  TRUE
}
# iteratively update booster with dtrain
# Advance the booster by one iteration on `dtrain`.
# With the default objective (obj = NULL) the update is delegated to the C
# backend; with a custom objective function, predictions are computed first,
# the objective turns them into a gradient pair, and xgb.iter.boost applies
# it. Returns TRUE.
xgb.iter.update <- function(booster, dtrain, iter, obj = NULL) {
  if (class(booster) != "xgb.Booster.handle") {
    stop("xgb.iter.update: first argument must be type xgb.Booster.handle")
  }
  if (class(dtrain) != "xgb.DMatrix") {
    stop("xgb.iter.update: second argument must be type xgb.DMatrix")
  }
  if (is.null(obj)) {
    .Call("XGBoosterUpdateOneIter_R", booster, as.integer(iter), dtrain,
          PACKAGE = "xgboost")
  } else {
    # Custom objective: obj(preds, dtrain) must return list(grad =, hess =).
    preds <- predict(booster, dtrain)
    xgb.iter.boost(booster, dtrain, obj(preds, dtrain))
  }
  TRUE
}
# iteratively evaluate one iteration
# Evaluate the booster at iteration `iter` against every DMatrix in the named
# `watchlist`, producing a single message string like
# "[iter]\tname-metric:value...". With a custom `feval(preds, dmat)` (which
# must return list(metric =, value =)) the message is assembled in R;
# otherwise the C backend formats it. When `prediction` is TRUE, predictions
# for watchlist[[2]] are returned alongside the message.
xgb.iter.eval <- function(booster, watchlist, iter, feval = NULL, prediction = FALSE) {
if (class(booster) != "xgb.Booster.handle") {
stop("xgb.eval: first argument must be type xgb.Booster")
}
if (typeof(watchlist) != "list") {
stop("xgb.eval: only accepts list of DMatrix as watchlist")
}
for (w in watchlist) {
if (class(w) != "xgb.DMatrix") {
stop("xgb.eval: watch list can only contain xgb.DMatrix")
}
}
if (length(watchlist) != 0) {
if (is.null(feval)) {
# Built-in metric: collect the element names and let the C side format.
evnames <- list()
for (i in 1:length(watchlist)) {
w <- watchlist[i]
if (length(names(w)) == 0) {
stop("xgb.eval: name tag must be presented for every elements in watchlist")
}
evnames <- append(evnames, names(w))
}
msg <- .Call("XGBoosterEvalOneIter_R", booster, as.integer(iter), watchlist,
evnames, PACKAGE = "xgboost")
} else {
# Custom metric: evaluate each watchlist entry in R and build the message.
msg <- paste("[", iter, "]", sep="")
for (j in 1:length(watchlist)) {
w <- watchlist[j]
if (length(names(w)) == 0) {
stop("xgb.eval: name tag must be presented for every elements in watchlist")
}
preds <- predict(booster, w[[1]])
ret <- feval(preds, w[[1]])
msg <- paste(msg, "\t", names(w), "-", ret$metric, ":", ret$value, sep="")
}
}
} else {
msg <- ""
}
if (prediction){
# NOTE(review): the prediction target is hard-coded to watchlist[[2]] —
# presumably the "test" fold set up by xgb.cv.mknfold; this errors if the
# watchlist has fewer than two entries. Verify against callers.
preds <- predict(booster,watchlist[[2]])
return(list(msg,preds))
}
return(msg)
}
#------------------------------------------
# helper functions for cross validation
#
# Build the per-fold structures for cross-validation.
# Splits `dall` into `nfold` train/test pairs (using pre-computed `folds`
# when supplied, otherwise creating them — stratified via xgb.createFolds
# when requested) and returns a list with one element per fold:
# list(dtrain, booster, watchlist = list(train, test), index).
# Uses sample(), so results depend on the RNG state unless `folds` is given.
xgb.cv.mknfold <- function(dall, nfold, param, stratified, folds) {
if (nfold <= 1) {
stop("nfold must be bigger than 1")
}
if(is.null(folds)) {
# Ranking objectives need group-aware folds, which are not implemented here.
if (exists('objective', where=param) && is.character(param$objective) &&
strtrim(param[['objective']], 5) == 'rank:') {
stop("\tAutomatic creation of CV-folds is not implemented for ranking!\n",
"\tConsider providing pre-computed CV-folds through the folds parameter.")
}
y <- getinfo(dall, 'label')
randidx <- sample(1 : xgb.numrow(dall))
if (stratified & length(y) == length(randidx)) {
y <- y[randidx]
#
# WARNING: some heuristic logic is employed to identify classification setting!
#
# For classification, need to convert y labels to factor before making the folds,
# and then do stratification by factor levels.
# For regression, leave y numeric and do stratification by quantiles.
if (exists('objective', where=param) && is.character(param$objective)) {
# If 'objective' provided in params, assume that y is a classification label
# unless objective is reg:linear
if (param[['objective']] != 'reg:linear') y <- factor(y)
} else {
# If no 'objective' given in params, it means that user either wants to use
# the default 'reg:linear' objective or has provided a custom obj function.
# Here, assume classification setting when y has 5 or less unique values:
if (length(unique(y)) <= 5) y <- factor(y)
}
folds <- xgb.createFolds(y, nfold)
} else {
# make simple non-stratified folds
# First nfold-1 folds take kstep indices each; the last fold absorbs the
# remainder so every row is assigned exactly once.
kstep <- length(randidx) %/% nfold
folds <- list()
for (i in 1:(nfold - 1)) {
folds[[i]] <- randidx[1:kstep]
randidx <- setdiff(randidx, folds[[i]])
}
folds[[nfold]] <- randidx
}
}
ret <- list()
for (k in 1:nfold) {
# Fold k is the held-out test set; all other folds form the training set.
dtest <- slice(dall, folds[[k]])
didx <- c()
for (i in 1:nfold) {
if (i != k) {
didx <- append(didx, folds[[i]])
}
}
dtrain <- slice(dall, didx)
bst <- xgb.Booster(param, list(dtrain, dtest))
# Order matters: xgb.iter.eval's prediction path reads watchlist[[2]] (test).
watchlist <- list(train=dtrain, test=dtest)
ret[[k]] <- list(dtrain=dtrain, booster=bst, watchlist=watchlist, index=folds[[k]])
}
return (ret)
}
# Aggregate per-fold evaluation messages into one summary string.
#
# Args:
#   res:    list with one element per fold; each element is a character
#           vector of the form c("[iter]", "name1:value1", "name2:value2", ...)
#           where all folds share the same metric names in the same order.
#   showsd: append "+sd" (standard deviation across folds) after each mean.
# Returns a single string: "[iter]\tname1:mean(+sd)\tname2:mean(+sd)...".
xgb.cv.aggcv <- function(res, showsd = TRUE) {
  header <- res[[1]]
  ret <- header[1]
  # seq_along(header)[-1] is empty for a one-element header, unlike
  # 2:length(header) which would count down c(2, 1).
  for (i in seq_along(header)[-1]) {
    metric <- strsplit(header[i], ":")[[1]][1]
    # The i-th metric's value in every fold.
    stats <- vapply(res,
                    function(fold) as.numeric(strsplit(fold[i], ":")[[1]][2]),
                    numeric(1))
    ret <- paste0(ret, "\t", metric, ":", sprintf("%f", mean(stats)))
    if (showsd) {
      ret <- paste0(ret, sprintf("+%f", stats::sd(stats)))
    }
  }
  ret
}
# Shamelessly copied from caret::createFolds
# and simplified by always returning an unnamed list of test indices
# Create k test-index folds for cross-validation, stratified by the values
# of y: numeric y is binned by quantiles first, then fold assignment is
# balanced per class/bin. Returns an unnamed list of k integer vectors that
# partition seq_along(y). Uses sample(), so the result is RNG-dependent.
xgb.createFolds <- function(y, k = 10)
{
if(is.numeric(y)) {
## Group the numeric data based on their magnitudes
## and sample within those groups.
## When the number of samples is low, we may have
## issues further slicing the numeric data into
## groups. The number of groups will depend on the
## ratio of the number of folds to the sample size.
## At most, we will use quantiles. If the sample
## is too small, we just do regular unstratified
## CV
cuts <- floor(length(y) / k)
if (cuts < 2) cuts <- 2
if (cuts > 5) cuts <- 5
y <- cut(y,
unique(stats::quantile(y, probs = seq(0, 1, length = cuts))),
include.lowest = TRUE)
}
if(k < length(y)) {
## reset levels so that the possible levels and
## the levels in the vector are the same
y <- factor(as.character(y))
numInClass <- table(y)
foldVector <- vector(mode = "integer", length(y))
## For each class, balance the fold allocation as far
## as possible, then resample the remainder.
## The final assignment of folds is also randomized.
for(i in 1:length(numInClass)) {
## create a vector of integers from 1:k as many times as possible without
## going over the number of samples in the class. Note that if the number
## of samples in a class is less than k, nothing is producd here.
seqVector <- rep(1:k, numInClass[i] %/% k)
## add enough random integers to get length(seqVector) == numInClass[i]
if(numInClass[i] %% k > 0) seqVector <- c(seqVector, sample(1:k, numInClass[i] %% k))
## shuffle the integers for fold assignment and assign to this classes's data
# NOTE(review): dimnames(numInClass)$y works because the table() argument
# is literally named `y`; renaming that variable would break this lookup.
foldVector[which(y == dimnames(numInClass)$y[i])] <- sample(seqVector)
}
} else foldVector <- seq(along = y)
# split() by fold id yields the test indices for each fold; names dropped.
out <- split(seq(along = y), foldVector)
names(out) <- NULL
out
}
# Status API Training Shop Blog About Pricing
# © 2015 GitHub, Inc. Terms Privacy Security Contact Help | /xgbHelpers.r | no_license | aptperson/telstra | R | false | false | 12,561 | r | #' Skip to content
#' This repository
#' Search
#' Pull requests
#' Issues
#' Gist
#' @aptperson
#' Watch 282
#' Star 1,918
#' Fork 1,097 dmlc/xgboost
#' Code Issues 113 Pull requests 6 Wiki Pulse Graphs
#' Branch: master Find file Copy pathxgboost/R-package/R/utils.R
#' 4db3dfe 27 days ago
#' @hetong007 hetong007 Update utils.R
#' 5 contributors @hetong007 @tqchen @khotilov @terrytangyuan @nagadomi
#' RawBlameHistory 347 lines (330 sloc) 11.7 KB
#' #' @importClassesFrom Matrix dgCMatrix dgeMatrix
#' #' @import methods
#'
#' # depends on matrix
#' .onLoad <- function(libname, pkgname) {
#' library.dynam("xgboost", pkgname, libname)
#' }
#' .onUnload <- function(libpath) {
#' library.dynam.unload("xgboost", libpath)
#' }
# set information into dmatrix, this mutate dmatrix
xgb.setinfo <- function(dmat, name, info) {
if (class(dmat) != "xgb.DMatrix") {
stop("xgb.setinfo: first argument dtrain must be xgb.DMatrix")
}
if (name == "label") {
if (length(info) != xgb.numrow(dmat))
stop("The length of labels must equal to the number of rows in the input data")
.Call("XGDMatrixSetInfo_R", dmat, name, as.numeric(info),
PACKAGE = "xgboost")
return(TRUE)
}
if (name == "weight") {
if (length(info) != xgb.numrow(dmat))
stop("The length of weights must equal to the number of rows in the input data")
.Call("XGDMatrixSetInfo_R", dmat, name, as.numeric(info),
PACKAGE = "xgboost")
return(TRUE)
}
if (name == "base_margin") {
# if (length(info)!=xgb.numrow(dmat))
# stop("The length of base margin must equal to the number of rows in the input data")
.Call("XGDMatrixSetInfo_R", dmat, name, as.numeric(info),
PACKAGE = "xgboost")
return(TRUE)
}
if (name == "group") {
if (sum(info) != xgb.numrow(dmat))
stop("The sum of groups must equal to the number of rows in the input data")
.Call("XGDMatrixSetInfo_R", dmat, name, as.integer(info),
PACKAGE = "xgboost")
return(TRUE)
}
stop(paste("xgb.setinfo: unknown info name", name))
return(FALSE)
}
# construct a Booster from cachelist
xgb.Booster <- function(params = list(), cachelist = list(), modelfile = NULL) {
if (typeof(cachelist) != "list") {
stop("xgb.Booster: only accepts list of DMatrix as cachelist")
}
for (dm in cachelist) {
if (class(dm) != "xgb.DMatrix") {
stop("xgb.Booster: only accepts list of DMatrix as cachelist")
}
}
handle <- .Call("XGBoosterCreate_R", cachelist, PACKAGE = "xgboost")
if (length(params) != 0) {
for (i in 1:length(params)) {
p <- params[i]
.Call("XGBoosterSetParam_R", handle, gsub("\\.", "_", names(p)), as.character(p),
PACKAGE = "xgboost")
}
}
if (!is.null(modelfile)) {
if (typeof(modelfile) == "character") {
.Call("XGBoosterLoadModel_R", handle, modelfile, PACKAGE = "xgboost")
} else if (typeof(modelfile) == "raw") {
.Call("XGBoosterLoadModelFromRaw_R", handle, modelfile, PACKAGE = "xgboost")
} else {
stop("xgb.Booster: modelfile must be character or raw vector")
}
}
return(structure(handle, class = "xgb.Booster.handle"))
}
# convert xgb.Booster.handle to xgb.Booster
xgb.handleToBooster <- function(handle, raw = NULL)
{
bst <- list(handle = handle, raw = raw)
class(bst) <- "xgb.Booster"
return(bst)
}
# Check whether an xgb.Booster object is complete
xgb.Booster.check <- function(bst, saveraw = TRUE)
{
isnull <- is.null(bst$handle)
if (!isnull) {
isnull <- .Call("XGCheckNullPtr_R", bst$handle, PACKAGE="xgboost")
}
if (isnull) {
bst$handle <- xgb.Booster(modelfile = bst$raw)
} else {
if (is.null(bst$raw) && saveraw)
bst$raw <- xgb.save.raw(bst$handle)
}
return(bst)
}
## ----the following are low level iteratively function, not needed if
## you do not want to use them ---------------------------------------
# Coerce supported input types into an xgb.DMatrix.
#
# data:    a dgCMatrix, base matrix, file path (character), or xgb.DMatrix.
# label:   response vector; required for matrix-like input, ignored (with a
#          warning) otherwise.
# missing: value treated as missing when constructing the DMatrix.
# weight:  optional per-row weights attached to the resulting DMatrix.
#
# inherits()/is.matrix() are used instead of comparing class(data) with "==":
# class() can return a vector (c("matrix", "array") since R 4.0), which
# breaks scalar comparisons and errors under `||` in R >= 4.3.
xgb.get.DMatrix <- function(data, label = NULL, missing = NA, weight = NULL) {
  if (inherits(data, "dgCMatrix") || is.matrix(data)) {
    if (is.null(label)) {
      stop("xgboost: need label when data is a matrix")
    }
    dtrain <- xgb.DMatrix(data, label = label, missing = missing)
    if (!is.null(weight)) {
      xgb.setinfo(dtrain, "weight", weight)
    }
  } else {
    if (!is.null(label)) {
      warning("xgboost: label will be ignored.")
    }
    if (is.character(data)) {
      # a character value is interpreted as a path to a saved DMatrix file
      dtrain <- xgb.DMatrix(data)
    } else if (inherits(data, "xgb.DMatrix")) {
      dtrain <- data
    } else if (is.data.frame(data)) {
      stop("xgboost only support numerical matrix input,
use 'data.matrix' to transform the data.")
    } else {
      stop("xgboost: Invalid input of data")
    }
  }
  return(dtrain)
}
# Number of rows in an xgb.DMatrix, queried from the native library.
xgb.numrow <- function(dmat) {
  .Call("XGDMatrixNumRow_R", dmat, PACKAGE = "xgboost")
}
# Perform one boosting iteration using user-supplied gradient statistics
# (gpair$grad / gpair$hess), e.g. from a custom objective function.
# Invisible to callers: always returns TRUE on success.
xgb.iter.boost <- function(booster, dtrain, gpair) {
  # inherits() is robust when class() returns more than one element; the
  # original messages also mis-reported the function as xgb.iter.update.
  if (!inherits(booster, "xgb.Booster.handle")) {
    stop("xgb.iter.boost: first argument must be type xgb.Booster.handle")
  }
  if (!inherits(dtrain, "xgb.DMatrix")) {
    stop("xgb.iter.boost: second argument must be type xgb.DMatrix")
  }
  .Call("XGBoosterBoostOneIter_R", booster, dtrain, gpair$grad, gpair$hess, PACKAGE = "xgboost")
  return(TRUE)
}
# Perform one boosting iteration on dtrain. With the default built-in
# objective the native updater is called directly; with a custom objective
# `obj(pred, dtrain)` must return list(grad = ..., hess = ...), which is
# fed to xgb.iter.boost. Returns TRUE on success.
xgb.iter.update <- function(booster, dtrain, iter, obj = NULL) {
  if (!inherits(booster, "xgb.Booster.handle")) {
    stop("xgb.iter.update: first argument must be type xgb.Booster.handle")
  }
  if (!inherits(dtrain, "xgb.DMatrix")) {
    stop("xgb.iter.update: second argument must be type xgb.DMatrix")
  }
  if (is.null(obj)) {
    .Call("XGBoosterUpdateOneIter_R", booster, as.integer(iter), dtrain,
          PACKAGE = "xgboost")
  } else {
    # custom objective: derive gradient/hessian from current predictions
    pred <- predict(booster, dtrain)
    gpair <- obj(pred, dtrain)
    xgb.iter.boost(booster, dtrain, gpair)
  }
  return(TRUE)
}
# Evaluate one boosting iteration against every DMatrix in `watchlist`.
# With the default metric the native evaluator builds the message; with a
# custom `feval(preds, dmat)` (returning list(metric = ..., value = ...))
# the message is assembled in R. Returns the evaluation string, or
# list(msg, preds) when `prediction = TRUE`.
xgb.iter.eval <- function(booster, watchlist, iter, feval = NULL, prediction = FALSE) {
  if (!inherits(booster, "xgb.Booster.handle")) {
    stop("xgb.eval: first argument must be type xgb.Booster")
  }
  if (typeof(watchlist) != "list") {
    stop("xgb.eval: only accepts list of DMatrix as watchlist")
  }
  for (w in watchlist) {
    if (!inherits(w, "xgb.DMatrix")) {
      stop("xgb.eval: watch list can only contain xgb.DMatrix")
    }
  }
  if (length(watchlist) != 0) {
    if (is.null(feval)) {
      # collect the name tag of every watchlist entry for the native call
      evnames <- list()
      for (i in seq_along(watchlist)) {
        w <- watchlist[i]
        if (length(names(w)) == 0) {
          stop("xgb.eval: name tag must be presented for every elements in watchlist")
        }
        evnames <- append(evnames, names(w))
      }
      msg <- .Call("XGBoosterEvalOneIter_R", booster, as.integer(iter), watchlist,
                   evnames, PACKAGE = "xgboost")
    } else {
      # custom metric: evaluate each watched DMatrix in R
      msg <- paste("[", iter, "]", sep = "")
      for (j in seq_along(watchlist)) {
        w <- watchlist[j]
        if (length(names(w)) == 0) {
          stop("xgb.eval: name tag must be presented for every elements in watchlist")
        }
        preds <- predict(booster, w[[1]])
        ret <- feval(preds, w[[1]])
        msg <- paste(msg, "\t", names(w), "-", ret$metric, ":", ret$value, sep = "")
      }
    }
  } else {
    msg <- ""
  }
  if (prediction) {
    # NOTE(review): predictions are taken from the second watchlist entry
    # (conventionally the test set); this assumes such an entry exists —
    # confirm against callers before relying on it.
    preds <- predict(booster, watchlist[[2]])
    return(list(msg, preds))
  }
  return(msg)
}
#------------------------------------------
# helper functions for cross validation
#
# Split `dall` into nfold train/test pairs for cross-validation.
# Returns a list of length nfold; element k holds the training DMatrix,
# a freshly created booster, a train/test watchlist and the held-out row
# indices. When `folds` is NULL, folds are generated here (optionally
# stratified on the label); otherwise the user-supplied index list is used.
xgb.cv.mknfold <- function(dall, nfold, param, stratified, folds) {
if (nfold <= 1) {
stop("nfold must be bigger than 1")
}
if(is.null(folds)) {
# ranking objectives need group-aware folds, which cannot be built here
if (exists('objective', where=param) && is.character(param$objective) &&
strtrim(param[['objective']], 5) == 'rank:') {
stop("\tAutomatic creation of CV-folds is not implemented for ranking!\n",
"\tConsider providing pre-computed CV-folds through the folds parameter.")
}
y <- getinfo(dall, 'label')
# random permutation of row indices used by both fold strategies below
randidx <- sample(1 : xgb.numrow(dall))
if (stratified & length(y) == length(randidx)) {
y <- y[randidx]
#
# WARNING: some heuristic logic is employed to identify classification setting!
#
# For classification, need to convert y labels to factor before making the folds,
# and then do stratification by factor levels.
# For regression, leave y numeric and do stratification by quantiles.
if (exists('objective', where=param) && is.character(param$objective)) {
# If 'objective' provided in params, assume that y is a classification label
# unless objective is reg:linear
if (param[['objective']] != 'reg:linear') y <- factor(y)
} else {
# If no 'objective' given in params, it means that user either wants to use
# the default 'reg:linear' objective or has provided a custom obj function.
# Here, assume classification setting when y has 5 or less unique values:
if (length(unique(y)) <= 5) y <- factor(y)
}
folds <- xgb.createFolds(y, nfold)
} else {
# make simple non-stratified folds: consecutive chunks of the permutation
kstep <- length(randidx) %/% nfold
folds <- list()
for (i in 1:(nfold - 1)) {
folds[[i]] <- randidx[1:kstep]
randidx <- setdiff(randidx, folds[[i]])
}
# last fold absorbs the remainder when nrow is not divisible by nfold
folds[[nfold]] <- randidx
}
}
# assemble one train/test split (and a booster) per fold
ret <- list()
for (k in 1:nfold) {
dtest <- slice(dall, folds[[k]])
# training rows = union of all folds except the held-out fold k
didx <- c()
for (i in 1:nfold) {
if (i != k) {
didx <- append(didx, folds[[i]])
}
}
dtrain <- slice(dall, didx)
bst <- xgb.Booster(param, list(dtrain, dtest))
watchlist <- list(train=dtrain, test=dtest)
ret[[k]] <- list(dtrain=dtrain, booster=bst, watchlist=watchlist, index=folds[[k]])
}
return (ret)
}
# Aggregate per-fold evaluation strings into one mean (+sd) summary line.
#
# res:    list of character vectors, one per fold; element 1 of each vector
#         is a round tag such as "[1]" and the remaining elements look like
#         "train-rmse:0.5".
# showsd: append "+<sd>" after each mean when TRUE.
# Returns a single string "<tag>\t<metric>:<mean>[+<sd>]...".
xgb.cv.aggcv <- function(res, showsd = TRUE) {
  header <- res[[1]]
  ret <- header[1]
  # seq_along(header)[-1] avoids the 2:length(header) footgun (which yields
  # c(2, 1) when there are no metric entries); likewise, vapply over `res`
  # replaces the original inner loop over 2:length(res), which indexed out
  # of bounds when only a single fold was supplied.
  for (pos in seq_along(header)[-1]) {
    metric <- strsplit(header[pos], ":")[[1]][1]
    vals <- vapply(res, function(fold) {
      as.numeric(strsplit(fold[pos], ":")[[1]][2])
    }, numeric(1))
    piece <- sprintf("%f", mean(vals))
    if (showsd) {
      piece <- paste0(piece, sprintf("+%f", stats::sd(vals)))
    }
    ret <- paste0(ret, "\t", metric, ":", piece)
  }
  ret
}
# Shamelessly copied from caret::createFolds
# and simplified by always returning an unnamed list of test indices.
# y: label vector (numeric labels are binned into quantile groups first so
#    folds are stratified by magnitude); k: number of folds.
# Returns an unnamed list of integer index vectors, one per fold.
xgb.createFolds <- function(y, k = 10)
{
if(is.numeric(y)) {
## Group the numeric data based on their magnitudes
## and sample within those groups.
## When the number of samples is low, we may have
## issues further slicing the numeric data into
## groups. The number of groups will depend on the
## ratio of the number of folds to the sample size.
## At most, we will use quantiles. If the sample
## is too small, we just do regular unstratified
## CV
cuts <- floor(length(y) / k)
if (cuts < 2) cuts <- 2
if (cuts > 5) cuts <- 5
y <- cut(y,
unique(stats::quantile(y, probs = seq(0, 1, length = cuts))),
include.lowest = TRUE)
}
if(k < length(y)) {
## reset levels so that the possible levels and
## the levels in the vector are the same
y <- factor(as.character(y))
numInClass <- table(y)
foldVector <- vector(mode = "integer", length(y))
## For each class, balance the fold allocation as far
## as possible, then resample the remainder.
## The final assignment of folds is also randomized.
for(i in 1:length(numInClass)) {
## create a vector of integers from 1:k as many times as possible without
## going over the number of samples in the class. Note that if the number
## of samples in a class is less than k, nothing is produced here.
seqVector <- rep(1:k, numInClass[i] %/% k)
## add enough random integers to get length(seqVector) == numInClass[i]
if(numInClass[i] %% k > 0) seqVector <- c(seqVector, sample(1:k, numInClass[i] %% k))
## shuffle the integers for fold assignment and assign to this class's data
## (dimnames(numInClass)$y works because table() names its dimension after
## the argument, which is called y here)
foldVector[which(y == dimnames(numInClass)$y[i])] <- sample(seqVector)
}
} else foldVector <- seq(along = y)
## split row positions by assigned fold id and drop the fold-id names
out <- split(seq(along = y), foldVector)
names(out) <- NULL
out
}
# Status API Training Shop Blog About Pricing
# © 2015 GitHub, Inc. Terms Privacy Security Contact Help |
library(quantmod)
library(xlsx) | /prophet.R | no_license | Bassem16/testR | R | false | false | 31 | r | library(quantmod)
library(xlsx) |
# Paired t-test template: reads a CSV chosen interactively, computes the
# mean and standard error of two treatment columns, then runs a paired
# t-test (treatment2 vs treatment1).
data <- read.csv(file.choose(), header = TRUE, na.string = ".")
names(data)
data

# Replace "Treatment1" / "Treatment2" with the actual column headers from
# your CSV. (The original template used the unparseable placeholder
# `c(enter treatment 1 header)` together with attach(); explicit column
# access avoids attach() and its name-masking pitfalls.)
treatment1 <- data[["Treatment1"]]
treatment2 <- data[["Treatment2"]]

mean(treatment1)
mean(treatment2)

# Standard error of the mean. NAs are excluded from both sd() and the
# sample count (the original divided by length(x) including NAs, which
# underestimates the SE when data are missing).
se <- function(x) {
  sd(x, na.rm = TRUE) / sqrt(sum(!is.na(x)))
}
se(treatment1)
se(treatment2)

t.test(treatment2, treatment1, paired = TRUE)
| /Rtemplate_t-test (1).R | no_license | ishika-patel/EvolutionaryBioCode | R | false | false | 333 | r | data <- read.csv(file.choose(),header=TRUE,na.string=".")
attach(data)
names(data)
data
treatment1 <- c(enter treatment 1 header)
treatment2 <- c(enter treatment 2 header)
mean(treatment1)
mean(treatment2)
se <- function(x) {sd(x,na.rm=TRUE)/sqrt(length(x))}
se(treatment1)
se(treatment2)
t.test(treatment2,treatment1, paired=TRUE)
|
### Heatmap Sensitivity ====
# Authors: Quinn Webber, Michel Laforge, Maegwin Bonar, Chris Hart,
# Alec Robitaille, Sana Zabihi-Seissan, Eric Vander Wal
### Packages ----
libs <- c('raster', 'lme4', 'piecewiseSEM',
'data.table', 'ggplot2')
lapply(libs, require, character.only = TRUE)
### Set variables ----
source('R/variables.R')
### Input ----
# Load the derived data products produced by earlier pipeline steps.
Sens <- readRDS(paste0(derived, 'quantile-sensitivity.Rds'))
fpt <- readRDS(paste0(derived, 'first-passage-time.Rds'))
# NOTE(review): `range` shadows base::range for the rest of the script —
# consider renaming if base::range is ever needed below.
range <- readRDS(paste0(derived, 'areas.Rds'))
info <- readRDS(paste0(derived, 'info-blockidyr.Rds'))
patchiness <- readRDS(paste0(derived, 'patchiness.Rds'))
# Merge the fpt, moran and RSF scores
# (sequential data.table joins keyed on blockidyr)
DT <- Reduce(function(x, y) x[y, on = "blockidyr"],
list(fpt, patchiness, info, range))
### Prep ----
# Drop where moran is NA
DT <- DT[!is.na(moran)]
# Set year as factor
DT[, year := factor(year)]
# Cast block as factor
DT[, block := factor(block)]
# Scale moran
DT[, moranScaled := scale(moran)]
### By percent ----
# For each quantile threshold (50%..95%): refit the mixed model on that
# threshold's sensitivity subset, predict over a 201x201 grid, and render
# two heatmap panels (FPT x Moran, KDE area-ratio x Moran) to PNG files.
# The return value of the lapply is not used; the loop runs for its
# plotting side effects.
byPercent <- lapply(seq(0.50, 0.95, by = 0.05), function(prb) {
# subset the sensitivity table to this threshold, dropping missing moran
sub <- na.omit(Sens[probs == prb],
col = 'moran')
probDT <- Reduce(function(x, y) x[y, on = 'blockidyr'],
list(fpt, range, info, sub))
probDT[, moranScaled := scale(moran)]
# mixed model: time in patch ~ FPT and area-ratio, each interacting with
# patchiness (Moran's I), with a random intercept per animal
heat <-
lmer(tInPatch ~ fptScaled * moranScaled + areaRatioScaled * moranScaled +
(1 | ANIMAL_ID),
data = probDT)
# 201x201 prediction grids spanning the observed predictor ranges;
# NOTE(review): `fpt` below shadows the outer fpt table for the rest of
# this function body.
fpt <- data.frame(
x = rep(seq(min(probDT$moranScaled, na.rm = TRUE),
max(probDT$moranScaled, na.rm = TRUE),
by = ((max(probDT$moranScaled, na.rm = TRUE) - min(probDT$moranScaled, na.rm = TRUE))/200)), 201),
y = rep(seq(min(probDT$fptScaled, na.rm = TRUE),
max(probDT$fptScaled, na.rm = TRUE),
by = ((max(probDT$fptScaled, na.rm = TRUE) - min(probDT$fptScaled, na.rm = TRUE))/200)), each = 201))
kde <- data.frame(
x = rep(seq(min(probDT$moranScaled, na.rm = TRUE),
max(probDT$moranScaled, na.rm = TRUE),
by = ((max(probDT$moranScaled, na.rm = TRUE) - min(probDT$moranScaled, na.rm = TRUE))/200)), 201),
y = rep(seq(min(probDT$areaRatioScaled, na.rm = TRUE),
max(probDT$areaRatioScaled, na.rm = TRUE),
by = ((max(probDT$areaRatioScaled, na.rm = TRUE) - min(probDT$areaRatioScaled, na.rm = TRUE))/200)), each = 201))
# predicted response over each grid, built by hand from the fixed effects
# with the non-varying predictor held at its mean
fpt$z <- ((fixef(heat)[1]) +
(fixef(heat)[3] * fpt$x) +
(fixef(heat)[4] * mean(probDT$areaRatioScaled)) +
(fixef(heat)[2] * fpt$y) +
(fixef(heat)[5] * (fpt$x) * (fpt$y)) +
(fixef(heat)[6] * (fpt$x) * mean(probDT$areaRatioScaled)))
kde$z <- ((fixef(heat)[1]) +
(fixef(heat)[3] * kde$x) +
(fixef(heat)[2] * mean(probDT$fptScaled)) +
(fixef(heat)[4] * kde$y) +
(fixef(heat)[6] * (kde$x) * (kde$y)) +
(fixef(heat)[5] * (kde$x) * mean(probDT$fptScaled)))
# rescale grid coordinates to [0, 1] so both panels share an extent
fpt$xnew <-
(fpt$x - (min(fpt$x))) / (max(fpt$x) - min(fpt$x))
fpt$ynew <-
(fpt$y - (min(fpt$y))) / (max(fpt$y) - min(fpt$y))
kde$xnew <-
(kde$x - (min(kde$x))) / (max(kde$x) - min(kde$x))
kde$ynew <-
(kde$y - (min(kde$y))) / (max(kde$y) - min(kde$y))
# rasterize the predictions and derive 5-level contour lines
rastFPT <- cbind(fpt$xnew, fpt$ynew, fpt$z)
rastKDE <- cbind(kde$xnew, kde$ynew, kde$z)
heatFPT <- rasterFromXYZ(rastFPT)
heatKDE <- rasterFromXYZ(rastKDE)
contKDE <- rasterToContour(heatKDE, nlevels = 5)
contFPT <- rasterToContour(heatFPT, nlevels = 5)
rbPal <- colorRampPalette(c('#ffffe5', '#f7fcb9', '#d9f0a3',
'#addd8e', '#78c679', '#41ab5d',
'#238443', '#006837', '#004529'))(75)
# write each panel as a PNG; zlim is shared across both rasters so the
# two panels use one common color scale
png(paste0('graphics/Supplement3/HeatSensPieces/FPT', prb * 100, '.png'),
width = 3.15,
height = 3.15,
units = 'cm',
res = 600
)
par(mar = c(0, 0, 0, 0))
image(heatFPT, col = rbPal,
zlim = c(min(c(minValue(heatKDE), minValue(heatFPT))),
max(c(maxValue(heatKDE), maxValue(heatFPT)))))
lines(contFPT)
dev.off()
png(paste0('graphics/Supplement3/HeatSensPieces/KDE', prb * 100, '.png'),
width = 3.15,
height = 3.15,
units = 'cm',
res = 600
)
par(mar = c(0, 0, 0, 0))
image(heatKDE, col = rbPal,
zlim = c(min(c(minValue(heatKDE), minValue(heatFPT))),
max(c(maxValue(heatKDE), maxValue(heatFPT)))))
lines(contKDE)
dev.off()
})
### Legend ----
# Render a standalone vertical color-bar legend matching the heatmap
# palette used above (20 swatches, highest value on top).
rbPal <- colorRampPalette(c('#ffffe5', '#f7fcb9', '#d9f0a3',
'#addd8e', '#78c679', '#41ab5d',
'#238443', '#006837', '#004529'))
legend_im <- as.raster(matrix(rev(rbPal(20)), ncol = 1))
png(
'graphics/Supplement3/HeatSensPieces/Legend.png',
height = 7.3,
width = 1,
units = 'cm',
res = 600
)
par(mar = c(0, 0, 0, 0))
plot(legend_im)
dev.off()
| /R/sensitivity/7.1-HeatmapSensitivity.R | no_license | wildlifeevoeco/MovingAcrossGradients | R | false | false | 4,596 | r | ### Heatmap Sensitivity ====
# Authors: Quinn Webber, Michel Laforge, Maegwin Bonar, Chris Hart,
# Alec Robitaille, Sana Zabihi-Seissan, Eric Vander Wal
### Packages ----
libs <- c('raster', 'lme4', 'piecewiseSEM',
'data.table', 'ggplot2')
lapply(libs, require, character.only = TRUE)
### Set variables ----
source('R/variables.R')
### Input ----
Sens <- readRDS(paste0(derived, 'quantile-sensitivity.Rds'))
fpt <- readRDS(paste0(derived, 'first-passage-time.Rds'))
range <- readRDS(paste0(derived, 'areas.Rds'))
info <- readRDS(paste0(derived, 'info-blockidyr.Rds'))
patchiness <- readRDS(paste0(derived, 'patchiness.Rds'))
# Merge the fpt, moran and RSF scores
DT <- Reduce(function(x, y) x[y, on = "blockidyr"],
list(fpt, patchiness, info, range))
### Prep ----
# Drop where moran is NA
DT <- DT[!is.na(moran)]
# Set year as factor
DT[, year := factor(year)]
# Cast block as factor
DT[, block := factor(block)]
# Scale moran
DT[, moranScaled := scale(moran)]
### By percent ----
byPercent <- lapply(seq(0.50, 0.95, by = 0.05), function(prb) {
sub <- na.omit(Sens[probs == prb],
col = 'moran')
probDT <- Reduce(function(x, y) x[y, on = 'blockidyr'],
list(fpt, range, info, sub))
probDT[, moranScaled := scale(moran)]
heat <-
lmer(tInPatch ~ fptScaled * moranScaled + areaRatioScaled * moranScaled +
(1 | ANIMAL_ID),
data = probDT)
fpt <- data.frame(
x = rep(seq(min(probDT$moranScaled, na.rm = TRUE),
max(probDT$moranScaled, na.rm = TRUE),
by = ((max(probDT$moranScaled, na.rm = TRUE) - min(probDT$moranScaled, na.rm = TRUE))/200)), 201),
y = rep(seq(min(probDT$fptScaled, na.rm = TRUE),
max(probDT$fptScaled, na.rm = TRUE),
by = ((max(probDT$fptScaled, na.rm = TRUE) - min(probDT$fptScaled, na.rm = TRUE))/200)), each = 201))
kde <- data.frame(
x = rep(seq(min(probDT$moranScaled, na.rm = TRUE),
max(probDT$moranScaled, na.rm = TRUE),
by = ((max(probDT$moranScaled, na.rm = TRUE) - min(probDT$moranScaled, na.rm = TRUE))/200)), 201),
y = rep(seq(min(probDT$areaRatioScaled, na.rm = TRUE),
max(probDT$areaRatioScaled, na.rm = TRUE),
by = ((max(probDT$areaRatioScaled, na.rm = TRUE) - min(probDT$areaRatioScaled, na.rm = TRUE))/200)), each = 201))
fpt$z <- ((fixef(heat)[1]) +
(fixef(heat)[3] * fpt$x) +
(fixef(heat)[4] * mean(probDT$areaRatioScaled)) +
(fixef(heat)[2] * fpt$y) +
(fixef(heat)[5] * (fpt$x) * (fpt$y)) +
(fixef(heat)[6] * (fpt$x) * mean(probDT$areaRatioScaled)))
kde$z <- ((fixef(heat)[1]) +
(fixef(heat)[3] * kde$x) +
(fixef(heat)[2] * mean(probDT$fptScaled)) +
(fixef(heat)[4] * kde$y) +
(fixef(heat)[6] * (kde$x) * (kde$y)) +
(fixef(heat)[5] * (kde$x) * mean(probDT$fptScaled)))
fpt$xnew <-
(fpt$x - (min(fpt$x))) / (max(fpt$x) - min(fpt$x))
fpt$ynew <-
(fpt$y - (min(fpt$y))) / (max(fpt$y) - min(fpt$y))
kde$xnew <-
(kde$x - (min(kde$x))) / (max(kde$x) - min(kde$x))
kde$ynew <-
(kde$y - (min(kde$y))) / (max(kde$y) - min(kde$y))
rastFPT <- cbind(fpt$xnew, fpt$ynew, fpt$z)
rastKDE <- cbind(kde$xnew, kde$ynew, kde$z)
heatFPT <- rasterFromXYZ(rastFPT)
heatKDE <- rasterFromXYZ(rastKDE)
contKDE <- rasterToContour(heatKDE, nlevels = 5)
contFPT <- rasterToContour(heatFPT, nlevels = 5)
rbPal <- colorRampPalette(c('#ffffe5', '#f7fcb9', '#d9f0a3',
'#addd8e', '#78c679', '#41ab5d',
'#238443', '#006837', '#004529'))(75)
png(paste0('graphics/Supplement3/HeatSensPieces/FPT', prb * 100, '.png'),
width = 3.15,
height = 3.15,
units = 'cm',
res = 600
)
par(mar = c(0, 0, 0, 0))
image(heatFPT, col = rbPal,
zlim = c(min(c(minValue(heatKDE), minValue(heatFPT))),
max(c(maxValue(heatKDE), maxValue(heatFPT)))))
lines(contFPT)
dev.off()
png(paste0('graphics/Supplement3/HeatSensPieces/KDE', prb * 100, '.png'),
width = 3.15,
height = 3.15,
units = 'cm',
res = 600
)
par(mar = c(0, 0, 0, 0))
image(heatKDE, col = rbPal,
zlim = c(min(c(minValue(heatKDE), minValue(heatFPT))),
max(c(maxValue(heatKDE), maxValue(heatFPT)))))
lines(contKDE)
dev.off()
})
### Legend ----
rbPal <- colorRampPalette(c('#ffffe5', '#f7fcb9', '#d9f0a3',
'#addd8e', '#78c679', '#41ab5d',
'#238443', '#006837', '#004529'))
legend_im <- as.raster(matrix(rev(rbPal(20)), ncol = 1))
png(
'graphics/Supplement3/HeatSensPieces/Legend.png',
height = 7.3,
width = 1,
units = 'cm',
res = 600
)
par(mar = c(0, 0, 0, 0))
plot(legend_im)
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/compute_functions.R
\name{targetHttpProxies.list}
\alias{targetHttpProxies.list}
\title{Retrieves the list of TargetHttpProxy resources available to the specified project.}
\usage{
targetHttpProxies.list(project, filter = NULL, maxResults = NULL,
pageToken = NULL)
}
\arguments{
\item{project}{Project ID for this request}
\item{filter}{Sets a filter expression for filtering listed resources, in the form filter={expression}}
\item{maxResults}{The maximum number of results per page that should be returned}
\item{pageToken}{Specifies a page token to use}
}
\description{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
}
\details{
Authentication scopes used by this function are:
\itemize{
\item https://www.googleapis.com/auth/cloud-platform
\item https://www.googleapis.com/auth/compute
\item https://www.googleapis.com/auth/compute.readonly
}
Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/compute.readonly"))}
Then run \code{googleAuthR::gar_auth()} to authenticate.
See \code{\link[googleAuthR]{gar_auth}} for details.
}
\seealso{
\href{https://developers.google.com/compute/docs/reference/latest/}{Google Documentation}
}
| /googlecomputev1.auto/man/targetHttpProxies.list.Rd | permissive | Phippsy/autoGoogleAPI | R | false | true | 1,360 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/compute_functions.R
\name{targetHttpProxies.list}
\alias{targetHttpProxies.list}
\title{Retrieves the list of TargetHttpProxy resources available to the specified project.}
\usage{
targetHttpProxies.list(project, filter = NULL, maxResults = NULL,
pageToken = NULL)
}
\arguments{
\item{project}{Project ID for this request}
\item{filter}{Sets a filter expression for filtering listed resources, in the form filter={expression}}
\item{maxResults}{The maximum number of results per page that should be returned}
\item{pageToken}{Specifies a page token to use}
}
\description{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
}
\details{
Authentication scopes used by this function are:
\itemize{
\item https://www.googleapis.com/auth/cloud-platform
\item https://www.googleapis.com/auth/compute
\item https://www.googleapis.com/auth/compute.readonly
}
Set \code{options(googleAuthR.scopes.selected = c(https://www.googleapis.com/auth/cloud-platform, https://www.googleapis.com/auth/compute, https://www.googleapis.com/auth/compute.readonly)}
Then run \code{googleAuthR::gar_auth()} to authenticate.
See \code{\link[googleAuthR]{gar_auth}} for details.
}
\seealso{
\href{https://developers.google.com/compute/docs/reference/latest/}{Google Documentation}
}
|
library(scalpel)
### Name: reviewNeuronsInteractive
### Title: Manually classify the identified neurons from SCALPEL.
### Aliases: reviewNeuronsInteractive
### ** Examples
## Not run:
##D ### many of the functions in this package are interconnected so the
##D ### easiest way to learn to use the package is by working through the vignette,
##D ### which is available at ajpete.com/software
##D
##D #assumes you have run the example for the "scalpel" function
##D
##D #we review the set of spatial components from Step 2,
##D #which are contained in scalpelOutput$A
##D reviewNeuronsInteractive(scalpelOutput = scalpelOutput, neuronSet = "A")
##D #enter "Y" for the first neuron and then "Q"
##D #entering "Q" allows us to finish manually classifying later using the same command
##D #this time there are fewer left to review
##D reviewNeuronsInteractive(scalpelOutput = scalpelOutput, neuronSet = "A")
##D #enter "N" for the first and "?" for the second this time
##D #note that once a neuron is classified as "N", it disappears from the plot
## End(Not run)
| /data/genthat_extracted_code/scalpel/examples/reviewNeuronsInteractive.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 1,069 | r | library(scalpel)
### Name: reviewNeuronsInteractive
### Title: Manually classify the identified neurons from SCALPEL.
### Aliases: reviewNeuronsInteractive
### ** Examples
## Not run:
##D ### many of the functions in this package are interconnected so the
##D ### easiest way to learn to use the package is by working through the vignette,
##D ### which is available at ajpete.com/software
##D
##D #assumes you have run the example for the "scalpel" function
##D
##D #we review the set of spatial components from Step 2,
##D #which are contained in scalpelOutput$A
##D reviewNeuronsInteractive(scalpelOutput = scalpelOutput, neuronSet = "A")
##D #enter "Y" for the first neuron and then "Q"
##D #entering "Q" allows us to finish manually classifying later using the same command
##D #this time there are fewer left to review
##D reviewNeuronsInteractive(scalpelOutput = scalpelOutput, neuronSet = "A")
##D #enter "N" for the first and "?" for the second this time
##D #note that once a neuron is classified as "N", it disappears from the plot
## End(Not run)
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 21990
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 21990
c
c Input Parameter (command line, file):
c input filename QBFLIB/Miller-Marin/trafficlight-controller/tlc05-uniform-depth-23.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 8353
c no.of clauses 21990
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 21990
c
c QBFLIB/Miller-Marin/trafficlight-controller/tlc05-uniform-depth-23.qdimacs 8353 21990 E1 [] 0 48 8235 21990 NONE
| /code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Miller-Marin/trafficlight-controller/tlc05-uniform-depth-23/tlc05-uniform-depth-23.R | no_license | arey0pushpa/dcnf-autarky | R | false | false | 685 | r | c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 21990
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 21990
c
c Input Parameter (command line, file):
c input filename QBFLIB/Miller-Marin/trafficlight-controller/tlc05-uniform-depth-23.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 8353
c no.of clauses 21990
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 21990
c
c QBFLIB/Miller-Marin/trafficlight-controller/tlc05-uniform-depth-23.qdimacs 8353 21990 E1 [] 0 48 8235 21990 NONE
|
# script for making plots

# histogram of estimated # causal variants
dat <- read.table("path/to/output_1", header = TRUE, sep = "\t")
hist(dat$N_Causal, breaks = 100, xlab = "# estimated causal variants", main = "")

# Posterior probability distribution boxplots, one box per causal-variant
# count. library(ggplot2) was missing in the original even though ggplot()
# is used below.
library(ggplot2)
dat <- read.table("path/to/output_2", header = TRUE, sep = "\t")
dat$N_Causal <- as.factor(dat$N_Causal)
p <- ggplot(dat, aes(x = N_Causal, y = PostProb, fill = N_Causal)) +
  geom_boxplot() +
  geom_jitter(shape = 16, position = position_jitter(0.2))
print(p)

# annotation pie chart (counts of ANNOVAR Func.refGene categories)
library(dplyr)
dat <- read.table("path/to/${ANNOVAR_OUTPUT}.hg19_multianno.txt", sep = "\t", header = TRUE)
# NOTE(review): `summary` shadows base::summary for the rest of the script.
summary <- dat %>% group_by(Func.refGene) %>% summarise(m = n())
pie <- ggplot(summary, aes(x = "", y = m, fill = Func.refGene)) +
  geom_bar(width = 1, stat = "identity") + coord_polar("y", start = 0) +
  scale_fill_brewer(palette = "Set2")
print(pie)
| /04_making_plots.R | no_license | saorisakaue/SS_FINEMAP | R | false | false | 860 | r | # script for making plots
# histogram of estimated # causal variants
dat<-read.table("path/to/output_1",header = T,sep="\t")
hist(dat$N_Causal,breaks=100,xlab = "# estimaed causal variants",main="")
# Posterior Probs distribution boxplots
# Pre Probs
dat<-read.table("path/to/output_2",header = T,sep="\t")
dat$N_Causal<-as.factor(dat$N_Causal)
p <- ggplot(dat, aes(x=N_Causal, y=PostProb, fill = N_Causal)) +
geom_boxplot() +
geom_jitter(shape=16, position=position_jitter(0.2))
print(p)
# annotation pie chart
library(dplyr)
dat<-read.table("path/to/${ANNOVAR_OUTPUT}.hg19_multianno.txt",sep="\t",header=T)
summary <- dat %>% group_by(Func.refGene) %>% summarise(m=n())
pie <- ggplot(summary, aes(x="", y=m, fill=Func.refGene))+
geom_bar(width = 1, stat = "identity") + coord_polar("y", start=0) +
scale_fill_brewer(palette="Set2")
print(pie)
|
# Sample size per sequence for a two-sample crossover equivalence trial:
#   n = (z_{1-alpha} + z_{1-beta/2})^2 * sigma / (sequence * (margin - |delta|)^2)
# where alpha/beta are the error rates, sigma the variance term, sequence
# the number of sequences, delta the treatment difference and margin the
# equivalence margin.
TwoSampleSeqCrossOver.Equivalence <- function(alpha, beta, sigma, sequence,
                                              delta, margin) {
  z <- qnorm(1 - alpha) + qnorm(1 - beta / 2)
  z^2 * sigma / (sequence * (margin - abs(delta))^2)
}
| /R/TwoSampleSeqCrossOver.Equivalence.R | no_license | cran/TrialSize | R | false | false | 173 | r | TwoSampleSeqCrossOver.Equivalence <-
function(alpha,beta,sigma,sequence,delta,margin){
n<-(qnorm(1-alpha)+qnorm(1-beta/2))^2*sigma/(sequence*(margin-abs(delta))^2)
n
}
|
# define a function for Twitter Search
# Authenticates against the Twitter API using credentials stored in a
# key=value properties file and returns up to 100 English tweets matching
# `input_str` (a list of twitteR status objects).
# NOTE(review): relies on the global `secretLoc` pointing at the
# credentials file — confirm it is defined before calling.
get_twitter<-function(input_str) {
## Read secret keys from a local file
myProp <- read.table(secretLoc,header=FALSE, sep="=", row.names=1, strip.white=TRUE, na.strings="NA", stringsAsFactors=FALSE)
TWITTER_API_KEY <- myProp["TWITTER_API_KEY",1]
TWITTER_API_SECRET <- myProp["TWITTER_API_SECRET",1]
TWITTER_ACCESS_TOKEN <- myProp["TWITTER_ACCESS_TOKEN",1]
TWITTER_ACCESS_SECRET <- myProp["TWITTER_ACCESS_SECRET",1]
## Authenticate with Twitter
setup_twitter_oauth(TWITTER_API_KEY,TWITTER_API_SECRET,TWITTER_ACCESS_TOKEN,TWITTER_ACCESS_SECRET)
## Search Twitter
r_stats <- searchTwitter(input_str, n=100, lang="en")
return(r_stats)
}
# define a function that takes get_twitter and computes sentiment scores.
# NOTE(review): the original source declared get_sentiments without a body,
# which made the display_wordcloud definition below become its body and
# left display_wordcloud undefined at top level. Until a real
# implementation is supplied, fail fast instead of silently misbehaving.
get_sentiments <- function(get_twitter) {
  stop("get_sentiments() is not implemented yet")
}

# define a function to display a word cloud from the text of the given
# tweets (a list of status objects as returned by get_twitter()), after
# dropping common stop words and @usernames.
display_wordcloud <- function(get_twitter) {
  r_stats_text <- sapply(get_twitter, function(x) x$getText())
  r_stats_text_corpus <- Corpus(VectorSource(r_stats_text))
  tdm <- TermDocumentMatrix(r_stats_text_corpus)
  m <- as.matrix(tdm)
  v <- sort(rowSums(m), decreasing = TRUE)
  d <- data.frame(word = names(v), freq = v)
  # filter common words
  skipWords <- c("and", "the", "for", "are", "but", "or", "nor", "yet", "so",
                 "if", "a", "an", "from", "want", "how")
  # cap at the 200 most frequent words, but never index past the end of d
  # (the original `1:200` produced NA rows for small corpora)
  inds <- seq_len(min(200L, nrow(d)))
  inds <- which(!(inds %in% which(d$word %in% skipWords)))
  # filter usernames
  inds <- inds[which(!(inds %in% grep("@", d$word)))]
  ## Display Wordcloud
  wordcloud(d[inds, "word"], d[inds, "freq"])
}
# define a function for Twitter Search
get_twitter<-function(input_str) {
## Read secret keys from a local file
myProp <- read.table(secretLoc,header=FALSE, sep="=", row.names=1, strip.white=TRUE, na.strings="NA", stringsAsFactors=FALSE)
TWITTER_API_KEY <- myProp["TWITTER_API_KEY",1]
TWITTER_API_SECRET <- myProp["TWITTER_API_SECRET",1]
TWITTER_ACCESS_TOKEN <- myProp["TWITTER_ACCESS_TOKEN",1]
TWITTER_ACCESS_SECRET <- myProp["TWITTER_ACCESS_SECRET",1]
## Authenticate with Twitter
setup_twitter_oauth(TWITTER_API_KEY,TWITTER_API_SECRET,TWITTER_ACCESS_TOKEN,TWITTER_ACCESS_SECRET)
## Search Twitter
r_stats <- searchTwitter(input_str, n=100, lang="en")
return(r_stats)
}
# define a function that takes get_twitter and compute sentiment scores
get_sentiments<-function(get_twitter)
# define a function to display wordcloud
display_wordcloud<-function(get_twitter) {
r_stats_text <- sapply(get_twitter, function(x) x$getText())
r_stats_text_corpus <- Corpus(VectorSource(r_stats_text))
tdm <- TermDocumentMatrix(r_stats_text_corpus)
m <- as.matrix(tdm)
v <- sort(rowSums(m),decreasing=TRUE)
d <- data.frame(word = names(v),freq=v)
#filter common words
skipWords <- c("and", "the", "for", "are", "but", "or", "nor", "yet", "so",
"if", "a", "an", "from", "want", "how")
inds <- 1:200
inds <- which(!(inds %in% which(d$word %in% skipWords)))
#filter usernames
inds <- inds[which(!(inds %in% grep("@", d$word)))]
## Display Wordcloud
wordcloud(d[inds, "word"], d[inds,"freq"])
} |
library(dplyr)
library(readr)
library(tibble)
library(sf)
# load geometry data (one row per vertex, grouped by polygon `record`)
df_geom <- readr::read_delim("~/Projects/azmpdata/tmp/data/polygons/SS_coordinates.csv",
col_names=T, delim=",")
# load attributes data - id/names
# NOTE(review): df_attrib is read but never used below — confirm whether
# the attributes were meant to be joined onto sf_SS.
df_attrib <- readr::read_delim("~/Projects/azmpdata/tmp/data/polygons/SS_names.csv",
col_names=T, delim=",")
# create sf object: points -> one geometry per record -> polygon hulls
sf_SS <- df_geom %>%
st_as_sf(coords = c("longitude", "latitude"), crs = 4326) %>%
group_by(record) %>%
summarise() %>%
select(-record) %>%
st_cast("POLYGON") %>%
st_convex_hull() # check what that does
# save to RData
save(file="~/Projects/azmpdata/tmp/data/polygons/SS.RData", sf_SS)
| /tmp/BC/R/SS_csv2sf.R | permissive | casaultb/azmpdata | R | false | false | 711 | r | library(dplyr)
library(readr)
library(tibble)
library(sf)
# load geometry data
df_geom <- readr::read_delim("~/Projects/azmpdata/tmp/data/polygons/SS_coordinates.csv",
col_names=T, delim=",")
# load attributes data - id/names
df_attrib <- readr::read_delim("~/Projects/azmpdata/tmp/data/polygons/SS_names.csv",
col_names=T, delim=",")
# create sf object
sf_SS <- df_geom %>%
st_as_sf(coords = c("longitude", "latitude"), crs = 4326) %>%
group_by(record) %>%
summarise() %>%
select(-record) %>%
st_cast("POLYGON") %>%
st_convex_hull() # check what that does
# save to RData
save(file="~/Projects/azmpdata/tmp/data/polygons/SS.RData", sf_SS)
|
library(ggtree)
library(tidyverse)
library(reshape2)
library(patchwork)
library(gdata)
setwd("~/Users/islekbro/Desktop/Rstudio/interactions/fzd-gα/")
preabDat <- read_csv("interactions_binary.csv")
#premat <- as.matrix(preabDat)
#rownames(premat) <- preabDat$X1
#premat <- premat[1:nrow(premat),2:ncol(premat)]
#matCite <- matrix(nrow = nrow(premat), ncol = ncol(premat))
#rownames(matCite) = rownames(premat)
#colnames(matCite) = colnames(premat)
#
#for (i in 1:nrow(premat)){
# for (j in 1:ncol(premat)){
# if (premat[i,j] == 1){
# matCite[i,j] <- round(runif(1, min = 0, max = 100),digits = 0)
# }
# }
#}
#
#dfCite <- as.data.frame(matCite)
#dfCite$X1 <- preabDat$X1
#dfCite <- read_csv("interactions.csv")
# citation counts per FZD x G-alpha pair (wide format, id column "A")
dfCite <- read_csv("citedat.csv")
# fixed display orders for the receptor rows and G-alpha columns
ordFZD <- c("FZD7","FZD1","FZD2","FZD8","FZD5","FZD10","FZD9","FZD4","FZD6","FZD3")
ordwnt <- c("G-alpha / i3","G-alpha / i1","G-alpha / i2","G-alpha / o","G-alpha / t1","G-alpha / t2","G-alpha / t3","G-alpha / z","G-alpha / q","G-alpha / 11","G-alpha / 14","G-alpha / 15","G-alpha / 13","G-alpha / 12","G-alpha / s1","G-alpha / s2","G-alpha / olf")
# melt both tables to long format and join them, so value.x holds the
# presence/absence flag and value.y the citation count
preabDat.m <- melt(preabDat)
dfCite.m <- melt(dfCite, id.vars = "A")
preabDat.m <- merge.data.frame(preabDat.m, dfCite.m,by = c("A", "variable"))
# impose the display orders defined above (FZDs reversed so the first
# entry ends up at the top of the y axis)
preabDat.m$variable <- reorder.factor(preabDat.m$variable, new.order=ordwnt)
preabDat.m$A <- reorder.factor(preabDat.m$A, new.order=rev(ordFZD))
preabDat.m <- preabDat.m %>%
arrange(variable,A)
# tile heatmap of reviewed vs unreviewed interactions
pTile <- ggplot(preabDat.m, aes(variable,A,fill = ifelse(value.x == 0, "Unreviewed", "Reviewed"))) +
geom_tile(color = "gray70", size = 0.2) +
#geom_text(aes(label = value.y), color = "gray20", size = 2)+
scale_fill_manual(name = "", values = c("tan1","khaki1")) +
scale_y_discrete(position = "right") +
theme_minimal() +
theme(axis.title = element_blank(), axis.text.x = element_text(angle = 90, hjust = 1),
axis.text = element_text(face = "bold", colour = "black", vjust = 0.5),#axis.text.y = element_blank(),
axis.ticks = element_blank(), panel.grid = element_blank(),
plot.margin=margin(t = 0, l = 0), legend.position = "bottom",
legend.text = element_text(face="bold")) ; pTile
# guide trees flanking the heatmap: FZDs on the left, G-alphas on top
dend1 <- read.tree("FZDs_tree.nw")
dend2 <- read.tree("G-alpha-guidetree.nw")
pDend1 <- ggtree(dend1,branch.length = "none", color="gray40") +
#geom_nodepoint(color="#b5e521", alpha=1/3, size=5) +
theme(plot.margin=margin(r = -0.3, l = -0.3,unit = "cm")) #+
#geom_tiplab()
#xlim(NA, 8) #+ geom_text(aes(label=node))
pDend2 <- ggtree(dend2,branch.length = "none",color="gray40") +
layout_dendrogram() + scale_y_reverse() +
theme(plot.margin=margin(r = 0, l = 0,unit = "cm"))#+ geom_tiplab(angle = 90,hjust = 1) #+ scale_x()# + xlim(NA, 25)
# patchwork layout: A = top dendrogram, B = left dendrogram, C = heatmap
design <- "#########
#########
##AAAAAAA
BBCCCCCCC
BBCCCCCCC"
wr <- wrap_plots(A = pDend2, B = pDend1, C = pTile, design = design); wr
ggsave("fzd-galpha_interactions1.pdf", height = 8.31, width = 7.72, wr, dpi = 300, device = "pdf")
## Heatmap of reviewed FZD / G-alpha interactions flanked by phylogenetic
## trees (ggtree + patchwork), exported to PDF at the end of the script.
library(tidyverse)
library(reshape2)
library(patchwork)
library(gdata)
## NOTE(review): "~" already expands to the home directory, so
## "~/Users/islekbro/..." likely resolves to /Users/<user>/Users/islekbro/...
## -- confirm this path; setwd() inside scripts is fragile in general.
setwd("~/Users/islekbro/Desktop/Rstudio/interactions/fzd-gα/")
## Binary presence/absence matrix of FZD x G-alpha interactions.
preabDat <- read_csv("interactions_binary.csv")
## ---------------------------------------------------------------------------
## Legacy approach (kept disabled for reference): citation counts were
## simulated with runif() for every interacting pair; superseded by the
## curated counts loaded from citedat.csv below.
## ---------------------------------------------------------------------------
#premat <- as.matrix(preabDat)
#rownames(premat) <- preabDat$X1
#premat <- premat[1:nrow(premat),2:ncol(premat)]
#matCite <- matrix(nrow = nrow(premat), ncol = ncol(premat))
#rownames(matCite) = rownames(premat)
#colnames(matCite) = colnames(premat)
#
#for (i in 1:nrow(premat)){
# for (j in 1:ncol(premat)){
# if (premat[i,j] == 1){
# matCite[i,j] <- round(runif(1, min = 0, max = 100),digits = 0)
# }
# }
#}
#
#dfCite <- as.data.frame(matCite)
#dfCite$X1 <- preabDat$X1
#dfCite <- read_csv("interactions.csv")
## Curated citation data; column "A" identifies the FZD receptor per row.
dfCite <- read_csv("citedat.csv")
## Fixed display order for the heatmap axes (FZD paralogs, G-alpha subunits).
ordFZD <- c("FZD7","FZD1","FZD2","FZD8","FZD5","FZD10","FZD9","FZD4","FZD6","FZD3")
ordwnt <- c("G-alpha / i3","G-alpha / i1","G-alpha / i2","G-alpha / o","G-alpha / t1","G-alpha / t2","G-alpha / t3","G-alpha / z","G-alpha / q","G-alpha / 11","G-alpha / 14","G-alpha / 15","G-alpha / 13","G-alpha / 12","G-alpha / s1","G-alpha / s2","G-alpha / olf")
## Reshape both tables to long format (one row per FZD x G-alpha pair).
## NOTE(review): melt() is called without id.vars here, so reshape2 guesses
## the id columns -- confirm it picks "A" as intended.
preabDat.m <- melt(preabDat)
dfCite.m <- melt(dfCite, id.vars = "A")
## Attach the citation count (value.y) to each presence/absence flag (value.x).
preabDat.m <- merge.data.frame(preabDat.m, dfCite.m,by = c("A", "variable"))
## Impose the display order on both axes (gdata::reorder.factor); the FZD
## order is reversed so ordFZD[1] ends up at the top of the y axis.
preabDat.m$variable <- reorder.factor(preabDat.m$variable, new.order=ordwnt)
preabDat.m$A <- reorder.factor(preabDat.m$A, new.order=rev(ordFZD))
preabDat.m <- preabDat.m %>%
arrange(variable,A)
## Tile heatmap: pairs with value.x == 0 are labelled "Unreviewed", all others
## "Reviewed"; the two fill colors are matched to the labels in alphabetical
## level order (i.e. "Reviewed" = tan1, "Unreviewed" = khaki1).
pTile <- ggplot(preabDat.m, aes(variable,A,fill = ifelse(value.x == 0, "Unreviewed", "Reviewed"))) +
geom_tile(color = "gray70", size = 0.2) +
#geom_text(aes(label = value.y), color = "gray20", size = 2)+
scale_fill_manual(name = "", values = c("tan1","khaki1")) +
scale_y_discrete(position = "right") +
theme_minimal() +
theme(axis.title = element_blank(), axis.text.x = element_text(angle = 90, hjust = 1),
axis.text = element_text(face = "bold", colour = "black", vjust = 0.5),#axis.text.y = element_blank(),
axis.ticks = element_blank(), panel.grid = element_blank(),
plot.margin=margin(t = 0, l = 0), legend.position = "bottom",
legend.text = element_text(face="bold")) ; pTile
## Phylogenies flanking the heatmap: FZD tree for the rows, G-alpha guide
## tree for the columns; branch lengths are ignored (cladogram style).
dend1 <- read.tree("FZDs_tree.nw")
dend2 <- read.tree("G-alpha-guidetree.nw")
pDend1 <- ggtree(dend1,branch.length = "none", color="gray40") +
#geom_nodepoint(color="#b5e521", alpha=1/3, size=5) +
theme(plot.margin=margin(r = -0.3, l = -0.3,unit = "cm")) #+
#geom_tiplab()
#xlim(NA, 8) #+ geom_text(aes(label=node))
pDend2 <- ggtree(dend2,branch.length = "none",color="gray40") +
layout_dendrogram() + scale_y_reverse() +
theme(plot.margin=margin(r = 0, l = 0,unit = "cm"))#+ geom_tiplab(angle = 90,hjust = 1) #+ scale_x()# + xlim(NA, 25)
## patchwork layout grid: A = top dendrogram, B = left dendrogram,
## C = heatmap, '#' = empty cell.
design <- "#########
#########
##AAAAAAA
BBCCCCCCC
BBCCCCCCC"
wr <- wrap_plots(A = pDend2, B = pDend1, C = pTile, design = design); wr
ggsave("fzd-galpha_interactions1.pdf", height = 8.31, width = 7.72, wr, dpi = 300, device = "pdf") |
\name{labeledHeatmap}
\alias{labeledHeatmap}
\title{ Produce a labeled heatmap plot }
\description{
Plots a heatmap plot with color legend, row and column annotation, and optional text within the heatmap.
}
\usage{
labeledHeatmap(
Matrix,
xLabels, yLabels = NULL,
xSymbols = NULL, ySymbols = NULL,
colorLabels = NULL,
xColorLabels = FALSE, yColorLabels = FALSE,
checkColorsValid = TRUE,
invertColors = FALSE,
setStdMargins = TRUE,
xLabelsPosition = "bottom",
xLabelsAngle = 45,
xLabelsAdj = 1,
yLabelsPosition = "left",
xColorWidth = 2 * strheight("M"),
yColorWidth = 2 * strwidth("M"),
xColorOffset = strheight("M")/3,
yColorOffset = strwidth("M")/3,
colors = NULL,
naColor = "grey",
textMatrix = NULL,
cex.text = NULL,
textAdj = c(0.5, 0.5),
cex.lab = NULL,
cex.lab.x = cex.lab,
cex.lab.y = cex.lab,
colors.lab.x = 1,
colors.lab.y = 1,
font.lab.x = 1,
font.lab.y = 1,
bg.lab.x = NULL,
bg.lab.y = NULL,
x.adj.lab.y = 1,
plotLegend = TRUE,
keepLegendSpace = plotLegend,
# Separator line specification
verticalSeparator.x = NULL,
verticalSeparator.col = 1,
verticalSeparator.lty = 1,
verticalSeparator.lwd = 1,
verticalSeparator.ext = 0,
verticalSeparator.interval = 0,
horizontalSeparator.y = NULL,
horizontalSeparator.col = 1,
horizontalSeparator.lty = 1,
horizontalSeparator.lwd = 1,
horizontalSeparator.ext = 0,
horizontalSeparator.interval = 0,
# optional restrictions on which rows and columns to actually show
showRows = NULL,
showCols = NULL,
...)
}
\arguments{
\item{Matrix}{ numerical matrix to be plotted in the heatmap. }
\item{xLabels}{ labels for the columns. See Details. }
\item{yLabels}{ labels for the rows. See Details. }
\item{xSymbols}{ additional labels used when \code{xLabels} are interpreted as colors. See Details. }
\item{ySymbols}{ additional labels used when \code{yLabels} are interpreted as colors. See Details. }
\item{colorLabels}{ logical: should \code{xLabels} and \code{yLabels} be interpreted as colors? If
given, overrides \code{xColorLabels} and \code{yColorLabels} below.}
\item{xColorLabels}{ logical: should \code{xLabels} be interpreted as colors? }
\item{yColorLabels}{ logical: should \code{yLabels} be interpreted as colors? }
\item{checkColorsValid}{ logical: should given colors be checked for validity
against the output of \code{colors()} ? If this argument is \code{FALSE}, invalid color specification
will trigger an error.}
\item{invertColors}{ logical: should the color order be inverted? }
\item{setStdMargins}{ logical: should standard margins be set before calling the plot function?
Standard margins depend on \code{colorLabels}: they are wider for text labels and narrower for color
labels. The defaults are static, that is the function does not attempt to guess the optimal margins. }
\item{xLabelsPosition}{ a character string specifying the position of labels for the columns.
Recognized values are (unique abbreviations of) \code{"top", "bottom"}. }
\item{xLabelsAngle}{ angle by which the column labels should be rotated. }
\item{xLabelsAdj}{ justification parameter for column labels. See \code{\link{par}} and the
description of parameter \code{"adj"}. }
\item{yLabelsPosition}{ a character string specifying the position of labels for the rows.
Recognized values are (unique abbreviations of) \code{"left", "right"}. }
\item{xColorWidth}{ width of the color labels for the x axis expressed in user coordinates.}
\item{yColorWidth}{ width of the color labels for the y axis expressed in user coordinates.}
\item{xColorOffset}{ gap between the y axis and color labels, in user coordinates.}
\item{yColorOffset}{ gap between the x axis and color labels, in user coordinates.}
\item{colors}{ color pallette to be used in the heatmap. Defaults to \code{\link{heat.colors}}. }
\item{naColor}{ color to be used for encoding missing data. }
\item{textMatrix}{ optional text entries for each cell. Either a matrix of the same dimensions as
\code{Matrix} or a vector of the same length as the number of entries in \code{Matrix}. }
\item{cex.text}{ character expansion factor for \code{textMatrix}. }
\item{textAdj}{Adjustment for the entries in the text matrix. See the \code{adj} argument to
\code{\link{text}}.}
\item{cex.lab}{ character expansion factor for text labels labeling the axes. }
\item{cex.lab.x}{ character expansion factor for text labels labeling the x axis. Overrides \code{cex.lab}
above. }
\item{cex.lab.y}{ character expansion factor for text labels labeling the y axis. Overrides \code{cex.lab}
above. }
\item{colors.lab.x}{colors for character labels or symbols along x axis.}
\item{colors.lab.y}{colors for character labels or symbols along y axis.}
\item{font.lab.x}{integer specifying font for labels or symbols along x axis. See \code{\link{text}}.}
\item{font.lab.y}{integer specifying font for labels or symbols along y axis. See \code{\link{text}}.}
\item{bg.lab.x}{background color for the margin along the x axis.}
\item{bg.lab.y}{background color for the margin along the y axis.}
\item{x.adj.lab.y}{Justification of labels for the y axis along the x direction. A value of 0
produces left-justified text, 0.5 (the default) centered
text and 1 right-justified text. }
\item{plotLegend}{ logical: should a color legend be plotted? }
\item{keepLegendSpace}{ logical: if the color legend is not drawn, should the space be left empty
(\code{TRUE}), or should the heatmap fill the space (\code{FALSE})?}
\item{verticalSeparator.x}{indices of columns in input \code{Matrix} after
which separator lines (vertical lines between columns)
should be drawn. \code{NULL} means no lines will be drawn.}
\item{verticalSeparator.col}{color(s) of the vertical separator lines. Recycled if need be. }
\item{verticalSeparator.lty}{line type of the vertical separator lines. Recycled if need be. }
\item{verticalSeparator.lwd}{line width of the vertical separator lines. Recycled if need be. }
\item{verticalSeparator.ext}{number giving the extension of the separator line into the margin as a fraction
of the margin width. 0 means no extension, 1 means extend all the way through the margin. }
\item{verticalSeparator.interval}{number giving the interval for vertical separators. If larger than zero, vertical
separators will be drawn after every \code{verticalSeparator.interval} of displayed columns.
Used only when length of \code{verticalSeparator.x} is zero. }
\item{horizontalSeparator.y}{indices of rows in input \code{Matrix} after which separator lines (horizontal lines
between rows) should be drawn. \code{NULL} means no lines will be drawn.}
\item{horizontalSeparator.col}{ color(s) of the horizontal separator lines. Recycled if need be. }
\item{horizontalSeparator.lty}{line type of the horizontal separator lines. Recycled if need be. }
\item{horizontalSeparator.lwd}{line width of the horizontal separator lines. Recycled if need be. }
\item{horizontalSeparator.ext}{number giving the extension of the separator line into the margin as a
fraction of the margin width. 0 means no extension, 1 means extend all the way through the margin. }
\item{horizontalSeparator.interval}{number giving the interval for horizontal separators. If larger than zero, horizontal
separators will be drawn after every \code{horizontalSeparator.interval} of displayed rows.
Used only when length of \code{horizontalSeparator.y} is zero. }
\item{showRows}{A numeric vector giving the indices of rows that are actually to be shown. Defaults to all rows.}
\item{showCols}{A numeric vector giving the indices of columns that are actually to be shown. Defaults to all columns.}
\item{\dots}{ other arguments to function \code{\link{heatmap}}. }
}
\details{
The function basically plots a standard heatmap plot of the given \code{Matrix} and embellishes it with
row and column labels and/or with text within the heatmap entries. Row and column labels can be either
character strings or color squares, or both.
To get simple text labels, use \code{colorLabels=FALSE} and pass the desired row and column labels in
\code{yLabels} and \code{xLabels}, respectively.
To label rows and columns by color squares, use
\code{colorLabels=TRUE}; \code{yLabels} and \code{xLabels} are then expected to represent valid colors.
For reasons of compatibility with other functions, each entry in \code{yLabels} and \code{xLabels} is
expected to consist of a color designation preceded by 2 characters: an example would be
\code{MEturquoise}. The first two characters can be arbitrary, they are stripped.
Any labels that do not represent valid colors will be considered text labels and printed in full,
allowing the user to mix text and color labels.
It is also possible to label rows and columns by both color squares and additional text annotation. To
achieve this, use the above technique to get color labels and, additionally, pass the desired text
annotation in the \code{xSymbols} and \code{ySymbols} arguments.
}
\value{
None.
}
\author{ Peter Langfelder}
\seealso{ \code{\link{heatmap}}, \code{\link{colors}} }
\examples{
# This example illustrates 4 main ways of annotating columns and rows of a heatmap.
# Copy and paste the whole example into an R session with an interactive plot window;
# alternatively, you may replace the command sizeGrWindow below by opening
# another graphical device such as pdf.
# Generate a matrix to be plotted
nCol = 8; nRow = 7;
mat = matrix(runif(nCol*nRow, min = -1, max = 1), nRow, nCol);
rowColors = standardColors(nRow);
colColors = standardColors(nRow + nCol)[(nRow+1):(nRow + nCol)];
rowColors;
colColors;
sizeGrWindow(9,7)
par(mfrow = c(2,2))
par(mar = c(4, 5, 4, 6));
# Label rows and columns by text:
labeledHeatmap(mat, xLabels = colColors, yLabels = rowColors,
colors = greenWhiteRed(50),
setStdMargins = FALSE,
textMatrix = signif(mat, 2),
main = "Text-labeled heatmap");
# Label rows and columns by colors:
rowLabels = paste("ME", rowColors, sep="");
colLabels = paste("ME", colColors, sep="");
labeledHeatmap(mat, xLabels = colLabels, yLabels = rowLabels,
colorLabels = TRUE,
colors = greenWhiteRed(50),
setStdMargins = FALSE,
textMatrix = signif(mat, 2),
main = "Color-labeled heatmap");
# Mix text and color labels:
rowLabels[3] = "Row 3";
colLabels[1] = "Column 1";
labeledHeatmap(mat, xLabels = colLabels, yLabels = rowLabels,
colorLabels = TRUE,
colors = greenWhiteRed(50),
setStdMargins = FALSE,
textMatrix = signif(mat, 2),
main = "Mix-labeled heatmap");
# Color labels and additional text labels
rowLabels = paste("ME", rowColors, sep="");
colLabels = paste("ME", colColors, sep="");
extraRowLabels = paste("Row", c(1:nRow));
extraColLabels = paste("Column", c(1:nCol));
# Extend margins to fit all labels
par(mar = c(6, 6, 4, 6));
labeledHeatmap(mat, xLabels = colLabels, yLabels = rowLabels,
xSymbols = extraColLabels,
ySymbols = extraRowLabels,
colorLabels = TRUE,
colors = greenWhiteRed(50),
setStdMargins = FALSE,
textMatrix = signif(mat, 2),
main = "Text- + color-labeled heatmap");
}
\keyword{ hplot }% __ONLY ONE__ keyword per line
| /man/labeledHeatmap.Rd | no_license | pdicarl3/WGCNA | R | false | false | 11,904 | rd | \name{labeledHeatmap}
\alias{labeledHeatmap}
\title{ Produce a labeled heatmap plot }
\description{
Plots a heatmap plot with color legend, row and column annotation, and optional text within the heatmap.
}
\usage{
labeledHeatmap(
Matrix,
xLabels, yLabels = NULL,
xSymbols = NULL, ySymbols = NULL,
colorLabels = NULL,
xColorLabels = FALSE, yColorLabels = FALSE,
checkColorsValid = TRUE,
invertColors = FALSE,
setStdMargins = TRUE,
xLabelsPosition = "bottom",
xLabelsAngle = 45,
xLabelsAdj = 1,
yLabelsPosition = "left",
xColorWidth = 2 * strheight("M"),
yColorWidth = 2 * strwidth("M"),
xColorOffset = strheight("M")/3,
yColorOffset = strwidth("M")/3,
colors = NULL,
naColor = "grey",
textMatrix = NULL,
cex.text = NULL,
textAdj = c(0.5, 0.5),
cex.lab = NULL,
cex.lab.x = cex.lab,
cex.lab.y = cex.lab,
colors.lab.x = 1,
colors.lab.y = 1,
font.lab.x = 1,
font.lab.y = 1,
bg.lab.x = NULL,
bg.lab.y = NULL,
x.adj.lab.y = 1,
plotLegend = TRUE,
keepLegendSpace = plotLegend,
# Separator line specification
verticalSeparator.x = NULL,
verticalSeparator.col = 1,
verticalSeparator.lty = 1,
verticalSeparator.lwd = 1,
verticalSeparator.ext = 0,
verticalSeparator.interval = 0,
horizontalSeparator.y = NULL,
horizontalSeparator.col = 1,
horizontalSeparator.lty = 1,
horizontalSeparator.lwd = 1,
horizontalSeparator.ext = 0,
horizontalSeparator.interval = 0,
# optional restrictions on which rows and columns to actually show
showRows = NULL,
showCols = NULL,
...)
}
\arguments{
\item{Matrix}{ numerical matrix to be plotted in the heatmap. }
\item{xLabels}{ labels for the columns. See Details. }
\item{yLabels}{ labels for the rows. See Details. }
\item{xSymbols}{ additional labels used when \code{xLabels} are interpreted as colors. See Details. }
\item{ySymbols}{ additional labels used when \code{yLabels} are interpreted as colors. See Details. }
\item{colorLabels}{ logical: should \code{xLabels} and \code{yLabels} be interpreted as colors? If
given, overrides \code{xColorLabels} and \code{yColorLabels} below.}
\item{xColorLabels}{ logical: should \code{xLabels} be interpreted as colors? }
\item{yColorLabels}{ logical: should \code{yLabels} be interpreted as colors? }
\item{checkColorsValid}{ logical: should given colors be checked for validity
against the output of \code{colors()} ? If this argument is \code{FALSE}, invalid color specification
will trigger an error.}
\item{invertColors}{ logical: should the color order be inverted? }
\item{setStdMargins}{ logical: should standard margins be set before calling the plot function?
Standard margins depend on \code{colorLabels}: they are wider for text labels and narrower for color
labels. The defaults are static, that is the function does not attempt to guess the optimal margins. }
\item{xLabelsPosition}{ a character string specifying the position of labels for the columns.
Recognized values are (unique abbreviations of) \code{"top", "bottom"}. }
\item{xLabelsAngle}{ angle by which the column labels should be rotated. }
\item{xLabelsAdj}{ justification parameter for column labels. See \code{\link{par}} and the
description of parameter \code{"adj"}. }
\item{yLabelsPosition}{ a character string specifying the position of labels for the rows.
Recognized values are (unique abbreviations of) \code{"left", "right"}. }
\item{xColorWidth}{ width of the color labels for the x axis expressed in user corrdinates.}
\item{yColorWidth}{ width of the color labels for the y axis expressed in user coordinates.}
\item{xColorOffset}{ gap between the y axis and color labels, in user coordinates.}
\item{yColorOffset}{ gap between the x axis and color labels, in user coordinates.}
\item{colors}{ color pallette to be used in the heatmap. Defaults to \code{\link{heat.colors}}. }
\item{naColor}{ color to be used for encoding missing data. }
\item{textMatrix}{ optional text entries for each cell. Either a matrix of the same dimensions as
\code{Matrix} or a vector of the same length as the number of entries in \code{Matrix}. }
\item{cex.text}{ character expansion factor for \code{textMatrix}. }
\item{textAdj}{Adjustment for the entries in the text matrix. See the \code{adj} argument to
\code{\link{text}}.}
\item{cex.lab}{ character expansion factor for text labels labeling the axes. }
\item{cex.lab.x}{ character expansion factor for text labels labeling the x axis. Overrides \code{cex.lab}
above. }
\item{cex.lab.y}{ character expansion factor for text labels labeling the y axis. Overrides \code{cex.lab}
above. }
\item{colors.lab.x}{colors for character labels or symbols along x axis.}
\item{colors.lab.y}{colors for character labels or symbols along y axis.}
\item{font.lab.x}{integer specifying font for labels or symbols along x axis. See \code{\link{text}}.}
\item{font.lab.y}{integer specifying font for labels or symbols along y axis. See \code{\link{text}}.}
\item{bg.lab.x}{background color for the margin along the x axis.}
\item{bg.lab.y}{background color for the margin along the y axs.}
\item{x.adj.lab.y}{Justification of labels for the y axis along the x direction. A value of 0
produces left-justified text, 0.5 (the default) centered
text and 1 right-justified text. }
\item{plotLegend}{ logical: should a color legend be plotted? }
\item{keepLegendSpace}{ logical: if the color legend is not drawn, should the space be left empty
(\code{TRUE}), or should the heatmap fill the space (\code{FALSE})?}
\item{verticalSeparator.x}{indices of columns in input \code{Matrix} after
which separator lines (vertical lines between columns)
should be drawn. \code{NULL} means no lines will be drawn.}
\item{verticalSeparator.col}{color(s) of the vertical separator lines. Recycled if need be. }
\item{verticalSeparator.lty}{line type of the vertical separator lines. Recycled if need be. }
\item{verticalSeparator.lwd}{line width of the vertical separator lines. Recycled if need be. }
\item{verticalSeparator.ext}{number giving the extension of the separator line into the margin as a fraction
of the margin width. 0 means no extension, 1 means extend all the way through the margin. }
\item{verticalSeparator.interval}{number giving the interval for vertical separators. If larger than zero, vertical
separators will be drawn after every \code{verticalSeparator.interval} of displayed columns.
Used only when length of \code{verticalSeparator.x} is zero. }
\item{horizontalSeparator.y}{indices of rows in input \code{Matrix} after which separator lines (horizontal lines
between rows) should be drawn. \code{NULL} means no lines will be drawn.}
\item{horizontalSeparator.col}{ color(s) of the horizontal separator lines. Recycled if need be. }
\item{horizontalSeparator.lty}{line type of the horizontal separator lines. Recycled if need be. }
\item{horizontalSeparator.lwd}{line width of the horizontal separator lines. Recycled if need be. }
\item{horizontalSeparator.ext}{number giving the extension of the separator line into the margin as a
fraction of the margin width. 0 means no extension, 1 means extend all the way through the margin. }
\item{horizontalSeparator.interval}{number giving the interval for horizontal separators. If larger than zero, horizontal
separators will be drawn after every \code{horizontalSeparator.interval} of displayed rows.
Used only when length of \code{horizontalSeparator.y} is zero. }
\item{showRows}{A numeric vector giving the indices of rows that are actually to be shown. Defaults to all rows.}
\item{showCols}{A numeric vector giving the indices of columns that are actually to be shown. Defaults to all columns.}
\item{\dots}{ other arguments to function \code{\link{heatmap}}. }
}
\details{
The function basically plots a standard heatmap plot of the given \code{Matrix} and embellishes it with
row and column labels and/or with text within the heatmap entries. Row and column labels can be either
character strings or color squares, or both.
To get simple text labels, use \code{colorLabels=FALSE} and pass the desired row and column labels in
\code{yLabels} and \code{xLabels}, respectively.
To label rows and columns by color squares, use
\code{colorLabels=TRUE}; \code{yLabels} and \code{xLabels} are then expected to represent valid colors.
For reasons of compatibility with other functions, each entry in \code{yLabels} and \code{xLabels} is
expected to consist of a color designation preceded by 2 characters: an example would be
\code{MEturquoise}. The first two characters can be arbitrary, they are stripped.
Any labels that do not represent valid colors will be considered text labels and printed in full,
allowing the user to mix text and color labels.
It is also possible to label rows and columns by both color squares and additional text annotation. To
achieve this, use the above technique to get color labels and, additionally, pass the desired text
annotation in the \code{xSymbols} and \code{ySymbols} arguments.
}
\value{
None.
}
\author{ Peter Langfelder}
\seealso{ \code{\link{heatmap}}, \code{\link{colors}} }
\examples{
# This example illustrates 4 main ways of annotating columns and rows of a heatmap.
# Copy and paste the whole example into an R session with an interactive plot window;
# alternatively, you may replace the command sizeGrWindow below by opening
# another graphical device such as pdf.
# Generate a matrix to be plotted
nCol = 8; nRow = 7;
mat = matrix(runif(nCol*nRow, min = -1, max = 1), nRow, nCol);
rowColors = standardColors(nRow);
colColors = standardColors(nRow + nCol)[(nRow+1):(nRow + nCol)];
rowColors;
colColors;
sizeGrWindow(9,7)
par(mfrow = c(2,2))
par(mar = c(4, 5, 4, 6));
# Label rows and columns by text:
labeledHeatmap(mat, xLabels = colColors, yLabels = rowColors,
colors = greenWhiteRed(50),
setStdMargins = FALSE,
textMatrix = signif(mat, 2),
main = "Text-labeled heatmap");
# Label rows and columns by colors:
rowLabels = paste("ME", rowColors, sep="");
colLabels = paste("ME", colColors, sep="");
labeledHeatmap(mat, xLabels = colLabels, yLabels = rowLabels,
colorLabels = TRUE,
colors = greenWhiteRed(50),
setStdMargins = FALSE,
textMatrix = signif(mat, 2),
main = "Color-labeled heatmap");
# Mix text and color labels:
rowLabels[3] = "Row 3";
colLabels[1] = "Column 1";
labeledHeatmap(mat, xLabels = colLabels, yLabels = rowLabels,
colorLabels = TRUE,
colors = greenWhiteRed(50),
setStdMargins = FALSE,
textMatrix = signif(mat, 2),
main = "Mix-labeled heatmap");
# Color labels and additional text labels
rowLabels = paste("ME", rowColors, sep="");
colLabels = paste("ME", colColors, sep="");
extraRowLabels = paste("Row", c(1:nRow));
extraColLabels = paste("Column", c(1:nCol));
# Extend margins to fit all labels
par(mar = c(6, 6, 4, 6));
labeledHeatmap(mat, xLabels = colLabels, yLabels = rowLabels,
xSymbols = extraColLabels,
ySymbols = extraRowLabels,
colorLabels = TRUE,
colors = greenWhiteRed(50),
setStdMargins = FALSE,
textMatrix = signif(mat, 2),
main = "Text- + color-labeled heatmap");
}
\keyword{ hplot }% __ONLY ONE__ keyword per line
|
/ZirkoniumOld/AudioUnit/Zirk2.r | no_license | eriser/zirkonium | R | false | false | 3,657 | r | ||
## run_analysis.R - course assignment for Coursera Data Science - Getting and Cleaning Data
## Builds a tidy data set of per-subject / per-activity feature averages from
## the UCI HAR smartphone data. Expects the dataset's train/ and test/
## folders plus features.txt and activity_labels.txt in the working directory.

## 1. Merges the training and the test sets to create one data set.
###################################################################
## read x (feature matrix), y (activity id) and subject TRAINING data
x_train <- read.table("./train/X_train.txt")
y_train <- read.table("./train/y_train.txt")
subject_train <- read.table("./train/subject_train.txt")
## read x, y and subject TEST data
x_test <- read.table("./test/X_test.txt")
y_test <- read.table("./test/y_test.txt")
subject_test <- read.table("./test/subject_test.txt")
## stack test on top of train; the same order is used for x, y and subject
## so the three data frames stay row-aligned
x_data <- rbind(x_test, x_train)
y_data <- rbind(y_test, y_train)
subject_data <- rbind(subject_test, subject_train)

## 2. Extracts only the measurements on the mean and standard deviation for each measurement.
#############################################################################################
## features.txt: column 1 = feature index, column 2 = feature name
all_measurements <- read.table("features.txt")
## indices of features named "...-mean()" or "...-std()"; the parentheses are
## escaped so e.g. "meanFreq" variants are not matched
measurements <- grep("-(mean|std)\\(\\)", all_measurements[, 2])
## keep only the mean/std measurement columns
x_data_sub <- x_data[, measurements]

## 3. Uses descriptive activity names to name the activities in the data set
## &
## 4. Appropriately labels the data set with descriptive variable names.
########################################################################
## label the measurement columns with their original feature names
names(x_data_sub) <- all_measurements[measurements, 2]
## activity_labels.txt: column 1 = activity id, column 2 = activity name
activity <- read.table("activity_labels.txt")
## replace numeric activity ids with their descriptive labels
y_data[, 1] <- activity[y_data[, 1], 2]
names(y_data) <- "activity"
names(subject_data) <- "subject"
## combine subject, activity and the selected measurements into one data set
data <- cbind(subject_data, y_data, x_data_sub)

## 5. From the data set in step 4, creates a second, independent tidy data set with the average of each variable for each activity and each subject.
####################################################################################################################################################
## columns 3:68 are the selected measurements (cols 1:2 are the grouping keys);
## assumes the grep above selects 66 features -- confirm against features.txt
tidy <- aggregate(data[, 3:68], by = list(activity = data$activity, subject = data$subject), mean)
## write the tidy output; spell out row.names (the original relied on partial
## argument matching via "row.name=")
write.table(tidy, "tidy_mean.txt", row.names = FALSE)
| /run_analysis.R | no_license | jarmojam/getting_and_cleaning_data | R | false | false | 2,650 | r | ## run_analysis.R - course assignment for Coursera Data Science - Getting and Cleaning Data
## Builds a tidy data set of per-subject / per-activity feature averages from
## the UCI HAR smartphone data. Expects the dataset's train/ and test/
## folders plus features.txt and activity_labels.txt in the working directory.

## 1. Merges the training and the test sets to create one data set.
###################################################################
## read x (feature matrix), y (activity id) and subject TRAINING data
x_train <- read.table("./train/X_train.txt")
y_train <- read.table("./train/y_train.txt")
subject_train <- read.table("./train/subject_train.txt")
## read x, y and subject TEST data
x_test <- read.table("./test/X_test.txt")
y_test <- read.table("./test/y_test.txt")
subject_test <- read.table("./test/subject_test.txt")
## stack test on top of train; the same order is used for x, y and subject
## so the three data frames stay row-aligned
x_data <- rbind(x_test, x_train)
y_data <- rbind(y_test, y_train)
subject_data <- rbind(subject_test, subject_train)

## 2. Extracts only the measurements on the mean and standard deviation for each measurement.
#############################################################################################
## features.txt: column 1 = feature index, column 2 = feature name
all_measurements <- read.table("features.txt")
## indices of features named "...-mean()" or "...-std()"; the parentheses are
## escaped so e.g. "meanFreq" variants are not matched
measurements <- grep("-(mean|std)\\(\\)", all_measurements[, 2])
## keep only the mean/std measurement columns
x_data_sub <- x_data[, measurements]

## 3. Uses descriptive activity names to name the activities in the data set
## &
## 4. Appropriately labels the data set with descriptive variable names.
########################################################################
## label the measurement columns with their original feature names
names(x_data_sub) <- all_measurements[measurements, 2]
## activity_labels.txt: column 1 = activity id, column 2 = activity name
activity <- read.table("activity_labels.txt")
## replace numeric activity ids with their descriptive labels
y_data[, 1] <- activity[y_data[, 1], 2]
names(y_data) <- "activity"
names(subject_data) <- "subject"
## combine subject, activity and the selected measurements into one data set
data <- cbind(subject_data, y_data, x_data_sub)

## 5. From the data set in step 4, creates a second, independent tidy data set with the average of each variable for each activity and each subject.
####################################################################################################################################################
## columns 3:68 are the selected measurements (cols 1:2 are the grouping keys);
## assumes the grep above selects 66 features -- confirm against features.txt
tidy <- aggregate(data[, 3:68], by = list(activity = data$activity, subject = data$subject), mean)
## write the tidy output; spell out row.names (the original relied on partial
## argument matching via "row.name=")
write.table(tidy, "tidy_mean.txt", row.names = FALSE)
## CLI setup and shared column definitions for validating drug-kinase
## binding predictions against a gold-standard file.
## library() rather than require(): a missing package should fail loudly at
## startup instead of returning FALSE and crashing later.
library(rjson)
library(readr)
library(tidyr)
library(dplyr)
library(magrittr)
library(stringr)
library(argparse)

## Command-line interface: both file paths are mandatory.
parser <- ArgumentParser(description = "return status json")
parser$add_argument(
  "-s",
  "--submission_file",
  type = "character",
  required = TRUE,
  help = "submission file")
parser$add_argument(
  "-g",
  "--gold_standard",
  type = "character",
  required = TRUE,
  help = "gold_standard file")
args <- parser$parse_args()

## Identifier columns shared by the submission and the gold standard.
## Kept as a list (not c()): c(JOIN_COLUMNS, ...) below therefore also yields
## a list, which downstream code may rely on -- confirm before changing.
JOIN_COLUMNS <- list(
  "Compound_SMILES",
  "Compound_InchiKeys",
  "Compound_Name",
  "UniProt_Id",
  "Entrez_Gene_Symbol",
  "DiscoveRx_Gene_Symbol"
)
## Column that must hold the predicted pKd values in a submission.
PREDICTION_COLUMN <- "pKd_[M]_pred"
## Measured pKd column; must appear only in the gold standard, never in a
## submission.
GOLDSTANDARD_COLUMN <- "pKd_[M]"
## Every column a valid submission must contain.
REQUIRED_COLUMNS <- c(JOIN_COLUMNS, PREDICTION_COLUMN)
# Validate a submission against the gold standard and return the outcome as
# a JSON string with fields `prediction_file_status` and
# `prediction_file_errors` (newline-separated reasons; empty when valid).
get_submission_status_json <- function(submission_file, validation_file){
  check <- check_submission_file(submission_file, validation_file)
  errors <- if (check$status == "VALIDATED") {
    ""
  } else {
    stringr::str_c(check$reasons, collapse = "\n")
  }
  rjson::toJSON(list(
    'prediction_file_errors' = errors,
    'prediction_file_status' = check$status))
}
# Run the full validation pipeline -- readability, then structure, then
# values -- stopping at the first stage that marks the submission INVALID.
# Returns a list with `status` ("VALIDATED"/"INVALID") and `reasons`.
check_submission_file <- function(submission_file, validation_file){
  validation_df <- readr::read_csv(validation_file)
  status <- list("status" = "VALIDATED", "reasons" = c())
  status <- check_submission_file_readable(status, submission_file)
  if (status$status == "INVALID") {
    return(status)
  }
  submission_df <- readr::read_csv(submission_file)
  status <- check_submission_structure(status, validation_df, submission_df)
  if (status$status == "INVALID") {
    return(status)
  }
  check_submission_values(status, submission_df)
}
# Mark the status INVALID (carrying the parse error message as the reason)
# when the submission file cannot be read as a CSV data frame.
check_submission_file_readable <- function(status, submission_file){
  parsed <- try(readr::read_csv(submission_file), silent = TRUE)
  if (!is.data.frame(parsed)) {
    status$status <- "INVALID"
    # For a "try-error", the first element is the error message text.
    status$reasons <- parsed[[1]]
  }
  status
}
# Structural validation of a submission data frame. Flags, in order:
#   * presence of the gold-standard column (not allowed in submissions),
#   * absence of the prediction column,
#   * extra/missing columns relative to REQUIRED_COLUMNS,
#   * extra/missing rows relative to the validation template (detected via
#     natural joins on the shared identifier columns: an unmatched row
#     leaves the other table's value column NA).
# Returns the updated status list.
check_submission_structure <- function(status, validation_df, submission_df){
  if (GOLDSTANDARD_COLUMN %in% colnames(submission_df)) {
    status$status <- "INVALID"
    status$reasons <- stringr::str_c("Submission file cannot have column: ",
                                     GOLDSTANDARD_COLUMN)
    return(status)
  }
  if (!PREDICTION_COLUMN %in% colnames(submission_df)) {
    status$status <- "INVALID"
    status$reasons <- stringr::str_c("Submission file missing column: ",
                                     PREDICTION_COLUMN)
    return(status)
  }
  # Column-level differences against the required set.
  extra_columns <- submission_df %>%
    colnames() %>%
    setdiff(REQUIRED_COLUMNS) %>%
    unlist()
  missing_columns <- REQUIRED_COLUMNS %>%
    setdiff(colnames(submission_df)) %>%
    unlist()
  # Row-level differences. seq_len(nrow(.)) is safe for an empty join
  # result, where 1:nrow(.) would produce c(1, 0) and error in mutate().
  extra_rows <-
    left_join(submission_df, validation_df) %>%
    mutate(n_row = seq_len(nrow(.))) %>%
    filter(is.na(`pKd_[M]`)) %>%
    use_series(n_row)
  missing_rows <-
    left_join(validation_df, submission_df) %>%
    mutate(n_row = seq_len(nrow(.))) %>%
    filter(is.na(`pKd_[M]_pred`)) %>%
    use_series(n_row)
  invalid_item_list <- list(
    extra_columns,
    missing_columns,
    extra_rows,
    missing_rows
  )
  error_messages <- c(
    "Submission file has extra columns: ",
    "Submission file has missing columns: ",
    "Submission file has extra rows: ",
    "Submission file has missing rows: "
  )
  # seq_along() instead of 1:length() guards the (hypothetical) empty case.
  for (i in seq_along(error_messages)) {
    status <- update_submission_status_and_reasons(
      status,
      invalid_item_list[[i]],
      error_messages[[i]])
  }
  return(status)
}
# Value-level validation of the prediction column. Coerces `pKd_[M]_pred`
# to numeric and flags:
#   * NA values (missing or non-numeric entries),
#   * infinite values,
#   * zero variance (all predictions identical).
# Returns the updated status list.
check_submission_values <- function(status, submission_df){
  prediction_df <- submission_df %>%
    dplyr::mutate(prediction = as.numeric(`pKd_[M]_pred`))
  contains_na <- prediction_df %>%
    magrittr::use_series(prediction) %>%
    is.na() %>%
    any()
  contains_inf <- prediction_df %>%
    magrittr::use_series(prediction) %>%
    is.infinite() %>%
    any()
  if (contains_na) {
    status$status <- "INVALID"
    # Append rather than overwrite, consistent with the other checks.
    status$reasons <- c(status$reasons, "Submission_df missing numeric values")
  }
  if (contains_inf) {
    status$status <- "INVALID"
    status$reasons <- c(status$reasons, "Submission_df contains the value Inf")
  }
  if (status$status == "INVALID") {
    return(status)
  }
  variance <- prediction_df %>%
    magrittr::use_series(prediction) %>%
    var()
  # var() returns NA for a single prediction; a bare `if (NA)` would error,
  # so guard the comparison with isTRUE().
  if (isTRUE(variance == 0)) {
    status$status <- "INVALID"
    status$reasons <- c(status$reasons,
                        "Submission_df predictions have a variance of 0")
  }
  return(status)
}
# Sample identifiers of `df`: every column name except `cell_type`.
get_samples_from_df <- function(df){
  samples_df <- dplyr::select(df, -cell_type)
  colnames(samples_df)
}
# Values of the `item_col` column that occur more than once in `df`.
# NOTE(review): the grouping column name is hard-coded -- `df` must
# literally contain a column named `item_col`; confirm callers rename
# their column before calling this.
get_non_unique_items <- function(df){
df %>%
dplyr::group_by(item_col) %>%
dplyr::summarise(count = dplyr::n()) %>%
dplyr::filter(count > 1) %>%
magrittr::use_series(item_col)
}
# Fold one validation result into the running status. When `invalid_items`
# is non-empty, the status flips to "INVALID" and a message of the form
# "<error_message><item1, item2, ...>" is appended to the accumulated
# reasons; otherwise the incoming status passes through unchanged.
update_submission_status_and_reasons <- function(
  current_status, invalid_items, error_message){
  if (length(invalid_items) == 0) {
    return(current_status)
  }
  detail <- stringr::str_c(invalid_items, collapse = ", ")
  reason <- stringr::str_c(error_message, detail)
  list("status" = "INVALID",
       "reasons" = c(current_status$reasons, reason))
}
# Entry point: validate the submission and write the status JSON for the
# challenge infrastructure to pick up.
json <- get_submission_status_json(args$submission_file, args$gold_standard)
write(json, "results.json")
| /round1b/validate/bin/validate.R | permissive | allaway/IDG-DREAM-Drug-Kinase-Challenge | R | false | false | 6,010 | r | require(rjson)
require(readr)
require(tidyr)
require(dplyr)
require(magrittr)
require(stringr)
library(argparse)
parser = ArgumentParser(description = "return status json")
parser$add_argument(
"-s",
"--submission_file",
type = "character",
required = TRUE,
help = "submission file")
parser$add_argument(
"-g",
"--gold_standard",
type = "character",
required = TRUE,
help = "gold_standard file")
args <- parser$parse_args()
JOIN_COLUMNS = list(
"Compound_SMILES",
"Compound_InchiKeys",
"Compound_Name",
"UniProt_Id",
"Entrez_Gene_Symbol",
"DiscoveRx_Gene_Symbol"
)
PREDICTION_COLUMN = "pKd_[M]_pred"
GOLDSTANDARD_COLUMN = "pKd_[M]"
REQUIRED_COLUMNS = c(JOIN_COLUMNS, PREDICTION_COLUMN)
get_submission_status_json <- function(submission_file, validation_file){
status <- check_submission_file(submission_file, validation_file)
if(status$status == "VALIDATED"){
result_list = list(
'prediction_file_errors' = "",
'prediction_file_status' = status$status)
} else {
result_list = list(
'prediction_file_errors' = stringr::str_c(
status$reasons,
collapse = "\n"),
'prediction_file_status' = status$status)
}
return(rjson::toJSON(result_list))
}
check_submission_file <- function(submission_file, validation_file){
validation_df <- readr::read_csv(validation_file)
status <- list("status" = "VALIDATED", "reasons" = c())
status <- check_submission_file_readable(status, submission_file)
if(status$status == "INVALID") return(status)
submission_df <- readr::read_csv(submission_file)
status <- check_submission_structure(status, validation_df, submission_df)
if(status$status == "INVALID") return(status)
status <- check_submission_values(status, submission_df)
return(status)
}
check_submission_file_readable <- function(status, submission_file){
result <- try(readr::read_csv(submission_file), silent = TRUE)
if (is.data.frame(result)){
return(status)
} else {
status$status = "INVALID"
status$reasons = result[[1]]
return(status)
}
}
check_submission_structure <- function(status, validation_df, submission_df){
if(GOLDSTANDARD_COLUMN %in% colnames(submission_df)) {
status$status = "INVALID"
status$reasons = str_c("Submission file cannot have column: ",
GOLDSTANDARD_COLUMN)
return(status)
}
if(!PREDICTION_COLUMN %in% colnames(submission_df)) {
status$status = "INVALID"
status$reasons = str_c("Submission file missing column: ",
PREDICTION_COLUMN)
return(status)
}
extra_columns <- submission_df %>%
colnames() %>%
setdiff(REQUIRED_COLUMNS) %>%
unlist()
missing_columns <- REQUIRED_COLUMNS %>%
setdiff(colnames(submission_df)) %>%
unlist()
extra_rows <-
left_join(submission_df, validation_df) %>%
mutate(n_row = 1:nrow(.)) %>%
filter(is.na(`pKd_[M]`)) %>%
use_series(n_row)
missing_rows <-
left_join(validation_df, submission_df) %>%
mutate(n_row = 1:nrow(.)) %>%
filter(is.na(`pKd_[M]_pred`)) %>%
use_series(n_row)
invalid_item_list <- list(
extra_columns,
missing_columns,
extra_rows,
missing_rows
)
error_messages <- c(
"Submission file has extra columns: ",
"Submission file has missing columns: ",
"Submission file has extra rows: ",
"Submission file has missing rows: "
)
for(i in 1:length(error_messages)){
status <- update_submission_status_and_reasons(
status,
invalid_item_list[[i]],
error_messages[[i]])
}
return(status)
}
check_submission_values <- function(status, submission_df){
prediction_df <- submission_df %>%
dplyr::mutate(prediction = as.numeric(`pKd_[M]_pred`))
contains_na <- prediction_df %>%
magrittr::use_series(prediction) %>%
is.na() %>%
any
contains_inf <- prediction_df %>%
magrittr::use_series(prediction) %>%
is.infinite() %>%
any
if(contains_na) {
status$status = "INVALID"
status$reasons = "Submission_df missing numeric values"
}
if(contains_inf) {
status$status = "INVALID"
status$reasons = c(status$reasons, "Submission_df contains the value Inf")
}
if(status$status == "INVALID"){
return(status)
}
variance <- prediction_df %>%
magrittr::use_series(prediction) %>%
var()
if(variance == 0){
status$status = "INVALID"
status$reasons = c(status$reasons, "Submission_df predictions have a variance of 0")
}
return(status)
}
get_samples_from_df <- function(df){
df %>%
dplyr::select(-cell_type) %>%
colnames()
}
get_non_unique_items <- function(df){
df %>%
dplyr::group_by(item_col) %>%
dplyr::summarise(count = dplyr::n()) %>%
dplyr::filter(count > 1) %>%
magrittr::use_series(item_col)
}
update_submission_status_and_reasons <- function(
current_status, invalid_items, error_message){
if (length(invalid_items) > 0){
updated_status <- "INVALID"
updated_reasons <- invalid_items %>%
stringr::str_c(collapse = ", ") %>%
stringr::str_c(error_message, .) %>%
c(current_status$reasons, .)
} else {
updated_status <- current_status$status
updated_reasons <- current_status$reasons
}
list("status" = updated_status, "reasons" = updated_reasons)
}
json <- get_submission_status_json(args$submission_file, args$gold_standard)
write(json, "results.json")
|
# Elastic-net fit for the urinary-tract correlation training set:
# 10-fold cross-validated Gaussian model, MSE criterion, alpha = 0.35.
# NOTE(review): the output file suffix "_045" does not match alpha = 0.35
# -- confirm which value this run is meant to record.
library(glmnet)
# Column 1 is the response; columns 4 onward are the predictors.
mydata = read.table("../../../../TrainingSet/FullSet/Correlation/urinary_tract.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
# Fixed seed so the CV fold assignment is reproducible.
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mse",alpha=0.35,family="gaussian",standardize=TRUE)
# Append the fitted glmnet path to the per-run log file.
sink('./urinary_tract_045.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
| /Model/EN/Correlation/urinary_tract/urinary_tract_045.R | no_license | esbgkannan/QSMART | R | false | false | 364 | r | library(glmnet)
mydata = read.table("../../../../TrainingSet/FullSet/Correlation/urinary_tract.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mse",alpha=0.35,family="gaussian",standardize=TRUE)
sink('./urinary_tract_045.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
|
# Auto-generated example script for languageR::makeSplineData.fnc; the
# example body is wrapped in "Not run" (##D lines) and is kept verbatim.
library(languageR)
### Name: makeSplineData.fnc
### Title: generate simulated data set with nonlinear function
### Aliases: makeSplineData.fnc
### Keywords: regression
### ** Examples
## Not run:
##D require("rms")
##D require("optimx")
##D require("lmerTest")
##D dfr = makeSplineData.fnc()
##D table(dfr$Subject)
##D xylowess.fnc(Y ~ X | Subject, data = dfr)
##D 
##D dfr.lmer = lmer(Y ~ rcs(X, 5) + (1|Subject), data = dfr,
##D control=lmerControl(optimizer="optimx",optCtrl=list(method="nlminb")))
##D dfr$fittedLMER = as.vector(dfr.lmer@X %*% fixef(dfr.lmer))
##D 
##D dfr.dd = datadist(dfr)
##D options(datadist='dfr.dd')
##D dfr.ols = ols(Y~Subject+rcs(X), data=dfr, x=T, y=T)
##D dfr$fittedOLS = fitted(dfr.ols)
##D 
##D # we plot the lmer() fit in blue, the ols() fit in red (both adjusted for
##D # subject S1), and plot the underlying model in green
##D plot(dfr[dfr$Subject=="S1",]$X, dfr[dfr$Subject=="S1",]$fittedLMER +
##D ranef(dfr.lmer)[[1]]["S1",], type="l", col="blue",
##D ylim = range(dfr$y + ranef(dfr.lmer)[[1]]["S1",],
##D dfr[dfr$Subject == "S1",]$fittedLMER,
##D dfr[dfr$Subject == "S1",]$fittedOLS), xlab="X", ylab="Y")
##D lines(dfr[dfr$Subject=="S1",]$X, dfr[dfr$Subject=="S1",]$fittedOLS, col="red")
##D lines(dfr[dfr$Subject=="S1",]$X, dfr[dfr$Subject=="S1",]$y+ranef(dfr.lmer)[[1]]["S1",],
##D col="green")
##D legend(2,29,c("30+cos(x)", "lmer (S1)", "ols (S1)"), lty=rep(1,3),
##D col=c("green", "blue", "red"))
## End(Not run)
| /data/genthat_extracted_code/languageR/examples/makeSplineData.fnc.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 1,486 | r | library(languageR)
### Name: makeSplineData.fnc
### Title: generate simulated data set with nonlinear function
### Aliases: makeSplineData.fnc
### Keywords: regression
### ** Examples
## Not run:
##D require("rms")
##D require("optimx")
##D require("lmerTest")
##D dfr = makeSplineData.fnc()
##D table(dfr$Subject)
##D xylowess.fnc(Y ~ X | Subject, data = dfr)
##D
##D dfr.lmer = lmer(Y ~ rcs(X, 5) + (1|Subject), data = dfr,
##D control=lmerControl(optimizer="optimx",optCtrl=list(method="nlminb")))
##D dfr$fittedLMER = as.vector(dfr.lmer@X %*% fixef(dfr.lmer))
##D
##D dfr.dd = datadist(dfr)
##D options(datadist='dfr.dd')
##D dfr.ols = ols(Y~Subject+rcs(X), data=dfr, x=T, y=T)
##D dfr$fittedOLS = fitted(dfr.ols)
##D
##D # we plot the lmer() fit in blue, the ols() fit in red (both adjusted for
##D # subject S1), and plot the underlying model in green
##D plot(dfr[dfr$Subject=="S1",]$X, dfr[dfr$Subject=="S1",]$fittedLMER +
##D ranef(dfr.lmer)[[1]]["S1",], type="l", col="blue",
##D ylim = range(dfr$y + ranef(dfr.lmer)[[1]]["S1",],
##D dfr[dfr$Subject == "S1",]$fittedLMER,
##D dfr[dfr$Subject == "S1",]$fittedOLS), xlab="X", ylab="Y")
##D lines(dfr[dfr$Subject=="S1",]$X, dfr[dfr$Subject=="S1",]$fittedOLS, col="red")
##D lines(dfr[dfr$Subject=="S1",]$X, dfr[dfr$Subject=="S1",]$y+ranef(dfr.lmer)[[1]]["S1",],
##D col="green")
##D legend(2,29,c("30+cos(x)", "lmer (S1)", "ols (S1)"), lty=rep(1,3),
##D col=c("green", "blue", "red"))
## End(Not run)
|
# Shiny UI: a static documentation page, a sidebar of node filters (name
# and type selectors rendered server-side via uiOutput), and a main panel
# holding the results data table.
shinyUI(
fluidPage(
includeHTML("documentation.html"),
sidebarPanel(
h4(strong("Filter by Node")),
uiOutput("nameUI"),
uiOutput("typeUI")
),
mainPanel(
fluidRow(DT::dataTableOutput("table"))
)
)
) | /ui.R | no_license | dmd123/DDP_Project2 | R | false | false | 388 | r | shinyUI(
fluidPage(
includeHTML("documentation.html"),
sidebarPanel(
h4(strong("Filter by Node")),
uiOutput("nameUI"),
uiOutput("typeUI")
),
mainPanel(
fluidRow(DT::dataTableOutput("table"))
)
)
) |
################################################################################
######################################### Global settings, packages and folders
################################################################################
# Clear the environment.
# NOTE(review): rm(list = ls()) in a script is discouraged; prefer a fresh
# R session instead.
rm(list = ls())
# General digit display settings.
options(digits = 5)
# Packages for GLMs and mapping.
library(ggplot2)
library(xtable)
library(gridExtra)
library(devtools)
library(psych)
library(maptools)
library(osmar)
library(plotly)
library(GISTools)
library(rgdal)
# Reference folder (where input files live and outputs are written).
# NOTE(review): setwd(getwd()) is a no-op; presumably a placeholder for a
# real path -- confirm.
setwd(getwd())
################################################################################
####################################################### OSM data extraction
################################################################################
############################### Parameter definitions for the working cities
# Map extent (metres per side of the bounding box).
size<-1000
# Centre coordinates of each location to export.
## Paris
paris_lon<-2.338202
paris_lat<-48.873912
## Rome
rome_lon<-12.500969
rome_lat<-41.911136
## Rio
rio_lon<- -43.203816
rio_lat<- -22.983794
## New York
ny_lon<--73.990020
ny_lat<-40.743726
# Helper: square bounding box of `size` metres centred on (x, y).
a_map<-function(x, y) {center_bbox(x, y, size, size)}
# Apply the helper to each city centre.
paris<-a_map(paris_lon,paris_lat)
rome<-a_map(rome_lon,rome_lat)
rio<-a_map(rio_lon,rio_lat)
ny<-a_map(ny_lon,ny_lat)
############################################################# accessing the data
# Define the OSM API source.
src <- osmsource_api()
# Download the OSM data for each bounding box.
paris <- get_osm(paris, source = src)
rome <- get_osm(rome, source = src)
rio <- get_osm(rio, source = src)
ny <- get_osm(ny, source = src)
################################################# polygon and line extraction
# Extract all "building" ways from an osmar object and convert them to a
# SpatialPolygons object.
bg_func <- function(x) {
  bg_ids <- find(x, way(tags(k == "building")))
  bg_ids <- find_down(x, way(bg_ids))
  bg <- subset(x, ids = bg_ids)
  # Return the polygons directly: the original ended with an assignment,
  # which makes a function return its value invisibly.
  as_sp(bg, "polygons")
}
# Extract all "highway" ways from an osmar object and convert them to a
# SpatialLines object.
hw_func <- function(x) {
  hw_ids <- find(x, way(tags(k == "highway")))
  hw_ids <- find_down(x, way(hw_ids))
  hw <- subset(x, ids = hw_ids)
  # Return the lines directly instead of via a trailing (invisible) assignment.
  as_sp(hw, "lines")
}
# Extract the building polygons for each city.
ny_poly<-bg_func(ny)
rio_poly<-bg_func(rio)
rome_poly<-bg_func(rome)
paris_poly<-bg_func(paris)
# Extract the street lines for each city.
ny_line<-hw_func(ny)
rio_line<-hw_func(rio)
rome_line<-hw_func(rome)
paris_line<-hw_func(paris)
############################################################## plotting the maps
# 2x2 grid: buildings in gray with streets overlaid in light gray.
par(mfrow = c(2, 2))
#New York
plot(ny_poly, col = "gray", main='New York')
plot(ny_line, add = TRUE, col = "light gray")
#Rio
plot(rio_poly, col = "gray", main='Rio de Janeiro')
plot(rio_line, add = TRUE, col = "light gray")
#Rome
plot(rome_poly, col = "gray", main='Rome')
plot(rome_line, add = TRUE, col = "light gray")
#Paris
plot(paris_poly, col = "gray", main='Paris')
plot(paris_line, add = TRUE, col = "light gray")
par(mfrow=c(1,1))
################################################################ saving as SHP
# New York
writeOGR(obj=ny_poly, dsn="ny_builgings", layer="ny_builgings", driver="ESRI Shapefile")
writeOGR(obj=ny_line, dsn="ny_streets", layer="ny_streets", driver="ESRI Shapefile")
# Rio de Janeiro
writeOGR(obj=rio_poly, dsn="rio_builgings", layer="rio_builgings", driver="ESRI Shapefile")
writeOGR(obj=rio_line, dsn="rio_streets", layer="rio_streets", driver="ESRI Shapefile")
# Paris
writeOGR(obj=paris_poly, dsn="paris_builgings", layer="paris_builgings", driver="ESRI Shapefile")
writeOGR(obj=paris_line, dsn="paris_streets", layer="paris_streets", driver="ESRI Shapefile")
# Rome
writeOGR(obj=rome_poly, dsn="rome_builgings", layer="rome_builgings", driver="ESRI Shapefile")
writeOGR(obj=rome_line, dsn="rome_streets", layer="rome_streets", driver="ESRI Shapefile")
################################################################################
############################################ Box-counting method application
################################################################################
#
#
#
#
# For now this stage is carried out in ArcMAP; the R implementation is
# planned as future work.
#
#
#
#
################################################################################
############################################# Reading the fractal database
################################################################################
# Load the box-counting results (one row per city/cell-size combination).
# NOTE(review): head = T uses the reassignable T; spell out TRUE when editing.
Fractal<- read.csv("T-fractal.csv", head = T, sep = ";")
colnames(Fractal)<-c('COUNT','NAME','CELL')
# r is the inverse cell size (box-counting scale).
Fractal<-transform(Fractal, r = (1/CELL))
################################################################################
############################################################# Fractal computation
################################################################################
# Fit one Poisson log-link GLM of box count on cell size per city.
# NOTE(review): fractal dimension is conventionally the slope of a
# log(count) ~ log(1/cell) regression -- confirm this parameterization.
FUN<-function(x){
glm(COUNT ~ CELL, family = poisson (link = "log"), data = x)
}
teste<-by(Fractal, Fractal$NAME, FUN)
head(teste)
# Collect per-city coefficients into one data frame (cities as rows).
coef<-lapply(teste, coefficients)
coef<-as.data.frame(coef)
coef<-as.data.frame(t(coef))
write.csv(coef, "teste.csv")
DF<-read.csv("teste.csv", head = T, sep = ",")
colnames(DF) <- c("X", "Intercepto", "Estimador_CELL")
DF<- transform(DF, DF=(log(Intercepto)/log(5)))
# NOTE(review): the transform() above is immediately discarded by this
# re-read of DF.csv -- confirm which DF is the intended final result.
DF<-read.csv("DF.csv", head = T, sep = ",")
### End of fractal dimension computation
################################################################################
################################################ Exploratory graphical analyses
################################################################################
# Log box count against cell size, one colored series per city.
ggplot(Fractal, aes(CELL,log(COUNT)))+
geom_point(aes(color = factor(NAME)))+
geom_line(aes(color = factor(NAME),fill = factor(NAME))) | /Leitura_Dados.R | permissive | mairapinheiro/fractais | R | false | false | 6,090 | r | ################################################################################
######################################### Configurações globais pacotes e pastas
################################################################################
#Limpa o ambiente
rm(list = ls())
#Configurações Gerais dos Dígitos
options(digits = 5)
#pacotes para MLG
library(ggplot2)
library(xtable)
library(gridExtra)
library(devtools)
library(psych)
library(maptools)
library(osmar)
library(plotly)
library(GISTools)
library(rgdal)
# Pasta de referência (A pasta aonde seus arquivos estão e serão salvos. Sempre separe por "\\")
setwd(getwd())
################################################################################
####################################################### Extração de dados do OSM
################################################################################
############################### Definição dos parâmetros das cidades de trabalho
# Escolha da escala do mapa
size<-1000
#escolha a caixa de exportação do local a se trabalhar
## Paris
paris_lon<-2.338202
paris_lat<-48.873912
## Rome
rome_lon<-12.500969
rome_lat<-41.911136
## Rio
rio_lon<- -43.203816
rio_lat<- -22.983794
## New York
ny_lon<--73.990020
ny_lat<-40.743726
# Função de delimitação da área dos mapas
a_map<-function(x, y) {center_bbox(x, y, size, size)}
# Aplicação da função
paris<-a_map(paris_lon,paris_lat)
rome<-a_map(rome_lon,rome_lat)
rio<-a_map(rio_lon,rio_lat)
ny<-a_map(ny_lon,ny_lat)
############################################################# acessando os dados
#definindo o caminho da api
src <- osmsource_api()
#baixando os mapas do OSM
paris <- get_osm(paris, source = src)
rome <- get_osm(rome, source = src)
rio <- get_osm(rio, source = src)
ny <- get_osm(ny, source = src)
################################################# extração de poligonos e linhas
#função de estração dos prédios
bg_func<-function (x){
bg_ids <- find(x, way(tags(k == "building")))
bg_ids <- find_down(x, way(bg_ids))
bg <- subset(x, ids = bg_ids)
bg_poly <- as_sp(bg, "polygons")
}
#função de estração das ruas
hw_func<-function (x){
hw_ids <- find(x, way(tags(k == "highway")))
hw_ids <- find_down(x, way(hw_ids))
hw <- subset(x, ids = hw_ids)
hw_line <- as_sp(hw, "lines")
}
# Extraindo os poligonos das cidades
ny_poly<-bg_func(ny)
rio_poly<-bg_func(rio)
rome_poly<-bg_func(rome)
paris_poly<-bg_func(paris)
# Estraindo as linhas das cidades
ny_line<-hw_func(ny)
rio_line<-hw_func(rio)
rome_line<-hw_func(rome)
paris_line<-hw_func(paris)
############################################################## plotando os mapas
par(mfrow = c(2, 2))
#New York
plot(ny_poly, col = "gray", main='New York')
plot(ny_line, add = TRUE, col = "light gray")
#Rio
plot(rio_poly, col = "gray", main='Rio de Janeiro')
plot(rio_line, add = TRUE, col = "light gray")
#Roma
plot(rome_poly, col = "gray", main='Rome')
plot(rome_line, add = TRUE, col = "light gray")
#Paris
plot(paris_poly, col = "gray", main='Paris')
plot(paris_line, add = TRUE, col = "light gray")
par(mfrow=c(1,1))
################################################################ salvando em SHP
# New York
writeOGR(obj=ny_poly, dsn="ny_builgings", layer="ny_builgings", driver="ESRI Shapefile")
writeOGR(obj=ny_line, dsn="ny_streets", layer="ny_streets", driver="ESRI Shapefile")
# Rio de Janeiro
writeOGR(obj=rio_poly, dsn="rio_builgings", layer="rio_builgings", driver="ESRI Shapefile")
writeOGR(obj=rio_line, dsn="rio_streets", layer="rio_streets", driver="ESRI Shapefile")
# Paris
writeOGR(obj=paris_poly, dsn="paris_builgings", layer="paris_builgings", driver="ESRI Shapefile")
writeOGR(obj=paris_line, dsn="paris_streets", layer="paris_streets", driver="ESRI Shapefile")
# Rome
writeOGR(obj=rome_poly, dsn="rome_builgings", layer="rome_builgings", driver="ESRI Shapefile")
writeOGR(obj=rome_line, dsn="rome_streets", layer="rome_streets", driver="ESRI Shapefile")
################################################################################
############################################ Aplicação do método de box-counting
################################################################################
#
#
#
#
# Por enquanto esta etapa está sendo desenvolvida no ArcMAP. Em breve
# prosseguirei com o desenvolvimoento em R.
#
#
#
#
################################################################################
############################################# Leitura de banco de dados fractais
################################################################################
Fractal<- read.csv("T-fractal.csv", head = T, sep = ";")
colnames(Fractal)<-c('COUNT','NAME','CELL')
Fractal<-transform(Fractal, r = (1/CELL))
################################################################################
############################################################# Cálculo do Fractal
################################################################################
#Usando Cell
FUN<-function(x){
glm(COUNT ~ CELL, family = poisson (link = "log"), data = x)
}
teste<-by(Fractal, Fractal$NAME, FUN)
head(teste)
coef<-lapply(teste, coefficients)
coef<-as.data.frame(coef)
coef<-as.data.frame(t(coef))
write.csv(coef, "teste.csv")
DF<-read.csv("teste.csv", head = T, sep = ",")
colnames(DF) <- c("X", "Intercepto", "Estimador_CELL")
DF<- transform(DF, DF=(log(Intercepto)/log(5)))
DF<-read.csv("DF.csv", head = T, sep = ",")
### Fim do cálculo da dimensão fractal
################################################################################
################################################ Análises Gráficas Exploratórias
################################################################################
ggplot(Fractal, aes(CELL,log(COUNT)))+
geom_point(aes(color = factor(NAME)))+
geom_line(aes(color = factor(NAME),fill = factor(NAME))) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/metaX.R
\docType{methods}
\name{sampleListFile<-}
\alias{sampleListFile<-}
\title{sampleListFile}
\usage{
sampleListFile(para) <- value
}
\arguments{
\item{para}{An object of metaXpara}
\item{value}{value}
}
\value{
An object of metaXpara
}
\description{
Replacement method that sets the path of the sample list file used by a
\code{metaXpara} object.
}
\examples{
para <- new("metaXpara")
sampleListFile(para) <- "sample.txt"
}
\author{
Bo Wen \email{wenbo@genomics.cn}
}
| /man/sampleListFile.Rd | no_license | jaspershen/metaX | R | false | true | 499 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/metaX.R
\docType{methods}
\name{sampleListFile<-}
\alias{sampleListFile<-}
\title{sampleListFile}
\usage{
sampleListFile(para) <- value
}
\arguments{
\item{para}{An object of metaXpara}
\item{value}{value}
}
\value{
An object of metaXpara
}
\description{
sampleListFile
}
\examples{
para <- new("metaXpara")
sampleListFile(para) <- "sample.txt"
}
\author{
Bo Wen \email{wenbo@genomics.cn}
}
|
# Plots found in the article are commented with "ARTICLE"
############
# Preamble #
############
# plyr is loaded before dplyr deliberately: reversing the order would let
# plyr mask dplyr's grouped verbs (summarise/mutate).
library(plyr)
library(dplyr)
library(tidyr)
library(ggplot2)
# NOTE(review): rgl stays commented out here, but plot3d() is called later in
# the "3d Plot" section and will error unless rgl is attached or namespaced.
#library(rgl)
# Minnesota Twins brand colors used throughout the plots.
twins_blue <- "#0C2341"
twins_red <- "#BA0C2E"
twins_gold <- "#CFAB7A"
# Fill/color scale keyed by simplified pitch type (fastball/slider/changeup).
colors_vec <- c("FF" = twins_blue, "SL" = twins_red, "CH" = twins_gold)
#setwd("C:/Users/jack.werner1/Documents/BB")
# NOTE(review): hard-coded setwd() makes the script machine-specific; prefer
# relative paths or an RStudio project.
setwd("/Users/jackwerner/Documents/My Stuff/Baseball/Scraping Files")
# Read data
# Full 2016 pitch-level Pitchf/x data; the Santana-only filter is applied later.
pitch <- read.csv(file = "pitch_data_2016.csv") #%>% filter(pitcher == 429722)
####################
# Reference tables #
####################
# At-bat results
# Maps each raw `event` string to a simplified outcome (Out/Hit/BB/K/HBP).
# NOTE(review): the mapping is positional against sort(unique(pitch$event)) —
# if the set of event strings in the data ever changes, these 31 labels will
# silently pair with the wrong events. A named lookup would be safer.
simpleResults <- data.frame(event = as.character(sort(unique(pitch$event))),
simple_event = c("Out", "Out", "Out", "Out", "HBP",
"Hit", "Out", "Hit", "Out", "Out",
"Out", "Out", "Out", "Out", "Out",
"HBP", "Hit", "BB", "Out", "Out",
"Out", "Out", "Out", "Out", "Out",
"Hit", "K", "K", "Hit", "Out", "BB"),
stringsAsFactors = F)
# Pitch classifications
# Maps raw Pitchf/x pitch_type codes (taken in sorted order) to a simplified
# type (FF/SI/FT/SL/CU/CH/KN/FC/PO/UN) and a fastball ("F") vs. offspeed ("O")
# flag. NOTE(review): like simpleResults above, this mapping is positional
# against sort(unique(pitch$pitch_type)) and breaks silently if the code set
# changes.
simplePitches <- data.frame(pitch_type = sort(as.character(unique(pitch$pitch_type))),
                            simple_pitch_type = c("UN", "UN", "CH", "CU", "CH", "FC", "FF",
                                                  "PO", "SI", "FT", "UN", "CU", "KN", "PO",
                                                  "UN", "SI", "SL", "UN"),
                            fastball = c("UN", "UN", "O", "O", "O", "F", "F", "O", "F",
                                         "F", "UN", "O", "O", "O", "UN", "F", "O", "UN"),
                            # Consistent with the other reference tables (they set it
                            # explicitly); also required under R < 4.0, where character
                            # columns otherwise default to factors.
                            stringsAsFactors = FALSE
)
# Pitch results
# Collapse the raw Pitchf/x pitch_result strings (taken in sorted order) into
# five outcome categories: Ball / Strike / Foul / HBP / InPlay.
pitch_result_levels <- sort(as.character(unique(pitch$pitch_result)))
pitch_result_simple <- c("Ball", "Ball", "Ball", "Strike", "Foul",
                         "Foul", "Foul", "Foul", "HBP", "InPlay",
                         "InPlay", "InPlay", "Ball", "Strike", "Ball",
                         "Strike", "Strike", "Strike")
simplePitchResults <- data.frame(pitch_result = pitch_result_levels,
                                 simple_pitch_result = pitch_result_simple,
                                 stringsAsFactors = F)
# Player names/IDs
# Lookup of "First Last" display names keyed by MLB player id; used for both
# batters and pitchers below.
pitcher_names <- read.csv("playerid_list.csv") %>%
  transmute(name = paste(FIRSTNAME, LASTNAME), id = MLBCODE)
######################
# Manipulate dataset #
######################
# Add Simple Event, Simple Pitch Type, Fastball, Player Names
# Restrict to Ervin Santana (MLB id 429722) and attach the lookup tables.
ervin.pre <- pitch %>% filter(pitcher == 429722) %>%
left_join(simpleResults, by = "event") %>%
left_join(simplePitches, by = "pitch_type") %>%
# pitcher_names is joined twice: once on the batter id, once on the pitcher id,
# renaming the `name` column after each join to avoid a collision.
left_join(pitcher_names, by = c("batter" = "id")) %>%
rename(batter_name = name) %>%
left_join(pitcher_names, by = c("pitcher" = "id")) %>%
rename(pitcher_name = name)
# Derive the count *before* each pitch. Within each at-bat (gid + ab_num),
# running ball/strike totals after each pitch are capped at 3 balls / 2 strikes
# (a foul with 2 strikes leaves the count unchanged), then lagged one pitch so
# `count`/`balls`/`strikes` describe the state the pitch was thrown in
# ("0-0" for the first pitch). For the final pitch of an at-bat, `next_count`
# holds the at-bat outcome instead of a count.
# NOTE(review): assumes rows within an at-bat are already in pitch order —
# confirm the ordering guaranteed by the scraper.
ervin <- ervin.pre %>%
mutate(hand_match = b_hand == p_throws) %>% # Handedness match
group_by(gid, ab_num) %>%
mutate(finalCount = paste0(b, "-", s), # Count on last pitch
last = row_number() == n(),
next_balls = pmin(cumsum(type == "B"), 3), next_strikes = pmin(cumsum(type == "S"), 2),
next_count = ifelse(last, simple_event, paste0(next_balls, "-", next_strikes)),
count = lag(as.character(next_count), default = "0-0"),
balls = lag(as.character(next_balls), default = "0"),
strikes = lag(as.character(next_strikes), default = "0")) %>%
ungroup()
#########################
# Check out pitch types #
#########################
table(ervin$simple_pitch_type)
# Get rid of unknowns
ervin <- ervin %>% filter(simple_pitch_type != "UN") %>%
mutate(simple_pitch_type = as.character(simple_pitch_type))
# Break
# Horizontal vs. vertical movement, colored by Pitchf/x type.
ggplot(data = ervin, aes(pfx_x, pfx_z, color = simple_pitch_type)) + geom_point()
# Velocity
ggplot(data = ervin, aes(start_speed)) + facet_grid(simple_pitch_type~.) + geom_histogram()
ggplot(data = ervin, aes(start_speed, fill = simple_pitch_type, color = simple_pitch_type)) +
geom_density(alpha = .5, size = 1)
# Fold two-seam fastballs (FT) into the generic fastball bucket (FF).
ervin <- ervin %>%
mutate(simple_pitch_type = ifelse(simple_pitch_type == "FT", "FF", simple_pitch_type))
# Try getting pitch types through clustering
# k-means (k = 3, matching FF/SL/CH) on scaled movement + velocity.
# NOTE(review): no set.seed() before kmeans(), so cluster assignments — and
# every mismatch plot below — are not reproducible across runs.
ervin.mat <- ervin %>% select(pfx_x, pfx_z, start_speed) %>% as.matrix() %>% scale()
ervin$cluster <- kmeans(ervin.mat, centers = 3)$cluster
(clust.tab <- table(ervin$cluster, ervin$simple_pitch_type))
# Label each cluster with the Pitchf/x type it overlaps most.
conv.df <- data.frame(cluster = as.numeric(as.character(rownames(clust.tab))),
cluster_type = colnames(clust.tab)[apply(clust.tab, 1, which.max)])
ervin <- ervin %>% left_join(conv.df, by = "cluster")
# NOTE(review): `mismatch` is TRUE when the Pitchf/x label AGREES with the
# cluster label — the name is inverted. Downstream plots map size/color to it
# with this inverted meaning (FALSE, i.e. true disagreement, is level 1 and
# drawn larger/red), so renaming requires coordinated downstream edits.
ervin <- ervin %>% mutate(mismatch = simple_pitch_type == cluster_type)
# Look at groups by break
# Because `mismatch` is TRUE on agreement (see note above), FALSE sorts first:
# values = c(2, 1) draws disagreements larger, and c("red", "grey70") colors
# disagreements red.
ggplot(data = ervin, aes(pfx_x, pfx_z, color = simple_pitch_type, size = mismatch)) + geom_point() +
scale_size_manual(values = c(2, 1)) +
ggtitle("Colored by Pitchf/x")
ggplot(data = ervin, aes(pfx_x, pfx_z, color = cluster_type, size = mismatch)) + geom_point() +
scale_size_manual(values = c(2, 1)) +
ggtitle("Colored by Cluster")
ggplot(data = ervin, aes(pfx_x, pfx_z, color = mismatch)) + geom_point() +
scale_color_manual(values = c("red", "grey70"))
# Look at groups by velocity
ggplot(data = ervin, aes(start_speed, pfx_z, color = simple_pitch_type, size = mismatch)) + geom_point() +
scale_size_manual(values = c(2, 1)) +
ggtitle("Colored by Pitchf/x")
ggplot(data = ervin, aes(start_speed, pfx_z, color = cluster_type, size = mismatch)) + geom_point() +
scale_size_manual(values = c(2, 1)) +
ggtitle("Colored by Cluster")
ggplot(data = ervin, aes(start_speed, pfx_z, color = mismatch)) + geom_point() +
scale_color_manual(values = c("red", "grey70"))
# 3d Plot of location (px, pz) vs. velocity, colored by pitch type.
# plot3d() lives in the rgl package, whose library() call is commented out in
# the preamble; guard the call so the script doesn't die when rgl is absent.
colors <- ifelse(ervin$simple_pitch_type == "FF", "red",
                 ifelse(ervin$simple_pitch_type == "SL", "green", "blue"))
if (requireNamespace("rgl", quietly = TRUE)) {
  rgl::plot3d(ervin$px, ervin$pz, ervin$start_speed, col = colors,
              xlab = "x", ylab = "z", zlab = "Velocity")
} else {
  message("Package 'rgl' not installed; skipping 3d plot.")
}
###############################
# Pitches by count/handedness #
###############################
# Per (count, handedness): raw counts and within-group proportions of each
# pitch type, plus the group size.
tables <- ervin %>% group_by(count, balls, strikes, b_hand) %>%
summarize(FF = sum(simple_pitch_type == "FF"),
SL = sum(simple_pitch_type == "SL"),
CH = sum(simple_pitch_type == "CH"),
FF_p = FF/n(), SL_p = SL/n(), CH_p = CH/n(), total = n()) %>%
ungroup()
tables %>% filter(b_hand == "R") %>% as.data.frame()
tables %>% filter(b_hand == "L") %>% as.data.frame()
# Relevel so stacked bars show SL, CH, FF in that order.
# NOTE(review): this level order is relied on by unnamed `labels =` vectors in
# several scale_*_manual calls later in the script.
ervin$simple_pitch_type <- factor(ervin$simple_pitch_type, levels = c("SL", "CH", "FF"))
ggplot(data = ervin, aes(b_hand, fill = simple_pitch_type)) + facet_grid(strikes~balls) +
geom_bar(position = "fill") + scale_fill_manual(values = colors_vec)
ggplot(data = filter(ervin, b_hand == "R"), aes(b_hand, fill = simple_pitch_type)) + facet_grid(strikes~balls) +
geom_bar(position = "fill") + scale_fill_manual(values = colors_vec)
########################
# Pitch Location Plots #
########################
# Strike-zone outline: half-width 17/24 ft (home plate is 17 in wide); the
# vertical limits appear to be league-average zone bounds in feet — confirm.
strike.zone <- data.frame(x = c(17/24, 17/24, -17/24, -17/24, 17/24), y = c(1.5812, 3.4499, 3.4499, 1.5812, 1.5812))
# Strike zone
# NOTE(review): aes(color = NA) in the polygon layers is immediately overridden
# by the fixed color = "black" and can emit scale warnings; it could be dropped.
ggplot(data = filter(ervin, pitch_result %in% c("Ball", "Ball In Dirt", "Called Strike")),
aes(px, pz, color = type)) +
geom_point() +
geom_polygon(data = strike.zone, aes(x = x, y = y, color = NA), fill = NA, color = "black") +
coord_fixed()
# By pitch type
ggplot(data = ervin, aes(px, pz)) + geom_point(color = "red", alpha = .4) +
facet_grid(b_hand~simple_pitch_type) +
geom_polygon(data = strike.zone, aes(x = x, y = y, color = NA), fill = NA, color = "black") +
coord_fixed()
# k: at-bat ended in a strikeout on this pitch.
# pitch_ab_res: at-bat outcome if this was the final pitch, else "Cont."
ervin <- ervin %>% mutate(k = simple_event == "K" & last,
pitch_ab_res = ifelse(last, simple_event, "Cont."))
# By count, type
ggplot(data = ervin, aes(px, pz, color = simple_pitch_type)) +
facet_grid(balls~strikes) +
geom_point(alpha = .4) +
geom_polygon(data = strike.zone, aes(x = x, y = y, color = NA), fill = NA, color = "black") +
coord_fixed()
##### Individual Counts ######
# 0-2 count by type, hand ARTICLE
# NOTE(review): the unnamed `labels` vector pairs with factor levels in order
# (SL, CH, FF after the relevel above); only FF/SL colors are supplied here,
# which assumes no changeups were thrown on 0-2 — verify.
ggplot(data = filter(ervin, count == "0-2"), aes(px, pz, color = simple_pitch_type)) +
facet_wrap(~b_hand) +
geom_point(size = 3) +
geom_polygon(data = strike.zone, aes(x = x, y = y, color = NA), fill = NA, color = "black") +
coord_fixed(xlim = c(min(ervin$px), max(ervin$px)), ylim = c(min(ervin$pz), max(ervin$pz))) +
labs(x = "Horizontal Position", y = "Vertical Position",
title = "0-2 Pitches", color = "Pitch") +
scale_color_manual(values = c("FF" = "#e41a1c", "SL" = "#377eb8"),
labels = c("Slider", "Fastball")) +
theme(legend.position = "bottom",
legend.title = element_text(family = "Trebuchet MS", color="#666666", face="bold", size=15),
legend.text = element_text(family = "Trebuchet MS", color="#666666", face="bold", size=12),
plot.title = element_text(family = "Trebuchet MS", color="#666666", face="bold", size=30, hjust=0),
axis.title = element_text(family = "Trebuchet MS", color="#666666", face="bold", size=20))
# 0-2 count by type, result, hand
ggplot(data = filter(ervin, count == "0-2"), aes(px, pz, color = pitch_ab_res)) +
facet_grid(b_hand~simple_pitch_type) +
geom_point(size = 2) +
geom_polygon(data = strike.zone, aes(x = x, y = y, color = NA), fill = NA, color = "black") +
coord_fixed() +
scale_color_manual(values = c("grey40", "red", "blue", "purple"))
# Pitch-type mix on 0-2 by batter handedness (columns sum to 1).
(tab.02 <- table(ervin$simple_pitch_type[ervin$count == "0-2"], ervin$b_hand[ervin$count == "0-2"]))
prop.table(tab.02, 2)
# 1-2 count by type, hand ARTICLE
# Here the unnamed labels c("Slider", "Changeup", "Fastball") line up with the
# releveled factor order (SL, CH, FF).
ggplot(data = filter(ervin, count == "1-2"), aes(px, pz, color = simple_pitch_type)) +
facet_wrap(~b_hand) +
geom_point(size = 3) +
geom_polygon(data = strike.zone, aes(x = x, y = y, color = NA), fill = NA, color = "black") +
coord_fixed(xlim = c(min(ervin$px), max(ervin$px)), ylim = c(min(ervin$pz), max(ervin$pz))) +
scale_color_manual(values = c("FF" = "#e41a1c", "SL" = "#377eb8", "CH" = "#4daf4a"),
labels = c("Slider", "Changeup", "Fastball")) +
labs(x = "Horizontal Position", y = "Vertical Position",
title = "1-2 Pitches", color = "Pitch") +
theme(legend.position = "bottom",
legend.title = element_text(family = "Trebuchet MS", color="#666666", face="bold", size=15),
legend.text = element_text(family = "Trebuchet MS", color="#666666", face="bold", size=12),
plot.title = element_text(family = "Trebuchet MS", color="#666666", face="bold", size=30, hjust=0),
axis.title = element_text(family = "Trebuchet MS", color="#666666", face="bold", size=20))
# Same plot with team colors and outlined points.
ggplot(data = filter(ervin, count == "1-2"), aes(px, pz, fill = simple_pitch_type)) +
facet_wrap(~b_hand) +
geom_point(size = 3, color = "black", shape = 21) +
geom_polygon(data = strike.zone, aes(x = x, y = y, color = NA), fill = NA, color = "black") +
coord_fixed() +
scale_fill_manual(values = colors_vec)
# 1-2 count by type, result, hand
ggplot(data = filter(ervin, count == "1-2"), aes(px, pz, color = pitch_ab_res)) +
facet_grid(b_hand~simple_pitch_type) +
geom_point(size = 2) +
geom_polygon(data = strike.zone, aes(x = x, y = y, color = NA), fill = NA, color = "black") +
coord_fixed() +
scale_color_manual(values = c("grey40", "orange", "red", "blue", "purple"))
# Pitch-type mix on 1-2 by batter handedness (columns sum to 1).
(tab.12 <- table(ervin$simple_pitch_type[ervin$count == "1-2"], ervin$b_hand[ervin$count == "1-2"]))
prop.table(tab.12, 2)
# 2 strikes by count, hand, type ARTICLE
ggplot(data = filter(ervin, strikes == 2), aes(px, pz, color = simple_pitch_type)) +
facet_grid(b_hand~balls) +
geom_point(size = 1) +
geom_polygon(data = strike.zone, aes(x = x, y = y, color = NA), fill = NA, color = "black") +
coord_fixed()
# Location by type, result
ggplot(data = ervin, aes(px, pz)) +
facet_grid(simple_pitch_type~pitch_ab_res) +
geom_point(size = 2) +
geom_polygon(data = strike.zone, aes(x = x, y = y, color = NA), fill = NA, color = "black") +
coord_fixed()
# How did Ervin get strikeouts?
# Pitch mix on all two-strike pitches vs. the mix on strikeout pitches only,
# then strikeout pitches broken down by count and batter handedness.
table(ervin$simple_pitch_type[ervin$strikes == 2])/sum(ervin$strikes == 2)
table(ervin$simple_pitch_type[ervin$last & ervin$simple_event == "K"])/sum(ervin$last & ervin$simple_event == "K")
table(ervin$simple_pitch_type[ervin$last & ervin$simple_event == "K"])
table(ervin$count[ervin$last & ervin$simple_event == "K"],
ervin$simple_pitch_type[ervin$last & ervin$simple_event == "K"])
table(ervin$count[ervin$last & ervin$simple_event == "K"],
ervin$simple_pitch_type[ervin$last & ervin$simple_event == "K"],
ervin$b_hand[ervin$last & ervin$simple_event == "K"])
####################
# Pitch sequencing #
####################
# Within each at-bat, attach the previous count, the previous one and two
# pitch types, the next pitch type, and the pitch number. mutate() only adds
# columns, so ervin.seq has the same rows in the same order as ervin — several
# table() calls below rely on that alignment.
ervin.seq <- ervin %>% group_by(gid, ab_num) %>%
mutate(prev_count = lag(count, 1, default = "None"),
prev_pitch = lag(as.character(simple_pitch_type), 1, default = "None"),
back_2 = lag(as.character(simple_pitch_type), 2, default = "None"),
next_pitch = lead(as.character(simple_pitch_type), 1, default = "None"),
pitch_num = 1:n()) %>%
ungroup()
# NOTE(review): View() is interactive-only; it is a no-op / error when the
# script is run non-interactively.
ervin.seq %>% select(pitch_result, prev_pitch, simple_pitch_type, next_pitch) %>% View()
# Transition matrix: rows = previous pitch, columns = current pitch.
table(ervin.seq$prev_pitch, ervin.seq$simple_pitch_type) %>% prop.table(1)
##### Individual Counts #####
# 0-2 count: locations faceted by previous count and previous pitch.
ggplot(data = filter(ervin.seq, count == "0-2"), aes(px, pz, color = simple_pitch_type)) +
  facet_grid(prev_count~prev_pitch) +
  geom_point(size = 2) +
  geom_polygon(data = strike.zone, aes(x = x, y = y, color = NA), fill = NA, color = "black") +
  coord_fixed()
# Pitch mix on 0-2 by previous pitch (rows sum to 1).
# Fix: index ervin.seq with its own `count` column (was ervin$count — the
# values are identical because ervin.seq preserves ervin's row order, but
# mixing frames in the subscript is fragile and inconsistent with the 2-2
# section, which already uses ervin.seq$count).
table(ervin.seq$prev_pitch[ervin.seq$count == "0-2"],
      ervin.seq$simple_pitch_type[ervin.seq$count == "0-2"]) %>%
  prop.table(1)
# Same, additionally split by batter handedness.
table(ervin.seq$prev_pitch[ervin.seq$count == "0-2"],
      ervin.seq$simple_pitch_type[ervin.seq$count == "0-2"],
      ervin.seq$b_hand[ervin.seq$count == "0-2"]) %>%
  prop.table(c(1, 3))
ggplot(data = filter(ervin.seq, count == "0-2"), aes(x = b_hand, fill = simple_pitch_type)) +
  facet_grid(prev_count~prev_pitch) + geom_bar(position = "fill")
# 1-2 count: locations faceted by previous count and previous pitch.
ggplot(data = filter(ervin.seq, count == "1-2"), aes(px, pz, color = simple_pitch_type)) +
  facet_grid(prev_count~prev_pitch) +
  geom_point(size = 2) +
  geom_polygon(data = strike.zone, aes(x = x, y = y, color = NA), fill = NA, color = "black") +
  coord_fixed()
# Pitch mix on 1-2 by previous pitch (rows sum to 1).
# Fix: index ervin.seq with its own `count` column (was ervin$count — same
# values since ervin.seq preserves ervin's row order, but mixing frames in the
# subscript is fragile and inconsistent with the 2-2 section).
table(ervin.seq$prev_pitch[ervin.seq$count == "1-2"],
      ervin.seq$simple_pitch_type[ervin.seq$count == "1-2"]) %>%
  prop.table(1)
# Same, additionally split by batter handedness.
table(ervin.seq$prev_pitch[ervin.seq$count == "1-2"],
      ervin.seq$simple_pitch_type[ervin.seq$count == "1-2"],
      ervin.seq$b_hand[ervin.seq$count == "1-2"]) %>%
  prop.table(c(1, 3))
ggplot(data = filter(ervin.seq, count == "1-2"), aes(x = b_hand, fill = simple_pitch_type)) +
  facet_grid(prev_count~prev_pitch) + geom_bar(position = "fill")
# ASIDE: Fouled off pitches
# count == prev_count means the previous pitch left the count unchanged,
# which (given the 2-strike cap in the count derivation) indicates a two-strike
# foul — confirm that interpretation.
ggplot(data = filter(ervin.seq, count == prev_count, b_hand == "L"), aes(x = b_hand, fill = simple_pitch_type)) +
facet_grid(prev_count~prev_pitch) + geom_bar(position = "fill")
# Changeup rate on 1-2 after a foul vs. after reaching 1-2 normally.
table((ervin.seq$prev_count == "1-2")[ervin.seq$count == "1-2"],
(ervin.seq$simple_pitch_type == "CH")[ervin.seq$count == "1-2"]) %>%
prop.table(c(1))
# Same comparison across all two-strike counts with fewer than two balls.
table((ervin.seq$count == ervin.seq$prev_count)[ervin.seq$strikes == 2 & ervin.seq$balls < 2],
(ervin.seq$simple_pitch_type == "CH")[ervin.seq$strikes == 2 & ervin.seq$balls < 2]) %>%
prop.table(c(1))
ggplot(data = filter(ervin.seq, count == prev_count, b_hand == "R", prev_pitch != "CH"), aes(x = b_hand, fill = simple_pitch_type)) +
facet_grid(prev_count~prev_pitch) + geom_bar(position = "fill")
table(ervin.seq$prev_pitch[ervin.seq$count == ervin.seq$prev_count & ervin.seq$b_hand == "R"],
ervin.seq$simple_pitch_type[ervin.seq$count == ervin.seq$prev_count & ervin.seq$b_hand == "R"])
# 2-2 count: locations faceted by previous count and previous pitch.
ggplot(data = filter(ervin.seq, count == "2-2"), aes(px, pz, color = simple_pitch_type)) +
  facet_grid(prev_count~prev_pitch) +
  geom_point(size = 2) +
  geom_polygon(data = strike.zone, aes(x = x, y = y, color = NA), fill = NA, color = "black") +
  coord_fixed()
# Pitch mix on 2-2 by previous pitch (rows sum to 1).
table(ervin.seq$prev_pitch[ervin.seq$count == "2-2"],
      ervin.seq$simple_pitch_type[ervin.seq$count == "2-2"]) %>%
  prop.table(1)
# Same, split by batter handedness.
# Fix: the three-way table indexed ervin.seq with ervin$count, unlike the
# two-way table right above it; standardized on ervin.seq$count (identical
# values — ervin.seq preserves ervin's row order).
table(ervin.seq$prev_pitch[ervin.seq$count == "2-2"],
      ervin.seq$simple_pitch_type[ervin.seq$count == "2-2"],
      ervin.seq$b_hand[ervin.seq$count == "2-2"]) %>%
  prop.table(c(1, 3))
ggplot(data = filter(ervin.seq, count == "2-2"), aes(x = b_hand, fill = simple_pitch_type)) +
  facet_grid(prev_count~prev_pitch) + geom_bar(position = "fill")
# 3-2 count: locations faceted by previous count and previous pitch.
ggplot(data = filter(ervin.seq, count == "3-2"), aes(px, pz, color = simple_pitch_type)) +
  facet_grid(prev_count~prev_pitch) +
  geom_point(size = 2) +
  geom_polygon(data = strike.zone, aes(x = x, y = y, color = NA), fill = NA, color = "black") +
  coord_fixed()
# Pitch mix on 3-2 by previous pitch (rows sum to 1).
# Fix: index ervin.seq with its own `count` column (was ervin$count — same
# values since ervin.seq preserves ervin's row order, but mixing frames in the
# subscript is fragile and inconsistent with the 2-2 section).
table(ervin.seq$prev_pitch[ervin.seq$count == "3-2"],
      ervin.seq$simple_pitch_type[ervin.seq$count == "3-2"]) %>%
  prop.table(1)
# Same, additionally split by batter handedness.
table(ervin.seq$prev_pitch[ervin.seq$count == "3-2"],
      ervin.seq$simple_pitch_type[ervin.seq$count == "3-2"],
      ervin.seq$b_hand[ervin.seq$count == "3-2"]) %>%
  prop.table(c(1, 3))
ggplot(data = filter(ervin.seq, count == "3-2"), aes(x = b_hand, fill = simple_pitch_type)) +
  facet_grid(prev_count~prev_pitch) + geom_bar(position = "fill")
# Second pitch
# Pitch mix on the second pitch of an at-bat (count is 1-0 or 0-1), split by
# what the first pitch was.
ggplot(data = filter(ervin.seq, count %in% c("1-0", "0-1")), aes(x = b_hand, fill = simple_pitch_type)) +
facet_grid(count~prev_pitch) + geom_bar(position = "fill")
#############
# By inning #
#############
table(ervin$simple_pitch_type, ervin$inning, ervin$b_hand) %>% prop.table(c(2, 3))
# Pitch-type frequency by inning, reshaped long for plotting.
inning.df <- ervin %>% group_by(inning) %>%
summarize(Fastball = sum(simple_pitch_type == "FF")/n(),
Slider = sum(simple_pitch_type == "SL")/n(),
Changeup = sum(simple_pitch_type == "CH")/n(),
Total = n()) %>%
ungroup() %>%
gather(key = Pitch, value = Frequency, Fastball, Slider, Changeup)
# ARTICLE plot: pitch mix by inning (innings 8+ dropped as small samples).
ggplot(data = filter(inning.df, inning <= 7), aes(x = inning, y = Frequency, color = Pitch)) +
geom_line(size = 2) + geom_point(size = 4) +
coord_cartesian(ylim = c(0, .75)) +
labs(x = "Inning", y = "Frequency",
title = "Pitch Type by Inning", color = "Pitch") +
scale_color_manual(values = c("Fastball" = "#e41a1c", "Slider" = "#377eb8", "Changeup" = "#4daf4a")) +
scale_x_continuous(breaks = 1:9) +
theme(#legend.position = "bottom",
legend.title = element_text(family = "Trebuchet MS", color="#666666", face="bold", size=15),
legend.text = element_text(family = "Trebuchet MS", color="#666666", face="bold", size=12),
plot.title = element_text(family = "Trebuchet MS", color="#666666", face="bold", size=30, hjust=0),
axis.title = element_text(family = "Trebuchet MS", color="#666666", face="bold", size=20),
axis.text.y = element_text(family = "Trebuchet MS", color = "#666666", size = 15),
axis.text.x = element_text(family = "Trebuchet MS", color = "#666666", size = 15))
# Recompute the by-inning mix using only starts where Santana pitched deep
# into the game, to control for early exits.
# NOTE(review): the flag is named into_seventh but the condition is
# inning >= 6 (reached the SIXTH inning) — confirm which threshold is intended.
inning.df <- ervin %>% group_by(gid) %>%
mutate(into_seventh = any(inning >= 6)) %>% filter(into_seventh) %>%
ungroup() %>% group_by(inning) %>%
summarize(Fastball = sum(simple_pitch_type == "FF")/n(),
Slider = sum(simple_pitch_type == "SL")/n(),
Changeup = sum(simple_pitch_type == "CH")/n(),
Total = n()) %>%
ungroup() %>%
gather(key = Pitch, value = Frequency, Fastball, Slider, Changeup)
ggplot(data = filter(inning.df, inning <= 7), aes(x = inning, y = Frequency, color = Pitch)) +
geom_line() + geom_point()
##################
# By baserunners #
##################
# Flags for any runner on, runner on third, runner on second. The runner_*
# columns appear to be NA when the base is empty (consistent with the !is.na
# tests here) — confirm against the scraper's schema.
ervin_b <- ervin %>% mutate(baserunner = !is.na(runner_1) | !is.na(runner_2) | !is.na(runner_3),
on_third = !is.na(runner_3),
on_second = !is.na(runner_2))
# Pitch mix with/without each baserunner situation (rows sum to 1).
table(ervin_b$baserunner, ervin_b$simple_pitch_type) %>% prop.table(1)
table(ervin_b$on_third, ervin_b$simple_pitch_type) %>% prop.table(1)
table(ervin_b$on_second, ervin_b$simple_pitch_type) %>% prop.table(1)
#################
# Pitch strings #
#################
# Compare observed 2- and 3-pitch sequences against a shuffled baseline to see
# whether pitches are sequenced non-randomly.

# Total occurrences of each pattern across `strings`, counting overlaps via a
# lookahead regex (e.g. "FF" matches twice in "FFF"). Extracted to replace
# four copies of the same counting loop.
count_overlapping <- function(patterns, strings) {
  lookaheads <- paste0("(?=", patterns, ")")
  vapply(lookaheads,
         function(p) sum(sapply(gregexpr(p, strings, perl = TRUE),
                                function(m) sum(m > 0))),
         numeric(1), USE.NAMES = FALSE)
}

# Actual sequences: one string per at-bat, first letter of each pitch type
# (F/S/C), in pitch order.
abid <- paste0(ervin$gid, ervin$ab_num)
pitches.1 <- ervin$simple_pitch_type %>% as.character() %>% substr(1, 1)
seqs <- tapply(pitches.1, abid, paste0, collapse = "", simplify = T) %>%
  as.vector()
# All 9 two-pitch and 27 three-pitch patterns.
ps <- c("F", "S", "C")
all.duos <- paste0(rep(ps, each = 3), ps)
all.trios <- paste0(rep(all.duos, each = 3), ps)
duos.freq <- count_overlapping(all.duos, seqs)
trios.freq <- count_overlapping(all.trios, seqs)
# (A redundant early seqs.df assignment was removed here; it was overwritten
# below before first use.)

# Random baseline: shuffle the pitch labels 100 times, rebuild the at-bat
# strings, and recount. NOTE(review): no set.seed(), so the baseline varies
# from run to run.
rand.freqs <- matrix(0, nrow = length(all.duos) + length(all.trios), ncol = 100)
for (j in 1:100) {
  pitches.1.r <- ervin$simple_pitch_type %>% as.character() %>% substr(1, 1) %>% sample()
  seqs.r <- tapply(pitches.1.r, abid, paste0, collapse = "", simplify = T) %>%
    as.vector()
  rand.freqs[, j] <- c(count_overlapping(all.duos, seqs.r),
                       count_overlapping(all.trios, seqs.r))
  print(j)  # progress indicator
}

# Observed vs. expected frequency and relative difference per pattern.
seqs.df <- data.frame(pattern = c(all.duos, all.trios),
                      freq = c(duos.freq, trios.freq),
                      exp = apply(rand.freqs, 1, mean)) %>%
  mutate(p_diff = (freq - exp)/exp)
duos.df <- seqs.df[1:9,]
trios.df <- seqs.df[10:nrow(seqs.df),]
# Observed vs. expected scatter; points above the red y = x line occur more
# often than chance.
ggplot(data = duos.df, aes(x = exp, y = freq, label = pattern)) +
  geom_text() +
  geom_abline(slope = 1, intercept = 0, color = "red")
ggplot(data = trios.df, aes(x = exp, y = freq, label = pattern)) +
  geom_text() +
  geom_abline(slope = 1, intercept = 0, color = "red")
# Relative over/under-representation, common patterns only.
ggplot(data = filter(duos.df, exp > 100), aes(x = reorder(pattern, -p_diff), y = p_diff)) +
  geom_bar(stat = "identity", fill = twins_blue, color = twins_gold, size = 1) + theme_minimal()
ggplot(data = filter(trios.df, exp > 100), aes(x = reorder(pattern, -p_diff), y = p_diff)) + geom_bar(stat = "identity")
| /Ervin_Santana.R | no_license | jackoliverwerner/Twins-Rotation-Preview | R | false | false | 23,306 | r | # Plots found in the article are commented with "ARTICLE"
############
# Preamble #
############
library(plyr)
library(dplyr)
library(tidyr)
library(ggplot2)
#library(rgl)
twins_blue <- "#0C2341"
twins_red <- "#BA0C2E"
twins_gold <- "#CFAB7A"
colors_vec <- c("FF" = twins_blue, "SL" = twins_red, "CH" = twins_gold)
#setwd("C:/Users/jack.werner1/Documents/BB")
setwd("/Users/jackwerner/Documents/My Stuff/Baseball/Scraping Files")
# Read data
pitch <- read.csv(file = "pitch_data_2016.csv") #%>% filter(pitcher == 429722)
####################
# Reference tables #
####################
# At-bat results
simpleResults <- data.frame(event = as.character(sort(unique(pitch$event))),
simple_event = c("Out", "Out", "Out", "Out", "HBP",
"Hit", "Out", "Hit", "Out", "Out",
"Out", "Out", "Out", "Out", "Out",
"HBP", "Hit", "BB", "Out", "Out",
"Out", "Out", "Out", "Out", "Out",
"Hit", "K", "K", "Hit", "Out", "BB"),
stringsAsFactors = F)
# Pitch classifications
# Maps raw Pitchf/x pitch_type codes (taken in sorted order) to a simplified
# type and a fastball ("F") vs. offspeed ("O") flag. NOTE(review): positional
# mapping against sort(unique(pitch$pitch_type)) — breaks silently if the code
# set changes.
simplePitches <- data.frame(pitch_type = sort(as.character(unique(pitch$pitch_type))),
                            simple_pitch_type = c("UN", "UN", "CH", "CU", "CH", "FC", "FF",
                                                  "PO", "SI", "FT", "UN", "CU", "KN", "PO",
                                                  "UN", "SI", "SL", "UN"),
                            fastball = c("UN", "UN", "O", "O", "O", "F", "F", "O", "F",
                                         "F", "UN", "O", "O", "O", "UN", "F", "O", "UN"),
                            # Consistent with the other reference tables; required
                            # under R < 4.0 to keep these columns character.
                            stringsAsFactors = FALSE
)
# Pitch results
simplePitchResults <- data.frame(pitch_result = sort(as.character(unique(pitch$pitch_result))),
simple_pitch_result = c("Ball", "Ball", "Ball", "Strike", "Foul",
"Foul", "Foul", "Foul", "HBP", "InPlay",
"InPlay", "InPlay", "Ball", "Strike", "Ball",
"Strike", "Strike", "Strike"),
stringsAsFactors = F
)
# Player names/IDs
pitcher_names <- read.csv("playerid_list.csv") %>%
mutate(name = paste0(FIRSTNAME, " ", LASTNAME), id = MLBCODE) %>%
select(name, id)
######################
# Manipulate dataset #
######################
# Add Simple Event, Simple Pitch Type, Fastball, Player Names
ervin.pre <- pitch %>% filter(pitcher == 429722) %>%
left_join(simpleResults, by = "event") %>%
left_join(simplePitches, by = "pitch_type") %>%
left_join(pitcher_names, by = c("batter" = "id")) %>%
rename(batter_name = name) %>%
left_join(pitcher_names, by = c("pitcher" = "id")) %>%
rename(pitcher_name = name)
# A
ervin <- ervin.pre %>%
mutate(hand_match = b_hand == p_throws) %>% # Handedness match
group_by(gid, ab_num) %>%
mutate(finalCount = paste0(b, "-", s), # Count on last pitch
last = row_number() == n(),
next_balls = pmin(cumsum(type == "B"), 3), next_strikes = pmin(cumsum(type == "S"), 2),
next_count = ifelse(last, simple_event, paste0(next_balls, "-", next_strikes)),
count = lag(as.character(next_count), default = "0-0"),
balls = lag(as.character(next_balls), default = "0"),
strikes = lag(as.character(next_strikes), default = "0")) %>%
ungroup()
#########################
# Check out pitch types #
#########################
table(ervin$simple_pitch_type)
# Get rid of unknowns
ervin <- ervin %>% filter(simple_pitch_type != "UN") %>%
mutate(simple_pitch_type = as.character(simple_pitch_type))
# Break
ggplot(data = ervin, aes(pfx_x, pfx_z, color = simple_pitch_type)) + geom_point()
# Velocity
ggplot(data = ervin, aes(start_speed)) + facet_grid(simple_pitch_type~.) + geom_histogram()
ggplot(data = ervin, aes(start_speed, fill = simple_pitch_type, color = simple_pitch_type)) +
geom_density(alpha = .5, size = 1)
ervin <- ervin %>%
mutate(simple_pitch_type = ifelse(simple_pitch_type == "FT", "FF", simple_pitch_type))
# Try getting pitch types through clustering
ervin.mat <- ervin %>% select(pfx_x, pfx_z, start_speed) %>% as.matrix() %>% scale()
ervin$cluster <- kmeans(ervin.mat, centers = 3)$cluster
(clust.tab <- table(ervin$cluster, ervin$simple_pitch_type))
conv.df <- data.frame(cluster = as.numeric(as.character(rownames(clust.tab))),
cluster_type = colnames(clust.tab)[apply(clust.tab, 1, which.max)])
ervin <- ervin %>% left_join(conv.df, by = "cluster")
ervin <- ervin %>% mutate(mismatch = simple_pitch_type == cluster_type)
# Look at groups by break
ggplot(data = ervin, aes(pfx_x, pfx_z, color = simple_pitch_type, size = mismatch)) + geom_point() +
scale_size_manual(values = c(2, 1)) +
ggtitle("Colored by Pitchf/x")
ggplot(data = ervin, aes(pfx_x, pfx_z, color = cluster_type, size = mismatch)) + geom_point() +
scale_size_manual(values = c(2, 1)) +
ggtitle("Colored by Cluster")
ggplot(data = ervin, aes(pfx_x, pfx_z, color = mismatch)) + geom_point() +
scale_color_manual(values = c("red", "grey70"))
# Look at groups by velocity
ggplot(data = ervin, aes(start_speed, pfx_z, color = simple_pitch_type, size = mismatch)) + geom_point() +
scale_size_manual(values = c(2, 1)) +
ggtitle("Colored by Pitchf/x")
ggplot(data = ervin, aes(start_speed, pfx_z, color = cluster_type, size = mismatch)) + geom_point() +
scale_size_manual(values = c(2, 1)) +
ggtitle("Colored by Cluster")
ggplot(data = ervin, aes(start_speed, pfx_z, color = mismatch)) + geom_point() +
scale_color_manual(values = c("red", "grey70"))
# 3d Plot of location (px, pz) vs. velocity, colored by pitch type.
# plot3d() lives in the rgl package, whose library() call is commented out in
# the preamble; guard the call so the script doesn't die when rgl is absent.
colors <- ifelse(ervin$simple_pitch_type == "FF", "red",
                 ifelse(ervin$simple_pitch_type == "SL", "green", "blue"))
if (requireNamespace("rgl", quietly = TRUE)) {
  rgl::plot3d(ervin$px, ervin$pz, ervin$start_speed, col = colors,
              xlab = "x", ylab = "z", zlab = "Velocity")
} else {
  message("Package 'rgl' not installed; skipping 3d plot.")
}
###############################
# Pitches by count/handedness #
###############################
tables <- ervin %>% group_by(count, balls, strikes, b_hand) %>%
summarize(FF = sum(simple_pitch_type == "FF"),
SL = sum(simple_pitch_type == "SL"),
CH = sum(simple_pitch_type == "CH"),
FF_p = FF/n(), SL_p = SL/n(), CH_p = CH/n(), total = n()) %>%
ungroup()
tables %>% filter(b_hand == "R") %>% as.data.frame()
tables %>% filter(b_hand == "L") %>% as.data.frame()
ervin$simple_pitch_type <- factor(ervin$simple_pitch_type, levels = c("SL", "CH", "FF"))
ggplot(data = ervin, aes(b_hand, fill = simple_pitch_type)) + facet_grid(strikes~balls) +
geom_bar(position = "fill") + scale_fill_manual(values = colors_vec)
ggplot(data = filter(ervin, b_hand == "R"), aes(b_hand, fill = simple_pitch_type)) + facet_grid(strikes~balls) +
geom_bar(position = "fill") + scale_fill_manual(values = colors_vec)
########################
# Pitch Location Plots #
########################
strike.zone <- data.frame(x = c(17/24, 17/24, -17/24, -17/24, 17/24), y = c(1.5812, 3.4499, 3.4499, 1.5812, 1.5812))
# Strike zone
ggplot(data = filter(ervin, pitch_result %in% c("Ball", "Ball In Dirt", "Called Strike")),
aes(px, pz, color = type)) +
geom_point() +
geom_polygon(data = strike.zone, aes(x = x, y = y, color = NA), fill = NA, color = "black") +
coord_fixed()
# By pitch type
ggplot(data = ervin, aes(px, pz)) + geom_point(color = "red", alpha = .4) +
facet_grid(b_hand~simple_pitch_type) +
geom_polygon(data = strike.zone, aes(x = x, y = y, color = NA), fill = NA, color = "black") +
coord_fixed()
ervin <- ervin %>% mutate(k = simple_event == "K" & last,
pitch_ab_res = ifelse(last, simple_event, "Cont."))
# By count, type
ggplot(data = ervin, aes(px, pz, color = simple_pitch_type)) +
facet_grid(balls~strikes) +
geom_point(alpha = .4) +
geom_polygon(data = strike.zone, aes(x = x, y = y, color = NA), fill = NA, color = "black") +
coord_fixed()
##### Individual Counts ######
# 0-2 count by type, hand ARTICLE
ggplot(data = filter(ervin, count == "0-2"), aes(px, pz, color = simple_pitch_type)) +
facet_wrap(~b_hand) +
geom_point(size = 3) +
geom_polygon(data = strike.zone, aes(x = x, y = y, color = NA), fill = NA, color = "black") +
coord_fixed(xlim = c(min(ervin$px), max(ervin$px)), ylim = c(min(ervin$pz), max(ervin$pz))) +
labs(x = "Horizontal Position", y = "Vertical Position",
title = "0-2 Pitches", color = "Pitch") +
scale_color_manual(values = c("FF" = "#e41a1c", "SL" = "#377eb8"),
labels = c("Slider", "Fastball")) +
theme(legend.position = "bottom",
legend.title = element_text(family = "Trebuchet MS", color="#666666", face="bold", size=15),
legend.text = element_text(family = "Trebuchet MS", color="#666666", face="bold", size=12),
plot.title = element_text(family = "Trebuchet MS", color="#666666", face="bold", size=30, hjust=0),
axis.title = element_text(family = "Trebuchet MS", color="#666666", face="bold", size=20))
# 0-2 count by type, result, hand
ggplot(data = filter(ervin, count == "0-2"), aes(px, pz, color = pitch_ab_res)) +
facet_grid(b_hand~simple_pitch_type) +
geom_point(size = 2) +
geom_polygon(data = strike.zone, aes(x = x, y = y, color = NA), fill = NA, color = "black") +
coord_fixed() +
scale_color_manual(values = c("grey40", "red", "blue", "purple"))
(tab.02 <- table(ervin$simple_pitch_type[ervin$count == "0-2"], ervin$b_hand[ervin$count == "0-2"]))
prop.table(tab.02, 2)
# 1-2 count by type, hand ARTICLE
ggplot(data = filter(ervin, count == "1-2"), aes(px, pz, color = simple_pitch_type)) +
facet_wrap(~b_hand) +
geom_point(size = 3) +
geom_polygon(data = strike.zone, aes(x = x, y = y, color = NA), fill = NA, color = "black") +
coord_fixed(xlim = c(min(ervin$px), max(ervin$px)), ylim = c(min(ervin$pz), max(ervin$pz))) +
scale_color_manual(values = c("FF" = "#e41a1c", "SL" = "#377eb8", "CH" = "#4daf4a"),
labels = c("Slider", "Changeup", "Fastball")) +
labs(x = "Horizontal Position", y = "Vertical Position",
title = "1-2 Pitches", color = "Pitch") +
theme(legend.position = "bottom",
legend.title = element_text(family = "Trebuchet MS", color="#666666", face="bold", size=15),
legend.text = element_text(family = "Trebuchet MS", color="#666666", face="bold", size=12),
plot.title = element_text(family = "Trebuchet MS", color="#666666", face="bold", size=30, hjust=0),
axis.title = element_text(family = "Trebuchet MS", color="#666666", face="bold", size=20))
# Pitch locations in 1-2 counts, faceted by batter hand. Filled points so
# pitch type shows as fill color with a black outline (shape 21).
ggplot(data = filter(ervin, count == "1-2"), aes(px, pz, fill = simple_pitch_type)) +
facet_wrap(~b_hand) +
geom_point(size = 3, color = "black", shape = 21) +
geom_polygon(data = strike.zone, aes(x = x, y = y, color = NA), fill = NA, color = "black") +
coord_fixed() +
scale_fill_manual(values = colors_vec)
# 1-2 count by type, result, hand
ggplot(data = filter(ervin, count == "1-2"), aes(px, pz, color = pitch_ab_res)) +
facet_grid(b_hand~simple_pitch_type) +
geom_point(size = 2) +
geom_polygon(data = strike.zone, aes(x = x, y = y, color = NA), fill = NA, color = "black") +
coord_fixed() +
scale_color_manual(values = c("grey40", "orange", "red", "blue", "purple"))
# Pitch-type vs batter-hand counts in 1-2 counts; outer parens print the
# table while assigning it.
(tab.12 <- table(ervin$simple_pitch_type[ervin$count == "1-2"], ervin$b_hand[ervin$count == "1-2"]))
# Column proportions: pitch mix within each batter hand.
prop.table(tab.12, 2)
# 2 strikes by count, hand, type ARTICLE
ggplot(data = filter(ervin, strikes == 2), aes(px, pz, color = simple_pitch_type)) +
facet_grid(b_hand~balls) +
geom_point(size = 1) +
geom_polygon(data = strike.zone, aes(x = x, y = y, color = NA), fill = NA, color = "black") +
coord_fixed()
# Location by type, result
ggplot(data = ervin, aes(px, pz)) +
facet_grid(simple_pitch_type~pitch_ab_res) +
geom_point(size = 2) +
geom_polygon(data = strike.zone, aes(x = x, y = y, color = NA), fill = NA, color = "black") +
coord_fixed()
# How did Ervin get strikeouts?
# Pitch mix on all two-strike pitches.
table(ervin$simple_pitch_type[ervin$strikes == 2])/sum(ervin$strikes == 2)
# Pitch mix on the final pitch of strikeout at bats (proportions, then counts).
table(ervin$simple_pitch_type[ervin$last & ervin$simple_event == "K"])/sum(ervin$last & ervin$simple_event == "K")
table(ervin$simple_pitch_type[ervin$last & ervin$simple_event == "K"])
# Strikeout pitch broken down by the count it came in...
table(ervin$count[ervin$last & ervin$simple_event == "K"],
ervin$simple_pitch_type[ervin$last & ervin$simple_event == "K"])
# ...and additionally by batter hand.
table(ervin$count[ervin$last & ervin$simple_event == "K"],
ervin$simple_pitch_type[ervin$last & ervin$simple_event == "K"],
ervin$b_hand[ervin$last & ervin$simple_event == "K"])
####################
# Pitch sequencing #
####################
# Add within-at-bat sequencing context: the previous count, the previous
# pitch, the pitch two back, the next pitch, and the pitch number within
# the at bat. Grouping by game id (gid) and at-bat number keeps lag/lead
# from crossing at-bat boundaries; "None" marks positions where no such
# pitch exists (e.g. the first pitch of an at bat).
ervin.seq <- ervin %>% group_by(gid, ab_num) %>%
mutate(prev_count = lag(count, 1, default = "None"),
prev_pitch = lag(as.character(simple_pitch_type), 1, default = "None"),
back_2 = lag(as.character(simple_pitch_type), 2, default = "None"),
next_pitch = lead(as.character(simple_pitch_type), 1, default = "None"),
pitch_num = 1:n()) %>%
ungroup()
# Interactive sanity check of the sequencing columns (opens a data viewer).
ervin.seq %>% select(pitch_result, prev_pitch, simple_pitch_type, next_pitch) %>% View()
# Pitch-to-pitch transition matrix: previous pitch (rows) -> current pitch,
# as row proportions.
table(ervin.seq$prev_pitch, ervin.seq$simple_pitch_type) %>% prop.table(1)
##### Individual Counts #####
# For each two-strike count: plot pitch locations faceted by the previous
# count and previous pitch, then tabulate the current pitch mix conditional
# on the previous pitch (and batter hand). All logical subscripts below use
# ervin.seq consistently; the original mixed ervin$count with ervin.seq
# columns, which only worked because the two frames happened to share row
# order and would silently misalign if either were reordered or filtered.
# 0-2 count
ggplot(data = filter(ervin.seq, count == "0-2"), aes(px, pz, color = simple_pitch_type)) +
facet_grid(prev_count~prev_pitch) +
geom_point(size = 2) +
geom_polygon(data = strike.zone, aes(x = x, y = y, color = NA), fill = NA, color = "black") +
coord_fixed()
# Previous pitch (rows) vs current pitch in 0-2 counts, row proportions.
table(ervin.seq$prev_pitch[ervin.seq$count == "0-2"],
ervin.seq$simple_pitch_type[ervin.seq$count == "0-2"]) %>%
prop.table(1)
# Same transition table additionally split by batter hand.
table(ervin.seq$prev_pitch[ervin.seq$count == "0-2"],
ervin.seq$simple_pitch_type[ervin.seq$count == "0-2"],
ervin.seq$b_hand[ervin.seq$count == "0-2"]) %>%
prop.table(c(1, 3))
ggplot(data = filter(ervin.seq, count == "0-2"), aes(x = b_hand, fill = simple_pitch_type)) +
facet_grid(prev_count~prev_pitch) + geom_bar(position = "fill")
# 1-2 count
ggplot(data = filter(ervin.seq, count == "1-2"), aes(px, pz, color = simple_pitch_type)) +
facet_grid(prev_count~prev_pitch) +
geom_point(size = 2) +
geom_polygon(data = strike.zone, aes(x = x, y = y, color = NA), fill = NA, color = "black") +
coord_fixed()
table(ervin.seq$prev_pitch[ervin.seq$count == "1-2"],
ervin.seq$simple_pitch_type[ervin.seq$count == "1-2"]) %>%
prop.table(1)
table(ervin.seq$prev_pitch[ervin.seq$count == "1-2"],
ervin.seq$simple_pitch_type[ervin.seq$count == "1-2"],
ervin.seq$b_hand[ervin.seq$count == "1-2"]) %>%
prop.table(c(1, 3))
ggplot(data = filter(ervin.seq, count == "1-2"), aes(x = b_hand, fill = simple_pitch_type)) +
facet_grid(prev_count~prev_pitch) + geom_bar(position = "fill")
# ASIDE: Fouled off pitches (count == prev_count means the count did not
# change, i.e. the previous pitch was fouled off with two strikes).
ggplot(data = filter(ervin.seq, count == prev_count, b_hand == "L"), aes(x = b_hand, fill = simple_pitch_type)) +
facet_grid(prev_count~prev_pitch) + geom_bar(position = "fill")
table((ervin.seq$prev_count == "1-2")[ervin.seq$count == "1-2"],
(ervin.seq$simple_pitch_type == "CH")[ervin.seq$count == "1-2"]) %>%
prop.table(c(1))
table((ervin.seq$count == ervin.seq$prev_count)[ervin.seq$strikes == 2 & ervin.seq$balls < 2],
(ervin.seq$simple_pitch_type == "CH")[ervin.seq$strikes == 2 & ervin.seq$balls < 2]) %>%
prop.table(c(1))
ggplot(data = filter(ervin.seq, count == prev_count, b_hand == "R", prev_pitch != "CH"), aes(x = b_hand, fill = simple_pitch_type)) +
facet_grid(prev_count~prev_pitch) + geom_bar(position = "fill")
table(ervin.seq$prev_pitch[ervin.seq$count == ervin.seq$prev_count & ervin.seq$b_hand == "R"],
ervin.seq$simple_pitch_type[ervin.seq$count == ervin.seq$prev_count & ervin.seq$b_hand == "R"])
# 2-2 count
ggplot(data = filter(ervin.seq, count == "2-2"), aes(px, pz, color = simple_pitch_type)) +
facet_grid(prev_count~prev_pitch) +
geom_point(size = 2) +
geom_polygon(data = strike.zone, aes(x = x, y = y, color = NA), fill = NA, color = "black") +
coord_fixed()
table(ervin.seq$prev_pitch[ervin.seq$count == "2-2"],
ervin.seq$simple_pitch_type[ervin.seq$count == "2-2"]) %>%
prop.table(1)
table(ervin.seq$prev_pitch[ervin.seq$count == "2-2"],
ervin.seq$simple_pitch_type[ervin.seq$count == "2-2"],
ervin.seq$b_hand[ervin.seq$count == "2-2"]) %>%
prop.table(c(1, 3))
ggplot(data = filter(ervin.seq, count == "2-2"), aes(x = b_hand, fill = simple_pitch_type)) +
facet_grid(prev_count~prev_pitch) + geom_bar(position = "fill")
# 3-2 count
ggplot(data = filter(ervin.seq, count == "3-2"), aes(px, pz, color = simple_pitch_type)) +
facet_grid(prev_count~prev_pitch) +
geom_point(size = 2) +
geom_polygon(data = strike.zone, aes(x = x, y = y, color = NA), fill = NA, color = "black") +
coord_fixed()
table(ervin.seq$prev_pitch[ervin.seq$count == "3-2"],
ervin.seq$simple_pitch_type[ervin.seq$count == "3-2"]) %>%
prop.table(1)
table(ervin.seq$prev_pitch[ervin.seq$count == "3-2"],
ervin.seq$simple_pitch_type[ervin.seq$count == "3-2"],
ervin.seq$b_hand[ervin.seq$count == "3-2"]) %>%
prop.table(c(1, 3))
ggplot(data = filter(ervin.seq, count == "3-2"), aes(x = b_hand, fill = simple_pitch_type)) +
facet_grid(prev_count~prev_pitch) + geom_bar(position = "fill")
# Second pitch
ggplot(data = filter(ervin.seq, count %in% c("1-0", "0-1")), aes(x = b_hand, fill = simple_pitch_type)) +
facet_grid(count~prev_pitch) + geom_bar(position = "fill")
#############
# By inning #
#############
# Pitch-type mix by inning and batter hand (proportions within each
# inning x hand cell).
table(ervin$simple_pitch_type, ervin$inning, ervin$b_hand) %>% prop.table(c(2, 3))
# Per-inning frequency of each pitch type, reshaped long for plotting.
inning.df <- ervin %>% group_by(inning) %>%
summarize(Fastball = sum(simple_pitch_type == "FF")/n(),
Slider = sum(simple_pitch_type == "SL")/n(),
Changeup = sum(simple_pitch_type == "CH")/n(),
Total = n()) %>%
ungroup() %>%
gather(key = Pitch, value = Frequency, Fastball, Slider, Changeup)
ggplot(data = filter(inning.df, inning <= 7), aes(x = inning, y = Frequency, color = Pitch)) +
geom_line(size = 2) + geom_point(size = 4) +
coord_cartesian(ylim = c(0, .75)) +
labs(x = "Inning", y = "Frequency",
title = "Pitch Type by Inning", color = "Pitch") +
scale_color_manual(values = c("Fastball" = "#e41a1c", "Slider" = "#377eb8", "Changeup" = "#4daf4a")) +
scale_x_continuous(breaks = 1:9) +
theme(#legend.position = "bottom",
legend.title = element_text(family = "Trebuchet MS", color="#666666", face="bold", size=15),
legend.text = element_text(family = "Trebuchet MS", color="#666666", face="bold", size=12),
plot.title = element_text(family = "Trebuchet MS", color="#666666", face="bold", size=30, hjust=0),
axis.title = element_text(family = "Trebuchet MS", color="#666666", face="bold", size=20),
axis.text.y = element_text(family = "Trebuchet MS", color = "#666666", size = 15),
axis.text.x = element_text(family = "Trebuchet MS", color = "#666666", size = 15))
# Restrict to starts in which the pitcher reached at least the 6th inning,
# then recompute the per-inning mix. NOTE(review): the original flag was
# named into_seventh while the condition tests inning >= 6; the name is
# corrected here to match the actual behavior -- confirm whether
# inning >= 7 was intended instead.
inning.df <- ervin %>% group_by(gid) %>%
mutate(reached_sixth = any(inning >= 6)) %>% filter(reached_sixth) %>%
ungroup() %>% group_by(inning) %>%
summarize(Fastball = sum(simple_pitch_type == "FF")/n(),
Slider = sum(simple_pitch_type == "SL")/n(),
Changeup = sum(simple_pitch_type == "CH")/n(),
Total = n()) %>%
ungroup() %>%
gather(key = Pitch, value = Frequency, Fastball, Slider, Changeup)
ggplot(data = filter(inning.df, inning <= 7), aes(x = inning, y = Frequency, color = Pitch)) +
geom_line() + geom_point()
##################
# By baserunners #
##################
# Flag base-runner situations: any runner on base, a runner on third, and a
# runner on second (runner_* columns are NA when the base is empty).
ervin_b <- ervin %>%
  mutate(baserunner = !(is.na(runner_1) & is.na(runner_2) & is.na(runner_3)),
         on_third = !is.na(runner_3),
         on_second = !is.na(runner_2))
# Pitch-type mix conditional on each base-runner flag (row proportions).
prop.table(table(ervin_b$baserunner, ervin_b$simple_pitch_type), 1)
prop.table(table(ervin_b$on_third, ervin_b$simple_pitch_type), 1)
prop.table(table(ervin_b$on_second, ervin_b$simple_pitch_type), 1)
#################
# Pitch strings #
#################
# Collapse each at bat into a string of pitch-type initials (F/S/C) and
# compare the observed frequency of every 2- and 3-pitch pattern against a
# shuffled baseline, to see whether pitch ordering departs from random.
# Total number of (possibly overlapping) matches of each pattern across all
# at-bat strings. gregexpr returns -1 for strings with no match, so only
# positive match positions are counted.
count_pattern_freq <- function(patterns, seq_strings) {
  vapply(patterns, function(p) {
    gregexpr(p, seq_strings, perl = TRUE) %>%
      vapply(function(m) sum(m > 0), numeric(1)) %>%
      sum()
  }, numeric(1), USE.NAMES = FALSE)
}
# Actual sequences: one string per at bat (keyed by game id + at-bat number).
abid <- paste0(ervin$gid, ervin$ab_num)
pitches.1 <- ervin$simple_pitch_type %>% as.character() %>% substr(1, 1)
seqs <- tapply(pitches.1, abid, paste0, collapse = "", simplify = TRUE) %>%
  as.vector()
ps <- c("F", "S", "C")
all.duos <- paste0(rep(ps, each = 3), ps)
all.trios <- paste0(rep(all.duos, each = 3), ps)
# Zero-width lookahead patterns so overlapping occurrences all count
# (e.g. "FF" matches twice in "FFF").
duos.to.match <- paste0("(?=", all.duos, ")")
trios.to.match <- paste0("(?=", all.trios, ")")
duos.freq <- count_pattern_freq(duos.to.match, seqs)
trios.freq <- count_pattern_freq(trios.to.match, seqs)
# Random baseline: shuffle the pitch labels 100 times, rebuild the at-bat
# strings, and recount every pattern.
rand.freqs <- matrix(0, nrow = length(all.duos) + length(all.trios), ncol = 100)
for (j in seq_len(ncol(rand.freqs))) {
  pitches.1.r <- sample(pitches.1)
  seqs.r <- tapply(pitches.1.r, abid, paste0, collapse = "", simplify = TRUE) %>%
    as.vector()
  rand.freqs[, j] <- c(count_pattern_freq(duos.to.match, seqs.r),
                       count_pattern_freq(trios.to.match, seqs.r))
  print(j) # progress indicator
}
# Observed vs expected (mean over shuffles) frequency per pattern, with
# percent difference from the random baseline.
seqs.df <- data.frame(pattern = c(all.duos, all.trios),
                      freq = c(duos.freq, trios.freq),
                      exp = apply(rand.freqs, 1, mean)) %>%
  mutate(p_diff = (freq - exp)/exp)
duos.df <- seqs.df[1:9,]
trios.df <- seqs.df[10:nrow(seqs.df),]
# Observed vs expected scatter; patterns above the red y = x line occur
# more often than chance.
ggplot(data = duos.df, aes(x = exp, y = freq, label = pattern)) +
  geom_text() +
  geom_abline(slope = 1, intercept = 0, color = "red")
ggplot(data = trios.df, aes(x = exp, y = freq, label = pattern)) +
  geom_text() +
  geom_abline(slope = 1, intercept = 0, color = "red")
# Percent deviation from random for common patterns (expected count > 100).
ggplot(data = filter(duos.df, exp > 100), aes(x = reorder(pattern, -p_diff), y = p_diff)) +
  geom_bar(stat = "identity", fill = twins_blue, color = twins_gold, size = 1) + theme_minimal()
ggplot(data = filter(trios.df, exp > 100), aes(x = reorder(pattern, -p_diff), y = p_diff)) + geom_bar(stat = "identity")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/slim_lang.R
\name{outputMS}
\alias{outputMS}
\alias{Genome$outputMS}
\alias{.G$outputMS}
\title{SLiM method outputMS}
\usage{
outputMS(filePath, append, filterMonomorphic)
}
\arguments{
\item{filePath}{An object of type null or string. Must be of length 1 (a
singleton). The default value is \code{NULL}. See details for description.}
\item{append}{An object of type logical. Must be of length 1 (a singleton). The
default value is \code{F}. See details for description.}
\item{filterMonomorphic}{An object of type logical. Must be of length 1 (a
singleton). The default value is \code{F}. See details for description.}
}
\value{
An object of type void.
}
\description{
Documentation for SLiM function \code{outputMS}, which is a method of the SLiM
class \code{Genome}.
Note that the R function is a stub, it does not do anything in R (except bring
up this documentation). It will only do
anything useful when used inside a \code{\link{slim_block}} function further
nested in a \code{\link{slim_script}}
function call, where it will be translated into valid SLiM code as part of a
full SLiM script.
}
\details{
Output the target genomes in MS format (see section 25.3.2 for output
format details). This low-level output method may be used to output any sample
of Genome objects (the Eidos function sample() may be useful for constructing
custom samples, as may the SLiM class Individual). For output of a sample
from a single Subpopulation, the outputMSSample() of Subpopulation may be
more straightforward to use. If the optional parameter filePath is NULL (the
default), output is directed to SLiM’s standard output. Otherwise, the output
is sent to the file specified by filePath, overwriting that file if append is F,
or appending to the end of it if append is T. Positions in the output will span
the interval [0,1]. If filterMonomorphic is F (the default), all mutations that
are present in the sample will be included in the output. This means that some
mutations may be included that are actually monomorphic within the sample (i.e.,
that exist in every sampled genome, and are thus apparently fixed). These may be
filtered out with filterMonomorphic = T if desired; note that this option means
that some mutations that do exist in the sampled genomes might not be included
in the output, simply because they exist in every sampled genome. See output()
and outputVCF() for other output formats. Output is generally done in a late()
event, so that the output reflects the state of the simulation at the end of a
generation.
}
\section{Copyright}{
This is documentation for a function in the SLiM software, and has been
reproduced from the official manual,
which can be found here: \url{http://benhaller.com/slim/SLiM_Manual.pdf}. This
documentation is
Copyright © 2016–2020 Philipp Messer. All rights reserved. More information
about SLiM can be found
on the official website: \url{https://messerlab.org/slim/}
}
\author{
Benjamin C Haller (\email{bhaller@benhaller.com}) and Philipp W Messer
(\email{messer@cornell.edu})
}
| /man/outputMS.Rd | permissive | rdinnager/slimrlang | R | false | true | 3,118 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/slim_lang.R
\name{outputMS}
\alias{outputMS}
\alias{Genome$outputMS}
\alias{.G$outputMS}
\title{SLiM method outputMS}
\usage{
outputMS(filePath, append, filterMonomorphic)
}
\arguments{
\item{filePath}{An object of type null or string. Must be of length 1 (a
singleton). The default value is \code{NULL}. See details for description.}
\item{append}{An object of type logical. Must be of length 1 (a singleton). The
default value is \code{F}. See details for description.}
\item{filterMonomorphic}{An object of type logical. Must be of length 1 (a
singleton). The default value is \code{F}. See details for description.}
}
\value{
An object of type void.
}
\description{
Documentation for SLiM function \code{outputMS}, which is a method of the SLiM
class \code{Genome}.
Note that the R function is a stub, it does not do anything in R (except bring
up this documentation). It will only do
anything useful when used inside a \code{\link{slim_block}} function further
nested in a \code{\link{slim_script}}
function call, where it will be translated into valid SLiM code as part of a
full SLiM script.
}
\details{
Output the target genomes in MS format (see section 25.3.2 for output
format details). This low-level output method may be used to output any sample
of Genome objects (the Eidos function sample() may be useful for constructing
custom samples, as may the SLiM class Individual). For output of a sample
from a single Subpopulation, the outputMSSample() of Subpopulation may be
more straightforward to use. If the optional parameter filePath is NULL (the
default), output is directed to SLiM’s standard output. Otherwise, the output
is sent to the file specified by filePath, overwriting that file if append is F,
or appending to the end of it if append is T. Positions in the output will span
the interval [0,1]. If filterMonomorphic is F (the default), all mutations that
are present in the sample will be included in the output. This means that some
mutations may be included that are actually monomorphic within the sample (i.e.,
that exist in every sampled genome, and are thus apparently fixed). These may be
filtered out with filterMonomorphic = T if desired; note that this option means
that some mutations that do exist in the sampled genomes might not be included
in the output, simply because they exist in every sampled genome. See output()
and outputVCF() for other output formats. Output is generally done in a late()
event, so that the output reflects the state of the simulation at the end of a
generation.
}
\section{Copyright}{
This is documentation for a function in the SLiM software, and has been
reproduced from the official manual,
which can be found here: \url{http://benhaller.com/slim/SLiM_Manual.pdf}. This
documentation is
Copyright © 2016–2020 Philipp Messer. All rights reserved. More information
about SLiM can be found
on the official website: \url{https://messerlab.org/slim/}
}
\author{
Benjamin C Haller (\email{bhaller@benhaller.com}) and Philipp W Messer
(\email{messer@cornell.edu})
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/query.R
\name{qry_sparql}
\alias{qry_sparql}
\title{Constructor function for SPARQL queries.}
\usage{
qry_sparql(query_string, params = NULL)
}
\arguments{
\item{query_string}{SPARQL query string.}
\item{params}{Sequence of named query parameters.}
}
\value{
Object of type \code{sparql}.
}
\description{
Constructor function for SPARQL queries.
}
| /man/qry_sparql.Rd | permissive | MarkEdmondson1234/data.world-r | R | false | true | 427 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/query.R
\name{qry_sparql}
\alias{qry_sparql}
\title{Constructor function for SPARQL queries.}
\usage{
qry_sparql(query_string, params = NULL)
}
\arguments{
\item{query_string}{SPARQL query string.}
\item{params}{Sequence of named query parameters.}
}
\value{
Object of type \code{sparql}.
}
\description{
Constructor function for SPARQL queries.
}
|
#Revision for test
#ex1
# Colony growth example: load the data, plot it with a connecting line, a
# least-squares fit, and a lowess smoother, then summarize ANOVA and
# linear-model fits of Count ~ Time.
# NOTE(review): hard-coded absolute Windows path -- this only runs on the
# original author's machine; confirm the data location before reuse.
colony <- read.csv("D:/University of Brighton/2016-2017 Data Analytics MSc/2016 MM705 - Multivariate Analysis and Statistical Modelling OPTIONAL SEM 2 20CR/colony.csv")
plot(colony)
lines(colony) #line connecting the instances
abline(lm(colony$Count~colony$Time), col="red") # regression line (y~x)
lines(lowess(colony$Time,colony$Count), col="blue") # lowess line (x,y)
# ANOVA summary (also assigns the fitted model to colony.model).
summary(colony.model <- aov(Count~Time, data = colony))
# Linear regression summary of the same relationship.
summary(lm(Count~Time, data = colony))
| /MM705/revision test.R | no_license | pkkirilov/MSc-Data-Analytics | R | false | false | 492 | r | #Revision for test
#ex1
# Duplicate copy of the colony-growth revision example (dataset artifact).
# NOTE(review): hard-coded absolute Windows path -- confirm before reuse.
colony <- read.csv("D:/University of Brighton/2016-2017 Data Analytics MSc/2016 MM705 - Multivariate Analysis and Statistical Modelling OPTIONAL SEM 2 20CR/colony.csv")
plot(colony)
lines(colony) #line connecting the instances
abline(lm(colony$Count~colony$Time), col="red") # regression line (y~x)
lines(lowess(colony$Time,colony$Count), col="blue") # lowess line (x,y)
# ANOVA summary (assigns the fitted model to colony.model), then the
# linear-model summary of Count ~ Time.
summary(colony.model <- aov(Count~Time, data = colony))
summary(lm(Count~Time, data = colony))
|
# Level-three classification model: combine positive (data_1) and negative
# (data_0) breakout samples and fit a classification tree on all features.
# NOTE(review): rm(list = ls()) wipes the current workspace -- avoid when
# sourcing this script from an interactive session you care about.
rm(list=ls())
library(data.table)
library(smbinning)
library(plyr)
library(caTools)
library(glmnet)
library(glm) # NOTE(review): no CRAN package named "glm" (glm() lives in stats); this call likely errors -- confirm.
library(glm2)
library(rpart)
library(rpart.plot)
library(RColorBrewer)
library(rattle)
library(glmulti)
# NOTE(review): hard-coded absolute path; only valid on the original machine.
setwd('D:/Confidential/Projects/Steel/LD2 BDS/prelim_analysis/data/second iteration/constructed_data/')
data_1 = read.csv('dat_1_level_three_18_08.csv')
data_0 = read.csv('dat_0_level_three_18_08.csv')
# Align the negative-class column names with the positive class.
colnames(data_0) = colnames(data_1)
# Drop the first three (identifier) columns from both classes.
data_1 = data_1[,-c(1,2,3)]
data_0 = data_0[,-c(1,2,3)]
mod_data = rbind(data_1,data_0)
# Derived feature: ratio of the two standard-deviation columns.
mod_data$ratio = mod_data$stdv1/mod_data$stdv2
# Classification tree on all remaining features; generous depth, small
# minimum split, default complexity pruning at cp = 0.01.
tree_model = rpart(y~.,method="class",data=mod_data,control = rpart.control(maxdepth = 30,minsplit = 25,cp = 0.01))
# Plot the tree with per-node counts and percentages (extra = 101).
rpart.plot(tree_model,extra=101,digits=5,nn=FALSE,branch=0.5,cex = 0.75)
| /model_building_level_three.R | no_license | anurgbht/BDS_modelling | R | false | false | 810 | r | rm(list=ls())
# Duplicate copy of the level-three model-building script (dataset
# artifact); see the notes on the first copy.
library(data.table)
library(smbinning)
library(plyr)
library(caTools)
library(glmnet)
library(glm) # NOTE(review): no CRAN package named "glm"; likely errors.
library(glm2)
library(rpart)
library(rpart.plot)
library(RColorBrewer)
library(rattle)
library(glmulti)
setwd('D:/Confidential/Projects/Steel/LD2 BDS/prelim_analysis/data/second iteration/constructed_data/')
data_1 = read.csv('dat_1_level_three_18_08.csv')
data_0 = read.csv('dat_0_level_three_18_08.csv')
colnames(data_0) = colnames(data_1)
data_1 = data_1[,-c(1,2,3)]
data_0 = data_0[,-c(1,2,3)]
mod_data = rbind(data_1,data_0)
mod_data$ratio = mod_data$stdv1/mod_data$stdv2
tree_model = rpart(y~.,method="class",data=mod_data,control = rpart.control(maxdepth = 30,minsplit = 25,cp = 0.01))
rpart.plot(tree_model,extra=101,digits=5,nn=FALSE,branch=0.5,cex = 0.75)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mod.prev.R
\name{prevalence_het}
\alias{prevalence_het}
\title{Prevalence Module}
\usage{
prevalence_het(dat, at)
}
\arguments{
\item{dat}{Master data list object of class \code{dat} containing networks,
individual-level attributes, and summary statistics.}
\item{at}{Current time step.}
}
\description{
Module function to calculate and store summary statistics for
disease prevalence, demographics, and other epidemiological
outcomes.
}
| /man/prevalence_het.Rd | no_license | EpiModel/EpiModelHIVhet | R | false | true | 544 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mod.prev.R
\name{prevalence_het}
\alias{prevalence_het}
\title{Prevalence Module}
\usage{
prevalence_het(dat, at)
}
\arguments{
\item{dat}{Master data list object of class \code{dat} containing networks,
individual-level attributes, and summary statistics.}
\item{at}{Current time step.}
}
\description{
Module function to calculate and store summary statistics for
disease prevalence, demographics, and other epidemiological
outcomes.
}
|
#test_math_funcs.r
# testthat unit tests for num_order_to_word() and area_hex().
# (testthat itself is loaded by the test runner, hence the commented-out
# library call; calls are inconsistently prefixed with testthat:: below.)
# library(testthat)
library(tibble)
# Unit tests for num_order_to_word() ####
# Non-numeric or missing input must raise an error.
test_that("Missing and incorrect input to x", {
expect_error(num_order_to_word())
expect_error(num_order_to_word(x = "a"))
expect_error(num_order_to_word(x = list(5435435, 55435435)))
expect_error(num_order_to_word(x = data.frame(a = c(5435435, 55435435))))
})
# Fixture lookup table mapping powers of ten to order-of-magnitude words.
# NOTE(review): "nonillian"/"octillian" look misspelled ("nonillion",
# "octillion") -- confirm whether they intentionally mirror the package's
# internal table.
test_lookup <- tibble(expon = c(33, 30, 27, 24, 21, 18, 15, 12, 9, 6, 3,
0, -3, -6, -9, -12),
word = c("decillion", "nonillian", "octillian",
"septillion", "sextillion", "quintillion", "quadrillion",
"trillion", "billion", "million", "thousand",
"", "thousandth", "millionth", "billionth",
"trillionth"))
# Variants: integer exponent column and factor word column.
test_lookup2 <- test_lookup
test_lookup2[["expon"]] <- as.integer(test_lookup2[["expon"]])
test_lookup3 <- test_lookup
test_lookup3[["word"]] <- as.factor(test_lookup3[["word"]])
# expect_error(..., NA) is the testthat idiom for "expect NO error": all
# three lookup-table variants must be accepted.
test_that("Check input of lookup table.", {
expect_error(num_order_to_word(5435435, lookup = test_lookup), NA)
expect_error(num_order_to_word(5435435, lookup = test_lookup2), NA)
expect_error(num_order_to_word(5435435, lookup = test_lookup3), NA)
})
rm(test_lookup, test_lookup2, test_lookup3)
# Unit tests for area_hex ####
testthat::test_that("Missing parameters", {
expect_error(area_hex())
})
# A geometrically suspect side/apothem combination should warn, not error.
testthat::test_that("Suspicious parameters", {
testthat::expect_warning(area_hex(3, 6))
})
testthat::test_that("Invalid parameters", {
expect_error(area_hex(3, "a"))
expect_error(area_hex("a", 6))
expect_error(area_hex(3, TRUE))
expect_error(area_hex(TRUE, 6))
})
# Unit tests for decimal_places ####
| /tests/testthat/test-math_funcs.R | no_license | tomhopper/numbr | R | false | false | 1,665 | r | #test_math_funcs.r
# Duplicate copy of the math-function tests (dataset artifact); see the
# notes on the first copy. expect_error(..., NA) means "expect no error".
# library(testthat)
library(tibble)
# Unit tests for num_order_to_word() ####
test_that("Missing and incorrect input to x", {
expect_error(num_order_to_word())
expect_error(num_order_to_word(x = "a"))
expect_error(num_order_to_word(x = list(5435435, 55435435)))
expect_error(num_order_to_word(x = data.frame(a = c(5435435, 55435435))))
})
test_lookup <- tibble(expon = c(33, 30, 27, 24, 21, 18, 15, 12, 9, 6, 3,
0, -3, -6, -9, -12),
word = c("decillion", "nonillian", "octillian",
"septillion", "sextillion", "quintillion", "quadrillion",
"trillion", "billion", "million", "thousand",
"", "thousandth", "millionth", "billionth",
"trillionth"))
test_lookup2 <- test_lookup
test_lookup2[["expon"]] <- as.integer(test_lookup2[["expon"]])
test_lookup3 <- test_lookup
test_lookup3[["word"]] <- as.factor(test_lookup3[["word"]])
test_that("Check input of lookup table.", {
expect_error(num_order_to_word(5435435, lookup = test_lookup), NA)
expect_error(num_order_to_word(5435435, lookup = test_lookup2), NA)
expect_error(num_order_to_word(5435435, lookup = test_lookup3), NA)
})
rm(test_lookup, test_lookup2, test_lookup3)
# Unit tests for area_hex ####
testthat::test_that("Missing parameters", {
expect_error(area_hex())
})
testthat::test_that("Suspicious parameters", {
testthat::expect_warning(area_hex(3, 6))
})
testthat::test_that("Invalid parameters", {
expect_error(area_hex(3, "a"))
expect_error(area_hex("a", 6))
expect_error(area_hex(3, TRUE))
expect_error(area_hex(TRUE, 6))
})
# Unit tests for decimal_places ####
|
#' Super resolution GAN model
#'
#' Super resolution generative adversarial network from the paper:
#'
#' https://arxiv.org/abs/1609.04802
#'
#' and ported from the Keras (python) implementation:
#'
#' https://github.com/eriklindernoren/Keras-GAN/blob/master/srgan/srgan.py
#'
#' @docType class
#'
#'
#' @section Arguments:
#' \describe{
#' \item{lowResolutionImageSize}{}
#' \item{numberOfResidualBlocks}{}
#' }
#'
#' @section Details:
#' \code{$initialize} {instantiates a new class and builds the
#' generator and discriminator.}
#' \code{$buildGenerator}{build generator.}
#' \code{$buildGenerator}{build discriminator.}
#'
#' @author Tustison NJ
#'
#' @examples
#' \dontrun{
#'
#' library( keras )
#' library( ANTsRNet )
#'
#' keras::backend()$clear_session()
#'
#' ganModel <- SuperResolutionGanModel$new(
#' lowResolutionImageSize = c( 112, 112, 3 ) )
#' testthat::expect_error({
#' ganModel <- SuperResolutionGanModel$new(
#' lowResolutionImageSize = c( 64, 65, 3 ) )
#' })
#' }
#'
#' @name SuperResolutionGanModel
NULL
#' @export
SuperResolutionGanModel <- R6::R6Class( "SuperResolutionGanModel",
inherit = NULL,
lock_objects = FALSE,
public = list(
dimensionality = 2,
lowResolutionImageSize = c( 64, 64, 3 ),
highResolutionImageSize = c( 256, 256, 3 ),
numberOfChannels = 3,
numberOfResidualBlocks = 16,
numberOfFiltersAtBaseLayer = c( 64, 64 ),
scaleFactor = 2,
useImageNetWeights = TRUE,
# Store the configuration and wire up the SRGAN: build the generator,
# compile the discriminator, build a frozen truncated-VGG feature
# extractor, and compile the combined adversarial model.
#
# lowResolutionImageSize     : input shape including channels; length 3
#   selects the 2-D model, length 4 the 3-D model.
# scaleFactor                : upsampling factor; must be 1, 2, 4, or 8.
# useImageNetWeights         : load pretrained imageNet weights into the
#   VGG feature extractor (forced off for 3-D, where none exist).
# numberOfResidualBlocks     : residual blocks in the generator.
# numberOfFiltersAtBaseLayer : base-layer filters as c( generator,
#   discriminator ).
initialize = function( lowResolutionImageSize,
scaleFactor = 2, useImageNetWeights = TRUE,
numberOfResidualBlocks = 16, numberOfFiltersAtBaseLayer = c( 64, 64 ) )
{
self$lowResolutionImageSize <- lowResolutionImageSize
# Channels are taken from the trailing dimension of the input shape.
self$numberOfChannels <- tail( self$lowResolutionImageSize, 1 )
self$numberOfResidualBlocks <- numberOfResidualBlocks
self$numberOfFiltersAtBaseLayer <- numberOfFiltersAtBaseLayer
self$useImageNetWeights <- useImageNetWeights
self$scaleFactor <- scaleFactor
if( ! scaleFactor %in% c( 1, 2, 4, 8 ) )
{
stop( "Error: scale factor must be one of 1, 2, 4, or 8." )
}
# Infer 2-D vs 3-D from the length of the input shape.
self$dimensionality <- NA
if( length( self$lowResolutionImageSize ) == 3 )
{
self$dimensionality <- 2
} else if( length( self$lowResolutionImageSize ) == 4 ) {
self$dimensionality <- 3
if( self$useImageNetWeights == TRUE )
{
self$useImageNetWeights <- FALSE
warning( "Warning: imageNet weights are unavailable for 3D." )
}
} else {
stop( "Incorrect size for lowResolutionImageSize.\n" )
}
# NOTE(review): `lr` was renamed `learning_rate` in newer keras releases;
# confirm against the keras version this package pins.
optimizer <- optimizer_adam( lr = 0.0002, beta_1 = 0.5 )
# Images
# High-resolution shape = spatial dims scaled by scaleFactor, channels kept.
self$highResolutionImageSize <- c( as.integer( self$scaleFactor ) *
self$lowResolutionImageSize[1:self$dimensionality], self$numberOfChannels )
highResolutionImage <- layer_input( shape = self$highResolutionImageSize )
lowResolutionImage <- layer_input( shape = self$highResolutionImageSize )
# Build a VGG-19 truncated at an intermediate layer, used as a fixed
# feature extractor for the perceptual (content) loss on high-resolution
# images. createVggModel2D/3D are package helpers; in 2-D with imageNet
# weights enabled, weights are copied from keras' pretrained VGG-19.
buildTruncatedVggModel = function()
{
vggTmp <- NULL
if( self$dimensionality == 2 )
{
if( self$useImageNetWeights == TRUE )
{
# Pretrained path: build at the canonical 224x224x3 imageNet input
# size and copy the pretrained weights across.
vggTmp <- createVggModel2D( c( 224, 224, 3 ), style = '19' )
kerasVgg <- application_vgg19( weights = "imagenet" )
vggTmp$set_weights( kerasVgg$get_weights() )
} else {
vggTmp <- createVggModel2D( self$highResolutionImageSize, style = '19' )
}
} else {
vggTmp <- createVggModel3D( self$highResolutionImageSize, style = '19' )
}
# Truncate: expose the output of layer 10 as the feature map used for
# the perceptual loss.
vggTmp$outputs = list( vggTmp$layers[[10]]$output )
highResolutionImage <- layer_input( self$highResolutionImageSize )
highResolutionImageFeatures <- vggTmp( highResolutionImage )
vggModel <- keras_model( inputs = highResolutionImage,
outputs = highResolutionImageFeatures )
return( vggModel )
},
# Build the SRResNet-style generator: a 9x9 entry convolution, a chain of
# residual blocks, a global skip connection around the chain, one
# upsampling stage per factor of 2 in self$scaleFactor, and a final 9x9
# convolution with tanh activation producing self$numberOfChannels output
# channels.
#
# numberOfFilters : filters used in the entry and post-residual
#   convolutions (the residual blocks use
#   self$numberOfFiltersAtBaseLayer[1]).
buildGenerator = function( numberOfFilters = 64 )
{
  # Residual block: conv -> ReLU -> BN -> conv -> BN -> add shortcut.
  buildResidualBlock <- function( input, numberOfFilters, kernelSize = 3 )
  {
    shortcut <- input
    if( self$dimensionality == 2 )
      {
      input <- input %>% layer_conv_2d( filters = numberOfFilters,
        kernel_size = kernelSize, strides = 1, padding = 'same' )
      } else {
      input <- input %>% layer_conv_3d( filters = numberOfFilters,
        kernel_size = kernelSize, strides = 1, padding = 'same' )
      }
    input <- input %>% layer_activation_relu()
    input <- input %>% layer_batch_normalization( momentum = 0.8 )
    if( self$dimensionality == 2 )
      {
      input <- input %>% layer_conv_2d( filters = numberOfFilters,
        kernel_size = kernelSize, strides = 1, padding = 'same' )
      } else {
      input <- input %>% layer_conv_3d( filters = numberOfFilters,
        kernel_size = kernelSize, strides = 1, padding = 'same' )
      }
    input <- input %>% layer_batch_normalization( momentum = 0.8 )
    input <- list( input, shortcut ) %>% layer_add()
    return( input )
  }
  # Upsample by 2 followed by a convolution and ReLU.
  buildDeconvolutionLayer <- function( input, numberOfFilters = 256, kernelSize = 3 )
  {
    model <- input
    if( self$dimensionality == 2 )
      {
      model <- model %>% layer_upsampling_2d( size = 2 )
      model <- model %>% layer_conv_2d( filters = numberOfFilters,
        kernel_size = kernelSize, strides = 1, padding = 'same' )
      } else {
      model <- model %>% layer_upsampling_3d( size = 2 )
      model <- model %>% layer_conv_3d( filters = numberOfFilters,
        kernel_size = kernelSize, strides = 1, padding = 'same' )
      }
    model <- model %>% layer_activation_relu()
    return( model )
  }
  image <- layer_input( shape = self$lowResolutionImageSize )
  # Entry convolution before the residual chain.
  preResidual <- image
  if( self$dimensionality == 2 )
    {
    preResidual <- preResidual %>% layer_conv_2d( filters = numberOfFilters,
      kernel_size = 9, strides = 1, padding = 'same' )
    } else {
    preResidual <- preResidual %>% layer_conv_3d( filters = numberOfFilters,
      kernel_size = 9, strides = 1, padding = 'same' )
    }
  preResidual <- preResidual %>% layer_activation_relu()
  residuals <- preResidual %>% buildResidualBlock(
    numberOfFilters = self$numberOfFiltersAtBaseLayer[1] )
  for( i in seq_len( self$numberOfResidualBlocks - 1 ) )
    {
    residuals <- residuals %>% buildResidualBlock(
      numberOfFilters = self$numberOfFiltersAtBaseLayer[1] )
    }
  postResidual <- residuals
  if( self$dimensionality == 2 )
    {
    postResidual <- postResidual %>% layer_conv_2d( filters = numberOfFilters,
      kernel_size = 3, strides = 1, padding = 'same' )
    } else {
    postResidual <- postResidual %>% layer_conv_3d( filters = numberOfFilters,
      kernel_size = 3, strides = 1, padding = 'same' )
    }
  postResidual <- postResidual %>% layer_batch_normalization( momentum = 0.8 )
  # Global skip connection around the residual chain.
  model <- list( postResidual, preResidual ) %>% layer_add()
  # One upsampling stage per factor of 2 in the scale factor.
  if( self$scaleFactor >= 2 )
    {
    model <- buildDeconvolutionLayer( model )
    }
  if( self$scaleFactor >= 4 )
    {
    model <- buildDeconvolutionLayer( model )
    }
  if( self$scaleFactor == 8 )
    {
    model <- buildDeconvolutionLayer( model )
    }
  # Final reconstruction convolution. BUG FIX: the 3-D branch previously
  # assigned this layer to postResidual instead of model, so the 3-D
  # generator's output silently skipped the final tanh convolution.
  if( self$dimensionality == 2 )
    {
    model <- model %>% layer_conv_2d( filters = self$numberOfChannels,
      kernel_size = 9, strides = 1, padding = 'same',
      activation = 'tanh' )
    } else {
    model <- model %>% layer_conv_3d( filters = self$numberOfChannels,
      kernel_size = 9, strides = 1, padding = 'same',
      activation = 'tanh' )
    }
  generator <- keras_model( inputs = image, outputs = model )
  return( generator )
},
# Build the SRGAN discriminator: eight convolutional stages alternating
# stride 1 and stride 2 while doubling the filter count, followed by a wide
# dense layer and a sigmoid validity output.
buildDiscriminator = function()
{
  # One discriminator stage: convolution -> LeakyReLU -> optional batch norm.
  convBlock <- function( x, filters, strides = 1, kernelSize = 3,
                         normalize = TRUE )
  {
    if( self$dimensionality == 2 )
      {
      x <- x %>% layer_conv_2d( filters, kernel_size = kernelSize,
        strides = strides, padding = 'same' )
      } else {
      x <- x %>% layer_conv_3d( filters, kernel_size = kernelSize,
        strides = strides, padding = 'same' )
      }
    x <- x %>% layer_activation_leaky_relu( alpha = 0.2 )
    if( normalize == TRUE )
      {
      x <- x %>% layer_batch_normalization( momentum = 0.8 )
      }
    return( x )
  }
  baseFilters <- self$numberOfFiltersAtBaseLayer[2]
  image <- layer_input( shape = self$highResolutionImageSize )
  # First stage has no batch normalization.
  x <- convBlock( image, baseFilters, normalize = FALSE )
  # Remaining stages as (filter multiplier, stride) pairs.
  for( spec in list( c( 1, 2 ), c( 2, 1 ), c( 2, 2 ), c( 4, 1 ),
                     c( 4, 2 ), c( 8, 1 ), c( 8, 2 ) ) )
    {
    x <- convBlock( x, baseFilters * spec[1], strides = spec[2] )
    }
  x <- x %>% layer_dense( units = baseFilters * 16 )
  x <- x %>% layer_activation_leaky_relu( alpha = 0.2 )
  validity <- x %>% layer_dense( units = 1, activation = 'sigmoid' )
  discriminator <- keras_model( inputs = image, outputs = validity )
  return( discriminator )
},
train = function( X_trainLowResolution, X_trainHighResolution, numberOfEpochs,
batchSize = 128, sampleInterval = NA, sampleFilePrefix = 'sample' )
{
# Alternating GAN training loop: each "epoch" draws one random batch,
# updates the discriminator on real vs. generated images, then updates the
# generator through the combined model.
#
# X_trainLowResolution / X_trainHighResolution: training image arrays,
#   indexed by sample along the first dimension (4-D for 2-D images,
#   5-D for 3-D images, per the subsetting below).
# sampleInterval: if not NA (2-D only), write a sample montage every
#   sampleInterval epochs under sampleFilePrefix.
#
# Discriminator targets: 1 = real patch, 0 = generated patch.
valid <- array( data = 1, dim = c( batchSize, self$discriminatorPatchSize ) )
fake <- array( data = 0, dim = c( batchSize, self$discriminatorPatchSize ) )
for( epoch in seq_len( numberOfEpochs ) )
{
# Random batch (samples may repeat across epochs).
indices <- sample.int( dim( X_trainLowResolution )[1], batchSize )
lowResolutionImages <- NULL
highResolutionImages <- NULL
if( self$dimensionality == 2 )
{
lowResolutionImages <- X_trainLowResolution[indices,,,, drop = FALSE]
highResolutionImages <- X_trainHighResolution[indices,,,, drop = FALSE]
} else {
lowResolutionImages <- X_trainLowResolution[indices,,,,, drop = FALSE]
highResolutionImages <- X_trainHighResolution[indices,,,,, drop = FALSE]
}
# train discriminator
fakeHighResolutionImages <- self$generator$predict( lowResolutionImages )
dLossReal <- self$discriminator$train_on_batch( highResolutionImages, valid )
dLossFake <- self$discriminator$train_on_batch( fakeHighResolutionImages, fake )
# Average the real/fake losses (and metrics) element-wise.
dLoss <- list()
for( i in seq_len( length( dLossReal ) ) )
{
dLoss[[i]] <- 0.5 * ( dLossReal[[i]] + dLossFake[[i]] )
}
# train generator
gLoss <- NULL
if( self$useImageNetWeights == TRUE )
{
# Perceptual (VGG-feature) target in addition to the adversarial label.
imageFeatures = self$vggModel$predict( highResolutionImages )
gLoss <- self$combinedModel$train_on_batch(
list( lowResolutionImages, highResolutionImages ),
list( valid, imageFeatures ) )
} else {
gLoss <- self$combinedModel$train_on_batch(
list( lowResolutionImages, highResolutionImages ), valid )
}
cat( "Epoch ", epoch, ": [Discriminator loss: ", dLoss[[1]], "] ",
"[Generator loss: ", gLoss[[1]], "]\n", sep = '' )
if( self$dimensionality == 2 )
{
if( ! is.na( sampleInterval ) )
{
if( ( ( epoch - 1 ) %% sampleInterval ) == 0 )
{
# Do a 2x3 grid
#
# low res image | high res image | original high res image
# low res image | high res image | original high res image
X <- list()
indices <- sample.int( dim( X_trainLowResolution )[1], 2 )
lowResolutionImage <- X_trainLowResolution[indices[1],,,, drop = FALSE]
highResolutionImage <- X_trainHighResolution[indices[1],,,, drop = FALSE]
X[[1]] <- lowResolutionImage
X[[2]] <- self$generator$predict( lowResolutionImage )
X[[3]] <- highResolutionImage
lowResolutionImage <- X_trainLowResolution[indices[2],,,, drop = FALSE]
highResolutionImage <- X_trainHighResolution[indices[2],,,, drop = FALSE]
X[[4]] <- lowResolutionImage
X[[5]] <- self$generator$predict( lowResolutionImage )
X[[6]] <- highResolutionImage
# Rescale each panel to [0, 1] for display and drop singleton dims.
for( i in seq_len( length( X ) ) )
{
X[[i]] <- ( X[[i]] - min( X[[i]] ) ) /
( max( X[[i]] ) - min( X[[i]] ) )
X[[i]] <- drop( X[[i]] )
}
# image_read/image_append/image_write are magick functions -- assumed
# attached by the package; TODO confirm namespace handling.
XrowA <- image_append(
c( image_read( X[[1]] ),
image_read( X[[2]] ),
image_read( X[[3]] ) ) )
XrowB <- image_append(
c( image_read( X[[4]] ),
image_read( X[[5]] ),
image_read( X[[6]] ) ) )
XAB <- image_append( c( XrowA, XrowB ), stack = TRUE )
sampleDir <- dirname( sampleFilePrefix )
if( ! dir.exists( sampleDir ) )
{
dir.create( sampleDir, showWarnings = TRUE, recursive = TRUE )
}
imageFileName <- paste0( sampleFilePrefix, "_iteration" , epoch, ".jpg" )
cat( " --> writing sample image: ", imageFileName, "\n" )
image_write( XAB, path = imageFileName, format = "jpg")
}
}
}
}
}
)
)
#' Super resolution GAN model
#'
#' Super resolution generative adversarial network from the paper:
#'
#' https://arxiv.org/abs/1609.04802
#'
#' and ported from the Keras (python) implementation:
#'
#' https://github.com/eriklindernoren/Keras-GAN/blob/master/srgan/srgan.py
#'
#' @docType class
#'
#'
#' @section Arguments:
#' \describe{
#' \item{lowResolutionImageSize}{}
#' \item{numberOfResidualBlocks}{}
#' }
#'
#' @section Details:
#' \code{$initialize} {instantiates a new class and builds the
#' generator and discriminator.}
#' \code{$buildGenerator}{build generator.}
#' \code{$buildDiscriminator}{build discriminator.}
#'
#' @author Tustison NJ
#'
#' @examples
#' \dontrun{
#'
#' library( keras )
#' library( ANTsRNet )
#'
#' keras::backend()$clear_session()
#'
#' ganModel <- SuperResolutionGanModel$new(
#' lowResolutionImageSize = c( 112, 112, 3 ) )
#' testthat::expect_error({
#' ganModel <- SuperResolutionGanModel$new(
#' lowResolutionImageSize = c( 64, 65, 3 ) )
#' })
#' }
#'
#' @name SuperResolutionGanModel
NULL
#' @export
SuperResolutionGanModel <- R6::R6Class( "SuperResolutionGanModel",
inherit = NULL,
lock_objects = FALSE,
public = list(
dimensionality = 2,
lowResolutionImageSize = c( 64, 64, 3 ),
highResolutionImageSize = c( 256, 256, 3 ),
numberOfChannels = 3,
numberOfResidualBlocks = 16,
numberOfFiltersAtBaseLayer = c( 64, 64 ),
scaleFactor = 2,
useImageNetWeights = TRUE,
initialize = function( lowResolutionImageSize,
scaleFactor = 2, useImageNetWeights = TRUE,
numberOfResidualBlocks = 16, numberOfFiltersAtBaseLayer = c( 64, 64 ) )
{
# Constructor: validates the configuration, infers 2-D vs 3-D from the
# length of lowResolutionImageSize (channels last), builds the generator,
# discriminator, and truncated-VGG feature model, and wires them into the
# combined model used for generator updates.
self$lowResolutionImageSize <- lowResolutionImageSize
self$numberOfChannels <- tail( self$lowResolutionImageSize, 1 )
self$numberOfResidualBlocks <- numberOfResidualBlocks
self$numberOfFiltersAtBaseLayer <- numberOfFiltersAtBaseLayer
self$useImageNetWeights <- useImageNetWeights
self$scaleFactor <- scaleFactor
if( ! scaleFactor %in% c( 1, 2, 4, 8 ) )
{
stop( "Error: scale factor must be one of 1, 2, 4, or 8." )
}
self$dimensionality <- NA
if( length( self$lowResolutionImageSize ) == 3 )
{
self$dimensionality <- 2
} else if( length( self$lowResolutionImageSize ) == 4 ) {
self$dimensionality <- 3
if( self$useImageNetWeights == TRUE )
{
# Pretrained imagenet VGG weights only exist for 2-D networks.
self$useImageNetWeights <- FALSE
warning( "Warning: imageNet weights are unavailable for 3D." )
}
} else {
stop( "Incorrect size for lowResolutionImageSize.\n" )
}
# NOTE(review): 'lr' is the legacy keras argument name (newer keras uses
# 'learning_rate') -- confirm against the installed keras version.
optimizer <- optimizer_adam( lr = 0.0002, beta_1 = 0.5 )
# Images
# High-resolution size = spatial dims scaled by scaleFactor, same channels.
self$highResolutionImageSize <- c( as.integer( self$scaleFactor ) *
self$lowResolutionImageSize[1:self$dimensionality], self$numberOfChannels )
highResolutionImage <- layer_input( shape = self$highResolutionImageSize )
lowResolutionImage <- layer_input( shape = self$lowResolutionImageSize )
# Build generator
self$generator <- self$buildGenerator()
fakeHighResolutionImage <- self$generator( lowResolutionImage )
# Build discriminator
self$discriminator <- self$buildDiscriminator()
self$discriminator$compile( loss = 'mse',
optimizer = optimizer, metrics = list( 'acc' ) )
# Vgg
# Frozen perceptual-loss network (VGG features of high-resolution images).
self$vggModel <- self$buildTruncatedVggModel()
self$vggModel$trainable <- FALSE
self$vggModel$compile( loss = 'mse', optimizer = optimizer,
metrics = list( 'accuracy') )
if( self$dimensionality == 2 )
{
self$discriminatorPatchSize <- c( 16, 16, 1 )
} else {
self$discriminatorPatchSize <- c( 16, 16, 16, 1 )
}
# unlist( self$vggModel$output_shape )[1:self$dimensionality], 1 )
# Discriminator
# Freeze the discriminator inside the combined model so that generator
# updates do not also update it.
self$discriminator$trainable <- FALSE
validity <- self$discriminator( fakeHighResolutionImage )
# Combined model
if( self$useImageNetWeights == TRUE )
{
# Generator loss = adversarial (weight 1e-3) + VGG-feature MSE (weight 1).
fakeFeatures <- self$vggModel( fakeHighResolutionImage )
self$combinedModel = keras_model( inputs = list( lowResolutionImage, highResolutionImage ),
outputs = list( validity, fakeFeatures ) )
self$combinedModel$compile( loss = list( 'binary_crossentropy', 'mse' ),
loss_weights = list( 1e-3, 1 ), optimizer = optimizer )
} else {
self$combinedModel = keras_model( inputs = list( lowResolutionImage, highResolutionImage ),
outputs = validity )
self$combinedModel$compile( loss = list( 'binary_crossentropy' ),
optimizer = optimizer )
}
},
buildTruncatedVggModel = function()
{
# Builds the perceptual-loss feature extractor: a VGG-19-style network
# truncated at layer 10, wrapped so it maps a high-resolution image to
# that layer's activations.  In 2-D with useImageNetWeights, the locally
# constructed VGG is populated with keras' pretrained imagenet weights.
vggTmp <- NULL
if( self$dimensionality == 2 )
{
if( self$useImageNetWeights == TRUE )
{
# Weight transfer assumes createVggModel2D's layer layout matches
# application_vgg19 -- presumably guaranteed elsewhere in the package.
vggTmp <- createVggModel2D( c( 224, 224, 3 ), style = '19' )
kerasVgg <- application_vgg19( weights = "imagenet" )
vggTmp$set_weights( kerasVgg$get_weights() )
} else {
vggTmp <- createVggModel2D( self$highResolutionImageSize, style = '19' )
}
} else {
vggTmp <- createVggModel3D( self$highResolutionImageSize, style = '19' )
}
# Truncate: expose layer 10's activations as the model output.
vggTmp$outputs = list( vggTmp$layers[[10]]$output )
highResolutionImage <- layer_input( self$highResolutionImageSize )
highResolutionImageFeatures <- vggTmp( highResolutionImage )
vggModel <- keras_model( inputs = highResolutionImage,
outputs = highResolutionImageFeatures )
return( vggModel )
},
buildGenerator = function( numberOfFilters = 64 )
{
# Builds the SRGAN generator: a 9x9 entry convolution + ReLU, a chain of
# self$numberOfResidualBlocks residual blocks, a global skip connection,
# one 2x upsampling stage per factor of 2 in self$scaleFactor, and a final
# 9x9 tanh convolution back to self$numberOfChannels.
#
# numberOfFilters: filter count of the pre-/post-residual convolutions
#   (the residual blocks themselves use self$numberOfFiltersAtBaseLayer[1]).
# Returns an uncompiled keras model: low-resolution -> high-resolution.
#
# One SRGAN residual block: conv -> ReLU -> BN -> conv -> BN -> add shortcut.
buildResidualBlock <- function( input, numberOfFilters, kernelSize = 3 )
{
shortcut <- input
if( self$dimensionality == 2 )
{
input <- input %>% layer_conv_2d( filters = numberOfFilters,
kernel_size = kernelSize, strides = 1, padding = 'same' )
} else {
input <- input %>% layer_conv_3d( filters = numberOfFilters,
kernel_size = kernelSize, strides = 1, padding = 'same' )
}
input <- input %>% layer_activation_relu()
input <- input %>% layer_batch_normalization( momentum = 0.8 )
if( self$dimensionality == 2 )
{
input <- input %>% layer_conv_2d( filters = numberOfFilters,
kernel_size = kernelSize, strides = 1, padding = 'same' )
} else {
input <- input %>% layer_conv_3d( filters = numberOfFilters,
kernel_size = kernelSize, strides = 1, padding = 'same' )
}
input <- input %>% layer_batch_normalization( momentum = 0.8 )
input <- list( input, shortcut ) %>% layer_add()
return( input )
}
# One 2x upsampling stage: upsample -> conv -> ReLU.
buildDeconvolutionLayer <- function( input, numberOfFilters = 256, kernelSize = 3 )
{
model <- input
if( self$dimensionality == 2 )
{
model <- model %>% layer_upsampling_2d( size = 2 )
model <- model %>% layer_conv_2d( filters = numberOfFilters,
kernel_size = kernelSize, strides = 1, padding = 'same' )
} else {
model <- model %>% layer_upsampling_3d( size = 2 )
model <- model %>% layer_conv_3d( filters = numberOfFilters,
kernel_size = kernelSize, strides = 1, padding = 'same' )
}
model <- model %>% layer_activation_relu()
return( model )
}
image <- layer_input( shape = self$lowResolutionImageSize )
preResidual <- image
if( self$dimensionality == 2 )
{
preResidual <- preResidual %>% layer_conv_2d( filters = numberOfFilters,
kernel_size = 9, strides = 1, padding = 'same' )
} else {
preResidual <- preResidual %>% layer_conv_3d( filters = numberOfFilters,
kernel_size = 9, strides = 1, padding = 'same' )
}
preResidual <- preResidual %>% layer_activation_relu()
residuals <- preResidual %>% buildResidualBlock(
numberOfFilters = self$numberOfFiltersAtBaseLayer[1] )
for( i in seq_len( self$numberOfResidualBlocks - 1 ) )
{
residuals <- residuals %>% buildResidualBlock(
numberOfFilters = self$numberOfFiltersAtBaseLayer[1] )
}
postResidual <- residuals
if( self$dimensionality == 2 )
{
postResidual <- postResidual %>% layer_conv_2d( filters = numberOfFilters,
kernel_size = 3, strides = 1, padding = 'same' )
} else {
postResidual <- postResidual %>% layer_conv_3d( filters = numberOfFilters,
kernel_size = 3, strides = 1, padding = 'same' )
}
postResidual <- postResidual %>% layer_batch_normalization( momentum = 0.8 )
# Global skip connection around the whole residual chain.
model <- list( postResidual, preResidual ) %>% layer_add()
# upsampling: one 2x stage per factor of 2 in the scale factor.
if( self$scaleFactor >= 2 )
{
model <- buildDeconvolutionLayer( model )
}
if( self$scaleFactor >= 4 )
{
model <- buildDeconvolutionLayer( model )
}
if( self$scaleFactor == 8 )
{
model <- buildDeconvolutionLayer( model )
}
if( self$dimensionality == 2 )
{
model <- model %>% layer_conv_2d( filters = self$numberOfChannels,
kernel_size = 9, strides = 1, padding = 'same',
activation = 'tanh' )
} else {
# BUG FIX: the final 3-D convolution's result was previously assigned to
# 'postResidual' and discarded, so the 3-D generator's output silently
# skipped the final tanh convolution.  Assign to 'model' as in 2-D.
model <- model %>% layer_conv_3d( filters = self$numberOfChannels,
kernel_size = 9, strides = 1, padding = 'same',
activation = 'tanh' )
}
generator <- keras_model( inputs = image, outputs = model )
return( generator )
},
buildDiscriminator = function()
{
# Builds the SRGAN discriminator: eight conv blocks with doubling filter
# counts (each count used at stride 1 then stride 2), a wide dense layer,
# and a final sigmoid unit giving the real/fake validity score.
#
# One block: conv -> leaky ReLU -> (optional) batch normalization.
buildLayer <- function( input, numberOfFilters, strides = 1,
kernelSize = 3, normalization = TRUE )
{
layer <- input
if( self$dimensionality == 2 )
{
layer <- layer %>% layer_conv_2d( numberOfFilters,
kernel_size = kernelSize, strides = strides, padding = 'same' )
} else {
layer <- layer %>% layer_conv_3d( numberOfFilters,
kernel_size = kernelSize, strides = strides, padding = 'same' )
}
layer <- layer %>% layer_activation_leaky_relu( alpha = 0.2 )
if( normalization == TRUE )
{
layer <- layer %>% layer_batch_normalization( momentum = 0.8 )
}
return( layer )
}
image <- layer_input( shape = self$highResolutionImageSize )
# First block has no batch normalization (reference SRGAN choice).
model <- image %>% buildLayer( self$numberOfFiltersAtBaseLayer[2],
normalization = FALSE )
model <- model %>% buildLayer( self$numberOfFiltersAtBaseLayer[2],
strides = 2 )
model <- model %>% buildLayer( self$numberOfFiltersAtBaseLayer[2] * 2 )
model <- model %>% buildLayer( self$numberOfFiltersAtBaseLayer[2] * 2,
strides = 2 )
model <- model %>% buildLayer( self$numberOfFiltersAtBaseLayer[2] * 4 )
model <- model %>% buildLayer( self$numberOfFiltersAtBaseLayer[2] * 4,
strides = 2 )
model <- model %>% buildLayer( self$numberOfFiltersAtBaseLayer[2] * 8 )
model <- model %>% buildLayer( self$numberOfFiltersAtBaseLayer[2] * 8,
strides = 2 )
model <- model %>%
layer_dense( units = self$numberOfFiltersAtBaseLayer[2] * 16 )
model <- model %>% layer_activation_leaky_relu( alpha = 0.2 )
validity <- model %>% layer_dense( units = 1, activation = 'sigmoid' )
discriminator <- keras_model( inputs = image, outputs = validity )
return( discriminator )
},
train = function( X_trainLowResolution, X_trainHighResolution, numberOfEpochs,
batchSize = 128, sampleInterval = NA, sampleFilePrefix = 'sample' )
{
# Alternating GAN training loop: each "epoch" draws one random batch,
# updates the discriminator on real vs. generated images, then updates the
# generator through the combined model.
#
# X_trainLowResolution / X_trainHighResolution: training image arrays,
#   indexed by sample along the first dimension (4-D for 2-D images,
#   5-D for 3-D images, per the subsetting below).
# sampleInterval: if not NA (2-D only), write a sample montage every
#   sampleInterval epochs under sampleFilePrefix.
#
# Discriminator targets: 1 = real patch, 0 = generated patch.
valid <- array( data = 1, dim = c( batchSize, self$discriminatorPatchSize ) )
fake <- array( data = 0, dim = c( batchSize, self$discriminatorPatchSize ) )
for( epoch in seq_len( numberOfEpochs ) )
{
# Random batch (samples may repeat across epochs).
indices <- sample.int( dim( X_trainLowResolution )[1], batchSize )
lowResolutionImages <- NULL
highResolutionImages <- NULL
if( self$dimensionality == 2 )
{
lowResolutionImages <- X_trainLowResolution[indices,,,, drop = FALSE]
highResolutionImages <- X_trainHighResolution[indices,,,, drop = FALSE]
} else {
lowResolutionImages <- X_trainLowResolution[indices,,,,, drop = FALSE]
highResolutionImages <- X_trainHighResolution[indices,,,,, drop = FALSE]
}
# train discriminator
fakeHighResolutionImages <- self$generator$predict( lowResolutionImages )
dLossReal <- self$discriminator$train_on_batch( highResolutionImages, valid )
dLossFake <- self$discriminator$train_on_batch( fakeHighResolutionImages, fake )
# Average the real/fake losses (and metrics) element-wise.
dLoss <- list()
for( i in seq_len( length( dLossReal ) ) )
{
dLoss[[i]] <- 0.5 * ( dLossReal[[i]] + dLossFake[[i]] )
}
# train generator
gLoss <- NULL
if( self$useImageNetWeights == TRUE )
{
# Perceptual (VGG-feature) target in addition to the adversarial label.
imageFeatures = self$vggModel$predict( highResolutionImages )
gLoss <- self$combinedModel$train_on_batch(
list( lowResolutionImages, highResolutionImages ),
list( valid, imageFeatures ) )
} else {
gLoss <- self$combinedModel$train_on_batch(
list( lowResolutionImages, highResolutionImages ), valid )
}
cat( "Epoch ", epoch, ": [Discriminator loss: ", dLoss[[1]], "] ",
"[Generator loss: ", gLoss[[1]], "]\n", sep = '' )
if( self$dimensionality == 2 )
{
if( ! is.na( sampleInterval ) )
{
if( ( ( epoch - 1 ) %% sampleInterval ) == 0 )
{
# Do a 2x3 grid
#
# low res image | high res image | original high res image
# low res image | high res image | original high res image
X <- list()
indices <- sample.int( dim( X_trainLowResolution )[1], 2 )
lowResolutionImage <- X_trainLowResolution[indices[1],,,, drop = FALSE]
highResolutionImage <- X_trainHighResolution[indices[1],,,, drop = FALSE]
X[[1]] <- lowResolutionImage
X[[2]] <- self$generator$predict( lowResolutionImage )
X[[3]] <- highResolutionImage
lowResolutionImage <- X_trainLowResolution[indices[2],,,, drop = FALSE]
highResolutionImage <- X_trainHighResolution[indices[2],,,, drop = FALSE]
X[[4]] <- lowResolutionImage
X[[5]] <- self$generator$predict( lowResolutionImage )
X[[6]] <- highResolutionImage
# Rescale each panel to [0, 1] for display and drop singleton dims.
for( i in seq_len( length( X ) ) )
{
X[[i]] <- ( X[[i]] - min( X[[i]] ) ) /
( max( X[[i]] ) - min( X[[i]] ) )
X[[i]] <- drop( X[[i]] )
}
# image_read/image_append/image_write are magick functions -- assumed
# attached by the package; TODO confirm namespace handling.
XrowA <- image_append(
c( image_read( X[[1]] ),
image_read( X[[2]] ),
image_read( X[[3]] ) ) )
XrowB <- image_append(
c( image_read( X[[4]] ),
image_read( X[[5]] ),
image_read( X[[6]] ) ) )
XAB <- image_append( c( XrowA, XrowB ), stack = TRUE )
sampleDir <- dirname( sampleFilePrefix )
if( ! dir.exists( sampleDir ) )
{
dir.create( sampleDir, showWarnings = TRUE, recursive = TRUE )
}
imageFileName <- paste0( sampleFilePrefix, "_iteration" , epoch, ".jpg" )
cat( " --> writing sample image: ", imageFileName, "\n" )
image_write( XAB, path = imageFileName, format = "jpg")
}
}
}
}
}
)
)
|
# Figure 5: per-grade/per-year counts of tested students (lower bound) and
# total tests for NYC public schools; writes ../figure/5a.png and 5b.png.
data <- read.csv("../data/all.csv", as.is=TRUE)
stopifnot(all(!is.na(data$n)))
# everything has a student count
stopifnot(all(!duplicated(data[, 1:4])))
# exactly one row per DBN-grade-year-subject
# best not to use the pre-computed sums; one error as seen earlier
data <- subset(data, grade != "All Grades")
data$grade <- as.numeric(data$grade)
# look at ELA and math: one row per dbn-grade-year with both subjects' counts
data <- merge(subset(data, subject=="Math"), subset(data, subject=="ELA"),
by=c("dbn", "grade", "year"), all=TRUE)
data <- data[, c(1:3, 5, 8)]
names(data)[4:5] <- c("Math", "ELA")
# missing means zero
data$Math[is.na(data$Math)] <- 0
data$ELA[is.na(data$ELA)] <- 0
# n = lower bound on tested students (a student takes at most one of each
# subject); m = total number of tests administered
data$n <- pmax(data$Math, data$ELA)
data$m <- data$Math + data$ELA
library(reshape)
# ddply/.()/summarize are plyr functions; load plyr explicitly rather than
# relying on reshape attaching it as a dependency.
library(plyr)
# Renamed from 'summary' to avoid shadowing base::summary.
gradeSummary <- ddply(data, .(year, grade), summarize, n = sum(n), m=sum(m))
library(ggplot2)
library(scales)
library(gridExtra)
a <- ggplot(gradeSummary) + aes(x=paste("grade", grade), y=n) + geom_point() +
theme_bw() + scale_y_continuous(labels=comma) + xlab("") +
ylab("lower bound on number of tested students")
b <- ggplot(gradeSummary) + aes(x=grade, y=n) + geom_line() + geom_point() +
facet_grid(~year) + theme_bw() + scale_y_continuous(labels=comma) +
ylab("lower bound on number of tested students")
png(width=800, height=640, filename="../figure/5a.png")
# NOTE(review): grid.arrange(main=) was replaced by top= in newer gridExtra
# versions -- confirm the installed version supports 'main'.
grid.arrange(a, b, main="\nFigure 5a. Lower bound on the number of tested students in NYC public schools (charter and non-charter) for grades 3-8 in 2006-2013")
dev.off()
a <- ggplot(gradeSummary) + aes(x=paste("grade", grade), y=m) + geom_point() +
theme_bw() + scale_y_continuous(labels=comma) + xlab("") +
ylab("total number of tests")
b <- ggplot(gradeSummary) + aes(x=grade, y=m) + geom_line() + geom_point() +
facet_grid(~year) + theme_bw() + scale_y_continuous(labels=comma) +
ylab("total number of tests")
png(width=800, height=640, filename="../figure/5b.png")
grid.arrange(a, b, main="\nFigure 5b. Total number of tests reported for NYC public schools (charter and non-charter) for grades 3-8 in 2006-2013")
dev.off()
| /code/figure5.r | no_license | ajschumacher/NYCtests | R | false | false | 2,031 | r |
data <- read.csv("../data/all.csv", as.is=TRUE)
stopifnot(all(!is.na(data$n)))
# everything has a student count
stopifnot(all(!duplicated(data[, 1:4])))
# exactly one row per DBN-grade-year-subject
# best not to use the pre-computed sums; one error as seen earlier
data <- subset(data, grade != "All Grades")
data$grade <- as.numeric(data$grade)
# look at ELA and math
data <- merge(subset(data, subject=="Math"), subset(data, subject=="ELA"),
by=c("dbn", "grade", "year"), all=TRUE)
data <- data[, c(1:3, 5, 8)]
names(data)[4:5] <- c("Math", "ELA")
# missing means zero
data$Math[is.na(data$Math)] <- 0
data$ELA[is.na(data$ELA)] <- 0
data$n <- pmax(data$Math, data$ELA)
data$m <- data$Math + data$ELA
library(reshape)
summary <- ddply(data, .(year, grade), summarize, n = sum(n), m=sum(m))
library(ggplot2)
library(scales)
library(gridExtra)
a <- ggplot(summary) + aes(x=paste("grade", grade), y=n) + geom_point() +
theme_bw() + scale_y_continuous(labels=comma) + xlab("") +
ylab("lower bound on number of tested students")
b <- ggplot(summary) + aes(x=grade, y=n) + geom_line() + geom_point() +
facet_grid(~year) + theme_bw() + scale_y_continuous(labels=comma) +
ylab("lower bound on number of tested students")
png(width=800, height=640, filename="../figure/5a.png")
grid.arrange(a, b, main="\nFigure 5a. Lower bound on the number of tested students in NYC public schools (charter and non-charter) for grades 3-8 in 2006-2013")
dev.off()
a <- ggplot(summary) + aes(x=paste("grade", grade), y=m) + geom_point() +
theme_bw() + scale_y_continuous(labels=comma) + xlab("") +
ylab("total number of tests")
b <- ggplot(summary) + aes(x=grade, y=m) + geom_line() + geom_point() +
facet_grid(~year) + theme_bw() + scale_y_continuous(labels=comma) +
ylab("total number of tests")
png(width=800, height=640, filename="../figure/5b.png")
grid.arrange(a, b, main="\nFigure 5b. Total number of tests reported for NYC public schools (charter and non-charter) for grades 3-8 in 2006-2013")
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/genericrules.R
\name{field_format}
\alias{field_format}
\title{Check whether a field conforms to a regular expression}
\usage{
field_format(x, pattern, type = c("glob", "regex"), ...)
}
\arguments{
\item{x}{Bare (unquoted) name of a variable.
Otherwise a vector of class \code{character}. Coerced to character as
necessary.}
\item{pattern}{\code{[character]} a regular expression}
\item{type}{\code{[character]} How to interpret \code{pattern}. In globbing,
the asterisk (`*`) is used as a wildcard that stands for 'zero or more
characters'.}
\item{...}{passed to grepl}
}
\description{
A convenience wrapper around \code{grepl} to make rule sets more readable.
}
\seealso{
Other format-checkers:
\code{\link{field_length}()},
\code{\link{number_format}()}
}
\concept{format-checkers}
| /man/field_format.Rd | no_license | cran/validate | R | false | true | 869 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/genericrules.R
\name{field_format}
\alias{field_format}
\title{Check whether a field conforms to a regular expression}
\usage{
field_format(x, pattern, type = c("glob", "regex"), ...)
}
\arguments{
\item{x}{Bare (unquoted) name of a variable.
Otherwise a vector of class \code{character}. Coerced to character as
necessary.}
\item{pattern}{\code{[character]} a regular expression}
\item{type}{\code{[character]} How to interpret \code{pattern}. In globbing,
the asterisk (`*`) is used as a wildcard that stands for 'zero or more
characters'.}
\item{...}{passed to grepl}
}
\description{
A convenience wrapper around \code{grepl} to make rule sets more readable.
}
\seealso{
Other format-checkers:
\code{\link{field_length}()},
\code{\link{number_format}()}
}
\concept{format-checkers}
|
# Compare TE expression between the L3 larval "3/Spermatocyte" cluster and
# the clusters of the Witt et al. wild-strain ("wt") dataset.  Produces a
# faceted correlation scatter plot plus the underlying table and test
# statistics; all output paths come from the snakemake object.
library(tidyverse)
library(ragg)
library(ggtext)
source("workflow/fig-scripts/theme.R")
# Lookup from larval cluster id (leading digits of "clusters2") to the
# renamed cluster label.
lookup <- read_tsv("results/figs/celltype_rename_table.tsv") %>%
mutate(clusters = str_extract(clusters2,"\\d+(?=\\/.+)")) %>%
dplyr::select(clusters, clusters.rename) %>%
deframe()
df <- read_csv("results/finalized/x-dataset-comparison/mod_scores.csv.gz", col_types = c("ccdddc")) %>%
mutate(clusters = ifelse(dataset=="larval",lookup[clusters],clusters)) %>%
dplyr::rename(X1='...1')
expression <- read_csv("results/finalized/x-dataset-comparison/te_expression.csv.gz", col_types = c("ccdc")) %>%
dplyr::rename(X1='...1')
# Spearman correlation of each cluster's mean per-feature expression against
# the larval 3/Spermatocyte reference, keeping only the wt dataset.
top_corr <- df %>%
dplyr::select(X1, clusters, dataset) %>%
left_join(expression,.) %>%
group_by(feature,dataset, clusters) %>%
summarize(mean.expr = mean(expression),.groups = "drop") %>%
left_join(dplyr::select(filter(.,dataset=="larval" & clusters == "3/Spermatocyte"), feature,ref = mean.expr), ., by="feature") %>%
group_by(dataset, clusters) %>%
do(tibble(corr=cor(.$ref,.$mean.expr, method = "spearman"))) %>%
filter(dataset =="wt")
# Per-feature means joined with the reference values and per-cluster
# correlations; clusters with corr > 0.2 are highlighted in the plot.
expr_corr_df <- df %>%
dplyr::select(X1, clusters, dataset) %>%
left_join(expression,.) %>%
group_by(feature,dataset, clusters) %>%
summarize(mean.expr = mean(expression),.groups = "drop") %>%
left_join(dplyr::select(filter(.,dataset=="larval" & clusters == "3/Spermatocyte"), feature,ref = mean.expr), ., by="feature") %>%
left_join(top_corr,.) %>%
#mutate(dataset = paste("Witt et al.",str_to_upper(dataset)))
filter(dataset=="wt") %>%
mutate(is_top_hit = corr > 0.2) %>%
mutate(clusters = fct_reorder(clusters,corr))
# Use TRUE/FALSE instead of the reassignable T/F shorthands.
g2 <- ggplot(expr_corr_df, aes(ref, mean.expr,color=is_top_hit)) +
geom_point(size=1) +
facet_wrap(~reorder(clusters,-corr), scales="free", strip.position = "left") +
ggpubr::stat_cor(color="black",method = "spearman",size=7/.pt) +
guides(color=FALSE) +
theme_gte21() +
theme(aspect.ratio = NULL, strip.placement = "outside") +
scale_color_gte21("binary",reverse = TRUE) +
xlab("Expression: L3 *w1118* 3/Spermatocyte") +
ylab("") +
geom_smooth(method = "lm",se = FALSE) +
ggtitle("Comparison with Witt et al. Wild Strain") +
theme(axis.title.x = element_markdown())
agg_png(snakemake@output[['png_tes']], width=10, height =10, units = 'in', scaling = 1.5, bitsize = 16, res = 300, background = 'transparent')
print(g2)
dev.off()
saveRDS(g2,snakemake@output[['ggp_tes']])
write_tsv(expr_corr_df,snakemake@output[['dat_tes']])
# Export stats info -----------------------------------------------------------------------------------
# Re-run cor.test per cluster so exact statistics and p-values accompany
# the figure.
raw.stats <- g2$data %>%
split(.$clusters) %>%
#map(dplyr::select,c("ref","mean_expr"))
#map(~{cor.test(x=.$ref, y=.$mean.expr,method = "spearman")})
map_df(~{broom::tidy(cor.test(x=.$ref, y=.$mean.expr,method = "spearman"),)},.id="comparison")
stats.export <- raw.stats %>%
mutate(script= "all_dataset_tep_scores.R") %>%
mutate(desc = "correlation of mean expression values between clusters/datasets") %>%
mutate(func = "stats::cor.test/ggpubr::stat_cor") %>%
mutate(ci = NA) %>%
mutate(comparison = paste("Witt et al. Wild Strain vs",comparison)) %>%
dplyr::select(script, comparison, desc, method, func, alternative,p.value,statistic=estimate, ci)
write_tsv(stats.export,snakemake@output[['stats']])
| /workflow/fig-scripts/all_dataset_tep_scores.R | permissive | Ellison-Lab/TestisTEs2021 | R | false | false | 3,311 | r | library(tidyverse)
# Compare TE expression between the L3 larval "3/Spermatocyte" cluster and
# the clusters of the Witt et al. wild-strain ("wt") dataset.  Produces a
# faceted correlation scatter plot plus the underlying table and test
# statistics; all output paths come from the snakemake object.
library(ragg)
library(ggtext)
source("workflow/fig-scripts/theme.R")
# Lookup from larval cluster id (leading digits of "clusters2") to the
# renamed cluster label.
lookup <- read_tsv("results/figs/celltype_rename_table.tsv") %>%
mutate(clusters = str_extract(clusters2,"\\d+(?=\\/.+)")) %>%
dplyr::select(clusters, clusters.rename) %>%
deframe()
df <- read_csv("results/finalized/x-dataset-comparison/mod_scores.csv.gz", col_types = c("ccdddc")) %>%
mutate(clusters = ifelse(dataset=="larval",lookup[clusters],clusters)) %>%
dplyr::rename(X1='...1')
expression <- read_csv("results/finalized/x-dataset-comparison/te_expression.csv.gz", col_types = c("ccdc")) %>%
dplyr::rename(X1='...1')
# Spearman correlation of each cluster's mean per-feature expression against
# the larval 3/Spermatocyte reference, keeping only the wt dataset.
top_corr <- df %>%
dplyr::select(X1, clusters, dataset) %>%
left_join(expression,.) %>%
group_by(feature,dataset, clusters) %>%
summarize(mean.expr = mean(expression),.groups = "drop") %>%
left_join(dplyr::select(filter(.,dataset=="larval" & clusters == "3/Spermatocyte"), feature,ref = mean.expr), ., by="feature") %>%
group_by(dataset, clusters) %>%
do(tibble(corr=cor(.$ref,.$mean.expr, method = "spearman"))) %>%
filter(dataset =="wt")
# Per-feature means joined with the reference values and per-cluster
# correlations; clusters with corr > 0.2 are highlighted in the plot.
expr_corr_df <- df %>%
dplyr::select(X1, clusters, dataset) %>%
left_join(expression,.) %>%
group_by(feature,dataset, clusters) %>%
summarize(mean.expr = mean(expression),.groups = "drop") %>%
left_join(dplyr::select(filter(.,dataset=="larval" & clusters == "3/Spermatocyte"), feature,ref = mean.expr), ., by="feature") %>%
left_join(top_corr,.) %>%
#mutate(dataset = paste("Witt et al.",str_to_upper(dataset)))
filter(dataset=="wt") %>%
mutate(is_top_hit = corr > 0.2) %>%
mutate(clusters = fct_reorder(clusters,corr))
g2 <- ggplot(expr_corr_df, aes(ref, mean.expr,color=is_top_hit)) +
geom_point(size=1) +
facet_wrap(~reorder(clusters,-corr), scales="free", strip.position = "left") +
ggpubr::stat_cor(color="black",method = "spearman",size=7/.pt) +
guides(color=F) +
theme_gte21() +
theme(aspect.ratio = NULL, strip.placement = "outside") +
scale_color_gte21("binary",reverse = T) +
xlab("Expression: L3 *w1118* 3/Spermatocyte") +
ylab("") +
geom_smooth(method = "lm",se = F) +
ggtitle("Comparison with Witt et al. Wild Strain") +
theme(axis.title.x = element_markdown())
agg_png(snakemake@output[['png_tes']], width=10, height =10, units = 'in', scaling = 1.5, bitsize = 16, res = 300, background = 'transparent')
print(g2)
dev.off()
saveRDS(g2,snakemake@output[['ggp_tes']])
write_tsv(expr_corr_df,snakemake@output[['dat_tes']])
# Export stats info -----------------------------------------------------------------------------------
# Re-run cor.test per cluster so exact statistics and p-values accompany
# the figure.
raw.stats <- g2$data %>%
split(.$clusters) %>%
#map(dplyr::select,c("ref","mean_expr"))
#map(~{cor.test(x=.$ref, y=.$mean.expr,method = "spearman")})
map_df(~{broom::tidy(cor.test(x=.$ref, y=.$mean.expr,method = "spearman"),)},.id="comparison")
stats.export <- raw.stats %>%
mutate(script= "all_dataset_tep_scores.R") %>%
mutate(desc = "correlation of mean expression values between clusters/datasets") %>%
mutate(func = "stats::cor.test/ggpubr::stat_cor") %>%
mutate(ci = NA) %>%
mutate(comparison = paste("Witt et al. Wild Strain vs",comparison)) %>%
dplyr::select(script, comparison, desc, method, func, alternative,p.value,statistic=estimate, ci)
write_tsv(stats.export,snakemake@output[['stats']])
|
#Draws the supply function (E vs PlantPsi) for the current soil state and plant hydraulic parameters
hydraulics.supplyFunctionPlot<-function(soil, x, type="E") {
psic = soil$psi
VG_nc = soil$VG_n
VG_alphac = soil$VG_alpha
VCroot_kmax = x$below$VCroot_kmax
VGrhizo_kmax = x$below$VGrhizo_kmax
pEmb = x$ProportionCavitated
numericParams = x$control$numericParams
VCroot_c = x$paramsTransp$VCroot_c
VCroot_d = x$paramsTransp$VCroot_d
VCstem_kmax = x$paramsTransp$VCstem_kmax
VCstem_c = x$paramsTransp$VCstem_c
VCstem_d = x$paramsTransp$VCstem_d
ncoh = nrow(x$above)
l = vector("list", ncoh)
for(i in 1:ncoh) {
psiCav = hydraulics.xylemPsi(1.0-pEmb[i], 1.0, VCstem_c[i], VCstem_d[i])
l[[i]] = hydraulics.supplyFunctionNetwork(psic,
VGrhizo_kmax[i,],VG_nc,VG_alphac,
VCroot_kmax[i,], VCroot_c[i],VCroot_d[i],
VCstem_kmax[i], VCstem_c[i],VCstem_d[i], psiCav = psiCav,
maxNsteps = numericParams$maxNsteps, psiStep = numericParams$psiStep,
psiMax = numericParams$psiMax, ntrial = numericParams$ntrial,
psiTol = numericParams$psiTol, ETol = numericParams$ETol)
}
if(type=="E") {
maxE = 0
minPsi = 0
for(i in 1:ncoh) {
maxE = max(maxE, max(l[[i]]$E, na.rm=T))
minPsi = min(minPsi, min(l[[i]]$PsiPlant))
}
for(i in 1:ncoh) {
if(i==1) {
plot(-l[[i]]$PsiPlant, l[[i]]$E, type="l", ylim=c(0,maxE+0.1), xlim=c(0,-minPsi),
xlab = "Plant pressure (-MPa)", ylab = "Flow rate")
} else {
lines(-l[[i]]$PsiPlant, l[[i]]$E, lty=i)
}
}
}
else if(type=="dEdP") {
maxdEdP = 0
minPsi = 0
for(i in 1:ncoh) {
maxdEdP = max(maxdEdP, max(l[[i]]$dEdP, na.rm=T))
minPsi = min(minPsi, min(l[[i]]$PsiPlant))
}
for(i in 1:ncoh) {
if(i==1) {
plot(-l[[i]]$PsiPlant, l[[i]]$dEdP, type="l", ylim=c(0,maxdEdP+0.1), xlim=c(0,-minPsi),
xlab = "Plant pressure (-MPa)", ylab = "dE/dP")
} else {
lines(-l[[i]]$PsiPlant, l[[i]]$dEdP, lty=i)
}
}
}
else if(type=="Elayers") {
minE = 0
maxE = 0
minPsi = 0
for(i in 1:ncoh) {
maxE = max(maxE, max(l[[i]]$Elayers, na.rm=T))
minE = min(minE, min(l[[i]]$Elayers, na.rm=T))
minPsi = min(minPsi, min(l[[i]]$PsiPlant))
}
for(i in 1:ncoh) {
if(i==1) {
matplot(-l[[i]]$PsiPlant, l[[i]]$Elayers, type="l", lty=i, ylim=c(minE-0.1,maxE+0.1), xlim=c(0,-minPsi),
xlab = "Plant pressure (-MPa)", ylab = "Flow rate")
} else {
matlines(-l[[i]]$PsiPlant, l[[i]]$Elayers, lty=i)
}
}
abline(h=0, col="gray")
}
else if(type=="PsiRhizo") {
minE = 0
maxE = 0
minPsi = 0
for(i in 1:ncoh) {
maxE = max(maxE, max(l[[i]]$PsiRhizo, na.rm=T))
minE = min(minE, min(l[[i]]$PsiRhizo, na.rm=T))
minPsi = min(minPsi, min(l[[i]]$PsiPlant))
}
for(i in 1:ncoh) {
if(i==1) {
matplot(-l[[i]]$PsiPlant, l[[i]]$PsiRhizo, type="l", lty=i, ylim=c(minE-0.1,maxE+0.1), xlim=c(0,-minPsi),
xlab = "Plant pressure (-MPa)", ylab = "Rhizosphere pressure (-MPa)")
} else {
matlines(-l[[i]]$PsiPlant, l[[i]]$PsiRhizo, lty=i)
}
}
abline(h=0, col="gray")
}
invisible(l)
} | /R/supplyFunctionPlot.R | no_license | MalditoBarbudo/medfate | R | false | false | 3,508 | r | #Draws the supply function (E vs PlantPsi) for the current soil state and plant hydraulic parameters
# Draws the supply function (E vs PlantPsi) for the current soil state and
# plant hydraulic parameters.
#
# Arguments:
#   soil - soil state list; 'psi', 'VG_n' and 'VG_alpha' are read from it.
#   x    - simulation input object; below-ground conductances ('below'),
#          cavitation state ('ProportionCavitated'), numeric solver settings
#          ('control$numericParams') and transpiration parameters
#          ('paramsTransp') are read from it.
#   type - panel to draw: "E" (flow rate vs plant pressure), "dEdP"
#          (derivative of the supply function), "Elayers" (per-soil-layer
#          flow) or "PsiRhizo" (rhizosphere water potential).
#
# Returns, invisibly, the list with one supply function network per cohort.
hydraulics.supplyFunctionPlot<-function(soil, x, type="E") {
  psic = soil$psi
  VG_nc = soil$VG_n
  VG_alphac = soil$VG_alpha
  VCroot_kmax = x$below$VCroot_kmax
  VGrhizo_kmax = x$below$VGrhizo_kmax
  pEmb = x$ProportionCavitated
  numericParams = x$control$numericParams
  VCroot_c = x$paramsTransp$VCroot_c
  VCroot_d = x$paramsTransp$VCroot_d
  VCstem_kmax = x$paramsTransp$VCstem_kmax
  VCstem_c = x$paramsTransp$VCstem_c
  VCstem_d = x$paramsTransp$VCstem_d
  ncoh = nrow(x$above)
  l = vector("list", ncoh)
  # Solve the supply function network once per cohort.
  for(i in seq_len(ncoh)) {
    # Xylem potential corresponding to the current embolism level, so that
    # cavitation already suffered is carried into the network solution.
    psiCav = hydraulics.xylemPsi(1.0-pEmb[i], 1.0, VCstem_c[i], VCstem_d[i])
    l[[i]] = hydraulics.supplyFunctionNetwork(psic,
                 VGrhizo_kmax[i,], VG_nc, VG_alphac,
                 VCroot_kmax[i,], VCroot_c[i], VCroot_d[i],
                 VCstem_kmax[i], VCstem_c[i], VCstem_d[i], psiCav = psiCav,
                 maxNsteps = numericParams$maxNsteps, psiStep = numericParams$psiStep,
                 psiMax = numericParams$psiMax, ntrial = numericParams$ntrial,
                 psiTol = numericParams$psiTol, ETol = numericParams$ETol)
  }
  if(type=="E") {
    # Common axis limits across all cohorts (na.rm = TRUE also on the psi
    # scan, so one NA cannot poison xlim).
    maxE = 0
    minPsi = 0
    for(i in seq_len(ncoh)) {
      maxE = max(maxE, max(l[[i]]$E, na.rm=TRUE))
      minPsi = min(minPsi, min(l[[i]]$PsiPlant, na.rm=TRUE))
    }
    # First cohort opens the plot; remaining cohorts are overlaid with a
    # distinct line type per cohort.
    for(i in seq_len(ncoh)) {
      if(i==1) {
        plot(-l[[i]]$PsiPlant, l[[i]]$E, type="l", ylim=c(0,maxE+0.1), xlim=c(0,-minPsi),
             xlab = "Plant pressure (-MPa)", ylab = "Flow rate")
      } else {
        lines(-l[[i]]$PsiPlant, l[[i]]$E, lty=i)
      }
    }
  }
  else if(type=="dEdP") {
    maxdEdP = 0
    minPsi = 0
    for(i in seq_len(ncoh)) {
      maxdEdP = max(maxdEdP, max(l[[i]]$dEdP, na.rm=TRUE))
      minPsi = min(minPsi, min(l[[i]]$PsiPlant, na.rm=TRUE))
    }
    for(i in seq_len(ncoh)) {
      if(i==1) {
        plot(-l[[i]]$PsiPlant, l[[i]]$dEdP, type="l", ylim=c(0,maxdEdP+0.1), xlim=c(0,-minPsi),
             xlab = "Plant pressure (-MPa)", ylab = "dE/dP")
      } else {
        lines(-l[[i]]$PsiPlant, l[[i]]$dEdP, lty=i)
      }
    }
  }
  else if(type=="Elayers") {
    minE = 0
    maxE = 0
    minPsi = 0
    for(i in seq_len(ncoh)) {
      maxE = max(maxE, max(l[[i]]$Elayers, na.rm=TRUE))
      minE = min(minE, min(l[[i]]$Elayers, na.rm=TRUE))
      minPsi = min(minPsi, min(l[[i]]$PsiPlant, na.rm=TRUE))
    }
    # Elayers is a matrix (one column per soil layer), hence matplot/matlines.
    for(i in seq_len(ncoh)) {
      if(i==1) {
        matplot(-l[[i]]$PsiPlant, l[[i]]$Elayers, type="l", lty=i, ylim=c(minE-0.1,maxE+0.1), xlim=c(0,-minPsi),
                xlab = "Plant pressure (-MPa)", ylab = "Flow rate")
      } else {
        matlines(-l[[i]]$PsiPlant, l[[i]]$Elayers, lty=i)
      }
    }
    abline(h=0, col="gray")
  }
  else if(type=="PsiRhizo") {
    # Renamed from the misleading minE/maxE: these bound water potentials.
    minP = 0
    maxP = 0
    minPsi = 0
    for(i in seq_len(ncoh)) {
      maxP = max(maxP, max(l[[i]]$PsiRhizo, na.rm=TRUE))
      minP = min(minP, min(l[[i]]$PsiRhizo, na.rm=TRUE))
      minPsi = min(minPsi, min(l[[i]]$PsiPlant, na.rm=TRUE))
    }
    for(i in seq_len(ncoh)) {
      if(i==1) {
        matplot(-l[[i]]$PsiPlant, l[[i]]$PsiRhizo, type="l", lty=i, ylim=c(minP-0.1,maxP+0.1), xlim=c(0,-minPsi),
                xlab = "Plant pressure (-MPa)", ylab = "Rhizosphere pressure (-MPa)")
      } else {
        matlines(-l[[i]]$PsiPlant, l[[i]]$PsiRhizo, lty=i)
      }
    }
    abline(h=0, col="gray")
  }
  invisible(l)
}
# Reference class representing a platform user account. Inherits the common
# API-resource behavior from "Item" and stores the profile fields returned
# by the user endpoint.
User <- setRefClass(
  "User",
  contains = "Item",
  fields = c(
    "username", "email", "first_name", "last_name", "affiliation",
    "phone", "address", "city", "state", "country", "zip_code",
    "projects", "billing_groups", "tasks"
  ),
  methods = list(
    # Populate every profile field (all default to "") and delegate the
    # remaining arguments (e.g. href, response) to the Item initializer.
    initialize = function(username = "", email = "", first_name = "",
                          last_name = "", affiliation = "", phone = "",
                          address = "", city = "", state = "", country = "",
                          zip_code = "", projects = "", billing_groups = "",
                          tasks = "", ...) {
      username <<- username
      email <<- email
      first_name <<- first_name
      last_name <<- last_name
      affiliation <<- affiliation
      phone <<- phone
      address <<- address
      city <<- city
      state <<- state
      country <<- country
      zip_code <<- zip_code
      projects <<- projects
      billing_groups <<- billing_groups
      tasks <<- tasks
      callSuper(...)
    },
    # Pretty-print the account under a "== User ==" banner.
    show = function() {
      .showFields(
        .self, "== User ==",
        values = c("href", "username", "email", "first_name", "last_name",
                   "affiliation", "phone", "address", "city", "state",
                   "country", "zip_code", "projects", "billing_groups",
                   "tasks")
      )
    }
  )
)
# Convert a parsed API response (a named list) into a User object.
# Bug fix: 'address' was previously read from the misspelled field
# 'x$addrss', which is always NULL, so the address was silently dropped.
.asUser <- function(x) {
    User(href = x$href,
         username = x$username,
         email = x$email,
         first_name = x$first_name,
         last_name = x$last_name,
         affiliation = x$affiliation,
         phone = x$phone,
         address = x$address,
         city = x$city,
         state = x$state,
         country = x$country,
         zip_code = x$zip_code,
         projects = x$projects,
         billing_groups = x$billing_groups,
         tasks = x$tasks,
         response = response(x))
}
| /R/class-user.R | permissive | mlrdk/sevenbridges-r | R | false | false | 3,755 | r | User <- setRefClass("User", contains = "Item",
fields = c("username", "email",
"first_name", "last_name",
"affiliation", "phone", "address",
"city", "state", "country", "zip_code",
"projects", "billing_groups", "tasks"),
methods = list(
initialize = function(
username = "",
email = "",
first_name = "",
last_name = "",
affiliation = "",
phone = "",
address = "",
city = "",
state = "",
country = "",
zip_code = "",
projects = "",
billing_groups = "",
tasks = "", ...) {
username <<- username
email <<- email
first_name <<- first_name
last_name <<- last_name
affiliation <<- affiliation
phone <<- phone
address <<- address
city <<- city
state <<- state
country <<- country
zip_code <<- zip_code
projects <<- projects
billing_groups <<- billing_groups
tasks <<- tasks
callSuper(...)
},
show = function() {
.showFields(.self,
"== User ==",
values = c("href",
"username",
"email",
"first_name",
"last_name",
"affiliation",
"phone",
"address",
"city",
"state",
"country",
"zip_code",
"projects",
"billing_groups",
"tasks"))
}
))
# Convert a parsed API response (a named list) into a User object.
# Bug fix: 'address' was previously read from the misspelled field
# 'x$addrss', which is always NULL, so the address was silently dropped.
.asUser <- function(x) {
    User(href = x$href,
         username = x$username,
         email = x$email,
         first_name = x$first_name,
         last_name = x$last_name,
         affiliation = x$affiliation,
         phone = x$phone,
         address = x$address,
         city = x$city,
         state = x$state,
         country = x$country,
         zip_code = x$zip_code,
         projects = x$projects,
         billing_groups = x$billing_groups,
         tasks = x$tasks,
         response = response(x))
}
|
## Create one R script called run_analysis.R that does the following:
## 1. Merges the training and the test sets to create one data set.
## 2. Extracts only the measurements on the mean and standard deviation for each measurement.
## 3. Uses descriptive activity names to name the activities in the data set
## 4. Appropriately labels the data set with descriptive activity names.
## 5. Creates a second, independent tidy data set with the average of each variable for each activity and each subject.

# Ensure dependencies are installed, then attach them. library() is used for
# the actual attach because it fails fast if installation did not succeed,
# whereas require() only warns and returns FALSE.
if (!require("data.table")) {
  install.packages("data.table")
}
if (!require("reshape2")) {
  install.packages("reshape2")
}
library(data.table)
library(reshape2)

# Activity labels: second column maps activity id (row position) to name.
activity_labels <- read.table("./UCI HAR Dataset/activity_labels.txt")[, 2]
# Measurement column names for the X_* files.
features <- read.table("./UCI HAR Dataset/features.txt")[, 2]
# Keep only the mean and standard deviation measurements.
extract_features <- grepl("mean|std", features)

# ---- Test set ----
X_test <- read.table("./UCI HAR Dataset/test/X_test.txt")
y_test <- read.table("./UCI HAR Dataset/test/y_test.txt")
subject_test <- read.table("./UCI HAR Dataset/test/subject_test.txt")
names(X_test) <- features
X_test <- X_test[, extract_features]          # mean/std columns only
y_test[, 2] <- activity_labels[y_test[, 1]]   # attach descriptive labels
names(y_test) <- c("Activity_ID", "Activity_Label")
names(subject_test) <- "subject"
test_data <- cbind(as.data.table(subject_test), y_test, X_test)

# ---- Training set (same processing as the test set) ----
X_train <- read.table("./UCI HAR Dataset/train/X_train.txt")
y_train <- read.table("./UCI HAR Dataset/train/y_train.txt")
subject_train <- read.table("./UCI HAR Dataset/train/subject_train.txt")
names(X_train) <- features
X_train <- X_train[, extract_features]
y_train[, 2] <- activity_labels[y_train[, 1]]
names(y_train) <- c("Activity_ID", "Activity_Label")
names(subject_train) <- "subject"
train_data <- cbind(as.data.table(subject_train), y_train, X_train)

# Merge test and train data, melt to long form, then average every variable
# per subject/activity pair and write the tidy result.
data <- rbind(test_data, train_data)
id_labels <- c("subject", "Activity_ID", "Activity_Label")
data_labels <- setdiff(colnames(data), id_labels)
melt_data <- melt(data, id = id_labels, measure.vars = data_labels)
tidy_data <- dcast(melt_data, subject + Activity_Label ~ variable, mean)
write.table(tidy_data, file = "./tidy_data.txt")
| /run_analysis.R | no_license | moratam/Getting_and_Cleaning_Data_Course_Project | R | false | false | 2,680 | r | ## Create one R script called run_analysis.R that does the following:
## 1. Merges the training and the test sets to create one data set.
## 2. Extracts only the measurements on the mean and standard deviation for each measurement.
## 3. Uses descriptive activity names to name the activities in the data set
## 4. Appropriately labels the data set with descriptive activity names.
## 5. Creates a second, independent tidy data set with the average of each variable for each activity and each subject.
if (!require("data.table")) {
install.packages("data.table")
}
if (!require("reshape2")) {
install.packages("reshape2")
}
require("data.table")
require("reshape2")
# Load activity labels from the file
activity_labels <- read.table("./UCI HAR Dataset/activity_labels.txt")[,2]
# Load data column names from the file
features <- read.table("./UCI HAR Dataset/features.txt")[,2]
# Extract only the measurements on the mean and standard deviation for each measurement.
extract_features <- grepl("mean|std", features)
# Load and process X_test & y_test data.
X_test <- read.table("./UCI HAR Dataset/test/X_test.txt")
y_test <- read.table("./UCI HAR Dataset/test/y_test.txt")
subject_test <- read.table("./UCI HAR Dataset/test/subject_test.txt")
names(X_test) = features
# Extract only the measurements on the mean and standard deviation for each measurement.
X_test = X_test[,extract_features]
# Load activity labels
y_test[,2] = activity_labels[y_test[,1]]
names(y_test) = c("Activity_ID", "Activity_Label")
names(subject_test) = "subject"
# Bind data
test_data <- cbind(as.data.table(subject_test), y_test, X_test)
# Load and process X_train & y_train data.
X_train <- read.table("./UCI HAR Dataset/train/X_train.txt")
y_train <- read.table("./UCI HAR Dataset/train/y_train.txt")
subject_train <- read.table("./UCI HAR Dataset/train/subject_train.txt")
names(X_train) = features
# Extract only the measurements on the mean and standard deviation for each measurement.
X_train = X_train[,extract_features]
# Load activity data
y_train[,2] = activity_labels[y_train[,1]]
names(y_train) = c("Activity_ID", "Activity_Label")
names(subject_train) = "subject"
# Bind data
train_data <- cbind(as.data.table(subject_train), y_train, X_train)
# Merge test and train data
data = rbind(test_data, train_data)
id_labels = c("subject", "Activity_ID", "Activity_Label")
data_labels = setdiff(colnames(data), id_labels)
melt_data = melt(data, id = id_labels, measure.vars = data_labels)
# Apply mean function to dataset using dcast function
tidy_data = dcast(melt_data, subject + Activity_Label ~ variable, mean)
write.table(tidy_data, file = "./tidy_data.txt")
|
# Download the UCI household power consumption data set (if not already
# present) and return a data frame restricted to the dates 2007-02-01 and
# 2007-02-02.
read_data <- function()
{
  dataURL <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
  dataArchive <- "exdata-data-household_power_consumption.zip"
  dataFile <- "household_power_consumption.txt"
  # Download and unzip only when not done yet.
  # NOTE(review): method="curl" requires the curl binary on the PATH; kept
  # as in the original.
  if(!file.exists(dataArchive)) download.file(dataURL, dataArchive, method="curl")
  if(!file.exists(dataFile)) unzip(dataArchive)
  # The file holds one row per minute starting 16/12/2006 17:24. Skip every
  # line before midnight 1/2/2007 (the +1 also accounts for the header line)
  # and read exactly two days' worth of minutes.
  shift <- difftime(strptime("1/2/2007 00:00:00", "%d/%m/%Y %H:%M:%S"),
                    strptime("16/12/2006 17:24:00", "%d/%m/%Y %H:%M:%S"),
                    units="min") + 1
  hpc <- read.table(dataFile, sep=";", na.strings="?", nrows=2*24*60,
                    skip=as.numeric(shift))
  # Read only the header row for the column names. The original used
  # nrows=0, which is not a positive limit and is ignored by read.table,
  # so the whole ~2M-line file was scanned just to recover the names.
  header <- read.table(dataFile, sep=";", nrows=1, header=TRUE)
  names(hpc) <- names(header)
  hpc
}
# Render the combined 2x2 overview figure (global active power, voltage,
# energy sub-metering and global reactive power over the two-day window)
# into "plot4.png". The figure carries the same information as the course
# reference figures; pixel-to-pixel equivalence is not attempted.
plot4 <- function(hpc)
{
  # Default 480x480 device; background left transparent as in the samples.
  png("plot4.png", bg = "transparent")
  par(mfrow = c(2, 2), cex = .8)
  day_labels <- c("Thu", "Fri", "Sat")
  day_ticks <- c(0, 1440, 2880)  # minute offsets of each midnight
  add_day_axis <- function() axis(1, at = day_ticks, labels = day_labels)

  # Top-left: global active power.
  plot(hpc$Global_active_power, type = "l",
       ylab = "Global Active Power", xaxt = "n", xlab = "")
  add_day_axis()

  # Top-right: voltage.
  plot(hpc$Voltage, type = "l", xaxt = "n", xlab = "datetime", ylab = "Voltage")
  add_day_axis()

  # Bottom-left: the three sub-metering channels plus a borderless legend.
  plot(hpc$Sub_metering_1, type = "l",
       ylab = "Energy sub metering", xaxt = "n", xlab = "")
  lines(hpc$Sub_metering_2, col = "red")
  lines(hpc$Sub_metering_3, col = "blue")
  add_day_axis()
  legend("topright",
         legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
         col = c("black", "red", "blue"), lty = 1, bty = "n")

  # Bottom-right: global reactive power.
  plot(hpc$Global_reactive_power, xaxt = "n", xlab = "datetime",
       ylab = "Global_reactive_power", type = "l")
  add_day_axis()

  dev.off()
}
hpc <- read_data()
plot4(hpc) | /plot4.R | no_license | naganmail/ExData_Plotting1 | R | false | false | 2,216 | r | # This function downloads whole data set, extracts and returns only the part
# from the dates 2007-02-01 and 2007-02-02.
read_data <- function()
{
# download and unzip data if not done yet
dataURL <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
dataArchive <- "exdata-data-household_power_consumption.zip"
dataFile <- "household_power_consumption.txt"
if(!file.exists(dataArchive)) download.file(dataURL, dataArchive, method="curl")
if(!file.exists(dataFile)) unzip(dataArchive)
# get only data from the dates 2007-02-01 and 2007-02-02.
shift <- difftime(strptime("1/2/2007 00:00:00", "%d/%m/%Y %H:%M:%S"), strptime("16/12/2006 17:24:00", "%d/%m/%Y %H:%M:%S"), units="min") + 1
hpc <- read.table(dataFile, sep=";", na.strings="?", nrows=2*24*60, skip=shift)
header <- read.table(dataFile, sep=";", nrows=0, header=TRUE)
names(hpc) <- names(header)
hpc
}
# This function creates a combined overview plot and stores it into a bitmap file.
# I've not targeted to achieve pixel to pixel equivalence, so exact match means the plot has
# exact the same information on it compared to reference figures from assigment.
plot4 <- function(hpc)
{
# writes by default 480x480 image file, transparency wasn't explicitly mentioned,
# but toggled in the sample figures
png("plot4.png", bg = "transparent")
par(mfrow=c(2,2))
par(cex=.8)
lbls <- c("Thu","Fri","Sat")
ticks <- c(0,1440,2880)
plot(hpc$Global_active_power, type="l", ylab="Global Active Power", xaxt="n", xlab="")
axis(1, at=ticks, labels=lbls)
plot(hpc$Voltage, type="l", xaxt="n", xlab="datetime", ylab="Voltage")
axis(1, at=ticks, labels=lbls)
plot(hpc$Sub_metering_1, type="l", ylab="Energy sub metering", xaxt="n", xlab="")
lines(hpc$Sub_metering_2, col="red")
lines(hpc$Sub_metering_3, col="blue")
axis(1, at=ticks, labels=lbls)
legend("topright", legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"), col=c("black","red","blue"), lty=1, bty="n")
plot(hpc$Global_reactive_power, xaxt="n", xlab="datetime", ylab="Global_reactive_power", type="l")
axis(1, at=ticks, labels=lbls)
dev.off()
}
hpc <- read_data()
plot4(hpc) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/transform_rasters.R
\name{transform_rasters}
\alias{transform_rasters}
\title{Transform raster values using custom calls.}
\usage{
transform_rasters(raster_stack, separator = "_", ncores = 1, ...)
}
\arguments{
\item{raster_stack}{RasterStack. Stack with environmental layers.}
\item{separator}{character. Character that separates variable names, years and scenarios.}
\item{ncores}{integer. Number of cores to use in parallel processing.}
\item{...}{Named expressions defining the new rasters to be created from the stack's layers.}
}
\value{
Returns a RasterStack with layers for the predictions required.
}
\description{
\code{transform_rasters} Applies custom expressions to transform the values of spatial rasters in a stack, taking into account temporal repetition of those rasters.
}
\examples{
\dontrun{
FulanusEcoRasters_present <-
get_rasters(
var = c('prec', 'tmax', 'tmin'),
scenarios = 'present',
source = "C:/Users/gabri/Dropbox/Mapinguari/global_grids_10_minutes",
ext = FulanusDistribution[c(2,3)],
margin = 5,
reorder = c(1, 10, 11, 12, 2, 3, 4, 5, 6, 7, 8, 9))
# You can apply any function to subsets of rasters in the stack,
# by selecting the layers with double brackets.
transform_rasters(raster_stack = FulanusEcoRasters_present$present,
total_1sem = sum(tmax[1:6]),
mean_1sem = mean(tmax[1:6]),
sd_1sem = sd(tmax[1:6]),
total_2sem = sum(tmax[7:12]),
mean_2sem = mean(tmax[7:12]),
sd_2sem = sd(tmax[7:12]))
}
}
| /man/transform_rasters.Rd | no_license | cran/Mapinguari | R | false | true | 1,551 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/transform_rasters.R
\name{transform_rasters}
\alias{transform_rasters}
\title{Transform raster values using custom calls.}
\usage{
transform_rasters(raster_stack, separator = "_", ncores = 1, ...)
}
\arguments{
\item{raster_stack}{RasterStack. Stack with environmental layers.}
\item{separator}{character. Character that separates variable names, years and scenarios.}
\item{ncores}{integer. Number of cores to use in parallel processing.}
\item{...}{New rasters created.}
}
\value{
Returns a RasterStack with layers for the predictions required.
}
\description{
\code{transform_rasters} Applies custom expressions to transform the values of spatial rasters in a stack, taking into account temporal repetition of those rasters.
}
\examples{
\dontrun{
FulanusEcoRasters_present <-
get_rasters(
var = c('prec', 'tmax', 'tmin'),
scenarios = 'present',
source = "C:/Users/gabri/Dropbox/Mapinguari/global_grids_10_minutes",
ext = FulanusDistribution[c(2,3)],
margin = 5,
reorder = c(1, 10, 11, 12, 2, 3, 4, 5, 6, 7, 8, 9))
# You can apply any function to subsets of rasters in the stack,
# by selecting the layers with double brackets.
transform_rasters(raster_stack = FulanusEcoRasters_present$present,
total_1sem = sum(tmax[1:6]),
mean_1sem = mean(tmax[1:6]),
sd_1sem = sd(tmax[1:6]),
total_2sem = sum(tmax[7:12]),
mean_2sem = mean(tmax[7:12]),
sd_2sem = sd(tmax[7:12]))
}
}
|
## Utilities for modelling with state-space models.
## 20170614
##
## NOTE(review): both S3 methods below are empty stubs -- they accept the
## fitted object `ob` and the penalty weight `k` (k = 2 is the conventional
## AIC penalty) but have empty bodies and therefore return NULL. Presumably
## intended for objects of class "SSModel"; confirm intent before relying
## on them.
AIC.SSModel <- function(ob, k=2){
}
## BIC analogue of the stub above; also unimplemented.
BIC.SSModel <- function(ob, k=2){
}
| /statespace.r | no_license | ecastellon/mag | R | false | false | 131 | r | ## utiles para modelar con state-space
## 20170614
##
AIC.SSModel <- function(ob, k=2){
}
##
BIC.SSModel <- function(ob, k=2){
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.nphawkesT.R
\name{plot.nphawkesT}
\alias{plot.nphawkesT}
\title{Function to plot the magnitude productivity, spatial, temporal, and background components of the Hawkes model}
\usage{
\method{plot}{nphawkesT}(x, print = FALSE, ...)
}
\arguments{
\item{x}{An object of class nphawkesT}
\item{print}{A logical indicating whether the plot should be printed or returned}
\item{...}{Other parameters passed in}
}
\value{
p A ggplot2 plot
}
\description{
Function to plot the magnitude productivity, spatial, temporal, and background components of the Hawkes model
}
\examples{
data(catalog)
data <- nphData(data = catalog[catalog$Magnitude > 4.0,], time_var = 'tdiff', x_var = 'Longitude', y_var = 'Latitude', mag = 'Magnitude')
fit <- nphawkesT(data = data)
plot(x = fit, type = 'time')
}
| /man/plot.nphawkesT.Rd | no_license | mrjoshuagordon/nphawkes | R | false | true | 872 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.nphawkesT.R
\name{plot.nphawkesT}
\alias{plot.nphawkesT}
\title{Function to plot the magnitude productivity, spatial, temporal, and background components of the Hawkes model}
\usage{
\method{plot}{nphawkesT}(x, print = FALSE, ...)
}
\arguments{
\item{x}{An object of class nphawkesMSTH}
\item{print}{A logical indicating whether the plot should be printed or returned}
\item{...}{Other parameters passed in}
}
\value{
p A ggplot2 plot
}
\description{
Function to plot the magnitude productivity, spatial, temporal, and background components of the Hawkes model
}
\examples{
data(catalog)
data <- nphData(data = catalog[catalog$Magnitude > 4.0,], time_var = 'tdiff', x_var = 'Longitude', y_var = 'Latitude', mag = 'Magnitude')
fit <- nphawkesT(data = data)
plot(x = fit, type = 'time')
}
|
data("cars")# load dataset
View(cars)plot(dist~speed,data=cars)
#view the no of obs
head(cars,20)# display the first 20 obs
scatter.smooth(x=cars$speed, y=cars$dist, main="Dist ~ Speed") # scatterplot
cor(cars$speed, cars$dist) # calculate correlation between speed and distance
linearMod <- lm(dist ~ speed, data=cars) # build linear regression model on full data
print(linearMod)
abline(linearMod) # draw a line of best fit
plot(linearMod) # other plots
summary(linearMod) # model summary
AIC(linearMod)
BIC(linearMod)
a <- data.frame(speed = 24) #make a data frame for the predict fn
result <- predict(linearMod,a) # predict fn
print(result) # print the result
modelSummary <- summary(linearMod) # capture model summary as an object
modelCoeffs <- modelSummary$coefficients # model coefficients
beta.estimate <- modelCoeffs["speed", "Estimate"] # get beta estimate for speed
std.error <- modelCoeffs["speed", "Std. Error"] # get std.error for speed
t_value <- beta.estimate/std.error # calc t statistic
modelSummary <- summary(linearMod) # capture model summary as an object
modelCoeffs <- modelSummary$coefficients # model coefficients
beta.estimate <- modelCoeffs["speed", "Estimate"] # get beta estimate for speed
std.error <- modelCoeffs["speed", "Std. Error"] # get std.error for speed
t_value <- beta.estimate/std.error # calc t statistic
p_value <- 2*pt(-abs(t_value), df=nrow(cars)-ncol(cars)) # calc p Value
f_statistic <- linearMod$fstatistic[1] # fstatistic
f <- summary(linearMod)$fstatistic # parameters for model p-value calc
model_p <- pf(f[1], f[2], f[3], lower=FALSE)
p_value <- 2*pt(-abs(t_value), df=nrow(cars)-ncol(cars)) # calc p Value
f_statistic <- linearMod$fstatistic[1] # fstatistic
f <- summary(linearMod)$fstatistic # parameters for model p-value calc
| /linear.R | no_license | kagsburg/recess2018-BSE2301-Group16 | R | false | false | 1,820 | r | data("cars")# load dataset
View(cars)plot(dist~speed,data=cars)
#view the no of obs
head(cars,20)# display the first 20 obs
scatter.smooth(x=cars$speed, y=cars$dist, main="Dist ~ Speed") # scatterplot
cor(cars$speed, cars$dist) # calculate correlation between speed and distance
linearMod <- lm(dist ~ speed, data=cars) # build linear regression model on full data
print(linearMod)
abline(linearMod) # draw a line of best fit
plot(linearMod) # other plots
summary(linearMod) # model summary
AIC(linearMod)
BIC(linearMod)
a <- data.frame(speed = 24) #make a data frame for the predict fn
result <- predict(linearMod,a) # predict fn
print(result) # print the result
modelSummary <- summary(linearMod) # capture model summary as an object
modelCoeffs <- modelSummary$coefficients # model coefficients
beta.estimate <- modelCoeffs["speed", "Estimate"] # get beta estimate for speed
std.error <- modelCoeffs["speed", "Std. Error"] # get std.error for speed
t_value <- beta.estimate/std.error # calc t statistic
modelSummary <- summary(linearMod) # capture model summary as an object
modelCoeffs <- modelSummary$coefficients # model coefficients
beta.estimate <- modelCoeffs["speed", "Estimate"] # get beta estimate for speed
std.error <- modelCoeffs["speed", "Std. Error"] # get std.error for speed
t_value <- beta.estimate/std.error # calc t statistic
p_value <- 2*pt(-abs(t_value), df=nrow(cars)-ncol(cars)) # calc p Value
f_statistic <- linearMod$fstatistic[1] # fstatistic
f <- summary(linearMod)$fstatistic # parameters for model p-value calc
model_p <- pf(f[1], f[2], f[3], lower=FALSE)
p_value <- 2*pt(-abs(t_value), df=nrow(cars)-ncol(cars)) # calc p Value
f_statistic <- linearMod$fstatistic[1] # fstatistic
f <- summary(linearMod)$fstatistic # parameters for model p-value calc
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nn-rnn.R
\name{nn_gru}
\alias{nn_gru}
\title{Applies a multi-layer gated recurrent unit (GRU) RNN to an input sequence.}
\usage{
nn_gru(
input_size,
hidden_size,
num_layers = 1,
bias = TRUE,
batch_first = FALSE,
dropout = 0,
bidirectional = FALSE,
...
)
}
\arguments{
\item{input_size}{The number of expected features in the input \code{x}}
\item{hidden_size}{The number of features in the hidden state \code{h}}
\item{num_layers}{Number of recurrent layers. E.g., setting \code{num_layers=2}
would mean stacking two GRUs together to form a \verb{stacked GRU},
with the second GRU taking in outputs of the first GRU and
computing the final results. Default: 1}
\item{bias}{If \code{FALSE}, then the layer does not use bias weights \code{b_ih} and \code{b_hh}.
Default: \code{TRUE}}
\item{batch_first}{If \code{TRUE}, then the input and output tensors are provided
as (batch, seq, feature). Default: \code{FALSE}}
\item{dropout}{If non-zero, introduces a \code{Dropout} layer on the outputs of each
GRU layer except the last layer, with dropout probability equal to
\code{dropout}. Default: 0}
\item{bidirectional}{If \code{TRUE}, becomes a bidirectional GRU. Default: \code{FALSE}}
\item{...}{currently unused.}
}
\description{
For each element in the input sequence, each layer computes the following
function:
}
\details{
\deqn{
\begin{array}{ll}
r_t = \sigma(W_{ir} x_t + b_{ir} + W_{hr} h_{(t-1)} + b_{hr}) \\
z_t = \sigma(W_{iz} x_t + b_{iz} + W_{hz} h_{(t-1)} + b_{hz}) \\
n_t = \tanh(W_{in} x_t + b_{in} + r_t (W_{hn} h_{(t-1)}+ b_{hn})) \\
h_t = (1 - z_t) n_t + z_t h_{(t-1)}
\end{array}
}
where \eqn{h_t} is the hidden state at time \code{t}, \eqn{x_t} is the input
at time \code{t}, \eqn{h_{(t-1)}} is the hidden state of the previous layer
at time \code{t-1} or the initial hidden state at time \code{0}, and \eqn{r_t},
\eqn{z_t}, \eqn{n_t} are the reset, update, and new gates, respectively.
\eqn{\sigma} is the sigmoid function.
}
\note{
All the weights and biases are initialized from \eqn{\mathcal{U}(-\sqrt{k}, \sqrt{k})}
where \eqn{k = \frac{1}{\mbox{hidden\_size}}}
}
\section{Inputs}{
Inputs: input, h_0
\itemize{
\item \strong{input} of shape \verb{(seq_len, batch, input_size)}: tensor containing the features
of the input sequence. The input can also be a packed variable length
sequence. See \code{\link[=nn_utils_rnn_pack_padded_sequence]{nn_utils_rnn_pack_padded_sequence()}}
for details.
\item \strong{h_0} of shape \verb{(num_layers * num_directions, batch, hidden_size)}: tensor
containing the initial hidden state for each element in the batch.
Defaults to zero if not provided.
}
}
\section{Outputs}{
Outputs: output, h_n
\itemize{
\item \strong{output} of shape \verb{(seq_len, batch, num_directions * hidden_size)}: tensor
containing the output features h_t from the last layer of the GRU,
for each t. If a \code{PackedSequence} has been
given as the input, the output will also be a packed sequence.
For the unpacked case, the directions can be separated
using \code{output$view(c(seq_len, batch, num_directions, hidden_size))},
with forward and backward being direction \code{0} and \code{1} respectively.
Similarly, the directions can be separated in the packed case.
\item \strong{h_n} of shape \verb{(num_layers * num_directions, batch, hidden_size)}: tensor
containing the hidden state for \code{t = seq_len}.
Like \emph{output}, the layers can be separated using
\code{h_n$view(c(num_layers, num_directions, batch, hidden_size))}.
}
}
\section{Attributes}{
\itemize{
\item \code{weight_ih_l[k]} : the learnable input-hidden weights of the \eqn{\mbox{k}^{th}} layer
(W_ir|W_iz|W_in), of shape \verb{(3*hidden_size x input_size)}
\item \code{weight_hh_l[k]} : the learnable hidden-hidden weights of the \eqn{\mbox{k}^{th}} layer
(W_hr|W_hz|W_hn), of shape \verb{(3*hidden_size x hidden_size)}
\item \code{bias_ih_l[k]} : the learnable input-hidden bias of the \eqn{\mbox{k}^{th}} layer
(b_ir|b_iz|b_in), of shape \code{(3*hidden_size)}
\item \code{bias_hh_l[k]} : the learnable hidden-hidden bias of the \eqn{\mbox{k}^{th}} layer
(b_hr|b_hz|b_hn), of shape \code{(3*hidden_size)}
}
}
\examples{
if (torch_is_installed()) {
rnn <- nn_gru(10, 20, 2)
input <- torch_randn(5, 3, 10)
h0 <- torch_randn(2, 3, 20)
output <- rnn(input, h0)
}
}
| /fuzzedpackages/torch/man/nn_gru.Rd | no_license | akhikolla/testpackages | R | false | true | 4,381 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nn-rnn.R
\name{nn_gru}
\alias{nn_gru}
\title{Applies a multi-layer gated recurrent unit (GRU) RNN to an input sequence.}
\usage{
nn_gru(
input_size,
hidden_size,
num_layers = 1,
bias = TRUE,
batch_first = FALSE,
dropout = 0,
bidirectional = FALSE,
...
)
}
\arguments{
\item{input_size}{The number of expected features in the input \code{x}}
\item{hidden_size}{The number of features in the hidden state \code{h}}
\item{num_layers}{Number of recurrent layers. E.g., setting \code{num_layers=2}
would mean stacking two GRUs together to form a \verb{stacked GRU},
with the second GRU taking in outputs of the first GRU and
computing the final results. Default: 1}
\item{bias}{If \code{FALSE}, then the layer does not use bias weights \code{b_ih} and \code{b_hh}.
Default: \code{TRUE}}
\item{batch_first}{If \code{TRUE}, then the input and output tensors are provided
as (batch, seq, feature). Default: \code{FALSE}}
\item{dropout}{If non-zero, introduces a \code{Dropout} layer on the outputs of each
GRU layer except the last layer, with dropout probability equal to
\code{dropout}. Default: 0}
\item{bidirectional}{If \code{TRUE}, becomes a bidirectional GRU. Default: \code{FALSE}}
\item{...}{currently unused.}
}
\description{
For each element in the input sequence, each layer computes the following
function:
}
\details{
\deqn{
\begin{array}{ll}
r_t = \sigma(W_{ir} x_t + b_{ir} + W_{hr} h_{(t-1)} + b_{hr}) \\
z_t = \sigma(W_{iz} x_t + b_{iz} + W_{hz} h_{(t-1)} + b_{hz}) \\
n_t = \tanh(W_{in} x_t + b_{in} + r_t (W_{hn} h_{(t-1)}+ b_{hn})) \\
h_t = (1 - z_t) n_t + z_t h_{(t-1)}
\end{array}
}
where \eqn{h_t} is the hidden state at time \code{t}, \eqn{x_t} is the input
at time \code{t}, \eqn{h_{(t-1)}} is the hidden state of the previous layer
at time \code{t-1} or the initial hidden state at time \code{0}, and \eqn{r_t},
\eqn{z_t}, \eqn{n_t} are the reset, update, and new gates, respectively.
\eqn{\sigma} is the sigmoid function.
}
\note{
All the weights and biases are initialized from \eqn{\mathcal{U}(-\sqrt{k}, \sqrt{k})}
where \eqn{k = \frac{1}{\mbox{hidden\_size}}}
}
\section{Inputs}{
Inputs: input, h_0
\itemize{
\item \strong{input} of shape \verb{(seq_len, batch, input_size)}: tensor containing the features
of the input sequence. The input can also be a packed variable length
sequence. See \code{\link[=nn_utils_rnn_pack_padded_sequence]{nn_utils_rnn_pack_padded_sequence()}}
for details.
\item \strong{h_0} of shape \verb{(num_layers * num_directions, batch, hidden_size)}: tensor
containing the initial hidden state for each element in the batch.
Defaults to zero if not provided.
}
}
\section{Outputs}{
Outputs: output, h_n
\itemize{
\item \strong{output} of shape \verb{(seq_len, batch, num_directions * hidden_size)}: tensor
containing the output features h_t from the last layer of the GRU,
for each t. If a \code{PackedSequence} has been
given as the input, the output will also be a packed sequence.
For the unpacked case, the directions can be separated
using \code{output$view(c(seq_len, batch, num_directions, hidden_size))},
with forward and backward being direction \code{0} and \code{1} respectively.
Similarly, the directions can be separated in the packed case.
\item \strong{h_n} of shape \verb{(num_layers * num_directions, batch, hidden_size)}: tensor
containing the hidden state for \code{t = seq_len}.
Like \emph{output}, the layers can be separated using
\code{h_n$view(c(num_layers, num_directions, batch, hidden_size))}.
}
}
\section{Attributes}{
\itemize{
\item \code{weight_ih_l[k]} : the learnable input-hidden weights of the \eqn{\mbox{k}^{th}} layer
(W_ir|W_iz|W_in), of shape \verb{(3*hidden_size x input_size)}
\item \code{weight_hh_l[k]} : the learnable hidden-hidden weights of the \eqn{\mbox{k}^{th}} layer
(W_hr|W_hz|W_hn), of shape \verb{(3*hidden_size x hidden_size)}
\item \code{bias_ih_l[k]} : the learnable input-hidden bias of the \eqn{\mbox{k}^{th}} layer
(b_ir|b_iz|b_in), of shape \code{(3*hidden_size)}
\item \code{bias_hh_l[k]} : the learnable hidden-hidden bias of the \eqn{\mbox{k}^{th}} layer
(b_hr|b_hz|b_hn), of shape \code{(3*hidden_size)}
}
}
\examples{
if (torch_is_installed()) {
rnn <- nn_gru(10, 20, 2)
input <- torch_randn(5, 3, 10)
h0 <- torch_randn(2, 3, 20)
output <- rnn(input, h0)
}
}
|
library(dplyr)
library(httr)
library(lubridate)
library(stringr)
library(tidyr)
datefmt <- function(datetime) {
  ## Format a POSIXct datetime for the WBEA API as "YYYY,M,D,H,M".
  ## The month is zero-based (JavaScript-style numbering), hence the `- 1`.
  ## Vectorized: a vector of datetimes yields a vector of strings.
  parts <- list(year(datetime),
                month(datetime) - 1,
                day(datetime),
                hour(datetime),
                minute(datetime))
  do.call(paste, c(parts, sep = ","))
}
wbea_request <- function(ids, start, end) {
  ## Request data from the wbea continuous data viewer
  ##
  ## ids:        vector of channel ids; joined with commas for the query string
  ## start, end: POSIXct datetimes bounding the request window
  ##
  ## Returns a character vector with one "<!>"-delimited record per channel;
  ## the first element of the split (a header/preamble) is dropped.
  ## NOTE(review): verbose() echoes the full HTTP exchange to the console --
  ## presumably left in for debugging; confirm before unattended use.
  qstring <- list(a = "wbe",
                  c = paste(ids, collapse = ","),
                  s = datefmt(start),
                  e = datefmt(end))
  r <- httr::GET("http://67.210.212.45/silverdata2", query = qstring, verbose())
  data <- strsplit(httr::content(r, "text"), "<!>")
  data[[1]][-1]
}
extract <- function(response, pattern, separator, names) {
  ## Extract one delimited field from each response string.
  ##
  ## response:  character vector of raw API responses
  ## pattern:   regex with a single capture group isolating the field
  ## separator: literal string separating values within the field
  ## names:     column names for the resulting data.frame
  ##
  ## Returns a character data.frame with one row per response string.
  data <- str_match(response, pattern = pattern)[, 2]
  l <- strsplit(data, separator, fixed = TRUE) %>%
    ## Catch empty fields (e.g. a missing flags section): substitute NA so
    ## every response still contributes exactly one row. `||` (scalar,
    ## short-circuiting) replaces the original vectorized `|`, which is the
    ## wrong operator for an if() condition.
    lapply(function(x) if (is.null(x) || length(x) == 0) NA else x)
  ## T/F shorthands replaced with TRUE/FALSE (T and F are reassignable).
  df <- data.frame(matrix(unlist(l), nrow = length(l), byrow = TRUE),
                   stringsAsFactors = FALSE)
  names(df) <- names
  df
}
parse <- function(response) {
  ## Parse response string to openair format
  ## NOTE(review): this masks base::parse() once sourced; consider renaming.
  ## Each response encodes four sections delimited by literal tags:
  ##   header "<l>" dates "<d>" data "<f>" flags
  header <- extract(response, "^(.*)<l>", "+", c("id", "measurement", "station", "units"))
  dates <- extract(response, "<l>(.*)<d>", ",", c("n", "step", "start", "nodata"))
  data <- extract(response, "<d>(.*)<f>", ";;", c("data"))
  flags <- extract(response, "<f>(.*)", ";;", c("flags"))
  df <- cbind(header, dates, data, flags)
  df$station <- str_trim(df$station)
  df$n <- as.numeric(df$n)        # number of observations in the series
  df$step <- as.numeric(df$step)  # sampling interval -- assumed seconds; confirm
  df$start <- ymd_hm(df$start)    # series start timestamp
  df <- df %>%
    ## one row per observation: data/flags are comma-packed per series
    separate_rows(data, flags, sep=",") %>%
    ## flag 0 means valid; anything else is masked to NA. `flags` is
    ## character here, so the comparison relies on R coercing 0 to "0".
    mutate(masked_data = as.numeric(ifelse(flags == 0, data, NA))) %>%
    group_by(id) %>%
    ## reconstruct timestamps from start + step, then drop any rows that
    ## fall beyond the declared n observations (trailing padding)
    mutate(timestamp = start + ((seq(n()) - 1) * step)) %>%
    filter(timestamp < start + (n * step)) %>%
    select(-c(n, step, start, nodata))
  stations <- split(df, df$station)
  ## one wide data.frame per station: rows = timestamps, columns = channel ids
  lapply(stations, function(station) {
    pivot_wider(station,
                id_cols = timestamp,
                names_from = id,
                values_from = masked_data)
  }
  )
}
| /request.r | permissive | GaganKapoor/Air-Monitoring | R | false | false | 2,375 | r | library(dplyr)
library(httr)
library(lubridate)
library(stringr)
library(tidyr)
datefmt <- function(datetime) {
## Format a POSIXct datetime for the WBEA API
paste(year(datetime),
month(datetime) - 1,
day(datetime),
hour(datetime),
minute(datetime), sep = ",")
}
wbea_request <- function(ids, start, end) {
## Request data from the wbea continuous data viewer
qstring <- list(a = "wbe",
c = paste(ids, collapse = ","),
s = datefmt(start),
e = datefmt(end))
r <- httr::GET("http://67.210.212.45/silverdata2", query = qstring, verbose())
data <- strsplit(httr::content(r, "text"), "<!>")
data[[1]][-1]
}
extract <- function(response, pattern, separator, names) {
## Extract data from response string
data <- str_match(response, pattern = pattern)[, 2]
l <- strsplit(data, separator, fixed = T) %>%
lapply(function(x) {if (is.null(x) | length(x) == 0) {NA} else { x }}) ## Catch empty flags
df <- data.frame(matrix(unlist(l), nrow = length(l), byrow = T), stringsAsFactors = F)
names(df) <- names
df
}
parse <- function(response) {
## Parse response string to openair format
header <- extract(response, "^(.*)<l>", "+", c("id", "measurement", "station", "units"))
dates <- extract(response, "<l>(.*)<d>", ",", c("n", "step", "start", "nodata"))
data <- extract(response, "<d>(.*)<f>", ";;", c("data"))
flags <- extract(response, "<f>(.*)", ";;", c("flags"))
df <- cbind(header, dates, data, flags)
df$station <- str_trim(df$station)
df$n <- as.numeric(df$n)
df$step <- as.numeric(df$step)
df$start <- ymd_hm(df$start)
df <- df %>%
separate_rows(data, flags, sep=",") %>%
mutate(masked_data = as.numeric(ifelse(flags == 0, data, NA))) %>%
group_by(id) %>%
mutate(timestamp = start + ((seq(n()) - 1) * step)) %>%
filter(timestamp < start + (n * step)) %>%
select(-c(n, step, start, nodata))
stations <- split(df, df$station)
lapply(stations, function(station) {
pivot_wider(station,
id_cols = timestamp,
names_from = id,
values_from = masked_data)
}
)
}
|
## CONTOUR PLOT OF ABACO TEMPERATURE STRING DATA
## CONTOUR PLOT OF ABACO TEMPERATURE STRING DATA
## library() instead of require(): require() returns FALSE on failure
## instead of erroring. ggplot2 and dplyr were used below but never loaded
## explicitly (they only resolved via plotly's re-exports); load them.
library(plotly)       # interactive plots
library(ggplot2)      # static raster plots fed to ggplotly()
library(dplyr)        # filter()
library(lubridate)    # dates
library(zoo)          # na.trim()
library(tidyr)        # gather()
library(RColorBrewer) # Spectral palette

## Read data; trim leading/trailing all-NA rows and parse timestamps.
RawTemperatureData <- read.csv('abaco_temp_data.csv')
RawTemperatureData <- na.trim(RawTemperatureData)
RawTemperatureData$datetime <- lubridate::mdy_hm(RawTemperatureData$datetime)

## Reshape into long format (datetime, Depth, Temperature). Depth is encoded
## in the sensor column names; the first character is a prefix and the rest
## is the depth -- assumed from the substr() below, confirm against the CSV.
ReshapedTempData <- gather(RawTemperatureData, key = Depth, value = Temperature, -datetime)
ReshapedTempData$Depth <- as.numeric(substr(ReshapedTempData$Depth, 2, 6))

TestData <- ReshapedTempData %>% filter(datetime < ymd('2016-08-01'))

## ggplot raster with a gradient color scale
gg <- ggplot(TestData, aes(datetime, Depth)) +
  geom_raster(aes(fill = Temperature)) +
  scale_y_reverse() +
  scale_fill_gradientn(colours = colorRamps::matlab.like(10))
ggplotly(gg)

## Improved color gradient, background removed
gg <- ggplot(TestData, aes(datetime, Depth)) +
  geom_raster(aes(fill = Temperature)) +
  scale_y_reverse() +
  scale_fill_gradientn(colours = rev(brewer.pal(11, 'Spectral'))) +
  theme_bw() +
  theme(panel.grid.minor = element_blank(),
        panel.grid.major = element_blank())
p <- ggplotly(gg)

## Build a plotly colorscale: a data.frame mapping normalized [0, 1] indices
## to Spectral colors, `steps_per_degree` colors per degree C. (The original
## script repeated this construction three times.)
## Adapted from
## https://stackoverflow.com/questions/16922988/interpolating-a-sequential-brewer-palette-as-a-legend-for-ggplot2
make_colorscale <- function(temperatures, steps_per_degree = 4) {
  temp_range <- range(temperatures)
  ncolors <- (ceiling(temp_range[2]) - floor(temp_range[1])) * steps_per_degree
  palette <- colorRampPalette(rev(brewer.pal(11, "Spectral")))(ncolors + 1)
  data.frame(index = (0:ncolors) / ncolors, palette = palette)
}

## Filled temperature contour vs. time (x) and depth (y, reversed).
## If `rangeslider` is TRUE the x axis starts zoomed to the first week of
## data, with a date rangeslider spanning the whole series. (The original
## script repeated this plot_ly/layout call four times.)
plot_temp_contour <- function(data, colorscale, rangeslider = FALSE, hoverinfo = NULL) {
  xaxis <- list(title = "Date/Time")
  if (rangeslider) {
    firstDate <- min(data$datetime)
    xaxis$range <- c(firstDate, firstDate + lubridate::weeks(1))
    xaxis$rangeslider <- list(type = "date", range = range(data$datetime))
  }
  plot_ly(
    type = 'contour',
    data = data,
    x = ~datetime,
    y = ~Depth,
    z = ~Temperature,
    hoverinfo = hoverinfo,
    contours = list(
      coloring = 'fill',
      showlines = FALSE
    ),
    colorscale = colorscale,
    colorbar = list(title = "Temperature (°C)")
  ) %>% layout(
    yaxis = list(
      autorange = "reversed",
      title = 'Depth (m)'
    ),
    xaxis = xaxis,
    title = "Abaco Temperature Profile"
  )
}

## Contour of the pre-August subset, full x range.
colorscale <- make_colorscale(TestData$Temperature)
p <- plot_temp_contour(TestData, colorscale)

#### Same plot with a date rangeslider.
p <- plot_temp_contour(TestData, colorscale, rangeslider = TRUE)

#### Full data set; hover disabled -- this is very heavy and previously
#### crashed the machine.
colorscale <- make_colorscale(ReshapedTempData$Temperature)
p <- plot_temp_contour(ReshapedTempData, colorscale, rangeslider = TRUE, hoverinfo = 'none')

#### Subset up to 2016-08-08 (the original comment said "a month of data";
#### the filter actually keeps everything before 2016-08-08).
OneMonthTempData <- ReshapedTempData %>% filter(datetime < ymd('2016-08-08'))
colorscale <- make_colorscale(OneMonthTempData$Temperature)
p <- plot_temp_contour(OneMonthTempData, colorscale, rangeslider = TRUE, hoverinfo = 'none')
## TODO: Convert from contour to square scatter? | /Scripts/temperature string data vis.R | no_license | abby-lammers/LSM303-Data-Processing | R | false | false | 5,340 | r | ## CONTOUR PLOT OF ABACO TEMPERATURE STRING DATA
## CONTOUR PLOT OF ABACO TEMPERATURE STRING DATA
## library() instead of require(): require() returns FALSE on failure
## instead of erroring. ggplot2 and dplyr were used below but never loaded
## explicitly (they only resolved via plotly's re-exports); load them.
library(plotly)       # interactive plots
library(ggplot2)      # static raster plots fed to ggplotly()
library(dplyr)        # filter()
library(lubridate)    # dates
library(zoo)          # na.trim()
library(tidyr)        # gather()
library(RColorBrewer) # Spectral palette

## Read data; trim leading/trailing all-NA rows and parse timestamps.
RawTemperatureData <- read.csv('abaco_temp_data.csv')
RawTemperatureData <- na.trim(RawTemperatureData)
RawTemperatureData$datetime <- lubridate::mdy_hm(RawTemperatureData$datetime)

## Reshape into long format (datetime, Depth, Temperature). Depth is encoded
## in the sensor column names; the first character is a prefix and the rest
## is the depth -- assumed from the substr() below, confirm against the CSV.
ReshapedTempData <- gather(RawTemperatureData, key = Depth, value = Temperature, -datetime)
ReshapedTempData$Depth <- as.numeric(substr(ReshapedTempData$Depth, 2, 6))

TestData <- ReshapedTempData %>% filter(datetime < ymd('2016-08-01'))

## ggplot raster with a gradient color scale
gg <- ggplot(TestData, aes(datetime, Depth)) +
  geom_raster(aes(fill = Temperature)) +
  scale_y_reverse() +
  scale_fill_gradientn(colours = colorRamps::matlab.like(10))
ggplotly(gg)

## Improved color gradient, background removed
gg <- ggplot(TestData, aes(datetime, Depth)) +
  geom_raster(aes(fill = Temperature)) +
  scale_y_reverse() +
  scale_fill_gradientn(colours = rev(brewer.pal(11, 'Spectral'))) +
  theme_bw() +
  theme(panel.grid.minor = element_blank(),
        panel.grid.major = element_blank())
p <- ggplotly(gg)

## Build a plotly colorscale: a data.frame mapping normalized [0, 1] indices
## to Spectral colors, `steps_per_degree` colors per degree C. (The original
## script repeated this construction three times.)
## Adapted from
## https://stackoverflow.com/questions/16922988/interpolating-a-sequential-brewer-palette-as-a-legend-for-ggplot2
make_colorscale <- function(temperatures, steps_per_degree = 4) {
  temp_range <- range(temperatures)
  ncolors <- (ceiling(temp_range[2]) - floor(temp_range[1])) * steps_per_degree
  palette <- colorRampPalette(rev(brewer.pal(11, "Spectral")))(ncolors + 1)
  data.frame(index = (0:ncolors) / ncolors, palette = palette)
}

## Filled temperature contour vs. time (x) and depth (y, reversed).
## If `rangeslider` is TRUE the x axis starts zoomed to the first week of
## data, with a date rangeslider spanning the whole series. (The original
## script repeated this plot_ly/layout call four times.)
plot_temp_contour <- function(data, colorscale, rangeslider = FALSE, hoverinfo = NULL) {
  xaxis <- list(title = "Date/Time")
  if (rangeslider) {
    firstDate <- min(data$datetime)
    xaxis$range <- c(firstDate, firstDate + lubridate::weeks(1))
    xaxis$rangeslider <- list(type = "date", range = range(data$datetime))
  }
  plot_ly(
    type = 'contour',
    data = data,
    x = ~datetime,
    y = ~Depth,
    z = ~Temperature,
    hoverinfo = hoverinfo,
    contours = list(
      coloring = 'fill',
      showlines = FALSE
    ),
    colorscale = colorscale,
    colorbar = list(title = "Temperature (°C)")
  ) %>% layout(
    yaxis = list(
      autorange = "reversed",
      title = 'Depth (m)'
    ),
    xaxis = xaxis,
    title = "Abaco Temperature Profile"
  )
}

## Contour of the pre-August subset, full x range.
colorscale <- make_colorscale(TestData$Temperature)
p <- plot_temp_contour(TestData, colorscale)

#### Same plot with a date rangeslider.
p <- plot_temp_contour(TestData, colorscale, rangeslider = TRUE)

#### Full data set; hover disabled -- this is very heavy and previously
#### crashed the machine.
colorscale <- make_colorscale(ReshapedTempData$Temperature)
p <- plot_temp_contour(ReshapedTempData, colorscale, rangeslider = TRUE, hoverinfo = 'none')

#### Subset up to 2016-08-08 (the original comment said "a month of data";
#### the filter actually keeps everything before 2016-08-08).
OneMonthTempData <- ReshapedTempData %>% filter(datetime < ymd('2016-08-08'))
colorscale <- make_colorscale(OneMonthTempData$Temperature)
p <- plot_temp_contour(OneMonthTempData, colorscale, rangeslider = TRUE, hoverinfo = 'none')
## TODO: Convert from contour to square scatter? |
ProbabilidadPerdidaOptimizada<-function(portafolio,TiempoFinal){
## Loss probability of the optimized (efficient) portfolio.
## portafolio:  price series (columns = assets); log returns are derived
##              from it below.
## TiempoFinal: horizon passed through to ProbabilidadPerdida().
## Returns list(Probab = loss probability, por = optimal weight vector).
## NOTE(review): the first assignment below is dead code -- it is
## immediately overwritten by the log-return computation.
rendimientosln<-matrix(0,length(portafolio[,1]),length(portafolio[1,]))
rendimientosln<-na.omit(diff(log(portafolio)))
## efficientPortfolio()/timeSeries() come from the Rmetrics packages
## (presumably fPortfolio/timeSeries -- confirm); the optimal weights
## live in the @spec@portfolio slot of the returned S4 object.
r<-efficientPortfolio(timeSeries(rendimientosln))
r<-r@spec@portfolio$weights
r<-as.vector(r)
pp<-ProbabilidadPerdida(TiempoFinal,r,portafolio)
pp<-list("Probab"=pp,"por"=r)
return(pp)
} | /rmetrics.R | no_license | danilhramon/Portafolio | R | false | false | 395 | r | ProbabilidadPerdidaOptimizada<-function(portafolio,TiempoFinal){
rendimientosln<-matrix(0,length(portafolio[,1]),length(portafolio[1,]))
rendimientosln<-na.omit(diff(log(portafolio)))
r<-efficientPortfolio(timeSeries(rendimientosln))
r<-r@spec@portfolio$weights
r<-as.vector(r)
pp<-ProbabilidadPerdida(TiempoFinal,r,portafolio)
pp<-list("Probab"=pp,"por"=r)
return(pp)
} |
context("Testing bootstrap functions")
test_that("auc boot functions", {
  ## Simulate a small logistic-model data set and smoke-test the three AUC
  ## estimators: ordinary bootstrap, 632-corrected bootstrap, and
  ## leave-pairs-out cross-validation.
  set.seed(123)
  n <- 100
  p <- 1
  X <- data.frame(matrix(rnorm(n*p), nrow = n, ncol = p))
  Y <- rbinom(n, 1, plogis(0.2 * X[,1]))
  boot1 <- boot_auc(Y = Y, X = X, B = 10)
  boot2 <- boot_auc(Y = Y, X = X, B = 10, correct632 = TRUE)
  lpo <- lpo_auc(Y = Y, X = X, max_pairs = 10)
  ## Estimates must be numeric and, being AUCs, must lie in [0, 1].
  expect_true(is.numeric(boot1$auc))
  expect_true(is.numeric(boot2$auc))
  expect_true(is.numeric(lpo$auc))
  expect_true(boot1$auc >= 0 & boot1$auc <= 1)
  expect_true(boot2$auc >= 0 & boot2$auc <= 1)
  expect_true(lpo$auc >= 0 & lpo$auc <= 1)
})
test_that("scrnp boot functions", {
  ## Simulate the same logistic set-up as the AUC test and smoke-test the
  ## SCRNP bootstrap estimators (ordinary and 632-corrected).
  set.seed(123)
  n <- 100
  p <- 1
  X <- data.frame(matrix(rnorm(n*p), nrow = n, ncol = p))
  Y <- rbinom(n, 1, plogis(0.2 * X[,1]))
  boot1 <- boot_scrnp(Y = Y, X = X, B = 10)
  boot2 <- boot_scrnp(Y = Y, X = X, B = 10, correct632 = TRUE)
  ## Each estimator should return a single numeric point estimate
  ## (length checks added for parity with the auc test above).
  expect_true(is.numeric(boot1$scrnp))
  expect_true(is.numeric(boot2$scrnp))
  expect_length(boot1$scrnp, 1)
  expect_length(boot2$scrnp, 1)
})
| /tests/testthat/testBoot.R | permissive | benkeser/nlpred | R | false | false | 974 | r | context("Testing bootstrap functions")
test_that("auc boot functions", {
  ## Simulate a small logistic-model data set and smoke-test the three AUC
  ## estimators: ordinary bootstrap, 632-corrected bootstrap, and
  ## leave-pairs-out cross-validation.
  set.seed(123)
  n <- 100
  p <- 1
  X <- data.frame(matrix(rnorm(n*p), nrow = n, ncol = p))
  Y <- rbinom(n, 1, plogis(0.2 * X[,1]))
  boot1 <- boot_auc(Y = Y, X = X, B = 10)
  boot2 <- boot_auc(Y = Y, X = X, B = 10, correct632 = TRUE)
  lpo <- lpo_auc(Y = Y, X = X, max_pairs = 10)
  ## Estimates must be numeric and, being AUCs, must lie in [0, 1].
  expect_true(is.numeric(boot1$auc))
  expect_true(is.numeric(boot2$auc))
  expect_true(is.numeric(lpo$auc))
  expect_true(boot1$auc >= 0 & boot1$auc <= 1)
  expect_true(boot2$auc >= 0 & boot2$auc <= 1)
  expect_true(lpo$auc >= 0 & lpo$auc <= 1)
})
test_that("scrnp boot functions", {
  ## Simulate the same logistic set-up as the AUC test and smoke-test the
  ## SCRNP bootstrap estimators (ordinary and 632-corrected).
  set.seed(123)
  n <- 100
  p <- 1
  X <- data.frame(matrix(rnorm(n*p), nrow = n, ncol = p))
  Y <- rbinom(n, 1, plogis(0.2 * X[,1]))
  boot1 <- boot_scrnp(Y = Y, X = X, B = 10)
  boot2 <- boot_scrnp(Y = Y, X = X, B = 10, correct632 = TRUE)
  ## Each estimator should return a single numeric point estimate
  ## (length checks added for parity with the auc test above).
  expect_true(is.numeric(boot1$scrnp))
  expect_true(is.numeric(boot2$scrnp))
  expect_length(boot1$scrnp, 1)
  expect_length(boot2$scrnp, 1)
})
|
\name{solve_QP_SOCP}
\alias{solve_QP_SOCP}
\title{Solve a Quadratic Programming Problem}
\description{
This routine implements the second order cone programming method from
Kim-Chuan Toh , Michael J. Todd, and Reha H. Tutuncu for solving quadratic
programming problems of the form
\eqn{\min(-d^T b + 1/2 b^T D b)}{min(-d^T b + 1/2 b^T D b)} with the
constraints \eqn{A^T b >= b_0}.
}
\usage{
solve_QP_SOCP(Dmat, dvec, Amat, bvec)
}
\arguments{
\item{Dmat}{
matrix appearing in the quadratic function to be minimized.
}
\item{dvec}{
vector appearing in the quadratic function to be minimized.
}
\item{Amat}{
matrix defining the constraints under which we want to minimize the
quadratic function.
}
\item{bvec}{
vector holding the values of \eqn{b_0} (defaults to zero).
}
}
\value{
a list with the following components:
\item{solution}{
vector containing the solution of the quadratic programming problem.
}
}
\references{
Kim-Chuan Toh , Michael J. Todd, and Reha H. Tutuncu\cr
\emph{SDPT3 version 4.0 -- a MATLAB software for semidefinite-quadratic-linear
programming}\cr
\url{http://www.math.nus.edu.sg/~mattohkc/sdpt3.html}
}
\author{
Hanwen Huang: \email{hanwenh.unc@gmail.com};
Perry Haaland: \email{Perry_Haaland@bd.com};
Xiaosun Lu: \email{Xiaosun_Lu@bd.com};
Yufeng Liu: \email{yfliu@email.unc.edu};
J. S. Marron: \email{marron@email.unc.edu}
}
\seealso{
\code{\link{sqlp}}
}
\examples{
##
## Assume we want to minimize: -(0 5 0) \%*\% b + 1/2 b^T b
## under the constraints: A^T b >= b0
## with b0 = (-8,2,0)^T
## and (-4 2 0)
## A = (-3 1 -2)
## ( 0 0 1)
## we can use solve_QP_SOCP as follows:
##
Dmat <- matrix(0,3,3)
diag(Dmat) <- 1
dvec <- c(0,5,0)
Amat <- matrix(c(-4,-3,0,2,1,0,0,-2,1),3,3)
bvec <- c(-8,2,0)
solve_QP_SOCP(Dmat,dvec,Amat,bvec=bvec)
}
\keyword{optimize}
| /Distance-Weighted-Discrimination/dwdpackage/DWD/man/solve_QP_SOCP.Rd | no_license | MeileiJiang/robust-against-heterogeneity | R | false | false | 1,912 | rd | \name{solve_QP_SOCP}
\alias{solve_QP_SOCP}
\title{Solve a Quadratic Programming Problem}
\description{
This routine implements the second order cone programming method from
Kim-Chuan Toh , Michael J. Todd, and Reha H. Tutuncu for solving quadratic
programming problems of the form
\eqn{\min(-d^T b + 1/2 b^T D b)}{min(-d^T b + 1/2 b^T D b)} with the
constraints \eqn{A^T b >= b_0}.
}
\usage{
solve_QP_SOCP(Dmat, dvec, Amat, bvec)
}
\arguments{
\item{Dmat}{
matrix appearing in the quadratic function to be minimized.
}
\item{dvec}{
vector appearing in the quadratic function to be minimized.
}
\item{Amat}{
matrix defining the constraints under which we want to minimize the
quadratic function.
}
\item{bvec}{
vector holding the values of \eqn{b_0} (defaults to zero).
}
}
\value{
a list with the following components:
\item{solution}{
vector containing the solution of the quadratic programming problem.
}
}
\references{
Kim-Chuan Toh , Michael J. Todd, and Reha H. Tutuncu\cr
\emph{SDPT3 version 4.0 -- a MATLAB software for semidefinite-quadratic-linear
programming}\cr
\url{http://www.math.nus.edu.sg/~mattohkc/sdpt3.html}
}
\author{
Hanwen Huang: \email{hanwenh.unc@gmail.com};
Perry Haaland: \email{Perry_Haaland@bd.com};
Xiaosun Lu: \email{Xiaosun_Lu@bd.com};
Yufeng Liu: \email{yfliu@email.unc.edu};
J. S. Marron: \email{marron@email.unc.edu}
}
\seealso{
\code{\link{sqlp}}
}
\examples{
##
## Assume we want to minimize: -(0 5 0) \%*\% b + 1/2 b^T b
## under the constraints: A^T b >= b0
## with b0 = (-8,2,0)^T
## and (-4 2 0)
## A = (-3 1 -2)
## ( 0 0 1)
## we can use solve.QP as follows:
##
Dmat <- matrix(0,3,3)
diag(Dmat) <- 1
dvec <- c(0,5,0)
Amat <- matrix(c(-4,-3,0,2,1,0,0,-2,1),3,3)
bvec <- c(-8,2,0)
solve_QP_SOCP(Dmat,dvec,Amat,bvec=bvec)
}
\keyword{optimize}
|
#######################################################################################################
## Prediccion
#######################################################################################################
## Salario de jugadores NBA
##
#######################################################################################################
## - Propósito
## determinar el mejor modelo, dadas las variabes, para predecir el salario de los jugadores.
## Partiremos de seleccionar un grupo de modelos que tengan buen poder explicativo. A partir
## de este grupo de modelos, tomaremos el que mayor poder de prediccion tenga.
##
#######################################################################################################
##
## Forward Stepwise para calcular el mejor modelo partiendo de pocas variables a muchas variables
##
## Paquetes:
library(MASS)
library(dplyr)
library(readr)
library(leaps)
library(car)
library(fBasics)
library(akima)
library(ISLR)
##
## Load the data; drop rows with NA so regsubsets()/lm() get complete cases.
datos <- na.omit(read_csv("nba.csv"))
##
## regsubsets() with method = "forward" builds nested models by adding, one
## at a time, the variable that most improves the information criterion.
## - Player and NBA_Country are excluded because they complicate the process
##   (high-cardinality identifier columns).
##
regfit.fwd <- regsubsets(data = datos, Salary ~ . - Player - NBA_Country, method ="forward")
regfit.summary <- summary(regfit.fwd )
##
## Ha genarado 8 modelos y se detiene.
##
## - Residual sum of squares for each model
regfit.summary$rss
##
## - The r-squared for each model
regfit.summary$rsq
##
## - Adjusted r-squared
regfit.summary$adjr2
##
## - Schwartz's information criterion, BIC
regfit.summary$bic
##
## Variable names available across the fitted models.
variables <- colnames(regfit.summary$which)
##
## Of the 8 models, keep the 4 with the greatest explanatory power
## (lowest BIC). order() returns the indices sorted by BIC directly,
## replacing the original grow-with-c() loop (and is robust to ties,
## which made the which()-based loop return extra indices).
numModelo <- order(regfit.summary$bic)[1:4]
numModelo
## We will use models 6, 7, 8 and 5
## - the models with the lowest BIC
## Variable names in each selected model, intercept dropped ([-1]).
mod6Names <- variables[regfit.summary$which[6,]][-1]
mod7Names <- variables[regfit.summary$which[7,]][-1]
mod8Names <- variables[regfit.summary$which[8,]][-1]
mod5Names <- variables[regfit.summary$which[5,]][-1]
##
## HOU (plays for Houston) appears significant, so add a 0/1 indicator
## column. Vectorized comparison replaces the original row-wise loop.
datos$HOU <- as.integer(datos$Tm == "HOU")
#######################################################################################################
##
## Analizaremos los modelos por separado para probar los supuestos y determinar si son adecuados o no.
## Despues, de los modelos a utilizar escogeremos el que mejor prediga el salario.
##
## Assumption checks shared by all candidate models (the original repeated
## this six-line battery verbatim for each model). Each call prints/plots:
##   - zero-mean residuals,
##   - QQ plot + Jarque-Bera test (normality),
##   - component+residual plots (linearity),
##   - ncvTest (heteroscedasticity),
##   - sqrt(VIF) > 2 flags (multicollinearity).
check_assumptions <- function(mod) {
  print(round(mean(mod$residuals), 2) == 0)  ## E[res] = 0
  qqPlot(mod$residuals)                      ## normality: graphical check
  print(jbTest(resid(mod)))                  ## normality: Jarque-Bera
  crPlots(mod)                               ## linearity: component-residual
  print(ncvTest(mod))                        ## heteroscedasticity test
  print(sqrt(vif(mod)) > 2)                  ## multicollinearity flags
}
##
mod6 <- lm( data = datos, Salary ~ NBA_DraftNumber + Age + G + MP + `USG%` + WS)
check_assumptions(mod6)   ## observed issues: non-normal residuals; VIF on G, MP
influencePlot(mod6)
##
mod7 <- lm( data = datos, Salary ~ NBA_DraftNumber + Age + G + MP + `DRB%` + `USG%` + WS)
check_assumptions(mod7)   ## observed issues: non-normal residuals; VIF on G, MP
##
mod8 <- lm( data = datos, Salary ~ NBA_DraftNumber + Age + HOU + G + MP + `DRB%` + `USG%` + WS)
check_assumptions(mod8)   ## observed issues: non-normal residuals; VIF on G, MP
##
mod5 <- lm( data = datos, Salary ~ NBA_DraftNumber + Age + G + `USG%` + WS)
check_assumptions(mod5)   ## no multicollinearity problems in this one
##
#######################################################################################################
##
## Cross Validation
## - Objetivo: encontrar el modelo que mejor prediga
##
MSE <- c()
##
##
## Modelo 6
set.seed(6)
numData <- nrow(datos)
training <- sample(numData, numData/2)
mod6_T <- lm( data = datos, Salary ~ NBA_DraftNumber + Age + G + MP + `USG%` + WS, subset = training)
attach(datos)
MSE <- c(MSE,mean((datos$Salary-predict(mod6_T ,Auto))[-training]^2))
detach(datos)
##
## Modelo 7
set.seed(7)
numData <- nrow(datos)
training <- sample(numData, numData/2)
mod7_T <- lm( data = datos, Salary ~ NBA_DraftNumber + Age + G + MP + `DRB%` + `USG%` + WS,
subset = training)
attach(datos)
MSE <- c(MSE,mean((datos$Salary-predict(mod7_T ,Auto))[-training]^2))
detach(datos)
##
## Modelo 8
set.seed(8)
numData <- nrow(datos)
training <- sample(numData, numData/2)
mod8_T <- lm( data = datos, Salary ~ NBA_DraftNumber + Age + HOU + G + MP + `DRB%` + `USG%` + WS,
subset = training)
attach(datos)
MSE <- c(MSE,mean((datos$Salary-predict(mod8_T ,Auto))[-training]^2))
detach(datos)
##
## Modelo 5
set.seed(5)
numData <- nrow(datos)
training <- sample(numData, numData/2)
mod5_T <- lm( data = datos, Salary ~ NBA_DraftNumber + Age + G + `USG%` + WS, subset = training)
attach(datos)
MSE <- c(MSE,mean((datos$Salary-predict(mod5_T ,Auto))[-training]^2))
detach(datos)
##
##
MSE
## El modelo con menor MSE es el modelo 8 | /Prediccion/Scripts/nba.R | no_license | chemadix/MasterDataScienceCUNEF | R | false | false | 6,357 | r | #######################################################################################################
## Prediccion (prediction assignment)
#######################################################################################################
## NBA player salaries
##
#######################################################################################################
## - Purpose
##   Determine the best model, given the available variables, to predict
##   player salaries. We first select a group of models with good explanatory
##   power; from that group we keep the one with the best predictive power.
##
#######################################################################################################
##
## Forward stepwise selection: build models from few variables to many variables
##
## Packages:
library(MASS)
library(dplyr)
library(readr)
library(leaps)
library(car)
library(fBasics)
library(akima)
library(ISLR)
##
## Load the data, dropping rows with missing values
datos <- na.omit(read_csv("nba.csv"))
##
## regsubsets with method = "forward" adds, at each step, the variable that
## most improves the fit.
## - Player and NBA_Country are excluded because they complicate the process
##   (high-cardinality identifier/factor columns).
##
regfit.fwd <- regsubsets(data = datos, Salary ~ . - Player - NBA_Country, method ="forward")
regfit.summary <- summary(regfit.fwd )
##
## Forward selection generated 8 candidate models and stopped.
##
## Goodness-of-fit summaries for the 8 candidate models:
regfit.summary$rss    ## residual sum of squares for each model
##
regfit.summary$rsq    ## R-squared for each model
##
regfit.summary$adjr2  ## adjusted R-squared
##
regfit.summary$bic    ## Schwartz's information criterion, BIC
##
## Variable names tracked by regsubsets (including the intercept)
variables <- colnames(regfit.summary$which)
##
## Of the 8 models, keep the 4 with the best (lowest) BIC.
## order() replaces the original grow-in-a-loop + which(bic == sort(bic)[i])
## construction: it returns model indices sorted by ascending BIC directly.
numModelo <- order(regfit.summary$bic)[1:4]
numModelo
## We will use models 6, 7, 8 and 5
## - the models with the lowest information criterion (BIC)
mod6Names <- variables[regfit.summary$which[6,]][-1]
mod7Names <- variables[regfit.summary$which[7,]][-1]
mod8Names <- variables[regfit.summary$which[8,]][-1]
mod5Names <- variables[regfit.summary$which[5,]][-1]
## i.e. the selected variable names without the intercept
##
## HOU looks significant, so add an indicator column:
## 1 = player belongs to Houston, 0 = otherwise.
## (Vectorized comparison replaces the original row-by-row 1:nrow loop.)
datos$HOU <- ifelse(datos$Tm == "HOU", 1, 0)
#######################################################################################################
##
## We analyse each model separately, testing the linear-model assumptions to
## decide whether it is adequate. Afterwards, among the retained models we
## pick the one that predicts salary best.
##
mod6 <- lm( data = datos, Salary ~ NBA_DraftNumber + Age + G + MP + `USG%` + WS)
round(mean(mod6$residuals),2) == 0 ## residuals should average to zero: E[res] = 0
qqPlot(mod6$residuals) ## normality: graphical check against normal quantiles
jbTest(resid(mod6)) ## normality: Jarque-Bera test (normality rejected)
crPlots(mod6) ## linearity: component + residual plots
ncvTest(mod6) ## heteroscedasticity (non-constant variance) test
sqrt(vif(mod6)) > 2 ## multicollinearity (issue with G and MP)
influencePlot(mod6) ## influential observations (leverage vs studentized residuals)
##
mod7 <- lm( data = datos, Salary ~ NBA_DraftNumber + Age + G + MP + `DRB%` + `USG%` + WS)
round(mean(mod7$residuals),2) == 0 ## residuals should average to zero: E[res] = 0
qqPlot(mod7$residuals) ## normality: graphical check against normal quantiles
jbTest(resid(mod7)) ## normality: Jarque-Bera test (normality rejected)
crPlots(mod7) ## linearity: component + residual plots
ncvTest(mod7) ## heteroscedasticity (non-constant variance) test
sqrt(vif(mod7)) > 2 ## multicollinearity (issue with G and MP)
##
mod8 <- lm( data = datos, Salary ~ NBA_DraftNumber + Age + HOU + G + MP + `DRB%` + `USG%` + WS)
round(mean(mod8$residuals),2) == 0 ## residuals should average to zero: E[res] = 0
qqPlot(mod8$residuals) ## normality: graphical check against normal quantiles
jbTest(resid(mod8)) ## normality: Jarque-Bera test (normality rejected)
crPlots(mod8) ## linearity: component + residual plots
ncvTest(mod8) ## heteroscedasticity (non-constant variance) test
sqrt(vif(mod8)) > 2 ## multicollinearity (issue with G and MP)
##
mod5 <- lm( data = datos, Salary ~ NBA_DraftNumber + Age + G + `USG%` + WS)
round(mean(mod5$residuals),2) == 0 ## residuals should average to zero: E[res] = 0
qqPlot(mod5$residuals) ## normality: graphical check against normal quantiles
jbTest(resid(mod5)) ## normality: Jarque-Bera test (normality rejected)
crPlots(mod5) ## linearity: component + residual plots
ncvTest(mod5) ## heteroscedasticity (non-constant variance) test
sqrt(vif(mod5)) > 2 ## multicollinearity (no problems)
##
#######################################################################################################
##
## Cross validation (validation-set approach)
## - Goal: find the model that predicts best.
##
## Each candidate model is refit on a random half of the data and its mean
## squared prediction error (MSE) is computed on the held-out half.
##
## NOTE(fix): the original code called predict(model, Auto) -- `Auto` is the
## ISLR example dataset and the call only "worked" because attach(datos) put
## the real columns on the search path. We predict on `datos` directly
## (newdata = datos) and drop the attach()/detach() calls.
MSE <- c()
##
##
## Model 6
set.seed(6)
numData <- nrow(datos)
training <- sample(numData, numData/2)
mod6_T <- lm( data = datos, Salary ~ NBA_DraftNumber + Age + G + MP + `USG%` + WS, subset = training)
MSE <- c(MSE, mean((datos$Salary - predict(mod6_T, newdata = datos))[-training]^2))
##
## Model 7
set.seed(7)
numData <- nrow(datos)
training <- sample(numData, numData/2)
mod7_T <- lm( data = datos, Salary ~ NBA_DraftNumber + Age + G + MP + `DRB%` + `USG%` + WS,
              subset = training)
MSE <- c(MSE, mean((datos$Salary - predict(mod7_T, newdata = datos))[-training]^2))
##
## Model 8
set.seed(8)
numData <- nrow(datos)
training <- sample(numData, numData/2)
mod8_T <- lm( data = datos, Salary ~ NBA_DraftNumber + Age + HOU + G + MP + `DRB%` + `USG%` + WS,
              subset = training)
MSE <- c(MSE, mean((datos$Salary - predict(mod8_T, newdata = datos))[-training]^2))
##
## Model 5
set.seed(5)
numData <- nrow(datos)
training <- sample(numData, numData/2)
mod5_T <- lm( data = datos, Salary ~ NBA_DraftNumber + Age + G + `USG%` + WS, subset = training)
MSE <- c(MSE, mean((datos$Salary - predict(mod5_T, newdata = datos))[-training]^2))
##
## Test-set MSE for models 6, 7, 8 and 5 (in that order)
MSE
## El modelo con menor MSE es el modelo 8 |
\name{panderOptions}
\alias{pander.option}
\alias{panderOptions}
\title{Querying/setting pander option}
\usage{
panderOptions(o, value)
}
\arguments{
\item{o}{option name (string). See below.}
\item{value}{value to assign (optional)}
}
\description{
To list all \code{pander} options, just run this function
without any parameters provided. To query only one value,
pass the first parameter. To set that, use the
\code{value} parameter too.
}
\details{
The following \code{pander} options are available:
\itemize{ \item \code{digits}: numeric (default:
\code{2}) passed to \code{format} \item
\code{decimal.mark}: string (default: \code{.}) passed to
\code{format} \item \code{big.mark}: string (default: '')
passed to \code{format} \item \code{round}: numeric
(default: \code{Inf}) passed to \code{round} \item
\code{keep.trailing.zeros}: boolean (default:
\code{FALSE}) to show or remove trailing zeros in numbers
\item \code{date}: string (default: \code{'\%Y/\%m/\%d
\%X'}) passed to \code{format} when printing dates
(\code{POSIXct} or \code{POSIXt}) \item
\code{header.style}: \code{'atx'} or \code{'setext'}
passed to \code{\link{pandoc.header}} \item
\code{list.style}: \code{'bullet'}, \code{'ordered'} or
\code{'roman'} passed to \code{\link{pandoc.list}}.
Please note that this has no effect on \code{pander}
methods. \item \code{table.style}: \code{'multiline'},
\code{'grid'}, \code{'simple'} or \code{'rmarkdown'}
passed to \code{\link{pandoc.table}} \item
\code{table.split.table}: numeric passed to
\code{\link{pandoc.table}} and also affects \code{pander}
methods. This option tells \code{pander} where to split
too wide tables. The default value (\code{80}) suggests
the conventional number of characters used in a line,
feel free to change (e.g. to \code{Inf} to disable this
feature) if you are not using a VT100 terminal any more
:) \item \code{table.split.cells}: numeric (default:
\code{30}) passed to \code{\link{pandoc.table}} and also
affects \code{pander} methods. This option tells
\code{pander} where to split too wide cells with line
breaks. Set \code{Inf} to disable. \item
\code{table.caption.prefix}: string (default:
\code{'Table: '}) passed to \code{\link{pandoc.table}} to
be used as caption prefix. Be sure about what you are
doing if changing to other than \code{'Table: '} or
\code{':'}. \item \code{table.continues}: string
(default: \code{'Table continues below'}) passed to
\code{\link{pandoc.table}} to be used as caption for long
(split) tables without a user defined caption \item
\code{table.continues.affix}: string (default:
\code{'(continued below)'}) passed to
\code{\link{pandoc.table}} to be used as an affix
concatenated to the user defined caption for long (split)
tables \item \code{table.alignment.default}: string
(default: \code{centre}) that defines the default
alignment of cells. Can be \code{left}, \code{right} or
\code{centre} that latter can be also spelled as
\code{center}. \item \code{table.alignment.rownames}:
string (default: \code{centre}) that defines the
alignment of rownames in tables. Can be \code{left},
\code{right} or \code{centre} that latter can be also
spelled as \code{center}. \item \code{evals.messages}:
boolean (default: \code{TRUE}) passed to \code{evals}'
\code{pander} method specifying if messages should be
rendered \item \code{p.wrap}: a string (default:
\code{'_'}) to wrap vector elements passed to \code{p}
function \item \code{p.sep}: a string (default: \code{',
'}) with the main separator passed to \code{p} function
\item \code{p.copula}: a string (default: \code{' and '})
with ending separator passed to \code{p} function \item
\code{graph.nomargin}: boolean (default: \code{TRUE}) if
trying to keep plots' margins at minimal \item
\code{graph.fontfamily}: string (default: \code{'sans'})
specifying the font family to be used in images. Please
note, that using a custom font on Windows requires
\code{grDevices:::windowsFonts} first. \item
\code{graph.fontcolor}: string (default: \code{'black'})
specifying the default font color \item
\code{graph.fontsize}: numeric (default: \code{12})
specifying the \emph{base} font size in pixels. Main
title is rendered with \code{1.2} and labels with
\code{0.8} multiplier. \item \code{graph.grid}: boolean
(default: \code{TRUE}) if a grid should be added to the
plot \item \code{graph.grid.minor}: boolean (default:
\code{TRUE}) if a minor grid should also be rendered
\item \code{graph.grid.color}: string (default:
\code{'grey'}) specifying the color of the rendered grid
\item \code{graph.grid.lty}: string (default:
\code{'dashed'}) specifying the line type of grid \item
\code{graph.boxes}: boolean (default: \code{FALSE}) if to
render a border around of plot (and e.g. around strip)
\item \code{graph.legend.position}: string (default:
\code{'right'}) specifying the position of the legend:
'top', 'right', 'bottom' or 'left' \item
\code{graph.background}: string (default: \code{'white'})
specifying the plots main background's color \item
\code{graph.panel.background}: string (default:
\code{'transparent'}) specifying the plot's main panel
background. Please \emph{note}, that this option is not
supported with \code{base} graphics. \item
\code{graph.colors}: character vector of default color
palette (defaults to a colorblind theme:
\url{http://jfly.iam.u-tokyo.ac.jp/color/}). Please
\emph{note} that this update work with \code{base} plots
by appending the \code{col} argument to the call if not
set. \item \code{graph.color.rnd}: boolean (default:
\code{FALSE}) specifying if the palette should be
reordered randomly before rendering each plot to get
colorful images \item \code{graph.axis.angle}: numeric
(default: \code{1}) specifying the angle of axes' labels.
The available options are based on \code{par(las)} and
sets if the labels should be: \itemize{ \item \code{1}:
parallel to the axis, \item \code{2}: horizontal, \item
\code{3}: perpendicular to the axis or \item \code{4}:
vertical. } \item \code{graph.symbol}: numeric (default:
\code{1}) specifying a symbol (see the \code{pch}
parameter of \code{par}) }
}
\note{
\code{pander.option} is deprecated and is to be removed
in future releases.
}
\examples{
\dontrun{
panderOptions()
panderOptions('digits')
panderOptions('digits', 5)
}
}
\seealso{
\code{\link{evalsOptions}}
}
| /man/panderOptions.Rd | no_license | jburos/pander | R | false | false | 6,539 | rd | \name{panderOptions}
\alias{pander.option}
\alias{panderOptions}
\title{Querying/setting pander option}
\usage{
panderOptions(o, value)
}
\arguments{
\item{o}{option name (string). See below.}
\item{value}{value to assign (optional)}
}
\description{
To list all \code{pander} options, just run this function
without any parameters provided. To query only one value,
pass the first parameter. To set that, use the
\code{value} parameter too.
}
\details{
The following \code{pander} options are available:
\itemize{ \item \code{digits}: numeric (default:
\code{2}) passed to \code{format} \item
\code{decimal.mark}: string (default: \code{.}) passed to
\code{format} \item \code{big.mark}: string (default: '')
passed to \code{format} \item \code{round}: numeric
(default: \code{Inf}) passed to \code{round} \item
\code{keep.trailing.zeros}: boolean (default:
\code{FALSE}) to show or remove trailing zeros in numbers
\item \code{date}: string (default: \code{'\%Y/\%m/\%d
\%X'}) passed to \code{format} when printing dates
(\code{POSIXct} or \code{POSIXt}) \item
\code{header.style}: \code{'atx'} or \code{'setext'}
passed to \code{\link{pandoc.header}} \item
\code{list.style}: \code{'bullet'}, \code{'ordered'} or
\code{'roman'} passed to \code{\link{pandoc.list}}.
Please note that this has no effect on \code{pander}
methods. \item \code{table.style}: \code{'multiline'},
\code{'grid'}, \code{'simple'} or \code{'rmarkdown'}
passed to \code{\link{pandoc.table}} \item
\code{table.split.table}: numeric passed to
\code{\link{pandoc.table}} and also affects \code{pander}
methods. This option tells \code{pander} where to split
too wide tables. The default value (\code{80}) suggests
the conventional number of characters used in a line,
feel free to change (e.g. to \code{Inf} to disable this
feature) if you are not using a VT100 terminal any more
:) \item \code{table.split.cells}: numeric (default:
\code{30}) passed to \code{\link{pandoc.table}} and also
affects \code{pander} methods. This option tells
\code{pander} where to split too wide cells with line
breaks. Set \code{Inf} to disable. \item
\code{table.caption.prefix}: string (default:
\code{'Table: '}) passed to \code{\link{pandoc.table}} to
be used as caption prefix. Be sure about what you are
doing if changing to other than \code{'Table: '} or
\code{':'}. \item \code{table.continues}: string
(default: \code{'Table continues below'}) passed to
\code{\link{pandoc.table}} to be used as caption for long
(split) tables without a user defined caption \item
\code{table.continues.affix}: string (default:
\code{'(continued below)'}) passed to
\code{\link{pandoc.table}} to be used as an affix
concatenated to the user defined caption for long (split)
tables \item \code{table.alignment.default}: string
(default: \code{centre}) that defines the default
alignment of cells. Can be \code{left}, \code{right} or
\code{centre} that latter can be also spelled as
\code{center}. \item \code{table.alignment.rownames}:
string (default: \code{centre}) that defines the
alignment of rownames in tables. Can be \code{left},
\code{right} or \code{centre} that latter can be also
spelled as \code{center}. \item \code{evals.messages}:
boolean (default: \code{TRUE}) passed to \code{evals}'
\code{pander} method specifying if messages should be
rendered \item \code{p.wrap}: a string (default:
\code{'_'}) to wrap vector elements passed to \code{p}
function \item \code{p.sep}: a string (default: \code{',
'}) with the main separator passed to \code{p} function
\item \code{p.copula}: a string (default: \code{' and '})
with ending separator passed to \code{p} function \item
\code{graph.nomargin}: boolean (default: \code{TRUE}) if
trying to keep plots' margins at minimal \item
\code{graph.fontfamily}: string (default: \code{'sans'})
specifying the font family to be used in images. Please
note, that using a custom font on Windows requires
\code{grDevices:::windowsFonts} first. \item
\code{graph.fontcolor}: string (default: \code{'black'})
specifying the default font color \item
\code{graph.fontsize}: numeric (default: \code{12})
specifying the \emph{base} font size in pixels. Main
title is rendered with \code{1.2} and labels with
\code{0.8} multiplier. \item \code{graph.grid}: boolean
(default: \code{TRUE}) if a grid should be added to the
plot \item \code{graph.grid.minor}: boolean (default:
\code{TRUE}) if a minor grid should also be rendered
\item \code{graph.grid.color}: string (default:
\code{'grey'}) specifying the color of the rendered grid
\item \code{graph.grid.lty}: string (default:
\code{'dashed'}) specifying the line type of grid \item
\code{graph.boxes}: boolean (default: \code{FALSE}) if to
render a border around of plot (and e.g. around strip)
\item \code{graph.legend.position}: string (default:
\code{'right'}) specifying the position of the legend:
'top', 'right', 'bottom' or 'left' \item
\code{graph.background}: string (default: \code{'white'})
specifying the plots main background's color \item
\code{graph.panel.background}: string (default:
\code{'transparent'}) specifying the plot's main panel
background. Please \emph{note}, that this option is not
supported with \code{base} graphics. \item
\code{graph.colors}: character vector of default color
palette (defaults to a colorblind theme:
\url{http://jfly.iam.u-tokyo.ac.jp/color/}). Please
\emph{note} that this update work with \code{base} plots
by appending the \code{col} argument to the call if not
set. \item \code{graph.color.rnd}: boolean (default:
\code{FALSE}) specifying if the palette should be
reordered randomly before rendering each plot to get
colorful images \item \code{graph.axis.angle}: numeric
(default: \code{1}) specifying the angle of axes' labels.
The available options are based on \code{par(las)} and
sets if the labels should be: \itemize{ \item \code{1}:
parallel to the axis, \item \code{2}: horizontal, \item
\code{3}: perpendicular to the axis or \item \code{4}:
vertical. } \item \code{graph.symbol}: numeric (default:
\code{1}) specifying a symbol (see the \code{pch}
parameter of \code{par}) }
}
\note{
\code{pander.option} is deprecated and is to be removed
in future releases.
}
\examples{
\dontrun{
panderOptions()
panderOptions('digits')
panderOptions('digits', 5)
}
}
\seealso{
\code{\link{evalsOptions}}
}
|
# Kaggle driver telematics challenge
# Model building script
#
# Fits three per-driver classifiers (logistic regression, GBM, random forest)
# plus a stacked model, and compares their pooled test-set ROC curves / AUCs.
library(ROCR)
library(randomForest)
library(gbm)
library(dplyr)
library(ggplot2)
# Set the working directory
# NOTE(review): hard-coded absolute Windows path; adjust per machine.
setwd('E:/Kaggle/drivers/')
# Global parameters
nRides <- 200 # Number of rides per driver
nRandomDrivers <- 300 # presumably rides sampled from other drivers as negatives -- confirm in splitData()
propTraining <- 0.75 # fraction of rides used for training
propTest <- 1 - propTraining
drivers <- list.files('./drivers/') # one sub-directory per driver
# Feature engineering parameters
nDT <- 6 # Delta time used for velocity and acceleration
stationary_dist <- 10 # if the movement in meters in nDT seconds is lower than this, we say the car was stationary
avgTrim <- 0.025 # controls the % data that is trimmed when computing means
# Load in helper functions/run helper scripts
source('preprocessing_helper.R')
source('modeling_helper.R')
# For every driver, we fit a model and predict the labels
drivers <- drivers[1:30]  # NOTE: development subset -- only the first 30 drivers
nPredictions <- propTest * (nRandomDrivers + nRides) # number of entries in every test set
# Preallocate the container for all drivers' test predictions
# (nTotal hoists the repeated length expression).
nTotal <- length(drivers) * nPredictions
AUCdata <- data.frame(preds1 = numeric(nTotal),
                      preds2 = numeric(nTotal),
                      preds3 = numeric(nTotal),
                      stackpred = numeric(nTotal),
                      obs = factor(x = numeric(nTotal), levels = c(0, 1)))
counter <- 0
# Per-driver modelling loop: fit the three base models and a stacked model on
# a train split, then store held-out predictions in AUCdata.
for(driver in drivers) {
# Split data of interest in train and test set.
# (splitData() comes from modeling_helper.R -- presumably it mixes the
# driver's rides with rides from random other drivers; confirm there.)
currentData <- splitData(driver)
# Fit a logistic regression on a hand-picked feature subset
model1 <- glm(target ~ total_duration + total_distance + stationary + norm_accel_50_perc + tang_accel_50_perc + accel_50_perc + speed_50_perc,
              data = currentData$train,
              family = binomial(link = "logit"))
# Fit a GBM on all features (ID columns excluded)
model2 <- gbm(formula = target ~ . - driverID - rideID,
              data = currentData$train,
              distribution = "adaboost")
# Fit a random forest; randomForest needs a factor target for classification
currentData$train$target <- as.factor(currentData$train$target)
currentData$test$target <- as.factor(currentData$test$target)
model3 <- randomForest(x = select(currentData$train, -driverID, -rideID, -target),
                       y = currentData$train$target)
# Stacking the models: logistic regression on the base models' in-sample
# predictions. Note: pred_gbm is built here but does NOT enter the stack
# formula below (only pred_glm and pred_rf do).
stackdf <- data.frame(target = currentData$train$target,
                      pred_glm = predict(model1, type = "response"),
                      pred_gbm = predict(model2, n.trees = 100, type = "response"),
                      pred_rf = predict(model3, type = "prob")[,2])
stack1 <- glm(formula = target ~ pred_glm + pred_rf,
              data = stackdf,
              family = binomial(link = "logit"))
# Predict the labels on the held-out test split
preds1 <- predict(model1, newdata = currentData$test, type = "response")
preds2 <- predict(model2, newdata = currentData$test, n.trees = 100, type = "response")
preds3 <- predict(model3, newdata = select(currentData$test, -driverID, -rideID, -target), type = "prob")[,2]
stackdf_pred <- data.frame(target = currentData$test$target,
                           pred_glm = preds1,
                           pred_gbm = preds2,
                           pred_rf = preds3)
stackpred <- predict(stack1, newdata = stackdf_pred, type = "response")
obs <- currentData$test$target
# Store the predictions and observations in a data frame:
# each driver fills a contiguous block of nPredictions rows.
AUCdata[(1 + counter*nPredictions):(nPredictions + counter*nPredictions), ] <- data.frame(preds1, preds2, preds3, stackpred, obs)
# Increase the counter
counter <- counter + 1
message("Finished processing driver ", driver)
}
# Pool predictions over all drivers and compare the models' ROC curves / AUCs.
totalPreds1 <- ROCR::prediction(AUCdata$preds1, AUCdata$obs) # logistic regression
totalPreds2 <- ROCR::prediction(AUCdata$preds2, AUCdata$obs) # GBM
totalPreds3 <- ROCR::prediction(AUCdata$preds3, AUCdata$obs) # random forest
totalStack <- ROCR::prediction(AUCdata$stackpred, AUCdata$obs) # stacked model
perf1 <- ROCR::performance(totalPreds1, "tpr", "fpr")
perf2 <- ROCR::performance(totalPreds2, "tpr", "fpr")
perf3 <- ROCR::performance(totalPreds3, "tpr", "fpr")
perf4 <- ROCR::performance(totalStack, "tpr", "fpr")
# Area under the ROC curve for each model
ROCR::performance(totalPreds1, "auc")@y.values
ROCR::performance(totalPreds2, "auc")@y.values
ROCR::performance(totalPreds3, "auc")@y.values
ROCR::performance(totalStack, "auc")@y.values
# Overlay the four ROC curves
plot(perf1, col = "green")
plot(perf2, col = "red", add = TRUE)
plot(perf3, col = "blue", add = TRUE)
plot(perf4, col = "yellow", add = TRUE)
| /Driver telematics analysis/modeling.R | no_license | thuijskens/Kaggle | R | false | false | 4,346 | r | # Kaggle driver telematics challenge
# Model building script
#
# Fits three per-driver classifiers (logistic regression, GBM, random forest)
# plus a stacked model, and compares their pooled test-set ROC curves / AUCs.
library(ROCR)
library(randomForest)
library(gbm)
library(dplyr)
library(ggplot2)
# Set the working directory
# NOTE(review): hard-coded absolute Windows path; adjust per machine.
setwd('E:/Kaggle/drivers/')
# Global parameters
nRides <- 200 # Number of rides per driver
nRandomDrivers <- 300 # presumably rides sampled from other drivers as negatives -- confirm in splitData()
propTraining <- 0.75 # fraction of rides used for training
propTest <- 1 - propTraining
drivers <- list.files('./drivers/') # one sub-directory per driver
# Feature engineering parameters
nDT <- 6 # Delta time used for velocity and acceleration
stationary_dist <- 10 # if the movement in meters in nDT seconds is lower than this, we say the car was stationary
avgTrim <- 0.025 # controls the % data that is trimmed when computing means
# Load in helper functions/run helper scripts
source('preprocessing_helper.R')
source('modeling_helper.R')
# For every driver, we fit a model and predict the labels
drivers <- drivers[1:30]  # NOTE: development subset -- only the first 30 drivers
nPredictions <- propTest * (nRandomDrivers + nRides) # number of entries in every test set
# Preallocate the container for all drivers' test predictions
# (nTotal hoists the repeated length expression).
nTotal <- length(drivers) * nPredictions
AUCdata <- data.frame(preds1 = numeric(nTotal),
                      preds2 = numeric(nTotal),
                      preds3 = numeric(nTotal),
                      stackpred = numeric(nTotal),
                      obs = factor(x = numeric(nTotal), levels = c(0, 1)))
counter <- 0
# Per-driver modelling loop: fit the three base models and a stacked model on
# a train split, then store held-out predictions in AUCdata.
for(driver in drivers) {
# Split data of interest in train and test set.
# (splitData() comes from modeling_helper.R -- presumably it mixes the
# driver's rides with rides from random other drivers; confirm there.)
currentData <- splitData(driver)
# Fit a logistic regression on a hand-picked feature subset
model1 <- glm(target ~ total_duration + total_distance + stationary + norm_accel_50_perc + tang_accel_50_perc + accel_50_perc + speed_50_perc,
              data = currentData$train,
              family = binomial(link = "logit"))
# Fit a GBM on all features (ID columns excluded)
model2 <- gbm(formula = target ~ . - driverID - rideID,
              data = currentData$train,
              distribution = "adaboost")
# Fit a random forest; randomForest needs a factor target for classification
currentData$train$target <- as.factor(currentData$train$target)
currentData$test$target <- as.factor(currentData$test$target)
model3 <- randomForest(x = select(currentData$train, -driverID, -rideID, -target),
                       y = currentData$train$target)
# Stacking the models: logistic regression on the base models' in-sample
# predictions. Note: pred_gbm is built here but does NOT enter the stack
# formula below (only pred_glm and pred_rf do).
stackdf <- data.frame(target = currentData$train$target,
                      pred_glm = predict(model1, type = "response"),
                      pred_gbm = predict(model2, n.trees = 100, type = "response"),
                      pred_rf = predict(model3, type = "prob")[,2])
stack1 <- glm(formula = target ~ pred_glm + pred_rf,
              data = stackdf,
              family = binomial(link = "logit"))
# Predict the labels on the held-out test split
preds1 <- predict(model1, newdata = currentData$test, type = "response")
preds2 <- predict(model2, newdata = currentData$test, n.trees = 100, type = "response")
preds3 <- predict(model3, newdata = select(currentData$test, -driverID, -rideID, -target), type = "prob")[,2]
stackdf_pred <- data.frame(target = currentData$test$target,
                           pred_glm = preds1,
                           pred_gbm = preds2,
                           pred_rf = preds3)
stackpred <- predict(stack1, newdata = stackdf_pred, type = "response")
obs <- currentData$test$target
# Store the predictions and observations in a data frame:
# each driver fills a contiguous block of nPredictions rows.
AUCdata[(1 + counter*nPredictions):(nPredictions + counter*nPredictions), ] <- data.frame(preds1, preds2, preds3, stackpred, obs)
# Increase the counter
counter <- counter + 1
message("Finished processing driver ", driver)
}
# Pool predictions over all drivers and compare the models' ROC curves / AUCs.
totalPreds1 <- ROCR::prediction(AUCdata$preds1, AUCdata$obs) # logistic regression
totalPreds2 <- ROCR::prediction(AUCdata$preds2, AUCdata$obs) # GBM
totalPreds3 <- ROCR::prediction(AUCdata$preds3, AUCdata$obs) # random forest
totalStack <- ROCR::prediction(AUCdata$stackpred, AUCdata$obs) # stacked model
perf1 <- ROCR::performance(totalPreds1, "tpr", "fpr")
perf2 <- ROCR::performance(totalPreds2, "tpr", "fpr")
perf3 <- ROCR::performance(totalPreds3, "tpr", "fpr")
perf4 <- ROCR::performance(totalStack, "tpr", "fpr")
# Area under the ROC curve for each model
ROCR::performance(totalPreds1, "auc")@y.values
ROCR::performance(totalPreds2, "auc")@y.values
ROCR::performance(totalPreds3, "auc")@y.values
ROCR::performance(totalStack, "auc")@y.values
# Overlay the four ROC curves
plot(perf1, col = "green")
plot(perf2, col = "red", add = TRUE)
plot(perf3, col = "blue", add = TRUE)
plot(perf4, col = "yellow", add = TRUE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get-paste.r
\name{toString.paste}
\alias{toString.paste}
\title{Extract just the paste text from a paste object}
\usage{
\method{toString}{paste}(x, ...)
}
\arguments{
\item{x}{paste object}
\item{...}{unused}
}
\description{
Extract just the paste text from a paste object
}
| /man/toString.paste.Rd | no_license | anandprabhakar0507/pastebin | R | false | true | 355 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get-paste.r
\name{toString.paste}
\alias{toString.paste}
\title{Extract just the paste text from a paste object}
\usage{
\method{toString}{paste}(x, ...)
}
\arguments{
\item{x}{paste object}
\item{...}{unused}
}
\description{
Extract just the paste text from a paste object
}
|
#Functions needed for reading, filtering and normalizing Cel-SEQ data
#remove or keep only spike ins from given data frame
rmspike <- function(x) {
  # Remove ERCC spike-in rows (rownames containing "ERCC-") from a count table.
  #
  # Bug fix: the original used x[-grep("ERCC-", ...), ], which returns an
  # EMPTY data frame when no spike-ins are present, because x[-integer(0), ]
  # selects zero rows. A logical mask handles the no-match case correctly.
  # drop = FALSE keeps the result a data frame even for a single column.
  is_spike <- grepl("ERCC-", row.names(x))
  x[!is_spike, , drop = FALSE]
}
keepspike <- function(x) {
  # Subset a count table to only the ERCC spike-in rows
  # (rownames containing "ERCC-").
  spike_rows <- grep("ERCC-", row.names(x))
  x[spike_rows, ]
}
# chop of chromosome lables (Abel)
chop_chr <- function(name, splitcharacter = "__") {
  # Return the part of `name` before the first `splitcharacter`
  # (e.g. "GeneA__chr1" -> "GeneA").
  strsplit(name, splitcharacter)[[1]][1]
}
#plot expression of one gene as barplot
plotgene<-function(x,n){
  # Barplot of per-cell expression for gene(s) whose rowname matches pattern
  # `n`. grep() is used, so `n` is a regular expression and multiple matching
  # rows are stacked in the barplot.
  # NOTE(review): e.g. n = "Actb" also matches "Actb2" -- confirm intended.
  barplot(as.matrix(x[grep(n,rownames(x)),]),main=n)
}
# make GENEID the rownames and remove column GENEID
mvgeneid <- function(data) {
  # Move the first column (gene IDs) into the rownames and drop that column.
  #
  # Bug fix: the original data[,-1] collapsed a TWO-column input to a plain
  # vector; drop = FALSE always returns a data frame.
  data <- as.data.frame(data)
  rownames(data) <- data[, 1]
  data <- data[, -1, drop = FALSE]
  return(data)
}
#reorder cells from four CS1 primer libraries into one 384 column-long library
# libraries is a vector containing the four library names, in the order A1,A2,B1,B2
reorder.cs1<-function(libraries,name){
# Merge four CEL-seq CS1 primer libraries (order A1, A2, B1, B2) into one
# 384-column library in plate order, then write the merged transcript
# (coutt), barcode (coutb) and read (coutc) tables to disk.
# `libraries`: character vector of 4 input file prefixes; `name`: output prefix.
tc<-list() # transcript counts per library
rc<-list() # read counts per library
bc<-list() # barcode counts per library
for(i in 1:4){
tc[[i]]<- read.csv(paste(libraries[i],".coutt.csv", sep=""), header = TRUE, sep = "\t",row.names =1)
rc[[i]] <- read.csv(paste(libraries[i],".coutc.csv", sep=""), header = TRUE, sep = "\t",row.names =1)
bc[[i]] <- read.csv(paste(libraries[i],".coutb.csv", sep=""), header = TRUE, sep = "\t",row.names =1)
}
# Union-merge the four tables on gene rownames (missing genes padded with 0)
merge.tc<-intersectmatrix(tc[[1]],intersectmatrix(tc[[2]],intersectmatrix(tc[[3]],tc[[4]])))
merge.bc<-intersectmatrix(bc[[1]],intersectmatrix(bc[[2]],intersectmatrix(bc[[3]],bc[[4]])))
merge.rc<-intersectmatrix(rc[[1]],intersectmatrix(rc[[2]],intersectmatrix(rc[[3]],rc[[4]])))
# Build the column permutation interleaving the four 96-well libraries into
# 384-column order: `order` alternates libraries 1/2, `order2` libraries 3/4,
# and the loop alternates 24-column chunks of each.
# NOTE(review): the exact well layout encoded here should be confirmed
# against the plate design.
order<-c(matrix(c(96*0+seq(1,96), 96*1+seq(1,96)), 2, byrow = T))
order2<-c(matrix(c(96*2+seq(1,96), 96*3+seq(1,96)), 2, byrow = T))
all<-c()
for(i in 0:7){
all<-c(all,order[(1+i*24):((i+1)*24)],order2[(1+i*24):((i+1)*24)])
}
merge.order.tc<-merge.tc[all]
merge.order.bc<-merge.bc[all]
merge.order.rc<-merge.rc[all]
# Restore alphabetical gene order (intersectmatrix appends new rows at the
# bottom). The local vector `order` does not shadow base::order() here,
# because R skips non-function bindings when a name is used as a call.
merge.order.tc<- merge.order.tc[order(rownames( merge.order.tc)), ]
merge.order.bc<- merge.order.bc[order(rownames( merge.order.bc)), ]
merge.order.rc<- merge.order.rc[order(rownames( merge.order.rc)), ]
write.table(merge.order.tc,paste(name,".coutt.csv",sep=""),sep="\t")
write.table(merge.order.bc,paste(name,".coutb.csv",sep=""),sep="\t")
write.table(merge.order.rc,paste(name,".coutc.csv",sep=""),sep="\t")
}
#JC's merge function (produces new rows on bottom of new dataframe, so reorder rows alphabetically afterwards)
intersectmatrix<-function(x,y){
# Union-merge two count tables by rowname: rows present in only one table are
# zero-padded in the other, then the row-aligned tables are joined column-wise.
# New rows end up at the bottom of the result, so callers re-sort rownames
# afterwards (see reorder.cs1).
a<-setdiff(row.names(x),row.names(y)) # rows only in x
b<-setdiff(row.names(y),row.names(x)) # rows only in y
d<-matrix(data = 0,nrow = length(a),ncol = ncol(y)) # zero padding appended to y
row.names(d)<-a
colnames(d)<-colnames(y)
c<-matrix(data = 0,nrow = length(b),ncol = ncol(x)) # zero padding appended to x
row.names(c)<-b
colnames(c)<-colnames(x)
y<-rbind(y,d)
x<-rbind(x,c)
# Align y's rows to x's row order before joining the columns
e <- match(rownames(x), rownames(y))
f <- cbind( x, y[e,])
return(f)
}
#overseq2, plot oversequencing per transcript
overseq2 <- function(x,y){
  # Histogram of per-transcript oversequencing: log10(read counts / barcode
  # counts), using only the positive entries of each table.
  # x: read-count table, y: barcode-count table. Assumes both tables have the
  # same shape so positive entries pair up positionally -- TODO confirm.
  main=paste("oversequencing_molecules") # plot title
  xlab=bquote(log[10] ~ "read counts / barcode counts") # subscript in string
  rc.v<-as.vector(unlist(x))[as.vector(unlist(x>0))] # positive read counts
  bc.v<-as.vector(unlist(y))[as.vector(unlist(y>0))] # positive barcode counts
  results<-rc.v/bc.v # reads per unique molecule
  sub=paste("median",round(median(rc.v/bc.v),3),sep=" ")
  hist(log10(results),breaks=75, col="red", main=main,xlab=xlab,sub=sub)
}
#plot total number of reads per sample
totalreads <- function(data, plotmethod = c("barplot", "hist", "cumulative", "combo")) {
  # Total unique reads per sample (column sums), plotted one of four ways.
  # match.arg() validates the choice and supplies "barplot" as the default;
  # the old vectorised `if (!plotmethod %in% ...)` test errored on the
  # 4-element default vector (a hard error in R >= 4.2).
  plotmethod <- match.arg(plotmethod)
  totals <- colSums(data)  # hoisted: every branch reuses the column sums
  stats_line <- paste("mean:", round(mean(totals)), " median:", round(median(totals)))
  if (plotmethod == "hist") {
    a <- hist(log10(totals), breaks = 100, xlab = "log10(counts)",
              ylab = "frequency", main = "total unique reads", col = "grey",
              xaxt = "n", col.sub = "red")
    mtext(stats_line, side = 3, col = "red", cex = 0.8)
    # Tick the integer log10 positions; `at` and `labels` now use the SAME
    # mask (they previously used different masks and could differ in length,
    # making axis() fail whenever any break exceeded 5).
    ticks <- a$breaks[a$breaks %in% seq(0, max(a$breaks), 1)]
    axis(1, at = ticks, labels = ticks)
    abline(v = log10(mean(totals) / 2), col = "red")
    text(log10(mean(totals) / 2), max(a$counts) - 2, round(mean(totals) / 2),
         srt = 0.2, col = "red", pos = 2)
  }
  if (plotmethod == "barplot") {
    b <- barplot(totals, xaxt = "n", xlab = "cells",
                 sub = paste("mean total read:", round(mean(totals))),
                 main = "total unique reads", col = "black", border = NA)
    axis(1, at = b, labels = seq_len(length(data)))  # one tick per cell
    abline(h = mean(totals), col = "red")
  }
  if (plotmethod == "cumulative") {
    plot(ecdf(totals), xlab = "total reads", ylab = "fraction",
         main = "total unique reads", col = "red", tck = 1, pch = 19,
         cex = 0.5, cex.axis = 0.8)
    abline(v = mean(totals / 2), col = "red")
    mtext(stats_line, side = 3, col = "red", cex = 0.8)
  }
  if (plotmethod == "combo") {
    a <- hist(log10(totals), breaks = 100, xlab = "log10(counts)",
              ylab = "frequency", main = "total unique reads", col = "grey",
              xaxt = "n", col.sub = "red")
    mtext(stats_line, side = 3, col = "red", cex = 0.8)
    ticks <- a$breaks[a$breaks %in% c(0, 1, 2, 3, 4, 5)]
    axis(1, at = ticks, labels = ticks)
    abline(v = log10(mean(totals) / 2), col = "red")
    text(log10(mean(totals) / 2), max(a$counts) - 2, round(mean(totals) / 2),
         srt = 0.2, col = "red", pos = 2)
    # plotInset() comes from the oce package (loaded by the calling script).
    plotInset(log10(1), max(a$counts) / 4, log10(250), max(a$counts),
              mar = c(1, 1, 1, 1),
              plot(ecdf(totals), pch = ".", col = "red", cex = 0.5, ylab = NA,
                   xlab = NA, main = NA, cex.axis = 0.8, xaxt = "n", las = 3,
                   mgp = c(2, 0.1, 0), tck = 1, bty = "n"),
              debug = getOption("oceDebug"))
  }
}
#plot amount of genes detected per cell
cellgenes <- function(data, plotmethod = c("hist", "cumulative", "combo")) {
  # Number of detected genes (count >= 1) per cell, plotted one of three ways.
  # match.arg() validates the choice and supplies "hist" as the default; the
  # old vectorised `if` test errored on the default vector in R >= 4.2.
  plotmethod <- match.arg(plotmethod)
  genes <- apply(data, 2, function(x) sum(x >= 1))
  stats_line <- paste("mean:", round(mean(genes)), " median:", round(median(genes)))
  if (plotmethod == "hist") {
    a <- hist(genes, breaks = 100, xlab = "total genes", ylab = "frequency",
              main = "detected genes/cell", col = "steelblue1", xaxt = "n")
    mtext(stats_line, side = 3, col = "red", cex = 0.8)
    ticks <- a$breaks[a$breaks %in% seq(0, max(a$breaks), 1000)]
    axis(1, at = ticks, labels = ticks)
  }
  if (plotmethod == "cumulative") {
    plot(ecdf(genes), pch = 19, col = "red", cex = 0.5, ylab = "frequency",
         xlab = "detected genes/cell", main = "cumulative dist genes",
         cex.axis = 1, las = 1, tck = 1)
    mtext(stats_line, side = 3, col = "red", cex = 0.8)
  }
  if (plotmethod == "combo") {
    a <- hist(genes, breaks = 100, xlab = "log10(counts)", ylab = "frequency",
              main = "detected genes/cell", col = "steelblue1", xaxt = "n")
    mtext(stats_line, side = 3, col = "red", cex = 0.8)
    ticks <- a$breaks[a$breaks %in% seq(0, max(a$breaks), 1000)]
    axis(1, at = ticks, labels = ticks)
    # plotInset() comes from the oce package (loaded by the calling script).
    plotInset(max(genes) / 3, max(a$counts) / 3, max(genes), max(a$counts),
              mar = c(1, 1, 1, 1),
              plot(ecdf(colSums(data)), pch = 19, col = "red", cex = 0.5,
                   ylab = NA, xlab = NA, main = NA, cex.axis = 0.6, las = 3),
              debug = getOption("oceDebug"))
  }
}
#plot ERCC reads
plotspike <- function(data) {
  # Bar plot of total ERCC spike-in reads per cell (column).
  spike_rows <- grep("ERCC-", rownames(data))
  spike <- data[spike_rows, ]
  bars <- barplot(colSums(spike), main = "ERCC reads",
                  ylab = "total ERCC reads", xlab = "cells",
                  col = "orange", xaxt = "n", border = NA)
  axis(1, at = bars, labels = seq_len(length(data)))
}
#plot number of available transcripts vs cutoffs of median detected transcripts
testcutoff <- function(data, n, pdf = FALSE) {
  # Count how many transcripts survive as the per-transcript median-expression
  # cutoff is raised from 1 to 15, and plot the curve; optionally to a PDF.
  #   data - counts table (transcripts x cells); n - label for the title.
  main <- paste("genes cutoff test", n)
  med <- apply(data, 1, median)  # hoisted: was recomputed on every loop pass
  # One logical column per cutoff level: TRUE = transcript passes the cutoff.
  keep <- vapply(1:15, function(cutoff) med > cutoff, logical(length(med)))
  keep <- matrix(keep, nrow = length(med))  # stays a matrix when nrow(data) == 1
  draw <- function() {
    plot(colSums(keep), ylab = "number of transcripts", col = "black",
         xlab = "cutoff (median transcript no.)",  # was mislabelled "mean"
         main = main, type = "b", lty = 2, pch = 19)
  }
  if (pdf) {
    # file.path() inserts the "/" the old paste(getwd(), main, ...) omitted,
    # which glued the filename onto the directory name.
    pdf(file.path(getwd(), paste0(main, ".pdf")))
    on.exit(dev.off(), add = TRUE)  # close the device even if plotting errors
    draw()
  } else {
    draw()  # shared helper: the two branches previously duplicated this plot
  }
}
#plot number of total reads, ERCC-reads and genes/cell over a 384-well plate layout
# Requires RColorBrewer (brewer.pal) to be loaded by the caller; the layout is
# hard-coded for a 384-well (24 x 16) plate and the percentages divide by 384.
plate.plots<-function(data){
# genes<-apply(data,2,function(x) sum(x>=1))# calculate detected genes/cell
spike<-colSums(keepspike(data))+0.1
# calculate sum of spike in per cell
# NOTE(review): the pseudocounts differ -- `spike` adds 0.1 once per cell
# AFTER summing, while `total` adds 0.1 to EVERY entry before summing.
# Confirm this asymmetry is intended.
total<-colSums(rmspike(data+0.1)) # sum of unique reads after removing spike ins
palette <- colorRampPalette(rev(brewer.pal(n = 11,name = "RdYlBu")))(10) # pick which palette for plate plotting
coordinates<-expand.grid(seq(1,24),rev(seq(1,16)))
plot(expand.grid(x = c(1:24), y = c(1:16)),main="Unique non ERCC reads",ylab=NA,xlab=NA) #plate layout
mtext(paste(">1500 unique reads :",round(length(which(colSums(data)>1500))/384*100),"%"),col="red",cex=0.9)
points(coordinates,pch=19,col=palette[cut(log10(total),10)]) # plot total non-ERCC reads/cell over layout
plot(expand.grid(x = c(1:24), y = c(1:16)),main="sum of all ERCCs",ylab=NA,xlab=NA) #plate layout
points(coordinates,pch=19,col=palette[cut(log10(spike),10)]) #plot sum of spike ins over plate
mtext(paste(">100 ERCCs :",round(length(which(colSums(keepspike(data))>100))/384*100),"%"),col="red",cex=0.9)
plot(expand.grid(x = c(1:24), y = c(1:16)),main="sum ERCC/sum non ERCC reads",ylab=NA,xlab=NA)
points(coordinates,pch=19,col=palette[cut(spike/total,10)]) #plot ERCC reads/non-ERCC reads/cell
# NOTE(review): the label says ">10%" but the condition tests > 0.05 (5%);
# one of the two is wrong -- confirm which threshold is intended.
mtext(paste(">10% spike in reads:",round(length(which(spike/total>0.05))/384*100),"%"),col="red",cex=0.9)
}
# plot the top 20 genes with expresion bar and then a CV plot for the same genes
topgenes <- function(data) {
  # Drop ERCC spike-ins, then show the 20 highest-expressed genes and the 20
  # highest-dispersion (var/mean) genes as horizontal log2 bar plots.
  counts <- rmspike(data)
  gene_mean <- apply(counts, 1, mean)
  gene_disp <- apply(counts, 1, var) / gene_mean  # index of dispersion
  gene_mean <- gene_mean[order(gene_mean, decreasing = TRUE)]
  gene_disp <- gene_disp[order(gene_disp, decreasing = TRUE)]
  # Strip "__chr*" suffixes so the bar labels are plain gene names.
  names(gene_mean) <- sapply(names(gene_mean), chop_chr)
  names(gene_disp) <- sapply(names(gene_disp), chop_chr)
  barplot(log2(rev(gene_mean[1:20])), las = 1, cex.names = 0.5,
          main = "top expressed genes", xlab = "log2(mean expression)",
          horiz = TRUE)
  barplot(log2(rev(gene_disp[1:20])), las = 1, cex.names = 0.5,
          main = "top noisy genes", xlab = "log2(var/mean)", horiz = TRUE)
}
#Read files in specified directory automatically (based on Thoms script)
read_files <- function(dir = "", name = Sys.Date()){
  # Discover CEL-Seq count tables ("<plate>.coutt/.coutb/.coutc.csv") in `dir`
  # and return the plate path prefixes ("<dir>/<plate>").  Stops with a
  # message listing every missing companion file.
  # (`name` is unused here but kept for call compatibility.)
  #add "/" to dir
  if (dir != "" && substr(dir, nchar(dir), nchar(dir)) != "/") {
    dir <- paste0(dir, "/")
  }
  #Read files
  # Escaped dots: the old pattern ".cout(t|b|c).csv" let "." match any char.
  files <- list.files(dir, "\\.cout(t|b|c)\\.csv$")
  # Plate name = everything before the literal ".cout" suffix.
  file_names <- unique(vapply(strsplit(files, ".cout", fixed = TRUE),
                              `[[`, character(1), 1))
  # Verify every plate has all three companion files.  Iterating with
  # seq_along semantics (for-in) also fixes the old 1:length() crash when
  # the directory contained no matching files at all.
  error <- ""
  for (fn in file_names) {
    for (suffix in c(".coutb.csv", ".coutc.csv", ".coutt.csv")) {
      if (!file.exists(paste0(dir, fn, suffix))) {
        error <- paste(error, "\n", paste0(fn, suffix, " is not found!"))
      }
    }
  }
  if (error != "") {
    stop(error)
  }
  cat("the following plates will be processed:\n")
  print(file_names)
  paste0(dir, file_names)
}
# check expression in empty corner of plate and calculate "leakyness" from highly expressed genes
# Relies on two free variables from the calling diagnostics script:
#   emptywells -- column selector for the 8 deliberately empty corner wells
#   names, i   -- plate-name list and loop index used in the messages below
# NOTE(review): confirm both exist in the caller's environment before use.
leakygenes<-function(data){
corner<-data[emptywells] # subset data to 8 wells specified in diagnostics script as empty corner
names(corner)<-c("O21","O22","O23","O24","P21","P22","P23","P24")
genes<-apply(data,2,function(x) sum(x>=1)) # check how many genes are detected
genes.corner<-apply(rmspike(corner),2,function(x) sum(x>=1)) # remove ERCC reads
spike.corner<-colSums(keepspike(corner)) # keep only ERCC reads
genespike<-data.frame(genes=genes.corner,ERCC=spike.corner)
# A corner well is "not empty" when it detects more than 1/5 of the plate's
# mean gene count (mean(genes/5) is equivalent to mean(genes)/5).
if(length(which(genes.corner > mean(genes/5))) != 0){
stop(paste("Not all 8 corner samples are empty in", names[[i]],": won't be plotted"))
} else {# check if the corner wells were actually empty, otherwise stop
# plot genes/cell and ERCC reads/cell for corner wells
par(mar = c(5, 4, 6, 1))
barplot(t(genespike),main="total genes and ERCCs \n in empty corner",
col=c("blue","red"),space=rep(c(0.7,0),8),cex.names = 0.8,las=3,beside=TRUE,
legend=colnames(genespike),args.legend = list(x = "topright", bty = "n",horiz=TRUE,inset=c(0,-0.25)))
}
# determine top expressed genes in corner and compare to mean expressed genes in plate
if( length(which(spike.corner > 75)) == 0){
stop(paste("There are no samples with more than 75 ERCC reads in", names[[i]]))
}
cornerz<-corner[which(spike.corner>75)] # take only wells which worked (>75 ERCC reads)
cornerz<-rmspike(cornerz) # remove ERCCs
mean.corner<-apply(cornerz,1,sum)[order(apply(cornerz,1,sum),decreasing=TRUE)][1:50] # pick top 50 in corner
mean.all<-apply(data,1,sum)[order(apply(data,1,sum),decreasing=TRUE)][1:200] # pick top 200 in plate
names(mean.corner)<-sapply(names(mean.corner),chop_chr) # remove __chr* from name
names(mean.all)<-sapply(names(mean.all),chop_chr) # remove __chr* from name
overlap<-mean.corner[names(mean.corner) %in% names(mean.all)] # check overlap between top 50 corner and 200 in plate
non.overlap<-mean.corner[!names(mean.corner) %in% names(mean.all)]
# Leakage estimate: a gene's corner reads as a % of its plate-wide total.
b<-barplot(log2(rev(overlap[1:10])),las=1,cex.names = 0.6, main="top 10 overlapping genes",sub="barcode leaking in %", xlab="log2(sum of reads in corner)",horiz=TRUE)
text(0.5,b, round((mean.corner[names(overlap)[1:10]]/mean.all[names(overlap)[1:10]])*100,2))
if (length(overlap)==50){
warning(paste("there is complete overlap between corner genes and plate genes in ", names[[i]]))
}
else{
barplot(log2(rev(non.overlap[1:length(non.overlap)])),las=1,cex.names = 0.6, main="top 50 empty corner genes \n not in top 200 plate genes", xlab="log2(mean expression)",horiz=TRUE)
}
} | /plate_diagnostics_functions.R | no_license | MauroJM/single-cell-sequencing | R | false | false | 14,677 | r | #Functions needed for reading, filtering and normalizing Cel-SEQ data
#remove or keep only spike ins from given data frame
rmspike <- function(x) {
  # Return `x` with all ERCC spike-in rows removed.
  ERCCs <- grep("ERCC-", row.names(x))  # row numbers of the spike-ins
  # Guard: when no spike-ins are present, x[-integer(0), ] would silently
  # drop EVERY row (negating an empty index yields an empty index in R).
  if (length(ERCCs) == 0) {
    return(x)
  }
  x[-ERCCs, ]
}
keepspike <- function(x) {
  # Subset `x` down to its ERCC spike-in rows only (zero rows if none).
  spike_rows <- grep("ERCC-", row.names(x))
  x[spike_rows, ]
}
# chop off chromosome labels (Abel), e.g. "Gapdh__chr6" -> "Gapdh"
chop_chr <- function(name, splitcharacter = "__") {
  # `splitcharacter` is interpreted as a regular expression by strsplit().
  strsplit(name, splitcharacter)[[1]][1]
}
#plot expression of one gene as barplot
plotgene <- function(x, n) {
  # `n` is matched against the rownames as a regular expression, so it may
  # select more than one row (all matches are plotted as stacked bars).
  hit_rows <- grep(n, rownames(x))
  barplot(as.matrix(x[hit_rows, ]), main = n)
}
# make GENEID the rownames and remove column GENEID
mvgeneid <- function(data) {
  # Promote the first column (gene IDs) to rownames, then drop that column.
  data <- as.data.frame(data)
  rownames(data) <- data[, 1]
  # drop = FALSE: without it a two-column input collapsed to a bare vector,
  # losing the data-frame class and the rownames just assigned.
  data <- data[, -1, drop = FALSE]
  return(data)
}
#reorder cells from four CS1 primer libraries into one 384 column-long library
# libraries is a vector containing the four library names, in the order A1,A2,B1,B2
# Reads <lib>.coutt/.coutc/.coutb.csv for each of the 4 libraries, merges them
# on the union of gene rows (zero-filling via intersectmatrix), interleaves
# the 4 x 96 columns into 384-well plate order, and writes <name>.cout{t,b,c}.csv.
reorder.cs1<-function(libraries,name){
tc<-list()
rc<-list()
bc<-list()
for(i in 1:4){
tc[[i]]<- read.csv(paste(libraries[i],".coutt.csv", sep=""), header = TRUE, sep = "\t",row.names =1)
rc[[i]] <- read.csv(paste(libraries[i],".coutc.csv", sep=""), header = TRUE, sep = "\t",row.names =1)
bc[[i]] <- read.csv(paste(libraries[i],".coutb.csv", sep=""), header = TRUE, sep = "\t",row.names =1)
}
# Right-nested merges: the union of gene rows across all four libraries.
merge.tc<-intersectmatrix(tc[[1]],intersectmatrix(tc[[2]],intersectmatrix(tc[[3]],tc[[4]])))
merge.bc<-intersectmatrix(bc[[1]],intersectmatrix(bc[[2]],intersectmatrix(bc[[3]],bc[[4]])))
merge.rc<-intersectmatrix(rc[[1]],intersectmatrix(rc[[2]],intersectmatrix(rc[[3]],rc[[4]])))
# `order` interleaves the columns of libraries 1/2, `order2` those of 3/4;
# `all` then alternates them in 24-column chunks (one plate row at a time).
# NOTE(review): `order` and `all` shadow base::order / base::all inside this
# function; it works (R still finds the functions), but renaming is safer.
order<-c(matrix(c(96*0+seq(1,96), 96*1+seq(1,96)), 2, byrow = T))
order2<-c(matrix(c(96*2+seq(1,96), 96*3+seq(1,96)), 2, byrow = T))
all<-c()
for(i in 0:7){
all<-c(all,order[(1+i*24):((i+1)*24)],order2[(1+i*24):((i+1)*24)])
}
merge.order.tc<-merge.tc[all]
merge.order.bc<-merge.bc[all]
merge.order.rc<-merge.rc[all]
# Restore alphabetical gene order (intersectmatrix appends new rows last).
merge.order.tc<- merge.order.tc[order(rownames( merge.order.tc)), ]
merge.order.bc<- merge.order.bc[order(rownames( merge.order.bc)), ]
merge.order.rc<- merge.order.rc[order(rownames( merge.order.rc)), ]
write.table(merge.order.tc,paste(name,".coutt.csv",sep=""),sep="\t")
write.table(merge.order.bc,paste(name,".coutb.csv",sep=""),sep="\t")
write.table(merge.order.rc,paste(name,".coutc.csv",sep=""),sep="\t")
}
#JC's merge function (produces new rows on bottom of new dataframe, so reorder rows alphabetically afterwards)
intersectmatrix<-function(x,y){
a<-setdiff(row.names(x),row.names(y))
b<-setdiff(row.names(y),row.names(x))
d<-matrix(data = 0,nrow = length(a),ncol = ncol(y))
row.names(d)<-a
colnames(d)<-colnames(y)
c<-matrix(data = 0,nrow = length(b),ncol = ncol(x))
row.names(c)<-b
colnames(c)<-colnames(x)
y<-rbind(y,d)
x<-rbind(x,c)
e <- match(rownames(x), rownames(y))
f <- cbind( x, y[e,])
return(f)
}
#overseq2, plot oversequencing per transcript
overseq2 <- function(x,y){
main=paste("oversequencing_molecules") # mixes string + name of choice
xlab=bquote(log[10] ~ "read counts / barcode counts") # subscript in string
rc.v<-as.vector(unlist(x))[as.vector(unlist(x>0))]
bc.v<-as.vector(unlist(y))[as.vector(unlist(y>0))]
results<-rc.v/bc.v
sub=paste("median",round(median(rc.v/bc.v),3),sep=" ")
hist(log10(results),breaks=75, col="red", main=main,xlab=xlab,sub=sub)
}
#plot total number of reads per sample
totalreads <- function(data,plotmethod=c("barplot","hist","cumulative","combo")){
if ( ! plotmethod %in% c("barplot","hist","cumulative","combo") ) stop("invalid method")
if(plotmethod == "hist"){
a<-hist(log10(colSums(data)),breaks=100,xlab="log10(counts)",ylab="frequency",main="total unique reads",col="grey",xaxt="n",col.sub="red")
mtext(paste("mean:",round(mean(colSums(data)))," median:",round(median(colSums(data)))),side=3,col="red",cex=0.8)
axis(1,at=a$breaks[which(a$breaks %in% seq(0,max(a$breaks),1))],labels=a$breaks[which(a$breaks %in% c(0,1,2,3,4,5))])
axis(1,at=a$breaks[which(a$breaks %in% seq(0,max(a$breaks),1000))],labels=a$breaks[which(a$breaks %in% seq(0,max(a$breaks),1000))])
abline(v=log10(mean(colSums(data))/2),col="red")
text(log10(mean(colSums(data))/2),max(a$counts)-2, round(mean(colSums(data))/2), srt=0.2, col = "red",pos=2)
}
if(plotmethod == "barplot"){
b<-barplot(colSums(data),xaxt="n",xlab="cells",sub=paste("mean total read:",round(mean(colSums(data)))),main="total unique reads",col="black",border=NA)
axis(1,at=b,labels=c(1:length(data))) # 1=horizontal at = position of marks
abline(h=mean(colSums(data)),col="red")
}
if(plotmethod == "cumulative"){
plot(ecdf(colSums(data)),xlab="total reads",ylab="fraction",main="total unique reads",col="red",tck=1,pch=19,cex=0.5,cex.axis=0.8)
abline(v=mean(colSums(data)/2),col="red")
mtext(paste("mean:",round(mean(colSums(data)))," median:",round(median(colSums(data)))),side=3,col="red",cex=0.8)
}
if(plotmethod == "combo"){
a<-hist(log10(colSums(data)),breaks=100,xlab="log10(counts)",ylab="frequency",main="total unique reads",col="grey",xaxt="n",col.sub="red")
mtext(paste("mean:",round(mean(colSums(data)))," median:",round(median(colSums(data)))),side=3,col="red",cex=0.8)
axis(1,at=a$breaks[which(a$breaks %in% c(0,1,2,3,4,5))],labels=a$breaks[which(a$breaks %in% c(0,1,2,3,4,5))])
abline(v=log10(mean(colSums(data))/2),col="red")
text(log10(mean(colSums(data))/2),max(a$counts)-2, round(mean(colSums(data))/2), srt=0.2, col = "red",pos=2)
plotInset(log10(1),max(a$counts)/4,log10(250), max(a$counts),mar=c(1,1,1,1),
plot(ecdf(colSums(data)),pch=".",col="red",cex=0.5,ylab=NA,xlab=NA,main=NA,cex.axis=0.8,xaxt="n",las=3,mgp=c(2,0.1,0),tck=1,bty="n"),
debug = getOption("oceDebug"))
}
}
#plot amount of genes detected per cell
cellgenes<-function(data,plotmethod=c("hist","cumulative","combo")){
if ( ! plotmethod %in% c("hist","cumulative","combo") ) stop("invalid plotting method")
genes<-apply(data,2,function(x) sum(x>=1))
if(plotmethod == "hist"){
a<-hist(genes,breaks=100,xlab="total genes",ylab="frequency",main="detected genes/cell",col="steelblue1",xaxt="n")
mtext(paste("mean:",round(mean(genes))," median:",round(median(genes))),side=3,col="red",cex=0.8)
axis(1,at=a$breaks[which(a$breaks %in% seq(0,max(a$breaks),1000))],labels=a$breaks[which(a$breaks %in% seq(0,max(a$breaks),1000))])
}
if(plotmethod == "cumulative"){
plot(ecdf(genes),pch=19,col="red",cex=0.5,ylab="frequency",xlab="detected genes/cell",main="cumulative dist genes",cex.axis=1,las=1,tck=1)
mtext(paste("mean:",round(mean(genes))," median:",round(median(genes))),side=3,col="red",cex=0.8)
}
if(plotmethod == "combo"){
a<-hist(genes,breaks=100,xlab="log10(counts)",ylab="frequency",main="detected genes/cell",col="steelblue1",xaxt="n")
mtext(paste("mean:",round(mean(genes))," median:",round(median(genes))),side=3,col="red",cex=0.8)
axis(1,at=a$breaks[which(a$breaks %in% seq(0,max(a$breaks),1000))],labels=a$breaks[which(a$breaks %in% seq(0,max(a$breaks),1000))])
plotInset(max(genes)/3,max(a$counts)/3,max(genes), max(a$counts),mar=c(1,1,1,1),
plot(ecdf(colSums(data)),pch=19,col="red",cex=0.5,ylab=NA,xlab=NA,main=NA,cex.axis=0.6,las=3),
debug = getOption("oceDebug"))
}
}
#plot ERCC reads
plotspike<-function(data){
erccs<-data[grep("ERCC-",rownames(data)),]
b<-barplot(colSums(erccs),main="ERCC reads",ylab="total ERCC reads",xlab="cells",col="orange",xaxt="n",border=NA)
axis(1,at=b,labels=c(1:length(data)))
}
#plot number of available transcripts vs cutoffs of median detected transcripts
testcutoff<-function(data,n,pdf=FALSE){
main=paste("genes cutoff test",n)
for(l in 1:15){
z = apply(data,1,median) > l
if(l==1){
rc.cutoff = z
} else {
rc.cutoff = cbind(rc.cutoff,z)
}
}
if (pdf){
pdf(paste(getwd(),main,".pdf",sep=""))
plot(apply(rc.cutoff,2,sum),ylab = "number of transcripts",col="black",
xlab = "cutoff (mean transcript no.)",main=main,type="b",lty=2,pch=19)
dev.off()
}
else{
plot(apply(rc.cutoff,2,sum),ylab = "number of transcripts",col="black",
xlab = "cutoff (mean transcript no.)",main=main,type="b",lty=2,pch=19)
}
}
#plot number of total reads, ERCC-reads and genes/cell over a 384-well plate layout
plate.plots<-function(data){
# genes<-apply(data,2,function(x) sum(x>=1))# calculate detected genes/cell
spike<-colSums(keepspike(data))+0.1
# calculate sum of spike in per cell
total<-colSums(rmspike(data+0.1)) # sum of unique reads after removing spike ins
palette <- colorRampPalette(rev(brewer.pal(n = 11,name = "RdYlBu")))(10) # pick which palette for plate plotting
coordinates<-expand.grid(seq(1,24),rev(seq(1,16)))
plot(expand.grid(x = c(1:24), y = c(1:16)),main="Unique non ERCC reads",ylab=NA,xlab=NA) #plate layout
mtext(paste(">1500 unique reads :",round(length(which(colSums(data)>1500))/384*100),"%"),col="red",cex=0.9)
points(coordinates,pch=19,col=palette[cut(log10(total),10)]) # plot total non-ERCC reads/cell over layout
plot(expand.grid(x = c(1:24), y = c(1:16)),main="sum of all ERCCs",ylab=NA,xlab=NA) #plate layout
points(coordinates,pch=19,col=palette[cut(log10(spike),10)]) #plot sum of spike ins over plate
mtext(paste(">100 ERCCs :",round(length(which(colSums(keepspike(data))>100))/384*100),"%"),col="red",cex=0.9)
plot(expand.grid(x = c(1:24), y = c(1:16)),main="sum ERCC/sum non ERCC reads",ylab=NA,xlab=NA)
points(coordinates,pch=19,col=palette[cut(spike/total,10)]) #plot ERCC reads/non-ERCC reads/cell
mtext(paste(">10% spike in reads:",round(length(which(spike/total>0.05))/384*100),"%"),col="red",cex=0.9)
}
# plot the top 20 genes with expresion bar and then a CV plot for the same genes
topgenes<-function(data){
data<-rmspike(data)
means<-apply(data,1,mean)
vars<-apply(data,1,var)
cv<-vars/means
means<-means[order(means, decreasing = TRUE)]
cv<-cv[order(cv, decreasing = TRUE)]
names(means)<-sapply(names(means),chop_chr)
names(cv)<-sapply(names(cv),chop_chr)
barplot(log2(rev(means[1:20])),las=1,cex.names = 0.5, main="top expressed genes", xlab="log2(mean expression)",horiz=TRUE)
barplot(log2(rev(cv[1:20])),las=1,cex.names = 0.5, main="top noisy genes",xlab="log2(var/mean)",horiz=TRUE)
}
#Read files in specified directory automatically (based on Thoms script)
read_files <- function(dir = "", name = Sys.Date()){
#add "/" to dir
if(substr(dir, start = nchar(dir), stop = nchar(dir)) != "/" && dir != ""){
dir <- paste(dir, "/", sep = "")
}
#Read files
files <- list.files(dir, ".cout(t|b|c).csv")
split <- strsplit(files,split = ".cout")
file_names <- unique(as.character(data.frame(split, stringsAsFactors = FALSE)[1,]))
#This check if all necessary files are in the script
error <- ""
for(i in 1:length(file_names)){
if(file.exists(paste(dir, file_names[i],".coutb.csv", sep="")) == FALSE){
f <- paste(file_names[i], ".coutb.csv", " is not found!", sep = "")
error <- paste(error, "\n", f)
}
if(file.exists(paste(dir, file_names[i],".coutc.csv", sep="")) == FALSE){
f <- paste(file_names[i], ".coutc.csv", " is not found!", sep = "")
error <- paste(error, "\n", f)
}
if(file.exists(paste(dir,file_names[i],".coutt.csv", sep="")) == FALSE){
f <- paste(file_names[i], ".coutt.csv", " is not found!", sep = "")
error <- paste(error, "\n", f)
}
}
if(error != ""){
stop(error)
}
cat("the following plates will be processed:\n")
print(file_names)
output <- paste(dir,file_names, sep="")
return(output)
}
# check expression in empty corner of plate and calculate "leakyness" from highly expressed genes
leakygenes<-function(data){
corner<-data[emptywells] # subset data to 8 wells specified in diagnotics script as empty corner
names(corner)<-c("O21","O22","O23","O24","P21","P22","P23","P24")
genes<-apply(data,2,function(x) sum(x>=1)) # check how many genes are detected
genes.corner<-apply(rmspike(corner),2,function(x) sum(x>=1)) # remove ERCC reads
spike.corner<-colSums(keepspike(corner)) # keep only ERCC reads
genespike<-data.frame(genes=genes.corner,ERCC=spike.corner)
if(length(which(genes.corner > mean(genes/5))) != 0){
stop(paste("Not all 8 corner samples are empty in", names[[i]],": won't be plotted"))
} else {# check if the corner wells were actually empty, otherwise stop
# plot genes/cell and ERCC reads/cell for corner wells
par(mar = c(5, 4, 6, 1))
barplot(t(genespike),main="total genes and ERCCs \n in empty corner",
col=c("blue","red"),space=rep(c(0.7,0),8),cex.names = 0.8,las=3,beside=TRUE,
legend=colnames(genespike),args.legend = list(x = "topright", bty = "n",horiz=TRUE,inset=c(0,-0.25)))
}
# determine top expressed genes in corner and compare to mean expressed genes in plate
if( length(which(spike.corner > 75)) == 0){
stop(paste("There are no samples with more than 75 ERCC reads in", names[[i]]))
}
cornerz<-corner[which(spike.corner>75)] # take only wells which worked (>75 ERCC reads)
cornerz<-rmspike(cornerz) # remove ERCCs
mean.corner<-apply(cornerz,1,sum)[order(apply(cornerz,1,sum),decreasing=TRUE)][1:50] # pick top 50 in corner
mean.all<-apply(data,1,sum)[order(apply(data,1,sum),decreasing=TRUE)][1:200] # pick top 200 in plate
names(mean.corner)<-sapply(names(mean.corner),chop_chr) # remove __chr* from name
names(mean.all)<-sapply(names(mean.all),chop_chr) # remove __chr* from name
overlap<-mean.corner[names(mean.corner) %in% names(mean.all)] # check overal between top 50 corner and 200 in plate
non.overlap<-mean.corner[!names(mean.corner) %in% names(mean.all)]
b<-barplot(log2(rev(overlap[1:10])),las=1,cex.names = 0.6, main="top 10 overlapping genes",sub="barcode leaking in %", xlab="log2(sum of reads in corner)",horiz=TRUE)
text(0.5,b, round((mean.corner[names(overlap)[1:10]]/mean.all[names(overlap)[1:10]])*100,2))
if (length(overlap)==50){
warning(paste("there is complete overlap between corner genes and plate genes in ", names[[i]]))
}
else{
barplot(log2(rev(non.overlap[1:length(non.overlap)])),las=1,cex.names = 0.6, main="top 50 empty corner genes \n not in top 200 plate genes", xlab="log2(mean expression)",horiz=TRUE)
}
} |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hgenes.R
\name{hgenes}
\alias{hgenes}
\title{Retrieves genes from HMDB}
\usage{
hgenes(x)
}
\arguments{
\item{x}{is the metabolite of interest}
}
\value{
returns a list of the gene names related to a specific metabolite.
}
\description{
The function looks up the HMDB entry and retrieves the genes related to a specific metabolite.
}
| /man/hgenes.Rd | no_license | ExoLab-UPLCMS/MetaboliteHub | R | false | true | 409 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hgenes.R
\name{hgenes}
\alias{hgenes}
\title{Retrieves genes from HMDB}
\usage{
hgenes(x)
}
\arguments{
\item{x}{is the metabolite of interest}
}
\value{
returns a list of the gene names related to a specific metabolite.
}
\description{
The function looks at HMDB entry and retrieves the genes related to specific metabolite.
#'
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ancillary.R
\name{aaf}
\alias{aaf}
\title{Compute the Frequency of Each Amino Acid in Each Species}
\usage{
aaf(data)
}
\arguments{
\item{data}{input data must be a dataframe (see details).}
}
\value{
A dataframe providing amino acid frequencies in the set of species. Rows correspond to amino acids and columns to species.
}
\description{
Computes the frequency of each amino acid in each species.
}
\details{
Input data must be a dataframe where each row corresponds to an individual protein, and each column identifies a species. Therefore, the columns' names of this dataframe must be coherent with the names of the OTUs being analyzed.
}
\examples{
aaf(bovids)
}
\seealso{
env.sp(), otu.vector(), otu.space()
}
| /man/aaf.Rd | no_license | cran/EnvNJ | R | false | true | 791 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ancillary.R
\name{aaf}
\alias{aaf}
\title{Compute the Frequency of Each Amino Acid in Each Species}
\usage{
aaf(data)
}
\arguments{
\item{data}{input data must be a dataframe (see details).}
}
\value{
A dataframe providing amino acid frequencies in the set of species. Rows correspond to amino acids and columns to species.
}
\description{
Computes the frequency of each amino acid in each species.
}
\details{
Input data must be a dataframe where each row corresponds to an individual protein, and each column identifies a species. Therefore, the columns' names of this dataframe must be coherent with the names of the OTUs being analyzed.
}
\examples{
aaf(bovids)
}
\seealso{
env.sp(), otu.vector(), otu.space()
}
|
#### Copyright 2016 Andrew D Fox
####
#### Licensed under the Apache License, Version 2.0 (the "License");
#### you may not use this file except in compliance with the License.
#### You may obtain a copy of the License at
####
#### http://www.apache.org/licenses/LICENSE-2.0
####
#### Unless required by applicable law or agreed to in writing, software
#### distributed under the License is distributed on an "AS IS" BASIS,
#### WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#### See the License for the specific language governing permissions and
#### limitations under the License.
####
# Code adapted from:
# SAMPLE CODE FOR HOUSEMAN, ACCOMANDO, ET AL. (November 6, 2011)
# PubMed ID: 22568884
# Code adapted by: Andrew D Fox
# ---- Inputs and study constants ----
library(nlme)                    # lme(): mixed models with a PLATE random effect
source("wbcInference.R")         # presumably defines inferWBCbyLme()/projectWBC() used below -- confirm
load("metaDataMSmethyl.RData") ## metadata
# NOTE(review): the .RData presumably also supplies trainData_pheno and
# targData_covariates referenced later in this script -- confirm.
cellPropsFACS = read.csv("CellPropsFACS.txt", sep="\t")
BetaVal = read.table( "methylation_beta_values_001.txt", header = T, sep = '\t', quote='' )
# Study layout constants used for indexing below.
NUM_COLUMNS = 9
NUM_CTRLS = 8
NUM_CASES = 7
NUM_CELLTYPES = 6
TARG_RANGE_CTRL = 1:6
TARG_RANGE_CASE = 7:19
TARG_RANGE_CTRL_P1 = 1:8
TARG_RANGE_CASE_P1 = 9:15
NUM_CTRLS_TARGDATA = length( TARG_RANGE_CTRL_P1 )
NUM_CASES_TARGDATA = length( TARG_RANGE_CASE_P1 )
NUM_VALS_STORAGE = 5
m2b <- function(m){ return( 2^m/(1+2^m) ) }
b2m <- function(b){ return( log2(b/(1-b)) ) }
BetaVal = as.matrix( BetaVal )
# Drop the first character of every column name (presumably the "X" prefix
# read.table adds to numeric sample IDs -- confirm).  substr() is vectorised
# over `stop`, so nchar() gives each name its own true length; the old code
# passed length(colnames(BetaVal)) -- the NUMBER of columns -- which only
# worked while every name happened to be shorter than the column count.
colnames(BetaVal) <- substr(colnames(BetaVal), start = 2, stop = nchar(colnames(BetaVal)))
Mv = b2m(BetaVal)   # work on the M-value scale for modelling
# Cell Type indexes: one column per cell type per subject.
# NOTE(review): spacing is mostly 9 columns per subject, but there is an
# 8-wide step between the 5th and 6th entries -- presumably the failed
# sample mentioned later ("9==failedSample"); confirm the layout.
i_n = c(1,10,19,28,37,45,54,63)
i_4 = c(2,11,20,29,38,46,55,64)
i_8 = c(3,12,21,30,39,47,56,65)
i_k = c(4,13,22,31,40,48,57,66)
i_b = c(5,14,23,32,41,49,58,67)
i_m = c(6,15,24,33,42,50,59,68)
i_wbc = c(7,16,25,34,43,51,60,69)
i_wb = c(8,17,26,35,44,52,61,70, 72,73,74,75,76,77)
i_ms = c(9,18,27,36, 53,62,71, 78,79,80,81,82,83)
ind = list( i_n, i_4, i_8, i_k, i_b, i_m, i_wbc, i_wb, i_ms)
dataIndex <- function( ctrlSampleNum, ct_num){ return( (ctrlSampleNum-1)*9 + ct_num ); }
dataIndexVal <- function( ctrlSampleNum, ct_num, vals){ return( vals[(ctrlSampleNum-1)*9 + ct_num] ); }
# Reference CpG sites (name, cell type, direction) from the Houseman site list.
cpg_data = as.matrix( read.csv("houseman_refSites_n1826.txt", sep="\t", header=F) )
cpgs = cpg_data[,1]
cpg_cts = cpg_data[,2]
cpg_dirs = cpg_data[,3]
##############################################
##### ##### Step 1: Fit Validation Model (S0)
##############################################
# Training = purified-fraction columns; target = whole-blood ctrl + MS columns.
trainData = Mv[ cpgs , -c( i_wb, i_ms ) ] ## i_wb are wholeblood(ctrl), 9==failedSample, i_ms are wholeblood(case)
targData = Mv[ cpgs , c(i_wb[1:NUM_CTRLS], i_ms[1:NUM_CASES]) ] # Original controls (8), then original cases (7)
M = length(cpgs)
NTOP_CPGS = length(cpgs)
# Define the validation model:
theModel = y ~ Neut + CD4T + CD8T + NK + Bcell + Mono
sizeModel = 7
# Models are fit on the beta scale, so convert M-values back with m2b().
validationData_Assay = m2b(trainData)
validationData_Pheno = trainData_pheno    # NOTE(review): loaded from the .RData -- confirm
targetData_Assay = m2b(targData)
targetData_Covariates = targData_covariates    # NOTE(review): loaded from the .RData -- confirm
# Linear transformation of coefficient vector
# representing contrast to test F statistic
L.forFstat = diag(sizeModel)[-1,] #All non-intercept coefficients
# Initialize various containers
sigmaResid = sigmaIcept = nObserved = nClusters = Fstat = rep(NA, M)
coefEsts = matrix(NA, M, sizeModel)
coefVcovs =list()
for(j in 1:M){ # For each CpG
#Remove missing methylation values
ii = !is.na(validationData_Assay[j,])
nObserved[j] = sum(ii)
validationData_Pheno$y = validationData_Assay[j,]
if(j%%100==0) cat(j,"\n") # Report progress
try({ # Try to fit a mixed model to adjust for plate
fit = try(lme(theModel, random=~1|PLATE, data=validationData_Pheno[ii,]))
if(inherits(fit,"try-error")){ # If LME can't be fit, just use OLS
fit = lm(theModel, data=validationData_Pheno[ii,])
fitCoef = fit$coef
sigmaResid[j] = summary(fit)$sigma
sigmaIcept[j] = 0
nClusters[j] = 0
}
else{
fitCoef = fit$coef$fixed
sigmaResid[j] = fit$sigma
sigmaIcept[j] = sqrt(getVarCov(fit)[1])
nClusters[j] = length(fit$coef$random[[1]])
}
coefEsts[j,] = fitCoef
coefVcovs[[j]] = vcov(fit)
# F statistic for the joint test that all cell-type coefficients are zero.
useCoef = L.forFstat %*% fitCoef
useV = L.forFstat %*% coefVcovs[[j]] %*% t(L.forFstat)
Fstat[j] = (t(useCoef) %*% solve(useV, useCoef))/sizeModel
})
}
# Name the rows so that they can be easily matched to the target data set
rownames(coefEsts) = rownames(validationData_Assay)
colnames(coefEsts) = names(fitCoef)
# Get P values corresponding to F statistics
Pval = pf(Fstat, sizeModel, nObserved - nClusters - sizeModel + 1, lower.tail=FALSE)
Fstat_mtx = as.matrix(Fstat)
Pval_mtx = as.matrix(Pval)
rownames(Fstat_mtx) <- rownames(validationData_Assay)
rownames(Pval_mtx) <- rownames(validationData_Assay)
###############################################
######## Step 2: Fit Target Model (S1) ########
###############################################
# Contrast matrix:
Lwbc = diag(7)[-1,]
Lwbc[,1]=1
#colnames( coefEsts )
rownames(Lwbc) = colnames(coefEsts)[-1]
colnames(Lwbc) = colnames(coefEsts)
# Denominator degrees-of-freedom for parametric bootstrap
degFree = nObserved - nClusters - (sizeModel-1)
CpGSelection = rownames(coefEsts)[1:NTOP_CPGS]
#Note: if the CpGs were scattered throughout the array,
# you would want to select them by name as is indicated here.
# For this sample version, it would be easier just to use "[1:NTOP_CPGS]"
# inferWBCbyLme() is not defined here -- presumably from wbcInference.R.
targetEst = inferWBCbyLme(
targetData_Assay[1:NTOP_CPGS,], # Target methylation (cpGs x subjects)
targetData_Covariates, # Target phenotype frame (subjects x covariates)
y~case, ######+gender+ageCtr, # Target model (fixed effects)
~1|BeadChip, # Target adjustment (random effects) [*Footnote 3*]
coefEsts[CpGSelection,], # Raw coefficient estimates for WBC
Lwbc # Contrast matrix [*Footnote 2*]
)
##############################################
##### ##### Step 3: View projections
##############################################
# Contrast matrix
Lwbc = diag(NUM_CELLTYPES+1)[-1,]
Lwbc[,1]=1
rownames(Lwbc) = colnames(coefEsts)[-1]
colnames(Lwbc) = colnames(coefEsts)
#Lwbc # View contrast matrix
CpGSelection = rownames(coefEsts)[1:NTOP_CPGS]
####### Projections for target data
# projectWBC() is not defined here -- presumably from wbcInference.R; its
# result is indexed per sample below, i.e. one row per target sample.
constrainedCoefs = projectWBC(
targetData_Assay[1:NTOP_CPGS,],
coefEsts[CpGSelection,],
Lwbc)
# Align FACS-measured proportions with the predicted controls for comparison.
cellPropTarget = cellPropsFACS
cellPropsFACS = as.matrix(cellPropsFACS)
rownames(cellPropsFACS) = rownames(constrainedCoefs)[1:NUM_CTRLS]
colnames(cellPropsFACS)<- c("Neut_Gold", "CD4T_Gold", "CD8T_Gold", "NK_Gold", "Bcell_Gold", "Mono_Gold")
ctrlProps = constrainedCoefs[TARG_RANGE_CTRL_P1,]
caseProps = constrainedCoefs[TARG_RANGE_CASE_P1,]
#View the 8 control sample cell proportion predictions:
ctrlProps
# R^2 and RMSE between measured and methylation-derived proportions.
# NOTE(review): props_expt is never defined in this script -- presumably the
# FACS table under another name (cellPropsFACS?); confirm before running.
cor( c(as.matrix(props_expt)), c(as.matrix(ctrlProps)) )^2
sqrt(mean((props_expt-ctrlProps)^2))
| /houseman-code-for-manuscript.R | permissive | foxandrewd/PSMA | R | false | false | 6,880 | r | #### Copyright 2016 Andrew D Fox
####
#### Licensed under the Apache License, Version 2.0 (the "License");
#### you may not use this file except in compliance with the License.
#### You may obtain a copy of the License at
####
#### http://www.apache.org/licenses/LICENSE-2.0
####
#### Unless required by applicable law or agreed to in writing, software
#### distributed under the License is distributed on an "AS IS" BASIS,
#### WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#### See the License for the specific language governing permissions and
#### limitations under the License.
####
# Code adapted from:
# SAMPLE CODE FOR HOUSEMAN, ACCOMANDO, ET AL. (November 6, 2011)
# PubMed ID: 22568884
# Code adapted by: Andrew D Fox
library(nlme)
source("wbcInference.R")
load("metaDataMSmethyl.RData") ## metadata
# FACS-measured cell proportions (gold standard) and the raw beta-value matrix.
cellPropsFACS = read.csv("CellPropsFACS.txt", sep="\t")
BetaVal = read.table( "methylation_beta_values_001.txt", header = T, sep = '\t', quote='' )
# Layout constants: samples are stored in blocks of NUM_COLUMNS columns.
NUM_COLUMNS = 9
NUM_CTRLS = 8
NUM_CASES = 7
NUM_CELLTYPES = 6
# Row ranges used later to slice control vs. case samples out of results.
TARG_RANGE_CTRL = 1:6
TARG_RANGE_CASE = 7:19
TARG_RANGE_CTRL_P1 = 1:8
TARG_RANGE_CASE_P1 = 9:15
NUM_CTRLS_TARGDATA = length( TARG_RANGE_CTRL_P1 )
NUM_CASES_TARGDATA = length( TARG_RANGE_CASE_P1 )
NUM_VALS_STORAGE = 5
# Convert methylation M-values (log2 ratios) to beta values in (0, 1); vectorized.
m2b <- function(m) {
  pow <- 2^m
  pow / (1 + pow)
}
# Convert beta values in (0, 1) to methylation M-values (base-2 logit); vectorized.
b2m <- function(b) {
  log2(b) - log2(1 - b)
}
BetaVal = as.matrix( BetaVal )
# Strip the leading character from each column name.
# NOTE(review): `stop` here is the number of columns, not nchar() of each name;
# this only works if every name is at least that short — confirm intent.
colnames(BetaVal) <- substr(colnames(BetaVal), start = 2 , stop = length(colnames(BetaVal)) )
# Work on the M-value scale for model fitting.
Mv = b2m(BetaVal)
# Cell Type indexes: column positions for each purified cell type, one entry
# per control sample (samples are laid out in blocks of 9 columns).
i_n = c(1,10,19,28,37,45,54,63)
i_4 = c(2,11,20,29,38,46,55,64)
i_8 = c(3,12,21,30,39,47,56,65)
i_k = c(4,13,22,31,40,48,57,66)
i_b = c(5,14,23,32,41,49,58,67)
i_m = c(6,15,24,33,42,50,59,68)
i_wbc = c(7,16,25,34,43,51,60,69)
# Whole-blood columns: controls (i_wb) and MS cases (i_ms).
i_wb = c(8,17,26,35,44,52,61,70, 72,73,74,75,76,77)
i_ms = c(9,18,27,36, 53,62,71, 78,79,80,81,82,83)
ind = list( i_n, i_4, i_8, i_k, i_b, i_m, i_wbc, i_wb, i_ms)
# Column index for control sample ctrlSampleNum (1-based) and cell-type slot
# ct_num; samples are laid out in consecutive blocks of 9 columns.
dataIndex <- function(ctrlSampleNum, ct_num) {
  block_start <- (ctrlSampleNum - 1) * 9
  block_start + ct_num
}
# Look up the value stored for (ctrlSampleNum, ct_num) in vals, using the
# same blocks-of-9 sample layout as dataIndex().
dataIndexVal <- function(ctrlSampleNum, ct_num, vals) {
  pos <- (ctrlSampleNum - 1) * 9 + ct_num
  vals[pos]
}
# Reference CpG sites (n = 1826) with their discriminating cell type and
# direction of effect.
cpg_data = as.matrix( read.csv("houseman_refSites_n1826.txt", sep="\t", header=F) )
cpgs = cpg_data[,1]
cpg_cts = cpg_data[,2]
cpg_dirs = cpg_data[,3]
##############################################
##### ##### Step 1: Fit Validation Model (S0)
##############################################
trainData = Mv[ cpgs , -c( i_wb, i_ms ) ] ## i_wb are wholeblood(ctrl), 9==failedSample, i_ms are wholeblood(case)
targData = Mv[ cpgs , c(i_wb[1:NUM_CTRLS], i_ms[1:NUM_CASES]) ] # Original controls (8), then original cases (7)
M = length(cpgs)
NTOP_CPGS = length(cpgs)
# Define the validation model:
theModel = y ~ Neut + CD4T + CD8T + NK + Bcell + Mono
# sizeModel = intercept + 6 cell-type coefficients.
sizeModel = 7
# Models are fit on the beta scale, hence the m2b() conversion back.
validationData_Assay = m2b(trainData)
validationData_Pheno = trainData_pheno
targetData_Assay = m2b(targData)
targetData_Covariates = targData_covariates
# Linear transformation of coefficient vector
# representing contrast to test F statistic
# Contrast selecting all non-intercept coefficients for the joint F test.
L.forFstat = diag(sizeModel)[-1,] #All non-intercept coefficients
# Initialize various containers
sigmaResid = sigmaIcept = nObserved = nClusters = Fstat = rep(NA, M)
coefEsts = matrix(NA, M, sizeModel)
coefVcovs =list()
for(j in 1:M){ # For each CpG
#Remove missing methylation values
ii = !is.na(validationData_Assay[j,])
nObserved[j] = sum(ii)
validationData_Pheno$y = validationData_Assay[j,]
if(j%%100==0) cat(j,"\n") # Report progress
try({ # Try to fit a mixed model to adjust for plate
fit = try(lme(theModel, random=~1|PLATE, data=validationData_Pheno[ii,]))
if(inherits(fit,"try-error")){ # If LME can't be fit, just use OLS
fit = lm(theModel, data=validationData_Pheno[ii,])
fitCoef = fit$coef
sigmaResid[j] = summary(fit)$sigma
# No random intercept in the OLS fallback, so its SD and cluster count are 0.
sigmaIcept[j] = 0
nClusters[j] = 0
}
else{
fitCoef = fit$coef$fixed
sigmaResid[j] = fit$sigma
sigmaIcept[j] = sqrt(getVarCov(fit)[1])
nClusters[j] = length(fit$coef$random[[1]])
}
coefEsts[j,] = fitCoef
coefVcovs[[j]] = vcov(fit)
# F statistic for the joint test of all non-intercept coefficients.
useCoef = L.forFstat %*% fitCoef
useV = L.forFstat %*% coefVcovs[[j]] %*% t(L.forFstat)
Fstat[j] = (t(useCoef) %*% solve(useV, useCoef))/sizeModel
})
}
# Name the rows so that they can be easily matched to the target data set
rownames(coefEsts) = rownames(validationData_Assay)
# NOTE(review): uses names(fitCoef) left over from the LAST loop iteration;
# assumes every fit produced the same coefficient ordering.
colnames(coefEsts) = names(fitCoef)
# Get P values corresponding to F statistics
Pval = pf(Fstat, sizeModel, nObserved - nClusters - sizeModel + 1, lower.tail=FALSE)
Fstat_mtx = as.matrix(Fstat)
Pval_mtx = as.matrix(Pval)
rownames(Fstat_mtx) <- rownames(validationData_Assay)
rownames(Pval_mtx) <- rownames(validationData_Assay)
###############################################
######## Step 2: Fit Target Model (S1) ########
###############################################
# Contrast matrix:
Lwbc = diag(7)[-1,]
Lwbc[,1]=1
#colnames( coefEsts )
rownames(Lwbc) = colnames(coefEsts)[-1]
colnames(Lwbc) = colnames(coefEsts)
# Denominator degrees-of-freedom for parametric bootstrap
degFree = nObserved - nClusters - (sizeModel-1)
CpGSelection = rownames(coefEsts)[1:NTOP_CPGS]
#Note: if the CpGs were scattered throughout the array,
# you would want to select them by name as is indicated here.
# For this sample version, it would be easier just to use "[1:NTOP_CPGS]"
targetEst = inferWBCbyLme(
targetData_Assay[1:NTOP_CPGS,], # Target methylation (cpGs x subjects)
targetData_Covariates, # Target phenotype frame (subjects x covariates)
y~case, ######+gender+ageCtr, # Target model (fixed effects)
~1|BeadChip, # Target adjustment (random effects) [*Footnote 3*]
coefEsts[CpGSelection,], # Raw coefficient estimates for WBC
Lwbc # Contrast matrix [*Footnote 2*]
)
##############################################
##### ##### Step 3: View projections
##############################################
# Contrast matrix
Lwbc = diag(NUM_CELLTYPES+1)[-1,]
Lwbc[,1]=1
rownames(Lwbc) = colnames(coefEsts)[-1]
colnames(Lwbc) = colnames(coefEsts)
#Lwbc # View contrast matrix
CpGSelection = rownames(coefEsts)[1:NTOP_CPGS]
####### Projections for target data
constrainedCoefs = projectWBC(
targetData_Assay[1:NTOP_CPGS,],
coefEsts[CpGSelection,],
Lwbc)
cellPropTarget = cellPropsFACS
cellPropsFACS = as.matrix(cellPropsFACS)
rownames(cellPropsFACS) = rownames(constrainedCoefs)[1:NUM_CTRLS]
colnames(cellPropsFACS)<- c("Neut_Gold", "CD4T_Gold", "CD8T_Gold", "NK_Gold", "Bcell_Gold", "Mono_Gold")
ctrlProps = constrainedCoefs[TARG_RANGE_CTRL_P1,]
caseProps = constrainedCoefs[TARG_RANGE_CASE_P1,]
#View the 8 control sample cell proportion predictions:
ctrlProps
# R^2 and RMSE of predicted vs. FACS-measured proportions for the controls.
cor( c(as.matrix(props_expt)), c(as.matrix(ctrlProps)) )^2
sqrt(mean((props_expt-ctrlProps)^2))
|
require("ggplot2")
require("dplyr")
require("timeSeries")
require("forecast")
# require("tsoutliers")
# require("zoo")
usearchive=TRUE # csv data from archive
sensorid=40
# NOTE(review): usearchive is reassigned three times; the last assignment
# (TRUE) wins, so the archive branch below is taken.
usearchive=FALSE # timestamp needs fixing (in csv and conversion below)
usearchive=TRUE # timestamp needs fixing (in csv and conversion below)
#if(usearchive){require("RCurl");}
#max values for clipping
# Per-channel clipping bounds for the particulate readings P1/P2.
Pclip<-list(P1=list(min=0, max=10000),
P2=list(min=0.62, max=1000))
# Only data inside this date window is plotted.
dateinterval<-list(min=as.POSIXct(strptime("2015-12-30", format="%Y-%m-%d")),
max=as.POSIXct(Sys.Date()))
plotdir="output_plots/"
if(!dir.exists(plotdir)){dir.create(plotdir, showWarnings = TRUE, recursive = TRUE, mode = "0755")}
#' function to clip values above/below thresholds
#' Clip the values of a numeric vector into the range [min, max].
#'
#' @param x numeric vector; NA elements are left untouched.
#' @param min lower bound; NULL (default) means the observed non-NA minimum.
#' @param max upper bound; NULL (default) means the observed non-NA maximum.
#' @return x with out-of-range values replaced by the nearest bound.
clipping <- function(x, min = NULL, max = NULL) {
  # Defaults: observed range of the non-NA values (a no-op apart from NAs).
  if (is.null(min)) {
    min <- min(na.omit(x))
  }
  if (is.null(max)) {
    max <- max(na.omit(x))
  }
  if (is.na(max) || is.na(min)) {
    # BUG FIX: warn() does not exist in base R (it would error here);
    # warning() is the correct way to signal this recoverable condition.
    warning("NA for min/max while clipping, no clipping done")
    return(x)
  }
  x[x > max] <- max
  x[x < min] <- min
  return(x)
}
## gaussian filter taps for smoothing function
# adapted from Juan Carlos Borrás http://grokbase.com/p/r/r-help/117c96hy0z/r-gaussian-low-pass-filter
gfcoeffs <- function(s, n) {
  # Symmetric support of 2*n + 1 integer taps around zero.
  taps <- seq(-n, n)
  # Gaussian density with standard deviation s, evaluated at each tap.
  dens <- exp(-taps^2 / (2 * s^2)) / sqrt(2 * pi * s^2)
  # Normalize so the filter taps sum to exactly 1.
  dens / sum(dens)
}
csvsep=","
# Archive branch: incrementally merge all archive CSVs into arcdat.RData,
# then render an overview plot plus one PDF per sensor.
if(usearchive){
arcdat_filename<-"arcdat.RData"
if (file.exists(arcdat_filename)){
load(arcdat_filename)
arcdat_filename_mtime<-file.mtime(arcdat_filename)
}else{
arcdat<-NULL
arcdat_filename_mtime<-as.POSIXct(as.Date("2000-01-01"))
}
fpattern<-"*.csv"
# get filelist relative to working directory, pattern = glob2rx(fpattern)
filelist<- dir(path = "archive.luftdaten.info",pattern=glob2rx(fpattern),recursive=TRUE,full.names=TRUE, ignore.case = TRUE) ## files in current directory
# only read files newer than arcdat_filename
for (csvfilename in filelist[file.mtime(filelist)>arcdat_filename_mtime]){
print(paste("reading ", csvfilename))
rdat<-read.csv(csvfilename, sep=";", dec=".", header=TRUE)
arcdat<-dplyr::bind_rows(arcdat,rdat)
}
arcdat$timestampct<-as.POSIXct(strptime(arcdat$timestamp,format="%Y-%m-%dT%H:%M:%OS"))
# Measurement counts per sensor per day, used for the overview bubble plot.
arctbl<-table(arcdat$sensor_id,as.Date(arcdat$timestamp))#$yday+1000*(as.POSIXlt(arcdat$timestamp)$year+1990))
save(arcdat, arctbl ,file=arcdat_filename)
pdf(file.path(plotdir,"plots_sensordata_overview.pdf"),width=12,height=9)
ggplot(as.data.frame(arctbl), aes(Var2,Var1,size=Freq)) + geom_point()+
labs(x="year, doy", y="sensor id")+
theme(axis.text.x = element_text(angle=90, vjust=0.5, size=6))
dev.off()
# iterate sensors
for (sid in unique(arcdat$sensor_id)){
print(sid)
sdat<-as.data.frame(dplyr::filter(arcdat, sensor_id==sid)) # result type is tbl_df, convert to df
sdat<-sdat[order(sdat$timestampct),] # sort by timestampct
# Derived channels: differences between the two particulate readings.
sdat$P2diff1=sdat$P2-sdat$P1
sdat$durP2diff1=sdat$durP2-sdat$durP1
print(dim(sdat))
# stats::filter the data
# create a gaussian smoothing
sigma=5
ntaps=10
gc<-gfcoeffs(sigma,ntaps)
pdffilename=file.path(plotdir,paste("plots_sensor_",sid,".pdf",sep=""))
print (pdffilename)
# set width according to timediff
timespan<-as.double(max(sdat$timestampct)-min(sdat$timestampct))
# NOTE(review): the message prints min(timespan/2, 10) but pdf() below uses
# max(timespan/2, 10) — one of the two is probably unintended; confirm.
print(paste("plotwidth", min(timespan/2,10)))
pdf(pdffilename, width=max(timespan/2,10), height=10)
measvalnames=c("P1", "durP1", "ratioP1", "P2", "durP2", "ratioP2", "P2diff1", "durP2diff1")
# have a timeSeries object and plot it
print(paste("tsdat plot"))
tsdat<-timeSeries(sdat[,measvalnames], sdat$timestampct)
plot(tsdat)
for (coln in measvalnames){
print (coln)
if(length(sdat[,coln])>ntaps){
# TODO: identify/handle outliers
# look at forecast::tsoutliers tsoutliers::tso
# outlier filter first forecast::tsclean
sdat$plotdat<-forecast::tsclean(sdat[,coln])
sdat$plotdat<-as.vector(stats::filter(sdat$plotdat, gfcoeffs(sigma,ntaps)))
print(paste(coln,"ggplot"))
p<-ggplot(sdat, aes(timestampct,plotdat))+geom_line()+geom_smooth(span=0.2)+ labs(x="Time",y=coln)
print(p)
# TODO: rolling 24-hour means (24h means filtering)
# maybe possible via zoo forecast::ma its fts tseries timeSeries
# fts:moving.mean only Date as time?
# look for functions with timestamp based intervals (24h)
# z=zoo(sdat,order.by=sdat$timestampct)
# sdat.fts=as.fts(sdat[,c("timestampct","P1")])
# idat=irts(sdat$timestampct, sdat$P1)
# plot(idat)
# measurement Dates
# mdts<-as.timeSeries(unique(as.Date(sdat$timestampct)))
# fts wants chr dates (timestamps possible?) in row names?
# rownames(sdat)<-as.character(sdat$timestampct)
}
}
dev.off()
}# sensor_id
print(paste("total size of data:", dim(arcdat) ,collapse = " "))
# Deliberate early exit: the per-file branch below is skipped in archive mode.
stop("manual break: archive plots done")
}# usearchive
# dates=seq.Date(from=as.Date(dateinterval$min),to=as.Date(dateinterval$max),by=1)
# u<-paste('http://archive.madflex.de/',
# dates,
# '/',
# dates,'_ppd42ns_sensor_',
# sensorid,
# '.csv')
# require("RCurl")
# filelist=urllist
# csvsep=";"
# Non-archive branch: plot each local sensorNN.csv file.
fpattern<-"sensor[0-9]+.csv"
# get filelist relative to working directory, pattern = glob2rx(fpattern)
filelist<- dir(path = ".",pattern=fpattern,recursive=FALSE,full.names=FALSE, ignore.case = TRUE) ## files in current directory
for (csvfilename in filelist){
# get/process the data with scripts from repo feinstaub-monitoring-client-python to sensorXX.csv
# csvfilename<-paste("sensor",sensorid,".csv",sep="")
sensorid<-regmatches(csvfilename, regexpr("[0-9]+", csvfilename))
pdffilename<-paste("plots_sensor",sensorid,".pdf",sep="")
sendat<-read.csv(csvfilename,sep=csvsep)
# have a proper timestamp POSIXct (never use POSIXlt)
sendat$timestampct<-as.POSIXct(strptime(sendat$timestamp,format="%Y-%m-%dT%H:%M:%OSZ"))
sendat$timestamp<-NULL
#sendat<-sendat[sendat$timestampct>strptime("2015-10-24", format="%Y-%m-%d"),]
# select data of latest 2 days measured values
# nval=2*60*24*2
# nval=min(nval,dim(sendat)[1])
# seldat<-sendat[1:nval,]
# filter date interval
seldat<-sendat[sendat$timestampct>dateinterval$min&
sendat$timestampct<dateinterval$max,]
# seldat<-sendat
pdf(pdffilename)
if ("P1" %in% names(sendat)){
# filter range 0
seldat$P1<-clipping(seldat$P1,Pclip$P1$min,Pclip$P1$max)
seldat$P2<-clipping(seldat$P2,Pclip$P2$min,Pclip$P2$max)
# Values at or below the lower bound are treated as missing, not clipped.
seldat$P1[seldat$P1<=Pclip$P1$min]<-NA
seldat$P2[seldat$P2<=Pclip$P2$min]<-NA
# sendat<-sendat[,]
plotdat<-seldat
# plot(plotdat$timestampct, log(plotdat$P2))
p<-ggplot(plotdat,aes(timestampct, P2))+geom_point()+geom_smooth()
print(p)
p<-ggplot(plotdat,aes(timestampct, P2))+geom_point()+scale_y_log10()+geom_smooth()
print(p)
p<-ggplot(plotdat,aes(timestampct, P1))+geom_point()+geom_smooth()
print(p)
p<-ggplot(plotdat,aes(timestampct, P1))+geom_point()+scale_y_log10()+geom_smooth()
print(p)
ntaps=10
sigma=4
gfiltc<-gfcoeffs(sigma,ntaps)
# BUG FIX: with dplyr attached, a bare filter() resolves to dplyr::filter,
# which fails on a numeric vector. Qualify with stats:: for the linear
# convolution filter, consistent with the stats::filter call used above.
plotdat$P1smoothed<-stats::filter(plotdat$P1,filter=gfiltc)
plotdat$P2smoothed<-stats::filter(plotdat$P2,filter=gfiltc)
# Smoothed P1/P2 series, each on a linear and a log10 y axis.
p<-ggplot(plotdat,aes(timestampct, P1smoothed))+geom_line()+geom_smooth()
print(p)
p<-ggplot(plotdat,aes(timestampct, P1smoothed))+geom_line()+scale_y_log10()+geom_smooth()
print(p)
p<-ggplot(plotdat,aes(timestampct, P2smoothed))+geom_line()+geom_smooth()
print(p)
p<-ggplot(plotdat,aes(timestampct, P2smoothed))+geom_line()+scale_y_log10()+geom_smooth()
print(p)
}
# Temperature (and optionally humidity) plots for climate sensors.
if ("temperature" %in% names(sendat)){
seldat<-seldat[!is.na(seldat$temperature),]
if ("humidity" %in% names(sendat)){
seldat<-seldat[!is.na(seldat$humidity),]
}
p<-ggplot(seldat, aes(timestampct, temperature))+geom_line()
print(p)
if ("humidity" %in% names(seldat)){
# NOTE(review): this maps humidity as the line over a temperature base plot;
# the replacement plot is only printed via the shared print(p) below.
p<-ggplot(seldat, aes(timestampct, temperature))+geom_line(aes(timestampct, humidity),col=4)
}
print(p)
}
dev.off()
}
| /r-scripts/plot_sensordata.R | no_license | wermter/sensors-software | R | false | false | 9,631 | r | require("ggplot2")
require("dplyr")
require("timeSeries")
require("forecast")
# require("tsoutliers")
# require("zoo")
usearchive=TRUE # csv data from archive
sensorid=40
# NOTE(review): usearchive is reassigned three times; the last assignment
# (TRUE) wins, so the archive branch below is taken.
usearchive=FALSE # timestamp needs fixing (in csv and conversion below)
usearchive=TRUE # timestamp needs fixing (in csv and conversion below)
#if(usearchive){require("RCurl");}
#max values for clipping
# Per-channel clipping bounds for the particulate readings P1/P2.
Pclip<-list(P1=list(min=0, max=10000),
P2=list(min=0.62, max=1000))
# Only data inside this date window is plotted.
dateinterval<-list(min=as.POSIXct(strptime("2015-12-30", format="%Y-%m-%d")),
max=as.POSIXct(Sys.Date()))
plotdir="output_plots/"
if(!dir.exists(plotdir)){dir.create(plotdir, showWarnings = TRUE, recursive = TRUE, mode = "0755")}
#' function to clip values above/below thresholds
#' Clip the values of a numeric vector into the range [min, max].
#'
#' @param x numeric vector; NA elements are left untouched.
#' @param min lower bound; NULL (default) means the observed non-NA minimum.
#' @param max upper bound; NULL (default) means the observed non-NA maximum.
#' @return x with out-of-range values replaced by the nearest bound.
clipping <- function(x, min = NULL, max = NULL) {
  # Defaults: observed range of the non-NA values (a no-op apart from NAs).
  if (is.null(min)) {
    min <- min(na.omit(x))
  }
  if (is.null(max)) {
    max <- max(na.omit(x))
  }
  if (is.na(max) || is.na(min)) {
    # BUG FIX: warn() does not exist in base R (it would error here);
    # warning() is the correct way to signal this recoverable condition.
    warning("NA for min/max while clipping, no clipping done")
    return(x)
  }
  x[x > max] <- max
  x[x < min] <- min
  return(x)
}
## gaussian filter taps for smoothing function
# adapted from Juan Carlos Borrás http://grokbase.com/p/r/r-help/117c96hy0z/r-gaussian-low-pass-filter
gfcoeffs <- function(s, n) {
  # Symmetric support of 2*n + 1 integer taps around zero.
  taps <- seq(-n, n)
  # Gaussian density with standard deviation s, evaluated at each tap.
  dens <- exp(-taps^2 / (2 * s^2)) / sqrt(2 * pi * s^2)
  # Normalize so the filter taps sum to exactly 1.
  dens / sum(dens)
}
csvsep=","
# Archive branch: incrementally merge all archive CSVs into arcdat.RData,
# then render an overview plot plus one PDF per sensor.
if(usearchive){
arcdat_filename<-"arcdat.RData"
if (file.exists(arcdat_filename)){
load(arcdat_filename)
arcdat_filename_mtime<-file.mtime(arcdat_filename)
}else{
arcdat<-NULL
arcdat_filename_mtime<-as.POSIXct(as.Date("2000-01-01"))
}
fpattern<-"*.csv"
# get filelist relative to working directory, pattern = glob2rx(fpattern)
filelist<- dir(path = "archive.luftdaten.info",pattern=glob2rx(fpattern),recursive=TRUE,full.names=TRUE, ignore.case = TRUE) ## files in current directory
# only read files newer than arcdat_filename
for (csvfilename in filelist[file.mtime(filelist)>arcdat_filename_mtime]){
print(paste("reading ", csvfilename))
rdat<-read.csv(csvfilename, sep=";", dec=".", header=TRUE)
arcdat<-dplyr::bind_rows(arcdat,rdat)
}
arcdat$timestampct<-as.POSIXct(strptime(arcdat$timestamp,format="%Y-%m-%dT%H:%M:%OS"))
# Measurement counts per sensor per day, used for the overview bubble plot.
arctbl<-table(arcdat$sensor_id,as.Date(arcdat$timestamp))#$yday+1000*(as.POSIXlt(arcdat$timestamp)$year+1990))
save(arcdat, arctbl ,file=arcdat_filename)
pdf(file.path(plotdir,"plots_sensordata_overview.pdf"),width=12,height=9)
ggplot(as.data.frame(arctbl), aes(Var2,Var1,size=Freq)) + geom_point()+
labs(x="year, doy", y="sensor id")+
theme(axis.text.x = element_text(angle=90, vjust=0.5, size=6))
dev.off()
# iterate sensors
for (sid in unique(arcdat$sensor_id)){
print(sid)
sdat<-as.data.frame(dplyr::filter(arcdat, sensor_id==sid)) # result type is tbl_df, convert to df
sdat<-sdat[order(sdat$timestampct),] # sort by timestampct
# Derived channels: differences between the two particulate readings.
sdat$P2diff1=sdat$P2-sdat$P1
sdat$durP2diff1=sdat$durP2-sdat$durP1
print(dim(sdat))
# stats::filter the data
# create a gaussian smoothing
sigma=5
ntaps=10
gc<-gfcoeffs(sigma,ntaps)
pdffilename=file.path(plotdir,paste("plots_sensor_",sid,".pdf",sep=""))
print (pdffilename)
# set width according to timediff
timespan<-as.double(max(sdat$timestampct)-min(sdat$timestampct))
# NOTE(review): the message prints min(timespan/2, 10) but pdf() below uses
# max(timespan/2, 10) — one of the two is probably unintended; confirm.
print(paste("plotwidth", min(timespan/2,10)))
pdf(pdffilename, width=max(timespan/2,10), height=10)
measvalnames=c("P1", "durP1", "ratioP1", "P2", "durP2", "ratioP2", "P2diff1", "durP2diff1")
# have a timeSeries object and plot it
print(paste("tsdat plot"))
tsdat<-timeSeries(sdat[,measvalnames], sdat$timestampct)
plot(tsdat)
for (coln in measvalnames){
print (coln)
if(length(sdat[,coln])>ntaps){
# TODO: identify/handle outliers
# look at forecast::tsoutliers tsoutliers::tso
# outlier filter first forecast::tsclean
sdat$plotdat<-forecast::tsclean(sdat[,coln])
sdat$plotdat<-as.vector(stats::filter(sdat$plotdat, gfcoeffs(sigma,ntaps)))
print(paste(coln,"ggplot"))
p<-ggplot(sdat, aes(timestampct,plotdat))+geom_line()+geom_smooth(span=0.2)+ labs(x="Time",y=coln)
print(p)
# TODO: rolling 24-hour means (24h means filtering)
# maybe possible via zoo forecast::ma its fts tseries timeSeries
# fts:moving.mean only Date as time?
# look for functions with timestamp based intervals (24h)
# z=zoo(sdat,order.by=sdat$timestampct)
# sdat.fts=as.fts(sdat[,c("timestampct","P1")])
# idat=irts(sdat$timestampct, sdat$P1)
# plot(idat)
# measurement Dates
# mdts<-as.timeSeries(unique(as.Date(sdat$timestampct)))
# fts wants chr dates (timestamps possible?) in row names?
# rownames(sdat)<-as.character(sdat$timestampct)
}
}
dev.off()
}# sensor_id
print(paste("total size of data:", dim(arcdat) ,collapse = " "))
# Deliberate early exit: the per-file branch below is skipped in archive mode.
stop("manual break: archive plots done")
}# usearchive
# dates=seq.Date(from=as.Date(dateinterval$min),to=as.Date(dateinterval$max),by=1)
# u<-paste('http://archive.madflex.de/',
# dates,
# '/',
# dates,'_ppd42ns_sensor_',
# sensorid,
# '.csv')
# require("RCurl")
# filelist=urllist
# csvsep=";"
# Non-archive branch: plot each local sensorNN.csv file.
fpattern<-"sensor[0-9]+.csv"
# get filelist relative to working directory, pattern = glob2rx(fpattern)
filelist<- dir(path = ".",pattern=fpattern,recursive=FALSE,full.names=FALSE, ignore.case = TRUE) ## files in current directory
for (csvfilename in filelist){
# get/process the data with scripts from repo feinstaub-monitoring-client-python to sensorXX.csv
# csvfilename<-paste("sensor",sensorid,".csv",sep="")
sensorid<-regmatches(csvfilename, regexpr("[0-9]+", csvfilename))
pdffilename<-paste("plots_sensor",sensorid,".pdf",sep="")
sendat<-read.csv(csvfilename,sep=csvsep)
# have a proper timestamp POSIXct (never use POSIXlt)
sendat$timestampct<-as.POSIXct(strptime(sendat$timestamp,format="%Y-%m-%dT%H:%M:%OSZ"))
sendat$timestamp<-NULL
#sendat<-sendat[sendat$timestampct>strptime("2015-10-24", format="%Y-%m-%d"),]
# select data of latest 2 days measured values
# nval=2*60*24*2
# nval=min(nval,dim(sendat)[1])
# seldat<-sendat[1:nval,]
# filter date interval
seldat<-sendat[sendat$timestampct>dateinterval$min&
sendat$timestampct<dateinterval$max,]
# seldat<-sendat
pdf(pdffilename)
if ("P1" %in% names(sendat)){
# filter range 0
seldat$P1<-clipping(seldat$P1,Pclip$P1$min,Pclip$P1$max)
seldat$P2<-clipping(seldat$P2,Pclip$P2$min,Pclip$P2$max)
# Values at or below the lower bound are treated as missing, not clipped.
seldat$P1[seldat$P1<=Pclip$P1$min]<-NA
seldat$P2[seldat$P2<=Pclip$P2$min]<-NA
# sendat<-sendat[,]
plotdat<-seldat
# plot(plotdat$timestampct, log(plotdat$P2))
p<-ggplot(plotdat,aes(timestampct, P2))+geom_point()+geom_smooth()
print(p)
p<-ggplot(plotdat,aes(timestampct, P2))+geom_point()+scale_y_log10()+geom_smooth()
print(p)
p<-ggplot(plotdat,aes(timestampct, P1))+geom_point()+geom_smooth()
print(p)
p<-ggplot(plotdat,aes(timestampct, P1))+geom_point()+scale_y_log10()+geom_smooth()
print(p)
ntaps=10
sigma=4
gfiltc<-gfcoeffs(sigma,ntaps)
# BUG FIX: with dplyr attached, a bare filter() resolves to dplyr::filter,
# which fails on a numeric vector. Qualify with stats:: for the linear
# convolution filter, consistent with the stats::filter call used above.
plotdat$P1smoothed<-stats::filter(plotdat$P1,filter=gfiltc)
plotdat$P2smoothed<-stats::filter(plotdat$P2,filter=gfiltc)
# Smoothed P1/P2 series, each on a linear and a log10 y axis.
p<-ggplot(plotdat,aes(timestampct, P1smoothed))+geom_line()+geom_smooth()
print(p)
p<-ggplot(plotdat,aes(timestampct, P1smoothed))+geom_line()+scale_y_log10()+geom_smooth()
print(p)
p<-ggplot(plotdat,aes(timestampct, P2smoothed))+geom_line()+geom_smooth()
print(p)
p<-ggplot(plotdat,aes(timestampct, P2smoothed))+geom_line()+scale_y_log10()+geom_smooth()
print(p)
}
# Temperature (and optionally humidity) plots for climate sensors.
if ("temperature" %in% names(sendat)){
seldat<-seldat[!is.na(seldat$temperature),]
if ("humidity" %in% names(sendat)){
seldat<-seldat[!is.na(seldat$humidity),]
}
p<-ggplot(seldat, aes(timestampct, temperature))+geom_line()
print(p)
if ("humidity" %in% names(seldat)){
# NOTE(review): this maps humidity as the line over a temperature base plot;
# the replacement plot is only printed via the shared print(p) below.
p<-ggplot(seldat, aes(timestampct, temperature))+geom_line(aes(timestampct, humidity),col=4)
}
print(p)
}
dev.off()
}
|
#################################
## <Chapter 7 Exercises>
#################################
# 01. Recode the position column of the dataset2 created in the text in
# reverse order (grade 1 -> 5, ..., grade 5 -> 1) and add it as position2.
getwd()
# 02. Remove NA values from dataset2's resident column and store the result back in dataset2.
# 03. Recode dataset2's gender column (1 -> "male", 2 -> "female") into a new
# gender2 column, and check the result with a pie chart.
# 04. Recode age (<= 30 -> 1, 30-55 -> 2, >= 55 -> 3) into an age3 column,
# then inspect only the age, age2 and age3 columns.
# 05. Save the cleaned data to the working directory (c:/Rwork/Part-II) as
# cleanData.csv without quotes or row names, then read it back into new_data.
# (1) Save the cleaned data
# (2) Read the saved file back and check it
# 06. Using user_data.csv and return_data.csv, add derived variables per
# customer based on the return reason code (return_code) as shown below.
user_data <- read.csv('user_data.csv',header=T)
return_data <- read.csv('return_data.csv',header=T)
head(return_data)
# NOTE(review): dcast() comes from reshape2 and join() from plyr; neither
# package is loaded in this span — confirm they are attached elsewhere.
user_return_data <- dcast(return_data,user_id~return_code,length)
# Korean column labels are data (they appear in the expected output below).
names(user_return_data) <- c('user_id','제품이상(1)','변심(2)','원인불명(3)','기타(4)')
user_return_data <- join(user_data,user_return_data, by='user_id')
user_return_data
# <Condition 1> Derived-variable column names for the return reason codes:
# product defect(1) -> return_code1, change of mind(2) -> return_code2,
# unknown cause(3) -> return_code3, other(4) -> return_code4
# <Condition 2> Add each customer's return reason codes to the customer info
# (user_data) table (see the sample output below).
head(user_return_data,10)
#user_id age house_type resident job return_code1 return_code2 return_code3 return_code4
#1 1001 35 4 전북 6 NA NA NA NA
#2 1002 45 4 경남 2 NA NA NA NA
#3 1003 55 4 경기 6 NA NA NA NA
#4 1004 43 3 대전 1 NA NA NA NA
#5 1005 55 4 경기 2 NA NA NA NA
#6 1006 45 1 대구 1 NA NA NA NA
#7 1007 39 4 경남 1 NA NA NA NA
#8 1008 55 2 경기 6 1 0 0 0
#9 1009 33 4 인천 3 0 1 0 0
#10 1010 55 3 서울 6 NA NA NA NA
# Step 1: load the customer info file
# Step 2: load the return info file
# Step 3: create derived variables by return reason code per customer
# Step 4: append the derived return-reason columns to the customer info
# 07. Sample cross-validation folds from the iris data: 5 folds, repeated twice.
data(iris)
iris
library(cvTools)
cross <- cvFolds(n=150,K=5,R=2,type="random")
cross
cross$subsets
cross$which
R=1:2 # number of repetitions
K=1:5 # 5 folds
for(r in R){ # loop once per repetition
cat('r=',r,'회전수')
for(k in K){ # 5-fold cross-validation
# NOTE(review): cross$subset relies on R's partial matching of cross$subsets.
idx <- cross$subset[cross$which==k,r]
cat('k=',k,'검정데이터\n')
print(iris[idx,])
for(i in K[-k]){ # training folds
idx <- cross$subset[cross$which==i,r]
cat('i=',i,'훈련데이터\n')
print(iris[idx,])
}
}
}
| /R-script/Part-II/제7장 연습문제.R | no_license | LTaeHoon/R_NCS | R | false | false | 3,784 | r | #################################
## <Chapter 7 Exercises>
#################################
# 01. Recode the position column of the dataset2 created in the text in
# reverse order (grade 1 -> 5, ..., grade 5 -> 1) and add it as position2.
getwd()
# 02. Remove NA values from dataset2's resident column and store the result back in dataset2.
# 03. Recode dataset2's gender column (1 -> "male", 2 -> "female") into a new
# gender2 column, and check the result with a pie chart.
# 04. Recode age (<= 30 -> 1, 30-55 -> 2, >= 55 -> 3) into an age3 column,
# then inspect only the age, age2 and age3 columns.
# 05. Save the cleaned data to the working directory (c:/Rwork/Part-II) as
# cleanData.csv without quotes or row names, then read it back into new_data.
# (1) Save the cleaned data
# (2) Read the saved file back and check it
# 06. Using user_data.csv and return_data.csv, add derived variables per
# customer based on the return reason code (return_code) as shown below.
user_data <- read.csv('user_data.csv',header=T)
return_data <- read.csv('return_data.csv',header=T)
head(return_data)
# NOTE(review): dcast() comes from reshape2 and join() from plyr; neither
# package is loaded in this span — confirm they are attached elsewhere.
user_return_data <- dcast(return_data,user_id~return_code,length)
# Korean column labels are data (they appear in the expected output below).
names(user_return_data) <- c('user_id','제품이상(1)','변심(2)','원인불명(3)','기타(4)')
user_return_data <- join(user_data,user_return_data, by='user_id')
user_return_data
# <Condition 1> Derived-variable column names for the return reason codes:
# product defect(1) -> return_code1, change of mind(2) -> return_code2,
# unknown cause(3) -> return_code3, other(4) -> return_code4
# <Condition 2> Add each customer's return reason codes to the customer info
# (user_data) table (see the sample output below).
head(user_return_data,10)
#user_id age house_type resident job return_code1 return_code2 return_code3 return_code4
#1 1001 35 4 전북 6 NA NA NA NA
#2 1002 45 4 경남 2 NA NA NA NA
#3 1003 55 4 경기 6 NA NA NA NA
#4 1004 43 3 대전 1 NA NA NA NA
#5 1005 55 4 경기 2 NA NA NA NA
#6 1006 45 1 대구 1 NA NA NA NA
#7 1007 39 4 경남 1 NA NA NA NA
#8 1008 55 2 경기 6 1 0 0 0
#9 1009 33 4 인천 3 0 1 0 0
#10 1010 55 3 서울 6 NA NA NA NA
# Step 1: load the customer info file
# Step 2: load the return info file
# Step 3: create derived variables by return reason code per customer
# Step 4: append the derived return-reason columns to the customer info
# 07. Sample cross-validation folds from the iris data: 5 folds, repeated twice.
data(iris)
iris
library(cvTools)
cross <- cvFolds(n=150,K=5,R=2,type="random")
cross
cross$subsets
cross$which
R=1:2 # number of repetitions
K=1:5 # 5 folds
for(r in R){ # loop once per repetition
cat('r=',r,'회전수')
for(k in K){ # 5-fold cross-validation
# NOTE(review): cross$subset relies on R's partial matching of cross$subsets.
idx <- cross$subset[cross$which==k,r]
cat('k=',k,'검정데이터\n')
print(iris[idx,])
for(i in K[-k]){ # training folds
idx <- cross$subset[cross$which==i,r]
cat('i=',i,'훈련데이터\n')
print(iris[idx,])
}
}
}
|
source("FindMaxLag.R")
source("GrangerTest.R")
#' Test whether series d1 Granger-influences series d2.
#'
#' @param d1,d2 the two time series handed to FindMaxLag()/GrangerTest().
#' @param epsilon significance threshold on the best p-value (default 0.01).
#' @return one-row data.frame with hasInfluence (logical) and pvalue (the
#'   smallest p-value observed across all tested lags).
Influence <- function(d1, d2, epsilon=1e-2) {
  # Maximal lag worth testing for this pair of series.
  maxLag <- FindMaxLag(d1, d2)
  # Smallest p-value observed so far; 1 means "no evidence yet".
  pvalue <- 1
  # Recalculate Granger causality for every valid lag, maxLag down to 1.
  # BUG FIX: rev(seq_len(maxLag)) is empty when maxLag < 1, whereas the
  # previous seq(maxLag, 1, -1) raised "wrong sign in 'by'" for maxLag == 0.
  for (lag in rev(seq_len(maxLag))) {
    result <- GrangerTest(d1, d2, lag)
    # Keep the result only if it is a lower (stronger) p-value.
    if (!is.null(result) && result < pvalue) {
      pvalue <- result
    }
  }
  # Influence is declared when the best p-value beats the threshold.
  data.frame(hasInfluence = pvalue < epsilon, pvalue = pvalue)
}
| /global_influence/Influence.R | no_license | trzytematyczna/SciRePI | R | false | false | 609 | r | source("FindMaxLag.R")
source("GrangerTest.R")
#' Test whether series d1 Granger-influences series d2.
#'
#' @param d1,d2 the two time series handed to FindMaxLag()/GrangerTest().
#' @param epsilon significance threshold on the best p-value (default 0.01).
#' @return one-row data.frame with hasInfluence (logical) and pvalue (the
#'   smallest p-value observed across all tested lags).
Influence <- function(d1, d2, epsilon=1e-2) {
  # Maximal lag worth testing for this pair of series.
  maxLag <- FindMaxLag(d1, d2)
  # Smallest p-value observed so far; 1 means "no evidence yet".
  pvalue <- 1
  # Recalculate Granger causality for every valid lag, maxLag down to 1.
  # BUG FIX: rev(seq_len(maxLag)) is empty when maxLag < 1, whereas the
  # previous seq(maxLag, 1, -1) raised "wrong sign in 'by'" for maxLag == 0.
  for (lag in rev(seq_len(maxLag))) {
    result <- GrangerTest(d1, d2, lag)
    # Keep the result only if it is a lower (stronger) p-value.
    if (!is.null(result) && result < pvalue) {
      pvalue <- result
    }
  }
  # Influence is declared when the best p-value beats the threshold.
  data.frame(hasInfluence = pvalue < epsilon, pvalue = pvalue)
}
|
\name{rc.plot.track.id}
\docType{package}
\alias{rc.plot.track.id}
\title{Plot Track Id}
\description{
Plot labels in designated tracks.
}
\usage{rc.plot.track.id(track.id, labels=NULL, degree=0,
col='black', custom.track.height=NULL, ...)}
\arguments{
\item{track.id}{a vector of integers, specifying the tracks for plotting id.}
\item{labels}{NULL or a vector of character string, specifying the text to be written.}
\item{degree}{the angle of the arc rotation.}
\item{col}{color for the text.}
\item{custom.track.height}{NULL or numeric, specifying customized track height.}
\item{...}{further graphical parameters (from par), such as srt and family.}
}
\details{
If \code{labels} is NULL, values of \code{track.id} will be used as text labels.
}
\author{
Minghui Wang <m.h.wang@live.com>
}
\seealso{\code{\link{rc.plot.histogram}}, \code{\link{rc.plot.track}}}
\examples{
#This is not to be run alone. Please see tutorial vignette("netweaver") for usage.
}
| /man/rc.plot.track.id.Rd | no_license | cran/NetWeaver | R | false | false | 997 | rd | \name{rc.plot.track.id}
\docType{package}
\alias{rc.plot.track.id}
\title{Plot Track Id}
\description{
Plot labels in designated tracks.
}
\usage{rc.plot.track.id(track.id, labels=NULL, degree=0,
col='black', custom.track.height=NULL, ...)}
\arguments{
\item{track.id}{a vector of integers, specifying the tracks for plotting id.}
\item{labels}{NULL or a vector of character string, specifying the text to be written.}
\item{degree}{the angle of the arc rotation.}
\item{col}{color for the text.}
\item{custom.track.height}{NULL or numeric, specifying customized track height.}
\item{...}{further graphical parameters (from par), such as srt and family.}
}
\details{
If \code{labels} is NULL, values of \code{track.id} will be used as text labels.
}
\author{
Minghui Wang <m.h.wang@live.com>
}
\seealso{\code{\link{rc.plot.histogram}}, \code{\link{rc.plot.track}}}
\examples{
#This is not to be run alone. Please see tutorial vignette("netweaver") for usage.
}
|
# Figure 5-5: log-log plot of dimensionless flow rate vs. f_e for four
# needle gauges (25G/30G/32G/34G) at three dose rates (18/54/180 nl/min).
# NOTE(review): the dyn.load/setwd paths are machine-specific; this script
# only runs as-is on the original author's machine.
dyn.load('/Library/Java/JavaVirtualMachines/jdk1.8.0_131.jdk/Contents/Home/jre/lib/server/libjvm.dylib')
setwd("/Users/mengmengjiang/all datas/chap5")
library(xlsx)
# 25G needle (sheets hold the 18/54/180 nl/min runs at 2 kV)
n1<-read.xlsx("he-25g.xlsx",sheetName="2kv18",header=TRUE)
n2<-read.xlsx("he-25g.xlsx",sheetName="2kv54",header=TRUE)
n3<-read.xlsx("he-25g.xlsx",sheetName="2kv180",header=TRUE)
# 30G needle
n4<-read.xlsx("he-30g.xlsx",sheetName="2kv18",header=TRUE)
n5<-read.xlsx("he-30g.xlsx",sheetName="2kv54",header=TRUE)
n6<-read.xlsx("he-30g.xlsx",sheetName="2kv180",header=TRUE)
# 32G needle
n7<-read.xlsx("he-32g.xlsx",sheetName="2kv18",header=TRUE)
n8<-read.xlsx("he-32g.xlsx",sheetName="2kv54",header=TRUE)
n9<-read.xlsx("he-32g.xlsx",sheetName="2kv180",header=TRUE)
# 34G needle
n10<-read.xlsx("he-34g.xlsx",sheetName="2kv18",header=TRUE)
n11<-read.xlsx("he-34g.xlsx",sheetName="2kv54",header=TRUE)
n12<-read.xlsx("he-34g.xlsx",sheetName="2kv180",header=TRUE)
### set up an empty plot frame (col=0 makes the seed points invisible) ###
plot(n1$fv, n1$he_ra, xlab = expression(italic(log(q["d"]))),
ylab=expression(italic(log(f["e"]))),mgp=c(1.1, 0, 0),tck=0.02,
xlim=c(-14,-2),ylim=c(-2,8),col=0)
### colours, one per plotted series ###
yan<-rainbow(9)
### per-series log transforms; the a..i order fixes colour/pch/legend order ###
# NOTE(review): 3.1 / 2.3 / 1.9 look like gauge-specific needle dimensions
# (30G/32G/34G respectively) -- confirm against the thesis text.
#1# 18nl/min, 30G
a<-log(0.5*18/(n4$fv*60*3.1^3))
a1<-log((500/n4$fv - n4$tf)/n4$tp)
#2# 18nl/min, 32G
b<-log(0.5*18/(n7$fv*60*2.3^3))
b1<-log((500/n7$fv - n7$tf)/n7$tp)
#3# 54nl/min, 30G
c<-log(0.5*54/(n5$fv*60*3.1^3))
c1<-log((500/n5$fv - n5$tf)/n5$tp)
#4# 18nl/min, 34G
d<-log(0.5*18/(n10$fv*60*1.9^3))
d1<-log((500/n10$fv - n10$tf)/n10$tp)
#5# 54nl/min, 32G
e<-log(0.5*54/(n8$fv*60*2.3^3))
e1<-log((500/n8$fv - n8$tf)/n8$tp)
#6# 54nl/min, 34G
f<-log(0.5*54/(n11$fv*60*1.9^3))
f1<-log((500/n11$fv - n11$tf)/n11$tp)
#7# 180nl/min, 30G
g<-log(0.5*180/(n6$fv*60*3.1^3))
g1<-log((500/n6$fv - n6$tf)/n6$tp)
#8# 180nl/min, 32G
h<-log(0.5*180/(n9$fv*60*2.3^3))
h1<-log((500/n9$fv - n9$tf)/n9$tp)
#9# 180nl/min, 34G
i<-log(0.5*180/(n12$fv*60*1.9^3))
i1<-log((500/n12$fv - n12$tf)/n12$tp)
# pooled vectors (xx/yy are built here but not used further in this script)
xx<-c(a,b,c,d,e,f,g,h,i)
yy<-c(a1,b1,c1,d1,e1,f1,g1,h1,i1)
pchc<-c(1,2,3,4,5,6,7,22,24)
# draw lowess-smoothed points for each series
points(lowess(a,a1,f=1/4,iter=3),col=yan[1],pch=1,lwd=2,lty=2,cex=0.8)
points(lowess(b,b1,f=1/4,iter=3),col=yan[2],pch=2,lwd=2,lty=2,cex=0.8)
points(lowess(c,c1,f=1/4,iter=3),col=yan[3],pch=3,lwd=2,lty=2,cex=0.8)
points(lowess(d,d1,f=1/4,iter=3),col=yan[4],pch=4,lwd=2,lty=2,cex=0.8)
points(lowess(e,e1,f=1/4,iter=3),col=yan[5],pch=5,lwd=2,lty=2,cex=0.8)
points(lowess(f,f1,f=1/4,iter=3),col=yan[6],pch=6,lwd=2,lty=2,cex=0.8)
points(lowess(g,g1,f=1/4,iter=3),col=yan[7],pch=7,lwd=2,lty=2,cex=0.8)
points(lowess(h,h1,f=1/4,iter=3),col=yan[8],pch=22,lwd=2,lty=2,cex=0.8)
points(lowess(i,i1,f=1/4,iter=3),col=yan[9],pch=24,lwd=2,lty=2,cex=0.8)
## overlay least-squares fit lines, one per series
abline(lm(a1~a),col=yan[1],lty=4)
abline(lm(b1~b),col=yan[2],lty=4)
abline(lm(c1~c),col=yan[3],lty=4)
abline(lm(d1~d),col=yan[4],lty=4)
abline(lm(e1~e),col=yan[5],lty=4)
abline(lm(f1~f),col=yan[6],lty=4)
abline(lm(g1~g),col=yan[7],lty=4)
abline(lm(h1~h),col=yan[8],lty=4)
abline(lm(i1~i),col=yan[9],lty=4)
# Legend entries must follow the plotted series order a..i:
# a=18nl-30G, b=18nl-32G, c=54nl-30G, d=18nl-34G, e=54nl-32G,
# f=54nl-34G, g=180nl-30G, h=180nl-32G, i=180nl-34G.
# Bug fix: entries 6 and 7 were swapped relative to that order, so the
# 54nl/min-34G and 180nl/min-30G series carried each other's labels.
leg<-c("18nl/min-30G","18nl/min-32G","54nl/min-30G",
"18nl/min-34G","54nl/min-32G",
"54nl/min-34G","180nl/min-30G","180nl/min-32G","180nl/min-34G")
legend("topleft",legend=leg,col=yan,pch=pchc,bty="n",lwd=1.5,lty=2,inset=.02,cex=0.8)
| /thesis/chap5/chap5-fig5-5.R | permissive | shuaimeng/r | R | false | false | 3,259 | r | dyn.load('/Library/Java/JavaVirtualMachines/jdk1.8.0_131.jdk/Contents/Home/jre/lib/server/libjvm.dylib')
# Figure 5-5 (duplicate copy): log-log plot of dimensionless flow rate vs.
# f_e for four needle gauges at three dose rates (18/54/180 nl/min).
# NOTE(review): the setwd path is machine-specific.
setwd("/Users/mengmengjiang/all datas/chap5")
library(xlsx)
# 25G needle (sheets hold the 18/54/180 nl/min runs at 2 kV)
n1<-read.xlsx("he-25g.xlsx",sheetName="2kv18",header=TRUE)
n2<-read.xlsx("he-25g.xlsx",sheetName="2kv54",header=TRUE)
n3<-read.xlsx("he-25g.xlsx",sheetName="2kv180",header=TRUE)
# 30G needle
n4<-read.xlsx("he-30g.xlsx",sheetName="2kv18",header=TRUE)
n5<-read.xlsx("he-30g.xlsx",sheetName="2kv54",header=TRUE)
n6<-read.xlsx("he-30g.xlsx",sheetName="2kv180",header=TRUE)
# 32G needle
n7<-read.xlsx("he-32g.xlsx",sheetName="2kv18",header=TRUE)
n8<-read.xlsx("he-32g.xlsx",sheetName="2kv54",header=TRUE)
n9<-read.xlsx("he-32g.xlsx",sheetName="2kv180",header=TRUE)
# 34G needle
n10<-read.xlsx("he-34g.xlsx",sheetName="2kv18",header=TRUE)
n11<-read.xlsx("he-34g.xlsx",sheetName="2kv54",header=TRUE)
n12<-read.xlsx("he-34g.xlsx",sheetName="2kv180",header=TRUE)
### set up an empty plot frame (col=0 makes the seed points invisible) ###
plot(n1$fv, n1$he_ra, xlab = expression(italic(log(q["d"]))),
ylab=expression(italic(log(f["e"]))),mgp=c(1.1, 0, 0),tck=0.02,
xlim=c(-14,-2),ylim=c(-2,8),col=0)
### colours, one per plotted series ###
yan<-rainbow(9)
### per-series log transforms; the a..i order fixes colour/pch/legend order ###
# NOTE(review): 3.1 / 2.3 / 1.9 look like gauge-specific needle dimensions
# (30G/32G/34G respectively) -- confirm against the thesis text.
#1# 18nl/min, 30G
a<-log(0.5*18/(n4$fv*60*3.1^3))
a1<-log((500/n4$fv - n4$tf)/n4$tp)
#2# 18nl/min, 32G
b<-log(0.5*18/(n7$fv*60*2.3^3))
b1<-log((500/n7$fv - n7$tf)/n7$tp)
#3# 54nl/min, 30G
c<-log(0.5*54/(n5$fv*60*3.1^3))
c1<-log((500/n5$fv - n5$tf)/n5$tp)
#4# 18nl/min, 34G
d<-log(0.5*18/(n10$fv*60*1.9^3))
d1<-log((500/n10$fv - n10$tf)/n10$tp)
#5# 54nl/min, 32G
e<-log(0.5*54/(n8$fv*60*2.3^3))
e1<-log((500/n8$fv - n8$tf)/n8$tp)
#6# 54nl/min, 34G
f<-log(0.5*54/(n11$fv*60*1.9^3))
f1<-log((500/n11$fv - n11$tf)/n11$tp)
#7# 180nl/min, 30G
g<-log(0.5*180/(n6$fv*60*3.1^3))
g1<-log((500/n6$fv - n6$tf)/n6$tp)
#8# 180nl/min, 32G
h<-log(0.5*180/(n9$fv*60*2.3^3))
h1<-log((500/n9$fv - n9$tf)/n9$tp)
#9# 180nl/min, 34G
i<-log(0.5*180/(n12$fv*60*1.9^3))
i1<-log((500/n12$fv - n12$tf)/n12$tp)
# pooled vectors (xx/yy are built here but not used further in this script)
xx<-c(a,b,c,d,e,f,g,h,i)
yy<-c(a1,b1,c1,d1,e1,f1,g1,h1,i1)
pchc<-c(1,2,3,4,5,6,7,22,24)
# draw lowess-smoothed points for each series
points(lowess(a,a1,f=1/4,iter=3),col=yan[1],pch=1,lwd=2,lty=2,cex=0.8)
points(lowess(b,b1,f=1/4,iter=3),col=yan[2],pch=2,lwd=2,lty=2,cex=0.8)
points(lowess(c,c1,f=1/4,iter=3),col=yan[3],pch=3,lwd=2,lty=2,cex=0.8)
points(lowess(d,d1,f=1/4,iter=3),col=yan[4],pch=4,lwd=2,lty=2,cex=0.8)
points(lowess(e,e1,f=1/4,iter=3),col=yan[5],pch=5,lwd=2,lty=2,cex=0.8)
points(lowess(f,f1,f=1/4,iter=3),col=yan[6],pch=6,lwd=2,lty=2,cex=0.8)
points(lowess(g,g1,f=1/4,iter=3),col=yan[7],pch=7,lwd=2,lty=2,cex=0.8)
points(lowess(h,h1,f=1/4,iter=3),col=yan[8],pch=22,lwd=2,lty=2,cex=0.8)
points(lowess(i,i1,f=1/4,iter=3),col=yan[9],pch=24,lwd=2,lty=2,cex=0.8)
## overlay least-squares fit lines, one per series
abline(lm(a1~a),col=yan[1],lty=4)
abline(lm(b1~b),col=yan[2],lty=4)
abline(lm(c1~c),col=yan[3],lty=4)
abline(lm(d1~d),col=yan[4],lty=4)
abline(lm(e1~e),col=yan[5],lty=4)
abline(lm(f1~f),col=yan[6],lty=4)
abline(lm(g1~g),col=yan[7],lty=4)
abline(lm(h1~h),col=yan[8],lty=4)
abline(lm(i1~i),col=yan[9],lty=4)
# Legend entries must follow the plotted series order a..i:
# a=18nl-30G, b=18nl-32G, c=54nl-30G, d=18nl-34G, e=54nl-32G,
# f=54nl-34G, g=180nl-30G, h=180nl-32G, i=180nl-34G.
# Bug fix: entries 6 and 7 were swapped relative to that order, so the
# 54nl/min-34G and 180nl/min-30G series carried each other's labels.
leg<-c("18nl/min-30G","18nl/min-32G","54nl/min-30G",
"18nl/min-34G","54nl/min-32G",
"54nl/min-34G","180nl/min-30G","180nl/min-32G","180nl/min-34G")
legend("topleft",legend=leg,col=yan,pch=pchc,bty="n",lwd=1.5,lty=2,inset=.02,cex=0.8)
|
# Demo script of basic R operations: arithmetic, logs, complex numbers, matrices.
# Power of a number
a=8
print(a**2) # 8*8-->64
print(a^2) # 8*8-->64
print(c(1,2,3,4,5)^2) # All the values are powered in terms of 2
print(c(1,2,3,4)*c(3,4)) # 3 8 9 16 (the length-2 vector is recycled)
print(c(2,4,6,8)*c(-2,-4,-6,-8)) # The values are multiplied element-wise -4 -16 -36 -64
print(c(1,2,3,4)+5) # All the values are added with 5
# Integer division (quotient)
print(c(2,4,6,8)%/%c(2,3)) # 1 1 3 2
# Modulo division (remainder)
print(c(2,4,6,8)%%c(2,3)) # 0 1 0 2
# Maximum and minimum function
print(max(c(2,4,5,1))) # 5
print(min(c(2,4,5,1))) # 1
# abs(),round(),sqrt(),sum(),prod()
print(abs(-2)) # 2
print(round(12.78)) # 13
print(sqrt(c(2,4,6,8))) # 1.414214 2.000000 2.449490 2.828427
print(sum(c(2,4,6,8))) # 20
print(prod(c(2,4,6,8))) # 384
################ logarithmic functions ##################
# Natural log (ln --> log to the base e)
print(log(5)) # 1.609438
# Common log (log --> log to the base 10)
print(log10(5)) # 0.69897
print(log(5,base=10)) # 0.69897
# log(number,base=<number>) --> We can find the log of any number with any base
print(log(9,base=4)) # 1.584963
################ Complex functions #########################
a = 3+5i
print(Re(a)) # real part of a --> 3
print(Im(a)) # imaginary part of a --> 5
print(Conj(a)) # conjugate of a --> 3-5i
print(Mod(a)) # modulus of a --> 5.830952
print(Arg(a)) # argument of a --> 1.030377
############### Matrix ##########################
x = matrix(nrow=3,ncol=3,data=c(2,4,6,3,6,9,5,10,15)) # Creating a matrix (elements are added column wise)
y = matrix(nrow=3,ncol=3,data=c(2,4,6,3,6,9,5,10,15),byrow=TRUE) # Creating a matrix (elements are added row wise)
t = matrix(nrow=2,ncol=3,data=100) ## Creating a matrix of single data
# NOTE(review): `t` shadows base t(); t(x) below still resolves to the
# function (R looks for a function in call position), but rename to avoid confusion.
d = diag(1,nrow=2,ncol=2) ## Creating a diagonal matrix
x[2,3] # Accessing the matrix elements
## Properties of a matrix ##
print(dim(x))
print(attributes(x))
print(nrow(x))
print(ncol(x))
print(mode(x)) # types of storage
print(t(x)) # transpose of a matrix
print(solve(x)) # Inverse of a matrix
print(x*4) # multiplying a matrix with a constant term
print(y%*%y) # Matrix multiplication
print(y*y) # Normal (element-wise) multiplication
print(crossprod(x)) # t(x)%*%x [transpose(matrix) (matrix multiplication) matrix]
print(x+6*x) # addition of a matrix
print(6*x-x) # subtraction of a matrix
print(x[2,]) # second row of a matrix
print(x[,2]) # second column of a matrix
| /MAT2001 Statistics for Engineers/Lab Learning/Basics of R.R | no_license | PrashanthSingaravelan/fall_semester-2020 | R | false | false | 2,445 | r | # Power of a number
# Demo script of basic R operations (duplicate copy). Power of a number:
a=8
print(a**2) # 8*8-->64
print(a^2) # 8*8-->64
print(c(1,2,3,4,5)^2) # All the values are powered in terms of 2
print(c(1,2,3,4)*c(3,4)) # 3 8 9 16 (the length-2 vector is recycled)
print(c(2,4,6,8)*c(-2,-4,-6,-8)) # The values are multiplied element-wise -4 -16 -36 -64
print(c(1,2,3,4)+5) # All the values are added with 5
# Integer division (quotient)
print(c(2,4,6,8)%/%c(2,3)) # 1 1 3 2
# Modulo division (remainder)
print(c(2,4,6,8)%%c(2,3)) # 0 1 0 2
# Maximum and minimum function
print(max(c(2,4,5,1))) # 5
print(min(c(2,4,5,1))) # 1
# abs(),round(),sqrt(),sum(),prod()
print(abs(-2)) # 2
print(round(12.78)) # 13
print(sqrt(c(2,4,6,8))) # 1.414214 2.000000 2.449490 2.828427
print(sum(c(2,4,6,8))) # 20
print(prod(c(2,4,6,8))) # 384
################ logarithmic functions ##################
# Natural log (ln --> log to the base e)
print(log(5)) # 1.609438
# Common log (log --> log to the base 10)
print(log10(5)) # 0.69897
print(log(5,base=10)) # 0.69897
# log(number,base=<number>) --> We can find the log of any number with any base
print(log(9,base=4)) # 1.584963
################ Complex functions #########################
a = 3+5i
print(Re(a)) # real part of a --> 3
print(Im(a)) # imaginary part of a --> 5
print(Conj(a)) # conjugate of a --> 3-5i
print(Mod(a)) # modulus of a --> 5.830952
print(Arg(a)) # argument of a --> 1.030377
############### Matrix ##########################
x = matrix(nrow=3,ncol=3,data=c(2,4,6,3,6,9,5,10,15)) # Creating a matrix (elements are added column wise)
y = matrix(nrow=3,ncol=3,data=c(2,4,6,3,6,9,5,10,15),byrow=TRUE) # Creating a matrix (elements are added row wise)
t = matrix(nrow=2,ncol=3,data=100) ## Creating a matrix of single data
# NOTE(review): `t` shadows base t(); t(x) below still resolves to the
# function (R looks for a function in call position), but rename to avoid confusion.
d = diag(1,nrow=2,ncol=2) ## Creating a diagonal matrix
x[2,3] # Accessing the matrix elements
## Properties of a matrix ##
print(dim(x))
print(attributes(x))
print(nrow(x))
print(ncol(x))
print(mode(x)) # types of storage
print(t(x)) # transpose of a matrix
print(solve(x)) # Inverse of a matrix
print(x*4) # multiplying a matrix with a constant term
print(y%*%y) # Matrix multiplication
print(y*y) # Normal (element-wise) multiplication
print(crossprod(x)) # t(x)%*%x [transpose(matrix) (matrix multiplication) matrix]
print(x+6*x) # addition of a matrix
print(6*x-x) # subtraction of a matrix
print(x[2,]) # second row of a matrix
print(x[,2]) # second column of a matrix
|
split = function(newcol, concol, c, tag = c[-1]) {
  # Assign an interval label to every entry of `newcol` whose `concol` value
  # falls inside one of the bins defined by the border vector `c`.
  #
  # newcol : pre-created output column (same length as `concol`)
  # concol : numeric column that is binned, e.g. train$age
  # c      : interval borders covering the data range, e.g. c(18,25,35,45,55,65)
  # tag    : labels, one per bin; defaults to the upper bounds c[-1].
  #          The default is a lazy promise, so it sees the inflated last bound.
  #
  # Returns `newcol` with bin labels filled in.
  # Example: train$user_age = as.factor(split(train$age-category, train$age, c))
  upper <- length(c)
  c[upper] <- c[upper] * 1.01  # widen the top bound so the last bin is closed
  for (bin in 2:length(c)) {
    members <- concol >= c[bin - 1] & concol < c[bin]
    newcol[members] <- tag[bin - 1]
  }
  return(newcol)
}
helper.calcloss = function(truevals, predictedvals, itemprice){
  # Total monetary loss of a 0/1 return prediction against the ground truth.
  #
  # truevals      : vector of true 0/1 labels (e.g. known$return)
  # predictedvals : vector of predicted 0/1 labels, same length as truevals
  # itemprice     : vector of item prices, same length as truevals
  #
  # Cost model per observation:
  #   predicted 0, true 1 (false negative): 2.5 * (-3 - 0.1 * price)
  #   predicted 1, true 0 (false positive): -0.5 * price
  #   correct predictions cost nothing
  #
  # Returns a single numeric: the summed loss (0 for empty input).
  lossone = 2.5 * ((-3) + (-0.1)*itemprice)   # false-negative cost per item
  losstwo = (-0.5) * itemprice                # false-positive cost per item
  p = predictedvals - truevals                # -1 = FN, 1 = FP, 0 = correct
  # Vectorised replacement of the original element-by-element accumulation loop.
  sum(lossone[p == (-1)]) + sum(losstwo[p == 1])
}
helper.loss = function(tau_candidates, truevals, predictedvals, itemprice){
  # Evaluate the monetary loss of thresholding `predictedvals` at every
  # candidate cutoff in `tau_candidates`.
  #
  # tau_candidates : numeric vector of probability cutoffs to try
  # truevals       : true 0/1 labels
  # predictedvals  : predicted probabilities (same length as truevals)
  # itemprice      : item prices (same length as truevals)
  #
  # Returns a numeric vector, one loss per candidate (via helper.calcloss).
  loss <- seq_along(tau_candidates)
  for (idx in seq_along(tau_candidates)) {
    cutoff <- tau_candidates[idx]
    yhat <- seq_along(truevals)  # placeholder values; fully overwritten below
    yhat[predictedvals >= cutoff] <- 1
    yhat[predictedvals < cutoff] <- 0
    loss[idx] <- helper.calcloss(truevals = truevals,
                                 predictedvals = yhat,
                                 itemprice = itemprice)
  }
  return(loss)
}
# Data preprocessing for nnet:
# assigns WoE (weight-of-evidence) numbers to the levels of categorical variables.
helper.fac2num = function(){
# load() side-effects `known` (and presumably `unknown`) into this function's
# environment. NOTE(review): createDataPartition presumably comes from caret
# and woe from klaR -- confirm both are attached by the caller.
load(file = "./data/known-unknown-data.RData")
colnames(known)[2] = "return"
# full dataset for item_retrate and user_retrate (since target variable was already used anyways)
# smaller dataset for delivery_time and price_comp (to avoid overfitting)
known$return = as.factor(known$return)
set.seed(1234) # fixed seed so the WoE split is reproducible
split.idx.woe = createDataPartition(y = known$return, p = 0.80, list = FALSE)
split.woe = known[-split.idx.woe,]
woe.object.full = woe(return ~ ., data = known, zeroadj = 0.5)
woe.object.split = woe(return ~ ., data = split.woe, zeroadj = 0.5)
# Collect the per-variable WoE lookup tables plus the split index.
fac2num = list()
fac2num[["delivery_time"]] = woe.object.split$woe$delivery_time
fac2num[["price_comp"]] = woe.object.split$woe$price_comp
fac2num[["item_retrate"]] = woe.object.full$woe$item_retrate
fac2num[["user_retrate"]] = woe.object.full$woe$user_retrate
fac2num[["split.idx"]] = split.idx.woe
return(fac2num)
}
helper.cvlist.tau <- function(cv.list){
# Aggregates an m-times repeated k-fold CV result list into per-tau-category
# mean/sd of loss and of the chosen tau.
# cv.list : list of m repetitions; each repetition holds 6 tau-categories,
#           each a list of k fold results with $loss and $tau fields.
# Returns list `measure` with $loss$mean/$sd and $tau$mean/$sd (length-6 each).
k = length(cv.list[[1]][[1]]) # dimension of k-fold cross validation
loss = matrix(data = NA, nrow = length(cv.list)*k, ncol = 6)
tau.m = loss
measure = list()
tau = list() # NOTE(review): never used below -- candidate for removal
for(m in 1:length(cv.list)){ # m times repeated cv
run.m = cv.list[[m]]
for(v in 1:length(run.m)){ # tau-category 1:6
tau.v = run.m[[v]]
for (n in 1:1){ # historical parameter loop; single iteration kept for shape
for (k in 1:length(tau.v)){ # same parameters, k-fold
# NOTE(review): this loop variable shadows the outer `k`; harmless only
# because length(tau.v) equals the original k.
j = k+((m-1)*length(tau.v))
loss[j,v] = tau.v[[k]]$loss
tau.m[j,v] = tau.v[[k]]$tau
}
}
}
# NOTE(review): recomputed on every m iteration; only the final pass (over
# the fully filled matrices) matters -- could be moved after the loop.
measure$loss$mean = apply(loss, 2, mean)
measure$loss$sd = apply(loss, 2, sd)
measure$tau$mean = apply(tau.m, 2, mean)
measure$tau$sd = apply(tau.m, 2, sd)
}
return(measure)
}
helper.cvlist.tune <- function(cv.list){
# Aggregates repeated-CV tuning results: per tau-category, the mean loss for
# each parameter setting across all m*k folds.
# cv.list : list of m repetitions; each holds 6 tau-categories, each a
#           k x n_params structure whose cells carry a $loss field.
# Returns list `measure` with one mean-loss vector per tau-category.
k = length(cv.list[[1]][[1]][,1]) # dimension of k-fold cross validation
loss = matrix(data = NA, nrow = length(cv.list)*k, ncol = dim(cv.list[[1]][[1]])[2])
tau.m = loss # NOTE(review): never used below
measure = list()
pars = list()
for(m in 1:length(cv.list)){ # m times repeated cv
run.m = cv.list[[m]]
for(v in 1:length(run.m)){ # tau-category 1:6
tau.v = run.m[[v]]
for (n in 1:dim(tau.v)[2]){ # change in parameters
for (k in 1:dim(tau.v)[1]){ # same parameters, k-fold (shadows the outer `k`)
j = k+((m-1)*dim(tau.v)[1])
loss[j,n] = tau.v[,n][[k]]$loss # loss of m-th kfold-cv for tau_c == v
}
# NOTE(review): `loss` is shared across tau-categories and only the rows for
# the current m are overwritten per v, so intermediate snapshots mix
# categories; only after the final m are pars[[v]] rows for earlier m those
# of the LAST category processed for that m -- verify this is intended.
pars[[v]] = loss
}
}
}
# now calculate mean and standard deviation for each m*k-fold c.v
for (i in 1:6){
loss = pars[[i]]
measure[[paste("tau_c ==", i)]] = apply(loss, 2, mean)
}
return(measure)
}
| /submission/helperfunctions.R | no_license | fractaldust/SPL_DFK | R | false | false | 5,481 | r | split = function(newcol, concol, c, tag=c[-1]){
# Assigns values to intervals and names the intervals.
# newcol : name of new column e.g. train$age-category
#        : has to be created beforehand (same length as concol)
# concol : name of control column e.g. train$age
# c      : vector of interval borders, e.g. age.s = c(18,25,35,45,55,65)
#        : to cover everything set c from minimum to maximum
# tag    : name tag of intervals
#        : default: upper bound of intervals (from c); as a lazy default it is
#        : evaluated only inside the loop, i.e. AFTER the last bound is inflated
# example: train$user_age = as.factor(split(train$age-category, train$age, c))
c[length(c)] = 1.01*c[length(c)] # inflate last upper bound so the final
# interval is effectively right-closed
for (i in 2:length(c)){
newcol[concol < c[i] & concol >= c[i-1]] = tag[i-1]
}
return(newcol)
}
helper.calcloss = function(truevals, predictedvals, itemprice){
  # Total monetary loss of a 0/1 return prediction against the ground truth.
  #
  # truevals      : vector of true 0/1 labels (e.g. known$return)
  # predictedvals : vector of predicted 0/1 labels, same length as truevals
  # itemprice     : vector of item prices, same length as truevals
  #
  # Cost model per observation:
  #   predicted 0, true 1 (false negative): 2.5 * (-3 - 0.1 * price)
  #   predicted 1, true 0 (false positive): -0.5 * price
  #   correct predictions cost nothing
  #
  # Returns a single numeric: the summed loss (0 for empty input).
  lossone = 2.5 * ((-3) + (-0.1)*itemprice)   # false-negative cost per item
  losstwo = (-0.5) * itemprice                # false-positive cost per item
  p = predictedvals - truevals                # -1 = FN, 1 = FP, 0 = correct
  # Vectorised replacement of the original element-by-element accumulation loop.
  sum(lossone[p == (-1)]) + sum(losstwo[p == 1])
}
helper.loss = function(tau_candidates, truevals, predictedvals, itemprice){
  # Evaluate the monetary loss of thresholding `predictedvals` at every
  # candidate cutoff in `tau_candidates`.
  #
  # tau_candidates : numeric vector of probability cutoffs to try
  # truevals       : true 0/1 labels
  # predictedvals  : predicted probabilities (same length as truevals)
  # itemprice      : item prices (same length as truevals)
  #
  # Returns a numeric vector, one loss per candidate (via helper.calcloss).
  loss <- seq_along(tau_candidates)
  for (idx in seq_along(tau_candidates)) {
    cutoff <- tau_candidates[idx]
    yhat <- seq_along(truevals)  # placeholder values; fully overwritten below
    yhat[predictedvals >= cutoff] <- 1
    yhat[predictedvals < cutoff] <- 0
    loss[idx] <- helper.calcloss(truevals = truevals,
                                 predictedvals = yhat,
                                 itemprice = itemprice)
  }
  return(loss)
}
# Data preprocessing for nnet:
# assigns WoE (weight-of-evidence) numbers to the levels of categorical variables.
helper.fac2num = function(){
# load() side-effects `known` (and presumably `unknown`) into this function's
# environment. NOTE(review): createDataPartition presumably comes from caret
# and woe from klaR -- confirm both are attached by the caller.
load(file = "./data/known-unknown-data.RData")
colnames(known)[2] = "return"
# full dataset for item_retrate and user_retrate (since target variable was already used anyways)
# smaller dataset for delivery_time and price_comp (to avoid overfitting)
known$return = as.factor(known$return)
set.seed(1234) # fixed seed so the WoE split is reproducible
split.idx.woe = createDataPartition(y = known$return, p = 0.80, list = FALSE)
split.woe = known[-split.idx.woe,]
woe.object.full = woe(return ~ ., data = known, zeroadj = 0.5)
woe.object.split = woe(return ~ ., data = split.woe, zeroadj = 0.5)
# Collect the per-variable WoE lookup tables plus the split index.
fac2num = list()
fac2num[["delivery_time"]] = woe.object.split$woe$delivery_time
fac2num[["price_comp"]] = woe.object.split$woe$price_comp
fac2num[["item_retrate"]] = woe.object.full$woe$item_retrate
fac2num[["user_retrate"]] = woe.object.full$woe$user_retrate
fac2num[["split.idx"]] = split.idx.woe
return(fac2num)
}
helper.cvlist.tau <- function(cv.list){
# Aggregates an m-times repeated k-fold CV result list into per-tau-category
# mean/sd of loss and of the chosen tau.
# cv.list : list of m repetitions; each repetition holds 6 tau-categories,
#           each a list of k fold results with $loss and $tau fields.
# Returns list `measure` with $loss$mean/$sd and $tau$mean/$sd (length-6 each).
k = length(cv.list[[1]][[1]]) # dimension of k-fold cross validation
loss = matrix(data = NA, nrow = length(cv.list)*k, ncol = 6)
tau.m = loss
measure = list()
tau = list() # NOTE(review): never used below -- candidate for removal
for(m in 1:length(cv.list)){ # m times repeated cv
run.m = cv.list[[m]]
for(v in 1:length(run.m)){ # tau-category 1:6
tau.v = run.m[[v]]
for (n in 1:1){ # historical parameter loop; single iteration kept for shape
for (k in 1:length(tau.v)){ # same parameters, k-fold
# NOTE(review): this loop variable shadows the outer `k`; harmless only
# because length(tau.v) equals the original k.
j = k+((m-1)*length(tau.v))
loss[j,v] = tau.v[[k]]$loss
tau.m[j,v] = tau.v[[k]]$tau
}
}
}
# NOTE(review): recomputed on every m iteration; only the final pass (over
# the fully filled matrices) matters -- could be moved after the loop.
measure$loss$mean = apply(loss, 2, mean)
measure$loss$sd = apply(loss, 2, sd)
measure$tau$mean = apply(tau.m, 2, mean)
measure$tau$sd = apply(tau.m, 2, sd)
}
return(measure)
}
helper.cvlist.tune <- function(cv.list){
# Aggregates repeated-CV tuning results: per tau-category, the mean loss for
# each parameter setting across all m*k folds.
# cv.list : list of m repetitions; each holds 6 tau-categories, each a
#           k x n_params structure whose cells carry a $loss field.
# Returns list `measure` with one mean-loss vector per tau-category.
k = length(cv.list[[1]][[1]][,1]) # dimension of k-fold cross validation
loss = matrix(data = NA, nrow = length(cv.list)*k, ncol = dim(cv.list[[1]][[1]])[2])
tau.m = loss # NOTE(review): never used below
measure = list()
pars = list()
for(m in 1:length(cv.list)){ # m times repeated cv
run.m = cv.list[[m]]
for(v in 1:length(run.m)){ # tau-category 1:6
tau.v = run.m[[v]]
for (n in 1:dim(tau.v)[2]){ # change in parameters
for (k in 1:dim(tau.v)[1]){ # same parameters, k-fold (shadows the outer `k`)
j = k+((m-1)*dim(tau.v)[1])
loss[j,n] = tau.v[,n][[k]]$loss # loss of m-th kfold-cv for tau_c == v
}
# NOTE(review): `loss` is shared across tau-categories; intermediate
# snapshots stored in pars[[v]] can mix rows from different categories
# until the final repetition -- verify this is intended.
pars[[v]] = loss
}
}
}
# now calculate mean and standard deviation for each m*k-fold c.v
for (i in 1:6){
loss = pars[[i]]
measure[[paste("tau_c ==", i)]] = apply(loss, 2, mean)
}
return(measure)
}
|
#' MDSConjoint: An implementation of metric and nonmetric conjoint models for marketing decisions.
#'
#' This package is an implementation of metric and nonmetric conjoint models for
#' marketing analysis and decisions. It estimates the conjoint models for each individual,
#' computes a data frame with all estimations, another data frame with part worths (partial utilities),
#' a data frame with the importance of attributes for all individuals,
#' plots a summary of attributes' importance, computes market shares,
#' and the optimal profile given market competitors.
#'
#' @section MDSConjoint functions:
#' The mktgConjoint functions ...
#'
#' @docType package
#' @name MDSConjoint
#' @importFrom graphics axis pie plot
#' @importFrom stats dist lm predict sd
#' @importFrom utils head
#' @importFrom XLConnect loadWorkbook readWorksheet
#' @importFrom XLConnectJars
#' @importFrom support.CEs Lma.design questionnaire
#'
| /R/MDSConjoint.R | no_license | jlopezsi/MDSConjoint | R | false | false | 926 | r | #' MDSConjoint: An implementation of metric and nonmetric conjoint models for marketing decisions.
#'
#' This package is an implementation of metric and nonmetric conjoint models for
#' marketing analysis and decisions. It estimates the conjoint models for each individual,
#' computes a data frame with all estimations, another data frame with part worths (partial utilities),
#' a data frame with the importance of attributes for all individuals,
#' plots a summary of attributes' importance, computes market shares,
#' and the optimal profile given market competitors.
#'
#' @section MDSConjoint functions:
#' The mktgConjoint functions ...
#'
#' @docType package
#' @name MDSConjoint
#' @importFrom graphics axis pie plot
#' @importFrom stats dist lm predict sd
#' @importFrom utils head
#' @importFrom XLConnect loadWorkbook readWorksheet
#' @importFrom XLConnectJars
#' @importFrom support.CEs Lma.design questionnaire
#'
|
library(FFTrees)
library(tidyverse)
library(rhandsontable)
require(gridExtra)
# NOTE(review): the second setwd() immediately overrides the first, so only
# the Sequestered directory is in effect below. Both paths are machine-specific.
setwd('/Users/ravirane/Desktop/GMU/CS584/myWork/assignment1/data/fold')
setwd("/Users/ravirane/Desktop/GMU/CS584/dm/task-1/data/Sequestered")
# Builds a Fast-and-Frugal Tree on a train/test CSV pair and prints it.
#
# trainFile : path to the training CSV (must contain a `class` column)
# testFile  : path to the test CSV with the same schema
# algo      : FFTrees tree-building algorithm name, e.g. "ifan" or "dfan"
# info      : free-text label printed before fitting (console logging)
#
# Returns the fitted FFTrees object (also printed as a side effect).
fast.frugal.tree <- function(trainFile, testFile, algo, info) {
print(info)
adult.train <- read.csv(file=trainFile, header=TRUE, sep=",")
adult.test <- read.csv(file=testFile, header=TRUE, sep=",")
adult.fft <- FFTrees(formula = class ~ .,
data = adult.train,
data.test = adult.test,
algorithm = algo,
main = "Adult data",
do.comp = FALSE,
decision.labels = c("<=50", ">50"))
print(adult.fft)
# last expression: return the fitted model
adult.fft
}
# Fit FFTrees models on the five cross-validation folds with the "ifan" algorithm.
fold1.ifan.fft <- fast.frugal.tree("fold1_train.csv", "fold1_test.csv", "ifan", 'Fold 1 FFT - Algo: ifan')
fold2.ifan.fft <- fast.frugal.tree("fold2_train.csv", "fold2_test.csv", "ifan", 'Fold 2 FFT - Algo: ifan')
fold3.ifan.fft <- fast.frugal.tree("fold3_train.csv", "fold3_test.csv", "ifan", 'Fold 3 FFT - Algo: ifan')
fold4.ifan.fft <- fast.frugal.tree("fold4_train.csv", "fold4_test.csv", "ifan", 'Fold 4 FFT - Algo: ifan')
fold5.ifan.fft <- fast.frugal.tree("fold5_train.csv", "fold5_test.csv", "ifan", 'Fold 5 FFT - Algo: ifan')
# Same folds with the "dfan" algorithm.
fold1.dfan.fft <- fast.frugal.tree("fold1_train.csv", "fold1_test.csv", "dfan", 'Fold 1 FFT - Algo: dfan')
fold2.dfan.fft <- fast.frugal.tree("fold2_train.csv", "fold2_test.csv", "dfan", 'Fold 2 FFT - Algo: dfan')
fold3.dfan.fft <- fast.frugal.tree("fold3_train.csv", "fold3_test.csv", "dfan", 'Fold 3 FFT - Algo: dfan')
fold4.dfan.fft <- fast.frugal.tree("fold4_train.csv", "fold4_test.csv", "dfan", 'Fold 4 FFT - Algo: dfan')
fold5.dfan.fft <- fast.frugal.tree("fold5_train.csv", "fold5_test.csv", "dfan", 'Fold 5 FFT - Algo: dfan')
# Sequestered hold-out sets: train on adult_300.csv, test on 100_S*.csv.
Seq1.ifan.fft <- fast.frugal.tree("adult_300.csv", "100_S1.csv", "ifan", 'Sequestered 1 - Algo: ifan')
Seq2.ifan.fft <- fast.frugal.tree("adult_300.csv", "100_S2.csv", "ifan", 'Sequestered 2 - Algo: ifan')
Seq3.ifan.fft <- fast.frugal.tree("adult_300.csv", "100_S3.csv", "ifan", 'Sequestered 3 - Algo: ifan')
Seq4.ifan.fft <- fast.frugal.tree("adult_300.csv", "100_S4.csv", "ifan", 'Sequestered 4 - Algo: ifan')
Seq5.ifan.fft <- fast.frugal.tree("adult_300.csv", "100_S5.csv", "ifan", 'Sequestered 5 - Algo: ifan')
# Disabled sixth sequestered run, kept for reference:
#Seq6.ifan.fft <- fast.frugal.tree("adult_ds300_preprocessed.csv", "fold1_test.csv", "ifan", 'Sequestered 6 - Algo: ifan')
# Plot the test-set performance of every fitted tree.
plot(fold1.ifan.fft, data = "test")
plot(fold2.ifan.fft, data = "test")
plot(fold3.ifan.fft, data = "test")
plot(fold4.ifan.fft, data = "test")
plot(fold5.ifan.fft, data = "test")
plot(fold1.dfan.fft, data = "test")
plot(fold2.dfan.fft, data = "test")
plot(fold3.dfan.fft, data = "test")
plot(fold4.dfan.fft, data = "test")
plot(fold5.dfan.fft, data = "test")
plot(Seq1.ifan.fft, data = "test")
plot(Seq2.ifan.fft, data = "test")
plot(Seq3.ifan.fft, data = "test")
plot(Seq4.ifan.fft, data = "test")
plot(Seq5.ifan.fft, data = "test")
#plot(Seq6.ifan.fft, data = "test")
folds <- c('Fold1', 'Fold2', 'Fold3', 'Fold4', 'Fold5' )
# Hand-entered per-fold confusion-matrix counts for the ifan trees
# (NOTE(review): presumably transcribed from the FFTrees summaries -- confirm).
#confusion matrix for fold_ifan
tp_ifan <- c(49, 53, 46, 48,55 )
fp_ifan <- c(12, 14, 20, 21, 17)
tn_ifan <- c(46, 46, 44, 41, 43)
fn_ifan <- c(13, 7, 10, 10, 5)
cm_fold_ifan = tibble(FOLD= folds, TP = tp_ifan, TN = tn_ifan, FP = fp_ifan, FN = fn_ifan)
cm_fold_ifan$accuracy <- round((cm_fold_ifan$TP + cm_fold_ifan$TN)/(cm_fold_ifan$TP + cm_fold_ifan$TN + cm_fold_ifan$FP + cm_fold_ifan$FN), digits = 2)
cm_fold_ifan$precision <- round(cm_fold_ifan$TP/(cm_fold_ifan$TP + cm_fold_ifan$FP), digits = 2)
cm_fold_ifan$recall <- round(cm_fold_ifan$TP/(cm_fold_ifan$TP + cm_fold_ifan$FN), digits = 2)
cm_fold_ifan$f <- round(2*cm_fold_ifan$recall*cm_fold_ifan$precision/(cm_fold_ifan$precision + cm_fold_ifan$recall), digits = 2)
# sensitivity/specificity are hard-coded rather than derived, but agree with
# TP/(TP+FN) and TN/(TN+FP) to two decimals.
cm_fold_ifan$sensitivity <- c(0.79, 0.88, 0.82, 0.83, 0.92 )
cm_fold_ifan$specificity <- c(0.79, 0.77, 0.69, 0.66, 0.72)
rhandsontable(cm_fold_ifan, rowHeaders = NULL)
#confusion matrix for fold_dfan
tp_dfan <- c(49, 54, 50, 48,55)
fp_dfan <- c(12, 14, 21, 21, 17)
tn_dfan <- c(46, 46, 43, 41, 43)
fn_dfan <- c(13, 6, 6, 10, 5)
cm_fold_dfan = tibble(FOLD= folds, TP = tp_dfan, TN = tn_dfan, FP = fp_dfan, FN = fn_dfan)
cm_fold_dfan$accuracy <- round((cm_fold_dfan$TP + cm_fold_dfan$TN)/(cm_fold_dfan$TP + cm_fold_dfan$TN + cm_fold_dfan$FP + cm_fold_dfan$FN), digits = 2)
cm_fold_dfan$precision <- round(cm_fold_dfan$TP/(cm_fold_dfan$TP + cm_fold_dfan$FP), digits = 2)
cm_fold_dfan$recall <- round(cm_fold_dfan$TP/(cm_fold_dfan$TP + cm_fold_dfan$FN), digits = 2)
cm_fold_dfan$f <- round(2*cm_fold_dfan$recall*cm_fold_dfan$precision/(cm_fold_dfan$precision + cm_fold_dfan$recall), digits = 2)
cm_fold_dfan$sensitivity <- c(0.79, 0.90, 0.89, 0.83, 0.92)
cm_fold_dfan$specificity <- c(0.79, 0.77, 0.67, 0.66, 0.72)
rhandsontable(cm_fold_dfan, rowHeaders = NULL)
sq <- c('Seq 1', 'Seq 2', 'Seq 3', 'Seq 4', 'Seq 5' )
# confusion matrix for the sequestered ifan runs (comment previously said
# "fold_ifan", which was a copy-paste leftover)
sqtp_ifan <- c(38, 22, 19, 20, 21 )
sqfp_ifan <- c(12, 25, 30, 19, 23)
sqtn_ifan <- c(41, 51, 46, 57, 53)
sqfn_ifan <- c(9, 2, 5, 4, 3)
sq_ifan = tibble(SQ= sq, TP = sqtp_ifan, TN = sqtn_ifan, FP = sqfp_ifan, FN = sqfn_ifan)
sq_ifan$accuracy <- round((sq_ifan$TP + sq_ifan$TN)/(sq_ifan$TP + sq_ifan$TN + sq_ifan$FP + sq_ifan$FN), digits = 2)
sq_ifan$precision <- round(sq_ifan$TP/(sq_ifan$TP + sq_ifan$FP), digits = 2)
sq_ifan$recall <- round(sq_ifan$TP/(sq_ifan$TP + sq_ifan$FN), digits = 2)
sq_ifan$f <- round(2*sq_ifan$recall*sq_ifan$precision/(sq_ifan$precision + sq_ifan$recall), digits = 2)
sq_ifan$sensitivity <- c(0.75, 0.92, 0.79, 0.83, 0.88)
sq_ifan$specificity <- c(0.76, 0.67, 0.61, 0.75, 0.70)
rhandsontable(sq_ifan, rowHeaders = NULL)
# Hard-coded per-fold metrics for the comparison algorithms (gini / entropy /
# info-gain / gain-ratio / unpruned / pruned decision trees), consumed by the
# boxplots below. NOTE(review): values appear transcribed from external runs
# -- confirm their provenance.
## Gini
gini_fold_accuracy = c(0.8, 0.85, 0.78, 0.78, 0.82)
gini_fold_precision = c(0.81, 0.84, 0.73, 0.75, 0.81)
gini_fold_recall = c(0.81, 0.87, 0.86, 0.81, 0.83)
gini_fold_f = c(0.81, 0.85, 0.79, 0.78, 0.82)
gini_fold_sensitivity = c(0.81, 0.87, 0.86, 0.81, 0.83)
gini_fold_specificity = c(0.79, 0.83, 0.72, 0.74, 0.8)
## Entropy
entropy_fold_accuracy = c(0.8, 0.87, 0.78, 0.78, 0.81)
entropy_fold_precision = c(0.81, 0.84, 0.73, 0.75, 0.8)
entropy_fold_recall = c(0.81, 0.9, 0.86, 0.81, 0.82)
entropy_fold_f = c(0.81, 0.87, 0.79, 0.78, 0.81)
entropy_fold_sensitivity = c(0.81, 0.9, 0.86, 0.81, 0.82)
entropy_fold_specificity = c(0.79, 0.83, 0.72, 0.74, 0.8)
## Info gain
infog_fold_accuracy = c(0.62, 0.83, 0.86, 0.80, 0.81)
infog_fold_precision = c(0.78, 0.85, 0.86, 1, 1)
infog_fold_recall = c(0.61, 0.83, 0.86, 0.80, 0.81)
infog_fold_f = c(0.55, 0.84, 0.86, 0.89, 0.89)
infog_fold_sensitivity = c(1, 0.25, 0.33, 0, 0)
infog_fold_specificity = c(0.56, 0.92, 0.92, 1, 1)
## Gain Ratio
gainr_fold_accuracy = c(0.61, 0.83, 0.86, 0.80, 0.82)
gainr_fold_precision = c(0.78, 0.85, 0.86, 0.80, 0.82)
gainr_fold_recall = c(0.61, 0.83, 0.86, 0.89, 0.90)
gainr_fold_f = c(0.55, 0.84, 0.86, 0.89, 0.90)
gainr_fold_sensitivity = c(1, 0.25, 0.33, 0, 0)
gainr_fold_specificity = c(0.56, 0.92, 0.92, 1, 1)
## Unpruned
unpruned_fold_accuracy = c(0.65, 0.75, 0.85, 0.81, 0.86)
unpruned_fold_precision = c(0.77, 0.87, 0.89, 1, 1)
unpruned_fold_recall = c(0.65, 0.75, 0.85, 0.81, 0.86)
unpruned_fold_f = c(0.60, 0.8, 0.87, 0.89, 0.92)
unpruned_fold_sensitivity = c(0.95, 0.22, 0.36, 0, 0)
unpruned_fold_specificity = c(0.59, 0.94, 0.95, 1, 1)
## Pruned
pruned_fold_accuracy = c(0.6, 0.85, 0.86, 0.80, 0.91)
pruned_fold_precision = c(0.78, 0.86, 0.86, 1, 1)
pruned_fold_recall = c(0.6, 0.85, 0.86, 0.88, 0.91)
pruned_fold_f = c(0.34, 0.85, 0.86, 0.88, 0.95)
pruned_fold_sensitivity = c(1, 0.28, 0.33, 0, 0)
pruned_fold_specificity = c(0.56, 0.92, 0.92, 1, 1)
# Stack each algorithm's per-fold metric vector into long-format tibbles for
# the boxplots below. Label fixes: 'ffifan-Accuracy' -> 'ifan-Accuracy',
# 'gnainr-F'/'gnainr-Sensitivity' -> 'gainr-...', and
# 'dfan-ifan-Specificity' -> 'ifan-Specificity', so every algorithm carries a
# consistent prefix across all six metric plots.
data.accuracy <- bind_rows(tibble(Accuracy = 'gini-Accuracy',Range = gini_fold_accuracy),
tibble(Accuracy = 'entropy-Accuracy',Range = entropy_fold_accuracy),
tibble(Accuracy = 'infogain-Accuracy',Range = infog_fold_accuracy),
tibble(Accuracy = 'gainr-Accuracy',Range = gainr_fold_accuracy),
tibble(Accuracy = 'pruned-Accuracy',Range = pruned_fold_accuracy),
tibble(Accuracy = 'unpruned-Accuracy',Range = unpruned_fold_accuracy),
tibble(Accuracy = 'ifan-Accuracy',Range = cm_fold_ifan$accuracy),
tibble(Accuracy = 'dfan-Accuracy',Range = cm_fold_dfan$accuracy))
data.precision <- bind_rows(tibble(Precision = 'gini-Precision',Range = gini_fold_precision),
tibble(Precision = 'entropy-Precision',Range = entropy_fold_precision),
tibble(Precision = 'infogain-Precision',Range = infog_fold_precision),
tibble(Precision = 'gainr-Precision',Range = gainr_fold_precision),
tibble(Precision = 'pruned-Precision',Range = pruned_fold_precision),
tibble(Precision = 'unpruned-Precision',Range = unpruned_fold_precision),
tibble(Precision = 'ifan-Precision',Range = cm_fold_ifan$precision),
tibble(Precision = 'dfan-Precision',Range = cm_fold_dfan$precision))
data.recall <- bind_rows(tibble(Recall = 'gini-Recall',Range = gini_fold_recall),
tibble(Recall = 'entropy-Recall',Range = entropy_fold_recall),
tibble(Recall = 'infogain-Recall',Range = infog_fold_recall),
tibble(Recall = 'gainr-Recall',Range = gainr_fold_recall),
tibble(Recall = 'pruned-Recall',Range = pruned_fold_recall),
tibble(Recall = 'unpruned-Recall',Range = unpruned_fold_recall),
tibble(Recall = 'ifan-Recall',Range = cm_fold_ifan$recall),
tibble(Recall = 'dfan-Recall',Range = cm_fold_dfan$recall))
data.f <- bind_rows(tibble(F = 'gini-F',Range = gini_fold_f),
tibble(F = 'entropy-F',Range = entropy_fold_f),
tibble(F = 'infogain-F',Range = infog_fold_f),
tibble(F = 'gainr-F',Range = gainr_fold_f),
tibble(F = 'pruned-F',Range = pruned_fold_f),
tibble(F = 'unpruned-F',Range = unpruned_fold_f),
tibble(F = 'ifan-F',Range = cm_fold_ifan$f),
tibble(F = 'dfan-F',Range = cm_fold_dfan$f))
data.sensitivity <- bind_rows(tibble(Sensitivity = 'gini-Sensitivity',Range = gini_fold_sensitivity),
tibble(Sensitivity = 'entropy-Sensitivity',Range = entropy_fold_sensitivity),
tibble(Sensitivity = 'infogain-Sensitivity',Range = infog_fold_sensitivity),
tibble(Sensitivity = 'gainr-Sensitivity',Range = gainr_fold_sensitivity),
tibble(Sensitivity = 'pruned-Sensitivity',Range = pruned_fold_sensitivity),
tibble(Sensitivity = 'unpruned-Sensitivity',Range = unpruned_fold_sensitivity),
tibble(Sensitivity = 'ifan-Sensitivity',Range = cm_fold_ifan$sensitivity),
tibble(Sensitivity = 'dfan-Sensitivity',Range = cm_fold_dfan$sensitivity))
data.specificity <- bind_rows(tibble(Specificity = 'gini-Specificity',Range = gini_fold_specificity),
tibble(Specificity = 'entropy-Specificity',Range = entropy_fold_specificity),
tibble(Specificity = 'infogain-Specificity',Range = infog_fold_specificity),
tibble(Specificity = 'gainr-Specificity',Range = gainr_fold_specificity),
tibble(Specificity = 'pruned-Specificity',Range = pruned_fold_specificity),
tibble(Specificity = 'unpruned-Specificity',Range = unpruned_fold_specificity),
tibble(Specificity = 'ifan-Specificity',Range = cm_fold_ifan$specificity),
tibble(Specificity = 'dfan-Specificity',Range = cm_fold_dfan$specificity))
# Box plots comparing each metric across all eight algorithms.
ggplot(data.accuracy,aes(x=Accuracy,y=Range))+
geom_boxplot(fill='orange')
ggplot(data.precision,aes(x=Precision,y=Range))+
geom_boxplot(fill='orange')
ggplot(data.recall,aes(x=Recall,y=Range))+
geom_boxplot(fill='orange')
ggplot(data.f,aes(x=F,y=Range))+
geom_boxplot(fill='orange')
ggplot(data.sensitivity,aes(x=Sensitivity,y=Range))+
geom_boxplot(fill='orange')
ggplot(data.specificity,aes(x=Specificity,y=Range))+
geom_boxplot(fill='orange')
| /task-1/fftree.R | no_license | coderscraft/dm | R | false | false | 12,745 | r | library(FFTrees)
library(tidyverse)
library(rhandsontable)
require(gridExtra)
# NOTE(review): the second setwd() immediately overrides the first, so only
# the Sequestered directory is in effect below. Both paths are machine-specific.
setwd('/Users/ravirane/Desktop/GMU/CS584/myWork/assignment1/data/fold')
setwd("/Users/ravirane/Desktop/GMU/CS584/dm/task-1/data/Sequestered")
# Builds a Fast-and-Frugal Tree on a train/test CSV pair and prints it.
#
# trainFile : path to the training CSV (must contain a `class` column)
# testFile  : path to the test CSV with the same schema
# algo      : FFTrees tree-building algorithm name, e.g. "ifan" or "dfan"
# info      : free-text label printed before fitting (console logging)
#
# Returns the fitted FFTrees object (also printed as a side effect).
fast.frugal.tree <- function(trainFile, testFile, algo, info) {
print(info)
adult.train <- read.csv(file=trainFile, header=TRUE, sep=",")
adult.test <- read.csv(file=testFile, header=TRUE, sep=",")
adult.fft <- FFTrees(formula = class ~ .,
data = adult.train,
data.test = adult.test,
algorithm = algo,
main = "Adult data",
do.comp = FALSE,
decision.labels = c("<=50", ">50"))
print(adult.fft)
# last expression: return the fitted model
adult.fft
}
# Fit one FFTrees model per cross-validation fold, using both the "ifan"
# and "dfan" construction algorithms, then five models trained on the
# adult_300 set and evaluated on the sequestered 100_S* sets.
fold1.ifan.fft <- fast.frugal.tree("fold1_train.csv", "fold1_test.csv", "ifan", 'Fold 1 FFT - Algo: ifan')
fold2.ifan.fft <- fast.frugal.tree("fold2_train.csv", "fold2_test.csv", "ifan", 'Fold 2 FFT - Algo: ifan')
fold3.ifan.fft <- fast.frugal.tree("fold3_train.csv", "fold3_test.csv", "ifan", 'Fold 3 FFT - Algo: ifan')
fold4.ifan.fft <- fast.frugal.tree("fold4_train.csv", "fold4_test.csv", "ifan", 'Fold 4 FFT - Algo: ifan')
fold5.ifan.fft <- fast.frugal.tree("fold5_train.csv", "fold5_test.csv", "ifan", 'Fold 5 FFT - Algo: ifan')
fold1.dfan.fft <- fast.frugal.tree("fold1_train.csv", "fold1_test.csv", "dfan", 'Fold 1 FFT - Algo: dfan')
fold2.dfan.fft <- fast.frugal.tree("fold2_train.csv", "fold2_test.csv", "dfan", 'Fold 2 FFT - Algo: dfan')
fold3.dfan.fft <- fast.frugal.tree("fold3_train.csv", "fold3_test.csv", "dfan", 'Fold 3 FFT - Algo: dfan')
fold4.dfan.fft <- fast.frugal.tree("fold4_train.csv", "fold4_test.csv", "dfan", 'Fold 4 FFT - Algo: dfan')
fold5.dfan.fft <- fast.frugal.tree("fold5_train.csv", "fold5_test.csv", "dfan", 'Fold 5 FFT - Algo: dfan')
# Sequestered runs: all five train on the same 300-row set, each is
# evaluated on a different sequestered 100-row set.
Seq1.ifan.fft <- fast.frugal.tree("adult_300.csv", "100_S1.csv", "ifan", 'Sequestered 1 - Algo: ifan')
Seq2.ifan.fft <- fast.frugal.tree("adult_300.csv", "100_S2.csv", "ifan", 'Sequestered 2 - Algo: ifan')
Seq3.ifan.fft <- fast.frugal.tree("adult_300.csv", "100_S3.csv", "ifan", 'Sequestered 3 - Algo: ifan')
Seq4.ifan.fft <- fast.frugal.tree("adult_300.csv", "100_S4.csv", "ifan", 'Sequestered 4 - Algo: ifan')
Seq5.ifan.fft <- fast.frugal.tree("adult_300.csv", "100_S5.csv", "ifan", 'Sequestered 5 - Algo: ifan')
#Seq6.ifan.fft <- fast.frugal.tree("adult_ds300_preprocessed.csv", "fold1_test.csv", "ifan", 'Sequestered 6 - Algo: ifan')
# Plot each fitted tree's performance on its test data
# (data = "test" selects the held-out fold passed via data.test above).
plot(fold1.ifan.fft, data = "test")
plot(fold2.ifan.fft, data = "test")
plot(fold3.ifan.fft, data = "test")
plot(fold4.ifan.fft, data = "test")
plot(fold5.ifan.fft, data = "test")
plot(fold1.dfan.fft, data = "test")
plot(fold2.dfan.fft, data = "test")
plot(fold3.dfan.fft, data = "test")
plot(fold4.dfan.fft, data = "test")
plot(fold5.dfan.fft, data = "test")
plot(Seq1.ifan.fft, data = "test")
plot(Seq2.ifan.fft, data = "test")
plot(Seq3.ifan.fft, data = "test")
plot(Seq4.ifan.fft, data = "test")
plot(Seq5.ifan.fft, data = "test")
#plot(Seq6.ifan.fft, data = "test")
folds <- c('Fold1', 'Fold2', 'Fold3', 'Fold4', 'Fold5')
# Confusion-matrix counts for the five ifan cross-validation folds,
# transcribed from the FFTrees test output above.
tp_ifan <- c(49, 53, 46, 48, 55)
fp_ifan <- c(12, 14, 20, 21, 17)
tn_ifan <- c(46, 46, 44, 41, 43)
fn_ifan <- c(13, 7, 10, 10, 5)
cm_fold_ifan <- tibble(FOLD = folds, TP = tp_ifan, TN = tn_ifan, FP = fp_ifan, FN = fn_ifan)
# Derived metrics, rounded to two decimals.
cm_fold_ifan$accuracy <- round((cm_fold_ifan$TP + cm_fold_ifan$TN)/(cm_fold_ifan$TP + cm_fold_ifan$TN + cm_fold_ifan$FP + cm_fold_ifan$FN), digits = 2)
cm_fold_ifan$precision <- round(cm_fold_ifan$TP/(cm_fold_ifan$TP + cm_fold_ifan$FP), digits = 2)
cm_fold_ifan$recall <- round(cm_fold_ifan$TP/(cm_fold_ifan$TP + cm_fold_ifan$FN), digits = 2)
cm_fold_ifan$f <- round(2*cm_fold_ifan$recall*cm_fold_ifan$precision/(cm_fold_ifan$precision + cm_fold_ifan$recall), digits = 2)
# Sensitivity is TP/(TP+FN) (identical to recall) and specificity is
# TN/(TN+FP).  Computing them replaces the previously hand-entered
# vectors, whose rounded values these formulas reproduce exactly.
cm_fold_ifan$sensitivity <- cm_fold_ifan$recall
cm_fold_ifan$specificity <- round(cm_fold_ifan$TN/(cm_fold_ifan$TN + cm_fold_ifan$FP), digits = 2)
rhandsontable(cm_fold_ifan, rowHeaders = NULL)
# Confusion-matrix counts for the five dfan cross-validation folds,
# transcribed from the FFTrees test output above.
tp_dfan <- c(49, 54, 50, 48, 55)
fp_dfan <- c(12, 14, 21, 21, 17)
tn_dfan <- c(46, 46, 43, 41, 43)
fn_dfan <- c(13, 6, 6, 10, 5)
cm_fold_dfan <- tibble(FOLD = folds, TP = tp_dfan, TN = tn_dfan, FP = fp_dfan, FN = fn_dfan)
# Derived metrics, rounded to two decimals.
cm_fold_dfan$accuracy <- round((cm_fold_dfan$TP + cm_fold_dfan$TN)/(cm_fold_dfan$TP + cm_fold_dfan$TN + cm_fold_dfan$FP + cm_fold_dfan$FN), digits = 2)
cm_fold_dfan$precision <- round(cm_fold_dfan$TP/(cm_fold_dfan$TP + cm_fold_dfan$FP), digits = 2)
cm_fold_dfan$recall <- round(cm_fold_dfan$TP/(cm_fold_dfan$TP + cm_fold_dfan$FN), digits = 2)
cm_fold_dfan$f <- round(2*cm_fold_dfan$recall*cm_fold_dfan$precision/(cm_fold_dfan$precision + cm_fold_dfan$recall), digits = 2)
# Sensitivity is TP/(TP+FN) (identical to recall) and specificity is
# TN/(TN+FP).  Computing them replaces the previously hand-entered
# vectors, whose rounded values these formulas reproduce exactly.
cm_fold_dfan$sensitivity <- cm_fold_dfan$recall
cm_fold_dfan$specificity <- round(cm_fold_dfan$TN/(cm_fold_dfan$TN + cm_fold_dfan$FP), digits = 2)
rhandsontable(cm_fold_dfan, rowHeaders = NULL)
sq <- c('Seq 1', 'Seq 2', 'Seq 3', 'Seq 4', 'Seq 5' )
# Confusion-matrix counts for the five sequestered-set ifan models.
sqtp_ifan <- c(38, 22, 19, 20, 21 )
sqfp_ifan <- c(12, 25, 30, 19, 23)
sqtn_ifan <- c(41, 51, 46, 57, 53)
sqfn_ifan <- c(9, 2, 5, 4, 3)
sq_ifan = tibble(SQ= sq, TP = sqtp_ifan, TN = sqtn_ifan, FP = sqfp_ifan, FN = sqfn_ifan)
# Derived metrics, rounded to two decimals.
sq_ifan$accuracy <- round((sq_ifan$TP + sq_ifan$TN)/(sq_ifan$TP + sq_ifan$TN + sq_ifan$FP + sq_ifan$FN), digits = 2)
sq_ifan$precision <- round(sq_ifan$TP/(sq_ifan$TP + sq_ifan$FP), digits = 2)
sq_ifan$recall <- round(sq_ifan$TP/(sq_ifan$TP + sq_ifan$FN), digits = 2)
sq_ifan$f <- round(2*sq_ifan$recall*sq_ifan$precision/(sq_ifan$precision + sq_ifan$recall), digits = 2)
# NOTE(review): these two vectors are hand-entered.  For Seq 2-5 they match
# TP/(TP+FN) and TN/(TN+FP) computed from the counts above, but for Seq 1
# they do not (0.75 vs computed 0.81; 0.76 vs computed 0.77) — verify
# against the original model output before relying on the Seq 1 row.
sq_ifan$sensitivity <- c(0.75, 0.92, 0.79, 0.83, 0.88)
sq_ifan$specificity <- c(0.76, 0.67, 0.61, 0.75, 0.70)
rhandsontable(sq_ifan, rowHeaders = NULL)
# Per-fold metrics for the competing decision-tree variants, hand-entered
# (presumably transcribed from earlier runs of those models — TODO confirm
# against their original output).  Assignments use `<-` per R convention.
## Gini
gini_fold_accuracy <- c(0.8, 0.85, 0.78, 0.78, 0.82)
gini_fold_precision <- c(0.81, 0.84, 0.73, 0.75, 0.81)
gini_fold_recall <- c(0.81, 0.87, 0.86, 0.81, 0.83)
gini_fold_f <- c(0.81, 0.85, 0.79, 0.78, 0.82)
gini_fold_sensitivity <- c(0.81, 0.87, 0.86, 0.81, 0.83)
gini_fold_specificity <- c(0.79, 0.83, 0.72, 0.74, 0.8)
## Entropy
entropy_fold_accuracy <- c(0.8, 0.87, 0.78, 0.78, 0.81)
entropy_fold_precision <- c(0.81, 0.84, 0.73, 0.75, 0.8)
entropy_fold_recall <- c(0.81, 0.9, 0.86, 0.81, 0.82)
entropy_fold_f <- c(0.81, 0.87, 0.79, 0.78, 0.81)
entropy_fold_sensitivity <- c(0.81, 0.9, 0.86, 0.81, 0.82)
entropy_fold_specificity <- c(0.79, 0.83, 0.72, 0.74, 0.8)
## Info gain
infog_fold_accuracy <- c(0.62, 0.83, 0.86, 0.80, 0.81)
infog_fold_precision <- c(0.78, 0.85, 0.86, 1, 1)
infog_fold_recall <- c(0.61, 0.83, 0.86, 0.80, 0.81)
infog_fold_f <- c(0.55, 0.84, 0.86, 0.89, 0.89)
infog_fold_sensitivity <- c(1, 0.25, 0.33, 0, 0)
infog_fold_specificity <- c(0.56, 0.92, 0.92, 1, 1)
## Gain Ratio
gainr_fold_accuracy <- c(0.61, 0.83, 0.86, 0.80, 0.82)
gainr_fold_precision <- c(0.78, 0.85, 0.86, 0.80, 0.82)
gainr_fold_recall <- c(0.61, 0.83, 0.86, 0.89, 0.90)
gainr_fold_f <- c(0.55, 0.84, 0.86, 0.89, 0.90)
gainr_fold_sensitivity <- c(1, 0.25, 0.33, 0, 0)
gainr_fold_specificity <- c(0.56, 0.92, 0.92, 1, 1)
## Unpruned
unpruned_fold_accuracy <- c(0.65, 0.75, 0.85, 0.81, 0.86)
unpruned_fold_precision <- c(0.77, 0.87, 0.89, 1, 1)
unpruned_fold_recall <- c(0.65, 0.75, 0.85, 0.81, 0.86)
unpruned_fold_f <- c(0.60, 0.8, 0.87, 0.89, 0.92)
unpruned_fold_sensitivity <- c(0.95, 0.22, 0.36, 0, 0)
unpruned_fold_specificity <- c(0.59, 0.94, 0.95, 1, 1)
## Pruned
pruned_fold_accuracy <- c(0.6, 0.85, 0.86, 0.80, 0.91)
pruned_fold_precision <- c(0.78, 0.86, 0.86, 1, 1)
pruned_fold_recall <- c(0.6, 0.85, 0.86, 0.88, 0.91)
pruned_fold_f <- c(0.34, 0.85, 0.86, 0.88, 0.95)
pruned_fold_sensitivity <- c(1, 0.28, 0.33, 0, 0)
pruned_fold_specificity <- c(0.56, 0.92, 0.92, 1, 1)
# Long-format tables (one row per model/fold) used for the box plots below.
# Labels follow the pattern "<model>-<Metric>"; three labels in the original
# were inconsistent ("ffifan", "gnainr", "dfan-ifan") and are corrected here
# so the plot axes read uniformly.
data.accuracy <- bind_rows(tibble(Accuracy = 'gini-Accuracy', Range = gini_fold_accuracy),
                           tibble(Accuracy = 'entropy-Accuracy', Range = entropy_fold_accuracy),
                           tibble(Accuracy = 'infogain-Accuracy', Range = infog_fold_accuracy),
                           tibble(Accuracy = 'gainr-Accuracy', Range = gainr_fold_accuracy),
                           tibble(Accuracy = 'pruned-Accuracy', Range = pruned_fold_accuracy),
                           tibble(Accuracy = 'unpruned-Accuracy', Range = unpruned_fold_accuracy),
                           tibble(Accuracy = 'ifan-Accuracy', Range = cm_fold_ifan$accuracy),
                           tibble(Accuracy = 'dfan-Accuracy', Range = cm_fold_dfan$accuracy))
data.precision <- bind_rows(tibble(Precision = 'gini-Precision', Range = gini_fold_precision),
                            tibble(Precision = 'entropy-Precision', Range = entropy_fold_precision),
                            tibble(Precision = 'infogain-Precision', Range = infog_fold_precision),
                            tibble(Precision = 'gainr-Precision', Range = gainr_fold_precision),
                            tibble(Precision = 'pruned-Precision', Range = pruned_fold_precision),
                            tibble(Precision = 'unpruned-Precision', Range = unpruned_fold_precision),
                            tibble(Precision = 'ifan-Precision', Range = cm_fold_ifan$precision),
                            tibble(Precision = 'dfan-Precision', Range = cm_fold_dfan$precision))
data.recall <- bind_rows(tibble(Recall = 'gini-Recall', Range = gini_fold_recall),
                         tibble(Recall = 'entropy-Recall', Range = entropy_fold_recall),
                         tibble(Recall = 'infogain-Recall', Range = infog_fold_recall),
                         tibble(Recall = 'gainr-Recall', Range = gainr_fold_recall),
                         tibble(Recall = 'pruned-Recall', Range = pruned_fold_recall),
                         tibble(Recall = 'unpruned-Recall', Range = unpruned_fold_recall),
                         tibble(Recall = 'ifan-Recall', Range = cm_fold_ifan$recall),
                         tibble(Recall = 'dfan-Recall', Range = cm_fold_dfan$recall))
data.f <- bind_rows(tibble(F = 'gini-F', Range = gini_fold_f),
                    tibble(F = 'entropy-F', Range = entropy_fold_f),
                    tibble(F = 'infogain-F', Range = infog_fold_f),
                    tibble(F = 'gainr-F', Range = gainr_fold_f),
                    tibble(F = 'pruned-F', Range = pruned_fold_f),
                    tibble(F = 'unpruned-F', Range = unpruned_fold_f),
                    tibble(F = 'ifan-F', Range = cm_fold_ifan$f),
                    tibble(F = 'dfan-F', Range = cm_fold_dfan$f))
data.sensitivity <- bind_rows(tibble(Sensitivity = 'gini-Sensitivity', Range = gini_fold_sensitivity),
                              tibble(Sensitivity = 'entropy-Sensitivity', Range = entropy_fold_sensitivity),
                              tibble(Sensitivity = 'infogain-Sensitivity', Range = infog_fold_sensitivity),
                              tibble(Sensitivity = 'gainr-Sensitivity', Range = gainr_fold_sensitivity),
                              tibble(Sensitivity = 'pruned-Sensitivity', Range = pruned_fold_sensitivity),
                              tibble(Sensitivity = 'unpruned-Sensitivity', Range = unpruned_fold_sensitivity),
                              tibble(Sensitivity = 'ifan-Sensitivity', Range = cm_fold_ifan$sensitivity),
                              tibble(Sensitivity = 'dfan-Sensitivity', Range = cm_fold_dfan$sensitivity))
data.specificity <- bind_rows(tibble(Specificity = 'gini-Specificity', Range = gini_fold_specificity),
                              tibble(Specificity = 'entropy-Specificity', Range = entropy_fold_specificity),
                              tibble(Specificity = 'infogain-Specificity', Range = infog_fold_specificity),
                              tibble(Specificity = 'gainr-Specificity', Range = gainr_fold_specificity),
                              tibble(Specificity = 'pruned-Specificity', Range = pruned_fold_specificity),
                              tibble(Specificity = 'unpruned-Specificity', Range = unpruned_fold_specificity),
                              tibble(Specificity = 'ifan-Specificity', Range = cm_fold_ifan$specificity),
                              tibble(Specificity = 'dfan-Specificity', Range = cm_fold_dfan$specificity))
# One box plot per metric, comparing all eight model variants side by side.
ggplot(data.accuracy,aes(x=Accuracy,y=Range))+
geom_boxplot(fill='orange')
ggplot(data.precision,aes(x=Precision,y=Range))+
geom_boxplot(fill='orange')
ggplot(data.recall,aes(x=Recall,y=Range))+
geom_boxplot(fill='orange')
ggplot(data.f,aes(x=F,y=Range))+
geom_boxplot(fill='orange')
ggplot(data.sensitivity,aes(x=Sensitivity,y=Range))+
geom_boxplot(fill='orange')
ggplot(data.specificity,aes(x=Specificity,y=Range))+
geom_boxplot(fill='orange')
|
# Integrative DRW on combined feature data (updated 2018/07/20)
# Concatenates the directed pathway graphs within each profile (GM, GMR, GMR_d, GMP).
# For PPI network diffusion, the Random Walk with Restart (RWR) algorithm was used.
# To find an optimized restart probability for PPI diffusion, a grid search was
# performed over combinations of p = [0.001, 0.01, 0.2, 0.4, 0.6, 0.8] and
# Gamma = [0, 0.2, 0.4, 0.6, 0.8]; p = 0.5 had been used previously.
# For parameter tuning of the GM model, an extra experiment was performed by adding Gamma = [0.7, 0.75, 0.85, 0.9, 0.95].
# All gene symbols are converted to Entrez gene IDs.
# 5-fold CV (10 iterations) was performed to tune the Random Forest parameters.
# 5-fold CV (10 iterations) was performed to get the top N pathways.
# LOOCV was performed for model evaluation.
# Dppigraph(Entrez).rda was used.
# Edge directions:
#   m -> g
#   p -> g
# Classifier: rf (Random Forest)
################################## Result 18_all ############################################################
################################## GM ######################################################################
#------------------------- RNAseq + Methyl -------------------------#
# Gamma grid search for the GM (RNAseq + Methyl) model with a fixed restart
# probability of 0.001.  The per-run inputs were identical in every section,
# so they are built once here instead of being re-assigned before each run.
gm <- g %du% m
testStatistic <- c("DESeq2", "t-test")
profile_name <- c("rna(Entrez)", "meth(Entrez)")
x <- list(rnaseq, imputed_methyl)

# Run one GM experiment: DRW feature construction (fit.iDRWPClass writes its
# output keyed by `result_id`) followed by a random-forest classifier with
# 5-fold CV and 10 iterations.  Returns the classification result.
# Relies on the gm / x / y / path globals prepared above.
fit_gm_model <- function(result_id, gamma) {
  fit.iDRWPClass(x = x, y = y, globalGraph = gm, testStatistic = testStatistic,
                 profile_name = profile_name, datapath = datapath,
                 respath = respath, pathSet = pathSet, method = "DRW",
                 samples = samples, id = result_id, prob = 0.001,
                 Gamma = gamma, pranking = "t-test", mode = "GM",
                 AntiCorr = FALSE, DEBUG = TRUE)
  fit.classification(y = y, samples = samples, id = result_id,
                     datapath = datapath, respath = respath,
                     profile_name = profile_name, method = "DRW",
                     pranking = "t-test", classifier = "rf",
                     nFolds = 5, numTops = 50, iter = 10)
}

# Gamma sweep; each result is kept under its original global name and saved
# to its original .RData path so downstream code is unaffected.
res_pa_GM_18_1 <- fit_gm_model("result18_1_GM", 0)
save(res_pa_GM_18_1, file = file.path('data/model/res_pa_GM_18_1.RData'))
res_pa_GM_18_2 <- fit_gm_model("result18_2_GM", 0.2)
save(res_pa_GM_18_2, file = file.path('data/model/res_pa_GM_18_2.RData'))
res_pa_GM_18_3 <- fit_gm_model("result18_3_GM", 0.4)
save(res_pa_GM_18_3, file = file.path('data/model/res_pa_GM_18_3.RData'))
res_pa_GM_18_4 <- fit_gm_model("result18_4_GM", 0.6)
save(res_pa_GM_18_4, file = file.path('data/model/res_pa_GM_18_4.RData'))
res_pa_GM_18_4.5 <- fit_gm_model("result18_4.5_GM", 0.7)
save(res_pa_GM_18_4.5, file = file.path('data/model/res_pa_GM_18_4.5.RData'))
res_pa_GM_18_0.75 <- fit_gm_model("result18_0.75_GM", 0.75)
save(res_pa_GM_18_0.75, file = file.path('data/model/res_pa_GM_18_0.75.RData'))
res_pa_GM_18_5 <- fit_gm_model("result18_5_GM", 0.8)
save(res_pa_GM_18_5, file = file.path('data/model/res_pa_GM_18_5.RData'))
res_pa_GM_18_0.85 <- fit_gm_model("result18_0.85_GM", 0.85)
save(res_pa_GM_18_0.85, file = file.path('data/model/res_pa_GM_18_0.85.RData'))
res_pa_GM_18_0.9 <- fit_gm_model("result18_0.9_GM", 0.9)
save(res_pa_GM_18_0.9, file = file.path('data/model/res_pa_GM_18_0.9.RData'))
res_pa_GM_18_0.95 <- fit_gm_model("result18_0.95_GM", 0.95)
save(res_pa_GM_18_0.95, file = file.path('data/model/res_pa_GM_18_0.95.RData'))
############################################## plot #######################################
# Box plot comparing the GM models across the gamma grid.
res_gm <- list(res_pa_GM_18_1, res_pa_GM_18_2, res_pa_GM_18_3, res_pa_GM_18_4, res_pa_GM_18_4.5,
               res_pa_GM_18_0.75, res_pa_GM_18_5, res_pa_GM_18_0.85, res_pa_GM_18_0.9, res_pa_GM_18_0.95)
title <- c("Result 18_GM")
xlabs <- c("[g=0]", "[g=0.2]", "[g=0.4]", "[g=0.6]", "[g=0.7]", "[g=0.75]", "[g=0.8]", "[g=0.85]", "[g=0.9]", "[g=0.95]")
# Mean resampled accuracy per model, used only to pad the y-axis range.
# (vapply rather than sapply for a guaranteed numeric result.)
mean_acc <- vapply(res_gm, function(res) mean(res$resample$Accuracy), numeric(1))
perf_min <- min(mean_acc)
perf_max <- max(mean_acc)
perf_boxplot(title, xlabs, res_gm, perf_min = perf_min - 0.2, perf_max = perf_max + 0.2)
# Accuracy ((A+D)/(A+B+C+D)): print each model's confusion matrix, labelled
# by its gamma setting (the old manual counter printed a bare 0-based index).
for (k in seq_along(res_gm)) {
  cat(xlabs[k], "\n")
  print(confusionMatrix(res_gm[[k]], "none"))
}
##############################################################################################################################
################################## GMR ######################################################################
#------------------------- RNAseq + Methyl + RPPA(Pathway Graph) -------------------------#
# Gamma grid search for the GMR model (RNAseq + Methyl + RPPA pathway graph)
# with a fixed restart probability of 0.001.  The per-run inputs were
# identical in every section, so they are built once here.
gmr <- g %du% m %du% r
testStatistic <- c("DESeq2", "t-test", "t-test")
profile_name <- c("rna(Entrez)", "meth(Entrez)", "rppa(Pathway_Graph_Entrez)")
x <- list(rnaseq, imputed_methyl, rppa)

# Run one GMR experiment: DRW feature construction followed by a
# random-forest classifier (5-fold CV, 10 iterations).  Returns the
# classification result.  Relies on the gmr / x / y / path globals above.
fit_gmr_model <- function(result_id, gamma) {
  fit.iDRWPClass(x = x, y = y, globalGraph = gmr, testStatistic = testStatistic,
                 profile_name = profile_name, datapath = datapath,
                 respath = respath, pathSet = pathSet, method = "DRW",
                 samples = samples, id = result_id, prob = 0.001,
                 Gamma = gamma, pranking = "t-test", mode = "GMR",
                 AntiCorr = FALSE, DEBUG = TRUE)
  fit.classification(y = y, samples = samples, id = result_id,
                     datapath = datapath, respath = respath,
                     profile_name = profile_name, method = "DRW",
                     pranking = "t-test", classifier = "rf",
                     nFolds = 5, numTops = 50, iter = 10)
}

# Gamma sweep; global names and .RData paths match the original script.
res_pa_GMR_18_1 <- fit_gmr_model("result18_1_GMR", 0)
save(res_pa_GMR_18_1, file = file.path('data/model/res_pa_GMR_18_1.RData'))
res_pa_GMR_18_2 <- fit_gmr_model("result18_2_GMR", 0.2)
save(res_pa_GMR_18_2, file = file.path('data/model/res_pa_GMR_18_2.RData'))
res_pa_GMR_18_3 <- fit_gmr_model("result18_3_GMR", 0.4)
save(res_pa_GMR_18_3, file = file.path('data/model/res_pa_GMR_18_3.RData'))
res_pa_GMR_18_4 <- fit_gmr_model("result18_4_GMR", 0.6)
save(res_pa_GMR_18_4, file = file.path('data/model/res_pa_GMR_18_4.RData'))
res_pa_GMR_18_5 <- fit_gmr_model("result18_5_GMR", 0.8)
save(res_pa_GMR_18_5, file = file.path('data/model/res_pa_GMR_18_5.RData'))
############################################## plot #######################################
# Plot for GMR models
# NOTE(review): the `res_pa_GMR_18_*_LOOCV` objects referenced below are not
# created in this section of the script (only res_pa_GMR_18_1..18_5, without
# the _LOOCV suffix, and only five of the eight listed).  They are presumably
# loaded from saved LOOCV runs elsewhere — verify before running this block.
res_gmr <- list(res_pa_GMR_18_1_LOOCV, res_pa_GMR_18_2_LOOCV, res_pa_GMR_18_3_LOOCV, res_pa_GMR_18_4_LOOCV, res_pa_GMR_18_5_LOOCV,
res_pa_GMR_18_6_LOOCV, res_pa_GMR_18_7_LOOCV, res_pa_GMR_18_8_LOOCV)
title <- c("Result 18_GMR")
xlabs <- c("[g=0]", "[g=0.2]", "[g=0.4]", "[g=0.6]", "[g=0.8]", "[g=0.85]", "[g=0.9]", "[g=0.95]")
# NOTE(review): unlike the GM plot (mean of $resample$Accuracy), this uses the
# max of $results$Accuracy to set the plot range — confirm this is intended.
perf_min <- min(sapply(X = res_gmr, FUN = function(x){max(x$results$Accuracy)}))
perf_max <- max(sapply(X = res_gmr, FUN = function(x){max(x$results$Accuracy)}))
perf_boxplot(title, xlabs, res_gmr, perf_min = perf_min-0.15, perf_max = perf_max+0.15)
##############################################################################################################################
################################## GMR_d ######################################################################
#------------------------- RNAseq + Methyl + RPPA(diffused Pathway Graph) -------------------------#
# Grid search over restart probability and gamma for the GMR_d model (RNAseq
# + Methyl + RPPA with the diffused pathway graph).  Unlike GM/GMR, the
# graphs are passed as a list rather than a disjoint union.  The per-run
# inputs were identical in every section, so they are built once here.
gmr <- list(g, m, r)
testStatistic <- c("DESeq2", "t-test", "t-test")
profile_name <- c("rna(Entrez)", "meth(Entrez)", "rppa(diffused_Pathway_Graph_Entrez)")
x <- list(rnaseq, imputed_methyl, rppa)

# Run one GMR_d experiment: DRW feature construction with the given restart
# probability and gamma, then a random-forest classifier (5-fold CV, 10
# iterations).  Returns the classification result.
fit_gmr_d_model <- function(result_id, restart_prob, gamma) {
  fit.iDRWPClass(x = x, y = y, globalGraph = gmr, testStatistic = testStatistic,
                 profile_name = profile_name, datapath = datapath,
                 respath = respath, pathSet = pathSet, method = "DRW",
                 samples = samples, id = result_id, prob = restart_prob,
                 Gamma = gamma, pranking = "t-test", mode = "GMR_d",
                 AntiCorr = FALSE, DEBUG = TRUE)
  fit.classification(y = y, samples = samples, id = result_id,
                     datapath = datapath, respath = respath,
                     profile_name = profile_name, method = "DRW",
                     pranking = "t-test", classifier = "rf",
                     nFolds = 5, numTops = 50, iter = 10)
}

# (prob, Gamma) sweep; global names and .RData paths match the original script.
res_pa_GMR_d_18_1 <- fit_gmr_d_model("result18_1_GMR_d", 0.001, 0)
save(res_pa_GMR_d_18_1, file = file.path('data/model/res_pa_GMR_d_18_1.RData'))
res_pa_GMR_d_18_2 <- fit_gmr_d_model("result18_2_GMR_d", 0.001, 0.2)
save(res_pa_GMR_d_18_2, file = file.path('data/model/res_pa_GMR_d_18_2.RData'))
res_pa_GMR_d_18_3 <- fit_gmr_d_model("result18_3_GMR_d", 0.001, 0.4)
save(res_pa_GMR_d_18_3, file = file.path('data/model/res_pa_GMR_d_18_3.RData'))
res_pa_GMR_d_18_4 <- fit_gmr_d_model("result18_4_GMR_d", 0.001, 0.6)
save(res_pa_GMR_d_18_4, file = file.path('data/model/res_pa_GMR_d_18_4.RData'))
res_pa_GMR_d_18_5 <- fit_gmr_d_model("result18_5_GMR_d", 0.001, 0.8)
save(res_pa_GMR_d_18_5, file = file.path('data/model/res_pa_GMR_d_18_5.RData'))
res_pa_GMR_d_18_6 <- fit_gmr_d_model("result18_6_GMR_d", 0.01, 0)
save(res_pa_GMR_d_18_6, file = file.path('data/model/res_pa_GMR_d_18_6.RData'))
################################################### Result18_7: prob = 0.01, Gamma = 0.2 #################################################
#------------------------- RNAseq + Methyl + RPPA(diffused Pathway Graph) -------------------------#
gmr <- list(g, m, r)
testStatistic <- c("DESeq2", "t-test", "t-test")
profile_name <- c("rna(Entrez)", "meth(Entrez)", "rppa(diffused_Pathway_Graph_Entrez)")
x=list(rnaseq, imputed_methyl, rppa)
fit.iDRWPClass(x=x, y=y, globalGraph=gmr, testStatistic= testStatistic, profile_name = profile_name,
datapath = datapath, respath = respath, pathSet=pathSet, method = "DRW", samples = samples,
id = "result18_7_GMR_d", prob = 0.01, Gamma = 0.2, pranking = "t-test", mode = "GMR_d", AntiCorr=FALSE, DEBUG=TRUE)
res_pa_GMR_d_18_7 <- fit.classification(y=y, samples = samples, id = "result18_7_GMR_d", datapath = datapath, respath = respath,
profile_name = profile_name, method = "DRW", pranking = "t-test", classifier = "rf",
nFolds = 5, numTops=50, iter = 10)
save(res_pa_GMR_d_18_7, file=file.path('data/model/res_pa_GMR_d_18_7.RData'))
################################################### Result18_8: prob = 0.01, Gamma = 0.4 #################################################
#------------------------- RNAseq + Methyl + RPPA(diffused Pathway Graph) -------------------------#
gmr <- list(g, m, r)
testStatistic <- c("DESeq2", "t-test", "t-test")
profile_name <- c("rna(Entrez)", "meth(Entrez)", "rppa(diffused_Pathway_Graph_Entrez)")
x=list(rnaseq, imputed_methyl, rppa)
fit.iDRWPClass(x=x, y=y, globalGraph=gmr, testStatistic= testStatistic, profile_name = profile_name,
datapath = datapath, respath = respath, pathSet=pathSet, method = "DRW", samples = samples,
id = "result18_8_GMR_d", prob = 0.01, Gamma = 0.4, pranking = "t-test", mode = "GMR_d", AntiCorr=FALSE, DEBUG=TRUE)
res_pa_GMR_d_18_8 <- fit.classification(y=y, samples = samples, id = "result18_8_GMR_d", datapath = datapath, respath = respath,
profile_name = profile_name, method = "DRW", pranking = "t-test", classifier = "rf",
nFolds = 5, numTops=50, iter = 10)
save(res_pa_GMR_d_18_8, file=file.path('data/model/res_pa_GMR_d_18_8.RData'))
################################################### Result18_9: prob = 0.01, Gamma = 0.6 #################################################
#------------------------- RNAseq + Methyl + RPPA(diffused Pathway Graph) -------------------------#
gmr <- list(g, m, r)
testStatistic <- c("DESeq2", "t-test", "t-test")
profile_name <- c("rna(Entrez)", "meth(Entrez)", "rppa(diffused_Pathway_Graph_Entrez)")
x=list(rnaseq, imputed_methyl, rppa)
fit.iDRWPClass(x=x, y=y, globalGraph=gmr, testStatistic= testStatistic, profile_name = profile_name,
datapath = datapath, respath = respath, pathSet=pathSet, method = "DRW", samples = samples,
id = "result18_9_GMR_d", prob = 0.01, Gamma = 0.6, pranking = "t-test", mode = "GMR_d", AntiCorr=FALSE, DEBUG=TRUE)
res_pa_GMR_d_18_9 <- fit.classification(y=y, samples = samples, id = "result18_9_GMR_d", datapath = datapath, respath = respath,
profile_name = profile_name, method = "DRW", pranking = "t-test", classifier = "rf",
nFolds = 5, numTops=50, iter = 10)
save(res_pa_GMR_d_18_9, file=file.path('data/model/res_pa_GMR_d_18_9.RData'))
################################################### Result18_10: prob = 0.01, Gamma = 0.8 #################################################
#------------------------- RNAseq + Methyl + RPPA(diffused Pathway Graph) -------------------------#
gmr <- list(g, m, r)
testStatistic <- c("DESeq2", "t-test", "t-test")
profile_name <- c("rna(Entrez)", "meth(Entrez)", "rppa(diffused_Pathway_Graph_Entrez)")
x=list(rnaseq, imputed_methyl, rppa)
fit.iDRWPClass(x=x, y=y, globalGraph=gmr, testStatistic= testStatistic, profile_name = profile_name,
datapath = datapath, respath = respath, pathSet=pathSet, method = "DRW", samples = samples,
id = "result18_10_GMR_d", prob = 0.01, Gamma = 0.8, pranking = "t-test", mode = "GMR_d", AntiCorr=FALSE, DEBUG=TRUE)
res_pa_GMR_d_18_10 <- fit.classification(y=y, samples = samples, id = "result18_10_GMR_d", datapath = datapath, respath = respath,
profile_name = profile_name, method = "DRW", pranking = "t-test", classifier = "rf",
nFolds = 5, numTops=50, iter = 10)
save(res_pa_GMR_d_18_10, file=file.path('data/model/res_pa_GMR_d_18_10.RData'))
################################################### Result18_11: prob = 0.2, Gamma = 0 #################################################
#------------------------- RNAseq + Methyl + RPPA(diffused Pathway Graph) -------------------------#
gmr <- list(g, m, r)
testStatistic <- c("DESeq2", "t-test", "t-test")
profile_name <- c("rna(Entrez)", "meth(Entrez)", "rppa(diffused_Pathway_Graph_Entrez)")
x=list(rnaseq, imputed_methyl, rppa)
fit.iDRWPClass(x=x, y=y, globalGraph=gmr, testStatistic= testStatistic, profile_name = profile_name,
datapath = datapath, respath = respath, pathSet=pathSet, method = "DRW", samples = samples,
id = "result18_11_GMR_d", prob = 0.2, Gamma = 0, pranking = "t-test", mode = "GMR_d", AntiCorr=FALSE, DEBUG=TRUE)
res_pa_GMR_d_18_11 <- fit.classification(y=y, samples = samples, id = "result18_11_GMR_d", datapath = datapath, respath = respath,
profile_name = profile_name, method = "DRW", pranking = "t-test", classifier = "rf",
nFolds = 5, numTops=50, iter = 10)
save(res_pa_GMR_d_18_11, file=file.path('data/model/res_pa_GMR_d_18_11.RData'))
################################################### Result18_12: prob = 0.2, Gamma = 0.2 #################################################
#------------------------- RNAseq + Methyl + RPPA(diffused Pathway Graph) -------------------------#
gmr <- list(g, m, r)
testStatistic <- c("DESeq2", "t-test", "t-test")
profile_name <- c("rna(Entrez)", "meth(Entrez)", "rppa(diffused_Pathway_Graph_Entrez)")
x=list(rnaseq, imputed_methyl, rppa)
fit.iDRWPClass(x=x, y=y, globalGraph=gmr, testStatistic= testStatistic, profile_name = profile_name,
datapath = datapath, respath = respath, pathSet=pathSet, method = "DRW", samples = samples,
id = "result18_12_GMR_d", prob = 0.2, Gamma = 0.2, pranking = "t-test", mode = "GMR_d", AntiCorr=FALSE, DEBUG=TRUE)
res_pa_GMR_d_18_12 <- fit.classification(y=y, samples = samples, id = "result18_12_GMR_d", datapath = datapath, respath = respath,
profile_name = profile_name, method = "DRW", pranking = "t-test", classifier = "rf",
nFolds = 5, numTops=50, iter = 10)
save(res_pa_GMR_d_18_12, file=file.path('data/model/res_pa_GMR_d_18_12.RData'))
################################################### Result18_13: prob = 0.2, Gamma = 0.4 #################################################
#------------------------- RNAseq + Methyl + RPPA(diffused Pathway Graph) -------------------------#
gmr <- list(g, m, r)
testStatistic <- c("DESeq2", "t-test", "t-test")
profile_name <- c("rna(Entrez)", "meth(Entrez)", "rppa(diffused_Pathway_Graph_Entrez)")
x=list(rnaseq, imputed_methyl, rppa)
fit.iDRWPClass(x=x, y=y, globalGraph=gmr, testStatistic= testStatistic, profile_name = profile_name,
datapath = datapath, respath = respath, pathSet=pathSet, method = "DRW", samples = samples,
id = "result18_13_GMR_d", prob = 0.2, Gamma = 0.4, pranking = "t-test", mode = "GMR_d", AntiCorr=FALSE, DEBUG=TRUE)
res_pa_GMR_d_18_13 <- fit.classification(y=y, samples = samples, id = "result18_13_GMR_d", datapath = datapath, respath = respath,
profile_name = profile_name, method = "DRW", pranking = "t-test", classifier = "rf",
nFolds = 5, numTops=50, iter = 10)
save(res_pa_GMR_d_18_13, file=file.path('data/model/res_pa_GMR_d_18_13.RData'))
################################################### Result18_14: prob = 0.2, Gamma = 0.6 #################################################
#------------------------- RNAseq + Methyl + RPPA(diffused Pathway Graph) -------------------------#
gmr <- list(g, m, r)
testStatistic <- c("DESeq2", "t-test", "t-test")
profile_name <- c("rna(Entrez)", "meth(Entrez)", "rppa(diffused_Pathway_Graph_Entrez)")
x=list(rnaseq, imputed_methyl, rppa)
fit.iDRWPClass(x=x, y=y, globalGraph=gmr, testStatistic= testStatistic, profile_name = profile_name,
datapath = datapath, respath = respath, pathSet=pathSet, method = "DRW", samples = samples,
id = "result18_14_GMR_d", prob = 0.2, Gamma = 0.6, pranking = "t-test", mode = "GMR_d", AntiCorr=FALSE, DEBUG=TRUE)
res_pa_GMR_d_18_14 <- fit.classification(y=y, samples = samples, id = "result18_14_GMR_d", datapath = datapath, respath = respath,
profile_name = profile_name, method = "DRW", pranking = "t-test", classifier = "rf",
nFolds = 5, numTops=50, iter = 10)
save(res_pa_GMR_d_18_14, file=file.path('data/model/res_pa_GMR_d_18_14.RData'))
################################################### Result18_15: prob = 0.2, Gamma = 0.8 #################################################
#------------------------- RNAseq + Methyl + RPPA(diffused Pathway Graph) -------------------------#
gmr <- list(g, m, r)
testStatistic <- c("DESeq2", "t-test", "t-test")
profile_name <- c("rna(Entrez)", "meth(Entrez)", "rppa(diffused_Pathway_Graph_Entrez)")
x=list(rnaseq, imputed_methyl, rppa)
fit.iDRWPClass(x=x, y=y, globalGraph=gmr, testStatistic= testStatistic, profile_name = profile_name,
datapath = datapath, respath = respath, pathSet=pathSet, method = "DRW", samples = samples,
id = "result18_15_GMR_d", prob = 0.2, Gamma = 0.8, pranking = "t-test", mode = "GMR_d", AntiCorr=FALSE, DEBUG=TRUE)
res_pa_GMR_d_18_15 <- fit.classification(y=y, samples = samples, id = "result18_15_GMR_d", datapath = datapath, respath = respath,
profile_name = profile_name, method = "DRW", pranking = "t-test", classifier = "rf",
nFolds = 5, numTops=50, iter = 10)
save(res_pa_GMR_d_18_15, file=file.path('data/model/res_pa_GMR_d_18_15.RData'))
################################################### Result18_16: prob = 0.4, Gamma = 0 #################################################
#------------------------- RNAseq + Methyl + RPPA(diffused Pathway Graph) -------------------------#
gmr <- list(g, m, r)
testStatistic <- c("DESeq2", "t-test", "t-test")
profile_name <- c("rna(Entrez)", "meth(Entrez)", "rppa(diffused_Pathway_Graph_Entrez)")
x=list(rnaseq, imputed_methyl, rppa)
fit.iDRWPClass(x=x, y=y, globalGraph=gmr, testStatistic= testStatistic, profile_name = profile_name,
datapath = datapath, respath = respath, pathSet=pathSet, method = "DRW", samples = samples,
id = "result18_16_GMR_d", prob = 0.4, Gamma = 0, pranking = "t-test", mode = "GMR_d", AntiCorr=FALSE, DEBUG=TRUE)
res_pa_GMR_d_18_16 <- fit.classification(y=y, samples = samples, id = "result18_16_GMR_d", datapath = datapath, respath = respath,
profile_name = profile_name, method = "DRW", pranking = "t-test", classifier = "rf",
nFolds = 5, numTops=50, iter = 10)
save(res_pa_GMR_d_18_16, file=file.path('data/model/res_pa_GMR_d_18_16.RData'))
################################################### Result18_17: prob = 0.4, Gamma = 0.2 #################################################
#------------------------- RNAseq + Methyl + RPPA(diffused Pathway Graph) -------------------------#
gmr <- list(g, m, r)
testStatistic <- c("DESeq2", "t-test", "t-test")
profile_name <- c("rna(Entrez)", "meth(Entrez)", "rppa(diffused_Pathway_Graph_Entrez)")
x=list(rnaseq, imputed_methyl, rppa)
fit.iDRWPClass(x=x, y=y, globalGraph=gmr, testStatistic= testStatistic, profile_name = profile_name,
datapath = datapath, respath = respath, pathSet=pathSet, method = "DRW", samples = samples,
id = "result18_17_GMR_d", prob = 0.4, Gamma = 0.2, pranking = "t-test", mode = "GMR_d", AntiCorr=FALSE, DEBUG=TRUE)
res_pa_GMR_d_18_17 <- fit.classification(y=y, samples = samples, id = "result18_17_GMR_d", datapath = datapath, respath = respath,
profile_name = profile_name, method = "DRW", pranking = "t-test", classifier = "rf",
nFolds = 5, numTops=50, iter = 10)
save(res_pa_GMR_d_18_17, file=file.path('data/model/res_pa_GMR_d_18_17.RData'))
################################################### Result18_18: prob = 0.4, Gamma = 0.4 #################################################
#------------------------- RNAseq + Methyl + RPPA(diffused Pathway Graph) -------------------------#
gmr <- list(g, m, r)
testStatistic <- c("DESeq2", "t-test", "t-test")
profile_name <- c("rna(Entrez)", "meth(Entrez)", "rppa(diffused_Pathway_Graph_Entrez)")
x=list(rnaseq, imputed_methyl, rppa)
fit.iDRWPClass(x=x, y=y, globalGraph=gmr, testStatistic= testStatistic, profile_name = profile_name,
datapath = datapath, respath = respath, pathSet=pathSet, method = "DRW", samples = samples,
id = "result18_18_GMR_d", prob = 0.4, Gamma = 0.4, pranking = "t-test", mode = "GMR_d", AntiCorr=FALSE, DEBUG=TRUE)
res_pa_GMR_d_18_18 <- fit.classification(y=y, samples = samples, id = "result18_18_GMR_d", datapath = datapath, respath = respath,
profile_name = profile_name, method = "DRW", pranking = "t-test", classifier = "rf",
nFolds = 5, numTops=50, iter = 10)
save(res_pa_GMR_d_18_18, file=file.path('data/model/res_pa_GMR_d_18_18.RData'))
################################################### Result18_19: prob = 0.4, Gamma = 0.6 #################################################
#------------------------- RNAseq + Methyl + RPPA(diffused Pathway Graph) -------------------------#
gmr <- list(g, m, r)
testStatistic <- c("DESeq2", "t-test", "t-test")
profile_name <- c("rna(Entrez)", "meth(Entrez)", "rppa(diffused_Pathway_Graph_Entrez)")
x=list(rnaseq, imputed_methyl, rppa)
fit.iDRWPClass(x=x, y=y, globalGraph=gmr, testStatistic= testStatistic, profile_name = profile_name,
datapath = datapath, respath = respath, pathSet=pathSet, method = "DRW", samples = samples,
id = "result18_19_GMR_d", prob = 0.4, Gamma = 0.6, pranking = "t-test", mode = "GMR_d", AntiCorr=FALSE, DEBUG=TRUE)
res_pa_GMR_d_18_19 <- fit.classification(y=y, samples = samples, id = "result18_19_GMR_d", datapath = datapath, respath = respath,
profile_name = profile_name, method = "DRW", pranking = "t-test", classifier = "rf",
nFolds = 5, numTops=50, iter = 10)
save(res_pa_GMR_d_18_19, file=file.path('data/model/res_pa_GMR_d_18_19.RData'))
################################################### Result18_20: prob = 0.4, Gamma = 0.8 #################################################
#------------------------- RNAseq + Methyl + RPPA(diffused Pathway Graph) -------------------------#
gmr <- list(g, m, r)
testStatistic <- c("DESeq2", "t-test", "t-test")
profile_name <- c("rna(Entrez)", "meth(Entrez)", "rppa(diffused_Pathway_Graph_Entrez)")
x=list(rnaseq, imputed_methyl, rppa)
fit.iDRWPClass(x=x, y=y, globalGraph=gmr, testStatistic= testStatistic, profile_name = profile_name,
datapath = datapath, respath = respath, pathSet=pathSet, method = "DRW", samples = samples,
id = "result18_20_GMR_d", prob = 0.4, Gamma = 0.8, pranking = "t-test", mode = "GMR_d", AntiCorr=FALSE, DEBUG=TRUE)
res_pa_GMR_d_18_20 <- fit.classification(y=y, samples = samples, id = "result18_20_GMR_d", datapath = datapath, respath = respath,
profile_name = profile_name, method = "DRW", pranking = "t-test", classifier = "rf",
nFolds = 5, numTops=50, iter = 10)
save(res_pa_GMR_d_18_20, file=file.path('data/model/res_pa_GMR_d_18_20.RData'))
################################################### Result18_21: prob = 0.6, Gamma = 0 #################################################
#------------------------- RNAseq + Methyl + RPPA(diffused Pathway Graph) -------------------------#
gmr <- list(g, m, r)
testStatistic <- c("DESeq2", "t-test", "t-test")
profile_name <- c("rna(Entrez)", "meth(Entrez)", "rppa(diffused_Pathway_Graph_Entrez)")
x=list(rnaseq, imputed_methyl, rppa)
fit.iDRWPClass(x=x, y=y, globalGraph=gmr, testStatistic= testStatistic, profile_name = profile_name,
datapath = datapath, respath = respath, pathSet=pathSet, method = "DRW", samples = samples,
id = "result18_21_GMR_d", prob = 0.6, Gamma = 0, pranking = "t-test", mode = "GMR_d", AntiCorr=FALSE, DEBUG=TRUE)
res_pa_GMR_d_18_21 <- fit.classification(y=y, samples = samples, id = "result18_21_GMR_d", datapath = datapath, respath = respath,
profile_name = profile_name, method = "DRW", pranking = "t-test", classifier = "rf",
nFolds = 5, numTops=50, iter = 10)
save(res_pa_GMR_d_18_21, file=file.path('data/model/res_pa_GMR_d_18_21.RData'))
################################################### Result18_22: prob = 0.6, Gamma = 0.2 #################################################
#------------------------- RNAseq + Methyl + RPPA(diffused Pathway Graph) -------------------------#
gmr <- list(g, m, r)
testStatistic <- c("DESeq2", "t-test", "t-test")
profile_name <- c("rna(Entrez)", "meth(Entrez)", "rppa(diffused_Pathway_Graph_Entrez)")
x=list(rnaseq, imputed_methyl, rppa)
fit.iDRWPClass(x=x, y=y, globalGraph=gmr, testStatistic= testStatistic, profile_name = profile_name,
datapath = datapath, respath = respath, pathSet=pathSet, method = "DRW", samples = samples,
id = "result18_22_GMR_d", prob = 0.6, Gamma = 0.2, pranking = "t-test", mode = "GMR_d", AntiCorr=FALSE, DEBUG=TRUE)
res_pa_GMR_d_18_22 <- fit.classification(y=y, samples = samples, id = "result18_22_GMR_d", datapath = datapath, respath = respath,
profile_name = profile_name, method = "DRW", pranking = "t-test", classifier = "rf",
nFolds = 5, numTops=50, iter = 10)
save(res_pa_GMR_d_18_22, file=file.path('data/model/res_pa_GMR_d_18_22.RData'))
################################################### Result18_23: prob = 0.6, Gamma = 0.4 #################################################
#------------------------- RNAseq + Methyl + RPPA(diffused Pathway Graph) -------------------------#
gmr <- list(g, m, r)
testStatistic <- c("DESeq2", "t-test", "t-test")
profile_name <- c("rna(Entrez)", "meth(Entrez)", "rppa(diffused_Pathway_Graph_Entrez)")
x=list(rnaseq, imputed_methyl, rppa)
fit.iDRWPClass(x=x, y=y, globalGraph=gmr, testStatistic= testStatistic, profile_name = profile_name,
datapath = datapath, respath = respath, pathSet=pathSet, method = "DRW", samples = samples,
id = "result18_23_GMR_d", prob = 0.6, Gamma = 0.4, pranking = "t-test", mode = "GMR_d", AntiCorr=FALSE, DEBUG=TRUE)
res_pa_GMR_d_18_23 <- fit.classification(y=y, samples = samples, id = "result18_23_GMR_d", datapath = datapath, respath = respath,
profile_name = profile_name, method = "DRW", pranking = "t-test", classifier = "rf",
nFolds = 5, numTops=50, iter = 10)
save(res_pa_GMR_d_18_23, file=file.path('data/model/res_pa_GMR_d_18_23.RData'))
################################################### Result18_24: prob = 0.6, Gamma = 0.6 #################################################
#------------------------- RNAseq + Methyl + RPPA(diffused Pathway Graph) -------------------------#
gmr <- list(g, m, r)
testStatistic <- c("DESeq2", "t-test", "t-test")
profile_name <- c("rna(Entrez)", "meth(Entrez)", "rppa(diffused_Pathway_Graph_Entrez)")
x=list(rnaseq, imputed_methyl, rppa)
fit.iDRWPClass(x=x, y=y, globalGraph=gmr, testStatistic= testStatistic, profile_name = profile_name,
datapath = datapath, respath = respath, pathSet=pathSet, method = "DRW", samples = samples,
id = "result18_24_GMR_d", prob = 0.6, Gamma = 0.6, pranking = "t-test", mode = "GMR_d", AntiCorr=FALSE, DEBUG=TRUE)
res_pa_GMR_d_18_24 <- fit.classification(y=y, samples = samples, id = "result18_24_GMR_d", datapath = datapath, respath = respath,
profile_name = profile_name, method = "DRW", pranking = "t-test", classifier = "rf",
nFolds = 5, numTops=50, iter = 10)
save(res_pa_GMR_d_18_24, file=file.path('data/model/res_pa_GMR_d_18_24.RData'))
################################################### Result18_25: prob = 0.6, Gamma = 0.8 #################################################
#------------------------- RNAseq + Methyl + RPPA(diffused Pathway Graph) -------------------------#
gmr <- list(g, m, r)
testStatistic <- c("DESeq2", "t-test", "t-test")
profile_name <- c("rna(Entrez)", "meth(Entrez)", "rppa(diffused_Pathway_Graph_Entrez)")
x=list(rnaseq, imputed_methyl, rppa)
fit.iDRWPClass(x=x, y=y, globalGraph=gmr, testStatistic= testStatistic, profile_name = profile_name,
datapath = datapath, respath = respath, pathSet=pathSet, method = "DRW", samples = samples,
id = "result18_25_GMR_d", prob = 0.6, Gamma = 0.8, pranking = "t-test", mode = "GMR_d", AntiCorr=FALSE, DEBUG=TRUE)
res_pa_GMR_d_18_25 <- fit.classification(y=y, samples = samples, id = "result18_25_GMR_d", datapath = datapath, respath = respath,
profile_name = profile_name, method = "DRW", pranking = "t-test", classifier = "rf",
nFolds = 5, numTops=50, iter = 10)
save(res_pa_GMR_d_18_25, file=file.path('data/model/res_pa_GMR_d_18_25.RData'))
################################################### Result18_26: prob = 0.8, Gamma = 0 #################################################
#------------------------- RNAseq + Methyl + RPPA(diffused Pathway Graph) -------------------------#
gmr <- list(g, m, r)
testStatistic <- c("DESeq2", "t-test", "t-test")
profile_name <- c("rna(Entrez)", "meth(Entrez)", "rppa(diffused_Pathway_Graph_Entrez)")
x=list(rnaseq, imputed_methyl, rppa)
fit.iDRWPClass(x=x, y=y, globalGraph=gmr, testStatistic= testStatistic, profile_name = profile_name,
datapath = datapath, respath = respath, pathSet=pathSet, method = "DRW", samples = samples,
id = "result18_26_GMR_d", prob = 0.8, Gamma = 0, pranking = "t-test", mode = "GMR_d", AntiCorr=FALSE, DEBUG=TRUE)
res_pa_GMR_d_18_26 <- fit.classification(y=y, samples = samples, id = "result18_26_GMR_d", datapath = datapath, respath = respath,
profile_name = profile_name, method = "DRW", pranking = "t-test", classifier = "rf",
nFolds = 5, numTops=50, iter = 10)
save(res_pa_GMR_d_18_26, file=file.path('data/model/res_pa_GMR_d_18_26.RData'))
################################################### Result18_27: prob = 0.8, Gamma = 0.2 #################################################
#------------------------- RNAseq + Methyl + RPPA(diffused Pathway Graph) -------------------------#
gmr <- list(g, m, r)
testStatistic <- c("DESeq2", "t-test", "t-test")
profile_name <- c("rna(Entrez)", "meth(Entrez)", "rppa(diffused_Pathway_Graph_Entrez)")
x=list(rnaseq, imputed_methyl, rppa)
fit.iDRWPClass(x=x, y=y, globalGraph=gmr, testStatistic= testStatistic, profile_name = profile_name,
datapath = datapath, respath = respath, pathSet=pathSet, method = "DRW", samples = samples,
id = "result18_27_GMR_d", prob = 0.8, Gamma = 0.2, pranking = "t-test", mode = "GMR_d", AntiCorr=FALSE, DEBUG=TRUE)
res_pa_GMR_d_18_27 <- fit.classification(y=y, samples = samples, id = "result18_27_GMR_d", datapath = datapath, respath = respath,
profile_name = profile_name, method = "DRW", pranking = "t-test", classifier = "rf",
nFolds = 5, numTops=50, iter = 10)
save(res_pa_GMR_d_18_27, file=file.path('data/model/res_pa_GMR_d_18_27.RData'))
################################################### Result18_28: prob = 0.8, Gamma = 0.4 #################################################
#------------------------- RNAseq + Methyl + RPPA(diffused Pathway Graph) -------------------------#
gmr <- list(g, m, r)
testStatistic <- c("DESeq2", "t-test", "t-test")
profile_name <- c("rna(Entrez)", "meth(Entrez)", "rppa(diffused_Pathway_Graph_Entrez)")
x=list(rnaseq, imputed_methyl, rppa)
fit.iDRWPClass(x=x, y=y, globalGraph=gmr, testStatistic= testStatistic, profile_name = profile_name,
datapath = datapath, respath = respath, pathSet=pathSet, method = "DRW", samples = samples,
id = "result18_28_GMR_d", prob = 0.8, Gamma = 0.4, pranking = "t-test", mode = "GMR_d", AntiCorr=FALSE, DEBUG=TRUE)
res_pa_GMR_d_18_28 <- fit.classification(y=y, samples = samples, id = "result18_28_GMR_d", datapath = datapath, respath = respath,
profile_name = profile_name, method = "DRW", pranking = "t-test", classifier = "rf",
nFolds = 5, numTops=50, iter = 10)
save(res_pa_GMR_d_18_28, file=file.path('data/model/res_pa_GMR_d_18_28.RData'))
################################################### Result18_29: prob = 0.8, Gamma = 0.6 #################################################
#------------------------- RNAseq + Methyl + RPPA(diffused Pathway Graph) -------------------------#
gmr <- list(g, m, r)
testStatistic <- c("DESeq2", "t-test", "t-test")
profile_name <- c("rna(Entrez)", "meth(Entrez)", "rppa(diffused_Pathway_Graph_Entrez)")
x=list(rnaseq, imputed_methyl, rppa)
fit.iDRWPClass(x=x, y=y, globalGraph=gmr, testStatistic= testStatistic, profile_name = profile_name,
datapath = datapath, respath = respath, pathSet=pathSet, method = "DRW", samples = samples,
id = "result18_29_GMR_d", prob = 0.8, Gamma = 0.6, pranking = "t-test", mode = "GMR_d", AntiCorr=FALSE, DEBUG=TRUE)
res_pa_GMR_d_18_29 <- fit.classification(y=y, samples = samples, id = "result18_29_GMR_d", datapath = datapath, respath = respath,
profile_name = profile_name, method = "DRW", pranking = "t-test", classifier = "rf",
nFolds = 5, numTops=50, iter = 10)
save(res_pa_GMR_d_18_29, file=file.path('data/model/res_pa_GMR_d_18_29.RData'))
################################################### Result18_30: prob = 0.8, Gamma = 0.8 #################################################
#------------------------- RNAseq + Methyl + RPPA(diffused Pathway Graph) -------------------------#
gmr <- list(g, m, r)
testStatistic <- c("DESeq2", "t-test", "t-test")
profile_name <- c("rna(Entrez)", "meth(Entrez)", "rppa(diffused_Pathway_Graph_Entrez)")
x=list(rnaseq, imputed_methyl, rppa)
fit.iDRWPClass(x=x, y=y, globalGraph=gmr, testStatistic= testStatistic, profile_name = profile_name,
datapath = datapath, respath = respath, pathSet=pathSet, method = "DRW", samples = samples,
id = "result18_30_GMR_d", prob = 0.8, Gamma = 0.8, pranking = "t-test", mode = "GMR_d", AntiCorr=FALSE, DEBUG=TRUE)
res_pa_GMR_d_18_30 <- fit.classification(y=y, samples = samples, id = "result18_30_GMR_d", datapath = datapath, respath = respath,
profile_name = profile_name, method = "DRW", pranking = "t-test", classifier = "rf",
nFolds = 5, numTops=50, iter = 10)
save(res_pa_GMR_d_18_30, file=file.path('data/model/res_pa_GMR_d_18_30.RData'))
# ------------------------------- plot -------------------------------------- #
# Collect the 30 GMR_d grid-search results and draw a faceted boxplot of
# classification accuracy across all (prob, Gamma) settings.
res_gmr_d <- list(res_pa_GMR_d_18_1, res_pa_GMR_d_18_2, res_pa_GMR_d_18_3, res_pa_GMR_d_18_4, res_pa_GMR_d_18_5,
                  res_pa_GMR_d_18_6, res_pa_GMR_d_18_7, res_pa_GMR_d_18_8, res_pa_GMR_d_18_9, res_pa_GMR_d_18_10,
                  res_pa_GMR_d_18_11, res_pa_GMR_d_18_12, res_pa_GMR_d_18_13, res_pa_GMR_d_18_14, res_pa_GMR_d_18_15,
                  res_pa_GMR_d_18_16, res_pa_GMR_d_18_17, res_pa_GMR_d_18_18, res_pa_GMR_d_18_19, res_pa_GMR_d_18_20,
                  res_pa_GMR_d_18_21, res_pa_GMR_d_18_22, res_pa_GMR_d_18_23, res_pa_GMR_d_18_24, res_pa_GMR_d_18_25,
                  res_pa_GMR_d_18_26, res_pa_GMR_d_18_27, res_pa_GMR_d_18_28, res_pa_GMR_d_18_29, res_pa_GMR_d_18_30)
title <- c("Result 18_GMR_d")
# One x-axis label per grid cell, in the same order as res_gmr_d.
xlabs <- c("[p=0.001,g=0]", "[p=0.001,g=0.2]", "[p=0.001,g=0.4]", "[p=0.001,g=0.6]", "[p=0.001,g=0.8]",
           "[p=0.01,g=0]", "[p=0.01,g=0.2]", "[p=0.01,g=0.4]", "[p=0.01,g=0.6]", "[p=0.01,g=0.8]",
           "[p=0.2,g=0]", "[p=0.2,g=0.2]", "[p=0.2,g=0.4]", "[p=0.2,g=0.6]", "[p=0.2,g=0.8]",
           "[p=0.4,g=0]", "[p=0.4,g=0.2]", "[p=0.4,g=0.4]", "[p=0.4,g=0.6]", "[p=0.4,g=0.8]",
           "[p=0.6,g=0]", "[p=0.6,g=0.2]", "[p=0.6,g=0.4]", "[p=0.6,g=0.6]", "[p=0.6,g=0.8]",
           "[p=0.8,g=0]", "[p=0.8,g=0.2]", "[p=0.8,g=0.4]", "[p=0.8,g=0.6]", "[p=0.8,g=0.8]")
# Best (maximum) accuracy of each model, computed once; the original ran the
# same sapply() twice.  vapply() guarantees a numeric(1) per model.
best_acc <- vapply(X = res_gmr_d,
                   FUN = function(res) max(res$results$Accuracy),
                   FUN.VALUE = numeric(1))
perf_min <- min(best_acc)
perf_max <- max(best_acc)
# Pad the y-axis by +/- 0.15 around the observed accuracy range.
# NOTE(review): the trailing positional `perf_max` argument is preserved from
# the original call, but it looks like an accidental duplicate of the named
# `perf_max =` argument — confirm against perf_facet_boxplot()'s signature.
perf_facet_boxplot(title, xlabs, res_gmr_d, perf_min = perf_min-0.15, perf_max = perf_max+0.15, perf_max)
##################################################################################################################
################################## GMP ######################################################################
# --------------------------------------------------------------------------- #
# Results 18_1 .. 18_6: same (prob, Gamma) grid search as above, but for the
# GMP model (RNAseq + Methyl + RPPA on the PPI graph).
#
# Linear index layout (identical to the original copy-pasted blocks):
#   idx = 1 .. 5 -> prob = 0.001, Gamma = 0, 0.2, 0.4, 0.6, 0.8
#   idx = 6      -> prob = 0.01,  Gamma = 0
# (Later grid cells continue below this section.)
# --------------------------------------------------------------------------- #
grid_probs  <- c(0.001, 0.01, 0.2, 0.4, 0.6, 0.8)
grid_gammas <- c(0, 0.2, 0.4, 0.6, 0.8)

# Inputs shared by every grid cell; note the RPPA profile here uses the PPI
# graph ("rppa(Entrez)") and the graph list is (g, m, p), unlike GMR_d.
testStatistic <- c("DESeq2", "t-test", "t-test")
profile_name <- c("rna(Entrez)", "meth(Entrez)", "rppa(Entrez)")
gmp <- list(g, m, p)
x <- list(rnaseq, imputed_methyl, rppa)

for (idx in 1:6) {
  # Decode (prob, Gamma) from the linear index: 5 Gamma values per prob level.
  prob  <- grid_probs[(idx - 1) %/% 5 + 1]
  gamma <- grid_gammas[(idx - 1) %% 5 + 1]

  run_id <- sprintf("result18_%d_GMP", idx)

  # Fit the integrative DRW pathway model for this (prob, Gamma) cell; its
  # output is written under `run_id` in respath for fit.classification().
  fit.iDRWPClass(x = x, y = y, globalGraph = gmp, testStatistic = testStatistic,
                 profile_name = profile_name, datapath = datapath,
                 respath = respath, pathSet = pathSet, method = "DRW",
                 samples = samples, id = run_id, prob = prob, Gamma = gamma,
                 pranking = "t-test", mode = "GMP", AntiCorr = FALSE,
                 DEBUG = TRUE)

  # 5-fold random-forest classification on the top 50 ranked pathways,
  # repeated 10 times (same settings as every original block).
  res <- fit.classification(y = y, samples = samples, id = run_id,
                            datapath = datapath, respath = respath,
                            profile_name = profile_name, method = "DRW",
                            pranking = "t-test", classifier = "rf",
                            nFolds = 5, numTops = 50, iter = 10)

  # Keep the original per-cell object names (res_pa_GMP_18_<idx>) used by the
  # plotting code later in the file, and save each object under that name.
  res_name <- sprintf("res_pa_GMP_18_%d", idx)
  assign(res_name, res)
  save(list = res_name,
       file = file.path(sprintf("data/model/res_pa_GMP_18_%d.RData", idx)))
}
################################################### Result18_7: prob = 0.01, Gamma = 0.2 #################################################
#------------------------- RNAseq + Methyl + RPPA(PPI Graph) -------------------------#
testStatistic <- c("DESeq2", "t-test", "t-test")
profile_name <- c("rna(Entrez)", "meth(Entrez)", "rppa(Entrez)")
gmp <- list(g, m, p)
x=list(rnaseq, imputed_methyl, rppa)
fit.iDRWPClass(x=x, y=y, globalGraph=gmp, testStatistic= testStatistic, profile_name = profile_name,
datapath = datapath, respath = respath, pathSet=pathSet, method = "DRW", samples = samples,
id = "result18_7_GMP", prob = 0.01, Gamma = 0.2, pranking = "t-test", mode = "GMP", AntiCorr=FALSE, DEBUG=TRUE)
res_pa_GMP_18_7 <- fit.classification(y=y, samples = samples, id = "result18_7_GMP", datapath = datapath, respath = respath,
profile_name = profile_name, method = "DRW", pranking = "t-test", classifier = "rf",
nFolds = 5, numTops=50, iter = 10)
save(res_pa_GMP_18_7, file=file.path('data/model/res_pa_GMP_18_7.RData'))
################################################### Result18_8: prob = 0.01, Gamma = 0.4 #################################################
#------------------------- RNAseq + Methyl + RPPA(PPI Graph) -------------------------#
testStatistic <- c("DESeq2", "t-test", "t-test")
profile_name <- c("rna(Entrez)", "meth(Entrez)", "rppa(Entrez)")
gmp <- list(g, m, p)
x=list(rnaseq, imputed_methyl, rppa)
fit.iDRWPClass(x=x, y=y, globalGraph=gmp, testStatistic= testStatistic, profile_name = profile_name,
datapath = datapath, respath = respath, pathSet=pathSet, method = "DRW", samples = samples,
id = "result18_8_GMP", prob = 0.01, Gamma = 0.4, pranking = "t-test", mode = "GMP", AntiCorr=FALSE, DEBUG=TRUE)
res_pa_GMP_18_8 <- fit.classification(y=y, samples = samples, id = "result18_8_GMP", datapath = datapath, respath = respath,
profile_name = profile_name, method = "DRW", pranking = "t-test", classifier = "rf",
nFolds = 5, numTops=50, iter = 10)
save(res_pa_GMP_18_8, file=file.path('data/model/res_pa_GMP_18_8.RData'))
################################################### Result18_9: prob = 0.01, Gamma = 0.6 #################################################
#------------------------- RNAseq + Methyl + RPPA(PPI Graph) -------------------------#
testStatistic <- c("DESeq2", "t-test", "t-test")
profile_name <- c("rna(Entrez)", "meth(Entrez)", "rppa(Entrez)")
gmp <- list(g, m, p)
x=list(rnaseq, imputed_methyl, rppa)
fit.iDRWPClass(x=x, y=y, globalGraph=gmp, testStatistic= testStatistic, profile_name = profile_name,
datapath = datapath, respath = respath, pathSet=pathSet, method = "DRW", samples = samples,
id = "result18_9_GMP", prob = 0.01, Gamma = 0.6, pranking = "t-test", mode = "GMP", AntiCorr=FALSE, DEBUG=TRUE)
res_pa_GMP_18_9 <- fit.classification(y=y, samples = samples, id = "result18_9_GMP", datapath = datapath, respath = respath,
profile_name = profile_name, method = "DRW", pranking = "t-test", classifier = "rf",
nFolds = 5, numTops=50, iter = 10)
save(res_pa_GMP_18_9, file=file.path('data/model/res_pa_GMP_18_9.RData'))
################################################### Result18_10: prob = 0.01, Gamma = 0.8 #################################################
#------------------------- RNAseq + Methyl + RPPA(PPI Graph) -------------------------#
testStatistic <- c("DESeq2", "t-test", "t-test")
profile_name <- c("rna(Entrez)", "meth(Entrez)", "rppa(Entrez)")
gmp <- list(g, m, p)
x=list(rnaseq, imputed_methyl, rppa)
fit.iDRWPClass(x=x, y=y, globalGraph=gmp, testStatistic= testStatistic, profile_name = profile_name,
datapath = datapath, respath = respath, pathSet=pathSet, method = "DRW", samples = samples,
id = "result18_10_GMP", prob = 0.01, Gamma = 0.8, pranking = "t-test", mode = "GMP", AntiCorr=FALSE, DEBUG=TRUE)
res_pa_GMP_18_10 <- fit.classification(y=y, samples = samples, id = "result18_10_GMP", datapath = datapath, respath = respath,
profile_name = profile_name, method = "DRW", pranking = "t-test", classifier = "rf",
nFolds = 5, numTops=50, iter = 10)
save(res_pa_GMP_18_10, file=file.path('data/model/res_pa_GMP_18_10.RData'))
################################################### Result18_11: prob = 0.2, Gamma = 0 #################################################
#------------------------- RNAseq + Methyl + RPPA(PPI Graph) -------------------------#
testStatistic <- c("DESeq2", "t-test", "t-test")
profile_name <- c("rna(Entrez)", "meth(Entrez)", "rppa(Entrez)")
gmp <- list(g, m, p)
x=list(rnaseq, imputed_methyl, rppa)
fit.iDRWPClass(x=x, y=y, globalGraph=gmp, testStatistic= testStatistic, profile_name = profile_name,
datapath = datapath, respath = respath, pathSet=pathSet, method = "DRW", samples = samples,
id = "result18_11_GMP", prob = 0.2, Gamma = 0, pranking = "t-test", mode = "GMP", AntiCorr=FALSE, DEBUG=TRUE)
res_pa_GMP_18_11 <- fit.classification(y=y, samples = samples, id = "result18_11_GMP", datapath = datapath, respath = respath,
profile_name = profile_name, method = "DRW", pranking = "t-test", classifier = "rf",
nFolds = 5, numTops=50, iter = 10)
save(res_pa_GMP_18_11, file=file.path('data/model/res_pa_GMP_18_11.RData'))
################################################### Result18_12: prob = 0.2, Gamma = 0.2 #################################################
#------------------------- RNAseq + Methyl + RPPA(PPI Graph) -------------------------#
testStatistic <- c("DESeq2", "t-test", "t-test")
profile_name <- c("rna(Entrez)", "meth(Entrez)", "rppa(Entrez)")
gmp <- list(g, m, p)
x=list(rnaseq, imputed_methyl, rppa)
fit.iDRWPClass(x=x, y=y, globalGraph=gmp, testStatistic= testStatistic, profile_name = profile_name,
datapath = datapath, respath = respath, pathSet=pathSet, method = "DRW", samples = samples,
id = "result18_12_GMP", prob = 0.2, Gamma = 0.2, pranking = "t-test", mode = "GMP", AntiCorr=FALSE, DEBUG=TRUE)
res_pa_GMP_18_12 <- fit.classification(y=y, samples = samples, id = "result18_12_GMP", datapath = datapath, respath = respath,
profile_name = profile_name, method = "DRW", pranking = "t-test", classifier = "rf",
nFolds = 5, numTops=50, iter = 10)
save(res_pa_GMP_18_12, file=file.path('data/model/res_pa_GMP_18_12.RData'))
################################################### Result18_13: prob = 0.2, Gamma = 0.4 #################################################
#------------------------- RNAseq + Methyl + RPPA(PPI Graph) -------------------------#
testStatistic <- c("DESeq2", "t-test", "t-test")
profile_name <- c("rna(Entrez)", "meth(Entrez)", "rppa(Entrez)")
gmp <- list(g, m, p)
x=list(rnaseq, imputed_methyl, rppa)
fit.iDRWPClass(x=x, y=y, globalGraph=gmp, testStatistic= testStatistic, profile_name = profile_name,
datapath = datapath, respath = respath, pathSet=pathSet, method = "DRW", samples = samples,
id = "result18_13_GMP", prob = 0.2, Gamma = 0.4, pranking = "t-test", mode = "GMP", AntiCorr=FALSE, DEBUG=TRUE)
res_pa_GMP_18_13 <- fit.classification(y=y, samples = samples, id = "result18_13_GMP", datapath = datapath, respath = respath,
profile_name = profile_name, method = "DRW", pranking = "t-test", classifier = "rf",
nFolds = 5, numTops=50, iter = 10)
save(res_pa_GMP_18_13, file=file.path('data/model/res_pa_GMP_18_13.RData'))
################################################### Result18_14: prob = 0.2, Gamma = 0.6 #################################################
#------------------------- RNAseq + Methyl + RPPA(PPI Graph) -------------------------#
testStatistic <- c("DESeq2", "t-test", "t-test")
profile_name <- c("rna(Entrez)", "meth(Entrez)", "rppa(Entrez)")
gmp <- list(g, m, p)
x=list(rnaseq, imputed_methyl, rppa)
fit.iDRWPClass(x=x, y=y, globalGraph=gmp, testStatistic= testStatistic, profile_name = profile_name,
datapath = datapath, respath = respath, pathSet=pathSet, method = "DRW", samples = samples,
id = "result18_14_GMP", prob = 0.2, Gamma = 0.6, pranking = "t-test", mode = "GMP", AntiCorr=FALSE, DEBUG=TRUE)
res_pa_GMP_18_14 <- fit.classification(y=y, samples = samples, id = "result18_14_GMP", datapath = datapath, respath = respath,
profile_name = profile_name, method = "DRW", pranking = "t-test", classifier = "rf",
nFolds = 5, numTops=50, iter = 10)
save(res_pa_GMP_18_14, file=file.path('data/model/res_pa_GMP_18_14.RData'))
################################################### Result18_15: prob = 0.2, Gamma = 0.8 #################################################
#------------------------- RNAseq + Methyl + RPPA(PPI Graph) -------------------------#
testStatistic <- c("DESeq2", "t-test", "t-test")
profile_name <- c("rna(Entrez)", "meth(Entrez)", "rppa(Entrez)")
gmp <- list(g, m, p)
x=list(rnaseq, imputed_methyl, rppa)
fit.iDRWPClass(x=x, y=y, globalGraph=gmp, testStatistic= testStatistic, profile_name = profile_name,
datapath = datapath, respath = respath, pathSet=pathSet, method = "DRW", samples = samples,
id = "result18_15_GMP", prob = 0.2, Gamma = 0.8, pranking = "t-test", mode = "GMP", AntiCorr=FALSE, DEBUG=TRUE)
res_pa_GMP_18_15 <- fit.classification(y=y, samples = samples, id = "result18_15_GMP", datapath = datapath, respath = respath,
profile_name = profile_name, method = "DRW", pranking = "t-test", classifier = "rf",
nFolds = 5, numTops=50, iter = 10)
save(res_pa_GMP_18_15, file=file.path('data/model/res_pa_GMP_18_15.RData'))
################################################### Result18_16: prob = 0.4, Gamma = 0 #################################################
#------------------------- RNAseq + Methyl + RPPA(PPI Graph) -------------------------#
testStatistic <- c("DESeq2", "t-test", "t-test")
profile_name <- c("rna(Entrez)", "meth(Entrez)", "rppa(Entrez)")
gmp <- list(g, m, p)
x=list(rnaseq, imputed_methyl, rppa)
fit.iDRWPClass(x=x, y=y, globalGraph=gmp, testStatistic= testStatistic, profile_name = profile_name,
datapath = datapath, respath = respath, pathSet=pathSet, method = "DRW", samples = samples,
id = "result18_16_GMP", prob = 0.4, Gamma = 0, pranking = "t-test", mode = "GMP", AntiCorr=FALSE, DEBUG=TRUE)
res_pa_GMP_18_16 <- fit.classification(y=y, samples = samples, id = "result18_16_GMP", datapath = datapath, respath = respath,
profile_name = profile_name, method = "DRW", pranking = "t-test", classifier = "rf",
nFolds = 5, numTops=50, iter = 10)
save(res_pa_GMP_18_16, file=file.path('data/model/res_pa_GMP_18_16.RData'))
################################################### Result18_17: prob = 0.4, Gamma = 0.2 #################################################
#------------------------- RNAseq + Methyl + RPPA(PPI Graph) -------------------------#
testStatistic <- c("DESeq2", "t-test", "t-test")
profile_name <- c("rna(Entrez)", "meth(Entrez)", "rppa(Entrez)")
gmp <- list(g, m, p)
x=list(rnaseq, imputed_methyl, rppa)
fit.iDRWPClass(x=x, y=y, globalGraph=gmp, testStatistic= testStatistic, profile_name = profile_name,
datapath = datapath, respath = respath, pathSet=pathSet, method = "DRW", samples = samples,
id = "result18_17_GMP", prob = 0.4, Gamma = 0.2, pranking = "t-test", mode = "GMP", AntiCorr=FALSE, DEBUG=TRUE)
res_pa_GMP_18_17 <- fit.classification(y=y, samples = samples, id = "result18_17_GMP", datapath = datapath, respath = respath,
profile_name = profile_name, method = "DRW", pranking = "t-test", classifier = "rf",
nFolds = 5, numTops=50, iter = 10)
save(res_pa_GMP_18_17, file=file.path('data/model/res_pa_GMP_18_17.RData'))
################################################### Result18_18: prob = 0.4, Gamma = 0.4 #################################################
#------------------------- RNAseq + Methyl + RPPA(PPI Graph) -------------------------#
testStatistic <- c("DESeq2", "t-test", "t-test")
profile_name <- c("rna(Entrez)", "meth(Entrez)", "rppa(Entrez)")
gmp <- list(g, m, p)
x=list(rnaseq, imputed_methyl, rppa)
fit.iDRWPClass(x=x, y=y, globalGraph=gmp, testStatistic= testStatistic, profile_name = profile_name,
datapath = datapath, respath = respath, pathSet=pathSet, method = "DRW", samples = samples,
id = "result18_18_GMP", prob = 0.4, Gamma = 0.4, pranking = "t-test", mode = "GMP", AntiCorr=FALSE, DEBUG=TRUE)
res_pa_GMP_18_18 <- fit.classification(y=y, samples = samples, id = "result18_18_GMP", datapath = datapath, respath = respath,
profile_name = profile_name, method = "DRW", pranking = "t-test", classifier = "rf",
nFolds = 5, numTops=50, iter = 10)
save(res_pa_GMP_18_18, file=file.path('data/model/res_pa_GMP_18_18.RData'))
################################################### Result18_19: prob = 0.4, Gamma = 0.6 #################################################
#------------------------- RNAseq + Methyl + RPPA(PPI Graph) -------------------------#
testStatistic <- c("DESeq2", "t-test", "t-test")
profile_name <- c("rna(Entrez)", "meth(Entrez)", "rppa(Entrez)")
gmp <- list(g, m, p)
x=list(rnaseq, imputed_methyl, rppa)
fit.iDRWPClass(x=x, y=y, globalGraph=gmp, testStatistic= testStatistic, profile_name = profile_name,
datapath = datapath, respath = respath, pathSet=pathSet, method = "DRW", samples = samples,
id = "result18_19_GMP", prob = 0.4, Gamma = 0.6, pranking = "t-test", mode = "GMP", AntiCorr=FALSE, DEBUG=TRUE)
res_pa_GMP_18_19 <- fit.classification(y=y, samples = samples, id = "result18_19_GMP", datapath = datapath, respath = respath,
profile_name = profile_name, method = "DRW", pranking = "t-test", classifier = "rf",
nFolds = 5, numTops=50, iter = 10)
save(res_pa_GMP_18_19, file=file.path('data/model/res_pa_GMP_18_19.RData'))
################################################### Result18_20: prob = 0.4, Gamma = 0.8 #################################################
#------------------------- RNAseq + Methyl + RPPA(PPI Graph) -------------------------#
testStatistic <- c("DESeq2", "t-test", "t-test")
profile_name <- c("rna(Entrez)", "meth(Entrez)", "rppa(Entrez)")
gmp <- list(g, m, p)
x=list(rnaseq, imputed_methyl, rppa)
fit.iDRWPClass(x=x, y=y, globalGraph=gmp, testStatistic= testStatistic, profile_name = profile_name,
datapath = datapath, respath = respath, pathSet=pathSet, method = "DRW", samples = samples,
id = "result18_20_GMP", prob = 0.4, Gamma = 0.8, pranking = "t-test", mode = "GMP", AntiCorr=FALSE, DEBUG=TRUE)
res_pa_GMP_18_20 <- fit.classification(y=y, samples = samples, id = "result18_20_GMP", datapath = datapath, respath = respath,
profile_name = profile_name, method = "DRW", pranking = "t-test", classifier = "rf",
nFolds = 5, numTops=50, iter = 10)
save(res_pa_GMP_18_20, file=file.path('data/model/res_pa_GMP_18_20.RData'))
################################################### Result18_21: prob = 0.6, Gamma = 0 #################################################
#------------------------- RNAseq + Methyl + RPPA(PPI Graph) -------------------------#
testStatistic <- c("DESeq2", "t-test", "t-test")
profile_name <- c("rna(Entrez)", "meth(Entrez)", "rppa(Entrez)")
gmp <- list(g, m, p)
x=list(rnaseq, imputed_methyl, rppa)
fit.iDRWPClass(x=x, y=y, globalGraph=gmp, testStatistic= testStatistic, profile_name = profile_name,
datapath = datapath, respath = respath, pathSet=pathSet, method = "DRW", samples = samples,
id = "result18_21_GMP", prob = 0.6, Gamma = 0, pranking = "t-test", mode = "GMP", AntiCorr=FALSE, DEBUG=TRUE)
res_pa_GMP_18_21 <- fit.classification(y=y, samples = samples, id = "result18_21_GMP", datapath = datapath, respath = respath,
profile_name = profile_name, method = "DRW", pranking = "t-test", classifier = "rf",
nFolds = 5, numTops=50, iter = 10)
save(res_pa_GMP_18_21, file=file.path('data/model/res_pa_GMP_18_21.RData'))
################################################### Result18_22: prob = 0.6, Gamma = 0.2 #################################################
#------------------------- RNAseq + Methyl + RPPA(PPI Graph) -------------------------#
testStatistic <- c("DESeq2", "t-test", "t-test")
profile_name <- c("rna(Entrez)", "meth(Entrez)", "rppa(Entrez)")
gmp <- list(g, m, p)
x=list(rnaseq, imputed_methyl, rppa)
fit.iDRWPClass(x=x, y=y, globalGraph=gmp, testStatistic= testStatistic, profile_name = profile_name,
datapath = datapath, respath = respath, pathSet=pathSet, method = "DRW", samples = samples,
id = "result18_22_GMP", prob = 0.6, Gamma = 0.2, pranking = "t-test", mode = "GMP", AntiCorr=FALSE, DEBUG=TRUE)
res_pa_GMP_18_22 <- fit.classification(y=y, samples = samples, id = "result18_22_GMP", datapath = datapath, respath = respath,
profile_name = profile_name, method = "DRW", pranking = "t-test", classifier = "rf",
nFolds = 5, numTops=50, iter = 10)
save(res_pa_GMP_18_22, file=file.path('data/model/res_pa_GMP_18_22.RData'))
################################################### Result18_23: prob = 0.6, Gamma = 0.4 #################################################
#------------------------- RNAseq + Methyl + RPPA(PPI Graph) -------------------------#
testStatistic <- c("DESeq2", "t-test", "t-test")
profile_name <- c("rna(Entrez)", "meth(Entrez)", "rppa(Entrez)")
gmp <- list(g, m, p)
x=list(rnaseq, imputed_methyl, rppa)
fit.iDRWPClass(x=x, y=y, globalGraph=gmp, testStatistic= testStatistic, profile_name = profile_name,
datapath = datapath, respath = respath, pathSet=pathSet, method = "DRW", samples = samples,
id = "result18_23_GMP", prob = 0.6, Gamma = 0.4, pranking = "t-test", mode = "GMP", AntiCorr=FALSE, DEBUG=TRUE)
res_pa_GMP_18_23 <- fit.classification(y=y, samples = samples, id = "result18_23_GMP", datapath = datapath, respath = respath,
profile_name = profile_name, method = "DRW", pranking = "t-test", classifier = "rf",
nFolds = 5, numTops=50, iter = 10)
save(res_pa_GMP_18_23, file=file.path('data/model/res_pa_GMP_18_23.RData'))
################################################### Result18_24: prob = 0.6, Gamma = 0.6 #################################################
#------------------------- RNAseq + Methyl + RPPA(PPI Graph) -------------------------#
testStatistic <- c("DESeq2", "t-test", "t-test")
profile_name <- c("rna(Entrez)", "meth(Entrez)", "rppa(Entrez)")
gmp <- list(g, m, p)
x=list(rnaseq, imputed_methyl, rppa)
fit.iDRWPClass(x=x, y=y, globalGraph=gmp, testStatistic= testStatistic, profile_name = profile_name,
datapath = datapath, respath = respath, pathSet=pathSet, method = "DRW", samples = samples,
id = "result18_24_GMP", prob = 0.6, Gamma = 0.6, pranking = "t-test", mode = "GMP", AntiCorr=FALSE, DEBUG=TRUE)
res_pa_GMP_18_24 <- fit.classification(y=y, samples = samples, id = "result18_24_GMP", datapath = datapath, respath = respath,
profile_name = profile_name, method = "DRW", pranking = "t-test", classifier = "rf",
nFolds = 5, numTops=50, iter = 10)
save(res_pa_GMP_18_24, file=file.path('data/model/res_pa_GMP_18_24.RData'))
################################################### Result18_25: prob = 0.6, Gamma = 0.8 #################################################
#------------------------- RNAseq + Methyl + RPPA(PPI Graph) -------------------------#
testStatistic <- c("DESeq2", "t-test", "t-test")
profile_name <- c("rna(Entrez)", "meth(Entrez)", "rppa(Entrez)")
gmp <- list(g, m, p)
x=list(rnaseq, imputed_methyl, rppa)
fit.iDRWPClass(x=x, y=y, globalGraph=gmp, testStatistic= testStatistic, profile_name = profile_name,
datapath = datapath, respath = respath, pathSet=pathSet, method = "DRW", samples = samples,
id = "result18_25_GMP", prob = 0.6, Gamma = 0.8, pranking = "t-test", mode = "GMP", AntiCorr=FALSE, DEBUG=TRUE)
res_pa_GMP_18_25 <- fit.classification(y=y, samples = samples, id = "result18_25_GMP", datapath = datapath, respath = respath,
profile_name = profile_name, method = "DRW", pranking = "t-test", classifier = "rf",
nFolds = 5, numTops=50, iter = 10)
save(res_pa_GMP_18_25, file=file.path('data/model/res_pa_GMP_18_25.RData'))
################################################### Result18_26: prob = 0.8, Gamma = 0 #################################################
#------------------------- RNAseq + Methyl + RPPA(PPI Graph) -------------------------#
testStatistic <- c("DESeq2", "t-test", "t-test")
profile_name <- c("rna(Entrez)", "meth(Entrez)", "rppa(Entrez)")
gmp <- list(g, m, p)
x=list(rnaseq, imputed_methyl, rppa)
fit.iDRWPClass(x=x, y=y, globalGraph=gmp, testStatistic= testStatistic, profile_name = profile_name,
datapath = datapath, respath = respath, pathSet=pathSet, method = "DRW", samples = samples,
id = "result18_26_GMP", prob = 0.8, Gamma = 0, pranking = "t-test", mode = "GMP", AntiCorr=FALSE, DEBUG=TRUE)
res_pa_GMP_18_26 <- fit.classification(y=y, samples = samples, id = "result18_26_GMP", datapath = datapath, respath = respath,
profile_name = profile_name, method = "DRW", pranking = "t-test", classifier = "rf",
nFolds = 5, numTops=50, iter = 10)
save(res_pa_GMP_18_26, file=file.path('data/model/res_pa_GMP_18_26.RData'))
################################################### Result18_27: prob = 0.8, Gamma = 0.2 #################################################
#------------------------- RNAseq + Methyl + RPPA(PPI Graph) -------------------------#
testStatistic <- c("DESeq2", "t-test", "t-test")
profile_name <- c("rna(Entrez)", "meth(Entrez)", "rppa(Entrez)")
gmp <- list(g, m, p)
x=list(rnaseq, imputed_methyl, rppa)
fit.iDRWPClass(x=x, y=y, globalGraph=gmp, testStatistic= testStatistic, profile_name = profile_name,
datapath = datapath, respath = respath, pathSet=pathSet, method = "DRW", samples = samples,
id = "result18_27_GMP", prob = 0.8, Gamma = 0.2, pranking = "t-test", mode = "GMP", AntiCorr=FALSE, DEBUG=TRUE)
res_pa_GMP_18_27 <- fit.classification(y=y, samples = samples, id = "result18_27_GMP", datapath = datapath, respath = respath,
profile_name = profile_name, method = "DRW", pranking = "t-test", classifier = "rf",
nFolds = 5, numTops=50, iter = 10)
save(res_pa_GMP_18_27, file=file.path('data/model/res_pa_GMP_18_27.RData'))
################################################### Result18_28: prob = 0.8, Gamma = 0.4 #################################################
#------------------------- RNAseq + Methyl + RPPA(PPI Graph) -------------------------#
testStatistic <- c("DESeq2", "t-test", "t-test")
profile_name <- c("rna(Entrez)", "meth(Entrez)", "rppa(Entrez)")
gmp <- list(g, m, p)
x=list(rnaseq, imputed_methyl, rppa)
fit.iDRWPClass(x=x, y=y, globalGraph=gmp, testStatistic= testStatistic, profile_name = profile_name,
datapath = datapath, respath = respath, pathSet=pathSet, method = "DRW", samples = samples,
id = "result18_28_GMP", prob = 0.8, Gamma = 0.4, pranking = "t-test", mode = "GMP", AntiCorr=FALSE, DEBUG=TRUE)
res_pa_GMP_18_28 <- fit.classification(y=y, samples = samples, id = "result18_28_GMP", datapath = datapath, respath = respath,
profile_name = profile_name, method = "DRW", pranking = "t-test", classifier = "rf",
nFolds = 5, numTops=50, iter = 10)
save(res_pa_GMP_18_28, file=file.path('data/model/res_pa_GMP_18_28.RData'))
################################################### Result18_29: prob = 0.8, Gamma = 0.6 #################################################
#------------------------- RNAseq + Methyl + RPPA(PPI Graph) -------------------------#
testStatistic <- c("DESeq2", "t-test", "t-test")
profile_name <- c("rna(Entrez)", "meth(Entrez)", "rppa(Entrez)")
gmp <- list(g, m, p)
x=list(rnaseq, imputed_methyl, rppa)
fit.iDRWPClass(x=x, y=y, globalGraph=gmp, testStatistic= testStatistic, profile_name = profile_name,
datapath = datapath, respath = respath, pathSet=pathSet, method = "DRW", samples = samples,
id = "result18_29_GMP", prob = 0.8, Gamma = 0.6, pranking = "t-test", mode = "GMP", AntiCorr=FALSE, DEBUG=TRUE)
res_pa_GMP_18_29 <- fit.classification(y=y, samples = samples, id = "result18_29_GMP", datapath = datapath, respath = respath,
profile_name = profile_name, method = "DRW", pranking = "t-test", classifier = "rf",
nFolds = 5, numTops=50, iter = 10)
save(res_pa_GMP_18_29, file=file.path('data/model/res_pa_GMP_18_29.RData'))
################################################### Result18_30: prob = 0.8, Gamma = 0.8 #################################################
#------------------------- RNAseq + Methyl + RPPA(PPI Graph) -------------------------#
testStatistic <- c("DESeq2", "t-test", "t-test")
profile_name <- c("rna(Entrez)", "meth(Entrez)", "rppa(Entrez)")
gmp <- list(g, m, p)
x=list(rnaseq, imputed_methyl, rppa)
fit.iDRWPClass(x=x, y=y, globalGraph=gmp, testStatistic= testStatistic, profile_name = profile_name,
datapath = datapath, respath = respath, pathSet=pathSet, method = "DRW", samples = samples,
id = "result18_30_GMP", prob = 0.8, Gamma = 0.8, pranking = "t-test", mode = "GMP", AntiCorr=FALSE, DEBUG=TRUE)
res_pa_GMP_18_30 <- fit.classification(y=y, samples = samples, id = "result18_30_GMP", datapath = datapath, respath = respath,
profile_name = profile_name, method = "DRW", pranking = "t-test", classifier = "rf",
nFolds = 5, numTops=50, iter = 10)
save(res_pa_GMP_18_30, file=file.path('data/model/res_pa_GMP_18_30.RData'))
############################################## plot #######################################
# Collect the 30 GMP grid-search models (in grid order) and draw a faceted
# accuracy boxplot across all prob/Gamma combinations.
res_gmp <- lapply(1:30, function(k) get(sprintf("res_pa_GMP_18_%d", k)))

title <- "Result 18_GMP"
# Labels in the same order as res_gmp: prob varies slowest, Gamma fastest.
probs  <- c(0.001, 0.01, 0.2, 0.4, 0.6, 0.8)
gammas <- c(0, 0.2, 0.4, 0.6, 0.8)
xlabs <- sprintf("[p=%s,g=%s]", rep(probs, each = 5), rep(gammas, times = 6))

# y-axis limits: mean resampled accuracy across models, padded by 0.15.
mean_acc <- vapply(res_gmp, function(fit) mean(fit$resample$Accuracy), numeric(1))
perf_min <- min(mean_acc)
perf_max <- max(mean_acc)
# NOTE(review): the original call passed perf_max a second time as a trailing
# positional argument after the named perf_max= — the parallel perf_boxplot()
# call below does not, so this looked like a copy-paste error and was dropped.
# Confirm against perf_facet_boxplot()'s signature.
perf_facet_boxplot(title, xlabs, res_gmp,
                   perf_min = perf_min - 0.15, perf_max = perf_max + 0.15)
# Accuracy ((A+D)/(A+B+C+D)): print each model's confusion matrix, preceded
# by its 0-based position in res_gmp (kept 0-based to match the original output).
for (model_idx in seq_along(res_gmp)) {
  print(model_idx - 1)
  print(confusionMatrix(res_gmp[[model_idx]], "none"))
}
####################### LOOCV ###################################
#----------------------- GMR --------------------------------#
# Boxplot of the LOOCV-evaluated GMR models (one per Gamma value); the y-axis
# is bounded by each model's best CV accuracy, padded by 0.05.
res_gmr_LOOCV <- list(res_pa_GMR_18_1_LOOCV, res_pa_GMR_18_2_LOOCV,
                      res_pa_GMR_18_3_LOOCV, res_pa_GMR_18_4_LOOCV,
                      res_pa_GMR_18_5_LOOCV)
title <- "Result 18_GMR_LOOCV"
xlabs <- sprintf("[g=%s]", c(0, 0.2, 0.4, 0.6, 0.8))
best_acc <- vapply(res_gmr_LOOCV, function(fit) max(fit$results$Accuracy), numeric(1))
perf_min <- min(best_acc)
perf_max <- max(best_acc)
perf_boxplot(title, xlabs, res_gmr_LOOCV,
             perf_min = perf_min - 0.05, perf_max = perf_max + 0.05)
| /experiment/result18_all(acorss_model).R | no_license | taerimmkim/iDRW-GMP | R | false | false | 92,401 | r | # integrative DRW on combined feature data (updated in 2018/07/20)
# concat directed pathway graphs within each profile (GM & GMR & GMR_d & GMP)
# For PPI network diffusion, Random Walk with Restart(RWR) algorithm was used.
# To find the optimal restart probability for the PPI network diffusion,
# a grid search was performed over combinations of p=[0.001, 0.01, 0.2, 0.4, 0.6, 0.8] and Gamma=[0, 0.2, 0.4, 0.6, 0.8]
# (p=0.5 had been used previously)
# parameter tuning for GM model, extra experiment was performed by adding Gamma = [0.7, 0.75, 0.85, 0.9, 0.95]
# All gene symbols are converted to Entrez gene id
# 5-fold CV(10 iters) was performed for tuning parameter in Random Forest.
# 5-fold CV(10 iters) was performed for get top N pathways.
# LOOCV was performed for model evaluation
# Dppigraph(Entrez).rda was used
# edge direction
# m -> g
# p -> g
# Classifier : rf(Random Forest)
################################## Result 18_all ############################################################
################################## GM ######################################################################
#################### Result18_1: prob = 0.001, Gamma = 0 ##################
#------------------------- RNAseq + Methyl -------------------------#
gm <- g %du% m
testStatistic <- c("DESeq2", "t-test")
profile_name <- c("rna(Entrez)", "meth(Entrez)")
x=list(rnaseq, imputed_methyl)
fit.iDRWPClass(x=x, y=y, globalGraph=gm, testStatistic= testStatistic, profile_name = profile_name,
datapath = datapath, respath = respath, pathSet=pathSet, method = "DRW", samples = samples,
id = "result18_1_GM", prob = 0.001, Gamma = 0, pranking = "t-test", mode = "GM", AntiCorr=FALSE, DEBUG=TRUE)
res_pa_GM_18_1 <- fit.classification(y=y, samples = samples, id = "result18_1_GM", datapath = datapath, respath = respath,
profile_name = profile_name, method = "DRW", pranking = "t-test", classifier = "rf",
nFolds = 5, numTops=50, iter = 10)
save(res_pa_GM_18_1, file=file.path('data/model/res_pa_GM_18_1.RData'))
#################### Results 18_2 .. 18_0.95: GM Gamma sweep ##################
#------------------------- RNAseq + Methyl -------------------------#
# Each run: fit the iDRW pathway profile (prob = 0.001, Gamma varied),
# train a random-forest classifier (5-fold CV, 10 iterations), keep the fit
# as res_pa_GM_18_<suffix>, and save it to data/model/.
# Refactored from nine copy-pasted blocks into one parameterized loop;
# the calls, the objects created, and the files written are unchanged.
gm <- g %du% m                                    # joint RNAseq + methylation graph
testStatistic <- c("DESeq2", "t-test")
profile_name <- c("rna(Entrez)", "meth(Entrez)")
x <- list(rnaseq, imputed_methyl)
# One row per experiment: result-id suffix and its Gamma (prob fixed at 0.001).
gm_runs <- data.frame(
  suffix = c("2", "3", "4", "4.5", "0.75", "5", "0.85", "0.9", "0.95"),
  Gamma  = c(0.2, 0.4, 0.6, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95),
  stringsAsFactors = FALSE
)
for (k in seq_len(nrow(gm_runs))) {
  run_id   <- paste0("result18_", gm_runs$suffix[k], "_GM")
  res_name <- paste0("res_pa_GM_18_", gm_runs$suffix[k])
  fit.iDRWPClass(x = x, y = y, globalGraph = gm, testStatistic = testStatistic,
                 profile_name = profile_name, datapath = datapath,
                 respath = respath, pathSet = pathSet, method = "DRW",
                 samples = samples, id = run_id, prob = 0.001,
                 Gamma = gm_runs$Gamma[k], pranking = "t-test", mode = "GM",
                 AntiCorr = FALSE, DEBUG = TRUE)
  # assign() at top level binds res_pa_GM_18_<suffix> in the global workspace,
  # exactly as the original explicit assignments did.
  assign(res_name,
         fit.classification(y = y, samples = samples, id = run_id,
                            datapath = datapath, respath = respath,
                            profile_name = profile_name, method = "DRW",
                            pranking = "t-test", classifier = "rf",
                            nFolds = 5, numTops = 50, iter = 10))
  # save(list = ) so the object keeps its res_pa_GM_18_<suffix> name on load.
  save(list = res_name,
       file = file.path("data", "model", paste0(res_name, ".RData")))
}
############################################## plot #######################################
# Plot for GM models: compare CV accuracy across the ten Gamma settings
# fitted above, then print a raw-count confusion matrix for each model.
res_gm <- list(res_pa_GM_18_1, res_pa_GM_18_2, res_pa_GM_18_3, res_pa_GM_18_4, res_pa_GM_18_4.5,
               res_pa_GM_18_0.75, res_pa_GM_18_5, res_pa_GM_18_0.85, res_pa_GM_18_0.9, res_pa_GM_18_0.95)
title <- c("Result 18_GM")
xlabs <- c("[g=0]", "[g=0.2]", "[g=0.4]", "[g=0.6]", "[g=0.7]", "[g=0.75]", "[g=0.8]", "[g=0.85]", "[g=0.9]", "[g=0.95]")
# y-axis limits: mean resampled accuracy per model, padded by +/- 0.2.
# vapply (not sapply) so the result type is guaranteed numeric.
perf_min <- min(vapply(res_gm, function(res) mean(res$resample$Accuracy), numeric(1)))
perf_max <- max(vapply(res_gm, function(res) mean(res$resample$Accuracy), numeric(1)))
perf_boxplot(title, xlabs, res_gm, perf_min = perf_min - 0.2, perf_max = perf_max + 0.2)
# Accuracy((A+D)/(A+B+C+D)) -- "none" norm gives raw resampled counts.
i <- 0   # 0-based index printed before each matrix (kept from original output)
for (model in res_gm) {
  print(i)
  print(confusionMatrix(model, "none"))
  i <- i + 1
}
##############################################################################################################################
################################## GMR ######################################################################
#################### Results 18_1 .. 18_5: GMR Gamma sweep ##################
#------------------------- RNAseq + Methyl + RPPA(Pathway Graph) -------------------------#
# Five runs at prob = 0.001 with Gamma in {0, 0.2, 0.4, 0.6, 0.8}: fit the
# iDRW profile on the three-omics graph, train an RF classifier (5-fold CV,
# 10 iterations), bind the fit as res_pa_GMR_18_<k>, and save it under
# data/model/.  Refactored from five copy-pasted blocks; calls, objects,
# and saved files are unchanged.
gmr <- g %du% m %du% r                            # RNAseq + methylation + RPPA graph
testStatistic <- c("DESeq2", "t-test", "t-test")
profile_name <- c("rna(Entrez)", "meth(Entrez)", "rppa(Pathway_Graph_Entrez)")
x <- list(rnaseq, imputed_methyl, rppa)
gmr_gammas <- c(0, 0.2, 0.4, 0.6, 0.8)            # run k uses gmr_gammas[k]
for (k in seq_along(gmr_gammas)) {
  run_id   <- paste0("result18_", k, "_GMR")
  res_name <- paste0("res_pa_GMR_18_", k)
  fit.iDRWPClass(x = x, y = y, globalGraph = gmr, testStatistic = testStatistic,
                 profile_name = profile_name, datapath = datapath,
                 respath = respath, pathSet = pathSet, method = "DRW",
                 samples = samples, id = run_id, prob = 0.001,
                 Gamma = gmr_gammas[k], pranking = "t-test", mode = "GMR",
                 AntiCorr = FALSE, DEBUG = TRUE)
  # Bind res_pa_GMR_18_<k> in the workspace, as the explicit assignments did.
  assign(res_name,
         fit.classification(y = y, samples = samples, id = run_id,
                            datapath = datapath, respath = respath,
                            profile_name = profile_name, method = "DRW",
                            pranking = "t-test", classifier = "rf",
                            nFolds = 5, numTops = 50, iter = 10))
  # save(list = ) so the object keeps its name inside the .RData file.
  save(list = res_name,
       file = file.path("data", "model", paste0(res_name, ".RData")))
}
############################################## plot #######################################
# Plot for GMR models.
# BUG FIX: this section previously referenced eight undefined
# res_pa_GMR_18_*_LOOCV objects (never created in this script) and had
# eight x-labels, although only res_pa_GMR_18_1 .. res_pa_GMR_18_5 are
# fitted above.  Plot the five models that actually exist.
res_gmr <- list(res_pa_GMR_18_1, res_pa_GMR_18_2, res_pa_GMR_18_3,
                res_pa_GMR_18_4, res_pa_GMR_18_5)
title <- c("Result 18_GMR")
xlabs <- c("[g=0]", "[g=0.2]", "[g=0.4]", "[g=0.6]", "[g=0.8]")
# y-axis limits from mean resampled accuracy, consistent with the GM plot
# above (the old code read x$results$Accuracy from the LOOCV objects).
perf_min <- min(vapply(res_gmr, function(res) mean(res$resample$Accuracy), numeric(1)))
perf_max <- max(vapply(res_gmr, function(res) mean(res$resample$Accuracy), numeric(1)))
perf_boxplot(title, xlabs, res_gmr, perf_min = perf_min - 0.15, perf_max = perf_max + 0.15)
##############################################################################################################################
################################## GMR ######################################################################
#################### Results 18_1 .. 18_19: GMR_d prob x Gamma grid ##################
#------------------------- RNAseq + Methyl + RPPA(diffused Pathway Graph) -------------------------#
# Grid search over restart probability and Gamma for the diffused-graph
# ("GMR_d") mode:
#   runs  1- 5: prob = 0.001, Gamma = 0, 0.2, 0.4, 0.6, 0.8
#   runs  6-10: prob = 0.01,  Gamma = 0, 0.2, 0.4, 0.6, 0.8
#   runs 11-15: prob = 0.2,   Gamma = 0, 0.2, 0.4, 0.6, 0.8
#   runs 16-19: prob = 0.4,   Gamma = 0, 0.2, 0.4, 0.6
# Each run fits the iDRW profile, trains an RF classifier (5-fold CV,
# 10 iterations), binds the fit as res_pa_GMR_d_18_<k>, and saves it under
# data/model/.  Refactored from nineteen copy-pasted blocks; the calls,
# objects, and saved files are unchanged.
gmr <- list(g, m, r)                              # graphs kept separate for GMR_d mode
testStatistic <- c("DESeq2", "t-test", "t-test")
profile_name <- c("rna(Entrez)", "meth(Entrez)", "rppa(diffused_Pathway_Graph_Entrez)")
x <- list(rnaseq, imputed_methyl, rppa)
gmr_d_runs <- data.frame(
  prob  = c(rep(c(0.001, 0.01, 0.2), each = 5), rep(0.4, 4)),
  Gamma = c(rep(c(0, 0.2, 0.4, 0.6, 0.8), times = 3), c(0, 0.2, 0.4, 0.6))
)
for (k in seq_len(nrow(gmr_d_runs))) {
  run_id   <- paste0("result18_", k, "_GMR_d")
  res_name <- paste0("res_pa_GMR_d_18_", k)
  fit.iDRWPClass(x = x, y = y, globalGraph = gmr, testStatistic = testStatistic,
                 profile_name = profile_name, datapath = datapath,
                 respath = respath, pathSet = pathSet, method = "DRW",
                 samples = samples, id = run_id, prob = gmr_d_runs$prob[k],
                 Gamma = gmr_d_runs$Gamma[k], pranking = "t-test",
                 mode = "GMR_d", AntiCorr = FALSE, DEBUG = TRUE)
  # Bind res_pa_GMR_d_18_<k> in the workspace, as the explicit assignments did.
  assign(res_name,
         fit.classification(y = y, samples = samples, id = run_id,
                            datapath = datapath, respath = respath,
                            profile_name = profile_name, method = "DRW",
                            pranking = "t-test", classifier = "rf",
                            nFolds = 5, numTops = 50, iter = 10))
  # save(list = ) so the object keeps its name inside the .RData file.
  save(list = res_name,
       file = file.path("data", "model", paste0(res_name, ".RData")))
}
################################################### Result18_20: prob = 0.4, Gamma = 0.8 #################################################
#------------------------- RNAseq + Methyl + RPPA(diffused Pathway Graph) -------------------------#
gmr <- list(g, m, r)
testStatistic <- c("DESeq2", "t-test", "t-test")
profile_name <- c("rna(Entrez)", "meth(Entrez)", "rppa(diffused_Pathway_Graph_Entrez)")
x=list(rnaseq, imputed_methyl, rppa)
fit.iDRWPClass(x=x, y=y, globalGraph=gmr, testStatistic= testStatistic, profile_name = profile_name,
datapath = datapath, respath = respath, pathSet=pathSet, method = "DRW", samples = samples,
id = "result18_20_GMR_d", prob = 0.4, Gamma = 0.8, pranking = "t-test", mode = "GMR_d", AntiCorr=FALSE, DEBUG=TRUE)
res_pa_GMR_d_18_20 <- fit.classification(y=y, samples = samples, id = "result18_20_GMR_d", datapath = datapath, respath = respath,
profile_name = profile_name, method = "DRW", pranking = "t-test", classifier = "rf",
nFolds = 5, numTops=50, iter = 10)
save(res_pa_GMR_d_18_20, file=file.path('data/model/res_pa_GMR_d_18_20.RData'))
################################################### Result18_21: prob = 0.6, Gamma = 0 #################################################
#------------------------- RNAseq + Methyl + RPPA(diffused Pathway Graph) -------------------------#
gmr <- list(g, m, r)
testStatistic <- c("DESeq2", "t-test", "t-test")
profile_name <- c("rna(Entrez)", "meth(Entrez)", "rppa(diffused_Pathway_Graph_Entrez)")
x=list(rnaseq, imputed_methyl, rppa)
fit.iDRWPClass(x=x, y=y, globalGraph=gmr, testStatistic= testStatistic, profile_name = profile_name,
datapath = datapath, respath = respath, pathSet=pathSet, method = "DRW", samples = samples,
id = "result18_21_GMR_d", prob = 0.6, Gamma = 0, pranking = "t-test", mode = "GMR_d", AntiCorr=FALSE, DEBUG=TRUE)
res_pa_GMR_d_18_21 <- fit.classification(y=y, samples = samples, id = "result18_21_GMR_d", datapath = datapath, respath = respath,
profile_name = profile_name, method = "DRW", pranking = "t-test", classifier = "rf",
nFolds = 5, numTops=50, iter = 10)
save(res_pa_GMR_d_18_21, file=file.path('data/model/res_pa_GMR_d_18_21.RData'))
################################################### Result18_22: prob = 0.6, Gamma = 0.2 #################################################
#------------------------- RNAseq + Methyl + RPPA(diffused Pathway Graph) -------------------------#
gmr <- list(g, m, r)
testStatistic <- c("DESeq2", "t-test", "t-test")
profile_name <- c("rna(Entrez)", "meth(Entrez)", "rppa(diffused_Pathway_Graph_Entrez)")
x=list(rnaseq, imputed_methyl, rppa)
fit.iDRWPClass(x=x, y=y, globalGraph=gmr, testStatistic= testStatistic, profile_name = profile_name,
datapath = datapath, respath = respath, pathSet=pathSet, method = "DRW", samples = samples,
id = "result18_22_GMR_d", prob = 0.6, Gamma = 0.2, pranking = "t-test", mode = "GMR_d", AntiCorr=FALSE, DEBUG=TRUE)
res_pa_GMR_d_18_22 <- fit.classification(y=y, samples = samples, id = "result18_22_GMR_d", datapath = datapath, respath = respath,
profile_name = profile_name, method = "DRW", pranking = "t-test", classifier = "rf",
nFolds = 5, numTops=50, iter = 10)
save(res_pa_GMR_d_18_22, file=file.path('data/model/res_pa_GMR_d_18_22.RData'))
################################################### Result18_23: prob = 0.6, Gamma = 0.4 #################################################
#------------------------- RNAseq + Methyl + RPPA(diffused Pathway Graph) -------------------------#
gmr <- list(g, m, r)
testStatistic <- c("DESeq2", "t-test", "t-test")
profile_name <- c("rna(Entrez)", "meth(Entrez)", "rppa(diffused_Pathway_Graph_Entrez)")
x=list(rnaseq, imputed_methyl, rppa)
fit.iDRWPClass(x=x, y=y, globalGraph=gmr, testStatistic= testStatistic, profile_name = profile_name,
datapath = datapath, respath = respath, pathSet=pathSet, method = "DRW", samples = samples,
id = "result18_23_GMR_d", prob = 0.6, Gamma = 0.4, pranking = "t-test", mode = "GMR_d", AntiCorr=FALSE, DEBUG=TRUE)
res_pa_GMR_d_18_23 <- fit.classification(y=y, samples = samples, id = "result18_23_GMR_d", datapath = datapath, respath = respath,
profile_name = profile_name, method = "DRW", pranking = "t-test", classifier = "rf",
nFolds = 5, numTops=50, iter = 10)
save(res_pa_GMR_d_18_23, file=file.path('data/model/res_pa_GMR_d_18_23.RData'))
################################################### Result18_24: prob = 0.6, Gamma = 0.6 #################################################
#------------------------- RNAseq + Methyl + RPPA(diffused Pathway Graph) -------------------------#
gmr <- list(g, m, r)
testStatistic <- c("DESeq2", "t-test", "t-test")
profile_name <- c("rna(Entrez)", "meth(Entrez)", "rppa(diffused_Pathway_Graph_Entrez)")
x=list(rnaseq, imputed_methyl, rppa)
fit.iDRWPClass(x=x, y=y, globalGraph=gmr, testStatistic= testStatistic, profile_name = profile_name,
datapath = datapath, respath = respath, pathSet=pathSet, method = "DRW", samples = samples,
id = "result18_24_GMR_d", prob = 0.6, Gamma = 0.6, pranking = "t-test", mode = "GMR_d", AntiCorr=FALSE, DEBUG=TRUE)
res_pa_GMR_d_18_24 <- fit.classification(y=y, samples = samples, id = "result18_24_GMR_d", datapath = datapath, respath = respath,
profile_name = profile_name, method = "DRW", pranking = "t-test", classifier = "rf",
nFolds = 5, numTops=50, iter = 10)
save(res_pa_GMR_d_18_24, file=file.path('data/model/res_pa_GMR_d_18_24.RData'))
################################################### Result18_25: prob = 0.6, Gamma = 0.8 #################################################
#------------------------- RNAseq + Methyl + RPPA(diffused Pathway Graph) -------------------------#
gmr <- list(g, m, r)
testStatistic <- c("DESeq2", "t-test", "t-test")
profile_name <- c("rna(Entrez)", "meth(Entrez)", "rppa(diffused_Pathway_Graph_Entrez)")
x=list(rnaseq, imputed_methyl, rppa)
fit.iDRWPClass(x=x, y=y, globalGraph=gmr, testStatistic= testStatistic, profile_name = profile_name,
datapath = datapath, respath = respath, pathSet=pathSet, method = "DRW", samples = samples,
id = "result18_25_GMR_d", prob = 0.6, Gamma = 0.8, pranking = "t-test", mode = "GMR_d", AntiCorr=FALSE, DEBUG=TRUE)
res_pa_GMR_d_18_25 <- fit.classification(y=y, samples = samples, id = "result18_25_GMR_d", datapath = datapath, respath = respath,
profile_name = profile_name, method = "DRW", pranking = "t-test", classifier = "rf",
nFolds = 5, numTops=50, iter = 10)
save(res_pa_GMR_d_18_25, file=file.path('data/model/res_pa_GMR_d_18_25.RData'))
################################################### Result18_26: prob = 0.8, Gamma = 0 #################################################
#------------------------- RNAseq + Methyl + RPPA(diffused Pathway Graph) -------------------------#
gmr <- list(g, m, r)
testStatistic <- c("DESeq2", "t-test", "t-test")
profile_name <- c("rna(Entrez)", "meth(Entrez)", "rppa(diffused_Pathway_Graph_Entrez)")
x=list(rnaseq, imputed_methyl, rppa)
fit.iDRWPClass(x=x, y=y, globalGraph=gmr, testStatistic= testStatistic, profile_name = profile_name,
datapath = datapath, respath = respath, pathSet=pathSet, method = "DRW", samples = samples,
id = "result18_26_GMR_d", prob = 0.8, Gamma = 0, pranking = "t-test", mode = "GMR_d", AntiCorr=FALSE, DEBUG=TRUE)
res_pa_GMR_d_18_26 <- fit.classification(y=y, samples = samples, id = "result18_26_GMR_d", datapath = datapath, respath = respath,
profile_name = profile_name, method = "DRW", pranking = "t-test", classifier = "rf",
nFolds = 5, numTops=50, iter = 10)
save(res_pa_GMR_d_18_26, file=file.path('data/model/res_pa_GMR_d_18_26.RData'))
################################################### Result18_27: prob = 0.8, Gamma = 0.2 #################################################
#------------------------- RNAseq + Methyl + RPPA(diffused Pathway Graph) -------------------------#
gmr <- list(g, m, r)
testStatistic <- c("DESeq2", "t-test", "t-test")
profile_name <- c("rna(Entrez)", "meth(Entrez)", "rppa(diffused_Pathway_Graph_Entrez)")
x=list(rnaseq, imputed_methyl, rppa)
fit.iDRWPClass(x=x, y=y, globalGraph=gmr, testStatistic= testStatistic, profile_name = profile_name,
datapath = datapath, respath = respath, pathSet=pathSet, method = "DRW", samples = samples,
id = "result18_27_GMR_d", prob = 0.8, Gamma = 0.2, pranking = "t-test", mode = "GMR_d", AntiCorr=FALSE, DEBUG=TRUE)
res_pa_GMR_d_18_27 <- fit.classification(y=y, samples = samples, id = "result18_27_GMR_d", datapath = datapath, respath = respath,
profile_name = profile_name, method = "DRW", pranking = "t-test", classifier = "rf",
nFolds = 5, numTops=50, iter = 10)
save(res_pa_GMR_d_18_27, file=file.path('data/model/res_pa_GMR_d_18_27.RData'))
################################################### Result18_28: prob = 0.8, Gamma = 0.4 #################################################
#------------------------- RNAseq + Methyl + RPPA(diffused Pathway Graph) -------------------------#
gmr <- list(g, m, r)
testStatistic <- c("DESeq2", "t-test", "t-test")
profile_name <- c("rna(Entrez)", "meth(Entrez)", "rppa(diffused_Pathway_Graph_Entrez)")
x=list(rnaseq, imputed_methyl, rppa)
fit.iDRWPClass(x=x, y=y, globalGraph=gmr, testStatistic= testStatistic, profile_name = profile_name,
datapath = datapath, respath = respath, pathSet=pathSet, method = "DRW", samples = samples,
id = "result18_28_GMR_d", prob = 0.8, Gamma = 0.4, pranking = "t-test", mode = "GMR_d", AntiCorr=FALSE, DEBUG=TRUE)
res_pa_GMR_d_18_28 <- fit.classification(y=y, samples = samples, id = "result18_28_GMR_d", datapath = datapath, respath = respath,
profile_name = profile_name, method = "DRW", pranking = "t-test", classifier = "rf",
nFolds = 5, numTops=50, iter = 10)
save(res_pa_GMR_d_18_28, file=file.path('data/model/res_pa_GMR_d_18_28.RData'))
################################################### Result18_29: prob = 0.8, Gamma = 0.6 #################################################
#------------------------- RNAseq + Methyl + RPPA(diffused Pathway Graph) -------------------------#
gmr <- list(g, m, r)
testStatistic <- c("DESeq2", "t-test", "t-test")
profile_name <- c("rna(Entrez)", "meth(Entrez)", "rppa(diffused_Pathway_Graph_Entrez)")
x=list(rnaseq, imputed_methyl, rppa)
fit.iDRWPClass(x=x, y=y, globalGraph=gmr, testStatistic= testStatistic, profile_name = profile_name,
datapath = datapath, respath = respath, pathSet=pathSet, method = "DRW", samples = samples,
id = "result18_29_GMR_d", prob = 0.8, Gamma = 0.6, pranking = "t-test", mode = "GMR_d", AntiCorr=FALSE, DEBUG=TRUE)
res_pa_GMR_d_18_29 <- fit.classification(y=y, samples = samples, id = "result18_29_GMR_d", datapath = datapath, respath = respath,
profile_name = profile_name, method = "DRW", pranking = "t-test", classifier = "rf",
nFolds = 5, numTops=50, iter = 10)
save(res_pa_GMR_d_18_29, file=file.path('data/model/res_pa_GMR_d_18_29.RData'))
################################################### Result18_30: prob = 0.8, Gamma = 0.8 #################################################
#------------------------- RNAseq + Methyl + RPPA(diffused Pathway Graph) -------------------------#
gmr <- list(g, m, r)
testStatistic <- c("DESeq2", "t-test", "t-test")
profile_name <- c("rna(Entrez)", "meth(Entrez)", "rppa(diffused_Pathway_Graph_Entrez)")
x=list(rnaseq, imputed_methyl, rppa)
fit.iDRWPClass(x=x, y=y, globalGraph=gmr, testStatistic= testStatistic, profile_name = profile_name,
datapath = datapath, respath = respath, pathSet=pathSet, method = "DRW", samples = samples,
id = "result18_30_GMR_d", prob = 0.8, Gamma = 0.8, pranking = "t-test", mode = "GMR_d", AntiCorr=FALSE, DEBUG=TRUE)
res_pa_GMR_d_18_30 <- fit.classification(y=y, samples = samples, id = "result18_30_GMR_d", datapath = datapath, respath = respath,
profile_name = profile_name, method = "DRW", pranking = "t-test", classifier = "rf",
nFolds = 5, numTops=50, iter = 10)
save(res_pa_GMR_d_18_30, file=file.path('data/model/res_pa_GMR_d_18_30.RData'))
############################################## plot #######################################
# Collect the 30 GMR_d sweep results (one per (prob, Gamma) combination) and draw the
# faceted accuracy boxplot across the whole parameter grid.
res_gmr_d <- unname(mget(paste0("res_pa_GMR_d_18_", 1:30)))
title <- c("Result 18_GMR_d")
# Axis labels follow the sweep order: Gamma varies fastest within each prob value.
label_grid <- expand.grid(g = c(0, 0.2, 0.4, 0.6, 0.8),
                          p = c(0.001, 0.01, 0.2, 0.4, 0.6, 0.8))
xlabs <- paste0("[p=", label_grid$p, ",g=", label_grid$g, "]")
# Best cross-validated accuracy reached by each model; used to pad the y-axis limits.
best_acc <- vapply(res_gmr_d, function(res) max(res$results$Accuracy), numeric(1))
perf_min <- min(best_acc)
perf_max <- max(best_acc)
# NOTE(review): the trailing positional `perf_max` argument is reproduced from the
# original call — it looks like a copy-paste leftover; confirm against the
# perf_facet_boxplot() signature before removing it.
perf_facet_boxplot(title, xlabs, res_gmr_d, perf_min = perf_min-0.15, perf_max = perf_max+0.15, perf_max)
##################################################################################################################
################################## GMP ######################################################################
############################# Result18_1 .. Result18_18: GMP (prob, Gamma) sweep #############################
#------------------------- RNAseq + Methyl + RPPA(PPI Graph) -------------------------#
# Same sweep as the GMR_d sections, but with the RPPA profile on the PPI graph (`p`)
# and mode = "GMP". The original script repeated the same four statements eighteen
# times; this loop removes the duplication. Grid index k maps to (prob, Gamma) exactly
# as the hand-written sections did: Gamma varies fastest (0, 0.2, 0.4, 0.6, 0.8)
# within each prob value (0.001, 0.01, 0.2, 0.4, 0.6, 0.8).
testStatistic <- c("DESeq2", "t-test", "t-test")
profile_name <- c("rna(Entrez)", "meth(Entrez)", "rppa(Entrez)")
gmp <- list(g, m, p)
x <- list(rnaseq, imputed_methyl, rppa)
# expand.grid with Gamma first makes Gamma the fastest-varying column, matching section order.
param_grid <- expand.grid(Gamma = c(0, 0.2, 0.4, 0.6, 0.8),
                          prob = c(0.001, 0.01, 0.2, 0.4, 0.6, 0.8))
for (k in 1:18) {
  prob_k  <- param_grid$prob[k]
  gamma_k <- param_grid$Gamma[k]
  run_id   <- paste0("result18_", k, "_GMP")
  res_name <- paste0("res_pa_GMP_18_", k)
  # Pathway-activity inference via directed random walk on the integrated GMP graph.
  fit.iDRWPClass(x = x, y = y, globalGraph = gmp, testStatistic = testStatistic,
                 profile_name = profile_name, datapath = datapath, respath = respath,
                 pathSet = pathSet, method = "DRW", samples = samples, id = run_id,
                 prob = prob_k, Gamma = gamma_k, pranking = "t-test", mode = "GMP",
                 AntiCorr = FALSE, DEBUG = TRUE)
  # Random-forest classification on the top-ranked pathway features (5-fold CV, 10 repeats).
  res <- fit.classification(y = y, samples = samples, id = run_id,
                            datapath = datapath, respath = respath,
                            profile_name = profile_name, method = "DRW",
                            pranking = "t-test", classifier = "rf",
                            nFolds = 5, numTops = 50, iter = 10)
  # Keep the historical per-run variable name (res_pa_GMP_18_<k>) so downstream code
  # referencing these objects individually still works.
  assign(res_name, res)
  # save(list=) writes the object under the same name the original save() calls used.
  save(list = res_name, file = file.path("data/model", paste0(res_name, ".RData")))
}
################################################### Result18_19: prob = 0.4, Gamma = 0.6 #################################################
#------------------------- RNAseq + Methyl + RPPA(PPI Graph) -------------------------#
testStatistic <- c("DESeq2", "t-test", "t-test")
profile_name <- c("rna(Entrez)", "meth(Entrez)", "rppa(Entrez)")
gmp <- list(g, m, p)
x=list(rnaseq, imputed_methyl, rppa)
fit.iDRWPClass(x=x, y=y, globalGraph=gmp, testStatistic= testStatistic, profile_name = profile_name,
datapath = datapath, respath = respath, pathSet=pathSet, method = "DRW", samples = samples,
id = "result18_19_GMP", prob = 0.4, Gamma = 0.6, pranking = "t-test", mode = "GMP", AntiCorr=FALSE, DEBUG=TRUE)
res_pa_GMP_18_19 <- fit.classification(y=y, samples = samples, id = "result18_19_GMP", datapath = datapath, respath = respath,
profile_name = profile_name, method = "DRW", pranking = "t-test", classifier = "rf",
nFolds = 5, numTops=50, iter = 10)
save(res_pa_GMP_18_19, file=file.path('data/model/res_pa_GMP_18_19.RData'))
################################################### Result18_20: prob = 0.4, Gamma = 0.8 #################################################
#------------------------- RNAseq + Methyl + RPPA(PPI Graph) -------------------------#
testStatistic <- c("DESeq2", "t-test", "t-test")
profile_name <- c("rna(Entrez)", "meth(Entrez)", "rppa(Entrez)")
gmp <- list(g, m, p)
x=list(rnaseq, imputed_methyl, rppa)
fit.iDRWPClass(x=x, y=y, globalGraph=gmp, testStatistic= testStatistic, profile_name = profile_name,
datapath = datapath, respath = respath, pathSet=pathSet, method = "DRW", samples = samples,
id = "result18_20_GMP", prob = 0.4, Gamma = 0.8, pranking = "t-test", mode = "GMP", AntiCorr=FALSE, DEBUG=TRUE)
res_pa_GMP_18_20 <- fit.classification(y=y, samples = samples, id = "result18_20_GMP", datapath = datapath, respath = respath,
profile_name = profile_name, method = "DRW", pranking = "t-test", classifier = "rf",
nFolds = 5, numTops=50, iter = 10)
save(res_pa_GMP_18_20, file=file.path('data/model/res_pa_GMP_18_20.RData'))
################################################### Result18_21: prob = 0.6, Gamma = 0 #################################################
#------------------------- RNAseq + Methyl + RPPA(PPI Graph) -------------------------#
testStatistic <- c("DESeq2", "t-test", "t-test")
profile_name <- c("rna(Entrez)", "meth(Entrez)", "rppa(Entrez)")
gmp <- list(g, m, p)
x=list(rnaseq, imputed_methyl, rppa)
fit.iDRWPClass(x=x, y=y, globalGraph=gmp, testStatistic= testStatistic, profile_name = profile_name,
datapath = datapath, respath = respath, pathSet=pathSet, method = "DRW", samples = samples,
id = "result18_21_GMP", prob = 0.6, Gamma = 0, pranking = "t-test", mode = "GMP", AntiCorr=FALSE, DEBUG=TRUE)
res_pa_GMP_18_21 <- fit.classification(y=y, samples = samples, id = "result18_21_GMP", datapath = datapath, respath = respath,
profile_name = profile_name, method = "DRW", pranking = "t-test", classifier = "rf",
nFolds = 5, numTops=50, iter = 10)
save(res_pa_GMP_18_21, file=file.path('data/model/res_pa_GMP_18_21.RData'))
################################################### Result18_22: prob = 0.6, Gamma = 0.2 #################################################
#------------------------- RNAseq + Methyl + RPPA(PPI Graph) -------------------------#
testStatistic <- c("DESeq2", "t-test", "t-test")
profile_name <- c("rna(Entrez)", "meth(Entrez)", "rppa(Entrez)")
gmp <- list(g, m, p)
x=list(rnaseq, imputed_methyl, rppa)
fit.iDRWPClass(x=x, y=y, globalGraph=gmp, testStatistic= testStatistic, profile_name = profile_name,
datapath = datapath, respath = respath, pathSet=pathSet, method = "DRW", samples = samples,
id = "result18_22_GMP", prob = 0.6, Gamma = 0.2, pranking = "t-test", mode = "GMP", AntiCorr=FALSE, DEBUG=TRUE)
res_pa_GMP_18_22 <- fit.classification(y=y, samples = samples, id = "result18_22_GMP", datapath = datapath, respath = respath,
profile_name = profile_name, method = "DRW", pranking = "t-test", classifier = "rf",
nFolds = 5, numTops=50, iter = 10)
save(res_pa_GMP_18_22, file=file.path('data/model/res_pa_GMP_18_22.RData'))
################################################### Result18_23: prob = 0.6, Gamma = 0.4 #################################################
#------------------------- RNAseq + Methyl + RPPA(PPI Graph) -------------------------#
testStatistic <- c("DESeq2", "t-test", "t-test")
profile_name <- c("rna(Entrez)", "meth(Entrez)", "rppa(Entrez)")
gmp <- list(g, m, p)
x=list(rnaseq, imputed_methyl, rppa)
fit.iDRWPClass(x=x, y=y, globalGraph=gmp, testStatistic= testStatistic, profile_name = profile_name,
datapath = datapath, respath = respath, pathSet=pathSet, method = "DRW", samples = samples,
id = "result18_23_GMP", prob = 0.6, Gamma = 0.4, pranking = "t-test", mode = "GMP", AntiCorr=FALSE, DEBUG=TRUE)
res_pa_GMP_18_23 <- fit.classification(y=y, samples = samples, id = "result18_23_GMP", datapath = datapath, respath = respath,
profile_name = profile_name, method = "DRW", pranking = "t-test", classifier = "rf",
nFolds = 5, numTops=50, iter = 10)
save(res_pa_GMP_18_23, file=file.path('data/model/res_pa_GMP_18_23.RData'))
################################################### Result18_24: prob = 0.6, Gamma = 0.6 #################################################
#------------------------- RNAseq + Methyl + RPPA(PPI Graph) -------------------------#
testStatistic <- c("DESeq2", "t-test", "t-test")
profile_name <- c("rna(Entrez)", "meth(Entrez)", "rppa(Entrez)")
gmp <- list(g, m, p)
x=list(rnaseq, imputed_methyl, rppa)
fit.iDRWPClass(x=x, y=y, globalGraph=gmp, testStatistic= testStatistic, profile_name = profile_name,
datapath = datapath, respath = respath, pathSet=pathSet, method = "DRW", samples = samples,
id = "result18_24_GMP", prob = 0.6, Gamma = 0.6, pranking = "t-test", mode = "GMP", AntiCorr=FALSE, DEBUG=TRUE)
res_pa_GMP_18_24 <- fit.classification(y=y, samples = samples, id = "result18_24_GMP", datapath = datapath, respath = respath,
profile_name = profile_name, method = "DRW", pranking = "t-test", classifier = "rf",
nFolds = 5, numTops=50, iter = 10)
save(res_pa_GMP_18_24, file=file.path('data/model/res_pa_GMP_18_24.RData'))
################################################### Result18_25: prob = 0.6, Gamma = 0.8 #################################################
#------------------------- RNAseq + Methyl + RPPA(PPI Graph) -------------------------#
testStatistic <- c("DESeq2", "t-test", "t-test")
profile_name <- c("rna(Entrez)", "meth(Entrez)", "rppa(Entrez)")
gmp <- list(g, m, p)
x=list(rnaseq, imputed_methyl, rppa)
fit.iDRWPClass(x=x, y=y, globalGraph=gmp, testStatistic= testStatistic, profile_name = profile_name,
datapath = datapath, respath = respath, pathSet=pathSet, method = "DRW", samples = samples,
id = "result18_25_GMP", prob = 0.6, Gamma = 0.8, pranking = "t-test", mode = "GMP", AntiCorr=FALSE, DEBUG=TRUE)
res_pa_GMP_18_25 <- fit.classification(y=y, samples = samples, id = "result18_25_GMP", datapath = datapath, respath = respath,
profile_name = profile_name, method = "DRW", pranking = "t-test", classifier = "rf",
nFolds = 5, numTops=50, iter = 10)
save(res_pa_GMP_18_25, file=file.path('data/model/res_pa_GMP_18_25.RData'))
################################################### Result18_26: prob = 0.8, Gamma = 0 #################################################
#------------------------- RNAseq + Methyl + RPPA(PPI Graph) -------------------------#
testStatistic <- c("DESeq2", "t-test", "t-test")
profile_name <- c("rna(Entrez)", "meth(Entrez)", "rppa(Entrez)")
gmp <- list(g, m, p)
x=list(rnaseq, imputed_methyl, rppa)
fit.iDRWPClass(x=x, y=y, globalGraph=gmp, testStatistic= testStatistic, profile_name = profile_name,
datapath = datapath, respath = respath, pathSet=pathSet, method = "DRW", samples = samples,
id = "result18_26_GMP", prob = 0.8, Gamma = 0, pranking = "t-test", mode = "GMP", AntiCorr=FALSE, DEBUG=TRUE)
res_pa_GMP_18_26 <- fit.classification(y=y, samples = samples, id = "result18_26_GMP", datapath = datapath, respath = respath,
profile_name = profile_name, method = "DRW", pranking = "t-test", classifier = "rf",
nFolds = 5, numTops=50, iter = 10)
save(res_pa_GMP_18_26, file=file.path('data/model/res_pa_GMP_18_26.RData'))
################################################### Result18_27: prob = 0.8, Gamma = 0.2 #################################################
#------------------------- RNAseq + Methyl + RPPA(PPI Graph) -------------------------#
testStatistic <- c("DESeq2", "t-test", "t-test")
profile_name <- c("rna(Entrez)", "meth(Entrez)", "rppa(Entrez)")
gmp <- list(g, m, p)
x=list(rnaseq, imputed_methyl, rppa)
fit.iDRWPClass(x=x, y=y, globalGraph=gmp, testStatistic= testStatistic, profile_name = profile_name,
datapath = datapath, respath = respath, pathSet=pathSet, method = "DRW", samples = samples,
id = "result18_27_GMP", prob = 0.8, Gamma = 0.2, pranking = "t-test", mode = "GMP", AntiCorr=FALSE, DEBUG=TRUE)
res_pa_GMP_18_27 <- fit.classification(y=y, samples = samples, id = "result18_27_GMP", datapath = datapath, respath = respath,
profile_name = profile_name, method = "DRW", pranking = "t-test", classifier = "rf",
nFolds = 5, numTops=50, iter = 10)
save(res_pa_GMP_18_27, file=file.path('data/model/res_pa_GMP_18_27.RData'))
################################################### Result18_28: prob = 0.8, Gamma = 0.4 #################################################
#------------------------- RNAseq + Methyl + RPPA(PPI Graph) -------------------------#
testStatistic <- c("DESeq2", "t-test", "t-test")
profile_name <- c("rna(Entrez)", "meth(Entrez)", "rppa(Entrez)")
gmp <- list(g, m, p)
x=list(rnaseq, imputed_methyl, rppa)
fit.iDRWPClass(x=x, y=y, globalGraph=gmp, testStatistic= testStatistic, profile_name = profile_name,
datapath = datapath, respath = respath, pathSet=pathSet, method = "DRW", samples = samples,
id = "result18_28_GMP", prob = 0.8, Gamma = 0.4, pranking = "t-test", mode = "GMP", AntiCorr=FALSE, DEBUG=TRUE)
res_pa_GMP_18_28 <- fit.classification(y=y, samples = samples, id = "result18_28_GMP", datapath = datapath, respath = respath,
profile_name = profile_name, method = "DRW", pranking = "t-test", classifier = "rf",
nFolds = 5, numTops=50, iter = 10)
save(res_pa_GMP_18_28, file=file.path('data/model/res_pa_GMP_18_28.RData'))
################################################### Result18_29: prob = 0.8, Gamma = 0.6 #################################################
#------------------------- RNAseq + Methyl + RPPA(PPI Graph) -------------------------#
testStatistic <- c("DESeq2", "t-test", "t-test")
profile_name <- c("rna(Entrez)", "meth(Entrez)", "rppa(Entrez)")
gmp <- list(g, m, p)
x=list(rnaseq, imputed_methyl, rppa)
fit.iDRWPClass(x=x, y=y, globalGraph=gmp, testStatistic= testStatistic, profile_name = profile_name,
datapath = datapath, respath = respath, pathSet=pathSet, method = "DRW", samples = samples,
id = "result18_29_GMP", prob = 0.8, Gamma = 0.6, pranking = "t-test", mode = "GMP", AntiCorr=FALSE, DEBUG=TRUE)
res_pa_GMP_18_29 <- fit.classification(y=y, samples = samples, id = "result18_29_GMP", datapath = datapath, respath = respath,
profile_name = profile_name, method = "DRW", pranking = "t-test", classifier = "rf",
nFolds = 5, numTops=50, iter = 10)
save(res_pa_GMP_18_29, file=file.path('data/model/res_pa_GMP_18_29.RData'))
################################################### Result18_30: prob = 0.8, Gamma = 0.8 #################################################
#------------------------- RNAseq + Methyl + RPPA(PPI Graph) -------------------------#
testStatistic <- c("DESeq2", "t-test", "t-test")
profile_name <- c("rna(Entrez)", "meth(Entrez)", "rppa(Entrez)")
gmp <- list(g, m, p)
x=list(rnaseq, imputed_methyl, rppa)
fit.iDRWPClass(x=x, y=y, globalGraph=gmp, testStatistic= testStatistic, profile_name = profile_name,
datapath = datapath, respath = respath, pathSet=pathSet, method = "DRW", samples = samples,
id = "result18_30_GMP", prob = 0.8, Gamma = 0.8, pranking = "t-test", mode = "GMP", AntiCorr=FALSE, DEBUG=TRUE)
res_pa_GMP_18_30 <- fit.classification(y=y, samples = samples, id = "result18_30_GMP", datapath = datapath, respath = respath,
profile_name = profile_name, method = "DRW", pranking = "t-test", classifier = "rf",
nFolds = 5, numTops=50, iter = 10)
save(res_pa_GMP_18_30, file=file.path('data/model/res_pa_GMP_18_30.RData'))
############################################## plot #######################################
# Collect the 30 GMP sweep results (prob x Gamma grid) for plotting.
res_gmp <- list(res_pa_GMP_18_1, res_pa_GMP_18_2, res_pa_GMP_18_3, res_pa_GMP_18_4, res_pa_GMP_18_5,
res_pa_GMP_18_6, res_pa_GMP_18_7, res_pa_GMP_18_8, res_pa_GMP_18_9, res_pa_GMP_18_10,
res_pa_GMP_18_11, res_pa_GMP_18_12, res_pa_GMP_18_13, res_pa_GMP_18_14, res_pa_GMP_18_15,
res_pa_GMP_18_16, res_pa_GMP_18_17, res_pa_GMP_18_18, res_pa_GMP_18_19, res_pa_GMP_18_20,
res_pa_GMP_18_21, res_pa_GMP_18_22, res_pa_GMP_18_23, res_pa_GMP_18_24, res_pa_GMP_18_25,
res_pa_GMP_18_26, res_pa_GMP_18_27, res_pa_GMP_18_28, res_pa_GMP_18_29, res_pa_GMP_18_30)
# Plot for GMP models; x-axis labels mirror the (prob, Gamma) order of the sweep.
title <- c("Result 18_GMP")
xlabs <- c("[p=0.001,g=0]", "[p=0.001,g=0.2]", "[p=0.001,g=0.4]", "[p=0.001,g=0.6]", "[p=0.001,g=0.8]",
"[p=0.01,g=0]", "[p=0.01,g=0.2]", "[p=0.01,g=0.4]", "[p=0.01,g=0.6]", "[p=0.01,g=0.8]",
"[p=0.2,g=0]", "[p=0.2,g=0.2]", "[p=0.2,g=0.4]", "[p=0.2,g=0.6]", "[p=0.2,g=0.8]",
"[p=0.4,g=0]", "[p=0.4,g=0.2]", "[p=0.4,g=0.4]", "[p=0.4,g=0.6]", "[p=0.4,g=0.8]",
"[p=0.6,g=0]", "[p=0.6,g=0.2]", "[p=0.6,g=0.4]", "[p=0.6,g=0.6]", "[p=0.6,g=0.8]",
"[p=0.8,g=0]", "[p=0.8,g=0.2]", "[p=0.8,g=0.4]", "[p=0.8,g=0.6]", "[p=0.8,g=0.8]")
# y-axis limits: min/max of the mean resampled accuracy across all 30 models.
perf_min <- min(sapply(X = res_gmp, FUN = function(x){mean(x$resample$Accuracy)}))
perf_max <- max(sapply(X = res_gmp, FUN = function(x){mean(x$resample$Accuracy)}))
# NOTE(review): the trailing positional `perf_max` after the named
# perf_min=/perf_max= arguments looks accidental -- verify against the
# perf_facet_boxplot() signature before relying on it.
perf_facet_boxplot(title, xlabs, res_gmp, perf_min = perf_min-0.15, perf_max = perf_max+0.15, perf_max)
# Print the 5-fold confusion matrix of each model.
# Accuracy((A+D)/(A+B+C+D))
# NOTE(review): `i=0` uses `=` assignment; elsewhere the script uses `<-`.
i=0
for(model in res_gmp){
print(i)
print(confusionMatrix(model, "none"))
i <- i+1
}
####################### LOOCV ###################################
#----------------------- GMR --------------------------------#
# Leave-one-out CV results for the 5 Gamma values of the GMR models;
# here the y-limits use the best (max) accuracy per model, not the mean.
res_gmr_LOOCV <- list(res_pa_GMR_18_1_LOOCV, res_pa_GMR_18_2_LOOCV, res_pa_GMR_18_3_LOOCV, res_pa_GMR_18_4_LOOCV, res_pa_GMR_18_5_LOOCV)
title <- c("Result 18_GMR_LOOCV")
xlabs <- c("[g=0]", "[g=0.2]", "[g=0.4]", "[g=0.6]", "[g=0.8]")
perf_min <- min(sapply(X = res_gmr_LOOCV, FUN = function(x){max(x$results$Accuracy)}))
perf_max <- max(sapply(X = res_gmr_LOOCV, FUN = function(x){max(x$results$Accuracy)}))
perf_boxplot(title, xlabs, res_gmr_LOOCV, perf_min = perf_min-0.05, perf_max = perf_max+0.05)
|
# Package-level flag recording whether the bundled models are current;
# updated only through .setPkgModels() below.
.pkgModelCurrent <- TRUE
# Overwrite .pkgModelCurrent inside this package's (sealed) namespace.
# @param value new logical value for .pkgModelCurrent
.setPkgModels <- function(value) { ## For testing
  assignInMyNamespace(".pkgModelCurrent", value)
}
# Return the normalized model string for `obj`.
#
# RxODE objects (environments) may carry a precomputed model string in
# `.linCmtM`; when present it is returned directly.  Otherwise the
# "normModel" entry of the object's model variables is returned with its
# name stripped.
.norm2 <- function(obj) {
  if (inherits(obj, "RxODE") && exists(".linCmtM", obj)) {
    return(get(".linCmtM", obj))
  }
  setNames(RxODE::rxModelVars(obj)$model["normModel"], NULL)
}
# Best-effort writability check.
#
# Returns whatever assertthat::is.writeable() reports; any error raised
# by the check itself (e.g. an invalid path) degrades to FALSE instead
# of propagating.
.isWritable <- function(...) {
  tryCatch(assertthat::is.writeable(...), error = function(e) FALSE)
}
# Locate a writable "inst"-like directory for the package of `obj`.
#
# If the current working directory contains the package name, the path
# is truncated at the package name; otherwise the installed package
# location is used.  When that directory is not writable, fall back to
# ~/.rxCache/ and finally to rxTempDir().
.rxPkgInst <- function(obj) {
  .wd <- getwd()
  if (grepl(obj$package, .wd)) {
    # Working inside the package source tree: cut everything after the
    # package-name component of the path.
    .inst <- gsub(paste0("(", obj$package, ").*"), "\\1", .wd)
  } else {
    .inst <- system.file(package = obj$package)
  }
  if (!.isWritable(.inst)) {
    # Installed location is read-only; use a per-user cache instead.
    .cache <- "~/.rxCache/"
    if (.isWritable(.cache)) {
      return(.cache)
    }
    return(rxTempDir())
  }
  if (grepl("inst$", .inst)) {
    # Already pointing at a source-tree inst/ directory.
    return(.inst)
  }
  .sub <- file.path(.inst, "inst")
  if (file.exists(.sub)) {
    return(.sub)
  }
  if (file.exists(file.path(.inst, "html"))) {
    # Looks like an installed package (has html/); use it as-is.
    return(.inst)
  }
  .sub
}
# Directory holding the compiled model artifacts: the "rx" subdirectory
# of the writable package location for `obj`.
.rxPkgDir <- function(obj) {
  file.path(.rxPkgInst(obj), "rx")
}
# Resolve the DLL path for a packaged model.
#
# Side effect: sets obj$mdir to the package "rx" directory.  When the
# package DLL is loaded and the model object is still valid, return the
# loaded DLL's path; otherwise point at the model DLL inside obj$mdir.
.rxPkgDll <- function(obj) {
  obj$mdir <- .rxPkgDir(obj)
  .dll <- getLoadedDLLs()[[obj$package]]
  # getLoadedDLLs() yields NULL for packages without a loaded DLL;
  # is.null() is scalar, so no all() wrapper is needed.
  if (!is.null(.dll) && obj$isValid()) {
    .info <- .dll
    class(.info) <- "list"   # unclass DLLInfo so $path extraction works
    return(.info$path)
  }
  file.path(obj$mdir, basename(obj$rxDll$dll))
}
# Symbol name for the "new model vars" entry point of a model DLL:
# the DLL base name with its extension replaced by an
# arch-qualified "_new_<arch>_model_vars" suffix.
.rxNewMvStr <- function(obj) {
  .base <- basename(obj$rxDll$dll)
  sub("[.].*", paste0("_new_", .Platform$r_arch, "_model_vars"), .base)
}
# Is any of the packages in `pkg` currently *attached*?
#
# Only attached packages (sessionInfo()$otherPkgs) count; loaded-only
# namespaces are deliberately ignored (see the commented-out loadedOnly
# in the original).
.rxPkgLoaded <- function(pkg) {
  .attached <- names(sessionInfo()$otherPkgs)
  any(pkg %in% .attached)
}
# Counter shared across rxUse() C-file copies: each copied model source
# gets a unique "_rxp<i>" symbol prefix so several models can coexist
# in one package.
.rxUseI <- new.env(parent = emptyenv())
# Counter starts at 1.
.rxUseI$i <- 1L
# Directory containing the C source of the most recently compiled model;
# set by rxUse() via assignInMyNamespace().
.rxUseCdir <- ""
#' Use model object in your package
#'
#' Called with a model object, recompiles it for the current package and
#' stores it in `data/` via `usethis::use_data()`.  Called with no
#' arguments, regenerates every RxODE model found in `data/*.rda`,
#' writes roxygen documentation stubs for each, copies the generated C
#' sources into `src/` and emits the C registration boilerplate needed
#' to compile the models into the package.
#'
#' @param obj model to save.
#' @param internal If this is run internally. By default this is FALSE
#' @inheritParams usethis::use_data
#' @return Nothing; This is used for its side effects and shouldn't be called by a user
#' @export
rxUse <- function(obj, overwrite = TRUE, compress = "bzip2",
                  internal = FALSE) {
  rxReq("usethis")
  rxReq("devtools")
  # Force the promise so `internal` has a concrete value before it is
  # copied into the evaluation environments below.
  internal <- internal
  if (missing(obj)) {
    ## No model given: rebuild everything saved under data/.
    .env <- new.env()
    assign("internal", internal, .env)
    assign("overwrite", overwrite, .env)
    assign("compress", compress, .env)
    # Remove previously generated model files.
    sapply(list.files(devtools::package_file("inst/rx"), full.names = TRUE),
      unlink,
      force = TRUE, recursive = TRUE
    )
    .models <- NULL
    for (.f in list.files(
      path = devtools::package_file("data"),
      pattern = "\\.rda$", full.names = TRUE
    )) {
      load(.f, envir = .env)
      .f2 <- basename(.f)
      .f2 <- substr(.f2, 0, nchar(.f2) - 4) # strip ".rda"
      if (is(.env[[.f2]], "RxODE")) {
        .env[[.f2]]$package <- NULL
        .minfo(sprintf("recompile '%s'", .f2))
        .models <- c(.models, .f2)
        # Re-enter rxUse() with the model bound to its own name so that
        # use_data() records the correct object name.
        eval(parse(text = sprintf("rxUse(%s, internal=internal, overwrite=overwrite, compress=compress)", .f2)),
          envir = .env
        )
        .docFile <- file.path(devtools::package_file("R"), paste0(.f2, "-doc.R"))
        if (!file.exists(.docFile)) {
          # BUG FIX: the status message was built but never shown -- the
          # bare (sprintf(...)) discarded its result (auto-printing is
          # suppressed inside a function).  Route it through .minfo()
          # like the "recompile" / "copy" messages elsewhere in rxUse().
          .minfo(sprintf("creating documentation '%s'", .docFile))
          # Everything cat()'d until sink() goes into the new -doc.R file.
          sink(.docFile)
          .tmp <- .env[[.f2]]
          .mv <- rxModelVars(.tmp)
          cat(sprintf("#' %s RxODE model\n", .f2))
          cat("#'\n")
          cat(sprintf(
            "#' @format An \\emph{RxODE} model with %s parameters, %s ODE states, and %s calc vars.\n",
            length(.tmp$params), length(.tmp$state) + .mv$extraCmt, length(.tmp$lhs)
          ))
          cat("#'\n")
          cat(sprintf("#'\\emph{Parameters (%s$params)}\n", .f2))
          cat("#'\n")
          cat("#' \\describe{\n")
          .def <- rxInits(.tmp)
          .defs <- paste0(" (default=", .def, ")")
          .defs[is.na(.def)] <- "" # parameters without defaults get no suffix
          cat(paste(paste0("#' \\item{", .tmp$params, "}{", .defs, "}\n"), collapse = ""))
          cat("#'}\n")
          .state <- .tmp$state
          ## extraCmt models have implied compartments not listed in $state.
          if (.mv$extraCmt == 2) {
            .state <- c(.state, "depot", "central")
          } else if (.mv$extraCmt == 1) {
            .state <- c(.state, "central")
          }
          if (length(.state) > 0) {
            cat("#'\n")
            cat(sprintf("#' \\emph{State %s$state}\n", .f2))
            cat("#'\n")
            cat("#' \\describe{\n")
            cat(paste(paste0("#' \\item{", .state, "}{ (=", seq_along(.state), ")}\n"), collapse = ""))
            cat("#' }\n")
          }
          .lhs <- .tmp$lhs
          if (length(.lhs) > 0) {
            cat("#'\n")
            cat(sprintf("#' \\emph{Calculated Variables %s$lhs}\n", .f2))
            cat("#'\n")
            cat("#' \\describe{\n")
            cat(paste(paste0("#' \\item{", .lhs, "}{}\n"), collapse = ""))
            cat("#' }\n")
          }
          cat("#'\n")
          cat("#' \\emph{Model Code}\n") # sprintf(,.f2)
          cat("#'\n")
          # Pretty-print the normalized model source as an RxODE({...}) block.
          .code <- deparse(body(eval(parse(text = paste("function(){", .norm2(.tmp), "}")))))
          .code[1] <- "RxODE({"
          .code[length(.code)] <- "})"
          cat(paste(paste0("#' ", .code, "\n"), collapse = ""))
          cat("#'\n")
          cat(paste(paste0("#' @seealso \\code{\\link[RxODE]{eventTable}}, \\code{\\link[RxODE]{et}}, \\code{\\link[RxODE]{rxSolve}}, \\code{\\link[RxODE]{RxODE}}\n")))
          cat("#' \n")
          cat("#' @examples\n")
          cat("#' ## Showing the model code\n")
          cat(sprintf("#' summary(%s)\n", .f2))
          cat("#'\n")
          cat(sprintf('"%s"\n', .f2))
          sink()
        }
      }
    }
    if (!dir.exists(devtools::package_file("src"))) {
      dir.create(devtools::package_file("src"), recursive = TRUE)
    }
    .pkg <- basename(usethis::proj_get())
    .rx <- loadNamespace("RxODE")
    # Copy each generated model C file into src/, rewriting "#define X _rx..."
    # into "#define X _rxp<i>..." so each model gets unique symbols.
    sapply(
      list.files(.rxUseCdir, pattern = "[.]c", full.names = TRUE),
      function(x) {
        .minfo(sprintf("copy '%s'", basename(x)))
        .env <- .rx$.rxUseI
        .rxUseI <- .env$i
        .f0 <- gsub(
          "^#define (.*) _rx(.*)$",
          paste0("#define \\1 _rxp", .rxUseI, "\\2"), readLines(x)
        )
        assign("i", .rxUseI + 1, envir = .env)
        .f0 <- c("#include <RxODE.h>\n#include <RxODE_model_shared.h>", .f0)
        # extraC.h is not shipped with the target package; drop its include.
        .w <- which(.f0 == "#include \"extraC.h\"")
        if (length(.w) > 0) .f0 <- .f0[-.w[1]]
        writeLines(text = .f0, con = file.path(devtools::package_file("src"), basename(x)))
      }
    )
    # Build <pkg>_compiled.h: the registration table plus init prototypes
    # for every model, wrapped in a compiledModelCall macro.
    .inits <- paste0("R_init0_", .pkg, "_", .models)
    .tmp <- paste0("{\"", .pkg, "_", .models, "_model_vars\", (DL_FUNC) &", .pkg, "_", .models, "_model_vars, 0},\\")
    # Last macro line must not end with a continuation backslash.
    .tmp[length(.tmp)] <- substr(.tmp[length(.tmp)], 0, nchar(.tmp[length(.tmp)]) - 1)
    .extraC <- c(
      "#define compiledModelCall \\",
      .tmp,
      paste0("SEXP ", .pkg, "_", .models, "_model_vars();"),
      paste0("void ", .inits, "();"),
      paste0("void R_init0_", .pkg, "_RxODE_models(){"),
      paste0(" ", .inits, "();"),
      "}"
    )
    sink(file.path(devtools::package_file("src"), paste0(.pkg, "_compiled.h")))
    if (.pkg == "RxODE") {
      # Building RxODE itself: headers come from the source tree.
      cat("#include <R.h>\n#include <Rinternals.h>\n#include <stdlib.h> // for NULL\n#include <R_ext/Rdynload.h>\n#include \"../inst/include/RxODE.h\"\n#include \"../inst/include/RxODE_model_shared.h\"\n")
    } else {
      cat("#include <R.h>\n#include <Rinternals.h>\n#include <stdlib.h> // for NULL\n#include <R_ext/Rdynload.h>\n#include <RxODE.h>\n#include <RxODE_model_shared.h>\n")
    }
    cat(paste(.extraC, collapse = "\n"))
    cat("\n")
    sink()
    # If src/ holds nothing but compiled models, generate the package's
    # DLL init/unload entry points as well.
    .files <- list.files(devtools::package_file("src"))
    .files <- .files[regexpr("RxODE_model_shared", .files) == -1]
    if (all(regexpr(paste0("^", .pkg), .files) != -1)) {
      .minfo(sprintf("only compiled models in this package, creating '%s_init.c'", .pkg))
      sink(file.path(devtools::package_file("src"), paste0(.pkg, "_init.c")))
      cat("#include <R.h>\n#include <Rinternals.h>\n#include <stdlib.h> // for NULL\n#include <R_ext/Rdynload.h>\n")
      cat("#include <RxODE.h>\n")
      cat("#include <RxODE_model_shared.h>\n")
      cat(paste0('#include "', .pkg, '_compiled.h"\n'))
      cat(sprintf("void R_init_%s(DllInfo *info){\n", .pkg))
      cat(sprintf(" R_init0_%s_RxODE_models();\n", .pkg))
      cat(" static const R_CallMethodDef callMethods[] = {\n compiledModelCall\n {NULL, NULL, 0}\n };\n")
      cat(" R_registerRoutines(info, NULL, callMethods, NULL, NULL);\n")
      cat(" R_useDynamicSymbols(info,FALSE);\n")
      cat("}\n")
      cat(paste(paste0("void R_unload_", .pkg, "_", .models, "(DllInfo *info);\n"), collapse = ""))
      cat(sprintf("void R_unload_%s(DllInfo *info){\n", .pkg))
      cat(paste(paste0(" R_unload_", .pkg, "_", .models, "(info);\n"), collapse = ""))
      cat("}\n")
      sink()
    }
    # Ensure the target package has an .rxUpdated environment available.
    if (!file.exists(devtools::package_file("R/rxUpdated.R")) && .pkg != "RxODE") {
      sink(devtools::package_file("R/rxUpdated.R"))
      cat(".rxUpdated <- new.env(parent=emptyenv())\n")
      sink()
    }
    # Clean up the scratch area; drop inst/ entirely if now empty.
    unlink(devtools::package_file("inst/rx"), recursive = TRUE, force = TRUE)
    if (length(list.files(devtools::package_file("inst"))) == 0) {
      unlink(devtools::package_file("inst"), recursive = TRUE, force = TRUE)
    }
    return(invisible(TRUE))
  } else {
    ## Single model: recompile for this package and store via use_data().
    .modName <- as.character(substitute(obj))
    .pkg <- basename(usethis::proj_get())
    .env <- new.env(parent = baseenv())
    assign(.modName, RxODE(.norm2(obj), package = .pkg, modName = .modName), .env)
    # Remember where the generated C source lives for the src/ copy step.
    assignInMyNamespace(".rxUseCdir", dirname(rxC(.env[[.modName]])))
    assign("internal", internal, .env)
    assign("overwrite", overwrite, .env)
    assign("compress", compress, .env)
    eval(parse(text = sprintf("usethis::use_data(%s, internal=internal, overwrite=overwrite, compress=compress)", .modName)), envir = .env)
  }
}
#' Creates a package from compiled RxODE models
#'
#' Creates a package skeleton under `wd`, adds RxODE as a
#' LinkingTo/Depends dependency, compiles each supplied model into the
#' package via `rxUse()`, writes configure scripts that regenerate the
#' model sources at build time, and finally installs/builds the package
#' according to `action`.
#'
#' @param ... Models to build a package from
#' @param package String of the package name to create
#' @param wd directory in which the package directory is created
#'   (defaults to the current working directory)
#' @param action Type of action to take after package is created
#' @param name Full name of author
#' @param license is the type of license for the package.
#' @inheritParams usethis::create_package
#' @inheritParams RxODE
#' @author Matthew Fidler
#' @return this function returns nothing and is used for its side effects
#' @export
rxPkg <- function(..., package,
                  wd = getwd(),
                  action = c("install", "build", "binary", "create"),
                  license = c("gpl3", "lgpl", "mit", "agpl3"),
                  name = "Firstname Lastname",
                  fields = list()) {
  if (missing(package)) {
    stop("'package' needs to be specified")
  }
  action <- match.arg(action)
  license <- match.arg(license)
  # Working directory and options are mutated below; restore both on exit.
  .owd <- getwd()
  .op <- options()
  on.exit({
    setwd(.owd)
    options(.op)
  })
  .dir <- wd
  if (!dir.exists(.dir)) {
    dir.create(.dir)
  }
  setwd(.dir)
  # NOTE(review): scalar ifelse() works here but evaluates getOption()
  # even when `name` is supplied; a plain if/else would be clearer.
  options(
    usethis.description = list(`Title` = "This is generated from RxODE"),
    usethis.full_name = ifelse(missing(name), getOption("usethis.full_name", "Firstname Lastname"), name)
  )
  .dir2 <- file.path(.dir, package)
  usethis::create_package(.dir2,
    fields = fields,
    rstudio = FALSE,
    roxygen = TRUE,
    check_name = TRUE,
    open = FALSE
  )
  setwd(.dir2)
  usethis::use_package("RxODE", "LinkingTo")
  usethis::use_package("RxODE", "Depends")
  if (license == "gpl3") {
    usethis::use_gpl3_license()
  } else if (license == "lgpl") {
    usethis::use_lgpl_license()
  } else if (license == "agpl3") {
    usethis::use_agpl3_license()
  } else if (license == "mit") {
    usethis::use_mit_license()
  }
  # Mark the new package as needing compilation in its DESCRIPTION.
  .p <- devtools::package_file("DESCRIPTION")
  writeLines(c(
    readLines(.p),
    "NeedsCompilation: yes",
    "Biarch: true"
  ), .p)
  ## Now use rxUse for each item
  # Pull the unnamed arguments (the models in `...`) out of the call,
  # evaluate each in the caller's frame under its own name, and feed it
  # to rxUse() so the data object keeps that name.
  .env <- new.env()
  .lst <- as.list(match.call()[-1])
  .w <- which(names(.lst) == "")
  .lst <- .lst[.w]
  for (.i in seq_along(.lst)) {
    .v <- as.character(deparse(.lst[[.i]]))
    assign(.v, eval(.lst[[.i]], envir = parent.frame(1)), .env)
    print(.env[[.v]])
    eval(parse(text = sprintf("rxUse(%s)", .v)), envir = .env)
  }
  ## Final rxUse to generate all code
  rxUse()
  # Ensure rxUpdated.R carries the @useDynLib/@import roxygen tags that
  # register the package DLL.
  .p <- file.path(devtools::package_file("R"), "rxUpdated.R")
  .f <- readLines(.p)
  .w <- which(regexpr("@useDynLib", .f) != -1)
  if (length(.w) == 0) {
    .f <- c(
      paste0("#' @useDynLib ", package, ", .registration=TRUE"),
      "#' @import RxODE",
      .f
    )
    writeLines(.f, .p)
  }
  devtools::document()
  # configure/configure.win regenerate the model C sources at build time.
  if (!file.exists("configure.win")) {
    writeLines(c(
      "#!/bin/sh",
      "echo \"unlink('src', recursive=TRUE);RxODE::rxUse()\" > build.R",
      "${R_HOME}/bin/Rscript build.R",
      "rm build.R"
    ), "configure.win")
  }
  if (!file.exists("configure")) {
    writeLines(c(
      "#!/bin/sh",
      "echo \"unlink('src', recursive=TRUE);RxODE::rxUse()\" > build.R",
      "${R_HOME}/bin/Rscript build.R",
      "rm build.R"
    ), "configure")
    if (!file.exists("configure.ac")) {
      writeLines(
        "## dummy autoconf script",
        "configure.ac"
      )
    }
  }
  # action == "create" falls through: package is left on disk untouched.
  if (action == "install") {
    devtools::install()
  } else if (action == "build") {
    devtools::build()
  } else if (action == "binary") {
    devtools::build(binary = TRUE)
  }
  invisible()
}
| /R/modlib.R | no_license | cran/RxODE | R | false | false | 13,972 | r | .pkgModelCurrent <- TRUE
# Overwrite .pkgModelCurrent inside this package's namespace.
# (Duplicate of the definition earlier in this file -- the dump repeats
# /R/modlib.R.)
.setPkgModels <- function(value) { ## For testing
  assignInMyNamespace(".pkgModelCurrent", value)
}
# Normalized model string for `obj`: use a `.linCmtM` string stored on
# an RxODE object when present, otherwise the unnamed "normModel" entry
# of the model variables.  (Duplicate of the earlier definition.)
.norm2 <- function(obj) {
  if (inherits(obj, "RxODE")) {
    if (exists(".linCmtM", obj)) {
      return(get(".linCmtM", obj))
    }
  }
  setNames(RxODE::rxModelVars(obj)$model["normModel"], NULL)
}
# Best-effort writability check; any error from assertthat degrades to
# FALSE.  (Duplicate of the earlier definition.)
.isWritable <- function(...) {
  .ret <- try(assertthat::is.writeable(...), silent = TRUE)
  if (inherits(.ret, "try-error")) {
    .ret <- FALSE
  }
  .ret
}
# Locate a writable "inst"-like directory for obj's package, falling
# back to ~/.rxCache/ and rxTempDir() when the package location is
# read-only.  (Duplicate of the earlier definition.)
.rxPkgInst <- function(obj) {
  .wd <- getwd()
  if (regexpr(obj$package, .wd) != -1) {
    # Inside the package source tree: truncate after the package name.
    .inst <- gsub(paste0("(", obj$package, ").*"), "\\1", .wd)
  } else {
    .inst <- system.file(package = obj$package)
  }
  if (.isWritable(.inst)) {
    if (regexpr("inst$", .inst) != -1) {
      return(.inst)
    }
    .inst2 <- file.path(.inst, "inst")
    if (file.exists(.inst2)) {
      return(.inst2)
    }
    .html <- file.path(.inst, "html")
    if (file.exists(.html)) {
      # Looks like an installed package; use it as-is.
      return(.inst)
    }
    return(.inst2)
  } else {
    .inst <- "~/.rxCache/"
    if (.isWritable(.inst)) {
      return(.inst)
    }
    return(rxTempDir())
  }
}
# "rx" subdirectory of the writable package location for `obj`.
# (Duplicate of the earlier definition.)
.rxPkgDir <- function(obj) {
  return(file.path(.rxPkgInst(obj), "rx"))
}
# DLL path for a packaged model; prefers the loaded package DLL when the
# model object is valid, otherwise the copy under obj$mdir.  Side
# effect: sets obj$mdir.  (Duplicate of the earlier definition.)
.rxPkgDll <- function(obj) {
  obj$mdir <- .rxPkgDir(obj)
  .pkgInfo <- getLoadedDLLs()[[obj$package]]
  if (!all(is.null(.pkgInfo))) {
    if (obj$isValid()) {
      # Unclass DLLInfo so $path extraction works.
      .tmp <- .pkgInfo
      class(.tmp) <- "list"
      return(.tmp$path)
    } else {
      return(file.path(obj$mdir, basename(obj$rxDll$dll)))
    }
  } else {
    return(file.path(obj$mdir, basename(obj$rxDll$dll)))
  }
}
# DLL base name with its extension replaced by an arch-qualified
# "_new_<arch>_model_vars" suffix.  (Duplicate of the earlier definition.)
.rxNewMvStr <- function(obj) {
  gsub("[.].*", paste0("_new_", .Platform$r_arch, "_model_vars"), basename(obj$rxDll$dll))
}
# TRUE when any package in `pkg` is attached (sessionInfo's otherPkgs);
# loaded-only namespaces are deliberately ignored.  (Duplicate of the
# earlier definition.)
.rxPkgLoaded <- function(pkg) {
  .si <- sessionInfo()
  return(length(intersect(pkg, c(
    names(.si$otherPkgs) ## ,names(.si$loadedOnly)
  ))) != 0)
}
# Counter environment used by rxUse() to give each copied model C file a
# unique "_rxp<i>" symbol prefix.  (Duplicate of the earlier definition.)
.rxUseI <- new.env(parent = emptyenv())
# Counter starts at 1.
.rxUseI$i <- 1L
# Directory of the most recently compiled model's C source.
.rxUseCdir <- ""
#' Use model object in your package
#' @param obj model to save.
#' @param internal If this is run internally. By default this is FALSE
#' @inheritParams usethis::use_data
#' @return Nothing; This is used for its side effects and shouldn't be called by a user
#' @export
rxUse <- function(obj, overwrite = TRUE, compress = "bzip2",
internal = FALSE) {
rxReq("usethis")
rxReq("devtools")
internal <- internal
if (missing(obj)) {
.env <- new.env()
assign("internal", internal, .env)
assign("overwrite", overwrite, .env)
assign("compress", compress, .env)
sapply(list.files(devtools::package_file("inst/rx"), full.names = TRUE),
unlink,
force = TRUE, recursive = TRUE
)
.models <- NULL
for (.f in list.files(
path = devtools::package_file("data"),
pattern = "\\.rda$", full.names = TRUE
)) {
load(.f, envir = .env)
.f2 <- basename(.f)
.f2 <- substr(.f2, 0, nchar(.f2) - 4)
if (is(.env[[.f2]], "RxODE")) {
.env[[.f2]]$package <- NULL
.minfo(sprintf("recompile '%s'", .f2))
.models <- c(.models, .f2)
eval(parse(text = sprintf("rxUse(%s, internal=internal, overwrite=overwrite, compress=compress)", .f2)),
envir = .env
)
.docFile <- file.path(devtools::package_file("R"), paste0(.f2, "-doc.R"))
if (!file.exists(.docFile)) {
(sprintf("creating documentation '%s'", .docFile))
sink(.docFile)
.tmp <- .env[[.f2]]
.mv <- rxModelVars(.tmp)
cat(sprintf("#' %s RxODE model\n", .f2))
cat("#'\n")
cat(sprintf(
"#' @format An \\emph{RxODE} model with %s parameters, %s ODE states, and %s calc vars.\n",
length(.tmp$params), length(.tmp$state) + .mv$extraCmt, length(.tmp$lhs)
))
cat("#'\n")
cat(sprintf("#'\\emph{Parameters (%s$params)}\n", .f2))
cat("#'\n")
cat("#' \\describe{\n")
.def <- rxInits(.tmp)
.defs <- paste0(" (default=", .def, ")")
.defs[is.na(.def)] <- ""
cat(paste(paste0("#' \\item{", .tmp$params, "}{", .defs, "}\n"), collapse = ""))
cat("#'}\n")
.state <- .tmp$state
##
if (.mv$extraCmt == 2) {
.state <- c(.state, "depot", "central")
} else if (.mv$extraCmt == 1) {
.state <- c(.state, "central")
}
if (length(.state) > 0) {
cat("#'\n")
cat(sprintf("#' \\emph{State %s$state}\n", .f2))
cat("#'\n")
cat("#' \\describe{\n")
cat(paste(paste0("#' \\item{", .state, "}{ (=", seq_along(.state), ")}\n"), collapse = ""))
cat("#' }\n")
}
.lhs <- .tmp$lhs
if (length(.lhs) > 0) {
cat("#'\n")
cat(sprintf("#' \\emph{Calculated Variables %s$lhs}\n", .f2))
cat("#'\n")
cat("#' \\describe{\n")
cat(paste(paste0("#' \\item{", .lhs, "}{}\n"), collapse = ""))
cat("#' }\n")
}
cat("#'\n")
cat("#' \\emph{Model Code}\n") # sprintf(,.f2)
cat("#'\n")
.code <- deparse(body(eval(parse(text = paste("function(){", .norm2(.tmp), "}")))))
.code[1] <- "RxODE({"
.code[length(.code)] <- "})"
cat(paste(paste0("#' ", .code, "\n"), collapse = ""))
cat("#'\n")
cat(paste(paste0("#' @seealso \\code{\\link[RxODE]{eventTable}}, \\code{\\link[RxODE]{et}}, \\code{\\link[RxODE]{rxSolve}}, \\code{\\link[RxODE]{RxODE}}\n")))
cat("#' \n")
cat("#' @examples\n")
cat("#' ## Showing the model code\n")
cat(sprintf("#' summary(%s)\n", .f2))
cat("#'\n")
cat(sprintf('"%s"\n', .f2))
sink()
}
}
}
if (!dir.exists(devtools::package_file("src"))) {
dir.create(devtools::package_file("src"), recursive = TRUE)
}
.pkg <- basename(usethis::proj_get())
.rx <- loadNamespace("RxODE")
sapply(
list.files(.rxUseCdir, pattern = "[.]c", full.names = TRUE),
function(x) {
.minfo(sprintf("copy '%s'", basename(x)))
.env <- .rx$.rxUseI
.rxUseI <- .env$i
.f0 <- gsub(
"^#define (.*) _rx(.*)$",
paste0("#define \\1 _rxp", .rxUseI, "\\2"), readLines(x)
)
assign("i", .rxUseI + 1, envir = .env)
.f0 <- c("#include <RxODE.h>\n#include <RxODE_model_shared.h>", .f0)
.w <- which(.f0 == "#include \"extraC.h\"")
if (length(.w) > 0) .f0 <- .f0[-.w[1]]
writeLines(text = .f0, con = file.path(devtools::package_file("src"), basename(x)))
}
)
.inits <- paste0("R_init0_", .pkg, "_", .models)
.tmp <- paste0("{\"", .pkg, "_", .models, "_model_vars\", (DL_FUNC) &", .pkg, "_", .models, "_model_vars, 0},\\")
.tmp[length(.tmp)] <- substr(.tmp[length(.tmp)], 0, nchar(.tmp[length(.tmp)]) - 1)
.extraC <- c(
"#define compiledModelCall \\",
.tmp,
paste0("SEXP ", .pkg, "_", .models, "_model_vars();"),
paste0("void ", .inits, "();"),
paste0("void R_init0_", .pkg, "_RxODE_models(){"),
paste0(" ", .inits, "();"),
"}"
)
sink(file.path(devtools::package_file("src"), paste0(.pkg, "_compiled.h")))
if (.pkg == "RxODE") {
cat("#include <R.h>\n#include <Rinternals.h>\n#include <stdlib.h> // for NULL\n#include <R_ext/Rdynload.h>\n#include \"../inst/include/RxODE.h\"\n#include \"../inst/include/RxODE_model_shared.h\"\n")
} else {
cat("#include <R.h>\n#include <Rinternals.h>\n#include <stdlib.h> // for NULL\n#include <R_ext/Rdynload.h>\n#include <RxODE.h>\n#include <RxODE_model_shared.h>\n")
}
cat(paste(.extraC, collapse = "\n"))
cat("\n")
sink()
.files <- list.files(devtools::package_file("src"))
.files <- .files[regexpr("RxODE_model_shared", .files) == -1]
if (all(regexpr(paste0("^", .pkg), .files) != -1)) {
.minfo(sprintf("only compiled models in this package, creating '%s_init.c'", .pkg))
sink(file.path(devtools::package_file("src"), paste0(.pkg, "_init.c")))
cat("#include <R.h>\n#include <Rinternals.h>\n#include <stdlib.h> // for NULL\n#include <R_ext/Rdynload.h>\n")
cat("#include <RxODE.h>\n")
cat("#include <RxODE_model_shared.h>\n")
cat(paste0('#include "', .pkg, '_compiled.h"\n'))
cat(sprintf("void R_init_%s(DllInfo *info){\n", .pkg))
cat(sprintf(" R_init0_%s_RxODE_models();\n", .pkg))
cat(" static const R_CallMethodDef callMethods[] = {\n compiledModelCall\n {NULL, NULL, 0}\n };\n")
cat(" R_registerRoutines(info, NULL, callMethods, NULL, NULL);\n")
cat(" R_useDynamicSymbols(info,FALSE);\n")
cat("}\n")
cat(paste(paste0("void R_unload_", .pkg, "_", .models, "(DllInfo *info);\n"), collapse = ""))
cat(sprintf("void R_unload_%s(DllInfo *info){\n", .pkg))
cat(paste(paste0(" R_unload_", .pkg, "_", .models, "(info);\n"), collapse = ""))
cat("}\n")
sink()
}
if (!file.exists(devtools::package_file("R/rxUpdated.R")) && .pkg != "RxODE") {
sink(devtools::package_file("R/rxUpdated.R"))
cat(".rxUpdated <- new.env(parent=emptyenv())\n")
sink()
}
unlink(devtools::package_file("inst/rx"), recursive = TRUE, force = TRUE)
if (length(list.files(devtools::package_file("inst"))) == 0) {
unlink(devtools::package_file("inst"), recursive = TRUE, force = TRUE)
}
return(invisible(TRUE))
} else {
.modName <- as.character(substitute(obj))
.pkg <- basename(usethis::proj_get())
.env <- new.env(parent = baseenv())
assign(.modName, RxODE(.norm2(obj), package = .pkg, modName = .modName), .env)
assignInMyNamespace(".rxUseCdir", dirname(rxC(.env[[.modName]])))
assign("internal", internal, .env)
assign("overwrite", overwrite, .env)
assign("compress", compress, .env)
eval(parse(text = sprintf("usethis::use_data(%s, internal=internal, overwrite=overwrite, compress=compress)", .modName)), envir = .env)
}
}
#' Creates a package from compiled RxODE models
#'
#' Builds a new package skeleton with \code{usethis}, registers each supplied
#' model via \code{rxUse()}, documents the package, and finally
#' installs/builds it according to \code{action}.
#'
#' @param ... Models to build a package from
#' @param package String of the package name to create
#' @param wd Working directory in which the package directory is created;
#'     defaults to the current working directory.
#' @param action Type of action to take after package is created
#' @param name Full name of author
#' @param license is the type of license for the package.
#' @inheritParams usethis::create_package
#' @inheritParams RxODE
#' @author Matthew Fidler
#' @return this function returns nothing and is used for its side effects
#' @export
rxPkg <- function(..., package,
                  wd = getwd(),
                  action = c("install", "build", "binary", "create"),
                  license = c("gpl3", "lgpl", "mit", "agpl3"),
                  name = "Firstname Lastname",
                  fields = list()) {
  if (missing(package)) {
    stop("'package' needs to be specified")
  }
  action <- match.arg(action)
  license <- match.arg(license)
  # Both the working directory and global options are mutated below;
  # restore them no matter how this function exits.
  .owd <- getwd()
  .op <- options()
  on.exit({
    setwd(.owd)
    options(.op)
  })
  .dir <- wd
  if (!dir.exists(.dir)) {
    dir.create(.dir)
  }
  setwd(.dir)
  # Seed the DESCRIPTION metadata consumed by usethis::create_package().
  options(
    usethis.description = list(`Title` = "This is generated from RxODE"),
    usethis.full_name = ifelse(missing(name), getOption("usethis.full_name", "Firstname Lastname"), name)
  )
  .dir2 <- file.path(.dir, package)
  usethis::create_package(.dir2,
    fields = fields,
    rstudio = FALSE,
    roxygen = TRUE,
    check_name = TRUE,
    open = FALSE
  )
  setwd(.dir2)
  # Compiled models both link against and depend on RxODE.
  usethis::use_package("RxODE", "LinkingTo")
  usethis::use_package("RxODE", "Depends")
  if (license == "gpl3") {
    usethis::use_gpl3_license()
  } else if (license == "lgpl") {
    usethis::use_lgpl_license()
  } else if (license == "agpl3") {
    usethis::use_agpl3_license()
  } else if (license == "mit") {
    usethis::use_mit_license()
  }
  # The generated package ships compiled C code.
  .p <- devtools::package_file("DESCRIPTION")
  writeLines(c(
    readLines(.p),
    "NeedsCompilation: yes",
    "Biarch: true"
  ), .p)
  ## Now use rxUse for each item
  # Only the unnamed arguments in ... are treated as models; each is evaluated
  # in the caller's frame and registered via rxUse().
  .env <- new.env()
  .lst <- as.list(match.call()[-1])
  .w <- which(names(.lst) == "")
  .lst <- .lst[.w]
  for (.i in seq_along(.lst)) {
    .v <- as.character(deparse(.lst[[.i]]))
    assign(.v, eval(.lst[[.i]], envir = parent.frame(1)), .env)
    print(.env[[.v]])
    eval(parse(text = sprintf("rxUse(%s)", .v)), envir = .env)
  }
  ## Final rxUse to generate all code
  rxUse()
  # Ensure the package registers its native routines and imports RxODE by
  # injecting roxygen tags into R/rxUpdated.R when @useDynLib is missing.
  .p <- file.path(devtools::package_file("R"), "rxUpdated.R")
  .f <- readLines(.p)
  .w <- which(regexpr("@useDynLib", .f) != -1)
  if (length(.w) == 0) {
    .f <- c(
      paste0("#' @useDynLib ", package, ", .registration=TRUE"),
      "#' @import RxODE",
      .f
    )
    writeLines(.f, .p)
  }
  devtools::document()
  # Configure scripts regenerate src/ from the stored models at install time.
  if (!file.exists("configure.win")) {
    writeLines(c(
      "#!/bin/sh",
      "echo \"unlink('src', recursive=TRUE);RxODE::rxUse()\" > build.R",
      "${R_HOME}/bin/Rscript build.R",
      "rm build.R"
    ), "configure.win")
  }
  if (!file.exists("configure")) {
    writeLines(c(
      "#!/bin/sh",
      "echo \"unlink('src', recursive=TRUE);RxODE::rxUse()\" > build.R",
      "${R_HOME}/bin/Rscript build.R",
      "rm build.R"
    ), "configure")
    if (!file.exists("configure.ac")) {
      writeLines(
        "## dummy autoconf script",
        "configure.ac"
      )
    }
  }
  # Post-creation action; "create" leaves the package on disk untouched.
  if (action == "install") {
    devtools::install()
  } else if (action == "build") {
    devtools::build()
  } else if (action == "binary") {
    devtools::build(binary = TRUE)
  }
  invisible()
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/random.R
\name{rdm}
\alias{rdm}
\title{Dirichlet-multinomial random sample}
\usage{
rdm(n = NULL, size, alpha, probs = FALSE)
}
\arguments{
\item{n}{sample size}
\item{size}{vector giving the multinomial sampling sizes; the vector is recycled so that its length equals the parameter \code{n}}
\item{alpha}{Dirichlet-multinomial parameter}
\item{probs}{logical indicating whether multinomial probabilities should be returned}
}
\value{
Dirichlet-multinomial random sample
}
\description{
Dirichlet-multinomial random sample
}
\examples{
rdm(100, 1000, c(1,1,1))
rdm(size = c(1000, 100, 10, 2, 1), alpha = c(1,1,1))
}
| /man/rdm.Rd | no_license | mcomas/coda.count | R | false | true | 674 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/random.R
\name{rdm}
\alias{rdm}
\title{Dirichlet-multinomial random sample}
\usage{
rdm(n = NULL, size, alpha, probs = FALSE)
}
\arguments{
\item{n}{sample size}
\item{size}{vector to set the multinomial sampling size. vector is reused to have length equal parameter n}
\item{alpha}{Dirichlet-multinomial parameter}
\item{probs}{logical indicating whether multinomial probabilities should be returned}
}
\value{
Dirichlet-multinomial random sample
}
\description{
Dirichlet-multinomial random sample
}
\examples{
rdm(100, 1000, c(1,1,1))
rdm(size = c(1000, 100, 10, 2, 1), alpha = c(1,1,1))
}
|
setwd("C:/R_lecture")
getwd()
.libPaths("C:/R_lecture/Lib")
## Let's try out natural language processing features
## We use the KoNLP package (note: the 'o' is lowercase)
# Korean Natural Language Process
# The package ships with built-in dictionaries.
# Three dictionaries are included:
# system dictionary (~280k entries), Sejong dictionary (~320k), NIADIC (~980k)
# KoNLP relies on Java, so a JRE must be installed on the system,
# and the R package must be told where the JRE is installed.
# To make the JRE discoverable,
# the JAVA_HOME environment variable must be set.
# Windows: This PC > right-click > Properties > Advanced system settings >
# Environment Variables > New; variable name JAVA_HOME,
# variable value = path to the JRE installation, then OK.
# For English NLP, use the openNLP and Snowball packages instead.
install.packages("KoNLP")
library(KoNLP)
useNIADic() # select the NIADIC dictionary
txt <- readLines("C:/R_lecture/Data/hiphop.txt",
                 encoding = "UTF-8")
head(txt) # e.g. "\"보고 싶다": the embedded quote is a literal character from the file
tail(txt)
# The data loaded correctly.
# Special characters must be stripped before analysis!
# String handling? Use stringr.
library(stringr)
# Use a regular expression to find all special characters and replace them with " "
txt <- str_replace_all(txt,"\\W"," ")
# \\W : regex class for non-word (special) characters -- capital W
# Extract only the nouns
nouns <- extractNoun(txt)
head(nouns)
# The extracted nouns are returned as a list
length(nouns)
# Flatten the list into a vector
words <- unlist(nouns) # list -> vector
head(words)
length(words)
# For the word cloud, keep only frequently occurring nouns
head(table(words))
wordCloud <- table(words)
df = as.data.frame(wordCloud,
                   stringsAsFactors = F)
View(df)
ls(df)
# Keep only the 20 most frequent words.
# Single-character words carry little meaning => drop them,
# keeping only words of two or more characters.
library(dplyr)
word_df <- df %>%
  filter(nchar(words) >= 2 ) %>% # nchar(): number of characters in a string
  arrange(desc(Freq)) %>%
  head(20)
# The data is ready -- now build the word cloud
install.packages("wordcloud")
library(wordcloud)
# Choose the colours used by the word cloud:
# set up a palette,
# taking 8 colours from the "Dark2" palette
pal <- brewer.pal(8,"Dark2")
# Word clouds are laid out randomly each time they are drawn,
# so by default the result is not reproducible.
# Fix the random seed so the same word cloud is produced on every run
# (ensures reproducibility).
set.seed(1) # any fixed seed works; the specific value is unimportant
wordcloud(words = word_df$words,
          freq = word_df$Freq,
          min.freq = 2, # only include words with frequency >= 2
          max.words = 100, # maximum number of words drawn
          random.order = F, # F places high-frequency words in the centre
          rot.per = 0.1, # fraction of words rotated
          scale = c(4,03), # font size range; NOTE(review): "03" parses as 3 -- 0.3 may have been intended, confirm
          colors = pal) # colour palette
### Next: crawl reviews for a specific movie from the Naver movie
### comment site and build a word cloud from them
| /R_lecture/Day_13/Day13_2(wordcloud).R | no_license | won-spec/TIL | R | false | false | 3,416 | r | setwd("C:/R_lecture")
getwd()
.libPaths("C:/R_lecture/Lib")
## 자연어 처리 기능을 이용해보자
#이것은 소리없는 아우성
## KoNLP package를 이용한다 o 는 소문자
# Korean Natural Language Process
# 해당패키지 않에 사전이 포함되어 있다
# 이것은 소리없는 아우성
# 3가지 사전이 포함
# 시스템사전(28만개), 세종사전(32만개), NIADIC사전(98만개)
# Java기능을 이용! 시스템에 JRE가 설치되어 있어야 함
# JRE를 설치한 위치를 R package에 알려주어야함
# JRE를 찾아서 사용하자자
# JAVA_HOME 환경변수를 설정해야함.
# 폴더 내피씨 우클릭 속성 고급시스템설정환경변수 새로만들기
# 변수이름 JAVA_HOME , 변수값 JRE 파일 주소입력, 확인
# 영문 NLP는 openNLP, Snowball 패키지 이용
install.packages("KoNLP")
library(KoNLP)
useNIADic() #사전 선택
txt <- readLines("C:/R_lecture/Data/hiphop.txt",
encoding = "UTF-8")
head(txt) # "\"보고 싶다" 두번재 큰따옴표는 파일에서의 문자
tail(txt)
#데이터가 정상적으로 들어옴
#특수문자가 포함되어 있으면 제거를 해야함!
#문자열 처리할때? stringr
library(stringr)
#정규 표현식을 이용해서 특수문자를 모두 찾아서 ""로 변환
txt <- str_replace_all(txt,"\\W"," ")
# \\W : 특수기호를 나타내는 정규표현식 대문자
#함수를 이요해서 명사만 뽑아보자
nouns <- extractNoun(txt)
head(nouns)
#명사를 추출해서 List형태로 저장
length(nouns)
#list형태를 vector로 변환
words <- unlist(nouns) #list를 vector로 변환
head(words)
length(words)
#워드클라우드를 만들기 위해 많이(빈도) 등장하는 명사만 추출
head(table(words))
wordCloud <- table(words)
df = as.data.frame(wordCloud,
stringsAsFactors = F)
View(df)
ls(df)
#빈도수가 높은 상위 20개의 단어들만 추출
#한글자 짜리는 의미가 없다고 판단 => 제거
#두글자 이상만 추출
library(dplyr)
word_df <- df %>%
filter(nchar(words) >= 2 ) %>% #nchar() :글자의 개수를 확인하는 함수
arrange(desc(Freq)) %>%
head(20)
#데이터가 준비되었으니 워드클라우드를 만들어보자
install.packages("wordcloud")
library(wordcloud)
#워드 클라우드에서 사용할 색상에 대한
#팔래트를 설정
# Dark2라는 색상목록에서 8개의 색상을 추출
pal <- brewer.pal(8,"Dark2")
#워드 클라우드는 만들때마다 랜덤하게 만들어진다.
#랜덤하게 생성되기 때문에 재현성을 확보할 수 없다.
#랜덤함수의 시드값을 고정시켜서 항상 같은 워드 클라우드가
#만들어지게 설정하자(재현성 확보하자)
set.seed(1) #시드값을 정해주는 것이 의미, 어떤숫자인지는 중요 X
wordcloud(words = word_df$words,
freq = word_df$Freq,
min.freq = 2, #적어도 2이상의 빈도를 선택
max.words = 100, #최대 입력하는 단어수
random.order = F, #고빈도 단어를 중앙배치?원하면 =>F
rot.per = 0.1, #회전시킬 단어들의 정도
scale = c(4,03), #글자 크기의 범위
colors = pal) #색상설정?
### 네이버 영화 댓글 사이트에서 특정영화에 대한 review를
### crawling 해서 wordcloud를 만들어보자
|
# Crawl swimrankings.net for major swimming meets, save each meet's detail
# pages (HTML) and a JSON index of the meets for downstream python processing.
library(RCurl)
library(XML)
library(stringr)
library(jsonlite)

# Major swimming events on swimrankings.net:
#   Olympics (every 4th year from 2000: meetType 1)
#   World Championships (every odd year: meetType 2)
#   European Championships (every even year: meetType 3)
#   Commonwealth Games (non-olympic 4th years 2006/2010/2014: meetType 5)
#   Pan Pacific Championships (non-olympic 4th years: meetType 7450054)
meetTypes <- c('1', '2', '3', '5', '7450054')

meetList <- list()
meetIdsAll <- c()

for (mt in meetTypes) {
  print(mt)
  html <- getURL(paste("https://www.swimrankings.net/index.php?page=meetSelect&selectPage=BYTYPE&nationId=0&meetType=", mt, sep = ""))
  doc <- htmlParse(html, asText = TRUE)
  # Data-quality check: the meetQuality5.png icon marks a fully covered meet.
  qualities <- xpathSApply(doc, "//td[@class='name']/img", xmlGetAttr, 'src')[1:10]
  hasQuality <- str_detect(qualities, '5')  # vectorized (was an element-wise loop)
  count <- sum(hasQuality) + 1
  print(count)
  # Meet ids are the third '='-separated field of each meet link.
  links <- xpathSApply(doc, "//td[@class='name']/a", xmlGetAttr, 'href')[1:count]
  meetIds <- vapply(str_split(links, "="), function(parts) parts[3], character(1))
  # Meet info rows (skip the table header row).
  meets <- xpathSApply(doc, "//table[@class='meetSearch']/tr", xmlValue)[2:count]
  for (i in seq_along(meets)) {  # was 1:length(meets); safe for empty input
    meet <- meets[i]
    year <- str_extract(meet, "(1|2)[0-9]{3}")
    print(as.integer(year))
    # Only keep meets from 2007 onwards.
    if ((as.integer(year) >= 2007) == TRUE) {
      # Append meet id to all meet ids
      meetIdsAll <- c(meetIdsAll, meetIds[i])
      # Build the meetList entry from the row text.
      remains <- unlist(str_split(meet, "50m"))[2]
      location <- str_extract(remains, "^.*\\([A-Z]*\\)")
      location <- str_replace(location, "\u00a0", " ")  # non-breaking space -> space
      name <- str_trim(unlist(str_split(remains, "\\)"))[2])
      print(name)
      print(meetIds[i])
      meetList[meetIds[i]] <- list(list(type = as.character(mt), year = year, location = location, name = name, id = meetIds[i]))
    }
  }
}

# Fetch every (meet, gender, style) detail page and save it for python.
genders <- c(1, 2)
# BUG FIX: the original used `'1' <- c(...)` inside list(), which does NOT name
# the list elements (it instead assigns stray variables `1`/`2` into the
# calling environment). `=` names them properly; `styles[gender]` below still
# selects by position, so downstream behavior is unchanged.
styles <- list("1" = c(1, 2, 3, 5, 8, 10, 11, 13, 14, 16, 17, 18, 19, 27, 29, 40),
               "2" = c(1, 2, 3, 5, 6, 10, 11, 13, 14, 16, 17, 18, 19, 27, 29, 40))

for (meet in meetIdsAll) {
  for (gender in genders) {
    for (style in unlist(styles[gender])) {
      # Only process meet ids still present in meetList (inaccessible meets
      # are dropped below).
      if (!is.null(meetList[[meet]])) {
        print(meet)
        url <- paste("https://www.swimrankings.net/index.php?page=meetDetail&meetId=",
                     meet,
                     "&gender=",
                     gender,
                     "&styleId=",
                     style, sep = "")
        html <- getURL(url)
        doc <- htmlParse(html, asText = TRUE)
        # Save only pages that do not require a login.
        if (xpathSApply(doc, "//p", xmlValue)[1] == "You need a valid Swimrankings account in order to access this site.") {
          # Remove the meet from the index so it is skipped from now on.
          meetList[meet] <- NULL
          print(c('not accessible', meet))
        } else {
          fileName <- paste("../python/R_results/html/", meet, "-", gender, "-", style, ".html", sep = "")
          sink(fileName)
          print(doc, type = 'html')
          sink()
          print(fileName)
        }
      }
    }
  }
}

# Convert the named list into an ordered, unnamed array for JSON output.
meetListArray <- list()
i <- 1
for (meet in meetList) {
  meetListArray[i] <- list(meet)
  i <- i + 1
}
# save as json file
write(toJSON(meetListArray), "../python/R_results/json/meets.json") | /R/swimmers.R | permissive | rogermt/swimmers-history | R | false | false | 3,726 | r | library(RCurl)
library(XML)
library(stringr)
library(jsonlite)
# Get the list of major swimming events
# Olympics (Every 4-th years: from 2000, meetType 1)
# World Championships (Every odd years: meetType 2)
# European Championships (Every even years: meetType 3)
# Commonwealth Games (Every non-olympic 4-th years: 2006, 2010, 2014: meetType 5)
# Pan Pacific Championships (Every non-olympic 4-th years: 2006, 2010, 2014: meetType 7450054)
# get meet info for data generation with python and visualization of webapp
meetTypes <- c('1', '2', '3', '5', '7450054')
meetList <- list()
meetIdsAll <- c()
for (mt in meetTypes) {
# Olympics
print(mt)
html <- getURL(paste("https://www.swimrankings.net/index.php?page=meetSelect&selectPage=BYTYPE&nationId=0&meetType=", mt, sep=""))
doc <- htmlParse(html, asText=TRUE)
# check data quality
qualities <- xpathSApply(doc, "//td[@class='name']/img", xmlGetAttr, 'src')[1:10]
hasQuality <- c()
for (q in qualities) {
hasQuality <- c(hasQuality, str_detect(q, '5')) #meetQuality5.png is the indicator
}
count <- sum(hasQuality) + 1
print (count)
# Get meet ids -- roughly cut 10 events
links <- xpathSApply(doc, "//td[@class='name']/a", xmlGetAttr, 'href')[1:count]
meetIds <- c()
for (link in links) {
id <- unlist(str_split(link, "="))[3]
meetIds <- c(meetIds, id)
}
# Get meet info
meets <- xpathSApply(doc, "//table[@class='meetSearch']/tr", xmlValue)[2:count]
for (i in 1:length(meets)) {
meet <- meets[i]
year <- str_extract(meet, "(1|2)[0-9]{3}")
print (as.integer(year))
# set the year to extract data
if ((as.integer(year) >= 2007) == TRUE) {
# Append meet id to all meet ids
meetIdsAll <- c(meetIdsAll, meetIds[i])
# meetList obj
remains <- unlist(str_split(meet, "50m"))[2]
location <- str_extract(remains, "^.*\\([A-Z]*\\)")
location <- str_replace(location, "\u00a0", " ")
name <- str_trim(unlist(str_split(remains, "\\)"))[2])
print(name)
print(meetIds[i])
meetList[meetIds[i]] <- list(list(type = as.character(mt), year = year, location = location, name = name, id = meetIds[i]))
}
}
}
# connect HTML pages and parse contents, later used in python
genders <- c(1, 2)
styles <- list( '1' <- c(1, 2, 3, 5, 8, 10, 11, 13, 14, 16, 17, 18, 19, 27, 29, 40),
'2' <- c(1, 2, 3, 5, 6, 10, 11, 13, 14, 16, 17, 18, 19, 27, 29, 40))
for (meet in meetIdsAll) {
for (gender in genders) {
for (style in unlist(styles[gender])) {
#do only valid meet id
if (!is.null(meetList[[meet]])) {
print(meet)
url <- paste("https://www.swimrankings.net/index.php?page=meetDetail&meetId=",
meet,
"&gender=",
gender,
"&styleId=",
style, sep="")
html <- getURL(url)
doc <- htmlParse(html, asText=TRUE)
#save only accessible sites
if (xpathSApply(doc, "//p", xmlValue)[1] == "You need a valid Swimrankings account in order to access this site.") {
#remove meet list
meetList[meet] = NULL
print(c('not accessible', meet))
} else {
fileName <- paste("../python/R_results/html/", meet, "-", gender, "-", style, ".html", sep="")
sink(fileName)
print(doc, type='html')
sink()
print(fileName)
}
}
}
}
}
# change list to ordered array
meetListArray = list()
i = 1
for (meet in meetList) {
meet
meetListArray[i] = list(meet)
i = i + 1
}
# save as json file
write(toJSON(meetListArray), "../python/R_results/json/meets.json") |
library(stringdist)
### Name: seq_sim
### Title: Compute similarity scores between sequences of integers
### Aliases: seq_sim
### ** Examples
# Compare integer sequences pairwise; the shorter list is recycled so each
# element of L1 is scored against an element of L2.
L1 <- list(1:3,2:4)
L2 <- list(1:3)
seq_sim(L1,L2,method="osa")
# note how missing values are handled (L2 is recycled over L1)
L1 <- list(c(1L,NA_integer_,3L),2:4,NA_integer_)
L2 <- list(1:3)
seq_sim(L1,L2)
| /data/genthat_extracted_code/stringdist/examples/seq_sim.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 358 | r | library(stringdist)
### Name: seq_sim
### Title: Compute similarity scores between sequences of integers
### Aliases: seq_sim
### ** Examples
L1 <- list(1:3,2:4)
L2 <- list(1:3)
seq_sim(L1,L2,method="osa")
# note how missing values are handled (L2 is recycled over L1)
L1 <- list(c(1L,NA_integer_,3L),2:4,NA_integer_)
L2 <- list(1:3)
seq_sim(L1,L2)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/venn_functions.R
\name{ellipse}
\alias{ellipse}
\title{A Helper Function Used by Venn4 to Define the Perimeter of an Ellipse}
\usage{
ellipse(x, y, a, b, alpha)
}
\arguments{
\item{x}{the x coordinate of the center of the ellipse.}
\item{y}{the y coordinate of the center of the ellipse.}
\item{a}{the x-direction radius.}
\item{b}{the y-direction radius.}
\item{alpha}{the angle of rotation of the ellipse}
}
\value{
points that define the perimeter of an ellipse.
}
\description{
Draws the ellipses used in venn4.
}
\examples{
plot(dga:::ellipse(0, 0, .5, .2, 1))
}
\author{
Kristian Lum \email{kl@hrdag.org}
}
\keyword{ellipse}
| /man/ellipse.Rd | no_license | HRDAG/DGA | R | false | true | 715 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/venn_functions.R
\name{ellipse}
\alias{ellipse}
\title{A Helper Function Used by Venn4 to Define the Perimeter of an Ellipse}
\usage{
ellipse(x, y, a, b, alpha)
}
\arguments{
\item{x}{the x coordinate of the center of the ellipse.}
\item{y}{the y coordinate of the center of the ellipse.}
\item{a}{the x-direction radius.}
\item{b}{the y-direction radius.}
\item{alpha}{the angle of rotation of the ellipse}
}
\value{
points that define the perimeter of an ellipse.
}
\description{
Draws the ellipses used in venn4.
}
\examples{
plot(dga:::ellipse(0, 0, .5, .2, 1))
}
\author{
Kristian Lum \email{kl@hrdag.org}
}
\keyword{ellipse}
|
library(randomGLM)
### Name: brainCancer
### Title: The brain cancer data set
### Aliases: brainCancer
### ** Examples
# Load the brain cancer data set bundled with randomGLM into the workspace.
data(brainCancer)
| /data/genthat_extracted_code/randomGLM/examples/brainCancer.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 144 | r | library(randomGLM)
### Name: brainCancer
### Title: The brain cancer data set
### Aliases: brainCancer
### ** Examples
data(brainCancer)
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# context.R: SparkContext driven functions
# Resolve a minimum partition count for an input source.
# When the caller passes NULL, fall back to the smaller of the context's
# default parallelism and 2; the result is always an integer.
getMinPartitions <- function(sc, minPartitions) {
  if (!is.null(minPartitions)) {
    return(as.integer(minPartitions))
  }
  defaultPar <- callJMethod(sc, "defaultParallelism")
  as.integer(min(defaultPar, 2))
}
#' Create an RDD from a text file.
#'
#' Reads a text file from HDFS, a local file system (available on all nodes),
#' or any Hadoop-supported file system URI and creates an RDD of strings from
#' it. The text files must be encoded as UTF-8.
#'
#' @param sc SparkContext to use
#' @param path Path of file to read. A vector of multiple paths is allowed.
#' @param minPartitions Minimum number of partitions to be created. If NULL, the default
#'        value is chosen based on available parallelism.
#' @return RDD where each item is of type \code{character}
#' @noRd
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' lines <- textFile(sc, "myfile.txt")
#'}
textFile <- function(sc, path, minPartitions = NULL) {
  # Normalize each user-supplied path, then join multiple paths into one
  # comma-separated string as expected by the JVM side.
  normalized <- suppressWarnings(normalizePath(path))
  joinedPath <- paste(normalized, collapse = ",")
  partitions <- getMinPartitions(sc, minPartitions)
  # The JVM returns a JavaRDD[String]; wrap it as an RDD of strings.
  RDD(callJMethod(sc, "textFile", joinedPath, partitions), "string")
}
#' Load an RDD saved as a SequenceFile containing serialized objects.
#'
#' The file to be loaded should be one that was previously generated by calling
#' saveAsObjectFile() of the RDD class.
#'
#' @param sc SparkContext to use
#' @param path Path of file to read. A vector of multiple paths is allowed.
#' @param minPartitions Minimum number of partitions to be created. If NULL, the default
#'        value is chosen based on available parallelism.
#' @return RDD containing serialized R objects.
#' @seealso saveAsObjectFile
#' @noRd
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' rdd <- objectFile(sc, "myfile")
#'}
objectFile <- function(sc, path, minPartitions = NULL) {
  # Normalize the path(s) and join them into one comma-separated string for
  # the JVM side.
  normalized <- suppressWarnings(normalizePath(path))
  joinedPath <- paste(normalized, collapse = ",")
  partitions <- getMinPartitions(sc, minPartitions)
  # The file holds serialized R objects, hence the "byte" serialization mode.
  RDD(callJMethod(sc, "objectFile", joinedPath, partitions), "byte")
}
# Compute a slice id for each of `length` elements so that the elements are
# distributed over `numSerializedSlices` groups, reimplementing the
# calculation in the positions() method of Spark's ParallelCollectionRDD.
#
# For numSerializedSlices = 22 and length = 50 this yields
#   0 0 2 2 4 4 6 6 6 9 9 11 11 13 13 15 15 15 18 18 20 20 22 22 22
#   25 25 27 27 29 29 31 31 31 34 34 36 36 38 38 40 40 40 43 43 45 45 47 47 47
# Notice the slice groups with 3 elements (e.g. 6, 15, 22) are roughly evenly
# spaced.
makeSplits <- function(numSerializedSlices, length) {
  if (numSerializedSlices <= 0) {
    return(1)
  }
  sliceIds <- as.numeric(seq_len(numSerializedSlices) - 1)
  # Slice i covers the half-open element range [start_i, end_i); its id is
  # repeated once for every element it covers.
  starts <- trunc((sliceIds * length) / numSerializedSlices)
  ends <- trunc(((sliceIds + 1) * length) / numSerializedSlices)
  rep(starts, times = ends - starts)
}
#' Create an RDD from a homogeneous list or vector.
#'
#' This function creates an RDD from a local homogeneous list in R. The elements
#' in the list are split into \code{numSlices} slices and distributed to nodes
#' in the cluster.
#'
#' If size of serialized slices is larger than spark.r.maxAllocationLimit or (200MiB), the function
#' will write it to disk and send the file name to JVM. Also to make sure each slice is not
#' larger than that limit, number of slices may be increased.
#'
#' In 2.2.0 we are changing how the numSlices are used/computed to handle
#' 1 < (length(coll) / numSlices) << length(coll) better, and to get the exact number of slices.
#' This change affects both createDataFrame and spark.lapply.
#' In the specific one case that it is used to convert R native object into SparkDataFrame, it has
#' always been kept at the default of 1. In the case the object is large, we are explicitly setting
#' the parallelism to numSlices (which is still 1).
#'
#' Specifically, we are changing to split positions to match the calculation in positions() of
#' ParallelCollectionRDD in Spark.
#'
#' @param sc SparkContext to use
#' @param coll collection to parallelize
#' @param numSlices number of partitions to create in the RDD
#' @return an RDD created from this collection
#' @noRd
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' rdd <- parallelize(sc, 1:10, 2)
#' # The RDD should contain 10 elements
#' length(rdd)
#'}
parallelize <- function(sc, coll, numSlices = 1) {
  # TODO: bound/safeguard numSlices
  # TODO: unit tests for if the split works for all primitives
  # TODO: support matrix, data frame, etc
  # Note, for data.frame, createDataFrame turns it into a list before it calls here.
  # Anything that is not already a plain list (or is a data.frame) is coerced
  # to a list before slicing.
  # nolint start
  # suppress lintr warning: Place a space before left parenthesis, except in a function call.
  if ((!is.list(coll) && !is.vector(coll)) || is.data.frame(coll)) {
  # nolint end
    if (is.data.frame(coll)) {
      message(paste("context.R: A data frame is parallelized by columns."))
    } else {
      if (is.matrix(coll)) {
        message(paste("context.R: A matrix is parallelized by elements."))
      } else {
        message(paste("context.R: parallelize() currently only supports lists and vectors.",
                      "Calling as.list() to coerce coll into a list."))
      }
    }
    coll <- as.list(coll)
  }
  sizeLimit <- getMaxAllocationLimit(sc)
  objectSize <- object.size(coll)
  len <- length(coll)
  # For large objects we make sure the size of each slice is also smaller than sizeLimit
  numSerializedSlices <- min(len, max(numSlices, ceiling(objectSize / sizeLimit)))
  # makeSplits() assigns one slice id per element; split() then groups the
  # elements by those ids.
  slices <- split(coll, makeSplits(numSerializedSlices, len))
  # Serialize each slice: obtain a list of raws, or a list of lists (slices) of
  # 2-tuples of raws
  serializedSlices <- lapply(slices, serialize, connection = NULL)
  # The RPC backend cannot handle arguments larger than 2GB (INT_MAX)
  # If serialized data is safely less than that threshold we send it over the RPC channel.
  # Otherwise, we write it to a file and send the file name
  if (objectSize < sizeLimit) {
    jrdd <- callJStatic("org.apache.spark.api.r.RRDD", "createRDDFromArray", sc, serializedSlices)
  } else {
    if (callJStatic("org.apache.spark.api.r.RUtils", "isEncryptionEnabled", sc)) {
      # With encryption enabled the data is streamed to the JVM over an
      # authenticated socket instead of a plain temp file.
      connectionTimeout <- as.numeric(Sys.getenv("SPARKR_BACKEND_CONNECTION_TIMEOUT", "6000"))
      # the length of slices here is the parallelism to use in the jvm's sc.parallelize()
      parallelism <- as.integer(numSlices)
      jserver <- newJObject("org.apache.spark.api.r.RParallelizeServer", sc, parallelism)
      authSecret <- callJMethod(jserver, "secret")
      port <- callJMethod(jserver, "port")
      conn <- socketConnection(
        port = port, blocking = TRUE, open = "wb", timeout = connectionTimeout)
      doServerAuth(conn, authSecret)
      writeToConnection(serializedSlices, conn)
      jrdd <- callJMethod(jserver, "getResult")
    } else {
      # Unencrypted path: hand the JVM a temp file, removed once it is read.
      fileName <- writeToTempFile(serializedSlices)
      jrdd <- tryCatch(callJStatic(
          "org.apache.spark.api.r.RRDD", "createRDDFromFile", sc, fileName, as.integer(numSlices)),
        finally = {
          file.remove(fileName)
      })
    }
  }
  RDD(jrdd, "byte")
}
# Maximum number of bytes allowed in a single allocation sent to the JVM,
# read from the "spark.r.maxAllocationLimit" configuration entry.
# Defaults to .Machine$integer.max / 10 (roughly 200MB) when unset.
getMaxAllocationLimit <- function(sc) {
  sparkConf <- callJMethod(sc, "getConf")
  defaultLimit <- toString(.Machine$integer.max / 10) # safe value: 200MB
  as.numeric(callJMethod(sparkConf, "get", "spark.r.maxAllocationLimit", defaultLimit))
}
# Write each serialized slice to the connection, framed as a big-endian
# 4-byte length prefix followed by the raw payload. The connection is always
# closed, even when a write fails.
writeToConnection <- function(serializedSlices, conn) {
  on.exit(close(conn), add = TRUE)
  lapply(serializedSlices, function(payload) {
    writeBin(as.integer(length(payload)), conn, endian = "big")
    writeBin(payload, conn, endian = "big")
  })
  invisible(NULL)
}
# Serialize the slices into a fresh temporary file (writeToConnection closes
# the connection for us) and return the file's path.
writeToTempFile <- function(serializedSlices) {
  path <- tempfile()
  writeToConnection(serializedSlices, file(path, "wb"))
  path
}
#' Include this specified package on all workers
#'
#' This function can be used to include a package on all workers before the
#' user's code is executed. This is useful in scenarios where other R package
#' functions are used in a function passed to functions like \code{lapply}.
#' NOTE: The package is assumed to be installed on every node in the Spark
#' cluster.
#'
#' @param sc SparkContext to use
#' @param pkg Package name
#' @noRd
#' @examples
#'\dontrun{
#' library(Matrix)
#'
#' sc <- sparkR.init()
#' # Include the matrix library we will be using
#' includePackage(sc, Matrix)
#'
#' generateSparse <- function(x) {
#'   sparseMatrix(i=c(1, 2, 3), j=c(1, 2, 3), x=c(1, 2, 3))
#' }
#'
#' rdd <- lapplyPartition(parallelize(sc, 1:2, 2L), generateSparse)
#' collect(rdd)
#'}
includePackage <- function(sc, pkg) {
  # The package is passed unquoted; capture its name as a string and append
  # it to the running list kept in the SparkR environment.
  pkgName <- as.character(substitute(pkg))
  existing <- if (exists(".packages", .sparkREnv)) .sparkREnv$.packages else list()
  .sparkREnv$.packages <- c(existing, pkgName)
}
#' Broadcast a variable to all workers
#'
#' Broadcast a read-only variable to the cluster, returning a \code{Broadcast}
#' object for reading it in distributed functions.
#'
#' @param sc Spark Context to use
#' @param object Object to be broadcast
#' @noRd
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' rdd <- parallelize(sc, 1:2, 2L)
#'
#' # Large Matrix object that we want to broadcast
#' randomMat <- matrix(nrow=100, ncol=10, data=rnorm(1000))
#' randomMatBr <- broadcastRDD(sc, randomMat)
#'
#' # Use the broadcast variable inside the function
#' useBroadcast <- function(x) {
#' sum(value(randomMatBr) * x)
#' }
#' sumRDD <- lapply(rdd, useBroadcast)
#'}
broadcastRDD <- function(sc, object) {
  # Record the variable's name as used by the caller, for bookkeeping.
  varName <- as.character(substitute(object))
  # Ship the serialized bytes to the JVM; wrap the handle in a Broadcast object.
  javaBroadcast <- callJMethod(sc, "broadcast", serialize(object, connection = NULL))
  broadcastId <- as.character(callJMethod(javaBroadcast, "id"))
  Broadcast(broadcastId, object, javaBroadcast, varName)
}
#' Set the checkpoint directory
#'
#' Set the directory under which RDDs are going to be checkpointed. The
#' directory must be a HDFS path if running on a cluster.
#'
#' @param sc Spark Context to use
#' @param dirName Directory path
#' @noRd
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' setCheckpointDir(sc, "~/checkpoint")
#' rdd <- parallelize(sc, 1:2, 2L)
#' checkpoint(rdd)
#'}
setCheckpointDirSC <- function(sc, dirName) {
  # normalizePath warns on non-existent/remote paths (e.g. HDFS); suppress it.
  checkpointPath <- suppressWarnings(normalizePath(dirName))
  invisible(callJMethod(sc, "setCheckpointDir", checkpointPath))
}
#' Add a file or directory to be downloaded with this Spark job on every node.
#'
#' The path passed can be either a local file, a file in HDFS (or other Hadoop-supported
#' filesystems), or an HTTP, HTTPS or FTP URI. To access the file in Spark jobs,
#' use spark.getSparkFiles(fileName) to find its download location.
#'
#' A directory can be given if the recursive option is set to true.
#' Currently directories are only supported for Hadoop-supported filesystems.
#' Refer Hadoop-supported filesystems at
#' \url{https://cwiki.apache.org/confluence/display/HADOOP2/HCFS}.
#'
#' Note: A path can be added only once. Subsequent additions of the same path are ignored.
#'
#' @rdname spark.addFile
#' @param path The path of the file to be added
#' @param recursive Whether to add files recursively from the path. Default is FALSE.
#' @examples
#'\dontrun{
#' spark.addFile("~/myfile")
#'}
#' @note spark.addFile since 2.1.0
spark.addFile <- function(path, recursive = FALSE) {
  # Suppress the warning normalizePath raises for non-local paths
  # (HDFS/HTTP/FTP URIs), which are still valid inputs here.
  localPath <- suppressWarnings(normalizePath(path))
  invisible(callJMethod(getSparkContext(), "addFile", localPath, recursive))
}
#' Get the root directory that contains files added through spark.addFile.
#'
#' @rdname spark.getSparkFilesRootDirectory
#' @return the root directory that contains files added through spark.addFile
#' @examples
#'\dontrun{
#' spark.getSparkFilesRootDirectory()
#'}
#' @note spark.getSparkFilesRootDirectory since 2.1.0
spark.getSparkFilesRootDirectory <- function() { # nolint
  onWorker <- Sys.getenv("SPARKR_IS_RUNNING_ON_WORKER") != ""
  if (onWorker) {
    # Workers receive the download location through the environment.
    Sys.getenv("SPARKR_SPARKFILES_ROOT_DIR")
  } else {
    # On the driver, ask the JVM SparkFiles helper directly.
    callJStatic("org.apache.spark.SparkFiles", "getRootDirectory")
  }
}
#' Get the absolute path of a file added through spark.addFile.
#'
#' @rdname spark.getSparkFiles
#' @param fileName The name of the file added through spark.addFile
#' @return the absolute path of a file added through spark.addFile.
#' @examples
#'\dontrun{
#' spark.getSparkFiles("myfile")
#'}
#' @note spark.getSparkFiles since 2.1.0
spark.getSparkFiles <- function(fileName) {
  name <- as.character(fileName)
  if (Sys.getenv("SPARKR_IS_RUNNING_ON_WORKER") != "") {
    # Worker side: resolve relative to the SparkFiles root directory.
    file.path(spark.getSparkFilesRootDirectory(), name)
  } else {
    # Driver side: delegate to the JVM SparkFiles helper.
    callJStatic("org.apache.spark.SparkFiles", "get", name)
  }
}
#' Run a function over a list of elements, distributing the computations with Spark
#'
#' Run a function over a list of elements, distributing the computations with Spark. Applies a
#' function in a manner that is similar to doParallel or lapply to elements of a list.
#' The computations are distributed using Spark. It is conceptually the same as the following code:
#' lapply(list, func)
#'
#' Known limitations:
#' \itemize{
#' \item variable scoping and capture: compared to R's rich support for variable resolutions,
#' the distributed nature of SparkR limits how variables are resolved at runtime. All the
#' variables that are available through lexical scoping are embedded in the closure of the
#' function and available as read-only variables within the function. The environment variables
#' should be stored into temporary variables outside the function, and not directly accessed
#' within the function.
#'
#' \item loading external packages: In order to use a package, you need to load it inside the
#' closure. For example, if you rely on the MASS module, here is how you would use it:
#' \preformatted{
#' train <- function(hyperparam) {
#' library(MASS)
#' lm.ridge("y ~ x+z", data, lambda=hyperparam)
#' model
#' }
#' }
#' }
#'
#' @rdname spark.lapply
#' @param list the list of elements
#' @param func a function that takes one argument.
#' @return a list of results (the exact type being determined by the function)
#' @examples
#'\dontrun{
#' sparkR.session()
#' doubled <- spark.lapply(1:10, function(x){2 * x})
#'}
#' @note spark.lapply since 2.0.0
spark.lapply <- function(list, func) {
  sc <- getSparkContext()
  # One partition per element, giving doParallel/lapply-like semantics.
  distributed <- map(parallelize(sc, list, length(list)), func)
  collectRDD(distributed)
}
#' Set new log level
#'
#' Set new log level: "ALL", "DEBUG", "ERROR", "FATAL", "INFO", "OFF", "TRACE", "WARN"
#'
#' @rdname setLogLevel
#' @param level New log level
#' @examples
#'\dontrun{
#' setLogLevel("ERROR")
#'}
#' @note setLogLevel since 2.0.0
setLogLevel <- function(level) {
  # Forward the level string straight to the JVM SparkContext.
  invisible(callJMethod(getSparkContext(), "setLogLevel", level))
}
#' Set checkpoint directory
#'
#' Set the directory under which SparkDataFrame are going to be checkpointed. The directory must be
#' a HDFS path if running on a cluster.
#'
#' @rdname setCheckpointDir
#' @param directory Directory path to checkpoint to
#' @seealso \link{checkpoint}
#' @examples
#'\dontrun{
#' setCheckpointDir("/checkpoint")
#'}
#' @note setCheckpointDir since 2.2.0
setCheckpointDir <- function(directory) {
  # normalizePath warns on non-existent/remote paths (e.g. HDFS); suppress it.
  normalized <- suppressWarnings(normalizePath(directory))
  invisible(callJMethod(getSparkContext(), "setCheckpointDir", normalized))
}
| /R/pkg/R/context.R | permissive | jeff303/spark | R | false | false | 16,688 | r | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# context.R: SparkContext driven functions
# Resolve the effective minimum partition count: when unspecified, default to
# the smaller of the cluster's default parallelism and 2.
getMinPartitions <- function(sc, minPartitions) {
  if (is.null(minPartitions)) {
    minPartitions <- min(callJMethod(sc, "defaultParallelism"), 2)
  }
  as.integer(minPartitions)
}
#' Create an RDD from a text file.
#'
#' This function reads a text file from HDFS, a local file system (available on all
#' nodes), or any Hadoop-supported file system URI, and creates an
#' RDD of strings from it. The text files must be encoded as UTF-8.
#'
#' @param sc SparkContext to use
#' @param path Path of file to read. A vector of multiple paths is allowed.
#' @param minPartitions Minimum number of partitions to be created. If NULL, the default
#' value is chosen based on available parallelism.
#' @return RDD where each item is of type \code{character}
#' @noRd
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' lines <- textFile(sc, "myfile.txt")
#'}
textFile <- function(sc, path, minPartitions = NULL) {
  # Normalize each user-supplied path (warnings suppressed for remote URIs),
  # then join into the comma-separated form the JVM API expects.
  paths <- paste(suppressWarnings(normalizePath(path)), collapse = ",")
  jrdd <- callJMethod(sc, "textFile", paths, getMinPartitions(sc, minPartitions))
  # jrdd is a JavaRDD[String]; expose it as an RDD of character values.
  RDD(jrdd, "string")
}
#' Load an RDD saved as a SequenceFile containing serialized objects.
#'
#' The file to be loaded should be one that was previously generated by calling
#' saveAsObjectFile() of the RDD class.
#'
#' @param sc SparkContext to use
#' @param path Path of file to read. A vector of multiple paths is allowed.
#' @param minPartitions Minimum number of partitions to be created. If NULL, the default
#' value is chosen based on available parallelism.
#' @return RDD containing serialized R objects.
#' @seealso saveAsObjectFile
#' @noRd
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' rdd <- objectFile(sc, "myfile")
#'}
objectFile <- function(sc, path, minPartitions = NULL) {
  # Normalize each user-supplied path (warnings suppressed for remote URIs),
  # then join into the comma-separated form the JVM API expects.
  paths <- paste(suppressWarnings(normalizePath(path)), collapse = ",")
  jrdd <- callJMethod(sc, "objectFile", paths, getMinPartitions(sc, minPartitions))
  # The file was written by saveAsObjectFile, so elements are serialized R objects.
  RDD(jrdd, "byte")
}
# Assign each of `length` rows to a slice id, reproducing the positions()
# calculation of Spark's ParallelCollectionRDD. For numSerializedSlices = 22
# and length = 50 the ids begin 0 0 2 2 4 4 6 6 6 9 9 ... — slice groups that
# receive an extra row are roughly evenly spaced.
# When numSerializedSlices is not positive, returns 1 (a single group).
makeSplits <- function(numSerializedSlices, length) {
  if (numSerializedSlices <= 0) {
    return(1)
  }
  idx <- as.numeric(seq_len(numSerializedSlices) - 1)
  # nolint start
  starts <- trunc(idx * length / numSerializedSlices)
  ends <- trunc((idx + 1) * length / numSerializedSlices)
  # nolint end
  unlist(Map(rep, starts, ends - starts))
}
#' Create an RDD from a homogeneous list or vector.
#'
#' This function creates an RDD from a local homogeneous list in R. The elements
#' in the list are split into \code{numSlices} slices and distributed to nodes
#' in the cluster.
#'
#' If size of serialized slices is larger than spark.r.maxAllocationLimit or (200MiB), the function
#' will write it to disk and send the file name to JVM. Also to make sure each slice is not
#' larger than that limit, number of slices may be increased.
#'
#' In 2.2.0 we are changing how the numSlices are used/computed to handle
#' 1 < (length(coll) / numSlices) << length(coll) better, and to get the exact number of slices.
#' This change affects both createDataFrame and spark.lapply.
#' In the specific one case that it is used to convert R native object into SparkDataFrame, it has
#' always been kept at the default of 1. In the case the object is large, we are explicitly setting
#' the parallism to numSlices (which is still 1).
#'
#' Specifically, we are changing to split positions to match the calculation in positions() of
#' ParallelCollectionRDD in Spark.
#'
#' @param sc SparkContext to use
#' @param coll collection to parallelize
#' @param numSlices number of partitions to create in the RDD
#' @return an RDD created from this collection
#' @noRd
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' rdd <- parallelize(sc, 1:10, 2)
#' # The RDD should contain 10 elements
#' length(rdd)
#'}
# Distribute a local list/vector as an RDD. Non-list inputs are coerced via
# as.list() with a message (data frames parallelize by column, matrices by
# element). Large payloads are re-sliced so each serialized slice stays under
# spark.r.maxAllocationLimit, then shipped over the RPC channel, an encrypted
# socket, or a temp file depending on size and encryption settings.
parallelize <- function(sc, coll, numSlices = 1) {
  # TODO: bound/safeguard numSlices
  # TODO: unit tests for if the split works for all primitives
  # TODO: support matrix, data frame, etc
  # Note, for data.frame, createDataFrame turns it into a list before it calls here.
  # nolint start
  # suppress lintr warning: Place a space before left parenthesis, except in a function call.
  if ((!is.list(coll) && !is.vector(coll)) || is.data.frame(coll)) {
  # nolint end
    if (is.data.frame(coll)) {
      message(paste("context.R: A data frame is parallelized by columns."))
    } else {
      if (is.matrix(coll)) {
        message(paste("context.R: A matrix is parallelized by elements."))
      } else {
        message(paste("context.R: parallelize() currently only supports lists and vectors.",
                      "Calling as.list() to coerce coll into a list."))
      }
    }
    coll <- as.list(coll)
  }
  sizeLimit <- getMaxAllocationLimit(sc)
  objectSize <- object.size(coll)
  len <- length(coll)
  # For large objects we make sure the size of each slice is also smaller than sizeLimit,
  # by increasing the slice count beyond numSlices when necessary.
  numSerializedSlices <- min(len, max(numSlices, ceiling(objectSize / sizeLimit)))
  slices <- split(coll, makeSplits(numSerializedSlices, len))
  # Serialize each slice: obtain a list of raws, or a list of lists (slices) of
  # 2-tuples of raws
  serializedSlices <- lapply(slices, serialize, connection = NULL)
  # The RPC backend cannot handle arguments larger than 2GB (INT_MAX).
  # If serialized data is safely less than that threshold we send it over the RPC channel.
  # Otherwise, we write it to a file and send the file name — unless encryption is
  # enabled, in which case the data is streamed through an authenticated socket.
  if (objectSize < sizeLimit) {
    jrdd <- callJStatic("org.apache.spark.api.r.RRDD", "createRDDFromArray", sc, serializedSlices)
  } else {
    if (callJStatic("org.apache.spark.api.r.RUtils", "isEncryptionEnabled", sc)) {
      connectionTimeout <- as.numeric(Sys.getenv("SPARKR_BACKEND_CONNECTION_TIMEOUT", "6000"))
      # the length of slices here is the parallelism to use in the jvm's sc.parallelize()
      parallelism <- as.integer(numSlices)
      jserver <- newJObject("org.apache.spark.api.r.RParallelizeServer", sc, parallelism)
      authSecret <- callJMethod(jserver, "secret")
      port <- callJMethod(jserver, "port")
      conn <- socketConnection(
        port = port, blocking = TRUE, open = "wb", timeout = connectionTimeout)
      doServerAuth(conn, authSecret)
      # writeToConnection closes conn when done (including on error).
      writeToConnection(serializedSlices, conn)
      jrdd <- callJMethod(jserver, "getResult")
    } else {
      fileName <- writeToTempFile(serializedSlices)
      # Remove the temp file whether or not the JVM call succeeds.
      jrdd <- tryCatch(callJStatic(
        "org.apache.spark.api.r.RRDD", "createRDDFromFile", sc, fileName, as.integer(numSlices)),
        finally = {
          file.remove(fileName)
        })
    }
  }
  RDD(jrdd, "byte")
}
# Read spark.r.maxAllocationLimit from the Spark conf, falling back to
# .Machine$integer.max / 10 (~200MB) — a safe margin below INT_MAX.
getMaxAllocationLimit <- function(sc) {
  sparkConf <- callJMethod(sc, "getConf")
  limit <- callJMethod(sparkConf,
                       "get",
                       "spark.r.maxAllocationLimit",
                       toString(.Machine$integer.max / 10))
  as.numeric(limit)
}
# Stream each slice to conn as a big-endian 4-byte length prefix followed by
# the raw bytes. The connection is always closed, even if a write fails.
writeToConnection <- function(serializedSlices, conn) {
  on.exit(close(conn), add = TRUE)
  for (chunk in serializedSlices) {
    writeBin(as.integer(length(chunk)), conn, endian = "big")
    writeBin(chunk, conn, endian = "big")
  }
}
# Persist the serialized slices to a fresh temporary file and return its path.
# Used when the payload is too large to ship over the RPC channel directly.
writeToTempFile <- function(serializedSlices) {
  tempFilePath <- tempfile()
  writeToConnection(serializedSlices, file(tempFilePath, "wb"))
  tempFilePath
}
#' Include this specified package on all workers
#'
#' This function can be used to include a package on all workers before the
#' user's code is executed. This is useful in scenarios where other R package
#' functions are used in a function passed to functions like \code{lapply}.
#' NOTE: The package is assumed to be installed on every node in the Spark
#' cluster.
#'
#' @param sc SparkContext to use
#' @param pkg Package name
#' @noRd
#' @examples
#'\dontrun{
#' library(Matrix)
#'
#' sc <- sparkR.init()
#' # Include the matrix library we will be using
#' includePackage(sc, Matrix)
#'
#' generateSparse <- function(x) {
#' sparseMatrix(i=c(1, 2, 3), j=c(1, 2, 3), x=c(1, 2, 3))
#' }
#'
#' rdd <- lapplyPartition(parallelize(sc, 1:2, 2L), generateSparse)
#' collect(rdd)
#'}
includePackage <- function(sc, pkg) {
  # Capture the package name as written at the call site (unevaluated symbol).
  pkg <- as.character(substitute(pkg))
  # Start from any previously registered packages, defaulting to an empty list.
  existing <- if (exists(".packages", .sparkREnv)) {
    .sparkREnv$.packages
  } else {
    list()
  }
  # Append and store back so workers pick up the full set.
  .sparkREnv$.packages <- c(existing, pkg)
}
#' Broadcast a variable to all workers
#'
#' Broadcast a read-only variable to the cluster, returning a \code{Broadcast}
#' object for reading it in distributed functions.
#'
#' @param sc Spark Context to use
#' @param object Object to be broadcast
#' @noRd
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' rdd <- parallelize(sc, 1:2, 2L)
#'
#' # Large Matrix object that we want to broadcast
#' randomMat <- matrix(nrow=100, ncol=10, data=rnorm(1000))
#' randomMatBr <- broadcastRDD(sc, randomMat)
#'
#' # Use the broadcast variable inside the function
#' useBroadcast <- function(x) {
#' sum(value(randomMatBr) * x)
#' }
#' sumRDD <- lapply(rdd, useBroadcast)
#'}
broadcastRDD <- function(sc, object) {
  # Record the variable's name as used by the caller, for bookkeeping.
  varName <- as.character(substitute(object))
  # Ship the serialized bytes to the JVM; wrap the handle in a Broadcast object.
  javaBroadcast <- callJMethod(sc, "broadcast", serialize(object, connection = NULL))
  broadcastId <- as.character(callJMethod(javaBroadcast, "id"))
  Broadcast(broadcastId, object, javaBroadcast, varName)
}
#' Set the checkpoint directory
#'
#' Set the directory under which RDDs are going to be checkpointed. The
#' directory must be a HDFS path if running on a cluster.
#'
#' @param sc Spark Context to use
#' @param dirName Directory path
#' @noRd
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' setCheckpointDir(sc, "~/checkpoint")
#' rdd <- parallelize(sc, 1:2, 2L)
#' checkpoint(rdd)
#'}
setCheckpointDirSC <- function(sc, dirName) {
  # normalizePath warns on non-existent/remote paths (e.g. HDFS); suppress it.
  checkpointPath <- suppressWarnings(normalizePath(dirName))
  invisible(callJMethod(sc, "setCheckpointDir", checkpointPath))
}
#' Add a file or directory to be downloaded with this Spark job on every node.
#'
#' The path passed can be either a local file, a file in HDFS (or other Hadoop-supported
#' filesystems), or an HTTP, HTTPS or FTP URI. To access the file in Spark jobs,
#' use spark.getSparkFiles(fileName) to find its download location.
#'
#' A directory can be given if the recursive option is set to true.
#' Currently directories are only supported for Hadoop-supported filesystems.
#' Refer Hadoop-supported filesystems at
#' \url{https://cwiki.apache.org/confluence/display/HADOOP2/HCFS}.
#'
#' Note: A path can be added only once. Subsequent additions of the same path are ignored.
#'
#' @rdname spark.addFile
#' @param path The path of the file to be added
#' @param recursive Whether to add files recursively from the path. Default is FALSE.
#' @examples
#'\dontrun{
#' spark.addFile("~/myfile")
#'}
#' @note spark.addFile since 2.1.0
spark.addFile <- function(path, recursive = FALSE) {
  # Suppress the warning normalizePath raises for non-local paths
  # (HDFS/HTTP/FTP URIs), which are still valid inputs here.
  localPath <- suppressWarnings(normalizePath(path))
  invisible(callJMethod(getSparkContext(), "addFile", localPath, recursive))
}
#' Get the root directory that contains files added through spark.addFile.
#'
#' @rdname spark.getSparkFilesRootDirectory
#' @return the root directory that contains files added through spark.addFile
#' @examples
#'\dontrun{
#' spark.getSparkFilesRootDirectory()
#'}
#' @note spark.getSparkFilesRootDirectory since 2.1.0
spark.getSparkFilesRootDirectory <- function() { # nolint
  onWorker <- Sys.getenv("SPARKR_IS_RUNNING_ON_WORKER") != ""
  if (onWorker) {
    # Workers receive the download location through the environment.
    Sys.getenv("SPARKR_SPARKFILES_ROOT_DIR")
  } else {
    # On the driver, ask the JVM SparkFiles helper directly.
    callJStatic("org.apache.spark.SparkFiles", "getRootDirectory")
  }
}
#' Get the absolute path of a file added through spark.addFile.
#'
#' @rdname spark.getSparkFiles
#' @param fileName The name of the file added through spark.addFile
#' @return the absolute path of a file added through spark.addFile.
#' @examples
#'\dontrun{
#' spark.getSparkFiles("myfile")
#'}
#' @note spark.getSparkFiles since 2.1.0
spark.getSparkFiles <- function(fileName) {
  name <- as.character(fileName)
  if (Sys.getenv("SPARKR_IS_RUNNING_ON_WORKER") != "") {
    # Worker side: resolve relative to the SparkFiles root directory.
    file.path(spark.getSparkFilesRootDirectory(), name)
  } else {
    # Driver side: delegate to the JVM SparkFiles helper.
    callJStatic("org.apache.spark.SparkFiles", "get", name)
  }
}
#' Run a function over a list of elements, distributing the computations with Spark
#'
#' Run a function over a list of elements, distributing the computations with Spark. Applies a
#' function in a manner that is similar to doParallel or lapply to elements of a list.
#' The computations are distributed using Spark. It is conceptually the same as the following code:
#' lapply(list, func)
#'
#' Known limitations:
#' \itemize{
#' \item variable scoping and capture: compared to R's rich support for variable resolutions,
#' the distributed nature of SparkR limits how variables are resolved at runtime. All the
#' variables that are available through lexical scoping are embedded in the closure of the
#' function and available as read-only variables within the function. The environment variables
#' should be stored into temporary variables outside the function, and not directly accessed
#' within the function.
#'
#' \item loading external packages: In order to use a package, you need to load it inside the
#' closure. For example, if you rely on the MASS module, here is how you would use it:
#' \preformatted{
#' train <- function(hyperparam) {
#' library(MASS)
#' lm.ridge("y ~ x+z", data, lambda=hyperparam)
#' model
#' }
#' }
#' }
#'
#' @rdname spark.lapply
#' @param list the list of elements
#' @param func a function that takes one argument.
#' @return a list of results (the exact type being determined by the function)
#' @examples
#'\dontrun{
#' sparkR.session()
#' doubled <- spark.lapply(1:10, function(x){2 * x})
#'}
#' @note spark.lapply since 2.0.0
spark.lapply <- function(list, func) {
  sc <- getSparkContext()
  # One partition per element, giving doParallel/lapply-like semantics.
  distributed <- map(parallelize(sc, list, length(list)), func)
  collectRDD(distributed)
}
#' Set new log level
#'
#' Set new log level: "ALL", "DEBUG", "ERROR", "FATAL", "INFO", "OFF", "TRACE", "WARN"
#'
#' @rdname setLogLevel
#' @param level New log level
#' @examples
#'\dontrun{
#' setLogLevel("ERROR")
#'}
#' @note setLogLevel since 2.0.0
setLogLevel <- function(level) {
  # Forward the level string straight to the JVM SparkContext.
  invisible(callJMethod(getSparkContext(), "setLogLevel", level))
}
#' Set checkpoint directory
#'
#' Set the directory under which SparkDataFrame are going to be checkpointed. The directory must be
#' a HDFS path if running on a cluster.
#'
#' @rdname setCheckpointDir
#' @param directory Directory path to checkpoint to
#' @seealso \link{checkpoint}
#' @examples
#'\dontrun{
#' setCheckpointDir("/checkpoint")
#'}
#' @note setCheckpointDir since 2.2.0
setCheckpointDir <- function(directory) {
  # normalizePath warns on non-existent/remote paths (e.g. HDFS); suppress it.
  normalized <- suppressWarnings(normalizePath(directory))
  invisible(callJMethod(getSparkContext(), "setCheckpointDir", normalized))
}
|
# Plot 1: precision vs. percentage of positive examples, one curve per method.
# Fixed: header=T/axes=F spelled out (T/F are reassignable), `lab=` partial
# match replaced with the explicit `labels=` argument.
report_data <- read.table("D:/research/twitter/cs246/ucla/loss of information/report.txt", header=TRUE, sep="\t")
plot_colors <- c(rgb(r=0.0, g=0.0, b=0.9),
                 "red",
                 "forestgreen",
                 rgb(r=0.0, g=0.0, b=0.0))
par(mar=c(4.2, 4.2, 0.5, 0.5))
plot(report_data$NBA, type="l", col=plot_colors[1],
     ylim=range(0.4, 1.0), axes=FALSE, xlab="Percentage of positive examples",
     ylab="Precision", cex.lab=1, lwd=2)
lines(report_data$RSA, type="l", lty=2, lwd=2, col=plot_colors[2])
lines(report_data$CM, type="l", lty=3, lwd=2, col=plot_colors[3])
axis(2, las=1, cex.axis=0.8)
axis(1, at=(1:10), labels=c("10%","20%","30%","40%","50%","60%","70%","80%","90%","100%"), cex.axis=0.8)
box()
# Legend picks up series names directly from the data frame's columns.
legend("topright", names(report_data), cex=1, col=plot_colors,
       lty=1:4, lwd=2, bty="n")
# ----------------------------------------------------------------------------
# Plot 2: precision vs. number of labeled examples (k) for the active-learning
# methods SA/BSA/NBA/CA.
# Fixed: removed the invalid `breaks=c` argument from axis() — "breaks" is not
# an axis()/par parameter, and passing the function `c` as its value triggered
# a runtime complaint. Also header=T/axes=F spelled out, `lab=` -> `labels=`.
report_data <- read.table("D:/research/twitter/cs246/ucla/report_active.txt", header=TRUE, sep="\t")
plot_colors <- c(rgb(r=0.0, g=0.0, b=0.9),
                 "red",
                 "forestgreen",
                 rgb(r=0.0, g=0.0, b=0.0))
par(mar=c(4.2, 4.2, 0.5, 0.5))
plot(report_data$SA, type="l", col=plot_colors[1],
     ylim=range(0.2, 1.0), axes=FALSE, xlab="k",
     ylab="Precision", cex.lab=1, lwd=2)
lines(report_data$BSA, type="l", lty=2, lwd=2, col=plot_colors[2])
lines(report_data$NBA, type="l", lty=3, lwd=2, col=plot_colors[3])
lines(report_data$CA, type="l", lty=4, lwd=2, col=plot_colors[4])
axis(2, las=1, cex.axis=0.8)
axis(1, at=seq(4, 20, by=4), labels=c("20", "40", "60", "80", "100"), cex.axis=0.8)
box()
legend("topright", c("SA", "BSA", "NBA", "CA"), cex=0.8, col=plot_colors,
       lty=1:4, lwd=2, bty="n")
# Plot 3: precision vs. t for BSA/NBA/CA (legend drawn by the following
# legend() call). Fixed: header=T/axes=F spelled out, `lab=` -> `labels=`.
report_data <- read.table("D:/research/twitter/cs246/ucla/report_loss.txt", header=TRUE, sep="\t")
plot_colors <- c("red",
                 "forestgreen",
                 rgb(r=0.0, g=0.0, b=0.0))
par(mar=c(4.2, 4.2, 0.5, 0.5))
plot(report_data$BSA, type="l", col=plot_colors[1],
     ylim=range(0.4, 1.0), axes=FALSE, xlab="t",
     ylab="Precision", cex.lab=1, lwd=2, lty=2)
lines(report_data$NBA, type="l", lty=3, lwd=2, col=plot_colors[2])
lines(report_data$CA, type="l", lty=4, lwd=2, col=plot_colors[3])
axis(2, las=1, cex.axis=0.8)
axis(1, at=(1:10), labels=c("10%","20%","30%","40%","50%","60%","70%","80%","90%","100%"), cex.axis=0.8)
box()
# Create a legend in the top-right corner.
legend("topright", c("BSA", "NBA", "CA"), cex=0.8, col=plot_colors, lty=1:3, lwd=2, bty="n"); | /CS246/experiment/drawPrecision.R | no_license | mohanyang/duanyang | R | false | false | 2,584 | r | report_data <- read.table("D:/research/twitter/cs246/ucla/loss of information/report.txt", header=T, sep="\t")
# Plot 1: precision vs. percentage of positive examples, one curve per method
# (report_data is read just above). Fixed: axes=F spelled out, `lab=` partial
# match replaced with the explicit `labels=` argument.
plot_colors <- c(rgb(r=0.0, g=0.0, b=0.9),
                 "red",
                 "forestgreen",
                 rgb(r=0.0, g=0.0, b=0.0))
par(mar=c(4.2, 4.2, 0.5, 0.5))
plot(report_data$NBA, type="l", col=plot_colors[1],
     ylim=range(0.4, 1.0), axes=FALSE, xlab="Percentage of positive examples",
     ylab="Precision", cex.lab=1, lwd=2)
lines(report_data$RSA, type="l", lty=2, lwd=2, col=plot_colors[2])
lines(report_data$CM, type="l", lty=3, lwd=2, col=plot_colors[3])
axis(2, las=1, cex.axis=0.8)
axis(1, at=(1:10), labels=c("10%","20%","30%","40%","50%","60%","70%","80%","90%","100%"), cex.axis=0.8)
box()
# Legend picks up series names directly from the data frame's columns.
legend("topright", names(report_data), cex=1, col=plot_colors,
       lty=1:4, lwd=2, bty="n")
# ----------------------------------------------------------------------------
# Plot 2: precision vs. number of labeled examples (k) for the active-learning
# methods SA/BSA/NBA/CA.
# Fixed: removed the invalid `breaks=c` argument from axis() — "breaks" is not
# an axis()/par parameter, and passing the function `c` as its value triggered
# a runtime complaint. Also header=T/axes=F spelled out, `lab=` -> `labels=`.
report_data <- read.table("D:/research/twitter/cs246/ucla/report_active.txt", header=TRUE, sep="\t")
plot_colors <- c(rgb(r=0.0, g=0.0, b=0.9),
                 "red",
                 "forestgreen",
                 rgb(r=0.0, g=0.0, b=0.0))
par(mar=c(4.2, 4.2, 0.5, 0.5))
plot(report_data$SA, type="l", col=plot_colors[1],
     ylim=range(0.2, 1.0), axes=FALSE, xlab="k",
     ylab="Precision", cex.lab=1, lwd=2)
lines(report_data$BSA, type="l", lty=2, lwd=2, col=plot_colors[2])
lines(report_data$NBA, type="l", lty=3, lwd=2, col=plot_colors[3])
lines(report_data$CA, type="l", lty=4, lwd=2, col=plot_colors[4])
axis(2, las=1, cex.axis=0.8)
axis(1, at=seq(4, 20, by=4), labels=c("20", "40", "60", "80", "100"), cex.axis=0.8)
box()
legend("topright", c("SA", "BSA", "NBA", "CA"), cex=0.8, col=plot_colors,
       lty=1:4, lwd=2, bty="n")
# Plot 3: precision vs. t for BSA/NBA/CA (legend drawn by the following
# legend() call). Fixed: header=T/axes=F spelled out, `lab=` -> `labels=`.
report_data <- read.table("D:/research/twitter/cs246/ucla/report_loss.txt", header=TRUE, sep="\t")
plot_colors <- c("red",
                 "forestgreen",
                 rgb(r=0.0, g=0.0, b=0.0))
par(mar=c(4.2, 4.2, 0.5, 0.5))
plot(report_data$BSA, type="l", col=plot_colors[1],
     ylim=range(0.4, 1.0), axes=FALSE, xlab="t",
     ylab="Precision", cex.lab=1, lwd=2, lty=2)
lines(report_data$NBA, type="l", lty=3, lwd=2, col=plot_colors[2])
lines(report_data$CA, type="l", lty=4, lwd=2, col=plot_colors[3])
axis(2, las=1, cex.axis=0.8)
axis(1, at=(1:10), labels=c("10%","20%","30%","40%","50%","60%","70%","80%","90%","100%"), cex.axis=0.8)
box()
# Create a legend in the top-right corner.
legend("topright", c("BSA", "NBA", "CA"), cex=0.8, col=plot_colors, lty=1:3, lwd=2, bty="n"); |
# Coursera Exploratory Data Analysis — Course Project 1, plot 3.
# Download the data and take a look at them ("?" marks missing values).
household_power_consumption <- read.csv("./household_power_consumption.txt", sep=";", na.strings="?")
View(household_power_consumption)
# Subset rows for 2007-02-01 and 2007-02-02 only. Row indices were found
# manually via the viewer's filter option — TODO confirm if the source file changes.
data<-household_power_consumption[66637:69516, ]
View(data)
# Combine the Date and Time columns into a single POSIXlt timestamp vector.
time1<-strptime(paste(data$Date,data$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
# Create the third plot and save it as a 480x480 PNG (dev.off() follows below).
png(file="plot3.png",width=480,height=480)
plot(time1, data$Sub_metering_1, pch=".", ylab = "Energy sub metering", xlab=" ")
lines(time1, data$Sub_metering_1)
lines(time1, data$Sub_metering_2, col="red")
lines(time1, data$Sub_metering_3, col="blue")
# pch=c(NA,NA,NA) suppresses point symbols so the legend shows lines only.
legend("topright",lwd=1,pch =c(NA,NA,NA) , col = c("black", "red", "blue"), legend=c("Sub_metering_1", "Sub_metering_2","Sub_metering_3"))
dev.off() | /plot3.R | no_license | GuglielmoR/ExData_Plotting1 | R | false | false | 968 | r | #Coursera Exploratory data analysis
# Download the data and take a look at them ("?" marks missing values).
household_power_consumption <- read.csv("./household_power_consumption.txt", sep=";", na.strings="?")
View(household_power_consumption)
# Subset rows for 2007-02-01 and 2007-02-02 only. Row indices were found
# manually via the viewer's filter option — TODO confirm if the source file changes.
data<-household_power_consumption[66637:69516, ]
View(data)
# Combine the Date and Time columns into a single POSIXlt timestamp vector.
time1<-strptime(paste(data$Date,data$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
# Create the third plot and save it as a 480x480 PNG (dev.off() follows below).
png(file="plot3.png",width=480,height=480)
plot(time1, data$Sub_metering_1, pch=".", ylab = "Energy sub metering", xlab=" ")
lines(time1, data$Sub_metering_1)
lines(time1, data$Sub_metering_2, col="red")
lines(time1, data$Sub_metering_3, col="blue")
# pch=c(NA,NA,NA) suppresses point symbols so the legend shows lines only.
legend("topright",lwd=1,pch =c(NA,NA,NA) , col = c("black", "red", "blue"), legend=c("Sub_metering_1", "Sub_metering_2","Sub_metering_3"))
dev.off() |
## Matrix Inversion
## this is a pair of functions that cache the inverse of a matrix.
## the first function creates a special "matrix" object that can cache its inverse.
# Build a cache-aware matrix wrapper: a list of closures sharing one
# environment that holds the matrix `x` and its (lazily computed) inverse.
makeCacheMatrix <- function(x = matrix()) {
  # Cached inverse; reset to NULL whenever the stored matrix changes.
  inv <- NULL
  list(
    set = function(y) {
      x <<- y
      inv <<- NULL
    },
    get = function() x,
    SetInv = function(inverse) inv <<- inverse,
    getInv = function() inv
  )
}
## This function computes the inverse of the special "matrix" returned by makeCacheMatrix above. If the inverse has already been calculated (and the matrix hasn't changed), the cachesolve retrieves the inverse from the cache.
# Return the inverse of the special "matrix" created by makeCacheMatrix.
# If the inverse is already cached (and the matrix unchanged), it is
# retrieved from the cache instead of being recomputed.
cacheSolve <- function(x, ...) {
  cached <- x$getInv()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  computed <- solve(x$get(), ...)
  x$SetInv(computed)
  computed
}
| /cachematrix.R | no_license | madnatx/ProgrammingAssignment2 | R | false | false | 891 | r | ## Matrix Inversion
## this is a pair of functions that cache the inverse of a matrix.
## the first function creates a special "matrix" object that can cache its inverse.
# Build a cache-aware matrix wrapper: a list of closures sharing one
# environment that holds the matrix `x` and its (lazily computed) inverse.
makeCacheMatrix <- function(x = matrix()) {
  # Cached inverse; reset to NULL whenever the stored matrix changes.
  inv <- NULL
  list(
    set = function(y) {
      x <<- y
      inv <<- NULL
    },
    get = function() x,
    SetInv = function(inverse) inv <<- inverse,
    getInv = function() inv
  )
}
## This function computes the inverse of the special "matrix" returned by makeCacheMatrix above. If the inverse has already been calculated (and the matrix hasn't changed), the cachesolve retrieves the inverse from the cache.
# Return the inverse of the special "matrix" created by makeCacheMatrix.
# If the inverse is already cached (and the matrix unchanged), it is
# retrieved from the cache instead of being recomputed.
cacheSolve <- function(x, ...) {
  cached <- x$getInv()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  computed <- solve(x$get(), ...)
  x$SetInv(computed)
  computed
}
|
## Load libraries. NOTE(review): ggplot2 and dplyr are attached but never used
## below (all plotting is base graphics) — kept to preserve the file's imports.
library(ggplot2)
library(dplyr)
# Create the data folder if it doesn't exist (a working directory must be
# selected first from the R editor).
if (!file.exists("./ficheros")) {
  dir.create("./ficheros")
}
# Init source/destination variables.
origen <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
destino <- "./ficheros/ficheros.zip"
fileSource <- "./ficheros/household_power_consumption.txt"
# Download and unzip only once. NOTE(review): method = "curl" requires curl on
# the system; assumed available — TODO confirm on Windows.
# Fixed: overwrite = T spelled out as TRUE (T is reassignable).
if (!file.exists(destino)) {
  download.file(origen, destino, method = "curl")
  unzip(destino, overwrite = TRUE, exdir = "./ficheros")
}
# Read only the rows for the two target days (1/2/2007 and 2/2/2007) by
# grepping lines before parsing; "?" marks missing values.
data <- read.table(text = grep("^[1,2]/2/2007",readLines(fileSource),value=TRUE), sep = ';', col.names = c("Date", "Time", "Global_Active_Power", "Global_Reactive_Power", "Voltage", "Global_Intensity", "Sub_Metering_1", "Sub_Metering_2", "Sub_Metering_3"), na.strings = "?")
# Combine date and time into a single POSIXct column for the x axis.
data$Date <- as.Date(data$Date, format = '%d/%m/%Y')
data$DateTime <- as.POSIXct(paste(data$Date, data$Time))
# Open the PNG device.
png(filename = "./plot4.png", width = 480, height = 480, units="px")
# Arrange the four panels in a 2x2 grid and draw them.
par(mfrow = c(2, 2))
plot(data$DateTime, data$Global_Active_Power, xlab = "", ylab = "Global Active Power (KW)", type = "l")
plot(data$DateTime, data$Voltage, xlab = "datetime", ylab = "Voltage", type = "l")
plot(data$DateTime, data$Sub_Metering_1, xlab = "", ylab = "Energy sub metering", type = "l")
lines(data$DateTime, data$Sub_Metering_2, col = "red")
lines(data$DateTime, data$Sub_Metering_3, col = "blue")
legend("topright", col = c("black", "red", "blue"), legend = c("Sub_Metering_1", "Sub_Metering_2", "Sub_Metering_3"), lwd = 1)
plot(data$DateTime, data$Global_Reactive_Power, xlab = "Datetime", ylab = "Global_Reactive_Power", type = "l")
dev.off()
| /plot4.R | no_license | Elmasri-Fathallah/EDA_Course-Project01 | R | false | false | 1,876 | r | ##load Libraries
## Plot 4 of the EDA course project: four base-graphics panels of household
## power data for 2007-02-01 and 2007-02-02, written to plot4.png.
library(ggplot2)
library(dplyr)
# Make sure the data directory exists before downloading anything.
if (!file.exists("./ficheros")) {
  dir.create("./ficheros")
}
# Locations: remote archive, local zip copy, and the extracted text file.
zip_url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
zip_path <- "./ficheros/ficheros.zip"
data_path <- "./ficheros/household_power_consumption.txt"
# Fetch and extract the archive only on the first run.
if (!file.exists(zip_path)) {
  download.file(zip_url, zip_path, method = "curl")
  unzip(zip_path, overwrite = T, exdir = "./ficheros")
}
# Keep only the rows for the two days of interest while reading.
power <- read.table(text = grep("^[1,2]/2/2007", readLines(data_path), value = TRUE), sep = ';', col.names = c("Date", "Time", "Global_Active_Power", "Global_Reactive_Power", "Voltage", "Global_Intensity", "Sub_Metering_1", "Sub_Metering_2", "Sub_Metering_3"), na.strings = "?")
# Merge the Date and Time columns into one POSIXct timestamp.
power$Date <- as.Date(power$Date, format = '%d/%m/%Y')
power$DateTime <- as.POSIXct(paste(power$Date, power$Time))
# Send the output to a 480x480 PNG file.
png(filename = "./plot4.png", width = 480, height = 480, units="px")
# Lay out a 2x2 grid of panels and draw the four plots.
par(mfrow = c(2, 2))
plot(power$DateTime, power$Global_Active_Power, xlab = "", ylab = "Global Active Power (KW)", type = "l")
plot(power$DateTime, power$Voltage, xlab = "datetime", ylab = "Voltage", type = "l")
plot(power$DateTime, power$Sub_Metering_1, xlab = "", ylab = "Energy sub metering", type = "l")
lines(power$DateTime, power$Sub_Metering_2, col = "red")
lines(power$DateTime, power$Sub_Metering_3, col = "blue")
legend("topright", col = c("black", "red", "blue"), legend = c("Sub_Metering_1", "Sub_Metering_2", "Sub_Metering_3"), lwd = 1)
plot(power$DateTime, power$Global_Reactive_Power, xlab = "Datetime", ylab = "Global_Reactive_Power", type = "l")
dev.off()
# Script to read the cleaned data table.
# (Raw data originally lives in an xlsx file, per the author's note.)
# NOTE(review): the next line opens the help page for read.table when run
# interactively; it has no effect on the data.
?read.table
# NOTE(review): read.table() defaults to whitespace separation, so this
# parse of a CSV is probably wrong -- presumably read.csv2() below is the
# intended reader.
Tabela <- read.table("data/Dados.csv")
# NOTE(review): this result is printed but never assigned -- confirm
# whether it should replace Tabela.
read.csv2("data/Dados.csv")
| /R/0_Script_teste.R | no_license | crisregis/Repositorio_aulas_analises_em_R | R | false | false | 160 | r | # script para ler a tabela de dados limpos #
# Raw data originally in an xlsx file (per the author's note).
# NOTE(review): the next line opens the read.table help page when run
# interactively; it does not read any data.
?read.table
# NOTE(review): read.table() defaults to whitespace separation; the
# read.csv2() call below is presumably the intended CSV parse, but its
# result is never assigned.
Tabela <- read.table("data/Dados.csv")
read.csv2("data/Dados.csv")
|
# Distance from each address to the nearest hospital (geodesic distance).
# Install geosphere only when it is not already available, instead of
# re-installing on every run.
if (!requireNamespace("geosphere", quietly = TRUE)) install.packages("geosphere")
library(geosphere)
library(dplyr)
library(readxl)
# Apartment address data (with long/lat coordinates).
apt <- read.csv("..\\Data\\preprocessingData\\O_Base.csv", stringsAsFactors = F)
# Data set the distance column will be merged into.
merge <- read.csv("..\\Data\\preprocessingData\\O_Base_merge.csv", stringsAsFactors = F)
# Hospital location data (the original comment said bus stops; the file
# read is the Seoul hospital list).
sub <- read.csv("..\\Data\\preprocessingData\\H_Seoul_hospital.csv", stringsAsFactors = F)
str(sub)
sum(is.na(sub))
# Preallocate the per-address result vectors (minimum distance plus the
# matching hospital's name and coordinates) instead of growing them with
# c() inside the loop, which is O(n^2).
n_apt <- nrow(apt)
distance <- numeric(n_apt)
place <- character(n_apt)
lon <- numeric(n_apt)
lat <- numeric(n_apt)
# For every address, scan all hospitals and keep the nearest one.
for (i in seq_len(n_apt)) {
  d <- 100000
  loc <- ""
  lo <- 0
  la <- 0
  print(i / n_apt * 100)  # progress as a percentage
  for (j in seq_len(nrow(sub))) {
    dis <- distm(c(apt$long[i], apt$lat[i]), c(sub$lon[j], sub$lat[j]), fun = distHaversine)
    if (dis < d) {
      d <- dis
      loc <- sub$지번[j]
      lo <- sub$lon[j]
      la <- sub$lat[j]
    }
  }
  distance[i] <- d
  place[i] <- loc
  lon[i] <- lo
  lat[i] <- la
}
# Distances come back in meters; convert to km rounded to 2 decimals.
dist_from_hospital <- round(distance / 1000, 2)
# Fixed: the original called View(dist_from_bus), an object that does not
# exist in this script (copy-paste leftover from a bus-distance version).
View(dist_from_hospital)
# Append the distance column to the merge data.
df <- cbind(merge, dist_from_hospital)
# Overwrite the original merge file.
write.csv(df, "..\\Data\\preprocessingData\\O_Base_merge.csv", row.names = FALSE)
# Sanity check: re-read the file we just wrote.
check_df <- read.csv("..\\Data\\preprocessingData\\O_Base_merge.csv", stringsAsFactors = F)
str(check_df)
sum(is.na(df))
# Keep the source coordinates plus the nearest hospital's name/coordinates.
df1 <- cbind(apt, distance, place, lon, lat)
sum(is.na(df1))
str(df1)
write.csv(df1, "..\\Data\\preprocessingData\\O_20200423_dist_from_hospital.csv", row.names = FALSE)
| /Data_Preprocessing_R/O_20200423_dist_from_hospital.R | no_license | h0n9670/ApartmentPrice | R | false | false | 1,950 | r | # 주소 데이터 와 가까운 병원과의 거리
# Distance calculation: nearest hospital for each address, using the
# geosphere package on long/lat coordinates.
# NOTE(review): installing inside a script re-installs on every run;
# consider guarding with requireNamespace().
install.packages("geosphere")
library(geosphere)
library(dplyr)
library(readxl)
# Apartment address data (long/lat per address).
apt <- read.csv("..\\Data\\preprocessingData\\O_Base.csv", stringsAsFactors = F)
# Data set the distance column will be merged into.
merge <- read.csv("..\\Data\\preprocessingData\\O_Base_merge.csv", stringsAsFactors = F)
# Hospital location data (original comment said bus stops; the file read
# is the Seoul hospital list -- confirm).
sub <- read.csv("..\\Data\\preprocessingData\\H_Seoul_hospital.csv", stringsAsFactors = F)
str(sub)
sum(is.na(sub))
# Empty accumulators for each address's minimum distance and its location.
# (Grown with c() inside the loop; preallocating would be much faster.)
distance <- c()
place <- c()
lon <- c()
lat <- c()
# For each address, scan every hospital and keep the closest one.
apt_row <- nrow(apt)
for (i in 1:nrow(apt)){
  d <- 100000
  loc <- ""
  lo <- 0
  la <- 0
  print(i/apt_row*100)
  for (j in 1:nrow(sub)){
    dis <- distm(c(apt$long[i],apt$lat[i]),c(sub$lon[j],sub$lat[j]), fun = distHaversine)
    if (dis < d) {
      d <- dis
      loc <- sub$지번[j]
      lo <- sub$lon[j]
      la <- sub$lat[j]
    }
  }
  distance <- c(distance,d)
  place <- c(place,loc)
  lon <- c(lon,lo)
  lat <- c(lat,la)
}
# Distances are in meters; convert to km with 2 decimal places.
dist_from_hospital <- round(distance/1000,2)
# NOTE(review): dist_from_bus is not defined anywhere in this script --
# this looks like a copy-paste leftover and presumably should be
# dist_from_hospital.
View(dist_from_bus)
# Merge the distance column into the merge data.
df <- cbind(merge, dist_from_hospital)
# Overwrite the original file.
write.csv(df,"..\\Data\\preprocessingData\\O_Base_merge.csv", row.names = FALSE)
# Verify the written data.
check_df <- read.csv("..\\Data\\preprocessingData\\O_Base_merge.csv", stringsAsFactors = F)
str(check_df)
sum(is.na(df))
# Keep source coordinates plus the nearest hospital's name and coordinates.
df1 <- cbind(apt,distance,place,lon,lat)
sum(is.na(df1))
str(df1)
write.csv(df1,"..\\Data\\preprocessingData\\O_20200423_dist_from_hospital.csv",row.names = FALSE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/spatialDataFunctions.R
\name{getPairwiseDistance}
\alias{getPairwiseDistance}
\title{Distance matrix for a population}
\usage{
getPairwiseDistance(popn)
}
\arguments{
\item{popn}{a 3D population object}
}
\value{
a vector representing the lower triangle of the distance matrix
}
\description{
Compute all pairwise distances for a population. This function
is simply a wrapper for \code{dist} that only returns the vector
}
\examples{
pop <- generatePop()
distance <- getPairwiseDistance(pop)
getDistij(distance, 14, 15)
}
\author{
Danny Hanson
}
\seealso{
\code{\link{dist}} \code{\link{getDistij}}
}
| /man/getPairwiseDistance.Rd | no_license | gretelk/mateable | R | false | true | 680 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/spatialDataFunctions.R
\name{getPairwiseDistance}
\alias{getPairwiseDistance}
\title{Distance matrix for a population}
\usage{
getPairwiseDistance(popn)
}
\arguments{
\item{popn}{a 3D population object}
}
\value{
a vector representing the lower triangle of the distance matrix
}
\description{
Compute all pairwise distances for a population. This function
is simply a wrapper for \code{dist} that only returns the vector
}
\examples{
pop <- generatePop()
distance <- getPairwiseDistance(pop)
getDistij(distance, 14, 15)
}
\author{
Danny Hanson
}
\seealso{
\code{\link{dist}} \code{\link{getDistij}}
}
|
# TidyTuesday week 30: return on investment of movies by genre, 1994-2014.
library(readr)
library(tidyverse)
# NOTE(review): mdy() below comes from lubridate, which is not attached
# explicitly here -- confirm it is loaded (core tidyverse only since v2.0).
movie_profit <- read_csv("Coding/movie_profit.csv")
### PROMPT ####
### x axis: multiple of budget
### y axis: percentage of films by GENRE w positive profit
# Maintenance: parse release dates and keep films from 1994 onward.
movie_profit$release_date <- mdy(movie_profit$release_date)
movie_profit <- filter(movie_profit, release_date >= "1994-01-01")
# 1. Create the profit variable: total gross (domestic + worldwide) as a
#    multiple of the production budget, rounded to 2 decimals.
movie_profit <- movie_profit %>%
mutate(profit = (domestic_gross + worldwide_gross) / production_budget) %>%
mutate(profit = round(profit, 2))
# 2. Create the percentage variable.
N <- movie_profit %>% group_by(genre) %>% count() #counting the number of movies per genre
genre_profit <- movie_profit %>% group_by(genre, profit) %>% count()
#counting number movies per genre with given profit multiple
genre_profit$prop <- NA #creating proportion variable
# Proportion of each genre's movies at every profit multiple, using the
# per-genre totals stored in N. (A join would vectorize this loop.)
for (i in c(1:nrow(genre_profit))) {
genre_profit$prop[i] <- genre_profit$n[i] / N$n[N$genre == genre_profit$genre[i]]
} #uses loop to calculate proportion using total stored in N
genre_profit %>% group_by(genre) %>% summarize(sum(prop)) #sanity check: proportions sum to 1 per genre
### 3 creating the graph
# First graph: three key genres only.
key_genre <- c("Horror", "Comedy", "Drama")
genre_profit %>% filter(profit >= 1 & profit <= 10 & genre %in% key_genre) %>%
ggplot(aes(profit, prop)) + geom_smooth(aes(color = genre), se = FALSE) +
scale_y_continuous(labels = scales::percent) +
labs(x = "Profit Multiple", y = "Percent", title = "Horror Movies Most Profitable Genre",
subtitle = "Return on Investment of Movies by Genre, 1994 - 2014", color = "Genre")
# Second graph: all genres greyed out, Horror highlighted in blue.
genre_colors <- c(rep("#A9A9A9", 4), "#2E74C0")
genre_profit %>% filter(profit >= 1 & profit <= 10) %>%
ggplot(aes(profit, prop)) + geom_smooth(aes(color = genre), se = FALSE) +
scale_y_continuous(labels = scales::percent) +
scale_color_manual(values = genre_colors) +
labs(x = "Profit Multiple", y = "Percentage of Movies",
title = "Horror Is Most Profitable Movie Genre",
subtitle = "Return on Investment of Movies by Genre, 1994 - 2014", color = "Genre") +
# NOTE(review): guides(color = FALSE) is deprecated in ggplot2 >= 3.3.4;
# guides(color = "none") is the current spelling.
guides(color = FALSE)
| /tidy_tuesday_week30.R | no_license | IAjimi/TidyTuesday | R | false | false | 2,135 | r | library(readr)
library(tidyverse)
movie_profit <- read_csv("Coding/movie_profit.csv")
### PROMPT ####
### x axis: multiple of budget
### y axis: percentage of films by GENRE w positive profit
#maintenance
movie_profit$release_date <- mdy(movie_profit$release_date)
movie_profit <- filter(movie_profit, release_date >= "1994-01-01")
#1 create profit variable
movie_profit <- movie_profit %>%
mutate(profit = (domestic_gross + worldwide_gross) / production_budget) %>%
mutate(profit = round(profit, 2))
#2 create percentage variable
N <- movie_profit %>% group_by(genre) %>% count() #counting the number of movies per genre
genre_profit <- movie_profit %>% group_by(genre, profit) %>% count()
#counting number movies per genre with given profit multiple
genre_profit$prop <- NA #creating proportion variable
for (i in c(1:nrow(genre_profit))) {
genre_profit$prop[i] <- genre_profit$n[i] / N$n[N$genre == genre_profit$genre[i]]
} #uses loop to calculate proportion using total stored in N
genre_profit %>% group_by(genre) %>% summarize(sum(prop)) #sanity check
### 3 creating the graph
#first graph
key_genre <- c("Horror", "Comedy", "Drama")
genre_profit %>% filter(profit >= 1 & profit <= 10 & genre %in% key_genre) %>%
ggplot(aes(profit, prop)) + geom_smooth(aes(color = genre), se = FALSE) +
scale_y_continuous(labels = scales::percent) +
labs(x = "Profit Multiple", y = "Percent", title = "Horror Movies Most Profitable Genre",
subtitle = "Return on Investment of Movies by Genre, 1994 - 2014", color = "Genre")
#second graph
genre_colors <- c(rep("#A9A9A9", 4), "#2E74C0")
genre_profit %>% filter(profit >= 1 & profit <= 10) %>%
ggplot(aes(profit, prop)) + geom_smooth(aes(color = genre), se = FALSE) +
scale_y_continuous(labels = scales::percent) +
scale_color_manual(values = genre_colors) +
labs(x = "Profit Multiple", y = "Percentage of Movies",
title = "Horror Is Most Profitable Movie Genre",
subtitle = "Return on Investment of Movies by Genre, 1994 - 2014", color = "Genre") +
guides(color = FALSE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/phenocam.R
\name{phenocam_image_url}
\alias{phenocam_image_url}
\alias{phenocam_read_day_urls}
\alias{phenocam_read_monthly_midday_urls}
\alias{phenocam_image_url_midday}
\alias{phenocam_info}
\alias{phenocam_download}
\title{Retrieve images from Phenocam}
\usage{
phenocam_image_url(when = NULL, ...)
phenocam_read_day_urls(x = Sys.Date())
phenocam_read_monthly_midday_urls(x = Sys.Date())
phenocam_image_url_midday(x = Sys.Date())
phenocam_info()
phenocam_download(...)
}
\arguments{
\item{when}{a string to be converted into a date-time}
\item{...}{arguments passed to \code{\link[phenocamr]{download_phenocam}}}
\item{x}{a Date}
}
\description{
Phenocam contains over 70,000 images taken from MacLeish.
Photos have been taken every 30 minutes since February 2017.
}
\examples{
phenocam_image_url()
phenocam_image_url("2021-12-25 12:05:05")
\dontrun{
phenocam_read_day_urls()
}
\dontrun{
phenocam_read_monthly_midday_urls()
}
\dontrun{
phenocam_image_url_midday(Sys.Date() - 3)
phenocam_image_url_midday(Sys.Date() - 365)
}
\dontrun{
phenocam_info()
}
\dontrun{
phenocam_download()
df <- read_phenocam(file.path(tempdir(),"macleish_DB_1000_3day.csv"))
print(str(df))
}
}
\references{
\url{https://phenocam.nau.edu/webcam/sites/macleish/}
}
\seealso{
\code{\link[phenocamr]{download_phenocam}}
}
| /man/phenocam_image_url.Rd | no_license | beanumber/macleish | R | false | true | 1,383 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/phenocam.R
\name{phenocam_image_url}
\alias{phenocam_image_url}
\alias{phenocam_read_day_urls}
\alias{phenocam_read_monthly_midday_urls}
\alias{phenocam_image_url_midday}
\alias{phenocam_info}
\alias{phenocam_download}
\title{Retrieve images from Phenocam}
\usage{
phenocam_image_url(when = NULL, ...)
phenocam_read_day_urls(x = Sys.Date())
phenocam_read_monthly_midday_urls(x = Sys.Date())
phenocam_image_url_midday(x = Sys.Date())
phenocam_info()
phenocam_download(...)
}
\arguments{
\item{when}{a string to be converted into a date-time}
\item{...}{arguments passed to \code{\link[phenocamr]{download_phenocam}}}
\item{x}{a Date}
}
\description{
Phenocam contains over 70,000 images taken from MacLeish.
Photos have been taken every 30 minutes since February 2017.
}
\examples{
phenocam_image_url()
phenocam_image_url("2021-12-25 12:05:05")
\dontrun{
phenocam_read_day_urls()
}
\dontrun{
phenocam_read_monthly_midday_urls()
}
\dontrun{
phenocam_image_url_midday(Sys.Date() - 3)
phenocam_image_url_midday(Sys.Date() - 365)
}
\dontrun{
phenocam_info()
}
\dontrun{
phenocam_download()
df <- read_phenocam(file.path(tempdir(),"macleish_DB_1000_3day.csv"))
print(str(df))
}
}
\references{
\url{https://phenocam.nau.edu/webcam/sites/macleish/}
}
\seealso{
\code{\link[phenocamr]{download_phenocam}}
}
|
# Getting & Cleaning Data course project: merge the UCI HAR train/test
# sets, keep mean()/std() features, attach readable activity names, and
# write a tidy per-subject/per-activity summary to tidydata.txt.
# 1. Merges the training and the test sets to create one data set.
x_train <- read.table("train/X_train.txt")
y_train <- read.table("train/y_train.txt")
subject_train <- read.table("train/subject_train.txt")
x_test <- read.table("test/X_test.txt")
y_test <- read.table("test/y_test.txt")
subject_test <- read.table("test/subject_test.txt")
# merge x train and test (feature measurements)
x <- rbind(x_train, x_test)
# merge y train and test (activity codes)
y <- rbind(y_train, y_test)
# merge subject train and test (subject ids)
subject <- rbind(subject_train, subject_test)
# NOTE(review): dplyr is attached but only base functions are used below.
library(dplyr)
# 2. Extracts only the measurements on the mean and standard deviation for each measurement.
features <- read.table("features.txt")
# indices of the feature names containing mean() or std()
meanstd <- grep("mean\\(\\)|std\\(\\)", features[, 2])
# keep only those measurement columns
x <- x[, meanstd]
# add the column names based on the measurements
names(x) <- features[meanstd, 2]
# 3. Uses descriptive activity names to name the activities in the data set
activities <- read.table("activity_labels.txt")
# replace numeric activity codes with their descriptive labels
y[, 1] <- activities[y[, 1], 2]
colnames(y) <- "activities"
#4. Appropriately labels the data set with descriptive variable names.
# label the subject column
colnames(subject) <- "subjects"
# now that all the data sets have correct col names, merge all of them.
mergeall <- cbind(x, y, subject)
# 5. From the data set in step 4, creates a second, independent
# tidy data set with the average of each variable for each activity and each subject.
group<-aggregate(. ~subjects + activities, mergeall, mean, na.rm=TRUE)
tidydata<-group[order(group$subjects, group$activities),]
# NOTE(review): row.name is a partial match of row.names; it works but the
# full argument name is clearer.
write.table(tidydata, "tidydata.txt", row.name=FALSE)
| /run_analysis.R | no_license | msmirabel/Getting-and-Cleaning-Data-Project | R | false | false | 1,919 | r |
# 1. Merges the training and the test sets to create one data set.
x_train <- read.table("train/X_train.txt")
y_train <- read.table("train/y_train.txt")
subject_train <- read.table("train/subject_train.txt")
x_test <- read.table("test/X_test.txt")
y_test <- read.table("test/y_test.txt")
subject_test <- read.table("test/subject_test.txt")
# merge x train and test
x <- rbind(x_train, x_test)
# merge y train and test
y <- rbind(y_train, y_test)
# merge subject train and test
subject <- rbind(subject_train, subject_test)
library(dplyr)
# 2. Extracts only the measurements on the mean and standard deviation for each measurement.
features <- read.table("features.txt")
# get the names mean() or std() measurements
meanstd <- grep("mean\\(\\)|std\\(\\)", features[, 2])
# get x with the mean and std measurements
x <- x[, meanstd]
# add the column names based on the measurements
names(x) <- features[meanstd, 2]
# 3. Uses descriptive activity names to name the activities in the data set
activities <- read.table("activity_labels.txt")
# update y data set with correct activity names
y[, 1] <- activities[y[, 1], 2]
colnames(y) <- "activities"
#4. Appropriately labels the data set with descriptive variable names.
# update subject_data as subjects
colnames(subject) <- "subjects"
# now that all the data sets have correct col names, merge all of them.
mergeall <- cbind(x, y, subject)
# 5. From the data set in step 4, creates a second, independent
# tidy data set with the average of each variable for each activity and each subject.
group<-aggregate(. ~subjects + activities, mergeall, mean, na.rm=TRUE)
tidydata<-group[order(group$subjects, group$activities),]
write.table(tidydata, "tidydata.txt", row.name=FALSE)
|
library(data.table)
library(foreign)
##### Visualization #####
# Launch the interactive "DataClassBrowser" Shiny app for exploring data.
# Sources the UI and server definitions straight from the
# jaywarrick/R-General GitHub repository, then starts the app. Requires
# network access, the shiny package, and a sourceGitHubFile() helper
# defined elsewhere; myUI/myServer are expected to be created by the
# sourced files.
browseShinyData <- function()
{
  sourceGitHubFile(user='jaywarrick', repo='R-General', branch='master', file='DataClassBrowser/ui.R')
  sourceGitHubFile(user='jaywarrick', repo='R-General', branch='master', file='DataClassBrowser/server.R')
  shinyApp(ui=myUI, server=myServer)
}
# Plot overlaid kernel-density estimates of one feature for the WT (red)
# and MT (blue) classes of a data table.
#
# x       : a data.table with a 'Class' column containing 'WT'/'MT' labels
# feature : name of the numeric column to plot
#
# Densities are estimated on [-4, 4]; whichever class has the taller peak
# is drawn first so both curves fit inside the plot region.
# Cleaned up: removed three unused locals from the original (breaks, cmt,
# cwt) that were assigned but never referenced.
plotHist <- function(x, feature)
{
  wt <- x[Class == 'WT'][[feature]]
  mt <- x[Class == 'MT'][[feature]]
  wtd <- density(wt, from=-4, to=4)
  mtd <- density(mt, from=-4, to=4)
  if(max(wtd$y) > max(mtd$y))
  {
    plot(wtd, col='red', xlim=c(-4,4), main='', xlab=feature)
    lines(mtd, col='blue')
  }
  else
  {
    plot(mtd, col='blue', xlim=c(-4,4), main='', xlab=feature)
    lines(wtd, col='red')
  }
  legend('topright', legend=c('MT','WT'), col=c('blue','red'), lty=1)
}
##### General #####
# Sample from a vector by index, avoiding the sample() gotcha where a
# length-1 numeric x is treated as 1:x. Extra arguments (size, replace,
# prob) are passed straight through to sample.int().
resample <- function(x, ...) {
  idx <- sample.int(length(x), ...)
  x[idx]
}
# Convert row/column indices to linear locations, column-major style:
# loc = r + max(numRows) * c. numRows may be a vector; its maximum is used
# as the stride.
getLocsFromRCs <- function(r, c, numRows) {
  stride <- max(numRows)
  r + stride * c
}
# Sine of an angle expressed in degrees.
sind <- function(x) {
  sin(x * pi / 180)
}
# Cosine of an angle expressed in degrees.
cosd <- function(x) {
  cos(x * pi / 180)
}
# Tangent of an angle expressed in degrees.
tand <- function(x) {
  tan(x * pi / 180)
}
# Drop unused factor levels from every factor column of a data.table.
# Non-factor columns pass through unchanged. Returns a new table rather
# than modifying x by reference.
refactor <- function(x)
{
  return(x[,lapply(.SD, function(x){if(is.factor(x)){factor(x)}else{x}})])
}
##### Table IO #####
getTableList <- function(dir, fileList, isArff=F, storeFilePath=F, class=NULL, assignClass=T, expt=NULL, repl=NULL, sampleSize=NULL, colsToRemove = c(), cIdCols = c())
{
if(!is.null(sampleSize))
{
subSampleSize <- sampleSize / length(fileList)
}
tableList <- list()
# For each file in the fileList
for(f in fileList)
{
# Read the file in
print(paste0('Reading file: ', file.path(dir, f)))
if(isArff)
{
library(foreign)
temp <- data.table(read.arff(file.path(dir, f)))
}
else
{
temp <- fread(file.path(dir, f))
}
# Store the filepath that was imported if desired
if(storeFilePath)
{
temp$File <- f
}
# Store the name/number of the experiment/replicate associated with this file
if(!is.null(expt))
{
temp$Expt <- expt
}
if(!is.null(replicate))
{
temp$Repl <- repl
}
# Create/Assign a 'Class' column
if(!is.null(class) && assignClass)
{
temp$Class <- class
}
else if(!is.null(class) && !assignClass)
{
setnames(temp,class,'Class')
temp$Class <- as.character(temp$Class)
}
# Create a column with a complex Id that will be completely unique for each sample
idColsFound <- cIdCols[cIdCols %in% names(temp)]
if(length(idColsFound) != length(cIdCols))
{
warning(cat('The specified cIdCols (', cIdCols[!(cIdCols %in% names(temp))], 'is/are not column names of the table being retrieved... (', names(temp), ')'))
}
temp[,c('cId'):=paste(mapply(function(x){unique(as.character(x))}, mget(idColsFound)), collapse='.'), by=idColsFound]
print(temp[cId == '118.11.1.HS5'])
# put the complex Id first and the class column last
setcolorder(temp, c('cId', names(temp)[names(temp) != 'cId']))
# Put the 'Class' column as the last column of the table
setcolorder(temp, c(names(temp)[names(temp) != 'Class'], 'Class'))
# Remove specified columns from the data
for(tempCol in colsToRemove)
{
if(tempCol %in% names(temp))
{
temp[,c(tempCol) := NULL]
}
else
{
warning(paste(tempCol, 'is not a column of the data table so it cannot be removed'))
}
}
# Grab the randomly sampled rows of the file
if(!is.null(sampleSize))
{
rIds <- trySample(unique(temp$cId), subSampleSize)
temp <- temp[cId %in% rIds]
}
# Print the column names for a little feedback
print(names(temp))
# Append this table to the list of tables provided.
tableList <- append(tableList, list(temp))
}
return(tableList)
}
getXYCSVsAsTableFromDir <- function(dir, xName='SNR', xExpression='(x+1)', yName='BLUR', yExpression='(y+1)*0.05')
{
ret <- list()
fList <- list.files(path = dir, recursive = TRUE)
for(f in fList)
{
if((grepl('x', f) || grepl('y', f)) & grepl('.csv', f))
{
fileName <- strsplit(f, "\\.")[[1]][1]
ret[[fileName]] <- getXYCSVAsTable(dir, f, xName, xExpression, yName, yExpression)
}
}
retTable <- rbindlist(ret)
return(retTable)
}
getXYCSVAsTable <- function(dir, file, xName='SNR', xExpression='(x+1)', yName='BLUR', yExpression='(y+1)*0.05')
{
fileName <- strsplit(file, "\\.")[[1]][1]
xy <- strsplit(fileName, "_")[[1]]
y <- as.numeric(substr(xy[1],2,nchar(xy[1])))
x <- as.numeric(substr(xy[2],2,nchar(xy[2])))
xVal <- eval(parse(text=xExpression))
yVal <- eval(parse(text=yExpression))
print(paste0('Reading ', file.path(dir,file), ' as ', xName, '=', xVal, ', ', yName, '=', yVal, '.'))
theTable <- fread(file.path(dir,file))
theTable[,(xName),with=FALSE] <- xVal
theTable[,(yName),with=FALSE] <- yVal
return(theTable)
}
getXYArffsAsTableFromDir <- function(dir, xName='SNR', xExpression='(x+1)', yName='BLUR', yExpression='(y+1)*0.05')
{
ret <- list()
fList <- list.files(path = dir, recursive = TRUE)
for(f in fList)
{
if((grepl('x', f) || grepl('y', f)) & grepl('.arff', f))
{
fileName <- strsplit(f, "\\.")[[1]][1]
ret[[fileName]] <- getXYArffAsTable(dir, f, xName, xExpression, yName, yExpression)
}
}
retTable <- rbindlist(ret)
return(retTable)
}
getXYArffAsTable <- function(dir, file, xName='SNR', xExpression='(x+1)', yName='BLUR', yExpression='(y+1)*0.05')
{
fileName <- strsplit(file, "\\.")[[1]][1]
xy <- strsplit(fileName, "_")[[1]]
y <- as.numeric(substr(xy[1],2,nchar(xy[1])))
x <- as.numeric(substr(xy[2],2,nchar(xy[2])))
xVal <- eval(parse(text=xExpression))
yVal <- eval(parse(text=yExpression))
print(paste0('Reading ', file.path(dir,file), ' as ', xName, '=', xVal, ', ', yName, '=', yVal, '.'))
theTable <- read.arff(file.path(dir,file))
theTable[,xName] <- xVal
theTable[,yName] <- yVal
return(data.table(theTable))
}
##### Wide Table Operations #####
removeColsContainingAny <- function(x, colNames)
{
dumbCols <- c()
for(dumbCol in colNames)
{
dumbCols <- c(dumbCols, getColNamesContaining(x, dumbCol))
}
dumbCols <- unique(dumbCols)
print('Removing the following extraneous columns of information...')
for(dumbCol in dumbCols)
{
print(dumbCol)
}
x[,(dumbCols):=NULL]
return(x)
}
# Divide column colA by column colB in place (data.table := by reference).
# Rows where colB is zero get NA in colA instead of Inf/NaN. Returns x,
# which is also modified by reference.
divideColAByColB <- function(x, colA, colB)
{
  x[get(colB)==0,(colA):=NA]
  x[get(colB)!=0,(colA):=get(colA)/get(colB)]
  return(x)
}
removeColsWithInfiniteVals <- function(x)
{
duh <- x[,lapply(.SD, function(y){length(which(!is.finite(y))) > 0}), .SDcols=getNumericCols(x)]
duh2 <- getNumericCols(x)[as.logical(as.vector(duh))]
if(length(duh2 > 0))
{
print("Removing cols with infinite values...")
}
for(col in duh2)
{
print(col)
x[,(col):=NULL]
}
}
# Return the column names of x whose name contains the pattern 'name'
# (regular-expression match via grepl).
getColNamesContaining <- function(x, name) {
  all_names <- names(x)
  all_names[grepl(name, all_names)]
}
removeColsContaining <- function(x, name)
{
colsToRemove <- getColNamesContaining(x,name)
print(paste0("Removing colums with names containing '", name, "'"))
for(colToRemove in colsToRemove)
{
print(colToRemove)
x[,(colToRemove):=NULL]
}
return(x)
}
removeColsContainingNames <- function(x, namesToMatch)
{
colsToRemove <- getColNamesContaining(x, namesToMatch[1])
print(paste0("Removing colums with names containing..."))
for(nameToMatch in namesToMatch)
{
print(nameToMatch)
colsToRemove <- colsToRemove[colsToRemove %in% getColNamesContaining(x, nameToMatch)]
}
for(colToRemove in unique(colsToRemove))
{
print(colToRemove)
x[,(colToRemove):=NULL]
}
return(x)
}
fixColNames <- function(x)
{
replaceStringInColNames(x,'_Order_','')
replaceStringInColNames(x,'_Rep_','')
replaceStringInColNames(x,'$','.')
replaceStringInColNames(x,'net.imagej.ops.Ops.','')
replaceStringInColNames(x,' ','')
replaceStringInColNames(x,':','_')
}
# Return the column names of x, excluding any listed in 'names'.
getAllColNamesExcept <- function(x, names) {
  keep <- !(names(x) %in% names)
  names(x)[keep]
}
getNumericCols <- function(x)
{
return(names(x)[unlist(x[,lapply(.SD, is.numeric)])])
}
getNonNumericCols <- function(x)
{
return(names(x)[!unlist(x[,lapply(.SD, is.numeric)])])
}
replaceStringInColNames <- function(x, old, new)
{
oldNames <- names(x)
newNames <- gsub(old, new, names(x), fixed=T)
setnames(x, oldNames, newNames)
}
getWideTable <- function(x)
{
idCols <- getAllColNamesExcept(x, c('Value','Measurement'))
x <- reorganize(x, idCols)
x <- sortColsByName(x);
return(x)
}
# Reorder the columns of a data.table alphabetically. Works by reference
# via data.table::setcolorder, so x itself is modified.
sortColsByName <- function(x)
{
  setcolorder(x, sort(names(x)))
}
standardizeWideData <- function(x)
{
removeNoVarianceCols(x)
robustScale <- function(x)
{
m <- median(x, na.rm=TRUE)
return((x-m)/mad(x, center=m, na.rm=TRUE))
}
x[,lapply(.SD, function(x){if(is.numeric(x)){return(robustScale(x))}else{return(x)}})]
}
removeNoVarianceCols <- function(x)
{
namesToRemove <- getNoVarianceCols(x)
if(length(namesToRemove) > 0)
{
print("Removing cols with a variance of zero...")
for(name in namesToRemove)
{
print(name)
x[,(name):=NULL]
}
}
}
getNoVarianceCols <- function(x)
{
tempSD <- function(y){sd(y, na.rm = TRUE)}
tempNames <- x[,lapply(.SD, tempSD), .SDcols=getNumericCols(x)]
return(names(tempNames)[as.numeric(as.vector(tempNames))==0])
}
removeIncompleteRows <- function(x)
{
valid <- NULL
for(colName in names(x))
{
#print(colName)
if(is.numeric(x[, colName, with=F][[1]][1]))
{
temp <- is.finite(x[,colName,with=F][[1]])
if(is.null(valid))
{
valid <- temp
}
else
{
valid <- valid & temp
}
}
}
cat('Removing rows... ', which(!valid), sep=',')
return(x[valid])
}
calculateLogRatiosOfColsContainingName <- function(x, name)
{
mNames <- getColNamesContaining(x, name)
combos <- combn(mNames,2)
for(j in seq_along(combos[1,]))
{
combo <- combos[,j]
ending1 <- substring(combo[1], first=nchar(name) + 2)
ending2 <- substring(combo[2], first=nchar(name) + 2)
x[,c(paste0(name, "LR.", ending1, ".", ending2)) := log(get(combo[1]) / get(combo[2]))]
}
return(x)
}
##### Long Table Operations #####
divideMAbyMBbyRef <- function(x, mA, mB)
{
mATable <- x[Measurement==mA]
mBTable <- x[Measurement==mB]
if(nrow(mATable) != nrow(mBTable))
{
# Try to perform the operation on the subset of the mB column (can't do reverse because we are editing the mA column)
mBTable <- mBTable[MaskChannel %in% unique(mATable$MaskChannel)]
if(nrow(mATable) != nrow(mBTable))
{
stop('Number of rows for these measurements do not match! Aborting operation.')
}
}
ret <- mATable$Value / mBTable$Value
x[Measurement==mA]$Value <- ret
return(x)
}
integratedIntensityNormalizeCentralMoments <- function(x)
{
# mNames <- getMeasurementNamesContaining(x, 'ImageMoments.CentralMoment')
# for(mName in mNames)
# {
# x <- divideMAbyMBbyRef(x, mName, 'Stats.Sum')
# }
# return(x)
mNames <- getColNamesContaining(x, 'ImageMoments.CentralMoment')
newMNames <- paste(mNames, '.M00Normalized', sep='')
for(mName in mNames)
{
x[,c(mName) := get(mName)/Stats.Sum]
}
setnames(x, mNames, newMNames)
return(x)
}
meanNormalizeZernikeMoments <- function(x)
{
# mNames <- getMeasurementNamesContaining(x, 'ZernikeMag')
# for(mName in mNames)
# {
# x <- divideMAbyMBbyRef(x, mName, 'Stats.Mean')
# }
# return(x)
mNames <- getColNamesContaining(x, 'ZernikeMag')
for(mName in mNames)
{
x[,c(mName) := get(mName)/Stats.Mean]
}
return(x)
}
# Subset a data.table to the rows whose value in column 'col' matches the
# regular expression 'baseName' (via grepl).
getRowsMatching <- function(x, col, baseName)
{
  return(x[grepl(baseName, x[[col]])])
}
# Melt a wide data.table into long format with Measurement/Value columns
# (names configurable), dropping NA values.
# NOTE(review): the id variables passed to melt are all columns NOT in
# idCols (via getAllColNamesExcept) -- confirm this inversion of the
# 'idCols' name is intended.
getLongTable <- function(x, idCols, measurementName='Measurement', valueName='Value')
{
  return(melt(x, getAllColNamesExcept(x, idCols), variable.name=measurementName, value.name=valueName, na.rm=TRUE))
}
getLongTableFromTemplate <- function(x, longTemplate)
{
return(getLongTable(x, idCols=getAllColNamesExcept(x, getAllColNamesExcept(longTemplate, c('Measurement','Value')))))
}
# Return the unique values of x's 'Measurement' column that contain the
# regular-expression pattern 'name'.
getMeasurementNamesContaining <- function(x, name)
{
  ms <- unique(x$Measurement)
  return(ms[grepl(name,ms)])
}
removeMeasurementNamesContaining <- function(x, name)
{
namesToRemove <- getMeasurementNamesContaining(x, name)
print("Removing the following Measurements...")
for(name in namesToRemove)
{
print(name)
}
x <- x[!(Measurement %in% namesToRemove)]
return(x)
}
standardizeLongData <- function(x, by=c('MaskChannel','ImageChannel','Measurement','Expt'))
{
robustScale <- function(x, measurement)
{
if(substr(measurement,1,12) == 'ZernikePhase')
{
return(x)
}
else
{
m <- median(x, na.rm=TRUE)
return((x-m)/mad(x, center=m, na.rm=TRUE))
}
}
x <- removeNoMADMeasurements(x, by=by)
x[,Value:=robustScale(Value,Measurement),by=by]
return(x)
}
removeNoVarianceMeasurements <- function(x, val='Value', by=c('MaskChannel','ImageChannel','Measurement','Expt'))
{
# See if we have any columns to remove and record the info for reporting
temp <- x[,list(stdev=sd(get(val))), by=by]
temp <- data.frame(temp[stdev == 0])
print("Removing measurements with 0 variance...")
print(temp)
# Tempororarily add a column in the table with stdev in it
x[,stdev:=sd(get(val)), by=by]
y <- x[stdev != 0]
x[, stdev:=NULL]
y[, stdev:=NULL]
return(y)
}
removeNoMADMeasurements <- function(x, val='Value', by=c('MaskChannel','ImageChannel','Measurement','Expt'))
{
# Tempororarily add a column in the table with stdev in it
x[,MAD:=mad(get(val), na.rm=TRUE), by=by]
toRemove <- unique(x[MAD == 0]$Measurement)
if(length(toRemove)>0)
{
print("Removing measurements with 0 MAD...")
for(m in toRemove)
{
print(m)
}
y <- x[!(Measurement %in% toRemove)]
x[, MAD:=NULL]
y[, MAD:=NULL]
return(y)
}else
{
x[, MAD:=NULL]
return(x)
}
}
# removeNoVarianceMeasurements <- function(x, val='Value', by=c('MaskChannel','ImageChannel','Measurement','Expt'))
# {
# # See if we have any columns to remove and record the info for reporting
# temp <- x[,list(stdev=sd(get(val))), by=by]
# temp <- data.frame(temp[stdev == 0])
# print("Removing measurements with 0 variance...")
# print(temp)
# # Tempororarily add a column in the table with stdev in it
# x[,stdev:=sd(get(val)), by=by]
# y <- x[stdev != 0]
# x[, stdev:=NULL]
# y[, stdev:=NULL]
# return(y)
# }
replaceSubStringInAllRowsOfCol <- function(x, old, new, col)
{
x[,c(col):=gsub(old,new,get(col),fixed=TRUE)]
}
# Like sample(), but when more items are requested than x contains, the
# whole of x is returned unchanged instead of raising an error.
trySample <- function(x, n, replace=F, prob=NULL) {
  if (n > length(x)) {
    x
  } else {
    sample(x, n, replace, prob)
  }
}
fixLongTableStringsInCol <- function(x, col)
{
replaceSubStringInAllRowsOfCol(x,'_Order_','',col)
replaceSubStringInAllRowsOfCol(x,'_Rep_','',col)
replaceSubStringInAllRowsOfCol(x,'$','.',col)
replaceSubStringInAllRowsOfCol(x,'net.imagej.ops.Ops.','',col)
replaceSubStringInAllRowsOfCol(x,' ','',col)
replaceSubStringInAllRowsOfCol(x,':','_',col)
}
##### Feature Calculations #####
# Split a merged channel name of the form "<a>_minus_<b>" back into its
# two parts, returned as list(channel1 = <a>, channel2 = <b>).
unmergeChannelNames <- function(channelString) {
  parts <- unlist(strsplit(channelString, '_minus_', fixed = TRUE))
  list(channel1 = parts[1], channel2 = parts[2])
}
calculateChannelDifferences <- function(x)
{
if(length(unique(x$ImageChannel)) > 1)
{
# Calculate differences between channels for each Cell and Measurement (but keep other column information too so include other cols in 'by')
idCols <- getAllColNamesExcept(x, c('Value','ImageChannel'))
return(x[ImageChannel != 'None' & !grepl('_dot_',ImageChannel,fixed=T),list(ImageChannel=getComboNames(ImageChannel), Value=getComboDifferences(Value)), by=idCols])
}else
{
# return an empty table with the same columns as provided
return(x[FALSE])
}
}
# Meant to be called on a subset of the main table
# For every pair of image channels (within each group of the remaining id
# columns), compute the pairwise products of 'Value' and label them
# "<ch1>_times_<ch2>". Mirrors calculateChannelDifferences(). When fewer
# than two image channels are present, an empty table with the same
# columns is returned.
# Fixed: the multi-channel branch previously ended in an assignment
# (x2 <- ...), so its result was returned invisibly; it is now returned
# explicitly, consistent with the sibling function.
calculateChannelProducts <- function(x)
{
	if(length(unique(x$ImageChannel)) > 1)
	{
		# Keep every column except Value/ImageChannel as grouping ids.
		idCols <- getAllColNamesExcept(x, c('Value','ImageChannel'))
		return(x[ImageChannel != 'None',list(ImageChannel=getComboNames(ImageChannel, '_times_'), Value=getComboProducts(Value)), by=idCols])
	}else
	{
		# Return an empty table with the same columns as provided.
		return(x[FALSE])
	}
}
# Build labels for every unordered pair of elements in x, joining each
# pair with 'operation' (e.g. "a_minus_b"). Returns NULL when x has fewer
# than 2 elements.
getComboNames <- function(x, operation='_minus_') {
  if (length(x) < 2) {
    return(NULL)
  }
  pairs <- combn(x, 2)
  paste0(pairs[1, ], operation, pairs[2, ])
}
# Difference (first minus second) for every unordered pair of elements of x.
# Returns NULL when fewer than two elements exist.
getComboDifferences <- function(x)
{
  if (length(x) < 2) {
    return(NULL)
  }
  pairs <- combn(x, 2)
  pairs[1, ] - pairs[2, ]
}
# Product for every unordered pair of elements of x.
# Returns NULL when fewer than two elements exist.
getComboProducts <- function(x)
{
  if (length(x) < 2) {
    return(NULL)
  }
  pairs <- combn(x, 2)
  pairs[1, ] * pairs[2, ]
}
# Average the four directional Haralick texture measures (Horizontal, Vertical,
# Diagonal, AntiDiagonal) into a single '...Avg' column per feature so the
# measures become approximately rotationally invariant. Optionally drops the
# original directional columns. Returns a data.table.
calculateRMSofHaralick <- function(x, removeOriginalHaralickMeasures=FALSE)
{
# Find all names with Horizontal in them and derive the matching names for the
# other three directions plus the new averaged column name.
hNames <- getColNamesContaining(x, 'Horizontal')
vNames <- gsub("Horizontal", "Vertical", hNames)
dNames <- gsub("Horizontal", "Diagonal", hNames)
adNames <- gsub("Horizontal", "AntiDiagonal", hNames)
avgNames <- gsub("Horizontal", "Avg", hNames)
haralickNames <- data.frame(H=hNames, V=vNames, D=dNames, AD=adNames, avg=avgNames, stringsAsFactors=FALSE)
# Work on a plain data.frame for simple column arithmetic, then convert back.
# (Removed an unused local helper 'myfunc' that was dead code.)
x <- data.frame(x)
# seq_len() instead of 1:nrow() so a table without Haralick columns is a no-op.
for(i in seq_len(nrow(haralickNames)))
{
x[,haralickNames[i,5]] <- (x[,haralickNames[i,1]] + x[,haralickNames[i,2]] + x[,haralickNames[i,3]] + x[,haralickNames[i,4]])/4
if(removeOriginalHaralickMeasures)
{
x <- x[,!(names(x) %in% as.character(haralickNames[i,1:4]))]
}
}
return(data.table(x))
}
# Map point classes to rgba color strings: mutants ('MT') are red, everything
# else is blue, both at 20% opacity.
getColors <- function(pointClasses)
{
  cols <- character(length(pointClasses))
  cols[] <- 'rgb(0,0,1,0.2)'
  cols[pointClasses == 'MT'] <- 'rgb(1,0,0,0.2)'
  cols
}
##### Testing #####
# testFunc2 <- function(x, measurement)
# {
# sdx <- sd(x, na.rm=TRUE)
# if(is.na(sdx) || sdx == 0 || is.nan(sdx))
# {
# print(paste0("Removing zero variance measure: ", measurement, '.'))
# return(NULL)
# }else
# {
# return(x)
# }
# }
# duh2 <- data.table(a=rep(1:3,each=3), b=c(1:3,c(1,1,1),1:3), c=c('a','b','c','d','e','f','g','h','i'))
# duh2[,list(Value=testFunc2(b, a)), by=c('a')]
| /20160902_CellClustering/PreProcessingHelpers.R | no_license | jaywarrick/R-Cytoprofiling | R | false | false | 18,202 | r | library(data.table)
library(foreign)
##### Visualization #####
# Launch a shiny-based data browser, sourcing the UI and server definitions
# from the author's GitHub repository. Requires network access; assumes
# sourceGitHubFile() is available and that the sourced files define
# myUI/myServer in the calling environment.
browseShinyData <- function()
{
sourceGitHubFile(user='jaywarrick', repo='R-General', branch='master', file='DataClassBrowser/ui.R')
sourceGitHubFile(user='jaywarrick', repo='R-General', branch='master', file='DataClassBrowser/server.R')
shinyApp(ui=myUI, server=myServer)
}
# Overlay kernel density estimates of 'feature' for the WT and MT classes of
# table x on the range [-4, 4], drawing the taller curve first so both fit.
# NOTE(review): 'breaks', 'cmt' and 'cwt' are computed but never used -
# presumably leftovers from an earlier histogram-based version.
plotHist <- function(x, feature)
{
breaks=c(-1000, seq(-4,4,0.5), 1000)
wt <- x[Class == 'WT'][[feature]]
mt <- x[Class == 'MT'][[feature]]
cmt <- rgb(0,0,1,0.8)
cwt <- rgb(1,0,0,0.8)
wtd <- density(wt, from=-4, to=4)
mtd <- density(mt, from=-4, to=4)
# Plot whichever density is taller first so the y-axis accommodates both.
if(max(wtd$y) > max(mtd$y))
{
plot(wtd, col='red', xlim=c(-4,4), main='', xlab=feature)
lines(mtd, col='blue')
}
else
{
plot(mtd, col='blue', xlim=c(-4,4), main='', xlab=feature)
lines(wtd, col='red')
}
legend('topright', legend=c('MT','WT'), col=c('blue','red'), lty=1)
}
##### General #####
# Sample from the elements of x, avoiding the base::sample() surprise where a
# single numeric x is treated as 1:x. Extra arguments go to sample.int().
resample <- function(x, ...)
{
  idx <- sample.int(length(x), ...)
  x[idx]
}
# Convert (row, column) indices to linear column-major locations, using the
# maximum of 'numRows' as the leading dimension.
getLocsFromRCs <- function(r, c, numRows)
{
  nr <- max(numRows)
  c * nr + r
}
# Trigonometric helpers taking arguments in degrees.
.degToRad <- function(deg)
{
  deg * pi / 180
}
sind <- function(x)
{
  sin(.degToRad(x))
}
cosd <- function(x)
{
  cos(.degToRad(x))
}
tand <- function(x)
{
  tan(.degToRad(x))
}
# Re-level every factor column of data.table x (dropping unused levels);
# non-factor columns pass through unchanged. Returns a new table.
refactor <- function(x)
{
return(x[,lapply(.SD, function(x){if(is.factor(x)){factor(x)}else{x}})])
}
##### Table IO #####
# Read a list of CSV or ARFF files from 'dir' into a list of data.tables.
# Optionally tags each table with the source file, experiment/replicate labels
# and a 'Class' column, builds a unique complex id ('cId') from 'cIdCols',
# removes unwanted columns and randomly subsamples rows by cId so the total
# sample across all files is ~'sampleSize'.
getTableList <- function(dir, fileList, isArff=F, storeFilePath=F, class=NULL, assignClass=T, expt=NULL, repl=NULL, sampleSize=NULL, colsToRemove = c(), cIdCols = c())
{
if(!is.null(sampleSize))
{
subSampleSize <- sampleSize / length(fileList)
}
tableList <- list()
# For each file in the fileList
for(f in fileList)
{
# Read the file in
print(paste0('Reading file: ', file.path(dir, f)))
if(isArff)
{
library(foreign)
temp <- data.table(read.arff(file.path(dir, f)))
}
else
{
temp <- fread(file.path(dir, f))
}
# Store the filepath that was imported if desired
if(storeFilePath)
{
temp$File <- f
}
# Store the name/number of the experiment/replicate associated with this file
if(!is.null(expt))
{
temp$Expt <- expt
}
# BUG FIX: previously tested is.null(replicate), which referenced the base
# function 'replicate' and was therefore never NULL; test the 'repl' argument.
if(!is.null(repl))
{
temp$Repl <- repl
}
# Create/Assign a 'Class' column
if(!is.null(class) && assignClass)
{
temp$Class <- class
}
else if(!is.null(class) && !assignClass)
{
setnames(temp,class,'Class')
temp$Class <- as.character(temp$Class)
}
# Create a column with a complex Id that will be completely unique for each sample
idColsFound <- cIdCols[cIdCols %in% names(temp)]
if(length(idColsFound) != length(cIdCols))
{
warning(cat('The specified cIdCols (', cIdCols[!(cIdCols %in% names(temp))], 'is/are not column names of the table being retrieved... (', names(temp), ')'))
}
temp[,c('cId'):=paste(mapply(function(x){unique(as.character(x))}, mget(idColsFound)), collapse='.'), by=idColsFound]
# (Removed a leftover debug print of a hard-coded cId.)
# put the complex Id first and the class column last
setcolorder(temp, c('cId', names(temp)[names(temp) != 'cId']))
# Put the 'Class' column as the last column of the table
setcolorder(temp, c(names(temp)[names(temp) != 'Class'], 'Class'))
# Remove specified columns from the data
for(tempCol in colsToRemove)
{
if(tempCol %in% names(temp))
{
temp[,c(tempCol) := NULL]
}
else
{
warning(paste(tempCol, 'is not a column of the data table so it cannot be removed'))
}
}
# Grab the randomly sampled rows of the file
if(!is.null(sampleSize))
{
rIds <- trySample(unique(temp$cId), subSampleSize)
temp <- temp[cId %in% rIds]
}
# Print the column names for a little feedback
print(names(temp))
# Append this table to the list of tables provided.
tableList <- append(tableList, list(temp))
}
return(tableList)
}
# Read every CSV under 'dir' whose name encodes x/y parameter indices (e.g.
# 'y1_x2.csv') and bind them into one table, adding parameter columns computed
# from the indices via getXYCSVAsTable.
getXYCSVsAsTableFromDir <- function(dir, xName='SNR', xExpression='(x+1)', yName='BLUR', yExpression='(y+1)*0.05')
{
ret <- list()
fList <- list.files(path = dir, recursive = TRUE)
for(f in fList)
{
# Only accept csv files whose name carries the x/y encoding.
if((grepl('x', f) || grepl('y', f)) & grepl('.csv', f))
{
fileName <- strsplit(f, "\\.")[[1]][1]
ret[[fileName]] <- getXYCSVAsTable(dir, f, xName, xExpression, yName, yExpression)
}
}
retTable <- rbindlist(ret)
return(retTable)
}
# Read one CSV named like 'yA_xB.csv' and tag it with two constant parameter
# columns whose values are computed from the file-name indices via the given
# expressions (evaluated with 'x' and 'y' in scope).
getXYCSVAsTable <- function(dir, file, xName='SNR', xExpression='(x+1)', yName='BLUR', yExpression='(y+1)*0.05')
{
fileName <- strsplit(file, "\\.")[[1]][1]
xy <- strsplit(fileName, "_")[[1]]
y <- as.numeric(substr(xy[1],2,nchar(xy[1])))
x <- as.numeric(substr(xy[2],2,nchar(xy[2])))
xVal <- eval(parse(text=xExpression))
yVal <- eval(parse(text=yExpression))
print(paste0('Reading ', file.path(dir,file), ' as ', xName, '=', xVal, ', ', yName, '=', yVal, '.'))
theTable <- fread(file.path(dir,file))
# BUG FIX: 'theTable[,(xName),with=FALSE] <- xVal' is not a valid data.table
# column assignment; add the constant columns by reference with ':='.
theTable[, (xName) := xVal]
theTable[, (yName) := yVal]
return(theTable)
}
# Read every ARFF under 'dir' whose name encodes x/y parameter indices and
# bind them into one table, adding parameter columns via getXYArffAsTable.
getXYArffsAsTableFromDir <- function(dir, xName='SNR', xExpression='(x+1)', yName='BLUR', yExpression='(y+1)*0.05')
{
ret <- list()
fList <- list.files(path = dir, recursive = TRUE)
for(f in fList)
{
# Only accept arff files whose name carries the x/y encoding.
if((grepl('x', f) || grepl('y', f)) & grepl('.arff', f))
{
fileName <- strsplit(f, "\\.")[[1]][1]
ret[[fileName]] <- getXYArffAsTable(dir, f, xName, xExpression, yName, yExpression)
}
}
retTable <- rbindlist(ret)
return(retTable)
}
# Read one ARFF file named like 'yA_xB.arff' and tag it with two constant
# parameter columns computed from the file-name indices via the supplied
# expressions. (read.arff returns a data.frame, so plain column assignment
# works here; the result is converted to a data.table on return.)
getXYArffAsTable <- function(dir, file, xName='SNR', xExpression='(x+1)', yName='BLUR', yExpression='(y+1)*0.05')
{
fileName <- strsplit(file, "\\.")[[1]][1]
xy <- strsplit(fileName, "_")[[1]]
y <- as.numeric(substr(xy[1],2,nchar(xy[1])))
x <- as.numeric(substr(xy[2],2,nchar(xy[2])))
xVal <- eval(parse(text=xExpression))
yVal <- eval(parse(text=yExpression))
print(paste0('Reading ', file.path(dir,file), ' as ', xName, '=', xVal, ', ', yName, '=', yVal, '.'))
theTable <- read.arff(file.path(dir,file))
theTable[,xName] <- xVal
theTable[,yName] <- yVal
return(data.table(theTable))
}
##### Wide Table Operations #####
# Drop (by reference) every column whose name matches ANY of the patterns in
# 'colNames'. Prints the removed names; returns x for chaining.
removeColsContainingAny <- function(x, colNames)
{
dumbCols <- c()
for(dumbCol in colNames)
{
dumbCols <- c(dumbCols, getColNamesContaining(x, dumbCol))
}
dumbCols <- unique(dumbCols)
print('Removing the following extraneous columns of information...')
for(dumbCol in dumbCols)
{
print(dumbCol)
}
x[,(dumbCols):=NULL]
return(x)
}
# Divide column colA by column colB in place; rows where colB is 0 get NA
# instead of Inf/NaN. Returns x for chaining.
divideColAByColB <- function(x, colA, colB)
{
x[get(colB)==0,(colA):=NA]
x[get(colB)!=0,(colA):=get(colA)/get(colB)]
return(x)
}
# Drop (by reference) every numeric column of data.table x containing any
# non-finite value (NA, NaN, Inf). Returns x invisibly for chaining.
removeColsWithInfiniteVals <- function(x)
{
hasBad <- x[,lapply(.SD, function(y){length(which(!is.finite(y))) > 0}), .SDcols=getNumericCols(x)]
badCols <- getNumericCols(x)[as.logical(as.vector(hasBad))]
# BUG FIX: the condition was 'length(duh2 > 0)' (the length of a comparison
# vector); test the number of offending columns directly.
if(length(badCols) > 0)
{
print("Removing cols with infinite values...")
}
for(col in badCols)
{
print(col)
x[,(col):=NULL]
}
invisible(x)
}
# Return the names of x (column names for a table) matching the regular
# expression 'name'.
getColNamesContaining <- function(x, name)
{
  allNames <- names(x)
  allNames[grepl(name, allNames)]
}
# Drop (by reference) all columns whose name matches the pattern 'name'.
# Prints each removed name; returns x for chaining.
removeColsContaining <- function(x, name)
{
colsToRemove <- getColNamesContaining(x,name)
print(paste0("Removing colums with names containing '", name, "'"))
for(colToRemove in colsToRemove)
{
print(colToRemove)
x[,(colToRemove):=NULL]
}
return(x)
}
# Drop (by reference) all columns whose name matches EVERY pattern in
# 'namesToMatch' (the intersection of the individual matches).
removeColsContainingNames <- function(x, namesToMatch)
{
colsToRemove <- getColNamesContaining(x, namesToMatch[1])
print(paste0("Removing colums with names containing..."))
for(nameToMatch in namesToMatch)
{
print(nameToMatch)
# Keep only the candidates that also match this pattern.
colsToRemove <- colsToRemove[colsToRemove %in% getColNamesContaining(x, nameToMatch)]
}
for(colToRemove in unique(colsToRemove))
{
print(colToRemove)
x[,(colToRemove):=NULL]
}
return(x)
}
# Clean up Java-style column names of data.table x in place: strip ordering /
# repetition markers, the imagej ops prefix and spaces; '$'->'.' and ':'->'_'.
# (Wide-table counterpart of fixLongTableStringsInCol.)
fixColNames <- function(x)
{
replaceStringInColNames(x,'_Order_','')
replaceStringInColNames(x,'_Rep_','')
replaceStringInColNames(x,'$','.')
replaceStringInColNames(x,'net.imagej.ops.Ops.','')
replaceStringInColNames(x,' ','')
replaceStringInColNames(x,':','_')
}
# Return the names of x excluding those listed in 'names'.
getAllColNamesExcept <- function(x, names)
{
  keep <- !(names(x) %in% names)
  names(x)[keep]
}
# Names of the numeric columns of data.table x.
getNumericCols <- function(x)
{
return(names(x)[unlist(x[,lapply(.SD, is.numeric)])])
}
# Names of the non-numeric columns of data.table x.
getNonNumericCols <- function(x)
{
return(names(x)[!unlist(x[,lapply(.SD, is.numeric)])])
}
# Fixed-string replacement of 'old' with 'new' in the column names of x,
# applied by reference via data.table::setnames.
replaceStringInColNames <- function(x, old, new)
{
oldNames <- names(x)
newNames <- gsub(old, new, names(x), fixed=T)
setnames(x, oldNames, newNames)
}
# Cast a long table (Measurement/Value pairs) to wide format and sort the
# resulting columns alphabetically. NOTE(review): relies on an external
# 'reorganize' helper that is not defined in this file - confirm its source.
getWideTable <- function(x)
{
idCols <- getAllColNamesExcept(x, c('Value','Measurement'))
x <- reorganize(x, idCols)
x <- sortColsByName(x);
return(x)
}
# Reorder the columns of data.table x alphabetically, by reference.
sortColsByName <- function(x)
{
setcolorder(x, sort(names(x)))
}
# Robustly standardize every numeric column of a wide-format table: subtract
# the median and divide by the MAD. Zero-variance columns are removed first
# (by reference). Returns a new, scaled table.
standardizeWideData <- function(x)
{
removeNoVarianceCols(x)
robustScale <- function(x)
{
m <- median(x, na.rm=TRUE)
return((x-m)/mad(x, center=m, na.rm=TRUE))
}
x[,lapply(.SD, function(x){if(is.numeric(x)){return(robustScale(x))}else{return(x)}})]
}
# Drop (by reference) numeric columns whose standard deviation is exactly 0.
removeNoVarianceCols <- function(x)
{
namesToRemove <- getNoVarianceCols(x)
if(length(namesToRemove) > 0)
{
print("Removing cols with a variance of zero...")
for(name in namesToRemove)
{
print(name)
x[,(name):=NULL]
}
}
}
# Names of the numeric columns of x whose standard deviation is exactly 0.
getNoVarianceCols <- function(x)
{
tempSD <- function(y){sd(y, na.rm = TRUE)}
tempNames <- x[,lapply(.SD, tempSD), .SDcols=getNumericCols(x)]
return(names(tempNames)[as.numeric(as.vector(tempNames))==0])
}
# Keep only the rows of data.table x whose numeric entries are all finite.
# Non-numeric columns are ignored when deciding validity. Prints the indices
# of the removed rows and returns the filtered table.
removeIncompleteRows <- function(x)
{
valid <- NULL
for(colName in names(x))
{
#print(colName)
# Only numeric columns can contribute non-finite values.
if(is.numeric(x[, colName, with=F][[1]][1]))
{
temp <- is.finite(x[,colName,with=F][[1]])
if(is.null(valid))
{
valid <- temp
}
else
{
# A row must be finite in every numeric column to survive.
valid <- valid & temp
}
}
}
cat('Removing rows... ', which(!valid), sep=',')
return(x[valid])
}
# For every unordered pair of columns whose names contain 'name', add a new
# column holding the log-ratio of the pair, named '<name>LR.<a>.<b>'.
# Modifies x by reference and also returns it.
calculateLogRatiosOfColsContainingName <- function(x, name)
{
mNames <- getColNamesContaining(x, name)
combos <- combn(mNames,2)
for(j in seq_along(combos[1,]))
{
combo <- combos[,j]
# Name endings after the shared prefix identify each member of the pair.
ending1 <- substring(combo[1], first=nchar(name) + 2)
ending2 <- substring(combo[2], first=nchar(name) + 2)
x[,c(paste0(name, "LR.", ending1, ".", ending2)) := log(get(combo[1]) / get(combo[2]))]
}
return(x)
}
##### Long Table Operations #####
# Divide the Values of measurement mA by those of measurement mB, updating the
# mA rows of long-format table x in place. If the row counts differ, mB is
# first restricted to the MaskChannels present for mA; a remaining mismatch
# aborts with an error. Returns x for chaining.
divideMAbyMBbyRef <- function(x, mA, mB)
{
mATable <- x[Measurement==mA]
mBTable <- x[Measurement==mB]
if(nrow(mATable) != nrow(mBTable))
{
# Try to perform the operation on the subset of the mB column (can't do reverse because we are editing the mA column)
mBTable <- mBTable[MaskChannel %in% unique(mATable$MaskChannel)]
if(nrow(mATable) != nrow(mBTable))
{
stop('Number of rows for these measurements do not match! Aborting operation.')
}
}
ret <- mATable$Value / mBTable$Value
# BUG FIX: 'x[Measurement==mA]$Value <- ret' assigns into a copied subset and
# defeats the by-reference contract; use ':=' to update x in place.
x[Measurement==mA, Value := ret]
return(x)
}
# Normalize image central-moment columns by the integrated intensity
# (Stats.Sum) and rename them with an '.M00Normalized' suffix. Wide-format
# version; the commented block preserves the older long-format implementation.
integratedIntensityNormalizeCentralMoments <- function(x)
{
# mNames <- getMeasurementNamesContaining(x, 'ImageMoments.CentralMoment')
# for(mName in mNames)
# {
# x <- divideMAbyMBbyRef(x, mName, 'Stats.Sum')
# }
# return(x)
mNames <- getColNamesContaining(x, 'ImageMoments.CentralMoment')
newMNames <- paste(mNames, '.M00Normalized', sep='')
for(mName in mNames)
{
x[,c(mName) := get(mName)/Stats.Sum]
}
setnames(x, mNames, newMNames)
return(x)
}
# Normalize Zernike magnitude columns by the mean intensity (Stats.Mean).
# Wide-format version; the commented block is the older long-format code.
meanNormalizeZernikeMoments <- function(x)
{
# mNames <- getMeasurementNamesContaining(x, 'ZernikeMag')
# for(mName in mNames)
# {
# x <- divideMAbyMBbyRef(x, mName, 'Stats.Mean')
# }
# return(x)
mNames <- getColNamesContaining(x, 'ZernikeMag')
for(mName in mNames)
{
x[,c(mName) := get(mName)/Stats.Mean]
}
return(x)
}
# Rows of data.table x whose column 'col' matches the pattern 'baseName'.
getRowsMatching <- function(x, col, baseName)
{
return(x[grepl(baseName, x[[col]])])
}
# Melt x to long format: the columns listed in 'idCols' are gathered into
# measurementName/valueName pairs, while all OTHER columns become the id
# variables. NAs are dropped.
getLongTable <- function(x, idCols, measurementName='Measurement', valueName='Value')
{
return(melt(x, getAllColNamesExcept(x, idCols), variable.name=measurementName, value.name=valueName, na.rm=TRUE))
}
# Melt x to long format using the measured columns implied by an existing
# long-format table 'longTemplate' (everything that is not one of the
# template's id columns).
getLongTableFromTemplate <- function(x, longTemplate)
{
return(getLongTable(x, idCols=getAllColNamesExcept(x, getAllColNamesExcept(longTemplate, c('Measurement','Value')))))
}
# Unique values of x$Measurement that match the pattern 'name'.
getMeasurementNamesContaining <- function(x, name)
{
  measurements <- unique(x$Measurement)
  hits <- grepl(name, measurements)
  measurements[hits]
}
# Remove all rows whose Measurement name matches the pattern 'name'.
# Prints the removed measurement names; returns the filtered table.
removeMeasurementNamesContaining <- function(x, name)
{
namesToRemove <- getMeasurementNamesContaining(x, name)
print("Removing the following Measurements...")
for(name in namesToRemove)
{
print(name)
}
x <- x[!(Measurement %in% namesToRemove)]
return(x)
}
# Robustly standardize long-format Values ((v - median) / MAD) within each
# group defined by 'by'. ZernikePhase measurements are passed through
# unscaled. Measurements with zero MAD are removed first (they cannot be
# robustly scaled).
standardizeLongData <- function(x, by=c('MaskChannel','ImageChannel','Measurement','Expt'))
{
robustScale <- function(x, measurement)
{
if(substr(measurement,1,12) == 'ZernikePhase')
{
return(x)
}
else
{
m <- median(x, na.rm=TRUE)
return((x-m)/mad(x, center=m, na.rm=TRUE))
}
}
x <- removeNoMADMeasurements(x, by=by)
x[,Value:=robustScale(Value,Measurement),by=by]
return(x)
}
# Remove measurement groups whose values have zero standard deviation within a
# group defined by 'by'. Uses a temporary 'stdev' helper column which is
# removed from both x and the returned filtered copy.
removeNoVarianceMeasurements <- function(x, val='Value', by=c('MaskChannel','ImageChannel','Measurement','Expt'))
{
# See if we have any columns to remove and record the info for reporting
temp <- x[,list(stdev=sd(get(val))), by=by]
temp <- data.frame(temp[stdev == 0])
print("Removing measurements with 0 variance...")
print(temp)
# Tempororarily add a column in the table with stdev in it
x[,stdev:=sd(get(val)), by=by]
y <- x[stdev != 0]
x[, stdev:=NULL]
y[, stdev:=NULL]
return(y)
}
# Remove any Measurement whose values have zero median absolute deviation in
# some group (such measurements cannot be robustly scaled). The temporary
# 'MAD' helper column is stripped from both x and the returned table.
removeNoMADMeasurements <- function(x, val='Value', by=c('MaskChannel','ImageChannel','Measurement','Expt'))
{
# Tempororarily add a column in the table with stdev in it
x[,MAD:=mad(get(val), na.rm=TRUE), by=by]
toRemove <- unique(x[MAD == 0]$Measurement)
if(length(toRemove)>0)
{
print("Removing measurements with 0 MAD...")
for(m in toRemove)
{
print(m)
}
y <- x[!(Measurement %in% toRemove)]
x[, MAD:=NULL]
y[, MAD:=NULL]
return(y)
}else
{
x[, MAD:=NULL]
return(x)
}
}
# removeNoVarianceMeasurements <- function(x, val='Value', by=c('MaskChannel','ImageChannel','Measurement','Expt'))
# {
# # See if we have any columns to remove and record the info for reporting
# temp <- x[,list(stdev=sd(get(val))), by=by]
# temp <- data.frame(temp[stdev == 0])
# print("Removing measurements with 0 variance...")
# print(temp)
# # Tempororarily add a column in the table with stdev in it
# x[,stdev:=sd(get(val)), by=by]
# y <- x[stdev != 0]
# x[, stdev:=NULL]
# y[, stdev:=NULL]
# return(y)
# }
# Replace every literal occurrence of 'old' with 'new' in column 'col' of
# data.table x, by reference.
replaceSubStringInAllRowsOfCol <- function(x, old, new, col)
{
x[,c(col):=gsub(old,new,get(col),fixed=TRUE)]
}
# Safely sample n elements from x: when x has fewer than n elements, return x
# unchanged instead of erroring.
trySample <- function(x, n, replace=F, prob=NULL)
{
if(n > length(x))
{
return(x)
}
else
{
return(sample(x, n, replace, prob))
}
}
# Clean up Java-style measurement names stored in column 'col' (long-format
# counterpart of fixColNames). Modifies x by reference.
fixLongTableStringsInCol <- function(x, col)
{
replaceSubStringInAllRowsOfCol(x,'_Order_','',col)
replaceSubStringInAllRowsOfCol(x,'_Rep_','',col)
replaceSubStringInAllRowsOfCol(x,'$','.',col)
replaceSubStringInAllRowsOfCol(x,'net.imagej.ops.Ops.','',col)
replaceSubStringInAllRowsOfCol(x,' ','',col)
replaceSubStringInAllRowsOfCol(x,':','_',col)
}
##### Feature Calculations #####
# Split a combined channel name "<a>_minus_<b>" into its two component names.
unmergeChannelNames <- function(channelString)
{
temp <- unlist(strsplit(channelString,'_minus_',fixed=TRUE))
return(list(channel1=temp[1], channel2=temp[2]))
}
# Pairwise channel differences of 'Value' grouped by all other id columns;
# returns an empty table with the same columns when only one channel exists.
calculateChannelDifferences <- function(x)
{
if(length(unique(x$ImageChannel)) > 1)
{
# Calculate differences between channels for each Cell and Measurement (but keep other column information too so include other cols in 'by')
idCols <- getAllColNamesExcept(x, c('Value','ImageChannel'))
return(x[ImageChannel != 'None' & !grepl('_dot_',ImageChannel,fixed=T),list(ImageChannel=getComboNames(ImageChannel), Value=getComboDifferences(Value)), by=idCols])
}else
{
# return an empty table with the same columns as provided
return(x[FALSE])
}
}
# Meant to be called on a subset of the main table
# For each unordered pair of image channels, compute the products of 'Value'
# grouped by all other id columns; new channel names are '<a>_times_<b>'.
# When only one channel exists, an empty same-shaped table is returned.
calculateChannelProducts <- function(x)
{
if(length(unique(x$ImageChannel)) > 1)
{
# Calculate products between channels for each Cell and Measurement (but keep other column information too so include other cols in 'by')
idCols <- getAllColNamesExcept(x, c('Value','ImageChannel'))
# BUG FIX: the result was previously assigned to a local 'x2' and returned
# invisibly; return it explicitly, matching calculateChannelDifferences.
return(x[ImageChannel != 'None',list(ImageChannel=getComboNames(ImageChannel, '_times_'), Value=getComboProducts(Value)), by=idCols])
}else
{
# return an empty table with the same columns as provided
return(x[FALSE])
}
}
# Name for every unordered pair of elements of x, joined by 'operation';
# NULL when fewer than two elements exist.
getComboNames <- function(x, operation='_minus_')
{
if(length(x) < 2)
{
return(NULL)
}
temp <- combn(x, 2)
#print(temp)
temp <- paste0(temp[1,],operation,temp[2,])
return(temp)
}
# Difference (first minus second) for every unordered pair of elements of x.
getComboDifferences <- function(x)
{
if(length(x) < 2)
{
return(NULL)
}
temp <- combn(x, 2)
temp <- temp[1,]-temp[2,]
return(temp)
}
# Product for every unordered pair of elements of x.
getComboProducts <- function(x)
{
if(length(x) < 2)
{
return(NULL)
}
temp <- combn(x, 2)
temp <- temp[1,]*temp[2,]
return(temp)
}
# Average the four directional Haralick texture measures (Horizontal, Vertical,
# Diagonal, AntiDiagonal) into a single '...Avg' column per feature so the
# measures become approximately rotationally invariant. Optionally drops the
# original directional columns. Returns a data.table.
calculateRMSofHaralick <- function(x, removeOriginalHaralickMeasures=FALSE)
{
# Find all names with Horizontal in them and derive the matching names for the
# other three directions plus the new averaged column name.
hNames <- getColNamesContaining(x, 'Horizontal')
vNames <- gsub("Horizontal", "Vertical", hNames)
dNames <- gsub("Horizontal", "Diagonal", hNames)
adNames <- gsub("Horizontal", "AntiDiagonal", hNames)
avgNames <- gsub("Horizontal", "Avg", hNames)
haralickNames <- data.frame(H=hNames, V=vNames, D=dNames, AD=adNames, avg=avgNames, stringsAsFactors=FALSE)
# Work on a plain data.frame for simple column arithmetic, then convert back.
# (Removed an unused local helper 'myfunc' that was dead code.)
x <- data.frame(x)
# seq_len() instead of 1:nrow() so a table without Haralick columns is a no-op.
for(i in seq_len(nrow(haralickNames)))
{
x[,haralickNames[i,5]] <- (x[,haralickNames[i,1]] + x[,haralickNames[i,2]] + x[,haralickNames[i,3]] + x[,haralickNames[i,4]])/4
if(removeOriginalHaralickMeasures)
{
x <- x[,!(names(x) %in% as.character(haralickNames[i,1:4]))]
}
}
return(data.table(x))
}
# Map point classes to rgba color strings: mutants ('MT') are red, everything
# else is blue, both at 20% opacity.
getColors <- function(pointClasses)
{
  cols <- character(length(pointClasses))
  cols[] <- 'rgb(0,0,1,0.2)'
  cols[pointClasses == 'MT'] <- 'rgb(1,0,0,0.2)'
  cols
}
##### Testing #####
# testFunc2 <- function(x, measurement)
# {
# sdx <- sd(x, na.rm=TRUE)
# if(is.na(sdx) || sdx == 0 || is.nan(sdx))
# {
# print(paste0("Removing zero variance measure: ", measurement, '.'))
# return(NULL)
# }else
# {
# return(x)
# }
# }
# duh2 <- data.table(a=rep(1:3,each=3), b=c(1:3,c(1,1,1),1:3), c=c('a','b','c','d','e','f','g','h','i'))
# duh2[,list(Value=testFunc2(b, a)), by=c('a')]
|
# Unroot a phylogenetic tree with the ape package: read the Newick-format tree
# from disk and remove its root; the unrooted tree is written back out below.
library(ape)
testtree <- read.tree("7279_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="7279_0_unrooted.txt") | /codeml_files/newick_trees_processed/7279_0/rinput.R | no_license | DaniBoo/cyanobacteria_project | R | false | false | 135 | r | library(ape)
testtree <- read.tree("7279_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="7279_0_unrooted.txt") |
#' Extract Metapop management action details
#'
#' Extract management action details from RAMAS Metapop .mp files.
#'
#' @param mp A character string containing the path to a RAMAS Metapop .mp file.
#' @return A \code{data.frame} containing one row per management action, with
#' columns: \item{do.action}{Logical. Will the action be performed
#' (\code{TRUE}) or ignored (\code{FALSE}).} \item{action}{Factor. The type of
#' action to be performed.} \item{sourcepop}{The identity of the source
#' population.} \item{targetpop}{The identity of the target population.}
#' \item{start}{The timestep at which the action will commence.}
#' \item{end}{The timestep at which the action will end.} \item{freq}{The
#' frequency of the action, in timestep units.}
#' \item{after.dispersal}{Logical. Whether the action will be performed after
#' (\code{TRUE}) or before (\code{FALSE}) dispersal has taken place.}
#' \item{quantity}{Factor. Whether the action affects an absolute
#' \code{number} of individuals, or a \code{proportion} of the source
#' population.} \item{number}{The absolute number of individuals involved in
#' the action.} \item{proportion}{The proportion of the source population
#' involved in the action.} \item{fromstage}{The lowest stage involved in the
#' action.} \item{tostage}{The highest stage involved in the action.}
#' \item{condition}{The condition under which the action will be performed.}
#' \item{thr1}{If \code{condition} is either \code{N<thr1} or
#' \code{N<thr1_and_N>thr2}, this is the abundance threshold \code{thr1}.}
#' \item{thr2}{If \code{condition} is either \code{N>thr2} or
#' \code{N<thr1_and_N>thr2}, this is the abundance threshold \code{thr2}.}
#' \item{unknownfield}{Unknown.} \item{linear.to}{If \code{condition} is
#' \code{linear}, this is the upper quantity (absolute number, or proportion,
#' depending on \code{quantity}) towards which linear change will move.}
#' \item{linear.lowerN}{If \code{condition} is \code{linear}, this is the
#' abundance at which the quantity affected is equal to \code{number} or
#' \code{proportion}, depending on the value of \code{quantity}.}
#' \item{linear.upperN}{If \code{condition} is \code{linear}, this is the
#' abundance at which the quantity affected is equal to \code{linear.to}.}
#' \item{N.comprises.stages}{Factor. The stages included in the definition of
#' N, when calculating \code{thr1}, \code{thr2}, \code{linear.lowerN} and
#' \code{linear.upperN}.} \item{N.comprises.pops}{Factor. The populations
#' included in the definition of N, when calculating \code{thr1}, \code{thr2},
#' \code{linear.lowerN} and \code{linear.upperN}.}
#' @export
actions <- function(mp) {
  message("Extracting population management action info from file:\n", mp)
  metapop <- check_mp(mp)
  # Drop the file header, then locate and count the management-action lines.
  metapop <- metapop[-(1:6)]
  mgmt.start <- grep('pop mgmnt', metapop)
  n.actions <- as.numeric(gsub('\\D', '', metapop[mgmt.start]))
  if(n.actions==0) stop(sprintf('No management actions in %s.', mp))
  metapop <- metapop[(mgmt.start + 1):(mgmt.start + n.actions)]
  # Collapse whitespace and split each action line into its numeric fields.
  metapop <- gsub('\\s+', ' ', metapop)
  metapop <- as.data.frame(apply(do.call(rbind, strsplit(metapop, ' ')),
                                 2, as.numeric))
  colnames(metapop) <- c(
    'do.action', 'action', 'sourcepop', 'targetpop', 'start', 'end', 'freq',
    'after.dispersal', 'quantity', 'number', 'proportion', 'fromstage',
    'tostage', 'condition', 'thr1', 'thr2', 'unknownfield', 'linear.to',
    'linear.lowerN', 'linear.upperN', 'N.comprises.stages', 'N.comprises.pops')
  # Decode the integer codes into logicals/factors per the .mp file format.
  metapop$do.action <- metapop$do.action == 1
  metapop$action <- factor(metapop$action, 0:2, c('harvest', 'intro', 'translo'))
  metapop$after.dispersal <- metapop$after.dispersal == 1
  metapop$quantity <- factor(metapop$quantity, 0:1, c('number', 'proportion'))
  # BUG FIX: 'number' was previously overwritten with the integer codes of the
  # 'quantity' factor; keep the original 'number' values instead.
  metapop$number <- ifelse(metapop$quantity == 'proportion',
                           NA, metapop$number)
  metapop$proportion <- ifelse(metapop$quantity == 'proportion',
                               metapop$proportion, NA)
  metapop$condition <- factor(
    metapop$condition, 0:4,
    c('none', 'N<thr1', 'N>thr2', 'N<thr1_and_N>thr2', 'linear'))
  # Blank out threshold/linear fields that do not apply to the condition type.
  metapop$thr1 <- ifelse(metapop$condition %in%
                           c('none', 'N>thr2', 'linear'), NA, metapop$thr1)
  metapop$thr2 <- ifelse(metapop$condition %in%
                           c('none', 'N<thr1', 'linear'), NA, metapop$thr2)
  metapop$linear.to <- ifelse(metapop$condition != 'linear',
                              NA, metapop$linear.to)
  metapop$linear.lowerN <- ifelse(metapop$condition != 'linear',
                                  NA, metapop$linear.lowerN)
  metapop$linear.upperN <- ifelse(metapop$condition != 'linear',
                                  NA, metapop$linear.upperN)
  metapop$N.comprises.stages <- ifelse(metapop$condition == 'none',
                                       NA, metapop$N.comprises.stages)
  metapop$N.comprises.pops <- ifelse(metapop$condition == 'none',
                                     NA, metapop$N.comprises.pops)
  metapop$N.comprises.stages <- factor(
    metapop$N.comprises.stages, 0:2,
    c('each stage', 'all selected stages', 'all stages'))
  # BUG FIX: 'N.comprises.pops' was previously built from the (already
  # factored) N.comprises.stages column; decode the pops codes themselves.
  metapop$N.comprises.pops <-
    factor(metapop$N.comprises.pops, c(0, 2), c('each pop', 'all pops'))
  metapop
}
| /mptools/R/actions.R | no_license | ingted/R-Examples | R | false | false | 5,498 | r | #' Extract Metapop management action details
#'
#' Extract management action details from RAMAS Metapop .mp files.
#'
#' @param mp A character string containing the path to a RAMAS Metapop .mp file.
#' @return A \code{data.frame} containing one row per management action, with
#' columns: \item{do.action}{Logical. Will the action be performed
#' (\code{TRUE}) or ignored (\code{FALSE}).} \item{action}{Factor. The type of
#' action to be performed.} \item{sourcepop}{The identity of the source
#' population.} \item{targetpop}{The identity of the target population.}
#' \item{start}{The timestep at which the action will commence.}
#' \item{end}{The timestep at which the action will end.} \item{freq}{The
#' frequency of the action, in timestep units.}
#' \item{after.dispersal}{Logical. Whether the action will be performed after
#' (\code{TRUE}) or before (\code{FALSE}) dispersal has taken place.}
#' \item{quantity}{Factor. Whether the action affects an absolute
#' \code{number} of individuals, or a \code{proportion} of the source
#' population.} \item{number}{The absolute number of individuals involved in
#' the action.} \item{proportion}{The proportion of the source population
#' involved in the action.} \item{fromstage}{The lowest stage involved in the
#' action.} \item{tostage}{The highest stage involved in the action.}
#' \item{condition}{The condition under which the action will be performed.}
#' \item{thr1}{If \code{condition} is either \code{N<thr1} or
#' \code{N<thr1_and_N>thr2}, this is the abundance threshold \code{thr1}.}
#' \item{thr2}{If \code{condition} is either \code{N>thr2} or
#' \code{N<thr1_and_N>thr2}, this is the abundance threshold \code{thr2}.}
#' \item{unknownfield}{Unknown.} \item{linear.to}{If \code{condition} is
#' \code{linear}, this is the upper quantity (absolute number, or proportion,
#' depending on \code{quantity}) towards which linear change will move.}
#' \item{linear.lowerN}{If \code{condition} is \code{linear}, this is the
#' abundance at which the quantity affected is equal to \code{number} or
#' \code{proportion}, depending on the value of \code{quantity}.}
#' \item{linear.upperN}{If \code{condition} is \code{linear}, this is the
#' abundance at which the quantity affected is equal to \code{linear.to}.}
#' \item{N.comprises.stages}{Factor. The stages included in the definition of
#' N, when calculating \code{thr1}, \code{thr2}, \code{linear.lowerN} and
#' \code{linear.upperN}.} \item{N.comprises.pops}{Factor. The populations
#' included in the definition of N, when calculating \code{thr1}, \code{thr2},
#' \code{linear.lowerN} and \code{linear.upperN}.}
#' @export
actions <- function(mp) {
  message("Extracting population management action info from file:\n", mp)
  metapop <- check_mp(mp)
  # Drop the file header, then locate and count the management-action lines.
  metapop <- metapop[-(1:6)]
  mgmt.start <- grep('pop mgmnt', metapop)
  n.actions <- as.numeric(gsub('\\D', '', metapop[mgmt.start]))
  if(n.actions==0) stop(sprintf('No management actions in %s.', mp))
  metapop <- metapop[(mgmt.start + 1):(mgmt.start + n.actions)]
  # Collapse whitespace and split each action line into its numeric fields.
  metapop <- gsub('\\s+', ' ', metapop)
  metapop <- as.data.frame(apply(do.call(rbind, strsplit(metapop, ' ')),
                                 2, as.numeric))
  colnames(metapop) <- c(
    'do.action', 'action', 'sourcepop', 'targetpop', 'start', 'end', 'freq',
    'after.dispersal', 'quantity', 'number', 'proportion', 'fromstage',
    'tostage', 'condition', 'thr1', 'thr2', 'unknownfield', 'linear.to',
    'linear.lowerN', 'linear.upperN', 'N.comprises.stages', 'N.comprises.pops')
  # Decode the integer codes into logicals/factors per the .mp file format.
  metapop$do.action <- metapop$do.action == 1
  metapop$action <- factor(metapop$action, 0:2, c('harvest', 'intro', 'translo'))
  metapop$after.dispersal <- metapop$after.dispersal == 1
  metapop$quantity <- factor(metapop$quantity, 0:1, c('number', 'proportion'))
  # BUG FIX: 'number' was previously overwritten with the integer codes of the
  # 'quantity' factor; keep the original 'number' values instead.
  metapop$number <- ifelse(metapop$quantity == 'proportion',
                           NA, metapop$number)
  metapop$proportion <- ifelse(metapop$quantity == 'proportion',
                               metapop$proportion, NA)
  metapop$condition <- factor(
    metapop$condition, 0:4,
    c('none', 'N<thr1', 'N>thr2', 'N<thr1_and_N>thr2', 'linear'))
  # Blank out threshold/linear fields that do not apply to the condition type.
  metapop$thr1 <- ifelse(metapop$condition %in%
                           c('none', 'N>thr2', 'linear'), NA, metapop$thr1)
  metapop$thr2 <- ifelse(metapop$condition %in%
                           c('none', 'N<thr1', 'linear'), NA, metapop$thr2)
  metapop$linear.to <- ifelse(metapop$condition != 'linear',
                              NA, metapop$linear.to)
  metapop$linear.lowerN <- ifelse(metapop$condition != 'linear',
                                  NA, metapop$linear.lowerN)
  metapop$linear.upperN <- ifelse(metapop$condition != 'linear',
                                  NA, metapop$linear.upperN)
  metapop$N.comprises.stages <- ifelse(metapop$condition == 'none',
                                       NA, metapop$N.comprises.stages)
  metapop$N.comprises.pops <- ifelse(metapop$condition == 'none',
                                     NA, metapop$N.comprises.pops)
  metapop$N.comprises.stages <- factor(
    metapop$N.comprises.stages, 0:2,
    c('each stage', 'all selected stages', 'all stages'))
  # BUG FIX: 'N.comprises.pops' was previously built from the (already
  # factored) N.comprises.stages column; decode the pops codes themselves.
  metapop$N.comprises.pops <-
    factor(metapop$N.comprises.pops, c(0, 2), c('each pop', 'all pops'))
  metapop
}
|
\name{RLQ}
\alias{RLQ}
\title{coeficientes de localización interindustrial de Round}
\description{ Propuesta es la sugerida por Round (1978), simbolizada normalmente mediante la abreviatura RLQ. Su expresión es del siguiente modo: RLQ(ij) = SLQ(i)/log2[1+SLQ(j)].
}
\usage{
RLQ(a,b)
}
\arguments{
\item{a}{vector de valores añadidos de la región.}
\item{b}{vector de valores añadidos de la nacion.}
}
\references{Round, J. I. (1978): “An Inter-regional Input-Output Approach to the Evaluation of Non-survey Methods”, Journal of Regional Science, Vol. 18, nº 2, pp 179-194.
Parra, F. (2018), Técnicas de Análisis Input-Output con R, (https://wordpress.com/view/modelosinputoutput.wordpress.com)
}
\examples{
a=c(170,2227,403,821,4896,2484)
b=c(24019,129248,36320,63521,484087,216831)
RLQ(a,b)
}
| /man/RLQ.Rd | no_license | PacoParra/UtilMio | R | false | false | 847 | rd |
\name{RLQ}
\alias{RLQ}
\title{coeficientes de localización interindustrial de Round}
\description{ Propuesta es la sugerida por Round (1978), simbolizada normalmente mediante la abreviatura RLQ. Su expresión es del siguiente modo: RLQ(ij) = SLQ(i)/log2[1+SLQ(j)].
}
\usage{
RLQ(a,b)
}
\arguments{
\item{a}{vector de valores añadidos de la región.}
\item{b}{vector de valores añadidos de la nacion.}
}
\references{Round, J. I. (1978): “An Inter-regional Input-Output Approach to the Evaluation of Non-survey Methods”, Journal of Regional Science, Vol. 18, nº 2, pp 179-194.
Parra, F. (2018), Técnicas de Análisis Input-Output con R, (https://wordpress.com/view/modelosinputoutput.wordpress.com)
}
\examples{
a=c(170,2227,403,821,4896,2484)
b=c(24019,129248,36320,63521,484087,216831)
RLQ(a,b)
}
|
# Cluster-robust (clustered) standard errors for a fitted lm/glm model 'fm'.
#   fm      fitted model object
#   dfcw    extra degrees-of-freedom correction weight (1 for none)
#   cluster vector identifying the cluster of each observation
# Returns a coeftest() coefficient table using the clustered covariance.
clx <- function(fm, dfcw, cluster)
{
library(sandwich)
library(lmtest)
# M clusters, N observations; dfc is the finite-sample correction factor.
M <- length(unique(cluster))
N <- length(cluster)
dfc <- (M/(M-1))*((N-1)/(N-fm$rank))
# Sum the estimating-function (score) contributions within each cluster.
u <- apply(estfun(fm), 2,
function(x) tapply(x, cluster, sum))
vcovCL <- dfc * sandwich(fm, meat = crossprod(u)/N) * dfcw
coeftest(fm, vcovCL)
}
## ------------------------------------------------------------------------
# Demo: fetch the abalone data via PivotalR/MADlib, fit a linear model and
# report cluster-robust standard errors clustered on 'sex'.
library(PivotalR)
db.connect(port = 14526, dbname = "madlib")
dat <- lookat(db.data.frame("abalone"), "all")
fit <- lm(rings ~ length + diameter + height + shell, data = dat)
clx(fit, 1, dat$sex)
## ------------------------------------------------------------------------
# Same idea for a logistic model of small vs large ring counts.
git <- glm(rings < 10 ~ length + diameter + height + shell, data = dat, family = binomial)
summary(git)
clx(git, 1, dat$sex)
| /clustered_variance/test04.R | no_license | walkingsparrow/tests | R | false | false | 832 | r | clx <- function(fm, dfcw, cluster)
{
library(sandwich)
library(lmtest)
M <- length(unique(cluster))
N <- length(cluster)
dfc <- (M/(M-1))*((N-1)/(N-fm$rank))
u <- apply(estfun(fm), 2,
function(x) tapply(x, cluster, sum))
vcovCL <- dfc * sandwich(fm, meat = crossprod(u)/N) * dfcw
coeftest(fm, vcovCL)
}
## ------------------------------------------------------------------------
library(PivotalR)
db.connect(port = 14526, dbname = "madlib")
dat <- lookat(db.data.frame("abalone"), "all")
fit <- lm(rings ~ length + diameter + height + shell, data = dat)
clx(fit, 1, dat$sex)
## ------------------------------------------------------------------------
git <- glm(rings < 10 ~ length + diameter + height + shell, data = dat, family = binomial)
summary(git)
clx(git, 1, dat$sex)
|
# Tokenizing and Visualization --------------------------------------------
# Text, Characters, and Strings
library(tidyverse)
library(tidytext)
text <- c(
"So long and thanks for all the fish,",
"So sad that it should come to this,",
"We tried to warn you all but oh dear!"
)
text
str(text)
# One row per line of text. tibble() supersedes the deprecated data_frame()
# constructor and returns the same tbl_df.
text_df <- tibble(
  line = 1:3,
  text = text
)
text_df
# Tokenize
text_df %>%
unnest_tokens(word, text)
# Down the Rabbit Hole
library(gutenbergr)
tidy_carroll <- gutenberg_download(11) %>%
unnest_tokens(word, text)
tidy_carroll %>%
count(word) %>%
arrange(desc(n))
# Remove Stop Words
stop_words
tidy_carroll <- tidy_carroll %>%
anti_join(stop_words)
tidy_carroll %>%
count(word) %>%
arrange(desc(n))
# Visualize Word Frequencies
tidy_carroll %>%
count(word) %>%
mutate(word = reorder(word, n)) %>%
ggplot(aes(x = word, y = n)) +
geom_col()
tidy_carroll %>%
count(word) %>%
mutate(word = reorder(word, n)) %>%
filter(n > 30) %>%
ggplot(aes(x = word, y = n)) +
geom_col() +
coord_flip()
# Word Clouds
library(wordcloud)
tidy_carroll %>%
count(word) %>%
with(wordcloud(word, n, min.freq = 10))
# Exercise
tidy_carroll2 <- gutenberg_download(12) %>%
unnest_tokens(word, text) %>%
anti_join(stop_words)
tidy_carroll2 %>%
count(word) %>%
mutate(word = reorder(word, n)) %>%
filter(n > 30) %>%
ggplot(aes(x = word, y = n)) +
geom_col() +
coord_flip()
tidy_carroll2 %>%
count(word) %>%
with(wordcloud(word, n, min.freq = 10))
# Sentiment Analysis ------------------------------------------------------
# Web Scraping
library(rvest)
text <- read_html(
"https://en.wikipedia.org/wiki/Columbus,_Ohio"
) %>%
html_nodes("#content") %>%
html_text() %>%
str_split("\\\n\\\n\\\n") %>%
unlist()
# Tokenize, Tidy, and Visualize
# Extend the standard stop-word list with page-chrome tokens specific to
# Wikipedia articles. tibble() replaces the deprecated data_frame().
columbus_stop_words <- stop_words %>%
  bind_rows(
    tibble(
      word = c("retrieved", "edit"),
      lexicon = rep("CUSTOM", 2)
    )
  )
# Tokenize the scraped sections and drop stop words; `section` records which
# scraped chunk each word came from.
tidy_text <- tibble(text) %>%
  mutate(section = row_number()) %>%
  unnest_tokens(word, text) %>%
  anti_join(columbus_stop_words)
tidy_text %>%
count(word) %>%
mutate(word = reorder(word, n)) %>%
filter(n > 40) %>%
ggplot(aes(x = word, y = n)) +
geom_col() +
coord_flip()
# Sentiment Dictionaries
get_sentiments("afinn")
get_sentiments("bing")
get_sentiments("nrc")
get_sentiments("nrc") %>%
count(sentiment)
# Sentiment Analysis
sentiment_nrc <- tidy_text %>%
inner_join(get_sentiments("nrc"))
sentiment_nrc %>%
count(sentiment) %>%
arrange(desc(n))
sentiment_nrc %>%
filter(sentiment == "joy") %>%
count(word) %>%
arrange(desc(n))
# Changing Sentiment
tidy_carroll <- gutenberg_download(11) %>%
mutate(line = row_number()) %>%
unnest_tokens(word, text) %>%
anti_join(stop_words)
tidy_carroll %>%
inner_join(get_sentiments("bing")) %>%
count(index = line %/% 30, sentiment) %>%
spread(sentiment, n, fill = 0) %>%
mutate(sentiment = positive - negative) %>%
ggplot(aes(x = index, y = sentiment)) +
geom_col()
# Exercise
tidy_carroll2 <- gutenberg_download(12) %>%
mutate(line = row_number()) %>%
unnest_tokens(word, text) %>%
anti_join(stop_words)
tidy_carroll2 %>%
inner_join(get_sentiments("nrc")) %>%
count(sentiment) %>%
arrange(desc(n))
tidy_carroll2 %>%
inner_join(get_sentiments("bing")) %>%
count(index = line %/% 30, sentiment) %>%
spread(sentiment, n, fill = 0) %>%
mutate(sentiment = positive - negative) %>%
ggplot(aes(x = index, y = sentiment)) +
geom_col()
# Topic Modeling ----------------------------------------------------------
# Word Frequencies
tidy_carroll <- gutenberg_download(c(11, 12)) %>%
unnest_tokens(word, text) %>%
mutate(
book = factor(
gutenberg_id,
labels = c(
"Alice's Adventures in Wonderland",
"Through the Looking-Glass"
)
)
) %>%
count(book, word) %>%
arrange(desc(n))
tidy_carroll
# Term Frequency-Inverse Document Frequency
tidy_carroll %>%
bind_tf_idf(word, book, n)
tidy_carroll <- tidy_carroll %>%
bind_tf_idf(word, book, n) %>%
arrange(desc(tf_idf))
tidy_carroll
# Visualize tf-idf by Document
tidy_carroll %>%
group_by(book) %>%
top_n(10, tf_idf) %>%
ungroup() %>%
mutate(word = reorder(word, tf_idf)) %>%
ggplot(aes(word, tf_idf, fill = book)) +
geom_col(show.legend = FALSE) +
facet_wrap(~ book, scales = "free") +
coord_flip()
# Create a Document Term Matrix
library(topicmodels)
roomba_650 <- read_csv("Roomba 650 Amazon Reviews.csv") %>%
mutate(review = row_number()) %>%
unnest_tokens(word, Review) %>%
anti_join(stop_words) %>%
select(review, word)
dtm_text <- roomba_650 %>%
count(review, word) %>%
cast_dtm(review, word, n)
# Run a Topic Model
lda_out <- dtm_text %>%
LDA(
k = 2,
method = "Gibbs",
control = list(seed = 42)
)
# Topic Word Probabilities
lda_topics <- lda_out %>%
tidy(matrix = "beta")
lda_topics
# Visualize, Name, and Choose K
lda_topics %>%
group_by(topic) %>%
top_n(15, beta) %>%
ungroup() %>%
mutate(term = reorder(term, beta)) %>%
ggplot(aes(term, beta, fill = as.factor(topic))) +
geom_col(show.legend = FALSE) +
facet_wrap(~ topic, scales = "free") +
coord_flip()
# Exercise
lda_out <- vector("list", length = 6)
for (i in seq_along(lda_out)) {
# Run the topic model and save the output.
lda_out[[i]] <- dtm_text %>%
LDA(
k = i + 1,
method = "Gibbs",
control = list(seed = 42)
)
# Visualize.
lda_out[[i]] %>%
tidy(matrix = "beta") %>%
group_by(topic) %>%
top_n(15, beta) %>%
ungroup() %>%
mutate(term = reorder(term, beta)) %>%
ggplot(aes(term, beta, fill = as.factor(topic))) +
geom_col(show.legend = FALSE) +
facet_wrap(~ topic, scales = "free") +
coord_flip()
}
| /A Tidy Approach to Text Analysis in R.R | no_license | plear/tidy-text-analysis | R | false | false | 5,875 | r | # Tokenizing and Visualization --------------------------------------------
# Text, Characters, and Strings
library(tidyverse)
library(tidytext)
text <- c(
"So long and thanks for all the fish,",
"So sad that it should come to this,",
"We tried to warn you all but oh dear!"
)
text
str(text)
text_df <- data_frame(
line = 1:3,
text = text
)
text_df
# Tokenize
text_df %>%
unnest_tokens(word, text)
# Down the Rabbit Hole
library(gutenbergr)
tidy_carroll <- gutenberg_download(11) %>%
unnest_tokens(word, text)
tidy_carroll %>%
count(word) %>%
arrange(desc(n))
# Remove Stop Words
stop_words
tidy_carroll <- tidy_carroll %>%
anti_join(stop_words)
tidy_carroll %>%
count(word) %>%
arrange(desc(n))
# Visualize Word Frequencies
tidy_carroll %>%
count(word) %>%
mutate(word = reorder(word, n)) %>%
ggplot(aes(x = word, y = n)) +
geom_col()
tidy_carroll %>%
count(word) %>%
mutate(word = reorder(word, n)) %>%
filter(n > 30) %>%
ggplot(aes(x = word, y = n)) +
geom_col() +
coord_flip()
# Word Clouds
library(wordcloud)
tidy_carroll %>%
count(word) %>%
with(wordcloud(word, n, min.freq = 10))
# Exercise
tidy_carroll2 <- gutenberg_download(12) %>%
unnest_tokens(word, text) %>%
anti_join(stop_words)
tidy_carroll2 %>%
count(word) %>%
mutate(word = reorder(word, n)) %>%
filter(n > 30) %>%
ggplot(aes(x = word, y = n)) +
geom_col() +
coord_flip()
tidy_carroll2 %>%
count(word) %>%
with(wordcloud(word, n, min.freq = 10))
# Sentiment Analysis ------------------------------------------------------
# Web Scraping
library(rvest)
text <- read_html(
"https://en.wikipedia.org/wiki/Columbus,_Ohio"
) %>%
html_nodes("#content") %>%
html_text() %>%
str_split("\\\n\\\n\\\n") %>%
unlist()
# Tokenize, Tidy, and Visualize
columbus_stop_words <- stop_words %>%
bind_rows(
data_frame(
word = c("retrieved", "edit"),
lexicon = rep("CUSTOM", 2)
)
)
tidy_text <- data_frame(text) %>%
mutate(section = row_number()) %>%
unnest_tokens(word, text) %>%
anti_join(columbus_stop_words)
tidy_text %>%
count(word) %>%
mutate(word = reorder(word, n)) %>%
filter(n > 40) %>%
ggplot(aes(x = word, y = n)) +
geom_col() +
coord_flip()
# Sentiment Dictionaries
get_sentiments("afinn")
get_sentiments("bing")
get_sentiments("nrc")
get_sentiments("nrc") %>%
count(sentiment)
# Sentiment Analysis
sentiment_nrc <- tidy_text %>%
inner_join(get_sentiments("nrc"))
sentiment_nrc %>%
count(sentiment) %>%
arrange(desc(n))
sentiment_nrc %>%
filter(sentiment == "joy") %>%
count(word) %>%
arrange(desc(n))
# Changing Sentiment
tidy_carroll <- gutenberg_download(11) %>%
mutate(line = row_number()) %>%
unnest_tokens(word, text) %>%
anti_join(stop_words)
tidy_carroll %>%
inner_join(get_sentiments("bing")) %>%
count(index = line %/% 30, sentiment) %>%
spread(sentiment, n, fill = 0) %>%
mutate(sentiment = positive - negative) %>%
ggplot(aes(x = index, y = sentiment)) +
geom_col()
# Exercise
tidy_carroll2 <- gutenberg_download(12) %>%
mutate(line = row_number()) %>%
unnest_tokens(word, text) %>%
anti_join(stop_words)
tidy_carroll2 %>%
inner_join(get_sentiments("nrc")) %>%
count(sentiment) %>%
arrange(desc(n))
tidy_carroll2 %>%
inner_join(get_sentiments("bing")) %>%
count(index = line %/% 30, sentiment) %>%
spread(sentiment, n, fill = 0) %>%
mutate(sentiment = positive - negative) %>%
ggplot(aes(x = index, y = sentiment)) +
geom_col()
# Topic Modeling ----------------------------------------------------------
# Word Frequencies
tidy_carroll <- gutenberg_download(c(11, 12)) %>%
unnest_tokens(word, text) %>%
mutate(
book = factor(
gutenberg_id,
labels = c(
"Alice's Adventures in Wonderland",
"Through the Looking-Glass"
)
)
) %>%
count(book, word) %>%
arrange(desc(n))
tidy_carroll
# Term Frequency-Inverse Document Frequency
tidy_carroll %>%
bind_tf_idf(word, book, n)
tidy_carroll <- tidy_carroll %>%
bind_tf_idf(word, book, n) %>%
arrange(desc(tf_idf))
tidy_carroll
# Visualize tf-idf by Document
tidy_carroll %>%
group_by(book) %>%
top_n(10, tf_idf) %>%
ungroup() %>%
mutate(word = reorder(word, tf_idf)) %>%
ggplot(aes(word, tf_idf, fill = book)) +
geom_col(show.legend = FALSE) +
facet_wrap(~ book, scales = "free") +
coord_flip()
# Create a Document Term Matrix
library(topicmodels)
roomba_650 <- read_csv("Roomba 650 Amazon Reviews.csv") %>%
mutate(review = row_number()) %>%
unnest_tokens(word, Review) %>%
anti_join(stop_words) %>%
select(review, word)
dtm_text <- roomba_650 %>%
count(review, word) %>%
cast_dtm(review, word, n)
# Run a Topic Model
lda_out <- dtm_text %>%
LDA(
k = 2,
method = "Gibbs",
control = list(seed = 42)
)
# Topic Word Probabilities
lda_topics <- lda_out %>%
tidy(matrix = "beta")
lda_topics
# Visualize, Name, and Choose K
lda_topics %>%
group_by(topic) %>%
top_n(15, beta) %>%
ungroup() %>%
mutate(term = reorder(term, beta)) %>%
ggplot(aes(term, beta, fill = as.factor(topic))) +
geom_col(show.legend = FALSE) +
facet_wrap(~ topic, scales = "free") +
coord_flip()
# Exercise
lda_out <- vector("list", length = 6)
for (i in seq_along(lda_out)) {
# Run the topic model and save the output.
lda_out[[i]] <- dtm_text %>%
LDA(
k = i + 1,
method = "Gibbs",
control = list(seed = 42)
)
# Visualize.
lda_out[[i]] %>%
tidy(matrix = "beta") %>%
group_by(topic) %>%
top_n(15, beta) %>%
ungroup() %>%
mutate(term = reorder(term, beta)) %>%
ggplot(aes(term, beta, fill = as.factor(topic))) +
geom_col(show.legend = FALSE) +
facet_wrap(~ topic, scales = "free") +
coord_flip()
}
|
# Advent of Code 2020, day 2 part 1: count valid passwords.
# Input lines look like "1-3 a: abcde"; fread splits on whitespace into
# V1 = "1-3", V2 = "a:", V3 = "abcde".
library(stringr)
library(data.table)
input <- fread("day02_input.txt", header = F)
count <-0
# NOTE(review): 1:nrow(input) breaks on an empty input (yields 1:0);
# seq_len(nrow(input)) would be safer.
for(row in 1:nrow(input)) {
  # Split "min-max" into its two bounds.
  times <- unlist(str_split(input[row]$V1, "-"))
  # Drop the trailing ":" to get the required character.
  char <- str_sub(input[row]$V2, 1, -2)
  pwd <- input[row]$V3
  # How often the required character occurs in the password.
  num <- str_count(pwd,char)
  # NOTE(review): these shadow base::min/base::max inside the loop.
  min <- as.integer(times[1])
  max <- as.integer(times[2])
  if(min <= num && num <= max) count <- count + 1
}
count
| /day02/day02.R | no_license | marcmace/AdventofCode2020 | R | false | false | 413 | r | library(stringr)
library(data.table)
input <- fread("day02_input.txt", header = F)
count <-0
for(row in 1:nrow(input)) {
times <- unlist(str_split(input[row]$V1, "-"))
char <- str_sub(input[row]$V2, 1, -2)
pwd <- input[row]$V3
num <- str_count(pwd,char)
min <- as.integer(times[1])
max <- as.integer(times[2])
if(min <= num && num <= max) count <- count + 1
}
count
|
# Test multiple RDBMS
dt_sp <- options("datatable.showProgress")
options("datatable.showProgress" = FALSE)
# RDBMS batch tests -------------------------------------------------------
# +- setup ----------------------------------------------------------------
# Temp file backing the SQLite test database; removed in teardown below.
tsqlite <- tempfile()
# One entry per RDBMS under test: `conn` is the (possibly failed) DBI
# connection and `ctor` the appender class to exercise against it.
# Connections are wrapped in try() so an unavailable database skips its
# tests instead of aborting the suite; credentials match the Travis CI setup.
dbs <- list(
  "MySQL via RMariaDB" = list(
    conn = try(silent = TRUE, DBI::dbConnect(
      RMariaDB::MariaDB(),
      username = "travis",
      dbname = "travis_ci_test",
      host = "localhost"
    )),
    ctor = AppenderDbi
  ),
  "MySQL via RMySQL" = list(
    conn = try(silent = TRUE, DBI::dbConnect(
      RMySQL::MySQL(),
      username = "travis",
      dbname = "travis_ci_test",
      host = "localhost"
    )),
    ctor = AppenderDbi
  ),
  "PostgreSQL via RPostgreSQL" = list(
    conn = try(silent = TRUE, DBI::dbConnect(
      RPostgreSQL::PostgreSQL(),
      user = "postgres",
      host = "localhost",
      dbname = "travis_ci_test"
    )),
    ctor = AppenderDbi
  ),
  "PostgreSQL via RPostgres" = list(
    conn = try(silent = TRUE, DBI::dbConnect(
      RPostgres::Postgres(),
      user = "postgres",
      host = "localhost",
      dbname = "travis_ci_test"
    )),
    ctor = AppenderDbi
  ),
  # DB2 uses the RJDBC-based appender variant.
  "DB2 via RJDBC" = list(
    conn = try(silent = TRUE, dataSTAT::dbConnectDB2("RTEST", "rtest", "rtest")),
    ctor = AppenderRjdbc
  ),
  # SQLite is file-backed and always available, so no try() wrapper.
  "SQLite via RSQLite" = list(
    conn = DBI::dbConnect(RSQLite::SQLite(), database = tsqlite),
    ctor = AppenderDbi
  )
)
options("datatable.showProgress" = dt_sp)
nm <- "SQLite via RSQLite" # for manual testing, can be deleted
nm <- "DB2 via RJDBC" # for manual testing, can be deleted
# +- tests -------------------------------------------------------------------
for (nm in names(dbs)){
conn <- dbs[[nm]]$conn
ctor <- dbs[[nm]]$ctor
title <- paste(ctor$classname, "/", nm)
context(title)
if (inherits(conn, "try-error")) {
test_that(title, {trimws(strwrap(skip(conn)))})
next
}
# setup test environment
tname <- "logging_test"
suppressMessages(
app <- ctor$new(
conn = conn,
table = tname,
close_on_exit = FALSE, # we are closing manually and dont want warnings
buffer_size = 0L
)
)
e <- LogEvent$new(
lgr, level = 600L, msg = "ohno", caller = "nope()", timestamp = Sys.time()
)
test_that(paste0(nm, ": round trip event inserts"), {
expect_silent(app$append(e))
expect_silent(app$append(e))
tres <- app$data
eres <- rbind(
as.data.frame(e, stringsAsFactors = FALSE),
as.data.frame(e, stringsAsFactors = FALSE)
)
expect_equal(tres[, -2], eres[, -2])
# small tolerance is allowed for timestamps
tdiff <- as.numeric(tres[, 2]) - as.numeric(eres[, 2])
expect_true(all(tdiff < 1), info = tdiff)
expect_true(all(format(tres$timestamp) == format(e$timestamp, usetz = FALSE)))
})
test_that(paste0(nm, ": col order does not impact inserts"), {
for (i in 1:20){
app$layout$set_col_types(sample(app$layout$col_types))
expect_silent(app$append(e))
}
expect_true(all(vapply(app$data$timestamp, all_are_identical, logical(1))))
expect_true(all(format(app$data$timestamp) == format(e$timestamp)))
})
test_that(paste0(nm, ": querying / displaying logs works"), {
expect_output(app$show(n = 5), paste(rep("TRACE.*", 5), collapse = "") )
expect_output(expect_identical(nrow(app$show(n = 1)), 1L), "TRACE")
expect_output(expect_identical(show_log(target = app), app$show()))
expect_identical(
capture.output(show_log(target = app)),
capture.output(app$show())
)
})
# custom fields
test_that(paste0(nm, ": Creating tables with custom fields works"), {
try(DBI::dbRemoveTable(conn, "logging_test_create"), silent = TRUE)
lg <- Logger$new(
"test_dbi",
threshold = "trace",
propagate = FALSE,
exception_handler = function (...) stop(...)
)
if (ctor$classname == "AppenderRjdbc"){
lo <- LayoutRjdbc$new(
col_types = c(
level = "smallint",
timestamp = "timestamp",
logger= "varchar(512)",
msg = "varchar(1024)",
caller = "varchar(1024)",
foo = "varchar(256)"
)
)
} else {
lo <- LayoutSqlite$new(
col_types = c(
level = "INTEGER",
timestamp = "TEXT",
logger= "TEXT",
msg = "TEXT",
caller = "TEXT",
foo = "TEXT"
)
)
}
expect_message(
lg$add_appender(
ctor$new(
conn = conn,
table = "logging_test_create",
layout = lo,
close_on_exit = FALSE
), "db"
),
"Creating"
)
lg$fatal("test", foo = "bar")
expect_false(is.na(lg$appenders$db$data$foo[[1]]))
lg$fatal("test")
expect_true(is.na(lg$appenders$db$data$foo[[2]]))
lg$remove_appender("db")
})
test_that(paste0(nm, ": Log to all fields that are already present in table by default"), {
lg <- Logger$new(
"test_dbi",
threshold = "trace",
propagate = FALSE,
exception_handler = function (...) stop(...)
)
lg$set_appenders(list(db =
ctor$new(
conn = conn,
table = "logging_test_create",
close_on_exit = FALSE
))
)
lg$fatal("test2", foo = "baz", blubb = "blah")
expect_identical(tail(lg$appenders$db$data, 1)$foo, "baz")
try(DBI::dbRemoveTable(conn, "logging_test_create"), silent = TRUE)
})
test_that(paste0(nm, ": Buffered inserts work"), {
lg <- Logger$new(
"test_dbi",
threshold = "trace",
propagate = FALSE,
exception_handler = function (...) stop(...)
)
lg$set_appenders(list(db =
ctor$new(
conn = conn,
table = "logging_test_buffer",
close_on_exit = FALSE,
buffer_size = 10
))
)
replicate(10, lg$info("buffered_insert", foo = "baz", blubb = "blah"))
expect_length(lg$appenders$db$buffer_events, 10)
expect_true(
is.null(lg$appenders$db$data) ||
identical(nrow(lg$appenders$db$data), 0L)
)
lg$info("test")
expect_length(lg$appenders$db$buffer_events, 0)
expect_identical(nrow(lg$appenders$db$data), 11L)
# cleanup
expect_true(
x <- tryCatch({
r <- DBI::dbRemoveTable(conn, lg$appenders$db$layout$format_table_name("LOGGING_TEST_BUFFER"))
if (!length(r)) TRUE else r # for RJDBC
},
error = function(e) FALSE # for RJDBC
)
)
})
test_that(paste0(nm, ": SQL is sanitzed"), {
msg <- ";*/; \"' /* blubb;"
e <- LogEvent$new(
lgr, level = 600L, msg = msg, caller = "nope()", timestamp = Sys.time()
)
app$append(e)
res <- app$data$msg
expect_identical(res[length(res)], msg)
})
test_that(paste0(nm, ": cleanup behaves as expected"), {
expect_true(
DBI::dbExistsTable(conn, tname) ||
DBI::dbExistsTable(conn, toupper(tname))
)
expect_silent({
DBI::dbRemoveTable(conn, tname)
expect_false(DBI::dbExistsTable(conn, tname))
DBI::dbDisconnect(conn)
})
})
}
# +- teardown looped tests ---------------------------------------------------
unlink(tsqlite)
# SQLite extra tests ------------------------------------------------------
context("AppenderDbi / SQLite: Extra Tests")
test_that("AppenderDbi / RSQLite: manual field types work", {
if (!requireNamespace("RSQLite", quietly = TRUE))
skip("Test requires RSQLite")
# setup test environment
tdb <- tempfile()
tname <- "LOGGING_TEST"
expect_message(
app <- AppenderDbi$new(
conn = DBI::dbConnect(RSQLite::SQLite(), tdb),
layout = LayoutSqlite$new(col_types = c(
level = "INTEGER",
timestamp = "TEXT",
caller = "TEXT",
msg = "TEXT"
)),
table = tname
),
"column types"
)
e <- LogEvent$new(lgr, level = 600, msg = "ohno", caller = "nope()", timestamp = Sys.time())
# do a few inserts
for (i in 1:10){
app$layout$set_col_types(sample(app$layout$col_types))
expect_silent(app$append(e))
}
# verify correct data types (sqlite doesnt have that many)
t <- DBI::dbGetQuery(app$conn, sprintf("PRAGMA table_info(%s)", tname))
expect_true(t[t$name == "level", ]$type == "INTEGER")
expect_true(all(vapply(app$data$timestamp, all_are_identical, logical(1))))
expect_true(all(format(app$data$timestamp) == format(e$timestamp)))
# cleanup
rm(app)
gc()
unlink(tdb)
})
test_that("displaying logs works for Loggers", {
if (!requireNamespace("RSQLite", quietly = TRUE))
skip("Test requires RSQLite")
# Setup test environment
conn <- DBI::dbConnect(RSQLite::SQLite(), ":memory:")
tname <- "LOGGING_TEST"
expect_message(
lg <- Logger$new(
"test_dbi",
threshold = "trace",
appenders = list(db = AppenderDbi$new(
conn = conn,
table = tname,
close_on_exit = FALSE,
buffer_size = 0
)),
propagate = FALSE
),
"manual"
)
lg$fatal("blubb")
lg$trace("blah")
expect_output(lg$appenders$db$show(), "FATAL.*TRACE")
expect_output(
expect_identical(nrow(lg$appenders$db$show(n = 1)), 1L),
"TRACE"
)
expect_identical(nrow(lg$appenders$db$data), 2L)
expect_output(
expect_identical(
show_log(target = lg),
lg$appenders$db$show()
)
)
expect_silent(DBI::dbDisconnect(conn))
})
test_that("Automatic closing of connections works", {
if (!requireNamespace("RSQLite", quietly = TRUE))
skip("Test requires RSQLite")
# setup test environment
conn <- DBI::dbConnect(RSQLite::SQLite(), ":memory:")
tname <- "LOGGING_TEST"
# With close_on_exit
lg <- Logger$new(
"test_dbi",
threshold = "trace",
appenders = list(db = AppenderDbi$new(conn = conn, table = tname, close_on_exit = TRUE))
)
rm(lg)
gc()
expect_warning(DBI::dbDisconnect(conn), "Already disconnected")
# Without close_on_exit
conn <- DBI::dbConnect(RSQLite::SQLite(), ":memory:")
tname <- "LOGGING_TEST"
lg <- Logger$new(
"test_dbi",
threshold = "trace",
appenders = list(db = AppenderDbi$new(conn = conn, table = tname, close_on_exit = FALSE))
)
rm(lg)
gc()
expect_silent(DBI::dbDisconnect(conn))
})
| /data/genthat_extracted_code/lgr/tests/test_AppenderDbi.R | no_license | surayaaramli/typeRrh | R | false | false | 10,303 | r | # Test multiple RDBMS
dt_sp <- options("datatable.showProgress")
options("datatable.showProgress" = FALSE)
# RDBMS batch tests -------------------------------------------------------
# +- setup ----------------------------------------------------------------
tsqlite <- tempfile()
dbs <- list(
"MySQL via RMariaDB" = list(
conn = try(silent = TRUE, DBI::dbConnect(
RMariaDB::MariaDB(),
username = "travis",
dbname = "travis_ci_test",
host = "localhost"
)),
ctor = AppenderDbi
),
"MySQL via RMySQL" = list(
conn = try(silent = TRUE, DBI::dbConnect(
RMySQL::MySQL(),
username = "travis",
dbname = "travis_ci_test",
host = "localhost"
)),
ctor = AppenderDbi
),
"PostgreSQL via RPostgreSQL" = list(
conn = try(silent = TRUE, DBI::dbConnect(
RPostgreSQL::PostgreSQL(),
user = "postgres",
host = "localhost",
dbname = "travis_ci_test"
)),
ctor = AppenderDbi
),
"PostgreSQL via RPostgres" = list(
conn = try(silent = TRUE, DBI::dbConnect(
RPostgres::Postgres(),
user = "postgres",
host = "localhost",
dbname = "travis_ci_test"
)),
ctor = AppenderDbi
),
"DB2 via RJDBC" = list(
conn = try(silent = TRUE, dataSTAT::dbConnectDB2("RTEST", "rtest", "rtest")),
ctor = AppenderRjdbc
),
"SQLite via RSQLite" = list(
conn = DBI::dbConnect(RSQLite::SQLite(), database = tsqlite),
ctor = AppenderDbi
)
)
options("datatable.showProgress" = dt_sp)
nm <- "SQLite via RSQLite" # for manual testing, can be deleted
nm <- "DB2 via RJDBC" # for manual testing, can be deleted
# +- tests -------------------------------------------------------------------
for (nm in names(dbs)){
conn <- dbs[[nm]]$conn
ctor <- dbs[[nm]]$ctor
title <- paste(ctor$classname, "/", nm)
context(title)
if (inherits(conn, "try-error")) {
test_that(title, {trimws(strwrap(skip(conn)))})
next
}
# setup test environment
tname <- "logging_test"
suppressMessages(
app <- ctor$new(
conn = conn,
table = tname,
close_on_exit = FALSE, # we are closing manually and dont want warnings
buffer_size = 0L
)
)
e <- LogEvent$new(
lgr, level = 600L, msg = "ohno", caller = "nope()", timestamp = Sys.time()
)
test_that(paste0(nm, ": round trip event inserts"), {
expect_silent(app$append(e))
expect_silent(app$append(e))
tres <- app$data
eres <- rbind(
as.data.frame(e, stringsAsFactors = FALSE),
as.data.frame(e, stringsAsFactors = FALSE)
)
expect_equal(tres[, -2], eres[, -2])
# small tolerance is allowed for timestamps
tdiff <- as.numeric(tres[, 2]) - as.numeric(eres[, 2])
expect_true(all(tdiff < 1), info = tdiff)
expect_true(all(format(tres$timestamp) == format(e$timestamp, usetz = FALSE)))
})
test_that(paste0(nm, ": col order does not impact inserts"), {
for (i in 1:20){
app$layout$set_col_types(sample(app$layout$col_types))
expect_silent(app$append(e))
}
expect_true(all(vapply(app$data$timestamp, all_are_identical, logical(1))))
expect_true(all(format(app$data$timestamp) == format(e$timestamp)))
})
test_that(paste0(nm, ": querying / displaying logs works"), {
expect_output(app$show(n = 5), paste(rep("TRACE.*", 5), collapse = "") )
expect_output(expect_identical(nrow(app$show(n = 1)), 1L), "TRACE")
expect_output(expect_identical(show_log(target = app), app$show()))
expect_identical(
capture.output(show_log(target = app)),
capture.output(app$show())
)
})
# custom fields
test_that(paste0(nm, ": Creating tables with custom fields works"), {
try(DBI::dbRemoveTable(conn, "logging_test_create"), silent = TRUE)
lg <- Logger$new(
"test_dbi",
threshold = "trace",
propagate = FALSE,
exception_handler = function (...) stop(...)
)
if (ctor$classname == "AppenderRjdbc"){
lo <- LayoutRjdbc$new(
col_types = c(
level = "smallint",
timestamp = "timestamp",
logger= "varchar(512)",
msg = "varchar(1024)",
caller = "varchar(1024)",
foo = "varchar(256)"
)
)
} else {
lo <- LayoutSqlite$new(
col_types = c(
level = "INTEGER",
timestamp = "TEXT",
logger= "TEXT",
msg = "TEXT",
caller = "TEXT",
foo = "TEXT"
)
)
}
expect_message(
lg$add_appender(
ctor$new(
conn = conn,
table = "logging_test_create",
layout = lo,
close_on_exit = FALSE
), "db"
),
"Creating"
)
lg$fatal("test", foo = "bar")
expect_false(is.na(lg$appenders$db$data$foo[[1]]))
lg$fatal("test")
expect_true(is.na(lg$appenders$db$data$foo[[2]]))
lg$remove_appender("db")
})
test_that(paste0(nm, ": Log to all fields that are already present in table by default"), {
lg <- Logger$new(
"test_dbi",
threshold = "trace",
propagate = FALSE,
exception_handler = function (...) stop(...)
)
lg$set_appenders(list(db =
ctor$new(
conn = conn,
table = "logging_test_create",
close_on_exit = FALSE
))
)
lg$fatal("test2", foo = "baz", blubb = "blah")
expect_identical(tail(lg$appenders$db$data, 1)$foo, "baz")
try(DBI::dbRemoveTable(conn, "logging_test_create"), silent = TRUE)
})
test_that(paste0(nm, ": Buffered inserts work"), {
lg <- Logger$new(
"test_dbi",
threshold = "trace",
propagate = FALSE,
exception_handler = function (...) stop(...)
)
lg$set_appenders(list(db =
ctor$new(
conn = conn,
table = "logging_test_buffer",
close_on_exit = FALSE,
buffer_size = 10
))
)
replicate(10, lg$info("buffered_insert", foo = "baz", blubb = "blah"))
expect_length(lg$appenders$db$buffer_events, 10)
expect_true(
is.null(lg$appenders$db$data) ||
identical(nrow(lg$appenders$db$data), 0L)
)
lg$info("test")
expect_length(lg$appenders$db$buffer_events, 0)
expect_identical(nrow(lg$appenders$db$data), 11L)
# cleanup
expect_true(
x <- tryCatch({
r <- DBI::dbRemoveTable(conn, lg$appenders$db$layout$format_table_name("LOGGING_TEST_BUFFER"))
if (!length(r)) TRUE else r # for RJDBC
},
error = function(e) FALSE # for RJDBC
)
)
})
test_that(paste0(nm, ": SQL is sanitzed"), {
msg <- ";*/; \"' /* blubb;"
e <- LogEvent$new(
lgr, level = 600L, msg = msg, caller = "nope()", timestamp = Sys.time()
)
app$append(e)
res <- app$data$msg
expect_identical(res[length(res)], msg)
})
test_that(paste0(nm, ": cleanup behaves as expected"), {
expect_true(
DBI::dbExistsTable(conn, tname) ||
DBI::dbExistsTable(conn, toupper(tname))
)
expect_silent({
DBI::dbRemoveTable(conn, tname)
expect_false(DBI::dbExistsTable(conn, tname))
DBI::dbDisconnect(conn)
})
})
}
# +- teardown looped tests ---------------------------------------------------
unlink(tsqlite)
# SQLite extra tests ------------------------------------------------------
context("AppenderDbi / SQLite: Extra Tests")
test_that("AppenderDbi / RSQLite: manual field types work", {
if (!requireNamespace("RSQLite", quietly = TRUE))
skip("Test requires RSQLite")
# setup test environment
tdb <- tempfile()
tname <- "LOGGING_TEST"
expect_message(
app <- AppenderDbi$new(
conn = DBI::dbConnect(RSQLite::SQLite(), tdb),
layout = LayoutSqlite$new(col_types = c(
level = "INTEGER",
timestamp = "TEXT",
caller = "TEXT",
msg = "TEXT"
)),
table = tname
),
"column types"
)
e <- LogEvent$new(lgr, level = 600, msg = "ohno", caller = "nope()", timestamp = Sys.time())
# do a few inserts
for (i in 1:10){
app$layout$set_col_types(sample(app$layout$col_types))
expect_silent(app$append(e))
}
# verify correct data types (sqlite doesnt have that many)
t <- DBI::dbGetQuery(app$conn, sprintf("PRAGMA table_info(%s)", tname))
expect_true(t[t$name == "level", ]$type == "INTEGER")
expect_true(all(vapply(app$data$timestamp, all_are_identical, logical(1))))
expect_true(all(format(app$data$timestamp) == format(e$timestamp)))
# cleanup
rm(app)
gc()
unlink(tdb)
})
test_that("displaying logs works for Loggers", {
if (!requireNamespace("RSQLite", quietly = TRUE))
skip("Test requires RSQLite")
# Setup test environment
conn <- DBI::dbConnect(RSQLite::SQLite(), ":memory:")
tname <- "LOGGING_TEST"
expect_message(
lg <- Logger$new(
"test_dbi",
threshold = "trace",
appenders = list(db = AppenderDbi$new(
conn = conn,
table = tname,
close_on_exit = FALSE,
buffer_size = 0
)),
propagate = FALSE
),
"manual"
)
lg$fatal("blubb")
lg$trace("blah")
expect_output(lg$appenders$db$show(), "FATAL.*TRACE")
expect_output(
expect_identical(nrow(lg$appenders$db$show(n = 1)), 1L),
"TRACE"
)
expect_identical(nrow(lg$appenders$db$data), 2L)
expect_output(
expect_identical(
show_log(target = lg),
lg$appenders$db$show()
)
)
expect_silent(DBI::dbDisconnect(conn))
})
test_that("Automatic closing of connections works", {
if (!requireNamespace("RSQLite", quietly = TRUE))
skip("Test requires RSQLite")
# setup test environment
conn <- DBI::dbConnect(RSQLite::SQLite(), ":memory:")
tname <- "LOGGING_TEST"
# With close_on_exit
lg <- Logger$new(
"test_dbi",
threshold = "trace",
appenders = list(db = AppenderDbi$new(conn = conn, table = tname, close_on_exit = TRUE))
)
rm(lg)
gc()
expect_warning(DBI::dbDisconnect(conn), "Already disconnected")
# Without close_on_exit
conn <- DBI::dbConnect(RSQLite::SQLite(), ":memory:")
tname <- "LOGGING_TEST"
lg <- Logger$new(
"test_dbi",
threshold = "trace",
appenders = list(db = AppenderDbi$new(conn = conn, table = tname, close_on_exit = FALSE))
)
rm(lg)
gc()
expect_silent(DBI::dbDisconnect(conn))
})
|
# Keep only rows of `fulldata` whose Date lies in [start_date, end_date].
#
# Args:
#   fulldata:   data.frame with a `Date` column holding "%d/%m/%Y" strings.
#   start_date: inclusive lower bound, Date or "YYYY-MM-DD" string.
#   end_date:   inclusive upper bound, Date or "YYYY-MM-DD" string.
#
# Returns: the matching rows, with `Date` converted to class Date.
subset_data <- function(fulldata, start_date, end_date) {
  fulldata$Date <- as.Date(fulldata$Date, format = "%d/%m/%Y")
  # Coerce the bounds once, explicitly, instead of relying on implicit
  # character -> Date coercion inside the comparison (and re-converting
  # the already-converted column on every comparison).
  start_date <- as.Date(start_date)
  end_date <- as.Date(end_date)
  subset(fulldata, Date >= start_date & Date <= end_date)
}
# Plot 1: histogram of Global Active Power for 2007-02-01..2007-02-02,
# written to `file_name` as a 480x480 white-background PNG.
#
# Expects "household_power_consumption.txt" (';'-separated, '?' as NA marker)
# in the working directory; uses subset_data() defined above.
plot1 <- function(file_name) {
  # Spell out na.strings (the original `na=` relied on partial argument
  # matching). colClasses is recycled across the 9 columns, which makes
  # column 3 (Global_active_power) numeric -- presumably intended; TODO
  # confirm the recycled pattern suits columns 6 and 9 as well.
  data <- read.table("household_power_consumption.txt", sep = ";",
                     header = TRUE, na.strings = "?",
                     colClasses = c("character", "character", "numeric"))
  twodaydata <- subset_data(data, "2007-02-01", "2007-02-02")
  png(filename = file_name, width = 480, height = 480, units = "px",
      bg = "white")
  # Close the device even if hist() errors, so the PNG file isn't left open.
  on.exit(dev.off(), add = TRUE)
  hist(twodaydata$Global_active_power, main = "Global Active Power",
       xlab = "Global Active Power (kilowatts)", ylab = "Frequency",
       col = "red")
}
| /plot1.R | no_license | polikepati/ExData_Plotting1 | R | false | false | 735 | r | subset_data<-function(fulldata, start_date, end_date){
fulldata$Date <- as.Date(fulldata$Date, format="%d/%m/%Y")
twodaydata = subset(fulldata, as.Date(Date) >= start_date &
as.Date(Date) <= end_date)
twodaydata
}
plot1<-function(file_name){
data <- read.table("household_power_consumption.txt",sep=';',header=TRUE,na='?',colClasses=c("character","character","numeric"));
twodaydata = subset_data(data,"2007-02-01","2007-02-02")
png(filename = file_name,width = 480,height = 480,units = "px",bg = 'white')
hist(twodaydata$Global_active_power, main = "Global Active Power",
xlab = "Global Active Power (kilowatts)", ylab = "Frequency", col = "red")
dev.off()
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ggyearday.R
\name{ggyearday}
\alias{ggyearday}
\title{ggplot2 heatmap for diurnal-yearly time series}
\usage{
ggyearday(
data,
time,
z,
date_breaks = "1 month",
date_labels = "\%b",
ybreaks = seq(6, 18, 6),
ylabels = format_sprintf("\%02d:00"),
fill_scale = scale_fill_viridis_c(direction = -1, na.value = NA, option = "A"),
...
)
}
\arguments{
\item{data}{a data.frame or tibble with input data (containing a POSIXct variable as time parameter).}
\item{time}{symbol giving time column}
\item{z}{symbol giving z column used as fill}
\item{date_breaks}{character string as input for \code{\link[ggplot2:scale_x_date]{ggplot2::scale_x_date()}},
e.g. '1 month', defines date breaks on x-axis.}
\item{date_labels}{character string as input for \code{\link[ggplot2:scale_x_date]{ggplot2::scale_x_date()}},
formatter for date labels on x-axis.}
\item{ybreaks}{numeric vector, specifies y-axis breaks.}
\item{ylabels}{function, format function for y-axis labels.}
\item{fill_scale}{ggplot2 continuous fill scale, e.g. \code{\link[=scale_fill_gradient]{scale_fill_gradient()}}.}
\item{...}{other arguments passed on to \code{\link[ggplot2:geom_raster]{ggplot2::geom_raster()}}.}
}
\value{
ggplot
}
\description{
creates a heatmap with date on x-axis and time of day on y-axis; z values as fill scale.
}
\examples{
library(ggplot2)
fn <- rOstluft.data::f("Zch_Stampfenbachstrasse_2010-2014.csv")
# only 4 years for smaller plot size in examples
df <-
rOstluft::read_airmo_csv(fn) \%>\%
dplyr::filter(starttime < lubridate::ymd(20140101)) \%>\%
rOstluft::rolf_to_openair()
ggyearday(df, time = "date", z = "O3")
# data with outliers / extreme values => not very informative...
ggyearday(df, time = date, z = PM10)
# ...use a custom scale and squish the outliers / extreme values
fill_scale <- scale_fill_viridis_squished(breaks=c(0, 25, 50, 75), limits = c(0, 75),
direction = -1, na.value = NA, option = "A")
ggyearday(df, time = date, z = PM10, fill_scale = fill_scale)
}
| /man/ggyearday.Rd | permissive | Ostluft/rOstluft.plot | R | false | true | 2,123 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ggyearday.R
\name{ggyearday}
\alias{ggyearday}
\title{ggplot2 heatmap for diurnal-yearly time series}
\usage{
ggyearday(
data,
time,
z,
date_breaks = "1 month",
date_labels = "\%b",
ybreaks = seq(6, 18, 6),
ylabels = format_sprintf("\%02d:00"),
fill_scale = scale_fill_viridis_c(direction = -1, na.value = NA, option = "A"),
...
)
}
\arguments{
\item{data}{a data.frame or tibble with input data (containing a POSIXct variable as time parameter).}
\item{time}{symbol giving time column}
\item{z}{symbol giving z column used as fill}
\item{date_breaks}{character string as input for \code{\link[ggplot2:scale_x_date]{ggplot2::scale_x_date()}},
e.g. '1 month', defines date breaks on x-axis.}
\item{date_labels}{character string as input for \code{\link[ggplot2:scale_x_date]{ggplot2::scale_x_date()}},
formatter for date labels on x-axis.}
\item{ybreaks}{numeric vector, specifies y-axis breaks.}
\item{ylabels}{function, format function for y-axis labels.}
\item{fill_scale}{ggplot2 continuous fill scale, e.g. \code{\link[=scale_fill_gradient]{scale_fill_gradient()}}.}
\item{...}{other arguments passed on to \code{\link[ggplot2:geom_raster]{ggplot2::geom_raster()}}.}
}
\value{
ggplot
}
\description{
creates a heatmap with date on x-axis and time of day on y-axis; z values as fill scale.
}
\examples{
library(ggplot2)
fn <- rOstluft.data::f("Zch_Stampfenbachstrasse_2010-2014.csv")
# only 4 years for smaller plot size in examples
df <-
rOstluft::read_airmo_csv(fn) \%>\%
dplyr::filter(starttime < lubridate::ymd(20140101)) \%>\%
rOstluft::rolf_to_openair()
ggyearday(df, time = "date", z = "O3")
# data with outliers / extreme values => not very informative...
ggyearday(df, time = date, z = PM10)
# ...use a custom scale and squish the outliers / extreme values
fill_scale <- scale_fill_viridis_squished(breaks=c(0, 25, 50, 75), limits = c(0, 75),
direction = -1, na.value = NA, option = "A")
ggyearday(df, time = date, z = PM10, fill_scale = fill_scale)
}
|
## cladophora
## maximum biomass model (Higgins et al. 2005), as used below with 1230 as the maximum:
## max_biomass (g DW m^-2) = 1230 * exp(-0.55 * depth)   # NOTE: original comment was garbled ("1,230 e-0.55 * Depth"); confirm exponent form against the source paper
library(tidyverse)
library(tidyr)
library(sm)
library(lubridate) # work with dates
library(dplyr) # data manipulation (filter, summarize, mutate)
library(ggplot2) # graphics
library(gridExtra) # tile several plots next to each other
library(scales)
library(data.table)
library(mgcv)
## Fit the Cladophora biomass ~ depth relationship (Higgins et al. 2005 data):
## a quadratic linear model of relative biomass (%) against depth (cm).
## Saves the model object and a diagnostic curve plot.
depth <- read.csv("input_data/Depth_2_Higgins_etal_2005.csv")
depth <- na.omit(depth)   # drop incomplete rows before modelling
depth
## convert depth to cm and biomass to % and presence/absence for glm
## 1230 g DW m^-2 is treated as the maximum biomass, so max_biomass_percent is
## biomass relative to that maximum; the presence/absence and log columns are
## prepared for alternative model fits (glm / log scale).
depth <- depth %>%
mutate(depth_cm = depth_m*100) %>%
mutate(max_biomass_percent = (maximum_biomass_g_DW_m..2/1230)*100) %>%
mutate(presence_absence = ifelse(max_biomass_percent == 0, 0, 1)) %>%
mutate(max_biomass_percent_log = log(max_biomass_percent+1))
## Quadratic fit: biomass % ~ depth + depth^2
depth_lmq <- lm(max_biomass_percent ~ depth_cm + I(depth_cm^2), data=depth)
clad_depth_mod <- depth_lmq   # alias used later for predict()
summary(depth_lmq) ## 0.010316, Adjusted R-squared: 0.9305
## NOTE(review): the .RData file stores the object under the name `depth_lmq`,
## not `clad_depth_mod`; the load() below just round-trips the object saved on
## the previous line.
save(depth_lmq, file="clad_depth_mod.RData")
load(file="clad_depth_mod.RData")
## qqplot awful
## qqplot not good but passed normality above
## plot
## Diagnostic figure: observed points plus the fitted quadratic curve.
png("figures/Final_curves/Depth/C1_Cladophora_depth_model.png", width = 500, height = 600)
ggplot(data = depth, mapping = aes(x = depth_cm, y = max_biomass_percent))+
geom_point(size = 2)+
stat_smooth(method="lm", formula = y ~ x + I(x^2)) +
# scale_y_continuous(trans=log1p_trans()) +
# scale_y_log10()+
labs(x = "Depth (cm)", y = "Biomass (%)")+
theme_classic()+
# scale_y_continuous(limits=c(,100)) +
theme(axis.text = element_text(size = 20), axis.title = element_text(size = 20))
dev.off()
## Load the HEC-RAS hydraulic time series for node F319, keep the depth
## columns, convert units, reshape to long form, and predict biomass
## probability from the depth model fitted above.
# F34D <- read.csv("input_data/HecRas/hydraulic_ts_F34D.csv")
# F37B_High <- read.csv("input_data/HecRas/hydraulic_ts_F37B_High.csv")
# F45B <- read.csv("input_data/HecRas/hydraulic_ts_F45B.csv")
F319 <- read.csv("input_data/HecRas/hydraulic_ts_F319.csv")
# LA13 <- read.csv("input_data/HecRas/hydraulic_ts_LA13.csv")
# LA1 <- read.csv("input_data/HecRas/hydraulic_ts_LA1.csv")
## select columns
hydraul <- F319[,-1]   # drop the first (index) column
names(hydraul)
head(hydraul)
## select columns
## Columns 1:3 = DateTime/node/Q; 5, 9, 13 are the depth columns for left
## overbank, main channel and right overbank (positions assumed from the
## renaming below - confirm against the HEC-RAS export layout).
hyd_dep <- hydraul[,c(1:3,5,9,13)]
colnames(hyd_dep) <-c("DateTime", "node", "Q", "depth_ft_LOB", "depth_ft_MC", "depth_ft_ROB")
# nas <- which(complete.cases(hyd_dep) == FALSE)
# hyd_dep[nas,]
## convert unit from feet to meters
## (feet -> metres via 0.3048, then metres -> centimetres via *100)
hyd_dep <- hyd_dep %>%
mutate(depth_cm_LOB = (depth_ft_LOB*0.3048)*100,
depth_cm_MC = (depth_ft_MC*0.3048)*100,
depth_cm_ROB = (depth_ft_ROB*0.3048)*100) %>%
select(-contains("ft")) %>%
mutate(date_num = seq(1,length(DateTime), 1))
hyd_dep
## Long format: one row per timestep x cross-section position; only the main
## channel is carried forward.
hyd_dep<-reshape2::melt(hyd_dep, id=c("DateTime","Q", "node", "date_num"))
hyd_dep <- hyd_dep %>% rename(depth_cm = value)
hyd_dep <- filter(hyd_dep, variable == "depth_cm_MC")
## NOTE(review): predict() is called on the quadratic lm, which can return
## negative "percentages"; these are floored at 0 below, but values > 100 are
## not capped.
new_data <- hyd_dep %>%
mutate(prob_fit = predict(clad_depth_mod, newdata = hyd_dep, type="response")) %>%
mutate(prob_fit = ifelse(prob_fit<=0, 0, prob_fit)) ## predicts negative percentages - cut off at 0 for quick fix
## format date time
new_data$DateTime<-as.POSIXct(new_data$DateTime,
format = "%Y-%m-%d %H:%M",
tz = "GMT")
## create year, month, day and hour columns and add water year
## Water year is labelled by its starting calendar year: Oct-Dec belong to the
## current year, Jan-Sep to the previous one.
new_data <- new_data %>%
mutate(month = month(DateTime)) %>%
mutate(year = year(DateTime)) %>%
mutate(day = day(DateTime)) %>%
mutate(hour = hour(DateTime)) %>%
mutate(water_year = ifelse(month == 10 | month == 11 | month == 12, year, year-1))
head(new_data)
## Find the discharge values (Q) where the predicted probability curve crosses
## the 25 / 50 / 75 % thresholds, using the externally-defined
## RootLinearInterpolant(). If a curve crosses more than 4 times, only the
## first and last crossings are kept.
## plot
## NOTE(review): the recorded ranges below suggest prob_fit may not reach the
## 25/50/75 thresholds for this node (max ~0.40 as annotated); the downstream
## is.na() guards handle that case. Verify the annotations against a fresh run.
range(new_data$Q) ## 26.22926 41750.16797
range(new_data$prob_fit) ## -3.2183121 0.3989423
## Q at the probability peak, used later to classify ascending/descending limbs.
peak <- new_data %>%
group_by(variable) %>%
filter(prob_fit == max(prob_fit)) #%>%
peakQM <- filter(peak, variable=="depth_cm_MC")
peakQM <- max(peakQM$Q)
peakQM ## 706.7369
## filter data by cross section position
new_dataM <- filter(new_data, variable == "depth_cm_MC")
## Main channel curves
load(file="root_interpolation_function.Rdata")
## Crossings of the 25 % ("low") threshold.
newx1a <- RootLinearInterpolant(new_dataM$Q, new_dataM$prob_fit, 25)
newx1a
if(length(newx1a) > 4) {
newx1a <- c(newx1a[1], newx1a[length(newx1a)])
} else {
newx1a <- newx1a
}
newx1a
## Crossings of the 50 % ("medium") threshold.
newx2a <- RootLinearInterpolant(new_dataM$Q, new_dataM$prob_fit, 50)
if(length(newx2a) > 4) {
newx2a <- c(newx2a[1], newx2a[length(newx2a)])
} else {
newx2a <- newx2a
}
## Crossings of the 75 % ("high") threshold; if the curve never drops below
## 75 %, the whole discharge range qualifies, so use the minimum Q.
newx3a <- RootLinearInterpolant(new_dataM$Q, new_dataM$prob_fit, 75)
newx3a
if(min(new_data$prob_fit)>75) {
newx3a <- min(new_data$Q)
} else {
newx3a <- newx3a
}
if(length(newx3a) > 4) {
newx3a <- c(newx3a[1], newx3a[length(newx3a)])
} else {
newx3a <- newx3a
}
newx3a
## Assemble the table of discharge (Q) limits at the low/medium/high (25/50/75 %)
## probability thresholds, four crossing slots per threshold. Only the
## main-channel (MC) column is populated; LOB/ROB remain NA. Indexing [1:4]
## pads crossing vectors shorter than four with NA, matching the slot layout.
limits <- data.frame(
  LOB = rep(NA, 12),
  MC = c(newx1a[1:4], newx2a[1:4], newx3a[1:4]),
  ROB = rep(NA, 12),
  row.names = paste0(rep(c("Low_Prob_", "Med_Prob_", "High_Prob_"), each = 4), 1:4)
)
limits
write.csv(limits, "output_data/C1_F319_clad_depth_Q_limits.csv")
## Plot probability ~ Q for the main channel, marking the threshold crossings:
## green points at the 25 % crossings, red at 50 %, blue at 75 %. Points with
## an NA x (unused crossing slots) are silently dropped by ggplot.
## NOTE(review): new_data was filtered to depth_cm_MC above, so the facet_wrap
## over `variable` produces a single panel here.
png("figures/Application_curves/Depth/F319_clad_depth_prob_Q_thresholds.png", width = 500, height = 600)
labels <- c(depth_cm_LOB = "Left Over Bank", depth_cm_MC = "Main Channel", depth_cm_ROB = "Right Over Bank")
ggplot(new_data, aes(x = Q, y=prob_fit)) +
geom_line(aes(group = variable, lty = variable)) +
scale_linetype_manual(values= c("dotted", "solid", "dashed"))+
# name="Cross\nSection\nPosition",
# breaks=c("depth_cm_LOB", "depth_cm_MC", "depth_cm_ROB"),
# labels = c("LOB", "MC", "ROB")) +
facet_wrap(~variable, scales="free_x", nrow=3, labeller=labeller(variable = labels)) +
geom_point(data = subset(new_data, variable =="depth_cm_MC"), aes(y=25, x=newx1a[1]), color="green") +
geom_point(data = subset(new_data, variable =="depth_cm_MC"), aes(y=25, x=newx1a[2]), color="green") +
geom_point(data = subset(new_data, variable =="depth_cm_MC"), aes(y=25, x=newx1a[3]), color="green") +
geom_point(data = subset(new_data, variable =="depth_cm_MC"), aes(y=25, x=newx1a[4]), color="green") +
geom_point(data = subset(new_data, variable =="depth_cm_MC"), aes(y=50, x=newx2a[1]), color="red") +
geom_point(data = subset(new_data, variable =="depth_cm_MC"), aes(y=50, x=newx2a[2]), color="red") +
geom_point(data = subset(new_data, variable =="depth_cm_MC"), aes(y=50, x=newx2a[3]), color="red") +
geom_point(data = subset(new_data, variable =="depth_cm_MC"), aes(y=50, x=newx2a[4]), color="red") +
geom_point(data = subset(new_data, variable =="depth_cm_MC"), aes(y=75, x=newx3a[1]), color="blue") +
geom_point(data = subset(new_data, variable =="depth_cm_MC"), aes(y=75, x=newx3a[2]), color="blue") +
geom_point(data = subset(new_data, variable =="depth_cm_MC"), aes(y=75, x=newx3a[3]), color="blue") +
geom_point(data = subset(new_data, variable =="depth_cm_MC"), aes(y=75, x=newx3a[4]), color="blue") +
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(), legend.position = "none") +
labs(title = "F319: Cladophora/Depth: Probability ~ Q",
y = "Probability",
x = "Q (cfs)") #+ theme_bw(base_size = 15)
dev.off()
## Label each record with a "year-month" string and a season flag.
## The critical (growth) period covers months 3-7; months 1-2 and 8-12 are
## non-critical.
non_critical <- c(1, 2, 8:12)
critical <- 3:7
new_dataMx <- new_dataM %>% unite(month_year, year:month, sep = "-", remove = F)
head(new_dataMx)
new_dataMx <- new_dataMx %>%
  mutate(season = dplyr::case_when(
    month %in% non_critical ~ "non_critical",
    TRUE ~ "critical"
  ))
# time stats - mid channel ------------------------------------------------
## Build `low_threshM`: an unevaluated expression over Q that is TRUE while
## predicted probability exceeds the "low" (25 %) threshold. The comparison
## shape depends on how many times the curve crosses the threshold (length of
## newx1a) and where each crossing sits relative to the discharge at peak
## probability (peakQM).
## FIX(review): `&&` binds tighter than `||` in R, so the original
## `length(x) == n && A || B` parsed as `(length(x) == n && A) || B` - the
## length guard did not cover B, so e.g. a 4-crossing descending curve could
## wrongly satisfy a 3-crossing branch. The disjunctions are now parenthesised
## inside the length check (matching the already-correct 2-crossing branch).
if(is.na(newx1a[1])) {
  ## no crossing found: condition that is never TRUE
  low_threshM <- expression(Q < 0)
  ## 1a) if 1 threshold value and it's lower than the peak (ascending slope)
} else if(length(newx1a)==1 && newx1a < peakQM){
  # sum the amount of time above threshold
  low_threshM <- expression(Q >= newx1a)
  ## 1b) if 1 threshold value and it's higher than the peak (descending slope)
} else if (length(newx1a)==1 && newx1a > peakQM){
  # sum the amount of time below the threshold
  low_threshM <- expression(Q <= newx1a)
  ## 2a) if 2 threshold values and the first one is lower than the peak (positive parabola)
} else if (length(newx1a)==2 && newx1a[1] < peakQM) {
  # sum the amount of time above the first and below the 2nd threshold
  low_threshM <- expression(Q >= newx1a[1] & Q <= newx1a[2])
  ## 2b) if 2 threshold values and the first one is higher OR the 2nd one is lower than the peak (negative parabola)
} else if(length(newx1a)==2 && (newx1a[1] > peakQM || newx1a[2] < peakQM )) {
  # sum the amount of time below the first and above the 2nd threshold
  low_threshM <- expression(Q <= newx1a[1] & Q >= newx1a[2])
  ## if 3 threshold values and the 1st one is lower than the peak (begins negative slope)
} else if (length(newx1a) == 3 &&
           ((newx1a[1] < peakQM && newx1a[2] < peakQM && newx1a[3] > peakQM) ||
            (newx1a[1] > peakQM && newx1a[2] > peakQM && newx1a[3] > peakQM))) {
  # sum the amount of time below the 1st, or between the 2nd and 3rd thresholds
  low_threshM <- expression(Q <= newx1a[1] | Q >= newx1a[2] & Q <= newx1a[3])
  ## if 3 threshold values and the 3rd one is higher than the peak (begins positive slope)
} else if (length(newx1a) == 3 &&
           ((newx1a[1] < peakQM && newx1a[2] > peakQM && newx1a[3] > peakQM) ||
            (newx1a[1] > peakQM && newx1a[2] > peakQM && newx1a[3] < peakQM))) {
  # sum the amount of time between the 1st and 2nd thresholds, or above the 3rd
  low_threshM <- expression(Q >= newx1a[1] & Q <= newx1a[2] | Q >= newx1a[3])
  ## 4a) if 4 threshold values and the 1st is lower than the peak (begins positive slope)
} else if (length(newx1a) == 4 && newx1a[1] < peakQM) {
  # time above 1st & below 2nd, or above 3rd & below 4th
  low_threshM <- expression(Q >= newx1a[1] & Q <= newx1a[2] | Q >= newx1a[3] & Q <= newx1a[4])
  ## 4b) if 4 threshold values all lower / all higher than the peak, or straddling it (begins negative slope)
} else if (length(newx1a) == 4 &&
           ((newx1a[1] < peakQM && newx1a[2] < peakQM && newx1a[3] < peakQM && newx1a[4] < peakQM) ||
            (newx1a[1] > peakQM && newx1a[2] > peakQM && newx1a[3] > peakQM && newx1a[4] > peakQM) ||
            (newx1a[2] < peakQM && newx1a[3] > peakQM))) {
  # time below 1st & above 2nd, or below 3rd & above 4th
  low_threshM <- expression(Q <= newx1a[1] & Q >= newx1a[2] | Q <= newx1a[3] & Q >= newx1a[4])
}
low_threshM
newx1a
### medium threshold
## Build `med_threshM` for the "medium" (50 %) threshold; same branch logic as
## the low threshold above, driven by newx2a.
## FIX(review): parenthesised the `|| `disjunctions under the length checks -
## `&&` binds tighter than `||`, so the original length guards did not cover
## the second disjunct of the 3- and 4-crossing branches.
if(is.na(newx2a[1])) {
  ## no crossing found: condition that is never TRUE
  med_threshM <- expression(Q < 0)
  ## if 1 threshold value and it's lower than the peak (ascending slope)
} else if(length(newx2a)==1 && newx2a < peakQM){
  # sum the amount of time above threshold
  med_threshM <- expression(Q >= newx2a)
  ## if 1 threshold value and it's higher than the peak (descending slope)
} else if (length(newx2a)==1 && newx2a > peakQM){
  # sum the amount of time below the threshold
  med_threshM <- expression(Q <= newx2a)
  ## if 2 threshold values and the first one is lower than the peak (positive parabola)
} else if (length(newx2a)==2 && newx2a[1] < peakQM) {
  # sum the amount of time above the first and below the 2nd threshold
  med_threshM <- expression(Q >= newx2a[1] & Q <= newx2a[2])
  ## if 2 threshold values and the first one is higher OR the 2nd one is lower than the peak (negative parabola)
} else if(length(newx2a)==2 && (newx2a[1] > peakQM || newx2a[2] < peakQM) ) {
  # sum the amount of time below the first and above the 2nd threshold
  med_threshM <- expression(Q <= newx2a[1] & Q >= newx2a[2])
  ## if 3 threshold values and the 1st one is lower than the peak (begins negative slope)
} else if (length(newx2a) == 3 &&
           ((newx2a[1] < peakQM && newx2a[2] < peakQM && newx2a[3] > peakQM) ||
            (newx2a[1] > peakQM && newx2a[2] > peakQM && newx2a[3] > peakQM))) {
  # sum the amount of time below the 1st, or between the 2nd and 3rd thresholds
  med_threshM <- expression(Q <= newx2a[1] | Q >= newx2a[2] & Q <= newx2a[3])
  ## if 3 threshold values and the 3rd one is higher than the peak (begins positive slope)
} else if (length(newx2a) == 3 &&
           ((newx2a[1] < peakQM && newx2a[2] > peakQM && newx2a[3] > peakQM) ||
            (newx2a[1] > peakQM && newx2a[2] > peakQM && newx2a[3] < peakQM))) {
  # sum the amount of time between the 1st and 2nd thresholds, or above the 3rd
  med_threshM <- expression(Q >= newx2a[1] & Q <= newx2a[2] | Q >= newx2a[3])
  ## 4a) if 4 threshold values and the 1st is lower than the peak (begins positive slope)
} else if (length(newx2a) == 4 && newx2a[1] < peakQM) {
  # time above 1st & below 2nd, or above 3rd & below 4th
  med_threshM <- expression(Q >= newx2a[1] & Q <= newx2a[2] | Q >= newx2a[3] & Q <= newx2a[4])
  ## 4b) if 4 threshold values all lower / all higher than the peak, or straddling it (begins negative slope)
} else if (length(newx2a) == 4 &&
           ((newx2a[1] < peakQM && newx2a[2] < peakQM && newx2a[3] < peakQM && newx2a[4] < peakQM) ||
            (newx2a[1] > peakQM && newx2a[2] > peakQM && newx2a[3] > peakQM && newx2a[4] > peakQM) ||
            (newx2a[2] < peakQM && newx2a[3] > peakQM))) {
  # time below 1st & above 2nd, or below 3rd & above 4th
  med_threshM <- expression(Q <= newx2a[1] & Q >= newx2a[2] | Q <= newx2a[3] & Q >= newx2a[4])
}
med_threshM
### high threshold
## Build `high_threshM` for the "high" (75 %) threshold; same branch logic as
## the low/medium thresholds, driven by newx3a.
## FIX(review): parenthesised the `||` disjunctions under the length checks -
## `&&` binds tighter than `||`, so the original length guards did not cover
## the second disjunct of the 3- and 4-crossing branches.
if(is.na(newx3a[1])) {
  ## no crossing found: condition that is never TRUE
  high_threshM <- expression(Q < 0)
  ## if 1 threshold value and it's lower than the peak (ascending slope)
} else if(length(newx3a)==1 && newx3a < peakQM){
  # sum the amount of time above threshold
  high_threshM <- expression(Q >= newx3a)
  ## if 1 threshold value and it's higher than the peak (descending slope)
} else if (length(newx3a)==1 && newx3a > peakQM){
  # sum the amount of time below the threshold
  high_threshM <- expression(Q <= newx3a)
  ## if 2 threshold values and the first one is lower than the peak (positive parabola)
} else if (length(newx3a)==2 && newx3a[1] < peakQM) {
  # sum the amount of time above the first and below the 2nd threshold
  high_threshM <- expression(Q >= newx3a[1] & Q <= newx3a[2])
  ## if 2 threshold values and the first one is higher OR the 2nd one is lower than the peak (negative parabola)
} else if(length(newx3a)==2 && (newx3a[1] > peakQM || newx3a[2] < peakQM) ) {
  # sum the amount of time below the first and above the 2nd threshold
  high_threshM <- expression(Q <= newx3a[1] & Q >= newx3a[2])
  ## if 3 threshold values (begins negative slope)
} else if (length(newx3a) == 3 &&
           ((newx3a[1] < peakQM && newx3a[2] < peakQM && newx3a[3] > peakQM) ||
            (newx3a[1] > peakQM && newx3a[2] > peakQM && newx3a[3] > peakQM))) {
  # sum the amount of time below the 1st, or between the 2nd and 3rd thresholds
  high_threshM <- expression(Q <= newx3a[1] | Q >= newx3a[2] & Q <= newx3a[3])
  ## if 3 threshold values (begins positive slope)
} else if (length(newx3a) == 3 &&
           ((newx3a[1] < peakQM && newx3a[2] > peakQM && newx3a[3] > peakQM) ||
            (newx3a[1] > peakQM && newx3a[2] > peakQM && newx3a[3] < peakQM))) {
  # sum the amount of time between the 1st and 2nd thresholds, or above the 3rd
  high_threshM <- expression(Q >= newx3a[1] & Q <= newx3a[2] | Q >= newx3a[3])
  ## 4a) if 4 threshold values and the 1st is lower than the peak (begins positive slope)
} else if (length(newx3a) == 4 && newx3a[1] < peakQM) {
  # time above 1st & below 2nd, or above 3rd & below 4th
  high_threshM <- expression(Q >= newx3a[1] & Q <= newx3a[2] | Q >= newx3a[3] & Q <= newx3a[4])
  ## 4b) if 4 threshold values all lower / all higher than the peak, or straddling it (begins negative slope)
} else if (length(newx3a) == 4 &&
           ((newx3a[1] < peakQM && newx3a[2] < peakQM && newx3a[3] < peakQM && newx3a[4] < peakQM) ||
            (newx3a[1] > peakQM && newx3a[2] > peakQM && newx3a[3] > peakQM && newx3a[4] > peakQM) ||
            (newx3a[2] < peakQM && newx3a[3] > peakQM))) {
  # time below 1st & above 2nd, or below 3rd & above 4th
  high_threshM <- expression(Q <= newx3a[1] & Q >= newx3a[2] | Q <= newx3a[3] & Q >= newx3a[4])
}
high_threshM
med_threshM
low_threshM
###### calculate amount of time
## Percentage of hourly timesteps satisfying each threshold expression, per
## water year (annual columns) and per water year x season (seasonal columns).
## The expressions built above are eval()'d inside the grouped mutate, so Q
## resolves to the data column; sum(logical) counts TRUE timesteps.
time_statsm <- new_dataMx %>%
dplyr::group_by(water_year) %>%
dplyr::mutate(Low = sum(eval(low_threshM))/length(DateTime)*100) %>%
dplyr::mutate(Medium = sum(eval(med_threshM))/length(DateTime)*100) %>%
dplyr::mutate(High = sum(eval(high_threshM))/length(DateTime)*100) %>%
ungroup() %>%
dplyr::group_by(water_year, season) %>%
dplyr::mutate(Low.Seasonal = sum(eval(low_threshM))/length(DateTime)*100) %>%
dplyr::mutate(Medium.Seasonal = sum(eval(med_threshM))/length(DateTime)*100) %>%
dplyr::mutate(High.Seasonal = sum(eval(high_threshM))/length(DateTime)*100) %>%
distinct(year, Low , Medium , High , Low.Seasonal, Medium.Seasonal, High.Seasonal) %>%
mutate(position="MC")
time_statsm
time_stats <- time_statsm
## melt
## Long format: one row per (water_year, season, statistic); the grouping
## variables water_year/season are retained by distinct() above.
melt_time<-reshape2::melt(time_stats, id=c("year","season", "position", "water_year"))
melt_time <- rename(melt_time, Probability_Threshold = variable)
unique(melt_time$position)
write.csv(melt_time, "output_data/C1_F319_clad_Depth_time_stats.csv")
## Split the long time-stats table into annual vs seasonal statistics and plot
## the percentage-of-time series for each (annual, non-critical "winter",
## critical "summer").
## subset annual stats
## Relies on factor-level order from melt(): levels 1:3 are Low/Medium/High,
## levels 4:6 the .Seasonal variants.
ann_stats <- unique(melt_time$Probability_Threshold)[1:3]
melt_time_ann <- melt_time %>% filter(Probability_Threshold %in% ann_stats ) %>%
select(-season) %>% distinct()
## subset seasonal stats
seas_stats <- unique(melt_time$Probability_Threshold)[4:6]
melt_time_seas <- filter(melt_time, Probability_Threshold %in% seas_stats )
melt_time_seas
## plot for annual stats - need probs in order
png("figures/Application_curves/Depth/C1_F319_clad_Depth_perc_time_above_threshold_annual.png", width = 500, height = 600)
ggplot(melt_time_ann, aes(x = water_year, y=value)) +
geom_line(aes( group =c(), color = Probability_Threshold)) +
scale_color_manual(name = "Probability Threshold", breaks = c("Low", "Medium", "High"),
values=c( "green", "red", "blue"),
labels = c("Low", "Medium", "High")) +
theme(axis.text.x = element_text(angle = 90, vjust = 1)) +
# scale_x_continuous(breaks=as.numeric(total_days$month_year), labels=format(total_days$month_year,"%b %Y")) +
facet_wrap(~position, scales="free_x", nrow=3) +
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
labs(title = "F319: Time within discharge limit in relation to Depth (Annual)",
y = "Time (%)",
x = "Year") #+ theme_bw(base_size = 15)
dev.off()
## plot for winter stats - need probs in order
## "Winter" = the non-critical months (Jan-Feb, Aug-Dec).
melt_time_winter <- filter(melt_time_seas, season == "non_critical")
unique(melt_time_winter$season)
png("figures/Application_curves/Depth/C1_F319_clad_Depth_perc_time_above_threshold_winter.png", width = 500, height = 600)
ggplot(melt_time_winter, aes(x = water_year, y=value)) +
geom_line(aes( group = c(), color = Probability_Threshold)) +
scale_color_manual(name = "Probability Threshold", breaks = c("Low.Seasonal", "Medium.Seasonal", "High.Seasonal"),
values=c( "green", "red", "blue"),
labels = c("Low", "Medium", "High")) +
theme(axis.text.x = element_text(angle = 90, vjust = 1)) +
# scale_x_continuous(breaks=as.numeric(total_days$month_year), labels=format(total_days$month_year,"%b %Y")) +
facet_wrap(~position, scales="free_x", nrow=3) +
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
labs(title = "F319: Time within discharge limit in relation to Depth (Non_critical)",
y = "Time (%)",
x = "Year") #+ theme_bw(base_size = 15)
dev.off()
## plot for summer stats - need probs in order
## "Summer" = the critical growth months (Mar-Jul).
melt_time_summer <- filter(melt_time_seas, season == "critical")
png("figures/Application_curves/Depth/C1_F319_clad_Depth_perc_time_above_threshold_critical.png", width = 500, height = 600)
ggplot(melt_time_summer, aes(x = water_year, y=value)) +
geom_line(aes( group = c(), color = Probability_Threshold)) +
scale_color_manual(name = "Probability Threshold", breaks = c("Low.Seasonal", "Medium.Seasonal", "High.Seasonal"),
values=c( "green", "red", "blue"),
labels = c("Low", "Medium", "High")) +
theme(axis.text.x = element_text(angle = 90, vjust = 1)) +
# scale_x_continuous(breaks=as.numeric(total_days$month_year), labels=format(total_days$month_year,"%b %Y")) +
facet_wrap(~position, scales="free_x", nrow=3) +
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
labs(title = "F319: Time within discharge limit in relation to Depth (Critical)",
y = "Time (%)",
x = "Year") #+ theme_bw(base_size = 15)
dev.off()
# Number of days above discharge ------------------------------------------
# all columns based on different probabilities
## count number events within each threshold with a running total - max total is the number of consequative
# events (hours) per day. if else statements to consider the thresholds newx1a/b etc
## order by datetime
## For each day, count consecutive hours satisfying each threshold expression:
## data.table::rleid() gives each run of identical TRUE/FALSE values its own id,
## and row_number() within (day x run) is the running consecutive-hour count.
## A day is counted as "within limits" only when a run reaches >= 24 hours.
limits
new_dataM <- new_dataM %>%
ungroup() %>%
group_by(month, day, water_year, ID01 = data.table::rleid(eval(low_threshM))) %>%
mutate(Low = if_else(eval(low_threshM), row_number(), 0L)) %>%
ungroup() %>%
group_by(month, day, water_year, ID02 = data.table::rleid(eval(med_threshM))) %>%
mutate(Medium = if_else(eval(med_threshM), row_number(), 0L)) %>%
ungroup() %>%
group_by(month, day, water_year, ID03 = data.table::rleid(eval(high_threshM))) %>%
mutate(High = if_else(eval(high_threshM), row_number(), 0L))
new_dataM <- mutate(new_dataM, position="MC")
new_datax <- select(new_dataM, c(Q, month, water_year, day, ID01, Low, ID02, Medium, ID03, High, position, DateTime) )# all probs
## melt
## NOTE(review): DateTime is not in the id list, so melt() also treats it as a
## measure variable (coercing its values alongside Low/Medium/High). The
## filters below keep only the Low/Medium/High rows, so results are unaffected,
## but adding DateTime to id= would be cleaner.
melt_data<-reshape2::melt(new_datax, id=c("ID01", "ID02", "ID03", "day", "month", "water_year", "Q", "position"))
melt_data <- rename(melt_data, Probability_Threshold = variable,
consec_hours = value)
melt_data
## groups data by year, month and ID & threshold
## counts the number of days in each month probability is within the depth of each threshold - days are not necessarily conseq
## each threshold separately
## count how many full days i.e. 24 hours
total_days01 <- melt_data %>%
filter(Probability_Threshold == "Low") %>%
group_by(ID01, day, month, water_year, position) %>%
summarise(n_hours = max(consec_hours)) %>%
mutate(n_days_low = ifelse(n_hours >= 24, 1, 0)) # %>%
total_days01
## count the number of days in each month
total_days_per_month01 <- total_days01 %>%
group_by(month, water_year, position) %>%
summarise(days_per_month_low = sum(n_days_low))
total_days_per_month01
total_days02 <- melt_data %>%
filter(Probability_Threshold == "Medium") %>%
group_by(ID02, day, month, water_year, position) %>%
summarise(n_hours = max(consec_hours)) %>%
mutate(n_days_medium = ifelse(n_hours >= 24, 1, 0)) # %>%
total_days_per_month02 <- total_days02 %>%
group_by(month, water_year, position) %>%
summarise(days_per_month_medium = sum(n_days_medium))
# total_days_per_month02
total_days03 <- melt_data %>%
filter(Probability_Threshold == "High") %>%
group_by(ID03, day, month, water_year, position) %>%
summarise(n_hours = max(consec_hours)) %>%
mutate(n_days_high = ifelse(n_hours >= 24, 1, 0)) # %>%
total_days_per_month03 <- total_days03 %>%
group_by(month, water_year, position) %>%
summarise(days_per_month_high = sum(n_days_high))
total_days_per_month03
## combine all thresholds
## NOTE(review): cbind assumes the three per-month tables have identical row
## order, which holds because all three are grouped/summarised over the same
## (month, water_year, position) keys from the same source.
total_days <- cbind( total_days_per_month01,total_days_per_month02[,4], total_days_per_month03[,4])
head(total_days)
write.csv(total_days, "output_data/C1_F319_clad_Depth_total_days.csv")
## Format the monthly day-counts table: build a Date-typed month_year column,
## rename the count columns to Low/Medium/High, tag seasons, and save a long
## version for plotting.
# # create year_month column
total_days <- ungroup(total_days) %>%
unite(month_year, water_year:month, sep="-", remove=F)
## convert month year to date format
## NOTE(review): as.yearmon() is assumed to parse the "YYYY-M" strings built by
## unite(); confirm on months > 9 as well (e.g. "2014-10").
library(zoo)
total_days$month_year <- zoo::as.yearmon(total_days$month_year)
total_days$month_year <- as.Date(total_days$month_year)
## change names of columns
total_days <- rename(total_days, Low = days_per_month_low, Medium = days_per_month_medium, High = days_per_month_high)
# total_hours <- rename(total_hours, Low = n_days_low, Medium = n_days_medium, High = n_days_high)
## define seasons/critical period
## Same season split as above: critical growth period = months 3-7.
non_critical <- c(1,2,8:12)
critical <- c(3:7)
total_days <- total_days %>%
mutate(season = ifelse(month %in% non_critical, "non_critical", "critical") )
# ## melt data
melt_days<-reshape2::melt(total_days, id=c("month_year", "water_year", "month", "season", "position"))
melt_days <- rename(melt_days, Probability_Threshold = variable,
n_days = value)
head(melt_days)
## save df
write.csv(melt_days, "output_data/C1_F319_clad_Depth_total_days_long.csv")
## Final figures: number of full days per month within the discharge limits,
## plotted (1) as one continuous time series, (2) faceted by water year, and
## (3) faceted by season.
# melt_daysx <- filter(melt_days, position=="MC")
library(scales)
## plot all ts
png("figures/Application_curves/Depth/C1_F319_clad_Depth_lob_rob_mc_no_days_within_Q.png", width = 500, height = 600)
ggplot(melt_days, aes(x =month_year, y=n_days)) +
geom_line(aes( group = Probability_Threshold, color = Probability_Threshold)) +
scale_color_manual(name="Probability Threshold",breaks = c("Low", "Medium", "High"),
values=c( "green", "red", "blue")) +
theme(axis.text.x = element_text(angle = 45, vjust = 0.5)) +
scale_x_date(breaks=pretty_breaks(), labels = date_format("%b %Y")) +
scale_y_continuous(limits=c(0,31)) +
# scale_x_continuous(breaks=as.numeric(melt_days$month_year), labels=format(melt_days$month_year,"%b %Y")) +
facet_wrap(~position, nrow=3) +
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
labs(title = "F319: Number of days within discharge limit in relation to Depth",
y = "Number of days per Month",
x = "Year") #+ theme_bw(base_size = 15)
dev.off()
## plot by year
png("figures/Application_curves/Depth/C1_F319_clad_Depth_lob_rob_mc_no_days_within_Q_by_year.png", width = 500, height = 600)
ggplot(melt_days, aes(x =month_year, y=n_days)) +
geom_line(aes( group = Probability_Threshold, color = Probability_Threshold)) +
scale_color_manual(name="Probability Threshold", breaks = c("Low", "Medium", "High"),
values=c( "green", "red", "blue")) +
theme(axis.text.x = element_text(angle = 0, vjust = 1)) +
scale_x_date(breaks=pretty_breaks(),labels = date_format("%b")) +
scale_y_continuous(limits=c(0,31)) +
# scale_x_continuous(breaks=as.numeric(month_year), labels=format(month_year,"%b")) +
facet_wrap(~water_year+position, scale="free_x", nrow=4) +
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
labs(title = "F319: Number of days within discharge limit in relation to Depth",
y = "Number of days per Month",
x = "Month") #+ theme_bw(base_size = 15)
dev.off()
## plot by season/critical period
png("figures/Application_curves/Depth/C1_F319_clad_Depth_lob_rob_mc_no_days_within_Q_by_season.png", width = 500, height = 600)
ggplot(melt_days, aes(x =month_year, y=n_days)) +
geom_line(aes( group = Probability_Threshold, color = Probability_Threshold)) +
scale_color_manual(name="Probability Threshold",breaks = c("Low", "Medium", "High"),
values=c( "green", "red", "blue")) +
theme(axis.text.x = element_text(angle = 0, vjust = 1)) +
scale_x_date(breaks=pretty_breaks(),labels = date_format("%Y")) +
scale_y_continuous(limits=c(0,31)) +
# scale_x_continuous(breaks=as.numeric(melt_days$month_year), labels=format(melt_days$month_year,"%Y")) +
facet_wrap(~season +position, scales="free", nrow=2) +
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
labs(title = "F319: Number of days within discharge limit in relation to Depth",
y = "Number of days per Month",
x = "Year") #+ theme_bw(base_size = 15)
dev.off()
| /scripts/cladophora_depth/C1_F319_Cladophora_depth.R | no_license | ksirving/flow_eco_mech | R | false | false | 28,777 | r | ## cladophora
## 1,230 e–0.55 * Depth
#max_depth = 1,230 e–0.55 * Depth
## Fits a quadratic depth~biomass model for Cladophora from Higgins et al. 2005
## data, then applies it to HecRas hydraulic depth time series for node F319.
library(tidyverse)
library(tidyr)
library(sm)
library(lubridate) # work with dates
library(dplyr)     # data manipulation (filter, summarize, mutate)
library(ggplot2) # graphics
library(gridExtra) # tile several plots next to each other
library(scales)
library(data.table)
library(mgcv)
# Literature depth/biomass observations (paths are relative to the project root)
depth <- read.csv("input_data/Depth_2_Higgins_etal_2005.csv")
depth <- na.omit(depth)
depth
## convert depth to cm and biomass to % and presence/absence for glm
# 1230 g DW m^-2 is treated as the maximum biomass, so max_biomass_percent is in [0, 100]
depth <- depth %>%
mutate(depth_cm = depth_m*100) %>%
mutate(max_biomass_percent = (maximum_biomass_g_DW_m..2/1230)*100) %>%
mutate(presence_absence = ifelse(max_biomass_percent == 0, 0, 1)) %>%
mutate(max_biomass_percent_log = log(max_biomass_percent+1))
# Quadratic linear model: biomass %, as a function of depth (cm)
depth_lmq <- lm(max_biomass_percent ~ depth_cm + I(depth_cm^2), data=depth)
clad_depth_mod <- depth_lmq
summary(depth_lmq) ## 0.010316, Adjusted R-squared: 0.9305
save(depth_lmq, file="clad_depth_mod.RData")
# NOTE(review): load() immediately after save() is redundant; depth_lmq is already in memory
load(file="clad_depth_mod.RData")
## qqplot awful
## qqplot not good but passed normality above
## plot fitted curve over the observations
png("figures/Final_curves/Depth/C1_Cladophora_depth_model.png", width = 500, height = 600)
ggplot(data = depth, mapping = aes(x = depth_cm, y = max_biomass_percent))+
geom_point(size = 2)+
stat_smooth(method="lm", formula = y ~ x + I(x^2)) +
# scale_y_continuous(trans=log1p_trans()) +
# scale_y_log10()+
labs(x = "Depth (cm)", y = "Biomass (%)")+
theme_classic()+
# scale_y_continuous(limits=c(,100)) +
theme(axis.text = element_text(size = 20), axis.title = element_text(size = 20))
dev.off()
# Other nodes kept commented out for reference; only F319 is processed here
# F34D <- read.csv("input_data/HecRas/hydraulic_ts_F34D.csv")
# F37B_High <- read.csv("input_data/HecRas/hydraulic_ts_F37B_High.csv")
# F45B <- read.csv("input_data/HecRas/hydraulic_ts_F45B.csv")
F319 <- read.csv("input_data/HecRas/hydraulic_ts_F319.csv")
# LA13 <- read.csv("input_data/HecRas/hydraulic_ts_LA13.csv")
# LA1 <- read.csv("input_data/HecRas/hydraulic_ts_LA1.csv")
## drop the first column (presumably a row index - confirm against the raw csv)
hydraul <- F319[,-1]
names(hydraul)
head(hydraul)
## select datetime, node, discharge and the three depth columns (LOB/MC/ROB)
hyd_dep <- hydraul[,c(1:3,5,9,13)]
colnames(hyd_dep) <-c("DateTime", "node", "Q", "depth_ft_LOB", "depth_ft_MC", "depth_ft_ROB")
# nas <- which(complete.cases(hyd_dep) == FALSE)
# hyd_dep[nas,]
## convert depth from feet to centimetres (ft -> m via 0.3048, then m -> cm)
hyd_dep <- hyd_dep %>%
mutate(depth_cm_LOB = (depth_ft_LOB*0.3048)*100,
depth_cm_MC = (depth_ft_MC*0.3048)*100,
depth_cm_ROB = (depth_ft_ROB*0.3048)*100) %>%
select(-contains("ft")) %>%
mutate(date_num = seq(1,length(DateTime), 1))
hyd_dep
# Long format: one row per timestamp x cross-section position
hyd_dep<-reshape2::melt(hyd_dep, id=c("DateTime","Q", "node", "date_num"))
hyd_dep <- hyd_dep %>% rename(depth_cm = value)
# Only the main channel is analysed below
hyd_dep <- filter(hyd_dep, variable == "depth_cm_MC")
# Predict biomass probability from the depth model for every timestamp
new_data <- hyd_dep %>%
mutate(prob_fit = predict(clad_depth_mod, newdata = hyd_dep, type="response")) %>%
mutate(prob_fit = ifelse(prob_fit<=0, 0, prob_fit)) ## predicts negative percentages - cut off at 0 for quick fix
## format date time
new_data$DateTime<-as.POSIXct(new_data$DateTime,
format = "%Y-%m-%d %H:%M",
tz = "GMT")
## create year, month, day and hour columns and add water year
# Water year: Oct-Dec belong to the current calendar year, Jan-Sep to the previous
new_data <- new_data %>%
mutate(month = month(DateTime)) %>%
mutate(year = year(DateTime)) %>%
mutate(day = day(DateTime)) %>%
mutate(hour = hour(DateTime)) %>%
mutate(water_year = ifelse(month == 10 | month == 11 | month == 12, year, year-1))
head(new_data)
## Find discharge (Q) values where the predicted probability curve crosses the
## 25/50/75% thresholds, using a linear-interpolation root finder loaded from disk.
range(new_data$Q) ## 26.22926 41750.16797
range(new_data$prob_fit) ## -3.2183121 0.3989423
# Q at which probability peaks, per cross-section position
peak <- new_data %>%
group_by(variable) %>%
filter(prob_fit == max(prob_fit)) #%>%
peakQM <- filter(peak, variable=="depth_cm_MC")
peakQM <- max(peakQM$Q)
peakQM ## 706.7369
## filter data by cross section position
new_dataM <- filter(new_data, variable == "depth_cm_MC")
## Main channel curves
# Provides RootLinearInterpolant(x, y, level): Q values where the curve crosses `level`
load(file="root_interpolation_function.Rdata")
newx1a <- RootLinearInterpolant(new_dataM$Q, new_dataM$prob_fit, 25)
newx1a
# If more than 4 crossings, keep only the outermost pair (first and last)
if(length(newx1a) > 4) {
newx1a <- c(newx1a[1], newx1a[length(newx1a)])
} else {
newx1a <- newx1a
}
newx1a
newx2a <- RootLinearInterpolant(new_dataM$Q, new_dataM$prob_fit, 50)
if(length(newx2a) > 4) {
newx2a <- c(newx2a[1], newx2a[length(newx2a)])
} else {
newx2a <- newx2a
}
newx3a <- RootLinearInterpolant(new_dataM$Q, new_dataM$prob_fit, 75)
newx3a
# If the curve never drops below 75%, the whole Q range qualifies: use min Q
if(min(new_data$prob_fit)>75) {
newx3a <- min(new_data$Q)
} else {
newx3a <- newx3a
}
if(length(newx3a) > 4) {
newx3a <- c(newx3a[1], newx3a[length(newx3a)])
} else {
newx3a <- newx3a
}
newx3a
## MAKE DF OF Q LIMITS
# 12 rows = up to 4 crossings for each of the 3 probability levels;
# only the MC column is filled here (LOB/ROB stay NA)
limits <- as.data.frame(matrix(ncol=3, nrow=12)) %>%
rename(LOB = V1, MC = V2, ROB = V3)
rownames(limits)<-c("Low_Prob_1", "Low_Prob_2", "Low_Prob_3", "Low_Prob_4",
"Med_Prob_1", "Med_Prob_2", "Med_Prob_3", "Med_Prob_4",
"High_Prob_1", "High_Prob_2", "High_Prob_3", "High_Prob_4")
limits$MC <- c(newx1a[1], newx1a[2],newx1a[3], newx1a[4],
newx2a[1], newx2a[2],newx2a[3], newx2a[4],
newx3a[1], newx3a[2],newx3a[3],newx3a[4])
limits
write.csv(limits, "output_data/C1_F319_clad_depth_Q_limits.csv")
# Probability ~ Q curve with the threshold crossings marked as coloured points
png("figures/Application_curves/Depth/F319_clad_depth_prob_Q_thresholds.png", width = 500, height = 600)
labels <- c(depth_cm_LOB = "Left Over Bank", depth_cm_MC = "Main Channel", depth_cm_ROB = "Right Over Bank")
ggplot(new_data, aes(x = Q, y=prob_fit)) +
geom_line(aes(group = variable, lty = variable)) +
scale_linetype_manual(values= c("dotted", "solid", "dashed"))+
# name="Cross\nSection\nPosition",
# breaks=c("depth_cm_LOB", "depth_cm_MC", "depth_cm_ROB"),
# labels = c("LOB", "MC", "ROB")) +
facet_wrap(~variable, scales="free_x", nrow=3, labeller=labeller(variable = labels)) +
geom_point(data = subset(new_data, variable =="depth_cm_MC"), aes(y=25, x=newx1a[1]), color="green") +
geom_point(data = subset(new_data, variable =="depth_cm_MC"), aes(y=25, x=newx1a[2]), color="green") +
geom_point(data = subset(new_data, variable =="depth_cm_MC"), aes(y=25, x=newx1a[3]), color="green") +
geom_point(data = subset(new_data, variable =="depth_cm_MC"), aes(y=25, x=newx1a[4]), color="green") +
geom_point(data = subset(new_data, variable =="depth_cm_MC"), aes(y=50, x=newx2a[1]), color="red") +
geom_point(data = subset(new_data, variable =="depth_cm_MC"), aes(y=50, x=newx2a[2]), color="red") +
geom_point(data = subset(new_data, variable =="depth_cm_MC"), aes(y=50, x=newx2a[3]), color="red") +
geom_point(data = subset(new_data, variable =="depth_cm_MC"), aes(y=50, x=newx2a[4]), color="red") +
geom_point(data = subset(new_data, variable =="depth_cm_MC"), aes(y=75, x=newx3a[1]), color="blue") +
geom_point(data = subset(new_data, variable =="depth_cm_MC"), aes(y=75, x=newx3a[2]), color="blue") +
geom_point(data = subset(new_data, variable =="depth_cm_MC"), aes(y=75, x=newx3a[3]), color="blue") +
geom_point(data = subset(new_data, variable =="depth_cm_MC"), aes(y=75, x=newx3a[4]), color="blue") +
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(), legend.position = "none") +
labs(title = "F319: Cladophora/Depth: Probability ~ Q",
y = "Probability",
x = "Q (cfs)") #+ theme_bw(base_size = 15)
dev.off()
# create year_month column
new_dataMx <- new_dataM %>% unite(month_year, year:month, sep="-", remove=F)
head(new_dataMx)
## make dataframe for all years
## define critical period or season for adult as all year is critical
## define seasons/critical period
# Critical period is March-July; everything else is non-critical
non_critical <- c(1,2,8:12)
critical <- c(3:7)
new_dataMx <- new_dataMx %>%
mutate(season = ifelse(month %in% non_critical, "non_critical", "critical") )
# time stats - mid channel ------------------------------------------------
# Build an UNEVALUATED expression describing when Q is inside the "Low" (25%)
# probability band, based on how many threshold crossings exist and where they
# sit relative to the probability peak (peakQM). The expression is eval()'d
# later inside dplyr verbs, where Q is a column of the grouped data frame.
# NOTE(review): in conditions like `length(x) == 3 && (A) || (B)`, R's `&&`
# binds tighter than `||`, so (B) is tested WITHOUT the length check -
# confirm this is the intended logic before relying on the 3/4-root branches.
if(is.na(newx1a[1])) {
low_threshM <- expression(Q < 0)
## 1a) if 1 threshold value and it's lower than the peak (ascending slope)
} else if(length(newx1a)==1 && newx1a < peakQM){
# sum the amount of time above threshold
low_threshM <- expression(Q >= newx1a)
## 1b) if 1 threshold value and it's higher than the peak (descending slope)
} else if (length(newx1a)==1 && newx1a > peakQM){
# sum the amount of time below the threshold
low_threshM <- expression(Q <= newx1a)
## 2a) if 2 threshold values and the first one is lower than the peak(positive parabol)
} else if (length(newx1a)==2 && newx1a[1] < peakQM) {
# sum the amount of time above the first and below the 2nd threshold
low_threshM <- expression(Q >= newx1a[1] & Q <= newx1a[2])
## 2b) if 2 threshold values and the first one is higher OR the 2nd one is lower than the peak (negative parabol)
} else if(length(newx1a)==2 && (newx1a[1] > peakQM || newx1a[2] < peakQM )) {
# sum the amount of time below the first and above the 2nd threshold
low_threshM <- expression(Q <= newx1a[1] & Q >= newx1a[2])
## if 3 threshold values and the 1st one is lower then the peak (begins negative slope)
} else if (length(newx1a) == 3 && (newx1a[1] < peakQM && newx1a[2] < peakQM && newx1a[3] > peakQM) ||
(newx1a[1] > peakQM && newx1a[2] > peakQM && newx1a[3] > peakQM)) {
# sum the amount of time above the first and below the 2nd threshold and above the 3rd
low_threshM <- expression(Q <= newx1a[1] | Q >= newx1a[2] & Q <= newx1a[3])
## if 3 threshold values and the 3rd one is higher then the peak (begins positive slope)
} else if (length(newx1a) == 3 && (newx1a[1] < peakQM && newx1a[2] > peakQM && newx1a[3] > peakQM) ||
(newx1a[1] > peakQM && newx1a[2] > peakQM && newx1a[3] < peakQM)) {
# sum the amount of time below the first and above the 2nd threshold and below the 3rd
low_threshM <- expression(Q >= newx1a[1] & Q <= newx1a[2] | Q >= newx1a[3])
## 4a) if 4 threshold values and all are higher than the peak (begins positive slope)
} else if (length(newx1a) == 4 && newx1a[1] < peakQM) {
# sum the amount of time above the first and below the 2nd threshold or above the 3rd and below 2nd
low_threshM <- expression(Q >= newx1a[1] & Q <= newx1a[2] | Q >= newx1a[3] & Q <= newx1a[4])
## 4b) if 4 threshold values and all are higher than the peak, the 1st one and 2nd are lower, or all are lower (begins negative slope)
} else if (length(newx1a) == 4 && (newx1a[1] < peakQM && newx1a[2] < peakQM && newx1a[3] < peakQM && newx1a[4] < peakQM) || (newx1a[1] > peakQM
&& newx1a[2] > peakQM && newx1a[3] > peakQM && newx1a[4] > peakQM) || (newx1a[2] < peakQM && newx1a[3] > peakQM)) {
# sum the amount of time above the first and below the 2nd threshold and above the 3rd
low_threshM <- expression(Q <= newx1a[1] & Q >= newx1a[2] | Q <= newx1a[3] & Q >= newx1a[4])
}
low_threshM
newx1a
### medium threshold
# Same case analysis as low_threshM, applied to the 50% crossings (newx2a).
# The same &&/|| precedence caveat noted for low_threshM applies here.
if(is.na(newx2a[1])) {
med_threshM <- expression(Q < 0)
## if 1 threshold value and it's lower than the peak (ascending slope)
} else if(length(newx2a)==1 && newx2a < peakQM){
# sum the amount of time above threshold
med_threshM <- expression(Q >= newx2a)
## if 1 threshold value and it's higher than the peak (descending slope)
} else if (length(newx2a)==1 && newx2a > peakQM){
# sum the amount of time below the threshold
med_threshM <- expression(Q <= newx2a)
## if 2 threshold values and the first one is lower than the peak(positive parabol)
} else if (length(newx2a)==2 && newx2a[1] < peakQM) {
# sum the amount of time above the first and below the 2nd threshold
med_threshM <- expression(Q >= newx2a[1] & Q <= newx2a[2])
## if 2 threshold values and the first one is higher OR the 2nd one is lower than the peak (negative parabol)
} else if(length(newx2a)==2 && (newx2a[1] > peakQM || newx2a[2] < peakQM) ) {
# sum the amount of time below the first and above the 2nd threshold
med_threshM <- expression(Q <= newx2a[1] & Q >= newx2a[2])
## if 3 threshold values and the 1st one is lower then the peak (begins negative slope)
} else if (length(newx2a) == 3 && (newx2a[1] < peakQM && newx2a[2] < peakQM && newx2a[3] > peakQM) ||
(newx2a[1] > peakQM && newx2a[2] > peakQM && newx2a[3] > peakQM)) {
# sum the amount of time above the first and below the 2nd threshold and above the 3rd
med_threshM <- expression(Q <= newx2a[1] | Q >= newx2a[2] & Q <= newx2a[3])
## if 3 threshold values and the 3rd one is higher then the peak (begins positive slope)
} else if (length(newx2a) == 3 && (newx2a[1] < peakQM && newx2a[2] > peakQM && newx2a[3] > peakQM) ||
(newx2a[1] > peakQM && newx2a[2] > peakQM && newx2a[3] < peakQM)) {
# sum the amount of time below the first and above the 2nd threshold and below the 3rd
med_threshM <- expression(Q >= newx2a[1] & Q <= newx2a[2] | Q >= newx2a[3])
## 4a) if 4 threshold values and all are higher than the peak (begins positive slope)
} else if (length(newx2a) == 4 && newx2a[1] < peakQM) {
# sum the amount of time above the first and below the 2nd threshold or above the 3rd and below 2nd
med_threshM <- expression(Q >= newx2a[1] & Q <= newx2a[2] | Q >= newx2a[3] & Q <= newx2a[4])
## 4b) if 4 threshold values and all are higher than the peak, the 1st one and 2nd are lower, or all are lower (begins negative slope)
} else if (length(newx2a) == 4 && (newx2a[1] < peakQM && newx2a[2] < peakQM && newx2a[3] < peakQM && newx2a[4] < peakQM) || (newx2a[1] > peakQM
&& newx2a[2] > peakQM && newx2a[3] > peakQM && newx2a[4] > peakQM) || (newx2a[2] < peakQM && newx2a[3] > peakQM)) {
# sum the amount of time above the first and below the 2nd threshold and above the 3rd
med_threshM <- expression(Q <= newx2a[1] & Q >= newx2a[2] | Q <= newx2a[3] & Q >= newx2a[4])
}
med_threshM
### high threshold
# Same case analysis, applied to the 75% crossings (newx3a).
# The same &&/|| precedence caveat noted for low_threshM applies here.
if(is.na(newx3a[1])) {
high_threshM <- expression(Q < 0)
## if 1 threshold value and it's lower than the peak (ascending slope)
} else if(length(newx3a)==1 && newx3a < peakQM){
# sum the amount of time above threshold
high_threshM <- expression(Q >= newx3a)
## if 1 threshold value and it's higher than the peak (descending slope)
} else if (length(newx3a)==1 && newx3a > peakQM){
# sum the amount of time below the threshold
high_threshM <- expression(Q <= newx3a)
## if 2 threshold values and the first one is lower than the peak(positive parabol)
} else if (length(newx3a)==2 && newx3a[1] < peakQM) {
# sum the amount of time above the first and below the 2nd threshold
high_threshM <- expression(Q >= newx3a[1] & Q <= newx3a[2])
## if 2 threshold values and the first one is higher OR the 2nd one is lower than the peak (negative parabol)
} else if(length(newx3a)==2 && (newx3a[1] > peakQM || newx3a[2] < peakQM) ) {
# sum the amount of time below the first and above the 2nd threshold
high_threshM <- expression(Q <= newx3a[1] & Q >= newx3a[2])
## if 3 threshold values (begins negative slope)
} else if (length(newx3a) == 3 && (newx3a[1] < peakQM && newx3a[2] < peakQM && newx3a[3] > peakQM) ||
(newx3a[1] > peakQM && newx3a[2] > peakQM && newx3a[3] > peakQM)) {
# sum the amount of time above the first and below the 2nd threshold and above the 3rd
high_threshM <- expression(Q <= newx3a[1] | Q >= newx3a[2] & Q <= newx3a[3])
## if 3 threshold values (begins positive slope)
} else if (length(newx3a) == 3 && (newx3a[1] < peakQM && newx3a[2] > peakQM && newx3a[3] > peakQM) ||
(newx3a[1] > peakQM && newx3a[2] > peakQM && newx3a[3] < peakQM)) {
# sum the amount of time below the first and above the 2nd threshold and below the 3rd
high_threshM <- expression(Q >= newx3a[1] & Q <= newx3a[2] | Q >= newx3a[3])
## 4a) if 4 threshold values and all are higher than the peak (begins positive slope)
} else if (length(newx3a) == 4 && newx3a[1] < peakQM) {
# sum the amount of time above the first and below the 2nd threshold or above the 3rd and below 2nd
high_threshM <- expression(Q >= newx3a[1] & Q <= newx3a[2] | Q >= newx3a[3] & Q <= newx3a[4])
## 4b) if 4 threshold values and all are higher than the peak, the 1st one and 2nd are lower, or all are lower (begins negative slope)
} else if (length(newx3a) == 4 && (newx3a[1] < peakQM && newx3a[2] < peakQM && newx3a[3] < peakQM && newx3a[4] < peakQM) || (newx3a[1] > peakQM
&& newx3a[2] > peakQM && newx3a[3] > peakQM && newx3a[4] > peakQM) || (newx3a[2] < peakQM && newx3a[3] > peakQM)) {
# sum the amount of time above the first and below the 2nd threshold and above the 3rd
high_threshM <- expression(Q <= newx3a[1] & Q >= newx3a[2] | Q <= newx3a[3] & Q >= newx3a[4])
}
high_threshM
med_threshM
low_threshM
###### calculate amount of time
# Percentage of time Q falls inside each probability band, per water year
# (Low/Medium/High) and per water year x season (*.Seasonal). The threshold
# expressions built above are eval()'d against the Q column within each group.
time_statsm <- new_dataMx %>%
dplyr::group_by(water_year) %>%
dplyr::mutate(Low = sum(eval(low_threshM))/length(DateTime)*100) %>%
dplyr::mutate(Medium = sum(eval(med_threshM))/length(DateTime)*100) %>%
dplyr::mutate(High = sum(eval(high_threshM))/length(DateTime)*100) %>%
ungroup() %>%
dplyr::group_by(water_year, season) %>%
dplyr::mutate(Low.Seasonal = sum(eval(low_threshM))/length(DateTime)*100) %>%
dplyr::mutate(Medium.Seasonal = sum(eval(med_threshM))/length(DateTime)*100) %>%
dplyr::mutate(High.Seasonal = sum(eval(high_threshM))/length(DateTime)*100) %>%
distinct(year, Low , Medium , High , Low.Seasonal, Medium.Seasonal, High.Seasonal) %>%
mutate(position="MC")
time_statsm
time_stats <- time_statsm
## melt to long format: one row per (year, season, position, statistic)
melt_time<-reshape2::melt(time_stats, id=c("year","season", "position", "water_year"))
melt_time <- rename(melt_time, Probability_Threshold = variable)
unique(melt_time$position)
write.csv(melt_time, "output_data/C1_F319_clad_Depth_time_stats.csv")
## subset annual stats
# Relies on melt() emitting Low/Medium/High first, then the *.Seasonal columns
ann_stats <- unique(melt_time$Probability_Threshold)[1:3]
melt_time_ann <- melt_time %>% filter(Probability_Threshold %in% ann_stats ) %>%
select(-season) %>% distinct()
## subset seasonal stats
seas_stats <- unique(melt_time$Probability_Threshold)[4:6]
melt_time_seas <- filter(melt_time, Probability_Threshold %in% seas_stats )
melt_time_seas
## plot for annual stats - need probs in order
png("figures/Application_curves/Depth/C1_F319_clad_Depth_perc_time_above_threshold_annual.png", width = 500, height = 600)
ggplot(melt_time_ann, aes(x = water_year, y=value)) +
geom_line(aes( group =c(), color = Probability_Threshold)) +
scale_color_manual(name = "Probability Threshold", breaks = c("Low", "Medium", "High"),
values=c( "green", "red", "blue"),
labels = c("Low", "Medium", "High")) +
theme(axis.text.x = element_text(angle = 90, vjust = 1)) +
# scale_x_continuous(breaks=as.numeric(total_days$month_year), labels=format(total_days$month_year,"%b %Y")) +
facet_wrap(~position, scales="free_x", nrow=3) +
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
labs(title = "F319: Time within discharge limit in relation to Depth (Annual)",
y = "Time (%)",
x = "Year") #+ theme_bw(base_size = 15)
dev.off()
## plot for winter stats - need probs in order
melt_time_winter <- filter(melt_time_seas, season == "non_critical")
unique(melt_time_winter$season)
png("figures/Application_curves/Depth/C1_F319_clad_Depth_perc_time_above_threshold_winter.png", width = 500, height = 600)
ggplot(melt_time_winter, aes(x = water_year, y=value)) +
geom_line(aes( group = c(), color = Probability_Threshold)) +
scale_color_manual(name = "Probability Threshold", breaks = c("Low.Seasonal", "Medium.Seasonal", "High.Seasonal"),
values=c( "green", "red", "blue"),
labels = c("Low", "Medium", "High")) +
theme(axis.text.x = element_text(angle = 90, vjust = 1)) +
# scale_x_continuous(breaks=as.numeric(total_days$month_year), labels=format(total_days$month_year,"%b %Y")) +
facet_wrap(~position, scales="free_x", nrow=3) +
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
labs(title = "F319: Time within discharge limit in relation to Depth (Non_critical)",
y = "Time (%)",
x = "Year") #+ theme_bw(base_size = 15)
dev.off()
## plot for summer stats - need probs in order
melt_time_summer <- filter(melt_time_seas, season == "critical")
png("figures/Application_curves/Depth/C1_F319_clad_Depth_perc_time_above_threshold_critical.png", width = 500, height = 600)
ggplot(melt_time_summer, aes(x = water_year, y=value)) +
geom_line(aes( group = c(), color = Probability_Threshold)) +
scale_color_manual(name = "Probability Threshold", breaks = c("Low.Seasonal", "Medium.Seasonal", "High.Seasonal"),
values=c( "green", "red", "blue"),
labels = c("Low", "Medium", "High")) +
theme(axis.text.x = element_text(angle = 90, vjust = 1)) +
# scale_x_continuous(breaks=as.numeric(total_days$month_year), labels=format(total_days$month_year,"%b %Y")) +
facet_wrap(~position, scales="free_x", nrow=3) +
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
labs(title = "F319: Time within discharge limit in relation to Depth (Critical)",
y = "Time (%)",
x = "Year") #+ theme_bw(base_size = 15)
dev.off()
# Number of days above discharge ------------------------------------------
# all columns based on different probabilities
## count number events within each threshold with a running total - max total is the number of consequative
# events (hours) per day. if else statements to consider the thresholds newx1a/b etc
## order by datetime
limits
# rleid() gives each run of consecutive TRUE/FALSE threshold results its own ID;
# row_number() within a run then counts consecutive hours inside the limit.
new_dataM <- new_dataM %>%
ungroup() %>%
group_by(month, day, water_year, ID01 = data.table::rleid(eval(low_threshM))) %>%
mutate(Low = if_else(eval(low_threshM), row_number(), 0L)) %>%
ungroup() %>%
group_by(month, day, water_year, ID02 = data.table::rleid(eval(med_threshM))) %>%
mutate(Medium = if_else(eval(med_threshM), row_number(), 0L)) %>%
ungroup() %>%
group_by(month, day, water_year, ID03 = data.table::rleid(eval(high_threshM))) %>%
mutate(High = if_else(eval(high_threshM), row_number(), 0L))
new_dataM <- mutate(new_dataM, position="MC")
new_datax <- select(new_dataM, c(Q, month, water_year, day, ID01, Low, ID02, Medium, ID03, High, position, DateTime) )# all probs
## melt: one row per timestamp x threshold, value = consecutive hours inside the limit
melt_data<-reshape2::melt(new_datax, id=c("ID01", "ID02", "ID03", "day", "month", "water_year", "Q", "position"))
melt_data <- rename(melt_data, Probability_Threshold = variable,
consec_hours = value)
melt_data
## groups data by year, month and ID & threshold
## counts the number of days in each month probability is within the depth of each threshold - days are not necessarily conseq
## each threshold separately
## count how many full days i.e. 24 hours
total_days01 <- melt_data %>%
filter(Probability_Threshold == "Low") %>%
group_by(ID01, day, month, water_year, position) %>%
summarise(n_hours = max(consec_hours)) %>%
mutate(n_days_low = ifelse(n_hours >= 24, 1, 0)) # %>%
total_days01
## count the number of days in each month
total_days_per_month01 <- total_days01 %>%
group_by(month, water_year, position) %>%
summarise(days_per_month_low = sum(n_days_low))
total_days_per_month01
total_days02 <- melt_data %>%
filter(Probability_Threshold == "Medium") %>%
group_by(ID02, day, month, water_year, position) %>%
summarise(n_hours = max(consec_hours)) %>%
mutate(n_days_medium = ifelse(n_hours >= 24, 1, 0)) # %>%
total_days_per_month02 <- total_days02 %>%
group_by(month, water_year, position) %>%
summarise(days_per_month_medium = sum(n_days_medium))
# total_days_per_month02
total_days03 <- melt_data %>%
filter(Probability_Threshold == "High") %>%
group_by(ID03, day, month, water_year, position) %>%
summarise(n_hours = max(consec_hours)) %>%
mutate(n_days_high = ifelse(n_hours >= 24, 1, 0)) # %>%
total_days_per_month03 <- total_days03 %>%
group_by(month, water_year, position) %>%
summarise(days_per_month_high = sum(n_days_high))
total_days_per_month03
## combine all thresholds
# cbind assumes the three monthly tables have identical row order - TODO confirm
total_days <- cbind( total_days_per_month01,total_days_per_month02[,4], total_days_per_month03[,4])
head(total_days)
write.csv(total_days, "output_data/C1_F319_clad_Depth_total_days.csv")
# # create year_month column
total_days <- ungroup(total_days) %>%
unite(month_year, water_year:month, sep="-", remove=F)
## convert month year to date format (first day of each month)
library(zoo)
total_days$month_year <- zoo::as.yearmon(total_days$month_year)
total_days$month_year <- as.Date(total_days$month_year)
## change names of columns
total_days <- rename(total_days, Low = days_per_month_low, Medium = days_per_month_medium, High = days_per_month_high)
# total_hours <- rename(total_hours, Low = n_days_low, Medium = n_days_medium, High = n_days_high)
## define seasons/critical period
non_critical <- c(1,2,8:12)
critical <- c(3:7)
total_days <- total_days %>%
mutate(season = ifelse(month %in% non_critical, "non_critical", "critical") )
# ## melt data
melt_days<-reshape2::melt(total_days, id=c("month_year", "water_year", "month", "season", "position"))
melt_days <- rename(melt_days, Probability_Threshold = variable,
n_days = value)
head(melt_days)
## save df
write.csv(melt_days, "output_data/C1_F319_clad_Depth_total_days_long.csv")
# melt_daysx <- filter(melt_days, position=="MC")
library(scales)
## plot all ts
## Full time series of days-per-month within each limit, faceted by position.
png("figures/Application_curves/Depth/C1_F319_clad_Depth_lob_rob_mc_no_days_within_Q.png", width = 500, height = 600)
ggplot(melt_days, aes(x =month_year, y=n_days)) +
geom_line(aes( group = Probability_Threshold, color = Probability_Threshold)) +
scale_color_manual(name="Probability Threshold",breaks = c("Low", "Medium", "High"),
values=c( "green", "red", "blue")) +
theme(axis.text.x = element_text(angle = 45, vjust = 0.5)) +
scale_x_date(breaks=pretty_breaks(), labels = date_format("%b %Y")) +
scale_y_continuous(limits=c(0,31)) +
# scale_x_continuous(breaks=as.numeric(melt_days$month_year), labels=format(melt_days$month_year,"%b %Y")) +
facet_wrap(~position, nrow=3) +
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
labs(title = "F319: Number of days within discharge limit in relation to Depth",
y = "Number of days per Month",
x = "Year") #+ theme_bw(base_size = 15)
dev.off()
## plot by year
## Number of days per month inside each discharge limit, one panel per
## water_year x position combination. Side effect: writes a PNG.
png("figures/Application_curves/Depth/C1_F319_clad_Depth_lob_rob_mc_no_days_within_Q_by_year.png", width = 500, height = 600)
ggplot(melt_days, aes(x = month_year, y = n_days)) +
  geom_line(aes(group = Probability_Threshold, color = Probability_Threshold)) +
  scale_color_manual(name = "Probability Threshold", breaks = c("Low", "Medium", "High"),
                     values = c("green", "red", "blue")) +
  theme(axis.text.x = element_text(angle = 0, vjust = 1)) +
  scale_x_date(breaks = pretty_breaks(), labels = date_format("%b")) +
  scale_y_continuous(limits = c(0, 31)) +  # cap at the max number of days in a month
  # 'scales' spelled out: the original 'scale=' only worked via partial argument matching
  facet_wrap(~water_year + position, scales = "free_x", nrow = 4) +
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
  labs(title = "F319: Number of days within discharge limit in relation to Depth",
       y = "Number of days per Month",
       x = "Month")
dev.off()
## plot by season/critical period
## Number of days per month within each limit, faceted by season and position.
png("figures/Application_curves/Depth/C1_F319_clad_Depth_lob_rob_mc_no_days_within_Q_by_season.png", width = 500, height = 600)
ggplot(melt_days, aes(x =month_year, y=n_days)) +
geom_line(aes( group = Probability_Threshold, color = Probability_Threshold)) +
scale_color_manual(name="Probability Threshold",breaks = c("Low", "Medium", "High"),
values=c( "green", "red", "blue")) +
theme(axis.text.x = element_text(angle = 0, vjust = 1)) +
scale_x_date(breaks=pretty_breaks(),labels = date_format("%Y")) +
scale_y_continuous(limits=c(0,31)) +
# scale_x_continuous(breaks=as.numeric(melt_days$month_year), labels=format(melt_days$month_year,"%Y")) +
facet_wrap(~season +position, scales="free", nrow=2) +
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
labs(title = "F319: Number of days within discharge limit in relation to Depth",
y = "Number of days per Month",
x = "Year") #+ theme_bw(base_size = 15)
dev.off()
|
#' Visualize Profiles, e.g. of Partial Dependence
#'
#' Minimal visualization of an object of class \code{light_profile}. The object returned is of class \code{ggplot} and can be further customized.
#'
#' Either lines and points are plotted (if stats = "mean") or quartile boxes. If there is a "by" variable or a multiflashlight, this first dimension is taken care by color (or if \code{swap_dim = TRUE} by facets). If there are two "by" variables or a multiflashlight with one "by" variable, the first "by" variable is visualized as color, the second one or the multiflashlight via facet (change with \code{swap_dim}).
#'
#' @import ggplot2
#' @importFrom stats reformulate
#' @method plot light_profile
#' @param x An object of class \code{light_profile}.
#' @param swap_dim If multiflashlight and one "by" variable or single flashlight with two "by" variables, swap the role of dodge/fill variable and facet variable. If multiflashlight or one "by" variable, use facets instead of colors.
#' @param facet_scales Scales argument passed to \code{facet_wrap}.
#' @param rotate_x Should x axis labels be rotated by 45 degrees? TRUE, except for type "partial dependence".
#' @param ... Further arguments passed to \code{geom_point} and \code{geom_line}.
#' @return An object of class \code{ggplot2}.
#' @export
#' @examples
#' fit_full <- lm(Sepal.Length ~ ., data = iris)
#' fit_part <- lm(Sepal.Length ~ Petal.Length, data = iris)
#' mod_full <- flashlight(model = fit_full, label = "full", data = iris, y = "Sepal.Length")
#' mod_part <- flashlight(model = fit_part, label = "part", data = iris, y = "Sepal.Length")
#' mods <- multiflashlight(list(mod_full, mod_part))
#'
#' plot(light_profile(mod_full, v = "Species"))
#' plot(light_profile(mod_full, v = "Species", type = "residual", stats = "quartiles"))
#' plot(light_profile(mod_full, v = "Petal.Width", by = "Species"))
#' plot(light_profile(mods, v = "Petal.Width", by = "Species"))
#' @seealso \code{\link{light_profile}}, \code{\link{plot.light_effects}}.
plot.light_profile <- function(x, swap_dim = FALSE, facet_scales = "free_x",
                               rotate_x = x$type != "partial dependence", ...) {
  # Fix: `data` was previously assigned but never used (plots read x$data
  # directly); it is now the single source for all ggplot calls below.
  data <- x$data
  nby <- length(x$by)
  multi <- is.light_profile_multi(x)
  # Number of grouping dimensions: "by" variables plus one for a multiflashlight.
  ndim <- nby + multi
  if (ndim > 2L) {
    stop("Plot method not defined for more than two by variables or
multiflashlight with more than one by variable.")
  }
  if (length(x$v) >= 2L) {
    stop("No plot method defined for two or higher dimensional grids.")
  }
  # Base plot: quartile plots additionally carry ymin/ymax for geom_crossbar.
  if (x$stats == "quartiles") {
    p <- ggplot(data, aes_string(y = x$value, x = x$v, ymin = x$q1_name, ymax = x$q3_name))
  } else {
    p <- ggplot(data, aes_string(y = x$value, x = x$v))
  }
  if (ndim == 0L) {
    # No grouping: plain crossbars or point-and-line.
    if (x$stats == "quartiles") {
      p <- p + geom_crossbar(...)
    } else {
      p <- p + geom_point(...) +
        geom_line(aes(group = 1), ...)
    }
  } else if (ndim == 1L) {
    # One grouping dimension: encode it as color, or as facets when swap_dim = TRUE.
    first_dim <- if (multi) x$label_name else x$by[1]
    if (!swap_dim) {
      if (x$stats == "quartiles") {
        p <- p + geom_crossbar(aes_string(color = first_dim), position = "dodge", ...)
      } else {
        p <- p + geom_point(aes_string(color = first_dim), ...) +
          geom_line(aes_string(color = first_dim, group = first_dim), ...)
      }
    } else {
      p <- p +
        facet_wrap(reformulate(first_dim), scales = facet_scales)
      if (x$stats == "quartiles") {
        p <- p + geom_crossbar(...)
      } else {
        p <- p + geom_point(...) +
          geom_line(aes(group = 1), ...)
      }
    }
  } else {
    # Two dimensions: one mapped to color, the other to facets (swapped by swap_dim).
    second_dim <- if (multi) x$label_name else x$by[2]
    wrap_var <- if (swap_dim) x$by[1] else second_dim
    col_var <- if (swap_dim) second_dim else x$by[1]
    if (x$stats == "quartiles") {
      p <- p + geom_crossbar(aes_string(color = col_var), position = "dodge", ...)
    } else {
      p <- p + geom_point(aes_string(color = col_var), ...) +
        geom_line(aes_string(color = col_var, group = col_var), ...)
    }
    p <- p + facet_wrap(wrap_var, scales = facet_scales)
  }
  if (rotate_x) {
    p <- p + theme(axis.text.x = element_text(angle = 45, hjust = 1, vjust = 1))
  }
  p + ylab(x$type)
}
| /R/plot_light_profile.R | no_license | agosiewska/flashlight | R | false | false | 4,314 | r | #' Visualize Profiles, e.g. of Partial Dependence
#'
#' Minimal visualization of an object of class \code{light_profile}. The object returned is of class \code{ggplot} and can be further customized.
#'
#' Either lines and points are plotted (if stats = "mean") or quartile boxes. If there is a "by" variable or a multiflashlight, this first dimension is taken care by color (or if \code{swap_dim = TRUE} by facets). If there are two "by" variables or a multiflashlight with one "by" variable, the first "by" variable is visualized as color, the second one or the multiflashlight via facet (change with \code{swap_dim}).
#'
#' @import ggplot2
#' @importFrom stats reformulate
#' @method plot light_profile
#' @param x An object of class \code{light_profile}.
#' @param swap_dim If multiflashlight and one "by" variable or single flashlight with two "by" variables, swap the role of dodge/fill variable and facet variable. If multiflashlight or one "by" variable, use facets instead of colors.
#' @param facet_scales Scales argument passed to \code{facet_wrap}.
#' @param rotate_x Should x axis labels be rotated by 45 degrees? TRUE, except for type "partial dependence".
#' @param ... Further arguments passed to \code{geom_point} and \code{geom_line}.
#' @return An object of class \code{ggplot2}.
#' @export
#' @examples
#' fit_full <- lm(Sepal.Length ~ ., data = iris)
#' fit_part <- lm(Sepal.Length ~ Petal.Length, data = iris)
#' mod_full <- flashlight(model = fit_full, label = "full", data = iris, y = "Sepal.Length")
#' mod_part <- flashlight(model = fit_part, label = "part", data = iris, y = "Sepal.Length")
#' mods <- multiflashlight(list(mod_full, mod_part))
#'
#' plot(light_profile(mod_full, v = "Species"))
#' plot(light_profile(mod_full, v = "Species", type = "residual", stats = "quartiles"))
#' plot(light_profile(mod_full, v = "Petal.Width", by = "Species"))
#' plot(light_profile(mods, v = "Petal.Width", by = "Species"))
#' @seealso \code{\link{light_profile}}, \code{\link{plot.light_effects}}.
plot.light_profile <- function(x, swap_dim = FALSE, facet_scales = "free_x",
rotate_x = x$type != "partial dependence", ...) {
data <- x$data
nby <- length(x$by)
multi <- is.light_profile_multi(x)
ndim <- nby + multi
if (ndim > 2L) {
stop("Plot method not defined for more than two by variables or
multiflashlight with more than one by variable.")
}
if (length(x$v) >= 2L) {
stop("No plot method defined for two or higher dimensional grids.")
}
# Distinguish some cases
if (x$stats == "quartiles") {
p <- ggplot(x$data, aes_string(y = x$value, x = x$v, ymin = x$q1_name, ymax = x$q3_name))
} else {
p <- ggplot(x$data, aes_string(y = x$value, x = x$v))
}
if (ndim == 0L) {
if (x$stats == "quartiles") {
p <- p + geom_crossbar(...)
}
else {
p <- p + geom_point(...) +
geom_line(aes(group = 1), ...)
}
} else if (ndim == 1L) {
first_dim <- if (multi) x$label_name else x$by[1]
if (!swap_dim) {
if (x$stats == "quartiles") {
p <- p + geom_crossbar(aes_string(color = first_dim), position = "dodge", ...)
} else {
p <- p + geom_point(aes_string(color = first_dim), ...) +
geom_line(aes_string(color = first_dim, group = first_dim), ...)
}
} else {
p <- p +
facet_wrap(reformulate(first_dim), scales = facet_scales)
if (x$stats == "quartiles") {
p <- p + geom_crossbar(...)
} else {
p <- p + geom_point(...) +
geom_line(aes(group = 1), ...)
}
}
} else {
second_dim <- if (multi) x$label_name else x$by[2]
wrap_var <- if (swap_dim) x$by[1] else second_dim
col_var <- if (swap_dim) second_dim else x$by[1]
if (x$stats == "quartiles") {
p <- p + geom_crossbar(aes_string(color = col_var), position = "dodge", ...)
} else {
p <- p + geom_point(aes_string(color = col_var), ...) +
geom_line(aes_string(color = col_var, group = col_var), ...)
}
p <- p + facet_wrap(wrap_var, scales = facet_scales)
}
if (rotate_x) {
p <- p + theme(axis.text.x = element_text(angle = 45, hjust = 1, vjust = 1))
}
p + ylab(x$type)
}
|
## Explore the relationship between run differential and winning percentage
## using the Lahman Teams table, then highlight the largest model residuals.
library(tidyverse)
library(Lahman)
tail(Teams, 3)
# NOTE(review): help() is interactive; harmless but unnecessary in a script
help(Teams)
#Studying Runs and its correlation to Wins
#Creating a df of teams after 2000 (2001 onward), keeping games, wins/losses, runs and runs against
my_teams <- Teams %>%
filter(yearID > 2000) %>%
select(teamID, yearID, lgID, G, W, L, R, RA)
tail(my_teams)
#Calculating Run Differential (RD) and Winning Percentage (Wpct)
my_teams <- my_teams %>%
mutate(RD = R - RA, Wpct = W / (W+L))
#Plotting a scatter with Wpct on the Y and RD on the X
run_diff <- ggplot(my_teams, aes(x = RD, y = Wpct)) +
geom_point() +
scale_x_continuous("Run Differential")+
scale_y_continuous("Winning Percentage")
# Simple linear model: winning percentage as a function of run differential
linfit <- lm(Wpct ~ RD, data = my_teams)
linfit
run_diff +
geom_smooth(method = "lm", se = FALSE)
#Plot residuals, labelling the four teams with the largest absolute residual
# NOTE(review): install.packages() inside a script re-installs on every run;
# better done once interactively
install.packages("ggrepel")
library(ggrepel)
library(broom)
# augment() attaches fitted values and residuals (.resid) to the data
my_teams_aug <- augment(linfit, data = my_teams)
base_plot <- ggplot(my_teams_aug, aes(x = RD, y = .resid)) +
geom_point(alpha = 0.3)+
geom_hline(yintercept = 0, linetype = 3)+
xlab("Run Differential") + ylab("Residual")
highlight_teams <- my_teams_aug %>%
arrange(desc(abs(.resid))) %>%
head(4)
base_plot +
geom_point(data = highlight_teams)+
geom_text_repel(data = highlight_teams, aes(label=paste(teamID, yearID)))
| /R/baseball.r | no_license | JGlessner757/dawg | R | false | false | 1,316 | r | library(tidyverse)
library(Lahman)
tail(Teams, 3)
help(Teams)
# Studying Runs and its correlation to Wins.
# Teams after the 2000 season (note: yearID > 2000 excludes 2000 itself),
# only looking at Games, Runs and Runs Against.
my_teams <- Teams %>%
  filter(yearID > 2000) %>%
  select(teamID, yearID, lgID, G, W, L, R, RA)
tail(my_teams)
# Calculating Run Differential (RD) and Winning Percentage (Wpct)
my_teams <- my_teams %>%
  mutate(RD = R - RA, Wpct = W / (W+L))
# Plotting a scatter with Wpct on the Y and RD on the X
run_diff <- ggplot(my_teams, aes(x = RD, y = Wpct)) +
  geom_point() +
  scale_x_continuous("Run Differential")+
  scale_y_continuous("Winning Percentage")
# Linear fit of winning percentage on run differential.
linfit <- lm(Wpct ~ RD, data = my_teams)
linfit
run_diff +
  geom_smooth(method = "lm", se = FALSE)
# Residual plot with the largest outliers labelled.
# NOTE(review): install.packages() runs unconditionally on every execution;
# consider guarding with requireNamespace().
install.packages("ggrepel")
library(ggrepel)
library(broom)
my_teams_aug <- augment(linfit, data = my_teams)
base_plot <- ggplot(my_teams_aug, aes(x = RD, y = .resid)) +
  geom_point(alpha = 0.3)+
  geom_hline(yintercept = 0, linetype = 3)+
  xlab("Run Differential") + ylab("Residual")
# The four teams with the largest absolute residuals.
highlight_teams <- my_teams_aug %>%
  arrange(desc(abs(.resid))) %>%
  head(4)
base_plot +
  geom_point(data = highlight_teams)+
  geom_text_repel(data = highlight_teams, aes(label=paste(teamID, yearID)))
|
# Tag `tree` with the "sdc_hierarchy" class (kept first, without
# duplicates) and return the tagged object.
.add_class <- function(tree) {
  tagged_classes <- unique(c("sdc_hierarchy", class(tree)))
  structure(tree, class = tagged_classes)
}
# Initializes a new tree consisting only of the rootnode: a one-row
# data.table (root, leaf, level) where the rootnode is its own leaf at
# level 1, tagged with the "sdc_hierarchy" class.
.init <- function(rootnode) {
  tree <- data.table(
    root = rootnode,
    leaf = rootnode,
    level = 1
  )
  # reuse .add_class() so the class tagging stays consistent everywhere
  .add_class(tree)
}
# Checks structural validity of `tree`: it must carry the "sdc_hierarchy"
# class and, when non-empty, contain no duplicated leaf names.
.is_valid <- function(tree) {
  if (!inherits(tree, "sdc_hierarchy")) {
    e <- "The provided input `tree` is not a sdc_hierarchy object."
    stop(e, call. = FALSE)
  }
  # leaf names must be unique; duplicates would make leaf lookups ambiguous
  if (nrow(tree) > 0) {
    if (sum(duplicated(tree$leaf)) > 0) {
      stop("non-unique leaf nodes detected!", call. = FALSE)
    }
  }
  TRUE
}
# Returns the names of all nodes in hierarchical (top-down) order, as
# produced by hier_convert(tree, "dt").
.all_nodes <- function(tree) {
  .is_valid(tree)
  hier_convert(tree, "dt")$name
}
# Returns the name of the rootnode (delegates to compiled code).
.rootnode <- function(tree) {
  rcpp_rootnode(tree = tree)
}
# Appends the rows in `new` to `tree` and re-applies the class attribute,
# which rbind() would otherwise drop.
.add_nodes <- function(tree, new) {
  tree <- rbind(tree, new)
  tree <- .add_class(tree)
  tree
}
# All direct children of a given leaf in the tree; `leaf` must be a
# single character string.
.children <- function(tree, leaf) {
  stopifnot(rlang::is_scalar_character(leaf))
  rcpp_children(tree = tree, leaf = leaf)
}
# Returns the number of direct children for a given leaf in the tree.
.nr_children <- function(tree, leaf) {
  length(.children(tree = tree, leaf = leaf))
}
# Returns TRUE if the given leaf has no children (i.e. is a terminal node).
.is_leaf <- function(tree, leaf) {
  .nr_children(tree = tree, leaf = leaf) == 0
}
# All siblings of a given (validated) leaf.
.siblings <- function(tree, leaf) {
  .is_valid_leaf(tree, leaf)
  rcpp_siblings(tree = tree, leaf = leaf)
}
# Returns the number of siblings for a given leaf in the tree.
.nr_siblings <- function(tree, leaf) {
  length(.siblings(tree = tree, leaf = leaf))
}
# Checks that `leaf` is a single character string naming an existing node
# in `tree`; errors otherwise, returns TRUE invisibly on success.
.is_valid_leaf <- function(tree, leaf) {
  stopifnot(rlang::is_scalar_character(leaf))
  if (!rcpp_exists(tree, leaf)) {
    # spaces around shQuote() so the message reads "leaf 'x' does not exist";
    # the original concatenation printed "leaf'x'does not exist"
    stop("leaf ", shQuote(leaf), " does not exist", call. = FALSE)
  }
  invisible(TRUE)
}
# Returns TRUE if a node named `leaf` exists in the tree (no error raised).
.exists <- function(tree, leaf) {
  stopifnot(rlang::is_scalar_character(leaf))
  rcpp_exists(tree, leaf)
}
# Returns TRUE if the given (existing) leaf is the rootnode.
.is_rootnode <- function(tree, leaf) {
  .is_valid_leaf(tree, leaf)
  rcpp_is_rootnode(tree = tree, leaf = leaf)
}
# Returns the path of node names from the rootnode down to the given leaf.
.path <- function(tree, leaf) {
  .is_valid_leaf(tree, leaf)
  rcpp_path(tree = tree, leaf = leaf)
}
# Numeric level (depth) of the given leaf in the tree; the rootnode is
# level 1 (set by .init()).
.level <- function(tree, leaf) {
  .is_valid_leaf(tree, leaf)
  rcpp_level(tree = tree, leaf = leaf)
}
# Numeric levels of all nodes of the given tree.
.levels <- function(tree) {
  rcpp_levels(tree = tree)
}
# Total number of levels in the tree.
.nr_levels <- function(tree) {
  rcpp_nr_levels(tree = tree)
}
# Returns TRUE if `leaf` is a bogus (duplicated) leaf, i.e. it has no
# siblings and is itself a leaf-node.
.is_bogus <- function(tree, leaf) {
  .is_valid_leaf(tree, leaf)
  rcpp_is_bogus(tree = tree, leaf = leaf)
}
# Returns the names of all bogus codes in the tree.
.bogus_codes <- function(tree) {
  rcpp_bogus_codes(tree = tree)
}
# Returns the name of the parent node of a given (validated) leaf.
.parent <- function(tree, leaf) {
  .is_valid_leaf(tree, leaf)
  rcpp_parent(tree = tree, leaf = leaf)
}
# Returns all codes contributing to (i.e. aggregated into) a specific leaf.
.contributing_leaves <- function(tree, leaf) {
  .is_valid_leaf(tree, leaf)
  rcpp_contributing_leaves(tree = tree, leaf = leaf)
}
# Sorts the tree top-down so that children directly follow their parents:
# builds "/"-joined root-to-leaf path strings, orders rows by those paths
# (data.table key), and tags the result with the `is_sorted` attribute.
.sort <- function(tree) {
  # silences the R CMD check NOTE about `path` used in data.table NSE below
  path <- NULL
  # only root node available; nothing to sort
  if (nrow(tree) == 1) {
    return(tree)
  }
  nn <- sort(.all_nodes(tree))
  # use "/"-separated paths to generate the correct order
  res <- lapply(nn, function(x) {
    p <- .path(tree, x)
    list(path = p, leaf = tail(p, 1))
  })
  # NOTE(review): 1:length(res) misbehaves for empty `res`; safe here only
  # because nrow(tree) > 1 guarantees at least one node -- seq_along() would
  # be more defensive.
  res <- data.table(
    path = sapply(1:length(res), function(x) {
      paste(res[[x]]$path, collapse = "/")
    }),
    leaf = sapply(1:length(res), function(x) {
      res[[x]]$leaf
    })
  )
  setkey(res, path)
  # create a new tree based on this order
  newtree <- list()
  length(newtree) <- nrow(tree)
  # NOTE(review): `ii` selects rows with a missing leaf, but newtree[[1]] is
  # immediately overwritten by the first loop iteration below -- confirm this
  # initialisation is intentional.
  ii <- which(tree$root == .rootnode(tree) & is.na(tree$leaf))
  newtree[[1]] <- tree[ii]
  for (i in 1:nrow(res)) {
    ind <- tree$leaf == res$leaf[i]
    newtree[[i]] <- tree[ind]
  }
  newtree <- rbindlist(newtree)
  newtree <- .add_class(newtree)
  # marker read back by .is_sorted()
  attr(newtree, "is_sorted") <- TRUE
  newtree
}
# Info about a single leaf in the tree (delegates to compiled code).
.info <- function(tree, leaf) {
  stopifnot(rlang::is_scalar_character(leaf))
  rcpp_info(tree = tree, leaf = leaf)
}
# Returns TRUE when the tree carries an `is_sorted` attribute that is TRUE
# (set by .sort()); FALSE otherwise, including when the attribute is absent.
.is_sorted <- function(tree) {
  # isTRUE() covers both the missing-attribute (NULL) and non-TRUE cases
  # that the original NULL check + `x == TRUE` handled separately
  isTRUE(attr(tree, "is_sorted"))
}
# Returns a data.table with one row per node and each hierarchy level in a
# separate column (first column = rootnode); shorter paths are padded with
# NA via fill = TRUE.
.tree_to_cols <- function(tree) {
  dt <- lapply(.all_nodes(tree), function(x) {
    data.table(t(.path(tree, x)))
  })
  rbindlist(dt, fill = TRUE)
}
# Computes the number of digits required to enumerate the children at each
# level of the tree.
.required_digits <- function(tree) {
  dt <- .tree_to_cols(tree)
  # only the rootnode exists -> a single digit suffices
  if (ncol(dt) == 1) {
    return(c(1))
  }
  req_digits <- rep(NA, .nr_levels(tree))
  req_digits[1] <- 1
  for (i in 2:ncol(dt)) {
    # unique parent/child pairs between level i-1 and level i
    tmp <- na.omit(unique(dt[, c(i - 1, i), with = FALSE]))
    s <- split(tmp, tmp[[1]])
    # the widest child count among all parents determines the digits needed
    req_digits[i] <- max(nchar(sapply(s, nrow)))
  }
  req_digits
}
# Flags minimal codes (codes required to build the hierarchy); presumably
# one logical per node -- confirm against rcpp_is_minimal_code.
.is_minimal_code <- function(tree) {
  rcpp_is_minimal_code(tree = tree)
}
# Returns the names of all minimal codes.
.minimal_codes <- function(tree) {
  rcpp_minimal_codes(tree = tree)
}
# Flags subtotal codes (codes not required to build the hierarchy).
.is_subtotal <- function(tree) {
  rcpp_is_subtotal(tree = tree)
}
# Returns the names of all subtotals.
.subtotals <- function(tree) {
  rcpp_subtotals(tree = tree)
}
# Removes `leaf` and all of its sub-leaves from the tree; setalloccol()
# re-establishes data.table's over-allocation after the compiled code
# returns a fresh object.
.prune <- function(tree, leaf) {
  stopifnot(rlang::is_scalar_character(leaf))
  tree <- rcpp_prune(tree = tree, leaf = leaf)
  tree <- data.table::setalloccol(tree)
  return(tree)
}
| /R/hier_helpers.R | no_license | bernhard-da/sdcHierarchies | R | false | false | 6,172 | r | # add the class attribute to the tree
# Tags `tree` with the "sdc_hierarchy" class (kept first, no duplicates).
.add_class <- function(tree) {
  class(tree) <- unique(c("sdc_hierarchy", class(tree)))
  tree
}
# Initializes a new tree consisting only of the rootnode: a one-row
# data.table where the rootnode is its own leaf at level 1.
.init <- function(rootnode) {
  tree <- data.table(
    root = rootnode,
    leaf = rootnode,
    level = 1
  )
  class(tree) <- unique(c("sdc_hierarchy", class(tree)))
  tree
}
# Checks structural validity of `tree`: it must carry the "sdc_hierarchy"
# class and, when non-empty, contain no duplicated leaf names.
.is_valid <- function(tree) {
  if (!inherits(tree, "sdc_hierarchy")) {
    e <- "The provided input `tree` is not a sdc_hierarchy object."
    stop(e, call. = FALSE)
  }
  # leaf names must be unique; duplicates would make leaf lookups ambiguous
  if (nrow(tree) > 0) {
    if (sum(duplicated(tree$leaf)) > 0) {
      stop("non-unique leaf nodes detected!", call. = FALSE)
    }
  }
  TRUE
}
# Returns the names of all nodes in hierarchical (top-down) order.
.all_nodes <- function(tree) {
  .is_valid(tree)
  hier_convert(tree, "dt")$name
}
# Returns the name of the rootnode (delegates to compiled code).
.rootnode <- function(tree) {
  rcpp_rootnode(tree = tree)
}
# Appends the rows in `new` to `tree` and re-applies the class attribute,
# which rbind() would otherwise drop.
.add_nodes <- function(tree, new) {
  tree <- rbind(tree, new)
  tree <- .add_class(tree)
  tree
}
# All direct children of a given leaf; `leaf` must be a single string.
.children <- function(tree, leaf) {
  stopifnot(rlang::is_scalar_character(leaf))
  rcpp_children(tree = tree, leaf = leaf)
}
# Returns the number of direct children for a given leaf in the tree.
.nr_children <- function(tree, leaf) {
  length(.children(tree = tree, leaf = leaf))
}
# Returns TRUE if the given leaf has no children (i.e. is a terminal node).
.is_leaf <- function(tree, leaf) {
  .nr_children(tree = tree, leaf = leaf) == 0
}
# All siblings of a given (validated) leaf.
.siblings <- function(tree, leaf) {
  .is_valid_leaf(tree, leaf)
  rcpp_siblings(tree = tree, leaf = leaf)
}
# Returns the number of siblings for a given leaf in the tree.
.nr_siblings <- function(tree, leaf) {
  length(.siblings(tree = tree, leaf = leaf))
}
# Checks that `leaf` is a single string naming an existing node; errors
# otherwise.
# NOTE(review): the stop() call concatenates without spaces and prints as
# "leaf'x'does not exist"; spaces around shQuote() would fix the message.
.is_valid_leaf <- function(tree, leaf) {
  stopifnot(rlang::is_scalar_character(leaf))
  if (!rcpp_exists(tree, leaf)) {
    stop("leaf", shQuote(leaf), "does not exist", call. = FALSE)
  }
  invisible(TRUE)
}
# Returns TRUE if a node named `leaf` exists in the tree (no error raised).
.exists <- function(tree, leaf) {
  stopifnot(rlang::is_scalar_character(leaf))
  rcpp_exists(tree, leaf)
}
# Returns TRUE if the given (existing) leaf is the rootnode.
.is_rootnode <- function(tree, leaf) {
  .is_valid_leaf(tree, leaf)
  rcpp_is_rootnode(tree = tree, leaf = leaf)
}
# Returns the path of node names from the rootnode down to the given leaf.
.path <- function(tree, leaf) {
  .is_valid_leaf(tree, leaf)
  rcpp_path(tree = tree, leaf = leaf)
}
# Numeric level (depth) of the given leaf; the rootnode is level 1.
.level <- function(tree, leaf) {
  .is_valid_leaf(tree, leaf)
  rcpp_level(tree = tree, leaf = leaf)
}
# Numeric levels of all nodes of the given tree.
.levels <- function(tree) {
  rcpp_levels(tree = tree)
}
# Total number of levels in the tree.
.nr_levels <- function(tree) {
  rcpp_nr_levels(tree = tree)
}
# Returns TRUE if `leaf` is a bogus (duplicated) leaf, i.e. it has no
# siblings and is itself a leaf-node.
.is_bogus <- function(tree, leaf) {
  .is_valid_leaf(tree, leaf)
  rcpp_is_bogus(tree = tree, leaf = leaf)
}
# Returns the names of all bogus codes in the tree.
.bogus_codes <- function(tree) {
  rcpp_bogus_codes(tree = tree)
}
# Returns the name of the parent node of a given (validated) leaf.
.parent <- function(tree, leaf) {
  .is_valid_leaf(tree, leaf)
  rcpp_parent(tree = tree, leaf = leaf)
}
# Returns all codes contributing to (i.e. aggregated into) a specific leaf.
.contributing_leaves <- function(tree, leaf) {
  .is_valid_leaf(tree, leaf)
  rcpp_contributing_leaves(tree = tree, leaf = leaf)
}
# Sorts the tree top-down so that children directly follow their parents:
# builds "/"-joined root-to-leaf path strings, orders rows by those paths
# (data.table key), and tags the result with the `is_sorted` attribute.
.sort <- function(tree) {
  # silences the R CMD check NOTE about `path` used in data.table NSE below
  path <- NULL
  # only root node available; nothing to sort
  if (nrow(tree) == 1) {
    return(tree)
  }
  nn <- sort(.all_nodes(tree))
  # use "/"-separated paths to generate the correct order
  res <- lapply(nn, function(x) {
    p <- .path(tree, x)
    list(path = p, leaf = tail(p, 1))
  })
  # NOTE(review): 1:length(res) misbehaves for empty `res`; safe here only
  # because nrow(tree) > 1 guarantees at least one node.
  res <- data.table(
    path = sapply(1:length(res), function(x) {
      paste(res[[x]]$path, collapse = "/")
    }),
    leaf = sapply(1:length(res), function(x) {
      res[[x]]$leaf
    })
  )
  setkey(res, path)
  # create a new tree based on this order
  newtree <- list()
  length(newtree) <- nrow(tree)
  # NOTE(review): `ii` selects rows with a missing leaf, but newtree[[1]] is
  # immediately overwritten by the first loop iteration below -- confirm
  # this initialisation is intentional.
  ii <- which(tree$root == .rootnode(tree) & is.na(tree$leaf))
  newtree[[1]] <- tree[ii]
  for (i in 1:nrow(res)) {
    ind <- tree$leaf == res$leaf[i]
    newtree[[i]] <- tree[ind]
  }
  newtree <- rbindlist(newtree)
  newtree <- .add_class(newtree)
  # marker read back by .is_sorted()
  attr(newtree, "is_sorted") <- TRUE
  newtree
}
# Info about a single leaf in the tree (delegates to compiled code).
.info <- function(tree, leaf) {
  stopifnot(rlang::is_scalar_character(leaf))
  rcpp_info(tree = tree, leaf = leaf)
}
# Is the tree sorted? TRUE only when .sort() has set the marker attribute.
# NOTE(review): the body could be simplified to isTRUE(attr(tree, "is_sorted")).
.is_sorted <- function(tree) {
  x <- attr(tree, "is_sorted")
  if (is.null(x)) {
    return(FALSE)
  }
  x == TRUE
}
# Returns a data.table with one row per node and each hierarchy level in a
# separate column; shorter paths are padded with NA via fill = TRUE.
.tree_to_cols <- function(tree) {
  dt <- lapply(.all_nodes(tree), function(x) {
    data.table(t(.path(tree, x)))
  })
  rbindlist(dt, fill = TRUE)
}
# Computes the number of digits required to enumerate the children at each
# level of the tree.
.required_digits <- function(tree) {
  dt <- .tree_to_cols(tree)
  # only the rootnode exists -> a single digit suffices
  if (ncol(dt) == 1) {
    return(c(1))
  }
  req_digits <- rep(NA, .nr_levels(tree))
  req_digits[1] <- 1
  for (i in 2:ncol(dt)) {
    # unique parent/child pairs between level i-1 and level i
    tmp <- na.omit(unique(dt[, c(i - 1, i), with = FALSE]))
    s <- split(tmp, tmp[[1]])
    # the widest child count among all parents determines the digits needed
    req_digits[i] <- max(nchar(sapply(s, nrow)))
  }
  req_digits
}
# Flags minimal codes (codes required to build the hierarchy); presumably
# one logical per node -- confirm against rcpp_is_minimal_code.
.is_minimal_code <- function(tree) {
  rcpp_is_minimal_code(tree = tree)
}
# Returns the names of all minimal codes.
.minimal_codes <- function(tree) {
  rcpp_minimal_codes(tree = tree)
}
# Flags subtotal codes (codes not required to build the hierarchy).
.is_subtotal <- function(tree) {
  rcpp_is_subtotal(tree = tree)
}
# Returns the names of all subtotals.
.subtotals <- function(tree) {
  rcpp_subtotals(tree = tree)
}
# Removes `leaf` and all of its sub-leaves from the tree; setalloccol()
# re-establishes data.table's over-allocation after the compiled code
# returns a fresh object.
.prune <- function(tree, leaf) {
  stopifnot(rlang::is_scalar_character(leaf))
  tree <- rcpp_prune(tree = tree, leaf = leaf)
  tree <- data.table::setalloccol(tree)
  return(tree)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/my_lm.R
\name{my_lm}
\alias{my_lm}
\title{Linear model function}
\usage{
my_lm(my_fml, my_data)
}
\arguments{
\item{my_fml}{'formula' class object.}
\item{my_data}{Input data frame.}
}
\value{
A table of coefficients for the linear regression, which contains `Estimate`, `Std. Error`, `t value`, and `Pr(>|t|)`.
}
\description{
This function fits a linear model.
}
\examples{
data(my_gapminder)
my_lm(my_fml = pop ~ gdpPercap, my_data = my_gapminder)
}
\keyword{prediction}
| /man/my_lm.Rd | no_license | celeste-zeng/R_package_development | R | false | true | 553 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/my_lm.R
\name{my_lm}
\alias{my_lm}
\title{Linear model function}
\usage{
my_lm(my_fml, my_data)
}
\arguments{
\item{my_fml}{'formula' class object.}
\item{my_data}{Input data frame.}
}
\value{
A table of coefficients for the linear regression, which contains `Estimate`, `Std. Error`, `t value`, and `Pr(>|t|)`.
}
\description{
This function fits a linear model.
}
\examples{
data(my_gapminder)
my_lm(my_fml = pop ~ gdpPercap, my_data = my_gapminder)
}
\keyword{prediction}
|
# Shiny server: renders a bar chart of payment ratios per collection agency
# and a 3D pie chart of portfolio debt for the month selected in the UI.
library(shiny)
library(UsingR)
library(lattice)   # barchart()
library(zoo)       # as.yearmon()
library(plotrix)   # pie3D()

# Monthly payment/debt figures per debt collection agency (DCA).
payments <- read.csv("payments1.csv", header = TRUE)

shinyServer(
  function(input, output) {
    # selected reporting month, as a zoo yearmon value
    x <- reactive({
      as.yearmon(input$yearmonth)
    })
    # bar chart of the payment ratio per agency for the selected month;
    # >= x() & <= x() keeps exactly that month
    output$newHist <- renderPlot({
      barchart(
        PAY_RATIO ~ EXTC_NAME,
        subset(payments, as.yearmon(REF_DATE) >= x() & as.yearmon(REF_DATE) <= x()),
        col = 'forestgreen'
      )
    })
    # fixed bucket labels for the pie slices
    lbls <- c("DCA1", "DCA2", "DCA3", "DCA4", "Unassigned")
    # 3D pie of debt amounts across the five DCA buckets for that month
    output$newPie <- renderPlot({
      pie3D(
        subset(payments, as.yearmon(REF_DATE) == x())$DEBT_AMOUNT,
        labels = lbls, explode = 0.2,
        main = "Pie Chart of Portfolio Debt",
        radius = 0.9, theta = pi / 3
      )
    })
  }
)
| /server.R | no_license | ntzortzis/Shiny_03 | R | false | false | 799 | r | library(shiny)
library(UsingR)
library(lattice)   # barchart()
library(zoo)       # as.yearmon()
library(plotrix)   # pie3D()
# Monthly payment/debt figures per debt collection agency (DCA).
payments<- read.csv("payments1.csv", header=T)
shinyServer(
  function(input,output){
    # selected reporting month, as a zoo yearmon value
    x <- reactive({as.yearmon(input$yearmonth)})
    #output$inputValue<-renderPrint({subset(payments,REF_DATE>=input$startdate & REF_DATE<=input$enddate)})
    # bar chart of the payment ratio per agency for the selected month;
    # >= x() & <= x() keeps exactly that month
    output$newHist<-renderPlot({
      barchart(PAY_RATIO ~ EXTC_NAME, subset(payments,as.yearmon(REF_DATE)>=x() & as.yearmon(REF_DATE)<=x()),col='forestgreen')
    })
    #mytable<-subset(payments,as.yearmon(REF_DATE)==x())
    # fixed bucket labels for the pie slices
    lbls <- c("DCA1","DCA2","DCA3", "DCA4", "Unassigned")
    # 3D pie of debt amounts across the five DCA buckets for that month
    output$newPie<-renderPlot({pie3D(subset(payments,as.yearmon(REF_DATE)==x())$DEBT_AMOUNT,labels=lbls,explode=0.2,main="Pie Chart of Portfolio Debt",radius=0.9,theta=pi/3)
    })
  }
)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.