| column | dtype | stats |
|---|---|---|
| content | large_string | lengths 0-6.46M |
| path | large_string | lengths 3-331 |
| license_type | large_string | 2 classes |
| repo_name | large_string | lengths 5-125 |
| language | large_string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 4-6.46M |
| extension | large_string | 75 classes |
| text | string | lengths 0-6.46M |
#' remove_apostrophe
#'
#' Remove apostrophes from column field.
#'
#' @param .data A data frame or data frame extension (e.g., tibble).
#' @param ... Name of the column(s) to remove apostrophes from.
#'
#' @return The mutated tibble.
#'
#' @export
remove_apostrophe <- function(.data, ...) {
# quote the expression
vars <- dplyr::enquos(..., .named = TRUE)
# modify quoted expression
mutate_vars <- purrr::map(vars, function(var) { dplyr::expr(gsub("'", "", !!var)) })
# replace apostrophes with an empty string
dplyr::mutate(.data, !!!mutate_vars)
}
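# usage sketch (not part of the original package code; the tibble and column
# name below are invented for illustration):
# obs <- tibble::tibble(species = c("Wilson's Storm-Petrel", "Cory's Shearwater"))
# remove_apostrophe(obs, species)
# #> species becomes "Wilsons Storm-Petrel", "Corys Shearwater"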
#' datetime_stampify
#'
#' Create a datetime column from character strings or POSIXct class.
#' Assumes datetime has a timezone of UTC, but this can be modified with the .tz argument.
#'
#' @inheritParams remove_apostrophe
#' @param .datetime Name of the column field that identifies the datetime.
#' @param .tz Time zone of the datetime.
#'
#' @importFrom dplyr "%>%"
#' @importFrom magrittr "%<>%"
#'
#' @return The mutated tibble with a new column labeled datetime_[.tz].
#'
#' @export
datetime_stampify <- function(.data, .datetime, .tz = "UTC") {
# quote the expression
datetime_name_var <- dplyr::enquo(.datetime)
# new column name as string
datetime_name_str <- paste0('datetime_', .tz %>% tolower())
# make sure .datetime field is type character, otherwise assume POSIXct class
if(!is.character(.data %>% dplyr::pull(!!datetime_name_var))) {
# issue a warning and assume the field is already POSIXct
warning("Datetime field is not a character type; assuming POSIXct class.")
# rename datetime column
.data %<>% dplyr::rename(!!datetime_name_str := !!datetime_name_var)
} else {
# split datetime string into date and time components and assign to new column name
.data %<>% dplyr::mutate(!!datetime_name_str := paste(stringr::str_sub(!!datetime_name_var, start = 1, end = 10), stringr::str_sub(!!datetime_name_var, start = 12, end = 19), sep = " "))
# convert datetime column to POSIXct class
.data %<>% dplyr::mutate(!!datetime_name_str := as.POSIXct(.data[[!!datetime_name_str]], format = "%Y-%m-%d %H:%M:%S", tz = .tz))
}
# explicitly return the mutated tibble
.data
}
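# usage sketch (not part of the original package code; assumes an ISO-8601-style
# character datetime such as "2019-06-01T12:34:56Z"):
# obs <- tibble::tibble(gps_time = c("2019-06-01T12:34:56Z", "2019-06-01T12:35:10Z"))
# datetime_stampify(obs, gps_time)
# #> adds a POSIXct column named 'datetime_utc'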
#' observer_codify
#'
#' Replace observer name with code from database.
#'
#' @inheritParams remove_apostrophe
#' @param .channel A DBI object that connects to the internal NEFSC Oracle database.
#' @param .observer_name Name of the column field that identifies the observer name.
#'
#' @importFrom dplyr "%>%"
#' @importFrom magrittr "%<>%"
#'
#' @return The mutated tibble.
#'
#' @export
observer_codify <- function(.data, .channel, .observer_name) {
# check database connection is from DBI
channel_valid <- inherits(.channel, "DBIConnection")
# issue error if invalid DBI connection
stopifnot(channel_valid)
# make sure there is no column named 'link', otherwise exit
if('link' %in% colnames(.data)) stop("Error: Cannot have a column named 'link'.")
# get database table of observer name codes and clean column names so they're snake case
dbs_obs_name <- DBI::dbGetQuery(conn = .channel, statement = "SELECT LASTNAME, FIRSTNAME, OBSCODE FROM MAMMAL_SOUND.MAMMAL_OBSERVER") %>% janitor::clean_names(case = 'snake')
# create field to link tables
dbs_obs_name %<>% dplyr::mutate(link = paste0(lastname, ', ', firstname))
# quote the expression
obs_name_var <- dplyr::enquo(.observer_name)
# set the linking 'by' variable in .data to 'link'
.data %<>% dplyr::mutate(!!obs_name_var := !!obs_name_var %>% toupper) %>% dplyr::rename(link = !!obs_name_var)
# left join in observer name codes
.data %<>% dplyr::left_join(dbs_obs_name %>% dplyr::select(link, obscode), by = "link")
# drop 'by' var in .data
.data %<>% dplyr::select(-link)
# rename obscode to .observer_name
.data %>% dplyr::rename(!!obs_name_var := obscode)
}
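# usage sketch (not part of the original package code; 'con' is assumed to be a
# valid DBIConnection to the NEFSC Oracle database and 'observer' is an invented
# column holding names formatted as "Lastname, Firstname"):
# con <- DBI::dbConnect(...)    # connection details omitted
# obs %>% observer_codify(.channel = con, .observer_name = observer)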
#' position_codify
#'
#' Replace observer position with code from database.
#'
#' @inheritParams remove_apostrophe
#' @inheritParams observer_codify
#' @param .observer_position Name of the column field that identifies the observer position.
#'
#' @importFrom dplyr "%>%"
#' @importFrom magrittr "%<>%"
#'
#' @return The mutated tibble.
#'
#' @export
position_codify <- function(.data, .channel, .observer_position) {
# check database connection is from DBI
channel_valid <- inherits(.channel, "DBIConnection")
# issue error if invalid DBI connection
stopifnot(channel_valid)
# make sure there is no column named 'link', otherwise exit
if('link' %in% colnames(.data)) stop("Error: Cannot have a column named 'link'.")
# get database table of observer position codes and clean column names so they're snake case
dbs_obs_pos <- DBI::dbGetQuery(conn = .channel, statement = "SELECT SIDEOFSHIP, SIDE FROM MAMMAL_SOUND.BIRDSIDEOFSHIP") %>% janitor::clean_names(case = 'snake')
# create field to link tables
dbs_obs_pos %<>% dplyr::rename(link = side)
# quote the expression
obs_pos_var <- dplyr::enquo(.observer_position)
# set the linking 'by' variable in .data to 'link'
.data %<>% dplyr::mutate(!!obs_pos_var := !!obs_pos_var %>% tolower) %>% dplyr::rename(link = !!obs_pos_var)
# left join in observer position codes
.data %<>% dplyr::left_join(dbs_obs_pos %>% dplyr::select(link, sideofship), by = "link")
# drop 'by' var in .data
.data %<>% dplyr::select(-link)
# rename sideofship code to .observer_position
.data %>% dplyr::rename(!!obs_pos_var := sideofship)
}
#' behavior_codify
#'
#' Replace seabird behavior with code from database.
#'
#' @inheritParams remove_apostrophe
#' @inheritParams observer_codify
#' @param .seabird_behavior Name of the column field that identifies the seabird behavior.
#'
#' @importFrom dplyr "%>%"
#' @importFrom magrittr "%<>%"
#'
#' @return The mutated tibble.
#'
#' @export
behavior_codify <- function(.data, .channel, .seabird_behavior) {
# check database connection is from DBI
channel_valid <- inherits(.channel, "DBIConnection")
# issue error if invalid DBI connection
stopifnot(channel_valid)
# make sure there is no column named 'link', otherwise exit
if('link' %in% colnames(.data)) stop("Error: Cannot have a column named 'link'.")
# quote the expression
bird_behave_var <- dplyr::enquo(.seabird_behavior)
# modify seabird behavior variable to match database descriptions
.data %<>% dplyr::mutate(!!bird_behave_var := dplyr::case_when(
!!bird_behave_var == 'Plunge Diving' ~ 'diving',
!!bird_behave_var == 'Blowing' ~ 'swimming',
!!bird_behave_var == 'Kleptoparasitizing' ~ 'piracy',
!!bird_behave_var == 'Stationary' ~ 'milling',
!!bird_behave_var == 'Resting' ~ 'sitting',
!!bird_behave_var == 'Under attack' ~ NA_character_,
!!bird_behave_var == 'Taking Off' ~ 'unknown flight',
!!bird_behave_var == 'Flying' ~ 'unknown flight',
!!bird_behave_var == 'Following' ~ 'following ship',
TRUE ~ as.character(!!bird_behave_var)
))
# get database table of seabird behavior codes and clean column names so they're snake case
dbs_bird_behave <- DBI::dbGetQuery(conn = .channel, statement = "SELECT BEHAVIORDESC, BEHAVIORCODE FROM MAMMAL_SOUND.BIRDBEHAVIOR") %>% janitor::clean_names(case = 'snake')
# create field to link tables
dbs_bird_behave %<>% dplyr::rename(link = behaviordesc)
# set the linking 'by' variable in .data to 'link'
.data %<>% dplyr::mutate(!!bird_behave_var := !!bird_behave_var %>% tolower) %>% dplyr::rename(link = !!bird_behave_var)
# left join in seabird behavior codes
.data %<>% dplyr::left_join(dbs_bird_behave %>% dplyr::select(link, behaviorcode), by = "link")
# drop 'by' var in .data
.data %<>% dplyr::select(-link)
# rename behaviorcode to .seabird_behavior
.data %>% dplyr::rename(!!bird_behave_var := behaviorcode)
}
#' direction_codify
#'
#' Replace flight direction with code from database.
#' Flight direction should be in degrees and is to be entered relative to the ship.
#'
#' @inheritParams remove_apostrophe
#' @param .flight_direction Name of the column field that identifies the flight direction.
#'
#' @importFrom dplyr "%>%"
#'
#' @return The mutated tibble.
#'
#' @export
direction_codify <- function(.data, .flight_direction) {
# quote the expression
flight_dir_var <- dplyr::enquo(.flight_direction)
# modify flight direction variable to match database descriptions
.data %>% dplyr::mutate(!!flight_dir_var := dplyr::case_when(
!!flight_dir_var == 'N' ~ '0',
!!flight_dir_var == 'NE' ~ '45',
!!flight_dir_var == 'E' ~ '90',
!!flight_dir_var == 'SE' ~ '135',
!!flight_dir_var == 'S' ~ '180',
!!flight_dir_var == 'SW' ~ '225',
!!flight_dir_var == 'W' ~ '270',
!!flight_dir_var == 'NW' ~ '315',
!!flight_dir_var == 'No Direction' ~ NA_character_,
!!flight_dir_var == '' ~ NA_character_,
TRUE ~ as.character(!!flight_dir_var)
))
}
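# usage sketch (not part of the original package code; 'flight_dir' is an
# invented column holding cardinal directions):
# obs <- tibble::tibble(flight_dir = c("N", "SE", "No Direction"))
# direction_codify(obs, flight_dir)
# #> flight_dir becomes "0", "135", NA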
#' age_codify
#'
#' Replace bird age with code from database.
#'
#' @inheritParams remove_apostrophe
#' @inheritParams observer_codify
#' @param .bird_age Name of the column field that identifies the bird age.
#'
#' @importFrom dplyr "%>%"
#' @importFrom magrittr "%<>%"
#'
#' @return The mutated tibble.
#'
#' @export
age_codify <- function(.data, .channel, .bird_age) {
# check database connection is from DBI
channel_valid <- inherits(.channel, "DBIConnection")
# issue error if invalid DBI connection
stopifnot(channel_valid)
# make sure there is no column named 'link', otherwise exit
if('link' %in% colnames(.data)) stop("Error: Cannot have a column named 'link'.")
# quote the expression
bird_age_var <- dplyr::enquo(.bird_age)
# modify bird age variable to match database descriptions
.data %<>% dplyr::mutate(!!bird_age_var := dplyr::case_when(
!!bird_age_var == 'Immature' ~ 'subadult',
!!bird_age_var == 'Juvenile' ~ 'subadult',
!!bird_age_var == 'First cycle' ~ 'subadult',
!!bird_age_var == 'Second cycle' ~ 'subadult',
!!bird_age_var == 'Third cycle' ~ 'subadult',
!!bird_age_var == 'Fourth cycle' ~ 'subadult',
!!bird_age_var == 'First year' ~ 'subadult',
!!bird_age_var == 'Second year' ~ 'subadult',
!!bird_age_var == 'Third year' ~ 'subadult',
!!bird_age_var == 'Fourth year' ~ 'subadult',
TRUE ~ as.character(!!bird_age_var)
))
# get database table of bird age codes and clean column names so they're snake case
dbs_bird_age <- DBI::dbGetQuery(conn = .channel, statement = "SELECT AGECODE, AGE FROM MAMMAL_SOUND.BIRDAGE") %>% janitor::clean_names(case = 'snake')
# create field to link tables (ensure lower case)
dbs_bird_age %<>% dplyr::rename(link = age) %>% dplyr::mutate(link = link %>% tolower)
# set the linking 'by' variable in .data to 'link'
.data %<>% dplyr::mutate(!!bird_age_var := !!bird_age_var %>% tolower) %>% dplyr::rename(link = !!bird_age_var)
# left join in bird age codes
.data %<>% dplyr::left_join(dbs_bird_age %>% dplyr::select(link, agecode), by = "link")
# drop 'by' var in .data
.data %<>% dplyr::select(-link)
# rename agecode to .bird_age
.data %>% dplyr::rename(!!bird_age_var := agecode)
}
#' association_codify
#'
#' Replace seabird association with code from database.
#'
#' @inheritParams remove_apostrophe
#' @inheritParams observer_codify
#' @param .seabird_association Name of the column field that identifies the seabird association.
#'
#' @importFrom dplyr "%>%"
#' @importFrom magrittr "%<>%"
#'
#' @return The mutated tibble.
#'
#' @export
association_codify <- function(.data, .channel, .seabird_association) {
# check database connection is from DBI
channel_valid <- inherits(.channel, "DBIConnection")
# issue error if invalid DBI connection
stopifnot(channel_valid)
# make sure there is no column named 'link', otherwise exit
if('link' %in% colnames(.data)) stop("Error: Cannot have a column named 'link'.")
# quote the expression
bird_assoc_var <- dplyr::enquo(.seabird_association)
# modify seabird association variable to match database descriptions
.data %<>% dplyr::mutate(!!bird_assoc_var := dplyr::case_when(
!!bird_assoc_var == 'Associated with or on a buoy' ~ 'buoy',
!!bird_assoc_var == 'Associated with observation platform' ~ 'observation platform',
!!bird_assoc_var == 'Sitting on observation platform' ~ 'observation platform',
!!bird_assoc_var == 'Associated with other species feeding in same location' ~ 'associated with other individuals',
!!bird_assoc_var == 'Approaching observation platform' ~ 'observation platform',
!!bird_assoc_var == 'Associated with sea weed' ~ 'floating weed',
!!bird_assoc_var == 'Associated with fishing vessel' ~ 'fishing vessel',
!!bird_assoc_var == 'Associated with cetaceans' ~ 'cetaceans',
!!bird_assoc_var == 'Associated with fish shoal' ~ 'fish shoal',
!!bird_assoc_var == 'Associated with other vessel (excluding fishing vessel; see code 26)' ~ 'non-fishing vessel',
TRUE ~ as.character(!!bird_assoc_var)
))
# get database table of seabird association codes and clean column names so they're snake case
dbs_bird_assoc <- DBI::dbGetQuery(conn = .channel, statement = "SELECT ASSOCDESC, ASSOCCODE FROM MAMMAL_SOUND.BIRDASSOCIATION") %>% janitor::clean_names(case = 'snake')
# create field to link tables
dbs_bird_assoc %<>% dplyr::rename(link = assocdesc)
# set the linking 'by' variable in .data to 'link'
.data %<>% dplyr::mutate(!!bird_assoc_var := !!bird_assoc_var %>% tolower) %>% dplyr::rename(link = !!bird_assoc_var)
# left join in seabird association codes
.data %<>% dplyr::left_join(dbs_bird_assoc %>% dplyr::select(link, assoccode), by = "link")
# drop 'by' var in .data
.data %<>% dplyr::select(-link)
# rename assoccode to .seabird_association
.data %>% dplyr::rename(!!bird_assoc_var := assoccode)
}
#' height_codify
#'
#' Replace flight height with code from database.
#'
#' @inheritParams remove_apostrophe
#' @inheritParams observer_codify
#' @param .flight_height Name of the column field that identifies the flight height.
#'
#' @importFrom dplyr "%>%"
#' @importFrom magrittr "%<>%"
#'
#' @return The mutated tibble.
#'
#' @export
height_codify <- function(.data, .channel, .flight_height) {
# check database connection is from DBI
channel_valid <- inherits(.channel, "DBIConnection")
# issue error if invalid DBI connection
stopifnot(channel_valid)
# make sure there is no column named 'link', otherwise exit
if('link' %in% colnames(.data)) stop("Error: Cannot have a column named 'link'.")
# get database table of flight height codes and clean column names so they're snake case
dbs_fly_high <- DBI::dbGetQuery(conn = .channel, statement = "SELECT HEIGHTRANGE, HEIGHTCODE FROM MAMMAL_SOUND.BIRDFLIGHTHEIGHT") %>% janitor::clean_names(case = 'snake')
# create field to link tables
dbs_fly_high %<>% dplyr::rename(link = heightrange)
# quote the expression
fly_high_var <- dplyr::enquo(.flight_height)
# set the linking 'by' variable in .data to 'link'
.data %<>% dplyr::mutate(!!fly_high_var := !!fly_high_var %>% tolower) %>% dplyr::rename(link = !!fly_high_var)
# left join in flight height codes
.data %<>% dplyr::left_join(dbs_fly_high %>% dplyr::select(link, heightcode), by = "link")
# drop 'by' var in .data
.data %<>% dplyr::select(-link)
# rename heightcode to .flight_height
.data %>% dplyr::rename(!!fly_high_var := heightcode)
}
#' plumage_codify
#'
#' Replace seabird plumage with code from database.
#'
#' @inheritParams remove_apostrophe
#' @inheritParams observer_codify
#' @param .seabird_plumage Name of the column field that identifies the seabird plumage.
#'
#' @importFrom dplyr "%>%"
#' @importFrom magrittr "%<>%"
#'
#' @return The mutated tibble.
#'
#' @export
plumage_codify <- function(.data, .channel, .seabird_plumage) {
# check database connection is from DBI
channel_valid <- inherits(.channel, "DBIConnection")
# issue error if invalid DBI connection
stopifnot(channel_valid)
# make sure there is no column named 'link', otherwise exit
if('link' %in% colnames(.data)) stop("Error: Cannot have a column named 'link'.")
# quote the expression
bird_plum_var <- dplyr::enquo(.seabird_plumage)
# modify seabird plumage variable to match database descriptions
.data %<>% dplyr::mutate(!!bird_plum_var := dplyr::case_when(
!!bird_plum_var == 'Non-breeding/Basic fall and winter plumage' ~ 'non-breeding',
!!bird_plum_var == 'Breeding/Alternate spring and summer plumage' ~ 'breeding',
!!bird_plum_var == 'Gannet plumage 1' ~ NA_character_,
!!bird_plum_var == 'Gannet plumage 2' ~ NA_character_,
TRUE ~ as.character(!!bird_plum_var)
))
# get database table of seabird plumage codes and clean column names so they're snake case
dbs_bird_plum <- DBI::dbGetQuery(conn = .channel, statement = "SELECT PLUMAGEDESC, PLUMAGECODE FROM MAMMAL_SOUND.BIRDPLUMAGE") %>% janitor::clean_names(case = 'snake')
# create field to link tables
dbs_bird_plum %<>% dplyr::rename(link = plumagedesc)
# set the linking 'by' variable in .data to 'link'
.data %<>% dplyr::mutate(!!bird_plum_var := !!bird_plum_var %>% tolower) %>% dplyr::rename(link = !!bird_plum_var)
# left join in seabird plumage codes
.data %<>% dplyr::left_join(dbs_bird_plum %>% dplyr::select(link, plumagecode), by = "link")
# drop 'by' var in .data
.data %<>% dplyr::select(-link)
# rename plumagecode to .seabird_plumage
.data %>% dplyr::rename(!!bird_plum_var := plumagecode)
}
#' sex_codify
#'
#' Replace seabird sex with code from database.
#'
#' @inheritParams remove_apostrophe
#' @param .sex Name of the column field that identifies the seabird sex.
#'
#' @importFrom dplyr "%>%"
#'
#' @return The mutated tibble.
#'
#' @export
sex_codify <- function(.data, .sex) {
# quote the expression
bird_sex_var <- dplyr::enquo(.sex)
# modify seabird sex variable to match database codes
.data %>% dplyr::mutate(!!bird_sex_var := dplyr::case_when(
!!bird_sex_var == 'Unknown' ~ '1',
!!bird_sex_var == 'Female' ~ '2',
!!bird_sex_var == 'Male' ~ '3',
TRUE ~ as.character(!!bird_sex_var)
))
}
| path: /R/clean_data.R | license_type: no_license | repo_name: jmhatch-NOAA/seebirdr | language: R | is_vendor: false | is_generated: false | length_bytes: 17,984 | extension: r |
#' Adult Data Set
#'
#' Extraction was done by Barry Becker from the 1994 Census database.
#' A set of reasonably clean records was extracted using the following
#' conditions: ((AAGE > 16) && (AGI > 100) && (AFNLWGT > 1) && (HRSWK > 0))
#'
#' @format A data frame with 32561 observations on the following 15 variables.
#' - `age`: Integer
#' - Number of years alive
#' - `workclass`: Factor
#' - Private, Self-emp-not-inc, Self-emp-inc, Federal-gov, Local-gov,
#' State-gov, Without-pay, Never-worked.
#' - `fnlwgt`: Numeric
#' - The variable represents the final weight, which is essentially a sampling weight.
#' See the names file listed in references for more details.
#' - `education`: Factor
#' - Highest level of education attained
#' - Bachelors, Some-college, 11th, HS-grad, Prof-school, Assoc-acdm,
#' Assoc-voc, 9th, 7th-8th, 12th, Masters, 1st-4th, 10th, Doctorate,
#' 5th-6th, Preschool.
#' - `education_num`: Numeric
#' - Number of years of education
#' - `marital_status`: Factor
#' - Married-civ-spouse, Divorced, Never-married, Separated, Widowed,
#' Married-spouse-absent, Married-AF-spouse
#' - `occupation`: Factor
#' - Tech-support, Craft-repair, Other-service, Sales, Exec-managerial,
#' Prof-specialty, Handlers-cleaners, Machine-op-inspct, Adm-clerical,
#' Farming-fishing, Transport-moving, Priv-house-serv, Protective-serv,
#' Armed-Forces.
#' - `relationship`: Factor
#' - Wife, Own-child, Husband, Not-in-family, Other-relative, Unmarried.
#' - `race`: Factor
#' - White, Asian-Pac-Islander, Amer-Indian-Eskimo, Other, Black.
#' - `sex`: Factor
#' - Female, Male
#' - `capital_gain`: Integer
#' - Income from investment sources, apart from wages/salary
#' - `capital_loss`: Integer
#' - Losses from investment sources, apart from wages/salary
#' - `hours_per_week`: Integer
#' - Number of hours worked per week
#' - `native_country`: Factor
#' - Country of origin
#' - United-States, Cambodia, England, Puerto-Rico, Canada, Germany,
#' Outlying-US(Guam-USVI-etc), India, Japan, Greece, South, China, Cuba,
#' Iran, Honduras, Philippines, Italy, Poland, Jamaica, Vietnam, Mexico,
#' Portugal, Ireland, France, Dominican-Republic, Laos, Ecuador, Taiwan,
#' Haiti, Columbia, Hungary, Guatemala, Nicaragua, Scotland, Thailand,
#' Yugoslavia, El-Salvador, Trinadad&Tobago, Peru, Hong, Holand-Netherlands.
#' - `income`: Factor
#' - Whether the income is greater than $50,000 or not.
#' - <=50K, >50K
#' @details
#' Prediction task is to determine whether a person makes over 50K a year.
#' @references
#' <https://archive.ics.uci.edu/ml/machine-learning-databases/adult/>
#'
#' <http://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.names>
#'
#' <https://archive.ics.uci.edu/ml/datasets/adult>
#' @source
#' Ronny Kohavi and Barry Becker
#' Data Mining and Visualization
#' Silicon Graphics.
#' e-mail: ronnyk '@' live.com for questions.
"adult"
| path: /R/adult_docs.R | license_type: no_license | repo_name: daviddalpiaz/ucidata | language: R | is_vendor: false | is_generated: false | length_bytes: 2,997 | extension: r |
shinyUI(fluidPage(
useShinyjs(),
# -- Add Tracking JS File
tags$head(includeScript("google-analytics.js")),
titlePanel("Matching Game"),
sidebarLayout(
sidebarPanel(
actionButton("new_game", "New Game"),
uiOutput("info")
),
mainPanel(
plotOutput("gameboard",
click = "game_click",
hover = hoverOpts(id = "game_hover", delay = 100, delayType = c("debounce", "throttle"))
)
)
)
))
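# usage note (not part of the original file): this ui.R is one half of a Shiny
# app; it assumes a matching server.R in the same directory that renders the
# 'gameboard' plot and the 'info' UI output, e.g. launched with
# shiny::runApp("matching-game").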
| path: /matching-game/ui.R | license_type: no_license | repo_name: anthonypileggi/avp-shiny | language: R | is_vendor: false | is_generated: false | length_bytes: 478 | extension: r |
Sys.setenv(LANG = "en")
library(showtext)
library(scales)
font.add("myriad", "/Users/acailliau/Downloads/Adobe Font Folio 11/Western Fonts/Myriad Pro/MyriadPro-Regular.otf")
showtext.auto()
library(ggplot2)
library(gridExtra)
library("ggrepel")
cbPalette <- c("#999999", "#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2", "#D55E00", "#CC79A7")
data = read.csv("/Users/acailliau/Google Drive/PhD/Dissertation/case-studies/ambulance-dispatching-system/violation_diagram_1.csv",
header = TRUE, sep = ",")
data2 = read.csv("/Users/acailliau/Google Drive/PhD/Dissertation/case-studies/ambulance-dispatching-system/violation_diagram_1_single_expert.csv",
header = TRUE, sep = ",")
pointsToLabel <- c()
data$facet <- ifelse(data$violation_uncertainty == 0, 2, 1)
data2$facet <- ifelse(data2$violation_uncertainty == 0, 2, 1)
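# replace exact zeros with a tiny positive value so those points survive the
# log2-transformed y-axis below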
data$violation_uncertainty <- pmax(.0000001, data$violation_uncertainty)
data2$violation_uncertainty <- pmax(.0000001, data2$violation_uncertainty)
breaks <- subset(data, violation_uncertainty > 0)[,2:3]
#breaks$uncertainty_spread <- round(breaks$uncertainty_spread, digits = 2)
#breaks$violation_uncertainty <- round(breaks$violation_uncertainty, digits = 4)
print(data)
p <- ggplot (data, aes(y = violation_uncertainty, x = uncertainty_spread))
p <- p + geom_point(data = subset(data, facet == 1), aes(colour = "a", shape = "a"))
p <- p + geom_point(data = subset(data, facet == 2), aes(colour = "a", shape = "a"))
p <- p + geom_point(data = subset(data2, facet == 2), aes(colour = "b", shape = "b"))
p <- p + geom_point(data = subset(data2, facet == 1), aes(colour = "b", shape = "b"))
p <- p + geom_text_repel(aes(label = combination),
data = subset(data, combination %in% pointsToLabel),
box.padding = unit(5, "pt"),
point.padding = unit(5, "pt"),
segment.color = 'grey70',
force = 10,
family="myriad",size=2.8222222)
p <- p + labs(y = "Violation Uncertainty (Log2)", x = "Uncertainty Spread")
p <- p + scale_x_continuous(breaks = c(0,.2,.4,.6),
labels = function (x) round(x, digits = 2))
p <- p + scale_y_continuous(breaks = c(0,.0000001,.125,.25,.5,1),
labels = function(x) { lapply(x, function(y) { if (is.na(y) | y <= .000001) { percent(0) } else { percent(y) } } ) },
trans = log2_trans())
p <- p + scale_color_manual(
name = NULL,
labels = c("Experts 1 to 5 combined", "Expert 1"),
breaks = c("a", "b"),
values = cbPalette) +
scale_shape_discrete(
name = NULL,
labels = c("Experts 1 to 5 combined", "Expert 1"),
breaks = c("a", "b")
)
p <- p + theme(legend.position = c(.86,.28)) + labs(col=NULL) +
theme(legend.key = element_rect(colour = NA, fill = NA), legend.margin = margin(t = -.1, r = .2, b = 0, l = 0, unit = 'cm')) +
theme (legend.key = element_rect(size = 5),
legend.key.size = unit(.9, 'lines'))
p <- p + facet_grid(facet~., scales = 'free', space = 'free')
p <- p + theme(strip.background = element_blank(),
strip.text = element_blank())
p <- p + theme(
axis.ticks.y=element_blank())
p <- p + theme(panel.grid.minor.y = element_blank())
p <- p + theme(text = element_text(family="myriad",size=8),
axis.text.x = element_text(angle=45,hjust=0.95),
panel.grid.minor = element_blank(),
plot.title = element_text(family="myriad",size=8),
plot.margin = unit(c(2,2,2,2),"pt"))
img_width = 4.527559
img_height = 2.5 #4.527559
ggsave(file="/Users/acailliau/Google Drive/PhD/Dissertation/case-studies/ambulance-dispatching-system/violation_diagram_1.pdf", p, width=img_width, height=img_height, dpi=300)
| path: /case-studies/ambulance-dispatching-system/Generate-Violation-Diagram.R | license_type: no_license | repo_name: ancailliau/phd-thesis | language: R | is_vendor: false | is_generated: false | length_bytes: 3,874 | extension: r |
rm(list=ls(all=T))
library(e1071)
B = 100
k=1
Accuracy_SVM = matrix(0, nrow=B, ncol=1)
final_table = matrix(0, nrow = 2, ncol = 2) # Table for confusion matrix
for(i in 1:B){
load(sprintf("C:/Users/Piotr/Documents/NMR.diabetes/data%d.RData",i))
train = unique(train)
test = unique(test)
Class.train = train[,1]
Class.test = test[,1]
mean.train = sapply(train[,2:189],mean)
std.train = sapply(train[,2:189],sd)
train = train[,2:189]
for (v in 1: length(train)){
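# Vast (variable stability) scaling: autoscale each variable, then weight it by
# mean/sd so variables with a stable signal (high mean relative to sd) dominate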
train[,v] = (train[,v]-mean.train[v])/ std.train[v]*(mean.train[v]/std.train[v])
}
test = test[,2:189]
for (p in 1:length(test)){
test[,p] = (test[,p]-mean.train[p])/std.train[p]*(mean.train[p]/std.train[p])
}
svm.model = svm(train,Class.train,kernel = "linear",scale=F)
svm.prediction = predict(svm.model, newdata = test)
final_table = final_table + table(Class.test,svm.prediction)
Accuracy_SVM[k,1] = 100*(sum(diag(final_table))/sum(final_table))
k=k+1
print(i)
flush.console()
}
outputFile = "NMR.diabetes.Vast.scaling_SVM.csv"
write.table(Accuracy_SVM, file = outputFile, sep = ",")
| path: /Chapter-4/NMR.diabetes.Vast.scaling.SVM.r | license_type: no_license | repo_name: Biospec/Application-of-chemometrics-for-the-robust-analysis-of-chemical-and-biochemical-data | language: R | is_vendor: false | is_generated: false | length_bytes: 1,156 | extension: r |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/diagnostics.R
\name{find_config}
\alias{find_config}
\title{find the lintr config file}
\usage{
find_config(filename)
}
\description{
find the lintr config file
}
\keyword{internal}
| path: /man/find_config.Rd | license_type: no_license | repo_name: kongdd/languageserver | language: R | is_vendor: false | is_generated: true | length_bytes: 260 | extension: rd |
### R code from vignette source 'pan-tr.Rnw'
| path: /data/genthat_extracted_code/pan/vignettes/pan-tr.R | license_type: no_license | repo_name: surayaaramli/typeRrh | language: R | is_vendor: false | is_generated: false | length_bytes: 46 | extension: r |
library(rstudioapi)
library(dplyr)
library(chron)
library(sf)
# library(tmap)
# import 'nwfscSurvey' package
library(nwfscSurvey)
# vignette("nwfscSurvey", package = "nwfscSurvey")
# Set working directory
current_path <- getActiveDocumentContext()$path
setwd(dirname(current_path))
print(getwd())
# import shapefiles
stns <- sf::st_read("WCGBTS_Grid_v2008_GCSWGS84.shp")
# sf::st_crs(poly)
# import haul data
haul_dat = PullHaul.fn(SurveyName = "NWFSC.Combo", YearRange=c(2003,5000))
# convert haul data to point spatial object
projcrs <- "+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0"
pts <- st_as_sf(x = haul_dat,
coords = c("longitude_dd", "latitude_dd"),
crs = projcrs)
# convert polygon shapefile to polygon spatial object
stns <- st_as_sf(stns)
# Get start and end years from haul data
yr_srt = substr(as.character(min(pts$date_yyyymmdd)),1,4)
yr_end = substr(as.character(max(pts$date_yyyymmdd)),1,4)
# pts$date_yyyymmdd <- dates(pts$date_yyyymmdd, format = "ymd") # Proposed by John Wallace
yr_str = paste0(yr_srt, "_", yr_end)
# spatial join points with polygons
res1 <- st_join(pts, stns) %>%
filter(!is.na(CentroidID)) %>%
group_by(CentroidID) %>%
tally(name = paste0("cntHauls_", yr_str))
st_geometry(res1) <- NULL
# join results to polygon shape
res2 = left_join(stns, res1, by = c("CentroidID"))
# keep the geometry on res2 so it can be mapped with addPolygons() below
# Export to CSV or ArcGIS geodatabase table
library(readr)
outCSV = paste0("cntHauls_", yr_str, ".csv")
write_csv(res1, outCSV, na = "", append = FALSE)
# following code taken from https://hansenjohnson.org/post/leaflet-map-with-inset-in-r/
# make a few points
# pts = data.frame(lon = c(-65.3, -65.7, -64.1),
# lat = c(43.4, 43, 42.9))
# build a polygon (in this case the 'Roseway Basin Area To Be Avoided')
# ply = data.frame(lon = c(-64.916667, -64.983333, -65.516667, -66.083333),
# lat = c(43.266667, 42.783333, 42.65, 42.866667))
# required libraries
library(leaflet, quietly = T, warn.conflicts = F)
library(mapview, quietly = T, warn.conflicts = F)
# start basemap (note the argument to hide the zoom buttons)
map <- leaflet(options = leafletOptions(zoomControl = FALSE)) %>%
# add ocean basemap
addProviderTiles(providers$Esri.OceanBasemap) %>%
# focus map in a certain area / zoom level
setView(lng = -121, lat = 41, zoom = 6) %>%
# setMaxBounds(lng1 = -126, lat1 = 32, lng2 = -116, lat2 = 50) %>%
# add inset map
addMiniMap(
tiles = providers$Esri.OceanBasemap,
position = 'topright',
width = 200, height = 200,
toggleDisplay = FALSE) %>%
# add graticules with nice labels (recommended for static plot)
addSimpleGraticule(interval = 2) %>%
# add graticules from a NOAA webserver (recommended for interactive plot)
# addWMSTiles(
# "https://gis.ngdc.noaa.gov/arcgis/services/graticule/MapServer/WMSServer/",
# layers = c("1-degree grid", "5-degree grid"),
# options = WMSTileOptions(format = "image/png8", transparent = TRUE),
# attribution = NULL,group = 'Graticules') %>%
# add points (as circle markers)
# addCircleMarkers(data = pts, lng = ~geometry(x), lat = ~geometry(y),
# weight = 0.5,
# col = 'black',
# fillColor = 'darkslategrey',
# radius = 4,
# fillOpacity = 0.9,
# stroke = T,
# label = ~paste0('Point at: ',
# as.character(round(lat,3)), ', ',
# as.character(round(lon,3))),
# group = 'Points') %>%
#
# add lines
# addPolylines(data = lin, ~lon, ~lat,
# weight = 3,
# color = 'red',
# popup = 'This is a line!',
# smoothFactor = 3,
# group = 'Lines') %>%
# add polygons
addPolygons(data=res2,
weight = 1,
color = 'grey',
# fillColor = 'grey',
# fill = T,
# fillOpacity = 0.25,
stroke = T,
dashArray = c(5,5),
smoothFactor = 3,
options = pathOptions(clickable = F),
group = 'Polygons')
# show map
map
# save map as static image
mapshot(map, file = 'leaflet_map.png')
| path: /haulsByStn.R | license_type: no_license | repo_name: Curt-Whitmire-NOAA/SurveyCoverage | language: R | is_vendor: false | is_generated: false | length_bytes: 4,411 | extension: r |
library(rstudioapi)
library(dplyr)
library(chron)
library(sf)
# library(tmap)
# import 'nwfscSurvey' package
library(nwfscSurvey)
# vignette("nwfscSurvey", package = "nwfscSurvey")
# Set working directory
current_path <- getActiveDocumentContext()$path
setwd(dirname(current_path))
print(getwd())
# import shapefiles
stns <- sf::st_read("WCGBTS_Grid_v2008_GCSWGS84.shp")
# sf::st_crs(poly)
# import haul data
haul_dat = PullHaul.fn(SurveyName = "NWFSC.Combo", YearRange=c(2003,5000))
# convert haul data to point spatial object
projcrs <- "+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0"
pts <- st_as_sf(x = haul_dat,
coords = c("longitude_dd", "latitude_dd"),
crs = st_crs(poly))
# convert polygon shapefile to polygon spatial object
stns <- st_as_sf(stns)
# Get start and end years from haul data
yr_srt = substr(as.character(min(pts$date_yyyymmdd)),1,4)
yr_end = substr(as.character(max(pts$date_yyyymmdd)),1,4)
# pts$date_yyyymmdd <- dates(pts$date_yyyymmdd, format = "ymd") # Proposed by John Wallace
yr_str = paste0(yr_srt, "_", yr_end)
# spatial join points with polygons
res1 <- st_join(pts, stns) %>%
filter(!is.na(CentroidID)) %>%
group_by(CentroidID) %>%
tally(name = paste0("cntHauls_", yr_str))
st_geometry(res1) <- NULL
# join results to polygon shape
res2 = left_join(stns, res1, by = c("CentroidID"))
# st_geometry(res2) <- NULL  # keep the geometry: addPolygons(data = res2) below needs a spatial object
# Export to CSV or ArcGIS geodatabase table
library(readr)
outCSV = paste0("cntHauls_", yr_str, ".csv")
write_csv(res1, outCSV, na = "", append = FALSE)
# following code taken from https://hansenjohnson.org/post/leaflet-map-with-inset-in-r/
# make a few points
# pts = data.frame(lon = c(-65.3, -65.7, -64.1),
# lat = c(43.4, 43, 42.9))
# build a polygon (in this case the 'Roseway Basin Area To Be Avoided')
# ply = data.frame(lon = c(-64.916667, -64.983333, -65.516667, -66.083333),
# lat = c(43.266667, 42.783333, 42.65, 42.866667))
# required libraries
library(leaflet, quietly = T, warn.conflicts = F)
library(mapview, quietly = T, warn.conflicts = F)
# start basemap (note the argument to hide the zoom buttons)
map <- leaflet(options = leafletOptions(zoomControl = FALSE)) %>%
# add ocean basemap
addProviderTiles(providers$Esri.OceanBasemap) %>%
# focus map in a certain area / zoom level
setView(lng = -121, lat = 41, zoom = 6) %>%
# setMaxBounds(lng1 = -126, lat1 = 32, lng2 = -116, lat2 = 50) %>%
# add inset map
addMiniMap(
tiles = providers$Esri.OceanBasemap,
position = 'topright',
width = 200, height = 200,
toggleDisplay = FALSE) %>%
# add graticules with nice labels (recommended for static plot)
addSimpleGraticule(interval = 2) %>%
# add graticules from a NOAA webserver (recommended for interactive plot)
# addWMSTiles(
# "https://gis.ngdc.noaa.gov/arcgis/services/graticule/MapServer/WMSServer/",
# layers = c("1-degree grid", "5-degree grid"),
# options = WMSTileOptions(format = "image/png8", transparent = TRUE),
# attribution = NULL,group = 'Graticules') %>%
# add points (as circle markers)
# addCircleMarkers(data = pts, lng = ~geometry(x), lat = ~geometry(y),
# weight = 0.5,
# col = 'black',
# fillColor = 'darkslategrey',
# radius = 4,
# fillOpacity = 0.9,
# stroke = T,
# label = ~paste0('Point at: ',
# as.character(round(lat,3)), ', ',
# as.character(round(lon,3))),
# group = 'Points') %>%
#
# add lines
# addPolylines(data = lin, ~lon, ~lat,
# weight = 3,
# color = 'red',
# popup = 'This is a line!',
# smoothFactor = 3,
# group = 'Lines') %>%
# add polygons
addPolygons(data=res2,
weight = 1,
color = 'grey',
# fillColor = 'grey',
# fill = T,
# fillOpacity = 0.25,
stroke = T,
dashArray = c(5,5),
smoothFactor = 3,
options = pathOptions(clickable = F),
group = 'Polygons')
# show map
map
# save map as static image
mapshot(map, file = 'leaflet_map.png')
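# Illustrative sketch (not from the original analysis): the st_join()/tally()
# pattern used above, shown on a tiny made-up grid so the counting step is easy
# to follow. Assumes sf and dplyr are loaded as earlier in this script;
# 'toy_grid' and 'toy_pts' are hypothetical names and coordinates.
toy_grid <- sf::st_sf(
  CentroidID = c("A", "B"),
  geometry = sf::st_sfc(
    sf::st_polygon(list(rbind(c(0, 0), c(1, 0), c(1, 1), c(0, 1), c(0, 0)))),
    sf::st_polygon(list(rbind(c(1, 0), c(2, 0), c(2, 1), c(1, 1), c(1, 0)))),
    crs = 4326))
toy_pts <- sf::st_as_sf(data.frame(lon = c(0.5, 0.6, 1.5), lat = c(0.5, 0.2, 0.5)),
                        coords = c("lon", "lat"), crs = 4326)
sf::st_join(toy_pts, toy_grid) %>%   # each point inherits the cell it falls in
  filter(!is.na(CentroidID)) %>%
  group_by(CentroidID) %>%
  tally(name = "cntHauls")           # counts per cell: A = 2, B = 1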
|
MyClass =
function(x, ...)
{
structure(list(x, ...), class = "MyClass")
}
plot.MyClass =
function(x, y, ...)
{
plot(1:10, main = "MyClass")
}
|
/Pkg/R/myclass.R
|
no_license
|
dsidavis/RFundamentals
|
R
| false
| false
| 150
|
r
|
MyClass =
function(x, ...)
{
structure(list(x, ...), class = "MyClass")
}
plot.MyClass =
function(x, y, ...)
{
plot(1:10, main = "MyClass")
}
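# Minimal usage sketch (hypothetical, not in the original file) showing S3
# dispatch: plot() sees the "MyClass" attribute set by structure() and hands
# the object to plot.MyClass(). 'obj' is a hypothetical name.
obj <- MyClass(1:3, letters[1:3])
class(obj)   # "MyClass"
plot(obj)    # dispatches to plot.MyClass() and draws its demo 1:10 plot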
|
library(ggplot2)
library(apaTables)
library(officer)
make_lm_plots<-function(model, output_path, reg_table, reg_table_name){
word_doc <- read_docx()
p1<-ggplot(model, aes(.fitted, .resid))+geom_point()
p1<-p1+stat_smooth(method="loess")+geom_hline(yintercept=0, col="red", linetype="dashed")
p1<-p1+xlab("Fitted values")+ylab("Residuals")
p1<-p1+ggtitle("Residual vs Fitted Plot")+theme_bw()
ggsave("FittedvResid.png",
p1,
width = 4,
height = 3,
units = "in")
word_doc <- body_add_img(word_doc,
src = "FittedvResid.png",
width = 4,
height = 3)
p3<-ggplot(model, aes(.fitted, sqrt(abs(.stdresid))))+geom_point(na.rm=TRUE)
p3<-p3+stat_smooth(method="loess", na.rm = TRUE)+xlab("Fitted Value")
p3<-p3+ylab(expression(sqrt("|Standardized residuals|")))
p3<-p3+ggtitle("Scale-Location")+theme_bw()
ggsave("Scale-Location.png",
p3,
width = 4,
height = 3,
units = "in")
word_doc <- body_add_img(word_doc,
src = "Scale-Location.png",
width = 4,
height = 3)
p4<-ggplot(model, aes(seq_along(.cooksd), .cooksd))+geom_bar(stat="identity", position="identity")
p4<-p4+xlab("Obs. Number")+ylab("Cook's distance")
p4<-p4+ggtitle("Cook's distance")+theme_bw()
ggsave("Cook's distance.png",
p4,
width = 4,
height = 3,
units = "in")
word_doc <- body_add_img(word_doc,
src = "Cook's distance.png",
width = 4,
height = 3)
p5<-ggplot(model, aes(.hat, .stdresid))+geom_point(aes(size=.cooksd), na.rm=TRUE)
p5<-p5+stat_smooth(method="loess", na.rm=TRUE)
p5<-p5+xlab("Leverage")+ylab("Standardized Residuals")
p5<-p5+ggtitle("Residual vs Leverage Plot")
p5<-p5+scale_size_continuous("Cook's Distance", range=c(1,5))
p5<-p5+theme_bw()+theme(legend.position="bottom")
ggsave("Residual vs Leverage Plot.png",
p5,
width = 4,
height = 3,
units = "in")
word_doc <- body_add_img(word_doc,
src = "Residual vs Leverage Plot.png",
width = 4,
height = 3)
p6<-ggplot(model, aes(.hat, .cooksd))+geom_point(na.rm=TRUE)+stat_smooth(method="loess", na.rm=TRUE)
p6<-p6+xlab("Leverage hii")+ylab("Cook's Distance")
p6<-p6+ggtitle("Cook's dist vs Leverage hii/(1-hii)")
p6<-p6+geom_abline(slope=seq(0,3,0.5), color="gray", linetype="dashed")
p6<-p6+theme_bw()
ggsave("Cook's dist vs Leverage.png",
p6,
width = 4,
height = 3,
units = "in")
word_doc <- body_add_img(word_doc,
src = "Cook's dist vs Leverage.png",
width = 4,
height = 3)
print(word_doc, target = output_path)
unlink("Cook's dist vs Leverage.png")
unlink("Cook's distance.png")
unlink("FittedvResid.png")
unlink("Scale-Location.png")
unlink("Residual vs Leverage Plot.png")
if(reg_table == 1){
apa.reg.table(
      model,
filename = reg_table_name,
table.number = NA,
prop.var.conf.level = 0.95
)
}
}
#Example code
mod <- lm(mpg ~ wt, data = mtcars)
make_lm_plots(mod, "test.docx", 1, "table.doc")
#ggplot code from:
#https://rpubs.com/therimalaya/43190
#helpful YT video with officeR tutorial:
#https://www.youtube.com/watch?v=HCc3z9BGpJQ
|
/package.R
|
no_license
|
ng4567/LM2APA
|
R
| false
| false
| 3,592
|
r
|
library(ggplot2)
library(apaTables)
library(officer)
make_lm_plots<-function(model, output_path, reg_table, reg_table_name){
word_doc <- read_docx()
p1<-ggplot(model, aes(.fitted, .resid))+geom_point()
p1<-p1+stat_smooth(method="loess")+geom_hline(yintercept=0, col="red", linetype="dashed")
p1<-p1+xlab("Fitted values")+ylab("Residuals")
p1<-p1+ggtitle("Residual vs Fitted Plot")+theme_bw()
ggsave("FittedvResid.png",
p1,
width = 4,
height = 3,
units = "in")
word_doc <- body_add_img(word_doc,
src = "FittedvResid.png",
width = 4,
height = 3)
p3<-ggplot(model, aes(.fitted, sqrt(abs(.stdresid))))+geom_point(na.rm=TRUE)
p3<-p3+stat_smooth(method="loess", na.rm = TRUE)+xlab("Fitted Value")
p3<-p3+ylab(expression(sqrt("|Standardized residuals|")))
p3<-p3+ggtitle("Scale-Location")+theme_bw()
ggsave("Scale-Location.png",
p3,
width = 4,
height = 3,
units = "in")
word_doc <- body_add_img(word_doc,
src = "Scale-Location.png",
width = 4,
height = 3)
p4<-ggplot(model, aes(seq_along(.cooksd), .cooksd))+geom_bar(stat="identity", position="identity")
p4<-p4+xlab("Obs. Number")+ylab("Cook's distance")
p4<-p4+ggtitle("Cook's distance")+theme_bw()
ggsave("Cook's distance.png",
p4,
width = 4,
height = 3,
units = "in")
word_doc <- body_add_img(word_doc,
src = "Cook's distance.png",
width = 4,
height = 3)
p5<-ggplot(model, aes(.hat, .stdresid))+geom_point(aes(size=.cooksd), na.rm=TRUE)
p5<-p5+stat_smooth(method="loess", na.rm=TRUE)
p5<-p5+xlab("Leverage")+ylab("Standardized Residuals")
p5<-p5+ggtitle("Residual vs Leverage Plot")
p5<-p5+scale_size_continuous("Cook's Distance", range=c(1,5))
p5<-p5+theme_bw()+theme(legend.position="bottom")
ggsave("Residual vs Leverage Plot.png",
p5,
width = 4,
height = 3,
units = "in")
word_doc <- body_add_img(word_doc,
src = "Residual vs Leverage Plot.png",
width = 4,
height = 3)
p6<-ggplot(model, aes(.hat, .cooksd))+geom_point(na.rm=TRUE)+stat_smooth(method="loess", na.rm=TRUE)
p6<-p6+xlab("Leverage hii")+ylab("Cook's Distance")
p6<-p6+ggtitle("Cook's dist vs Leverage hii/(1-hii)")
p6<-p6+geom_abline(slope=seq(0,3,0.5), color="gray", linetype="dashed")
p6<-p6+theme_bw()
ggsave("Cook's dist vs Leverage.png",
p6,
width = 4,
height = 3,
units = "in")
word_doc <- body_add_img(word_doc,
src = "Cook's dist vs Leverage.png",
width = 4,
height = 3)
print(word_doc, target = output_path)
unlink("Cook's dist vs Leverage.png")
unlink("Cook's distance.png")
unlink("FittedvResid.png")
unlink("Scale-Location.png")
unlink("Residual vs Leverage Plot.png")
if(reg_table == 1){
apa.reg.table(
      model,
filename = reg_table_name,
table.number = NA,
prop.var.conf.level = 0.95
)
}
}
#Example code
mod <- lm(mpg ~ wt, data = mtcars)
make_lm_plots(mod, "test.docx", 1, "table.doc")
#ggplot code from:
#https://rpubs.com/therimalaya/43190
#helpful YT video with officeR tutorial:
#https://www.youtube.com/watch?v=HCc3z9BGpJQ
|
bioinformatics <- 1
is <- 2
f_o_r <- 3
cool <- 4
people <- 5
print(bioinformatics + is + f_o_r + cool + people)
|
/Assignment-1/6.R
|
no_license
|
VivianeLovatel/Brasil_2019
|
R
| false
| false
| 111
|
r
|
bioinformatics <- 1
is <- 2
f_o_r <- 3
cool <- 4
people <- 5
print(bioinformatics + is + f_o_r + cool + people)
|
#Load .csv file
data <- read.csv("C:/Users/weiyi/Desktop/R/exam2/Q2/CasinoA.csv", header = T, stringsAsFactors = T)
str(data)
summary(data)
#Explore "Source"
data$Source
unique(data$Source)
table(data$Source)
#Change the data type for Source to a factor variable
data$Source <- factor(data$Source)
str(data)
#Create dummy variables for each of the categories
data$AAA <- as.logical(0)
data$WALK <- as.logical(0)
data$WEB <- as.logical(0)
str(data)
for (i in 1:nrow(data)){
if (data$Source[i]=="AAA")
data$AAA[i]<-1
else if (data$Source[i]=="WALK")
data$WALK[i]<-1
else
data$WEB[i]<-1}
View(data)
model1 <- lm(data$Total.Spend ~ data$Gender + data$Age + data$AAA + data$WALK)
summary(model1)
model2 <- lm(data$Total.Spend ~ data$Age + data$WEB + data$AAA)
summary(model2)
model2
for (i in 1:nrow(data)){
if (data$Source[i]=="WEB")
data$WEB[i]<-1
else if (data$Source[i]=="WALK")
data$WALK[i]<-1
else
data$AAA[i]<-1}
View(data)
model2 <- lm(data$Total.Spend ~ data$Gender + data$Age + data$WEB + data$WALK)
summary(model2)
#Correlation analysis
sapply(data, is.numeric)
num_data <- data[, sapply(data, is.numeric)]
cor(num_data, use = "complete.obs", method = "pearson")
#Linear regression model
model1 <- lm(data$Total.Spend ~ data$Gender + data$Age + data$WEB + data$WALK)
summary(model1)
model2 <- lm(data$Total.Spend ~ data$Age + data$AAA + data$WALK)
summary(model2)
#Subset age
age20_30 <- subset(data, data$Age <= 30)
summary(age20_30)
sum(age20_30$Total.Spend)
age30_40 <- subset(data, data$Age > 30 & data$Age <= 40)
summary(age30_40)
sum(age30_40$Total.Spend)
age40_50 <- subset(data, data$Age > 40 & data$Age <= 50)
summary(age40_50)
sum(age40_50$Total.Spend)
age50_60 <- subset(data, data$Age > 50 & data$Age <= 60)
summary(age50_60)
sum(age50_60$Total.Spend)
age60 <- subset(data, data$Age > 60)
summary(age60)
sum(age60$Total.Spend)
player.web <- subset(data, data$Source == "WEB")
summary(player.web)
player.aaa <- subset(data, data$Source == "AAA")
summary(player.aaa)
player.walk <- subset(data, data$Source == "WALK")
summary(player.walk)
|
/Q2.R
|
no_license
|
dduwill/BA-Exam-Source
|
R
| false
| false
| 2,205
|
r
|
#Load .csv file
data <- read.csv("C:/Users/weiyi/Desktop/R/exam2/Q2/CasinoA.csv", header = T, stringsAsFactors = T)
str(data)
summary(data)
#Explore "Source"
data$Source
unique(data$Source)
table(data$Source)
#Change the data type for Source to a factor variable
data$Source <- factor(data$Source)
str(data)
#Create dummy variables for each of the categories
data$AAA <- as.logical(0)
data$WALK <- as.logical(0)
data$WEB <- as.logical(0)
str(data)
for (i in 1:nrow(data)){
if (data$Source[i]=="AAA")
data$AAA[i]<-1
else if (data$Source[i]=="WALK")
data$WALK[i]<-1
else
data$WEB[i]<-1}
View(data)
model1 <- lm(data$Total.Spend ~ data$Gender + data$Age + data$AAA + data$WALK)
summary(model1)
model2 <- lm(data$Total.Spend ~ data$Age + data$WEB + data$AAA)
summary(model2)
model2
for (i in 1:nrow(data)){
if (data$Source[i]=="WEB")
data$WEB[i]<-1
else if (data$Source[i]=="WALK")
data$WALK[i]<-1
else
data$AAA[i]<-1}
View(data)
model2 <- lm(data$Total.Spend ~ data$Gender + data$Age + data$WEB + data$WALK)
summary(model2)
#Correlation analysis
sapply(data, is.numeric)
num_data <- data[, sapply(data, is.numeric)]
cor(num_data, use = "complete.obs", method = "pearson")
#Linear regression model
model1 <- lm(data$Total.Spend ~ data$Gender + data$Age + data$WEB + data$WALK)
summary(model1)
model2 <- lm(data$Total.Spend ~ data$Age + data$AAA + data$WALK)
summary(model2)
#Subset age
age20_30 <- subset(data, data$Age <= 30)
summary(age20_30)
sum(age20_30$Total.Spend)
age30_40 <- subset(data, data$Age > 30 & data$Age <= 40)
summary(age30_40)
sum(age30_40$Total.Spend)
age40_50 <- subset(data, data$Age > 40 & data$Age <= 50)
summary(age40_50)
sum(age40_50$Total.Spend)
age50_60 <- subset(data, data$Age > 50 & data$Age <= 60)
summary(age50_60)
sum(age50_60$Total.Spend)
age60 <- subset(data, data$Age > 60)
summary(age60)
sum(age60$Total.Spend)
player.web <- subset(data, data$Source == "WEB")
summary(player.web)
player.aaa <- subset(data, data$Source == "AAA")
summary(player.aaa)
player.walk <- subset(data, data$Source == "WALK")
summary(player.walk)
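#Illustrative alternative (not the approach used above): the two for-loops that
#build the dummies can be replaced by vectorized comparisons that give the same
#0/1 columns; the same 'data' object from read.csv() is assumed.
data$AAA  <- as.numeric(data$Source == "AAA")
data$WALK <- as.numeric(data$Source == "WALK")
data$WEB  <- as.numeric(data$Source == "WEB")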
|
suppressPackageStartupMessages({
library(tercen)
library(tercenApi)
library(tidyr)
library(dplyr)
library(dtplyr)
})
ctx <- tercenCtx()
method <- ctx$op.value("method", as.character, "mean_per_row")
df <- ctx %>%
select(.ri, .ci, .y) %>%
dtplyr::lazy_dt() %>%
complete(.ri, .ci)
if(method != "constant") {
if(method == "mean_per_column") df <- df %>% group_by(.ci)
if(method == "mean_per_row") df <- df %>% group_by(.ri)
impute.mean <- function(x) replace(x, is.na(x), mean(x, na.rm = TRUE))
df_out <- df %>% mutate(.y = impute.mean(.y)) %>% rename(imputed = .y)
} else {
val <- ctx$op.value("value", as.double, 0)
df_out <- df %>% replace_na(list(.y = val)) %>% rename(imputed = .y)
}
df_out <- df_out %>% as_tibble() %>%
ctx$addNamespace()
df_out %>%
ctx$save()
# build_test_data(df_out, ctx, test_name = "test", test_folder = "./tests")
|
/main.R
|
no_license
|
tercen/impute_operator
|
R
| false
| false
| 894
|
r
|
suppressPackageStartupMessages({
library(tercen)
library(tercenApi)
library(tidyr)
library(dplyr)
library(dtplyr)
})
ctx <- tercenCtx()
method <- ctx$op.value("method", as.character, "mean_per_row")
df <- ctx %>%
select(.ri, .ci, .y) %>%
dtplyr::lazy_dt() %>%
complete(.ri, .ci)
if(method != "constant") {
if(method == "mean_per_column") df <- df %>% group_by(.ci)
if(method == "mean_per_row") df <- df %>% group_by(.ri)
impute.mean <- function(x) replace(x, is.na(x), mean(x, na.rm = TRUE))
df_out <- df %>% mutate(.y = impute.mean(.y)) %>% rename(imputed = .y)
} else {
val <- ctx$op.value("value", as.double, 0)
df_out <- df %>% replace_na(list(.y = val)) %>% rename(imputed = .y)
}
df_out <- df_out %>% as_tibble() %>%
ctx$addNamespace()
df_out %>%
ctx$save()
# build_test_data(df_out, ctx, test_name = "test", test_folder = "./tests")
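# Toy illustration (not part of the operator) of the grouped-mean imputation
# used above, so the behaviour of impute.mean() is easy to check by hand.
# 'toy' is a hypothetical name; dplyr is already loaded above.
toy <- dplyr::tibble(.ri = c(1, 1, 2, 2), .y = c(1, NA, 10, 30))
toy %>%
  group_by(.ri) %>%
  mutate(.y = replace(.y, is.na(.y), mean(.y, na.rm = TRUE)))
# the NA in group 1 becomes 1 (the group mean); group 2 is unchanged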
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/drive_objects.R
\name{File.appProperties}
\alias{File.appProperties}
\title{File.appProperties Object}
\usage{
File.appProperties()
}
\value{
File.appProperties object
}
\description{
File.appProperties Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
A collection of arbitrary key-value pairs which are private to the requesting app. Entries with null values are cleared in update and copy requests.
}
\seealso{
Other File functions: \code{\link{File.capabilities}},
\code{\link{File.contentHints.thumbnail}},
\code{\link{File.contentHints}},
\code{\link{File.imageMediaMetadata.location}},
\code{\link{File.imageMediaMetadata}},
\code{\link{File.properties}},
\code{\link{File.videoMediaMetadata}},
\code{\link{File}}, \code{\link{files.copy}},
\code{\link{files.create}}, \code{\link{files.update}}
}
|
/googledrivev3.auto/man/File.appProperties.Rd
|
permissive
|
Phippsy/autoGoogleAPI
|
R
| false
| true
| 936
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/drive_objects.R
\name{File.appProperties}
\alias{File.appProperties}
\title{File.appProperties Object}
\usage{
File.appProperties()
}
\value{
File.appProperties object
}
\description{
File.appProperties Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
A collection of arbitrary key-value pairs which are private to the requesting app. Entries with null values are cleared in update and copy requests.
}
\seealso{
Other File functions: \code{\link{File.capabilities}},
\code{\link{File.contentHints.thumbnail}},
\code{\link{File.contentHints}},
\code{\link{File.imageMediaMetadata.location}},
\code{\link{File.imageMediaMetadata}},
\code{\link{File.properties}},
\code{\link{File.videoMediaMetadata}},
\code{\link{File}}, \code{\link{files.copy}},
\code{\link{files.create}}, \code{\link{files.update}}
}
|
library(shiny)
shinyUI(pageWithSidebar(
headerPanel("Age Calculator"),
sidebarPanel(
    p("This is an age calculator application. It allows the user to pick any date of birth from the calendar and calculates the age for the given birthdate."),
dateInput("birthday", "Birthdate:"),
em("Click on the textbox to display calendar"),
p(),
checkboxGroupInput("display", "Result options:",
c("Calculate age" = "1",
"Display zodiac" = "2"))
# h3("Function:"),
#h5("This is an age calculator application"),
#h5("You choose what to show by clicking any of the above checkboxes."),
#h6("The ui.r and server.r scripts can be viewed via GitHub repository at: https://github.com/nazilacj/Developing_Data_Products")
),
mainPanel(
h1("Results"),
img(src="zodiac.png", height = 400, width = 400),
p(),
strong("Below are the results for the given birthdate with the chosen options:"),
p('The given birth day and date:'),
verbatimTextOutput("age"),
verbatimTextOutput("zod")
)
))
|
/ui.R
|
no_license
|
Khairulhak/Coursera-ShinyApp-Age-Calculator
|
R
| false
| false
| 1,090
|
r
|
library(shiny)
shinyUI(pageWithSidebar(
headerPanel("Age Calculator"),
sidebarPanel(
    p("This is an age calculator application. It allows the user to pick any date of birth from the calendar and calculates the age for the given birthdate."),
dateInput("birthday", "Birthdate:"),
em("Click on the textbox to display calendar"),
p(),
checkboxGroupInput("display", "Result options:",
c("Calculate age" = "1",
"Display zodiac" = "2"))
# h3("Function:"),
#h5("This is an age calculator application"),
#h5("You choose what to show by clicking any of the above checkboxes."),
#h6("The ui.r and server.r scripts can be viewed via GitHub repository at: https://github.com/nazilacj/Developing_Data_Products")
),
mainPanel(
h1("Results"),
img(src="zodiac.png", height = 400, width = 400),
p(),
strong("Below are the results for the given birthdate with the chosen options:"),
p('The given birth day and date:'),
verbatimTextOutput("age"),
verbatimTextOutput("zod")
)
))
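# Hypothetical server fragment (NOT the app's actual server.R), sketching one
# way the 'birthday' input and the 'display' checkboxes defined above could
# feed the 'age' and 'zod' outputs; kept commented since it is an assumption.
# server <- function(input, output) {
#   output$age <- renderPrint({
#     if ("1" %in% input$display)
#       floor(as.numeric(difftime(Sys.Date(), input$birthday, units = "days")) / 365.25)
#   })
#   output$zod <- renderPrint({
#     if ("2" %in% input$display) months(input$birthday)  # stand-in for a real zodiac lookup
#   })
# }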
|
library(shiny)
shinyUI(pageWithSidebar(
headerPanel("GSVA"),
sidebarPanel(
h5("Type GSE ID",align = "left",style = "color:gray"),
textInput("symb", "", "gse35014"),
actionButton("get", "Select"),
tags$style(type='text/css', "button#get { margin-bottom: 9px; }"),
p("To browse GEO series, visit the ",
a("GEO homepage.",
href = "http://www.ncbi.nlm.nih.gov/geo/",target="_blank")),
br(),
h5("Upload GeneSetCollection",align = "left",style = "color:gray"),
fileInput("gset", "", multiple = FALSE, accept = NULL),
actionButton("gsv", "Apply GSVA"),
h6("_____________________",align = "center",style = "color:gray"),
uiOutput("testui"),
uiOutput("choose_columns_plot"),
uiOutput("choose_columns_boxplot"),
uiOutput("choose_cohort_plot"),
br(),
img(src="janssen_logo.jpg")
),
mainPanel(
### show timer
conditionalPanel("updateBusy() || $('html').hasClass('shiny-busy')",
img(src="loading_icon.gif"),
id='progressIndicator',
div(id='progress',includeHTML("timer.js"))
),
tags$head(tags$style(type="text/css",
'#progressIndicator {',
' position:fixed; top: 8px; right: 710px; top: 310px; width: 130px; height: 30px;',
' padding: 8px; border: 1px transparent #CCC; border-radius: 8px;',
'}'
)),
tabsetPanel(
tabPanel("Table",tableOutput("table")),
tabPanel("Plot",plotOutput("plot")),
tabPanel("BoxPlot",plotOutput("boxplot")),
id = "selectedTab"
)
)
))
|
/ui.R
|
no_license
|
spavlidi/GSVA_test
|
R
| false
| false
| 1,729
|
r
|
library(shiny)
shinyUI(pageWithSidebar(
headerPanel("GSVA"),
sidebarPanel(
h5("Type GSE ID",align = "left",style = "color:gray"),
textInput("symb", "", "gse35014"),
actionButton("get", "Select"),
tags$style(type='text/css', "button#get { margin-bottom: 9px; }"),
p("To browse GEO series, visit the ",
a("GEO homepage.",
href = "http://www.ncbi.nlm.nih.gov/geo/",target="_blank")),
br(),
h5("Upload GeneSetCollection",align = "left",style = "color:gray"),
fileInput("gset", "", multiple = FALSE, accept = NULL),
actionButton("gsv", "Apply GSVA"),
h6("_____________________",align = "center",style = "color:gray"),
uiOutput("testui"),
uiOutput("choose_columns_plot"),
uiOutput("choose_columns_boxplot"),
uiOutput("choose_cohort_plot"),
br(),
img(src="janssen_logo.jpg")
),
mainPanel(
### show timer
conditionalPanel("updateBusy() || $('html').hasClass('shiny-busy')",
img(src="loading_icon.gif"),
id='progressIndicator',
div(id='progress',includeHTML("timer.js"))
),
tags$head(tags$style(type="text/css",
'#progressIndicator {',
' position:fixed; top: 8px; right: 710px; top: 310px; width: 130px; height: 30px;',
' padding: 8px; border: 1px transparent #CCC; border-radius: 8px;',
'}'
)),
tabsetPanel(
tabPanel("Table",tableOutput("table")),
tabPanel("Plot",plotOutput("plot")),
tabPanel("BoxPlot",plotOutput("boxplot")),
id = "selectedTab"
)
)
))
|
basePlot2 <- function(yin, plotTitle, drug)
{
xLab <- "Time (Days since first drug start)"
yLab = "Bactericidal effect"
laby <- c(10, 20, 30, 40, 50, 60, 70, 80, 90, 100)
namesy <- c(10, 20, 30, 40, 50, 60, 70, 80, 90, 100)
labx <- c(60, 120, 180, 240, 300, 360, 420, 480, 540, 600, 660, 720, 780, 840, 900, 960, 1020)
namesx <- c(-120, -60, 0, 60, 120, 180, 240, 300, 360, 420, 480, 540, 600, 660, 720, 780, 840)
# Prepare data
legendPosition <-"none"
if (plotTitle == "IntraCellular Granuloma") {legendPosition <- "top"}
timeSelect <- drugStart:(drugStart+360)
yint <- t(yin[drugStart:(drugStart+360)])
yset <- data.frame(timeSelect, yint)
name1 <- paste(drug[1])
name2 <- paste(drug[2])
name3 <- paste(drug[3])
name4 <- paste(drug[4])
name5 <- "Immune"
colnames(yset) <- c('time', name1, name2, name3, name4, name5)
dfm <- melt(yset, id='time')
#dfm <- dfm[seq(1, nrow(dfm), 10), ]
# Prepare plot
plot1 <- ggplot(data = dfm, aes(x = time, y= value, colour=variable, fill=variable)) +
    #stat_smooth(size=0.5, na.rm=TRUE) +
    geom_line(size=1, na.rm=TRUE) +   # 'na.rm' is the correct argument name (was 'rm.na')
    #geom_area(na.rm=TRUE) +
#scale_y_log10() +
#scale_y_continuous(breaks = laby , labels = namesy) +
scale_x_continuous(breaks = labx, labels = namesx) +
scale_y_continuous(limits=c(0, 1)) +
ggtitle(plotTitle) +
theme(plot.title = element_text(size=12, face="bold", vjust=0.5)) +
theme(legend.position=legendPosition, legend.text =element_text(size=8)) +
theme(legend.title=element_blank()) +
xlab(xLab) +
theme(axis.title = element_text(size=10)) +
ylab(yLab)
return (plot1)
}
|
/inst/R_old/basePlot2.R
|
no_license
|
WillFox/TBsim
|
R
| false
| false
| 1,696
|
r
|
basePlot2 <- function(yin, plotTitle, drug)
{
xLab <- "Time (Days since first drug start)"
yLab = "Bactericidal effect"
laby <- c(10, 20, 30, 40, 50, 60, 70, 80, 90, 100)
namesy <- c(10, 20, 30, 40, 50, 60, 70, 80, 90, 100)
labx <- c(60, 120, 180, 240, 300, 360, 420, 480, 540, 600, 660, 720, 780, 840, 900, 960, 1020)
namesx <- c(-120, -60, 0, 60, 120, 180, 240, 300, 360, 420, 480, 540, 600, 660, 720, 780, 840)
# Prepare data
legendPosition <-"none"
if (plotTitle == "IntraCellular Granuloma") {legendPosition <- "top"}
timeSelect <- drugStart:(drugStart+360)
yint <- t(yin[drugStart:(drugStart+360)])
yset <- data.frame(timeSelect, yint)
name1 <- paste(drug[1])
name2 <- paste(drug[2])
name3 <- paste(drug[3])
name4 <- paste(drug[4])
name5 <- "Immune"
colnames(yset) <- c('time', name1, name2, name3, name4, name5)
dfm <- melt(yset, id='time')
#dfm <- dfm[seq(1, nrow(dfm), 10), ]
# Prepare plot
plot1 <- ggplot(data = dfm, aes(x = time, y= value, colour=variable, fill=variable)) +
    #stat_smooth(size=0.5, na.rm=TRUE) +
    geom_line(size=1, na.rm=TRUE) +   # 'na.rm' is the correct argument name (was 'rm.na')
    #geom_area(na.rm=TRUE) +
#scale_y_log10() +
#scale_y_continuous(breaks = laby , labels = namesy) +
scale_x_continuous(breaks = labx, labels = namesx) +
scale_y_continuous(limits=c(0, 1)) +
ggtitle(plotTitle) +
theme(plot.title = element_text(size=12, face="bold", vjust=0.5)) +
theme(legend.position=legendPosition, legend.text =element_text(size=8)) +
theme(legend.title=element_blank()) +
xlab(xLab) +
theme(axis.title = element_text(size=10)) +
ylab(yLab)
return (plot1)
}
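# Hypothetical call sketch (inputs assumed, not from the original project):
# basePlot2() expects ggplot2 and reshape2 (for melt) to be attached, a global
# 'drugStart' index, and 'yin' behaving like a data frame with one row per
# series (4 drugs + the immune response) and at least drugStart + 360 columns
# of effect values in [0, 1]. Kept commented because these are assumptions.
# library(ggplot2); library(reshape2)
# drugStart <- 120
# yin <- as.data.frame(matrix(runif(5 * 600), nrow = 5))
# basePlot2(yin, "IntraCellular Granuloma", c("INH", "RIF", "PZA", "EMB"))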
|
\name{data11Plot}
\alias{data11Plot}
\docType{data}
\title{
data11Plot
%% ~~ data name/kind ... ~~
}
\description{
%% ~~ A concise (1-5 lines) description of the dataset. ~~
}
\usage{data("data11Plot")}
\format{
A data frame with 200 observations on the following 2 variables.
\describe{
\item{\code{x}}{a numeric vector}
\item{\code{y}}{a numeric vector}
}
}
\details{
%% ~~ If necessary, more details than the __description__ above ~~
}
\source{
%% ~~ reference to a publication or URL from which the data were obtained ~~
}
\references{
%% ~~ possibly secondary sources and usages ~~
}
\examples{
data(data11Plot)
## maybe str(data11Plot) ; plot(data11Plot) ...
}
\keyword{datasets}
|
/WHUMSurveyDataAnalysis/man/data11Plot.Rd
|
no_license
|
jnobuyuki/WHUMSurveyDataAnalysis
|
R
| false
| false
| 707
|
rd
|
\name{data11Plot}
\alias{data11Plot}
\docType{data}
\title{
data11Plot
%% ~~ data name/kind ... ~~
}
\description{
%% ~~ A concise (1-5 lines) description of the dataset. ~~
}
\usage{data("data11Plot")}
\format{
A data frame with 200 observations on the following 2 variables.
\describe{
\item{\code{x}}{a numeric vector}
\item{\code{y}}{a numeric vector}
}
}
\details{
%% ~~ If necessary, more details than the __description__ above ~~
}
\source{
%% ~~ reference to a publication or URL from which the data were obtained ~~
}
\references{
%% ~~ possibly secondary sources and usages ~~
}
\examples{
data(data11Plot)
## maybe str(data11Plot) ; plot(data11Plot) ...
}
\keyword{datasets}
|
##crosstalk-plotly-shiny example:
# Might make linked brushing a lot easier, but we'll see if it delivers.
#installation steps:
devtools::install_github('ropensci/plotly')
devtools::install_github('rstudio/crosstalk')
##devtools::install_github('rstudio/DT')
library(crosstalk)
library(plotly)
library(DT)
library(shiny)
#------- a simple example using iris data:
shared_iris <- SharedData$new(iris)
ui <- fluidPage(
fluidRow(column(6,
plotlyOutput("plot1")),
column(6,
plotlyOutput("plot2"))),
tableOutput("table")
)
server <- function(input, output) {
shared_iris <- SharedData$new(iris)
output$plot1 <- renderPlotly({
plot_ly(shared_iris, x = ~Petal.Width, y = ~Petal.Length, color = ~Species, type = "scatter", mode = "markers")
})
output$plot2 <- renderPlotly({
plot_ly(shared_iris, x = ~Sepal.Width, y = ~Sepal.Length, color = ~Species, type = "scatter", mode = "markers")
})
output$table <- renderTable({
event_data("plotly_selected")
})
}
shinyApp(ui, server)
#you could expand this to doing more things than just trying to do linked brushing/filtering (otherwise why couple it with Shiny...).
#Though, if you're thinking of just linked brushing alone - maybe crosstalk alone does the job neatly.
#TODO: Make a more complex example??
|
/code/crosstalk-plotly-shiny.R
|
no_license
|
ysoh286/honours-project-2017
|
R
| false
| false
| 1,346
|
r
|
##crosstalk-plotly-shiny example:
# Might make linked brushing a lot easier, but we'll see if it delivers.
#installation steps:
devtools::install_github('ropensci/plotly')
devtools::install_github('rstudio/crosstalk')
##devtools::install_github('rstudio/DT')
library(crosstalk)
library(plotly)
library(DT)
library(shiny)
#------- a simple example using iris data:
shared_iris <- SharedData$new(iris)
ui <- fluidPage(
fluidRow(column(6,
plotlyOutput("plot1")),
column(6,
plotlyOutput("plot2"))),
tableOutput("table")
)
server <- function(input, output) {
shared_iris <- SharedData$new(iris)
output$plot1 <- renderPlotly({
plot_ly(shared_iris, x = ~Petal.Width, y = ~Petal.Length, color = ~Species, type = "scatter", mode = "markers")
})
output$plot2 <- renderPlotly({
plot_ly(shared_iris, x = ~Sepal.Width, y = ~Sepal.Length, color = ~Species, type = "scatter", mode = "markers")
})
output$table <- renderTable({
event_data("plotly_selected")
})
}
shinyApp(ui, server)
#you could expand this to doing more things than just trying to do linked brushing/filtering (otherwise why couple it with Shiny...).
#Though, if you're thinking of just linked brushing alone - maybe crosstalk alone does the job neatly.
#TODO: Make a more complex example??
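# Sketch of the "crosstalk alone" idea from the comments above: no Shiny, just
# two plotly widgets sharing one SharedData object, so a box/lasso selection in
# either panel highlights the matching rows in the other. Uses only the
# packages already loaded above; 'shared', 'p1' and 'p2' are new names.
shared <- SharedData$new(iris)
p1 <- plot_ly(shared, x = ~Petal.Width, y = ~Petal.Length, color = ~Species,
              type = "scatter", mode = "markers")
p2 <- plot_ly(shared, x = ~Sepal.Width, y = ~Sepal.Length, color = ~Species,
              type = "scatter", mode = "markers")
bscols(highlight(p1, on = "plotly_selected"),
       highlight(p2, on = "plotly_selected"))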
|
# Takes in input data, and goes through a series of possible lower
# bounds to create an array of means. The rate of change of the
# array of means is then taken. The mean of this array is found and
# that mean is then compared to the rest of the array to find the
# number of points in the array that are less than the mean. This is the
# best lower bound for the data set.
# Input: data and a number of lower bounds you want to test
# Output: the best lower bound
source("~/Documents/MyFolders/DaphniaLab/Functions/DxFunction.R")
source("~/Documents/MyFolders/DaphniaLab/Functions/ConcatFunction.R")
source("~/Documents/MyFolders/DaphniaLab/Functions/ForceFilterFunction.R")
Lowerbounds <- function(input)
{
interval <- 0.01
MIN <- min(input, na.rm = T)
MAX <- mean(input, na.rm = T)
  iterations <- MAX/interval
  v <- seq(MIN,MAX,interval)
  out <- numeric(iterations)
for(i in 1:length(v))
{
out[i] <- mean(input[input > v[i]], na.rm = T)
}
M <- match(min(Dx(out),na.rm=TRUE), Dx(out), nomatch = NA_integer_, incomparables = NULL)
best <- v[M]
return(best)
}
|
/Functions/Old/LowerboundsFunction.R
|
no_license
|
rmcdonnell/Daphnia-Project
|
R
| false
| false
| 1,095
|
r
|
# Takes in input data, and goes through a series of possible lower
# bounds to create an array of means. The rate of change of the
# array of means is then taken. The mean of this array is found and
# that mean is then compared to the rest of the array to find the
# number of points in the array that are less than the mean. This is the
# best lower bound for the data set.
# Input: data and a number of lower bounds you want to test
# Output: the best lower bound
source("~/Documents/MyFolders/DaphniaLab/Functions/DxFunction.R")
source("~/Documents/MyFolders/DaphniaLab/Functions/ConcatFunction.R")
source("~/Documents/MyFolders/DaphniaLab/Functions/ForceFilterFunction.R")
Lowerbounds <- function(input)
{
interval <- 0.01
MIN <- min(input, na.rm = T)
MAX <- mean(input, na.rm = T)
  iterations <- MAX/interval
  v <- seq(MIN,MAX,interval)
  out <- numeric(iterations)
for(i in 1:length(v))
{
out[i] <- mean(input[input > v[i]], na.rm = T)
}
M <- match(min(Dx(out),na.rm=TRUE), Dx(out), nomatch = NA_integer_, incomparables = NULL)
best <- v[M]
return(best)
}
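# Hypothetical call sketch (made-up data): how Lowerbounds() might be used.
# The function depends on Dx() from the sourced DxFunction.R, assumed here to
# return the element-wise rate of change of its input; without it the call
# fails, so the sketch is left commented.
# set.seed(42)
# force_trace <- c(rnorm(500, mean = 0.05, sd = 0.02),  # baseline noise
#                  rnorm(500, mean = 0.60, sd = 0.10))  # signal region
# Lowerbounds(force_trace)  # returns the estimated best lower bound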
|
testlist <- list(n = 252645135L)
result <- do.call(breakfast:::setBitNumber,testlist)
str(result)
|
/breakfast/inst/testfiles/setBitNumber/libFuzzer_setBitNumber/setBitNumber_valgrind_files/1609960818-test.R
|
no_license
|
akhikolla/updated-only-Issues
|
R
| false
| false
| 97
|
r
|
testlist <- list(n = 252645135L)
result <- do.call(breakfast:::setBitNumber,testlist)
str(result)
|
source('common.R', encoding = 'utf-8')
## @knitr init_stan
y <- ukdrivers
standata <- within(list(), {
y <- as.vector(y)
n <- length(y)
})
## @knitr show_model
model_file <- '../models/fig03_01.stan'
cat(paste(readLines(model_file)), sep = '\n')
## @knitr fit_stan
lmresult <- lm(y ~ x, data = data.frame(x = 1:length(y), y = as.numeric(y)))
init <- list(list(mu = rep(mean(y), length(y)),
v = rep(coefficients(lmresult)[[2]], length(y) - 1),
sigma_level = sd(y) / 2,
sigma_drift = sd(y) / 2,
sigma_irreg = 0.001))
fit <- stan(file = model_file, data = standata,
iter = 10000, chains = 4, seed = 12345)
stopifnot(is.converged(fit))
mu <- get_posterior_mean(fit, par = 'mu')[, 'mean-all chains']
v <- get_posterior_mean(fit, par = 'v')[, 'mean-all chains']
sigma <- get_posterior_mean(fit, par = 'sigma')[, 'mean-all chains']
sigma_drift <- sigma[[1]]
sigma_irreg <- sigma[[2]]
sigma_level <- sigma[[3]]
# stopifnot(is.almost.fitted(mu[[1]], 7.4157))
is.almost.fitted(mu[[1]], 7.4157)
stopifnot(is.almost.fitted(v[[1]], 0.00028896))
stopifnot(is.almost.fitted(sigma_irreg^2, 0.0021181))
stopifnot(is.almost.fitted(sigma_level^2, 0.012128))
stopifnot(is.almost.fitted(sigma_drift^2, 1.5e-11))
## @knitr output_figures
title <- 'Figure 3.1. Trend of stochastic linear trend model.'
title <- '図 3.1 確率的線形トレンド・モデルのトレンド'
# original series
p <- autoplot(y)
# stan
yhat <- ts(mu, start = start(y), frequency = frequency(y))
p <- autoplot(yhat, p = p, ts.colour = 'blue')
p + ggtitle(title)
fmt <- function(){
function(x) format(x, nsmall = 5, scientific = FALSE)
}
title <- 'Figure 3.2. Slope of stochastic linear trend model.'
title <- '図 3.2 確率的線形トレンド・モデルの傾き'
slope <- ts(v, start = start(y), frequency = frequency(y))
autoplot(slope) + scale_y_continuous(labels = fmt()) + ggtitle(title)
title <- 'Figure 3.3. Irregular component of stochastic linear trend model.'
title <- '図 3.3 確率的線形トレンド・モデルに対する不規則要素'
autoplot(y - yhat, ts.linetype = 'dashed') + ggtitle(title)
|
/R/fig03_01.R
|
permissive
|
enguang2/stan-statespace
|
R
| false
| false
| 2,180
|
r
|
source('common.R', encoding = 'utf-8')
## @knitr init_stan
y <- ukdrivers
standata <- within(list(), {
y <- as.vector(y)
n <- length(y)
})
## @knitr show_model
model_file <- '../models/fig03_01.stan'
cat(paste(readLines(model_file)), sep = '\n')
## @knitr fit_stan
lmresult <- lm(y ~ x, data = data.frame(x = 1:length(y), y = as.numeric(y)))
init <- list(list(mu = rep(mean(y), length(y)),
v = rep(coefficients(lmresult)[[2]], length(y) - 1),
sigma_level = sd(y) / 2,
sigma_drift = sd(y) / 2,
sigma_irreg = 0.001))
fit <- stan(file = model_file, data = standata,
iter = 10000, chains = 4, seed = 12345)
stopifnot(is.converged(fit))
mu <- get_posterior_mean(fit, par = 'mu')[, 'mean-all chains']
v <- get_posterior_mean(fit, par = 'v')[, 'mean-all chains']
sigma <- get_posterior_mean(fit, par = 'sigma')[, 'mean-all chains']
sigma_drift <- sigma[[1]]
sigma_irreg <- sigma[[2]]
sigma_level <- sigma[[3]]
# stopifnot(is.almost.fitted(mu[[1]], 7.4157))
is.almost.fitted(mu[[1]], 7.4157)
stopifnot(is.almost.fitted(v[[1]], 0.00028896))
stopifnot(is.almost.fitted(sigma_irreg^2, 0.0021181))
stopifnot(is.almost.fitted(sigma_level^2, 0.012128))
stopifnot(is.almost.fitted(sigma_drift^2, 1.5e-11))
## @knitr output_figures
title <- 'Figure 3.1. Trend of stochastic linear trend model.'
title <- '図 3.1 確率的線形トレンド・モデルのトレンド'
# original series
p <- autoplot(y)
# stan
yhat <- ts(mu, start = start(y), frequency = frequency(y))
p <- autoplot(yhat, p = p, ts.colour = 'blue')
p + ggtitle(title)
fmt <- function(){
function(x) format(x, nsmall = 5, scientific = FALSE)
}
title <- 'Figure 3.2. Slope of stochastic linear trend model.'
title <- '図 3.2 確率的線形トレンド・モデルの傾き'
slope <- ts(v, start = start(y), frequency = frequency(y))
autoplot(slope) + scale_y_continuous(labels = fmt()) + ggtitle(title)
title <- 'Figure 3.3. Irregular component of stochastic linear trend model.'
title <- '図 3.3 確率的線形トレンド・モデルに対する不規則要素'
autoplot(y - yhat, ts.linetype = 'dashed') + ggtitle(title)
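## Note: 'init' is built above but never passed to stan(). If those starting
## values were intended to be used, a sketch (assumption, one init list per
## chain) would look like:
# fit <- stan(file = model_file, data = standata, init = rep(init, 4),
#             iter = 10000, chains = 4, seed = 12345)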
|
#' CRS of various PROJ.4 projections
#'
#' Coordinate reference system (CRS) object for several proj4 character strings.
#' \code{posm} and \code{pll} are taken directly from
#' \code{OpenStreetMap::\link[OpenStreetMap]{osm}} and
#' \code{\link[OpenStreetMap]{longlat}}.\cr
#' \code{pmap} gets the projection string from map objects as returned by \code{\link{pointsMap}}.
#'
#' @name proj
#' @aliases posm pll putm pmap
#'
#' @return \code{sp::\link[sp]{CRS}} objects for one of: \cr
#' - UTM projection with given zone\cr
#' - Open street map (and google) mercator projection\cr
#' - Latitude Longitude projection\cr
#' @author Berry Boessenkool, \email{berry-b@@gmx.de}, Aug 2016
#' @seealso \code{\link{projectPoints}}, \code{\link{degree}}
#' @keywords spatial
#' @importFrom sp CRS
#' @export
#' @examples
#' posm()
#' str(posm())
#' posm()@@projargs
#' pll()
#' putm(5:14) # Germany
#' putm(zone=33) # Berlin
#'
#' map <- list(tiles=list(dummy=list(projection=pll())),
#' bbox=list(p1=par("usr")[c(1,4)], p2=par("usr")[2:3]) )
#' pmap(map)
#'
#' @param long Vector of decimal longitude coordinates (East/West values).
#'             Not needed if \code{zone} is given.
#' @param zone UTM (Universal Transverse Mercator) zone, see e.g.
#' \url{https://upload.wikimedia.org/wikipedia/commons/e/ed/Utm-zones.jpg}.
#' DEFAULT: UTM zone at \link{mean} of \code{long}
#' @param map for pmap: map object as returned by \code{\link{pointsMap}}
#'
putm <- function
(
long,
zone=mean(long,na.rm=TRUE)%/%6+31
)
{
if(!missing(long)) checkLL(long=long, lat=0)
sp::CRS(paste0("+proj=utm +zone=",zone," +ellps=WGS84 +datum=WGS84"))
}
#' @export
#' @rdname proj
posm <- function() sp::CRS("+proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 +y_0=0 +k=1.0 +units=m +nadgrids=@null +no_defs")
#' @export
#' @rdname proj
pll <- function() sp::CRS("+proj=longlat +datum=WGS84")
#' @export
#' @rdname proj
pmap <- function(map)
{
name <- deparse(substitute(map))
prj <- map$tiles[[1]]$projection
if(is.null(prj)) stop("Projection could not be obtained from '",name,"'.",
"It seems there is no element $tiles[[1]]$projection.")
prj
}
|
/R/proj.R
|
no_license
|
brry/OSMscale
|
R
| false
| false
| 2,232
|
r
|
#' CRS of various PROJ.4 projections
#'
#' Coordinate reference system (CRS) object for several proj4 character strings.
#' \code{posm} and \code{pll} are taken directly from
#' \code{OpenStreetMap::\link[OpenStreetMap]{osm}} and
#' \code{\link[OpenStreetMap]{longlat}}.\cr
#' \code{pmap} gets the projection string from map objects as returned by \code{\link{pointsMap}}.
#'
#' @name proj
#' @aliases posm pll putm pmap
#'
#' @return \code{sp::\link[sp]{CRS}} objects for one of: \cr
#' - UTM projection with given zone\cr
#' - Open street map (and google) mercator projection\cr
#' - Latitude Longitude projection\cr
#' @author Berry Boessenkool, \email{berry-b@@gmx.de}, Aug 2016
#' @seealso \code{\link{projectPoints}}, \code{\link{degree}}
#' @keywords spatial
#' @importFrom sp CRS
#' @export
#' @examples
#' posm()
#' str(posm())
#' posm()@@projargs
#' pll()
#' putm(5:14) # Germany
#' putm(zone=33) # Berlin
#'
#' map <- list(tiles=list(dummy=list(projection=pll())),
#' bbox=list(p1=par("usr")[c(1,4)], p2=par("usr")[2:3]) )
#' pmap(map)
#'
#' @param long Vector of decimal longitude coordinates (East/West values).
#'             Not needed if \code{zone} is given.
#' @param zone UTM (Universal Transverse Mercator) zone, see e.g.
#' \url{https://upload.wikimedia.org/wikipedia/commons/e/ed/Utm-zones.jpg}.
#' DEFAULT: UTM zone at \link{mean} of \code{long}
#' @param map for pmap: map object as returned by \code{\link{pointsMap}}
#'
putm <- function
(
long,
zone=mean(long,na.rm=TRUE)%/%6+31
)
{
if(!missing(long)) checkLL(long=long, lat=0)
sp::CRS(paste0("+proj=utm +zone=",zone," +ellps=WGS84 +datum=WGS84"))
}
#' @export
#' @rdname proj
posm <- function() sp::CRS("+proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 +y_0=0 +k=1.0 +units=m +nadgrids=@null +no_defs")
#' @export
#' @rdname proj
pll <- function() sp::CRS("+proj=longlat +datum=WGS84")
#' @export
#' @rdname proj
pmap <- function(map)
{
name <- deparse(substitute(map))
prj <- map$tiles[[1]]$projection
if(is.null(prj)) stop("Projection could not be obtained from '",name,"'.",
"It seems there is no element $tiles[[1]]$projection.")
prj
}
|
# Load Packages
library(dplyr)
library(ggplot2)
getwd()
# bring in data
confounding <- read.csv(file="confounding example.csv", header = TRUE, sep = ",", strip.white = TRUE )
names(confounding)[names(confounding)=="?..VarA"] <- "VarA"
str(confounding)
#Confounding Plot
ggplot(confounding, aes(x=VarE, y=VarD, color=VarC)) + geom_point()
#Confounding model without controlling for VarC
model3 <- lm(VarD ~ VarE, data = confounding)
summary(model3)
#Confounding model with VarC
model4 <- lm(VarD ~ VarE + VarC, data = confounding)
summary(model4)
#Interaction Plot
ggplot(confounding, aes(x=VarA, y=VarB, color=VarC)) + geom_point()
#Interaction model without interaction
model1 <- lm(VarB ~ VarA, data= confounding)
print(model1)
summary(model1)
#Interaction model controlling for VarC but without interaction term
model2 <- lm(VarB ~ VarA + VarC, data = confounding)
summary(model2)
#Interaction model with interaction term for VarC
model5 <- lm(VarB ~ VarA + VarC + VarA*VarC, data = confounding)
summary(model5)
|
/01.14.18 - Confounding and Interaction/confounding.R
|
no_license
|
Ogweno/SynopticSignals
|
R
| false
| false
| 1,028
|
r
|
# Load Packages
library(dplyr)
library(ggplot2)
getwd()
# bring in data
confounding <- read.csv(file="confounding example.csv", header = TRUE, sep = ",", strip.white = TRUE )
names(confounding)[names(confounding)=="?..VarA"] <- "VarA"
str(confounding)
#Confounding Plot
ggplot(confounding, aes(x=VarE, y=VarD, color=VarC)) + geom_point()
#Confounding model without controlling for VarC
model3 <- lm(VarD ~ VarE, data = confounding)
summary(model3)
#Confounding model with VarC
model4 <- lm(VarD ~ VarE + VarC, data = confounding)
summary(model4)
#Interaction Plot
ggplot(confounding, aes(x=VarA, y=VarB, color=VarC)) + geom_point()
#Interaction model without interaction
model1 <- lm(VarB ~ VarA, data= confounding)
print(model1)
summary(model1)
#Interaction model controlling for VarC but without interaction term
model2 <- lm(VarB ~ VarA + VarC, data = confounding)
summary(model2)
#Interaction model with interaction term for VarC
model5 <- lm(VarB ~ VarA + VarC + VarA*VarC, data = confounding)
summary(model5)
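#Note: in R's formula syntax VarA*VarC already expands to VarA + VarC + VarA:VarC,
#so model5 above can be written more compactly and fits the identical model;
#'model5_compact' is a new name used only for this illustration.
model5_compact <- lm(VarB ~ VarA * VarC, data = confounding)
summary(model5_compact)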
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/geom_edge_point.R
\name{geom_edge_point}
\alias{geom_edge_point}
\title{Draw edges as glyphs}
\usage{
geom_edge_point(mapping = NULL, data = get_edges(),
position = "identity", mirror = FALSE, show.legend = NA, ...)
}
\arguments{
\item{mapping}{Set of aesthetic mappings created by \code{\link[ggplot2:aes]{ggplot2::aes()}}
or \code{\link[ggplot2:aes_]{ggplot2::aes_()}}. By default x, y, xend, yend, group and
circular are mapped to x, y, xend, yend, edge.id and circular in the edge
data.}
\item{data}{The return of a call to \code{get_edges()} or a data.frame
giving edges in correct format (see details for guidance on the format).
See \code{\link[=get_edges]{get_edges()}} for more details on edge extraction.}
\item{position}{Position adjustment, either as a string, or the result of
a call to a position adjustment function.}
\item{mirror}{Logical. Should edge points be duplicated on both sides of the
diagonal. Intended for undirected graphs. Defaults to \code{FALSE}}
\item{show.legend}{logical. Should this layer be included in the legends?
\code{NA}, the default, includes if any aesthetics are mapped.
\code{FALSE} never includes, and \code{TRUE} always includes.
It can also be a named logical vector to finely select the aesthetics to
display.}
\item{...}{Other arguments passed on to \code{\link[=layer]{layer()}}. These are
often aesthetics, used to set an aesthetic to a fixed value, like
\code{colour = "red"} or \code{size = 3}. They may also be parameters
to the paired geom/stat.}
}
\description{
This geom draws edges as glyphs with their x-position defined by the
x-position of the start node, and the y-position defined by the y-position of
the end node. As such it will result in a matrix layout when used in
conjunction with \code{\link[=layout_tbl_graph_matrix]{layout_tbl_graph_matrix()}}
}
\section{Aesthetics}{
\code{geom_edge_point} understands the following
aesthetics. Bold aesthetics are automatically set, but can be overridden.
\itemize{
\item \strong{x}
\item \strong{y}
\item edge_shape
\item edge_colour
\item edge_size
\item edge_alpha
\item filter
}
}
\section{Edge aesthetic name expansion}{
In order to avoid excessive typing edge aesthetic names are
automatically expanded. Because of this it is not necessary to write
\code{edge_colour} within the \code{aes()} call as \code{colour} will
automatically be renamed appropriately.
}
\examples{
require(tidygraph)
gr <- create_notable('zachary') \%>\%
mutate(group = group_infomap()) \%>\%
morph(to_split, group) \%>\%
activate(edges) \%>\%
mutate(edge_group = as.character(.N()$group[1])) \%>\%
unmorph()
ggraph(gr, 'matrix', sort.by = node_rank_hclust()) +
geom_edge_point(aes(colour = edge_group), mirror = TRUE, edge_size = 3) +
scale_y_reverse() +
coord_fixed() +
labs(edge_colour = 'Infomap Cluster') +
  ggtitle("Zachary's Karate Club")
}
\seealso{
Other geom_edge_*: \code{\link{geom_edge_arc}},
\code{\link{geom_edge_density}},
\code{\link{geom_edge_diagonal}},
\code{\link{geom_edge_elbow}},
\code{\link{geom_edge_fan}},
\code{\link{geom_edge_hive}},
\code{\link{geom_edge_link}},
\code{\link{geom_edge_loop}}
}
\author{
Thomas Lin Pedersen
}
\concept{geom_edge_*}
|
/man/geom_edge_point.Rd
|
permissive
|
billdenney/ggraph
|
R
| false
| true
| 3,291
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/geom_edge_point.R
\name{geom_edge_point}
\alias{geom_edge_point}
\title{Draw edges as glyphs}
\usage{
geom_edge_point(mapping = NULL, data = get_edges(),
position = "identity", mirror = FALSE, show.legend = NA, ...)
}
\arguments{
\item{mapping}{Set of aesthetic mappings created by \code{\link[ggplot2:aes]{ggplot2::aes()}}
or \code{\link[ggplot2:aes_]{ggplot2::aes_()}}. By default x, y, xend, yend, group and
circular are mapped to x, y, xend, yend, edge.id and circular in the edge
data.}
\item{data}{The return of a call to \code{get_edges()} or a data.frame
giving edges in correct format (see details for guidance on the format).
See \code{\link[=get_edges]{get_edges()}} for more details on edge extraction.}
\item{position}{Position adjustment, either as a string, or the result of
a call to a position adjustment function.}
\item{mirror}{Logical. Should edge points be duplicated on both sides of the
diagonal. Intended for undirected graphs. Defaults to \code{FALSE}}
\item{show.legend}{logical. Should this layer be included in the legends?
\code{NA}, the default, includes if any aesthetics are mapped.
\code{FALSE} never includes, and \code{TRUE} always includes.
It can also be a named logical vector to finely select the aesthetics to
display.}
\item{...}{Other arguments passed on to \code{\link[=layer]{layer()}}. These are
often aesthetics, used to set an aesthetic to a fixed value, like
\code{colour = "red"} or \code{size = 3}. They may also be parameters
to the paired geom/stat.}
}
\description{
This geom draws edges as glyphs with their x-position defined by the
x-position of the start node, and the y-position defined by the y-position of
the end node. As such it will result in a matrix layout when used in
conjunction with \code{\link[=layout_tbl_graph_matrix]{layout_tbl_graph_matrix()}}
}
\section{Aesthetics}{
\code{geom_edge_point} understands the following
aesthetics. Bold aesthetics are automatically set, but can be overridden.
\itemize{
\item \strong{x}
\item \strong{y}
\item edge_shape
\item edge_colour
\item edge_size
\item edge_alpha
\item filter
}
}
\section{Edge aesthetic name expansion}{
In order to avoid excessive typing edge aesthetic names are
automatically expanded. Because of this it is not necessary to write
\code{edge_colour} within the \code{aes()} call as \code{colour} will
automatically be renamed appropriately.
}
\examples{
require(tidygraph)
gr <- create_notable('zachary') \%>\%
mutate(group = group_infomap()) \%>\%
morph(to_split, group) \%>\%
activate(edges) \%>\%
mutate(edge_group = as.character(.N()$group[1])) \%>\%
unmorph()
ggraph(gr, 'matrix', sort.by = node_rank_hclust()) +
geom_edge_point(aes(colour = edge_group), mirror = TRUE, edge_size = 3) +
scale_y_reverse() +
coord_fixed() +
labs(edge_colour = 'Infomap Cluster') +
  ggtitle("Zachary's Karate Club")
}
\seealso{
Other geom_edge_*: \code{\link{geom_edge_arc}},
\code{\link{geom_edge_density}},
\code{\link{geom_edge_diagonal}},
\code{\link{geom_edge_elbow}},
\code{\link{geom_edge_fan}},
\code{\link{geom_edge_hive}},
\code{\link{geom_edge_link}},
\code{\link{geom_edge_loop}}
}
\author{
Thomas Lin Pedersen
}
\concept{geom_edge_*}
|
setwd("C:/Users/Aaron/Documents/JH/EDA project")
load('data_for_plots.RData')
# hist(tvals,breaks=60)
# hist(pvals,breaks=100)
# median(pvals)
# hist(pvals,breaks=c(0,.001,.01,.05,.15,.6,1),plot=F)$breaks
# hist(pvals,breaks=c(0,.001,.01,.05,.15,.6,1),plot=F)$counts
# hist(pvals[pvals<.06],breaks=50)
#ticker<-1
ticker<-ticker+1
n<-nes[ticker]
x<-xes[ticker,1:n]
y<-yes[ticker,1:n]
pval<-pvals[ticker]
tval<-tvals[ticker]
m<-lm(y~x)
style<-pres[ticker]
#png('FigureQuestion.png',height=500,width=500)
par(mfrow=c(1,1))
plot(x,y,xlab='X',ylab='Y',main=ticker)
if(style=='lowess') lines(lowess(x,y))
if(style=='bestFit') abline(m$coef)
if(style=='axes')plot(x,y,xlab='X',ylab='Y',main=ticker,xlim=c(min(x)-1.5*sd(x),max(x)+1.5*sd(x)),ylim=c(min(y)-1.5*sd(y),max(y)+1.5*sd(y)))
#dev.off()
print(c('pval -',round(pval,digits=5),' tstat -',round(tval,digits=4),'n -',n,' ticker -',ticker,'pres -',style,'cor -',round(cor(x,y),digits=5)))
q2<-''
if(style=='lowess') q2<-'Here we also show a lowess line.'
if(style=='bestFit') q2<-'Here we also show the best fit linear model through the data (from ordinary least squares regression).'
question<-paste(q2,"<br/><br/>What do you think the p-value is?")
# if(style=='5plots'){
#you can fix this in later
#png('FigureQuestion.png',height=400,width=1300)
# plot(x,y,xlab='X',ylab='Y')
# par(mfrow=c(1,3))
# for(i in 1:3){ #right now you're only looking at 3
# x<-x5[[ticker]][i,]
# y<-y5[[ticker]][i,]
# plot(x,y,xlab='X',ylab='Y',main=paste(ticker,i,sep='-'))
# }
#dev.off()
# }
|
/Aaron's General Workspace/outdated stuff/generate_questions_1.r
|
no_license
|
Adamyazori/EDA-Project
|
R
| false
| false
| 1,594
|
r
|
setwd("C:/Users/Aaron/Documents/JH/EDA project")
load('data_for_plots.RData')
# hist(tvals,breaks=60)
# hist(pvals,breaks=100)
# median(pvals)
# hist(pvals,breaks=c(0,.001,.01,.05,.15,.6,1),plot=F)$breaks
# hist(pvals,breaks=c(0,.001,.01,.05,.15,.6,1),plot=F)$counts
# hist(pvals[pvals<.06],breaks=50)
#ticker<-1
ticker<-ticker+1
n<-nes[ticker]
x<-xes[ticker,1:n]
y<-yes[ticker,1:n]
pval<-pvals[ticker]
tval<-tvals[ticker]
m<-lm(y~x)
style<-pres[ticker]
#png('FigureQuestion.png',height=500,width=500)
par(mfrow=c(1,1))
plot(x,y,xlab='X',ylab='Y',main=ticker)
if(style=='lowess') lines(lowess(x,y))
if(style=='bestFit') abline(m$coef)
if(style=='axes')plot(x,y,xlab='X',ylab='Y',main=ticker,xlim=c(min(x)-1.5*sd(x),max(x)+1.5*sd(x)),ylim=c(min(y)-1.5*sd(y),max(y)+1.5*sd(y)))
#dev.off()
print(c('pval -',round(pval,digits=5),' tstat -',round(tval,digits=4),'n -',n,' ticker -',ticker,'pres -',style,'cor -',round(cor(x,y),digits=5)))
q2<-''
if(style=='lowess') q2<-'Here we also show a lowess line.'
if(style=='bestFit') q2<-'Here we also show the best fit linear model through the data (from ordinary least squares regression).'
question<-paste(q2,"<br/><br/>What do you think the p-value is?")
# if(style=='5plots'){
#you can fix this in later
#png('FigureQuestion.png',height=400,width=1300)
# plot(x,y,xlab='X',ylab='Y')
# par(mfrow=c(1,3))
# for(i in 1:3){ #right now you're only looking at 3
# x<-x5[[ticker]][i,]
# y<-y5[[ticker]][i,]
# plot(x,y,xlab='X',ylab='Y',main=paste(ticker,i,sep='-'))
# }
#dev.off()
# }
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bbEstDisp.R
\name{bbEstDisp}
\alias{bbEstDisp}
\title{Simple line-search estimator for dispersion of a beta binomial}
\usage{
bbEstDisp(success, size, weights = 1, x, beta, minDisp, maxDisp, se = FALSE)
}
\arguments{
\item{success}{the observed successes (a matrix)}
\item{size}{the total trials (a matrix)}
\item{weights}{the weights (1 or a matrix)}
\item{x}{the design matrix, as many rows as columns of \code{success} and \code{size}}
\item{beta}{a matrix of MLE coefficients, as many rows as \code{success} and \code{size}}
\item{minDisp}{the minimum dispersion value}
\item{maxDisp}{the maximum dispersion value}
\item{se}{logical, whether to return standard error estimate on the log of
the dispersion (theta). Warning: the standard error estimates are not
reliable at the boundary (log of minDisp and maxDisp), and should be
interpreted with caution!}
}
\value{
a vector of estimated dispersions (theta). if \code{se=TRUE} a matrix
with columns: the vector of estimated dispersions and the standard
errors for the log of the estimated dispersions
}
\description{
Uses R's \code{optimize} function to find the maximum likelihood
estimate of dispersion for a beta binomial distribution
(\code{theta} for the \code{dbetabinom} function in the
emdbook package). The counts, size, and beta are matrices,
such that each row could be treated as a beta-binomial GLM
problem.
}
\examples{
library(emdbook)
n <- 100
m <- 100
size <- matrix(rnbinom(n*m, mu=100, size=10),ncol=m)
success <- matrix(rbetabinom(n*m, prob=.5, size=size, theta=100),ncol=m)
x <- matrix(rep(1,m),ncol=1)
beta <- matrix(rep(0,n),ncol=1)
theta <- bbEstDisp(success=success, size=size, x=x, beta=beta, minDisp=1, maxDisp=500)
summary(theta)
# with standard error estimates on log of dispersion
fit <- bbEstDisp(success=success, size=size, x=x, beta=beta, minDisp=1, maxDisp=500, se=TRUE)
plot(fit[1:20,"theta"], ylim=c(0,500), ylab="theta-hat")
log.theta <- log(fit[1:20,"theta"])
log.theta.se <- fit[1:20,"se"]
segments(1:20, exp(log.theta - 2 * log.theta.se),
1:20, exp(log.theta + 2 * log.theta.se))
abline(h=100,col="red")
}
|
/man/bbEstDisp.Rd
|
no_license
|
azhu513/apeglm
|
R
| false
| true
| 2,198
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bbEstDisp.R
\name{bbEstDisp}
\alias{bbEstDisp}
\title{Simple line-search estimator for dispersion of a beta binomial}
\usage{
bbEstDisp(success, size, weights = 1, x, beta, minDisp, maxDisp, se = FALSE)
}
\arguments{
\item{success}{the observed successes (a matrix)}
\item{size}{the total trials (a matrix)}
\item{weights}{the weights (1 or a matrix)}
\item{x}{the design matrix, as many rows as columns of \code{success} and \code{size}}
\item{beta}{a matrix of MLE coefficients, as many rows as \code{success} and \code{size}}
\item{minDisp}{the minimum dispersion value}
\item{maxDisp}{the maximum dispersion value}
\item{se}{logical, whether to return standard error estimate on the log of
the dispersion (theta). Warning: the standard error estimates are not
reliable at the boundary (log of minDisp and maxDisp), and should be
interpreted with caution!}
}
\value{
a vector of estimated dispersions (theta). if \code{se=TRUE} a matrix
with columns: the vector of estimated dispersions and the standard
errors for the log of the estimated dispersions
}
\description{
Uses R's \code{optimize} function to find the maximum likelihood
estimate of dispersion for a beta binomial distribution
(\code{theta} for the \code{dbetabinom} function in the
emdbook package). The counts, size, and beta are matrices,
such that each row could be treated as a beta-binomial GLM
problem.
}
\examples{
library(emdbook)
n <- 100
m <- 100
size <- matrix(rnbinom(n*m, mu=100, size=10),ncol=m)
success <- matrix(rbetabinom(n*m, prob=.5, size=size, theta=100),ncol=m)
x <- matrix(rep(1,m),ncol=1)
beta <- matrix(rep(0,n),ncol=1)
theta <- bbEstDisp(success=success, size=size, x=x, beta=beta, minDisp=1, maxDisp=500)
summary(theta)
# with standard error estimates on log of dispersion
fit <- bbEstDisp(success=success, size=size, x=x, beta=beta, minDisp=1, maxDisp=500, se=TRUE)
plot(fit[1:20,"theta"], ylim=c(0,500), ylab="theta-hat")
log.theta <- log(fit[1:20,"theta"])
log.theta.se <- fit[1:20,"se"]
segments(1:20, exp(log.theta - 2 * log.theta.se),
1:20, exp(log.theta + 2 * log.theta.se))
abline(h=100,col="red")
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hhh4lag_methods.R
\name{fixef.hhh4lag}
\alias{fixef.hhh4lag}
\title{A modified version of \code{fixef.hhh4}}
\usage{
\method{fixef}{hhh4lag}(object, ...)
}
\description{
A modified version of \code{fixef.hhh4}
}
|
/man/fixef.hhh4lag.Rd
|
no_license
|
jbracher/hhh4addon
|
R
| false
| true
| 290
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hhh4lag_methods.R
\name{fixef.hhh4lag}
\alias{fixef.hhh4lag}
\title{A modified version of \code{fixef.hhh4}}
\usage{
\method{fixef}{hhh4lag}(object, ...)
}
\description{
A modified version of \code{fixef.hhh4}
}
|
library(tidyverse)
setwd('C:/Users/Ian/OneDrive - The Ohio State University/work/eggd_data/iupred')
e1iupred<- read_tsv('eggd-1_iupred.txt')
e2iupred<- read_tsv('eggd-2_iupred.txt')
a<- ggplot(e1iupred, aes(x = POS, y = `IUPRED SCORE`)) +
geom_line() +
lims( y = c(0,1))
a
a + theme_classic() + geom_hline(yintercept = 0.5) + scale_x_continuous(breaks = seq(0, 556, 100))
b<-ggplot(e2iupred, aes(x = POS, y = `IUPRED SCORE`)) +
geom_line()+
  ylim(c(0,1))
b + theme_classic() + geom_hline(yintercept = 0.5) + scale_x_continuous(limits = c(0, 666), breaks = seq(0, 666, 100))
|
/iupred_plots_script.R
|
no_license
|
benpastore/TurboID
|
R
| false
| false
| 601
|
r
|
library(tidyverse)
setwd('C:/Users/Ian/OneDrive - The Ohio State University/work/eggd_data/iupred')
e1iupred<- read_tsv('eggd-1_iupred.txt')
e2iupred<- read_tsv('eggd-2_iupred.txt')
a<- ggplot(e1iupred, aes(x = POS, y = `IUPRED SCORE`)) +
geom_line() +
lims( y = c(0,1))
a
a + theme_classic() + geom_hline(yintercept = 0.5) + scale_x_continuous(breaks = seq(0, 556, 100))
b<-ggplot(e2iupred, aes(x = POS, y = `IUPRED SCORE`)) +
geom_line()+
  ylim(c(0,1))
b + theme_classic() + geom_hline(yintercept = 0.5) + scale_x_continuous(limits = c(0, 666), breaks = seq(0, 666, 100))
|
classes <- c("factor", "factor", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric")
pd <- read.table("household_power_consumption.txt",header=T, sep=";", na.strings="?", colClasses=classes)
pd$Date <- as.Date(pd$Date, format="%d/%m/%Y")
startDate <- as.Date("2007-02-01")
endDate <- as.Date("2007-02-02")
pd <- pd[pd$Date >= startDate & pd$Date <= endDate, ]
pd$FullTimeStr <- paste(as.character(pd$Date), pd$Time, " ")
pd$FullTime <- strptime(pd$FullTimeStr, format="%Y-%m-%d %H:%M:%S")
png("plot2.png", width=480, height=480)
plot(pd$FullTime, pd$Global_active_power, type="l", xlab="", ylab="Global Active Power (kilowatts)")
dev.off()
|
/plot2.R
|
no_license
|
chamal-sapumohotti/ExData_Plotting1
|
R
| false
| false
| 670
|
r
|
classes <- c("factor", "factor", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric")
pd <- read.table("household_power_consumption.txt",header=T, sep=";", na.strings="?", colClasses=classes)
pd$Date <- as.Date(pd$Date, format="%d/%m/%Y")
startDate <- as.Date("2007-02-01")
endDate <- as.Date("2007-02-02")
pd <- pd[pd$Date >= startDate & pd$Date <= endDate, ]
pd$FullTimeStr <- paste(as.character(pd$Date), pd$Time, " ")
pd$FullTime <- strptime(pd$FullTimeStr, format="%Y-%m-%d %H:%M:%S")
png("plot2.png", width=480, height=480)
plot(pd$FullTime, pd$Global_active_power, type="l", xlab="", ylab="Global Active Power (kilowatts)")
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pmfr-package.R
\docType{package}
\name{pmfr}
\alias{pmfr}
\alias{pmfr-package}
\title{R with Positive Matrix Factorization (PMF)}
\description{
pmfr allows users to easily input and analyze results from
US EPA Positive Matrix Factorization (PMF) version 5.0.
Specifically, this package reads in relevant output,
formats the output, and provides data and visual summaries.
}
\details{
Profiles:
To read in profile information, users need to have the original data
and the PMF output; see \code{\link{pmfprof}}.
To read in source contributions, users need to have the original data
and the PMF output; see \code{\link{pmfcont}}.
}
\references{
Pentti Paatero and Unto Tapper (1994). Positive matrix
factorization: A non‐negative factor model with optimal utilization
of error estimates of data values. Environmetrics, 5(2), 111-126.
Gary Norris, Rachelle Duvall, Steve Brown, Song Bai (2014).
EPA Positive Matrix Factorization (PMF) 5.0 Fundamentals
and User Guide
}
|
/man/pmfr.Rd
|
no_license
|
kralljr/pmfr
|
R
| false
| true
| 1,034
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pmfr-package.R
\docType{package}
\name{pmfr}
\alias{pmfr}
\alias{pmfr-package}
\title{R with Positive Matrix Factorization (PMF)}
\description{
pmfr allows users to easily input and analyze results from
US EPA Positive Matrix Factorization (PMF) version 5.0.
Specifically, this package reads in relevant output,
formats the output, and provides data and visual summaries.
}
\details{
Profiles:
To read in profile information, users need to have the original data
and the PMF output; see \code{\link{pmfprof}}.
To read in source contributions, users need to have the original data
and the PMF output; see \code{\link{pmfcont}}.
}
\references{
Pentti Paatero and Unto Tapper (1994). Positive matrix
factorization: A non‐negative factor model with optimal utilization
of error estimates of data values. Environmetrics, 5(2), 111-126.
Gary Norris, Rachelle Duvall, Steve Brown, Song Bai (2014).
EPA Positive Matrix Factorization (PMF) 5.0 Fundamentals
and User Guide
}
|
library(foreach,quietly=TRUE)
library(doParallel,quietly=TRUE)
library(doRNG,quietly=TRUE)
njobs <- 1600
nnode <- 100
chunk <- 1
## set njobs, nnode, and chunk from the command line
invisible(eval(parse(text=commandArgs(trailingOnly=TRUE))))
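## the trailing name=value arguments are parsed and evaluated by the line above;
## a hypothetical invocation (the exact launcher depends on the local MPI setup):
##   Rscript doparallel_test.R njobs=3200 nnode=200 chunk=2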
ncore <- 1
cl <- makeCluster(type="MPI",spec=nnode)
registerDoParallel(cl)
clusterSetRNGStream(cl,iseed=1218461302L)
cat("Starting computation of size",njobs,"using",
nnode,"nodes, with",
ncore,"cores/node,",
"and chunksize",chunk,"\n")
tic <- Sys.time()
res <- foreach (i = seq_len(njobs),
.combine=rbind,
.inorder=FALSE) %dopar% {
t1 <- Sys.time()
h <- system("hostname",intern=TRUE)
pid <- Sys.getpid()
x <- quantile(rnorm(n=10000000),prob=0.9)
t2 <- Sys.time()
data.frame(id=i,host=h,pid=pid,t1=t1,t2=t2,x=x)
}
toc <- Sys.time()
stopCluster(cl)
suppressMessages(library(aakmisc,quietly=TRUE))
library(magrittr)
library(plyr)
library(reshape2)
library(ggplot2)
library(grid)
library(digest,quietly=TRUE)
nwork <- getDoParWorkers()
cat(nnode,'nodes x',ncore,'cores,','chunksize',chunk,'nworkers',nwork,'\n')
res |> mutate(etime=difftime(t2,t1,units='secs')) |>
summarize(stime=as.numeric(sum(etime)),
etime=as.numeric(difftime(max(t2),min(t1),units="secs"))) |>
mutate(otime=as.numeric(difftime(toc,tic,units='secs')),
ieffic=stime/etime/nwork,
oeffic=stime/otime/nwork,
njobs=njobs,
nnode=nnode,
ncore=ncore,
nwork=nwork,
chunk=chunk) |>
melt(id=NULL) |>
mutate(y=-seq_along(variable),label=paste0(variable,"\t",signif(value,4))) -> eff
eff |> use_series("label") |> cat(sep="\n")
eff |> ggplot(aes(x=1,y=y,label=label))+
geom_text(hjust="left")+
theme_void() -> txt
res |>
ggplot(aes(x=t1,xend=t2,y=id,yend=id,color=host))+
geom_segment()+
guides(color=FALSE)+
theme_bw()+
labs(x='time',y='job id')+
annotate("segment",x=tic,xend=toc,y=0,yend=njobs,color='black') -> pl
res |>
ggplot(aes(x=host,fill=host,color=host))+
stat_count()+
labs(x="",y="")+
guides(fill=FALSE,color=FALSE)+
theme(axis.text.x=element_text(angle=90,vjust=0.5)) -> pl1
png(filename="doparallel_test.png",width=7,height=8,
units='in',res=300)
print(pl)
print(txt,vp=viewport(x=0.2,y=0.8,width=0.4,height=0.2))
print(pl1,vp=viewport(x=0.75,y=0.3,width=0.4,height=0.4))
dev.off()
res |>
subset(select=c(id,x)) |>
arrange(id) |>
set_rownames(NULL) |>
digest()
## 1ad5a2898de4e21b2baf3a7b7c88afe5
|
/scripts/doparallel_test.R
|
permissive
|
kingaa/kingaa.github.io
|
R
| false
| false
| 2,682
|
r
|
library(foreach,quietly=TRUE)
library(doParallel,quietly=TRUE)
library(doRNG,quietly=TRUE)
njobs <- 1600
nnode <- 100
chunk <- 1
## set njobs, nnode, and chunk from the command line
invisible(eval(parse(text=commandArgs(trailingOnly=TRUE))))
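## the trailing name=value arguments are parsed and evaluated by the line above;
## a hypothetical invocation (the exact launcher depends on the local MPI setup):
##   Rscript doparallel_test.R njobs=3200 nnode=200 chunk=2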
ncore <- 1
cl <- makeCluster(type="MPI",spec=nnode)
registerDoParallel(cl)
clusterSetRNGStream(cl,iseed=1218461302L)
cat("Starting computation of size",njobs,"using",
nnode,"nodes, with",
ncore,"cores/node,",
"and chunksize",chunk,"\n")
tic <- Sys.time()
res <- foreach (i = seq_len(njobs),
.combine=rbind,
.inorder=FALSE) %dopar% {
t1 <- Sys.time()
h <- system("hostname",intern=TRUE)
pid <- Sys.getpid()
x <- quantile(rnorm(n=10000000),prob=0.9)
t2 <- Sys.time()
data.frame(id=i,host=h,pid=pid,t1=t1,t2=t2,x=x)
}
toc <- Sys.time()
stopCluster(cl)
suppressMessages(library(aakmisc,quietly=TRUE))
library(magrittr)
library(plyr)
library(reshape2)
library(ggplot2)
library(grid)
library(digest,quietly=TRUE)
nwork <- getDoParWorkers()
cat(nnode,'nodes x',ncore,'cores,','chunksize',chunk,'nworkers',nwork,'\n')
res |> mutate(etime=difftime(t2,t1,units='secs')) |>
summarize(stime=as.numeric(sum(etime)),
etime=as.numeric(difftime(max(t2),min(t1),units="secs"))) |>
mutate(otime=as.numeric(difftime(toc,tic,units='secs')),
ieffic=stime/etime/nwork,
oeffic=stime/otime/nwork,
njobs=njobs,
nnode=nnode,
ncore=ncore,
nwork=nwork,
chunk=chunk) |>
melt(id=NULL) |>
mutate(y=-seq_along(variable),label=paste0(variable,"\t",signif(value,4))) -> eff
eff |> use_series("label") |> cat(sep="\n")
eff |> ggplot(aes(x=1,y=y,label=label))+
geom_text(hjust="left")+
theme_void() -> txt
res |>
ggplot(aes(x=t1,xend=t2,y=id,yend=id,color=host))+
geom_segment()+
guides(color=FALSE)+
theme_bw()+
labs(x='time',y='job id')+
annotate("segment",x=tic,xend=toc,y=0,yend=njobs,color='black') -> pl
res |>
ggplot(aes(x=host,fill=host,color=host))+
stat_count()+
labs(x="",y="")+
guides(fill=FALSE,color=FALSE)+
theme(axis.text.x=element_text(angle=90,vjust=0.5)) -> pl1
png(filename="doparallel_test.png",width=7,height=8,
units='in',res=300)
print(pl)
print(txt,vp=viewport(x=0.2,y=0.8,width=0.4,height=0.2))
print(pl1,vp=viewport(x=0.75,y=0.3,width=0.4,height=0.4))
dev.off()
res |>
subset(select=c(id,x)) |>
arrange(id) |>
set_rownames(NULL) |>
digest()
## 1ad5a2898de4e21b2baf3a7b7c88afe5
|
rm(list = ls())
library("stats")
library("ggplot2")
tsbr = read.csv("FilteredTSBR.csv")
# the salary field is formatted as $123,456.78 -> $123456.78 (commas removed) -> 123456.78 ('$' removed)
#https://stat.ethz.ch/pipermail/r-help/2010-May/237909.html
#http://stackoverflow.com/questions/1523126/how-to-read-a-csv-file-in-r-where-some-numbers-contain-commas
tsbr$Salary.semester = as.numeric(sub('$', '', as.character(gsub(",", "", as.character(tsbr$Salary.semester))), fixed = TRUE))
nintyninePercent = quantile(tsbr$Salary.semester, 0.99)
NNJobs = subset(tsbr, Salary.semester > nintyninePercent)
NNJobs$department
NNJobs$FullName = paste(NNJobs$First.Name, NNJobs$last.name)
ggplot(aes(x = as.character(department), y = Salary.semester), data = NNJobs) +
geom_point() +
facet_wrap(~Institution.Name)
ggsave(filename = "Salary.png")
ggplot() + aes(x = NNJobs$department) + geom_bar()  # counts per department (categorical x)
summary(NNJobs$Salary.semester)
as.character(NNJobs$department)
medical = c("OB GYN", "Surgery", "Psychiatry", "Family Medicine",
"Pediatrics", "Vice Pres for Health Affairs", "Family Practice Resid Kpt",
"Dean College of Pharmacy", "Internal Medicine", "Family Practice Resid Johnson City",
"Biomedical Sciences", "Dean College of Medicine", "Family Medicine .",
"Pediatrics", "Psychology", "Pathology", "Health Sciences")
administration = c("Provost VP Academic Affairs", "Presidents Office", "Office of the Provost",
"Management", "Vice President for Finance and Adm", "Office of Graduate Medical Educ",
"Office of Sponsored Programs", "Assoc Dean Academic Affairs", "VP University Advancement",
"University Provost", "Office of Senior Vice President", "VP Student Affairs",
"Graduate Programs", "Bureau Business Economic Research", "Office of President",
"Chair of Excellence Free Enterprise", "Ofc VP Research & Economic Dev", "Provost VP Academic Affairs",
"Office of the President", "Research and Spons Programs", "President's Office", "Vice President Research", "Provost Office")
sports = c("Football", "Athletic Administration", "Office of Intercoll Athletics", "Mens Basketball",
"Womens Basketball", "Athletic Director", "Football Regular Season", "Basketball Men",
"Basketball Women", "Men's Basketball", "Athletics Director", "Athletic Business Office")
stem = c("Dean of Engineering Admin Office", "Mechanical Engineering", "Dean Basic and Applied Sciences",
"Biomedical Engineering", "Management Information Systems", "College of Engineering",
"Information Technology Division", "Math B Bollobas")
buisness = c("Dean College Of Business", "College of Business Economics", "Finance Insurance Real Estate",
"School of Accountancy", "School Hospitality and Resort Mgmt", "Dean Mass Communication")
liberalArts = c("Philosophy", "College of Arts and Sciences")
is.na(tsbr$fte)
law = c("School of Law")
NNJobs$deptBucket[which(NNJobs$department %in% administration)] = "Administration"
NNJobs$deptBucket[which(NNJobs$department %in% medical)] = "Medical"
NNJobs$deptBucket[which(NNJobs$department %in% sports)] = "Sports"
NNJobs$deptBucket[which(NNJobs$department %in% stem)] = "STEM"
NNJobs$deptBucket[which(NNJobs$department %in% buisness)] = "Buisness"
NNJobs$deptBucket[which(NNJobs$department %in% liberalArts)] = "Liberal Arts"
NNJobs$deptBucket[which(NNJobs$department %in% law)] = "Law"
NNJobs$deptBucket
is.na(NNJobs$deptBucket)
ggplot(aes(x = deptBucket, y = Salary.semester), data = NNJobs) +
geom_point() +
facet_wrap(~Institution.Name)
|
/TNEmployment.R
|
no_license
|
Riuchando/CollegeGraphs
|
R
| false
| false
| 3,641
|
r
|
rm(list = ls())
library("stats")
library("ggplot2")
tsbr = read.csv("FilteredTSBR.csv")
# the salary field is formatted as $123,456.78 -> $123456.78 (commas removed) -> 123456.78 ('$' removed)
#https://stat.ethz.ch/pipermail/r-help/2010-May/237909.html
#http://stackoverflow.com/questions/1523126/how-to-read-a-csv-file-in-r-where-some-numbers-contain-commas
tsbr$Salary.semester = as.numeric(sub('$', '', as.character(gsub(",", "", as.character(tsbr$Salary.semester))), fixed = TRUE))
nintyninePercent = quantile(tsbr$Salary.semester, 0.99)
NNJobs = subset(tsbr, Salary.semester > nintyninePercent)
NNJobs$department
NNJobs$FullName = paste(NNJobs$First.Name, NNJobs$last.name)
ggplot(aes(x = as.character(department), y = Salary.semester), data = NNJobs) +
geom_point() +
facet_wrap(~Institution.Name)
ggsave(filename = "Salary.png")
ggplot() + aes(x = NNJobs$department) + geom_bar()  # counts per department (categorical x)
summary(NNJobs$Salary.semester)
as.character(NNJobs$department)
medical = c("OB GYN", "Surgery", "Psychiatry", "Family Medicine",
"Pediatrics", "Vice Pres for Health Affairs", "Family Practice Resid Kpt",
"Dean College of Pharmacy", "Internal Medicine", "Family Practice Resid Johnson City",
"Biomedical Sciences", "Dean College of Medicine", "Family Medicine .",
"Pediatrics", "Psychology", "Pathology", "Health Sciences")
administration = c("Provost VP Academic Affairs", "Presidents Office", "Office of the Provost",
"Management", "Vice President for Finance and Adm", "Office of Graduate Medical Educ",
"Office of Sponsored Programs", "Assoc Dean Academic Affairs", "VP University Advancement",
"University Provost", "Office of Senior Vice President", "VP Student Affairs",
"Graduate Programs", "Bureau Business Economic Research", "Office of President",
"Chair of Excellence Free Enterprise", "Ofc VP Research & Economic Dev", "Provost VP Academic Affairs",
"Office of the President", "Research and Spons Programs", "President's Office", "Vice President Research", "Provost Office")
sports = c("Football", "Athletic Administration", "Office of Intercoll Athletics", "Mens Basketball",
"Womens Basketball", "Athletic Director", "Football Regular Season", "Basketball Men",
"Basketball Women", "Men's Basketball", "Athletics Director", "Athletic Business Office")
stem = c("Dean of Engineering Admin Office", "Mechanical Engineering", "Dean Basic and Applied Sciences",
"Biomedical Engineering", "Management Information Systems", "College of Engineering",
"Information Technology Division", "Math B Bollobas")
buisness = c("Dean College Of Business", "College of Business Economics", "Finance Insurance Real Estate",
"School of Accountancy", "School Hospitality and Resort Mgmt", "Dean Mass Communication")
liberalArts = c("Philosophy", "College of Arts and Sciences")
is.na(tsbr$fte)
law = c("School of Law")
NNJobs$deptBucket[which(NNJobs$department %in% administration)] = "Administration"
NNJobs$deptBucket[which(NNJobs$department %in% medical)] = "Medical"
NNJobs$deptBucket[which(NNJobs$department %in% sports)] = "Sports"
NNJobs$deptBucket[which(NNJobs$department %in% stem)] = "STEM"
NNJobs$deptBucket[which(NNJobs$department %in% buisness)] = "Buisness"
NNJobs$deptBucket[which(NNJobs$department %in% liberalArts)] = "Liberal Arts"
NNJobs$deptBucket[which(NNJobs$department %in% law)] = "Law"
NNJobs$deptBucket
is.na(NNJobs$deptBucket)
ggplot(aes(x = deptBucket, y = Salary.semester), data = NNJobs) +
geom_point() +
facet_wrap(~Institution.Name)
|
% Generated by roxygen2 (4.0.0): do not edit by hand
\name{occurrencecount}
\alias{occurrencecount}
\title{Counts taxon concept records matching a range of filters.}
\usage{
occurrencecount(scientificname = NULL, taxonconceptKey = NULL,
dataproviderkey = NULL, dataresourcekey = NULL, institutioncode = NULL,
collectioncode = NULL, catalognumber = NULL, resourcenetworkkey = NULL,
basisofrecordcode = NULL, minlatitude = NULL, maxlatitude = NULL,
minlongitude = NULL, maxlongitude = NULL, minaltitude = NULL,
maxaltitude = NULL, mindepth = NULL, maxdepth = NULL, cellid = NULL,
centicellid = NULL, typesonly = NULL, coordinatestatus = NULL,
coordinateissues = NULL, hostisocountrycode = NULL,
originisocountrycode = NULL, originregioncode = NULL, startdate = NULL,
enddate = NULL, startyear = NULL, endyear = NULL, year = NULL,
month = NULL, day = NULL, modifiedsince = NULL)
}
\arguments{
\item{scientificname}{count only records where the scientific name matches
that supplied; use an asterisk * for any name starting with the preceding
string (character). Does not make use of extra knowledge of possible synonyms
or of child taxa. For these functions, use taxonconceptkey. May be repeated in
a single request.}
\item{taxonconceptKey}{unique key for taxon (numeric). Count only records which are
for the taxon identified by the supplied numeric key, including any records provided
under synonyms of the taxon concerned, and any records for child taxa
(e.g. all genera and species within a family). May be repeated in a single request.}
\item{dataproviderkey}{Filter records to those provided by the supplied
numeric key for a data provider. See \link{providers}. (character)}
\item{dataresourcekey}{Filter records to those provided by the supplied
numeric key for a data resource See \link{resources}. (character)}
\item{institutioncode}{Return only records from a given institution code.}
\item{collectioncode}{Return only records from a given collection code.}
\item{catalognumber}{Return only records from a given catalog number.}
\item{resourcenetworkkey}{count only records which have been made available by
resources identified as belonging to the network identified by the supplied
numeric key.}
\item{basisofrecordcode}{return only records with the specified basis of record.
Supported values are: "specimen, observation, living, germplasm, fossil, unknown".
(character)}
\item{minlatitude}{return only records from locations with latitudes greater
than the supplied value (southern hemisphere with negative latitudes). (numeric)}
\item{maxlatitude}{return only records from locations with latitudes lower than
the supplied value (southern hemisphere with negative latitudes). (numeric)}
\item{minlongitude}{return only records from locations with longitudes greater
than the supplied value (western hemisphere with negative longitudes). (numeric)}
\item{maxlongitude}{return only records from locations with longitudes lower
than the supplied value (western hemisphere with negative longitudes). (numeric)}
\item{minaltitude}{return only records from altitudes greater than or equal to
the supplied value. (integer)}
\item{maxaltitude}{return only records from altitudes less than or equal to
the supplied value. (integer)}
\item{mindepth}{return only records from depth greater than or equal to the supplied
value. (numeric 2 decimal places)}
\item{maxdepth}{return only records from depth less than or equal to the supplied
value. (numeric 2 decimal places)}
\item{cellid}{identifier for a one degree cell (0 - 64,799).
Using a cellid is more efficient than using a bounding box for the same cell.}
\item{centicellid}{identifier for a 0.1 degree cell within a one degree cell}
\item{typesonly}{if set to "true", return only records with a type status specified.}
\item{coordinatestatus}{if set to "true", return only records with coordinates.
If set to "false", return only records without coordinates.}
\item{coordinateissues}{if set to "true", return only records for which the portal
has detected possible issues in georeferencing. If set to "false", return only
records for which the portal has not detected any such issues.}
\item{hostisocountrycode}{return only records served by providers from the country
identified by the supplied 2-letter ISO code.}
\item{originisocountrycode}{return only records of occurrences which occurred
within the country identified by the supplied 2-letter ISO code.}
\item{originregioncode}{return only records of occurrences which occurred
within the region identified by the supplied 3-letter code.}
\item{startdate}{return only records occurring on or after the supplied date
(format YYYY-MM-DD, e.g. 2006-11-28).}
\item{enddate}{return only records occurring on or before the supplied date
(format YYYY-MM-DD, e.g. 2006-11-28).}
\item{startyear}{return only records from during or after the supplied year.}
\item{endyear}{return only records from during or before the supplied year.}
\item{year}{return only records from during the supplied year.}
\item{month}{return only records from during the supplied month (expressed as
an integer in the range 1 to 12).}
\item{day}{return only records from during the supplied day of month
(expressed as an integer in the range 1 to 31).}
\item{modifiedsince}{return only records which have been indexed or modified
in the GBIF data portal index on or after the supplied date
(format YYYY-MM-DD, e.g. 2006-11-28).}
}
\value{
A single numeric value - the number of records found in GBIF matching
the query.
}
\description{
This function is deprecated.
}
\examples{
\donttest{
occurrencecount(scientificname = 'Accipiter erythronemius', coordinatestatus = TRUE)
occurrencecount(scientificname = 'Helianthus annuus', coordinatestatus = TRUE,
year=2009)
occurrencecount(scientificname = 'Helianthus annuus', coordinatestatus = TRUE,
year=2005, maxlatitude=20)
}
}
\seealso{
occ_count
}
|
/man/occurrencecount-deprecated.Rd
|
permissive
|
smartinsightsfromdata/rgbif
|
R
| false
| false
| 5,951
|
rd
|
% Generated by roxygen2 (4.0.0): do not edit by hand
\name{occurrencecount}
\alias{occurrencecount}
\title{Counts taxon concept records matching a range of filters.}
\usage{
occurrencecount(scientificname = NULL, taxonconceptKey = NULL,
dataproviderkey = NULL, dataresourcekey = NULL, institutioncode = NULL,
collectioncode = NULL, catalognumber = NULL, resourcenetworkkey = NULL,
basisofrecordcode = NULL, minlatitude = NULL, maxlatitude = NULL,
minlongitude = NULL, maxlongitude = NULL, minaltitude = NULL,
maxaltitude = NULL, mindepth = NULL, maxdepth = NULL, cellid = NULL,
centicellid = NULL, typesonly = NULL, coordinatestatus = NULL,
coordinateissues = NULL, hostisocountrycode = NULL,
originisocountrycode = NULL, originregioncode = NULL, startdate = NULL,
enddate = NULL, startyear = NULL, endyear = NULL, year = NULL,
month = NULL, day = NULL, modifiedsince = NULL)
}
\arguments{
\item{scientificname}{count only records where the scientific name matches
that supplied; use an asterisk * for any name starting with the preceding
string (character). Does not make use of extra knowledge of possible synonyms
or of child taxa. For these functions, use taxonconceptkey. May be repeated in
a single request.}
\item{taxonconceptKey}{unique key for taxon (numeric). Count only records which are
for the taxon identified by the supplied numeric key, including any records provided
under synonyms of the taxon concerned, and any records for child taxa
(e.g. all genera and species within a family). May be repeated in a single request.}
\item{dataproviderkey}{Filter records to those provided by the supplied
numeric key for a data provider. See \link{providers}. (character)}
\item{dataresourcekey}{Filter records to those provided by the supplied
numeric key for a data resource See \link{resources}. (character)}
\item{institutioncode}{Return only records from a given institution code.}
\item{collectioncode}{Return only records from a given collection code.}
\item{catalognumber}{Return only records from a given catalog number.}
\item{resourcenetworkkey}{count only records which have been made available by
resources identified as belonging to the network identified by the supplied
numeric key.}
\item{basisofrecordcode}{return only records with the specified basis of record.
Supported values are: "specimen, observation, living, germplasm, fossil, unknown".
(character)}
\item{minlatitude}{return only records from locations with latitudes greater
than the supplied value (southern hemisphere with negative latitudes). (numeric)}
\item{maxlatitude}{return only records from locations with latitudes lower than
the supplied value (southern hemisphere with negative latitudes). (numeric)}
\item{minlongitude}{return only records from locations with longitudes greater
than the supplied value (western hemisphere with negative longitudes). (numeric)}
\item{maxlongitude}{return only records from locations with longitudes lower
than the supplied value (western hemisphere with negative longitudes). (numeric)}
\item{minaltitude}{return only records from altitudes greater than or equal to
the supplied value. (integer)}
\item{maxaltitude}{return only records from altitudes less than or equal to
the supplied value. (integer)}
\item{mindepth}{return only records from depth greater than or equal to the supplied
value. (numeric 2 decimal places)}
\item{maxdepth}{return only records from depth less than or equal to the supplied
value. (numeric 2 decimal places)}
\item{cellid}{identifier for a one degree cell (0 - 64,799).
Using a cellid is more efficient than using a bounding box for the same cell.}
\item{centicellid}{identifier for a 0.1 degree cell within a one degree cell}
\item{typesonly}{if set to "true", return only records with a type status specified.}
\item{coordinatestatus}{if set to "true", return only records with coordinates.
If set to "false", return only records without coordinates.}
\item{coordinateissues}{if set to "true", return only records for which the portal
has detected possible issues in georeferencing. If set to "false", return only
records for which the portal has not detected any such issues.}
\item{hostisocountrycode}{return only records served by providers from the country
identified by the supplied 2-letter ISO code.}
\item{originisocountrycode}{return only records of occurrences which occurred
within the country identified by the supplied 2-letter ISO code.}
\item{originregioncode}{return only records of occurrences which occurred
within the region identified by the supplied 3-letter code.}
\item{startdate}{return only records occurring on or after the supplied date
(format YYYY-MM-DD, e.g. 2006-11-28).}
\item{enddate}{return only records occurring on or before the supplied date
(format YYYY-MM-DD, e.g. 2006-11-28).}
\item{startyear}{return only records from during or after the supplied year.}
\item{endyear}{return only records from during or before the supplied year.}
\item{year}{return only records from during the supplied year.}
\item{month}{return only records from during the supplied month (expressed as
an integer in the range 1 to 12).}
\item{day}{return only records from during the supplied day of month
(expressed as an integer in the range 1 to 31).}
\item{modifiedsince}{return only records which have been indexed or modified
in the GBIF data portal index on or after the supplied date
(format YYYY-MM-DD, e.g. 2006-11-28).}
}
\value{
A single numeric value - the number of records found in GBIF matching
the query.
}
\description{
This function is deprecated.
}
\examples{
\donttest{
occurrencecount(scientificname = 'Accipiter erythronemius', coordinatestatus = TRUE)
occurrencecount(scientificname = 'Helianthus annuus', coordinatestatus = TRUE,
year=2009)
occurrencecount(scientificname = 'Helianthus annuus', coordinatestatus = TRUE,
year=2005, maxlatitude=20)
}
}
\seealso{
occ_count
}
|
folder_path<-"/Users/MenaL/Desktop/GROUP P/R_project"
solar<-readRDS("/Users/MenaL/Desktop/GROUP P/R_project/solar_dataset.RData")
add<-readRDS("/Users/MenaL/Desktop/GROUP P/R_project/additional_variables.RData")
geo<- read.table(file.path(folder_path, "station_info.csv"), sep = ",", header = TRUE)
library(data.table)
write.csv(solar, "/Users/MenaL/Desktop/GROUP P/R_project/solar_dataset.csv",
quote = F, row.names = F)
################################## BUILD elevation non-constant DATATABLE #######################################
solar[solar$IDAB==39442800,]$IDAB<-30795000
solar[solar$IDAB==32855400,]$IDAB<-30795000
station<-as.data.frame(solar[1:5113,1:99],stringsAsFactors = F)
station_t_s<-as.data.frame(t(station),stringsAsFactors = F)
##change back to numeric after t()
station_t<-as.data.frame(lapply(station_t_s, type.convert, as.is = TRUE))
pca<-as.data.frame(solar[1:5113,100:117],stringsAsFactors=F)
prepare<-station_t[2:99,]
new_all<-data.frame()
for (i in 1:5113) {
new_df<-data.frame()
new_df<-as.data.frame(prepare[,i],stringsAsFactors = FALSE)
new_df<-cbind(new_df,as.data.frame(pca[i,1:18]),as.data.frame(geo$elev))
new_all<-rbind(new_all,new_df)
}
names(new_df)[1]<-"solar_p"
names(new_df)[20]<-"elev"
################################## BUILD MODEL ######################################
set.seed(100);
train_index_1<- sample(1:nrow(new_df), 0.7*nrow(new_df));
train_1<- new_df[train_index_1,]
test<-new_df[-train_index_1,]
production<-lm(solar_p~ PC1+PC2+PC3+PC4+PC5+PC6+PC7+PC8+PC9+PC10+PC11+PC12+PC13+PC14
+PC15+PC16+PC17+PC18+elev, data=train_1)
predictions_test <- predict(production, newdata = test)
predictions_train<-predict(production, newdata = train_1)
errors_test <- predictions_test - test$solar_p
errors_train<-predictions_train-train_1$solar_p
mae_test<- round(mean(abs(errors_test)), 2);
mae_train<- round(mean(abs(errors_train)), 2);
mse_test<- round(mean(errors_test^2), 2);
mse_train<- round(mean(errors_train^2), 2);
|
/3_Predicting_solar_energy_production/02_Model/02_Modelling Script(2.1.3).R
|
no_license
|
diegoct6/Machine-Learning
|
R
| false
| false
| 2,014
|
r
|
folder_path<-"/Users/MenaL/Desktop/GROUP P/R_project"
solar<-readRDS("/Users/MenaL/Desktop/GROUP P/R_project/solar_dataset.RData")
add<-readRDS("/Users/MenaL/Desktop/GROUP P/R_project/additional_variables.RData")
geo<- read.table(file.path(folder_path, "station_info.csv"), sep = ",", header = TRUE)
library(data.table)
write.csv(solar, "/Users/MenaL/Desktop/GROUP P/R_project/solar_dataset.csv",
quote = F, row.names = F)
################################## BUILD elevation non-constant DATATABLE #######################################
solar[solar$IDAB==39442800,]$IDAB<-30795000
solar[solar$IDAB==32855400,]$IDAB<-30795000
station<-as.data.frame(solar[1:5113,1:99],stringsAsFactors = F)
station_t_s<-as.data.frame(t(station),stringsAsFactors = F)
##change back to numeric after t()
station_t<-as.data.frame(lapply(station_t_s, type.convert, as.is = TRUE))
pca<-as.data.frame(solar[1:5113,100:117],stringsAsFactors=F)
prepare<-station_t[2:99,]
new_all<-data.frame()
for (i in 1:5113) {
new_df<-data.frame()
new_df<-as.data.frame(prepare[,i],stringsAsFactors = FALSE)
new_df<-cbind(new_df,as.data.frame(pca[i,1:18]),as.data.frame(geo$elev))
new_all<-rbind(new_all,new_df)
}
names(new_df)[1]<-"solar_p"
names(new_df)[20]<-"elev"
################################## BUILD MODEL ######################################
set.seed(100);
train_index_1<- sample(1:nrow(new_df), 0.7*nrow(new_df));
train_1<- new_df[train_index_1,]
test<-new_df[-train_index_1,]
production<-lm(solar_p~ PC1+PC2+PC3+PC4+PC5+PC6+PC7+PC8+PC9+PC10+PC11+PC12+PC13+PC14
+PC15+PC16+PC17+PC18+elev, data=train_1)
predictions_test <- predict(production, newdata = test)
predictions_train<-predict(production, newdata = train_1)
errors_test <- predictions_test - test$solar_p
errors_train<-predictions_train-train_1$solar_p
mae_test<- round(mean(abs(errors_test)), 2);
mae_train<- round(mean(abs(errors_train)), 2);
mse_test<- round(mean(errors_test^2), 2);
mse_train<- round(mean(errors_train^2), 2);
|
#' calculate trade statistics for round turn trades.
#'
#' One 'trade' is defined as a series of transactions which make up a 'round turn'.
#' It may contain many transactions. This function reports statistics on these
#' round turn trades which may be used on their own or which are also used
#' by other functions, including \code{\link{tradeStats}} and \code{\link{tradeQuantiles}}.
#'
#' @details
#' Additional methods of determining 'round turns' are also supported.
#'
#' \strong{Supported Methods for \code{tradeDef}:}
#' \describe{
#' \item{\code{flat.to.flat}}{From the initial transaction that moves the
#' position away from zero to the last transaction that flattens the position
#' make up one round turn trade for the purposes of 'flat to flat' analysis.}
#' \item{\code{flat.to.reduced}}{The \emph{flat.to.reduced} method starts the
#' round turn trade at the same point as \emph{flat.to.flat}, at the first
#' transaction which moves the position from zero to a new open position. The
#' end of each round turn is described by transactions which move the position
#' closer to zero, regardless of any other transactions which may have
#' increased the position along the way.}
#' \item{\code{increased.to.reduced}}{The \emph{increased.to.reduced} method
#' is appropriate for analyzing round turns in a portfolio which is rarely
#' flat, or which regularly adds to and reduces positions. Every transaction
#' which moves the position closer to zero (reduced position) will close a
#' round turn. This closing transaction will be paired with one or more
#' transactions which move the position further from zero to locate the
#' initiating transactions. \code{acfifo} is an alias for this method.}
#' }
#'
#' As with the rest of \code{blotter}, \code{perTradeStats} uses average cost
#' accounting. For the purposes of round turns, the average cost in force is
#' the average cost of the open position at the time of the closing transaction.
#'
#' Note that a trade that is open at the end of the measured period will
#' be marked to the timestamp of the end of the series.
#' If that trade is later closed, the stats for it will likely change.
#' This is 'mark to market' for the open position, and corresponds to
#' most trade accounting systems and risk systems in including the open
#' position in reporting.
#'
#' \code{Net.Trading.PL} and MAE/MFE are calculated somewhat differently
#' depending on the \code{tradeDef}. All three \code{tradeDef} types utilize
#' \emph{Period.Realized.PL} to calculate the trading P&L of the round turn trade.
#' If the method is \emph{flat.to.flat}, the cumulative sum of all transaction
#' realized P&L's during the round turn is added. If the method is
#' \emph{flat.to.reduced}, the realized P&L is the period realized P&L of the
#' closing transaction. If the method is \emph{increased.to.reduced} (or the
#' equivalent \emph{acfifo}), the realized P&L for the round turn trade will be
#' the period realized P&L potentially pro-rated by the difference in size between
#' the initiating and closing transactions.
#'
#' MAE and MFE are pro-rated for \emph{increased.to.reduced} (or \emph{acfifo})
#' and \emph{flat.to.reduced} using the proportion of the total traded quantity
#' over \emph{timespan} that is attributable to this round turn. For these
#' definitions of round turns, this is complicated because there can be multiple
#' initiating transactions which will adjust the average cost (and thus the net P&L)
#' of the position. After pro-rating the cash measures, the percent and tick
#' versions are constructed by dividing by the maximum notional position cost and
#' the tick value, respectively.
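#'
#' As a purely illustrative example with made-up numbers: if a closing
#' transaction reduces the position by 400 units, of which 250 close this
#' round turn, the cash MAE/MFE are scaled by 250/400 = 0.625 before the
#' percent and tick versions are derived.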
#'
#' If \code{includeFlatPeriods=TRUE}, \code{perTradeStats} will include periods
#' when the series is flat (holds no position). Flat periods at the beginning of
#' the series will be removed, as they are presumed to hold no information, and
#' may be easily retrieved if desired. This information is likely most useful in
#' constructing stylized facts about the trading style, calculating values such
#' as time in market. It is also extremely useful for Monte Carlo simulation of
#' random trading strategies with similar style to the series under investigation.
#' For more information on this latter use, see \code{\link{txnsim}}.
#'
#' @param Portfolio string identifying the portfolio
#' @param Symbol string identifying the symbol to examine trades for. If missing, the first symbol found in the \code{Portfolio} portfolio will be used
#' @param includeOpenTrade whether to process only finished trades, or the last trade if it is still open, default TRUE
#' @param tradeDef string, one of 'flat.to.flat', 'flat.to.reduced', 'increased.to.reduced' or 'acfifo'. See Details.
#' @param \dots any other passthrough parameters
#' @param includeFlatPeriods boolean, default FALSE, whether to include flat periods in output, mostly useful for Monte Carlo simulation as in \code{\link{txnsim}}
#' @author Brian G. Peterson, Jasen Mackie, Jan Humme
#' @references Tomasini, E. and Jaekle, U. \emph{Trading Systems - A new approach to system development and portfolio optimisation} (ISBN 978-1-905641-79-6)
#' @return
#' A \code{data.frame} containing:
#'
#' \describe{
#' \item{Start}{the \code{POSIXct} timestamp of the start of the trade}
#' \item{End}{the \code{POSIXct} timestamp of the end of the trade, when flat}
#' \item{Init.Qty}{ transaction quantity initiating the trade}
#' \item{Init.Pos}{ position held after the initiating transaction of the round turn trade}
#' \item{Max.Pos}{the maximum (largest) position held during the open trade}
#' \item{End.Pos}{ the remaining quantity held after closing the trade}
#' \item{Closing.Txn.Qty}{ the transaction quantity which closes the round turn trade }
#' \item{Num.Txns}{ the number of transactions included in this trade}
#' \item{Max.Notional.Cost}{ the largest notional investment cost of this trade}
#' \item{Net.Trading.PL}{ net trading P&L in the currency of \code{Symbol}}
#' \item{MAE}{ Maximum Adverse Excursion (MAE), in the currency of \code{Symbol}}
#' \item{MFE}{ Maximum Favorable Excursion (MFE), in the currency of \code{Symbol}}
#' \item{Pct.Net.Trading.PL}{ net trading P&L in percent of invested \code{Symbol} price gained or lost}
#' \item{Pct.MAE}{ Maximum Adverse Excursion (MAE), in percent}
#' \item{Pct.MFE}{ Maximum Favorable Excursion (MFE), in percent}
#' \item{tick.Net.Trading.PL}{ net trading P&L in ticks}
#' \item{tick.MAE}{ Maximum Adverse Excursion (MAE) in ticks}
#' \item{tick.MFE}{ Maximum Favorable Excursion (MFE) in ticks}
#' \item{duration}{ \code{difftime} describing the duration of the round turn, in seconds }
#' }
#'
#' @seealso \code{\link{chart.ME}} for a chart of MAE and MFE derived from this function,
#' and \code{\link{tradeStats}} for a summary view of the performance, and
#' \code{\link{tradeQuantiles}} for round turns classified by quantile.
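#' @examples
#' \dontrun{
#' # Illustrative sketch only: 'myport' and 'IBM' are placeholder names, and the
#' # call assumes a blotter portfolio (and instrument definitions) with recorded
#' # transactions already exists.
#' rt <- perTradeStats(Portfolio = "myport", Symbol = "IBM",
#'                     tradeDef = "increased.to.reduced")
#' head(rt[, c("Start", "End", "Init.Qty", "Net.Trading.PL", "MAE", "MFE")])
#' }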
#' @export
perTradeStats <- function(Portfolio
, Symbol
, includeOpenTrade=TRUE
, tradeDef="flat.to.flat"
, ...
, includeFlatPeriods=FALSE)
{
portf <- .getPortfolio(Portfolio)
if(missing(Symbol)) Symbol <- ls(portf$symbols)[[1]]
posPL <- portf$symbols[[Symbol]]$posPL
instr <- getInstrument(Symbol)
tick_value <- instr$multiplier*instr$tick_size
tradeDef <- match.arg(tradeDef, c("flat.to.flat","flat.to.reduced","increased.to.reduced","acfifo"))
if(tradeDef=='acfifo') tradeDef<-'increased.to.reduced'
trades <- list()
switch(tradeDef,
flat.to.flat = {
# identify start and end for each trade, where end means flat position
trades$Start <- which(posPL$Pos.Qty!=0 & lag(posPL$Pos.Qty)==0)
trades$End <- which(posPL$Pos.Qty==0 & lag(posPL$Pos.Qty)!=0)
},
flat.to.reduced = {
# find all transactions that bring position closer to zero ('trade ends')
decrPos <- diff(abs(posPL$Pos.Qty)) < 0
# find all transactions that open a position ('trade starts')
initPos <- posPL$Pos.Qty!=0 & lag(posPL$Pos.Qty)==0
# 'trades' start when we open a position, so determine which starts correspond to each end
# add small amount to Start index, so starts will always occur before ends in StartEnd
Start <- xts(initPos[initPos,which.i=TRUE],index(initPos[initPos])+1e-5)
End <- xts(decrPos[decrPos,which.i=TRUE],index(decrPos[decrPos]))
StartEnd <- merge(Start,End)
StartEnd$Start <- na.locf(StartEnd$Start)
if(includeOpenTrade){
SEtail <- StartEnd[paste0(index(last(StartEnd[!is.na(StartEnd$End)])+1),'/')]
SEtail <- SEtail[-1,]
}
StartEnd <- StartEnd[!is.na(StartEnd$End),]
if(includeOpenTrade) StartEnd <- rbind(StartEnd,SEtail)
# populate trades list
trades$Start <- drop(coredata(StartEnd[!is.na(StartEnd$Start[]),]$Start))
trades$End <- drop(coredata(StartEnd[!is.na(StartEnd$End),]$End))
},
increased.to.reduced = {
# find all transactions that bring position closer to zero ('trade ends')
decrPos <- diff(abs(posPL$Pos.Qty)) < 0
decrPosCount <- ifelse(diff(abs(posPL$Pos.Qty)) < 0,-1,0)
decrPosCount <- ifelse(decrPosCount[-1] == 0, 0, cumsum(decrPosCount[-1]))
decrPosQty <- ifelse(diff(abs(posPL$Pos.Qty)) < 0, diff(abs(posPL$Pos.Qty)),0)
decrPosQtyCum <- ifelse(decrPosQty[-1] == 0, 0, cumsum(decrPosQty[-1])) #subset for the leading NA
# find all transactions that take position further from zero ('trade starts')
incrPos <- diff(abs(posPL$Pos.Qty)) > 0
incrPosCount <- ifelse(diff(abs(posPL$Pos.Qty)) > 0,1,0)
incrPosCount <- ifelse(incrPosCount[-1] == 0, 0, cumsum(incrPosCount[-1]))
incrPosQty <- ifelse(diff(abs(posPL$Pos.Qty)) > 0, diff(abs(posPL$Pos.Qty)),0)
incrPosQtyCum <- ifelse(incrPosQty[-1] == 0, 0, cumsum(incrPosQty[-1])) #subset for the leading NA
# Calculate txn qty
txnqty <- rbind(incrPosQtyCum, abs(decrPosQtyCum))
txnqty <- txnqty[-which(txnqty == 0)]
txnqty <- rbind(xts(0,as.POSIXct("1950-01-01")),txnqty)
txnqty <- as.data.frame(txnqty)
txnqty <- txnqty[order(txnqty[,1]),]
txnqty <- diff(txnqty)
txnqty <- na.omit(txnqty)
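# txnqty now holds the successive quantity chunks between consecutive
# cumulative-increase/decrease boundaries; each chunk belongs to exactly one
# initiating and one closing transaction, matched below with findInterval
# against the cumulative increase/decrease totals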
# Get start and end dates
starts <- incrPosQtyCum[-which(incrPosQtyCum==0)]
ends <- abs(decrPosQtyCum[-which(decrPosQtyCum==0)])
cumsum(txnqty) # let's investigate cumsum(txnqty)
end_idx <- findInterval(cumsum(txnqty), ends, left.open = TRUE) + 1 # can disregard last element as it relates to open trade
end_idx
start_idx <- findInterval(cumsum(txnqty), starts, left.open = TRUE) + 1 # can disregard last element as it relates to open trade
start_idx
testdf <- data.frame(cbind(txnqty, cumsum(txnqty), start_idx, end_idx))
testdf$start_ts <- index(starts)[start_idx]
testdf$end_ts <- index(ends)[end_idx]
testdf <- testdf[-which(testdf$txnqty == 0),]
# build trades$Start and trades$End in trades list
# iterating over testdf, for all txns that have an end date
# and are therefore round turn trades
for(i in 1:length(which(!is.na(testdf$end_ts)))){
trades$Start[i] <- which(index(incrPosQtyCum) == testdf$start_ts[i])
trades$End[i] <- which(index(decrPosQtyCum) == testdf$end_ts[i])
}
# now add 1 to idx for missing initdate from incr/decrPosQtyCum - adds consistency with flat.to.reduced and flat.to.flat
trades$Start <- trades$Start + 1
trades$End <- trades$End + 1
# add extra 'trade start' if there's an open trade, so 'includeOpenTrade' logic will work
if(any(is.na(testdf$end_ts))){
trades$Start <- c(trades$Start,last(which(index(incrPos) == testdf$start_ts[first(which(is.na(testdf$end_ts)))])))
}
}
) # end round turn trade separation by tradeDef
# if the last trade is still open, adjust depending on whether we want open trades or not
if(last(posPL)[,"Pos.Qty"] != 0)
{
if(includeOpenTrade)
trades$End <- c(trades$End,nrow(posPL))
else
trades$Start <- head(trades$Start, -1)
}
if(length(trades$Start)!=length(trades$End)){
trades$Start[(length(trades$Start)+1):length(trades$End)] <- last(trades$Start)
}
# check for an open trade that starts on the last observation, remove
last.trade.is.open <- FALSE
if(last(trades$End)==last(trades$Start)){
last.trade.is.open <- TRUE
trades$End <- trades$End[-length(trades$End)]
trades$Start <- trades$Start[-length(trades$Start)]
}
# pre-allocate trades list
N <- length(trades$End)
trades <- c(trades, list(
Init.Qty = numeric(N),
Init.Pos = numeric(N),
Max.Pos = numeric(N),
End.Pos = numeric(N),
Closing.Txn.Qty = numeric(N),
Num.Txns = integer(N),
Max.Notional.Cost = numeric(N),
Net.Trading.PL = numeric(N),
MAE = numeric(N),
MFE = numeric(N),
Pct.Net.Trading.PL = numeric(N),
Pct.MAE = numeric(N),
Pct.MFE = numeric(N),
tick.Net.Trading.PL = numeric(N),
tick.MAE = numeric(N),
tick.MFE = numeric(N)))
# create txn.qty vector for computing Init.Qty and End.Pos
txn.qty <- diff(posPL$Pos.Qty)
# calculate information about each round turn 'trade'
for(i in 1:N)
{
timespan <- seq.int(trades$Start[i], trades$End[i])
trade <- posPL[timespan]
n <- nrow(trade)
# calculate cost basis, PosPL, Pct.PL, tick.PL columns
Pos.Qty <- trade[,"Pos.Qty"] # avoid repeated subsetting
# position sizes
Max.Pos.Qty.loc <- which.max(abs(Pos.Qty)) # find max position quantity location
trades$Init.Pos[i] <- Pos.Qty[1]
trades$Max.Pos[i] <- Pos.Qty[Max.Pos.Qty.loc]
# initiating and ending quantities
trades$End.Pos[i] <- Pos.Qty[n]
#trades$Init.Qty[i] <- txn.qty[timespan][1]
if(tradeDef == "flat.to.flat" || tradeDef == "flat.to.reduced"){
trades$Init.Qty[i] <- txn.qty[timespan][1]
trades$Closing.Txn.Qty[i] <- trades$End.Pos[i] - Pos.Qty[n-1]
if(trades$Closing.Txn.Qty[i] == 0) trades$Closing.Txn.Qty[i] <- Pos.Qty[n] * -1
} else if(tradeDef == "increased.to.reduced"){
trades$Init.Qty[i] <- testdf$txnqty[i] * sign(txn.qty[timespan][1])
trades$Closing.Txn.Qty[i] <- trades$Init.Qty[i] * -1
}
Pos.Cost.Basis <- cumsum(trade[,"Txn.Value"])
switch(tradeDef,
flat.to.flat = {
prorata <- 1
ts.prop <- 1
trade.PL <- sum(trade[,"Net.Trading.PL"])
Cum.PL <- cumsum(trade[,"Net.Trading.PL"])
},
flat.to.reduced = {
#prorata <- abs(trades$Closing.Txn.Qty[i] / trades$Max.Pos[i]) #not precisely correct?
gettxns <- getTxns(Portfolio, Symbol) # used in computing trade.cost
if(index(trade[nrow(trade),]) %in% index(gettxns)){
closeqty <- coredata(gettxns$Txn.Qty[index(trade[nrow(trade),])]) # total qty traded at closure of round-turn/s
}
tradecost <- coredata(gettxns$Txn.Price[index(trade[1,])]) # used in computing trade.PL
if(abs(trades$Closing.Txn.Qty[i] / closeqty) >= 1) { # closing qty less than init.pos, incl full realized.pl
prorata <- 1
} else {
prorata <- as.numeric((abs(trades$Closing.Txn.Qty[i] / closeqty)))
}
ts.prop <- abs(trades$Closing.Txn.Qty[i] / Pos.Qty)
if(i==N && includeOpenTrade){
ts.prop[n] <- 1 # all unrealized PL for last observation is counted
} else {
ts.prop[n] <- 0 # no unrealized PL for last observation is counted
}
if(i==N && includeOpenTrade && trade[n,"Period.Realized.PL"] !=0 && last.trade.is.open == FALSE){
trade.PL <- 0
} else {
trade.PL <- trade[n,"Period.Realized.PL"]
}
fees <- sum(trade[,'Txn.Fees']) * prorata
trade.PL <- trade.PL + fees
#Cum.PL <- cumsum(trade[n,'Period.Realized.PL'])*prorata + cumsum(trade[,'Period.Unrealized.PL']*ts.prop) + trade[,'Txn.Fees']
#Cum.PL <- cumsum(trade[,'Period.Realized.PL'] + (trade[,'Period.Unrealized.PL']*ts.prop)) + trade[,'Txn.Fees']
Cum.PL <- merge(trade[n,'Period.Realized.PL']*prorata, cumsum(trade[,'Period.Unrealized.PL'])*ts.prop, trade[,'Txn.Fees'])
Cum.PL[is.na(Cum.PL)] <- 0
Cum.PL <- rowSums(Cum.PL)
#colnames(Cum.PL) <- 'Cum.PL'
},
increased.to.reduced = {
tradeqty <- as.numeric((coredata(trade[n,'Pos.Qty']) - coredata(trade[n-1,'Pos.Qty']))) # used in computing trade.PL
gettxns <- getTxns(Portfolio, Symbol) # used in computing trade.cost
if(index(trade[nrow(trade),]) %in% index(gettxns)){
closeqty <- coredata(gettxns$Txn.Qty[index(trade[nrow(trade),])]) # total qty traded at closure of round-turn/s
if(length(closeqty)>1) closeqty<-sum(closeqty) #multiple closing trades share the timestamp, so combine for pro-rata calc
}
tradecost <- coredata(gettxns$Txn.Price[index(trade[1,])]) # used in computing trade.PL
if(abs(trades$Closing.Txn.Qty[i] / closeqty) >= 1) { # closing qty less than init.pos, incl full realized.pl
prorata <- 1
} else {
prorata <- as.numeric((abs(trades$Closing.Txn.Qty[i] / closeqty)))
}
# calculate trade size as proportion of total position size (ts.prop)
ts.prop <- abs(trades$Closing.Txn.Qty[i] / Pos.Qty) # slightly different implementation compared with flat.to.reduced for trade size proportion
colnames(ts.prop) <- 'ts.prop'
if(i==N && includeOpenTrade){
ts.prop[n] <- 1 # all unrealized PL for last observation is counted
} else {
ts.prop[n] <- 0 # no unrealized PL for last observation is counted
}
if(i==N && includeOpenTrade && trade[n,"Period.Realized.PL"] !=0 && last.trade.is.open == FALSE){
trade.PL <- 0
} else {
trade.PL <- trade[n,"Period.Realized.PL"]*prorata
}
ts.prop[is.infinite(ts.prop)] <- 0 # once a position is closed out to flat, dividing by 0 gives an infinite number, so we zero it out as there is no remaining position (and thus no unrealized P&L) to attribute
fees <- as.numeric(trade[1,'Txn.Fees'] * prorata) + as.numeric(trade[n,'Txn.Fees'])
trade.PL <- trade.PL + fees
# remove fees not part of this round turn
# increased.to.reduced has precisely one opening and closing trade
trade$Txn.Fees[2:(n-1)] <- 0
# scale opening trade fees to correct proportion
trade$Txn.Fees[1] <- trade[1,'Txn.Fees'] * prorata
# for cumulative P&L for increased.to.reduced/acfifo, we have precise
# numbers for Period.Realized.PL and Txn.Fees, but need to take prorata
# for unrealized P&L
#Cum.PL <- cumsum(trade[n,'Period.Realized.PL'])*prorata + cumsum(trade[,'Period.Unrealized.PL']*ts.prop) + trade[,'Txn.Fees']
Cum.PL <- merge(trade[n,'Period.Realized.PL']*prorata, cumsum(trade[,'Period.Unrealized.PL'])*ts.prop, trade[,'Txn.Fees'])
Cum.PL[is.na(Cum.PL)] <- 0
Cum.PL <- rowSums(Cum.PL)
#colnames(Cum.PL) <- 'Cum.PL'
}
)
# scale cost basis based on how much of the Txn.Value should be used for this round turn
Pos.Cost.Basis <- Pos.Cost.Basis * prorata
# count number of transactions
trades$Num.Txns[i] <- sum(trade[,"Txn.Value"]!=0)
# investment
trades$Max.Notional.Cost[i] <- Pos.Cost.Basis[Max.Pos.Qty.loc]
# cash P&L
trades$Net.Trading.PL[i] <- trade.PL
#include unrealized P&L for open position, if necessary
if(i==N && trades$Net.Trading.PL[i]==0 && includeOpenTrade){
#trades$Net.Trading.PL[i] <- sum(trade[,'Period.Unrealized.PL'])
trades$Net.Trading.PL[i] <- sum(posPL$Net.Trading.PL) - sum(posPL$Period.Realized.PL)
#trades$Net.Trading.PL[i] <- sum(posPL$Net.Trading.PL) - sum(trades$Net.Trading.PL) # balancing final inclOpenTrade round turn PL
}
# cash MAE/MFE
trades$MAE[i] <- min(0,Cum.PL)
trades$MFE[i] <- max(0,Cum.PL)
# percentage P&L
Pct.PL <- Cum.PL/abs(trades$Max.Notional.Cost[i])
#if(nrow(Pct.PL)>1){trades$Pct.Net.Trading.PL[i] <- Pct.PL[n]}
#if(nrow(Pct.PL)==1){trades$Pct.Net.Trading.PL[i] <- Pct.PL}
if(length(Pct.PL)>1){trades$Pct.Net.Trading.PL[i] <- Pct.PL[n]}
if(length(Pct.PL)==1){trades$Pct.Net.Trading.PL[i] <- Pct.PL}
trades$Pct.MAE[i] <- min(0,trades$MAE[i]/abs(trades$Max.Notional.Cost[i]))
trades$Pct.MFE[i] <- max(0,trades$MFE[i]/abs(trades$Max.Notional.Cost[i]))
# tick P&L
# Net.Trading.PL/position/tick value = ticks
Tick.PL <- Cum.PL/abs(trades$Max.Pos[i])/tick_value
# if(nrow(Tick.PL)>1){trades$tick.Net.Trading.PL[i] <- Tick.PL[n]}
# if(nrow(Tick.PL)==1){trades$tick.Net.Trading.PL[i] <- Tick.PL}
if(length(Tick.PL)>1){trades$tick.Net.Trading.PL[i] <- Tick.PL[n]}
if(length(Tick.PL)==1){trades$tick.Net.Trading.PL[i] <- Tick.PL}
trades$tick.MAE[i] <- min(0,trades$MAE[i]/tick_value)
trades$tick.MFE[i] <- max(0,trades$MFE[i]/tick_value)
}
trades$Start <- index(posPL)[trades$Start]
trades$End <- index(posPL)[trades$End]
#make into data.frame
trades<- as.data.frame(trades)
if(includeFlatPeriods){
# use a list to put things together
flat.p<-list()
flat.p$Start <- which(posPL$Pos.Qty==0 & lag(posPL$Pos.Qty)!=0)
flat.p$End <- which(posPL$Pos.Qty!=0 & lag(posPL$Pos.Qty)==0)
# check for initial flat period, remove as non-informational
if(first(flat.p$End)<first(flat.p$Start)){
flat.p$End <- flat.p$End[-1]
}
# check for a flat period that starts on the last observation, remove
if(last(flat.p$End)==last(flat.p$Start)){
flat.p$End <- flat.p$End[-length(flat.p$End)]
flat.p$Start <- flat.p$Start[-length(flat.p$Start)]
}
# check for trailing flat period, keep this if it exists
if(last(flat.p$End) < last(flat.p$Start) &&
length(flat.p$Start)>length(flat.p$End)){
# add an artificial end at the end of the series
flat.p$End <- c(flat.p$End,length(index(posPL)))
}
# allocate flat periods list
N <- length(flat.p$End)
flat.p <- c(flat.p, list(
Init.Qty = rep(0,N),
Init.Pos = rep(0,N),
Max.Pos = rep(0,N),
End.Pos = rep(0,N),
Closing.Txn.Qty = rep(0,N),
Num.Txns = rep(0,N),
Max.Notional.Cost = rep(0,N),
Net.Trading.PL = rep(0,N),
MAE = rep(0,N),
MFE = rep(0,N),
Pct.Net.Trading.PL = rep(0,N),
Pct.MAE = rep(0,N),
Pct.MFE = rep(0,N),
tick.Net.Trading.PL = rep(0,N),
tick.MAE = rep(0,N),
tick.MFE = rep(0,N)))
flat.p$Start <- index(posPL)[flat.p$Start]
flat.p$End <- index(posPL)[flat.p$End]
flat.p <- as.data.frame(flat.p)
#combine with the trades data.frame
trades <- rbind(trades,flat.p)
}
#add duration
trades$duration <- difftime(trades$End, trades$Start, units='secs') #for POSIXct compliance
#add periodicity
attr(trades, 'trade.periodicity') <- periodicity(posPL)
return(trades)
} # end fn perTradeStats
#' quantiles of per-trade stats
#'
#' The quantiles of your trade statistics get to the heart of quantitatively
#' setting rational stops and possibly even profit taking targets
#' for a trading strategy or system.
#' When applied to theoretical trades from a backtest, they may help to adjust
#' parameters prior to trying the strategy with real money.
#' When applied to real historical trades, they should help in examining what
#' is working and where there is room for improvement in a trading system
#' or strategy.
#'
#' This function will use the \code{\link{quantile}} function to calculate
#' quantiles of per-trade net P&L, MAE, and MFE using the output from
#' \code{\link{perTradeStats}}. These quantiles are chosen by the \code{probs}
#' parameter and will be calculated for one or all of
#' 'cash','percent',or 'tick', controlled by the \code{scale} argument.
#' Quantiles will be calculated separately for trades that end positive (gains)
#' and trades that end negative (losses), and will be denoted
#' 'pos' and 'neg',respectively.
#'
#' Additionally, this function will return the MAE with respect to
#' the maximum cumulative P&L achieved for each \code{scale} you request.
#' Tomasini & Jaekle recommend plotting MAE or MFE with respect to cumulative P&L
#' and choosing a stop or profit target in the 'stable region'. The reported
#' max should help the user to locate the stable region, perhaps mechanically.
#' There is room for improvement here, but this should give the user
#' information to work with in addition to the raw quantiles.
#' For example, it may make more sense to use the max of a loess or
#' kernel or other non-linear fit as the target point.
#'
#' @param Portfolio string identifying the portfolio
#' @param Symbol string identifying the symbol to examine trades for. If missing, the first symbol found in the \code{Portfolio} portfolio will be used
#' @param \dots any other passthrough parameters
#' @param scale string specifying 'cash', or 'percent' for percentage of investment, or 'tick'
#' @param probs vector of probabilities for \code{quantile}
#' @author Brian G. Peterson
#' @references Tomasini, E. and Jaekle, U. \emph{Trading Systems - A new approach to system development and portfolio optimisation} (ISBN 978-1-905641-79-6)
#' @seealso \code{\link{tradeStats}}
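#' @examples
#' \dontrun{
#' # Illustrative sketch only: placeholder portfolio/symbol names; assumes the
#' # portfolio already contains closed round turn trades.
#' tradeQuantiles("myport", "IBM", scale = "percent", probs = c(.5, .9, .95, .99))
#' }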
#' @export
tradeQuantiles <- function(Portfolio, Symbol, ..., scale=c('cash','percent','tick'),probs=c(.5,.75,.9,.95,.99,1))
{
trades <- perTradeStats(Portfolio, Symbol, ...)
#order them by increasing MAE and decreasing P&L (to resolve ties)
trades <- trades[with(trades, order(-Pct.MAE, -Pct.Net.Trading.PL)), ]
#we could argue that we need three separate sorts, but we'll come back to that if we need to
trades$Cum.Pct.PL <- cumsum(trades$Pct.Net.Trading.PL) #NOTE: this is adding simple returns, so not perfect, but gets the job done
trades$Cum.PL <- cumsum(trades$Net.Trading.PL)
trades$Cum.tick.PL <- cumsum(trades$tick.Net.Trading.PL)
# example plot
# plot(-trades$Pct.MAE,trades$Cum.Pct.PL,type='l')
#TODO: put this into a chart. fn
post <- trades[trades$Net.Trading.PL>0,]
negt <- trades[trades$Net.Trading.PL<0,]
ret<-NULL
for (sc in scale){
switch(sc,
cash = {
posq <- quantile(post$Net.Trading.PL,probs=probs)
names(posq)<-paste('posPL',names(posq))
negq <- -1*quantile(abs(negt$Net.Trading.PL),probs=probs)
names(negq)<-paste('negPL',names(negq))
posMFEq <-quantile(post$MFE,probs=probs)
names(posMFEq) <- paste('posMFE',names(posMFEq))
posMAEq <--1*quantile(abs(post$MAE),probs=probs)
names(posMAEq) <- paste('posMAE',names(posMAEq))
negMFEq <-quantile(negt$MFE,probs=probs)
names(negMFEq) <- paste('negMFE',names(negMFEq))
negMAEq <--1*quantile(abs(negt$MAE),probs=probs)
names(negMAEq) <- paste('negMAE',names(negMAEq))
MAEmax <- trades[which(trades$Cum.PL==max(trades$Cum.PL)),]$MAE
names(MAEmax)<-'MAE~max(cumPL)'
ret<-c(ret,posq,negq,posMFEq,posMAEq,negMFEq,negMAEq,MAEmax)
},
percent = {
posq <- quantile(post$Pct.Net.Trading.PL,probs=probs)
names(posq)<-paste('posPctPL',names(posq))
negq <- -1*quantile(abs(negt$Pct.Net.Trading.PL),probs=probs)
names(negq)<-paste('negPctPL',names(negq))
posMFEq <-quantile(post$Pct.MFE,probs=probs)
names(posMFEq) <- paste('posPctMFE',names(posMFEq))
posMAEq <--1*quantile(abs(post$Pct.MAE),probs=probs)
names(posMAEq) <- paste('posPctMAE',names(posMAEq))
negMFEq <-quantile(negt$Pct.MFE,probs=probs)
names(negMFEq) <- paste('negPctMFE',names(negMFEq))
negMAEq <--1*quantile(abs(negt$Pct.MAE),probs=probs)
names(negMAEq) <- paste('negPctMAE',names(negMAEq))
MAEmax <- trades[which(trades$Cum.Pct.PL==max(trades$Cum.Pct.PL)),]$Pct.MAE
names(MAEmax)<-'%MAE~max(cum%PL)'
ret<-c(ret,posq,negq,posMFEq,posMAEq,negMFEq,negMAEq,MAEmax)
},
tick = {
posq <- quantile(post$tick.Net.Trading.PL,probs=probs)
names(posq)<-paste('posTickPL',names(posq))
negq <- -1*quantile(abs(negt$tick.Net.Trading.PL),probs=probs)
names(negq)<-paste('negTickPL',names(negq))
posMFEq <-quantile(post$tick.MFE,probs=probs)
names(posMFEq) <- paste('posTickMFE',names(posMFEq))
posMAEq <--1*quantile(abs(post$tick.MAE),probs=probs)
names(posMAEq) <- paste('posTickMAE',names(posMAEq))
negMFEq <-quantile(negt$tick.MFE,probs=probs)
names(negMFEq) <- paste('negTickMFE',names(negMFEq))
negMAEq <--1*quantile(abs(negt$tick.MAE),probs=probs)
names(negMAEq) <- paste('negTickMAE',names(negMAEq))
MAEmax <- trades[which(trades$Cum.tick.PL==max(trades$Cum.tick.PL)),]$tick.MAE
names(MAEmax)<-'tick.MAE~max(cum.tick.PL)'
ret<-c(ret,posq,negq,posMFEq,posMAEq,negMFEq,negMAEq,MAEmax)
}
) #end scale switch
} #end for loop
#return a single column for now, could be multiple column if we looped on Symbols
ret<-t(t(ret))
colnames(ret)<-paste(Portfolio,Symbol,sep='.')
ret
}
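# Illustrative usage sketch (not part of the original blotter source): how
# tradeQuantiles() might be called once a blotter portfolio exists. The
# portfolio name "myportf" and symbol "XYZ" are hypothetical and assume the
# usual initPortf()/addTxn()/updatePortf() workflow has already been run.
if (FALSE) {
  # quantiles of per-trade net P&L, MAE and MFE on the cash scale only
  tradeQuantiles("myportf", "XYZ", scale = "cash")
  # all three scales, with the default probability vector
  tradeQuantiles("myportf", "XYZ",
                 scale = c("cash", "percent", "tick"),
                 probs = c(.5, .75, .9, .95, .99, 1))
}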
###############################################################################
# Blotter: Tools for transaction-oriented trading systems development
# for R (see http://r-project.org/)
# Copyright (c) 2008-2015 Peter Carl and Brian G. Peterson
#
# This library is distributed under the terms of the GNU Public License (GPL)
# for full details see the file COPYING
#
# $Id$
#
###############################################################################
|
/R/perTradeStats.R
|
no_license
|
claymoremarshall/blotter
|
R
| false
| false
| 31,431
|
r
|
#' calculate trade statistics for round turn trades.
#'
#' One 'trade' is defined as a series of transactions which make up a 'round turn'.
#' It may contain many transactions. This function reports statistics on these
#' round turn trades which may be used on their own or which are also used
#' by other functions, including \code{\link{tradeStats}} and \code{\link{tradeQuantiles}}.
#'
#' @details
#' Additional methods of determining 'round turns' are also supported.
#'
#' \strong{Supported Methods for \code{tradeDef}:}
#' \describe{
#' \item{\code{flat.to.flat}}{All transactions, from the initial transaction
#' that moves the position away from zero through the last transaction that
#' flattens the position, make up one round turn trade for the purposes of
#' 'flat to flat' analysis.}
#' \item{\code{flat.to.reduced}}{The \emph{flat.to.reduced} method starts the
#' round turn trade at the same point as \emph{flat.to.flat}, at the first
#' transaction which moves the position from zero to a new open position. The
#' end of each round turn is described by transactions which move the position
#' closer to zero, regardless of any other transactions which may have
#' increased the position along the way.}
#' \item{\code{increased.to.reduced}}{The \emph{increased.to.reduced} method
#' is appropriate for analyzing round turns in a portfolio which is rarely
#' flat, or which regularly adds to and reduces positions. Every transaction
#' which moves the position closer to zero (reduced position) will close a
#' round turn. This closing transaction will be paired with one or more
#' transactions which moved the position further from zero in order to locate
#' the initiating transactions. \code{acfifo} is an alias for this method.}
#' }
#'
#' As with the rest of \code{blotter}, \code{perTradeStats} uses average cost
#' accounting. For the purposes of round turns, the average cost in force is
#' the average cost of the open position at the time of the closing transaction.
#'
#' Note that a trade that is open at the end of the measured period will
#' be marked to the timestamp of the end of the series.
#' If that trade is later closed, the stats for it will likely change.
#' This is 'mark to market' for the open position, and corresponds to the
#' practice of most trade accounting and risk systems, which include the open
#' position in reporting.
#'
#' \code{Net.Trading.PL} and MAE/MFE are calculated somewhat differently
#' depending on the \code{tradeDef}. All three \code{tradeDef} types utilize
#' \emph{Period.Realized.PL} to calculate the trading P&L of the round turn trade.
#' If the method is \emph{flat.to.flat}, the cumulative sum of all transaction
#' realized P&L's during the round turn is added. If the method is
#' \emph{flat.to.reduced}, the realized P&L is the period realized P&L of the
#' closing transaction. If the method is \emph{increased.to.reduced} (or the
#' equivalent \emph{acfifo}), the realized P&L for the round turn trade will be
#' the period realized P&L potentially pro-rated by the difference in size between
#' the initiating and closing transactions.
#'
#' MAE and MFE are pro-rated for \emph{increased.to.reduced} (or \emph{acfifo})
#' and \emph{flat.to.reduced} using the proportion of the total traded quantity
#' over \emph{timespan} that is attributable to this round turn. For these
#' definitions of round turns, this is complicated because there can be multiple
#' initiating transactions which will adjust the average cost (and thus the net P&L)
#' of the position. After pro-rating the cash measures, the percent and tick
#' versions are constructed by dividing by the maximum notional position cost and
#' the tick value, respectively.
#'
#' If \code{includeFlatPeriods=TRUE}, \code{perTradeStats} will include periods
#' when the series is flat (holds no position). Flat periods at the beginning of
#' the series will be removed, as they are presumed to hold no information, and
#' may be easily retrieved if desired. This information is likely most useful in
#' constructing stylized facts about the trading style, calculating values such
#' as time in market. It is also extremely useful for Monte Carlo simulation of
#' random trading strategies with similar style to the series under investigation.
#' For more information on this latter use, see \code{\link{txnsim}}.
#'
#' @param Portfolio string identifying the portfolio
#' @param Symbol string identifying the symbol to examine trades for. If missing, the first symbol found in the \code{Portfolio} portfolio will be used
#' @param includeOpenTrade whether to process only finished trades, or the last trade if it is still open, default TRUE
#' @param tradeDef string, one of 'flat.to.flat', 'flat.to.reduced', 'increased.to.reduced' or 'acfifo'. See Details.
#' @param \dots any other passthrough parameters
#' @param includeFlatPeriods boolean, default FALSE, whether to include flat periods in output, mostly useful for Monte Carlo simulation as in \code{\link{txnsim}}
#' @author Brian G. Peterson, Jasen Mackie, Jan Humme
#' @references Tomasini, E. and Jaekle, U. \emph{Trading Systems - A new approach to system development and portfolio optimisation} (ISBN 978-1-905641-79-6)
#' @return
#' A \code{data.frame} containing:
#'
#' \describe{
#' \item{Start}{the \code{POSIXct} timestamp of the start of the trade}
#' \item{End}{the \code{POSIXct} timestamp of the end of the trade, when flat}
#' \item{Init.Qty}{ transaction quantity initiating the trade}
#' \item{Init.Pos}{ position held after the initiating transaction of the round turn trade}
#' \item{Max.Pos}{the maximum (largest) position held during the open trade}
#' \item{End.Pos}{ the remaining quantity held after closing the trade}
#' \item{Closing.Txn.Qty}{ the transaction quantity which closes the round turn trade }
#' \item{Num.Txns}{ the number of transactions included in this trade}
#' \item{Max.Notional.Cost}{ the largest notional investment cost of this trade}
#' \item{Net.Trading.PL}{ net trading P&L in the currency of \code{Symbol}}
#' \item{MAE}{ Maximum Adverse Excursion (MAE), in the currency of \code{Symbol}}
#' \item{MFE}{ Maximum Favorable Excursion (MFE), in the currency of \code{Symbol}}
#' \item{Pct.Net.Trading.PL}{ net trading P&L in percent of invested \code{Symbol} price gained or lost}
#' \item{Pct.MAE}{ Maximum Adverse Excursion (MAE), in percent}
#' \item{Pct.MFE}{ Maximum Favorable Excursion (MFE), in percent}
#' \item{tick.Net.Trading.PL}{ net trading P&L in ticks}
#' \item{tick.MAE}{ Maximum Adverse Excursion (MAE) in ticks}
#' \item{tick.MFE}{ Maximum Favorable Excursion (MFE) in ticks}
#' \item{duration}{ \code{difftime} describing the duration of the round turn, in seconds }
#' }
#'
#' @seealso \code{\link{chart.ME}} for a chart of MAE and MFE derived from this function,
#' and \code{\link{tradeStats}} for a summary view of the performance, and
#' \code{\link{tradeQuantiles}} for round turns classified by quantile.
#' @export
perTradeStats <- function(Portfolio
, Symbol
, includeOpenTrade=TRUE
, tradeDef="flat.to.flat"
, ...
, includeFlatPeriods=FALSE)
{
portf <- .getPortfolio(Portfolio)
if(missing(Symbol)) Symbol <- ls(portf$symbols)[[1]]
posPL <- portf$symbols[[Symbol]]$posPL
instr <- getInstrument(Symbol)
tick_value <- instr$multiplier*instr$tick_size
tradeDef <- match.arg(tradeDef, c("flat.to.flat","flat.to.reduced","increased.to.reduced","acfifo"))
if(tradeDef=='acfifo') tradeDef<-'increased.to.reduced'
trades <- list()
switch(tradeDef,
flat.to.flat = {
# identify start and end for each trade, where end means flat position
trades$Start <- which(posPL$Pos.Qty!=0 & lag(posPL$Pos.Qty)==0)
trades$End <- which(posPL$Pos.Qty==0 & lag(posPL$Pos.Qty)!=0)
},
flat.to.reduced = {
# find all transactions that bring position closer to zero ('trade ends')
decrPos <- diff(abs(posPL$Pos.Qty)) < 0
# find all transactions that open a position ('trade starts')
initPos <- posPL$Pos.Qty!=0 & lag(posPL$Pos.Qty)==0
# 'trades' start when we open a position, so determine which starts correspond to each end
# add small amount to Start index, so starts will always occur before ends in StartEnd
Start <- xts(initPos[initPos,which.i=TRUE],index(initPos[initPos])+1e-5)
End <- xts(decrPos[decrPos,which.i=TRUE],index(decrPos[decrPos]))
StartEnd <- merge(Start,End)
StartEnd$Start <- na.locf(StartEnd$Start)
if(includeOpenTrade){
SEtail <- StartEnd[paste0(index(last(StartEnd[!is.na(StartEnd$End)])+1),'/')]
SEtail <- SEtail[-1,]
}
StartEnd <- StartEnd[!is.na(StartEnd$End),]
if(includeOpenTrade) StartEnd <- rbind(StartEnd,SEtail)
# populate trades list
trades$Start <- drop(coredata(StartEnd[!is.na(StartEnd$Start[]),]$Start))
trades$End <- drop(coredata(StartEnd[!is.na(StartEnd$End),]$End))
},
increased.to.reduced = {
# find all transactions that bring position closer to zero ('trade ends')
decrPos <- diff(abs(posPL$Pos.Qty)) < 0
decrPosCount <- ifelse(diff(abs(posPL$Pos.Qty)) < 0,-1,0)
decrPosCount <- ifelse(decrPosCount[-1] == 0, 0, cumsum(decrPosCount[-1]))
decrPosQty <- ifelse(diff(abs(posPL$Pos.Qty)) < 0, diff(abs(posPL$Pos.Qty)),0)
decrPosQtyCum <- ifelse(decrPosQty[-1] == 0, 0, cumsum(decrPosQty[-1])) #subset for the leading NA
# find all transactions that take position further from zero ('trade starts')
incrPos <- diff(abs(posPL$Pos.Qty)) > 0
incrPosCount <- ifelse(diff(abs(posPL$Pos.Qty)) > 0,1,0)
incrPosCount <- ifelse(incrPosCount[-1] == 0, 0, cumsum(incrPosCount[-1]))
incrPosQty <- ifelse(diff(abs(posPL$Pos.Qty)) > 0, diff(abs(posPL$Pos.Qty)),0)
incrPosQtyCum <- ifelse(incrPosQty[-1] == 0, 0, cumsum(incrPosQty[-1])) #subset for the leading NA
# Calculate txn qty
txnqty <- rbind(incrPosQtyCum, abs(decrPosQtyCum))
txnqty <- txnqty[-which(txnqty == 0)]
txnqty <- rbind(xts(0,as.POSIXct("1950-01-01")),txnqty)
txnqty <- as.data.frame(txnqty)
txnqty <- txnqty[order(txnqty[,1]),]
txnqty <- diff(txnqty)
txnqty <- na.omit(txnqty)
# Get start and end dates
starts <- incrPosQtyCum[-which(incrPosQtyCum==0)]
ends <- abs(decrPosQtyCum[-which(decrPosQtyCum==0)])
cumsum(txnqty) # let's investigate cumsum(txnqty)
end_idx <- findInterval(cumsum(txnqty), ends, left.open = TRUE) + 1 # can disregard last element as it relates to open trade
end_idx
start_idx <- findInterval(cumsum(txnqty), starts, left.open = TRUE) + 1 # can disregard last element as it relates to open trade
start_idx
testdf <- data.frame(cbind(txnqty, cumsum(txnqty), start_idx, end_idx))
testdf$start_ts <- index(starts)[start_idx]
testdf$end_ts <- index(ends)[end_idx]
testdf <- testdf[-which(testdf$txnqty == 0),]
# build trades$Start and trades$End in trades list
# iterating over testdf, for all txns that have an end date
# and are therefore round turn trades
for(i in 1:length(which(!is.na(testdf$end_ts)))){
trades$Start[i] <- which(index(incrPosQtyCum) == testdf$start_ts[i])
trades$End[i] <- which(index(decrPosQtyCum) == testdf$end_ts[i])
}
# now add 1 to idx for missing initdate from incr/decrPosQtyCum - adds consistency with flat.to.reduced and flat.to.flat
trades$Start <- trades$Start + 1
trades$End <- trades$End + 1
# add extra 'trade start' if there's an open trade, so 'includeOpenTrade' logic will work
if(any(is.na(testdf$end_ts))){
trades$Start <- c(trades$Start,last(which(index(incrPos) == testdf$start_ts[first(which(is.na(testdf$end_ts)))])))
}
}
) # end round turn trade separation by tradeDef
# if the last trade is still open, adjust depending on whether we want open trades or not
if(last(posPL)[,"Pos.Qty"] != 0)
{
if(includeOpenTrade)
trades$End <- c(trades$End,nrow(posPL))
else
trades$Start <- head(trades$Start, -1)
}
if(length(trades$Start)!=length(trades$End)){
trades$Start[(length(trades$Start)+1):length(trades$End)] <- last(trades$Start)
}
# check for an open trade that starts on the last observation, remove
last.trade.is.open <- FALSE
if(last(trades$End)==last(trades$Start)){
last.trade.is.open <- TRUE
trades$End <- trades$End[-length(trades$End)]
trades$Start <- trades$Start[-length(trades$Start)]
}
# pre-allocate trades list
N <- length(trades$End)
trades <- c(trades, list(
Init.Qty = numeric(N),
Init.Pos = numeric(N),
Max.Pos = numeric(N),
End.Pos = numeric(N),
Closing.Txn.Qty = numeric(N),
Num.Txns = integer(N),
Max.Notional.Cost = numeric(N),
Net.Trading.PL = numeric(N),
MAE = numeric(N),
MFE = numeric(N),
Pct.Net.Trading.PL = numeric(N),
Pct.MAE = numeric(N),
Pct.MFE = numeric(N),
tick.Net.Trading.PL = numeric(N),
tick.MAE = numeric(N),
tick.MFE = numeric(N)))
# create txn.qty vector for computing Init.Qty and End.Pos
txn.qty <- diff(posPL$Pos.Qty)
# calculate information about each round turn 'trade'
for(i in 1:N)
{
timespan <- seq.int(trades$Start[i], trades$End[i])
trade <- posPL[timespan]
n <- nrow(trade)
# calculate cost basis, PosPL, Pct.PL, tick.PL columns
Pos.Qty <- trade[,"Pos.Qty"] # avoid repeated subsetting
# position sizes
Max.Pos.Qty.loc <- which.max(abs(Pos.Qty)) # find max position quantity location
trades$Init.Pos[i] <- Pos.Qty[1]
trades$Max.Pos[i] <- Pos.Qty[Max.Pos.Qty.loc]
# initiating and ending quantities
trades$End.Pos[i] <- Pos.Qty[n]
#trades$Init.Qty[i] <- txn.qty[timespan][1]
if(tradeDef == "flat.to.flat" || tradeDef == "flat.to.reduced"){
trades$Init.Qty[i] <- txn.qty[timespan][1]
trades$Closing.Txn.Qty[i] <- trades$End.Pos[i] - Pos.Qty[n-1]
if(trades$Closing.Txn.Qty[i] == 0) trades$Closing.Txn.Qty[i] <- Pos.Qty[n] * -1
} else if(tradeDef == "increased.to.reduced"){
trades$Init.Qty[i] <- testdf$txnqty[i] * sign(txn.qty[timespan][1])
trades$Closing.Txn.Qty[i] <- trades$Init.Qty[i] * -1
}
Pos.Cost.Basis <- cumsum(trade[,"Txn.Value"])
switch(tradeDef,
flat.to.flat = {
prorata <- 1
ts.prop <- 1
trade.PL <- sum(trade[,"Net.Trading.PL"])
Cum.PL <- cumsum(trade[,"Net.Trading.PL"])
},
flat.to.reduced = {
#prorata <- abs(trades$Closing.Txn.Qty[i] / trades$Max.Pos[i]) #not precisely correct?
gettxns <- getTxns(Portfolio, Symbol) # used in computing trade.cost
if(index(trade[nrow(trade),]) %in% index(gettxns)){
closeqty <- coredata(gettxns$Txn.Qty[index(trade[nrow(trade),])]) # total qty traded at closure of round-turn/s
}
tradecost <- coredata(gettxns$Txn.Price[index(trade[1,])]) # used in computing trade.PL
if(abs(trades$Closing.Txn.Qty[i] / closeqty) >= 1) { # closing qty less than init.pos, incl full realized.pl
prorata <- 1
} else {
prorata <- as.numeric((abs(trades$Closing.Txn.Qty[i] / closeqty)))
}
ts.prop <- abs(trades$Closing.Txn.Qty[i] / Pos.Qty)
if(i==N && includeOpenTrade){
ts.prop[n] <- 1 # all unrealized PL for last observation is counted
} else {
ts.prop[n] <- 0 # no unrealized PL for last observation is counted
}
if(i==N && includeOpenTrade && trade[n,"Period.Realized.PL"] !=0 && last.trade.is.open == FALSE){
trade.PL <- 0
} else {
trade.PL <- trade[n,"Period.Realized.PL"]
}
fees <- sum(trade[,'Txn.Fees']) * prorata
trade.PL <- trade.PL + fees
#Cum.PL <- cumsum(trade[n,'Period.Realized.PL'])*prorata + cumsum(trade[,'Period.Unrealized.PL']*ts.prop) + trade[,'Txn.Fees']
#Cum.PL <- cumsum(trade[,'Period.Realized.PL'] + (trade[,'Period.Unrealized.PL']*ts.prop)) + trade[,'Txn.Fees']
Cum.PL <- merge(trade[n,'Period.Realized.PL']*prorata, cumsum(trade[,'Period.Unrealized.PL'])*ts.prop, trade[,'Txn.Fees'])
Cum.PL[is.na(Cum.PL)] <- 0
Cum.PL <- rowSums(Cum.PL)
#colnames(Cum.PL) <- 'Cum.PL'
},
increased.to.reduced = {
tradeqty <- as.numeric((coredata(trade[n,'Pos.Qty']) - coredata(trade[n-1,'Pos.Qty']))) # used in computing trade.PL
gettxns <- getTxns(Portfolio, Symbol) # used in computing trade.cost
if(index(trade[nrow(trade),]) %in% index(gettxns)){
closeqty <- coredata(gettxns$Txn.Qty[index(trade[nrow(trade),])]) # total qty traded at closure of round-turn/s
if(length(closeqty)>1) closeqty<-sum(closeqty) #multiple closing trades share the timestamp, so combine for pro-rata calc
}
tradecost <- coredata(gettxns$Txn.Price[index(trade[1,])]) # used in computing trade.PL
if(abs(trades$Closing.Txn.Qty[i] / closeqty) >= 1) { # closing qty less than init.pos, incl full realized.pl
prorata <- 1
} else {
prorata <- as.numeric((abs(trades$Closing.Txn.Qty[i] / closeqty)))
}
# calculate trade size as proportion of total position size (ts.prop)
ts.prop <- abs(trades$Closing.Txn.Qty[i] / Pos.Qty) # slightly different implementation compared with flat.to.reduced for trade size proportion
colnames(ts.prop) <- 'ts.prop'
if(i==N && includeOpenTrade){
ts.prop[n] <- 1 # all unrealized PL for last observation is counted
} else {
ts.prop[n] <- 0 # no unrealized PL for last observation is counted
}
if(i==N && includeOpenTrade && trade[n,"Period.Realized.PL"] !=0 && last.trade.is.open == FALSE){
trade.PL <- 0
} else {
trade.PL <- trade[n,"Period.Realized.PL"]*prorata
}
            ts.prop[is.infinite(ts.prop)] <- 0 # once a position is closed out to flat, dividing by 0 gives an infinite number, so we zero it out as there should be no unrealized P&L contribution while flat
fees <- as.numeric(trade[1,'Txn.Fees'] * prorata) + as.numeric(trade[n,'Txn.Fees'])
trade.PL <- trade.PL + fees
# remove fees not part of this round turn
# increased.to.reduced has precisely one opening and closing trade
trade$Txn.Fees[2:(n-1)] <- 0
# scale opening trade fees to correct proportion
trade$Txn.Fees[1] <- trade[1,'Txn.Fees'] * prorata
# for cumulative P&L for increased.to.reduced/acfifo, we have precise
# numbers for Period.Realized.PL and Txn.Fees, but need to take prorata
# for unrealized P&L
#Cum.PL <- cumsum(trade[n,'Period.Realized.PL'])*prorata + cumsum(trade[,'Period.Unrealized.PL']*ts.prop) + trade[,'Txn.Fees']
Cum.PL <- merge(trade[n,'Period.Realized.PL']*prorata, cumsum(trade[,'Period.Unrealized.PL'])*ts.prop, trade[,'Txn.Fees'])
Cum.PL[is.na(Cum.PL)] <- 0
Cum.PL <- rowSums(Cum.PL)
#colnames(Cum.PL) <- 'Cum.PL'
}
)
# scale cost basis based on how much of the Txn.Value should be used for this round turn
Pos.Cost.Basis <- Pos.Cost.Basis * prorata
# count number of transactions
trades$Num.Txns[i] <- sum(trade[,"Txn.Value"]!=0)
# investment
trades$Max.Notional.Cost[i] <- Pos.Cost.Basis[Max.Pos.Qty.loc]
# cash P&L
trades$Net.Trading.PL[i] <- trade.PL
#include unrealized P&L for open position, if necessary
if(i==N && trades$Net.Trading.PL[i]==0 && includeOpenTrade){
#trades$Net.Trading.PL[i] <- sum(trade[,'Period.Unrealized.PL'])
trades$Net.Trading.PL[i] <- sum(posPL$Net.Trading.PL) - sum(posPL$Period.Realized.PL)
#trades$Net.Trading.PL[i] <- sum(posPL$Net.Trading.PL) - sum(trades$Net.Trading.PL) # balancing final inclOpenTrade round turn PL
}
# cash MAE/MFE
trades$MAE[i] <- min(0,Cum.PL)
trades$MFE[i] <- max(0,Cum.PL)
# percentage P&L
Pct.PL <- Cum.PL/abs(trades$Max.Notional.Cost[i])
#if(nrow(Pct.PL)>1){trades$Pct.Net.Trading.PL[i] <- Pct.PL[n]}
#if(nrow(Pct.PL)==1){trades$Pct.Net.Trading.PL[i] <- Pct.PL}
if(length(Pct.PL)>1){trades$Pct.Net.Trading.PL[i] <- Pct.PL[n]}
if(length(Pct.PL)==1){trades$Pct.Net.Trading.PL[i] <- Pct.PL}
trades$Pct.MAE[i] <- min(0,trades$MAE[i]/abs(trades$Max.Notional.Cost[i]))
trades$Pct.MFE[i] <- max(0,trades$MFE[i]/abs(trades$Max.Notional.Cost[i]))
# tick P&L
# Net.Trading.PL/position/tick value = ticks
Tick.PL <- Cum.PL/abs(trades$Max.Pos[i])/tick_value
# if(nrow(Tick.PL)>1){trades$tick.Net.Trading.PL[i] <- Tick.PL[n]}
# if(nrow(Tick.PL)==1){trades$tick.Net.Trading.PL[i] <- Tick.PL}
if(length(Tick.PL)>1){trades$tick.Net.Trading.PL[i] <- Tick.PL[n]}
if(length(Tick.PL)==1){trades$tick.Net.Trading.PL[i] <- Tick.PL}
trades$tick.MAE[i] <- min(0,trades$MAE[i]/tick_value)
trades$tick.MFE[i] <- max(0,trades$MFE[i]/tick_value)
}
trades$Start <- index(posPL)[trades$Start]
trades$End <- index(posPL)[trades$End]
#make into data.frame
trades<- as.data.frame(trades)
if(includeFlatPeriods){
# use a list to put things together
flat.p<-list()
flat.p$Start <- which(posPL$Pos.Qty==0 & lag(posPL$Pos.Qty)!=0)
flat.p$End <- which(posPL$Pos.Qty!=0 & lag(posPL$Pos.Qty)==0)
# check for initial flat period, remove as non-informational
if(first(flat.p$End)<first(flat.p$Start)){
flat.p$End <- flat.p$End[-1]
}
# check for a flat period that starts on the last observation, remove
if(last(flat.p$End)==last(flat.p$Start)){
flat.p$End <- flat.p$End[-length(flat.p$End)]
flat.p$Start <- flat.p$Start[-length(flat.p$Start)]
}
# check for trailing flat period, keep this if it exists
if(last(flat.p$End) < last(flat.p$Start) &&
length(flat.p$Start)>length(flat.p$End)){
# add an artificial end at the end of the series
flat.p$End <- c(flat.p$End,length(index(posPL)))
}
# allocate flat periods list
N <- length(flat.p$End)
flat.p <- c(flat.p, list(
Init.Qty = rep(0,N),
Init.Pos = rep(0,N),
Max.Pos = rep(0,N),
End.Pos = rep(0,N),
Closing.Txn.Qty = rep(0,N),
Num.Txns = rep(0,N),
Max.Notional.Cost = rep(0,N),
Net.Trading.PL = rep(0,N),
MAE = rep(0,N),
MFE = rep(0,N),
Pct.Net.Trading.PL = rep(0,N),
Pct.MAE = rep(0,N),
Pct.MFE = rep(0,N),
tick.Net.Trading.PL = rep(0,N),
tick.MAE = rep(0,N),
tick.MFE = rep(0,N)))
flat.p$Start <- index(posPL)[flat.p$Start]
flat.p$End <- index(posPL)[flat.p$End]
flat.p <- as.data.frame(flat.p)
#combine with the trades data.frame
trades <- rbind(trades,flat.p)
}
#add duration
trades$duration <- difftime(trades$End, trades$Start, units='secs') #for POSIXct compliance
#add periodicity
attr(trades, 'trade.periodicity') <- periodicity(posPL)
return(trades)
} # end fn perTradeStats
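# Illustrative usage sketch (not part of the original blotter source): the
# three round-turn definitions and the flat-period option, assuming a blotter
# portfolio "myportf" with transactions for a hypothetical symbol "XYZ" has
# already been built and updated.
if (FALSE) {
  # default: one round turn per flat-to-flat holding period
  perTradeStats("myportf", "XYZ", tradeDef = "flat.to.flat")
  # every position-reducing transaction closes a round turn
  perTradeStats("myportf", "XYZ", tradeDef = "flat.to.reduced")
  # 'acfifo' is an alias for 'increased.to.reduced'
  perTradeStats("myportf", "XYZ", tradeDef = "acfifo")
  # include flat periods, e.g. for use with txnsim()
  perTradeStats("myportf", "XYZ", includeFlatPeriods = TRUE)
}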
#' quantiles of per-trade stats
#'
#' The quantiles of your trade statistics get to the heart of quantitatively
#' setting rational stops and possibly even profit taking targets
#' for a trading strategy or system.
#' When applied to theoretical trades from a backtest, they may help to adjust
#' parameters prior to trying the strategy with real money.
#' When applied to real historical trades, they should help in examining what
#' is working and where there is room for improvement in a trading system
#' or strategy.
#'
#' This function will use the \code{\link{quantile}} function to calculate
#' quantiles of per-trade net P&L, MAE, and MFE using the output from
#' \code{\link{perTradeStats}}. These quantiles are chosen by the \code{probs}
#' parameter and will be calculated for one or all of
#' 'cash', 'percent', or 'tick', controlled by the \code{scale} argument.
#' Quantiles will be calculated separately for trades that end positive (gains)
#' and trades that end negative (losses), and will be denoted
#' 'pos' and 'neg', respectively.
#'
#' Additionally, this function will return the MAE with respect to
#' the maximum cumulative P&L achieved for each \code{scale} you request.
#' Tomasini & Jaekle recommend plotting MAE or MFE with respect to cumulative P&L
#' and choosing a stop or profit target in the 'stable region'. The reported
#' max should help the user to locate the stable region, perhaps mechanically.
#' There is room for improvement here, but this should give the user
#' information to work with in addition to the raw quantiles.
#' For example, it may make more sense to use the max of a loess or
#' kernel or other non-linear fit as the target point.
#'
#' @param Portfolio string identifying the portfolio
#' @param Symbol string identifying the symbol to examine trades for. If missing, the first symbol found in the \code{Portfolio} portfolio will be used
#' @param \dots any other passthrough parameters
#' @param scale string specifying 'cash', or 'percent' for percentage of investment, or 'tick'
#' @param probs vector of probabilities for \code{quantile}
#' @author Brian G. Peterson
#' @references Tomasini, E. and Jaekle, U. \emph{Trading Systems - A new approach to system development and portfolio optimisation} (ISBN 978-1-905641-79-6)
#' @seealso \code{\link{tradeStats}}
#' @export
tradeQuantiles <- function(Portfolio, Symbol, ..., scale=c('cash','percent','tick'),probs=c(.5,.75,.9,.95,.99,1))
{
trades <- perTradeStats(Portfolio, Symbol, ...)
#order them by increasing MAE and decreasing P&L (to resolve ties)
trades <- trades[with(trades, order(-Pct.MAE, -Pct.Net.Trading.PL)), ]
#we could argue that we need three separate sorts, but we'll come back to that if we need to
trades$Cum.Pct.PL <- cumsum(trades$Pct.Net.Trading.PL) #NOTE: this is adding simple returns, so not perfect, but gets the job done
trades$Cum.PL <- cumsum(trades$Net.Trading.PL)
trades$Cum.tick.PL <- cumsum(trades$tick.Net.Trading.PL)
# example plot
# plot(-trades$Pct.MAE,trades$Cum.Pct.PL,type='l')
#TODO: put this into a chart. fn
post <- trades[trades$Net.Trading.PL>0,]
negt <- trades[trades$Net.Trading.PL<0,]
ret<-NULL
for (sc in scale){
switch(sc,
cash = {
posq <- quantile(post$Net.Trading.PL,probs=probs)
names(posq)<-paste('posPL',names(posq))
negq <- -1*quantile(abs(negt$Net.Trading.PL),probs=probs)
names(negq)<-paste('negPL',names(negq))
posMFEq <-quantile(post$MFE,probs=probs)
names(posMFEq) <- paste('posMFE',names(posMFEq))
posMAEq <--1*quantile(abs(post$MAE),probs=probs)
names(posMAEq) <- paste('posMAE',names(posMAEq))
negMFEq <-quantile(negt$MFE,probs=probs)
names(negMFEq) <- paste('negMFE',names(negMFEq))
negMAEq <--1*quantile(abs(negt$MAE),probs=probs)
names(negMAEq) <- paste('negMAE',names(negMAEq))
MAEmax <- trades[which(trades$Cum.PL==max(trades$Cum.PL)),]$MAE
names(MAEmax)<-'MAE~max(cumPL)'
ret<-c(ret,posq,negq,posMFEq,posMAEq,negMFEq,negMAEq,MAEmax)
},
percent = {
posq <- quantile(post$Pct.Net.Trading.PL,probs=probs)
names(posq)<-paste('posPctPL',names(posq))
negq <- -1*quantile(abs(negt$Pct.Net.Trading.PL),probs=probs)
names(negq)<-paste('negPctPL',names(negq))
posMFEq <-quantile(post$Pct.MFE,probs=probs)
names(posMFEq) <- paste('posPctMFE',names(posMFEq))
posMAEq <--1*quantile(abs(post$Pct.MAE),probs=probs)
names(posMAEq) <- paste('posPctMAE',names(posMAEq))
negMFEq <-quantile(negt$Pct.MFE,probs=probs)
names(negMFEq) <- paste('negPctMFE',names(negMFEq))
negMAEq <--1*quantile(abs(negt$Pct.MAE),probs=probs)
names(negMAEq) <- paste('negPctMAE',names(negMAEq))
MAEmax <- trades[which(trades$Cum.Pct.PL==max(trades$Cum.Pct.PL)),]$Pct.MAE
names(MAEmax)<-'%MAE~max(cum%PL)'
ret<-c(ret,posq,negq,posMFEq,posMAEq,negMFEq,negMAEq,MAEmax)
},
tick = {
posq <- quantile(post$tick.Net.Trading.PL,probs=probs)
names(posq)<-paste('posTickPL',names(posq))
negq <- -1*quantile(abs(negt$tick.Net.Trading.PL),probs=probs)
names(negq)<-paste('negTickPL',names(negq))
posMFEq <-quantile(post$tick.MFE,probs=probs)
names(posMFEq) <- paste('posTickMFE',names(posMFEq))
posMAEq <--1*quantile(abs(post$tick.MAE),probs=probs)
names(posMAEq) <- paste('posTickMAE',names(posMAEq))
negMFEq <-quantile(negt$tick.MFE,probs=probs)
names(negMFEq) <- paste('negTickMFE',names(negMFEq))
negMAEq <--1*quantile(abs(negt$tick.MAE),probs=probs)
names(negMAEq) <- paste('negTickMAE',names(negMAEq))
MAEmax <- trades[which(trades$Cum.tick.PL==max(trades$Cum.tick.PL)),]$tick.MAE
names(MAEmax)<-'tick.MAE~max(cum.tick.PL)'
ret<-c(ret,posq,negq,posMFEq,posMAEq,negMFEq,negMAEq,MAEmax)
}
) #end scale switch
} #end for loop
#return a single column for now, could be multiple column if we looped on Symbols
ret<-t(t(ret))
colnames(ret)<-paste(Portfolio,Symbol,sep='.')
ret
}
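# Illustrative sketch (not part of the original source): using a custom
# 'probs' vector focused on the loss tail, for example when looking for a
# rational stop level as described above. Portfolio and symbol names are
# hypothetical.
if (FALSE) {
  tradeQuantiles("myportf", "XYZ", scale = "percent",
                 probs = c(.75, .9, .95, .975, .99))
}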
###############################################################################
# Blotter: Tools for transaction-oriented trading systems development
# for R (see http://r-project.org/)
# Copyright (c) 2008-2015 Peter Carl and Brian G. Peterson
#
# This library is distributed under the terms of the GNU Public License (GPL)
# for full details see the file COPYING
#
# $Id$
#
###############################################################################
|
\name{erebus.70}
\alias{erebus.70}
\docType{data}
\title{
%% ~~ data name/kind ... ~~
Image of Erebus Volcano, Antarctica
}
\description{
%% ~~ A concise (1-5 lines) description of the dataset. ~~
The 70th frame in a series of images that was recorded during a bubble bursting event at Mount Erebus.
JPEG image read in as an array where the third dimension corresponds to the red, green, and blue color channels. Note the image has been compressed significantly to reduce the memory size.
}
\usage{data("erebus.70")}
\format{
The format is:
num [1:225, 1:300, 1:3] 0.06667 0 0 0.02353 0.00392 ...
}
\references{
%% ~~ possibly secondary sources and usages ~~
Witsil and Johnson (2018) <doi:10.1016/j.jvolgeores.2018.05.002>
}
\examples{
data(erebus.70)
image2(erebus.70)
}
\keyword{datasets}
|
/man/erebus.70.Rd
|
no_license
|
cran/imagefx
|
R
| false
| false
| 796
|
rd
|
\name{erebus.70}
\alias{erebus.70}
\docType{data}
\title{
%% ~~ data name/kind ... ~~
Image of Erebus Volcano, Antarctica
}
\description{
%% ~~ A concise (1-5 lines) description of the dataset. ~~
The 70th frame in a series of images that was recorded during a bubble bursting event at Mount Erebus.
JPEG image read in as an array where the third dimension corresponds to the red, green, and blue color channels. Note the image has been compressed significantly to reduce the memory size.
}
\usage{data("erebus.70")}
\format{
The format is:
num [1:225, 1:300, 1:3] 0.06667 0 0 0.02353 0.00392 ...
}
\references{
%% ~~ possibly secondary sources and usages ~~
Witsil and Johnson (2018) <doi:10.1016/j.jvolgeores.2018.05.002>
}
\examples{
data(erebus.70)
image2(erebus.70)
}
\keyword{datasets}
|
testlist <- list(doy = c(-4.99473634551392e+226, 1.6641339537879e-121, 4.94065645841247e-323, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), latitude = numeric(0), temp = c(NaN, 0))
result <- do.call(meteor:::ET0_ThornthwaiteWilmott,testlist)
str(result)
|
/meteor/inst/testfiles/ET0_ThornthwaiteWilmott/AFL_ET0_ThornthwaiteWilmott/ET0_ThornthwaiteWilmott_valgrind_files/1615830900-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false
| false
| 429
|
r
|
testlist <- list(doy = c(-4.99473634551392e+226, 1.6641339537879e-121, 4.94065645841247e-323, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), latitude = numeric(0), temp = c(NaN, 0))
result <- do.call(meteor:::ET0_ThornthwaiteWilmott,testlist)
str(result)
|
library(zoo)
library(forecast)
#library(data.table)
model_arima = function(idnum,dt,date,pred_time){
dt = dt[dt$id==idnum,]
dt = merge(dt,date,by = "sale_date",all = T)
dt$count[is.na(dt$count)] = ifelse(is.na(mean(dt$count,na.rm = T)),0,median(dt$count,na.rm = T))
dt$id=NULL
dt_series = zoo(x = dt$count,order.by = dt$sale_date)
train = ts(dt_series,frequency=7,start=c(1,1))
fit=auto.arima(train)
#x = forecast(fit,h=62)$mean
x = forecast(fit,h=59)$mean
predict = round(as.data.frame(x))
predict$sale_date = pred_time
predict$id = idnum
predict$x = as.numeric(predict$x)
return(predict)
}
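# Illustrative usage sketch (not part of the original file): how model_arima()
# might be called. The column names (id, sale_date, count) follow the function
# body above; the data below are hypothetical.
if (FALSE) {
  sales <- data.frame(
    id        = 1L,
    sale_date = seq(as.Date("2017-01-01"), as.Date("2017-03-01"), by = "day"),
    count     = rpois(60, lambda = 20)
  )
  calendar  <- data.frame(sale_date = seq(as.Date("2017-01-01"),
                                          as.Date("2017-03-01"), by = "day"))
  pred_days <- seq(as.Date("2017-03-02"), by = "day", length.out = 59)
  fc <- model_arima(1L, sales, calendar, pred_days)
  head(fc)
}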
|
/function/model_arima.R
|
no_license
|
yahcong/supermarket
|
R
| false
| false
| 641
|
r
|
library(zoo)
library(forecast)
#library(data.table)
model_arima = function(idnum,dt,date,pred_time){
dt = dt[dt$id==idnum,]
dt = merge(dt,date,by = "sale_date",all = T)
dt$count[is.na(dt$count)] = ifelse(is.na(mean(dt$count,na.rm = T)),0,median(dt$count,na.rm = T))
dt$id=NULL
dt_series = zoo(x = dt$count,order.by = dt$sale_date)
train = ts(dt_series,frequency=7,start=c(1,1))
fit=auto.arima(train)
#x = forecast(fit,h=62)$mean
x = forecast(fit,h=59)$mean
predict = round(as.data.frame(x))
predict$sale_date = pred_time
predict$id = idnum
predict$x = as.numeric(predict$x)
return(predict)
}
|
/Co.Temp.R
|
no_license
|
UniToDSTGruppoClima/CoTemp
|
R
| false
| false
| 24,916
|
r
| ||
\name{epi.sssimpleestc}
\alias{epi.sssimpleestc}
\title{
Sample size to estimate a continuous outcome using simple random sampling
}
\description{
Sample size to estimate a continuous outcome using simple random sampling.
}
\usage{
epi.sssimpleestc(N = NA, xbar, sigma, epsilon, error = "relative",
nfractional = FALSE, conf.level = 0.95)
}
\arguments{
\item{N}{scalar integer, the total number of individuals eligible for inclusion in the study. If \code{N = NA} the number of individuals eligible for inclusion is assumed to be infinite.}
\item{xbar}{scalar number, the expected mean of the continuous variable to be estimated.}
\item{sigma}{scalar number, the expected standard deviation of the continuous variable to be estimated.}
\item{epsilon}{scalar number, the maximum difference between the estimate and the unknown population value expressed in absolute or relative terms.}
\item{error}{character string. Options are \code{absolute} for absolute error and \code{relative} for relative error.}
\item{nfractional}{logical, return fractional sample size.}
\item{conf.level}{scalar number, the level of confidence in the computed result.}
}
\details{
A finite population correction factor is applied to the sample size estimates when a value for \code{N} is provided.
}
\value{
Returns an integer defining the required sample size.
}
\references{
Levy PS, Lemeshow S (1999). Sampling of Populations Methods and Applications. Wiley Series in Probability and Statistics, London, pp. 70 - 75.
Scheaffer RL, Mendenhall W, Lyman Ott R (1996). Elementary Survey Sampling. Duxbury Press, New York, pp. 95.
Otte J, Gumm I (1997). Intra-cluster correlation coefficients of 20 infections calculated from the results of cluster-sample surveys. Preventive Veterinary Medicine 31: 147 - 150.
}
\note{
If \code{epsilon.r} equals the relative error, the sample estimate should not differ in absolute value from the true unknown population parameter \code{d} by more than \code{epsilon.r * d}.
}
\examples{
## EXAMPLE 1:
## A city contains 20 neighbourhood health clinics and it is desired to take a
## sample of clinics to estimate the total number of persons from all these
## clinics who have been given, during the past 12 month period, prescriptions
## for a recently approved antidepressant. If we assume that the average number
## of people seen at these clinics is 1500 per year with the standard deviation
## equal to 300, and that approximately 5\% of patients (regardless of clinic)
## are given this drug, how many clinics need to be sampled to yield an estimate
## that is within 20\% of the true population value?
pmean <- 1500 * 0.05; psigma <- (300 * 0.05)
epi.sssimpleestc(N = 20, xbar = pmean, sigma = psigma, epsilon = 0.20,
error = "relative", nfractional = FALSE, conf.level = 0.95)
## Four clinics need to be sampled to meet the requirements of the survey.
## EXAMPLE 2:
## We want to estimate the mean bodyweight of deer on a farm. There are 278
## animals present. We anticipate the mean body weight to be around 200 kg
## and the standard deviation of body weight to be 30 kg. We would like to
## be 95\% certain that our estimate is within 10 kg of the true mean. How
## many deer should be sampled?
epi.sssimpleestc(N = 278, xbar = 200, sigma = 30, epsilon = 10,
error = "absolute", nfractional = FALSE, conf.level = 0.95)
## A total of 28 deer need to be sampled to meet the requirements of the survey.
}
\keyword{univar}% at least one, from doc/KEYWORDS
\keyword{univar}% __ONLY ONE__ keyword per line
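A hypothetical third call (not part of the help file above) illustrates the \details note: with N = NA no finite population correction is applied, so the required sample size comes out larger than in Example 2. It assumes the epiR package is attached.
## Hypothetical extra example: same design as Example 2, but treating the
## population as infinite, i.e. without the finite population correction.
epi.sssimpleestc(N = NA, xbar = 200, sigma = 30, epsilon = 10,
   error = "absolute", nfractional = FALSE, conf.level = 0.95)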
|
/man/epi.sssimpleestc.Rd
|
no_license
|
cran/epiR
|
R
| false
| false
| 3,655
|
rd
|
\name{epi.sssimpleestc}
\alias{epi.sssimpleestc}
\title{
Sample size to estimate a continuous outcome using simple random sampling
}
\description{
Sample size to estimate a continuous outcome using simple random sampling.
}
\usage{
epi.sssimpleestc(N = NA, xbar, sigma, epsilon, error = "relative",
nfractional = FALSE, conf.level = 0.95)
}
\arguments{
\item{N}{scalar integer, the total number of individuals eligible for inclusion in the study. If \code{N = NA} the number of individuals eligible for inclusion is assumed to be infinite.}
\item{xbar}{scalar number, the expected mean of the continuous variable to be estimated.}
\item{sigma}{scalar number, the expected standard deviation of the continuous variable to be estimated.}
\item{epsilon}{scalar number, the maximum difference between the estimate and the unknown population value expressed in absolute or relative terms.}
\item{error}{character string. Options are \code{absolute} for absolute error and \code{relative} for relative error.}
\item{nfractional}{logical, return fractional sample size.}
\item{conf.level}{scalar number, the level of confidence in the computed result.}
}
\details{
A finite population correction factor is applied to the sample size estimates when a value for \code{N} is provided.
}
\value{
Returns an integer defining the required sample size.
}
\references{
Levy PS, Lemeshow S (1999). Sampling of Populations Methods and Applications. Wiley Series in Probability and Statistics, London, pp. 70 - 75.
Scheaffer RL, Mendenhall W, Lyman Ott R (1996). Elementary Survey Sampling. Duxbury Press, New York, pp. 95.
Otte J, Gumm I (1997). Intra-cluster correlation coefficients of 20 infections calculated from the results of cluster-sample surveys. Preventive Veterinary Medicine 31: 147 - 150.
}
\note{
If \code{epsilon.r} equals the relative error, the sample estimate should not differ in absolute value from the true unknown population parameter \code{d} by more than \code{epsilon.r * d}.
}
\examples{
## EXAMPLE 1:
## A city contains 20 neighbourhood health clinics and it is desired to take a
## sample of clinics to estimate the total number of persons from all these
## clinics who have been given, during the past 12 month period, prescriptions
## for a recently approved antidepressant. If we assume that the average number
## of people seen at these clinics is 1500 per year with the standard deviation
## equal to 300, and that approximately 5\% of patients (regardless of clinic)
## are given this drug, how many clinics need to be sampled to yield an estimate
## that is within 20\% of the true population value?
pmean <- 1500 * 0.05; psigma <- (300 * 0.05)
epi.sssimpleestc(N = 20, xbar = pmean, sigma = psigma, epsilon = 0.20,
error = "relative", nfractional = FALSE, conf.level = 0.95)
## Four clinics need to be sampled to meet the requirements of the survey.
## EXAMPLE 2:
## We want to estimate the mean bodyweight of deer on a farm. There are 278
## animals present. We anticipate the mean body weight to be around 200 kg
## and the standard deviation of body weight to be 30 kg. We would like to
## be 95\% certain that our estimate is within 10 kg of the true mean. How
## many deer should be sampled?
epi.sssimpleestc(N = 278, xbar = 200, sigma = 30, epsilon = 10,
error = "absolute", nfractional = FALSE, conf.level = 0.95)
## A total of 28 deer need to be sampled to meet the requirements of the survey.
}
\keyword{univar}% at least one, from doc/KEYWORDS
\keyword{univar}% __ONLY ONE__ keyword per line
|
##
rm(list=ls())
# setwd('d:/Study/My projects/Statchem-Diudea/Codes')
source('RobustQSAR_functions.R')
library(ddalpha)
library(ICSNP)
# combined descriptors
combined95 = read.csv("../Data/Combined-descriptors-95.csv")
y95 = as.numeric(combined95[,2])
X95 = as.matrix(combined95[,-(1:2)])
# apply robust scaling
delta = 1e-3
spa = spatial.median(X95, delta)
mu = spa$mu
ep = spa$ep
sigma.vec = apply(X95, 2, mad)
X95 = as.matrix(scale(X95, mu, sigma.vec))
# X95 = as.matrix(scale(X95, mu, scale=F))
which.na = which(is.na(apply(X95,2,var)))
# which.na = which(apply(X95,2,var) < 1e-3)
X95 = X95[,-which.na]
names95 = names(combined95)[-(1:2)][-which.na]
df95 = data.frame(cbind(y95, X95))
n = nrow(X95)
p = ncol(X95)
## Principal Component Analysis
set.seed(04172018)
Xd = X95
depth = depth.projection(X95, X95)
depth = max(depth) - depth
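# depth.projection() assigns larger depth to more central observations;
# subtracting from the maximum turns depth into an outlyingness score. The loop
# below keeps each centred observation's direction but replaces its length with
# that score (only when the original length exceeds ep) before the SVD step.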
for(i in 1:n)
{
z = sqrt(sum((Xd[i, ])^2))
if(z > ep)
{
Xd[i, ] = depth[i] * (Xd[i, ] )/z
}
}
svd95 = svd(Xd)
df.list = list()
for(i in 1:10){
V1 = names95[order(abs(svd95$v[,i]), decreasing=T)][1:10]
V2 = svd95$v[order(abs(svd95$v[,i]), decreasing=T),i][1:10]
idf = data.frame(cbind(V1,round(V2,2)))
colnames(idf) = c("Descriptor","Loading")
df.list[[i]] = idf
}
names(df.list) = paste0("PC",1:10)
## explained variance proportions in data
props = svd95$d/sum(svd95$d)
cumsum(props)
# V1 = names95[order(abs(svd95$v[,1]), decreasing=T)][1:10]
# V2 = svd95$v[order(abs(svd95$v[,1]), decreasing=T),1][1:10]
# data.frame(cbind(V1,round(V2,2)))
#
# V1 = names95[order(abs(svd95$v[,2]), decreasing=T)][1:10]
# V2 = svd95$v[order(abs(svd95$v[,2]), decreasing=T),2][1:10]
# data.frame(cbind(V1,round(V2,2)))
#
# V1 = names95[order(abs(svd95$v[,3]), decreasing=T)][1:10]
# V2 = svd95$v[order(abs(svd95$v[,3]), decreasing=T),3][1:10]
# data.frame(cbind(V1,round(V2,2)))
#
# V1 = names95[order(abs(svd95$v[,4]), decreasing=T)][1:10]
# V2 = svd95$v[order(abs(svd95$v[,4]), decreasing=T),4][1:10]
# data.frame(cbind(V1,round(V2,2)))
|
/Codes/pca_95amine.R
|
no_license
|
shubhobm/Statchem-Diudea
|
R
| false
| false
| 1,998
|
r
|
##
rm(list=ls())
# setwd('d:/Study/My projects/Statchem-Diudea/Codes')
source('RobustQSAR_functions.R')
library(ddalpha)
library(ICSNP)
# combined descriptors
combined95 = read.csv("../Data/Combined-descriptors-95.csv")
y95 = as.numeric(combined95[,2])
X95 = as.matrix(combined95[,-(1:2)])
# apply robust scaling
delta = 1e-3
spa = spatial.median(X95, delta)
mu = spa$mu
ep = spa$ep
sigma.vec = apply(X95, 2, mad)
X95 = as.matrix(scale(X95, mu, sigma.vec))
# X95 = as.matrix(scale(X95, mu, scale=F))
which.na = which(is.na(apply(X95,2,var)))
# which.na = which(apply(X95,2,var) < 1e-3)
X95 = X95[,-which.na]
names95 = names(combined95)[-(1:2)][-which.na]
df95 = data.frame(cbind(y95, X95))
n = nrow(X95)
p = ncol(X95)
## Principal Component Analysis
set.seed(04172018)
Xd = X95
depth = depth.projection(X95, X95)
depth = max(depth) - depth
for(i in 1:n)
{
z = sqrt(sum((Xd[i, ])^2))
if(z > ep)
{
Xd[i, ] = depth[i] * (Xd[i, ] )/z
}
}
svd95 = svd(Xd)
df.list = list()
for(i in 1:10){
V1 = names95[order(abs(svd95$v[,i]), decreasing=T)][1:10]
V2 = svd95$v[order(abs(svd95$v[,i]), decreasing=T),i][1:10]
idf = data.frame(cbind(V1,round(V2,2)))
colnames(idf) = c("Descriptor","Loading")
df.list[[i]] = idf
}
names(df.list) = paste0("PC",1:10)
## explained variance proportions in data
props = svd95$d/sum(svd95$d)
cumsum(props)
# V1 = names95[order(abs(svd95$v[,1]), decreasing=T)][1:10]
# V2 = svd95$v[order(abs(svd95$v[,1]), decreasing=T),1][1:10]
# data.frame(cbind(V1,round(V2,2)))
#
# V1 = names95[order(abs(svd95$v[,2]), decreasing=T)][1:10]
# V2 = svd95$v[order(abs(svd95$v[,2]), decreasing=T),2][1:10]
# data.frame(cbind(V1,round(V2,2)))
#
# V1 = names95[order(abs(svd95$v[,3]), decreasing=T)][1:10]
# V2 = svd95$v[order(abs(svd95$v[,3]), decreasing=T),3][1:10]
# data.frame(cbind(V1,round(V2,2)))
#
# V1 = names95[order(abs(svd95$v[,4]), decreasing=T)][1:10]
# V2 = svd95$v[order(abs(svd95$v[,4]), decreasing=T),4][1:10]
# data.frame(cbind(V1,round(V2,2)))
|
#!/usr/bin/env Rscript
#region(kanto), year(2000~2005)=>2005~2010, model (listas, ga), depth(25,60,100)
setwd("~/Documents/estudos/unb/earthquakemodels/Zona2/dataForR")
loadData = function(region, year, depth, type){
file = paste(region,"_",year,"_LastGen",depth,type,".txt",sep="")
data = read.csv2(file, sep='\n', header=F)
return(data)
}
chooseRegion = function(i){
if (i==1) {
region="Kanto"
}
else if (i==2) {
region="Kansai"
}
else if (i==3) {
region = "Tohoku"
}
else{
region = "EastJapan"
}
return(region)
}
convertToNumeric = function(model){
values = rep(0, length(model$V1))
for (k in 1:length(model$V1)){
values[k] = as.numeric(levels(model$V1[k]))[model$V1[k]]
}
return(values)
}
loadRI = function(year){
file = paste("ri_",year,sep="")
dataRI = read.csv2(file, sep='', header=F)
return(dataRI)
}
finalData = data.frame(
setNames(replicate(5,numeric(0), simplify = F),
c("loglikeValues", "model", "depths", "years", "regions")))
for (i in 1:4) {
region = chooseRegion(i)
for (year in 2000:2005) {
gaModel25 = loadData(region, year, '25', 'gaModel')
gaModel60 = loadData(region, year, '60', 'gaModel')
gaModel100 = loadData(region, year, '100', 'gaModel')
lista25 = loadData(region, year, '25', 'listaGA_New')
lista60 = loadData(region, year, '60', 'listaGA_New')
lista100 = loadData(region, year, '100', 'listaGA_New')
valuesGA25 = convertToNumeric(gaModel25)
valuesGA60 = convertToNumeric(gaModel60)
valuesGA100 = convertToNumeric(gaModel100)
valuesLista25 = convertToNumeric(lista25)
valuesLista60 = convertToNumeric(lista60)
valuesLista100 = convertToNumeric(lista100)
loglikeGA = c(valuesGA25, valuesGA60, valuesGA100)
loglikeLista = c(valuesLista25, valuesLista60, valuesLista100)
loglikeValues = c(loglikeGA, loglikeLista)
nameGa = c(rep("gaModel",30))
nameLista = c(rep("lista",30))
years = c(rep(toString(year+5),60))
regions = c(rep(region, 60))
depth25 = c(rep('25',10))
depth60 = c(rep('60',10))
depth100 = c(rep('100',10))
depthsAmodel = c(depth25, depth60, depth100)
model = c(nameGa, nameLista)
depths= c(depthsAmodel, depthsAmodel)
data = data.frame(loglikeValues, model, depths, years, regions)
if (dim(finalData)[1]==0) {
finalData = merge(finalData, data, all.y=T)
}
else{
finalData=rbind(finalData, data)
}
rm(data)
gaModel25 = loadData(region, year+5, '25', 'gaModelClustered')
gaModel60 = loadData(region, year+5, '60', 'gaModelClustered')
gaModel100 = loadData(region, year+5, '100', 'gaModelClustered')
lista25 = loadData(region, year+5, '25', 'listaGA_NewClustered')
lista60 = loadData(region, year+5, '60', 'listaGA_NewClustered')
lista100 = loadData(region, year+5, '100', 'listaGA_NewClustered')
valuesGA25 = convertToNumeric(gaModel25)
valuesGA60 = convertToNumeric(gaModel60)
valuesGA100 = convertToNumeric(gaModel100)
valuesLista25 = convertToNumeric(lista25)
valuesLista60 = convertToNumeric(lista60)
valuesLista100 = convertToNumeric(lista100)
loglikeGA = c(valuesGA25, valuesGA60,valuesGA100)
loglikeLista = c(valuesLista25, valuesLista60, valuesLista100)
loglikeValues = c(loglikeGA, loglikeLista)
nameGa = c(rep("gaModelCluster",30))
nameLista = c(rep("listaCluster",30))
years = c(rep(toString(year+5),60))
regions = c(rep(region, 60))
depth25 = c(rep('25',10))
depth60 = c(rep('60',10))
depth100 = c(rep('100',10))
depthsAmodel = c(depth25, depth60, depth100)
model = c(nameGa, nameLista)
depths = c(depthsAmodel, depthsAmodel)
data = data.frame(loglikeValues, model,depths, years, regions)
if (dim(finalData)[1]==0) {
finalData = merge(finalData, data, all.y=T)
}
else{
finalData=rbind(finalData, data)
}
rm(data)
}
}
# for (year in 2005:2010){
# riData = loadRI(year)
# loglikeRI = as.numeric(as.character(riData[9,9]))
# model = c("RI")
# depths= c('RI')
# years = year
# regions = region
# data = data.frame(loglikeValues, model, depths, years, regions, clustered)
# print(data)
# finalData=rbind(finalData, data)
# if (dim(finalData)[1]==0) {
# finalData = merge(finalData, data, all.y=T)
# }
# else{
# finalData=rbind(finalData, data)
# }
# rm(data)
# }
save(finalData,file="data.Rda")
|
/code/R-files/anovaGAXilsta.R
|
permissive
|
PyQuake/earthquakemodels
|
R
| false
| false
| 4,628
|
r
|
#!/usr/bin/env Rscript
#region(kanto), year(2000~2005)=>2005~2010, model (listas, ga), depth(25,60,100)
setwd("~/Documents/estudos/unb/earthquakemodels/Zona2/dataForR")
loadData = function(region, year, depth, type){
file = paste(region,"_",year,"_LastGen",depth,type,".txt",sep="")
data = read.csv2(file, sep='\n', header=F)
return(data)
}
chooseRegion = function(i){
if (i==1) {
region="Kanto"
}
else if (i==2) {
region="Kansai"
}
else if (i==3) {
region = "Tohoku"
}
else{
region = "EastJapan"
}
return(region)
}
convertToNumeric = function(model){
values = rep(0, length(model$V1))
for (k in 1:length(model$V1)){
values[k] = as.numeric(levels(model$V1[k]))[model$V1[k]]
}
return(values)
}
loadRI = function(year){
file = paste("ri_",year,sep="")
dataRI = read.csv2(file, sep='', header=F)
return(dataRI)
}
finalData = data.frame(
setNames(replicate(5,numeric(0), simplify = F),
c("loglikeValues", "model", "depths", "years", "regions")))
for (i in 1:4) {
region = chooseRegion(i)
for (year in 2000:2005) {
gaModel25 = loadData(region, year, '25', 'gaModel')
gaModel60 = loadData(region, year, '60', 'gaModel')
gaModel100 = loadData(region, year, '100', 'gaModel')
lista25 = loadData(region, year, '25', 'listaGA_New')
lista60 = loadData(region, year, '60', 'listaGA_New')
lista100 = loadData(region, year, '100', 'listaGA_New')
valuesGA25 = convertToNumeric(gaModel25)
valuesGA60 = convertToNumeric(gaModel60)
valuesGA100 = convertToNumeric(gaModel100)
valuesLista25 = convertToNumeric(lista25)
valuesLista60 = convertToNumeric(lista60)
valuesLista100 = convertToNumeric(lista100)
loglikeGA = c(valuesGA25, valuesGA60, valuesGA100)
loglikeLista = c(valuesLista25, valuesLista60, valuesLista100)
loglikeValues = c(loglikeGA, loglikeLista)
nameGa = c(rep("gaModel",30))
nameLista = c(rep("lista",30))
years = c(rep(toString(year+5),60))
regions = c(rep(region, 60))
depth25 = c(rep('25',10))
depth60 = c(rep('60',10))
depth100 = c(rep('100',10))
depthsAmodel = c(depth25, depth60, depth100)
model = c(nameGa, nameLista)
depths= c(depthsAmodel, depthsAmodel)
data = data.frame(loglikeValues, model, depths, years, regions)
if (dim(finalData)[1]==0) {
finalData = merge(finalData, data, all.y=T)
}
else{
finalData=rbind(finalData, data)
}
rm(data)
gaModel25 = loadData(region, year+5, '25', 'gaModelClustered')
gaModel60 = loadData(region, year+5, '60', 'gaModelClustered')
gaModel100 = loadData(region, year+5, '100', 'gaModelClustered')
lista25 = loadData(region, year+5, '25', 'listaGA_NewClustered')
lista60 = loadData(region, year+5, '60', 'listaGA_NewClustered')
lista100 = loadData(region, year+5, '100', 'listaGA_NewClustered')
valuesGA25 = convertToNumeric(gaModel25)
valuesGA60 = convertToNumeric(gaModel60)
valuesGA100 = convertToNumeric(gaModel100)
valuesLista25 = convertToNumeric(lista25)
valuesLista60 = convertToNumeric(lista60)
valuesLista100 = convertToNumeric(lista100)
loglikeGA = c(valuesGA25, valuesGA60,valuesGA100)
loglikeLista = c(valuesLista25, valuesLista60, valuesLista100)
loglikeValues = c(loglikeGA, loglikeLista)
nameGa = c(rep("gaModelCluster",30))
nameLista = c(rep("listaCluster",30))
years = c(rep(toString(year+5),60))
regions = c(rep(region, 60))
depth25 = c(rep('25',10))
depth60 = c(rep('60',10))
depth100 = c(rep('100',10))
depthsAmodel = c(depth25, depth60, depth100)
model = c(nameGa, nameLista)
depths = c(depthsAmodel, depthsAmodel)
data = data.frame(loglikeValues, model,depths, years, regions)
if (dim(finalData)[1]==0) {
finalData = merge(finalData, data, all.y=T)
}
else{
finalData=rbind(finalData, data)
}
rm(data)
}
}
# for (year in 2005:2010){
# riData = loadRI(year)
# loglikeRI = as.numeric(as.character(riData[9,9]))
# model = c("RI")
# depths= c('RI')
# years = year
# regions = region
# data = data.frame(loglikeValues, model, depths, years, regions, clustered)
# print(data)
# finalData=rbind(finalData, data)
# if (dim(finalData)[1]==0) {
# finalData = merge(finalData, data, all.y=T)
# }
# else{
# finalData=rbind(finalData, data)
# }
# rm(data)
# }
save(finalData,file="data.Rda")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/motifUtils.r
\name{consenusIUPAC}
\alias{consenusIUPAC}
\title{Consensus IUPAC}
\usage{
consenusIUPAC(mstring)
}
\arguments{
\item{mstring}{a base pair string with only nucleotides, e.g. A[AG]AGT}
}
\description{
The reverse of IUPACtoBase; takes base nucleotides and transforms
them to their IUPAC equivalent.
}
\examples{
consenusIUPAC( IUPACtoBase("ARYS"))
# [1] "ARYS"
}
|
/man/consenusIUPAC.Rd
|
permissive
|
alexjgriffith/CCCA
|
R
| false
| true
| 455
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/motifUtils.r
\name{consenusIUPAC}
\alias{consenusIUPAC}
\title{Consensus IUPAC}
\usage{
consenusIUPAC(mstring)
}
\arguments{
\item{mstring}{a base pair string with only nucleotides, e.g. A[AG]AGT}
}
\description{
The reverse of IUPACtoBase; takes base nucleotides and transforms
them to their IUPAC equivalent.
}
\examples{
consenusIUPAC( IUPACtoBase("ARYS"))
# [1] "ARYS"
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pred_int_jags.R
\name{pred_int_jags}
\alias{pred_int_jags}
\title{pred_int_jags}
\usage{
pred_int_jags(
formula,
data,
df = NA,
level = 0.95,
quiet = FALSE,
return = c("pdi", "all"),
...
)
}
\arguments{
\item{formula}{standard R formula response ~ trial}
\item{data}{data frame}
\item{df}{degrees of freedom for the t-distribution. If 'NA' it will be treated as a nuisance parameter (default)}
\item{level}{probability level}
\item{quiet}{whether to display progress of sampling}
\item{return}{type of information to return: either 'pdi', just the prediction interval, or 'all', which includes the output from the model and Gelman diagnostics}
\item{...}{further arguments passed to or from other methods.}
}
\value{
prediction interval (or more detail)
}
\description{
Prediction intervals using JAGS
}
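A minimal usage sketch, not part of the help file above: it assumes pred_int_jags() is provided by the predintma package named in this repository, that JAGS is installed, and that the data hold one response per trial observation; all column names below are hypothetical.
library(predintma)   # assumed package providing pred_int_jags()
set.seed(1)
dat <- data.frame(trial = rep(paste0("T", 1:8), each = 4),
                  yield = rnorm(32, mean = 10, sd = 2))
## 95% prediction interval for a new trial
pred_int_jags(yield ~ trial, data = dat, level = 0.95, quiet = TRUE)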
|
/man/pred_int_jags.Rd
|
no_license
|
femiguez/predintma
|
R
| false
| true
| 897
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pred_int_jags.R
\name{pred_int_jags}
\alias{pred_int_jags}
\title{pred_int_jags}
\usage{
pred_int_jags(
formula,
data,
df = NA,
level = 0.95,
quiet = FALSE,
return = c("pdi", "all"),
...
)
}
\arguments{
\item{formula}{standard R formula response ~ trial}
\item{data}{data frame}
\item{df}{degrees of freedom for the t-distribution. If 'NA' it will be treated as a nuisance parameter (default)}
\item{level}{probability level}
\item{quiet}{whether to display progress of sampling}
\item{return}{type of information to return: either 'pdi', just the prediction interval, or 'all', which includes the output from the model and Gelman diagnostics}
\item{...}{further arguments passed to or from other methods.}
}
\value{
prediction interval (or more detail)
}
\description{
Prediction intervals using JAGS
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/log-pdtmvn-mode-centered-kernel-fn.R
\name{get_theta_optim_bounds_log_pdtmvn_mode_centered_kernel}
\alias{get_theta_optim_bounds_log_pdtmvn_mode_centered_kernel}
\title{Get lower and upper bounds for the theta parameters being estimated
in the pdtmvn_kernel}
\usage{
get_theta_optim_bounds_log_pdtmvn_mode_centered_kernel(theta_list, ...)
}
\arguments{
\item{theta_list}{parameters to pdtmvn kernel in list format}
\item{...}{mop up arguments}
}
\value{
list with two components: lower and upper, numeric vectors
}
\description{
Get lower and upper bounds for the theta parameters being estimated
in the pdtmvn_kernel
}
|
/man/get_theta_optim_bounds_log_pdtmvn_mode_centered_kernel.Rd
|
no_license
|
reichlab/kcde
|
R
| false
| true
| 700
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/log-pdtmvn-mode-centered-kernel-fn.R
\name{get_theta_optim_bounds_log_pdtmvn_mode_centered_kernel}
\alias{get_theta_optim_bounds_log_pdtmvn_mode_centered_kernel}
\title{Get lower and upper bounds for the theta parameters being estimated
in the pdtmvn_kernel}
\usage{
get_theta_optim_bounds_log_pdtmvn_mode_centered_kernel(theta_list, ...)
}
\arguments{
\item{theta_list}{parameters to pdtmvn kernel in list format}
\item{...}{mop up arguments}
}
\value{
list with two components: lower and upper, numeric vectors
}
\description{
Get lower and upper bounds for the theta parameters being estimated
in the pdtmvn_kernel
}
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\docType{package}
\name{vectordf}
\alias{vectordf}
\alias{vectordf-package}
\title{vectordf}
\description{
vectordf
}
\author{
Boris Demeshev
}
|
/man/vectordf.Rd
|
no_license
|
bdemeshev/vectordf
|
R
| false
| false
| 198
|
rd
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\docType{package}
\name{vectordf}
\alias{vectordf}
\alias{vectordf-package}
\title{vectordf}
\description{
vectordf
}
\author{
Boris Demeshev
}
|
\name{SpellCheckFiles}
\alias{SpellCheckFiles}
\alias{print.wordlist}
\title{Spell checking a file or all files within a specified folder}
\usage{
SpellCheckFiles(file = ".", ignore = character(),
local.ignore = TRUE, global.ignore = TRUE)
\method{print}{wordlist}(x, ...)
}
\arguments{
\item{file}{The filename of an individual file, or an individual folder.}
\item{ignore}{The character vector of words to be ignored by hunspell}
\item{local.ignore}{Use a local file of words to be ignored. This file has the same name as the file with .ignore.txt tacked on the end and is colocated with the file being checked. If the file argument is set to a folder then the local.ignore can be set to the name of a file in the current working directory.}
\item{global.ignore}{Use the global word list called words.ignore.txt found in the MyBrailleR folder}
\item{x}{the object to be printed}
\item{...}{other parameters passed to the print method}
}
\value{
A list object with each item of the list being the findings from spell checking each file. The words not found in the dictionary are given as well as the line numbers where they were found.
}
\description{
Check spelling using hunspell. A new print method is also used.
}
\details{
The global list of words to be ignored needs to be saved in the user's MyBrailleR folder. It can be updated as often as the user likes. It should have one word per line, and contain no space, tab or punctuation characters.
}
\author{
A. Jonathan R. Godfrey wrote these functions but leant heavily on functions found in the devtools package.
}
\seealso{
The hunspell package and functions therein.
}
|
/man/SpellCheckFiles.Rd
|
no_license
|
dewarren/BrailleR
|
R
| false
| false
| 1,638
|
rd
|
\name{SpellCheckFiles}
\alias{SpellCheckFiles}
\alias{print.wordlist}
\title{Spell checking a file or all files within a specified folder}
\usage{
SpellCheckFiles(file = ".", ignore = character(),
local.ignore = TRUE, global.ignore = TRUE)
\method{print}{wordlist}(x, ...)
}
\arguments{
\item{file}{The filename of an individual file, or an individual folder.}
\item{ignore}{The character vector of words to be ignored by hunspell}
\item{local.ignore}{Use a local file of words to be ignored. This file has the same name as the file with .ignore.txt tacked on the end and is colocated with the file being checked. If the file argument is set to a folder then the local.ignore can be set to the name of a file in the current working directory.}
\item{global.ignore}{Use the global word list called words.ignore.txt found in the MyBrailleR folder}
\item{x}{the object to be printed}
\item{...}{other parameters passed to the print method}
}
\value{
A list object with each item of the list being the findings from spell checking each file. The words not found in the dictionary are given as well as the line numbers where they were found.
}
\description{
Check spelling using hunspell. A new print method is also used.
}
\details{
The global list of words to be ignored needs to be saved in the user's MyBrailleR folder. It can be updated as often as the user likes. It should have one word per line, and contain no space, tab or punctuation characters.
}
\author{
A. Jonathan R. Godfrey wrote these functions but leant heavily on functions found in the devtools package.
}
\seealso{
The hunspell package and functions therein.
}
|
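A hedged usage sketch for SpellCheckFiles() above; the file name and ignore words are invented, and the call assumes the BrailleR and hunspell packages are installed.
# Hypothetical sketch: 'analysis.Rmd' and the ignore words are made up.
library(BrailleR)
findings <- SpellCheckFiles("analysis.Rmd",
                            ignore = c("dplyr", "Rmd"),
                            global.ignore = FALSE)  # skip the MyBrailleR word list
findings  # printed through the wordlist print method: unknown words plus line numbers
# Check every file in the current folder instead:
# SpellCheckFiles(".", local.ignore = FALSE)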
# Example preprocessing script.
v <- list()
v$player_variables <- c("PlayerID", "Player", "Club", "League_Country", "LeagueID",
"SkinCol", "PhotoID", "PlayerHeightCM", "PlayerBirthd", "BirthD",
"BirthM", "BirthY", "PositionID", "Position", "Position_Detailed")
v$ref_variables <- c("RefereeID", "RefCountryID", "RCperRef", "TotMatPerRef", "meanIAT_RefCountry",
"nIAT", "seIAT", "meanExp_RefCountry", "nExp", "seExp")
v$iat_measures <- c("meanIAT_RefCountry", "nIAT", "seIAT", "meanExp_RefCountry", "nExp", "seExp")
# Fix variable codings
v$make_numeric <- c("BirthD", "meanIAT_RefCountry", "nIAT", "seIAT", "meanExp_RefCountry", "nExp", "seExp")
for (variable in v$make_numeric) {
rdyads[,variable] <- as.numeric(rdyads[,variable])
}
rdyads[rdyads$Position == "", 'Position'] <- NA
# Player and dyad variables
rdyads$SkinCol_4or5 <- as.numeric(rdyads$SkinCol == 4 | rdyads$SkinCol == 5) # binary coding
rdyads$SkinCol_0to1 <- (rdyads$SkinCol -1) / 4 # continuous and rescaled
# create player, referee, and referee country data frames
rplayers <- rdyads[!duplicated(rdyads$PlayerID), v$player_variables]
rrefs <- rdyads[!duplicated(rdyads$RefereeID), v$ref_variables]
rcountries <- rrefs[!duplicated(rrefs$RefCountryID), c('RefCountryID', v$iat_measures)]
# merge in totals to player
merge_sum <- function(variable='RedCards', id='PlayerID', data=rplayers, dyads=rdyads) {
x <- aggregate(dyads[, variable], list(dyads[, id]), sum)
newvar <- paste0(variable, 'Sum')
names(x) <- c(id, newvar)
merge(data,x, all.x=TRUE, sort=FALSE)
}
for (variable in c("Matches", "Goals", "YellowCards", "YellowRed", "RedCards")) {
rplayers <- merge_sum(variable, 'PlayerID', rplayers, rdyads)
rrefs <- merge_sum(variable, 'RefereeID', rrefs, rdyads)
rcountries <- merge_sum(variable, 'RefCountryID', rcountries, rdyads)
}
# create proportion variables
create_prop <- function(data=rdyads, variable='Goals', divisor='Matches') {
newvar <- paste0(variable, 'Prop')
data[,newvar] <- data[,variable] / data[,divisor]
data
}
for (variable in c('Goals', 'YellowCards', 'YellowRed', 'RedCards')) {
rdyads <- create_prop(rdyads, variable, 'Matches')
variable_sum <- paste0(variable, 'Sum')
rplayers <- create_prop(rplayers, variable_sum, 'MatchesSum')
rrefs <- create_prop(rrefs, variable_sum, 'MatchesSum')
rcountries <- create_prop(rcountries, variable_sum, 'MatchesSum')
}
# create modified predictors
rplayers$MatchesSum_log <- log(rplayers$MatchesSum)
rrefs$MatchesSum_log <- log(rrefs$MatchesSum)
# create interaction variables
make_interaction <- function(x1, x2, data=rdyads, separator="BY") {
interaction_variable_name <- paste0(x1, separator, x2)
data[ , interaction_variable_name] <- data[,x1] * data[,x2]
data
}
rdyads <- make_interaction('SkinCol', 'meanIAT_RefCountry')
|
/munge/01-rdyads.R
|
no_license
|
jeromyanglim/anglim-redcards-2014
|
R
| false
| false
| 2,919
|
r
|
# Example preprocessing script.
v <- list()
v$player_variables <- c("PlayerID", "Player", "Club", "League_Country", "LeagueID",
"SkinCol", "PhotoID", "PlayerHeightCM", "PlayerBirthd", "BirthD",
"BirthM", "BirthY", "PositionID", "Position", "Position_Detailed")
v$ref_variables <- c("RefereeID", "RefCountryID", "RCperRef", "TotMatPerRef", "meanIAT_RefCountry",
"nIAT", "seIAT", "meanExp_RefCountry", "nExp", "seExp")
v$iat_measures <- c("meanIAT_RefCountry", "nIAT", "seIAT", "meanExp_RefCountry", "nExp", "seExp")
# Fix variable codings
v$make_numeric <- c("BirthD", "meanIAT_RefCountry", "nIAT", "seIAT", "meanExp_RefCountry", "nExp", "seExp")
for (variable in v$make_numeric) {
rdyads[,variable] <- as.numeric(rdyads[,variable])
}
rdyads[rdyads$Position == "", 'Position'] <- NA
# Player and dyad variables
rdyads$SkinCol_4or5 <- as.numeric(rdyads$SkinCol == 4 | rdyads$SkinCol == 5) # binary coding
rdyads$SkinCol_0to1 <- (rdyads$SkinCol -1) / 4 # continuous and rescaled
# create player, referee, and referee country data frames
rplayers <- rdyads[!duplicated(rdyads$PlayerID), v$player_variables]
rrefs <- rdyads[!duplicated(rdyads$RefereeID), v$ref_variables]
rcountries <- rrefs[!duplicated(rrefs$RefCountryID), c('RefCountryID', v$iat_measures)]
# merge in totals to player
merge_sum <- function(variable='RedCards', id='PlayerID', data=rplayers, dyads=rdyads) {
x <- aggregate(dyads[, variable], list(dyads[, id]), sum)
newvar <- paste0(variable, 'Sum')
names(x) <- c(id, newvar)
merge(data,x, all.x=TRUE, sort=FALSE)
}
for (variable in c("Matches", "Goals", "YellowCards", "YellowRed", "RedCards")) {
rplayers <- merge_sum(variable, 'PlayerID', rplayers, rdyads)
rrefs <- merge_sum(variable, 'RefereeID', rrefs, rdyads)
rcountries <- merge_sum(variable, 'RefCountryID', rcountries, rdyads)
}
# create proportion variables
create_prop <- function(data=rdyads, variable='Goals', divisor='Matches') {
newvar <- paste0(variable, 'Prop')
data[,newvar] <- data[,variable] / data[,divisor]
data
}
for (variable in c('Goals', 'YellowCards', 'YellowRed', 'RedCards')) {
rdyads <- create_prop(rdyads, variable, 'Matches')
variable_sum <- paste0(variable, 'Sum')
rplayers <- create_prop(rplayers, variable_sum, 'MatchesSum')
rrefs <- create_prop(rrefs, variable_sum, 'MatchesSum')
rcountries <- create_prop(rcountries, variable_sum, 'MatchesSum')
}
# create modified predictors
rplayers$MatchesSum_log <- log(rplayers$MatchesSum)
rrefs$MatchesSum_log <- log(rrefs$MatchesSum)
# create interaction variables
make_interaction <- function(x1, x2, data=rdyads, separator="BY") {
interaction_variable_name <- paste0(x1, separator, x2)
data[ , interaction_variable_name] <- data[,x1] * data[,x2]
data
}
rdyads <- make_interaction('SkinCol', 'meanIAT_RefCountry')
|
## Our aim in this experiment is to write a pair of functions, namely,
## "makeCacheMatrix" and "cacheSolve" that cache the inverse of a matrix
## makeCacheMatrix is a function which creates a special "matrix" object that can
## cache its inverse for the input (which is an invertible square matrix)
makeCacheMatrix <- function(x = matrix()) {
inv <- NULL
set <- function(y) {
x <<- y
inv <<- NULL
}
get <- function() x
setinv <- function(inverse) inv <<- inverse
getinv <- function() inv
list(set = set, get = get, setinv = setinv, getinv = getinv)
}
## cacheSolve is a function which computes the inverse of the special "matrix"
## returned by makeCacheMatrix above. If the inverse has already been calculated
## (and the matrix has not changed), then the cachesolve should retrieve the
## inverse from the cache
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
inv <- x$getinv()
if(!is.null(inv)) {
message("getting cached result")
return(inv)
}
data <- x$get()
inv <- solve(data, ...)
x$setinv(inv)
inv
}
|
/cachematrix.R
|
no_license
|
kemprateek003/ProgrammingAssignment2
|
R
| false
| false
| 1,094
|
r
|
## Our aim in this experiment is to write a pair of functions, namely,
## "makeCacheMatrix" and "cacheSolve" that cache the inverse of a matrix
## makeCacheMatrix is a function which creates a special "matrix" object that can
## cache its inverse for the input (which is an invertible square matrix)
makeCacheMatrix <- function(x = matrix()) {
inv <- NULL
set <- function(y) {
x <<- y
inv <<- NULL
}
get <- function() x
setinv <- function(inverse) inv <<- inverse
getinv <- function() inv
list(set = set, get = get, setinv = setinv, getinv = getinv)
}
## cacheSolve is a function which computes the inverse of the special "matrix"
## returned by makeCacheMatrix above. If the inverse has already been calculated
## (and the matrix has not changed), then the cachesolve should retrieve the
## inverse from the cache
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
inv <- x$getinv()
if(!is.null(inv)) {
message("getting cached result")
return(inv)
}
data <- x$get()
inv <- solve(data, ...)
x$setinv(inv)
inv
}
|
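A short usage sketch for the makeCacheMatrix()/cacheSolve() pair above; the matrix values are only illustrative.
m <- makeCacheMatrix(matrix(c(2, 0, 0, 2), nrow = 2))
cacheSolve(m)                            # computes the inverse and caches it
cacheSolve(m)                            # prints "getting cached result"
m$set(matrix(c(1, 2, 3, 4), nrow = 2))   # set() replaces the matrix and clears the cache
cacheSolve(m)                            # recomputed for the new matrix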
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nof1gen.summary.R
\name{frequency_plot}
\alias{frequency_plot}
\title{Frequency plot for raw data}
\usage{
frequency_plot(nof1, title = NULL, bins = 10)
}
\arguments{
\item{nof1}{nof1 object created using nof1.data}
\item{title}{The title of the figure}
\item{bins}{Used for continuous data. Specifies the number of bins. The
default value is 10.}
}
\description{
Frequency plot for raw data
}
\examples{
Y <- laughter$Y
Treat <- laughter$Treat
nof1 <- nof1.data(Y, Treat, ncat = 11, baseline = 'Usual Routine', response = 'ordinal')
frequency_plot(nof1)
}
|
/man/frequency_plot.Rd
|
no_license
|
arisp99/nof1gen
|
R
| false
| true
| 637
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nof1gen.summary.R
\name{frequency_plot}
\alias{frequency_plot}
\title{Frequency plot for raw data}
\usage{
frequency_plot(nof1, title = NULL, bins = 10)
}
\arguments{
\item{nof1}{nof1 object created using nof1.data}
\item{title}{The title of the figure}
\item{bins}{Used for continuous data. Specifies the number of bins. The
default value is 10.}
}
\description{
Frequency plot for raw data
}
\examples{
Y <- laughter$Y
Treat <- laughter$Treat
nof1 <- nof1.data(Y, Treat, ncat = 11, baseline = 'Usual Routine', response = 'ordinal')
frequency_plot(nof1)
}
|
d <- read.table("household_power_consumption.txt", header=TRUE,
sep=";", colClasses="character")
d$Date <- as.Date(d$Date, "%d/%m/%Y")
d$Global_active_power <- as.numeric(d$Global_active_power)
x <- subset(d, as.Date("01/02/2007", "%d/%m/%Y") <= Date & Date <= as.Date("02/02/2007", "%d/%m/%Y"))
png(filename="plot3.png")
time <- paste(as.character.Date(x$Date, "%Y-%m-%d"), x$Time)
time <- strptime(time, format="%Y-%m-%d %H:%M:%S")
plot(time, as.numeric(x$Sub_metering_1), type="l",
xlab="", ylab="Energy sub metering",
col="black")
lines(time, as.numeric(x$Sub_metering_2),
col="red")
lines(time, as.numeric(x$Sub_metering_3),
col="blue")
legend("topright", tail(names(x),3), col=c("black","red","blue"),
lty=1, bty="o")
dev.off()
|
/plot3.R
|
no_license
|
caofan/ExData_Plotting1
|
R
| false
| false
| 786
|
r
|
d <- read.table("household_power_consumption.txt", header=TRUE,
sep=";", colClasses="character")
d$Date <- as.Date(d$Date, "%d/%m/%Y")
d$Global_active_power <- as.numeric(d$Global_active_power)
x <- subset(d, as.Date("01/02/2007", "%d/%m/%Y") <= Date & Date <= as.Date("02/02/2007", "%d/%m/%Y"))
png(filename="plot3.png")
time <- paste(as.character.Date(x$Date, "%Y-%m-%d"), x$Time)
time <- strptime(time, format="%Y-%m-%d %H:%M:%S")
plot(time, as.numeric(x$Sub_metering_1), type="l",
xlab="", ylab="Energy sub metering",
col="black")
lines(time, as.numeric(x$Sub_metering_2),
col="red")
lines(time, as.numeric(x$Sub_metering_3),
col="blue")
legend("topright", tail(names(x),3), col=c("black","red","blue"),
lty=1, bty="o")
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/db.R
\name{get_gxg}
\alias{get_gxg}
\title{Get gene interactions}
\usage{
get_gxg(db, organism, flush)
}
\arguments{
\item{db}{String containing the database to obtain the gene-gene interactions
from. Possible values: 'biogrid', 'string'.}
\item{organism}{Tax ID of the studied organism. The default is 9606 (human).}
\item{flush}{Remove cached results? Boolean value.}
}
\value{
A data.frame with two columns with pairs of interacting proteins.
}
\description{
Wrapper for the different functions to get gene-gene
interactions. Supports cached results.
}
\keyword{internal}
|
/man/get_gxg.Rd
|
no_license
|
hclimente/martini
|
R
| false
| true
| 656
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/db.R
\name{get_gxg}
\alias{get_gxg}
\title{Get gene interactions}
\usage{
get_gxg(db, organism, flush)
}
\arguments{
\item{db}{String containing the database to obtain the gene-gene interactions
from. Possible values: 'biogrid', 'string'.}
\item{organism}{Tax ID of the studied organism. The default is 9606 (human).}
\item{flush}{Remove cached results? Boolean value.}
}
\value{
A data.frame with two columns with pairs of interacting proteins.
}
\description{
Wrapper for the different functions to get gene-gene
interactions. Supports cached results.
}
\keyword{internal}
|
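A hedged sketch of calling the internal helper documented above; get_gxg() is marked internal in martini, so the ':::' access and the argument values below are assumptions.
# Hypothetical call; it downloads interaction data, so it needs network access.
gxg <- martini:::get_gxg(db = "biogrid", organism = 9606, flush = FALSE)
head(gxg)  # two-column data.frame of interacting protein pairs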
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/centrality.R
\name{estimate_centrality_threshold}
\alias{estimate_centrality_threshold}
\title{estimate_centrality_threshold}
\usage{
estimate_centrality_threshold(graph, tolerance = 0.001)
}
\arguments{
\item{graph}{'data.frame' or equivalent object representing the network
graph (see Details)}
\item{tolerance}{Desired maximal degree of inaccuracy in centrality estimates
\itemize{
\item values will be accurate to within this amount, subject to a constant
scaling factor. Note that threshold values increase non-linearly with
decreasing values of 'tolerance'
}}
}
\value{
A single value for 'dist_threshold' that achieves the required tolerance.
}
\description{
Estimate a value for the 'dist_threshold' parameter of the
\link{dodgr_centrality} function. Providing distance thresholds to this
function generally provides considerable speed gains, and results in
approximations of centrality. This function enables the determination of
values of 'dist_threshold' corresponding to specific degrees of accuracy.
}
\note{
This function may take some time to execute. While running, it displays
ongoing information on screen of estimated values of 'dist_threshold' and
associated errors. Thresholds are progressively increased until the error is
reduced below the specified tolerance.
}
|
/fuzzedpackages/dodgr/man/estimate_centrality_threshold.Rd
|
no_license
|
akhikolla/testpackages
|
R
| false
| true
| 1,358
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/centrality.R
\name{estimate_centrality_threshold}
\alias{estimate_centrality_threshold}
\title{estimate_centrality_threshold}
\usage{
estimate_centrality_threshold(graph, tolerance = 0.001)
}
\arguments{
\item{graph}{'data.frame' or equivalent object representing the network
graph (see Details)}
\item{tolerance}{Desired maximal degree of inaccuracy in centrality estimates
\itemize{
\item values will be accurate to within this amount, subject to a constant
scaling factor. Note that threshold values increase non-linearly with
decreasing values of 'tolerance'
}}
}
\value{
A single value for 'dist_threshold' that achieves the required tolerance.
}
\description{
Estimate a value for the 'dist_threshold' parameter of the
\link{dodgr_centrality} function. Providing distance thresholds to this
function generally provides considerable speed gains, and results in
approximations of centrality. This function enables the determination of
values of 'dist_threshold' corresponding to specific degrees of accuracy.
}
\note{
This function may take some time to execute. While running, it displays
ongoing information on screen of estimated values of 'dist_threshold' and
associated errors. Thresholds are progressively increased until the error is
reduced below the specified tolerance.
}
|
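A hedged sketch of the workflow described above: estimate a 'dist_threshold' at a chosen tolerance, then pass it to dodgr_centrality(). The street-network setup is assumed from the dodgr package examples and needs a network download.
library(dodgr)
net <- weight_streetnet(dodgr_streetnet("hampi india"), wt_profile = "foot")
dmax <- estimate_centrality_threshold(net, tolerance = 0.001)  # may take a while
graph_c <- dodgr_centrality(net, dist_threshold = dmax)        # approximate centrality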
library(shiny)
library(quantmod)
getSymbols(c('QQQ', 'SPY', 'BND', 'GLD'), src = "yahoo",
from = '2008-01-03',
to = Sys.Date(),
auto.assign = TRUE)
shinyServer(
function(input, output) {
begin_date<- time(head(QQQ, 1))
end_date<- time(tail(QQQ, 1))
output$qqq_begin <- renderText({QQQ[begin_date]$QQQ.Close * input$QQQ})
output$qqq_end <- renderText({QQQ[end_date]$QQQ.Close * input$QQQ})
output$spy_begin <- renderText({ SPY[begin_date]$SPY.Close * input$SPY})
output$spy_end <- renderText({ SPY[end_date]$SPY.Close * input$SPY})
output$bnd_begin <- renderText({ BND[begin_date]$BND.Close * input$BND})
output$bnd_end <- renderText({ BND[end_date]$BND.Close * input$BND})
output$gld_begin <- renderText({ GLD[begin_date]$GLD.Close * input$GLD})
output$gld_end <- renderText({ GLD[end_date]$GLD.Close * input$GLD})
output$chart <- renderPlot({
candleChart(
(SPY$SPY.Close * input$SPY) +
(QQQ$QQQ.Close * input$QQQ) +
(BND$BND.Close * input$BND) +
(GLD$GLD.Close * input$GLD)
,name=paste("Performance since ",begin_date), theme="white")
if (input$addBBands){
addBBands()
}
})
}
)
|
/server.R
|
no_license
|
yzzheng/DevelopingDataProducts
|
R
| false
| false
| 1,292
|
r
|
library(shiny)
library(quantmod)
getSymbols(c('QQQ', 'SPY', 'BND', 'GLD'), src = "yahoo",
from = '2008-01-03',
to = Sys.Date(),
auto.assign = TRUE)
shinyServer(
function(input, output) {
begin_date<- time(head(QQQ, 1))
end_date<- time(tail(QQQ, 1))
output$qqq_begin <- renderText({QQQ[begin_date]$QQQ.Close * input$QQQ})
output$qqq_end <- renderText({QQQ[end_date]$QQQ.Close * input$QQQ})
output$spy_begin <- renderText({ SPY[begin_date]$SPY.Close * input$SPY})
output$spy_end <- renderText({ SPY[end_date]$SPY.Close * input$SPY})
output$bnd_begin <- renderText({ BND[begin_date]$BND.Close * input$BND})
output$bnd_end <- renderText({ BND[end_date]$BND.Close * input$BND})
output$gld_begin <- renderText({ GLD[begin_date]$GLD.Close * input$GLD})
output$gld_end <- renderText({ GLD[end_date]$GLD.Close * input$GLD})
output$chart <- renderPlot({
candleChart(
(SPY$SPY.Close * input$SPY) +
(QQQ$QQQ.Close * input$QQQ) +
(BND$BND.Close * input$BND) +
(GLD$GLD.Close * input$GLD)
,name=paste("Performance since ",begin_date), theme="white")
if (input$addBBands){
addBBands()
}
})
}
)
|
library(mongolite)
mongourl <- "mongodb://<USER>:<PASSWORD>@test..."
inventario <- mongo("inventario_arraysembdoc", url = mongourl)
inventario$find('{"instock": {"warehouse": "A", "qty": 5} }')
inventario$find('{"instock": {"qty": 5, "warehouse": "A" } }')
inventario$find('{"instock.qty": {"$lte": 20} }')
inventario$find('{"instock.0.qty": {"$lte": 20} }')
inventario$find('{"instock": {"$elemMatch": {"qty": 5, "warehouse": "A"} } }')
inventario$find('{"instock":{"$elemMatch": {"qty": {"$gt": 10, "$lt": 20} } } }')
inventario$find('{ "instock.qty": { "$gt": 10, "$lte": 20 } }')
inventario$find('{ "instock.qty": 5, "instock.warehouse": "A" }')
|
/scripts/11_consultandoinventarios3.R
|
no_license
|
maorjuela73/analytics-nosql
|
R
| false
| false
| 663
|
r
|
library(mongolite)
mongourl <- "mongodb://<USER>:<PASSWORD>@test..."
inventario <- mongo("inventario_arraysembdoc", url = mongourl)
inventario$find('{"instock": {"warehouse": "A", "qty": 5} }')
inventario$find('{"instock": {"qty": 5, "warehouse": "A" } }')
inventario$find('{"instock.qty": {"$lte": 20} }')
inventario$find('{"instock.0.qty": {"$lte": 20} }')
inventario$find('{"instock": {"$elemMatch": {"qty": 5, "warehouse": "A"} } }')
inventario$find('{"instock":{"$elemMatch": {"qty": {"$gt": 10, "$lt": 20} } } }')
inventario$find('{ "instock.qty": { "$gt": 10, "$lte": 20 } }')
inventario$find('{ "instock.qty": 5, "instock.warehouse": "A" }')
|
server <- function(input,output, session){
data <- reactive({
x <- df
})
output$distPlot <- renderPlot({
df <-data()
final_data = filter(df,year>=min(input$inputrange) & year<=max(input$inputrange))
qplot(final_data$year,geom="histogram",
alpha=I(.2),
fill=I("blue"),
col=I("black"),
xlab = "Año",
main = "Histograma of avistamientos")
})
output$map <- renderLeaflet({
spiderIcon = makeIcon("icon/spider-icon.png", 15, 15)
df <- data()
final_data = filter(df,year>=min(input$inputrange) & year<=max(input$inputrange))
m <- leaflet(data = final_data) %>%
addMiniMap()%>%
addScaleBar(options = list(imperial=FALSE))%>%
addTiles() %>%
addMarkers(lng = ~Longitude,
lat = ~Latitude,
icon = ~spiderIcon,
popup = paste("Name:", final_data$acceptedNameUsage, "<br>",
"Year:", final_data$year,"<br>", "Provincia:",final_data$stateProvince))
})
}
|
/server.R
|
no_license
|
thebooort/aracnomap
|
R
| false
| false
| 1,050
|
r
|
server <- function(input,output, session){
data <- reactive({
x <- df
})
output$distPlot <- renderPlot({
df <-data()
final_data = filter(df,year>=min(input$inputrange) & year<=max(input$inputrange))
qplot(final_data$year,geom="histogram",
alpha=I(.2),
fill=I("blue"),
col=I("black"),
xlab = "Año",
main = "Histograma of avistamientos")
})
output$map <- renderLeaflet({
spiderIcon = makeIcon("icon/spider-icon.png", 15, 15)
df <- data()
final_data = filter(df,year>=min(input$inputrange) & year<=max(input$inputrange))
m <- leaflet(data = final_data) %>%
addMiniMap()%>%
addScaleBar(options = list(imperial=FALSE))%>%
addTiles() %>%
addMarkers(lng = ~Longitude,
lat = ~Latitude,
icon = ~spiderIcon,
popup = paste("Name:", final_data$acceptedNameUsage, "<br>",
"Year:", final_data$year,"<br>", "Provincia:",final_data$stateProvince))
})
}
|
scalecurve<-function(x,y=NULL,xlab="p",ylab="A(p)",main="Scale curve",lwd=2,...)
{
#Inputs:
# x = Data matrix, where the rows are the observations and the columns are the variables
# y = optional vector, either numeric or factor, indicating the class of each observation in x
# ... = graphical parameters (except xlab and ylab)
#Outputs:
# r = scale curve defined on previous grid; if y is provided, then r is a list with the scale curve of each class in each component
x<-as.matrix(x)
if (ncol(x)==1) x<-t(x)
if (length(y)==0)
{
n<-nrow(x);p<-ncol(x); # size of the data matrix
t<-1:p;
arg<-matrix(0,1,n); # dimension of the argument vector
arg[1]<-1/n; # first value of the vector arg
cont<-MBD(x,plotting=FALSE)$MBD # calculate the modified band depth of each observation within the sample
I<-order(cont); # order these observations from the one with lowest depth to the one with highest depth
xx<-x[I[n],]; # Deepest curve from the sample
r<-matrix(0,1,n); # initialize the vector r as zero
for (j in 2:n)
{
a<-x[I[n-j+1],]
xx<-rbind(xx,a) # curves that delimit the band
M <- apply(xx,2,max); m <- apply(xx,2,min) # area of the band
aa<-(t[p]-t[1])/(p-1);
sM <- ( aa*sum(M[2:p]) + aa*sum(M[1:(p-1)]))/2
sm <- (aa*sum(m[2:p]) + aa*sum(m[1:(p-1)]))/2
r[j]<-sM-sm # area of the band
arg[j]<-j/n; # proportion of curves that define the band
}
arg <- c(0,arg); r <- c(0,r)
plot(arg,r,ty="l",xlab=xlab,ylab=ylab,main=main,lwd=lwd,...)
}
else
{
y<-as.matrix(y)
n<-nrow(x);p<-ncol(x)
if (nrow(y)!=n) {stop("Length of y mismatches the dimension of x")}
y <- as.factor(y)
r <- list(); arg<- list()
t<-1:p;
mx <- 0
for (class in 1:length(levels(y)))
{
n <- sum(y==levels(y)[class])
xc <- x[y==levels(y)[class],]
arg[[class]]<-matrix(0,1,n) # dimension of the argument vector
arg[[class]][1]<-1/n # first value of the vector arg
cont<-MBD(xc, plotting=FALSE)$MBD # calculate the modified band depth of each observation within the sample
I<-order(cont) # order these observations from the one with lowest depth to the one with highest depth
xx<-xc[I[n],] # Deepest curve from the sample
r[[class]]<-matrix(0,1,n) # initialize the vector r as zero
for (j in 2:n)
{
a<-xc[I[n-j+1],]
xx<-rbind(xx,a) # curves that delimit the band
M <- apply(xx,2,max); m <- apply(xx,2,min); # area of the band
aa<-(t[p]-t[1])/(p-1);
sM <- ( aa*sum(M[2:p]) + aa*sum(M[1:(p-1)]))/2
sm <- (aa*sum(m[2:p]) + aa*sum(m[1:(p-1)]))/2
r[[class]][j]<-sM-sm;
arg[[class]][j]<-j/n; # proportion of curves that define the band
}
arg[[class]] <- c(0,arg[[class]]); r[[class]] <- c(0,r[[class]])
mx <- max(c(mx,r[[class]]))
} ## end FOR class
plot(seq(0,1,0.1),c(0,rep(mx,10)),ty="n",xlab=xlab,ylab=ylab,main=main,lwd=lwd,...)
for (class in 1:length(levels(y)))
{
lines(arg[[class]],r[[class]], lwd=2, lty=class)
} ### end FOR class
legend("topleft",legend=levels(y),lty=1:length(levels(y)),lwd=2)
} ## end ELSE
return(r)
}
|
/R/scalecurve.R
|
no_license
|
cran/depthTools
|
R
| false
| false
| 3,355
|
r
|
scalecurve<-function(x,y=NULL,xlab="p",ylab="A(p)",main="Scale curve",lwd=2,...)
{
#Inputs:
# x = Data matrix, where the rows are the observations and the columns are the variables
# y = optional vector, either numeric or factor, indicating the class of each observation in x
# ... = graphical parameters (except xlab and ylab)
#Outputs:
# r = scale curve defined on previous grid; if y is provided, then r is a list with the scale curve of each class in each component
x<-as.matrix(x)
if (ncol(x)==1) x<-t(x)
if (length(y)==0)
{
n<-nrow(x);p<-ncol(x); # size of the data matrix
t<-1:p;
arg<-matrix(0,1,n); # dimension of the argument vector
arg[1]<-1/n; # first value of the vector arg
cont<-MBD(x,plotting=FALSE)$MBD # calculate the modified band depth of each observation within the sample
I<-order(cont); # order these observations from the one with lowest depth to the one with highest depth
xx<-x[I[n],]; # Deepest curve from the sample
r<-matrix(0,1,n); # initialize the vector r as zero
for (j in 2:n)
{
a<-x[I[n-j+1],]
xx<-rbind(xx,a) # curves that delimit the band
M <- apply(xx,2,max); m <- apply(xx,2,min) # area of the band
aa<-(t[p]-t[1])/(p-1);
sM <- ( aa*sum(M[2:p]) + aa*sum(M[1:(p-1)]))/2
sm <- (aa*sum(m[2:p]) + aa*sum(m[1:(p-1)]))/2
r[j]<-sM-sm # area of the band
arg[j]<-j/n; # proportion of curves that define the band
}
arg <- c(0,arg); r <- c(0,r)
plot(arg,r,ty="l",xlab=xlab,ylab=ylab,main=main,lwd=lwd,...)
}
else
{
y<-as.matrix(y)
n<-nrow(x);p<-ncol(x)
if (nrow(y)!=n) {stop("Length of y mismatches the dimension of x")}
y <- as.factor(y)
r <- list(); arg<- list()
t<-1:p;
mx <- 0
for (class in 1:length(levels(y)))
{
n <- sum(y==levels(y)[class])
xc <- x[y==levels(y)[class],]
arg[[class]]<-matrix(0,1,n) # dimension of the argument vector
arg[[class]][1]<-1/n # first value of the vector arg
cont<-MBD(xc, plotting=FALSE)$MBD # calculate the modified band depth of each observation within the sample
I<-order(cont) # order these observations from the one with lowest depth to the one with highest depth
xx<-xc[I[n],] # Deepest curve from the sample
r[[class]]<-matrix(0,1,n) # initialize the vector r as zero
for (j in 2:n)
{
a<-xc[I[n-j+1],]
xx<-rbind(xx,a) # curves that delimit the band
M <- apply(xx,2,max); m <- apply(xx,2,min); # area of the band
aa<-(t[p]-t[1])/(p-1);
sM <- ( aa*sum(M[2:p]) + aa*sum(M[1:(p-1)]))/2
sm <- (aa*sum(m[2:p]) + aa*sum(m[1:(p-1)]))/2
r[[class]][j]<-sM-sm;
arg[[class]][j]<-j/n; # proportion of curves that define the band
}
arg[[class]] <- c(0,arg[[class]]); r[[class]] <- c(0,r[[class]])
mx <- max(c(mx,r[[class]]))
} ## end FOR class
plot(seq(0,1,0.1),c(0,rep(mx,10)),ty="n",xlab=xlab,ylab=ylab,main=main,lwd=lwd,...)
for (class in 1:length(levels(y)))
{
lines(arg[[class]],r[[class]], lwd=2, lty=class)
} ### end FOR class
legend("topleft",legend=levels(y),lty=1:length(levels(y)),lwd=2)
} ## end ELSE
return(r)
}
|
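A hedged usage sketch for scalecurve() above; the 'prostate' dataset name and its column layout are assumed from the depthTools package examples.
library(depthTools)
data(prostate)               # assumed: expression values plus a class column
x <- prostate[, 1:100]       # gene-expression matrix (rows = samples; layout assumed)
y <- prostate[, 101]         # class labels (layout assumed)
r  <- scalecurve(x)          # single scale curve for the pooled sample
r2 <- scalecurve(x, y)       # one curve per class, drawn on a common plot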
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Do not modify this file since it was automatically generated from:
%
% RspShSourceCode.R
%
% by the Rdoc compiler part of the R.oo package.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\name{RspShSourceCode}
\docType{class}
\alias{RspShSourceCode}
\title{The RspShSourceCode class}
\description{
Package: R.rsp \cr
\bold{Class RspShSourceCode}\cr
\code{character}\cr
\code{~~|}\cr
\code{~~+--}\code{\link[R.rsp]{RspObject}}\cr
\code{~~~~~~~|}\cr
\code{~~~~~~~+--}\code{\link[R.rsp]{RspProduct}}\cr
\code{~~~~~~~~~~~~|}\cr
\code{~~~~~~~~~~~~+--}\code{\link[R.rsp]{RspSourceCode}}\cr
\code{~~~~~~~~~~~~~~~~~|}\cr
\code{~~~~~~~~~~~~~~~~~+--}\emph{\code{RspShSourceCode}}\cr
\bold{Directly known subclasses:}\cr
\cr
public abstract class \bold{RspShSourceCode}\cr
extends \emph{\link[R.rsp]{RspSourceCode}}\cr
An RspShSourceCode object is an \code{\link{RspSourceCode}} holding shell code.
}
\usage{
RspShSourceCode(...)
}
\arguments{
\item{...}{\code{\link[base]{character}} strings.}
}
\section{Fields and Methods}{
\bold{Methods:}\cr
\tabular{rll}{
\tab \code{\link[R.rsp:evaluate.RspShSourceCode]{evaluate}} \tab Evaluates the shell (sh) code.\cr
\tab \code{findProcessor} \tab -\cr
\tab \code{rcat} \tab -\cr
\tab \code{rfile} \tab -\cr
}
\bold{Methods inherited from RspSourceCode}:\cr
evaluate, print, rstring, tangle, tidy
\bold{Methods inherited from RspProduct}:\cr
!, findProcessor, getType, hasProcessor, print, process, view
\bold{Methods inherited from RspObject}:\cr
print
\bold{Methods inherited from character}:\cr
Ops,nonStructure,vector-method, Ops,structure,vector-method, Ops,vector,nonStructure-method, Ops,vector,structure-method, all.equal, as.Date, as.POSIXlt, as.data.frame, as.raster, coerce,ANY,character-method, coerce,character,SuperClassMethod-method, coerce,character,signature-method, coerce<-,ObjectsWithPackage,character-method, coerce<-,signature,character-method, downloadFile, formula, getDLLRegisteredRoutines, isOpen, toAsciiRegExprPattern, toFileListTree, toLatex, uses
}
\author{Henrik Bengtsson}
\keyword{classes}
\keyword{internal}
|
/man/RspShSourceCode.Rd
|
no_license
|
HenrikBengtsson/R.rsp
|
R
| false
| false
| 2,222
|
rd
|
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Do not modify this file since it was automatically generated from:
%
% RspShSourceCode.R
%
% by the Rdoc compiler part of the R.oo package.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\name{RspShSourceCode}
\docType{class}
\alias{RspShSourceCode}
\title{The RspShSourceCode class}
\description{
Package: R.rsp \cr
\bold{Class RspShSourceCode}\cr
\code{character}\cr
\code{~~|}\cr
\code{~~+--}\code{\link[R.rsp]{RspObject}}\cr
\code{~~~~~~~|}\cr
\code{~~~~~~~+--}\code{\link[R.rsp]{RspProduct}}\cr
\code{~~~~~~~~~~~~|}\cr
\code{~~~~~~~~~~~~+--}\code{\link[R.rsp]{RspSourceCode}}\cr
\code{~~~~~~~~~~~~~~~~~|}\cr
\code{~~~~~~~~~~~~~~~~~+--}\emph{\code{RspShSourceCode}}\cr
\bold{Directly known subclasses:}\cr
\cr
public abstract class \bold{RspShSourceCode}\cr
extends \emph{\link[R.rsp]{RspSourceCode}}\cr
An RspShSourceCode object is an \code{\link{RspSourceCode}} holding shell code.
}
\usage{
RspShSourceCode(...)
}
\arguments{
\item{...}{\code{\link[base]{character}} strings.}
}
\section{Fields and Methods}{
\bold{Methods:}\cr
\tabular{rll}{
\tab \code{\link[R.rsp:evaluate.RspShSourceCode]{evaluate}} \tab Evaluates the shell (sh) code.\cr
\tab \code{findProcessor} \tab -\cr
\tab \code{rcat} \tab -\cr
\tab \code{rfile} \tab -\cr
}
\bold{Methods inherited from RspSourceCode}:\cr
evaluate, print, rstring, tangle, tidy
\bold{Methods inherited from RspProduct}:\cr
!, findProcessor, getType, hasProcessor, print, process, view
\bold{Methods inherited from RspObject}:\cr
print
\bold{Methods inherited from character}:\cr
Ops,nonStructure,vector-method, Ops,structure,vector-method, Ops,vector,nonStructure-method, Ops,vector,structure-method, all.equal, as.Date, as.POSIXlt, as.data.frame, as.raster, coerce,ANY,character-method, coerce,character,SuperClassMethod-method, coerce,character,signature-method, coerce<-,ObjectsWithPackage,character-method, coerce<-,signature,character-method, downloadFile, formula, getDLLRegisteredRoutines, isOpen, toAsciiRegExprPattern, toFileListTree, toLatex, uses
}
\author{Henrik Bengtsson}
\keyword{classes}
\keyword{internal}
|
getwd()
#setwd("C:/Users/Hape/Documents/Biohackathon")
library(readxl)
library(dplyr)
data <- read_excel("Excel interventions.xlsx", sheet=1)
##add additional countries
vector_new_country <- c("Netherlands", NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA )
vector_new_country2 <- c("United_States", NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA )
vector_new_country3 <- c("Greece", NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA )
#bind new country vectors to dataframe
data <- rbind(data, vector_new_country)
data <- rbind(data, vector_new_country2)
data <- rbind(data, vector_new_country3)
data$Country
#arrange in alphabetical order
data <- data %>%
arrange(Country)
data$Country
##rename columns
names(data)[names(data)=="sport"] <- "sport_begin"
#begin of lockdown schools, universities and child care
class(data$schools_universities)
names(data)[names(data)=="schools_universities"] <- "school_close_begin"
##correct dates
#correct dates of school closings (Date is the Date from when restriction is in place)
data$school_close_begin[data$Country=="Germany"] <- "2020-03-16"
data$school_close_begin[data$Country=="Greece"] <- "2020-03-10"
data$school_close_begin[data$Country=="Netherlands"] <- "2020-03-16"
data$school_close_begin[data$Country=="France"] <- "2020-03-16"
#correct dates of public events
data$public_events[data$Country=="Germany"] <- "2020-03-13"
#add dates of planned stop/when restrictions stopped or are loosened, maybe something to consider later on
#data$schools_universities_close_stop[data$Country=="Germany"] <- "2020-04-20"
#data$schools_universities_close_stop[data$Country=="Netherlands"] <- "2020-05-03"
##define new vectors for restaurant close-down and planned stop
#define vector restaurants close
vector_restaurants_begin <- c(NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA)
#define vector border closed
vector_border_close <- c(NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA)
#define type border close
vector_border_type <- c(NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA)
#land border control
vector_border_control <- c(NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA)
#flight restriction incoming passenger
vector_flight_rest <- c(NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA)
#add vectors to dataframe
data$rest_begin <- vector_restaurants_begin
data$border_close <- vector_border_close
data$border_type <- vector_border_type
data$border_control <- vector_border_control
data$flight_rest <- vector_flight_rest
##fill empty vectors
#restaurant close begin
##no close of restaurants in Sweden as of 24th of March
##apparently no full closure in Norway, open when they can ensure that customers are 1m apart
data$rest_begin[data$Country=="Austria"] <- "2020-03-16"
data$rest_begin[data$Country=="Belgium"] <- "2020-03-16"
data$rest_begin[data$Country=="Denmark"] <- "2020-03-18"
data$rest_begin[data$Country=="France"] <- "2020-03-15"
data$rest_begin[data$Country=="Germany"] <- "2020-03-23"
data$rest_begin[data$Country=="Greece"] <- "2020-03-13"
data$rest_begin[data$Country=="Italy"] <- "2020-03-12"
data$rest_begin[data$Country=="Netherlands"] <- "2020-03-16"
data$rest_begin[data$Country=="Spain"] <- "2020-03-15"
data$rest_begin[data$Country=="Switzerland"] <- "2020-03-17"
data$rest_begin[data$Country=="United_Kingdom"] <- "2020-03-21"
#sports begin
data$sport_begin[data$Country=="Netherlands"] <- "2020-03-12"
data$sport_begin[data$Country=="Belgium"] <- "2020-03-16"
data$sport_begin[data$Country=="Germany"] <- "2020-03-16"
data$sport_begin[data$Country=="Greece"] <- "2020-03-12"
data$sport_begin[data$Country=="United_Kingdom"] <- "2020-03-21"
##need to add rest of countries
#self-isolating if ill
data$self_isolating_if_ill[data$Country=="Netherlands"] <- "2020-03-12"
data$self_isolating_if_ill[data$Country=="United_States"] <- "2020-02-29"
#need to add: United_States
#public events
data$public_events[data$Country=="Greece"] <- "2020-03-13"
data$public_events[data$Country=="Netherlands"] <- "2020-03-13"
#lockdown
## none in the Netherlands
## different restrictions for each state in the US
data$lockdown[data$Country=="Greece"] <- "2020-03-23"
#border close
## none in the UK
## none in Sweden as of 14th of March
## none in Italy --> surrounding countries introduced them
data$border_close[data$Country=="Austria"] <- "2020-03-11"
data$border_close[data$Country=="Belgium"] <- "2020-03-21"
data$border_close[data$Country=="Denmark"] <- "2020-03-14"
data$border_close[data$Country=="France"] <- "2020-03-17"
data$border_close[data$Country=="Germany"] <- "2020-03-16"
data$border_close[data$Country=="Greece"] <- "2020-03-16"
data$border_close[data$Country=="Netherlands"] <- "2020-03-20"
data$border_close[data$Country=="Norway"] <- "2020-03-16"
data$border_close[data$Country=="Spain"] <- "2020-03-17"
data$border_close[data$Country=="Switzerland"] <- "2020-03-25"
data$border_close[data$Country=="United_States"] <- "2020-03-12"
#border type
## full: only citizens from country may travel
## partly: not all borders closed, or certain regulations in place such as non-essential travel being forbidden
## none: no true restrictions, maybe only advice for self-quarantine
data$border_type[data$Country=="Austria"]<-"partly"
data$border_type[data$Country=="Belgium"]<-"partly"
data$border_type[data$Country=="Denmark"]<- "full"
data$border_type[data$Country=="France"] <- "partly"
data$border_type[data$Country=="Germany"]<-"partly"
data$border_type[data$Country=="Greece"]<-"full"
data$border_type[data$Country=="Netherlands"] <- "partly"
data$border_type[data$Country=="Norway"]<- "full"
data$border_type[data$Country=="Spain"]<- "full"
data$border_type[data$Country=="Switzerland"]<- "full"
data$border_type[data$Country=="Sweden"]<- "none"
data$border_type[data$Country=="United_Kingdom"] <- "none"
data$border_type[data$Country=="United_States"] <- "partly"
#travel restrictions
data$travel_restrictions[data$Country=="Greece"] <- "18.03.2020"
data$travel_restrictions[data$Country=="Netherlands"] <- "17.03.2020"
data$travel_restrictions[data$Country=="United_States"] <- "19.03.2020"
#land border control
##Netherlands none as of 18th of March 2020
##apparently none in Sweden
data$border_control[data$Country=="Austria"] <- "2020-03-11"
data$border_control[data$Country=="Belgium"] <- "2020-03-21"
data$border_control[data$Country=="Denmark"] <- "2020-03-14"
data$border_control[data$Country=="France"] <- "2020-03-16"
data$border_control[data$Country=="Germany"] <- "2020-03-16"
data$border_control[data$Country=="Greece"] <- "2020-03-16"
data$border_control[data$Country=="Italy"] <- "2020-03-11"
data$border_control[data$Country=="Norway"] <- "2020-03-16"
data$border_control[data$Country=="Spain"] <- "2020-03-17"
data$border_control[data$Country=="Switzerland"] <- "2020-03-13"
data$border_control[data$Country=="United_Kingdom"] <- "2020-03-25"
data$border_control[data$Country=="United_States"] <- "2020-03-19"
#flight restrictions for incoming passengers
##apparently none in UK?
data$flight_rest[data$Country=="Austria"] <- "2020-03-17"
data$flight_rest[data$Country=="Belgium"] <- "2020-03-21"
data$flight_rest[data$Country=="Denmark"] <- "2020-03-14"
data$flight_rest[data$Country=="France"] <- "2020-03-17"
data$flight_rest[data$Country=="Germany"] <- "2020-03-17"
data$flight_rest[data$Country=="Greece"] <- "2020-03-17"
data$flight_rest[data$Country=="Italy"] <- "2020-01-31"
data$flight_rest[data$Country=="Netherlands"] <- "2020-03-17"
data$flight_rest[data$Country=="Norway"] <- "2020-03-16"
data$flight_rest[data$Country=="Spain"] <- "2020-03-17"
data$flight_rest[data$Country=="Switzerland"] <- "2020-03-17"
data$flight_rest[data$Country=="Sweden"] <- "2020-03-17"
data$flight_rest[data$Country=="United_States"] <- "2020-03-13"
##remove empty or unnecessary columns
data <- data %>%
select(Country, school_close_begin, travel_restrictions, public_events, sport_begin,
lockdown, social_distancing_encouraged, self_isolating_if_ill,
rest_begin, border_close, border_type, flight_rest, border_control)
##change vars to as.Date
class(data$travel_restrictions)
class(data$school_close_begin)
data$travel_restrictions <- as.Date(data$travel_restrictions, format = "%d.%m.%Y")
data$school_close_begin <- as.Date(data$school_close_begin, format = "%Y-%m-%d")
data$public_events <- as.Date(data$public_events, format = "%Y-%m-%d")
data$sport_begin <- as.Date(data$sport_begin, format = "%Y-%m-%d")
data$lockdown <- as.Date(data$lockdown, format = "%Y-%m-%d")
data$social_distancing_encouraged <- as.Date(data$social_distancing_encouraged, format = "%Y-%m-%d")
data$self_isolating_if_ill <- as.Date(data$self_isolating_if_ill, format = "%Y-%m-%d")
data$rest_begin <- as.Date(data$rest_begin, format = "%Y-%m-%d")
data$border_close <- as.Date(data$border_close, format = "%Y-%m-%d")
data$border_control <- as.Date(data$border_control, format = "%Y-%m-%d")
data$flight_rest<- as.Date(data$flight_rest, format = "%Y-%m-%d")
#write data to csv file
write.table(data, file="C:/Users/Hape/Documents/Biohackathon/policies_corona.csv", col.names = TRUE,
sep=",")
#write data to txt file
write.table(data, file="C:/Users/Hape/Documents/Biohackathon/policies_corona.txt", col.names = TRUE,
sep=",")
#**************************
##make extra file for US states
##single states of the US
#restaurants close
data$rest_begin[data$Country=="Minnesota"] <- "2020-03-16"
#sports close
data$sport_begin[data$Country=="Minnesota"] <- "2020-03-16"
#public events
data$public_events[data$Country=="Minnesota"] <- "2020-03-13"
#schools closed (only schools, not universities or child care)
data$school_close_begin[data$Country=="Oregon"] <- "2020-03-13"
data$school_close_begin[data$Country=="Pennsylvenia"] <- "2020-03-13"
data$school_close_begin[data$Country=="Illinois"] <- "2020-03-17"
data$school_close_begin[data$Country=="West_Virginia"] <- "2020-03-16"
data$school_close_begin[data$Country=="South_Dakota"] <- "2020-03-16" ##reopen 20th of March
#universities closed
#data$uni_close[data$Country=="Hawai"] <- "2020-03-16"
|
/Policies Europe/biohack_07042020.R
|
no_license
|
covid19-bh-biostats/data
|
R
| false
| false
| 10,379
|
r
|
getwd()
#setwd("C:/Users/Hape/Documents/Biohackathon")
library(readxl)
library(dplyr)
data <- read_excel("Excel interventions.xlsx", sheet=1)
##add additional countries
vector_new_country <- c("Netherlands", NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA )
vector_new_country2 <- c("United_States", NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA )
vector_new_country3 <- c("Greece", NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA )
#bind new country vectors to dataframe
data <- rbind(data, vector_new_country)
data <- rbind(data, vector_new_country2)
data <- rbind(data, vector_new_country3)
data$Country
#arrange in alphabetical order
data <- data %>%
arrange(Country)
data$Country
##rename columns
names(data)[names(data)=="sport"] <- "sport_begin"
#begin of lockdown schools, universities and child care
class(data$schools_universities)
names(data)[names(data)=="schools_universities"] <- "school_close_begin"
##correct dates
#correct dates of school closings (Date is the Date from when restriction is in place)
data$school_close_begin[data$Country=="Germany"] <- "2020-03-16"
data$school_close_begin[data$Country=="Greece"] <- "2020-03-10"
data$school_close_begin[data$Country=="Netherlands"] <- "2020-03-16"
data$school_close_begin[data$Country=="France"] <- "2020-03-16"
#correct dates of public events
data$public_events[data$Country=="Germany"] <- "2020-03-13"
#add dates of planned stop/when restrictions stopped or are loosened, maybe something to consider later on
#data$schools_universities_close_stop[data$Country=="Germany"] <- "2020-04-20"
#data$schools_universities_close_stop[data$Country=="Netherlands"] <- "2020-05-03"
##define new vectors for restaurant close-down and planned stop
#define vector restaurants close
vector_restaurants_begin <- c(NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA)
#define vector border closed
vector_border_close <- c(NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA)
#define type border close
vector_border_type <- c(NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA)
#land border control
vector_border_control <- c(NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA)
#flight restriction incoming passenger
vector_flight_rest <- c(NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA)
#add vectors to dataframe
data$rest_begin <- vector_restaurants_begin
data$border_close <- vector_border_close
data$border_type <- vector_border_type
data$border_control <- vector_border_control
data$flight_rest <- vector_flight_rest
##fill empty vectors
#restaurant close begin
##no close of restaurants in Sweden as of 24th of March
##apparently no full closure in Norway, open when they can ensure that customers are 1m apart
data$rest_begin[data$Country=="Austria"] <- "2020-03-16"
data$rest_begin[data$Country=="Belgium"] <- "2020-03-16"
data$rest_begin[data$Country=="Denmark"] <- "2020-03-18"
data$rest_begin[data$Country=="France"] <- "2020-03-15"
data$rest_begin[data$Country=="Germany"] <- "2020-03-23"
data$rest_begin[data$Country=="Greece"] <- "2020-03-13"
data$rest_begin[data$Country=="Italy"] <- "2020-03-12"
data$rest_begin[data$Country=="Netherlands"] <- "2020-03-16"
data$rest_begin[data$Country=="Spain"] <- "2020-03-15"
data$rest_begin[data$Country=="Switzerland"] <- "2020-03-17"
data$rest_begin[data$Country=="United_Kingdom"] <- "2020-03-21"
#sports begin
data$sport_begin[data$Country=="Netherlands"] <- "2020-03-12"
data$sport_begin[data$Country=="Belgium"] <- "2020-03-16"
data$sport_begin[data$Country=="Germany"] <- "2020-03-16"
data$sport_begin[data$Country=="Greece"] <- "2020-03-12"
data$sport_begin[data$Country=="United_Kingdom"] <- "2020-03-21"
##need to add rest of countries
#self-isolating if ill
data$self_isolating_if_ill[data$Country=="Netherlands"] <- "2020-03-12"
data$self_isolating_if_ill[data$Country=="United_States"] <- "2020-02-29"
#need to add: United_States
#public events
data$public_events[data$Country=="Greece"] <- "2020-03-13"
data$public_events[data$Country=="Netherlands"] <- "2020-03-13"
#lockdown
## none in the Netherlands
## different restrictions for each state in the US
data$lockdown[data$Country=="Greece"] <- "2020-03-23"
#border close
## none in the UK
## none in Sweden as of 14th of March
## none in Italy --> surrounding countries introduced them
data$border_close[data$Country=="Austria"] <- "2020-03-11"
data$border_close[data$Country=="Belgium"] <- "2020-03-21"
data$border_close[data$Country=="Denmark"] <- "2020-03-14"
data$border_close[data$Country=="France"] <- "2020-03-17"
data$border_close[data$Country=="Germany"] <- "2020-03-16"
data$border_close[data$Country=="Greece"] <- "2020-03-16"
data$border_close[data$Country=="Netherlands"] <- "2020-03-20"
data$border_close[data$Country=="Norway"] <- "2020-03-16"
data$border_close[data$Country=="Spain"] <- "2020-03-17"
data$border_close[data$Country=="Switzerland"] <- "2020-03-25"
data$border_close[data$Country=="United_States"] <- "2020-03-12"
#border type
## full: only citizens from country may travel
## partly: not all borders closed, or certain regulations in place such as non-essential travel being forbidden
## none: no true restrictions, maybe only advice for self-quarantine
data$border_type[data$Country=="Austria"]<-"partly"
data$border_type[data$Country=="Belgium"]<-"partly"
data$border_type[data$Country=="Denmark"]<- "full"
data$border_type[data$Country=="France"] <- "partly"
data$border_type[data$Country=="Germany"]<-"partly"
data$border_type[data$Country=="Greece"]<-"full"
data$border_type[data$Country=="Netherlands"] <- "partly"
data$border_type[data$Country=="Norway"]<- "full"
data$border_type[data$Country=="Spain"]<- "full"
data$border_type[data$Country=="Switzerland"]<- "full"
data$border_type[data$Country=="Sweden"]<- "none"
data$border_type[data$Country=="United_Kingdom"] <- "none"
data$border_type[data$Country=="United_States"] <- "partly"
#travel restrictions
data$travel_restrictions[data$Country=="Greece"] <- "18.03.2020"
data$travel_restrictions[data$Country=="Netherlands"] <- "17.03.2020"
data$travel_restrictions[data$Country=="United_States"] <- "19.03.2020"
#land border control
##Netherlands none as of 18th of March 2020
##apparently none in Sweden
data$border_control[data$Country=="Austria"] <- "2020-03-11"
data$border_control[data$Country=="Belgium"] <- "2020-03-21"
data$border_control[data$Country=="Denmark"] <- "2020-03-14"
data$border_control[data$Country=="France"] <- "2020-03-16"
data$border_control[data$Country=="Germany"] <- "2020-03-16"
data$border_control[data$Country=="Greece"] <- "2020-03-16"
data$border_control[data$Country=="Italy"] <- "2020-03-11"
data$border_control[data$Country=="Norway"] <- "2020-03-16"
data$border_control[data$Country=="Spain"] <- "2020-03-17"
data$border_control[data$Country=="Switzerland"] <- "2020-03-13"
data$border_control[data$Country=="United_Kingdom"] <- "2020-03-25"
data$border_control[data$Country=="United_States"] <- "2020-03-19"
#flight restrictions for incoming passengers
##apparently none in UK?
data$flight_rest[data$Country=="Austria"] <- "2020-03-17"
data$flight_rest[data$Country=="Belgium"] <- "2020-03-21"
data$flight_rest[data$Country=="Denmark"] <- "2020-03-14"
data$flight_rest[data$Country=="France"] <- "2020-03-17"
data$flight_rest[data$Country=="Germany"] <- "2020-03-17"
data$flight_rest[data$Country=="Greece"] <- "2020-03-17"
data$flight_rest[data$Country=="Italy"] <- "2020-01-31"
data$flight_rest[data$Country=="Netherlands"] <- "2020-03-17"
data$flight_rest[data$Country=="Norway"] <- "2020-03-16"
data$flight_rest[data$Country=="Spain"] <- "2020-03-17"
data$flight_rest[data$Country=="Switzerland"] <- "2020-03-17"
data$flight_rest[data$Country=="Sweden"] <- "2020-03-17"
data$flight_rest[data$Country=="United_States"] <- "2020-03-13"
##remove empty or unnecessary columns
data <- data %>%
select(Country, school_close_begin, travel_restrictions, public_events, sport_begin,
lockdown, social_distancing_encouraged, self_isolating_if_ill,
rest_begin, border_close, border_type, flight_rest, border_control)
##change vars to as.Date
class(data$travel_restrictions)
class(data$school_close_begin)
data$travel_restrictions <- as.Date(data$travel_restrictions, format = "%d.%m.%Y")
data$school_close_begin <- as.Date(data$school_close_begin, format = "%Y-%m-%d")
data$public_events <- as.Date(data$public_events, format = "%Y-%m-%d")
data$sport_begin <- as.Date(data$sport_begin, format = "%Y-%m-%d")
data$lockdown <- as.Date(data$lockdown, format = "%Y-%m-%d")
data$social_distancing_encouraged <- as.Date(data$social_distancing_encouraged, format = "%Y-%m-%d")
data$self_isolating_if_ill <- as.Date(data$self_isolating_if_ill, format = "%Y-%m-%d")
data$rest_begin <- as.Date(data$rest_begin, format = "%Y-%m-%d")
data$border_close <- as.Date(data$border_close, format = "%Y-%m-%d")
data$border_control <- as.Date(data$border_control, format = "%Y-%m-%d")
data$flight_rest<- as.Date(data$flight_rest, format = "%Y-%m-%d")
#write data to csv file
write.table(data, file="C:/Users/Hape/Documents/Biohackathon/policies_corona.csv", col.names = TRUE,
sep=",")
#write data to txt file
write.table(data, file="C:/Users/Hape/Documents/Biohackathon/policies_corona.txt", col.names = TRUE,
sep=",")
#**************************
##make extra file for US states
##single states of the US
#restaurants close
data$rest_begin[data$Country=="Minnesota"] <- "2020-03-16"
#sports close
data$sport_begin[data$Country=="Minnesota"] <- "2020-03-16"
#public events
data$public_events[data$Country=="Minnesota"] <- "2020-03-13"
#schools closed (only schools, not universities or child care)
data$school_close_begin[data$Country=="Oregon"] <- "2020-03-13"
data$school_close_begin[data$Country=="Pennsylvenia"] <- "2020-03-13"
data$school_close_begin[data$Country=="Illinois"] <- "2020-03-17"
data$school_close_begin[data$Country=="West_Virginia"] <- "2020-03-16"
data$school_close_begin[data$Country=="South_Dakota"] <- "2020-03-16" ##reopen 20th of March
#universities closed
#data$uni_close[data$Country=="Hawai"] <- "2020-03-16"
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/updates.R
\name{VariableUpdate}
\alias{VariableUpdate}
\title{Class: VariableUpdate
Describes an update to a variable}
\description{
Class: VariableUpdate
Describes an update to a variable
Class: VariableUpdate
Describes an update to a variable
}
\section{Public fields}{
\if{html}{\out{<div class="r6-fields">}}
\describe{
\item{\code{individual, }}{the individual to update}
\item{\code{variable, }}{the variable to to update}
\item{\code{value, }}{the value to update the variable with}
\item{\code{index, }}{the index of the variable to update}
\item{\code{type, }}{a helper field for the cpp implementation}
}
\if{html}{\out{</div>}}
}
\section{Active bindings}{
\if{html}{\out{<div class="r6-active-bindings">}}
\describe{
\item{\code{individual, }}{the individual to update}
\item{\code{variable, }}{the variable to update}
\item{\code{value, }}{the value to update the variable with}
\item{\code{index, }}{the index of the variable to update}
\item{\code{type, }}{a helper field for the cpp implementation}
}
\if{html}{\out{</div>}}
}
\section{Methods}{
\subsection{Public methods}{
\itemize{
\item \href{#method-new}{\code{VariableUpdate$new()}}
\item \href{#method-clone}{\code{VariableUpdate$clone()}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-new"></a>}}
\subsection{Method \code{new()}}{
Create a new VariableUpdate descriptor. There are 4 types of variable
update:
1. Subset update. The index vector represents a subset of the variable to
update. The value vector, of the same size, represents the new values for
that subset
2. Subset fill. The index vector represents a subset of the variable to
update. The value vector, of size 1, will fill the specified subset
3. Variable reset. The index vector is set to `NULL` and the value vector
replaces all of the current values in the simulation. The value vector
should match the size of the population.
4. Variable fill. The index vector is set to `NULL` and the value vector,
of size 1, is used to fill all of the variable values in the population.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{VariableUpdate$new(individual, variable, value, index = NULL)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{individual}}{is the type of individual to update}
\item{\code{variable}}{a Variable object representing the variable to change}
\item{\code{value}}{a vector or scalar of values to assign at the index}
\item{\code{index}}{is the index at which to apply the change, use NULL for the
fill options}
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-clone"></a>}}
\subsection{Method \code{clone()}}{
The objects of this class are cloneable with this method.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{VariableUpdate$clone(deep = FALSE)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{deep}}{Whether to make a deep clone.}
}
\if{html}{\out{</div>}}
}
}
}
|
/man/VariableUpdate.Rd
|
permissive
|
kant/individual
|
R
| false
| true
| 3,135
|
rd
|
test_that("helloWorld", {
expect_output(helloWorld(), "Hello user of KeOps")
})
|
/rkeops/tests/testthat/test-1_starting.R
|
permissive
|
getkeops/keops
|
R
| false
| false
| 84
|
r
|
test_that("helloWorld", {
expect_output(helloWorld(), "Hello user of KeOps")
})
|
# Demonstrates tryCatch: `s` is never defined, so `a + s` throws an error and the
# error handler's return value ("error") becomes the value of `res`.
res <- tryCatch(
  {
    a <- 1
    b <- 2
    c <- a + s   # `s` does not exist -> error, so `b` on the next line is never reached
    b
  },
  error = function(err){
    paste("error", sep='')   # sep has no effect with a single string; simply returns "error"
  }
)
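# A slightly fuller sketch along the same lines: warnings can be caught too, and
# `finally` always runs. (Illustrative only; log(-1) is just a convenient warning source.)
res2 <- tryCatch(
  {
    log(-1)            # raises a warning ("NaNs produced"), not an error
  },
  warning = function(w) paste("warning:", conditionMessage(w)),
  error   = function(e) paste("error:", conditionMessage(e)),
  finally = message("tryCatch block finished")
)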
|
/R/tryCatch.R
|
no_license
|
dipeshjoshi/MachineLearning
|
R
| false
| false
| 128
|
r
|
bookdown_file = function(...) {
system.file(..., package = 'bookdown', mustWork = TRUE)
}
# find the y[j] closest to x[i] with y[j] > x[i]
next_nearest = function(x, y) {
n = length(x); m = length(y); z = integer(n)
for (i in seq_len(n)) {
for (j in seq_len(m)) {
if (y[j] > x[i]) {
z[i] = y[j]
break
}
}
}
z
}
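# Illustrative example (comment only). Note the loop returns the first y[j] > x[i]
# in y's order, which is the "closest" such value only when y is sorted:
#   next_nearest(c(1, 5), c(2, 3, 7))   # c(2, 7)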
# change the filename extension
with_ext = function(x, ext) {
n1 = length(x); n2 = length(ext); r = '([.][[:alnum:]]+)?$'
if (n1 * n2 == 0) return(x)
i = !(grepl('^[.]', ext) | ext == '')
ext[i] = paste0('.', ext[i])
if (all(ext == '')) ext = ''
if (length(ext) == 1) return(sub(r, ext, x))
if (n1 > 1 && n1 != n2) stop("'ext' must be of the same length as 'x'")
mapply(sub, r, ext, x, USE.NAMES = FALSE)
}
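# Illustrative examples (comments only):
#   with_ext("index.Rmd", "html")      # "index.html"
#   with_ext(c("a.md", "b"), ".tex")   # c("a.tex", "b.tex"); a missing extension is appended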
# counters for figures/tables
new_counters = function(type, rownames) {
base = matrix(
0L, nrow = length(rownames), ncol = length(type),
dimnames = list(rownames, type)
)
list(
inc = function(type, which) {
base[which, type] <<- base[which, type] + 1L
}
)
}
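# Illustrative example (comment only): the returned inc() closes over `base`,
# so counts accumulate per (rowname, type) cell across calls:
#   cnt <- new_counters(c("fig", "tab"), c("ch1", "ch2"))
#   cnt$inc("fig", "ch1")   # ch1/fig counter is now 1
#   cnt$inc("fig", "ch1")   # ch1/fig counter is now 2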
# set some internal knitr options
set_opts_knit = function(config) {
# use labels of the form (\#label) in knitr
config$knitr$opts_knit$bookdown.internal.label = TRUE
# when the output is LaTeX, force LaTeX tables instead of default Pandoc tables
# http://tex.stackexchange.com/q/276699/9128
config$knitr$opts_knit$kable.force.latex = TRUE
config
}
readUTF8 = function(input) {
readLines(input, encoding = 'UTF-8', warn = FALSE)
}
writeUTF8 = function(text, ...) {
writeLines(enc2utf8(text), ..., useBytes = TRUE)
}
get_base_format = function(format) {
if (is.character(format)) {
format = eval(parse(text = format))
}
if (!is.function(format)) stop('The output format must be a function')
format
}
load_config = function() {
if (length(opts$get('config')) == 0 && file.exists('_bookdown.yml')) {
# store the book config
opts$set(config = yaml_utf8('_bookdown.yml'))
}
opts$get('config')
}
yaml_utf8 = function(x) yaml::yaml.load(
enc2utf8(paste(readUTF8(x), collapse = '\n'))
)
book_filename = function(config = load_config(), fallback = TRUE) {
if (is.character(config[['book_filename']])) {
config[['book_filename']][1]
} else if (fallback) '_main'
}
source_files = function(format = NULL, config = load_config(), all = FALSE) {
# a list of Rmd chapters
files = list.files('.', '[.]Rmd$', ignore.case = TRUE)
if (is.character(config[['rmd_files']])) {
files = config[['rmd_files']]
if (is.list(files)) {
files = if (all && is.null(format)) unlist(files) else files[[format]]
}
} else {
    files = grep('^[^_]', files, value = TRUE)  # exclude those that start with _
index = match('index', with_ext(files, ''))
    # if there is an index.Rmd, put it at the beginning
if (!is.na(index)) files = c(files[index], files[-index])
}
check_special_chars(files)
}
output_dirname = function(dir, config = load_config(), create = TRUE) {
if (is.null(dir)) {
dir2 = config[['output_dir']]
if (!is.null(dir2)) dir = dir2
}
if (is.null(dir)) dir = '_book'
if (length(dir)) {
if (create) dir_create(dir)
# ignore dir that is just the current working directory
if (same_path(dir, getwd(), mustWork = FALSE)) dir = NULL
}
dir
}
dir_exists = function(x) utils::file_test('-d', x)
# mark directories with trailing slashes
mark_dirs = function(x) {
i = dir_exists(x)
x[i] = paste0(x[i], '/')
x
}
merge_chapters = function(files, to, before = NULL, after = NULL, orig = files) {
# in the preview mode, only use some placeholder text instead of the full Rmd
preview = opts$get('preview'); input = opts$get('input_rmd')
content = unlist(mapply(files, orig, SIMPLIFY = FALSE, FUN = function(f, o) {
x = readUTF8(f)
if (preview && !(o %in% input)) x = create_placeholder(x)
x = insert_code_chunk(x, before, after)
c(x, '', paste0('<!--chapter:end:', o, '-->'), '')
}))
if (preview) content = c(create_placeholder(readUTF8(files[1]), FALSE), content)
writeUTF8(content, to)
}
match_dashes = function(x) grep('^---\\s*$', x)
create_placeholder = function(x, header = TRUE) {
h = grep('^# ', x, value = TRUE) # chapter title
h1 = grep(reg_part, h, value = TRUE)
h2 = setdiff(h, h1)
h = c('', if (length(h1)) h1[1], if (length(h2)) h2[1] else '# Placeholder')
i = match_dashes(x)
c(if (length(i) >= 2) x[(i[1]):(i[2])], if (header) h)
}
insert_code_chunk = function(x, before, after) {
if (length(before) + length(after) == 0) return(x)
if (length(x) == 0 || length(match_dashes(x[1])) == 0) return(c(before, x, after))
i = match_dashes(x)
if (length(i) < 2) {
warning('There may be something wrong with your YAML frontmatter (no closing ---)')
return(c(before, x, after))
}
# insert `before` after the line i[2], i.e. the second ---
c(append(x, before, i[2]), after)
}
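# Illustrative example (comment only), assuming a document with YAML frontmatter:
#   x <- c("---", "title: Demo", "---", "# Chapter")
#   insert_code_chunk(x, before = "<!-- before -->", after = "<!-- after -->")
#   # "<!-- before -->" is inserted right after the closing ---; "<!-- after -->" is appended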
insert_chapter_script = function(config, where = 'before') {
script = config[[sprintf('%s_chapter_script', where)]]
if (is.character(script)) {
c('```{r include=FALSE, cache=FALSE}', unlist(lapply(script, readUTF8)), '```')
}
}
check_special_chars = function(filename) {
reg = getFromNamespace('.shell_chars_regex', 'rmarkdown')
for (i in grep(reg, filename)) warning(
'The filename "', filename[i], '" contains special characters. ',
'You may rename it to, e.g., "', gsub(reg, '-', filename[i]), '".'
)
if (!is.null(i)) stop('Filenames must not contain special characters')
filename
}
Rscript = function(args) {
system2(file.path(R.home('bin'), 'Rscript'), args)
}
Rscript_render = function(file, ...) {
args = shQuote(c(bookdown_file('scripts', 'render_one.R'), file, ...))
if (Rscript(args) != 0) stop('Failed to compile ', file)
}
clean_meta = function(meta_file, files) {
meta = readRDS(meta_file)
for (i in setdiff(names(meta), files)) meta[[i]] = NULL
meta = setNames(meta[files], files) # order by input filenames
for (i in files) if (is.null(meta[[i]])) meta[[i]] = basename(with_ext(i, '.md'))
saveRDS(meta, meta_file)
meta
}
# remove HTML tags and remove extra spaces
strip_html = function(x) {
x = gsub('<[^>]+>', '', x)
x = gsub('\\s{2,}', ' ', x)
x
}
# remove <script>...</script> content and references
strip_search_text = function(x) {
x = gsub('<script[^>]*>(.*?)</script>', '', x)
x = gsub('<div id="refs" class="references">.*', '', x)
x = strip_html(x)
x
}
# quote a string and escape backslashes/double quotes
json_string = function(x, toArray = FALSE) {
json_vector(x, toArray)
}
json_vector = function(x, toArray = FALSE, quote = TRUE) {
if (quote) {
x = gsub('(["\\])', "\\\\\\1", x)
x = gsub('[[:space:]]', " ", x)
if (length(x)) x = paste0('"', x, '"')
}
if (toArray) paste0('[', paste(x, collapse = ', '), ']') else x
}
# manipulate internal options
opts = knitr:::new_defaults(list(config = list()))
dir_create = function(path) {
dir_exists(path) || dir.create(path, recursive = TRUE)
}
# a wrapper of file.path to ignore `output_dir` if it is NULL
output_path = function(...) {
dir = opts$get('output_dir')
if (is.null(dir)) file.path(...) else file.path(dir, ...)
}
local_resources = function(x) {
grep('^(f|ht)tps?://.+', x, value = TRUE, invert = TRUE)
}
#' Continuously preview the HTML output of a book using the \pkg{servr} package
#'
#' When any files are modified or added to the book directory, the book will be
#' automatically recompiled, and the current HTML page in the browser will be
#' refreshed. This function is based on \code{servr::\link[servr]{httw}()} to
#' continuously watch a directory.
#'
#' For \code{in_session = TRUE}, you will have access to all objects created in
#' the book in the current R session: if you use a daemonized server (via the
#' argument \code{daemon = TRUE}), you can check the objects at any time when
#' the current R session is not busy; otherwise you will have to stop the server
#' before you can check the objects. This can be useful when you need to
#' interactively explore the R objects in the book. The downside of
#' \code{in_session = TRUE} is that the output may be different with the book
#' compiled from a fresh R session, because the state of the current R session
#' may not be clean.
#'
#' For \code{in_session = FALSE}, you do not have access to objects in the book
#' from the current R session, but the output is more likely to be reproducible
#' since everything is created from new R sessions. Since this function is only
#' for previewing purposes, the cleanness of the R session may not be a big
#' concern. You may choose \code{in_session = TRUE} or \code{FALSE} depending on
#' your specific applications. Eventually, you should run \code{render_book()}
#' from a fresh R session to generate a reliable copy of the book output.
#' @param dir The root directory of the book (containing the Rmd source files).
#' @param output_dir The directory for output files; see
#' \code{\link{render_book}()}.
#' @param preview Whether to render the modified/added chapters only, or the
#' whole book; see \code{\link{render_book}()}.
#' @param in_session Whether to compile the book using the current R session, or
#' always open a new R session to compile the book whenever changes occur in
#' the book directory.
#' @param ... Other arguments passed to \code{servr::\link[servr]{httw}()} (not
#' including the \code{handler} argument, which has been set internally).
#' @export
serve_book = function(
dir = '.', output_dir = '_book', preview = TRUE, in_session = TRUE, ...
) {
# when this function is called via the RStudio addin, use the dir of the
# current active document
if (missing(dir) && requireNamespace('rstudioapi', quietly = TRUE)) {
path = rstudioapi::getActiveDocumentContext()[['path']]
if (!(is.null(path) || path == '')) dir = dirname(path)
}
owd = setwd(dir); on.exit(setwd(owd), add = TRUE)
if (missing(output_dir) || is.null(output_dir)) {
on.exit(opts$restore(), add = TRUE)
output_dir = load_config()[['output_dir']]
}
if (is.null(output_dir)) output_dir = '_book'
rebuild = function(..., preview_ = preview) {
files = grep('[.]R?md$', c(...), value = TRUE, ignore.case = TRUE)
files = files[dirname(files) == '.']
if (length(files) == 0) return()
# if the output dir has been deleted, rebuild the whole book
if (!dir_exists(output_dir)) preview_ = FALSE
if (in_session) {
render_book(files, output_dir = output_dir, preview = preview_, envir = globalenv())
} else {
args = shQuote(c(bookdown_file('scripts', 'servr.R'), output_dir, preview_, files))
if (Rscript(args) != 0) stop('Failed to compile ', paste(files, collapse = ' '))
}
}
rebuild('index.Rmd', preview_ = FALSE) # build the whole book initially
servr::httw('.', ..., site.dir = output_dir, handler = rebuild)
}
# a simple JSON serializer
tojson = function(x) {
if (is.null(x)) return('null')
if (is.logical(x)) {
if (length(x) != 1 || any(is.na(x)))
stop('Logical values of length > 1 and NA are not supported')
return(tolower(as.character(x)))
}
if (is.character(x) || is.numeric(x)) {
return(json_vector(x, length(x) != 1 || inherits(x, 'AsIs'), is.character(x)))
}
if (is.list(x)) {
if (length(x) == 0) return('{}')
return(if (is.null(names(x))) {
json_vector(unlist(lapply(x, tojson)), TRUE, quote = FALSE)
} else {
nms = paste0('"', names(x), '"')
paste0('{\n', paste(nms, unlist(lapply(x, tojson)), sep = ': ', collapse = ',\n'), '\n}')
})
}
stop('The class of x is not supported: ', paste(class(x), collapse = ', '))
}
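# Illustrative example (comment only): a named list becomes a JSON object with one
# "name": value pair per element (pairs separated by newlines):
#   tojson(list(a = 1, b = "x", flag = TRUE))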
same_path = function(f1, f2, ...) {
normalizePath(f1, ...) == normalizePath(f2, ...)
}
in_dir = knitr:::in_dir
# base64 encode resources in url("")
base64_css = function(css, exts = 'png', overwrite = FALSE) {
x = readUTF8(css)
r = sprintf('[.](%s)$', paste(exts, collapse = '|'))
m = gregexpr('url\\("[^"]+"\\)', x)
regmatches(x, m) = lapply(regmatches(x, m), function(ps) {
if (length(ps) == 0) return(ps)
ps = gsub('^url\\("|"\\)$', '', ps)
sprintf('url("%s")', sapply(ps, function(p) {
if (grepl(r, p) && file.exists(p)) knitr::image_uri(p) else p
}))
})
if (overwrite) writeUTF8(x, css) else x
}
files_cache_dirs = function(dir = '.') {
if (!dir_exists(dir)) return(character())
out = list.files(dir, '_(files|cache)$', full.names = TRUE)
out = out[dir_exists(out)]
out = out[basename(out) != '_bookdown_files']
out
}
existing_files = function(x, first = FALSE) {
x = x[file.exists(x)]
if (first) head(x, 1) else x
}
existing_r = function(base, first = FALSE) {
x = apply(expand.grid(base, c('R', 'r')), 1, paste, collapse = '.')
existing_files(x, first)
}
html_or_latex = function(format) {
if (grepl('(html|gitbook|epub)', format)) return('html')
if (grepl('pdf', format)) return('latex')
switch(format, tufte_book2 = 'latex', tufte_handout2 = 'latex')
}
|
/R/utils.R
|
no_license
|
muuksi/bookdown
|
R
| false
| false
| 13,027
|
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tipitaka-docs.R
\docType{data}
\name{pali_stop_words}
\alias{pali_stop_words}
\title{Tentative set of "stop words" for Pali}
\format{
An object of class \code{tbl_df} (inherits from \code{tbl}, \code{data.frame}) with 245 rows and 1 columns.
}
\source{
\url{https://dsalsrv04.uchicago.edu/dictionaries/pali/}
}
\usage{
pali_stop_words
}
\description{
A list of all declinables and particles from the PTS
Pali-English Dictionary.
}
\examples{
# Find most common words in the Mahāsatipatthāna Sutta excluding stop words
library(dplyr)
sati_sutta_long \%>\%
anti_join(pali_stop_words, by = "word") \%>\%
arrange(desc(freq))
}
\keyword{datasets}
|
/man/pali_stop_words.Rd
|
no_license
|
cran/tipitaka
|
R
| false
| true
| 727
|
rd
|
library(shiny)  # needed for shinyApp(), flowLayout() and the input/output widgets below
server = function(input, output) {
output$table = renderTable({
head(iris)
})
}
ui = flowLayout(
sliderInput("slider", "Slider", min = 1, max = 100, value = 50),
textInput("text", "Text"),
tableOutput("table")
)
shinyApp(ui, server)
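# To launch this app from another R session (sketch; assumes this file is saved as
# app.R inside a directory called "flowLayout"):
#   shiny::runApp("flowLayout")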
|
/Chapter04/flowLayout/app.R
|
permissive
|
PacktPublishing/Web-Application-Development-with-R-Using-Shiny-third-edition
|
R
| false
| false
| 260
|
r
|
library(ggplot2)
library(dplyr)
houses <- readLines("data.txt")
houses <- as.list(strsplit(houses, "")[[1]])
# Part 1
directionVector <- function(direction) {
if(direction == "v") {
c(x=0, y=-1)
} else if(direction == ">") {
c(x=1, y=0)
} else if(direction == "<") {
c(x=-1, y=0)
} else if(direction == "^") {
c(x=0, y=1)
} else {
NULL
}
}
direction_matrix <- plyr::ldply(houses, directionVector)
direction_matrix$X <- cumsum(direction_matrix$x)
direction_matrix$Y <- cumsum(direction_matrix$y)
# Find the number of unique coordinates
sum(! duplicated(direction_matrix[, c("X", "Y")]))
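# Equivalent count via a single string key per coordinate (illustrative alternative;
# space-separated paste() keys cannot collide for integer coordinates)
length(unique(paste(direction_matrix$X, direction_matrix$Y)))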
# Draw a map of the houses visited by Santa
direction_matrix$color <- sample(c("RED", "GREEN"), size=nrow(direction_matrix), replace=T)
ggplot(direction_matrix, aes(x=X, y=Y, color=color)) + geom_point() +
scale_color_manual(values=c("#d42426", "#3C8D0D")) +
theme(axis.title=element_blank(), legend.position="none") +
ggtitle("Map of Houses Santa Visits")
# Part 2
direction_matrix$santa <- c("Santa", "Robo-Santa")
santa <- filter(direction_matrix, santa == "Santa")
robosanta <- filter(direction_matrix, santa == "Robo-Santa")
santa$X <- cumsum(santa$x)
santa$Y <- cumsum(santa$y)
robosanta$X <- cumsum(robosanta$x)
robosanta$Y <- cumsum(robosanta$y)
santas_matrix <- rbind(santa, robosanta)
sum(! duplicated(santas_matrix[, c("X", "Y")]))
ggplot(santas_matrix, aes(x=X, y=Y, color=santa)) + geom_point() +
scale_color_manual(values=c("#d42426", "#3C8D0D")) +
theme(axis.title=element_blank()) +
ggtitle("Map of Houses Santa & Robo-Santa Visits")
|
/2015/Day03/day_03.r
|
no_license
|
scottshepard/advent-of-code
|
R
| false
| false
| 1,588
|
r
|
# in addition to counting the number of forms each month,
# this graph also displays the forms where the minimum standard dataset
# is not available
# read in the data
mdata <- read.table("h://mbg_db_reporting//reports//20150409_philly_fes_counts.csv",
header=TRUE, sep=",")
# remove obs from year past 2015
mdata <-mdata[mdata$SubYear<=2015,]
library(psych)
describe(mdata$SubYear)
## finding max value within a county for how many forms
## within the time window
# trying to get the graph using reshape . melt
## max of rows will be same within a county
## should not add any rows to the table
library(reshape2)
mydata<- melt(mdata, id=c("COUNTY_ID", "SubYear", "SubMonth"))
head(mydata)
tail(mydata)
# load lattice library
library(lattice)
#use zoo package to convert year and month into a date
#load(zoo)
library(zoo)
mydata$yrmo <- as.yearmon(paste(mydata$SubYear, mydata$SubMonth, sep="-"))
# add factors for county names
mydata$COUNTY_ID<-factor(mydata$COUNTY_ID,
levels = c(02, 20, 22, 35, 51, 61),
labels = c("Allegheny","Crawford","Dauphin","Lackawanna","Philadelphia","Venango"))
# create labels for what type of number depicted
mydata$fancyname[mydata$variable=="NumComplete"]<-1
mydata$fancyname[mydata$variable=="NumFFS"]<-2
mydata$fancyname[mydata$variable=="TotSurveys"]<-3
# add factors for what type of number depicted
mydata$fancyname<-factor(mydata$fancyname,
levels = c(1, 2, 3),
labels = c("Number of Complete Packets",
"Number of Meetings",
"Total Number of Surveys"))
# create a date that is in word rather than in decimal format
mydata$timelabels<-as.Date(mydata$yrmo)
mydata$timelabels
#describe(mydata$timelabels)
#ylabel_vector--where to put values
mydata$put_vector <- mydata$value+3
### add info for footnote
library(grid)
add.footnote <- function(string="Note: Graph excludes meetings \nlater than 2015", col="grey",
lineheight=0.7, cex=0.7){
grid.text(string,
x=unit(1, "npc") - unit(1, "mm"),
y=unit(1, "mm"), just=c("right", "bottom"),
gp=gpar(col=col,lineheight=lineheight, cex=cex))
}
# setup for no margins on the legend
# bottom margin is the first number in the sequence
# top margin is the third number in the sequence
par(mar=c(5.1, 0, 4.1, 0))
####
####
####Various iterations of the graph
#### towards the final version of the graph
####
####
##saves file as pdf
pdf(file="h://mbg_db_reporting//reports//20150409_Philly_FES_summary.pdf", paper="letter", height = 10)
# with the x-axis now uniform across the counties
# removed the symbols for each point
# do not draw y-axis ticks
xyplot(mydata$value ~ mydata$timelabels |factor(mydata$fancyname),
page=function(n){ add.footnote()},
groups=factor(mydata$variable),
prepanel = function(x, y, subscripts) {
list(ylim=extendrange(mydata$put_vector[subscripts], f=.25))
},
labels=mydata$value,
scales=list(
y=list(relation="free", draw=FALSE )),
type="l", layout = c(1,3), xlab="Month",
ylab="Number Occurring That Month",
pch=NA_integer_,
lty=c(1, 1, 1),
main="Number of Family Engagement Meetings \nby Month",
sub="Data pulled on 2015/04/09",
index.cond=list(c(3, 2, 1)),
panel= panel.superpose,
panel.groups=function(x, y, ..., subscripts) {
panel.xyplot(x, y, ..., subscripts=subscripts );
panel.text(mydata$timelabels[subscripts], mydata$put_vector[subscripts], labels=mydata$value[subscripts], offset=1)
}
)
dev.off()
# trying to draw using base plot(); note plot() takes x first, then y, so the first
# call below has the axes swapped (time ends up on the y-axis)
plot(mydata$value, mydata$yrmo, type = "l")
plot(mydata$yrmo, mydata$value, type = "l")
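# A plain base-graphics sketch of the same idea (comments only; time on the x-axis,
# count on the y-axis), using the columns created above:
#   plot(value ~ timelabels, data = mydata, type = "l",
#        xlab = "Month", ylab = "Number Occurring That Month")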
|
/chart_philly_count_fes_forms_for_mtg.r
|
no_license
|
douyany/FES
|
R
| false
| false
| 3,641
|
r
|
## Dependencies
dependencies <- c("argparse", "RPostgreSQL", "tidyverse", "glue", "kableExtra",
"rmarkdown", "cowplot", "gtools", "scales", "formattable", "knitr")
args = commandArgs(trailingOnly=TRUE)
if(any(grepl(pattern = "show", args))){
print(dependencies)
quit()
}
for(dep in dependencies)
install.packages(dep, repos='http://cran.us.r-project.org')
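## A possible refinement (sketch): only install packages that are not already present
missing_deps <- dependencies[!dependencies %in% rownames(installed.packages())]
if (length(missing_deps) > 0)
  install.packages(missing_deps, repos = 'http://cran.us.r-project.org')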
|
/install_dependencies.R
|
no_license
|
Militeee/GDA_exam
|
R
| false
| false
| 386
|
r
|
library(waved)
### Name: summary.wvd
### Title: Summary of wvd objects
### Aliases: summary.wvd
### Keywords: internal
### ** Examples
library(waved)
data=waved.example(TRUE,FALSE)
doppler.wvd=WaveD(data$doppler.noisy,data$g)
summary(doppler.wvd)
|
/data/genthat_extracted_code/waved/examples/summary.wvd.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 254
|
r
|
/Unit4. Exploratory Data Analysis/DiplomadoMineriaDatos(C.P y Biplot).R
|
no_license
|
kawaiiblitz/DataMining
|
R
| false
| false
| 15,679
|
r
| ||
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/packTool.R
\name{packTool}
\alias{packTool}
\title{Pack a requested tool}
\usage{
packTool(
model_data_path = NULL,
snuxim_model_data_path = NULL,
undistributed_mer_data = NULL,
tool,
datapack_name,
country_uids,
template_path,
cop_year,
output_folder,
results_archive = TRUE,
expand_formulas = FALSE,
d2_session = dynGet("d2_default_session", inherits = TRUE)
)
}
\arguments{
\item{model_data_path}{Local filepath to a Data Pack model data file.}
\item{snuxim_model_data_path}{Local filepath to an SNUxIM Model Data file.}
\item{undistributed_mer_data}{Data from the \code{d$data$UndistributedMER}
dataset that can be provided while generating an OPU tool such that the
targets to be distributed will be sourced from this file.}
\item{tool}{Type of tool this function will create or interact with. Either
\code{OPU Data Pack} or \code{Data Pack}}
\item{datapack_name}{Name you would like associated with this Data Pack.
(Example: "Western Hemisphere", or "Caribbean Region", or "Kenya".)}
\item{country_uids}{Unique IDs for countries to include in the Data Pack.
For full list of these IDs, see \code{datapackr::valid_OrgUnits}.}
\item{template_path}{Local filepath to Data Pack template Excel (XLSX) file.
This file MUST NOT have any data validation formats present. If left
\code{NULL}, will select the default based on \code{cop_year} and \code{tool}.}
\item{cop_year}{COP Year to use for tailoring functions. Remember,
FY22 targets = COP21.}
\item{output_folder}{Local folder where you would like your Data Pack to be
saved upon export.}
\item{results_archive}{If TRUE, will export compiled results of all tests and
processes to output_folder.}
\item{d2_session}{DHIS2 Session id. R6 datimutils object which handles
authentication with DATIM.}
}
\value{
Exports a Data Pack or OPU Data Pack tool to Excel within
\code{output_folder}.
}
\description{
Generates a requested Data Pack or OPU Data Pack tool by taking an Excel
template file and combining it with data pulled from DATIM API to produce
a file ready for distribution.
}
|
/man/packTool.Rd
|
permissive
|
jason-p-pickering/datapackr
|
R
| false
| true
| 2,147
|
rd
|
#' Plot signature contribution heatmap
#'
#' Plot relative contribution of signatures in a heatmap
#'
#' @param contribution Signature contribution matrix
#' @param sig_order Character vector with the desired order of the signature names for plotting. Optional.
#' @param sample_order Character vector with the desired order of the sample names for plotting. Optional.
#' @param cluster_samples Hierarchically cluster samples based on Euclidean distance. Default = TRUE.
#' @param cluster_sigs Hierarchically cluster signatures based on Euclidean distance. Default = FALSE.
#' @param method The agglomeration method to be used for hierarchical clustering. This should be one of
#' "ward.D", "ward.D2", "single", "complete", "average" (= UPGMA), "mcquitty" (= WPGMA), "median" (= WPGMC)
#' or "centroid" (= UPGMC). Default = "complete".
#' @param plot_values Plot relative contribution values in heatmap. Default = F.
#'
#' @return Heatmap with relative contribution of each signature for each sample
#'
#' @import ggplot2
#' @importFrom magrittr %>%
#'
#' @examples
#' ## Extracting signatures can be computationally intensive, so
#' ## we use pre-computed data generated with the following command:
#' # nmf_res <- extract_signatures(mut_mat, rank = 2)
#'
#' nmf_res <- readRDS(system.file("states/nmf_res_data.rds",
#' package = "MutationalPatterns"
#' ))
#'
#' ## Set signature names as row names in the contribution matrix
#' rownames(nmf_res$contribution) <- c("Signature A", "Signature B")
#'
#' ## Plot with clustering.
#' plot_contribution_heatmap(nmf_res$contribution, cluster_samples = TRUE, cluster_sigs = TRUE)
#'
#' ## Define signature and sample order for plotting. If you have a mutation or signature
#' ## matrix, then this can be done like in the example of 'plot_cosine_heatmap()'
#' sig_order <- c("Signature B", "Signature A")
#' sample_order <- c(
#' "colon1", "colon2", "colon3", "intestine1", "intestine2",
#' "intestine3", "liver3", "liver2", "liver1"
#' )
#' plot_contribution_heatmap(nmf_res$contribution,
#' cluster_samples = FALSE,
#' sig_order = sig_order, sample_order = sample_order
#' )
#'
#' ## It's also possible to create a contribution heatmap with text values
#' output_text <- plot_contribution_heatmap(nmf_res$contribution, plot_values = TRUE)
#'
#' ## This function can also be used on the result of a signature refitting analysis.
#' ## Here we load an existing result as an example.
#' snv_refit <- readRDS(system.file("states/strict_snv_refit.rds",
#' package = "MutationalPatterns"
#' ))
#' plot_contribution_heatmap(snv_refit$contribution, cluster_samples = TRUE, cluster_sigs = TRUE)
#' @seealso
#' \code{\link{extract_signatures}},
#' \code{\link{mut_matrix}},
#' \code{\link{plot_contribution}},
#' \code{\link{plot_cosine_heatmap}}
#'
#' @export
# plotting function for relative contribution of signatures in heatmap
plot_contribution_heatmap <- function(contribution, sig_order = NA, sample_order = NA, cluster_samples = TRUE,
cluster_sigs = FALSE, method = "complete", plot_values = FALSE) {
# These variables use non standard evaluation.
# To avoid R CMD check complaints we initialize them to NULL.
Signature <- Sample <- Contribution <- x <- y <- xend <- yend <- NULL
# check contribution argument
if (!inherits(contribution, "matrix")) {
stop("contribution must be a matrix")
}
# check if there are signatures names in the contribution matrix
if (is.null(row.names(contribution))) {
stop("contribution must have row.names (signature names)")
}
# transpose
contribution <- t(contribution)
# relative contribution
contribution_norm <- contribution / rowSums(contribution)
# If cluster_samples is TRUE perform clustering. Else use supplied sample_order or
# the current column order.
if (!.is_na(sample_order) & cluster_samples == TRUE) {
stop("sample_order can only be provided when cluster_samples is FALSE", call. = FALSE)
} else if (!.is_na(sample_order)) {
# check sample_order argument
if (!inherits(sample_order, "character")) {
stop("sample_order must be a character vector", call. = FALSE)
}
if (length(sample_order) != nrow(contribution_norm)) {
stop("sample_order must have the same length as the number
of samples in the explained matrix", call. = FALSE)
}
} else if (cluster_samples == TRUE) {
    # cluster samples based on Euclidean distance between relative contributions
hc.sample <- hclust(dist(contribution_norm), method = method)
# order samples according to clustering
sample_order <- rownames(contribution_norm)[hc.sample$order]
dhc <- as.dendrogram(hc.sample)
# rectangular lines
ddata <- ggdendro::dendro_data(dhc, type = "rectangle")
    # plot dendrogram of hierarchical clustering
dendrogram_rows <- ggplot(ggdendro::segment(ddata)) +
geom_segment(aes(x = x, y = y, xend = xend, yend = yend)) +
coord_flip() +
scale_y_reverse(expand = c(0.2, 0)) +
ggdendro::theme_dendro()
}
else {
sample_order <- rownames(contribution_norm)
}
# If cluster_sigs is TRUE perform clustering. Else use supplied sig_order or
# the current column order.
if (!.is_na(sig_order) & cluster_sigs == TRUE) {
stop("sig_order can only be provided when cluster_sigs is FALSE", call. = FALSE)
} else if (!.is_na(sig_order)) {
# check sig_order argument
if (!inherits(sig_order, "character")) {
stop("sig_order must be a character vector", call. = FALSE)
}
if (length(sig_order) != ncol(contribution_norm)) {
stop("sig_order must have the same length as the number
of signatures in the explained matrix", call. = FALSE)
}
} else if (cluster_sigs == TRUE) {
# Cluster cols
hc.sample2 <- contribution_norm %>%
t() %>%
dist() %>%
hclust(method = method)
sig_order <- colnames(contribution_norm)[hc.sample2$order]
dhc <- as.dendrogram(hc.sample2)
# rectangular lines
ddata <- ggdendro::dendro_data(dhc, type = "rectangle")
    # plot dendrogram of hierarchical clustering
dendrogram_cols <- ggplot(ggdendro::segment(ddata)) +
geom_segment(aes(x = x, y = y, xend = xend, yend = yend)) +
ggdendro::theme_dendro() +
scale_y_continuous(expand = c(0.2, 0))
} else {
sig_order <- colnames(contribution_norm)
}
# Make matrix long and set factor levels, to get the correct order for plotting.
contribution_norm.m <- contribution_norm %>%
as.data.frame() %>%
tibble::rownames_to_column("Sample") %>%
tidyr::pivot_longer(-Sample, names_to = "Signature", values_to = "Contribution") %>%
dplyr::mutate(
Signature = factor(Signature, levels = sig_order),
Sample = factor(Sample, levels = sample_order)
)
# plot heatmap
heatmap <- ggplot(contribution_norm.m, aes(x = Signature, y = Sample, fill = Contribution, order = Sample)) +
geom_raster() +
scale_fill_distiller(palette = "YlGnBu", direction = 1, name = "Relative \ncontribution", limits = c(0, 1)) +
theme_bw() +
theme(axis.text.x = element_text(angle = 90, hjust = 1, vjust = 0.5),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank()) +
labs(x = NULL, y = NULL)
# if plot_values is TRUE, add values to heatmap
if (plot_values) {
heatmap <- heatmap + geom_text(aes(label = round(Contribution, 2)), size = 3)
}
# if cluster_samples is TRUE, make dendrogram
if (cluster_samples == TRUE & cluster_sigs == TRUE) {
empty_fig <- ggplot() +
theme_void()
plot_final <- cowplot::plot_grid(empty_fig, dendrogram_cols, dendrogram_rows, heatmap,
align = "hv", axis = "tblr", rel_widths = c(0.3, 1), rel_heights = c(0.3, 1)
)
}
else if (cluster_samples == TRUE & cluster_sigs == FALSE) {
# combine plots
plot_final <- cowplot::plot_grid(dendrogram_rows, heatmap, align = "h", rel_widths = c(0.3, 1))
} else if (cluster_samples == FALSE & cluster_sigs == TRUE) {
plot_final <- cowplot::plot_grid(dendrogram_cols, heatmap, align = "v", rel_heights = c(0.3, 1)) +
# reverse order of the samples such that first is up
ylim(rev(levels(factor(contribution_norm.m$Sample))))
} else {
plot_final <- heatmap +
# reverse order of the samples such that first is up
ylim(rev(levels(factor(contribution_norm.m$Sample))))
}
return(plot_final)
}
|
/R/plot_contribution_heatmap.R
|
permissive
|
ipstone/MutationalPatterns
|
R
| false
| false
| 8,427
|
r
|
# load data
consumption.all <- read.table(file = "household_power_consumption.txt", sep = ";", header = TRUE, na.strings = "?")
consumption.plot <- subset(x = consumption.all, subset = Date == "1/2/2007" | Date == "2/2/2007")
# write image file #2
png(filename = "plot2.png")
plot(x = consumption.plot[, 3], type = "l", xlab = "", xaxt = "n", ylab = "Global Active Power (kilowatts)")
observations.count <- nrow(consumption.plot)
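# label the x-axis at the start, middle and end of the two-day window (Thursday, Friday, and the following Saturday)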
axis(side = 1, at = c(0, observations.count / 2, observations.count), labels = c ("Thu", "Fri", "Sat"))
dev.off()
|
/plot2.r
|
no_license
|
robertdobbs/ExData_Plotting1
|
R
| false
| false
| 545
|
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/roxford_extension.R
\name{getDomainModelResponse}
\alias{getDomainModelResponse}
\title{Get a model-based response back}
\usage{
getDomainModelResponse(img.path, key, model = "celebrities")
}
\arguments{
\item{img.path}{path to the image file}
\item{key}{key for the vision api}
\item{model}{see getDomainModels()}
}
\value{
a data frame of model-specific features
}
\description{
provide a specific model (e.g. 'celebrities') and get the classification back
}
\examples{
getDomainModelResponse("out/image.png", visionKey, model)
}
|
/man/getDomainModelResponse.Rd
|
no_license
|
anishsingh20/Roxford
|
R
| false
| true
| 587
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mstFreq.R
\name{mstFREQ}
\alias{mstFREQ}
\title{Analysis of Multisite Randomised Education Trials using Multilevel Model under a Frequentist Setting.}
\usage{
mstFREQ(formula, random, intervention, baseln, nPerm, data, seed, nBoot)
}
\arguments{
\item{formula}{the model to be analysed is of the form y ~ x1+x2+.... Where y is the outcome variable and Xs are the independent variables.}
\item{random}{a string variable specifying the "clustering variable" as contained in the data. See example below.}
\item{intervention}{a string variable specifying the "intervention variable" as appearing in the formula and the data. See example below.}
\item{baseln}{A string variable allowing the user to specify the reference category for intervention variable. When not specified, the first level will be used as a reference.}
\item{nPerm}{number of permutations required to generate permutated p-value.}
\item{data}{data frame containing the data to be analysed.}
\item{seed}{seed required for bootstrapping and permutation procedure, if not provided default seed will be used.}
\item{nBoot}{number of bootstraps required to generate bootstrap confidence intervals.}
}
\value{
S3 object; a list consisting of
\itemize{
\item \code{Beta}: Estimates and confidence intervals for variables specified in the model.
\item \code{ES}: Conditional Hedges' g effect size (ES) and its 95% confidence intervals. If nBoot is not specified, 95% confidence intervals are based on standard errors. If nBoot is specified, they are non-parametric bootstrapped confidence intervals.
\item \code{covParm}: A list of variance decomposition into between cluster variance-covariance matrix (schools and school by intervention) and within cluster variance (Pupils). It also contains intra-cluster correlation (ICC).
\item \code{SchEffects}: A vector of the estimated deviation of each school from the intercept and intervention slope.
\item \code{Perm}: A "nPerm x 2w" matrix containing permutated effect sizes using residual variance and total variance. "w" denotes the number of interventions. "w=1" for a two-arm trial and "w=2" for a three-arm trial excluding the control group. It is produced only when \code{nPerm} is specified.
\item \code{Bootstrap}: A "nBoot x 2w" matrix containing the bootstrapped effect sizes using residual variance (Within) and total variance (Total). "w" denotes the number of interventions. "w=1" for a two-arm trial and "w=2" for a three-arm trial excluding the control group. It is only produced when \code{nBoot} is specified.
\item \code{Unconditional}: A list of unconditional effect sizes, covParm, Perm and Bootstrap obtained based on variances from the unconditional model (model with only the intercept as a fixed effect).
}
}
\description{
\code{mstFREQ} performs analysis of multisite randomised education trials using a multilevel model under a frequentist setting.
}
\examples{
if(interactive()){
data(mstData)
########################################################
## MLM analysis of multisite trials + 1.96SE ##
########################################################
output1 <- mstFREQ(Posttest~ Intervention+Prettest,random="School",
intervention="Intervention",data=mstData)
### Fixed effects
beta <- output1$Beta
beta
### Effect size
ES1 <- output1$ES
ES1
## Covariance matrix
covParm <- output1$covParm
covParm
### plot random effects for schools
plot(output1)
###############################################
## MLM analysis of multisite trials ##
## with bootstrap confidence intervals ##
###############################################
output2 <- mstFREQ(Posttest~ Intervention+Prettest,random="School",
intervention="Intervention",nBoot=1000,data=mstData)
tp <- output2$Bootstrap
### Effect size
ES2 <- output2$ES
ES2
### plot bootstrapped values
plot(output2, group=1)
#######################################################################
## MLM analysis of multisite trials with permutation p-value ##
#######################################################################
output3 <- mstFREQ(Posttest~ Intervention+Prettest,random="School",
intervention="Intervention",nPerm=1000,data=mstData)
ES3 <- output3$ES
ES3
#### plot permutated values
plot(output3, group=1)
}
}
|
/man/mstFREQ.Rd
|
no_license
|
InductiveStep/eefAnalytics
|
R
| false
| true
| 4,315
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils-json.R
\name{json}
\alias{json}
\title{Import a JSON file to use for report}
\usage{
json(file)
}
\arguments{
\item{file}{incoming json file}
}
\description{
Import a JSON file to use for report
}
|
/man/json.Rd
|
permissive
|
ee-usgs/repgen
|
R
| false
| true
| 282
|
rd
|
# Title : TODO
# Objective : TODO
# Created by: agladsteinNew
# Created on: 11/2/17
args<-commandArgs(TRUE)
if (length(args)<3) {
stop("Three arguments must be supplied (sim input file), (real input file), (header)", call.=FALSE)
} else{
print(args)
}
file_sim<-args[1] #simulation output, should have the form input_ABCtoolbox_M1_HPC.txt
file_real<-args[2] #real data, should have the form real_output_M23.summary
header<-args[3] #header of simulation containing desired columns
input_ABCtoolbox<-read.table(file_sim, header = T);
real_output<-read.table(file_real, header = T);
keep<-scan(header, character(), quote="")
out_params<-paste(strsplit(file_sim, ".txt")[[1]],"_params.pdf", sep="")
out_stats<-paste(strsplit(file_sim, ".txt")[[1]],"_stats.pdf", sep="")
out_pca<-paste(strsplit(file_sim, ".txt")[[1]],"_pca.pdf", sep="")
# extract columns containing stats. could use for loop to look for the first stat. or use real output to find the stats.
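# A possible sketch (not run; stat_cols, sim_stats and real_stats are illustrative names): the stat
# columns could be located by intersecting the simulation header with the real-data header, e.g.
# stat_cols <- intersect(colnames(input_ABCtoolbox), colnames(real_output))
# sim_stats <- input_ABCtoolbox[, stat_cols, drop = FALSE]
# real_stats <- real_output[, stat_cols, drop = FALSE]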
|
/assessment/plot_results.R
|
permissive
|
ElsevierSoftwareX/SOFTX_2018_97
|
R
| false
| false
| 966
|
r
|
# HW1: matrix2
#
# 1. Create a 10 x 10 matrix `m1` whose i-th row, j-th column element is (i+1)/(j+1). (hint: check the `outer` function)
# 2. Copy `m1` into a matrix `m2` and replace the non-integer value by 0.
# 3. Flatten the matrix `m2` column-wise and assign it to `v1`.
# 4. Copy `v1` to `v2`, remove the duplicated values and sort it in decreasing order. (hint: check the `unique` function)
## Do not modify this line! ## Write your code for 1. after this line! ##
a <-c(1:10)
b <-c(1:10)
f <-function(x,y){(x+1)/(y+1)}
m1<-outer(a,b,f)
m1
## Do not modify this line! ## Write your code for 2. after this line! ##
m2 <-m1
m2 <- ifelse(m2-as.integer(m2)!=0,0,m2)
## Do not modify this line! ## Write your code for 3. after this line! ##
v1 <- as.vector(m2) # as.vector() flattens a matrix column-wise (column-major order)
## Do not modify this line! ## Write your code for 4. after this line! ##
v22<-v1
v23<-unique(v22)
v2<-sort(v23, decreasing = TRUE)
|
/week1 program/Matrix.R
|
no_license
|
tomatoJr/GR5206
|
R
| false
| false
| 928
|
r
|
#Script for testing the properties of the cladistic space
#Setwd
if(length(grep("TGuillerme", getwd()))) {
setwd('~/PhD/Projects/SpatioTemporal_Disparity/Analysis')
} else {
warning("You might have to change the directory!")
}
if(length(grep("SpatioTemporal_Disparity/Analysis", getwd()))==0) {
if(length(grep("SpatioTemporal_Disparity-master/Analysis", getwd()))==0) {
stop("Wrong directory!\nThe current directory must be:\nSpatioTemporal_Disparity/Analysis/ OR SpatioTemporal_Disparity-master/Analysis/\nYou can clone the whole repository from:\nhttps://github.com/TGuillerme/SpatioTemporal_Disparity")
}
}
#Load the functions and the packages
source("functions.R")
#Load Beck data (up to line 159)
#BECK 2014 ProcB
chain_name<-"null_test"
data_path<-"../Data/"
file_matrix<-"../Data/2014-Beck-ProcB-matrix-morpho.nex"
file_tree<-"../Data/2014-Beck-ProcB-TEM.tre"
int_breaks<-rev(seq(from=0, to=150, by=20))+5
int_breaks[length(int_breaks)]<-0
slices<-rev(seq(from=0, to=150, by=10))
KT_bin=4.5
KT_sli=9.5
######################
#Tree and matrix
######################
#matrix
Nexus_data<-ReadMorphNexus(file_matrix)
Nexus_matrix<-Nexus_data$matrix
#tree
Tree_data<-read.nexus(file_tree)
#Cleaning the matrices and the trees
#Remove species with only missing data beforehand
if (any(apply(as.matrix(Nexus_matrix), 1, function(x) levels(as.factor((x)))) == "?")) {
Nexus_matrix<-Nexus_matrix[-c(as.vector(which(apply(as.matrix(Nexus_matrix), 1, function(x) levels(as.factor(x))) == "?"))),]
}
#Cleaning the tree and the table
#making the saving folder
tree<-clean.tree(Tree_data, Nexus_matrix)
table<-clean.table(Nexus_matrix, Tree_data)
Nexus_data$matrix<-table
#Forcing the tree to be binary
tree<-bin.tree(tree)
#Adding node labels to the tree
tree$node.label<-paste("n",seq(1:Nnode(tree)), sep="")
#Setting the tree root age
ages_data<-tree.age(tree)
tree$root.time<-max(ages_data[,1])
######################
#FADLAD file
######################
#Load the F/LAD for Beck
FADLAD<-read.csv(paste(data_path, "Beck2014_FADLAD.csv", sep=""), row.names=1)
######################
#Selecting only stems
######################
#subtree
tree<-extract.clade(tree, node=150)
ages_data<-tree.age(tree)
tree$root.time<-max(ages_data[,1])
#submatrix
Nexus_data$matrix<-Nexus_data$matrix[match(tree$tip.label, rownames(Nexus_data$matrix)) ,]
#Isolating the states list
states_list<-apply(Nexus_data$matrix, 2, states.count) #states.count function is available in the sanitizing functions
######################
#Generating all the models
######################
observed_mat<-Nexus_data
observed_tree<-tree
ran.mat_obs.tre_init<-null.data(tree=observed_tree, matrix=states_list, matrix.model="random", replicates=20, verbose=TRUE)
save(ran.mat_obs.tre_init, file=paste("../Data/",chain_name,"/ran.mat_obs.tre_init.Rda", sep=""))
sim.mat_obs.tre_init<-null.data(tree=observed_tree, matrix=states_list, matrix.model="sim.char", replicates=20, verbose=TRUE)
save(sim.mat_obs.tre_init, file=paste("../Data/",chain_name,"/sim.mat_obs.tre_init.Rda", sep=""))
#obs.mat_yul.tre_init<-null.data(tree="yule", matrix=observed_mat$matrix, replicates=10, verbose=TRUE, root.time=tree$root.time)
#save(obs.mat_yul.tre_init, file=paste("../Data/",chain_name,"/obs.mat_yul.tre_init.Rda", sep=""))
#obs.mat_bde.tre_init<-null.data(tree="bd", matrix=observed_mat$matrix, replicates=10, verbose=TRUE, root.time=tree$root.time)
#save(obs.mat_bde.tre_init, file=paste("../Data/",chain_name,"/obs.mat_bde.tre_init.Rda", sep=""))
#ran.mat_yul.tre_init<-null.data(tree="yule", matrix=states_list, matrix.model="random", replicates=1, verbose=TRUE, root.time=tree$root.time, n.tips=Ntip(tree))
#ran.mat_bde.tre_init<-null.data(tree="bd", matrix=states_list, matrix.model="random", replicates=1, verbose=TRUE, root.time=tree$root.time, n.tips=Ntip(tree))
#sim.mat_yul.tre_init<-null.data(tree="yule", matrix=states_list, matrix.model="sim.char", replicates=1, verbose=TRUE, root.time=tree$root.time, n.tips=Ntip(tree))
#sim.mat_bde.tre_init<-null.data(tree="bd", matrix=states_list, matrix.model="sim.char", replicates=1, verbose=TRUE, root.time=tree$root.time, n.tips=Ntip(tree))
#Recreating the proper nexus format file with the new matrices
ran.mat_obs.tre<-sim.mat_obs.tre<-obs.mat_yul.tre<-obs.mat_bde.tre<-ran.mat_yul.tre<-ran.mat_bde.tre<-sim.mat_yul.tre<-sim.mat_bde.tre<-list()
for (replicate in 1:length(ran.mat_obs.tre_init)) {
ran.mat_obs.tre[[replicate]]<-observed_mat
ran.mat_obs.tre[[replicate]]$matrix<-ran.mat_obs.tre_init[[replicate]]
sim.mat_obs.tre[[replicate]]<-observed_mat
sim.mat_obs.tre[[replicate]]$matrix<-sim.mat_obs.tre_init[[replicate]]
#obs.mat_yul.tre[[replicate]]<-observed_mat
#obs.mat_yul.tre[[replicate]]$matrix<-obs.mat_yul.tre_init[[replicate]]
#obs.mat_bde.tre[[replicate]]<-observed_mat
#obs.mat_bde.tre[[replicate]]$matrix<-obs.mat_bde.tre_init[[replicate]]
#ran.mat_yul.tre[[replicate]]<-observed_mat
#ran.mat_yul.tre[[replicate]]$matrix<-ran.mat_yul.tre_init[[replicate]]
#ran.mat_bde.tre[[replicate]]<-observed_mat
#ran.mat_bde.tre[[replicate]]$matrix<-ran.mat_bde.tre_init[[replicate]]
#sim.mat_yul.tre[[replicate]]<-observed_mat
#sim.mat_yul.tre[[replicate]]$matrix<-sim.mat_yul.tre_init[[replicate]]
#sim.mat_bde.tre[[replicate]]<-observed_mat
#sim.mat_bde.tre[[replicate]]$matrix<-sim.mat_bde.tre_init[[replicate]]
}
####################################
#Ancestral states reconstruction - Fast version (ACE)
####################################
ace_obs.mat_obs.tre<-anc.state(observed_tree, observed_mat, method='ML-ape', verbose=TRUE)
ace_obs.mat_obs.tre$state<-apply(ace_obs.mat_obs.tre$state, 2, replace.na) #replace.na comes from the sanitizing functions
save(ace_obs.mat_obs.tre, file=paste("../Data/",chain_name,"/ace_obs.mat_obs.tre.Rda", sep=""))
ace_ran.mat_obs.tre<-ace_sim.mat_obs.tre<-ace_obs.mat_yul.tre<-ace_obs.mat_bde.tre<-list()
for (replicate in 1:length(ran.mat_obs.tre_init)) {
ace_ran.mat_obs.tre[[replicate]]<-anc.state(observed_tree, ran.mat_obs.tre[[replicate]], method='ML-ape', verbose=TRUE)
ace_ran.mat_obs.tre[[replicate]]$state<-apply(ace_ran.mat_obs.tre[[replicate]]$state, 2, replace.na)
ace_sim.mat_obs.tre[[replicate]]<-anc.state(observed_tree, sim.mat_obs.tre[[replicate]], method='ML-ape', verbose=TRUE)
ace_sim.mat_obs.tre[[replicate]]$state<-apply(ace_sim.mat_obs.tre[[replicate]]$state, 2, replace.na)
#ace_obs.mat_yul.tre[[replicate]]<-anc.state(obs.mat_yul.tre_init[[replicate]], observed_mat, method='ML-ape', verbose=TRUE)
#ace_obs.mat_yul.tre[[replicate]]$state<-apply(ace_obs.mat_yul.tre[[replicate]]$state, 2, replace.na)
#ace_obs.mat_bde.tre[[replicate]]<-anc.state(obs.mat_bde.tre_init[[replicate]], observed_mat, method='ML-ape', verbose=TRUE)
#ace_obs.mat_bde.tre[[replicate]]$state<-apply(ace_obs.mat_bde.tre[[replicate]]$state, 2, replace.na)
}
save(ace_ran.mat_obs.tre, file=paste("../Data/",chain_name,"/ace_ran.mat_obs.tre.Rda", sep=""))
save(ace_sim.mat_obs.tre, file=paste("../Data/",chain_name,"/ace_sim.mat_obs.tre.Rda", sep=""))
#save(ace_obs.mat_yul.tre, file=paste("../Data/",chain_name,"/ace_obs.mat_yul.tre.Rda", sep=""))
#save(ace_obs.mat_bde.tre, file=paste("../Data/",chain_name,"/ace_obs.mat_bde.tre.Rda", sep=""))
#Adding nodes to the nexus matrices
observed_mat95<-observed_mat
observed_mat95$matrix<-anc.unc(ace_obs.mat_obs.tre, 0.95, missing=NA)$state
observed_mat$matrix<-ace_obs.mat_obs.tre$state
ran.mat_obs.tre95<-ran.mat_obs.tre
sim.mat_obs.tre95<-sim.mat_obs.tre
#obs.mat_yul.tre95<-obs.mat_yul.tre
#obs.mat_bde.tre95<-obs.mat_bde.tre
for (replicate in 1:length(ran.mat_obs.tre_init)) {
ran.mat_obs.tre95[[replicate]]$matrix<-anc.unc(ace_ran.mat_obs.tre[[replicate]], 0.95, missing=NA)$state
ran.mat_obs.tre[[replicate]]$matrix<-ace_ran.mat_obs.tre[[replicate]]$state
sim.mat_obs.tre95[[replicate]]$matrix<-anc.unc(ace_sim.mat_obs.tre[[replicate]], 0.95, missing=NA)$state
sim.mat_obs.tre[[replicate]]$matrix<-ace_sim.mat_obs.tre[[replicate]]$state
#obs.mat_yul.tre95[[replicate]]$matrix<-anc.unc(ace_obs.mat_yul.tre[[replicate]], 0.95, missing=NA)$state
#obs.mat_yul.tre[[replicate]]$matrix<-ace_obs.mat_yul.tre[[replicate]]$state
#obs.mat_bde.tre95[[replicate]]$matrix<-anc.unc(ace_obs.mat_bde.tre[[replicate]], 0.95, missing=NA)$state
#obs.mat_bde.tre[[replicate]]$matrix<-ace_obs.mat_bde.tre[[replicate]]$state
}
####################################
#Distance matrix
####################################
#Distance matrix using also nodes
dist_obs.mat_obs.tre<-MorphDistMatrix.verbose(observed_mat, verbose=TRUE)
save(dist_obs.mat_obs.tre, file=paste("../Data/",chain_name,"/dist_obs.mat_obs.tre.Rda", sep=""))
dist_ran.mat_obs.tre<-lapply(ran.mat_obs.tre, MorphDistMatrix.verbose, verbose=TRUE)
save(dist_ran.mat_obs.tre, file=paste("../Data/",chain_name,"/dist_ran.mat_obs.tre.Rda", sep=""))
dist_sim.mat_obs.tre<-lapply(sim.mat_obs.tre, MorphDistMatrix.verbose, verbose=TRUE)
save(dist_sim.mat_obs.tre, file=paste("../Data/",chain_name,"/dist_sim.mat_obs.tre.Rda", sep=""))
#dist_obs.mat_yul.tre<-lapply(obs.mat_yul.tre, MorphDistMatrix.verbose, verbose=TRUE)
#save(dist_obs.mat_yul.tre, file=paste("../Data/",chain_name,"/dist_obs.mat_yul.tre.Rda", sep=""))
#dist_obs.mat_bde.tre<-lapply(obs.mat_bde.tre, MorphDistMatrix.verbose, verbose=TRUE)
#save(dist_obs.mat_bde.tre, file=paste("../Data/",chain_name,"/dist_obs.mat_bde.tre.Rda", sep=""))
#Distance matrix using also nodes95
dist_obs.mat_obs.tre95<-MorphDistMatrix.verbose(observed_mat95, verbose=TRUE)
save(dist_obs.mat_obs.tre95, file=paste("../Data/",chain_name,"/dist_obs.mat_obs.tree95.Rda", sep=""))
dist_ran.mat_obs.tre95<-lapply(ran.mat_obs.tre95, MorphDistMatrix.verbose, verbose=TRUE)
save(dist_ran.mat_obs.tre95, file=paste("../Data/",chain_name,"/dist_ran.mat_obs.tree95.Rda", sep=""))
dist_sim.mat_obs.tre95<-lapply(sim.mat_obs.tre95, MorphDistMatrix.verbose, verbose=TRUE)
save(dist_sim.mat_obs.tre95, file=paste("../Data/",chain_name,"/dist_sim.mat_obs.tree95.Rda", sep=""))
#dist_obs.mat_yul.tre95<-lapply(obs.mat_yul.tre95, MorphDistMatrix.verbose, verbose=TRUE)
#save(dist_obs.mat_yul.tre95, file=paste("../Data/",chain_name,"/dist_obs.mat_yul.tree95.Rda", sep=""))
#dist_obs.mat_bde.tre95<-lapply(obs.mat_bde.tre95, MorphDistMatrix.verbose, verbose=TRUE)
#save(dist_obs.mat_bde.tre95, file=paste("../Data/",chain_name,"/dist_obs.mat_bde.tree95.Rda", sep=""))
|
/Analysis/Testings/Data_setup_null_test.R
|
no_license
|
yassato/SpatioTemporal_Disparity
|
R
| false
| false
| 10,565
|
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/FLDet.R
\name{det}
\alias{det}
\title{Determinant of a Matrix.}
\usage{
det(object, ...)
}
\arguments{
\item{object}{an FLMatrix object}
\item{...}{any additional arguments}
}
\value{
\code{det} returns the determinant as an R vector,
replicating the output of the equivalent base R function.
}
\description{
\code{det} computes the determinant of FLMatrix objects.
}
\section{Constraints}{
Input can only be a square matrix (n x n) with maximum dimension limitations
of (1000 x 1000).
}
\examples{
flmatrix <- FLMatrix(getTestTableName("tblMatrixMulti"), 5,"MATRIX_ID","ROW_ID",
"COL_ID","CELL_VAL", dims= c(5,5))
resultFLDetValue <- det(flmatrix)
}
\seealso{
\code{\link[base]{det}} for corresponding R function reference.
}
|
/man/det.Rd
|
no_license
|
phani-srikar/AdapteR
|
R
| false
| true
| 795
|
rd
|
library(tidyverse)
library(DBI)
library(dbplyr)
library(RPostgreSQL)
myDb <- dbConnect(dbDriver('PostgreSQL'),
host = "db.jla-data.net",
port = 5432,
dbname = "dbase",
                  user = "babisobot", # the babisobot user only has select rights...
                  password = "babisobot") # ... which is why its password can be public on the net
tweet_data <- tbl(myDb, "babisobot") %>%
collect()
dbDisconnect(myDb) # cleaning up after yourself is good manners :)
|
/priklad.R
|
permissive
|
jlacko/babisobot
|
R
| false
| false
| 515
|
r
|
source("../setpath.R")
data=read.csv(file.path(data_path,"Database.csv"),header=TRUE,stringsAsFactors = FALSE)
df = data[which(data[,2]==""),-2]
gene_start = grep("rrrD",colnames(df))
p = dim(df)[2]
mut = df[,gene_start:p]
mut[which(mut!=mut[1,2],arr.ind=T)] = "Hit"
id_mut =data.frame(df[,1],mut)
colnames(id_mut)[1]="ID"
n <- colnames(mut)
cols = c(3,4,5,6,7,8,9,10,11,18)
df$label_col = apply( df[ , cols ] , 1 , paste , collapse = "-" )
mut_group = split(df,df[,"label_col"])
total = length(mut_group)
nums_stress = as.numeric()
nums = as.numeric()
for ( i in 1:total)
{
tmp = mut_group[[i]]
cols = c(13,15,16,17)
nums_stress=c(nums_stress, length(unique(apply( tmp[ , cols ] , 1 , paste , collapse = "-" ))))
nums=c(nums,nrow(tmp))
}
ncol(df)
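# note: 21:ncol(df)-1 parses as (21:ncol(df)) - 1, i.e. columns 20 through ncol(df)-1, since ':' binds tighter than '-'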
anti = mut_group[[18]][,c(13,21:ncol(df)-1)]
write.csv(mut_group[[18]],file.path(out_data_path,"anti_group.csv"),row.names = FALSE)
check = function(x)
{
num_hit = length(which(x!=""))
return(num_hit/length(x))
}
n <- colnames(anti)[-1]
f <- as.formula(paste("cbind(",paste(n, collapse = " ,"),")","~Stress",sep=""))
anti_each <- aggregate(.~Stress, data=anti, FUN =check)
res = anti_each[,-1]
row.names(res) = anti_each[,1]
d = dist(as.matrix(res))
methods = c("average","ward.D2","single","complete","median","centroid")
df=read.csv("./ref/anti_category.csv",header=FALSE,stringsAsFactors = FALSE)
colors = c("#6ED9B0","#FA6c6C","#42135F","#FACB18","#7D4FFE")
types = c("DNA,protein and cell wall synthesis","Protein synthesis",
"Folic acid synthesis","Cell wall synthesis","DNA synthesis")
assignColor = function(label)
{
index = which(label==df[,1])
return(colors[df[index,2]])
}
## function to set label color
labelCol <- function(x) {
if (is.leaf(x)) {
## fetch label
label <- attr(x, "label")
    ## set label color according to the antibiotic category of the leaf
attr(x, "nodePar") <- list(lab.col=assignColor(label),pch = 19,col="white")
}
return(x)
}
for (method in methods)
{
hc = hclust(d,method=method)
## apply labelCol on all nodes of the dendrogram
de <- dendrapply(as.dendrogram(hc), labelCol)
pdf(paste("./out_fig/freq/",method,"_fre.pdf",sep=""), 7.5,6);
par(mar=c(4,1, 2,7)+1)
plot(de,horiz=TRUE,xlab="distance",main=paste("frequency",method,sep="-"),cex=0.5)
legend("topleft", legend = types, fill = colors, title = "", box.col = "transparent")
dev.off()
}
|
/2_analysis_ML_visualization/src/Supplemental/S4.R
|
permissive
|
IBPA/MutationDB
|
R
| false
| false
| 2,548
|
r
|
#####
# R script for Gromacs performance analysis
# All tables are modeled and all plots are produced by this R script
# Just run it ... ;)
####
require(ggplot2)
require(ggthemes)
require(dplyr)
library(stringr)
library(readr)
###################################
###################################
###
### Analysis with Hyperthreading
###
##################################
### Gromacs performance analysis with HyperThreading
### This analysis considers only the 3 environments
tabelaDados <- read.table('../Tabelas/GromacsHTTotal.log',
header = T)
tabelaDados <- tabelaDados %>%
group_by(CPUs, Ambiente) %>%
summarize(Tempo_Medio = mean(Tempo),
Tempo_Minimo = min(Tempo),
Tempo_Maximo = max(Tempo),
Desvio_Padrao = sd(Tempo),
Intervalo_Confianca = (2.042*sd(Tempo))/(sqrt(n())),
Coeficiente_Variacao = Desvio_Padrao/Tempo_Medio,
)
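# Note: 2.042 is the two-sided 95% t quantile at ~30 degrees of freedom, which presumably matches the number of runs per configuration.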
# summary(tabelaDados)
# str(tabelaDados)
tabelaDados$CPUs <- factor(tabelaDados$CPUs, levels = c(unique(tabelaDados$CPUs)[order(unique(tabelaDados$CPUs))]))
progHT <- ggplot(tabelaDados, aes(fill = Ambiente, x = CPUs, y = Tempo_Medio, group = Ambiente)) +
geom_bar(position = 'dodge', stat = 'identity') +
  geom_errorbar(aes(ymin=Tempo_Medio - Intervalo_Confianca, ymax=Tempo_Medio+Intervalo_Confianca),
                position='dodge') +
labs(x="CPUs", y = "Tempo (seg)")+
## scale_fill_manual(values=c("#756bb1", "#fdae6b", "#31a354")) +
theme_bw(base_size = 30) +
scale_fill_grey() +
theme(legend.position = c(0.9, 0.8))
ggsave(filename = "../Graficos/Gromacs/01GromacsHT.png", width = 10, height = 7, dpi = 300)
### Plot of the coefficient of variation across environments
### This plot presents the variation of the collected data as a percentage
progCoeficienteHT <- ggplot(tabelaDados, aes(fill = Ambiente, x = CPUs, y = Coeficiente_Variacao, group = Ambiente)) +
geom_bar(position = 'dodge', stat = 'identity') +
# ggtitle("Coefiente de Variação dos Resultados") +
theme_bw(base_size = 30) +
## scale_fill_manual(values=c("#756bb1", "#fdae6b", "#31a354")) +
scale_y_continuous(limits = c(0,0.1), labels = scales::percent) +
scale_fill_grey() +
theme(legend.position = c(0.2, 0.8))
ggsave(filename = "../Graficos/Gromacs/03GromacsCoeficienteHT.png", width = 10, height = 7, dpi = 300)
### BoxPlot
### This plot checks the variation of the collected data using quartiles
### it shows the median and the variability beyond the upper and lower quartiles, plus outliers
tabelaComparada <- read.table('../Tabelas/GromacsHTTotal.log',
header = T)
tabelaComparada$CPUs <- factor(tabelaComparada$CPUs, levels = c(unique(tabelaComparada$CPUs)[order(unique(tabelaComparada$CPUs))]))
progBoxHT <- ggplot(tabelaComparada, aes(x=CPUs, y=Tempo, fill=Ambiente)) +
geom_boxplot(outlier.colour="red", outlier.shape=9, outlier.size=2) +
labs(x="CPUs", y = "Tempo (seg)")+
## scale_fill_manual(values=c("#756bb1", "#fdae6b", "#31a354")) +
theme_bw(base_size = 30) +
scale_fill_grey() +
theme(legend.position = c(0.9, 0.8))
ggsave(filename = "../Graficos/Gromacs/02GromacsBoxHT.png", width = 10, height = 7, dpi = 300)
###################################
###################################
###
### Analysis without Hyperthreading
###
##################################
### Gromacs performance analysis without HyperThreading
### This analysis considers only the 3 environments
tabelaDados2 <- read.table('../Tabelas/GromacssemHTTotal.log',
header = T)
tabelaDados2 <- tabelaDados2 %>%
group_by(CPUs, Ambiente) %>%
summarize(Tempo_Medio = mean(Tempo),
Tempo_Minimo = min(Tempo),
Tempo_Maximo = max(Tempo),
Desvio_Padrao = sd(Tempo),
Intervalo_Confianca = (2.042*sd(Tempo))/(sqrt(n())),
Coeficiente_Variacao = Desvio_Padrao/Tempo_Medio,
)
# summary(tabelaDados)
# str(tabelaDados)
tabelaDados2$CPUs <- factor(tabelaDados2$CPUs, levels = c(unique(tabelaDados2$CPUs)[order(unique(tabelaDados2$CPUs))]))
progsemHT <- ggplot(tabelaDados2, aes(fill = Ambiente, x = CPUs, y = Tempo_Medio, group = Ambiente)) +
geom_bar(position = 'dodge', stat = 'identity') +
  geom_errorbar(aes(ymin=Tempo_Medio - Intervalo_Confianca, ymax=Tempo_Medio+Intervalo_Confianca),
                position='dodge') +
labs(x="CPUs", y = "Tempo (seg)")+
## scale_fill_manual(values=c("#756bb1", "#fdae6b", "#31a354")) +
theme_bw(base_size = 30) +
scale_fill_grey() +
theme(legend.position = c(0.9, 0.8))
ggsave(filename = "../Graficos/Gromacs/04GromacssemHT.png", width = 10, height = 7, dpi = 300)
### Plot of the coefficient of variation across environments
### This plot presents the variation of the collected data as a percentage
progCoeficienteSemHT <- ggplot(tabelaDados2, aes(fill = Ambiente, x = CPUs, y = Coeficiente_Variacao, group = Ambiente)) +
geom_bar(position = 'dodge', stat = 'identity') +
# ggtitle("Coefiente de Variação dos Resultados") +
theme_bw(base_size = 30) +
## scale_fill_manual(values=c("#756bb1", "#fdae6b", "#31a354")) +
scale_y_continuous(limits = c(0,0.1), labels = scales::percent) +
scale_fill_grey() +
theme(legend.position = c(0.2, 0.8))
ggsave(filename = "../Graficos/Gromacs/06GromacsCoeficientesemHT.png", width = 10, height = 7, dpi = 300)
### BoxPlot
### This plot checks the variation of the collected data using quartiles
### it shows the median and the variability beyond the upper and lower quartiles, plus outliers
tabelaComparada2 <- read.table('../Tabelas/GromacssemHTTotal.log',
header = T)
tabelaComparada2$CPUs <- factor(tabelaComparada2$CPUs, levels = c(unique(tabelaComparada2$CPUs)[order(unique(tabelaComparada2$CPUs))]))
progBoxsemHT <- ggplot(tabelaComparada2, aes(x=CPUs, y=Tempo, fill=Ambiente)) +
geom_boxplot(outlier.colour="red", outlier.shape=9, outlier.size=2) +
labs(x="CPUs", y = "Tempo (seg)")+
## scale_fill_manual(values=c("#756bb1", "#fdae6b", "#31a354")) +
theme_bw(base_size = 30) +
scale_fill_grey() +
theme(legend.position = c(0.9, 0.8))
ggsave(filename = "../Graficos/Gromacs/05GromacsBoxsemHT.png", width = 10, height = 7, dpi = 300)
|
/ScriptsR/11-analiseGromacs.R
|
no_license
|
adrianoferruzzi/vmdata
|
R
| false
| false
| 7,104
|
r
|
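## A quick check of the 2.042 constant used for Intervalo_Confianca in the Gromacs script above:
## it is (approximately) the two-sided 95% Student-t quantile for 30 degrees of freedom,
## i.e. it assumes roughly 31 measurements per CPU/environment combination.
qt(p = 0.975, df = 30)  # ~2.042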
\name{BestSlope}
\alias{BestSlope}
\title{Choose the best-fit slope for the log(y) and x regression by the criteria of adjusted R-square.}
\description{
It sequentially fits (log(y) ~ x) from the last point of x backwards, using at least 3 points.
It chooses the slope with the highest adjusted R-square.
If the difference in adjusted R-square is less than 1e-4, it picks the longer slope.
}
\usage{
BestSlope(x, y, adm = "Extravascular", TOL=1e-4, excludeDelta = 1)
}
\arguments{
\item{x}{vector values of x-axis, usually time}
\item{y}{vector values of y-axis, usually concentration}
\item{adm}{one of \code{"Bolus"} or \code{"Infusion"} or \code{"Extravascular"} to indicate drug administration mode}
  \item{TOL}{tolerance. See the Phoenix WinNonlin 6.4 User's Guide, p. 33, for details.}
\item{excludeDelta}{Improvement of R2ADJ larger than this value could exclude the last point. Default value 1 is for the compatibility with other software.}
}
\details{
Choosing the best terminal slope (with y on the log scale) in pharmacokinetic analysis is somewhat challenging, and the choice can vary between analysts.
Phoenix WinNonlin chooses the slope with the highest adjusted R-squared, preferring the longer fit; differences in adjusted R-squared smaller than TOL are treated as 0.
This function uses the ordinary least squares (OLS) method. The author recommends using the \code{excludeDelta} option with a value of about 0.3.
}
\value{
\item{R2}{R-squared}
\item{R2ADJ}{adjusted R-squared}
\item{LAMZNPT}{number of points used for slope}
\item{LAMZ}{negative of the slope, lambda_z}
\item{b0}{intercept of the regression line}
\item{CORRXY}{correlation of log(y) and x}
\item{LAMZLL}{earliest x for lambda_z}
\item{LAMZUL}{last x for lambda_z}
\item{CLSTP}{predicted y value at the last point, predicted concentration for the last time point}
}
\author{Kyun-Seop Bae <k@acr.kr>}
\seealso{\code{\link{Slope}}}
\examples{
BestSlope(Theoph[Theoph$Subject==1, "Time"], Theoph[Theoph$Subject==1, "conc"])
BestSlope(Indometh[Indometh$Subject==1, "time"], Indometh[Indometh$Subject==1, "conc"],
adm="Bolus")
}
\keyword{slope}
|
/man/BestSlope.Rd
|
no_license
|
asancpt/NonCompart
|
R
| false
| false
| 2,137
|
rd
|
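## A minimal sketch of the selection rule documented for BestSlope above (not the packaged code):
## fit log(y) ~ x over the last k points for k = 3..length(x), keep the highest adjusted
## R-squared, and prefer the longer fit when the difference is below TOL.
best_slope_sketch <- function(x, y, TOL = 1e-4) {
  ks <- 3:length(x)
  r2adj <- sapply(ks, function(k) {
    idx <- (length(x) - k + 1):length(x)
    summary(lm(log(y[idx]) ~ x[idx]))$adj.r.squared
  })
  k_best <- max(ks[r2adj >= max(r2adj) - TOL])
  c(LAMZNPT = k_best, R2ADJ = r2adj[ks == k_best])
}
## terminal (post-peak) points for Theoph subject 1
best_slope_sketch(Theoph$Time[Theoph$Subject == 1][6:11], Theoph$conc[Theoph$Subject == 1][6:11])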
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.evol2.r
\name{plot.evol2}
\alias{plot.evol2}
\title{Do something}
\usage{
\method{plot}{evol2}(DT, groups, main, etyp = 0)
}
\description{
Do something
}
\examples{
}
\author{
JuG
}
|
/man/plot.evol2.Rd
|
no_license
|
jgodet/gmrcfun
|
R
| false
| true
| 267
|
rd
|
#' House publications
#'
#' Imports data on House of Commons and House of Lords publications.
#'
#' @param ID Publication ID. Defaults to `NULL`. If not `NULL`,
#' requests a tibble with information on the given publication.
#'
#' @param house The house that produced the particular publication. Accepts
#' `'commons'` and `'lords'`. If `NULL` or not `'commons'`
#' or `'lords'`, returns publications from both House of Commons and
#' House of Lords. This parameter is case-insensitive. Defaults to `NULL`.
#'
#' @param start_date Only includes publications issued on or after this date.
#' Accepts character values in `'YYYY-MM-DD'` format, and objects of
#' class `Date`, `POSIXt`, `POSIXct`, `POSIXlt` or
#' anything else that can be coerced to a date with `as.Date()`.
#' Defaults to `'1900-01-01'`.
#'
#' @param end_date Only includes publications issued on or before this
#' date. Accepts character values in `'YYYY-MM-DD'` format, and
#' objects of class `Date`, `POSIXt`, `POSIXct`,
#' `POSIXlt` or anything else that can be coerced to a date with
#' `as.Date()`. Defaults to the current system date.
#' @inheritParams all_answered_questions
#' @return A tibble with details from publications in the House of
#' Commons and House of Lords
#' @export
#' @examples
#' \dontrun{
#' # All publications in the house of commons
#' x <- publication_logs(house = "commons")
#'
#' # Returns a given publication
#' y <- publication_logs(683267)
#' }
#'
publication_logs <- function(ID = NULL, house = NULL, start_date = "1900-01-01",
end_date = Sys.Date(), extra_args = NULL,
tidy = TRUE, tidy_style = "snake_case",
verbose = TRUE) {
id_query <- ifelse(
is.null(ID) == FALSE,
paste0("/", ID, ".json?"),
".json?"
)
if (is.null(house)) {
house_query <- ""
} else {
house <- tolower(house)
house_query <- house_query_util(house) ## in utils-house.R
}
dates <- paste0(
"&_properties=publicationDate&max-publicationDate=",
as.Date(end_date),
"&min-publicationDate=",
as.Date(start_date)
)
baseurl <- paste0(url_util, "publicationlogs")
if (verbose == TRUE) {
message("Connecting to API")
}
logs <- jsonlite::fromJSON(paste0(
baseurl, id_query, house_query,
dates, extra_args
),
flatten = TRUE
)
if (is.null(ID) == FALSE) {
df <- tibble::as_tibble(as.data.frame(logs$result$primaryTopic))
} else {
jpage <- floor(logs$result$totalResults / 100)
query <- paste0(
baseurl, id_query, house_query, dates,
extra_args, "&_pageSize=100&_page="
)
df <- loop_query(query, jpage, verbose) # in utils-loop.R
}
if (nrow(df) == 0) {
message("The request did not return any data.
Please check your parameters.")
} else {
if (tidy == TRUE) {
df <- pub_tidy(df, tidy_style) ## in utils-publogs.R
}
df
}
}
#' @rdname publication_logs
#' @export
hansard_publication_logs <- publication_logs
|
/R/publication_logs.R
|
permissive
|
basilesimon/hansard
|
R
| false
| false
| 3,030
|
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data-core.R
\docType{data}
\name{core}
\alias{core}
\title{CORE (Computing Research and Education) list of conference rankings}
\format{An object of class \code{spec_tbl_df} (inherits from \code{tbl_df}, \code{tbl}, \code{data.frame}) with 1630 rows and 2 columns.}
\source{
\url{http://portal.core.edu.au/conf-ranks/?search=&by=all&source=CORE2018&sort=atitle&page=1}
}
\usage{
data(core)
}
\description{
A dataset, \code{core} is provided, which contains the list of conference rankings
according to the CORE executive committee. It is mostly used in
the function, \code{\link[=ranking]{ranking()}}. The details of the CORE organisation,
and its procedure for ranking is provided below.
}
\details{
CORE is an association of university departments of computer science in
Australia and New Zealand. Prior to 2004 it was known as the Computer
Science Association, CSA.
The CORE Conference Ranking provides assessments of major conferences in the
computing disciplines. The rankings are managed by the CORE Executive
Committee, with periodic rounds for submission of requests for addition or
reranking of conferences. Decisions are made by academic committees based
on objective data requested as part of the submission process. Conference
rankings are determined by a mix of indicators, including citation rates,
paper submission and acceptance rates, and the visibility and research
track record of the key people hosting the conference and managing its
technical program. A more detailed statement categorizing the ranks A*, A,
B, and C can be found \href{https://docs.google.com/viewer?a=v&pid=sites&srcid=ZGVmYXVsdGRvbWFpbnx3d3djb3JlZWR1fGd4OjJjNjkxOWE1NWQ4ZGY5MjU}{here}.
Format: A data frame with 1630 observations and two variables:
\itemize{
\item{\verb{conference:}}{ Character vector with the conference names}
\item{\verb{rank:}}{ Conferences are assigned to one of the following categories:
\itemize{
\item{A*: flagship conference, a leading venue in a discipline area}
\item{A: excellent conference, and highly respected in a discipline area}
\item{B: good conference, and well regarded in a discipline area}
\item{C: other ranked conference venues that meet minimum standards}
\item{Australasian: A conference for which the audience is primarily
Australians and New Zealanders}
\item{Unranked: A conference for which no ranking decision has been made}
\item{National: A conference which is run primarily in a single country,
with Chairs from that country, and which is not sufficiently well
    known to be ranked. (Papers and PC may be international.)}
\item{Regional: Similar to National but may cover a region crossing
national borders.}
}
}
}
}
\examples{
core
}
\keyword{datasets}
|
/man/core.Rd
|
no_license
|
njtierney/Rcademy
|
R
| false
| true
| 2,750
|
rd
|
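## A small usage sketch for the `core` dataset documented above (assumes the package that
## ships it, e.g. Rcademy, is installed so that data(core) works):
library(dplyr)
data(core)
count(core, rank, sort = TRUE)        # how many conferences fall into each rank
head(filter(core, rank == "A*"), 5)   # a few flagship venues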
rm(list = ls());
## Random-search minimisation: draw n candidate points (x from N(m1, s1), y from N(m2, s2))
## and keep the pair with the smallest objective value f(x, y).
minimize <- function(f, m1, s1, m2, s2, n)
{
x_star <- NA;
f_star <- NA;
for(k in 1:n)
{
xk <- rnorm(1, m1, s1);
yk <- rnorm(1, m2, s2);
fk <- f(xk, yk);
if(k==1) { x_star = c(xk, yk); f_star = fk; }
else if(fk < f_star) { x_star = c(xk, yk); f_star = fk; }
}
return( list(x=x_star, f=f_star) );
}
f <- function(x, y) { x <- x-10; y <- y-20; x*x + y*y }
minimize(f, 0, 10, 0, 20, 5000)
|
/week3c-opt/ex2b-gaussian-for-quadric.R
|
no_license
|
tutrunghieu/html2015b
|
R
| false
| false
| 464
|
r
|
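## The random search above should land near the true minimiser (10, 20), where f = 0.
## A quick cross-check with a deterministic optimiser (a sketch using stats::optim):
f2 <- function(p) { x <- p[1] - 10; y <- p[2] - 20; x * x + y * y }
optim(par = c(0, 0), fn = f2)$par  # converges to approximately c(10, 20)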
#' @export
d3bubbles2 <- function(data, valueCol = NULL, labelCol = NULL,
tooltipCol = NULL, colorCol = NULL,
opts = NULL,
width = NULL, height = NULL) {
labelCol <- labelCol %||% names(data)[1]
valueCol <- valueCol %||% names(data)[2]
colorCol <- colorCol %||% valueCol
defaultOpts <- list(
padding = 3,
textSplitWidth = 80,
palette = "Purples",
fixedColor = "#B7D1DF",
textColor = "#444444",
minSizeFactor = NA,
maxSizeFactor = NA
)
settings <- mergeOptions(opts,defaultOpts)
textColor <- settings$textColor
if(is.null(opts$fixedColor)){
v <- data[,colorCol]
color <- NULL
if(class(v) %in% c("factor","character"))
color <- catColor(v,palette = settings$palette)
if(class(v) %in% c("numeric","integer"))
color <- numColor(v,palette = settings$palette)
}else{
color <- settings$fixedColor
}
d = data.frame(
value = data[,valueCol],
label = data[,labelCol],
tooltip = data[,tooltipCol] %||% data[,labelCol],
color = color,
textColor = textColor
)
x <- list(
d = d,
settings = settings
)
  str(x) # debug output: inspect the widget payload before it is passed to the JavaScript side
# create widget
htmlwidgets::createWidget(
name = 'd3bubbles2',
x,
width = width,
height = height,
package = 'd3bubbles2',
sizingPolicy = sizingPolicy(
defaultWidth = "100%",
defaultHeight = 500
)
)
}
#' @export
d3bubbles2Output <- function(outputId, width = '100%', height = '500px'){
shinyWidgetOutput(outputId, 'd3bubbles2', width, height, package = 'd3bubbles2')
}
#' @export
renderd3bubbles2 <- function(expr, env = parent.frame(), quoted = FALSE) {
if (!quoted) { expr <- substitute(expr) } # force quoted
shinyRenderWidget(expr, d3bubbles2Output, env, quoted = TRUE)
}
|
/R/d3bubbles2.R
|
no_license
|
karthikl6/d3bubbles2
|
R
| false
| false
| 1,917
|
r
|
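## d3bubbles2() above leans on a null-default operator to pick fallback columns
## (labelCol %||% names(data)[1]); a minimal definition for reference (rlang ships an
## equivalent operator):
`%||%` <- function(a, b) if (is.null(a)) b else a
NULL %||% "first_column"      # returns "first_column"
"Sample" %||% "first_column"  # returns "Sample"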
## ui.R
#install.packages('tm')
library(shiny)
library(shinydashboard)
library(recommenderlab)
library(data.table)
library(ShinyRatingInput)
library(shinyjs)
source('functions/helpers.R')
shinyUI(
dashboardPage(
skin = "blue",
dashboardHeader(title = "Movie Recommender"),
dashboardSidebar(
sidebarMenu(
menuItem(tabName = "genre", "By Movie Genre",icon = icon("film")),
menuItem(tabName = "rate", "By Movie Ratings",icon = icon("film")))),
dashboardBody(includeCSS("css/movies.css"),
tabItems(
tabItem(
tabName = "genre",
fluidRow(
            p("Select at least 3 movie genres") # static helper text; textOutput() expects an output ID
),
fluidRow(
box(width = 12, title = "Select movie genre for recommendation", status = "info", solidHeader = TRUE, collapsible = TRUE,
div(selectInput("genrelist", "",
c("Action","Adventure", "Animation", "Children",
"Comedy", "Crime","Documentary", "Drama", "Fantasy",
"Film-Noir", "Horror", "Musical", "Mystery","Romance",
"Sci-Fi", "Thriller", "War", "Western"),width = 400)))
),
fluidRow(
useShinyjs(),
box(width = 12, title = "Movies we think you might like ", status = "info", solidHeader = TRUE,
#verbatimTextOutput("verb1"),
#verbatimTextOutput("verb2"),
withBusyIndicatorUI(
actionButton("btn1", "Click here to get your recommendations", class = "btn-warning")),
br(),
tableOutput("results1"),
br()
)
)),
tabItem(
tabName = "rate",
fluidRow(
box(width = 12, title = "Step 1: Rate as many movies as possible", status = "info", solidHeader = TRUE, collapsible = TRUE,
div(class = "rateitems",
uiOutput('ratings')
)
)
),
fluidRow(
useShinyjs(),
box(
width = 12, status = "info", solidHeader = TRUE,
title = "Step 2: Discover movies you might like",
br(),
withBusyIndicatorUI(
actionButton("btn2", "Click here to get your recommendations", class = "btn-warning")
),
br(),
tableOutput("results")
)
)
)
)
)
)
)
|
/ShinyApp/ui.R
|
no_license
|
bsathyamur/MovieRecommender
|
R
| false
| false
| 3,199
|
r
|
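## The ui.R above follows the standard shinydashboard skeleton; stripped down to its
## three building blocks (header, sidebar, body) it looks like this sketch:
library(shiny)
library(shinydashboard)
ui <- dashboardPage(
  dashboardHeader(title = "Demo"),
  dashboardSidebar(sidebarMenu(menuItem("First tab", tabName = "first"))),
  dashboardBody(tabItems(tabItem(tabName = "first", h2("Tab content goes here"))))
)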
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tools.R
\name{perform_enrichGO}
\alias{perform_enrichGO}
\title{Perform a clusterProfiler enrichGO analysis}
\usage{
perform_enrichGO(
ontology,
entrezgenes,
background_genes,
use_background,
species
)
}
\arguments{
\item{ontology}{One of "BP", "CC", or "MF"}
\item{entrezgenes}{List of entrezgenes to use for GO analysis}
\item{use_background}{use specified background genes}
\item{species}{Species to use for GO analysis, either "HUM" or "MUS"}
\item{background_genes}{List of Entrez background genes}
}
\description{
Perform a clusterProfiler enrichGO analysis
}
|
/man/perform_enrichGO.Rd
|
permissive
|
paulklemm/mygo
|
R
| false
| true
| 660
|
rd
|
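## A hypothetical call matching the usage block above; the gene IDs are placeholder human
## Entrez IDs and the argument values are illustrative only (requires the package that
## defines perform_enrichGO, e.g. mygo, plus clusterProfiler and a matching OrgDb):
library(mygo)  # assumed to be installed from paulklemm/mygo
perform_enrichGO(
  ontology = "BP",
  entrezgenes = c("1017", "1018", "4171"),
  background_genes = NULL,
  use_background = FALSE,
  species = "HUM"
)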
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mk_subread_index.R
\name{mk_subread_index}
\alias{mk_subread_index}
\title{Make a Subread index}
\usage{
mk_subread_index(ref_lib, split = 4, mem = 8000)
}
\arguments{
\item{ref_lib}{The name/location of the reference library file, in
(uncompressed) .fasta format}
\item{split}{The maximum allowed size of the genome file (in GB). If the
ref_lib file is larger than this, the function will split the library into
multiple parts}
\item{mem}{The maximum amount of memory (in MB) that can be used by the
index generation process (used by the Rsubread::buildindex function)}
}
\value{
Returns one or more Subread indexes for the supplied reference
.fasta file. If multiple indexes are created, the libraries will be
named the ref_lib basename plus _1, _2, etc.
}
\description{
This function is a wrapper for the Rsubread::buildindex function. It will
generate one or more Subread indexes from a .fasta file. If the library is
too large (default >4GB) it will automatically be split into multiple
indexes, with _1, _2, etc at the end of the ref_lib basename.
}
\examples{
## Download all RefSeq reference viral genomes and make an index
download_refseq('viral', compress = FALSE)
mk_subread_index('viral.fasta')
## Download all RefSeq reference viral genomes and make more than one index
download_refseq('viral', compress = FALSE)
mk_subread_index('viral.fasta', split = .0005)
}
|
/man/mk_subread_index.Rd
|
no_license
|
wevanjohnson/MetaScope
|
R
| false
| true
| 1,457
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/db-sqlar.R
\name{db.unsqlar}
\alias{db.unsqlar}
\title{Unpack a SQLite archive.}
\usage{
db.unsqlar(db, name, path, files)
}
\arguments{
\item{db}{The database connection. S4 object of class "database".}
\item{name}{The name of the SQLite archive table.}
\item{path}{The path to unpack the archive under.}
\item{files}{The files in the archive to extract. These need not be
complete names. For example, "file1" will be matched using wildcards
on either end. So if "file1" is a directory all files under it will be
extracted.}
}
\value{
None.
}
\description{
Unpack a SQLite archive.
}
\details{
This will read the sqlar table in the database and write out
its content into a filesystem hierarchy under \code{path}.
}
\note{
The \code{\link{sqlar_uncompress}} function must be registered
with the database connection for this function to work. This is done
automatically with \code{\link{db.open}}
}
|
/man/db.unsqlar.Rd
|
no_license
|
blueraleigh/db
|
R
| false
| true
| 980
|
rd
|
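## A hypothetical usage sketch for db.unsqlar based only on the arguments listed above;
## the database file name, the archive table name and the db.open() signature are assumptions,
## not confirmed API details:
db <- db.open("archive.db")   # per the note above, db.open() registers sqlar_uncompress
db.unsqlar(db, name = "sqlar", path = "extracted", files = "data/file1")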
head(iris)
plot(x = iris$Sepal.Length, y= iris$Sepal.Width, xlab = "Sepal Length", ylab = "Sepal Width", main = "scatter plot")
View(iris)
|
/rproject.R
|
no_license
|
knevola/myFirstGitTry
|
R
| false
| false
| 139
|
r
|
#' Estimate ANOVA decomposition-based variable importance.
#'
#' @inheritParams measure_accuracy
#' @param full fitted values from a regression function of the observed outcome
#' on the full set of covariates.
#' @param reduced fitted values from a regression on the reduced set of observed
#' covariates.
#'
#' @return A named list of: (1) the estimated ANOVA (based on a one-step
#' correction) of the fitted regression functions; (2) the estimated
#' influence function; (3) the naive ANOVA estimate; and (4) the IPC EIF
#' predictions.
#' @importFrom SuperLearner predict.SuperLearner SuperLearner
#' @export
measure_anova <- function(full, reduced, y, full_y = NULL,
C = rep(1, length(y)), Z = NULL,
ipc_weights = rep(1, length(y)),
ipc_fit_type = "external",
ipc_eif_preds = rep(1, length(y)),
ipc_est_type = "aipw", scale = "logit",
na.rm = FALSE, nuisance_estimators = NULL,
a = NULL, ...) {
if (is.null(full_y)) {
obs_mn_y <- mean(y, na.rm = na.rm)
} else {
obs_mn_y <- mean(full_y, na.rm = na.rm)
}
# add on if they aren't equal length
if (length(full) < length(reduced)) {
full <- c(full, rep(NA, length(reduced) - length(full)))
}
if (length(reduced) < length(full)) {
reduced <- c(reduced, rep(NA, length(reduced) - length(full)))
}
# compute the EIF: if there is coarsening, do a correction
if (!all(ipc_weights == 1)) {
# observed full-data EIF
obs_num <- mean(((full - reduced) ^ 2), na.rm = na.rm)
obs_var <- measure_mse(
fitted_values = rep(obs_mn_y, length(y)), y, na.rm = na.rm
)
obs_eif_num <- (2 * (y - full) * (full - reduced) +
(full - reduced) ^ 2 - obs_num)[C == 1]
obs_grad <- obs_eif_num / obs_var$point_est -
obs_num / (obs_var$point_est ^ 2) * obs_var$eif
# if IPC EIF preds aren't entered, estimate the regression
ipc_eif_preds <- estimate_eif_projection(obs_grad = obs_grad, C = C,
Z = Z, ipc_fit_type = ipc_fit_type,
ipc_eif_preds = ipc_eif_preds, ...)
weighted_obs_grad <- rep(0, length(C))
weighted_obs_grad[C == 1] <- obs_grad * ipc_weights
grad <- weighted_obs_grad - (C * ipc_weights - 1) * ipc_eif_preds
num <- mean((1 * ipc_weights[C == 1]) * ((full - reduced) ^ 2),
na.rm = na.rm)
denom <- mean((1 * ipc_weights[C == 1]) *
(y - mean(y, na.rm = na.rm)) ^ 2, na.rm = na.rm)
obs_est <- num / denom
if (ipc_est_type == "ipw") {
est <- scale_est(obs_est, rep(0, length(grad)), scale = scale)
} else {
est <- scale_est(obs_est, grad, scale = scale)
}
} else {
num <- mean((full - reduced) ^ 2, na.rm = na.rm)
var <- measure_mse(fitted_values = rep(obs_mn_y, length(y)), y,
na.rm = na.rm)
num_eif <- 2 * (y - full) * (full - reduced) +
(full - reduced) ^ 2 - num
grad <- num_eif / var$point_est - num / (var$point_est ^ 2) * var$eif
est <- num / var$point_est + mean(grad)
}
return(list(point_est = est, eif = grad, naive = num / var$point_est,
ipc_eif_preds = ipc_eif_preds))
}
|
/R/measure_anova.R
|
permissive
|
bdwilliamson/vimp
|
R
| false
| false
| 3,615
|
r
|
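## The plug-in (naive) quantity behind measure_anova above is mean((full - reduced)^2)
## divided by the plug-in variance of y; a toy illustration with made-up fitted values
## (the one-step/EIF correction is what the function adds on top of this):
set.seed(1)
y <- rnorm(100, mean = 2)
full <- y + rnorm(100, sd = 0.5)   # stand-in for fitted values from the full regression
reduced <- rep(mean(y), 100)       # stand-in for fitted values from the reduced regression
mean((full - reduced)^2) / mean((y - mean(y))^2)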
## in this script we will get the novel intragenic transcribed regions for the neuro genes
## load("data/expr/rawCounts/genic/fullExExJun.rda")
## load libraries and data
library(devtools)
setwd("C:/Users/mguelfi/projectsR/eQTLPipeline/")
load_all()
library(R.utils)
path <- readWindowsShortcut("data.lnk", verbose=FALSE)
setwd(dirname(path$networkPathname))
rm(path)
## load neuro genes passed by Jana
gene <- read.delim("data/general/NP4.6c.raw.n.bed")
gene <- unique(gene$LOCUS)
library(biomaRt)
ensembl <- useMart(biomart="ENSEMBL_MART_ENSEMBL",host="Jun2013.archive.ensembl.org",
dataset="hsapiens_gene_ensembl")
head(listAttributes(ensembl))
grep("name",head(listFilters(ensembl),300))
geneNames <- getBM(attributes=c("ensembl_gene_id","external_gene_id","chromosome_name","start_position","end_position","gene_biotype"),
verbose = T,
filters="hgnc_symbol",
values=gene, mart=ensembl)
## remove all the LRG genes
geneNames <- geneNames[-which(geneNames$gene_biotype %in% "LRG_gene"),]
geneNames <- geneNames[order(geneNames$chromosome_name),]
## load the genes that have at least one overlapping gene
load("data/general/overlappingGenes.rda")
neuroNonOveGen <- geneNames
## remove the non-autosomal and the patch genes; we don't have coverage for them
neuroNonOveGen <- neuroNonOveGen[-c(147:158),]
# neuroNonOveGen <- geneNames[-which(geneNames[,1] %in% rownames(as.data.frame(listNonOve))),]
# neuroNonOveGen <- neuroNonOveGen[order(neuroNonOveGen$chromosome_name),]
novelTransRegion(neuroNonOveGen[3,],ensembl,10,"PUTM")
library(doParallel)
library(foreach)
library(GenomicRanges)
detectCores()
## [1] 24
# create the cluster with the functions needed to run
cl <- makeCluster(5)
clusterExport(cl, c("novelTransRegion","getBM","subsetByOverlaps"))
registerDoParallel(cl)
getDoParWorkers()
## remove gene that comes from the patch HG987_PATCH - gene KCNJ12
## neuroNonOveGen <- neuroNonOveGen[c(-58),]
# neuroNonOveGen <- neuroNonOveGen[-c(58,59,60,61,62),]
start <- Sys.time()
novelRegions <- foreach(i=1:nrow(neuroNonOveGen),.combine=rbind,.verbose=F)%dopar%novelTransRegion(neuroNonOveGen[i,],ensembl,10,"PUTM")
##exonicRegions <- foreach(i=1:20,.combine=rbind,.verbose=F)%dopar%getRegionsBED(geneIDs[i],exonsdef)
end <- Sys.time()
end-start
stopCluster(cl)
rm(cl)
neuroGenes <- getBM(attributes=c("ensembl_gene_id","external_gene_id","gene_biotype","source","status"),
verbose = T,
filters="ensembl_gene_id",
values=unlist(lapply(novelRegions,function(x){return(x$gene)})), mart=ensembl)
rownames(novelRegions) <- novelRegions[,1]
novelRegions <- novelRegions[,-1]
rownames(neuroGenes) <- neuroGenes[,1]
neuroGenes <- neuroGenes[,-1]
neuroGenes <- neuroGenes[rownames(novelRegions),]
identical(rownames(neuroNonOveGen),rownames(neuroGenes))
neuroGenes <- cbind(novelRegions,neuroGenes)
neuroGenes$overlapGene <- FALSE
neuroGenes$overlapGene[which(rownames(neuroGenes) %in% rownames(as.data.frame(listNonOve)))] <- TRUE
save(neuroGenes,file="data/results/novelIntragenicRegions.PUTM.rda")
### SNIG
rm(neuroGenes,novelRegions)
## re-create the parallel cluster (it was stopped after the PUTM run above)
cl <- makeCluster(5)
clusterExport(cl, c("novelTransRegion","getBM","subsetByOverlaps"))
registerDoParallel(cl)
start <- Sys.time()
novelRegions <- foreach(i=1:nrow(neuroNonOveGen),.combine=rbind,.verbose=F)%dopar%novelTransRegion(neuroNonOveGen[i,],ensembl,10,tissue="SNIG")
##exonicRegions <- foreach(i=1:20,.combine=rbind,.verbose=F)%dopar%getRegionsBED(geneIDs[i],exonsdef)
end <- Sys.time()
end-start
stopCluster(cl)
rm(cl)
neuroGenes <- getBM(attributes=c("ensembl_gene_id","external_gene_id","gene_biotype","source","status"),
verbose = T,
filters="ensembl_gene_id",
values=novelRegions[,1], mart=ensembl)
rownames(novelRegions) <- novelRegions[,1]
novelRegions <- novelRegions[,-1]
rownames(neuroGenes) <- neuroGenes[,1]
neuroGenes <- neuroGenes[,-1]
neuroGenes <- neuroGenes[rownames(novelRegions),]
identical(rownames(neuroNonOveGen),rownames(neuroGenes))
neuroGenes <- cbind(novelRegions,neuroGenes)
neuroGenes$overlapGene <- FALSE
neuroGenes$overlapGene[which(rownames(neuroGenes) %in% rownames(as.data.frame(listNonOve)))] <- TRUE
save(neuroGenes,file="data/results/novelIntragenicRegions.SNIG.rda")
neuroGenes[which(neuroGenes$external_gene_id %in% "APP"),]
## we now try to see whether we could predict new regions checking in a recent version of ensembl
cl <- makeCluster(5)
clusterExport(cl, c("novelTransRegion","getBM","subsetByOverlaps"))
registerDoParallel(cl)
getDoParWorkers()
ensemblv75 <- useMart(biomart="ENSEMBL_MART_ENSEMBL",host="Feb2014.archive.ensembl.org",
dataset="hsapiens_gene_ensembl")
start <- Sys.time()
novelRegionsv75 <- foreach(i=1:nrow(neuroNonOveGen),.combine=rbind,.verbose=F)%dopar%novelTransRegion(neuroNonOveGen[i,],ensemblv75)
##exonicRegions <- foreach(i=1:20,.combine=rbind,.verbose=F)%dopar%getRegionsBED(geneIDs[i],exonsdef)
end <- Sys.time()
end-start
sum(unlist(novelRegionsv75[,3]),na.rm=T)
stopCluster(cl)
rm(cl)
load("data/general/sampleInfo.rda")
PUTM <- sampleInfo[which(sampleInfo$U.Region_simplified =="PUTM"),]
IDs=PUTM$A.CEL_file
plotReadDepth(gene=neuroNonOveGen[i,1],ensembl=ensembl,IDs=IDs)
ensemblv75 <- useMart(biomart="ENSEMBL_MART_ENSEMBL",host="Feb2014.archive.ensembl.org",
dataset="hsapiens_gene_ensembl")
exonDef <- getBM(attributes=c("ensembl_gene_id","chromosome_name","ensembl_exon_id","exon_chrom_start","exon_chrom_end"),
verbose = T,
filters="ensembl_gene_id",
values=neuroNonOveGen[i,1], mart=ensemblv75)
exonDef <- GRanges(paste0("chr",exonDef[,2]), IRanges(exonDef[,4], exonDef[,5]))
## select the transcribed regions identified in the data
tmp <- subsetByOverlaps(expressedRegions$chr1$regions,
GRanges(paste0("chr",neuroNonOveGen[i,3]),
IRanges(neuroNonOveGen[i,4], neuroNonOveGen[i,5])))
nrow(as.data.frame(tmp))
table(countOverlaps(tmp[which(tmp$value>10),], exonDef)==0)
plotReadDepth(gene=neuroNonOveGen[4,1],ensembl=ensembl,IDs=IDs)
load_all()
load("data/general/sampleInfo.rda")
PUTM <- sampleInfo[which(sampleInfo$U.Region_simplified =="PUTM"),]
IDs=PUTM$A.CEL_file
plotReadDepth(gene="ENSG00000186868",ensembl=ensembl,IDs=IDs)
load("data/general/sampleInfo.rda")
SNIG <- sampleInfo[which(sampleInfo$U.Region_simplified =="SNIG"),]
IDs=SNIG$A.CEL_file
plotReadDepth(gene="ENSG00000186868",ensembl=ensembl,IDs=IDs)
head(novelRegions,20)
### we now try to identify the actual regions:
load(file="data/results/novelIntragenicRegions.SNIG.rda")
head(neuroGenes)
ensembl <- useMart(biomart="ENSEMBL_MART_ENSEMBL",host="Jun2013.archive.ensembl.org",
dataset="hsapiens_gene_ensembl")
tissue <- "SNIG"
##select only the genes that have potentially novel intragenic transcribed regions
neuroGenesTmp <- neuroGenes[which(neuroGenes$coveredNoAnn.NA>0),]
library(doParallel)
library(foreach)
library(GenomicRanges)
detectCores()
## [1] 24
# create the cluster with the functions needed to run
cl <- makeCluster(1)
clusterExport(cl, c("getUnAnnotatedRegions","getBM","subsetByOverlaps","GRanges","countOverlaps"))
registerDoParallel(cl)
getDoParWorkers()
start <- Sys.time()
novelIntraRegion <- foreach(i=1:nrow(neuroGenesTmp),.verbose=F)%dopar%getUnAnnotatedRegions(rownames(neuroGenesTmp)[i],ensembl,"SNIG")
##exonicRegions <- foreach(i=1:20,.combine=rbind,.verbose=F)%dopar%getRegionsBED(geneIDs[i],exonsdef)
end <- Sys.time()
end-start
stopCluster(cl)
rm(cl)
novelIntraRegion <- GRangesList(novelIntraRegion)
names(novelIntraRegion) <- rownames(novelIntraRegion)
save(novelIntraRegion,file="data/results/novelIntragenicRegionsLoc.SNIG.rda")
getwd()
|
/transcribedRegions.R
|
no_license
|
gughi/eQTLPipeline
|
R
| false
| false
| 7,739
|
r
|
## in this script we will get the
## load("data/expr/rawCounts/genic/fullExExJun.rda")
## load libraries and data
library(devtools)
setwd("C:/Users/mguelfi/projectsR/eQTLPipeline/")
load_all()
library(R.utils)
path <- readWindowsShortcut("data.lnk", verbose=FALSE)
setwd(dirname(path$networkPathname))
rm(path)
## load neuro genes passed by Jana
gene <- read.delim("data/general/NP4.6c.raw.n.bed")
gene <- unique(gene$LOCUS)
library(biomaRt)
ensembl <- useMart(biomart="ENSEMBL_MART_ENSEMBL",host="Jun2013.archive.ensembl.org",
dataset="hsapiens_gene_ensembl")
head(listAttributes(ensembl))
grep("name",head(listFilters(ensembl),300))
geneNames <- getBM(attributes=c("ensembl_gene_id","external_gene_id","chromosome_name","start_position","end_position","gene_biotype"),
verbose = T,
filters="hgnc_symbol",
values=gene, mart=ensembl)
## remove all the LRG genes
geneNames <- geneNames[-which(geneNames$gene_biotype %in% "LRG_gene"),]
geneNames <- geneNames[order(geneNames$chromosome_name),]
## load the genes that have at least one overlapping gene
load("data/general/overlappingGenes.rda")
neuroNonOveGen <- geneNames
## remove the non automosal and the patch genes, don't have the coverage on them
neuroNonOveGen <- neuroNonOveGen[-c(147:158),]
# neuroNonOveGen <- geneNames[-which(geneNames[,1] %in% rownames(as.data.frame(listNonOve))),]
# neuroNonOveGen <- neuroNonOveGen[order(neuroNonOveGen$chromosome_name),]
novelTransRegion(neuroNonOveGen[3,],ensembl,10,"PUTM")
library(doParallel)
library(foreach)
library(GenomicRanges)
detectCores()
## [1] 24
# create the cluster with the functions needed to run
cl <- makeCluster(5)
clusterExport(cl, c("novelTransRegion","getBM","subsetByOverlaps"))
registerDoParallel(cl)
getDoParWorkers()
## remove gene that comes from the patch HG987_PATCH - gene KCNJ12
## neuroNonOveGen <- neuroNonOveGen[c(-58),]
# neuroNonOveGen <- neuroNonOveGen[-c(58,59,60,61,62),]
start <- Sys.time()
novelRegions <- foreach(i=1:nrow(neuroNonOveGen),.combine=rbind,.verbose=F)%dopar%novelTransRegion(neuroNonOveGen[i,],ensembl,10,"PUTM")
##exonicRegions <- foreach(i=1:20,.combine=rbind,.verbose=F)%dopar%getRegionsBED(geneIDs[i],exonsdef)
end <- Sys.time()
end-start
stopCluster(cl)
rm(cl)
neuroGenes <- getBM(attributes=c("ensembl_gene_id","external_gene_id","gene_biotype","source","status"),
verbose = T,
filters="ensembl_gene_id",
values=unlist(lapply(novelRegions,function(x){return(x$gene)})), mart=ensembl)
rownames(novelRegions) <- novelRegions[,1]
novelRegions <- novelRegions[,-1]
rownames(neuroGenes) <- neuroGenes[,1]
neuroGenes <- neuroGenes[,-1]
neuroGenes <- neuroGenes[rownames(novelRegions),]
identical(rownames(neuroNonOveGen),rownames(neuroGenes))
neuroGenes <- cbind(novelRegions,neuroGenes)
neuroGenes$overlapGene <- FALSE
neuroGenes$overlapGene[which(rownames(neuroGenes) %in% rownames(as.data.frame(listNonOve)))] <- TRUE
save(neuroGenes,file="data/results/novelIntragenicRegions.PUTM.rda")
### SNIG
rm(neuroGenes,novelRegions)
start <- Sys.time()
novelRegions <- foreach(i=1:nrow(neuroNonOveGen),.combine=rbind,.verbose=F)%dopar%novelTransRegion(neuroNonOveGen[i,],ensembl,10,tissue="SNIG")
##exonicRegions <- foreach(i=1:20,.combine=rbind,.verbose=F)%dopar%getRegionsBED(geneIDs[i],exonsdef)
end <- Sys.time()
end-start
stopCluster(cl)
rm(cl)
neuroGenes <- getBM(attributes=c("ensembl_gene_id","external_gene_id","gene_biotype","source","status"),
verbose = T,
filters="ensembl_gene_id",
values=novelRegions[,1], mart=ensembl)
rownames(novelRegions) <- novelRegions[,1]
novelRegions <- novelRegions[,-1]
rownames(neuroGenes) <- neuroGenes[,1]
neuroGenes <- neuroGenes[,-1]
neuroGenes <- neuroGenes[rownames(novelRegions),]
identical(rownames(neuroNonOveGen),rownames(neuroGenes))
neuroGenes <- cbind(novelRegions,neuroGenes)
neuroGenes$overlapGene <- FALSE
neuroGenes$overlapGene[which(rownames(neuroGenes) %in% rownames(as.data.frame(listNonOve)))] <- TRUE
save(neuroGenes,file="data/results/novelIntragenicRegions.SNIG.rda")
neuroGenes[which(neuroGenes$external_gene_id %in% "APP"),]
## we now try to see whether we could predict new regions checking in a recent version of ensembl
cl <- makeCluster(5)
clusterExport(cl, c("novelTransRegion","getBM","subsetByOverlaps"))
registerDoParallel(cl)
getDoParWorkers()
ensemblv75 <- useMart(biomart="ENSEMBL_MART_ENSEMBL",host="Feb2014.archive.ensembl.org",
dataset="hsapiens_gene_ensembl")
start <- Sys.time()
novelRegionsv75 <- foreach(i=1:nrow(neuroNonOveGen),.combine=rbind,.verbose=F)%dopar%novelTransRegion(neuroNonOveGen[i,],ensemblv75)
##exonicRegions <- foreach(i=1:20,.combine=rbind,.verbose=F)%dopar%getRegionsBED(geneIDs[i],exonsdef)
end <- Sys.time()
end-start
sum(unlist(novelRegionsv75[,3]),na.rm=T)
stopCluster(cl)
rm(cl)
load("data/general/sampleInfo.rda")
PUTM <- sampleInfo[which(sampleInfo$U.Region_simplified =="PUTM"),]
IDs=PUTM$A.CEL_file
plotReadDepth(gene=neuroNonOveGen[i,1],ensembl=ensembl,IDs=IDs)
ensemblv75 <- useMart(biomart="ENSEMBL_MART_ENSEMBL",host="Feb2014.archive.ensembl.org",
dataset="hsapiens_gene_ensembl")
exonDef <- getBM(attributes=c("ensembl_gene_id","chromosome_name","ensembl_exon_id","exon_chrom_start","exon_chrom_end"),
verbose = T,
filters="ensembl_gene_id",
values=neuroNonOveGen[i,1], mart=ensemblv75)
exonDef <- GRanges(paste0("chr",exonDef[,2]), IRanges(exonDef[,4], exonDef[,5]))
## select the transcribed regions identified in the data
tmp <- subsetByOverlaps(expressedRegions$chr1$regions,
GRanges(paste0("chr",neuroNonOveGen[i,3]),
IRanges(neuroNonOveGen[i,4], neuroNonOveGen[i,5])))
nrow(as.data.frame(tmp))
table(countOverlaps(tmp[which(tmp$value>10),], exonDef)==0)
plotReadDepth(gene=neuroNonOveGen[4,1],ensembl=ensembl,IDs=IDs)
load_all()
load("data/general/sampleInfo.rda")
PUTM <- sampleInfo[which(sampleInfo$U.Region_simplified =="PUTM"),]
IDs=PUTM$A.CEL_file
plotReadDepth(gene="ENSG00000186868",ensembl=ensembl,IDs=IDs)
load("data/general/sampleInfo.rda")
SNIG <- sampleInfo[which(sampleInfo$U.Region_simplified =="SNIG"),]
IDs=SNIG$A.CEL_file
plotReadDepth(gene="ENSG00000186868",ensembl=ensembl,IDs=IDs)
head(novelRegions,20)
### we now try to identify the actual regions:
load(file="data/results/novelIntragenicRegions.SNIG.rda")
head(neuroGenes)
ensembl <- useMart(biomart="ENSEMBL_MART_ENSEMBL",host="Jun2013.archive.ensembl.org",
dataset="hsapiens_gene_ensembl")
tissue <- "SNIG"
##select only the genes that have potentially novel intragenic transcribed regions
neuroGenesTmp <- neuroGenes[which(neuroGenes$coveredNoAnn.NA>0),]
library(doParallel)
library(foreach)
library(GenomicRanges)
detectCores()
## [1] 24
# create the cluster with the functions needed to run
cl <- makeCluster(1)
clusterExport(cl, c("getUnAnnotatedRegions","getBM","subsetByOverlaps","GRanges","countOverlaps"))
registerDoParallel(cl)
getDoParWorkers()
start <- Sys.time()
novelIntraRegion <- foreach(i=1:nrow(neuroGenesTmp),.verbose=F)%dopar%getUnAnnotatedRegions(rownames(neuroGenesTmp)[i],ensembl,"SNIG")
##exonicRegions <- foreach(i=1:20,.combine=rbind,.verbose=F)%dopar%getRegionsBED(geneIDs[i],exonsdef)
end <- Sys.time()
end-start
stopCluster(cl)
rm(cl)
novelIntraRegion <- GRangesList(novelIntraRegion)
## name each element after the gene it was computed for (rownames() returns NULL
## on a GRangesList, so take the names from the input table instead)
names(novelIntraRegion) <- rownames(neuroGenesTmp)
save(novelIntraRegion,file="data/results/novelIntragenicRegionsLoc.SNIG.rda")
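## Quick sanity check on the saved object (a sketch; assumes each element of the
## GRangesList holds the unannotated intervals returned for one gene):
length(novelIntraRegion)                       # genes with candidate novel regions
sum(sapply(novelIntraRegion, length))          # total number of unannotated intervals
head(as.data.frame(unlist(novelIntraRegion)))  # coordinates of the first few regions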
getwd()
##format NTAP data into fendR format
library(dplyr)
require(githubr)
#here is where we will store the files
par.id<-'syn8282028'
#drug sensitivity
ncat.file="https://raw.githubusercontent.com/sgosline/pnfCellLines/master/bin/ncatsSingleAgentScreens.R"
source(ncat.file)
this.file='https://raw.githubusercontent.com/Sage-Bionetworks/fendR/master/dev/formatDatasets/formatPlexiNFdata.R?token=ABwyOt9lkMgjvEtDEf408VHcaDvjCCUXks5Yt1v2wA%3D%3D'
targs<-ncatsDrugTargets()
colnames(targs)<-c("Phenotype","Gene")
write.table(targs,file='ncatsDrugTargetTidied.tsv',sep='\t',row.names=F,col.names=T)
aucs<-getValueForAllCells('TAUC')
aucs<-as.data.frame(aucs)
aucs$Phenotype<-rownames(aucs)
auc.vals<-tidyr::gather(aucs,Sample,Response,1:(ncol(aucs)-1))
lac50<-getValueForAllCells('LAC50')
lac50<-as.data.frame(lac50)
lac50$Phenotype<-rownames(lac50)
lac.vals<-tidyr::gather(lac50,Sample,Response,1:(ncol(lac50)-1))
maxr<-getValueForAllCells("MAXR")
maxr<-as.data.frame(maxr)
maxr$Phenotype<-rownames(maxr)
maxr.vals<-tidyr::gather(maxr,Sample,Response,1:(ncol(maxr)-1))
write.table(auc.vals,'ncatsNtapTaucTidied.tsv',sep='\t',row.names=F,col.names=T)
## write the gathered (long-format) tables, not the wide matrices, so all three
## "Tidied" files share the Phenotype/Sample/Response layout
write.table(lac.vals,'ncatsNtapLac50Tidied.tsv',sep='\t',row.names=F,col.names=T)
write.table(maxr.vals,'ncatsNtapMaxrTidied.tsv',sep='\t',row.names=F,col.names=T)
#now write all to directory and upload to synapse
#RNA-Seq
rna.seq.data<-read.table(synapser::synGet('syn7124098')$path,header=T,as.is=T)
rna.seq.data<-data.frame(rna.seq.data)
rna.seq.data$Gene<-rownames(rna.seq.data)
rna.seq<-tidyr::gather(rna.seq.data,Sample,Value,1:(ncol(rna.seq.data)-1))
rna.seq$Sample<-sapply(as.character(rna.seq$Sample),function(x) gsub("..mixed.clone."," (mixed clone)",gsub("..single.clone."," (single clone)",x,fixed=T),fixed=T))
write.table(rna.seq,'ntapRnaSeqTpmTidied.tsv',sep='\t',row.names=F,col.names=T)
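## Tiny runnable demo of the sample-name cleanup above (the column name used as
## input here is illustrative only):
gsub("..mixed.clone."," (mixed clone)",
     gsub("..single.clone."," (single clone)","ipNF95.11b..single.clone.",fixed=T),fixed=T)
## expected: "ipNF95.11b (single clone)"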
#mutation data
mut.data<-read.table(synapser::synGet('syn6174638')$path,sep='\t',header=T,as.is=T)
## keep protein-altering variant classes; fix the two labels that were missing
## their space ("stopgain SNV", "frameshift insertion")
red.mut.data<-subset(mut.data,KnownGeneExonFunction%in%c("nonsynonymous SNV","stoploss SNV","stopgain SNV","frameshift deletion","frameshift insertion"))
## count mutations per gene/cell line, then binarise into a 0/1 mutation matrix
md.mat<-reshape2::acast(red.mut.data,KnownGeneGeneName~CellLine,fun.aggregate=length)
md.mat[which(md.mat>0,arr.ind=T)]<-1
md.mat<-as.data.frame(md.mat)
md.mat$Gene<-rownames(md.mat)
md<-tidyr::gather(md.mat,Sample,Value,1:(ncol(md.mat)-1))
write.table(md,'ntapMutDataTidied.tsv',sep='\t',row.names=F,col.names=T)
file.list=c("ncatsDrugTargetTidied.tsv",'ncatsNtapTaucTidied.tsv','ncatsNtapLac50Tidied.tsv','ncatsNtapMaxrTidied.tsv','ntapRnaSeqTpmTidied.tsv','ntapMutDataTidied.tsv')
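## Optional pre-upload check (a sketch): confirm each tidied file parses and report
## its dimensions before pushing anything to Synapse.
for (file in file.list) {
  tab <- read.table(file, sep = '\t', header = T, quote = "", comment.char = "", as.is = T)
  message(file, ": ", nrow(tab), " rows x ", ncol(tab), " columns")
}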
for (file in file.list)
synapser::synStore(synapser::File(file,parentId=par.id),activity=synapser::Activity(used=ncat.file,executed=this.file))
## source: Sage-Bionetworks/fendR /dev/formatDatasets/formatPlexiNFdata.R (R, no license, 2,713 bytes)
plot_arh <- function(c,t,sp,symbol){
## grey bars: splicing probability per exon; red/blue lines: exon expression in
## control (C) and treatment (T), drawn against a secondary axis on the right
par(mar=c(5,4,4,5)+.1)
barplot(sp,col="grey",main=paste("Expression and splicing probability:",symbol),xlab="",ylab="",las=2,ylim=c(0,max(sp)+0.1))
legend("topleft",col=c("red","blue"),lty=1,legend=c("C","T"))
## overlay both expression profiles on a shared y-scale so the lines are comparable
ylim.expr <- range(c,t)
par(new=TRUE)
plot(c,type="b",col="red",xaxt="n",yaxt="n",xlab="",ylab="",ylim=ylim.expr)
par(new=TRUE)
plot(t,type="b",col="blue",xaxt="n",yaxt="n",xlab="",ylab="",ylim=ylim.expr)
mtext("barplot : splicing probability",side=2,line=3)
axis(4)
mtext("lines : exon expression",side=4,line=3)
}
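## A minimal usage sketch (values below are illustrative only; in practice c, t and
## sp would come from the exon-level expression and splicing-probability results):
ctrl   <- c(5.2,6.1,5.8,4.9,6.3,5.5)        # mean exon expression, control group
treat  <- c(5.1,6.0,2.2,4.8,6.2,5.4)        # mean exon expression, treatment group
spProb <- c(0.05,0.04,0.81,0.06,0.03,0.05)  # per-exon splicing probability
names(spProb) <- paste0("exon",1:6)
plot_arh(ctrl,treat,spProb,"EXAMPLE_GENE")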
## source: benhrif/Gene-Array-Alternative-Splicing-Detection-in-Galaxy /plot.R (R, no license, 513 bytes)
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/load_eupath_pkg.R
\name{load_eupath_pkg}
\alias{load_eupath_pkg}
\title{Loads a pkg into the current R environment.}
\usage{
load_eupath_pkg(name, webservice = "eupathdb")
}
\arguments{
\item{name}{Package name.}
\item{webservice}{From where to get the package name.}
}
\description{
Loads a pkg into the current R environment.
}
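% A usage sketch added for illustration: the organism and webservice values below
% are hypothetical and must correspond to a package actually generated for that
% webservice.
\examples{
\dontrun{
pkg <- load_eupath_pkg("Leishmania major strain Friedlin", webservice = "tritrypdb")
}
}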
% source: khughitt/EuPathDB /man/load_eupath_pkg.Rd (Rd, roxygen-generated, no license, 409 bytes)